hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c34679bf1e7909cfa7290e79c3a1b6a773fcca3 | 1,227 | py | Python | MultiAnalysis/lexical-analysis.py | Aczy156/compiler-theory-algorithm | fb8ab65a1315fb206bfa788038dbc61a96957ec9 | [
"MIT"
] | 6 | 2020-12-15T18:37:58.000Z | 2021-09-27T13:47:39.000Z | MultiAnalysis/lexical-analysis.py | Aczy156/Compiling-Principle-Work | fb8ab65a1315fb206bfa788038dbc61a96957ec9 | [
"MIT"
] | null | null | null | MultiAnalysis/lexical-analysis.py | Aczy156/Compiling-Principle-Work | fb8ab65a1315fb206bfa788038dbc61a96957ec9 | [
"MIT"
] | 3 | 2020-06-22T05:33:38.000Z | 2020-07-20T13:54:05.000Z | import re
# Token classification table: maps each recognised lexeme to its category
# code. Codes used here: 1 = keyword, 4 = operator, 5 = delimiter.
# (Codes 2 = identifier and 3 = number are assigned dynamically in solve().)
token_dict = {
    'int': 1, 'double': 1, 'string': 1, 'if': 1, 'else': 1, 'return': 1, 'main': 1, 'void': 1, 'while': 1, 'for': 1,
    'break': 1,
    '+': 4, '-': 4, '*': 4, '/': 4, '<': 4, '>': 4, '=': 4, '==': 4,
    ',': 5, ';': 5, '(': 5, ')': 5, '{': 5,
}
# invalid token
# NOTE(review): this list is never referenced anywhere else in the module —
# presumably intended for error reporting; confirm before relying on it.
invalid_token = [',', ';', '!', '(', ')']
def myprint(type, tk):
    """Print a (category, token) pair formatted as ('type','tk').

    Both values are coerced to str. NOTE: the parameter name ``type``
    shadows the builtin; it is kept for interface compatibility.
    """
    # f-string replaces the original error-prone quote concatenation;
    # output is byte-identical, e.g. ('1','int').
    print(f"('{type}','{tk}')")
def _clean_tokens(tokens):
    """Post-process raw split tokens: peel a trailing '{' off fused tokens
    (e.g. 'main{', 'for(){', 'if(){') and drop empty/whitespace entries.

    Assumes any matching token ends with '{' — TODO confirm for inputs
    where '){' / 'n{' occurs mid-token.
    """
    cleaned = []
    for tok in tokens:
        if '){' in tok or 'n{' in tok:
            cleaned.append(tok[:-1])
            cleaned.append('{')
        else:
            cleaned.append(tok)
    return [tok for tok in cleaned if tok not in ('', ' ', '\n')]


def solve(path="test2.txt"):
    """Lexically analyse the source file at *path* and print one
    (category, token) pair per token via myprint().

    Mapping: keywords/operators/delimiters come from token_dict,
    digit-only tokens are category 3, everything else category 2.
    The *path* parameter defaults to the original hard-coded file name,
    so existing callers are unaffected.
    """
    # 'with' guarantees the file handle is closed (original leaked it).
    with open(path) as fp:
        source = fp.read()
    # Split on separators; the capture group keeps each separator as its
    # own token. Duplicates from the original class were removed:
    # ';' ',' and NUL appeared twice, and \t \r \n are covered by \s.
    tokens = re.split(r'([;,\s&%\?\+\*\-/_:\(\)\0])', source)
    for tok in _clean_tokens(tokens):
        if token_dict.get(tok) is not None:
            myprint(token_dict.get(tok), tok)   # keyword / operator / delimiter
        elif tok.isdigit():
            myprint(3, tok)                     # numeric literal
        else:
            # TODO validate remaining words for lexical errors
            myprint(2, tok)                     # treated as identifier
# Run the lexer when executed as a script.
if __name__ == '__main__':
    solve()
| 24.54 | 116 | 0.431948 | import re
token_dict = {
'int': 1, 'double': 1, 'string': 1, 'if': 1, 'else': 1, 'return': 1, 'main': 1, 'void': 1, 'while': 1, 'for': 1,
'break': 1,
'+': 4, '-': 4, '*': 4, '/': 4, '<': 4, '>': 4, '=': 4, '==': 4,
',': 5, ';': 5, '(': 5, ')': 5, '{': 5,
}
invalid_token = [',', ';', '!', '(', ')']
def myprint(type, tk):
print('(\'' + str(type) + '\',\'' + str(tk) + '\')')
def solve():
s = open("test2.txt").read()
token = re.split('([;,\s&%\?\+\*;\-/_:,\(\)\t\000\r\n\0])', s)
data1 = []
for i in token:
if '){' in i or 'n{' in i:
data1.append(i[0:len(i) - 1]);
data1.append('{')
else:
data1.append(i)
data2 = [i for i in data1 if i not in ['', ' ', '\n']]
for i in data2:
if token_dict.get(i) is not None:
myprint(token_dict.get(i), i)
elif i.isdigit():
myprint(3, i)
else:
myprint(2, i)
if __name__ == '__main__':
solve()
| true | true |
1c3467f60c4ad981307c06f1b929e3d0ecb18b38 | 37,402 | py | Python | pkgs/conf-pkg/src/genie/libs/conf/rip/nxos/tests/test_rip.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/conf-pkg/src/genie/libs/conf/rip/nxos/tests/test_rip.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/conf-pkg/src/genie/libs/conf/rip/nxos/tests/test_rip.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | #!/usr/bin/env python
# Import unittest module
import unittest
from unittest.mock import Mock
from pyats.datastructures import WeakList
# And import what's needed
from genie.tests.conf import TestCase
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Link, Interface
from genie.conf.base.attributes import SubAttributesDict
from genie.libs.conf.rip import Rip
from genie.libs.conf.vrf import Vrf
from genie.libs.conf.address_family import AddressFamily
class test_rip(TestCase):
    def test_init(self):
        """Rip construction and multi-level attribute inheritance.

        Verifies that attributes set on the Rip object (or on a device
        level) are inherited by lower levels (vrf / address-family), and
        that unknown attributes and invalid AF keys raise.
        """
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=10)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        vrf = Vrf(name='myVrf')
        dev.add_feature(vrf)
        self.assertEqual(rip.instance_id, 10)
        self.assertTrue(isinstance(rip.device_attr, SubAttributesDict))
        self.assertTrue(isinstance(rip.device_attr['dev1'].vrf_attr[None].address_family_attr,
                                   SubAttributesDict))
        # Let's try multilevel: set at the top, read back at deeper levels
        rip.mega = 'work'
        rip.device_attr['myDevice'].value = 'success'
        rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 3
        rip.device_attr['myDevice'].vrf_attr['myVrf'].\
            address_family_attr['ipv6 unicast'].distance = 120
        self.assertEqual(rip.device_attr['myDevice'].vrf_attr['myVrf'].\
                         address_family_attr['ipv6 unicast'].distance, 120)
        # Top-level attribute is visible from every sub-level
        self.assertEqual(rip.mega, 'work')
        self.assertEqual(rip.device_attr['myDevice'].mega, 'work')
        self.assertEqual(rip.device_attr['fake'].mega, 'work')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].mega, 'work')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].mega, 'work')
        # Device-level attribute is visible from its AF sub-levels
        self.assertEqual(rip.device_attr['myDevice'].value, 'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].value,
            'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].value,
            'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths,
            3)
        # Attribute set only on a sub-level must not leak up to the parent
        with self.assertRaises(AttributeError):
            rip.value
        # Invalid / unsupported address-family keys raise
        with self.assertRaises(ValueError):
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv8'].value,'success'
        with self.assertRaises(KeyError):
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 flowspec'].value,'success'
        self.assertEqual(\
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths, None)
        # Test unknown argument which is not defined in rip object or its
        # parent
        with self.assertRaises(AttributeError):
            rip.device_attr['myDevice'].ff
    def test_cfg(self):
        """Full config generation: default build, then global ipv4/ipv6
        address-families plus a per-VRF (vrf1) address-family block.
        """
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        rip.device_attr['PE1']
        # Default (attribute-less) build renders the minimal rip config
        output = rip.build_config(apply=False)
        self.assertMultiLineDictEqual(output, {'PE1':
            'feature rip\n'
            'router rip 1\n'
            ' address-family ipv4 unicast\n'
            ' exit\n'
            ' exit'
            })
        # Attach rip to an interface in vrf1 so a vrf sub-section appears
        vrf1 = Vrf('vrf1')
        intf1 = Interface(device=dev, name='Ethernet0/0', vrf=vrf1)
        intf1.add_feature(rip)
        rip.address_families |= {AddressFamily.ipv6_unicast}
        rip.shutdown = False
        # Global (vrf None) ipv4 unicast attributes
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
            = 'rmap1'
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
            = 'rmap2'
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
            = 'rmap3'
        # Global (vrf None) ipv6 unicast attributes
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
            = 'rmap4'
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
            = 'rmap5'
        rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
            = 'rmap6'
        # vrf1-scoped ipv6 unicast attributes
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            maximum_paths = 10
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            default_metric = 7
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            distance = 127
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            redistribute_direct_rmap = 'rmap14'
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            redistribute_static_rmap = 'rmap15'
        rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
            redistribute_lisp_rmap = 'rmap16'
        # rip.build_config(apply=False)
        output = rip.build_config(apply=False)
        expected_output = {'PE1': '''\
router rip 1
no shutdown
address-family ipv4 unicast
default-metric 1
distance 120
maximum-paths 2
redistribute lisp route-map rmap3
redistribute direct route-map rmap1
redistribute static route-map rmap2
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap6
redistribute direct route-map rmap4
redistribute static route-map rmap5
exit
vrf vrf1
address-family ipv4 unicast
exit
address-family ipv6 unicast
default-metric 7
distance 127
maximum-paths 10
redistribute lisp route-map rmap16
redistribute direct route-map rmap14
redistribute static route-map rmap15
exit
exit
exit'''}
        self.maxDiff = None
        self.assertMultiLineDictEqual(output, expected_output)
        # Set a mock so apply=True does not need a real device connection
        dev.cli = Mock()
        dev.configure = Mock()
        dev.add_feature(rip)
        # Mock config
        output = rip.build_config(apply=True)
    def test_uncfg(self):
        """Unconfig: empty before the feature is attached, 'no router rip'
        after, and None when applied against a mocked device.
        """
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        # Default configuration, let's make sure it works
        output = rip.build_unconfig(apply=False)
        # There was nothing to unconfigure
        self.assertMultiLineDictEqual(output, {})
        dev.add_feature(rip)
        output = rip.build_unconfig(apply=False)
        self.assertMultiLineDictEqual(output, {'PE1': 'feature rip\nno router rip 1'})
        # Set a mock so apply=True does not need a real device connection
        dev.cli = Mock()
        dev.configure = Mock()
        # apply=True pushes the config and returns nothing
        output = rip.build_unconfig(apply=True)
        expected_output = None
        self.assertEqual(output, expected_output)
    def test_disable(self):
        """Unconfig of an attached rip removes the router instance."""
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        # Default configuration, let's make sure it works
        output = rip.build_unconfig(apply=False)
        self.assertMultiLineDictEqual(output, {
            'PE1':
                'feature rip\n'
                'no router rip 1'})
        # Set a mock so apply=True does not need a real device connection
        dev.cli = Mock()
        dev.configure = Mock()
        # apply=True pushes the config and returns nothing
        output = rip.build_unconfig(apply=True)
        expected_output = None
        self.assertEqual(output, expected_output)
    def test_disable_no_instance(self):
        """unconfig_feature=True removes the whole rip feature, not just
        the router instance.
        """
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        # Default configuration, let's make sure it works
        output = rip.build_unconfig(unconfig_feature=True, apply=False)
        self.assertMultiLineDictEqual(output, {'PE1': 'no feature rip'})
        # Set a mock so apply=True does not need a real device connection
        dev.cli = Mock()
        dev.configure = Mock()
        # apply=True pushes the config and returns nothing
        output = rip.build_unconfig(unconfig_feature=True, apply=True)
        expected_output = None
        self.assertEqual(output, expected_output)
    def test_remove_af(self):
        """Partial unconfig via a nested attributes dict removes a single
        address-family on one device only.
        """
        # Add a device to it
        tb = Genie.testbed = Testbed()
        dev1 = Device(testbed=tb, name='dev1', os='nxos')
        dev2 = Device(testbed=tb, name='dev2', os='nxos')
        rip = Rip(instance_id=5)
        rip.add_force_vrf(None)
        dev1.add_feature(rip)
        dev2.add_feature(rip)
        # Configure rip: distance only on dev1's global ipv4 unicast AF
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 5
        output = rip.build_config(apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' distance 5\n'
                     ' exit\n'
                     ' exit',
             'dev2': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' exit'})
        # Unconfig only dev1 / vrf None / ipv4 unicast (dict-style attributes)
        output = rip.build_unconfig(
            attributes={
                'device_attr': {
                    'dev1': {
                        'vrf_attr': {
                            None: {
                                'address_family_attr': {
                                    'ipv4 unicast': None}}}}}},
            apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'router rip 5\n no address-family ipv4 unicast\n exit'})
    def test_remove_vrf(self):
        """Partial unconfig via a string attributes path removes a whole
        vrf sub-section on one device only.
        """
        # Add a device to it
        tb = Genie.testbed = Testbed()
        dev1 = Device(testbed=tb, name='dev1', os='nxos')
        dev2 = Device(testbed=tb, name='dev2', os='nxos')
        vrf1 = Vrf(name='blue')
        # Attaching rip via interfaces in vrf 'blue' creates the vrf section
        intf1 = Interface(device=dev1, name='Ethernet0/0', vrf=vrf1)
        intf2 = Interface(device=dev2, name='Ethernet0/0', vrf=vrf1)
        rip = Rip(instance_id=5)
        rip.add_force_vrf(None)
        intf1.add_feature(rip)
        intf2.add_feature(rip)
        # Configure rip: distance only in dev1's vrf blue
        rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].distance = 5
        output = rip.build_config(apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' vrf blue\n'
                     ' address-family ipv4 unicast\n'
                     ' distance 5\n'
                     ' exit\n'
                     ' exit\n'
                     ' exit',
             'dev2': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' vrf blue\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' exit\n'
                     ' exit'})
        # Unconfig only dev1's vrf blue ('__'-separated attribute path)
        output = rip.build_unconfig(\
            attributes='device_attr__dev1__vrf_attr__blue',
            apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'router rip 5\n no vrf blue\n exit'})
    def test_remove_vrf_af(self):
        """Partial unconfig of one address-family inside one vrf on one
        device, addressed with a string attributes path.
        """
        # Add a device to it
        tb = Genie.testbed = Testbed()
        dev1 = Device(testbed=tb, name='dev1', os='nxos')
        dev2 = Device(testbed=tb, name='dev2', os='nxos')
        vrf1 = Vrf(name='blue')
        # Only dev1 gets an interface in vrf blue
        intf1 = Interface(device=dev1, name='Ethernet0/0', vrf=vrf1)
        rip = Rip(instance_id=5)
        rip.add_force_vrf(None)
        dev1.add_feature(rip)
        dev2.add_feature(rip)
        intf1.add_feature(rip)
        # Configure rip: distance only in dev1's vrf blue
        rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].distance = 5
        output = rip.build_config(apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' vrf blue\n'
                     ' address-family ipv4 unicast\n'
                     ' distance 5\n'
                     ' exit\n'
                     ' exit\n'
                     ' exit',
             'dev2': 'feature rip\n'
                     'router rip 5\n'
                     ' address-family ipv4 unicast\n'
                     ' exit\n'
                     ' exit'})
        # Unconfig only dev1 / vrf blue / ipv4 unicast
        output = rip.build_unconfig(\
            attributes='device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4 unicast',
            apply=False)
        self.assertMultiLineDictEqual(output,
            {'dev1': 'router rip 5\n'
                     ' vrf blue\n'
                     ' no address-family ipv4 unicast\n'
                     ' exit\n'
                     ' exit'})
    def test_deactivate_feature(self):
        """Default unconfig of an attached rip removes the router instance
        (same behavior as test_disable).
        """
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        # Default configuration, let's make sure it works
        output = rip.build_unconfig(apply=False)
        self.assertMultiLineDictEqual(output, {'PE1':
            'feature rip\n'
            'no router rip 1'
            })
        # Set a mock so apply=True does not need a real device connection
        dev.cli = Mock()
        dev.configure = Mock()
        # apply=True pushes the config and returns nothing
        output = rip.build_unconfig(apply=True)
        expected_output = None
        self.assertEqual(output, expected_output)
    def test_enable_disable_device1(self):
        """rip.devices is weakly referenced: removing/deleting a device
        drops it from the feature's device list automatically.
        """
        tb = Genie.testbed = Testbed()
        dev1 = Device(testbed=tb, name='dev1', os='nxos')
        dev2 = Device(testbed=tb, name='dev2', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev1.add_feature(rip)
        dev2.add_feature(rip)
        # Verify weaklist property
        self.assertEqual(len(rip.devices), 2)
        tb.remove_device(dev1)
        del dev1  # last strong ref gone -> weak list shrinks
        self.assertEqual(len(rip.devices), 1)
    def test_multi_device_configuration(self):
        """Per-device attribute overrides: the same Rip feature renders
        different ipv4/ipv6 AF settings on dev1 and dev2.
        """
        tb = Genie.testbed = Testbed()
        dev1 = Device(testbed=tb, name='dev1', os='nxos')
        dev2 = Device(testbed=tb, name='dev2', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        # Mocks so apply=True at the end does not need real connections
        dev1.cli = Mock()
        dev1.configure = Mock()
        dev2.cli = Mock()
        dev2.configure = Mock()
        dev1.add_feature(rip)
        dev2.add_feature(rip)
        # Default configuration, let's make sure it works
        output = rip.build_config(apply=False)
        self.assertMultiLineDictEqual(output, {
            'dev1':
                'feature rip\n'
                'router rip 1\n'
                ' address-family ipv4 unicast\n'
                ' exit\n'
                ' exit',
            'dev2':
                'feature rip\n'
                'router rip 1\n'
                ' address-family ipv4 unicast\n'
                ' exit\n'
                ' exit'})
        rip.address_families |= {AddressFamily.ipv6_unicast}
        rip.shutdown = True
        # dev1 ipv4 unicast settings (rmap1-3)
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
            = 'rmap1'
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
            = 'rmap2'
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
            = 'rmap3'
        # dev1 ipv6 unicast settings (rmap4-6)
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
            = 'rmap4'
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
            = 'rmap5'
        rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
            = 'rmap6'
        # dev2 ipv4 unicast settings (distinct values / rmap names)
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 4
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 3
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 122
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
            = 'rmap_direct'
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
            = 'rmap_static'
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
            = 'rmap_lisp'
        # dev2 ipv6 unicast settings
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
            = 'rmap_direct_ipv6'
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
            = 'rmap_static_ipv6'
        rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
            = 'rmap_lisp_ipv6'
        output = rip.build_config(apply=False)
        expected_output = {'dev1': '''\
router rip 1
shutdown
address-family ipv4 unicast
default-metric 1
distance 120
maximum-paths 2
redistribute lisp route-map rmap3
redistribute direct route-map rmap1
redistribute static route-map rmap2
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap6
redistribute direct route-map rmap4
redistribute static route-map rmap5
exit
exit''',
                           'dev2': '''\
router rip 1
shutdown
address-family ipv4 unicast
default-metric 3
distance 122
maximum-paths 4
redistribute lisp route-map rmap_lisp
redistribute direct route-map rmap_direct
redistribute static route-map rmap_static
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap_lisp_ipv6
redistribute direct route-map rmap_direct_ipv6
redistribute static route-map rmap_static_ipv6
exit
exit'''}
        self.maxDiff = None
        self.assertMultiLineDictEqual(output, expected_output)
        # Push against the mocked devices
        output = rip.build_config(apply=True)
def test_no_device_configuration(self):
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
# Default configuration, let's make sure it works
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {})
rip.shutdown = False
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap1'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap2'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap3'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap4'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap5'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 4
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 122
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap_direct'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap_static'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap_direct_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap_static_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp_ipv6'
expected_output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {})
output = rip.build_config(apply=True)
expected_output = None
self.assertEqual(output, expected_output)
def test_modify_configurations_nothing_configured(self):
'''Nothing is configured on this rip'''
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
output = rip.build_config(apply=False)
# Nothing should happen, no device was given
self.assertMultiLineDictEqual(output, {})
def test_modify_configuration_first_level(self):
# Add a device to it
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=5)
rip.add_force_vrf(None)
dev1.add_feature(rip)
dev2.add_feature(rip)
# Can either confgiure via kwargs, or attributes
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit',
'dev2':
'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit',
})
self.assertEqual(rip.device_attr['dev1'].shutdown, None)
self.assertEqual(rip.device_attr['dev2'].shutdown, None)
rip.shutdown = False
output = rip.build_config(attributes='device_attr__dev1__shutdown', apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'router rip 5\n'
' no shutdown\n'
' exit',
})
rip.shutdown = False
output = rip.build_config(attributes='device_attr__*__shutdown', apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'router rip 5\n'
' no shutdown\n'
' exit',
'dev2':
'router rip 5\n'
' no shutdown\n'
' exit',
})
# XXXJST
# output = rip.build_config(shutdown=False, apply=False)
# self.assertMultiLineDictEqual(output, {'dev1':'router rip 5\n no shutdown',
# 'dev2':'router rip 5\n no shutdown'})
#
# self.assertEqual(rip.device_attr['dev1'].shutdown, False)
# self.assertEqual(rip.device_attr['dev2'].shutdown, False)
#
# # Rest are all into a vrf
# # Let's try without a af , vrf/af
# output = rip.build_config(maximum_paths=3, apply=False)
# self.assertMultiLineDictEqual(output, {'dev1':'',
# 'dev2':''})
#
# # Let's add an af
# rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(maximum_paths=3, apply=False)
# self.assertMultiLineDictEqual(output, {'dev1':'router rip 5\n address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit',
# 'dev2':'router rip 5\n address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit'})
#
# # Mix both together
# output = rip.build_config(maximum_paths=3, shutdown=True, apply=False)
# self.assertMultiLineDictEqual(output, {'dev1':'router rip 5\n shutdown\n '
# 'address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit',
# 'dev2':'router rip 5\n shutdown\n '
# 'address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit'})
#
# # Do the same for vrf now
# rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev2'].vrf_attr['orange'].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(maximum_paths=3, shutdown=False, apply=False)
# self.maxDiff = None
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' no shutdown\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' exit',
# 'dev2': 'router rip 5\n'
# ' no shutdown\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' vrf orange\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' exit'})
#
# # Now test all the fields
#
# output = rip.build_config(maximum_paths=2, default_metric=1,
# distance=120,
# redistribute_direct_rmap='rmap1',
# redistribute_static_rmap='rmap2',
# redistribute_lisp_rmap='rmap3', apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit',
# 'dev2': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' vrf orange\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit'})
#
# # Now test all the fields with None
#
# output = rip.build_unconfig(maximum_paths=True, default_metric=True,
# distance=True,
# redistribute_direct_rmap=True,
# redistribute_static_rmap=True,
# redistribute_lisp_rmap=True, shutdown=True,
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' shutdown\n'
# ' address-family ipv4 unicast\n'
# ' no default-metric\n'
# ' no distance\n'
# ' no maximum-paths\n'
# ' no redistribute lisp route-map rmap3\n'
# ' no redistribute direct route-map rmap1\n'
# ' no redistribute static route-map rmap2\n'
# ' exit\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' no default-metric\n'
# ' no distance\n'
# ' no maximum-paths\n'
# ' no redistribute lisp route-map rmap3\n'
# ' no redistribute direct route-map rmap1\n'
# ' no redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit',
# 'dev2': 'router rip 5\n'
# ' shutdown\n'
# ' address-family ipv4 unicast\n'
# ' no default-metric\n'
# ' no distance\n'
# ' no maximum-paths\n'
# ' no redistribute lisp route-map rmap3\n'
# ' no redistribute direct route-map rmap1\n'
# ' no redistribute static route-map rmap2\n'
# ' exit\n'
# ' vrf orange\n'
# ' address-family ipv4 unicast\n'
# ' no default-metric\n'
# ' no distance\n'
# ' no maximum-paths\n'
# ' no redistribute lisp route-map rmap3\n'
# ' no redistribute direct route-map rmap1\n'
# ' no redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit'})
# XXXJST
# def test_modify_configuration_many_level(self):
#
# # Add a device to it
# tb = Genie.testbed = Testbed()
# dev1 = Device(testbed=tb, name='dev1', os='nxos')
# dev2 = Device(testbed=tb, name='dev2', os='nxos')
# rip = Rip(instance_id=5)
# rip.add_force_vrf(None)
# dev1.add_feature(rip)
# dev2.add_feature(rip)
#
# output = rip.build_config(device_attr__dev1__shutdown=False,
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1':'feature rip\nrouter rip 5\n no shutdown',
# 'dev2':'feature rip\nrouter rip 5'})
#
# # Does not exists
# with self.assertRaises(AttributeError):
# output = rip.build_config(test__dev1__shutdown=False,
# apply=False)
#
# self.assertEqual(rip.device_attr['dev1'].shutdown, False)
# self.assertFalse(hasattr(rip.device_attr['dev2'],' shutdown'))
#
# # Let's add an af
# rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].create()
# rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit',
# 'dev2': ''})
#
# # Mix both together
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False)
#
# self.assertMultiLineDictEqual(output, {'dev1':'router rip 5\n no shutdown\n '
# 'address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit',
# 'dev2':'router rip 5\n no shutdown'})
#
# # What if both are the same !
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# maximum_paths=5, apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' address-family ipv6 unicast\n'
# ' maximum-paths 5\n'
# ' exit',
# 'dev2': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 5\n'
# ' exit'})
#
# # Do the same for vrf now
# rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev2'].vrf_attr['orange'].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' no shutdown\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' exit',
# 'dev2':'router rip 5\n no shutdown'})
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False, devices=[dev2])
# self.assertMultiLineDictEqual(output,
# {'dev2':'router rip 5\n no shutdown'})
# # Now test all the fields
# # Now test all the fields
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=2,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__default_metric=1,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__distance=120,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_direct_rmap='rmap1',
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_static_rmap='rmap2',
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_lisp_rmap='rmap3',
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit',
# 'dev2': ''})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 40.044968 | 115 | 0.594273 |
import unittest
from unittest.mock import Mock
from pyats.datastructures import WeakList
from genie.tests.conf import TestCase
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Link, Interface
from genie.conf.base.attributes import SubAttributesDict
from genie.libs.conf.rip import Rip
from genie.libs.conf.vrf import Vrf
from genie.libs.conf.address_family import AddressFamily
class test_rip(TestCase):
    def test_init(self):
        """Verify Rip construction and SubAttributesDict value inheritance.

        A value set on the top-level Rip object (or at a device level) must
        be readable from every nested device/vrf/address-family level, while
        never-set attributes and invalid address-family keys must raise.
        """
        # Minimal testbed: one NX-OS device carrying RIP and a VRF.
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=10)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        vrf = Vrf(name='myVrf')
        dev.add_feature(vrf)
        self.assertEqual(rip.instance_id, 10)
        # Each nesting level is exposed as a SubAttributesDict.
        self.assertTrue(isinstance(rip.device_attr, SubAttributesDict))
        self.assertTrue(isinstance(rip.device_attr['dev1'].vrf_attr[None].address_family_attr,
                                   SubAttributesDict))
        # Let's try multilevel: values set high in the hierarchy must be
        # visible from any deeper level.
        rip.mega = 'work'
        rip.device_attr['myDevice'].value = 'success'
        rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 3
        rip.device_attr['myDevice'].vrf_attr['myVrf'].\
            address_family_attr['ipv6 unicast'].distance = 120
        self.assertEqual(rip.device_attr['myDevice'].vrf_attr['myVrf'].\
            address_family_attr['ipv6 unicast'].distance, 120)
        # 'mega' was set on the Rip object itself: readable everywhere,
        # including device keys that were never explicitly created.
        self.assertEqual(rip.mega, 'work')
        self.assertEqual(rip.device_attr['myDevice'].mega, 'work')
        self.assertEqual(rip.device_attr['fake'].mega, 'work')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].mega, 'work')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].mega, 'work')
        # 'value' was set at the device level: inherited by that device's
        # address families.
        self.assertEqual(rip.device_attr['myDevice'].value, 'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].value,
            'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].value,
            'success')
        self.assertEqual(
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths,
            3)
        # An attribute never set anywhere in the hierarchy must not resolve.
        with self.assertRaises(AttributeError):
            rip.value
        # 'ipv8' fails address-family name validation with ValueError.
        with self.assertRaises(ValueError):
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv8'].value,'success'
        # 'ipv6 flowspec' parses as an AddressFamily but is rejected with
        # KeyError here — presumably not a RIP-supported AF (TODO confirm).
        with self.assertRaises(KeyError):
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 flowspec'].value,'success'
        # maximum_paths was only set for ipv4; ipv6 keeps its None default.
        self.assertEqual(\
            rip.device_attr['myDevice'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths, None)
        with self.assertRaises(AttributeError):
            rip.device_attr['myDevice'].ff
def test_cfg(self):
tb = Genie.testbed = Testbed()
dev = Device(testbed=tb, name='PE1', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
dev.add_feature(rip)
rip.device_attr['PE1']
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {'PE1':
'feature rip\n'
'router rip 1\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit'
})
vrf1 = Vrf('vrf1')
intf1 = Interface(device=dev, name='Ethernet0/0', vrf=vrf1)
intf1.add_feature(rip)
rip.address_families |= {AddressFamily.ipv6_unicast}
rip.shutdown = False
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap1'
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap2'
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap3'
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap4'
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap5'
rip.device_attr['PE1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap6'
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
maximum_paths = 10
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
default_metric = 7
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
distance = 127
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
redistribute_direct_rmap = 'rmap14'
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
redistribute_static_rmap = 'rmap15'
rip.device_attr['PE1'].vrf_attr['vrf1'].address_family_attr['ipv6 unicast'].\
redistribute_lisp_rmap = 'rmap16'
output = rip.build_config(apply=False)
expected_output = {'PE1': '''\
router rip 1
no shutdown
address-family ipv4 unicast
default-metric 1
distance 120
maximum-paths 2
redistribute lisp route-map rmap3
redistribute direct route-map rmap1
redistribute static route-map rmap2
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap6
redistribute direct route-map rmap4
redistribute static route-map rmap5
exit
vrf vrf1
address-family ipv4 unicast
exit
address-family ipv6 unicast
default-metric 7
distance 127
maximum-paths 10
redistribute lisp route-map rmap16
redistribute direct route-map rmap14
redistribute static route-map rmap15
exit
exit
exit'''}
self.maxDiff = None
self.assertMultiLineDictEqual(output, expected_output)
dev.cli = Mock()
dev.configure = Mock()
dev.add_feature(rip)
output = rip.build_config(apply=True)
def test_uncfg(self):
tb = Genie.testbed = Testbed()
dev = Device(testbed=tb, name='PE1', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
output = rip.build_unconfig(apply=False)
# There was nothing to unconfigure
self.assertMultiLineDictEqual(output, {})
dev.add_feature(rip)
output = rip.build_unconfig(apply=False)
self.assertMultiLineDictEqual(output, {'PE1': 'feature rip\nno router rip 1'})
# Set a mock
dev.cli = Mock()
dev.configure = Mock()
output = rip.build_unconfig(apply=True)
expected_output = None
self.assertEqual(output, expected_output)
def test_disable(self):
tb = Genie.testbed = Testbed()
dev = Device(testbed=tb, name='PE1', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
dev.add_feature(rip)
# Default configuration, let's make sure it works
output = rip.build_unconfig(apply=False)
self.assertMultiLineDictEqual(output, {
'PE1':
'feature rip\n'
'no router rip 1'})
dev.cli = Mock()
dev.configure = Mock()
output = rip.build_unconfig(apply=True)
expected_output = None
self.assertEqual(output, expected_output)
    def test_disable_no_instance(self):
        """unconfig_feature=True removes 'feature rip' rather than the instance."""
        tb = Genie.testbed = Testbed()
        dev = Device(testbed=tb, name='PE1', os='nxos')
        rip = Rip(instance_id=1)
        rip.add_force_vrf(None)
        dev.add_feature(rip)
        # With unconfig_feature=True the whole feature is disabled.
        output = rip.build_unconfig(unconfig_feature=True, apply=False)
        self.assertMultiLineDictEqual(output, {'PE1': 'no feature rip'})
        # Set a mock so apply=True does not need a real device connection.
        dev.cli = Mock()
        dev.configure = Mock()
        output = rip.build_unconfig(unconfig_feature=True, apply=True)
        expected_output = None
        self.assertEqual(output, expected_output)
def test_remove_af(self):
# Add a device to it
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=5)
rip.add_force_vrf(None)
dev1.add_feature(rip)
dev2.add_feature(rip)
# Configure rip
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 5
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' distance 5\n'
' exit\n'
' exit',
'dev2': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit'})
output = rip.build_unconfig(
attributes={
'device_attr': {
'dev1': {
'vrf_attr': {
None: {
'address_family_attr': {
'ipv4 unicast': None}}}}}},
apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'router rip 5\n no address-family ipv4 unicast\n exit'})
def test_remove_vrf(self):
# Add a device to it
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
vrf1 = Vrf(name='blue')
intf1 = Interface(device=dev1, name='Ethernet0/0', vrf=vrf1)
intf2 = Interface(device=dev2, name='Ethernet0/0', vrf=vrf1)
rip = Rip(instance_id=5)
rip.add_force_vrf(None)
intf1.add_feature(rip)
intf2.add_feature(rip)
# Configure rip
rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].distance = 5
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' vrf blue\n'
' address-family ipv4 unicast\n'
' distance 5\n'
' exit\n'
' exit\n'
' exit',
'dev2': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' vrf blue\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit\n'
' exit'})
output = rip.build_unconfig(\
attributes='device_attr__dev1__vrf_attr__blue',
apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'router rip 5\n no vrf blue\n exit'})
def test_remove_vrf_af(self):
# Add a device to it
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
vrf1 = Vrf(name='blue')
intf1 = Interface(device=dev1, name='Ethernet0/0', vrf=vrf1)
rip = Rip(instance_id=5)
rip.add_force_vrf(None)
dev1.add_feature(rip)
dev2.add_feature(rip)
intf1.add_feature(rip)
# Configure rip
rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].distance = 5
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' vrf blue\n'
' address-family ipv4 unicast\n'
' distance 5\n'
' exit\n'
' exit\n'
' exit',
'dev2': 'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit'})
output = rip.build_unconfig(\
attributes='device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4 unicast',
apply=False)
self.assertMultiLineDictEqual(output,
{'dev1': 'router rip 5\n'
' vrf blue\n'
' no address-family ipv4 unicast\n'
' exit\n'
' exit'})
def test_deactivate_feature(self):
tb = Genie.testbed = Testbed()
dev = Device(testbed=tb, name='PE1', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
dev.add_feature(rip)
# Default configuration, let's make sure it works
output = rip.build_unconfig(apply=False)
self.assertMultiLineDictEqual(output, {'PE1':
'feature rip\n'
'no router rip 1'
})
dev.cli = Mock()
dev.configure = Mock()
output = rip.build_unconfig(apply=True)
expected_output = None
self.assertEqual(output, expected_output)
def test_enable_disable_device1(self):
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
dev1.add_feature(rip)
dev2.add_feature(rip)
self.assertEqual(len(rip.devices), 2)
tb.remove_device(dev1)
del dev1
self.assertEqual(len(rip.devices), 1)
def test_multi_device_configuration(self):
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
dev1.cli = Mock()
dev1.configure = Mock()
dev2.cli = Mock()
dev2.configure = Mock()
dev1.add_feature(rip)
dev2.add_feature(rip)
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'feature rip\n'
'router rip 1\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit',
'dev2':
'feature rip\n'
'router rip 1\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit'})
rip.address_families |= {AddressFamily.ipv6_unicast}
rip.shutdown = True
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap1'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap2'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap3'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap4'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap5'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 4
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 122
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap_direct'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap_static'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap_direct_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap_static_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp_ipv6'
output = rip.build_config(apply=False)
expected_output = {'dev1': '''\
router rip 1
shutdown
address-family ipv4 unicast
default-metric 1
distance 120
maximum-paths 2
redistribute lisp route-map rmap3
redistribute direct route-map rmap1
redistribute static route-map rmap2
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap6
redistribute direct route-map rmap4
redistribute static route-map rmap5
exit
exit''',
'dev2': '''\
router rip 1
shutdown
address-family ipv4 unicast
default-metric 3
distance 122
maximum-paths 4
redistribute lisp route-map rmap_lisp
redistribute direct route-map rmap_direct
redistribute static route-map rmap_static
exit
address-family ipv6 unicast
default-metric 3
distance 120
maximum-paths 7
redistribute lisp route-map rmap_lisp_ipv6
redistribute direct route-map rmap_direct_ipv6
redistribute static route-map rmap_static_ipv6
exit
exit'''}
self.maxDiff = None
self.assertMultiLineDictEqual(output, expected_output)
output = rip.build_config(apply=True)
def test_no_device_configuration(self):
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
# Default configuration, let's make sure it works
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {})
rip.shutdown = False
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 2
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 1
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap1'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap2'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap3'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap4'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap5'
rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].maximum_paths = 4
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].distance = 122
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_direct_rmap\
= 'rmap_direct'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_static_rmap\
= 'rmap_static'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].maximum_paths = 7
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].default_metric = 3
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].distance = 120
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_direct_rmap\
= 'rmap_direct_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_static_rmap\
= 'rmap_static_ipv6'
rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv6 unicast'].redistribute_lisp_rmap\
= 'rmap_lisp_ipv6'
expected_output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {})
output = rip.build_config(apply=True)
expected_output = None
self.assertEqual(output, expected_output)
def test_modify_configurations_nothing_configured(self):
rip = Rip(instance_id=1)
rip.add_force_vrf(None)
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {})
def test_modify_configuration_first_level(self):
tb = Genie.testbed = Testbed()
dev1 = Device(testbed=tb, name='dev1', os='nxos')
dev2 = Device(testbed=tb, name='dev2', os='nxos')
rip = Rip(instance_id=5)
rip.add_force_vrf(None)
dev1.add_feature(rip)
dev2.add_feature(rip)
output = rip.build_config(apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit',
'dev2':
'feature rip\n'
'router rip 5\n'
' address-family ipv4 unicast\n'
' exit\n'
' exit',
})
self.assertEqual(rip.device_attr['dev1'].shutdown, None)
self.assertEqual(rip.device_attr['dev2'].shutdown, None)
rip.shutdown = False
output = rip.build_config(attributes='device_attr__dev1__shutdown', apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'router rip 5\n'
' no shutdown\n'
' exit',
})
rip.shutdown = False
output = rip.build_config(attributes='device_attr__*__shutdown', apply=False)
self.assertMultiLineDictEqual(output, {
'dev1':
'router rip 5\n'
' no shutdown\n'
' exit',
'dev2':
'router rip 5\n'
' no shutdown\n'
' exit',
})
s=3, apply=False)
# self.assertMultiLineDictEqual(output, {'dev1':'',
# 'dev2':''})
#
# # Let's add an af
vice_attr['dev1'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev1'].vrf_attr[None].address_family_attr['ipv6 unicast'].create()
# rip.device_attr['dev2'].vrf_attr[None].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit',
# 'dev2': ''})
#
# # Mix both together
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False)
#
# self.assertMultiLineDictEqual(output, {'dev1':'router rip 5\n no shutdown\n '
# 'address-family ipv4 '
# 'unicast\n maximum-paths 3\n exit',
# 'dev2':'router rip 5\n no shutdown'})
#
# # What if both are the same !
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__None__address_family_attr__ipv4__maximum_paths=3,
# maximum_paths=5, apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' address-family ipv6 unicast\n'
# ' maximum-paths 5\n'
# ' exit',
# 'dev2': 'router rip 5\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 5\n'
# ' exit'})
#
# # Do the same for vrf now
# rip.device_attr['dev1'].vrf_attr['blue'].address_family_attr['ipv4 unicast'].create()
# rip.device_attr['dev2'].vrf_attr['orange'].address_family_attr['ipv4 unicast'].create()
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' no shutdown\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' maximum-paths 3\n'
# ' exit\n'
# ' exit',
# 'dev2':'router rip 5\n no shutdown'})
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=3,
# shutdown=False, apply=False, devices=[dev2])
# self.assertMultiLineDictEqual(output,
# {'dev2':'router rip 5\n no shutdown'})
# # Now test all the fields
# # Now test all the fields
#
# output = rip.build_config(\
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__maximum_paths=2,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__default_metric=1,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__distance=120,
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_direct_rmap='rmap1',
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_static_rmap='rmap2',
# device_attr__dev1__vrf_attr__blue__address_family_attr__ipv4__redistribute_lisp_rmap='rmap3',
# apply=False)
# self.assertMultiLineDictEqual(output,
# {'dev1': 'router rip 5\n'
# ' vrf blue\n'
# ' address-family ipv4 unicast\n'
# ' default-metric 1\n'
# ' distance 120\n'
# ' maximum-paths 2\n'
# ' redistribute lisp route-map rmap3\n'
# ' redistribute direct route-map rmap1\n'
# ' redistribute static route-map rmap2\n'
# ' exit\n'
# ' exit',
# 'dev2': ''})
# Run this test module directly via the unittest test runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
1c34696f1cfdec0956bb16ca716e88db0d45eff0 | 45 | py | Python | agescx/utilities/__init__.py | dderevjanik/agescx | 32e1b11c7c4205a63a156b0014ec7143c0d0c13b | [
"MIT"
] | 15 | 2016-02-08T19:35:46.000Z | 2021-11-24T06:52:04.000Z | agescx/utilities/__init__.py | heinezen/agescx | 32e1b11c7c4205a63a156b0014ec7143c0d0c13b | [
"MIT"
] | 1 | 2016-01-03T02:54:46.000Z | 2016-01-03T02:54:46.000Z | agescx/utilities/__init__.py | heinezen/agescx | 32e1b11c7c4205a63a156b0014ec7143c0d0c13b | [
"MIT"
] | 5 | 2016-10-05T03:55:29.000Z | 2021-05-14T10:15:57.000Z | from .decoder import *
from .encoder import * | 22.5 | 22 | 0.755556 | from .decoder import *
from .encoder import * | true | true |
1c3469fafd50acdfdbefde198aacc4a1c9a4969b | 5,482 | py | Python | model/densenet169/model3_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | 3 | 2018-05-06T15:15:21.000Z | 2018-05-13T12:31:42.000Z | model/densenet169/model3_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | model/densenet169/model3_val1.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[16, 16, 16],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.0005, 0.00005, 0.000005],
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet169-based multi-label classifier.

    The DenseNet169 backbone (no top, global average pooling) is extended
    with Dense(256) -> BatchNorm -> ReLU and a sigmoid output layer, then
    compiled with binary cross-entropy and Adam.

    Args:
        freeze_layers: -1 freezes every backbone layer; a fraction in
            (0, 1) freezes that share of the backbone layers (floored);
            an integer >= 1 freezes exactly that many leading layers.
        lr: Adam learning rate.
        output_dim: number of sigmoid units (one per label).
        weights: initial backbone weights ("imagenet", or None when the
            caller loads a checkpoint afterwards).

    Returns:
        The compiled ``keras.Model``.
    """
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            # A fractional value means "freeze this share of the backbone".
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for layer in range(freeze_layers):
            # Bug fix: the Keras attribute is ``trainable``; the original
            # assigned a misspelled ``train_layer`` and froze nothing.
            base_model.layers[layer].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr))
    # model.summary()
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged fine-tuning schedule described by ``model_config``.

    Stage 0 starts from ImageNet weights; every later stage rebuilds the
    model with the stage's freeze/lr settings, reloads the previous stage's
    checkpoint and resumes training from that epoch. A daemon thread
    consumes evaluation requests pushed by the checkpoint callback.
    """
    # Background evaluator: the checkpoint callback enqueues work here.
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    tensorboard = keras_util.TensorBoardCallback(log_dir=model_config.record_dir, log_every=20,
                                                 model_config=model_config)
    start = time.time()
    print("####### start train model")
    for i in range(len(model_config.epoch)):
        print("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        # Cyclic LR oscillates between the stage lr and 5x that value.
        clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                          step_size=model_config.get_steps_per_epoch(i) / 2)
        train_flow = data_loader.KerasGenerator(model_config=model_config,
                                                featurewise_center=True,
                                                featurewise_std_normalization=True,
                                                width_shift_range=0.15,
                                                height_shift_range=0.1,
                                                horizontal_flip=True,
                                                real_transform=True,
                                                rescale=1. / 256). \
            flow_from_files(model_config.train_files,
                            mode="fit",
                            target_size=model_config.image_size,
                            batch_size=model_config.train_batch_size[i],
                            shuffle=True,
                            label_position=model_config.label_position)
        # Refactor: the original duplicated the model build and fit call in
        # an i == 0 / else pair; the only differences are the initial
        # weights, the checkpoint reload and the starting epoch.
        first_stage = (i == 0)
        model = get_model(freeze_layers=model_config.freeze_layers[i],
                          lr=model_config.lr[i],
                          output_dim=len(model_config.label_position),
                          weights="imagenet" if first_stage else None)
        if not first_stage:
            weights_path = model_config.get_weights_path(model_config.epoch[i - 1])
            print("####### load weight file: %s" % weights_path)
            model.load_weights(weights_path)
        model.fit_generator(generator=train_flow,
                            steps_per_epoch=model_config.get_steps_per_epoch(i),
                            epochs=model_config.epoch[i],
                            initial_epoch=0 if first_stage else model_config.epoch[i - 1],
                            workers=16,
                            verbose=0,
                            callbacks=[checkpoint, clr, tensorboard])
    print("####### train model spend %d seconds" % (time.time() - start))
    print("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| 48.513274 | 119 | 0.541955 | import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[16, 16, 16],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.0005, 0.00005, 0.000005],
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet169-based multi-label classifier.

    The DenseNet169 backbone (no top, global average pooling) is extended
    with Dense(256) -> BatchNorm -> ReLU and a sigmoid output layer, then
    compiled with binary cross-entropy and Adam.

    Args:
        freeze_layers: -1 freezes every backbone layer; a fraction in
            (0, 1) freezes that share of the backbone layers (floored);
            an integer >= 1 freezes exactly that many leading layers.
        lr: Adam learning rate.
        output_dim: number of sigmoid units (one per label).
        weights: initial backbone weights ("imagenet", or None when the
            caller loads a checkpoint afterwards).

    Returns:
        The compiled ``keras.Model``.
    """
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            # A fractional value means "freeze this share of the backbone".
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for layer in range(freeze_layers):
            # Bug fix: the Keras attribute is ``trainable``; the original
            # assigned a misspelled ``train_layer`` and froze nothing.
            base_model.layers[layer].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr))
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged fine-tuning schedule described by ``model_config``.

    Stage 0 trains from ImageNet weights; each later stage rebuilds the
    model with that stage's freeze/lr settings, loads the previous stage's
    checkpoint and resumes training from that epoch.
    """
    # Background evaluator thread: the checkpoint callback enqueues
    # evaluation requests, the daemon consumes them off-thread.
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    tensorboard = keras_util.TensorBoardCallback(log_dir=model_config.record_dir, log_every=20,
                                                 model_config=model_config)
    start = time.time()
    print("####### start train model")
    # One iteration per training stage (epoch/lr/freeze triplets).
    for i in range(len(model_config.epoch)):
        print("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        # Cyclic LR oscillates between the stage lr and 5x that value.
        clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                          step_size=model_config.get_steps_per_epoch(i) / 2)
        # Augmented training-image stream for this stage's batch size.
        train_flow = data_loader.KerasGenerator(model_config=model_config,
                                                featurewise_center=True,
                                                featurewise_std_normalization=True,
                                                width_shift_range=0.15,
                                                height_shift_range=0.1,
                                                horizontal_flip=True,
                                                real_transform=True,
                                                rescale=1. / 256). \
            flow_from_files(model_config.train_files,
                            mode="fit",
                            target_size=model_config.image_size,
                            batch_size=model_config.train_batch_size[i],
                            shuffle=True,
                            label_position=model_config.label_position)
        if i == 0:
            # First stage: fresh model with the default ImageNet weights.
            model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
                              output_dim=len(model_config.label_position))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                workers=16,
                                verbose=0,
                                callbacks=[checkpoint, clr, tensorboard])
        else:
            # Later stages: rebuild uninitialized (weights=None), reload the
            # previous stage's checkpoint and resume from its last epoch.
            model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
                              lr=model_config.lr[i], weights=None)
            print("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
            model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                initial_epoch=model_config.epoch[i - 1],
                                workers=16,
                                verbose=0,
                                callbacks=[checkpoint, clr, tensorboard])
    print("####### train model spend %d seconds" % (time.time() - start))
    print("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| true | true |
1c346a04d5deace26f5d13429cd06afeab172022 | 1,196 | py | Python | tests/formatters/winlnk.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 27 | 2019-04-05T12:01:49.000Z | 2022-02-08T02:26:25.000Z | tests/formatters/winlnk.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | tests/formatters/winlnk.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 8 | 2019-11-28T08:06:34.000Z | 2020-08-29T13:53:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows Shortcut (LNK) event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import winlnk
from tests.formatters import test_lib
class WinLnkLinkFormatterTest(test_lib.EventFormatterTestCase):
  """Test case for the Windows Shortcut (LNK) event formatter."""

  def testInitialization(self):
    """Tests that the formatter can be instantiated."""
    formatter = winlnk.WinLnkLinkFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = winlnk.WinLnkLinkFormatter()

    # Attribute names the format strings of the formatter are expected
    # to reference, in declaration order.
    expected_attribute_names = [
        'description',
        'file_size',
        'file_attribute_flags',
        'drive_type',
        'drive_serial_number',
        'volume_label',
        'local_path',
        'network_path',
        'command_line_arguments',
        'env_var_location',
        'relative_path',
        'working_directory',
        'icon_location',
        'link_target']

    self._TestGetFormatStringAttributeNames(
        formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| 29.170732 | 73 | 0.736622 |
from __future__ import unicode_literals
import unittest
from plaso.formatters import winlnk
from tests.formatters import test_lib
class WinLnkLinkFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Windows Shortcut (LNK) event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    event_formatter = winlnk.WinLnkLinkFormatter()
    self.assertIsNotNone(event_formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    event_formatter = winlnk.WinLnkLinkFormatter()

    # Attribute names expected to be referenced by the formatter's
    # format strings.
    expected_attribute_names = [
        'description', 'file_size', 'file_attribute_flags', 'drive_type',
        'drive_serial_number', 'volume_label', 'local_path',
        'network_path', 'command_line_arguments', 'env_var_location',
        'relative_path', 'working_directory', 'icon_location',
        'link_target']

    self._TestGetFormatStringAttributeNames(
        event_formatter, expected_attribute_names)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| true | true |
1c346c37b76708e41519df306a01018e1fdc6a4c | 59,987 | py | Python | pygments/lexers/_mapping.py | eric-wieser/pygments | 97dce6024f82402916c8212172180227630b9fdb | [
"BSD-2-Clause"
] | null | null | null | pygments/lexers/_mapping.py | eric-wieser/pygments | 97dce6024f82402916c8212172180227630b9fdb | [
"BSD-2-Clause"
] | null | null | null | pygments/lexers/_mapping.py | eric-wieser/pygments | 97dce6024f82402916c8212172180227630b9fdb | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Every time
you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FStarLexer': ('pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm', '*.mjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', ('json-object',), (), ('application/json-object',)),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json', 'Pipfile.lock'), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle', 'juttle'), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md',), ('*.md',), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('ms', 'miniscript'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reason', 'reasonml'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
}
if __name__ == '__main__':  # pragma: no cover
    import sys
    import os

    # Regenerate the LEXERS table above: walk the pygments.lexers package,
    # import every lexer module, and collect each lexer's metadata.
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                # Turn the relative path into a dotted module name.  Use
                # os.sep so this also works on Windows, where os.walk
                # yields backslash-separated paths.
                module_name = 'pygments.lexers%s.%s' % (
                    root[1:].replace(os.sep, '.'), filename[:-3])
                print(module_name)
                module = __import__(module_name, None, None, [''])
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # Sort the entries so regenerating the table yields a minimal diff.
    found_lexers.sort()
    # Extract the source code surrounding the LEXERS table from this file.
    with open(__file__) as fp:
        content = fp.read()
    # Replace CRLF with LF for Windows.
    #
    # Note that, originally, contributors should keep the EOL style of the
    # master repository, for example by using some kind of automatic EOL
    # management, like `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`.
    content = content.replace("\r\n", "\n")
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # Rewrite this file in place: header, regenerated table, then this script.
    with open(__file__, 'w') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
| 109.265938 | 351 | 0.577975 |
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm', '*.mjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', ('json-object',), (), ('application/json-object',)),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json', 'Pipfile.lock'), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle', 'juttle'), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md',), ('*.md',), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('ms', 'miniscript'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reason', 'reasonml'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
}
if __name__ == '__main__':  # pragma: no cover
    # Regeneration script: rebuilds the generated LEXERS table above by
    # scanning every lexer module on disk, importing it, and rewriting this
    # file in place.  Intended to be run from the pygments/lexers directory.
    import sys
    import os

    # lookup lexers
    found_lexers = []
    # Make the repository root importable so 'pygments.lexers.*' resolves to
    # the working tree rather than an installed copy.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                # Map './subdir/foo.py' -> 'pygments.lexers.subdir.foo'.
                # NOTE(review): root[1:].replace('/', '.') assumes POSIX path
                # separators from os.walk — confirm behaviour on Windows.
                module_name = 'pygments.lexers%s.%s' % (
                    root[1:].replace('/', '.'), filename[:-3])
                print(module_name)
                module = __import__(module_name, None, None, [''])
                # Each lexer module declares its public lexer classes in
                # __all__; record one ready-to-paste source line per class:
                # 'Name': (module, name, aliases, filenames, mimetypes).
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # sort them to make the diff minimal
    found_lexers.sort()
    # extract useful sourcecode from this file
    with open(__file__) as fp:
        content = fp.read()
    # replace CRLF with LF for Windows.
    #
    # Note that, originally, contributors should keep the newlines of the
    # master repository, for example by using some kind of automatic
    # EOL management, like `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`.
    content = content.replace("\r\n", "\n")
    # Everything before 'LEXERS = {' and everything from this __main__ guard
    # onward is preserved verbatim; only the table in between is regenerated.
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    with open(__file__, 'w') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print ('=== %d lexers processed.' % len(found_lexers))
| true | true |
1c346ccbc2ec03e804b8690df4aaf5a79f452a53 | 4,003 | py | Python | torchtext/datasets/multi30k.py | abhinavarora/text | 69f67f3a775f3d3c6f85cfaa4ac3819500b90696 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T17:30:57.000Z | 2022-01-03T17:30:57.000Z | torchtext/datasets/multi30k.py | abhinavarora/text | 69f67f3a775f3d3c6f85cfaa4ac3819500b90696 | [
"BSD-3-Clause"
] | null | null | null | torchtext/datasets/multi30k.py | abhinavarora/text | 69f67f3a775f3d3c6f85cfaa4ac3819500b90696 | [
"BSD-3-Clause"
] | null | null | null | import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
# Download URL of the compressed archive for each split.
URL = {
    "train": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz",
    "valid": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz",
    "test": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz",
}
# Expected digest of each split's archive.
# NOTE(review): despite the name, these are SHA-256 digests — Multi30k() below
# verifies them with hash_type="sha256".
MD5 = {
    "train": "20140d013d05dd9a72dfde46478663ba05737ce983f478f960c1123c6671be5e",
    "valid": "a7aa20e9ebd5ba5adce7909498b94410996040857154dab029851af3a866da8c",
    "test": "0681be16a532912288a91ddd573594fbdd57c0fbb81486eff7c55247e35326c2",
}
# Filename stem of the extracted files inside each archive; the per-language
# member is "<prefix>.<lang>" (e.g. "val.de" / "val.en" for the "valid" split).
_PREFIX = {
    "train": "train",
    "valid": "val",
    "test": "test",
}
# Number of sentence pairs per split (matches the docstring of Multi30k).
NUM_LINES = {
    "train": 29000,
    "valid": 1014,
    "test": 1000,
}
# Dataset identifier passed to the @_create_dataset_directory decorator.
DATASET_NAME = "Multi30k"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "valid", "test"))
def Multi30k(root: str, split: Union[Tuple[str], str], language_pair: Tuple[str] = ("de", "en")):
    """Multi30k translation dataset (WMT16 multimodal task 1).

    For additional details refer to https://www.statmt.org/wmt16/multimodal-task.html#task1

    Number of lines per split:
        - train: 29000
        - valid: 1014
        - test: 1000

    Args:
        root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
        split: split or splits to be returned. Can be a string or tuple of strings. Default: ('train', 'valid', 'test')
        language_pair: tuple or list containing src and tgt language. Available options are ('de','en') and ('en', 'de')

    :return: DataPipe that yields tuple of source and target sentences
    :rtype: (str, str)
    """
    # NOTE(review): the annotation Tuple[str] denotes a 1-tuple;
    # Tuple[str, str] is almost certainly what is meant.
    assert len(language_pair) == 2, "language_pair must contain only 2 elements: src and tgt language respectively"
    assert tuple(sorted(language_pair)) == ("de", "en"), "language_pair must be either ('de','en') or ('en', 'de')"

    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
        )

    # Download the split's archive into `root` once, verifying its SHA-256
    # digest, and cache the compressed file on disk.
    archive_path = os.path.join(root, os.path.basename(URL[split]))
    cache_compressed_dp = IterableWrapper([URL[split]]).on_disk_cache(
        filepath_fn=lambda x: archive_path,
        hash_dict={archive_path: MD5[split]},
        hash_type="sha256",
    )
    cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)

    def _extracted(lang: str):
        # Extract (and cache) the single "<prefix>.<lang>" member of the archive.
        member = f"{_PREFIX[split]}.{lang}"
        side_dp = cache_compressed_dp.on_disk_cache(filepath_fn=lambda x: os.path.join(root, member))
        side_dp = FileOpener(side_dp, mode="b").read_from_tar().filter(lambda x: member in x[0])
        return side_dp.end_caching(mode="wb", same_filepath_fn=True)

    def _lines(decompressed_dp):
        # Yield the file's lines as plain strings with trailing newlines removed.
        return FileOpener(decompressed_dp, encoding="utf-8").readlines(return_path=False, strip_newline=True)

    src_lang, tgt_lang = language_pair
    return _lines(_extracted(src_lang)).zip(_lines(_extracted(tgt_lang)))
| 36.390909 | 120 | 0.697977 | import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
# Download URL of the compressed archive for each split.
URL = {
    "train": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz",
    "valid": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz",
    "test": r"http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz",
}
# Expected digest of each split's archive.
# NOTE(review): despite the name, these are SHA-256 digests — Multi30k() below
# verifies them with hash_type="sha256".
MD5 = {
    "train": "20140d013d05dd9a72dfde46478663ba05737ce983f478f960c1123c6671be5e",
    "valid": "a7aa20e9ebd5ba5adce7909498b94410996040857154dab029851af3a866da8c",
    "test": "0681be16a532912288a91ddd573594fbdd57c0fbb81486eff7c55247e35326c2",
}
# Filename stem of the extracted files inside each archive; the per-language
# member is "<prefix>.<lang>" (e.g. "val.de" / "val.en" for the "valid" split).
_PREFIX = {
    "train": "train",
    "valid": "val",
    "test": "test",
}
# Number of sentence pairs per split.
NUM_LINES = {
    "train": 29000,
    "valid": 1014,
    "test": 1000,
}
# Dataset identifier passed to the @_create_dataset_directory decorator.
DATASET_NAME = "Multi30k"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "valid", "test"))
def Multi30k(root: str, split: Union[Tuple[str], str], language_pair: Tuple[str, str] = ("de", "en")):
    """Multi30k translation dataset (WMT16 multimodal task 1).

    Builds a lazy datapipe that downloads the split's archive, verifies its
    SHA-256 digest, extracts the source- and target-language files, and
    yields ``(source_sentence, target_sentence)`` string pairs.

    Args:
        root: directory where downloaded/extracted files are cached.
        split: split or splits to be returned ('train', 'valid' or 'test').
        language_pair: (src, tgt) languages; ('de', 'en') or ('en', 'de').

    :return: DataPipe that yields tuples of source and target sentences
    :rtype: (str, str)
    """
    assert len(language_pair) == 2, "language_pair must contain only 2 elements: src and tgt language respectively"
    assert tuple(sorted(language_pair)) == (
        "de",
        "en",
    ), "language_pair must be either ('de','en') or ('en', 'de')"
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
        )
    # Download the split's archive into `root` once, verifying it against
    # MD5[split] (a SHA-256 digest, despite the table's name).
    url_dp = IterableWrapper([URL[split]])
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=lambda x: os.path.join(root, os.path.basename(URL[split])),
        hash_dict={os.path.join(root, os.path.basename(URL[split])): MD5[split]},
        hash_type="sha256",
    )
    cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
    # Extract and cache the source-language member ("<prefix>.<src>") of the tar.
    src_cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
        filepath_fn=lambda x: os.path.join(root, f"{_PREFIX[split]}.{language_pair[0]}")
    )
    src_cache_decompressed_dp = (
        FileOpener(src_cache_decompressed_dp, mode="b")
        .read_from_tar()
        .filter(lambda x: f"{_PREFIX[split]}.{language_pair[0]}" in x[0])
    )
    src_cache_decompressed_dp = src_cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
    # Same for the target-language member ("<prefix>.<tgt>").
    tgt_cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
        filepath_fn=lambda x: os.path.join(root, f"{_PREFIX[split]}.{language_pair[1]}")
    )
    tgt_cache_decompressed_dp = (
        FileOpener(tgt_cache_decompressed_dp, mode="b")
        .read_from_tar()
        .filter(lambda x: f"{_PREFIX[split]}.{language_pair[1]}" in x[0])
    )
    tgt_cache_decompressed_dp = tgt_cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
    # Read each side line-by-line (newlines stripped) and pair them up.
    src_data_dp = FileOpener(src_cache_decompressed_dp, encoding="utf-8").readlines(
        return_path=False, strip_newline=True
    )
    tgt_data_dp = FileOpener(tgt_cache_decompressed_dp, encoding="utf-8").readlines(
        return_path=False, strip_newline=True
    )
    return src_data_dp.zip(tgt_data_dp)
| true | true |
1c346ccc639aafba00553b5a8aedab756185ab64 | 523 | py | Python | neuroscout/schemas/run.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | null | null | null | neuroscout/schemas/run.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | null | null | null | neuroscout/schemas/run.py | jdkent/neuroscout | 67aaafdf883988e2048197dc9ce4559a28e3b7b6 | [
"BSD-3-Clause"
] | null | null | null | from marshmallow import fields, Schema
class RunSchema(Schema):
    """Marshmallow schema serialising a single run record.

    Exposes the run's identifiers (session, acquisition, subject, number),
    its duration, the owning dataset and the id of its associated task.
    """
    id = fields.Int()
    session = fields.Str(description='Session number')
    acquisition = fields.Str(description='Acquisition')
    subject = fields.Str(description='Subject id')
    number = fields.Int(description='Run id')
    duration = fields.Number(description='Total run duration in seconds.')
    dataset_id = fields.Int(description='Dataset run belongs to.')
    # Pluck flattens the nested TaskSchema down to just its `id` field.
    task = fields.Pluck(
        'TaskSchema', 'id', description="Task id and name")
class RunSchema(Schema):
id = fields.Int()
session = fields.Str(description='Session number')
acquisition = fields.Str(description='Acquisition')
subject = fields.Str(description='Subject id')
number = fields.Int(description='Run id')
duration = fields.Number(description='Total run duration in seconds.')
dataset_id = fields.Int(description='Dataset run belongs to.')
task = fields.Pluck(
'TaskSchema', 'id', description="Task id and name")
| true | true |
1c346d4bffe12f88b08655315c1a9c1a84f8d177 | 3,207 | py | Python | Python Code/Wh_manage-master/Wh_manage-master/wh_manage/wh_manage/settings.py | AkashKV-1998/Warehouse-Management-System | 33d96c52064262156ddcd459a36e2f63d4df2c30 | [
"Apache-2.0"
] | 3 | 2021-09-05T16:09:58.000Z | 2022-03-25T14:32:34.000Z | Python Code/Wh_manage-master/Wh_manage-master/wh_manage/wh_manage/settings.py | AkashKV-1998/Warehouse-Management-System | 33d96c52064262156ddcd459a36e2f63d4df2c30 | [
"Apache-2.0"
] | null | null | null | Python Code/Wh_manage-master/Wh_manage-master/wh_manage/wh_manage/settings.py | AkashKV-1998/Warehouse-Management-System | 33d96c52064262156ddcd459a36e2f63d4df2c30 | [
"Apache-2.0"
] | null | null | null | """
Django settings for Warehouse_management project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable (and rotate it) before any real deployment.
SECRET_KEY = 'h1x!iw)3+3pm9#(u(1i&gnzz$5pf(cqtdxh4)=oc(i6mpvel1x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; production needs explicit hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'productDetails',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wh_manage.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'wh_manage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): the database password is committed in plain text below;
# load credentials from the environment instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'Warehouse',
        'USER': 'postgres',
        'PASSWORD': '31071998',
        'HOST': 'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.251969 | 91 | 0.691612 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'h1x!iw)3+3pm9#(u(1i&gnzz$5pf(cqtdxh4)=oc(i6mpvel1x'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'productDetails',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wh_manage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wh_manage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'Warehouse',
'USER': 'postgres',
'PASSWORD': '31071998',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c346d5c9d8f8b214e1fb56b7cc527962c4a55ce | 428 | py | Python | losses/loss_utils.py | kevinleestone/mmstereo | 6757847000ed19cce607ce7537f2f38eed305cdd | [
"MIT"
] | null | null | null | losses/loss_utils.py | kevinleestone/mmstereo | 6757847000ed19cce607ce7537f2f38eed305cdd | [
"MIT"
] | null | null | null | losses/loss_utils.py | kevinleestone/mmstereo | 6757847000ed19cce607ce7537f2f38eed305cdd | [
"MIT"
] | null | null | null | # Copyright 2021 Toyota Research Institute. All rights reserved.
import torch
import torch.nn.functional as F
def null_loss():
    """Placeholder loss: no loss tensor, flagged as not valid."""
    loss, is_valid = None, False
    return loss, is_valid
def dummy_loss(tensor):
    """Build a zero-valued loss from ``tensor`` without mutating it.

    The previous implementation zeroed NaN entries in-place
    (``tensor[torch.isnan(tensor)] = 0.0``), silently modifying the
    caller's tensor; here the NaNs are masked out-of-place instead.
    Multiplying the MSE by 0.0 keeps the result connected to the autograd
    graph while contributing nothing to the gradients.

    Args:
        tensor: Loss tensor that may contain NaN entries.

    Returns:
        Tuple of (zero-valued loss tensor, False), where False flags the
        loss as invalid.
    """
    # Mask NaNs on a copy: NaN * 0.0 would otherwise propagate NaN into
    # the dummy loss value.
    cleaned = torch.where(torch.isnan(tensor), torch.zeros_like(tensor), tensor)
    return F.mse_loss(cleaned, torch.zeros_like(cleaned)) * 0.0, False
def valid_loss(tensor):
    """Pass ``tensor`` through when it is NaN-free, else fall back to a dummy loss.

    Returns a ``(loss, is_valid)`` tuple; ``is_valid`` is True only on the
    NaN-free path.
    """
    contains_nan = bool(torch.any(torch.isnan(tensor)))
    if contains_nan:
        return dummy_loss(tensor)
    return tensor, True
| 20.380952 | 68 | 0.698598 |
import torch
import torch.nn.functional as F
def null_loss():
return None, False
def dummy_loss(tensor):
tensor[torch.isnan(tensor)] = 0.0
return F.mse_loss(tensor, torch.zeros_like(tensor)) * 0.0, False
def valid_loss(tensor):
if not torch.any(torch.isnan(tensor)):
return tensor, True
else:
return dummy_loss(tensor)
| true | true |
1c346daf5b5dda3bfe5c92e80639f17d67137efc | 1,275 | py | Python | setup.py | bradleycwojcik/euchre-cli | e4ffcdb16720d8dafe6b5b00b50eb923c1fcfe27 | [
"MIT"
] | 3 | 2020-10-07T08:23:12.000Z | 2021-11-20T16:33:40.000Z | setup.py | bradleycwojcik/euchre-cli | e4ffcdb16720d8dafe6b5b00b50eb923c1fcfe27 | [
"MIT"
] | 28 | 2020-07-14T01:29:33.000Z | 2021-11-20T04:48:09.000Z | setup.py | boldandbrad/euchre-cli | 6e03f76c5feb50d677ab2558707182fa7dd5d127 | [
"MIT"
] | 4 | 2020-09-07T04:25:04.000Z | 2021-11-11T07:20:01.000Z | from setuptools import setup, find_packages
# Parse the version number from euchre/__init__.py without importing the
# package (importing could require dependencies that are not installed yet).
info = {}
with open("euchre/__init__.py", encoding="utf-8") as f:
    for line in f:
        if line.startswith("version"):
            exec(line, info)
            break

# Read the long description up-front inside a context manager: the previous
# inline open("README.md").read() leaked the file handle and relied on the
# platform default encoding.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup_info = dict(
    name="euchre-cli",
    version=info["version"],
    author="Bradley Wojcik",
    author_email="bradleycwojcik@gmail.com",
    license="MIT",
    description="Play euchre in your terminal.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://boldandbrad.github.io/euchre-cli/",
    project_urls={
        "Source": "https://github.com/boldandbrad/euchre-cli/",
        "Bug Tracker": "https://github.com/boldandbrad/euchre-cli/issues",
    },
    packages=find_packages(),
    include_package_data=True,
    install_requires=["click>=8", "names==0.3.0", "loguru>=0.5.0"],
    extras_require={"dev": ["black", "pytest", "pytest-cov", "pytest-mock", "codecov"]},
    python_requires=">=3.8",
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
    ],
    entry_points="""
        [console_scripts]
        euchre=euchre.euchre:cli
    """,
)

setup(**setup_info)
| 31.097561 | 88 | 0.630588 | from setuptools import setup, find_packages
with open("euchre/__init__.py") as f:
info = {}
for line in f.readlines():
if line.startswith("version"):
exec(line, info)
break
setup_info = dict(
name="euchre-cli",
version=info["version"],
author="Bradley Wojcik",
author_email="bradleycwojcik@gmail.com",
license="MIT",
description="Play euchre in your terminal.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://boldandbrad.github.io/euchre-cli/",
project_urls={
"Source": "https://github.com/boldandbrad/euchre-cli/",
"Bug Tracker": "https://github.com/boldandbrad/euchre-cli/issues",
},
packages=find_packages(),
include_package_data=True,
install_requires=["click>=8", "names==0.3.0", "loguru>=0.5.0"],
extras_require={"dev": ["black", "pytest", "pytest-cov", "pytest-mock", "codecov"]},
python_requires=">=3.8",
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
],
entry_points="""
[console_scripts]
euchre=euchre.euchre:cli
""",
)
setup(**setup_info)
| true | true |
1c346df455707f69e3f5aae30b421c4f65357cdb | 6,382 | py | Python | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/4-extending_bound_16.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/4-extending_bound_16.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/4-extending_bound_16.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 < arg1`` as the negation of ``arg0 >= arg1``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 >= arg1`` by flipping the operands of ``<=``."""
    flipped = msat_make_leq(menv, arg1, arg0)
    return flipped
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 > arg1`` as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 -> arg1`` via the equivalence ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
                                                        msat_term, msat_term]:
    """Build the transition system and LTL property for this benchmark.

    Declares real-valued state variables ``i``, ``r``, ``l`` and a boolean
    ``inc_i`` together with their next-state counterparts, then returns a
    4-tuple: (current->next symbol map, init formula, transition formula,
    LTL property ``(G F inc_i) -> ! G F (r > i)``).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    real_type = msat_get_rational_type(menv)
    # current-state symbols
    i = msat_declare_function(menv, "i", real_type)
    i = msat_make_constant(menv, i)
    r = msat_declare_function(menv, "r", real_type)
    r = msat_make_constant(menv, r)
    l = msat_declare_function(menv, "l", real_type)
    l = msat_make_constant(menv, l)
    inc_i = msat_declare_function(menv, "inc_i", bool_type)
    inc_i = msat_make_constant(menv, inc_i)
    # next-state symbols
    x_i = msat_declare_function(menv, name_next("i"), real_type)
    x_i = msat_make_constant(menv, x_i)
    x_r = msat_declare_function(menv, name_next("r"), real_type)
    x_r = msat_make_constant(menv, x_r)
    x_l = msat_declare_function(menv, name_next("l"), real_type)
    x_l = msat_make_constant(menv, x_l)
    x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
    x_inc_i = msat_make_constant(menv, x_inc_i)
    curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
    zero = msat_make_number(menv, "0")
    one = msat_make_number(menv, "1")
    # init: 0 < r < l, i >= 0, !inc_i and l > 0
    r_gt_0 = msat_make_gt(menv, r, zero)
    r_lt_l = msat_make_lt(menv, r, l)
    i_geq_0 = msat_make_geq(menv, i, zero)
    init = msat_make_and(menv, r_gt_0, r_lt_l)
    init = msat_make_and(menv, init,
                         msat_make_and(menv, i_geq_0,
                                       msat_make_not(menv, inc_i)))
    init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
    # r' = r
    trans = msat_make_equal(menv, x_r, r)
    # i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
    i_lt_l = msat_make_lt(menv, i, l)
    x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
                                 msat_make_equal(menv, x_i,
                                                 msat_make_plus(menv, i, one)))
    x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
                             msat_make_equal(menv, x_i, i))
    x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
    x_l_eq_l = msat_make_equal(menv, x_l, l)
    x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
                                                   x_l_eq_l)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_lt_l,
                                         x_i_eq_i_p_1_or_i_and_x_l_eq_l))
    # i >= l -> i' = 0 & l' = l + 1 & !inc_i'
    i_geq_l = msat_make_geq(menv, i, l)
    x_i_eq_0 = msat_make_equal(menv, x_i, zero)
    x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
    x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
                                              msat_make_and(menv, x_i_eq_0,
                                                            x_l_eq_l_p_1),
                                              msat_make_not(menv, x_inc_i))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_geq_l,
                                         x_i_eq_0_and_x_l_eq_l_p_1))
    # (G F inc_i) -> ! G F r > i
    G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
    return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return a set of Hints over the ``l`` symbol to guide the search.

    Each hint is a small automaton of Locations: every Location carries an
    invariant over the current state and a progress condition relating it
    to the next state (``x_l`` is the next-state copy of ``l``).
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    i = mgr.Symbol("i", types.REAL)
    r = mgr.Symbol("r", types.REAL)
    l = mgr.Symbol("l", types.REAL)
    inc_i = mgr.Symbol("inc_i", types.BOOL)
    symbs = frozenset([i, r, l, inc_i])
    x_i = symb_to_next(mgr, i)
    x_r = symb_to_next(mgr, r)
    x_l = symb_to_next(mgr, l)
    x_inc_i = symb_to_next(mgr, inc_i)
    res = []
    n0 = mgr.Real(0)
    n1 = mgr.Real(1)
    # h_l0: while l >= 0, keep incrementing l by 1.
    loc = Location(env, mgr.GE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
    h_l = Hint("h_l0", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    # h_l1: while l <= 0, keep decrementing l by 1.
    loc = Location(env, mgr.LE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
    h_l = Hint("h_l1", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    # h_l3: two locations; the first may stutter by adding r to l.
    loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
                    stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l3", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    # h_l4: three-location cycle incrementing l once per round.
    loc0 = Location(env, mgr.GE(l, n0))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(2, mgr.Equals(x_l, l))
    loc2 = Location(env, mgr.GE(l, n0))
    loc2.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l4", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1, loc2])
    res.append(h_l)
    return frozenset(res)
| 37.763314 | 89 | 0.629583 | from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
return frozenset(res)
| true | true |
1c346f4f180311b92db628086017ec37d9afda89 | 181,096 | py | Python | wagtail/core/models/__init__.py | swilltec/wagtail | 7e41ee8706caa65d94b0c59676a7f614bb9ae4d1 | [
"BSD-3-Clause"
] | null | null | null | wagtail/core/models/__init__.py | swilltec/wagtail | 7e41ee8706caa65d94b0c59676a7f614bb9ae4d1 | [
"BSD-3-Clause"
] | null | null | null | wagtail/core/models/__init__.py | swilltec/wagtail | 7e41ee8706caa65d94b0c59676a7f614bb9ae4d1 | [
"BSD-3-Clause"
] | null | null | null | """
wagtail.core.models is split into submodules for maintainability. All definitions intended as
public should be imported here (with 'noqa' comments as required) and outside code should continue
to import them from wagtail.core.models (e.g. `from wagtail.core.models import Site`, not
`from wagtail.core.models.sites import Site`.)
Submodules should take care to keep the direction of dependencies consistent; where possible they
should implement low-level generic functionality which is then imported by higher-level models such
as Page.
"""
import functools
import json
import logging
import uuid
from io import StringIO
from urllib.parse import urlparse
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.db import migrations, models, transaction
from django.db.models import DEFERRED, Q, Value
from django.db.models.expressions import OuterRef, Subquery
from django.db.models.functions import Concat, Substr
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.http import Http404
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import timezone, translation
from django.utils.cache import patch_cache_control
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.text import capfirst, slugify
from django.utils.translation import gettext_lazy as _
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel, get_all_child_relations
from treebeard.mp_tree import MP_Node
from wagtail.core.fields import StreamField
from wagtail.core.forms import TaskStateCommentForm
from wagtail.core.log_actions import page_log_action_registry
from wagtail.core.query import PageQuerySet
from wagtail.core.signals import (
page_published, page_unpublished, post_page_move, pre_page_move, task_approved, task_cancelled,
task_rejected, task_submitted, workflow_approved, workflow_cancelled, workflow_rejected,
workflow_submitted)
from wagtail.core.treebeard import TreebeardPathFixMixin
from wagtail.core.url_routing import RouteResult
from wagtail.core.utils import (
WAGTAIL_APPEND_SLASH, camelcase_to_underscore, find_available_slug, get_content_languages,
get_supported_content_language_variant, resolve_model_string)
from wagtail.search import index
from .audit_log import BaseLogEntry, BaseLogEntryManager, LogEntryQuerySet # noqa
from .collections import ( # noqa
BaseCollectionManager, Collection, CollectionManager, CollectionMember,
CollectionViewRestriction, GroupCollectionPermission, GroupCollectionPermissionManager,
get_root_collection_id)
from .sites import Site, SiteManager, SiteRootPath # noqa
from .view_restrictions import BaseViewRestriction
logger = logging.getLogger('wagtail.core')
PAGE_TEMPLATE_VAR = 'page'
def _extract_field_data(source, exclude_fields=None):
    """
    Return a dict mapping field names to values for the model instance.

    Non-parental many-to-many fields are excluded (they are handled by
    _copy_m2m_relations); ParentalManyToManyField values are included so
    they can be passed to the model constructor.
    """
    exclude_fields = exclude_fields or []
    data_dict = {}
    for field in source._meta.get_fields():
        # Ignore explicitly excluded fields
        if field.name in exclude_fields:
            continue
        # Ignore reverse relations
        if field.auto_created:
            continue
        # Copy parental m2m relations
        if field.many_to_many:
            if isinstance(field, ParentalManyToManyField):
                parental_field = getattr(source, field.name)
                if hasattr(parental_field, 'all'):
                    values = parental_field.all()
                    if values:
                        data_dict[field.name] = values
            continue
        # Ignore parent links (page_ptr)
        if isinstance(field, models.OneToOneField) and field.remote_field.parent_link:
            continue
        if isinstance(field, models.ForeignKey):
            # Use attname to copy the ID instead of retrieving the instance
            # Note: We first need to set the field to None to unset any object
            # that's there already just setting _id on its own won't change the
            # field until its saved.
            data_dict[field.name] = None
            data_dict[field.attname] = getattr(source, field.attname)
        else:
            data_dict[field.name] = getattr(source, field.name)
    return data_dict
def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
    """
    Copy non-ParentalManyToMany m2m relations from ``source`` onto ``target``.

    ``exclude_fields`` names fields to skip; ``update_attrs`` may supply
    replacement values for individual m2m fields instead of copying from
    ``source``.
    """
    update_attrs = update_attrs or {}
    exclude_fields = exclude_fields or []
    for field in source._meta.get_fields():
        # Copy m2m relations. Ignore explicitly excluded fields, reverse relations, and Parental m2m fields.
        if field.many_to_many and field.name not in exclude_fields and not field.auto_created and not isinstance(field, ParentalManyToManyField):
            try:
                # Do not copy m2m links with a through model that has a ParentalKey to the model being copied - these will be copied as child objects
                through_model_parental_links = [field for field in field.through._meta.get_fields() if isinstance(field, ParentalKey) and issubclass(source.__class__, field.related_model)]
                if through_model_parental_links:
                    continue
            except AttributeError:
                # auto-created through models without _meta access are skipped
                pass
            if field.name in update_attrs:
                value = update_attrs[field.name]
            else:
                value = getattr(source, field.name).all()
            getattr(target, field.name).set(value)
def _copy(source, exclude_fields=None, update_attrs=None):
    """Create an unsaved duplicate of ``source``.

    Returns a ``(target, child_object_map)`` tuple where ``target`` is the
    new instance and ``child_object_map`` maps copied child relations (empty
    for non-clusterable models).
    """
    field_data = _extract_field_data(source, exclude_fields=exclude_fields)
    target = source.__class__(**field_data)
    # Apply overrides, but only for fields that were actually copied.
    for attr, value in (update_attrs or {}).items():
        if attr in field_data:
            setattr(target, attr, value)
    # Clusterable models also carry their child relations across.
    child_object_map = (
        source.copy_all_child_relations(target, exclude=exclude_fields)
        if isinstance(source, ClusterableModel)
        else {}
    )
    return target, child_object_map
def pk(obj):
    """Return ``obj.pk`` for model instances; pass raw primary keys through unchanged."""
    return obj.pk if isinstance(obj, models.Model) else obj
class LocaleManager(models.Manager):
    def get_for_language(self, language_code):
        """
        Return the Locale matching the given language code.

        The code is normalised to the closest supported content-language
        variant before the lookup is performed.
        """
        variant = get_supported_content_language_variant(language_code)
        return self.get(language_code=variant)
class Locale(models.Model):
    """A language/region a piece of content can belong to."""
    #: The language code that represents this locale
    #:
    #: The language code can either be a language code on its own (such as ``en``, ``fr``),
    #: or it can include a region code (such as ``en-gb``, ``fr-fr``).
    language_code = models.CharField(max_length=100, unique=True)
    # Objects excludes any Locales that have been removed from LANGUAGES, This effectively disables them
    # The Locale management UI needs to be able to see these so we provide a separate manager `all_objects`
    objects = LocaleManager()
    all_objects = models.Manager()
    class Meta:
        ordering = [
            "language_code",
        ]
    @classmethod
    def get_default(cls):
        """
        Returns the default Locale based on the site's LANGUAGE_CODE setting
        """
        return cls.objects.get_for_language(settings.LANGUAGE_CODE)
    @classmethod
    def get_active(cls):
        """
        Returns the Locale that corresponds to the currently activated language in Django.
        """
        try:
            return cls.objects.get_for_language(translation.get_language())
        except (cls.DoesNotExist, LookupError):
            # no Locale row / unsupported language -> fall back to the default
            return cls.get_default()
    @transaction.atomic
    def delete(self, *args, **kwargs):
        """
        Delete this locale, first moving the root page tree node onto another
        locale so the tree is never left pointing at a deleted locale.
        """
        # if we're deleting the locale used on the root page node, reassign that to a new locale first
        root_page_with_this_locale = Page.objects.filter(depth=1, locale=self)
        if root_page_with_this_locale.exists():
            # Select the default locale, if one exists and isn't the one being deleted
            try:
                new_locale = Locale.get_default()
                default_locale_is_ok = (new_locale != self)
            except (Locale.DoesNotExist, LookupError):
                default_locale_is_ok = False
            if not default_locale_is_ok:
                # fall back on any remaining locale
                new_locale = Locale.all_objects.exclude(pk=self.pk).first()
            root_page_with_this_locale.update(locale=new_locale)
        return super().delete(*args, **kwargs)
    def language_code_is_valid(self):
        """Return True if this locale's code is still listed in the content languages."""
        return self.language_code in get_content_languages()
    def get_display_name(self):
        """Return the human-readable name for this locale, or None if unknown."""
        return get_content_languages().get(self.language_code)
    def __str__(self):
        return force_str(self.get_display_name() or self.language_code)
class TranslatableMixin(models.Model):
    """
    Mixin giving a model a translation identity: instances sharing a
    ``translation_key`` are translations of one another, each in a distinct
    ``locale`` (enforced by the unique_together constraint below).
    """

    # Shared by all translations of the same piece of content
    translation_key = models.UUIDField(default=uuid.uuid4, editable=False)
    # PROTECT: a Locale cannot be deleted while translatable content still uses it
    locale = models.ForeignKey(Locale, on_delete=models.PROTECT, related_name="+", editable=False)

    class Meta:
        abstract = True
        unique_together = [("translation_key", "locale")]

    @classmethod
    def check(cls, **kwargs):
        """System check ensuring subclasses keep the translation uniqueness constraint."""
        errors = super(TranslatableMixin, cls).check(**kwargs)
        is_translation_model = cls.get_translation_model() is cls
        # Raise error if subclass has removed the unique_together constraint
        # No need to check this on multi-table-inheritance children though as it only needs to be applied to
        # the table that has the translation_key/locale fields
        if is_translation_model and ("translation_key", "locale") not in cls._meta.unique_together:
            errors.append(
                checks.Error(
                    "{0}.{1} is missing a unique_together constraint for the translation key and locale fields"
                    .format(cls._meta.app_label, cls.__name__),
                    hint="Add ('translation_key', 'locale') to {}.Meta.unique_together".format(cls.__name__),
                    obj=cls,
                    id='wagtailcore.E003',
                )
            )
        return errors

    @property
    def localized(self):
        """
        Finds the translation in the current active language.
        If there is no translation in the active language, self is returned.
        """
        try:
            locale = Locale.get_active()
        except (LookupError, Locale.DoesNotExist):
            return self
        if locale.id == self.locale_id:
            # Already in the active locale; avoid a needless query
            return self
        return self.get_translation_or_none(locale) or self

    def get_translations(self, inclusive=False):
        """
        Returns a queryset containing the translations of this instance.
        When ``inclusive`` is True the queryset includes this instance itself.
        """
        translations = self.__class__.objects.filter(
            translation_key=self.translation_key
        )
        if inclusive is False:
            translations = translations.exclude(id=self.id)
        return translations

    def get_translation(self, locale):
        """
        Finds the translation in the specified locale.
        If there is no translation in that locale, this raises a ``model.DoesNotExist`` exception.
        """
        # ``locale`` may be a Locale instance or a raw primary key (see ``pk()``)
        return self.get_translations(inclusive=True).get(locale_id=pk(locale))

    def get_translation_or_none(self, locale):
        """
        Finds the translation in the specified locale.
        If there is no translation in that locale, this returns None.
        """
        try:
            return self.get_translation(locale)
        except self.__class__.DoesNotExist:
            return None

    def has_translation(self, locale):
        """
        Returns True if a translation exists in the specified locale.
        """
        return self.get_translations(inclusive=True).filter(locale_id=pk(locale)).exists()

    def copy_for_translation(self, locale):
        """
        Creates a copy of this instance with the specified locale.
        Note that the copy is initially unsaved.
        """
        translated, child_object_map = _copy(self)
        translated.locale = locale
        # Update locale on any translatable child objects as well
        # Note: If this is not a subclass of ClusterableModel, child_object_map will always be '{}'
        for (child_relation, old_pk), child_object in child_object_map.items():
            if isinstance(child_object, TranslatableMixin):
                child_object.locale = locale
        return translated

    def get_default_locale(self):
        """
        Finds the default locale to use for this object.
        This will be called just before the initial save.
        """
        # Check if the object has any parental keys to another translatable model
        # If so, take the locale from the object referenced in that parental key
        parental_keys = [
            field
            for field in self._meta.get_fields()
            if isinstance(field, ParentalKey)
            and issubclass(field.related_model, TranslatableMixin)
        ]
        if parental_keys:
            parent_id = parental_keys[0].value_from_object(self)
            # defer() + select_related avoids loading the parent's full field set
            return (
                parental_keys[0]
                .related_model.objects.defer().select_related("locale")
                .get(id=parent_id)
                .locale
            )
        return Locale.get_default()

    @classmethod
    def get_translation_model(cls):
        """
        Returns this model's "Translation model".
        The "Translation model" is the model that has the ``locale`` and
        ``translation_key`` fields.
        Typically this would be the current model, but it may be a
        super-class if multi-table inheritance is in use (as is the case
        for ``wagtailcore.Page``).
        """
        return cls._meta.get_field("locale").model
def bootstrap_translatable_model(model, locale):
    """
    Populate the ``translation_key`` and ``locale`` fields on instances of
    ``model`` created before wagtail-localize was added to the site.

    Call this from a data migration, or use the ``boostrap_translatable_models``
    management command instead.
    """
    # Only rows that were never assigned a translation key need bootstrapping;
    # iterator() keeps memory use flat on large tables.
    untranslated = model.objects.filter(translation_key__isnull=True).defer().iterator()
    for instance in untranslated:
        instance.translation_key = uuid.uuid4()
        instance.locale = locale
        instance.save(update_fields=["translation_key", "locale"])
class BootstrapTranslatableModel(migrations.RunPython):
    """
    Migration operation that populates translation fields for one model
    (see ``bootstrap_translatable_model``).
    """

    def __init__(self, model_string, language_code=None):
        if language_code is None:
            language_code = get_supported_content_language_variant(settings.LANGUAGE_CODE)

        def run_forwards(apps, schema_editor):
            # Resolve models through the historical app registry, not live classes
            target_model = apps.get_model(model_string)
            locale_model = apps.get_model("wagtailcore.Locale")
            locale = locale_model.objects.get(language_code=language_code)
            bootstrap_translatable_model(target_model, locale)

        def run_backwards(apps, schema_editor):
            # Nothing to undo: the populated fields are simply left in place
            pass

        super().__init__(run_forwards, run_backwards)
class ParentNotTranslatedError(Exception):
    """
    Raised when a call to Page.copy_for_translation is made but the
    parent page is not translated and copy_parents is False.
    """
class BootstrapTranslatableMixin(TranslatableMixin):
    """
    A version of TranslatableMixin without uniqueness constraints.
    This is to make it easy to transition existing models to being translatable.
    The process is as follows:
    - Add BootstrapTranslatableMixin to the model
    - Run makemigrations
    - Create a data migration for each app, then use the BootstrapTranslatableModel operation in
    wagtail.core.models on each model in that app
    - Change BootstrapTranslatableMixin to TranslatableMixin
    - Run makemigrations again
    - Migrate!
    """
    # Both fields are nullable here (unlike TranslatableMixin) so existing rows
    # can be bootstrapped before the real constraints are applied
    translation_key = models.UUIDField(null=True, editable=False)
    locale = models.ForeignKey(
        Locale, on_delete=models.PROTECT, null=True, related_name="+", editable=False
    )

    @classmethod
    def check(cls, **kwargs):
        # skip the check in TranslatableMixin that enforces the unique-together constraint
        return super(TranslatableMixin, cls).check(**kwargs)

    class Meta:
        abstract = True
def get_translatable_models(include_subclasses=False):
    """
    Return every concrete model that inherits from TranslatableMixin.

    By default only "translation models" (those that directly own the
    translation_key/locale fields) are included; pass
    ``include_subclasses=True`` to also get their multi-table-inheritance
    children.
    """
    translatable_models = []
    for model in apps.get_models():
        if issubclass(model, TranslatableMixin) and not model._meta.abstract:
            translatable_models.append(model)
    if include_subclasses is False:
        # Exclude models that inherit from another translatable model
        roots = {model.get_translation_model() for model in translatable_models}
        translatable_models = [model for model in translatable_models if model in roots]
    return translatable_models
@receiver(pre_save)
def set_locale_on_new_instance(sender, instance, **kwargs):
    """pre_save handler: assign a locale to translatable instances lacking one."""
    if not isinstance(instance, TranslatableMixin) or instance.locale_id is not None:
        return
    if kwargs["raw"]:
        # Fixture load: the page tree may be in flux, so use the global
        # default Locale rather than inspecting parent objects.
        instance.locale = Locale.get_default()
    else:
        instance.locale = instance.get_default_locale()
# Registry of every non-abstract Page subclass; populated by the PageBase
# metaclass as page models are defined.
PAGE_MODEL_CLASSES = []
def get_page_models():
    """
    Returns a list of all non-abstract Page model classes defined in this project.

    Note: this is the live registry list, not a copy.
    """
    return PAGE_MODEL_CLASSES
def get_default_page_content_type():
    """
    Returns the content type to use as a default for pages whose content type
    has been deleted.

    Used as the ``on_delete=models.SET(...)`` fallback for ``Page.content_type``.
    """
    return ContentType.objects.get_for_model(Page)
@functools.lru_cache(maxsize=None)
def get_streamfield_names(model_class):
    """Return the names of ``model_class``'s StreamFields (cached per class)."""
    stream_fields = [
        field for field in model_class._meta.concrete_fields
        if isinstance(field, StreamField)
    ]
    return tuple(field.name for field in stream_fields)
class BasePageManager(models.Manager):
    """Base manager for pages; default ordering follows the tree path."""
    def get_queryset(self):
        # Ordering by 'path' yields pages in depth-first tree order
        return self._queryset_class(self.model).order_by('path')
# Combine BasePageManager's default ordering with PageQuerySet's methods
PageManager = BasePageManager.from_queryset(PageQuerySet)
class PageBase(models.base.ModelBase):
    """Metaclass for Page"""
    def __init__(cls, name, bases, dct):
        super(PageBase, cls).__init__(name, bases, dct)
        # Membership is tested against ``dct`` (this class's own namespace),
        # not the resolved attribute, so each subclass gets its own default
        # rather than inheriting the parent's computed value.
        if 'template' not in dct:
            # Define a default template path derived from the app name and model name
            cls.template = "%s/%s.html" % (cls._meta.app_label, camelcase_to_underscore(name))
        if 'ajax_template' not in dct:
            cls.ajax_template = None
        cls._clean_subpage_models = None  # to be filled in on first call to cls.clean_subpage_models
        cls._clean_parent_page_models = None  # to be filled in on first call to cls.clean_parent_page_models
        # All pages should be creatable unless explicitly set otherwise.
        # This attribute is not inheritable.
        if 'is_creatable' not in dct:
            cls.is_creatable = not cls._meta.abstract
        if not cls._meta.abstract:
            # register this type in the list of page content types
            PAGE_MODEL_CLASSES.append(cls)
class AbstractPage(TranslatableMixin, TreebeardPathFixMixin, MP_Node):
    """
    Abstract superclass for Page. According to Django's inheritance rules, managers set on
    abstract models are inherited by subclasses, but managers set on concrete models that are extended
    via multi-table inheritance are not. We therefore need to attach PageManager to an abstract
    superclass to ensure that it is retained by subclasses of Page.
    """
    # Path-ordered manager (see BasePageManager); inherited by all page models
    objects = PageManager()

    class Meta:
        abstract = True
class Page(AbstractPage, index.Indexed, ClusterableModel, metaclass=PageBase):
title = models.CharField(
verbose_name=_('title'),
max_length=255,
help_text=_("The page title as you'd like it to be seen by the public")
)
# to reflect title of a current draft in the admin UI
draft_title = models.CharField(
max_length=255,
editable=False
)
slug = models.SlugField(
verbose_name=_('slug'),
allow_unicode=True,
max_length=255,
help_text=_("The name of the page as it will appear in URLs e.g http://domain.com/blog/[my-slug]/")
)
content_type = models.ForeignKey(
ContentType,
verbose_name=_('content type'),
related_name='pages',
on_delete=models.SET(get_default_page_content_type)
)
live = models.BooleanField(verbose_name=_('live'), default=True, editable=False)
has_unpublished_changes = models.BooleanField(
verbose_name=_('has unpublished changes'),
default=False,
editable=False
)
url_path = models.TextField(verbose_name=_('URL path'), blank=True, editable=False)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_('owner'),
null=True,
blank=True,
editable=True,
on_delete=models.SET_NULL,
related_name='owned_pages'
)
seo_title = models.CharField(
verbose_name=_("title tag"),
max_length=255,
blank=True,
help_text=_("The name of the page displayed on search engine results as the clickable headline.")
)
show_in_menus_default = False
show_in_menus = models.BooleanField(
verbose_name=_('show in menus'),
default=False,
help_text=_("Whether a link to this page will appear in automatically generated menus")
)
search_description = models.TextField(
verbose_name=_('meta description'),
blank=True,
help_text=_("The descriptive text displayed underneath a headline in search engine results.")
)
go_live_at = models.DateTimeField(
verbose_name=_("go live date/time"),
blank=True,
null=True
)
expire_at = models.DateTimeField(
verbose_name=_("expiry date/time"),
blank=True,
null=True
)
expired = models.BooleanField(verbose_name=_('expired'), default=False, editable=False)
locked = models.BooleanField(verbose_name=_('locked'), default=False, editable=False)
locked_at = models.DateTimeField(verbose_name=_('locked at'), null=True, editable=False)
locked_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_('locked by'),
null=True,
blank=True,
editable=False,
on_delete=models.SET_NULL,
related_name='locked_pages'
)
first_published_at = models.DateTimeField(
verbose_name=_('first published at'),
blank=True,
null=True,
db_index=True
)
last_published_at = models.DateTimeField(
verbose_name=_('last published at'),
null=True,
editable=False
)
latest_revision_created_at = models.DateTimeField(
verbose_name=_('latest revision created at'),
null=True,
editable=False
)
live_revision = models.ForeignKey(
'PageRevision',
related_name='+',
verbose_name=_('live revision'),
on_delete=models.SET_NULL,
null=True,
blank=True,
editable=False
)
# If non-null, this page is an alias of the linked page
# This means the page is kept in sync with the live version
# of the linked pages and is not editable by users.
alias_of = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
editable=False,
related_name='aliases',
)
search_fields = [
index.SearchField('title', partial_match=True, boost=2),
index.AutocompleteField('title'),
index.FilterField('title'),
index.FilterField('id'),
index.FilterField('live'),
index.FilterField('owner'),
index.FilterField('content_type'),
index.FilterField('path'),
index.FilterField('depth'),
index.FilterField('locked'),
index.FilterField('show_in_menus'),
index.FilterField('first_published_at'),
index.FilterField('last_published_at'),
index.FilterField('latest_revision_created_at'),
index.FilterField('locale'),
index.FilterField('translation_key'),
]
# Do not allow plain Page instances to be created through the Wagtail admin
is_creatable = False
# Define the maximum number of instances this page type can have. Default to unlimited.
max_count = None
# Define the maximum number of instances this page can have under a specific parent. Default to unlimited.
max_count_per_parent = None
# An array of additional field names that will not be included when a Page is copied.
exclude_fields_in_copy = []
default_exclude_fields_in_copy = ['id', 'path', 'depth', 'numchild', 'url_path', 'path', 'index_entries', 'comments']
# Define these attributes early to avoid masking errors. (Issue #3078)
# The canonical definition is in wagtailadmin.edit_handlers.
content_panels = []
promote_panels = []
settings_panels = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.id:
# this model is being newly created
# rather than retrieved from the db;
if not self.content_type_id:
# set content type to correctly represent the model class
# that this was created as
self.content_type = ContentType.objects.get_for_model(self)
if 'show_in_menus' not in kwargs:
# if the value is not set on submit refer to the model setting
self.show_in_menus = self.show_in_menus_default
    def __str__(self):
        """Return the page title."""
        return self.title
    @classmethod
    def get_streamfield_names(cls):
        """Return the names of this page model's StreamFields (cached per class)."""
        return get_streamfield_names(cls)
def set_url_path(self, parent):
"""
Populate the url_path field based on this page's slug and the specified parent page.
(We pass a parent in here, rather than retrieving it via get_parent, so that we can give
new unsaved pages a meaningful URL when previewing them; at that point the page has not
been assigned a position in the tree, as far as treebeard is concerned.
"""
if parent:
self.url_path = parent.url_path + self.slug + '/'
else:
# a page without a parent is the tree root, which always has a url_path of '/'
self.url_path = '/'
return self.url_path
@staticmethod
def _slug_is_available(slug, parent_page, page=None):
"""
Determine whether the given slug is available for use on a child page of
parent_page. If 'page' is passed, the slug is intended for use on that page
(and so it will be excluded from the duplicate check).
"""
if parent_page is None:
# the root page's slug can be whatever it likes...
return True
siblings = parent_page.get_children()
if page:
siblings = siblings.not_page(page)
return not siblings.filter(slug=slug).exists()
def _get_autogenerated_slug(self, base_slug):
candidate_slug = base_slug
suffix = 1
parent_page = self.get_parent()
while not Page._slug_is_available(candidate_slug, parent_page, self):
# try with incrementing suffix until we find a slug which is available
suffix += 1
candidate_slug = "%s-%d" % (base_slug, suffix)
return candidate_slug
def get_default_locale(self):
"""
Finds the default locale to use for this page.
This will be called just before the initial save.
"""
parent = self.get_parent()
if parent is not None:
return (
parent.specific_class.objects.defer().select_related("locale")
.get(id=parent.id)
.locale
)
return super().get_default_locale()
def full_clean(self, *args, **kwargs):
# Apply fixups that need to happen before per-field validation occurs
if not self.slug:
# Try to auto-populate slug from title
allow_unicode = getattr(settings, 'WAGTAIL_ALLOW_UNICODE_SLUGS', True)
base_slug = slugify(self.title, allow_unicode=allow_unicode)
# only proceed if we get a non-empty base slug back from slugify
if base_slug:
self.slug = self._get_autogenerated_slug(base_slug)
if not self.draft_title:
self.draft_title = self.title
# Set the locale
if self.locale_id is None:
self.locale = self.get_default_locale()
super().full_clean(*args, **kwargs)
def clean(self):
super().clean()
if not Page._slug_is_available(self.slug, self.get_parent(), self):
raise ValidationError({'slug': _("This slug is already in use")})
def is_site_root(self):
"""
Returns True if this page is the root of any site.
This includes translations of site root pages as well.
"""
return Site.objects.filter(root_page__translation_key=self.translation_key).exists()
    @transaction.atomic
    # ensure that changes are only committed when we have updated all descendant URL paths, to preserve consistency
    def save(self, clean=True, user=None, log_action=False, **kwargs):
        """
        Overrides default method behaviour to make additional updates unique to pages,
        such as updating the ``url_path`` value of descendant page to reflect changes
        to this page's slug.
        New pages should generally be saved via the ``add_child()`` or ``add_sibling()``
        method of an existing page, which will correctly set the ``path`` and ``depth``
        fields on the new page before saving it.
        By default, pages are validated using ``full_clean()`` before attempting to
        save changes to the database, which helps to preserve validity when restoring
        pages from historic revisions (which might not necessarily reflect the current
        model state). This validation step can be bypassed by calling the method with
        ``clean=False``.

        :param clean: run ``full_clean()`` before saving (default True)
        :param user: user performing the action, recorded in the page log
        :param log_action: False (default) skips logging except for page creation;
            an action string logs that action; None suppresses even the creation log
        """
        if clean:
            self.full_clean()
        update_descendant_url_paths = False
        is_new = self.id is None
        if is_new:
            # we are creating a record. If we're doing things properly, this should happen
            # through a treebeard method like add_child, in which case the 'path' field
            # has been set and so we can safely call get_parent
            self.set_url_path(self.get_parent())
        else:
            # Check that we are committing the slug to the database
            # Basically: If update_fields has been specified, and slug is not included, skip this step
            if not ('update_fields' in kwargs and 'slug' not in kwargs['update_fields']):
                # see if the slug has changed from the record in the db, in which case we need to
                # update url_path of self and all descendants
                old_record = Page.objects.get(id=self.id)
                if old_record.slug != self.slug:
                    self.set_url_path(self.get_parent())
                    update_descendant_url_paths = True
                    old_url_path = old_record.url_path
                    new_url_path = self.url_path
        result = super().save(**kwargs)
        if not is_new and update_descendant_url_paths:
            # Rewrite descendants' url_path in bulk; the surrounding
            # transaction keeps tree paths consistent if this fails
            self._update_descendant_url_paths(old_url_path, new_url_path)
        # Check if this is a root page of any sites and clear the 'wagtail_site_root_paths' key if so
        # Note: New translations of existing site roots are considered site roots as well, so we must
        # always check if this page is a site root, even if it's new.
        if self.is_site_root():
            cache.delete('wagtail_site_root_paths')
        # Log
        if is_new:
            cls = type(self)
            logger.info(
                "Page created: \"%s\" id=%d content_type=%s.%s path=%s",
                self.title,
                self.id,
                cls._meta.app_label,
                cls.__name__,
                self.url_path
            )
        if log_action is not None:
            # The default for log_action is False. i.e. don't log unless specifically instructed
            # Page creation is a special case that we want logged by default, but allow skipping it
            # explicitly by passing log_action=None
            if is_new:
                PageLogEntry.objects.log_action(
                    instance=self,
                    action='wagtail.create',
                    user=user or self.owner,
                    content_changed=True,
                )
            elif log_action:
                PageLogEntry.objects.log_action(
                    instance=self,
                    action=log_action,
                    user=user
                )
        return result
def delete(self, *args, **kwargs):
# Ensure that deletion always happens on an instance of Page, not a specific subclass. This
# works around a bug in treebeard <= 3.0 where calling SpecificPage.delete() fails to delete
# child pages that are not instances of SpecificPage
if type(self) is Page:
user = kwargs.pop('user', None)
def log_deletion(page, user):
PageLogEntry.objects.log_action(
instance=page,
action='wagtail.delete',
user=user,
deleted=True,
)
if self.get_children().exists():
for child in self.get_children():
log_deletion(child.specific, user)
log_deletion(self.specific, user)
# this is a Page instance, so carry on as we were
return super().delete(*args, **kwargs)
else:
# retrieve an actual Page instance and delete that instead of self
return Page.objects.get(id=self.id).delete(*args, **kwargs)
    @classmethod
    def check(cls, **kwargs):
        """
        System checks for page models: warn about cascading foreign keys,
        custom managers that don't inherit from PageManager, and invalid
        subpage_types / parent_page_types settings.
        """
        errors = super(Page, cls).check(**kwargs)
        # Check that foreign keys from pages are not configured to cascade
        # This is the default Django behaviour which must be explicitly overridden
        # to prevent pages disappearing unexpectedly and the tree being corrupted
        # get names of foreign keys pointing to parent classes (such as page_ptr)
        field_exceptions = [field.name
                            for model in [cls] + list(cls._meta.get_parent_list())
                            for field in model._meta.parents.values() if field]
        for field in cls._meta.fields:
            if isinstance(field, models.ForeignKey) and field.name not in field_exceptions:
                if field.remote_field.on_delete == models.CASCADE:
                    errors.append(
                        checks.Warning(
                            "Field hasn't specified on_delete action",
                            hint="Set on_delete=models.SET_NULL and make sure the field is nullable or set on_delete=models.PROTECT. Wagtail does not allow simple database CASCADE because it will corrupt its tree storage.",
                            obj=field,
                            id='wagtailcore.W001',
                        )
                    )
        if not isinstance(cls.objects, PageManager):
            errors.append(
                checks.Error(
                    "Manager does not inherit from PageManager",
                    hint="Ensure that custom Page managers inherit from wagtail.core.models.PageManager",
                    obj=cls,
                    id='wagtailcore.E002',
                )
            )
        try:
            cls.clean_subpage_models()
        except (ValueError, LookupError) as e:
            errors.append(
                checks.Error(
                    "Invalid subpage_types setting for %s" % cls,
                    hint=str(e),
                    id='wagtailcore.E002'
                )
            )
        try:
            cls.clean_parent_page_models()
        except (ValueError, LookupError) as e:
            errors.append(
                checks.Error(
                    "Invalid parent_page_types setting for %s" % cls,
                    hint=str(e),
                    id='wagtailcore.E002'
                )
            )
        return errors
def _update_descendant_url_paths(self, old_url_path, new_url_path):
(
Page.objects
.filter(path__startswith=self.path)
.exclude(pk=self.pk)
.update(
url_path=Concat(
Value(new_url_path),
Substr('url_path', len(old_url_path) + 1)
)
)
)
    def get_specific(self, deferred=False, copy_attrs=None, copy_attrs_exclude=None):
        """
        .. versionadded:: 2.12
        Return this page in its most specific subclassed form.
        .. versionchanged:: 2.13
            * When ``copy_attrs`` is not supplied, all known non-field attribute
              values are copied to the returned object. Previously, no non-field
              values would be copied.
            * The ``copy_attrs_exclude`` option was added.
        By default, a database query is made to fetch all field values for the
        specific object. If you only require access to custom methods or other
        non-field attributes on the specific object, you can use
        ``deferred=True`` to avoid this query. However, any attempts to access
        specific field values from the returned object will trigger additional
        database queries.
        By default, references to all non-field attribute values are copied
        from current object to the returned one. This includes:
        * Values set by a queryset, for example: annotations, or values set as
          a result of using ``select_related()`` or ``prefetch_related()``.
        * Any ``cached_property`` values that have been evaluated.
        * Attributes set elsewhere in Python code.
        For fine-grained control over which non-field values are copied to the
        returned object, you can use ``copy_attrs`` to specify a complete list
        of attribute names to include. Alternatively, you can use
        ``copy_attrs_exclude`` to specify a list of attribute names to exclude.
        If called on a page object that is already an instance of the most
        specific class (e.g. an ``EventPage``), the object will be returned
        as is, and no database queries or other operations will be triggered.
        If the page was originally created using a page type that has since
        been removed from the codebase, a generic ``Page`` object will be
        returned (without any custom field values or other functionality
        present on the original class). Usually, deleting these pages is the
        best course of action, but there is currently no safe way for Wagtail
        to do that at migration time.
        """
        model_class = self.specific_class
        if model_class is None:
            # The codebase and database are out of sync (e.g. the model exists
            # on a different git branch and migrations were not applied or
            # reverted before switching branches). So, the best we can do is
            # return the page in its current form.
            return self
        if isinstance(self, model_class):
            # self is already an instance of the most specific class
            return self
        if deferred:
            # Generate a tuple of values in the order expected by __init__(),
            # with missing values substituted with DEFERRED ()
            values = tuple(
                getattr(self, f.attname, self.pk if f.primary_key else DEFERRED)
                for f in model_class._meta.concrete_fields
            )
            # Create object from known attribute values
            specific_obj = model_class(*values)
            specific_obj._state.adding = self._state.adding
        else:
            # Fetch object from database
            specific_obj = model_class._default_manager.get(id=self.id)
        # Copy non-field attribute values
        if copy_attrs is not None:
            # Explicit allow-list: copy only the attributes present on self
            for attr in (attr for attr in copy_attrs if attr in self.__dict__):
                setattr(specific_obj, attr, getattr(self, attr))
        else:
            exclude = copy_attrs_exclude or ()
            for k, v in (
                (k, v) for k, v in self.__dict__.items()
                if k not in exclude
            ):
                # only set values that haven't already been set
                specific_obj.__dict__.setdefault(k, v)
        return specific_obj
    @cached_property
    def specific(self):
        """
        Returns this page in its most specific subclassed form with all field
        values fetched from the database. The result is cached in memory.

        Equivalent to calling ``get_specific()`` with default arguments.
        """
        return self.get_specific()
    @cached_property
    def specific_deferred(self):
        """
        .. versionadded:: 2.12
        Returns this page in its most specific subclassed form without any
        additional field values being fetched from the database. The result
        is cached in memory.

        Equivalent to calling ``get_specific(deferred=True)``.
        """
        return self.get_specific(deferred=True)
    @cached_property
    def specific_class(self):
        """
        Return the class that this page would be if instantiated in its
        most specific form.
        If the model class can no longer be found in the codebase, and the
        relevant ``ContentType`` has been removed by a database migration,
        the return value will be ``None``.
        If the model class can no longer be found in the codebase, but the
        relevant ``ContentType`` is still present in the database (usually a
        result of switching between git branches without running or reverting
        database migrations beforehand), the return value will be ``None``.
        """
        # ContentType.model_class() returns None when the model is missing
        return self.cached_content_type.model_class()
    @property
    def cached_content_type(self):
        """
        .. versionadded:: 2.10
        Return this page's ``content_type`` value from the ``ContentType``
        model's cached manager, which will avoid a database query if the
        object is already in memory.
        """
        return ContentType.objects.get_for_id(self.content_type_id)
@property
def localized_draft(self):
"""
Finds the translation in the current active language.
If there is no translation in the active language, self is returned.
Note: This will return translations that are in draft. If you want to exclude
these, use the ``.localized`` attribute.
"""
try:
locale = Locale.get_active()
except (LookupError, Locale.DoesNotExist):
return self
if locale.id == self.locale_id:
return self
return self.get_translation_or_none(locale) or self
@property
def localized(self):
"""
Finds the translation in the current active language.
If there is no translation in the active language, self is returned.
Note: This will not return the translation if it is in draft.
If you want to include drafts, use the ``.localized_draft`` attribute instead.
"""
localized = self.localized_draft
if not localized.live:
return self
return localized
def route(self, request, path_components):
if path_components:
# request is for a child of this page
child_slug = path_components[0]
remaining_components = path_components[1:]
try:
subpage = self.get_children().get(slug=child_slug)
except Page.DoesNotExist:
raise Http404
return subpage.specific.route(request, remaining_components)
else:
# request is for this very page
if self.live:
return RouteResult(self)
else:
raise Http404
    def get_admin_display_title(self):
        """
        Return the title for this page as it should appear in the admin backend;
        override this if you wish to display extra contextual information about the page,
        such as language. By default, returns ``draft_title``.
        """
        # Fall back on title if draft_title is blank (which may happen if the page was created
        # in a fixture or migration that didn't explicitly handle draft_title)
        return self.draft_title or self.title
    def save_revision(self, user=None, submitted_for_moderation=False, approved_go_live_at=None, changed=True,
                      log_action=False, previous_revision=None, clean=True):
        """
        Creates and saves a page revision.

        :param user: the user performing the action
        :param submitted_for_moderation: indicates whether the page was submitted for moderation
        :param approved_go_live_at: the date and time the revision is approved to go live
        :param changed: indicates whether there were any content changes
        :param log_action: flag for logging the action. Pass False to skip logging. Can be passed an action string.
            Defaults to 'wagtail.edit' when no 'previous_revision' param is passed, otherwise 'wagtail.revert'
        :param previous_revision: indicates a revision reversal. Should be set to the previous revision instance
        :param clean: Set this to False to skip cleaning page content before saving this revision
        :return: the newly created revision
        """
        # Raise an error if this page is an alias.
        if self.alias_of_id:
            raise RuntimeError(
                "save_revision() was called on an alias page. "
                "Revisions are not required for alias pages as they are an exact copy of another page."
            )
        if clean:
            self.full_clean()
        new_comments = self.comments.filter(pk__isnull=True)
        for comment in new_comments:
            # We need to ensure comments have an id in the revision, so positions can be identified correctly
            comment.save()
        # Create revision
        revision = self.revisions.create(
            content_json=self.to_json(),
            user=user,
            submitted_for_moderation=submitted_for_moderation,
            approved_go_live_at=approved_go_live_at,
        )
        # Link the newly-saved comments back to the revision they were created in
        for comment in new_comments:
            comment.revision_created = revision
        # Accumulate the fields changed below so they can be saved in a single query
        update_fields = ['comments']
        self.latest_revision_created_at = revision.created_at
        update_fields.append('latest_revision_created_at')
        # Keep draft_title in sync with the title as of this revision
        self.draft_title = self.title
        update_fields.append('draft_title')
        if changed:
            self.has_unpublished_changes = True
            update_fields.append('has_unpublished_changes')
        if update_fields:
            # clean=False because the fields we're updating don't need validation
            self.save(update_fields=update_fields, clean=False)
        # Log
        logger.info("Page edited: \"%s\" id=%d revision_id=%d", self.title, self.id, revision.id)
        if log_action:
            if not previous_revision:
                # Plain edit: use the given action string, or 'wagtail.edit' by default
                PageLogEntry.objects.log_action(
                    instance=self,
                    action=log_action if isinstance(log_action, str) else 'wagtail.edit',
                    user=user,
                    revision=revision,
                    content_changed=changed,
                )
            else:
                # Reversion: record which revision the page was reverted to
                PageLogEntry.objects.log_action(
                    instance=self,
                    action=log_action if isinstance(log_action, str) else 'wagtail.revert',
                    user=user,
                    data={
                        'revision': {
                            'id': previous_revision.id,
                            'created': previous_revision.created_at.strftime("%d %b %Y %H:%M")
                        }
                    },
                    revision=revision,
                    content_changed=changed,
                )
        if submitted_for_moderation:
            logger.info("Page submitted for moderation: \"%s\" id=%d revision_id=%d", self.title, self.id, revision.id)
        return revision
def get_latest_revision(self):
return self.revisions.order_by('-created_at', '-id').first()
def get_latest_revision_as_page(self):
if not self.has_unpublished_changes:
# Use the live database copy in preference to the revision record, as:
# 1) this will pick up any changes that have been made directly to the model,
# such as automated data imports;
# 2) it ensures that inline child objects pick up real database IDs even if
# those are absent from the revision data. (If this wasn't the case, the child
# objects would be recreated with new IDs on next publish - see #1853)
return self.specific
latest_revision = self.get_latest_revision()
if latest_revision:
return latest_revision.as_page_object()
else:
return self.specific
def update_aliases(self, *, revision=None, user=None, _content_json=None, _updated_ids=None):
"""
Publishes all aliases that follow this page with the latest content from this page.
This is called by Wagtail whenever a page with aliases is published.
:param revision: The revision of the original page that we are updating to (used for logging purposes)
:type revision: PageRevision, optional
:param user: The user who is publishing (used for logging purposes)
:type user: User, optional
"""
specific_self = self.specific
# Only compute this if necessary since it's quite a heavy operation
if _content_json is None:
_content_json = self.to_json()
# A list of IDs that have already been updated. This is just in case someone has
# created an alias loop (which is impossible to do with the UI Wagtail provides)
_updated_ids = _updated_ids or []
for alias in self.specific_class.objects.filter(alias_of=self).exclude(id__in=_updated_ids):
# FIXME: Switch to the same fields that are excluded from copy
# We can't do this right now because we can't exclude fields from with_content_json
exclude_fields = ['id', 'path', 'depth', 'numchild', 'url_path', 'path', 'index_entries']
# Copy field content
alias_updated = alias.with_content_json(_content_json)
# Publish the alias if it's currently in draft
alias_updated.live = True
alias_updated.has_unpublished_changes = False
# Copy child relations
child_object_map = specific_self.copy_all_child_relations(target=alias_updated, exclude=exclude_fields)
# Process child objects
# This has two jobs:
# - If the alias is in a different locale, this updates the
# locale of any translatable child objects to match
# - If the alias is not a translation of the original, this
# changes the translation_key field of all child objects
# so they do not clash
if child_object_map:
alias_is_translation = alias.translation_key == self.translation_key
def process_child_object(child_object):
if isinstance(child_object, TranslatableMixin):
# Child object's locale must always match the page
child_object.locale = alias_updated.locale
# If the alias isn't a translation of the original page,
# change the child object's translation_keys so they are
# not either
if not alias_is_translation:
child_object.translation_key = uuid.uuid4()
for (rel, previous_id), child_objects in child_object_map.items():
if previous_id is None:
for child_object in child_objects:
process_child_object(child_object)
else:
process_child_object(child_objects)
# Copy M2M relations
_copy_m2m_relations(specific_self, alias_updated, exclude_fields=exclude_fields)
# Don't change the aliases slug
# Aliases can have their own slugs so they can be siblings of the original
alias_updated.slug = alias.slug
alias_updated.set_url_path(alias_updated.get_parent())
# Aliases don't have revisions, so update fields that would normally be updated by save_revision
alias_updated.draft_title = alias_updated.title
alias_updated.latest_revision_created_at = self.latest_revision_created_at
alias_updated.save(clean=False)
page_published.send(sender=alias_updated.specific_class, instance=alias_updated, revision=revision, alias=True)
# Log the publish of the alias
PageLogEntry.objects.log_action(
instance=alias_updated,
action='wagtail.publish',
user=user,
)
# Update any aliases of that alias
# Design note:
# It could be argued that this will be faster if we just changed these alias-of-alias
# pages to all point to the original page and avoid having to update them recursively.
#
# But, it's useful to have a record of how aliases have been chained.
# For example, In Wagtail Localize, we use aliases to create mirrored trees, but those
# trees themselves could have aliases within them. If an alias within a tree is
# converted to a regular page, we want the alias in the mirrored tree to follow that
# new page and stop receiving updates from the original page.
#
# Doing it this way requires an extra lookup query per alias but this is small in
# comparison to the work required to update the alias.
alias.update_aliases(revision=revision, _content_json=_content_json, _updated_ids=_updated_ids)
update_aliases.alters_data = True
    def unpublish(self, set_expired=False, commit=True, user=None, log_action=True):
        """
        Unpublish the page by setting ``live`` to ``False``. Does nothing if ``live`` is already ``False``

        :param set_expired: also mark the page as expired
        :param commit: save the page after updating its fields
        :param user: the user performing the action (recorded in the audit log)
        :param log_action: flag for logging the action. Pass False to skip logging. Can be passed an action string.
            Defaults to 'wagtail.unpublish'
        """
        if self.live:
            self.live = False
            self.has_unpublished_changes = True
            self.live_revision = None
            if set_expired:
                self.expired = True
            if commit:
                # using clean=False to bypass validation
                self.save(clean=False)
            page_unpublished.send(sender=self.specific_class, instance=self.specific)
            if log_action:
                PageLogEntry.objects.log_action(
                    instance=self,
                    action=log_action if isinstance(log_action, str) else 'wagtail.unpublish',
                    user=user,
                )
            logger.info("Page unpublished: \"%s\" id=%d", self.title, self.id)
            # Clear any approved scheduled publishes, since the page is no longer live
            self.revisions.update(approved_go_live_at=None)
            # Unpublish aliases
            for alias in self.aliases.all():
                alias.unpublish()
context_object_name = None
def get_context(self, request, *args, **kwargs):
context = {
PAGE_TEMPLATE_VAR: self,
'self': self,
'request': request,
}
if self.context_object_name:
context[self.context_object_name] = self
return context
def get_template(self, request, *args, **kwargs):
if request.is_ajax():
return self.ajax_template or self.template
else:
return self.template
def serve(self, request, *args, **kwargs):
request.is_preview = getattr(request, 'is_preview', False)
return TemplateResponse(
request,
self.get_template(request, *args, **kwargs),
self.get_context(request, *args, **kwargs)
)
def is_navigable(self):
"""
Return true if it's meaningful to browse subpages of this page -
i.e. it currently has subpages,
or it's at the top level (this rule necessary for empty out-of-the-box sites to have working navigation)
"""
return (not self.is_leaf()) or self.depth == 2
def _get_site_root_paths(self, request=None):
"""
Return ``Site.get_site_root_paths()``, using the cached copy on the
request object if available.
"""
# if we have a request, use that to cache site_root_paths; otherwise, use self
cache_object = request if request else self
try:
return cache_object._wagtail_cached_site_root_paths
except AttributeError:
cache_object._wagtail_cached_site_root_paths = Site.get_site_root_paths()
return cache_object._wagtail_cached_site_root_paths
    def get_url_parts(self, request=None):
        """
        Determine the URL for this page and return it as a tuple of
        ``(site_id, site_root_url, page_url_relative_to_site_root)``.
        Return None if the page is not routable.

        This is used internally by the ``full_url``, ``url``, ``relative_url``
        and ``get_site`` properties and methods; pages with custom URL routing
        should override this method in order to have those operations return
        the custom URLs.

        Accepts an optional keyword argument ``request``, which may be used
        to avoid repeated database / cache lookups. Typically, a page model
        that overrides ``get_url_parts`` should not need to deal with
        ``request`` directly, and should just pass it to the original method
        when calling ``super``.
        """
        # Candidate sites: those whose root path is a prefix of this page's url_path
        possible_sites = [
            (pk, path, url, language_code)
            for pk, path, url, language_code in self._get_site_root_paths(request)
            if self.url_path.startswith(path)
        ]
        if not possible_sites:
            return None
        site_id, root_path, root_url, language_code = possible_sites[0]
        site = Site.find_for_request(request)
        if site:
            # Prefer the candidate matching the site that served the request
            for site_id, root_path, root_url, language_code in possible_sites:
                if site_id == site.pk:
                    break
            else:
                # No candidate matches the request's site; fall back to the first
                site_id, root_path, root_url, language_code = possible_sites[0]
        use_wagtail_i18n = getattr(settings, 'WAGTAIL_I18N_ENABLED', False)
        if use_wagtail_i18n:
            # If the active language code is a variant of the page's language, then
            # use that instead
            # This is used when LANGUAGES contain more languages than WAGTAIL_CONTENT_LANGUAGES
            try:
                if get_supported_content_language_variant(translation.get_language()) == language_code:
                    language_code = translation.get_language()
            except LookupError:
                # active language code is not a recognised content language, so leave
                # page's language code unchanged
                pass
        # The page may not be routable because wagtail_serve is not registered
        # This may be the case if Wagtail is used headless
        try:
            if use_wagtail_i18n:
                with translation.override(language_code):
                    page_path = reverse(
                        'wagtail_serve', args=(self.url_path[len(root_path):],))
            else:
                page_path = reverse(
                    'wagtail_serve', args=(self.url_path[len(root_path):],))
        except NoReverseMatch:
            return (site_id, None, None)
        # Remove the trailing slash from the URL reverse generates if
        # WAGTAIL_APPEND_SLASH is False and we're not trying to serve
        # the root path
        if not WAGTAIL_APPEND_SLASH and page_path != '/':
            page_path = page_path.rstrip('/')
        return (site_id, root_url, page_path)
def get_full_url(self, request=None):
"""Return the full URL (including protocol / domain) to this page, or None if it is not routable"""
url_parts = self.get_url_parts(request=request)
if url_parts is None or url_parts[1] is None and url_parts[2] is None:
# page is not routable
return
site_id, root_url, page_path = url_parts
return root_url + page_path
full_url = property(get_full_url)
def get_url(self, request=None, current_site=None):
"""
Return the 'most appropriate' URL for referring to this page from the pages we serve,
within the Wagtail backend and actual website templates;
this is the local URL (starting with '/') if we're only running a single site
(i.e. we know that whatever the current page is being served from, this link will be on the
same domain), and the full URL (with domain) if not.
Return None if the page is not routable.
Accepts an optional but recommended ``request`` keyword argument that, if provided, will
be used to cache site-level URL information (thereby avoiding repeated database / cache
lookups) and, via the ``Site.find_for_request()`` function, determine whether a relative
or full URL is most appropriate.
"""
# ``current_site`` is purposefully undocumented, as one can simply pass the request and get
# a relative URL based on ``Site.find_for_request()``. Nonetheless, support it here to avoid
# copy/pasting the code to the ``relative_url`` method below.
if current_site is None and request is not None:
site = Site.find_for_request(request)
current_site = site
url_parts = self.get_url_parts(request=request)
if url_parts is None or url_parts[1] is None and url_parts[2] is None:
# page is not routable
return
site_id, root_url, page_path = url_parts
# Get number of unique sites in root paths
# Note: there may be more root paths to sites if there are multiple languages
num_sites = len(set(root_path[0] for root_path in self._get_site_root_paths(request)))
if (current_site is not None and site_id == current_site.id) or num_sites == 1:
# the site matches OR we're only running a single site, so a local URL is sufficient
return page_path
else:
return root_url + page_path
url = property(get_url)
def relative_url(self, current_site, request=None):
"""
Return the 'most appropriate' URL for this page taking into account the site we're currently on;
a local URL if the site matches, or a fully qualified one otherwise.
Return None if the page is not routable.
Accepts an optional but recommended ``request`` keyword argument that, if provided, will
be used to cache site-level URL information (thereby avoiding repeated database / cache
lookups).
"""
return self.get_url(request=request, current_site=current_site)
def get_site(self):
"""
Return the Site object that this page belongs to.
"""
url_parts = self.get_url_parts()
if url_parts is None:
# page is not routable
return
site_id, root_url, page_path = url_parts
return Site.objects.get(id=site_id)
    @classmethod
    def get_indexed_objects(cls):
        """
        Return the queryset of pages to index for search, restricted to
        those whose ``content_type`` matches this class.
        """
        content_type = ContentType.objects.get_for_model(cls)
        return super(Page, cls).get_indexed_objects().filter(content_type=content_type)
    def get_indexed_instance(self):
        """
        Return the specific page instance to index for search, or None if it
        cannot be retrieved yet.
        """
        # This is accessed on save by the wagtailsearch signal handler, and in edge
        # cases (e.g. loading test fixtures), may be called before the specific instance's
        # entry has been created. In those cases, we aren't ready to be indexed yet, so
        # return None.
        try:
            return self.specific
        except self.specific_class.DoesNotExist:
            return None
@classmethod
def clean_subpage_models(cls):
"""
Returns the list of subpage types, normalised as model classes.
Throws ValueError if any entry in subpage_types cannot be recognised as a model name,
or LookupError if a model does not exist (or is not a Page subclass).
"""
if cls._clean_subpage_models is None:
subpage_types = getattr(cls, 'subpage_types', None)
if subpage_types is None:
# if subpage_types is not specified on the Page class, allow all page types as subpages
cls._clean_subpage_models = get_page_models()
else:
cls._clean_subpage_models = [
resolve_model_string(model_string, cls._meta.app_label)
for model_string in subpage_types
]
for model in cls._clean_subpage_models:
if not issubclass(model, Page):
raise LookupError("%s is not a Page subclass" % model)
return cls._clean_subpage_models
@classmethod
def clean_parent_page_models(cls):
"""
Returns the list of parent page types, normalised as model classes.
Throws ValueError if any entry in parent_page_types cannot be recognised as a model name,
or LookupError if a model does not exist (or is not a Page subclass).
"""
if cls._clean_parent_page_models is None:
parent_page_types = getattr(cls, 'parent_page_types', None)
if parent_page_types is None:
# if parent_page_types is not specified on the Page class, allow all page types as subpages
cls._clean_parent_page_models = get_page_models()
else:
cls._clean_parent_page_models = [
resolve_model_string(model_string, cls._meta.app_label)
for model_string in parent_page_types
]
for model in cls._clean_parent_page_models:
if not issubclass(model, Page):
raise LookupError("%s is not a Page subclass" % model)
return cls._clean_parent_page_models
@classmethod
def allowed_parent_page_models(cls):
"""
Returns the list of page types that this page type can be a subpage of,
as a list of model classes
"""
return [
parent_model for parent_model in cls.clean_parent_page_models()
if cls in parent_model.clean_subpage_models()
]
@classmethod
def allowed_subpage_models(cls):
"""
Returns the list of page types that this page type can have as subpages,
as a list of model classes
"""
return [
subpage_model for subpage_model in cls.clean_subpage_models()
if cls in subpage_model.clean_parent_page_models()
]
@classmethod
def creatable_subpage_models(cls):
"""
Returns the list of page types that may be created under this page type,
as a list of model classes
"""
return [
page_model for page_model in cls.allowed_subpage_models()
if page_model.is_creatable
]
@classmethod
def can_exist_under(cls, parent):
"""
Checks if this page type can exist as a subpage under a parent page
instance.
See also: :func:`Page.can_create_at` and :func:`Page.can_move_to`
"""
return cls in parent.specific_class.allowed_subpage_models()
@classmethod
def can_create_at(cls, parent):
"""
Checks if this page type can be created as a subpage under a parent
page instance.
"""
can_create = cls.is_creatable and cls.can_exist_under(parent)
if cls.max_count is not None:
can_create = can_create and cls.objects.count() < cls.max_count
if cls.max_count_per_parent is not None:
can_create = can_create and parent.get_children().type(cls).count() < cls.max_count_per_parent
return can_create
def can_move_to(self, parent):
"""
Checks if this page instance can be moved to be a subpage of a parent
page instance.
"""
# Prevent pages from being moved to different language sections
# The only page that can have multi-lingual children is the root page
parent_is_root = parent.depth == 1
if not parent_is_root and parent.locale_id != self.locale_id:
return False
return self.can_exist_under(parent)
@classmethod
def get_verbose_name(cls):
"""
Returns the human-readable "verbose name" of this page model e.g "Blog page".
"""
# This is similar to doing cls._meta.verbose_name.title()
# except this doesn't convert any characters to lowercase
return capfirst(cls._meta.verbose_name)
@property
def status_string(self):
if not self.live:
if self.expired:
return _("expired")
elif self.approved_schedule:
return _("scheduled")
elif self.workflow_in_progress:
return _("in moderation")
else:
return _("draft")
else:
if self.approved_schedule:
return _("live + scheduled")
elif self.workflow_in_progress:
return _("live + in moderation")
elif self.has_unpublished_changes:
return _("live + draft")
else:
return _("live")
@property
def approved_schedule(self):
return self.revisions.exclude(approved_go_live_at__isnull=True).exists()
def has_unpublished_subtree(self):
"""
An awkwardly-defined flag used in determining whether unprivileged editors have
permission to delete this article. Returns true if and only if this page is non-live,
and it has no live children.
"""
return (not self.live) and (not self.get_descendants().filter(live=True).exists())
    def move(self, target, pos=None, user=None):
        """
        Extension to the treebeard 'move' method to ensure that url_path is updated,
        and to emit 'pre_page_move' and 'post_page_move' signals.

        :param target: the page to move relative to
        :param pos: treebeard position string; child positions
            ('first-child', 'last-child', 'sorted-child') make ``target`` the
            new parent, other positions make ``target``'s parent the new parent
        :param user: the user performing the move (recorded in the audit log)
        """
        # Determine old and new parents
        parent_before = self.get_parent()
        if pos in ('first-child', 'last-child', 'sorted-child'):
            parent_after = target
        else:
            parent_after = target.get_parent()
        # Determine old and new url_paths
        # Fetching new object to avoid affecting `self`
        old_self = Page.objects.get(id=self.id)
        old_url_path = old_self.url_path
        new_url_path = old_self.set_url_path(parent=parent_after)
        # Emit pre_page_move signal
        pre_page_move.send(
            sender=self.specific_class or self.__class__,
            instance=self,
            parent_page_before=parent_before,
            parent_page_after=parent_after,
            url_path_before=old_url_path,
            url_path_after=new_url_path,
        )
        # Only commit when all descendants are properly updated
        with transaction.atomic():
            # Allow treebeard to update `path` values
            super().move(target, pos=pos)
            # Treebeard's move method doesn't actually update the in-memory instance,
            # so we need to work with a freshly loaded one now
            new_self = Page.objects.get(id=self.id)
            new_self.url_path = new_url_path
            new_self.save()
            # Update descendant paths if url_path has changed
            if old_url_path != new_url_path:
                new_self._update_descendant_url_paths(old_url_path, new_url_path)
        # Emit post_page_move signal
        post_page_move.send(
            sender=self.specific_class or self.__class__,
            instance=new_self,
            parent_page_before=parent_before,
            parent_page_after=parent_after,
            url_path_before=old_url_path,
            url_path_after=new_url_path,
        )
        # Log
        PageLogEntry.objects.log_action(
            instance=self,
            # Check if page was reordered (reordering doesn't change the parent)
            action='wagtail.reorder' if parent_before.id == target.id else 'wagtail.move',
            user=user,
            data={
                'source': {
                    'id': parent_before.id,
                    'title': parent_before.specific_deferred.get_admin_display_title()
                },
                'destination': {
                    'id': parent_after.id,
                    'title': parent_after.specific_deferred.get_admin_display_title()
                }
            }
        )
        logger.info("Page moved: \"%s\" id=%d path=%s", self.title, self.id, new_url_path)
    def copy(self, recursive=False, to=None, update_attrs=None, copy_revisions=True, keep_live=True, user=None,
             process_child_object=None, exclude_fields=None, log_action='wagtail.copy', reset_translation_key=True, _mpnode_attrs=None):
        """
        Copies a given page, returning the new page.

        :param recursive: also copy all descendant pages
        :param to: the destination parent page; defaults to creating a sibling of this page
        :param update_attrs: dict of field values to override on the copy
        :param copy_revisions: copy this page's revision history onto the new page
        :param keep_live: keep the copy's live/publication state; otherwise the copy is created as a draft
        :param user: the user performing the copy; set as the owner of the copy and recorded in the audit log
        :param process_child_object: optional callable ``(original_page, page_copy, child_relation, child_object)``
            applied to each copied child object
        :param exclude_fields: extra field names to exclude from the copy
        :param log_action: flag for logging the action. Pass None to skip logging.
            Can be passed an action string. Defaults to 'wagtail.copy'
        :param reset_translation_key: give the copy (and its child objects) fresh translation keys
        :param _mpnode_attrs: internal - a ``(path, depth)`` tuple for a pre-reserved tree position
        """
        if self._state.adding:
            raise RuntimeError('Page.copy() called on an unsaved page')
        exclude_fields = self.default_exclude_fields_in_copy + self.exclude_fields_in_copy + (exclude_fields or [])
        specific_self = self.specific
        if keep_live:
            base_update_attrs = {
                'alias_of': None,
            }
        else:
            # Reset all publication state so the copy starts life as a draft
            base_update_attrs = {
                'live': False,
                'has_unpublished_changes': True,
                'live_revision': None,
                'first_published_at': None,
                'last_published_at': None,
                'alias_of': None,
            }
        if user:
            base_update_attrs['owner'] = user
        # When we're not copying for translation, we should give the translation_key a new value
        if reset_translation_key:
            base_update_attrs['translation_key'] = uuid.uuid4()
        if update_attrs:
            base_update_attrs.update(update_attrs)
        page_copy, child_object_map = _copy(specific_self, exclude_fields=exclude_fields, update_attrs=base_update_attrs)
        # Save copied child objects and run process_child_object on them if we need to
        for (child_relation, old_pk), child_object in child_object_map.items():
            if process_child_object:
                process_child_object(specific_self, page_copy, child_relation, child_object)
            # When we're not copying for translation, we should give the translation_key a new value for each child object as well
            if reset_translation_key and isinstance(child_object, TranslatableMixin):
                child_object.translation_key = uuid.uuid4()
        # Save the new page
        if _mpnode_attrs:
            # We've got a tree position already reserved. Perform a quick save
            page_copy.path = _mpnode_attrs[0]
            page_copy.depth = _mpnode_attrs[1]
            page_copy.save(clean=False)
        else:
            if to:
                if recursive and (to == self or to.is_descendant_of(self)):
                    raise Exception("You cannot copy a tree branch recursively into itself")
                page_copy = to.add_child(instance=page_copy)
            else:
                page_copy = self.add_sibling(instance=page_copy)
            _mpnode_attrs = (page_copy.path, page_copy.depth)
        _copy_m2m_relations(specific_self, page_copy, exclude_fields=exclude_fields, update_attrs=base_update_attrs)
        # Copy revisions
        if copy_revisions:
            for revision in self.revisions.all():
                # Clearing the pk makes revision.save() insert a new row
                revision.pk = None
                revision.submitted_for_moderation = False
                revision.approved_go_live_at = None
                revision.page = page_copy
                # Update ID fields in content
                revision_content = json.loads(revision.content_json)
                revision_content['pk'] = page_copy.pk
                for child_relation in get_all_child_relations(specific_self):
                    accessor_name = child_relation.get_accessor_name()
                    try:
                        child_objects = revision_content[accessor_name]
                    except KeyError:
                        # KeyErrors are possible if the revision was created
                        # before this child relation was added to the database
                        continue
                    for child_object in child_objects:
                        child_object[child_relation.field.name] = page_copy.pk
                        # Remap primary key to copied versions
                        # If the primary key is not recognised (eg, the child object has been deleted from the database)
                        # set the primary key to None
                        copied_child_object = child_object_map.get((child_relation, child_object['pk']))
                        child_object['pk'] = copied_child_object.pk if copied_child_object else None
                revision.content_json = json.dumps(revision_content)
                # Save
                revision.save()
        # Create a new revision
        # This code serves a few purposes:
        # * It makes sure update_attrs gets applied to the latest revision
        # * It bumps the last_revision_created_at value so the new page gets ordered as if it was just created
        # * It sets the user of the new revision so it's possible to see who copied the page by looking at its history
        latest_revision = page_copy.get_latest_revision_as_page()
        if update_attrs:
            for field, value in update_attrs.items():
                setattr(latest_revision, field, value)
        latest_revision_as_page_revision = latest_revision.save_revision(user=user, changed=False, clean=False)
        if keep_live:
            page_copy.live_revision = latest_revision_as_page_revision
            page_copy.last_published_at = latest_revision_as_page_revision.created_at
            page_copy.first_published_at = latest_revision_as_page_revision.created_at
            page_copy.save(clean=False)
        if page_copy.live:
            page_published.send(
                sender=page_copy.specific_class, instance=page_copy,
                revision=latest_revision_as_page_revision
            )
        # Log
        if log_action:
            parent = specific_self.get_parent()
            PageLogEntry.objects.log_action(
                instance=page_copy,
                action=log_action,
                user=user,
                data={
                    'page': {
                        'id': page_copy.id,
                        'title': page_copy.get_admin_display_title()
                    },
                    'source': {'id': parent.id, 'title': parent.specific_deferred.get_admin_display_title()} if parent else None,
                    'destination': {'id': to.id, 'title': to.specific_deferred.get_admin_display_title()} if to else None,
                    'keep_live': page_copy.live and keep_live
                },
            )
            if page_copy.live and keep_live:
                # Log the publish if the user chose to keep the copied page live
                PageLogEntry.objects.log_action(
                    instance=page_copy,
                    action='wagtail.publish',
                    user=user,
                    revision=latest_revision_as_page_revision,
                )
        logger.info("Page copied: \"%s\" id=%d from=%d", page_copy.title, page_copy.id, self.id)
        # Copy child pages
        if recursive:
            numchild = 0
            for child_page in self.get_children().specific():
                newdepth = _mpnode_attrs[1] + 1
                # Reserve the child's tree position up-front so the recursive
                # call can take the quick-save path
                child_mpnode_attrs = (
                    Page._get_path(_mpnode_attrs[0], newdepth, numchild),
                    newdepth
                )
                numchild += 1
                child_page.copy(
                    recursive=True,
                    to=page_copy,
                    copy_revisions=copy_revisions,
                    keep_live=keep_live,
                    user=user,
                    process_child_object=process_child_object,
                    _mpnode_attrs=child_mpnode_attrs
                )
            if numchild > 0:
                page_copy.numchild = numchild
                page_copy.save(clean=False, update_fields=['numchild'])
        return page_copy
    copy.alters_data = True
    def create_alias(self, *, recursive=False, parent=None, update_slug=None, update_locale=None, user=None, log_action='wagtail.create_alias', reset_translation_key=True, _mpnode_attrs=None):
        """
        Creates an alias of the given page.

        An alias is like a copy, but an alias remains in sync with the original page. They
        are not directly editable and do not have revisions.

        You can convert an alias into a regular page by setting the .alias_of attribute to None
        and creating an initial revision.

        :param recursive: create aliases of the page's subtree, defaults to False
        :type recursive: boolean, optional
        :param parent: The page to create the new alias under
        :type parent: Page, optional
        :param update_slug: The slug of the new alias page, defaults to the slug of the original page
        :type update_slug: string, optional
        :param update_locale: The locale of the new alias page, defaults to the locale of the original page
        :type update_locale: Locale, optional
        :param user: The user who is performing this action. This user would be assigned as the owner of the new page and appear in the audit log
        :type user: User, optional
        :param log_action: Override the log action with a custom one. or pass None to skip logging, defaults to 'wagtail.create_alias'
        :type log_action: string or None, optional
        :param reset_translation_key: Generate new translation_keys for the page and any translatable child objects, defaults to True
        :type reset_translation_key: boolean, optional
        """
        specific_self = self.specific
        # FIXME: Switch to the same fields that are excluded from copy
        # We can't do this right now because we can't exclude fields from with_content_json
        # which we use for updating aliases
        exclude_fields = ['id', 'path', 'depth', 'numchild', 'url_path', 'path', 'index_entries']
        update_attrs = {
            'alias_of': self,
            # Aliases don't have revisions so the draft title should always match the live title
            'draft_title': self.title,
            # Likewise, an alias page can't have unpublished changes if it's live
            'has_unpublished_changes': not self.live,
        }
        if update_slug:
            update_attrs['slug'] = update_slug
        if update_locale:
            update_attrs['locale'] = update_locale
        if user:
            update_attrs['owner'] = user
        # When we're not copying for translation, we should give the translation_key a new value
        if reset_translation_key:
            update_attrs['translation_key'] = uuid.uuid4()
        alias, child_object_map = _copy(specific_self, update_attrs=update_attrs, exclude_fields=exclude_fields)
        # Update any translatable child objects
        for (child_relation, old_pk), child_object in child_object_map.items():
            if isinstance(child_object, TranslatableMixin):
                if update_locale:
                    # Child object's locale must match the alias page's locale
                    child_object.locale = update_locale
                # When we're not copying for translation, we should give the translation_key a new value for each child object as well
                if reset_translation_key:
                    child_object.translation_key = uuid.uuid4()
        # Save the new page
        if _mpnode_attrs:
            # We've got a tree position already reserved. Perform a quick save
            alias.path = _mpnode_attrs[0]
            alias.depth = _mpnode_attrs[1]
            alias.save(clean=False)
        else:
            if parent:
                if recursive and (parent == self or parent.is_descendant_of(self)):
                    raise Exception("You cannot copy a tree branch recursively into itself")
                alias = parent.add_child(instance=alias)
            else:
                alias = self.add_sibling(instance=alias)
            _mpnode_attrs = (alias.path, alias.depth)
        _copy_m2m_relations(specific_self, alias, exclude_fields=exclude_fields)
        # Log
        if log_action:
            source_parent = specific_self.get_parent()
            PageLogEntry.objects.log_action(
                instance=alias,
                action=log_action,
                user=user,
                data={
                    'page': {
                        'id': alias.id,
                        'title': alias.get_admin_display_title()
                    },
                    'source': {'id': source_parent.id, 'title': source_parent.specific_deferred.get_admin_display_title()} if source_parent else None,
                    'destination': {'id': parent.id, 'title': parent.specific_deferred.get_admin_display_title()} if parent else None,
                },
            )
            if alias.live:
                # Log the publish
                PageLogEntry.objects.log_action(
                    instance=alias,
                    action='wagtail.publish',
                    user=user,
                )
        logger.info("Page alias created: \"%s\" id=%d from=%d", alias.title, alias.id, self.id)
        # Copy child pages
        if recursive:
            numchild = 0
            for child_page in self.get_children().specific():
                newdepth = _mpnode_attrs[1] + 1
                # Reserve the child's tree position up-front so the recursive
                # call can take the quick-save path
                child_mpnode_attrs = (
                    Page._get_path(_mpnode_attrs[0], newdepth, numchild),
                    newdepth
                )
                numchild += 1
                child_page.create_alias(
                    recursive=True,
                    parent=alias,
                    update_locale=update_locale,
                    user=user,
                    log_action=log_action,
                    reset_translation_key=reset_translation_key,
                    _mpnode_attrs=child_mpnode_attrs
                )
            if numchild > 0:
                alias.numchild = numchild
                alias.save(clean=False, update_fields=['numchild'])
        return alias
    create_alias.alters_data = True
    @transaction.atomic
    def copy_for_translation(self, locale, copy_parents=False, alias=False, exclude_fields=None):
        """
        Creates a copy of this page in the specified locale.

        The new page will be created in draft as a child of this page's translated
        parent.

        For example, if you are translating a blog post from English into French,
        this method will look for the French version of the blog index and create
        the French translation of the blog post under that.

        If this page's parent is not translated into the locale, then a ``ParentNotTranslatedError``
        is raised. You can circumvent this error by passing ``copy_parents=True`` which
        copies any parents that are not translated yet.

        The ``exclude_fields`` parameter can be used to set any fields to a blank value
        in the copy.

        Note that this method calls the ``.copy()`` method internally so any fields that
        are excluded in ``.exclude_fields_in_copy`` will be excluded from the translation.

        :param locale: the target Locale for the translation
        :param copy_parents: if True, untranslated ancestors are copied (as aliases) instead of raising
        :param alias: if True, create an alias of this page rather than a full copy
        :param exclude_fields: field names to blank out in the copy (passed through to ``.copy()``)
        :raises ParentNotTranslatedError: when the parent has no translation and ``copy_parents`` is False
        """
        # Find the translated version of the parent page to create the new page under
        parent = self.get_parent().specific
        slug = self.slug
        if not parent.is_root():
            try:
                translated_parent = parent.get_translation(locale)
            except parent.__class__.DoesNotExist:
                if not copy_parents:
                    raise ParentNotTranslatedError
                # Recursively copy missing ancestors as aliases so the tree
                # position exists in the target locale.
                translated_parent = parent.copy_for_translation(
                    locale, copy_parents=True, alias=True
                )
        else:
            # Don't duplicate the root page for translation. Create new locale as a sibling
            translated_parent = parent
            # Append language code to slug as the new page
            # will be created in the same section as the existing one
            slug += "-" + locale.language_code
        # Find available slug for new page
        slug = find_available_slug(translated_parent, slug)
        if alias:
            return self.create_alias(
                parent=translated_parent,
                update_slug=slug,
                update_locale=locale,
                reset_translation_key=False,
            )
        else:
            # Update locale on translatable child objects as well
            def process_child_object(
                original_page, page_copy, child_relation, child_object
            ):
                if isinstance(child_object, TranslatableMixin):
                    child_object.locale = locale
            return self.copy(
                to=translated_parent,
                update_attrs={
                    "locale": locale,
                    "slug": slug,
                },
                copy_revisions=False,
                keep_live=False,
                reset_translation_key=False,
                process_child_object=process_child_object,
                exclude_fields=exclude_fields,
            )
    # Tell Django's template engine this method has side effects and must not
    # be invoked implicitly from templates.
    copy_for_translation.alters_data = True
def permissions_for_user(self, user):
"""
Return a PagePermissionsTester object defining what actions the user can perform on this page
"""
user_perms = UserPagePermissionsProxy(user)
return user_perms.for_page(self)
    def make_preview_request(self, original_request=None, preview_mode=None, extra_request_attrs=None):
        """
        Simulate a request to this page, by constructing a fake HttpRequest object that is (as far
        as possible) representative of a real request to this page's front-end URL, and invoking
        serve_preview with that request (and the given preview_mode).

        Used for previewing / moderation and any other place where we
        want to display a view of this page in the admin interface without going through the regular
        page routing logic.

        If you pass in a real request object as original_request, additional information (e.g. client IP, cookies)
        will be included in the dummy request.

        :param original_request: optional real request to copy META headers from
        :param preview_mode: internal name of the preview mode to render (see ``preview_modes``)
        :param extra_request_attrs: mapping of attribute name -> value to set on the fake request
        """
        dummy_meta = self._get_dummy_headers(original_request)
        request = WSGIRequest(dummy_meta)
        # Add a flag to let middleware know that this is a dummy request.
        request.is_dummy = True
        if extra_request_attrs:
            for k, v in extra_request_attrs.items():
                setattr(request, k, v)
        page = self
        # Build a custom django.core.handlers.BaseHandler subclass that invokes serve_preview as
        # the eventual view function called at the end of the middleware chain, rather than going
        # through the URL resolver
        class Handler(BaseHandler):
            def _get_response(self, request):
                response = page.serve_preview(request, preview_mode)
                # Force rendering of deferred responses (e.g. TemplateResponse)
                # so middleware sees a fully-rendered response.
                if hasattr(response, 'render') and callable(response.render):
                    response = response.render()
                return response
        # Invoke this custom handler.
        handler = Handler()
        handler.load_middleware()
        return handler.get_response(request)
    def _get_dummy_headers(self, original_request=None):
        """
        Return a dict of META information to be included in a faked HttpRequest object to pass to
        serve_preview.

        The hostname/port/scheme are derived from the page's own URL when one can be
        determined; otherwise they fall back to ALLOWED_HOSTS or 'localhost'. Selected
        headers from ``original_request`` (client IP, cookies, auth, ...) are copied in
        when provided.
        """
        url = self._get_dummy_header_url(original_request)
        if url:
            url_info = urlparse(url)
            hostname = url_info.hostname
            path = url_info.path
            # Default ports per scheme when the URL does not carry one explicitly.
            port = url_info.port or (443 if url_info.scheme == 'https' else 80)
            scheme = url_info.scheme
        else:
            # Cannot determine a URL to this page - cobble one together based on
            # whatever we find in ALLOWED_HOSTS
            try:
                hostname = settings.ALLOWED_HOSTS[0]
                if hostname == '*':
                    # '*' is a valid value to find in ALLOWED_HOSTS[0], but it's not a valid domain name.
                    # So we pretend it isn't there.
                    raise IndexError
            except IndexError:
                hostname = 'localhost'
            path = '/'
            port = 80
            scheme = 'http'
        http_host = hostname
        # Only append the port when it is non-standard for the scheme.
        if port != (443 if scheme == 'https' else 80):
            http_host = '%s:%s' % (http_host, port)
        dummy_values = {
            'REQUEST_METHOD': 'GET',
            'PATH_INFO': path,
            'SERVER_NAME': hostname,
            'SERVER_PORT': port,
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'HTTP_HOST': http_host,
            'wsgi.version': (1, 0),
            'wsgi.input': StringIO(),
            'wsgi.errors': StringIO(),
            'wsgi.url_scheme': scheme,
            'wsgi.multithread': True,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
        }
        # Add important values from the original request object, if it was provided.
        HEADERS_FROM_ORIGINAL_REQUEST = [
            'REMOTE_ADDR', 'HTTP_X_FORWARDED_FOR', 'HTTP_COOKIE', 'HTTP_USER_AGENT', 'HTTP_AUTHORIZATION',
            'wsgi.version', 'wsgi.multithread', 'wsgi.multiprocess', 'wsgi.run_once',
        ]
        if settings.SECURE_PROXY_SSL_HEADER:
            # Preserve the proxy SSL header so scheme detection works in previews too.
            HEADERS_FROM_ORIGINAL_REQUEST.append(settings.SECURE_PROXY_SSL_HEADER[0])
        if original_request:
            for header in HEADERS_FROM_ORIGINAL_REQUEST:
                if header in original_request.META:
                    dummy_values[header] = original_request.META[header]
        return dummy_values
def _get_dummy_header_url(self, original_request=None):
"""
Return the URL that _get_dummy_headers() should use to set META headers
for the faked HttpRequest.
"""
return self.full_url
DEFAULT_PREVIEW_MODES = [('', _('Default'))]
@property
def preview_modes(self):
"""
A list of (internal_name, display_name) tuples for the modes in which
this page can be displayed for preview/moderation purposes. Ordinarily a page
will only have one display mode, but subclasses of Page can override this -
for example, a page containing a form might have a default view of the form,
and a post-submission 'thank you' page
"""
return Page.DEFAULT_PREVIEW_MODES
@property
def default_preview_mode(self):
"""
The preview mode to use in workflows that do not give the user the option of selecting a
mode explicitly, e.g. moderator approval. Will raise IndexError if preview_modes is empty
"""
return self.preview_modes[0][0]
def is_previewable(self):
"""Returns True if at least one preview mode is specified"""
# It's possible that this will be called from a listing page using a plain Page queryset -
# if so, checking self.preview_modes would incorrectly give us the default set from
# Page.preview_modes. However, accessing self.specific.preview_modes would result in an N+1
# query problem. To avoid this (at least in the general case), we'll call .specific only if
# a check of the property at the class level indicates that preview_modes has been
# overridden from whatever type we're currently in.
page = self
if page.specific_class.preview_modes != type(page).preview_modes:
page = page.specific
return bool(page.preview_modes)
    def serve_preview(self, request, mode_name):
        """
        Return an HTTP response for use in page previews. Normally this would be equivalent
        to self.serve(request), since we obviously want the preview to be indicative of how
        it looks on the live site. However, there are a couple of cases where this is not
        appropriate, and custom behaviour is required:

        1) The page has custom routing logic that derives some additional required
        args/kwargs to be passed to serve(). The routing mechanism is bypassed when
        previewing, so there's no way to know what args we should pass. In such a case,
        the page model needs to implement its own version of serve_preview.

        2) The page has several different renderings that we would like to be able to see
        when previewing - for example, a form page might have one rendering that displays
        the form, and another rendering to display a landing page when the form is posted.
        This can be done by setting a custom preview_modes list on the page model -
        Wagtail will allow the user to specify one of those modes when previewing, and
        pass the chosen mode_name to serve_preview so that the page model can decide how
        to render it appropriately. (Page models that do not specify their own preview_modes
        list will always receive an empty string as mode_name.)

        Any templates rendered during this process should use the 'request' object passed
        here - this ensures that request.user and other properties are set appropriately for
        the wagtail user bar to be displayed. This request will always be a GET.
        """
        request.is_preview = True
        response = self.serve(request)
        # Previews must never be stored by shared caches.
        patch_cache_control(response, private=True)
        return response
def get_cached_paths(self):
"""
This returns a list of paths to invalidate in a frontend cache
"""
return ['/']
def get_sitemap_urls(self, request=None):
return [
{
'location': self.get_full_url(request),
# fall back on latest_revision_created_at if last_published_at is null
# (for backwards compatibility from before last_published_at was added)
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
def get_static_site_paths(self):
"""
This is a generator of URL paths to feed into a static site generator
Override this if you would like to create static versions of subpages
"""
# Yield path for this page
yield '/'
# Yield paths for child pages
for child in self.get_children().live():
for path in child.specific.get_static_site_paths():
yield '/' + child.slug + path
def get_ancestors(self, inclusive=False):
"""
Returns a queryset of the current page's ancestors, starting at the root page
and descending to the parent, or to the current page itself if ``inclusive`` is true.
"""
return Page.objects.ancestor_of(self, inclusive)
def get_descendants(self, inclusive=False):
"""
Returns a queryset of all pages underneath the current page, any number of levels deep.
If ``inclusive`` is true, the current page itself is included in the queryset.
"""
return Page.objects.descendant_of(self, inclusive)
def get_siblings(self, inclusive=True):
"""
Returns a queryset of all other pages with the same parent as the current page.
If ``inclusive`` is true, the current page itself is included in the queryset.
"""
return Page.objects.sibling_of(self, inclusive)
def get_next_siblings(self, inclusive=False):
return self.get_siblings(inclusive).filter(path__gte=self.path).order_by('path')
def get_prev_siblings(self, inclusive=False):
return self.get_siblings(inclusive).filter(path__lte=self.path).order_by('-path')
def get_view_restrictions(self):
"""
Return a query set of all page view restrictions that apply to this page.
This checks the current page and all ancestor pages for page view restrictions.
If any of those pages are aliases, it will resolve them to their source pages
before querying PageViewRestrictions so alias pages use the same view restrictions
as their source page and they cannot have their own.
"""
page_ids_to_check = set()
def add_page_to_check_list(page):
# If the page is an alias, add the source page to the check list instead
if page.alias_of:
add_page_to_check_list(page.alias_of)
else:
page_ids_to_check.add(page.id)
# Check current page for view restrictions
add_page_to_check_list(self)
# Check each ancestor for view restrictions as well
for page in self.get_ancestors().only('alias_of'):
add_page_to_check_list(page)
return PageViewRestriction.objects.filter(page_id__in=page_ids_to_check)
password_required_template = getattr(settings, 'PASSWORD_REQUIRED_TEMPLATE', 'wagtailcore/password_required.html')
def serve_password_required_response(self, request, form, action_url):
"""
Serve a response indicating that the user has been denied access to view this page,
and must supply a password.
form = a Django form object containing the password input
(and zero or more hidden fields that also need to be output on the template)
action_url = URL that this form should be POSTed to
"""
context = self.get_context(request)
context['form'] = form
context['action_url'] = action_url
return TemplateResponse(request, self.password_required_template, context)
    def with_content_json(self, content_json):
        """
        Returns a new version of the page with field values updated to reflect changes
        in the provided ``content_json`` (which usually comes from a previously-saved
        page revision).

        Certain field values are preserved in order to prevent errors if the returned
        page is saved, such as ``id``, ``content_type`` and some tree-related values.
        The following field values are also preserved, as they are considered to be
        meaningful to the page as a whole, rather than to a specific revision:

        * ``draft_title``
        * ``live``
        * ``has_unpublished_changes``
        * ``owner``
        * ``locked``
        * ``locked_by``
        * ``locked_at``
        * ``latest_revision_created_at``
        * ``first_published_at``
        * ``alias_of``
        * ``comments``
        """
        obj = self.specific_class.from_json(content_json)
        # These should definitely never change between revisions
        obj.id = self.id
        obj.pk = self.pk
        obj.content_type = self.content_type
        # Override possibly-outdated tree parameter fields
        obj.path = self.path
        obj.depth = self.depth
        obj.numchild = self.numchild
        # Update url_path to reflect potential slug changes, but maintaining the page's
        # existing tree position
        obj.set_url_path(self.get_parent())
        # Ensure other values that are meaningful for the page as a whole (rather than
        # to a specific revision) are preserved
        obj.draft_title = self.draft_title
        obj.live = self.live
        obj.has_unpublished_changes = self.has_unpublished_changes
        obj.owner = self.owner
        obj.locked = self.locked
        obj.locked_by = self.locked_by
        obj.locked_at = self.locked_at
        obj.latest_revision_created_at = self.latest_revision_created_at
        obj.first_published_at = self.first_published_at
        obj.translation_key = self.translation_key
        obj.locale = self.locale
        obj.alias_of_id = self.alias_of_id
        # Only unresolved comments on the live page are carried over; resolved
        # comments stay attached to the revision that resolved them.
        revision_comments = obj.comments
        page_comments = self.comments.filter(resolved_at__isnull=True)
        for comment in page_comments:
            # attempt to retrieve the comment position from the revision's stored version
            # of the comment
            try:
                revision_comment = revision_comments.get(id=comment.id)
                comment.position = revision_comment.position
            except Comment.DoesNotExist:
                pass
        obj.comments = page_comments
        return obj
@property
def has_workflow(self):
"""Returns True if the page or an ancestor has an active workflow assigned, otherwise False"""
if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
return False
return self.get_ancestors(inclusive=True).filter(workflowpage__isnull=False).filter(workflowpage__workflow__active=True).exists()
def get_workflow(self):
"""Returns the active workflow assigned to the page or its nearest ancestor"""
if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
return None
if hasattr(self, 'workflowpage') and self.workflowpage.workflow.active:
return self.workflowpage.workflow
else:
try:
workflow = self.get_ancestors().filter(workflowpage__isnull=False).filter(workflowpage__workflow__active=True).order_by(
'-depth').first().workflowpage.workflow
except AttributeError:
workflow = None
return workflow
@property
def workflow_in_progress(self):
"""Returns True if a workflow is in progress on the current page, otherwise False"""
if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
return False
return WorkflowState.objects.filter(page=self, status=WorkflowState.STATUS_IN_PROGRESS).exists()
@property
def current_workflow_state(self):
"""Returns the in progress or needs changes workflow state on this page, if it exists"""
if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
return None
try:
return WorkflowState.objects.active().select_related("current_task_state__task").get(page=self)
except WorkflowState.DoesNotExist:
return
@property
def current_workflow_task_state(self):
"""Returns (specific class of) the current task state of the workflow on this page, if it exists"""
current_workflow_state = self.current_workflow_state
if current_workflow_state and current_workflow_state.status == WorkflowState.STATUS_IN_PROGRESS and current_workflow_state.current_task_state:
return current_workflow_state.current_task_state.specific
@property
def current_workflow_task(self):
"""Returns (specific class of) the current task in progress on this page, if it exists"""
current_workflow_task_state = self.current_workflow_task_state
if current_workflow_task_state:
return current_workflow_task_state.task.specific
    class Meta:
        verbose_name = _('page')
        verbose_name_plural = _('pages')
        # A page may have at most one translation per locale.
        unique_together = [("translation_key", "locale")]
class Orderable(models.Model):
    """Abstract base adding an explicit, manually-managed ordering position to a model."""
    # Position within the parent collection; not editable via model forms.
    sort_order = models.IntegerField(null=True, blank=True, editable=False)
    # Name of the field holding the position, for code that manages ordering generically.
    sort_order_field = 'sort_order'
    class Meta:
        abstract = True
        ordering = ['sort_order']
class SubmittedRevisionsManager(models.Manager):
    """Manager restricting PageRevision queries to revisions awaiting moderation."""
    def get_queryset(self):
        base = super().get_queryset()
        return base.filter(submitted_for_moderation=True)
class PageRevision(models.Model):
    """
    A snapshot of a page's content at a point in time, stored as JSON.

    Revisions drive drafts, moderation, scheduled publishing and the page
    history log; ``publish()`` turns a revision back into the live page.
    """
    page = models.ForeignKey('Page', verbose_name=_('page'), related_name='revisions', on_delete=models.CASCADE)
    submitted_for_moderation = models.BooleanField(
        verbose_name=_('submitted for moderation'),
        default=False,
        db_index=True
    )
    created_at = models.DateTimeField(db_index=True, verbose_name=_('created at'))
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_('user'), null=True, blank=True,
        on_delete=models.SET_NULL
    )
    # Serialized page content; materialised via as_page_object().
    content_json = models.TextField(verbose_name=_('content JSON'))
    # Non-null when this revision has been approved for scheduled publishing.
    approved_go_live_at = models.DateTimeField(
        verbose_name=_('approved go live at'),
        null=True,
        blank=True,
        db_index=True
    )
    objects = models.Manager()
    submitted_revisions = SubmittedRevisionsManager()
    def save(self, user=None, *args, **kwargs):
        """Save the revision, defaulting created_at and maintaining moderation/scheduling invariants."""
        # Set default value for created_at to now
        # We cannot use auto_now_add as that will override
        # any value that is set before saving
        if self.created_at is None:
            self.created_at = timezone.now()
        super().save(*args, **kwargs)
        if self.submitted_for_moderation:
            # ensure that all other revisions of this page have the 'submitted for moderation' flag unset
            self.page.revisions.exclude(id=self.id).update(submitted_for_moderation=False)
        if (
            self.approved_go_live_at is None
            and 'update_fields' in kwargs and 'approved_go_live_at' in kwargs['update_fields']
        ):
            # Log scheduled revision publish cancellation
            page = self.as_page_object()
            PageLogEntry.objects.log_action(
                instance=page,
                action='wagtail.schedule.cancel',
                data={
                    'revision': {
                        'id': self.id,
                        'created': self.created_at.strftime("%d %b %Y %H:%M"),
                        'go_live_at': page.go_live_at.strftime("%d %b %Y %H:%M") if page.go_live_at else None,
                    }
                },
                user=user,
                revision=self,
            )
    def as_page_object(self):
        """Materialise this revision's JSON content as a (specific) page instance."""
        return self.page.specific.with_content_json(self.content_json)
    def approve_moderation(self, user=None):
        """Approve a revision submitted for moderation: log the approval and publish it."""
        if self.submitted_for_moderation:
            logger.info("Page moderation approved: \"%s\" id=%d revision_id=%d", self.page.title, self.page.id, self.id)
            PageLogEntry.objects.log_action(
                instance=self.as_page_object(),
                action='wagtail.moderation.approve',
                user=user,
                revision=self,
            )
            self.publish()
    def reject_moderation(self, user=None):
        """Reject a revision submitted for moderation: log the rejection and clear the flag."""
        if self.submitted_for_moderation:
            logger.info("Page moderation rejected: \"%s\" id=%d revision_id=%d", self.page.title, self.page.id, self.id)
            PageLogEntry.objects.log_action(
                instance=self.as_page_object(),
                action='wagtail.moderation.reject',
                user=user,
                revision=self,
            )
            self.submitted_for_moderation = False
            self.save(update_fields=['submitted_for_moderation'])
    def is_latest_revision(self):
        """Return True when this is the page's newest revision (unsaved revisions count as newest)."""
        if self.id is None:
            # special case: a revision without an ID is presumed to be newly-created and is thus
            # newer than any revision that might exist in the database
            return True
        latest_revision = PageRevision.objects.filter(page_id=self.page_id).order_by('-created_at', '-id').first()
        return (latest_revision == self)
    def delete(self):
        """Delete the revision, re-homing any comments created on it to the next revision."""
        # Update revision_created fields for comments that reference the current revision, if applicable.
        try:
            next_revision = self.get_next()
        except PageRevision.DoesNotExist:
            next_revision = None
        if next_revision:
            # move comments created on this revision to the next revision, as they may well still apply if they're unresolved
            self.created_comments.all().update(revision_created=next_revision)
        return super().delete()
    def publish(self, user=None, changed=True, log_action=True, previous_revision=None):
        """
        Publishes or schedules revision for publishing.

        :param user: the publishing user
        :param changed: indicated whether content has changed
        :param log_action:
            flag for the logging action. Pass False to skip logging. Cannot pass an action string as the method
            performs several actions: "publish", "revert" (and publish the reverted revision),
            "schedule publishing with a live revision", "schedule revision reversal publishing, with a live revision",
            "schedule publishing", "schedule revision reversal publishing"
        :param previous_revision: indicates a revision reversal. Should be set to the previous revision instance
        """
        page = self.as_page_object()
        def log_scheduling_action(revision, user=None, changed=changed):
            # Shared helper: record a 'schedule publish' entry in the page log.
            PageLogEntry.objects.log_action(
                instance=page,
                action='wagtail.publish.schedule',
                user=user,
                data={
                    'revision': {
                        'id': revision.id,
                        'created': revision.created_at.strftime("%d %b %Y %H:%M"),
                        'go_live_at': page.go_live_at.strftime("%d %b %Y %H:%M"),
                        'has_live_version': page.live,
                    }
                },
                revision=revision,
                content_changed=changed,
            )
        if page.go_live_at and page.go_live_at > timezone.now():
            page.has_unpublished_changes = True
            # Instead set the approved_go_live_at of this revision
            self.approved_go_live_at = page.go_live_at
            self.save()
            # And clear the approved_go_live_at of any other revisions
            page.revisions.exclude(id=self.id).update(approved_go_live_at=None)
            # if we are updating a currently live page skip the rest
            if page.live_revision:
                # Log scheduled publishing
                if log_action:
                    log_scheduling_action(self, user, changed)
                return
            # if we have a go_live in the future don't make the page live
            page.live = False
        else:
            page.live = True
            # at this point, the page has unpublished changes if and only if there are newer revisions than this one
            page.has_unpublished_changes = not self.is_latest_revision()
            # If page goes live clear the approved_go_live_at of all revisions
            page.revisions.update(approved_go_live_at=None)
        page.expired = False  # When a page is published it can't be expired
        # Set first_published_at, last_published_at and live_revision
        # if the page is being published now
        if page.live:
            now = timezone.now()
            page.last_published_at = now
            page.live_revision = self
            if page.first_published_at is None:
                page.first_published_at = now
            # Work out whether this publish renames the page (for log data below).
            if previous_revision:
                previous_revision_page = previous_revision.as_page_object()
                old_page_title = previous_revision_page.title if page.title != previous_revision_page.title else None
            else:
                try:
                    previous = self.get_previous()
                except PageRevision.DoesNotExist:
                    previous = None
                old_page_title = previous.page.title if previous and page.title != previous.page.title else None
        else:
            # Unset live_revision if the page is going live in the future
            page.live_revision = None
        page.save()
        for comment in page.comments.all().only('position'):
            comment.save(update_fields=['position'])
        self.submitted_for_moderation = False
        page.revisions.update(submitted_for_moderation=False)
        workflow_state = page.current_workflow_state
        if workflow_state and getattr(settings, 'WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH', True):
            workflow_state.cancel(user=user)
        if page.live:
            page_published.send(sender=page.specific_class, instance=page.specific, revision=self)
            # Update alias pages
            page.update_aliases(revision=self, user=user, _content_json=self.content_json)
            if log_action:
                data = None
                if previous_revision:
                    data = {
                        'revision': {
                            'id': previous_revision.id,
                            'created': previous_revision.created_at.strftime("%d %b %Y %H:%M")
                        }
                    }
                if old_page_title:
                    data = data or {}
                    data['title'] = {
                        'old': old_page_title,
                        'new': page.title,
                    }
                    PageLogEntry.objects.log_action(
                        instance=page,
                        action='wagtail.rename',
                        user=user,
                        data=data,
                        revision=self,
                    )
                PageLogEntry.objects.log_action(
                    instance=page,
                    action=log_action if isinstance(log_action, str) else 'wagtail.publish',
                    user=user,
                    data=data,
                    revision=self,
                    content_changed=changed,
                )
            logger.info("Page published: \"%s\" id=%d revision_id=%d", page.title, page.id, self.id)
        elif page.go_live_at:
            logger.info(
                "Page scheduled for publish: \"%s\" id=%d revision_id=%d go_live_at=%s",
                page.title,
                page.id,
                self.id,
                page.go_live_at.isoformat()
            )
            if log_action:
                log_scheduling_action(self, user, changed)
    def get_previous(self):
        """Return the chronologically previous revision of the same page."""
        return self.get_previous_by_created_at(page=self.page)
    def get_next(self):
        """Return the chronologically next revision of the same page."""
        return self.get_next_by_created_at(page=self.page)
    def __str__(self):
        return '"' + str(self.page) + '" at ' + str(self.created_at)
    class Meta:
        verbose_name = _('page revision')
        verbose_name_plural = _('page revisions')
# (identifier, short label, long descriptive label) for each page permission type.
PAGE_PERMISSION_TYPES = [
    ('add', _("Add"), _("Add/edit pages you own")),
    ('edit', _("Edit"), _("Edit any page")),
    ('publish', _("Publish"), _("Publish any page")),
    ('bulk_delete', _("Bulk delete"), _("Delete pages with children")),
    ('lock', _("Lock"), _("Lock/unlock pages you've locked")),
    ('unlock', _("Unlock"), _("Unlock any page")),
]
# Django choices list derived from the above: (identifier, long_label).
PAGE_PERMISSION_TYPE_CHOICES = [
    (identifier, long_label)
    for identifier, short_label, long_label in PAGE_PERMISSION_TYPES
]
class GroupPagePermission(models.Model):
    """
    Grants one permission type to a group over a page and (by convention in the
    permission-checking code) the subtree beneath it.
    """
    group = models.ForeignKey(Group, verbose_name=_('group'), related_name='page_permissions', on_delete=models.CASCADE)
    page = models.ForeignKey('Page', verbose_name=_('page'), related_name='group_permissions', on_delete=models.CASCADE)
    permission_type = models.CharField(
        verbose_name=_('permission type'),
        max_length=20,
        choices=PAGE_PERMISSION_TYPE_CHOICES
    )
    class Meta:
        # Each (group, page, type) grant may exist only once.
        unique_together = ('group', 'page', 'permission_type')
        verbose_name = _('group page permission')
        verbose_name_plural = _('group page permissions')
    def __str__(self):
        return "Group %d ('%s') has permission '%s' on page %d ('%s')" % (
            self.group.id, self.group,
            self.permission_type,
            self.page.id, self.page
        )
class UserPagePermissionsProxy:
    """Helper object that encapsulates all the page permission rules that this user has
    across the page hierarchy."""
    def __init__(self, user):
        self.user = user
        # NOTE(review): self.permissions is only assigned for active,
        # non-superuser accounts; every method below guards on is_active /
        # is_superuser before touching it — keep that invariant when adding methods.
        if user.is_active and not user.is_superuser:
            self.permissions = GroupPagePermission.objects.filter(group__user=self.user).select_related('page')
    def revisions_for_moderation(self):
        """Return a queryset of page revisions awaiting moderation that this user has publish permission on"""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return PageRevision.objects.none()
        if self.user.is_superuser:
            return PageRevision.submitted_revisions.all()
        # get the list of pages for which they have direct publish permission
        # (i.e. they can publish any page within this subtree)
        publishable_pages_paths = self.permissions.filter(
            permission_type='publish'
        ).values_list('page__path', flat=True).distinct()
        if not publishable_pages_paths:
            return PageRevision.objects.none()
        # compile a filter expression to apply to the PageRevision.submitted_revisions manager:
        # return only those pages whose paths start with one of the publishable_pages paths
        only_my_sections = Q(page__path__startswith=publishable_pages_paths[0])
        for page_path in publishable_pages_paths[1:]:
            only_my_sections = only_my_sections | Q(page__path__startswith=page_path)
        # return the filtered queryset
        return PageRevision.submitted_revisions.filter(only_my_sections)
    def for_page(self, page):
        """Return a PagePermissionTester object that can be used to query whether this user has
        permission to perform specific tasks on the given page"""
        return PagePermissionTester(self, page)
    def explorable_pages(self):
        """Return a queryset of pages that the user has access to view in the
        explorer (e.g. add/edit/publish permission). Includes all pages with
        specific group permissions and also the ancestors of those pages (in
        order to enable navigation in the explorer)"""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        explorable_pages = Page.objects.none()
        # Creates a union queryset of all objects the user has access to add,
        # edit and publish
        for perm in self.permissions.filter(
            Q(permission_type="add")
            | Q(permission_type="edit")
            | Q(permission_type="publish")
            | Q(permission_type="lock")
        ):
            explorable_pages |= Page.objects.descendant_of(
                perm.page, inclusive=True
            )
        # For all pages with specific permissions, add their ancestors as
        # explorable. This will allow deeply nested pages to be accessed in the
        # explorer. For example, in the hierarchy A>B>C>D where the user has
        # 'edit' access on D, they will be able to navigate to D without having
        # explicit access to A, B or C.
        page_permissions = Page.objects.filter(group_permissions__in=self.permissions)
        for page in page_permissions:
            explorable_pages |= page.get_ancestors()
        # Remove unnecessary top-level ancestors that the user has no access to
        fca_page = page_permissions.first_common_ancestor()
        explorable_pages = explorable_pages.filter(path__startswith=fca_page.path)
        return explorable_pages
    def editable_pages(self):
        """Return a queryset of the pages that this user has permission to edit"""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        editable_pages = Page.objects.none()
        for perm in self.permissions.filter(permission_type='add'):
            # user has edit permission on any subpage of perm.page
            # (including perm.page itself) that is owned by them
            editable_pages |= Page.objects.descendant_of(perm.page, inclusive=True).filter(owner=self.user)
        for perm in self.permissions.filter(permission_type='edit'):
            # user has edit permission on any subpage of perm.page
            # (including perm.page itself) regardless of owner
            editable_pages |= Page.objects.descendant_of(perm.page, inclusive=True)
        return editable_pages
    def can_edit_pages(self):
        """Return True if the user has permission to edit any pages"""
        return self.editable_pages().exists()
    def publishable_pages(self):
        """Return a queryset of the pages that this user has permission to publish"""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        publishable_pages = Page.objects.none()
        for perm in self.permissions.filter(permission_type='publish'):
            # user has publish permission on any subpage of perm.page
            # (including perm.page itself)
            publishable_pages |= Page.objects.descendant_of(perm.page, inclusive=True)
        return publishable_pages
    def can_publish_pages(self):
        """Return True if the user has permission to publish any pages"""
        return self.publishable_pages().exists()
    def can_remove_locks(self):
        """Returns True if the user has permission to unlock pages they have not locked"""
        # NOTE(review): the superuser check precedes the is_active check, so an
        # *inactive* superuser is reported as able to remove locks — confirm
        # whether that ordering is intentional before changing it.
        if self.user.is_superuser:
            return True
        if not self.user.is_active:
            return False
        else:
            return self.permissions.filter(permission_type='unlock').exists()
class PagePermissionTester:
    def __init__(self, user_perms, page):
        """
        Bind a UserPagePermissionsProxy (``user_perms``) to one ``page`` so the
        can_* methods below can answer permission questions for that page.
        """
        self.user = user_perms.user
        self.user_perms = user_perms
        self.page = page
        self.page_is_root = page.depth == 1  # Equivalent to page.is_root()
        # Permission types applying to this page: any grant on the page itself
        # or an ancestor (tested by materialized-path prefix). Only assigned for
        # active non-superusers; other code paths short-circuit before using it.
        if self.user.is_active and not self.user.is_superuser:
            self.permissions = set(
                perm.permission_type for perm in user_perms.permissions
                if self.page.path.startswith(perm.page.path)
            )
def user_has_lock(self):
return self.page.locked_by_id == self.user.pk
    def page_locked(self):
        """
        Return True when this page is locked against editing by this user —
        either by the current workflow task or by a page-level lock.
        """
        current_workflow_task = self.page.current_workflow_task
        if current_workflow_task:
            # The active workflow task gets first say on edit-locking.
            if current_workflow_task.page_locked_for_user(self.page, self.user):
                return True
        if not self.page.locked:
            # Page is not locked
            return False
        if getattr(settings, 'WAGTAILADMIN_GLOBAL_PAGE_EDIT_LOCK', False):
            # All locks are global
            return True
        else:
            # Locked only if the current user was not the one who locked the page
            return not self.user_has_lock()
def can_add_subpage(self):
if not self.user.is_active:
return False
specific_class = self.page.specific_class
if specific_class is None or not specific_class.creatable_subpage_models():
return False
return self.user.is_superuser or ('add' in self.permissions)
def can_edit(self):
if not self.user.is_active:
return False
if self.page_is_root: # root node is not a page and can never be edited, even by superusers
return False
if self.user.is_superuser:
return True
if 'edit' in self.permissions:
return True
if 'add' in self.permissions and self.page.owner_id == self.user.pk:
return True
current_workflow_task = self.page.current_workflow_task
if current_workflow_task:
if current_workflow_task.user_can_access_editor(self.page, self.user):
return True
return False
def can_delete(self, ignore_bulk=False):
if not self.user.is_active:
return False
if self.page_is_root: # root node is not a page and can never be deleted, even by superusers
return False
if self.user.is_superuser:
# superusers require no further checks
return True
# if the user does not have bulk_delete permission, they may only delete leaf pages
if 'bulk_delete' not in self.permissions and not self.page.is_leaf() and not ignore_bulk:
return False
if 'edit' in self.permissions:
# if the user does not have publish permission, we also need to confirm that there
# are no published pages here
if 'publish' not in self.permissions:
pages_to_delete = self.page.get_descendants(inclusive=True)
if pages_to_delete.live().exists():
return False
return True
elif 'add' in self.permissions:
pages_to_delete = self.page.get_descendants(inclusive=True)
if 'publish' in self.permissions:
# we don't care about live state, but all pages must be owned by this user
# (i.e. eliminating pages owned by this user must give us the empty set)
return not pages_to_delete.exclude(owner=self.user).exists()
else:
# all pages must be owned by this user and non-live
# (i.e. eliminating non-live pages owned by this user must give us the empty set)
return not pages_to_delete.exclude(live=False, owner=self.user).exists()
else:
return False
def can_unpublish(self):
if not self.user.is_active:
return False
if (not self.page.live) or self.page_is_root:
return False
if self.page_locked():
return False
return self.user.is_superuser or ('publish' in self.permissions)
def can_publish(self):
if not self.user.is_active:
return False
if self.page_is_root:
return False
return self.user.is_superuser or ('publish' in self.permissions)
def can_submit_for_moderation(self):
return not self.page_locked() and self.page.has_workflow and not self.page.workflow_in_progress
def can_set_view_restrictions(self):
return self.can_publish()
def can_unschedule(self):
return self.can_publish()
def can_lock(self):
if self.user.is_superuser:
return True
current_workflow_task = self.page.current_workflow_task
if current_workflow_task:
return current_workflow_task.user_can_lock(self.page, self.user)
if 'lock' in self.permissions:
return True
return False
def can_unlock(self):
if self.user.is_superuser:
return True
if self.user_has_lock():
return True
current_workflow_task = self.page.current_workflow_task
if current_workflow_task:
return current_workflow_task.user_can_unlock(self.page, self.user)
if 'unlock' in self.permissions:
return True
return False
def can_publish_subpage(self):
"""
Niggly special case for creating and publishing a page in one go.
Differs from can_publish in that we want to be able to publish subpages of root, but not
to be able to publish root itself. (Also, can_publish_subpage returns false if the page
does not allow subpages at all.)
"""
if not self.user.is_active:
return False
specific_class = self.page.specific_class
if specific_class is None or not specific_class.creatable_subpage_models():
return False
return self.user.is_superuser or ('publish' in self.permissions)
def can_reorder_children(self):
"""
Keep reorder permissions the same as publishing, since it immediately affects published pages
(and the use-cases for a non-admin needing to do it are fairly obscure...)
"""
return self.can_publish_subpage()
def can_move(self):
"""
Moving a page should be logically equivalent to deleting and re-adding it (and all its children).
As such, the permission test for 'can this be moved at all?' should be the same as for deletion.
(Further constraints will then apply on where it can be moved *to*.)
"""
return self.can_delete(ignore_bulk=True)
def can_copy(self):
return not self.page_is_root
def can_move_to(self, destination):
# reject the logically impossible cases first
if self.page == destination or destination.is_descendant_of(self.page):
return False
# reject moves that are forbidden by subpage_types / parent_page_types rules
# (these rules apply to superusers too)
if not self.page.specific.can_move_to(destination):
return False
# shortcut the trivial 'everything' / 'nothing' permissions
if not self.user.is_active:
return False
if self.user.is_superuser:
return True
# check that the page can be moved at all
if not self.can_move():
return False
# Inspect permissions on the destination
destination_perms = self.user_perms.for_page(destination)
# we always need at least add permission in the target
if 'add' not in destination_perms.permissions:
return False
if self.page.live or self.page.get_descendants().filter(live=True).exists():
# moving this page will entail publishing within the destination section
return ('publish' in destination_perms.permissions)
else:
# no publishing required, so the already-tested 'add' permission is sufficient
return True
def can_copy_to(self, destination, recursive=False):
# reject the logically impossible cases first
# recursive can't copy to the same tree otherwise it will be on infinite loop
if recursive and (self.page == destination or destination.is_descendant_of(self.page)):
return False
# reject inactive users early
if not self.user.is_active:
return False
# reject early if pages of this type cannot be created at the destination
if not self.page.specific_class.can_create_at(destination):
return False
# skip permission checking for super users
if self.user.is_superuser:
return True
# Inspect permissions on the destination
destination_perms = self.user_perms.for_page(destination)
if not destination.specific_class.creatable_subpage_models():
return False
# we always need at least add permission in the target
if 'add' not in destination_perms.permissions:
return False
return True
def can_view_revisions(self):
return not self.page_is_root
class PageViewRestriction(BaseViewRestriction):
    """A view restriction (e.g. password or login required) applied to a page subtree."""
    page = models.ForeignKey(
        'Page', verbose_name=_('page'), related_name='view_restrictions', on_delete=models.CASCADE
    )

    # Session key under which restrictions already passed by a visitor are recorded.
    passed_view_restrictions_session_key = 'passed_page_view_restrictions'

    class Meta:
        verbose_name = _('page view restriction')
        verbose_name_plural = _('page view restrictions')

    def _restriction_log_data(self):
        """Return the audit-log payload describing this restriction (shared by save/delete)."""
        return {
            'restriction': {
                'type': self.restriction_type,
                'title': force_str(dict(self.RESTRICTION_CHOICES).get(self.restriction_type))
            }
        }

    def save(self, user=None, **kwargs):
        """
        Custom save handler to include logging.
        :param user: the user adding/updating the view restriction
        """
        specific_instance = self.page.specific
        # Capture newness before super().save() assigns a primary key.
        is_new = self.id is None
        super().save(**kwargs)
        if specific_instance:
            PageLogEntry.objects.log_action(
                instance=specific_instance,
                action='wagtail.view_restriction.create' if is_new else 'wagtail.view_restriction.edit',
                user=user,
                data=self._restriction_log_data(),
            )

    def delete(self, user=None, **kwargs):
        """
        Custom delete handler to aid in logging.
        :param user: the user removing the view restriction
        """
        specific_instance = self.page.specific
        if specific_instance:
            PageLogEntry.objects.log_action(
                instance=specific_instance,
                action='wagtail.view_restriction.delete',
                user=user,
                data=self._restriction_log_data(),
            )
        return super().delete(**kwargs)
class WorkflowPage(models.Model):
    """Assigns a ``Workflow`` to a page and (by inheritance) its descendants."""
    # One assignment per page; the page is also this model's primary key.
    # NOTE(review): `unique=True` looks redundant on a primary-key OneToOneField,
    # but removing it would change the field's deconstruction/migrations — leave as is.
    page = models.OneToOneField(
        'Page',
        verbose_name=_('page'),
        on_delete=models.CASCADE,
        primary_key=True,
        unique=True
    )
    workflow = models.ForeignKey(
        'Workflow',
        related_name='workflow_pages',
        verbose_name=_('workflow'),
        on_delete=models.CASCADE,
    )

    def get_pages(self):
        """
        Returns a queryset of pages that are affected by this WorkflowPage link.

        This includes all descendants of the page excluding any that have other WorkflowPages.
        """
        descendant_pages = Page.objects.descendant_of(self.page, inclusive=True)
        # Other workflow assignments deeper in this subtree override this one...
        descendant_workflow_pages = WorkflowPage.objects.filter(page_id__in=descendant_pages.values_list('id', flat=True)).exclude(pk=self.pk)
        # ...so carve their subtrees out by path prefix + depth.
        for path, depth in descendant_workflow_pages.values_list('page__path', 'page__depth'):
            descendant_pages = descendant_pages.exclude(path__startswith=path, depth__gte=depth)
        return descendant_pages

    class Meta:
        verbose_name = _('workflow page')
        verbose_name_plural = _('workflow pages')
class WorkflowTask(Orderable):
    """Ordered through-model linking a ``Task`` into a ``Workflow``."""
    # NOTE(review): verbose_name=_('workflow_tasks') looks like it was meant to be
    # a human-readable label; changing it would alter translation lookups — verify upstream.
    workflow = ParentalKey('Workflow', on_delete=models.CASCADE, verbose_name=_('workflow_tasks'),
                           related_name='workflow_tasks')
    # Only active tasks may be attached to a workflow.
    task = models.ForeignKey('Task', on_delete=models.CASCADE, verbose_name=_('task'), related_name='workflow_tasks',
                             limit_choices_to={'active': True})

    class Meta(Orderable.Meta):
        # A task may appear at most once per workflow.
        unique_together = [('workflow', 'task')]
        verbose_name = _('workflow task order')
        verbose_name_plural = _('workflow task orders')
class TaskManager(models.Manager):
    """Manager for ``Task`` adding a shortcut to the active tasks."""

    def active(self):
        """Return a queryset of tasks whose ``active`` flag is set."""
        return self.get_queryset().filter(active=True)
class Task(models.Model):
    """
    Base model for a single step in a moderation ``Workflow``.

    Subclasses (e.g. ``GroupApprovalTask``) customise who can act and what
    actions are available; ``content_type`` records the concrete subclass so
    instances can be downcast via ``specific``.
    """
    name = models.CharField(max_length=255, verbose_name=_('name'))
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='wagtail_tasks',
        on_delete=models.CASCADE
    )
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active tasks can be added to workflows. Deactivating a task does not remove it from existing workflows."))
    objects = TaskManager()

    # Fields shown on the admin create/edit forms; subclasses extend these.
    admin_form_fields = ['name']
    admin_form_readonly_on_edit_fields = ['name']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.id:
            # this model is being newly created
            # rather than retrieved from the db;
            if not self.content_type_id:
                # set content type to correctly represent the model class
                # that this was created as
                self.content_type = ContentType.objects.get_for_model(self)

    def __str__(self):
        return self.name

    @property
    def workflows(self):
        """Returns all ``Workflow`` instances that use this task"""
        return Workflow.objects.filter(workflow_tasks__task=self)

    @property
    def active_workflows(self):
        """Return a ``QuerySet`` of active workflows that this task is part of"""
        return Workflow.objects.active().filter(workflow_tasks__task=self)

    @classmethod
    def get_verbose_name(cls):
        """
        Returns the human-readable "verbose name" of this task model e.g "Group approval task".
        """
        # This is similar to doing cls._meta.verbose_name.title()
        # except this doesn't convert any characters to lowercase
        return capfirst(cls._meta.verbose_name)

    @cached_property
    def specific(self):
        """
        Return this Task in its most specific subclassed form.
        """
        # the ContentType.objects manager keeps a cache, so this should potentially
        # avoid a database lookup over doing self.content_type. I think.
        content_type = ContentType.objects.get_for_id(self.content_type_id)
        model_class = content_type.model_class()
        if model_class is None:
            # Cannot locate a model class for this content type. This might happen
            # if the codebase and database are out of sync (e.g. the model exists
            # on a different git branch and we haven't rolled back migrations before
            # switching branches); if so, the best we can do is return the task
            # unchanged.
            return self
        elif isinstance(self, model_class):
            # self is already an instance of the most specific class
            return self
        else:
            return content_type.get_object_for_this_type(id=self.id)

    # Subclasses may set this to use a custom TaskState subclass for tracking state.
    task_state_class = None

    @classmethod
    def get_task_state_class(cls):
        """Return the TaskState model used to track this task's progress."""
        # PEP 8: first argument of a classmethod is `cls` (was previously named `self`).
        return cls.task_state_class or TaskState

    def start(self, workflow_state, user=None):
        """Start this task on the provided workflow state by creating an instance of TaskState"""
        task_state = self.get_task_state_class()(workflow_state=workflow_state)
        task_state.status = TaskState.STATUS_IN_PROGRESS
        task_state.page_revision = workflow_state.page.get_latest_revision()
        task_state.task = self
        task_state.save()
        task_submitted.send(sender=task_state.specific.__class__, instance=task_state.specific, user=user)
        return task_state

    @transaction.atomic
    def on_action(self, task_state, user, action_name, **kwargs):
        """Performs an action on a task state determined by the ``action_name`` string passed"""
        if action_name == 'approve':
            task_state.approve(user=user, **kwargs)
        elif action_name == 'reject':
            task_state.reject(user=user, **kwargs)

    def user_can_access_editor(self, page, user):
        """Returns True if a user who would not normally be able to access the editor for the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def page_locked_for_user(self, page, user):
        """Returns True if the page should be locked to a given user's edits. This can be used to prevent editing by non-reviewers."""
        return False

    def user_can_lock(self, page, user):
        """Returns True if a user who would not normally be able to lock the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def user_can_unlock(self, page, user):
        """Returns True if a user who would not normally be able to unlock the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def get_actions(self, page, user):
        """
        Get the list of action strings (name, verbose_name, whether the action requires additional data - see
        ``get_form_for_action``) for actions the current user can perform for this task on the given page.
        These strings should be the same as those able to be passed to ``on_action``
        """
        return []

    def get_form_for_action(self, action):
        """Return the form class used to collect extra data for ``action``."""
        return TaskStateCommentForm

    def get_template_for_action(self, action):
        """Return the template path used to render the form for ``action`` ('' = default)."""
        return ''

    def get_task_states_user_can_moderate(self, user, **kwargs):
        """Returns a ``QuerySet`` of the task states the current user can moderate"""
        return TaskState.objects.none()

    @classmethod
    def get_description(cls):
        """Returns the task description."""
        return ''

    @transaction.atomic
    def deactivate(self, user=None):
        """Set ``active`` to False and cancel all in progress task states linked to this task"""
        self.active = False
        self.save()
        in_progress_states = TaskState.objects.filter(task=self, status=TaskState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)

    class Meta:
        verbose_name = _('task')
        verbose_name_plural = _('tasks')
class WorkflowManager(models.Manager):
    """Manager for ``Workflow`` adding a shortcut to the active workflows."""

    def active(self):
        """Return a queryset of workflows whose ``active`` flag is set."""
        return self.get_queryset().filter(active=True)
class Workflow(ClusterableModel):
    """An ordered sequence of moderation ``Task``s that can be assigned to pages."""
    name = models.CharField(max_length=255, verbose_name=_('name'))
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active workflows can be added to pages. Deactivating a workflow does not remove it from existing pages."))
    objects = WorkflowManager()

    def __str__(self):
        return self.name

    @property
    def tasks(self):
        """Returns all ``Task`` instances linked to this workflow"""
        # Ordered by the through-model's sort_order, i.e. the order tasks run in.
        return Task.objects.filter(workflow_tasks__workflow=self).order_by('workflow_tasks__sort_order')

    @transaction.atomic
    def start(self, page, user):
        """Initiates a workflow by creating an instance of ``WorkflowState``"""
        state = WorkflowState(page=page, workflow=self, status=WorkflowState.STATUS_IN_PROGRESS, requested_by=user)
        state.save()
        # update() advances the state onto its first task (which may auto-approve).
        state.update(user=user)
        workflow_submitted.send(sender=state.__class__, instance=state, user=user)

        # Describe the first task for the audit log, if one was started.
        next_task_data = None
        if state.current_task_state:
            next_task_data = {
                'id': state.current_task_state.task.id,
                'title': state.current_task_state.task.name,
            }
        PageLogEntry.objects.log_action(
            instance=page,
            action='wagtail.workflow.start',
            data={
                'workflow': {
                    'id': self.id,
                    'title': self.name,
                    'status': state.status,
                    'next': next_task_data,
                    'task_state_id': state.current_task_state.id if state.current_task_state else None,
                }
            },
            revision=page.get_latest_revision(),
            user=user,
        )
        return state

    @transaction.atomic
    def deactivate(self, user=None):
        """Sets the workflow as inactive, and cancels all in progress instances of ``WorkflowState`` linked to this workflow"""
        self.active = False
        in_progress_states = WorkflowState.objects.filter(workflow=self, status=WorkflowState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)
        # Also detach the workflow from all pages it was assigned to.
        WorkflowPage.objects.filter(workflow=self).delete()
        self.save()

    def all_pages(self):
        """
        Returns a queryset of all the pages that this Workflow applies to.
        """
        pages = Page.objects.none()

        for workflow_page in self.workflow_pages.all():
            pages |= workflow_page.get_pages()

        return pages

    class Meta:
        verbose_name = _('workflow')
        verbose_name_plural = _('workflows')
class GroupApprovalTask(Task):
    """A moderation task that members of the configured groups may approve or reject."""
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), help_text=_('Pages at this step in a workflow will be moderated or approved by these groups of users'))

    admin_form_fields = Task.admin_form_fields + ['groups']
    admin_form_widgets = {
        'groups': forms.CheckboxSelectMultiple,
    }

    def _user_in_groups(self, user):
        """Return True if ``user`` belongs to at least one of this task's groups."""
        return self.groups.filter(id__in=user.groups.all()).exists()

    def start(self, workflow_state, user=None):
        """Start the task, releasing any page lock held by a non-reviewer first."""
        if workflow_state.page.locked_by:
            # If the person who locked the page isn't in one of the groups, unlock the page
            if not workflow_state.page.locked_by.groups.filter(id__in=self.groups.all()).exists():
                workflow_state.page.locked = False
                workflow_state.page.locked_by = None
                workflow_state.page.locked_at = None
                workflow_state.page.save(update_fields=['locked', 'locked_by', 'locked_at'])

        return super().start(workflow_state, user=user)

    def user_can_access_editor(self, page, user):
        """Group members and superusers may access the editor while this task runs."""
        return self._user_in_groups(user) or user.is_superuser

    def page_locked_for_user(self, page, user):
        """The page is locked to everyone except group members and superusers."""
        return not (self._user_in_groups(user) or user.is_superuser)

    def user_can_lock(self, page, user):
        """Only group members may lock (superusers are handled elsewhere)."""
        return self._user_in_groups(user)

    def user_can_unlock(self, page, user):
        """This task grants no extra unlock rights."""
        return False

    def get_actions(self, page, user):
        """Reviewers may request changes, approve, or approve with a comment."""
        if self._user_in_groups(user) or user.is_superuser:
            return [
                ('reject', _("Request changes"), True),
                ('approve', _("Approve"), False),
                ('approve', _("Approve with comment"), True),
            ]

        return []

    def get_task_states_user_can_moderate(self, user, **kwargs):
        """Return in-progress task states for this task if ``user`` is a reviewer."""
        if self._user_in_groups(user) or user.is_superuser:
            return TaskState.objects.filter(status=TaskState.STATUS_IN_PROGRESS, task=self.task_ptr)
        else:
            return TaskState.objects.none()

    @classmethod
    def get_description(cls):
        return _("Members of the chosen Wagtail Groups can approve this task")

    class Meta:
        verbose_name = _('Group approval task')
        verbose_name_plural = _('Group approval tasks')
class WorkflowStateManager(models.Manager):
    """Manager for ``WorkflowState`` with a shortcut to the states still in play."""

    def active(self):
        """
        Filters to only STATUS_IN_PROGRESS and STATUS_NEEDS_CHANGES WorkflowStates
        """
        in_progress = Q(status=WorkflowState.STATUS_IN_PROGRESS)
        needs_changes = Q(status=WorkflowState.STATUS_NEEDS_CHANGES)
        return self.filter(in_progress | needs_changes)
class WorkflowState(models.Model):
    """Tracks the status of a started Workflow on a Page."""
    STATUS_IN_PROGRESS = 'in_progress'
    STATUS_APPROVED = 'approved'
    STATUS_NEEDS_CHANGES = 'needs_changes'
    STATUS_CANCELLED = 'cancelled'
    STATUS_CHOICES = (
        (STATUS_IN_PROGRESS, _("In progress")),
        (STATUS_APPROVED, _("Approved")),
        (STATUS_NEEDS_CHANGES, _("Needs changes")),
        (STATUS_CANCELLED, _("Cancelled")),
    )

    page = models.ForeignKey('Page', on_delete=models.CASCADE, verbose_name=_("page"), related_name='workflow_states')
    workflow = models.ForeignKey('Workflow', on_delete=models.CASCADE, verbose_name=_('workflow'), related_name='workflow_states')
    status = models.fields.CharField(choices=STATUS_CHOICES, verbose_name=_("status"), max_length=50, default=STATUS_IN_PROGRESS)
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("created at"))
    requested_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                     verbose_name=_('requested by'),
                                     null=True,
                                     blank=True,
                                     editable=True,
                                     on_delete=models.SET_NULL,
                                     related_name='requested_workflows')
    # The TaskState currently awaiting action; None once the workflow has ended
    # (or before the first task has started).
    current_task_state = models.OneToOneField('TaskState', on_delete=models.SET_NULL, null=True, blank=True,
                                              verbose_name=_("current task state"))

    # allows a custom function to be called on finishing the Workflow successfully.
    on_finish = import_string(getattr(settings, 'WAGTAIL_FINISH_WORKFLOW_ACTION', 'wagtail.core.workflows.publish_workflow_state'))

    objects = WorkflowStateManager()

    def clean(self):
        super().clean()

        if self.status in (self.STATUS_IN_PROGRESS, self.STATUS_NEEDS_CHANGES):
            # The unique constraint is conditional, and so not supported on the MySQL backend - so an additional check is done here
            if WorkflowState.objects.active().filter(page=self.page).exclude(pk=self.pk).exists():
                raise ValidationError(_('There may only be one in progress or needs changes workflow state per page.'))

    def save(self, *args, **kwargs):
        # Run full validation on every save to enforce the one-active-state-per-page rule
        # even on databases without conditional unique constraints.
        self.full_clean()
        return super().save(*args, **kwargs)

    def __str__(self):
        return _("Workflow '{0}' on Page '{1}': {2}").format(self.workflow, self.page, self.status)

    def resume(self, user=None):
        """Put a STATUS_NEEDS_CHANGES workflow state back into STATUS_IN_PROGRESS, and restart the current task"""
        if self.status != self.STATUS_NEEDS_CHANGES:
            raise PermissionDenied

        # Remember which task to restart before clearing current_task_state.
        revision = self.current_task_state.page_revision
        current_task_state = self.current_task_state
        self.current_task_state = None
        self.status = self.STATUS_IN_PROGRESS
        self.save()

        PageLogEntry.objects.log_action(
            instance=self.page.specific,
            action='wagtail.workflow.resume',
            data={
                'workflow': {
                    'id': self.workflow_id,
                    'title': self.workflow.name,
                    'status': self.status,
                    'task_state_id': current_task_state.id,
                    'task': {
                        'id': current_task_state.task.id,
                        'title': current_task_state.task.name,
                    },
                }
            },
            revision=revision,
            user=user,
        )
        return self.update(user=user, next_task=current_task_state.task)

    def user_can_cancel(self, user):
        """Return True if ``user`` may cancel this workflow state."""
        # A page locked by someone else cannot have its workflow cancelled.
        if self.page.locked and self.page.locked_by != user:
            return False
        # Allowed for the requester, the page owner, or anyone who can currently approve the task.
        return user == self.requested_by or user == self.page.owner or (self.current_task_state and self.current_task_state.status == self.current_task_state.STATUS_IN_PROGRESS and 'approve' in [action[0] for action in self.current_task_state.task.get_actions(self.page, user)])

    def update(self, user=None, next_task=None):
        """Checks the status of the current task, and progresses (or ends) the workflow if appropriate. If the workflow progresses,
        next_task will be used to start a specific task next if provided."""
        if self.status != self.STATUS_IN_PROGRESS:
            # Updating a completed or cancelled workflow should have no effect
            return
        try:
            current_status = self.current_task_state.status
        except AttributeError:
            # No current task state yet (workflow just started).
            current_status = None
        if current_status == TaskState.STATUS_REJECTED:
            self.status = self.STATUS_NEEDS_CHANGES
            self.save()
            workflow_rejected.send(sender=self.__class__, instance=self, user=user)
        else:
            if not next_task:
                next_task = self.get_next_task()
            if next_task:
                if (not self.current_task_state) or self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS:
                    # if not on a task, or the next task to move to is not the current task (ie current task's status is
                    # not STATUS_IN_PROGRESS), move to the next task
                    self.current_task_state = next_task.specific.start(self, user=user)
                    self.save()
                    # if task has auto-approved, update the workflow again
                    if self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS:
                        self.update(user=user)
                # otherwise, continue on the current task
            else:
                # if there is no uncompleted task, finish the workflow.
                self.finish(user=user)

    @property
    def successful_task_states(self):
        """Task states of this workflow that were approved or skipped."""
        successful_task_states = self.task_states.filter(
            Q(status=TaskState.STATUS_APPROVED) | Q(status=TaskState.STATUS_SKIPPED)
        )
        if getattr(settings, "WAGTAIL_WORKFLOW_REQUIRE_REAPPROVAL_ON_EDIT", False):
            # Only approvals against the latest revision count when re-approval is required.
            successful_task_states = successful_task_states.filter(page_revision=self.page.get_latest_revision())

        return successful_task_states

    def get_next_task(self):
        """Returns the next active task, which has not been either approved or skipped"""
        return (
            Task.objects.filter(workflow_tasks__workflow=self.workflow, active=True)
            .exclude(
                task_states__in=self.successful_task_states
            ).order_by('workflow_tasks__sort_order').first()
        )

    def cancel(self, user=None):
        """Cancels the workflow state"""
        if self.status not in (self.STATUS_IN_PROGRESS, self.STATUS_NEEDS_CHANGES):
            raise PermissionDenied
        self.status = self.STATUS_CANCELLED
        self.save()

        PageLogEntry.objects.log_action(
            instance=self.page.specific,
            action='wagtail.workflow.cancel',
            data={
                'workflow': {
                    'id': self.workflow_id,
                    'title': self.workflow.name,
                    'status': self.status,
                    'task_state_id': self.current_task_state.id,
                    'task': {
                        'id': self.current_task_state.task.id,
                        'title': self.current_task_state.task.name,
                    },
                }
            },
            revision=self.current_task_state.page_revision,
            user=user,
        )

        for state in self.task_states.filter(status=TaskState.STATUS_IN_PROGRESS):
            # Cancel all in progress task states
            state.specific.cancel(user=user)
        workflow_cancelled.send(sender=self.__class__, instance=self, user=user)

    @transaction.atomic
    def finish(self, user=None):
        """Finishes a successful in progress workflow, marking it as approved and performing the ``on_finish`` action"""
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_APPROVED
        self.save()
        self.on_finish(user=user)
        workflow_approved.send(sender=self.__class__, instance=self, user=user)

    def copy_approved_task_states_to_revision(self, revision):
        """This creates copies of previously approved task states with page_revision set to a different revision."""
        approved_states = TaskState.objects.filter(workflow_state=self, status=TaskState.STATUS_APPROVED)
        for state in approved_states:
            state.copy(update_attrs={'page_revision': revision})

    def revisions(self):
        """Returns all page revisions associated with task states linked to the current workflow state"""
        return PageRevision.objects.filter(
            page_id=self.page_id,
            id__in=self.task_states.values_list('page_revision_id', flat=True)
        ).defer('content_json')

    def _get_applicable_task_states(self):
        """Returns the set of task states whose status applies to the current revision"""

        task_states = TaskState.objects.filter(workflow_state_id=self.id)
        # If WAGTAIL_WORKFLOW_REQUIRE_REAPPROVAL_ON_EDIT=True, this is only task states created on the current revision
        if getattr(settings, "WAGTAIL_WORKFLOW_REQUIRE_REAPPROVAL_ON_EDIT", False):
            latest_revision_id = self.revisions().order_by('-created_at', '-id').values_list('id', flat=True).first()
            task_states = task_states.filter(page_revision_id=latest_revision_id)

        return task_states

    def all_tasks_with_status(self):
        """
        Returns a list of Task objects that are linked with this workflow state's
        workflow. The status of that task in this workflow state is annotated in the
        `.status` field. And a displayable version of that status is annotated in the
        `.status_display` field.

        This is different to querying TaskState as it also returns tasks that haven't
        been started yet (so won't have a TaskState).
        """
        # Get the set of task states whose status applies to the current revision
        task_states = self._get_applicable_task_states()

        tasks = list(
            self.workflow.tasks.annotate(
                status=Subquery(
                    task_states.filter(
                        task_id=OuterRef('id'),
                    ).order_by(
                        '-started_at', '-id'
                    ).values('status')[:1]
                ),
            )
        )

        # Manually annotate status_display
        status_choices = dict(TaskState.STATUS_CHOICES)
        for task in tasks:
            task.status_display = status_choices.get(task.status, _("Not started"))

        return tasks

    def all_tasks_with_state(self):
        """
        Returns a list of Task objects that are linked with this WorkflowState's
        workflow, and have the latest task state.

        In a "Submit for moderation -> reject at step 1 -> resubmit -> accept" workflow, this ensures
        the task list reflects the accept, rather than the reject.
        """
        task_states = self._get_applicable_task_states()

        tasks = list(
            self.workflow.tasks.annotate(
                task_state_id=Subquery(
                    task_states.filter(
                        task_id=OuterRef('id'),
                    ).order_by(
                        '-started_at', '-id'
                    ).values('id')[:1]
                ),
            )
        )

        task_states = {task_state.id: task_state for task_state in task_states}
        # Manually annotate task_state
        for task in tasks:
            task.task_state = task_states.get(task.task_state_id)

        return tasks

    @property
    def is_active(self):
        return self.status not in [self.STATUS_APPROVED, self.STATUS_CANCELLED]

    @property
    def is_at_final_task(self):
        """Returns True if the workflow is currently at its final outstanding task."""
        last_task = Task.objects.filter(workflow_tasks__workflow=self.workflow, active=True)\
            .exclude(task_states__in=self.successful_task_states)\
            .order_by('workflow_tasks__sort_order').last()

        return self.get_next_task() == last_task

    class Meta:
        verbose_name = _('Workflow state')
        verbose_name_plural = _('Workflow states')
        # prevent multiple STATUS_IN_PROGRESS/STATUS_NEEDS_CHANGES workflows for the same page. This is only supported by specific databases (e.g. Postgres, SQL Server), so is checked additionally on save.
        constraints = [
            models.UniqueConstraint(fields=['page'], condition=Q(status__in=('in_progress', 'needs_changes')), name='unique_in_progress_workflow')
        ]
class TaskStateManager(models.Manager):
    """Manager for ``TaskState`` with reviewer-oriented lookups."""

    def reviewable_by(self, user):
        """Return the union of task states that ``user`` may moderate, across all active tasks."""
        reviewable = TaskState.objects.none()
        for active_task in Task.objects.filter(active=True):
            reviewable |= active_task.specific.get_task_states_user_can_moderate(user=user)
        return reviewable
class TaskState(models.Model):
    """Tracks the status of a given Task for a particular page revision."""
    STATUS_IN_PROGRESS = 'in_progress'
    STATUS_APPROVED = 'approved'
    STATUS_REJECTED = 'rejected'
    STATUS_SKIPPED = 'skipped'
    STATUS_CANCELLED = 'cancelled'
    STATUS_CHOICES = (
        (STATUS_IN_PROGRESS, _("In progress")),
        (STATUS_APPROVED, _("Approved")),
        (STATUS_REJECTED, _("Rejected")),
        (STATUS_SKIPPED, _("Skipped")),
        (STATUS_CANCELLED, _("Cancelled")),
    )

    workflow_state = models.ForeignKey('WorkflowState', on_delete=models.CASCADE, verbose_name=_('workflow state'), related_name='task_states')
    page_revision = models.ForeignKey('PageRevision', on_delete=models.CASCADE, verbose_name=_('page revision'), related_name='task_states')
    task = models.ForeignKey('Task', on_delete=models.CASCADE, verbose_name=_('task'), related_name='task_states')
    status = models.fields.CharField(choices=STATUS_CHOICES, verbose_name=_("status"), max_length=50, default=STATUS_IN_PROGRESS)
    started_at = models.DateTimeField(verbose_name=_('started at'), auto_now_add=True)
    # Set (with finished_by) when the state is approved, rejected or cancelled.
    finished_at = models.DateTimeField(verbose_name=_('finished at'), blank=True, null=True)
    finished_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_('finished by'),
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='finished_task_states'
    )
    comment = models.TextField(blank=True)
    # Records the concrete subclass, so .specific can downcast instances.
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='wagtail_task_states',
        on_delete=models.CASCADE
    )
    # Field names omitted when copying a task state (see copy_approved_task_states_to_revision).
    exclude_fields_in_copy = []
    default_exclude_fields_in_copy = ['id']

    objects = TaskStateManager()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.id:
# this model is being newly created
# rather than retrieved from the db;
if not self.content_type_id:
# set content type to correctly represent the model class
# that this was created as
self.content_type = ContentType.objects.get_for_model(self)
def __str__(self):
return _("Task '{0}' on Page Revision '{1}': {2}").format(self.task, self.page_revision, self.status)
    @cached_property
    def specific(self):
        """
        Return this TaskState in its most specific subclassed form.
        """
        # the ContentType.objects manager keeps a cache, so this should potentially
        # avoid a database lookup over doing self.content_type. I think.
        content_type = ContentType.objects.get_for_id(self.content_type_id)
        model_class = content_type.model_class()
        if model_class is None:
            # Cannot locate a model class for this content type. This might happen
            # if the codebase and database are out of sync (e.g. the model exists
            # on a different git branch and we haven't rolled back migrations before
            # switching branches); if so, the best we can do is return the task state
            # unchanged.
            return self
        elif isinstance(self, model_class):
            # self is already an instance of the most specific class
            return self
        else:
            return content_type.get_object_for_this_type(id=self.id)
    @transaction.atomic
    def approve(self, user=None, update=True, comment=''):
        """Approve the task state and update the workflow state.

        Raises PermissionDenied unless the state is currently in progress.
        Order matters: the state is saved first, then logged, then (if
        ``update``) the workflow is advanced, and finally the
        ``task_approved`` signal is emitted.
        """
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_APPROVED
        self.finished_at = timezone.now()
        self.finished_by = user
        self.comment = comment
        self.save()
        self.log_state_change_action(user, 'approve')
        if update:
            self.workflow_state.update(user=user)
        task_approved.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
    @transaction.atomic
    def reject(self, user=None, update=True, comment=''):
        """Reject the task state and update the workflow state.

        Mirrors approve(): PermissionDenied unless in progress; saves, logs,
        optionally advances the workflow, then emits ``task_rejected``.
        """
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_REJECTED
        self.finished_at = timezone.now()
        self.finished_by = user
        self.comment = comment
        self.save()
        self.log_state_change_action(user, 'reject')
        if update:
            self.workflow_state.update(user=user)
        task_rejected.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
@cached_property
def task_type_started_at(self):
"""Finds the first chronological started_at for successive TaskStates - ie started_at if the task had not been restarted"""
task_states = TaskState.objects.filter(workflow_state=self.workflow_state).order_by('-started_at').select_related('task')
started_at = None
for task_state in task_states:
if task_state.task == self.task:
started_at = task_state.started_at
elif started_at:
break
return started_at
    @transaction.atomic
    def cancel(self, user=None, resume=False, comment=''):
        """Cancel the task state and update the workflow state. If ``resume`` is set to True, then upon update the workflow state
        is passed the current task as ``next_task``, causing it to start a new task state on the current task if possible"""
        # Note: unlike approve()/reject(), there is no STATUS_IN_PROGRESS
        # guard here - a state may be cancelled regardless of current status.
        self.status = self.STATUS_CANCELLED
        self.finished_at = timezone.now()
        self.comment = comment
        self.finished_by = user
        self.save()
        if resume:
            self.workflow_state.update(user=user, next_task=self.task.specific)
        else:
            self.workflow_state.update(user=user)
        task_cancelled.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
def copy(self, update_attrs=None, exclude_fields=None):
"""Copy this task state, excluding the attributes in the ``exclude_fields`` list and updating any attributes to values
specified in the ``update_attrs`` dictionary of ``attribute``: ``new value`` pairs"""
exclude_fields = self.default_exclude_fields_in_copy + self.exclude_fields_in_copy + (exclude_fields or [])
instance, child_object_map = _copy(self.specific, exclude_fields, update_attrs)
instance.save()
_copy_m2m_relations(self, instance, exclude_fields=exclude_fields)
return instance
    def get_comment(self):
        """
        Returns a string that is displayed in workflow history.
        This could be a comment by the reviewer, or generated.
        Use mark_safe to return HTML.
        """
        # Base implementation: just the stored reviewer comment.
        return self.comment
    def log_state_change_action(self, user, action):
        """Record the approval/rejection in the page audit log.

        ``action`` is the short verb ('approve'/'reject'); the log entry is
        attached to the page built from this state's revision, and captures
        the workflow, current task and (if any) the next task in line.
        """
        page = self.page_revision.as_page_object()
        next_task = self.workflow_state.get_next_task()
        next_task_data = None
        if next_task:
            next_task_data = {
                'id': next_task.id,
                'title': next_task.name
            }
        PageLogEntry.objects.log_action(
            instance=page,
            action='wagtail.workflow.{}'.format(action),
            user=user,
            data={
                'workflow': {
                    'id': self.workflow_state.workflow.id,
                    'title': self.workflow_state.workflow.name,
                    'status': self.status,
                    'task_state_id': self.id,
                    'task': {
                        'id': self.task.id,
                        'title': self.task.name,
                    },
                    'next': next_task_data,
                },
                'comment': self.get_comment()
            },
            revision=self.page_revision
        )
    class Meta:
        # Human-readable names for admin UI and verbose output.
        verbose_name = _('Task state')
        verbose_name_plural = _('Task states')
class PageLogEntryManager(BaseLogEntryManager):
    """Manager for PageLogEntry: fills in page-specific details when logging."""

    def get_instance_title(self, instance):
        """Use the page's admin display title as the logged title."""
        specific_page = instance.specific_deferred
        return specific_page.get_admin_display_title()

    def log_action(self, instance, action, **kwargs):
        """Record ``action`` against ``instance``, storing it as the entry's page."""
        kwargs['page'] = instance
        return super().log_action(instance, action, **kwargs)
class PageLogEntry(BaseLogEntry):
    """Audit log entry attached to a page (and optionally a page revision)."""
    # DO_NOTHING with db_constraint=False: no database-level FK constraint is
    # created and nothing cascades, so the page/revision rows may be deleted
    # while this entry's ids remain recorded.
    page = models.ForeignKey(
        'wagtailcore.Page',
        on_delete=models.DO_NOTHING,
        db_constraint=False,
        related_name='+'
    )
    # Pointer to a specific page revision
    revision = models.ForeignKey(
        'wagtailcore.PageRevision',
        null=True,
        blank=True,
        on_delete=models.DO_NOTHING,
        db_constraint=False,
        related_name='+',
    )
    objects = PageLogEntryManager()
    action_registry = page_log_action_registry
    class Meta:
        # Newest entries first.
        ordering = ['-timestamp', '-id']
        verbose_name = _('page log entry')
        verbose_name_plural = _('page log entries')
    def __str__(self):
        return "PageLogEntry %d: '%s' on '%s' with id %s" % (
            self.pk, self.action, self.object_verbose_name(), self.page_id
        )
    @cached_property
    def object_id(self):
        # The logged object's id is the page id.
        return self.page_id
class Comment(ClusterableModel):
    """
    A comment on a field, or a field within a streamfield block
    """
    page = ParentalKey(Page, on_delete=models.CASCADE, related_name='comments')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comments')
    text = models.TextField()
    # This stores the field or field within a streamfield block that the comment is applied on, in the form: 'field', or 'field.block_id.field'
    # This must be unchanging across all revisions, so we will not support (current-format) ListBlock or the contents of InlinePanels initially.
    contentpath = models.TextField()
    # This stores the position within a field, to be interpreted by the field's frontend widget. It may change between revisions
    position = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    revision_created = models.ForeignKey(PageRevision, on_delete=models.CASCADE, related_name='created_comments', null=True, blank=True)
    resolved_at = models.DateTimeField(null=True, blank=True)
    resolved_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='comments_resolved',
        null=True,
        blank=True
    )
    class Meta:
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
    def __str__(self):
        return "Comment on Page '{0}', left by {1}: '{2}'".format(self.page, self.user, self.text)
    def save(self, update_position=False, **kwargs):
        """Save the comment, normally excluding ``position`` from the write.

        The position is ordinarily retrieved from the revision, so it is only
        persisted when ``update_position=True`` or when 'position' is
        explicitly listed in ``update_fields``.
        """
        update_fields = kwargs.pop('update_fields', None)
        if not update_position and (not update_fields or 'position' not in update_fields):
            if self.id:
                # The instance is already saved; we can use `update_fields`.
                # Normalise to a list of field *names* first: callers pass
                # names, while the fallback self._meta.get_fields() yields
                # field objects. (Previously `.name` was accessed on
                # caller-supplied strings, raising AttributeError.)
                if update_fields:
                    field_names = list(update_fields)
                else:
                    field_names = [field.name for field in self._meta.get_fields()]
                update_fields = [name for name in field_names if name not in {'position', 'id'}]
            else:
                # This is a new instance: `update_fields` cannot be used on an
                # initial INSERT, so preserve and then restore the position
                # via a local variable instead.
                position = self.position
                result = super().save(**kwargs)
                self.position = position
                return result
        return super().save(update_fields=update_fields, **kwargs)
    def _log(self, action, page_revision=None, user=None):
        # All comment lifecycle events are recorded against the comment's page.
        PageLogEntry.objects.log_action(
            instance=self.page,
            action=action,
            user=user,
            revision=page_revision,
            data={
                'comment': {
                    'id': self.pk,
                    'contentpath': self.contentpath,
                    'text': self.text,
                }
            }
        )
    def log_create(self, **kwargs):
        self._log('wagtail.comments.create', **kwargs)
    def log_edit(self, **kwargs):
        self._log('wagtail.comments.edit', **kwargs)
    def log_resolve(self, **kwargs):
        self._log('wagtail.comments.resolve', **kwargs)
    def log_delete(self, **kwargs):
        self._log('wagtail.comments.delete', **kwargs)
class CommentReply(models.Model):
    """A threaded reply to a Comment."""
    comment = ParentalKey(Comment, on_delete=models.CASCADE, related_name='replies')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comment_replies')
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = _('comment reply')
        verbose_name_plural = _('comment replies')
    def __str__(self):
        return "CommentReply left by '{0}': '{1}'".format(self.user, self.text)
    def _log(self, action, page_revision=None, user=None):
        # Logged against the parent comment's page; captures both the parent
        # comment and this reply in the log entry's data payload.
        PageLogEntry.objects.log_action(
            instance=self.comment.page,
            action=action,
            user=user,
            revision=page_revision,
            data={
                'comment': {
                    'id': self.comment.pk,
                    'contentpath': self.comment.contentpath,
                    'text': self.comment.text,
                },
                'reply': {
                    'id': self.pk,
                    'text': self.text,
                }
            }
        )
    def log_create(self, **kwargs):
        self._log('wagtail.comments.create_reply', **kwargs)
    def log_edit(self, **kwargs):
        self._log('wagtail.comments.edit_reply', **kwargs)
    def log_delete(self, **kwargs):
        self._log('wagtail.comments.delete_reply', **kwargs)
class PageSubscription(models.Model):
    """Per-user notification preferences for a page."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='page_subscriptions')
    page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='subscribers')
    # Whether the user receives notifications about comment activity on the page.
    comment_notifications = models.BooleanField()
    class Meta:
        # One subscription row per (page, user) pair.
        unique_together = [
            ('page', 'user'),
        ]
# (text-extraction artifact: stray dataset-metadata row removed from code)
import functools
import json
import logging
import uuid
from io import StringIO
from urllib.parse import urlparse
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.db import migrations, models, transaction
from django.db.models import DEFERRED, Q, Value
from django.db.models.expressions import OuterRef, Subquery
from django.db.models.functions import Concat, Substr
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.http import Http404
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import timezone, translation
from django.utils.cache import patch_cache_control
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.text import capfirst, slugify
from django.utils.translation import gettext_lazy as _
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel, get_all_child_relations
from treebeard.mp_tree import MP_Node
from wagtail.core.fields import StreamField
from wagtail.core.forms import TaskStateCommentForm
from wagtail.core.log_actions import page_log_action_registry
from wagtail.core.query import PageQuerySet
from wagtail.core.signals import (
page_published, page_unpublished, post_page_move, pre_page_move, task_approved, task_cancelled,
task_rejected, task_submitted, workflow_approved, workflow_cancelled, workflow_rejected,
workflow_submitted)
from wagtail.core.treebeard import TreebeardPathFixMixin
from wagtail.core.url_routing import RouteResult
from wagtail.core.utils import (
WAGTAIL_APPEND_SLASH, camelcase_to_underscore, find_available_slug, get_content_languages,
get_supported_content_language_variant, resolve_model_string)
from wagtail.search import index
from .audit_log import BaseLogEntry, BaseLogEntryManager, LogEntryQuerySet
from .collections import (
BaseCollectionManager, Collection, CollectionManager, CollectionMember,
CollectionViewRestriction, GroupCollectionPermission, GroupCollectionPermissionManager,
get_root_collection_id)
from .sites import Site, SiteManager, SiteRootPath
from .view_restrictions import BaseViewRestriction
logger = logging.getLogger('wagtail.core')
PAGE_TEMPLATE_VAR = 'page'
def _extract_field_data(source, exclude_fields=None):
    """Build a dict of field values from ``source`` suitable for constructing
    a copy via ``source.__class__(**data_dict)``.

    Skips fields named in ``exclude_fields``, auto-created fields, parent
    links, and plain M2Ms (which cannot be set at construction time);
    ParentalManyToManyFields are included since they are settable up front.
    """
    exclude_fields = exclude_fields or []
    data_dict = {}
    for field in source._meta.get_fields():
        if field.name in exclude_fields:
            continue
        if field.auto_created:
            continue
        if field.many_to_many:
            # Only parental M2Ms are captured; ordinary M2Ms are copied later
            # by _copy_m2m_relations once the target has a pk.
            if isinstance(field, ParentalManyToManyField):
                parental_field = getattr(source, field.name)
                if hasattr(parental_field, 'all'):
                    values = parental_field.all()
                    if values:
                        data_dict[field.name] = values
            continue
        if isinstance(field, models.OneToOneField) and field.remote_field.parent_link:
            continue
        if isinstance(field, models.ForeignKey):
            # Use the raw id (attname) so the related object isn't fetched;
            # clear the cached relation attribute first.
            data_dict[field.name] = None
            data_dict[field.attname] = getattr(source, field.attname)
        else:
            data_dict[field.name] = getattr(source, field.name)
    return data_dict
def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
    """Copy non-parental many-to-many relations from ``source`` onto ``target``.

    Relations named in ``exclude_fields`` are skipped; values supplied in
    ``update_attrs`` override the values read from ``source``.
    """
    update_attrs = update_attrs or {}
    exclude_fields = exclude_fields or []

    for field in source._meta.get_fields():
        is_plain_m2m = (
            field.many_to_many
            and not field.auto_created
            and not isinstance(field, ParentalManyToManyField)
        )
        if not is_plain_m2m or field.name in exclude_fields:
            continue
        try:
            # Skip M2Ms whose through model is a child object of the source:
            # those are handled as child relations, not copied here.
            has_parental_link = any(
                isinstance(through_field, ParentalKey)
                and issubclass(source.__class__, through_field.related_model)
                for through_field in field.through._meta.get_fields()
            )
            if has_parental_link:
                continue
        except AttributeError:
            pass
        if field.name in update_attrs:
            new_value = update_attrs[field.name]
        else:
            new_value = getattr(source, field.name).all()
        getattr(target, field.name).set(new_value)
def _copy(source, exclude_fields=None, update_attrs=None):
    """Create an unsaved copy of ``source``.

    Returns ``(target, child_object_map)``; the map relates
    (child relation, old pk) pairs to copied child objects, and is empty for
    non-clusterable models.
    """
    field_data = _extract_field_data(source, exclude_fields=exclude_fields)
    target = source.__class__(**field_data)

    # Apply caller-supplied overrides, but only for fields that were copied.
    for attr_name, new_value in (update_attrs or {}).items():
        if attr_name in field_data:
            setattr(target, attr_name, new_value)

    child_object_map = {}
    if isinstance(source, ClusterableModel):
        child_object_map = source.copy_all_child_relations(target, exclude=exclude_fields)
    return target, child_object_map
def pk(obj):
    """Return the primary key of a model instance; pass any other value
    (assumed to already be a raw pk) through unchanged."""
    return obj.pk if isinstance(obj, models.Model) else obj
class LocaleManager(models.Manager):
    def get_for_language(self, language_code):
        """Return the Locale for the best-matching supported variant of
        ``language_code``.

        May raise Locale.DoesNotExist if no matching row exists, or
        LookupError from get_supported_content_language_variant if the code
        cannot be matched (callers such as Locale.get_active catch both).
        """
        return self.get(language_code=get_supported_content_language_variant(language_code))
class Locale(models.Model):
    """A content language; every translatable object points at one Locale."""
    language_code = models.CharField(max_length=100, unique=True)
    # Default manager resolves language-code variants; all_objects is a plain
    # manager used when an unfiltered lookup is needed (e.g. during delete()).
    objects = LocaleManager()
    all_objects = models.Manager()
    class Meta:
        ordering = [
            "language_code",
        ]
    @classmethod
    def get_default(cls):
        """Return the Locale matching settings.LANGUAGE_CODE."""
        return cls.objects.get_for_language(settings.LANGUAGE_CODE)
    @classmethod
    def get_active(cls):
        """Return the Locale for the currently-active translation language,
        falling back to the default when no match exists."""
        try:
            return cls.objects.get_for_language(translation.get_language())
        except (cls.DoesNotExist, LookupError):
            return cls.get_default()
    @transaction.atomic
    def delete(self, *args, **kwargs):
        """Delete the locale, first reassigning any root pages (depth=1) that
        use it so the tree roots are never left pointing at a deleted row."""
        root_page_with_this_locale = Page.objects.filter(depth=1, locale=self)
        if root_page_with_this_locale.exists():
            # Select the default locale, if one exists and isn't the one being deleted
            try:
                new_locale = Locale.get_default()
                default_locale_is_ok = (new_locale != self)
            except (Locale.DoesNotExist, LookupError):
                default_locale_is_ok = False
            if not default_locale_is_ok:
                # Otherwise fall back to any other locale in the table.
                new_locale = Locale.all_objects.exclude(pk=self.pk).first()
            root_page_with_this_locale.update(locale=new_locale)
        return super().delete(*args, **kwargs)
    def language_code_is_valid(self):
        """True if this code is still among the configured content languages."""
        return self.language_code in get_content_languages()
    def get_display_name(self):
        """Return the configured display name, or None if the code is unknown."""
        return get_content_languages().get(self.language_code)
    def __str__(self):
        return force_str(self.get_display_name() or self.language_code)
class TranslatableMixin(models.Model):
    """Abstract mixin that makes a model translatable.

    All translations of one logical object share a ``translation_key``; the
    (translation_key, locale) pair is unique, so there is at most one
    translation per locale.
    """
    translation_key = models.UUIDField(default=uuid.uuid4, editable=False)
    locale = models.ForeignKey(Locale, on_delete=models.PROTECT, related_name="+", editable=False)
    class Meta:
        abstract = True
        unique_together = [("translation_key", "locale")]
    @classmethod
    def check(cls, **kwargs):
        """System check: concrete translation models must keep the
        (translation_key, locale) unique_together constraint."""
        errors = super(TranslatableMixin, cls).check(**kwargs)
        is_translation_model = cls.get_translation_model() is cls
        if is_translation_model and ("translation_key", "locale") not in cls._meta.unique_together:
            errors.append(
                checks.Error(
                    "{0}.{1} is missing a unique_together constraint for the translation key and locale fields"
                    .format(cls._meta.app_label, cls.__name__),
                    hint="Add ('translation_key', 'locale') to {}.Meta.unique_together".format(cls.__name__),
                    obj=cls,
                    id='wagtailcore.E003',
                )
            )
        return errors
    @property
    def localized(self):
        """Return the translation for the active locale, falling back to self
        when no translation exists or no locale can be determined."""
        try:
            locale = Locale.get_active()
        except (LookupError, Locale.DoesNotExist):
            return self
        if locale.id == self.locale_id:
            return self
        return self.get_translation_or_none(locale) or self
    def get_translations(self, inclusive=False):
        """Queryset of this object's translations; include self if ``inclusive``."""
        translations = self.__class__.objects.filter(
            translation_key=self.translation_key
        )
        if inclusive is False:
            translations = translations.exclude(id=self.id)
        return translations
    def get_translation(self, locale):
        """Return the translation in ``locale`` (a Locale or raw pk);
        raises DoesNotExist if absent."""
        return self.get_translations(inclusive=True).get(locale_id=pk(locale))
    def get_translation_or_none(self, locale):
        """Like get_translation() but returns None instead of raising."""
        try:
            return self.get_translation(locale)
        except self.__class__.DoesNotExist:
            return None
    def has_translation(self, locale):
        """True if a translation exists in ``locale`` (self counts)."""
        return self.get_translations(inclusive=True).filter(locale_id=pk(locale)).exists()
    def copy_for_translation(self, locale):
        """Return an unsaved copy of this instance in ``locale``; translatable
        child objects are re-pointed at the same locale."""
        translated, child_object_map = _copy(self)
        translated.locale = locale
        for (child_relation, old_pk), child_object in child_object_map.items():
            if isinstance(child_object, TranslatableMixin):
                child_object.locale = locale
        return translated
    def get_default_locale(self):
        """Return the locale a new instance should use: the parent object's
        locale when a translatable ParentalKey exists, else the default."""
        parental_keys = [
            field
            for field in self._meta.get_fields()
            if isinstance(field, ParentalKey)
            and issubclass(field.related_model, TranslatableMixin)
        ]
        if parental_keys:
            parent_id = parental_keys[0].value_from_object(self)
            # defer() + select_related('locale') fetches only what's needed.
            return (
                parental_keys[0]
                .related_model.objects.defer().select_related("locale")
                .get(id=parent_id)
                .locale
            )
        return Locale.get_default()
    @classmethod
    def get_translation_model(cls):
        """Return the model in the hierarchy that actually declares the
        ``locale`` field (the 'root' translation model)."""
        return cls._meta.get_field("locale").model
def bootstrap_translatable_model(model, locale):
    """Assign a fresh translation_key and ``locale`` to every instance of
    ``model`` that has not been bootstrapped yet (translation_key is NULL).

    Intended for use from a data migration after adding translatable fields
    to an existing model.
    """
    pending = model.objects.filter(translation_key__isnull=True).defer().iterator()
    for instance in pending:
        instance.translation_key = uuid.uuid4()
        instance.locale = locale
        instance.save(update_fields=["translation_key", "locale"])
class BootstrapTranslatableModel(migrations.RunPython):
    """A migration operation that runs bootstrap_translatable_model for one
    model, filling in translation_key/locale on pre-existing rows."""
    def __init__(self, model_string, language_code=None):
        if language_code is None:
            language_code = get_supported_content_language_variant(settings.LANGUAGE_CODE)
        def forwards(apps, schema_editor):
            # Use the historical model classes from the migration state.
            model = apps.get_model(model_string)
            Locale = apps.get_model("wagtailcore.Locale")
            locale = Locale.objects.get(language_code=language_code)
            bootstrap_translatable_model(model, locale)
        def backwards(apps, schema_editor):
            # Reversing is a no-op: the populated keys are simply left in place.
            pass
        super().__init__(forwards, backwards)
class ParentNotTranslatedError(Exception):
    """Raised when an operation requires a translated parent page that does
    not exist in the target locale.

    NOTE(review): the raising sites are outside this chunk - confirm against
    the page copy-for-translation logic.
    """
    pass
class BootstrapTranslatableMixin(TranslatableMixin):
    """Transitional variant of TranslatableMixin with nullable fields, for
    migrating existing models: add this first, run the bootstrap migration,
    then switch to TranslatableMixin proper."""
    translation_key = models.UUIDField(null=True, editable=False)
    locale = models.ForeignKey(
        Locale, on_delete=models.PROTECT, null=True, related_name="+", editable=False
    )
    @classmethod
    def check(cls, **kwargs):
        # Bypass TranslatableMixin.check: the unique_together constraint is
        # deliberately absent while fields are still nullable.
        return super(TranslatableMixin, cls).check(**kwargs)
    class Meta:
        abstract = True
def get_translatable_models(include_subclasses=False):
    """Return all registered concrete models using TranslatableMixin.

    By default only "root" translation models (the classes that declare the
    ``locale`` field) are returned; pass ``include_subclasses=True`` to also
    include their subclasses.
    """
    found = []
    for model in apps.get_models():
        if issubclass(model, TranslatableMixin) and not model._meta.abstract:
            found.append(model)
    if include_subclasses is False:
        roots = {model.get_translation_model() for model in found}
        found = [model for model in found if model in roots]
    return found
@receiver(pre_save)
def set_locale_on_new_instance(sender, instance, **kwargs):
    """pre_save hook: give translatable instances without a locale a default
    one before they hit the database."""
    if not isinstance(instance, TranslatableMixin):
        return
    if instance.locale_id is not None:
        return
    if kwargs["raw"]:
        # Raw saves (e.g. fixture loading, per Django's pre_save docs) can't
        # safely follow relations, so use the site default locale directly.
        instance.locale = Locale.get_default()
        return
    instance.locale = instance.get_default_locale()
# Registry of all non-abstract Page subclasses; populated by the PageBase metaclass.
PAGE_MODEL_CLASSES = []
def get_page_models():
    """Return all non-abstract Page model classes registered so far."""
    return PAGE_MODEL_CLASSES
def get_default_page_content_type():
    """Return the base Page ContentType (used as the SET() fallback when a
    page's specific content type row is deleted)."""
    return ContentType.objects.get_for_model(Page)
@functools.lru_cache(maxsize=None)
def get_streamfield_names(model_class):
    """Return a tuple of the names of ``model_class``'s StreamFields.

    Memoised per model class for the life of the process (field definitions
    don't change at runtime).
    """
    return tuple(
        field.name for field in model_class._meta.concrete_fields
        if isinstance(field, StreamField)
    )
class BasePageManager(models.Manager):
    def get_queryset(self):
        # Pages are always returned in tree order (materialised-path ordering).
        return self._queryset_class(self.model).order_by('path')
# Default page manager: BasePageManager combined with PageQuerySet's methods.
PageManager = BasePageManager.from_queryset(PageQuerySet)
class PageBase(models.base.ModelBase):
    """Metaclass for Page: derives per-class defaults and registers concrete
    page types in PAGE_MODEL_CLASSES."""
    def __init__(cls, name, bases, dct):
        super(PageBase, cls).__init__(name, bases, dct)
        if 'template' not in dct:
            # Default template path: "<app_label>/<model_name_underscored>.html"
            cls.template = "%s/%s.html" % (cls._meta.app_label, camelcase_to_underscore(name))
        if 'ajax_template' not in dct:
            cls.ajax_template = None
        # Per-class caches for validated subpage/parent-page model lists.
        cls._clean_subpage_models = None
        cls._clean_parent_page_models = None
        if 'is_creatable' not in dct:
            # Abstract page types are never creatable unless explicitly set.
            cls.is_creatable = not cls._meta.abstract
        if not cls._meta.abstract:
            PAGE_MODEL_CLASSES.append(cls)
class AbstractPage(TranslatableMixin, TreebeardPathFixMixin, MP_Node):
    """Abstract superclass for Page. According to Django's inheritance rules, managers set on
    abstract models are inherited by subclasses, but managers set on concrete models that are extended
    via multi-table inheritance are not."""
    objects = PageManager()
    class Meta:
        abstract = True
class Page(AbstractPage, index.Indexed, ClusterableModel, metaclass=PageBase):
    """Core Wagtail page model: combines materialised-path tree storage
    (MP_Node via AbstractPage), translation support, search indexing, and
    clusterable child relations."""
    title = models.CharField(
        verbose_name=_('title'),
        max_length=255,
        help_text=_("The page title as you'd like it to be seen by the public")
    )
    # to reflect title of a current draft in the admin UI
    draft_title = models.CharField(
        max_length=255,
        editable=False
    )
    slug = models.SlugField(
        verbose_name=_('slug'),
        allow_unicode=True,
        max_length=255,
        help_text=_("The name of the page as it will appear in URLs e.g http://domain.com/blog/[my-slug]/")
    )
    # The specific page type; falls back to the base Page content type if the
    # specific type's ContentType row is ever deleted.
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='pages',
        on_delete=models.SET(get_default_page_content_type)
    )
    # Publishing state flags (managed by the publish/unpublish machinery).
    live = models.BooleanField(verbose_name=_('live'), default=True, editable=False)
    has_unpublished_changes = models.BooleanField(
        verbose_name=_('has unpublished changes'),
        default=False,
        editable=False
    )
    # Cached public URL path; kept up to date by set_url_path()/save().
    url_path = models.TextField(verbose_name=_('URL path'), blank=True, editable=False)
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_('owner'),
        null=True,
        blank=True,
        editable=True,
        on_delete=models.SET_NULL,
        related_name='owned_pages'
    )
    seo_title = models.CharField(
        verbose_name=_("title tag"),
        max_length=255,
        blank=True,
        help_text=_("The name of the page displayed on search engine results as the clickable headline.")
    )
    show_in_menus_default = False
    show_in_menus = models.BooleanField(
        verbose_name=_('show in menus'),
        default=False,
        help_text=_("Whether a link to this page will appear in automatically generated menus")
    )
    search_description = models.TextField(
        verbose_name=_('meta description'),
        blank=True,
        help_text=_("The descriptive text displayed underneath a headline in search engine results.")
    )
    # Scheduled publishing window.
    go_live_at = models.DateTimeField(
        verbose_name=_("go live date/time"),
        blank=True,
        null=True
    )
    expire_at = models.DateTimeField(
        verbose_name=_("expiry date/time"),
        blank=True,
        null=True
    )
    expired = models.BooleanField(verbose_name=_('expired'), default=False, editable=False)
    # Edit-locking state.
    locked = models.BooleanField(verbose_name=_('locked'), default=False, editable=False)
    locked_at = models.DateTimeField(verbose_name=_('locked at'), null=True, editable=False)
    locked_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_('locked by'),
        null=True,
        blank=True,
        editable=False,
        on_delete=models.SET_NULL,
        related_name='locked_pages'
    )
    first_published_at = models.DateTimeField(
        verbose_name=_('first published at'),
        blank=True,
        null=True,
        db_index=True
    )
    last_published_at = models.DateTimeField(
        verbose_name=_('last published at'),
        null=True,
        editable=False
    )
    latest_revision_created_at = models.DateTimeField(
        verbose_name=_('latest revision created at'),
        null=True,
        editable=False
    )
    live_revision = models.ForeignKey(
        'PageRevision',
        related_name='+',
        verbose_name=_('live revision'),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        editable=False
    )
    # If non-null, this page is an alias of the linked page
    # This means the page is kept in sync with the live version
    # of the linked pages and is not editable by users.
    alias_of = models.ForeignKey(
        'self',
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        editable=False,
        related_name='aliases',
    )
    search_fields = [
        index.SearchField('title', partial_match=True, boost=2),
        index.AutocompleteField('title'),
        index.FilterField('title'),
        index.FilterField('id'),
        index.FilterField('live'),
        index.FilterField('owner'),
        index.FilterField('content_type'),
        index.FilterField('path'),
        index.FilterField('depth'),
        index.FilterField('locked'),
        index.FilterField('show_in_menus'),
        index.FilterField('first_published_at'),
        index.FilterField('last_published_at'),
        index.FilterField('latest_revision_created_at'),
        index.FilterField('locale'),
        index.FilterField('translation_key'),
    ]
    # Do not allow plain Page instances to be created through the Wagtail admin
    is_creatable = False
    # Define the maximum number of instances this page type can have. Default to unlimited.
    max_count = None
    # Define the maximum number of instances this page can have under a specific parent. Default to unlimited.
    max_count_per_parent = None
    # An array of additional field names that will not be included when a Page is copied.
    exclude_fields_in_copy = []
    default_exclude_fields_in_copy = ['id', 'path', 'depth', 'numchild', 'url_path', 'path', 'index_entries', 'comments']
    # Define these attributes early to avoid masking errors. (Issue #3078)
    # The canonical definition is in wagtailadmin.edit_handlers.
    content_panels = []
    promote_panels = []
    settings_panels = []
    def __init__(self, *args, **kwargs):
        """Initialise the page, defaulting content_type (to this exact class)
        and show_in_menus for newly created instances."""
        super().__init__(*args, **kwargs)
        if not self.id:
            # this model is being newly created
            # rather than retrieved from the db;
            if not self.content_type_id:
                # set content type to correctly represent the model class
                # that this was created as
                self.content_type = ContentType.objects.get_for_model(self)
            if 'show_in_menus' not in kwargs:
                # if the value is not set on submit refer to the model setting
                self.show_in_menus = self.show_in_menus_default
    def __str__(self):
        """Pages are represented by their title."""
        return self.title
    @classmethod
    def get_streamfield_names(cls):
        """Return the names of this page type's StreamFields (memoised per
        class by the module-level helper)."""
        return get_streamfield_names(cls)
def set_url_path(self, parent):
if parent:
self.url_path = parent.url_path + self.slug + '/'
else:
# a page without a parent is the tree root, which always has a url_path of '/'
self.url_path = '/'
return self.url_path
@staticmethod
def _slug_is_available(slug, parent_page, page=None):
if parent_page is None:
# the root page's slug can be whatever it likes...
return True
siblings = parent_page.get_children()
if page:
siblings = siblings.not_page(page)
return not siblings.filter(slug=slug).exists()
def _get_autogenerated_slug(self, base_slug):
candidate_slug = base_slug
suffix = 1
parent_page = self.get_parent()
while not Page._slug_is_available(candidate_slug, parent_page, self):
suffix += 1
candidate_slug = "%s-%d" % (base_slug, suffix)
return candidate_slug
    def get_default_locale(self):
        """Return the locale a new page should default to: the parent page's
        locale where one exists, otherwise the mixin's fallback."""
        parent = self.get_parent()
        if parent is not None:
            # defer() + select_related('locale') keeps the query minimal.
            return (
                parent.specific_class.objects.defer().select_related("locale")
                .get(id=parent.id)
                .locale
            )
        return super().get_default_locale()
    def full_clean(self, *args, **kwargs):
        """Validate the page, first filling in derived defaults: an
        auto-generated slug from the title, draft_title, and the locale."""
        if not self.slug:
            # Autogenerate a unique slug from the title (unicode slugs are
            # allowed unless WAGTAIL_ALLOW_UNICODE_SLUGS is set falsy).
            allow_unicode = getattr(settings, 'WAGTAIL_ALLOW_UNICODE_SLUGS', True)
            base_slug = slugify(self.title, allow_unicode=allow_unicode)
            # only proceed if we get a non-empty base slug back from slugify
            if base_slug:
                self.slug = self._get_autogenerated_slug(base_slug)
        if not self.draft_title:
            self.draft_title = self.title
        if self.locale_id is None:
            self.locale = self.get_default_locale()
        super().full_clean(*args, **kwargs)
    def clean(self):
        """Reject slugs that clash with a sibling page's slug."""
        super().clean()
        if not Page._slug_is_available(self.slug, self.get_parent(), self):
            raise ValidationError({'slug': _("This slug is already in use")})
    def is_site_root(self):
        """Return True if this page (or any translation of it) is the root
        page of a configured Site - the match is on translation_key, not id,
        so translations of a site root count as site roots too."""
        return Site.objects.filter(root_page__translation_key=self.translation_key).exists()
    @transaction.atomic
    def save(self, clean=True, user=None, log_action=False, **kwargs):
        """Save the page, maintaining url_path consistency and audit logging.

        ``clean=False`` skips full_clean(); ``log_action`` selects the audit
        log action (creation is logged by default; pass ``log_action=None``
        to suppress it); ``user`` is recorded on the log entry.
        """
        if clean:
            self.full_clean()
        update_descendant_url_paths = False
        is_new = self.id is None
        if is_new:
            # a new page is saved through a treebeard method like add_child,
            # in which case the 'path' field
            # has been set and so we can safely call get_parent
            self.set_url_path(self.get_parent())
        else:
            # Check that we are committing the slug to the database
            # Basically: If update_fields has been specified, and slug is not included, skip this step
            if not ('update_fields' in kwargs and 'slug' not in kwargs['update_fields']):
                # see if the slug has changed from the record in the db, in which case we need to
                # update url_path of self and all descendants
                old_record = Page.objects.get(id=self.id)
                if old_record.slug != self.slug:
                    self.set_url_path(self.get_parent())
                    update_descendant_url_paths = True
                    old_url_path = old_record.url_path
                    new_url_path = self.url_path
        result = super().save(**kwargs)
        if not is_new and update_descendant_url_paths:
            self._update_descendant_url_paths(old_url_path, new_url_path)
        # Check if this is a root page of any sites and clear the 'wagtail_site_root_paths' key if so
        # Note: New translations of existing site roots are considered site roots as well, so we must
        # always check if this page is a site root, even if it's new.
        if self.is_site_root():
            cache.delete('wagtail_site_root_paths')
        if is_new:
            cls = type(self)
            logger.info(
                "Page created: \"%s\" id=%d content_type=%s.%s path=%s",
                self.title,
                self.id,
                cls._meta.app_label,
                cls.__name__,
                self.url_path
            )
        if log_action is not None:
            # Page creation is a special case that we want logged by default, but allow skipping it
            # explicitly by passing log_action=None
            if is_new:
                PageLogEntry.objects.log_action(
                    instance=self,
                    action='wagtail.create',
                    user=user or self.owner,
                    content_changed=True,
                )
            elif log_action:
                PageLogEntry.objects.log_action(
                    instance=self,
                    action=log_action,
                    user=user
                )
        return result
    def delete(self, *args, **kwargs):
        """Delete the page (and, via treebeard, its subtree), logging a
        'wagtail.delete' entry for the page and each child."""
        # Ensure that deletion always happens on an instance of Page, not a specific subclass. This
        # works around a bug in treebeard <= 3.0 where calling SpecificPage.delete() fails to delete
        # child pages that are not instances of SpecificPage
        if type(self) is Page:
            user = kwargs.pop('user', None)
            def log_deletion(page, user):
                # Record the deletion in the page audit log.
                PageLogEntry.objects.log_action(
                    instance=page,
                    action='wagtail.delete',
                    user=user,
                    deleted=True,
                )
            if self.get_children().exists():
                for child in self.get_children():
                    log_deletion(child.specific, user)
            log_deletion(self.specific, user)
            # this is a Page instance, so carry on as we were
            return super().delete(*args, **kwargs)
        else:
            # retrieve an actual Page instance and delete that instead of self
            return Page.objects.get(id=self.id).delete(*args, **kwargs)
    @classmethod
    def check(cls, **kwargs):
        """Django system checks for page models: forbid CASCADE foreign keys,
        require a PageManager, and validate subpage/parent-page type settings."""
        errors = super(Page, cls).check(**kwargs)
        # Check that foreign keys from pages are not configured to cascade
        # This is the default Django behaviour which must be explicitly overridden
        # to prevent pages disappearing unexpectedly and the tree being corrupted
        # get names of foreign keys pointing to parent classes (such as page_ptr)
        field_exceptions = [field.name
                            for model in [cls] + list(cls._meta.get_parent_list())
                            for field in model._meta.parents.values() if field]
        for field in cls._meta.fields:
            if isinstance(field, models.ForeignKey) and field.name not in field_exceptions:
                if field.remote_field.on_delete == models.CASCADE:
                    errors.append(
                        checks.Warning(
                            "Field hasn't specified on_delete action",
                            hint="Set on_delete=models.SET_NULL and make sure the field is nullable or set on_delete=models.PROTECT. Wagtail does not allow simple database CASCADE because it will corrupt its tree storage.",
                            obj=field,
                            id='wagtailcore.W001',
                        )
                    )
        if not isinstance(cls.objects, PageManager):
            errors.append(
                checks.Error(
                    "Manager does not inherit from PageManager",
                    hint="Ensure that custom Page managers inherit from wagtail.core.models.PageManager",
                    obj=cls,
                    id='wagtailcore.E002',
                )
            )
        try:
            cls.clean_subpage_models()
        except (ValueError, LookupError) as e:
            errors.append(
                checks.Error(
                    "Invalid subpage_types setting for %s" % cls,
                    hint=str(e),
                    id='wagtailcore.E002'
                )
            )
        try:
            cls.clean_parent_page_models()
        except (ValueError, LookupError) as e:
            errors.append(
                checks.Error(
                    "Invalid parent_page_types setting for %s" % cls,
                    hint=str(e),
                    id='wagtailcore.E002'
                )
            )
        return errors
def _update_descendant_url_paths(self, old_url_path, new_url_path):
    """Rewrite the ``url_path`` of every descendant in a single SQL UPDATE.

    Replaces the ``old_url_path`` prefix with ``new_url_path`` using
    Concat/Substr, so no Python-side iteration over descendants is needed.
    Descendants are selected by the treebeard ``path`` prefix; ``self`` is
    excluded because its own url_path is saved separately.
    """
    (
        Page.objects
        .filter(path__startswith=self.path)
        .exclude(pk=self.pk)
        .update(
            url_path=Concat(
                Value(new_url_path),
                # Substr is 1-indexed, hence the +1 to skip the old prefix.
                Substr('url_path', len(old_url_path) + 1)
            )
        )
    )
def get_specific(self, deferred=False, copy_attrs=None, copy_attrs_exclude=None):
    """Return this page as an instance of its most specific subclass.

    If ``deferred`` is True, the specific instance is built from the values
    already loaded on ``self`` (missing fields become ``DEFERRED``) without a
    database query; otherwise it is fetched from the database. ``copy_attrs``
    names non-field attributes to copy onto the result; alternatively
    ``copy_attrs_exclude`` copies everything except the named attributes.
    Returns ``self`` unchanged when the specific class is unknown (e.g. the
    subclass was removed from the codebase) or when ``self`` already is the
    specific class.
    """
    model_class = self.specific_class

    if model_class is None:
        # Specific content type no longer exists in the codebase; the generic
        # Page instance is the best we can do.
        return self

    if isinstance(self, model_class):
        # self is already the an instance of the most specific class
        return self

    if deferred:
        # Generate a tuple of values in the order expected by __init__(),
        # with missing values substituted with DEFERRED ()
        values = tuple(
            getattr(self, f.attname, self.pk if f.primary_key else DEFERRED)
            for f in model_class._meta.concrete_fields
        )

        # Create object from known attribute values
        specific_obj = model_class(*values)
        specific_obj._state.adding = self._state.adding
    else:
        # Fetch object from database
        specific_obj = model_class._default_manager.get(id=self.id)

    # Copy non-field attribute values
    if copy_attrs is not None:
        for attr in (attr for attr in copy_attrs if attr in self.__dict__):
            setattr(specific_obj, attr, getattr(self, attr))
    else:
        exclude = copy_attrs_exclude or ()
        for k, v in (
            (k, v) for k, v in self.__dict__.items()
            if k not in exclude
        ):
            # only set values that haven't already been set
            specific_obj.__dict__.setdefault(k, v)

    return specific_obj
@cached_property
def specific(self):
    # Cached, non-deferred specific instance (may hit the database once).
    return self.get_specific()
@cached_property
def specific_deferred(self):
    # Cached specific instance built without a database query; fields not
    # already loaded on self stay deferred.
    return self.get_specific(deferred=True)
@cached_property
def specific_class(self):
    # Model class for this page's content type; None if the model has been
    # removed from the codebase (ContentType.model_class() returns None then).
    return self.cached_content_type.model_class()
@property
def cached_content_type(self):
    # Uses ContentType's shared in-process cache instead of self.content_type.
    return ContentType.objects.get_for_id(self.content_type_id)
@property
def localized_draft(self):
    """Return the translation of this page for the active locale, or ``self``.

    Unlike ``localized``, the returned translation does not need to be live.
    Falls back to ``self`` when no active locale can be determined or no
    translation exists.
    """
    try:
        locale = Locale.get_active()
    except (LookupError, Locale.DoesNotExist):
        return self

    if locale.id == self.locale_id:
        return self

    return self.get_translation_or_none(locale) or self
@property
def localized(self):
    """Return the live translation of this page for the active locale.

    Falls back to ``self`` when the translation exists but is not live.
    """
    localized = self.localized_draft
    if not localized.live:
        return self

    return localized
def route(self, request, path_components):
    """Recursively resolve ``path_components`` to a routable page.

    Each component is matched against a child slug; an exhausted path on a
    live page yields a ``RouteResult``. Raises ``Http404`` for unknown slugs
    or non-live terminal pages.
    """
    if path_components:
        # request is for a child of this page
        child_slug = path_components[0]
        remaining_components = path_components[1:]

        try:
            subpage = self.get_children().get(slug=child_slug)
        except Page.DoesNotExist:
            raise Http404

        # Delegate to the child's specific class so subclasses can override
        # routing (e.g. RoutablePageMixin-style behaviour).
        return subpage.specific.route(request, remaining_components)

    else:
        # request is for this very page
        if self.live:
            return RouteResult(self)
        else:
            raise Http404
def get_admin_display_title(self):
return self.draft_title or self.title
def save_revision(self, user=None, submitted_for_moderation=False, approved_go_live_at=None, changed=True,
                  log_action=False, previous_revision=None, clean=True):
    """Create and return a new ``PageRevision`` for this page.

    Also updates denormalised fields on the page itself
    (``latest_revision_created_at``, ``draft_title`` and, when ``changed``,
    ``has_unpublished_changes``).  ``log_action`` may be True (auto-detect
    edit vs revert) or an explicit action string; ``previous_revision`` marks
    the revision this one was reverted from.  Raises ``RuntimeError`` when
    called on an alias page, which must not carry revisions of its own.
    """
    # Raise an error if this page is an alias.
    if self.alias_of_id:
        raise RuntimeError(
            "save_revision() was called on an alias page. "
            "Revisions are not required for alias pages as they are an exact copy of another page."
        )

    if clean:
        self.full_clean()

    new_comments = self.comments.filter(pk__isnull=True)
    for comment in new_comments:
        # We need to ensure comments have an id in the revision, so positions can be identified correctly
        comment.save()

    # Create revision
    revision = self.revisions.create(
        content_json=self.to_json(),
        user=user,
        submitted_for_moderation=submitted_for_moderation,
        approved_go_live_at=approved_go_live_at,
    )

    for comment in new_comments:
        comment.revision_created = revision

    update_fields = ['comments']

    self.latest_revision_created_at = revision.created_at
    update_fields.append('latest_revision_created_at')

    self.draft_title = self.title
    update_fields.append('draft_title')

    if changed:
        self.has_unpublished_changes = True
        update_fields.append('has_unpublished_changes')

    if update_fields:
        # clean=False because the fields we're updating don't need validation
        self.save(update_fields=update_fields, clean=False)

    # Log
    logger.info("Page edited: \"%s\" id=%d revision_id=%d", self.title, self.id, revision.id)
    if log_action:
        if not previous_revision:
            PageLogEntry.objects.log_action(
                instance=self,
                action=log_action if isinstance(log_action, str) else 'wagtail.edit',
                user=user,
                revision=revision,
                content_changed=changed,
            )
        else:
            # A previous_revision means this save is a revert to that revision.
            PageLogEntry.objects.log_action(
                instance=self,
                action=log_action if isinstance(log_action, str) else 'wagtail.revert',
                user=user,
                data={
                    'revision': {
                        'id': previous_revision.id,
                        'created': previous_revision.created_at.strftime("%d %b %Y %H:%M")
                    }
                },
                revision=revision,
                content_changed=changed,
            )

    if submitted_for_moderation:
        logger.info("Page submitted for moderation: \"%s\" id=%d revision_id=%d", self.title, self.id, revision.id)

    return revision
def get_latest_revision(self):
    """Return the most recent revision of this page, or None if there are none."""
    # '-id' breaks ties between revisions created in the same instant.
    return self.revisions.order_by('-created_at', '-id').first()
def get_latest_revision_as_page(self):
    """Return the page as of its latest revision (its current draft state).

    When there are no unpublished changes, the live database record is
    returned instead of deserialising a revision.
    """
    if not self.has_unpublished_changes:
        # Use the live database copy in preference to the revision record, as:
        # 1) this will pick up any changes that have been made directly to the model,
        #    such as automated data imports;
        # 2) it ensures that inline child objects pick up real database IDs even if
        #    those are absent from the revision data. (If this wasn't the case, the child
        #    objects could end up without stable IDs — comment truncated in source;
        #    confirm the original rationale upstream.)
        return self.specific

    latest_revision = self.get_latest_revision()

    if latest_revision:
        return latest_revision.as_page_object()
    else:
        return self.specific
def update_aliases(self, *, revision=None, user=None, _content_json=None, _updated_ids=None):
    """Publish the current content of this page to all of its alias pages.

    ``revision`` is passed through to the ``page_published`` signal.
    ``_content_json`` and ``_updated_ids`` are internal arguments used by the
    recursive calls that update aliases-of-aliases; callers should not supply
    them.
    """
    specific_self = self.specific

    if _content_json is None:
        _content_json = self.to_json()

    # A list of IDs that have already been updated. This is just in case someone has
    # created an alias loop (which is impossible to do with the UI Wagtail provides)
    _updated_ids = _updated_ids or []

    for alias in self.specific_class.objects.filter(alias_of=self).exclude(id__in=_updated_ids):
        # FIXME: Switch to the same fields that are excluded from copy
        # We can't do this right now because we can't exclude fields from with_content_json
        # (note: 'path' was previously listed twice here; once is sufficient)
        exclude_fields = ['id', 'path', 'depth', 'numchild', 'url_path', 'index_entries']

        # Copy field content
        alias_updated = alias.with_content_json(_content_json)

        # Publish the alias if it's currently in draft
        alias_updated.live = True
        alias_updated.has_unpublished_changes = False

        # Copy child relations
        child_object_map = specific_self.copy_all_child_relations(target=alias_updated, exclude=exclude_fields)

        if child_object_map:
            alias_is_translation = alias.translation_key == self.translation_key

            def process_child_object(child_object):
                if isinstance(child_object, TranslatableMixin):
                    # Child objects should live in the same locale as the alias page.
                    child_object.locale = alias_updated.locale

                    # If the alias isn't a translation of the original page, its
                    # child objects aren't translations either, so give them
                    # fresh translation keys.
                    if not alias_is_translation:
                        child_object.translation_key = uuid.uuid4()

            for (rel, previous_id), child_objects in child_object_map.items():
                if previous_id is None:
                    # New child objects come through as a list for this relation.
                    for child_object in child_objects:
                        process_child_object(child_object)
                else:
                    # Existing child objects are mapped individually by their old pk.
                    process_child_object(child_objects)

        # Copy M2M relations
        _copy_m2m_relations(specific_self, alias_updated, exclude_fields=exclude_fields)

        # Don't change the aliases slug
        alias_updated.slug = alias.slug
        alias_updated.set_url_path(alias_updated.get_parent())

        # Aliases have no revisions of their own, so mirror the fields that
        # save_revision() would normally maintain.
        alias_updated.draft_title = alias_updated.title
        alias_updated.latest_revision_created_at = self.latest_revision_created_at

        alias_updated.save(clean=False)

        page_published.send(sender=alias_updated.specific_class, instance=alias_updated, revision=revision, alias=True)

        # Log the publish of the alias
        PageLogEntry.objects.log_action(
            instance=alias_updated,
            action='wagtail.publish',
            user=user,
        )

        # Bug fix: record this alias as handled BEFORE recursing. Previously
        # nothing was ever appended to _updated_ids, so the loop-protection
        # exclude(id__in=_updated_ids) above was inert and an alias cycle
        # would recurse forever.
        _updated_ids.append(alias.id)

        # Update any aliases of that alias

        # Design note:
        # It could be argued that this will be faster if we just changed these alias-of-alias
        # pages to all point to the original page and avoid having to update them recursively.
        #
        # But, it's useful to have a record of how aliases have been chained.
        alias.update_aliases(revision=revision, _content_json=_content_json, _updated_ids=_updated_ids)

update_aliases.alters_data = True
def unpublish(self, set_expired=False, commit=True, user=None, log_action=True):
    """Take this page (and all of its aliases) offline.

    Clears ``live`` and ``live_revision``, marks the page as having
    unpublished changes, optionally flags it expired, cancels any approved
    scheduled publishes, sends ``page_unpublished`` and writes an audit-log
    entry (``log_action`` may be True or a custom action string).
    """
    if self.live:
        self.live = False
        self.has_unpublished_changes = True
        self.live_revision = None

        if set_expired:
            self.expired = True

        if commit:
            # skip validation: we're only toggling status fields
            self.save(clean=False)

        page_unpublished.send(sender=self.specific_class, instance=self.specific)

        if log_action:
            PageLogEntry.objects.log_action(
                instance=self,
                action=log_action if isinstance(log_action, str) else 'wagtail.unpublish',
                user=user,
            )

        logger.info("Page unpublished: \"%s\" id=%d", self.title, self.id)

        # Cancel any pending scheduled publish of this page.
        self.revisions.update(approved_go_live_at=None)

        # Unpublish aliases
        for alias in self.aliases.all():
            alias.unpublish()
# Optional extra template-context key under which the page is exposed
# (in addition to PAGE_TEMPLATE_VAR and 'self').
context_object_name = None

def get_context(self, request, *args, **kwargs):
    """Build the template context for rendering this page."""
    context = {
        PAGE_TEMPLATE_VAR: self,
        'self': self,
        'request': request,
    }
    if self.context_object_name:
        context[self.context_object_name] = self
    return context
def get_template(self, request, *args, **kwargs):
    """Return the template name to render, preferring the AJAX template
    (when defined) for AJAX requests."""
    # NOTE(review): HttpRequest.is_ajax() is deprecated as of Django 3.1;
    # consider checking the X-Requested-With header directly when upgrading.
    if request.is_ajax():
        return self.ajax_template or self.template
    else:
        return self.template
def serve(self, request, *args, **kwargs):
    """Render this page as a TemplateResponse for the given request."""
    # Preserve an is_preview flag set by serve_preview(); default to False.
    request.is_preview = getattr(request, 'is_preview', False)

    return TemplateResponse(
        request,
        self.get_template(request, *args, **kwargs),
        self.get_context(request, *args, **kwargs)
    )
def is_navigable(self):
    """Return True if this page should appear in navigation structures.

    A page is navigable when it has children (it is not a leaf) or when it
    sits at depth 2, i.e. directly under the tree root (a site homepage).
    """
    if self.depth == 2:
        return True
    return not self.is_leaf()
def _get_site_root_paths(self, request=None):
    """Return the cached result of ``Site.get_site_root_paths()``.

    The result is memoised on the request object when one is given
    (shared across pages in the same request), otherwise on ``self``.
    """
    # Object to cache on: prefer the request so the lookup is shared.
    cache_object = request if request else self
    try:
        return cache_object._wagtail_cached_site_root_paths
    except AttributeError:
        cache_object._wagtail_cached_site_root_paths = Site.get_site_root_paths()
        return cache_object._wagtail_cached_site_root_paths
def get_url_parts(self, request=None):
    """Return a ``(site_id, root_url, page_path)`` tuple for this page.

    Returns None when the page is not under any site root path, and
    ``(site_id, None, None)`` when a site matches but no 'wagtail_serve'
    URL route is registered (headless setups).
    """
    possible_sites = [
        (pk, path, url, language_code)
        for pk, path, url, language_code in self._get_site_root_paths(request)
        if self.url_path.startswith(path)
    ]

    if not possible_sites:
        return None

    site_id, root_path, root_url, language_code = possible_sites[0]

    # Prefer the site matching the current request, if any; otherwise fall
    # back to the first candidate.
    site = Site.find_for_request(request)
    if site:
        for site_id, root_path, root_url, language_code in possible_sites:
            if site_id == site.pk:
                break
        else:
            site_id, root_path, root_url, language_code = possible_sites[0]

    use_wagtail_i18n = getattr(settings, 'WAGTAIL_I18N_ENABLED', False)

    if use_wagtail_i18n:
        # If the active language is a variant of this page's content language,
        # use that instead
        # This is used when LANGUAGES contain more languages than WAGTAIL_CONTENT_LANGUAGES
        try:
            if get_supported_content_language_variant(translation.get_language()) == language_code:
                language_code = translation.get_language()
        except LookupError:
            # active language code is not a recognised content language, so leave
            # page's language code unchanged
            pass

    try:
        if use_wagtail_i18n:
            # Reverse under the page's language so i18n URL prefixes are applied.
            with translation.override(language_code):
                page_path = reverse(
                    'wagtail_serve', args=(self.url_path[len(root_path):],))
        else:
            page_path = reverse(
                'wagtail_serve', args=(self.url_path[len(root_path):],))
    except NoReverseMatch:
        return (site_id, None, None)

    # Remove the trailing slash from the URL reverse generates if
    # WAGTAIL_APPEND_SLASH is False, except for the root path
    if not WAGTAIL_APPEND_SLASH and page_path != '/':
        page_path = page_path.rstrip('/')

    return (site_id, root_url, page_path)
def get_full_url(self, request=None):
    """Return the full external URL (including protocol / domain) of this page,
    or None if it is not routable."""
    url_parts = self.get_url_parts(request=request)

    # Note operator precedence: routable pages may still yield parts with only
    # one of root_url / page_path missing.
    if url_parts is None or url_parts[1] is None and url_parts[2] is None:
        # page is not routable
        return

    site_id, root_url, page_path = url_parts

    return root_url + page_path

full_url = property(get_full_url)
def get_url(self, request=None, current_site=None):
    """Return the most appropriate URL for linking to this page.

    Returns a local (path-only) URL when the page belongs to the current
    site or only one site exists; otherwise a fully-qualified URL. Returns
    None when the page is not routable.
    """
    # ``current_site`` is purposefully undocumented, as one can simply pass the request and get
    # a relative URL based on ``Site.find_for_request()``. Nonetheless, support it here to avoid
    # copy/pasting the code to the ``relative_url`` method below.
    if current_site is None and request is not None:
        site = Site.find_for_request(request)
        current_site = site
    url_parts = self.get_url_parts(request=request)

    if url_parts is None or url_parts[1] is None and url_parts[2] is None:
        # page is not routable
        return

    site_id, root_url, page_path = url_parts

    # Get number of unique sites in root paths
    # Note: there may be more root paths to sites if there are multiple languages
    num_sites = len(set(root_path[0] for root_path in self._get_site_root_paths(request)))

    if (current_site is not None and site_id == current_site.id) or num_sites == 1:
        # the site matches OR we're only running a single site, so a local URL is sufficient
        return page_path
    else:
        return root_url + page_path

url = property(get_url)
def relative_url(self, current_site, request=None):
    """Return the URL of this page relative to ``current_site``'s root
    (local path when the page is on that site, full URL otherwise)."""
    return self.get_url(request=request, current_site=current_site)
def get_site(self):
    """Return the Site this page belongs to, or None if it is not routable."""
    url_parts = self.get_url_parts()

    if url_parts is None:
        # page is not routable
        return

    site_id, root_url, page_path = url_parts

    return Site.objects.get(id=site_id)
@classmethod
def get_indexed_objects(cls):
    """Limit search indexing to pages whose content_type matches ``cls``,
    so each specific subclass only indexes its own rows."""
    content_type = ContentType.objects.get_for_model(cls)
    return super(Page, cls).get_indexed_objects().filter(content_type=content_type)
def get_indexed_instance(self):
    """Return the specific instance to index, or None when the specific
    record does not exist yet."""
    # The specific row may be missing (e.g. partially-loaded fixtures before the
    # subclass table entry has been created). In those cases, we aren't ready
    # to be indexed yet, so return None.
    try:
        return self.specific
    except self.specific_class.DoesNotExist:
        return None
@classmethod
def clean_subpage_models(cls):
    """Return the list of Page models named in ``subpage_types``.

    Falls back to all page models when ``subpage_types`` is unset. The
    resolved list is cached on the class (``cls._clean_subpage_models``).
    Raises ValueError for unresolvable model strings and LookupError when a
    named model is not a Page subclass.
    """
    if cls._clean_subpage_models is None:
        subpage_types = getattr(cls, 'subpage_types', None)
        if subpage_types is None:
            # if subpage_types is not specified on the Page class, allow all
            # page models
            cls._clean_subpage_models = get_page_models()
        else:
            cls._clean_subpage_models = [
                resolve_model_string(model_string, cls._meta.app_label)
                for model_string in subpage_types
            ]

            for model in cls._clean_subpage_models:
                if not issubclass(model, Page):
                    raise LookupError("%s is not a Page subclass" % model)

    return cls._clean_subpage_models
@classmethod
def clean_parent_page_models(cls):
    """Return the list of Page models named in ``parent_page_types``.

    Mirror of ``clean_subpage_models``: falls back to all page models,
    caches the result on the class, raises ValueError / LookupError on
    invalid entries.
    """
    if cls._clean_parent_page_models is None:
        parent_page_types = getattr(cls, 'parent_page_types', None)
        if parent_page_types is None:
            # if parent_page_types is not specified, allow all page models
            cls._clean_parent_page_models = get_page_models()
        else:
            cls._clean_parent_page_models = [
                resolve_model_string(model_string, cls._meta.app_label)
                for model_string in parent_page_types
            ]

            for model in cls._clean_parent_page_models:
                if not issubclass(model, Page):
                    raise LookupError("%s is not a Page subclass" % model)

    return cls._clean_parent_page_models
@classmethod
def allowed_parent_page_models(cls):
    """Return page models that may host ``cls`` as a direct child.

    A parent is allowed only when both sides agree: it appears in this
    class's parent_page_types AND lists this class in its subpage_types.
    """
    return [
        parent_model for parent_model in cls.clean_parent_page_models()
        if cls in parent_model.clean_subpage_models()
    ]
@classmethod
def allowed_subpage_models(cls):
    """Return page models that may be created as direct children of ``cls``
    (the mutual-agreement inverse of ``allowed_parent_page_models``)."""
    return [
        subpage_model for subpage_model in cls.clean_subpage_models()
        if cls in subpage_model.clean_parent_page_models()
    ]
@classmethod
def creatable_subpage_models(cls):
    """Return allowed subpage models filtered down to those that are
    creatable (``is_creatable`` is True)."""
    return [
        page_model for page_model in cls.allowed_subpage_models()
        if page_model.is_creatable
    ]
@classmethod
def can_exist_under(cls, parent):
    """Return True if a page of type ``cls`` may sit under ``parent``
    (by parent/subpage type rules alone — ignores creatability and counts)."""
    return cls in parent.specific_class.allowed_subpage_models()
@classmethod
def can_create_at(cls, parent):
    """Return True if a page of type ``cls`` may be created under ``parent``,
    taking is_creatable, type rules, max_count and max_count_per_parent
    into account."""
    can_create = cls.is_creatable and cls.can_exist_under(parent)

    if cls.max_count is not None:
        # site-wide instance limit for this page type
        can_create = can_create and cls.objects.count() < cls.max_count

    if cls.max_count_per_parent is not None:
        # per-parent instance limit for this page type
        can_create = can_create and parent.get_children().type(cls).count() < cls.max_count_per_parent

    return can_create
def can_move_to(self, parent):
    """Return True if this page may be moved under ``parent``.

    Cross-locale moves are only permitted when the destination is the tree
    root; otherwise the usual parent/subpage type rules apply.
    """
    destination_is_root = (parent.depth == 1)
    locales_differ = (parent.locale_id != self.locale_id)
    if locales_differ and not destination_is_root:
        return False
    return self.can_exist_under(parent)
@classmethod
def get_verbose_name(cls):
    """Return this page type's verbose_name with the first letter capitalised
    (capfirst leaves the rest of the string untouched)."""
    return capfirst(cls._meta.verbose_name)
@property
def status_string(self):
    """Translated, human-readable summary of this page's publication state."""
    if self.live:
        # Live pages may additionally carry scheduled / moderated / draft changes.
        if self.approved_schedule:
            return _("live + scheduled")
        if self.workflow_in_progress:
            return _("live + in moderation")
        if self.has_unpublished_changes:
            return _("live + draft")
        return _("live")

    # Not live: distinguish expired / scheduled / moderated pages from plain drafts.
    if self.expired:
        return _("expired")
    if self.approved_schedule:
        return _("scheduled")
    if self.workflow_in_progress:
        return _("in moderation")
    return _("draft")
@property
def approved_schedule(self):
    """True if any revision of this page has an approved go-live time pending."""
    return self.revisions.exclude(approved_go_live_at__isnull=True).exists()
def has_unpublished_subtree(self):
    """True if neither this page nor any of its descendants is live."""
    return (not self.live) and (not self.get_descendants().filter(live=True).exists())
def move(self, target, pos=None, user=None):
    """Move this page relative to ``target`` (treebeard ``pos`` semantics),
    keeping url_paths of the page and its descendants in sync and emitting
    pre/post move signals plus an audit-log entry.
    """
    # Determine old and new parents
    parent_before = self.get_parent()
    if pos in ('first-child', 'last-child', 'sorted-child'):
        parent_after = target
    else:
        # sibling positions: the new parent is the target's parent
        parent_after = target.get_parent()

    # Determine old and new url_paths
    # Fetching new object to avoid affecting `self`
    old_self = Page.objects.get(id=self.id)
    old_url_path = old_self.url_path
    new_url_path = old_self.set_url_path(parent=parent_after)

    # Emit pre_page_move signal
    pre_page_move.send(
        sender=self.specific_class or self.__class__,
        instance=self,
        parent_page_before=parent_before,
        parent_page_after=parent_after,
        url_path_before=old_url_path,
        url_path_after=new_url_path,
    )

    # Only commit when all descendants are properly updated
    with transaction.atomic():
        # Allow treebeard to update `path` values
        super().move(target, pos=pos)

        # Treebeard's move method doesn't actually update the in-memory instance,
        # so we need to work with a freshly loaded one now
        new_self = Page.objects.get(id=self.id)
        new_self.url_path = new_url_path
        new_self.save()

        # Update descendant paths if url_path has changed
        if old_url_path != new_url_path:
            new_self._update_descendant_url_paths(old_url_path, new_url_path)

    # Emit post_page_move signal
    post_page_move.send(
        sender=self.specific_class or self.__class__,
        instance=new_self,
        parent_page_before=parent_before,
        parent_page_after=parent_after,
        url_path_before=old_url_path,
        url_path_after=new_url_path,
    )

    # Log
    PageLogEntry.objects.log_action(
        instance=self,
        # Check if page was reordered (reordering doesn't change the parent)
        action='wagtail.reorder' if parent_before.id == target.id else 'wagtail.move',
        user=user,
        data={
            'source': {
                'id': parent_before.id,
                'title': parent_before.specific_deferred.get_admin_display_title()
            },
            'destination': {
                'id': parent_after.id,
                'title': parent_after.specific_deferred.get_admin_display_title()
            }
        }
    )
    logger.info("Page moved: \"%s\" id=%d path=%s", self.title, self.id, new_url_path)
def copy(self, recursive=False, to=None, update_attrs=None, copy_revisions=True, keep_live=True, user=None,
         process_child_object=None, exclude_fields=None, log_action='wagtail.copy', reset_translation_key=True, _mpnode_attrs=None):
    """Copy this page (optionally with its subtree) and return the new page.

    ``to`` is the destination parent (default: copy as a sibling);
    ``update_attrs`` overrides field values on the copy; ``keep_live``
    preserves live status (otherwise the copy is a draft); ``copy_revisions``
    duplicates revision history with child-object pks remapped;
    ``process_child_object(original, copy, relation, child)`` is a hook
    applied to each copied inline child; ``reset_translation_key`` gives the
    copy (and its children) fresh translation keys; ``_mpnode_attrs`` is an
    internal (path, depth) reservation used by the recursive calls.
    Raises RuntimeError on unsaved pages.
    """
    if self._state.adding:
        raise RuntimeError('Page.copy() called on an unsaved page')

    exclude_fields = self.default_exclude_fields_in_copy + self.exclude_fields_in_copy + (exclude_fields or [])
    specific_self = self.specific
    if keep_live:
        base_update_attrs = {
            'alias_of': None,
        }
    else:
        # Reset all publication state so the copy starts life as a draft.
        base_update_attrs = {
            'live': False,
            'has_unpublished_changes': True,
            'live_revision': None,
            'first_published_at': None,
            'last_published_at': None,
            'alias_of': None,
        }

    if user:
        base_update_attrs['owner'] = user

    if reset_translation_key:
        base_update_attrs['translation_key'] = uuid.uuid4()

    if update_attrs:
        base_update_attrs.update(update_attrs)

    page_copy, child_object_map = _copy(specific_self, exclude_fields=exclude_fields, update_attrs=base_update_attrs)

    # Save copied child objects and run process_child_object on them if we need to
    for (child_relation, old_pk), child_object in child_object_map.items():
        if process_child_object:
            process_child_object(specific_self, page_copy, child_relation, child_object)

        # When we're not copying for translation, we should give the translation_key a new value for each child object as well
        if reset_translation_key and isinstance(child_object, TranslatableMixin):
            child_object.translation_key = uuid.uuid4()

    # Save the new page
    if _mpnode_attrs:
        # We've got a tree position already reserved. Perform a quick save.
        page_copy.path = _mpnode_attrs[0]
        page_copy.depth = _mpnode_attrs[1]
        page_copy.save(clean=False)
    else:
        if to:
            if recursive and (to == self or to.is_descendant_of(self)):
                raise Exception("You cannot copy a tree branch recursively into itself")
            page_copy = to.add_child(instance=page_copy)
        else:
            page_copy = self.add_sibling(instance=page_copy)

        _mpnode_attrs = (page_copy.path, page_copy.depth)

    _copy_m2m_relations(specific_self, page_copy, exclude_fields=exclude_fields, update_attrs=base_update_attrs)

    # Copy revisions
    if copy_revisions:
        for revision in self.revisions.all():
            # Clearing pk makes save() insert a fresh row for the copy.
            revision.pk = None
            revision.submitted_for_moderation = False
            revision.approved_go_live_at = None
            revision.page = page_copy

            # Update ID fields in content
            revision_content = json.loads(revision.content_json)
            revision_content['pk'] = page_copy.pk

            for child_relation in get_all_child_relations(specific_self):
                accessor_name = child_relation.get_accessor_name()
                try:
                    child_objects = revision_content[accessor_name]
                except KeyError:
                    # KeyErrors are possible if the revision was created
                    # before this child relation was added to the database
                    continue

                for child_object in child_objects:
                    child_object[child_relation.field.name] = page_copy.pk

                    # Remap primary key to copied versions
                    # If the primary key is not recognised (eg, the child object has been deleted from the database)
                    # set the primary key to None
                    copied_child_object = child_object_map.get((child_relation, child_object['pk']))
                    child_object['pk'] = copied_child_object.pk if copied_child_object else None

            revision.content_json = json.dumps(revision_content)

            # Save
            revision.save()

    # Create a new revision
    # This code serves a few purposes:
    # * It makes sure update_attrs gets applied to the latest revision
    # * It bumps the last_revision_created_at value so the new page gets ordered as if it was just created
    # * It sets the user of the new revision so it's possible to see who copied the page by looking at its history
    latest_revision = page_copy.get_latest_revision_as_page()

    if update_attrs:
        for field, value in update_attrs.items():
            setattr(latest_revision, field, value)

    latest_revision_as_page_revision = latest_revision.save_revision(user=user, changed=False, clean=False)
    if keep_live:
        page_copy.live_revision = latest_revision_as_page_revision
        page_copy.last_published_at = latest_revision_as_page_revision.created_at
        page_copy.first_published_at = latest_revision_as_page_revision.created_at
        page_copy.save(clean=False)

    if page_copy.live:
        page_published.send(
            sender=page_copy.specific_class, instance=page_copy,
            revision=latest_revision_as_page_revision
        )

    # Log
    if log_action:
        parent = specific_self.get_parent()
        PageLogEntry.objects.log_action(
            instance=page_copy,
            action=log_action,
            user=user,
            data={
                'page': {
                    'id': page_copy.id,
                    'title': page_copy.get_admin_display_title()
                },
                'source': {'id': parent.id, 'title': parent.specific_deferred.get_admin_display_title()} if parent else None,
                'destination': {'id': to.id, 'title': to.specific_deferred.get_admin_display_title()} if to else None,
                'keep_live': page_copy.live and keep_live
            },
        )
        if page_copy.live and keep_live:
            # Log the publish if the use chose to keep the copied page live
            PageLogEntry.objects.log_action(
                instance=page_copy,
                action='wagtail.publish',
                user=user,
                revision=latest_revision_as_page_revision,
            )

    logger.info("Page copied: \"%s\" id=%d from=%d", page_copy.title, page_copy.id, self.id)

    # Copy child pages
    if recursive:
        numchild = 0

        for child_page in self.get_children().specific():
            newdepth = _mpnode_attrs[1] + 1
            child_mpnode_attrs = (
                # Reserve the child's tree position up front so treebeard
                # counters don't need a save per child.
                Page._get_path(_mpnode_attrs[0], newdepth, numchild),
                newdepth
            )
            numchild += 1
            child_page.copy(
                recursive=True,
                to=page_copy,
                copy_revisions=copy_revisions,
                keep_live=keep_live,
                user=user,
                process_child_object=process_child_object,
                _mpnode_attrs=child_mpnode_attrs
            )

        if numchild > 0:
            page_copy.numchild = numchild
            page_copy.save(clean=False, update_fields=['numchild'])

    return page_copy

copy.alters_data = True
def create_alias(self, *, recursive=False, parent=None, update_slug=None, update_locale=None, user=None, log_action='wagtail.create_alias', reset_translation_key=True, _mpnode_attrs=None):
    """Create (and return) an alias of this page, optionally recursively.

    The alias mirrors this page's content and publication state.  ``parent``
    is the destination parent (default: alias as a sibling); ``update_slug``
    and ``update_locale`` override the alias's slug/locale;
    ``reset_translation_key`` gives the alias and its inline children fresh
    translation keys (pass False when aliasing as a translation);
    ``_mpnode_attrs`` is an internal (path, depth) reservation used by the
    recursive calls.
    """
    specific_self = self.specific

    # Tree/identity fields that must never be copied onto the alias.
    # (Fix: 'path' was previously listed twice; once is sufficient.)
    exclude_fields = ['id', 'path', 'depth', 'numchild', 'url_path', 'index_entries']
    update_attrs = {
        'alias_of': self,
        'draft_title': self.title,
        # Likewise, an alias page can't have unpublished changes if it's live
        'has_unpublished_changes': not self.live,
    }
    if update_slug:
        update_attrs['slug'] = update_slug
    if update_locale:
        update_attrs['locale'] = update_locale
    if user:
        update_attrs['owner'] = user

    # When we're not copying for translation, we should give the translation_key a new value
    if reset_translation_key:
        update_attrs['translation_key'] = uuid.uuid4()

    alias, child_object_map = _copy(specific_self, update_attrs=update_attrs, exclude_fields=exclude_fields)

    # Update locale/translation keys on copied inline children to match.
    for (child_relation, old_pk), child_object in child_object_map.items():
        if isinstance(child_object, TranslatableMixin):
            if update_locale:
                child_object.locale = update_locale

            # When we're not copying for translation,
            # we should give the translation_key a new value for each child object as well
            if reset_translation_key:
                child_object.translation_key = uuid.uuid4()

    # Save the new page
    if _mpnode_attrs:
        # We've got a tree position already reserved. Perform a quick save
        alias.path = _mpnode_attrs[0]
        alias.depth = _mpnode_attrs[1]
        alias.save(clean=False)
    else:
        if parent:
            if recursive and (parent == self or parent.is_descendant_of(self)):
                raise Exception("You cannot copy a tree branch recursively into itself")
            alias = parent.add_child(instance=alias)
        else:
            alias = self.add_sibling(instance=alias)
        _mpnode_attrs = (alias.path, alias.depth)

    _copy_m2m_relations(specific_self, alias, exclude_fields=exclude_fields)

    # Log
    if log_action:
        source_parent = specific_self.get_parent()
        PageLogEntry.objects.log_action(
            instance=alias,
            action=log_action,
            user=user,
            data={
                'page': {
                    'id': alias.id,
                    'title': alias.get_admin_display_title()
                },
                'source': {'id': source_parent.id, 'title': source_parent.specific_deferred.get_admin_display_title()} if source_parent else None,
                'destination': {'id': parent.id, 'title': parent.specific_deferred.get_admin_display_title()} if parent else None,
            },
        )
        if alias.live:
            # Log the publish
            PageLogEntry.objects.log_action(
                instance=alias,
                action='wagtail.publish',
                user=user,
            )

    logger.info("Page alias created: \"%s\" id=%d from=%d", alias.title, alias.id, self.id)

    # Copy child pages
    if recursive:
        numchild = 0

        for child_page in self.get_children().specific():
            newdepth = _mpnode_attrs[1] + 1
            child_mpnode_attrs = (
                # Reserve the child's tree position up front.
                Page._get_path(_mpnode_attrs[0], newdepth, numchild),
                newdepth
            )
            numchild += 1
            child_page.create_alias(
                recursive=True,
                parent=alias,
                update_locale=update_locale,
                user=user,
                log_action=log_action,
                reset_translation_key=reset_translation_key,
                _mpnode_attrs=child_mpnode_attrs
            )

        if numchild > 0:
            alias.numchild = numchild
            alias.save(clean=False, update_fields=['numchild'])

    return alias

create_alias.alters_data = True
@transaction.atomic
def copy_for_translation(self, locale, copy_parents=False, alias=False, exclude_fields=None):
    """Create a copy of this page in the given ``locale``, under the
    translated version of its parent.

    If the parent has no translation: raises ``ParentNotTranslatedError``
    unless ``copy_parents`` is True, in which case parent translations are
    created recursively as aliases.  With ``alias=True`` the new page is an
    alias (kept in sync with the original); otherwise it is an independent
    draft copy.  Translation keys are preserved so the copy is linked to the
    original as its translation.
    """
    # Find the translated version of the parent page to create the new page under
    parent = self.get_parent().specific
    slug = self.slug
    if not parent.is_root():
        try:
            translated_parent = parent.get_translation(locale)
        except parent.__class__.DoesNotExist:
            if not copy_parents:
                raise ParentNotTranslatedError

            translated_parent = parent.copy_for_translation(
                locale, copy_parents=True, alias=True
            )
    else:
        # Don't duplicate the root page for translation.
        translated_parent = parent

        # Append language code to slug as the new page
        # will be created in the same section as the existing one
        slug += "-" + locale.language_code

    # Find available slug for new page
    slug = find_available_slug(translated_parent, slug)

    if alias:
        return self.create_alias(
            parent=translated_parent,
            update_slug=slug,
            update_locale=locale,
            reset_translation_key=False,
        )

    else:
        # Update locale on translatable child objects as well
        def process_child_object(
            original_page, page_copy, child_relation, child_object
        ):
            if isinstance(child_object, TranslatableMixin):
                child_object.locale = locale

        return self.copy(
            to=translated_parent,
            update_attrs={
                "locale": locale,
                "slug": slug,
            },
            copy_revisions=False,
            keep_live=False,
            reset_translation_key=False,
            process_child_object=process_child_object,
            exclude_fields=exclude_fields,
        )

copy_for_translation.alters_data = True
def permissions_for_user(self, user):
    """Return a PagePermissionTester-style helper describing what ``user``
    may do with this page."""
    user_perms = UserPagePermissionsProxy(user)
    return user_perms.for_page(self)
def make_preview_request(self, original_request=None, preview_mode=None, extra_request_attrs=None):
    """Render this page through a synthetic (dummy) request for previewing.

    Builds a fake WSGI request (optionally inheriting headers from
    ``original_request``), runs it through the full middleware stack, and
    dispatches to ``serve_preview`` with the given ``preview_mode``.
    ``extra_request_attrs`` are set as attributes on the dummy request.
    """
    dummy_meta = self._get_dummy_headers(original_request)
    request = WSGIRequest(dummy_meta)

    # Add a flag to let middleware know that this is a dummy request.
    request.is_dummy = True

    if extra_request_attrs:
        for k, v in extra_request_attrs.items():
            setattr(request, k, v)

    page = self

    # Build a custom django.core.handlers.BaseHandler subclass that invokes serve_preview as
    # the eventual view function called at the end of the middleware chain, rather than going
    # through the URL resolver
    class Handler(BaseHandler):
        def _get_response(self, request):
            response = page.serve_preview(request, preview_mode)
            if hasattr(response, 'render') and callable(response.render):
                response = response.render()
            return response

    # Invoke this custom handler.
    handler = Handler()
    handler.load_middleware()
    return handler.get_response(request)
def _get_dummy_headers(self, original_request=None):
    """Build a WSGI ``environ``-style dict for a dummy preview request.

    Derives host/port/scheme from this page's full URL when routable,
    otherwise falls back to ALLOWED_HOSTS (or 'localhost').  Selected
    headers are copied over from ``original_request`` when provided.
    """
    url = self._get_dummy_header_url(original_request)
    if url:
        url_info = urlparse(url)
        hostname = url_info.hostname
        path = url_info.path
        port = url_info.port or (443 if url_info.scheme == 'https' else 80)
        scheme = url_info.scheme
    else:
        # Cannot determine a URL to this page - cobble one together based on
        # whatever we find in ALLOWED_HOSTS
        try:
            hostname = settings.ALLOWED_HOSTS[0]
            if hostname == '*':
                # '*' is a valid value to find in ALLOWED_HOSTS[0], but it's not a valid domain name.
                # So we pretend it isn't there and fall through to 'localhost'.
                raise IndexError
        except IndexError:
            hostname = 'localhost'
        path = '/'
        port = 80
        scheme = 'http'

    http_host = hostname
    if port != (443 if scheme == 'https' else 80):
        # Non-default port must be included in the Host header.
        http_host = '%s:%s' % (http_host, port)
    dummy_values = {
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': path,
        'SERVER_NAME': hostname,
        'SERVER_PORT': port,
        'SERVER_PROTOCOL': 'HTTP/1.1',
        'HTTP_HOST': http_host,
        'wsgi.version': (1, 0),
        'wsgi.input': StringIO(),
        'wsgi.errors': StringIO(),
        'wsgi.url_scheme': scheme,
        'wsgi.multithread': True,
        'wsgi.multiprocess': True,
        'wsgi.run_once': False,
    }

    # Add important values from the original request object, if it was provided.
    HEADERS_FROM_ORIGINAL_REQUEST = [
        'REMOTE_ADDR', 'HTTP_X_FORWARDED_FOR', 'HTTP_COOKIE', 'HTTP_USER_AGENT', 'HTTP_AUTHORIZATION',
        'wsgi.version', 'wsgi.multithread', 'wsgi.multiprocess', 'wsgi.run_once',
    ]
    if settings.SECURE_PROXY_SSL_HEADER:
        HEADERS_FROM_ORIGINAL_REQUEST.append(settings.SECURE_PROXY_SSL_HEADER[0])
    if original_request:
        for header in HEADERS_FROM_ORIGINAL_REQUEST:
            if header in original_request.META:
                dummy_values[header] = original_request.META[header]

    return dummy_values
def _get_dummy_header_url(self, original_request=None):
    """Return the URL used to derive dummy request headers for previews
    (override point; defaults to this page's full URL, may be None)."""
    return self.full_url
# Default (internal_name, display_name) choices for the preview mode selector.
DEFAULT_PREVIEW_MODES = [('', _('Default'))]

@property
def preview_modes(self):
    """List of (internal_name, display_name) preview modes available for this
    page type; subclasses may override to offer multiple modes."""
    return Page.DEFAULT_PREVIEW_MODES
@property
def default_preview_mode(self):
    """Internal name of the first entry in preview_modes (used as the default)."""
    return self.preview_modes[0][0]
def is_previewable(self):
# It's possible that this will be called from a listing page using a plain Page queryset -
# a check of the property at the class level indicates that preview_modes has been
# overridden from whatever type we're currently in.
page = self
if page.specific_class.preview_modes != type(page).preview_modes:
page = page.specific
return bool(page.preview_modes)
def serve_preview(self, request, mode_name):
request.is_preview = True
response = self.serve(request)
patch_cache_control(response, private=True)
return response
def get_cached_paths(self):
return ['/']
def get_sitemap_urls(self, request=None):
return [
{
'location': self.get_full_url(request),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
    def get_static_site_paths(self):
        """
        This is a generator of URL paths to feed into a static site generator.
        Yields '/' for this page, then recursively yields the paths of all
        live children, each prefixed with the child's slug.
        """
        yield '/'
        for child in self.get_children().live():
            for path in child.specific.get_static_site_paths():
                yield '/' + child.slug + path
    def get_ancestors(self, inclusive=False):
        """Return a queryset of this page's ancestors; include the page itself
        when ``inclusive`` is True."""
        return Page.objects.ancestor_of(self, inclusive)
    def get_descendants(self, inclusive=False):
        """Return a queryset of this page's descendants; include the page
        itself when ``inclusive`` is True."""
        return Page.objects.descendant_of(self, inclusive)
    def get_siblings(self, inclusive=True):
        """Return a queryset of this page's siblings; include the page itself
        by default."""
        return Page.objects.sibling_of(self, inclusive)
    def get_next_siblings(self, inclusive=False):
        """Return siblings that come after this page in tree order
        (treebeard materialised ``path`` ordering)."""
        return self.get_siblings(inclusive).filter(path__gte=self.path).order_by('path')
    def get_prev_siblings(self, inclusive=False):
        """Return siblings that come before this page, nearest first
        (descending ``path`` order)."""
        return self.get_siblings(inclusive).filter(path__lte=self.path).order_by('-path')
def get_view_restrictions(self):
page_ids_to_check = set()
def add_page_to_check_list(page):
if page.alias_of:
add_page_to_check_list(page.alias_of)
else:
page_ids_to_check.add(page.id)
add_page_to_check_list(self)
for page in self.get_ancestors().only('alias_of'):
add_page_to_check_list(page)
return PageViewRestriction.objects.filter(page_id__in=page_ids_to_check)
    # Template rendered when a view restriction asks the visitor for a password;
    # overridable via the PASSWORD_REQUIRED_TEMPLATE setting.
    password_required_template = getattr(settings, 'PASSWORD_REQUIRED_TEMPLATE', 'wagtailcore/password_required.html')

    def serve_password_required_response(self, request, form, action_url):
        """
        Serve a response telling the user that they must supply a password to
        view this page.

        ``form`` - Django form containing the password input (plus any hidden
        fields that must be output on the template).
        ``action_url`` - URL that this form should be POSTed to.
        """
        context = self.get_context(request)
        context['form'] = form
        context['action_url'] = action_url
        return TemplateResponse(request, self.password_required_template, context)
    def with_content_json(self, content_json):
        """
        Return a new instance of this page's specific class with field content
        populated from ``content_json`` (a JSON string, as stored on a
        revision).

        Values that belong to the page record as a whole - rather than to any
        one revision - are preserved from ``self``: identity (id/pk,
        content_type), tree position (path, depth, numchild, url_path),
        draft_title, live state flags, ownership, lock state, publication
        timestamps, translation key/locale and alias target.
        """
        obj = self.specific_class.from_json(content_json)
        obj.id = self.id
        obj.pk = self.pk
        obj.content_type = self.content_type
        obj.path = self.path
        obj.depth = self.depth
        obj.numchild = self.numchild
        # existing tree position - recompute url_path from the current parent
        obj.set_url_path(self.get_parent())
        # Ensure other values that are meaningful for the page as a whole (rather than
        # to a specific revision) are preserved
        obj.draft_title = self.draft_title
        obj.live = self.live
        obj.has_unpublished_changes = self.has_unpublished_changes
        obj.owner = self.owner
        obj.locked = self.locked
        obj.locked_by = self.locked_by
        obj.locked_at = self.locked_at
        obj.latest_revision_created_at = self.latest_revision_created_at
        obj.first_published_at = self.first_published_at
        obj.translation_key = self.translation_key
        obj.locale = self.locale
        obj.alias_of_id = self.alias_of_id
        # Keep the page's current unresolved comments, but take each comment's
        # position from the revision's stored copy where one exists.
        revision_comments = obj.comments
        page_comments = self.comments.filter(resolved_at__isnull=True)
        for comment in page_comments:
            # attempt to retrieve the comment position from the revision's stored version
            try:
                revision_comment = revision_comments.get(id=comment.id)
                comment.position = revision_comment.position
            except Comment.DoesNotExist:
                pass
        obj.comments = page_comments
        return obj
    @property
    def has_workflow(self):
        """True if this page or any ancestor has an active workflow assigned
        (always False when workflows are disabled via settings)."""
        if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
            return False
        return self.get_ancestors(inclusive=True).filter(workflowpage__isnull=False).filter(workflowpage__workflow__active=True).exists()
    def get_workflow(self):
        """Return the active workflow assigned to this page or its nearest
        ancestor, or None if there is none (or workflows are disabled)."""
        if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
            return None
        if hasattr(self, 'workflowpage') and self.workflowpage.workflow.active:
            return self.workflowpage.workflow
        else:
            try:
                # Deepest ancestor with an active workflow attached wins.
                workflow = self.get_ancestors().filter(workflowpage__isnull=False).filter(workflowpage__workflow__active=True).order_by(
                    '-depth').first().workflowpage.workflow
            except AttributeError:
                # first() returned None - no ancestor carries a workflow.
                workflow = None
            return workflow
    @property
    def workflow_in_progress(self):
        """True if a workflow is currently in progress on this page
        (always False when workflows are disabled via settings)."""
        if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
            return False
        return WorkflowState.objects.filter(page=self, status=WorkflowState.STATUS_IN_PROGRESS).exists()
    @property
    def current_workflow_state(self):
        """The active WorkflowState for this page (in progress or needing
        changes), or None."""
        if not getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
            return None
        try:
            return WorkflowState.objects.active().select_related("current_task_state__task").get(page=self)
        except WorkflowState.DoesNotExist:
            return
@property
def current_workflow_task_state(self):
current_workflow_state = self.current_workflow_state
if current_workflow_state and current_workflow_state.status == WorkflowState.STATUS_IN_PROGRESS and current_workflow_state.current_task_state:
return current_workflow_state.current_task_state.specific
@property
def current_workflow_task(self):
current_workflow_task_state = self.current_workflow_task_state
if current_workflow_task_state:
return current_workflow_task_state.task.specific
    class Meta:
        verbose_name = _('page')
        verbose_name_plural = _('pages')
        # A page is unique per translation key within each locale.
        unique_together = [("translation_key", "locale")]
class Orderable(models.Model):
    """Abstract mixin giving a model an explicit, admin-sortable position."""
    # Null until a position is assigned; not exposed on forms.
    sort_order = models.IntegerField(null=True, blank=True, editable=False)
    sort_order_field = 'sort_order'

    class Meta:
        abstract = True
        ordering = ['sort_order']
class SubmittedRevisionsManager(models.Manager):
    """Manager restricted to page revisions awaiting moderation."""
    def get_queryset(self):
        return super().get_queryset().filter(submitted_for_moderation=True)
class PageRevision(models.Model):
    """A snapshot of a page's content (serialised as JSON) at a point in time.

    Revisions power drafts, moderation and scheduled publishing: the live page
    record is updated by calling :meth:`publish` on one of its revisions.
    """
    page = models.ForeignKey('Page', verbose_name=_('page'), related_name='revisions', on_delete=models.CASCADE)
    # True while this revision awaits moderator approval; at most one revision
    # per page carries this flag (enforced in save()).
    submitted_for_moderation = models.BooleanField(
        verbose_name=_('submitted for moderation'),
        default=False,
        db_index=True
    )
    created_at = models.DateTimeField(db_index=True, verbose_name=_('created at'))
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_('user'), null=True, blank=True,
        on_delete=models.SET_NULL
    )
    content_json = models.TextField(verbose_name=_('content JSON'))
    # When set, this revision has been approved to go live at that time.
    approved_go_live_at = models.DateTimeField(
        verbose_name=_('approved go live at'),
        null=True,
        blank=True,
        db_index=True
    )

    objects = models.Manager()
    submitted_revisions = SubmittedRevisionsManager()

    def save(self, user=None, *args, **kwargs):
        """Save the revision; defaults created_at to now, keeps the
        submitted-for-moderation flag exclusive to this revision, and logs a
        scheduled-publish cancellation when approved_go_live_at is cleared."""
        # We cannot use auto_now_add for created_at, as that would override a
        # value set explicitly before saving.
        if self.created_at is None:
            self.created_at = timezone.now()
        super().save(*args, **kwargs)
        if self.submitted_for_moderation:
            # ensure that it's the only revision of this page flagged for moderation
            self.page.revisions.exclude(id=self.id).update(submitted_for_moderation=False)
        if (
            self.approved_go_live_at is None
            and 'update_fields' in kwargs and 'approved_go_live_at' in kwargs['update_fields']
        ):
            # approved_go_live_at was explicitly cleared - log the cancellation
            # of the scheduled publish.
            page = self.as_page_object()
            PageLogEntry.objects.log_action(
                instance=page,
                action='wagtail.schedule.cancel',
                data={
                    'revision': {
                        'id': self.id,
                        'created': self.created_at.strftime("%d %b %Y %H:%M"),
                        'go_live_at': page.go_live_at.strftime("%d %b %Y %H:%M") if page.go_live_at else None,
                    }
                },
                user=user,
                revision=self,
            )

    def as_page_object(self):
        """Return a (non-persisted) page object with content from this revision."""
        return self.page.specific.with_content_json(self.content_json)

    def approve_moderation(self, user=None):
        """Approve a revision that is awaiting moderation: log the approval
        and publish the revision."""
        if self.submitted_for_moderation:
            logger.info("Page moderation approved: \"%s\" id=%d revision_id=%d", self.page.title, self.page.id, self.id)
            PageLogEntry.objects.log_action(
                instance=self.as_page_object(),
                action='wagtail.moderation.approve',
                user=user,
                revision=self,
            )
            self.publish()

    def reject_moderation(self, user=None):
        """Reject a revision that is awaiting moderation and clear its flag."""
        if self.submitted_for_moderation:
            logger.info("Page moderation rejected: \"%s\" id=%d revision_id=%d", self.page.title, self.page.id, self.id)
            PageLogEntry.objects.log_action(
                instance=self.as_page_object(),
                action='wagtail.moderation.reject',
                user=user,
                revision=self,
            )
            self.submitted_for_moderation = False
            self.save(update_fields=['submitted_for_moderation'])

    def is_latest_revision(self):
        """True if this is the most recent revision of its page
        (ties on created_at are broken by id)."""
        if self.id is None:
            # special case: a revision without an id is presumed to be newly-created and is thus
            # newer than any revision with an id
            return True
        latest_revision = PageRevision.objects.filter(page_id=self.page_id).order_by('-created_at', '-id').first()
        return (latest_revision == self)

    def delete(self):
        """Delete the revision, first reassigning any comments created on it
        to the next revision (they may still apply if unresolved)."""
        try:
            next_revision = self.get_next()
        except PageRevision.DoesNotExist:
            next_revision = None
        if next_revision:
            self.created_comments.all().update(revision_created=next_revision)
        return super().delete()

    def publish(self, user=None, changed=True, log_action=True, previous_revision=None):
        """
        Publish this revision: update the live page record from the revision
        content, or - when the page has a future go_live_at - schedule it by
        setting approved_go_live_at instead.

        ``changed`` - whether the content has actually changed (recorded on the log entry).
        ``log_action`` - False to skip logging; True for the default
        'wagtail.publish' action; or a custom action string.
        ``previous_revision`` - revision being reverted to, if this publish is a revert.
        """
        page = self.as_page_object()

        def log_scheduling_action(revision, user=None, changed=changed):
            # Helper: record a 'publish.schedule' log entry for this revision.
            PageLogEntry.objects.log_action(
                instance=page,
                action='wagtail.publish.schedule',
                user=user,
                data={
                    'revision': {
                        'id': revision.id,
                        'created': revision.created_at.strftime("%d %b %Y %H:%M"),
                        'go_live_at': page.go_live_at.strftime("%d %b %Y %H:%M"),
                        'has_live_version': page.live,
                    }
                },
                revision=revision,
                content_changed=changed,
            )

        if page.go_live_at and page.go_live_at > timezone.now():
            # Scheduled publish: don't go live now.
            page.has_unpublished_changes = True
            # Instead set the approved_go_live_at of this revision
            self.approved_go_live_at = page.go_live_at
            self.save()
            # And clear the approved_go_live_at of any other revisions
            page.revisions.exclude(id=self.id).update(approved_go_live_at=None)
            # if we are updating a currently live page skip the rest
            if page.live_revision:
                # Log scheduled publishing
                if log_action:
                    log_scheduling_action(self, user, changed)
                return
            # if we have a go_live in the future don't make the page live
            page.live = False
        else:
            page.live = True
            # The page has unpublished changes iff there are newer revisions.
            page.has_unpublished_changes = not self.is_latest_revision()
            # Going live clears any pending scheduled publish on all revisions.
            page.revisions.update(approved_go_live_at=None)
        page.expired = False

        # Set first_published_at, last_published_at and live_revision
        # if the page is being published now
        if page.live:
            now = timezone.now()
            page.last_published_at = now
            page.live_revision = self
            if page.first_published_at is None:
                page.first_published_at = now
            # Work out the old title (for the rename log entry below) by
            # comparing against the previous revision, if any.
            if previous_revision:
                previous_revision_page = previous_revision.as_page_object()
                old_page_title = previous_revision_page.title if page.title != previous_revision_page.title else None
            else:
                try:
                    previous = self.get_previous()
                except PageRevision.DoesNotExist:
                    previous = None
                old_page_title = previous.page.title if previous and page.title != previous.page.title else None
        else:
            # Unset live_revision if the page is going live in the future
            page.live_revision = None
        page.save()
        # Re-save comments so their stored positions reflect the published content.
        for comment in page.comments.all().only('position'):
            comment.save(update_fields=['position'])
        self.submitted_for_moderation = False
        page.revisions.update(submitted_for_moderation=False)
        # Cancel any in-progress workflow unless configured otherwise.
        workflow_state = page.current_workflow_state
        if workflow_state and getattr(settings, 'WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH', True):
            workflow_state.cancel(user=user)
        if page.live:
            page_published.send(sender=page.specific_class, instance=page.specific, revision=self)
            # Update alias pages
            page.update_aliases(revision=self, user=user, _content_json=self.content_json)
            if log_action:
                data = None
                if previous_revision:
                    data = {
                        'revision': {
                            'id': previous_revision.id,
                            'created': previous_revision.created_at.strftime("%d %b %Y %H:%M")
                        }
                    }
                if old_page_title:
                    # Title changed on publish - log a rename alongside the publish.
                    data = data or {}
                    data['title'] = {
                        'old': old_page_title,
                        'new': page.title,
                    }
                    PageLogEntry.objects.log_action(
                        instance=page,
                        action='wagtail.rename',
                        user=user,
                        data=data,
                        revision=self,
                    )
                PageLogEntry.objects.log_action(
                    instance=page,
                    action=log_action if isinstance(log_action, str) else 'wagtail.publish',
                    user=user,
                    data=data,
                    revision=self,
                    content_changed=changed,
                )
            logger.info("Page published: \"%s\" id=%d revision_id=%d", page.title, page.id, self.id)
        elif page.go_live_at:
            logger.info(
                "Page scheduled for publish: \"%s\" id=%d revision_id=%d go_live_at=%s",
                page.title,
                page.id,
                self.id,
                page.go_live_at.isoformat()
            )
            if log_action:
                log_scheduling_action(self, user, changed)

    def get_previous(self):
        """Return the previous revision of the same page by created_at."""
        return self.get_previous_by_created_at(page=self.page)

    def get_next(self):
        """Return the next revision of the same page by created_at."""
        return self.get_next_by_created_at(page=self.page)

    def __str__(self):
        return '"' + str(self.page) + '" at ' + str(self.created_at)

    class Meta:
        verbose_name = _('page revision')
        verbose_name_plural = _('page revisions')
# Permission types assignable on pages, as
# (identifier, short label, long descriptive label) triples.
PAGE_PERMISSION_TYPES = [
    ('add', _("Add"), _("Add/edit pages you own")),
    ('edit', _("Edit"), _("Edit any page")),
    ('publish', _("Publish"), _("Publish any page")),
    ('bulk_delete', _("Bulk delete"), _("Delete pages with children")),
    ('lock', _("Lock"), _("Lock/unlock pages you've locked")),
    ('unlock', _("Unlock"), _("Unlock any page")),
]

# Django-style choices list (identifier, long label) derived from the above.
PAGE_PERMISSION_TYPE_CHOICES = [
    (identifier, long_label)
    for identifier, short_label, long_label in PAGE_PERMISSION_TYPES
]
class GroupPagePermission(models.Model):
    """Grants one permission type to a group over a page (and, by convention
    elsewhere in this module, that page's subtree)."""
    group = models.ForeignKey(Group, verbose_name=_('group'), related_name='page_permissions', on_delete=models.CASCADE)
    page = models.ForeignKey('Page', verbose_name=_('page'), related_name='group_permissions', on_delete=models.CASCADE)
    permission_type = models.CharField(
        verbose_name=_('permission type'),
        max_length=20,
        choices=PAGE_PERMISSION_TYPE_CHOICES
    )

    class Meta:
        unique_together = ('group', 'page', 'permission_type')
        verbose_name = _('group page permission')
        verbose_name_plural = _('group page permissions')

    def __str__(self):
        return "Group %d ('%s') has permission '%s' on page %d ('%s')" % (
            self.group.id, self.group,
            self.permission_type,
            self.page.id, self.page
        )
class UserPagePermissionsProxy:
    """Caches the page permissions of one user so repeated checks don't hit
    the database again.

    Note: ``self.permissions`` is only populated for active, non-superuser
    users; every method below guards on is_active / is_superuser before
    touching it.
    """
    def __init__(self, user):
        self.user = user
        if user.is_active and not user.is_superuser:
            self.permissions = GroupPagePermission.objects.filter(group__user=self.user).select_related('page')

    def revisions_for_moderation(self):
        """Return a queryset of page revisions awaiting moderation that this
        user has publish permission on."""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return PageRevision.objects.none()
        if self.user.is_superuser:
            return PageRevision.submitted_revisions.all()
        # Paths of pages on which the user holds direct publish permission
        # (publish permission covers the whole subtree below each page).
        publishable_pages_paths = self.permissions.filter(
            permission_type='publish'
        ).values_list('page__path', flat=True).distinct()
        if not publishable_pages_paths:
            return PageRevision.objects.none()
        # OR together a startswith filter per publishable subtree.
        only_my_sections = Q(page__path__startswith=publishable_pages_paths[0])
        for page_path in publishable_pages_paths[1:]:
            only_my_sections = only_my_sections | Q(page__path__startswith=page_path)
        return PageRevision.submitted_revisions.filter(only_my_sections)

    def for_page(self, page):
        """Return a PagePermissionTester for this user on the given page."""
        return PagePermissionTester(self, page)

    def explorable_pages(self):
        """Return a queryset of pages the user may see in the explorer:
        every subtree they hold add/edit/publish/lock permission on, plus the
        ancestors of those pages (so deeply nested pages stay reachable)."""
        # Deal with the trivial cases first...
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        explorable_pages = Page.objects.none()
        for perm in self.permissions.filter(
            Q(permission_type="add")
            | Q(permission_type="edit")
            | Q(permission_type="publish")
            | Q(permission_type="lock")
        ):
            explorable_pages |= Page.objects.descendant_of(
                perm.page, inclusive=True
            )
        # Add ancestors of permissioned pages so the user can navigate to them.
        page_permissions = Page.objects.filter(group_permissions__in=self.permissions)
        for page in page_permissions:
            explorable_pages |= page.get_ancestors()
        # Trim top-level ancestors above the first common ancestor.
        fca_page = page_permissions.first_common_ancestor()
        explorable_pages = explorable_pages.filter(path__startswith=fca_page.path)
        return explorable_pages

    def editable_pages(self):
        """Return a queryset of the pages this user may edit: pages they own
        within 'add' subtrees, and all pages within 'edit' subtrees."""
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        editable_pages = Page.objects.none()
        for perm in self.permissions.filter(permission_type='add'):
            editable_pages |= Page.objects.descendant_of(perm.page, inclusive=True).filter(owner=self.user)
        for perm in self.permissions.filter(permission_type='edit'):
            editable_pages |= Page.objects.descendant_of(perm.page, inclusive=True)
        return editable_pages

    def can_edit_pages(self):
        """True if the user may edit at least one page."""
        return self.editable_pages().exists()

    def publishable_pages(self):
        """Return a queryset of the pages this user may publish."""
        if not self.user.is_active:
            return Page.objects.none()
        if self.user.is_superuser:
            return Page.objects.all()
        publishable_pages = Page.objects.none()
        for perm in self.permissions.filter(permission_type='publish'):
            publishable_pages |= Page.objects.descendant_of(perm.page, inclusive=True)
        return publishable_pages

    def can_publish_pages(self):
        """True if the user may publish at least one page."""
        return self.publishable_pages().exists()

    def can_remove_locks(self):
        """True if the user may remove other users' page locks."""
        if self.user.is_superuser:
            return True
        if not self.user.is_active:
            return False
        else:
            return self.permissions.filter(permission_type='unlock').exists()
class PagePermissionTester:
    """Answers permission questions for one (user, page) pair, using the
    permission set cached on a UserPagePermissionsProxy.

    Note: ``self.permissions`` is only populated for active, non-superuser
    users; methods guard on is_active / is_superuser before touching it.
    """
    def __init__(self, user_perms, page):
        self.user = user_perms.user
        self.user_perms = user_perms
        self.page = page
        self.page_is_root = page.depth == 1  # Equivalent to page.is_root()
        if self.user.is_active and not self.user.is_superuser:
            # Permission types granted on this page or any ancestor
            # (a grant applies to the whole subtree beneath the granted page).
            self.permissions = set(
                perm.permission_type for perm in user_perms.permissions
                if self.page.path.startswith(perm.page.path)
            )

    def user_has_lock(self):
        """True if this user is the one holding the page's edit lock."""
        return self.page.locked_by_id == self.user.pk

    def page_locked(self):
        """True if the page is locked against edits by this user (by a
        workflow task, or by an explicit lock held by someone else)."""
        current_workflow_task = self.page.current_workflow_task
        if current_workflow_task:
            if current_workflow_task.page_locked_for_user(self.page, self.user):
                return True
        if not self.page.locked:
            return False
        if getattr(settings, 'WAGTAILADMIN_GLOBAL_PAGE_EDIT_LOCK', False):
            # Global lock: even the lock owner may not edit.
            return True
        else:
            return not self.user_has_lock()

    def can_add_subpage(self):
        if not self.user.is_active:
            return False
        specific_class = self.page.specific_class
        if specific_class is None or not specific_class.creatable_subpage_models():
            return False
        return self.user.is_superuser or ('add' in self.permissions)

    def can_edit(self):
        if not self.user.is_active:
            return False
        if self.page_is_root:  # root node is not a page and can never be edited, even by superusers
            return False
        if self.user.is_superuser:
            return True
        if 'edit' in self.permissions:
            return True
        if 'add' in self.permissions and self.page.owner_id == self.user.pk:
            return True
        # A workflow task may grant editor access beyond the static permissions.
        current_workflow_task = self.page.current_workflow_task
        if current_workflow_task:
            if current_workflow_task.user_can_access_editor(self.page, self.user):
                return True
        return False

    def can_delete(self, ignore_bulk=False):
        if not self.user.is_active:
            return False
        if self.page_is_root:  # root node is not a page and can never be deleted, even by superusers
            return False
        if self.user.is_superuser:
            return True
        if 'bulk_delete' not in self.permissions and not self.page.is_leaf() and not ignore_bulk:
            # Deleting a page with children requires bulk_delete permission.
            return False
        if 'edit' in self.permissions:
            # if the user does not have publish permission, we also need to confirm that there
            # are no published pages here
            if 'publish' not in self.permissions:
                pages_to_delete = self.page.get_descendants(inclusive=True)
                if pages_to_delete.live().exists():
                    return False
            return True
        elif 'add' in self.permissions:
            pages_to_delete = self.page.get_descendants(inclusive=True)
            if 'publish' in self.permissions:
                # we don't care about live state, but all pages must be owned by this user
                # (i.e. eliminating pages owned by this user must give us the empty set)
                return not pages_to_delete.exclude(owner=self.user).exists()
            else:
                # all pages must be owned by this user and non-live
                # (i.e. eliminating non-live pages owned by this user must give us the empty set)
                return not pages_to_delete.exclude(live=False, owner=self.user).exists()
        else:
            return False

    def can_unpublish(self):
        if not self.user.is_active:
            return False
        if (not self.page.live) or self.page_is_root:
            return False
        if self.page_locked():
            return False
        return self.user.is_superuser or ('publish' in self.permissions)

    def can_publish(self):
        if not self.user.is_active:
            return False
        if self.page_is_root:
            return False
        return self.user.is_superuser or ('publish' in self.permissions)

    def can_submit_for_moderation(self):
        """True if the page can be submitted to a workflow by this user."""
        return not self.page_locked() and self.page.has_workflow and not self.page.workflow_in_progress

    def can_set_view_restrictions(self):
        return self.can_publish()

    def can_unschedule(self):
        return self.can_publish()

    def can_lock(self):
        if self.user.is_superuser:
            return True
        # An in-progress workflow task decides lock rights while it is active.
        current_workflow_task = self.page.current_workflow_task
        if current_workflow_task:
            return current_workflow_task.user_can_lock(self.page, self.user)
        if 'lock' in self.permissions:
            return True
        return False

    def can_unlock(self):
        if self.user.is_superuser:
            return True
        if self.user_has_lock():
            return True
        current_workflow_task = self.page.current_workflow_task
        if current_workflow_task:
            return current_workflow_task.user_can_unlock(self.page, self.user)
        if 'unlock' in self.permissions:
            return True
        return False

    def can_publish_subpage(self):
        """
        Niggly special case for creating and publishing a page in one go.
        Differs from can_publish in that we want to be able to publish subpages of root, but not
        to be able to publish root itself. (Also, can_publish_subpage returns false if the page
        does not allow subpages at all.)
        """
        if not self.user.is_active:
            return False
        specific_class = self.page.specific_class
        if specific_class is None or not specific_class.creatable_subpage_models():
            return False
        return self.user.is_superuser or ('publish' in self.permissions)

    def can_reorder_children(self):
        """
        Reordering children is (temporarily) associated with publish permission.
        """
        return self.can_publish_subpage()

    def can_move(self):
        """
        Moving a page should be logically equivalent to deleting and re-adding it (and all its children).
        In practice, we omit the bulk-delete check, as the overhead can be vast.
        """
        return self.can_delete(ignore_bulk=True)

    def can_copy(self):
        return not self.page_is_root

    def can_move_to(self, destination):
        # reject the logically impossible cases first
        if self.page == destination or destination.is_descendant_of(self.page):
            return False
        # reject moves that are forbidden by subpage_types / parent_page_types rules
        # (these rules apply to superusers too)
        if not self.page.specific.can_move_to(destination):
            return False
        # shortcut the trivial 'everything' / 'nothing' permissions
        if not self.user.is_active:
            return False
        if self.user.is_superuser:
            return True
        # check that the page can be moved at all
        if not self.can_move():
            return False
        # Inspect permissions on the destination
        destination_perms = self.user_perms.for_page(destination)
        # we always need at least add permission in the target
        if 'add' not in destination_perms.permissions:
            return False
        if self.page.live or self.page.get_descendants().filter(live=True).exists():
            # moving this page will entail publishing within the destination section
            return ('publish' in destination_perms.permissions)
        else:
            # no publishing required, so the already-tested 'add' permission is sufficient
            return True

    def can_copy_to(self, destination, recursive=False):
        # reject the logically impossible cases first
        # recursive can't copy to the same tree otherwise it will be on infinite loop
        if recursive and (self.page == destination or destination.is_descendant_of(self.page)):
            return False
        if not self.user.is_active:
            return False
        if not self.page.specific_class.can_create_at(destination):
            return False
        if self.user.is_superuser:
            return True
        destination_perms = self.user_perms.for_page(destination)
        if not destination.specific_class.creatable_subpage_models():
            return False
        if 'add' not in destination_perms.permissions:
            return False
        return True

    def can_view_revisions(self):
        return not self.page_is_root
class PageViewRestriction(BaseViewRestriction):
    """A front-end access restriction attached to a page (and, via
    get_view_restrictions, its subtree)."""
    page = models.ForeignKey(
        'Page', verbose_name=_('page'), related_name='view_restrictions', on_delete=models.CASCADE
    )

    # Session key under which passed restrictions are remembered for a visitor.
    passed_view_restrictions_session_key = 'passed_page_view_restrictions'

    class Meta:
        verbose_name = _('page view restriction')
        verbose_name_plural = _('page view restrictions')

    def save(self, user=None, **kwargs):
        """
        Custom save handler to include logging.
        :param user: the user adding/updating the view restriction
        """
        specific_instance = self.page.specific
        is_new = self.id is None
        super().save(**kwargs)
        if specific_instance:
            PageLogEntry.objects.log_action(
                instance=specific_instance,
                action='wagtail.view_restriction.create' if is_new else 'wagtail.view_restriction.edit',
                user=user,
                data={
                    'restriction': {
                        'type': self.restriction_type,
                        'title': force_str(dict(self.RESTRICTION_CHOICES).get(self.restriction_type))
                    }
                }
            )

    def delete(self, user=None, **kwargs):
        """
        Custom delete handler to include logging.
        :param user: the user removing the view restriction
        """
        specific_instance = self.page.specific
        if specific_instance:
            PageLogEntry.objects.log_action(
                instance=specific_instance,
                action='wagtail.view_restriction.delete',
                user=user,
                data={
                    'restriction': {
                        'type': self.restriction_type,
                        'title': force_str(dict(self.RESTRICTION_CHOICES).get(self.restriction_type))
                    }
                }
            )
        return super().delete(**kwargs)
class WorkflowPage(models.Model):
    """Assigns a Workflow to a page; the workflow applies to that page's
    subtree except where a descendant has its own WorkflowPage."""
    page = models.OneToOneField(
        'Page',
        verbose_name=_('page'),
        on_delete=models.CASCADE,
        primary_key=True,
        # NOTE(review): unique=True is redundant alongside primary_key=True
        # (a primary key is already unique); removing it would require a migration.
        unique=True
    )
    workflow = models.ForeignKey(
        'Workflow',
        related_name='workflow_pages',
        verbose_name=_('workflow'),
        on_delete=models.CASCADE,
    )

    def get_pages(self):
        """
        Returns a queryset of pages that are affected by this WorkflowPage link:
        the page's descendants (inclusive), excluding any subtree that has its
        own WorkflowPage.
        """
        descendant_pages = Page.objects.descendant_of(self.page, inclusive=True)
        descendant_workflow_pages = WorkflowPage.objects.filter(page_id__in=descendant_pages.values_list('id', flat=True)).exclude(pk=self.pk)
        for path, depth in descendant_workflow_pages.values_list('page__path', 'page__depth'):
            # Exclude the subtree rooted at each overriding WorkflowPage.
            descendant_pages = descendant_pages.exclude(path__startswith=path, depth__gte=depth)
        return descendant_pages

    class Meta:
        verbose_name = _('workflow page')
        verbose_name_plural = _('workflow pages')
class WorkflowTask(Orderable):
    """Through-model ordering the Tasks within a Workflow."""
    # NOTE(review): verbose_name=_('workflow_tasks') looks like a copy/paste of
    # the related_name; one would expect something like _('workflow task') —
    # confirm against upstream before changing (it affects translations).
    workflow = ParentalKey('Workflow', on_delete=models.CASCADE, verbose_name=_('workflow_tasks'),
                           related_name='workflow_tasks')
    # Only active tasks may be attached to a workflow.
    task = models.ForeignKey('Task', on_delete=models.CASCADE, verbose_name=_('task'), related_name='workflow_tasks',
                             limit_choices_to={'active': True})

    class Meta(Orderable.Meta):
        unique_together = [('workflow', 'task')]
        verbose_name = _('workflow task order')
        verbose_name_plural = _('workflow task orders')
class TaskManager(models.Manager):
    """Manager for Task with a shortcut for active tasks."""
    def active(self):
        # Only active tasks can be attached to workflows.
        return self.filter(active=True)
class Task(models.Model):
    """Base class for workflow tasks.

    Task instances are polymorphic: ``content_type`` records the concrete
    subclass (e.g. GroupApprovalTask) and ``specific`` returns the instance
    as that subclass. Subclasses override the user_can_* / get_actions hooks
    to define behaviour.
    """
    name = models.CharField(max_length=255, verbose_name=_('name'))
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='wagtail_tasks',
        on_delete=models.CASCADE
    )
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active tasks can be added to workflows. Deactivating a task does not remove it from existing workflows."))
    objects = TaskManager()

    # Fields shown on the admin create/edit form; name is read-only on edit.
    admin_form_fields = ['name']
    admin_form_readonly_on_edit_fields = ['name']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.id:
            # this model is being newly created rather than retrieved from the db
            if not self.content_type_id:
                # set content type to correctly represent the model class
                # that this was created as
                self.content_type = ContentType.objects.get_for_model(self)

    def __str__(self):
        return self.name

    @property
    def workflows(self):
        """All the workflows that include this task."""
        return Workflow.objects.filter(workflow_tasks__task=self)

    @property
    def active_workflows(self):
        """The active workflows that include this task."""
        return Workflow.objects.active().filter(workflow_tasks__task=self)

    @classmethod
    def get_verbose_name(cls):
        """Return the human-readable name of this task model,
        e.g. "Group approval task"."""
        return capfirst(cls._meta.verbose_name)

    @cached_property
    def specific(self):
        """Return this Task as an instance of its most specific subclass."""
        # the ContentType.objects manager keeps a cache, so this should potentially
        # avoid a database lookup over doing self.content_type. I think.
        content_type = ContentType.objects.get_for_id(self.content_type_id)
        model_class = content_type.model_class()
        if model_class is None:
            # Cannot locate a model class for this content type. This might happen
            # if the codebase and database are out of sync (e.g. the model exists
            # on a different git branch and we haven't rolled back migrations before
            # switching branches); fall back on the generic Task.
            return self
        elif isinstance(self, model_class):
            return self
        else:
            return content_type.get_object_for_this_type(id=self.id)

    # Subclasses may set this to a TaskState subclass to customise state storage.
    task_state_class = None

    @classmethod
    def get_task_state_class(cls):
        # Fixed idiom: the first parameter of a classmethod receives the class
        # and is conventionally named ``cls`` (was misleadingly named ``self``).
        return cls.task_state_class or TaskState

    def start(self, workflow_state, user=None):
        """Start this task on the given workflow state: create an in-progress
        task state bound to the page's latest revision and emit task_submitted."""
        task_state = self.get_task_state_class()(workflow_state=workflow_state)
        task_state.status = TaskState.STATUS_IN_PROGRESS
        task_state.page_revision = workflow_state.page.get_latest_revision()
        task_state.task = self
        task_state.save()
        task_submitted.send(sender=task_state.specific.__class__, instance=task_state.specific, user=user)
        return task_state

    @transaction.atomic
    def on_action(self, task_state, user, action_name, **kwargs):
        """Perform an action (by name) on a task state; the base implementation
        understands 'approve' and 'reject'."""
        if action_name == 'approve':
            task_state.approve(user=user, **kwargs)
        elif action_name == 'reject':
            task_state.reject(user=user, **kwargs)

    def user_can_access_editor(self, page, user):
        """Hook: may ``user`` open the editor for ``page`` while this task is
        in progress? Base implementation: no."""
        return False

    def page_locked_for_user(self, page, user):
        """Hook: is ``page`` locked against edits by ``user`` during this task?
        Base implementation: no."""
        return False

    def user_can_lock(self, page, user):
        """Hook: may ``user`` lock ``page`` during this task? Base: no."""
        return False

    def user_can_unlock(self, page, user):
        """Hook: may ``user`` unlock ``page`` during this task? Base: no."""
        return False

    def get_actions(self, page, user):
        """Hook: actions ``user`` may take on this task for ``page``, as
        (action_name, action_label, requires_additional_data_from_modal)
        tuples. Base implementation: none."""
        return []

    def get_form_for_action(self, action):
        """Hook: form class used to collect extra data for ``action``."""
        return TaskStateCommentForm

    def get_template_for_action(self, action):
        """Hook: template rendered in the modal for ``action`` (empty = none)."""
        return ''

    def get_task_states_user_can_moderate(self, user, **kwargs):
        """Hook: task states of this task that ``user`` may moderate.
        Base implementation: none."""
        return TaskState.objects.none()

    @classmethod
    def get_description(cls):
        """Hook: human-readable description of this task type for the admin."""
        return ''

    @transaction.atomic
    def deactivate(self, user=None):
        """Set this task inactive and cancel all its in-progress task states."""
        self.active = False
        self.save()
        in_progress_states = TaskState.objects.filter(task=self, status=TaskState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)

    class Meta:
        verbose_name = _('task')
        verbose_name_plural = _('tasks')
class WorkflowManager(models.Manager):
    """Manager for Workflow with a shortcut for active workflows."""
    def active(self):
        # Only active workflows can be assigned to pages.
        return self.filter(active=True)
class Workflow(ClusterableModel):
    """An ordered sequence of moderation Tasks that pages pass through
    before being published."""
    name = models.CharField(max_length=255, verbose_name=_('name'))
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active workflows can be added to pages. Deactivating a workflow does not remove it from existing pages."))
    objects = WorkflowManager()

    def __str__(self):
        return self.name

    @property
    def tasks(self):
        """Returns all the tasks in this workflow, in execution order."""
        return Task.objects.filter(workflow_tasks__workflow=self).order_by('workflow_tasks__sort_order')

    @transaction.atomic
    def start(self, page, user):
        """Initiate this workflow on a page: create an in-progress
        WorkflowState, advance it to the first task, emit workflow_submitted
        and log the start action."""
        state = WorkflowState(page=page, workflow=self, status=WorkflowState.STATUS_IN_PROGRESS, requested_by=user)
        state.save()
        state.update(user=user)
        workflow_submitted.send(sender=state.__class__, instance=state, user=user)
        next_task_data = None
        if state.current_task_state:
            next_task_data = {
                'id': state.current_task_state.task.id,
                'title': state.current_task_state.task.name,
            }
        PageLogEntry.objects.log_action(
            instance=page,
            action='wagtail.workflow.start',
            data={
                'workflow': {
                    'id': self.id,
                    'title': self.name,
                    'status': state.status,
                    'next': next_task_data,
                    'task_state_id': state.current_task_state.id if state.current_task_state else None,
                }
            },
            revision=page.get_latest_revision(),
            user=user,
        )
        return state

    @transaction.atomic
    def deactivate(self, user=None):
        """Set this workflow inactive: cancel all in-progress states and
        detach the workflow from all pages."""
        self.active = False
        in_progress_states = WorkflowState.objects.filter(workflow=self, status=WorkflowState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)
        WorkflowPage.objects.filter(workflow=self).delete()
        self.save()

    def all_pages(self):
        """
        Returns a queryset of all the pages that this Workflow applies to.
        """
        pages = Page.objects.none()
        for workflow_page in self.workflow_pages.all():
            pages |= workflow_page.get_pages()
        return pages

    class Meta:
        verbose_name = _('workflow')
        verbose_name_plural = _('workflows')
class GroupApprovalTask(Task):
    """A workflow task in which members of the chosen groups (or superusers)
    approve or request changes."""
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), help_text=_('Pages at this step in a workflow will be moderated or approved by these groups of users'))

    admin_form_fields = Task.admin_form_fields + ['groups']
    admin_form_widgets = {
        'groups': forms.CheckboxSelectMultiple,
    }

    def start(self, workflow_state, user=None):
        """Start the task; if the page is locked by someone outside the
        approval groups, release their lock first so moderators can edit."""
        if workflow_state.page.locked_by:
            # If the person who locked the page isn't in one of the groups, unlock the page
            if not workflow_state.page.locked_by.groups.filter(id__in=self.groups.all()).exists():
                workflow_state.page.locked = False
                workflow_state.page.locked_by = None
                workflow_state.page.locked_at = None
                workflow_state.page.save(update_fields=['locked', 'locked_by', 'locked_at'])
        return super().start(workflow_state, user=user)

    def user_can_access_editor(self, page, user):
        """Group members and superusers may open the editor during this task."""
        return self.groups.filter(id__in=user.groups.all()).exists() or user.is_superuser

    def page_locked_for_user(self, page, user):
        """The page is locked for everyone except group members and superusers."""
        return not (self.groups.filter(id__in=user.groups.all()).exists() or user.is_superuser)

    def user_can_lock(self, page, user):
        """Only group members may lock during this task (superusers are
        handled by the caller's own superuser checks)."""
        return self.groups.filter(id__in=user.groups.all()).exists()

    def user_can_unlock(self, page, user):
        return False

    def get_actions(self, page, user):
        """Group members and superusers may request changes or approve
        (optionally with a comment)."""
        if self.groups.filter(id__in=user.groups.all()).exists() or user.is_superuser:
            return [
                ('reject', _("Request changes"), True),
                ('approve', _("Approve"), False),
                ('approve', _("Approve with comment"), True),
            ]
        return []

    def get_task_states_user_can_moderate(self, user, **kwargs):
        if self.groups.filter(id__in=user.groups.all()).exists() or user.is_superuser:
            return TaskState.objects.filter(status=TaskState.STATUS_IN_PROGRESS, task=self.task_ptr)
        else:
            return TaskState.objects.none()

    @classmethod
    def get_description(cls):
        return _("Members of the chosen Wagtail Groups can approve this task")

    class Meta:
        verbose_name = _('Group approval task')
        verbose_name_plural = _('Group approval tasks')
class WorkflowStateManager(models.Manager):
    """Manager for WorkflowState with a shortcut for active states."""
    def active(self):
        """
        Filters to only STATUS_IN_PROGRESS and STATUS_NEEDS_CHANGES WorkflowStates.
        """
        return self.filter(Q(status=WorkflowState.STATUS_IN_PROGRESS) | Q(status=WorkflowState.STATUS_NEEDS_CHANGES))
class WorkflowState(models.Model):
    """Tracks the progress of a Workflow run against a single Page.

    A run owns a chain of TaskStates (one per task attempt) and is advanced
    by ``update()`` until every task approves, one task rejects, or the run
    is cancelled. At most one active (in progress / needs changes) run may
    exist per page — see ``clean()`` and the conditional constraint in Meta.
    """
    # Lifecycle values stored in ``status``.
    STATUS_IN_PROGRESS = 'in_progress'
    STATUS_APPROVED = 'approved'
    STATUS_NEEDS_CHANGES = 'needs_changes'
    STATUS_CANCELLED = 'cancelled'
    STATUS_CHOICES = (
        (STATUS_IN_PROGRESS, _("In progress")),
        (STATUS_APPROVED, _("Approved")),
        (STATUS_NEEDS_CHANGES, _("Needs changes")),
        (STATUS_CANCELLED, _("Cancelled")),
    )
    page = models.ForeignKey('Page', on_delete=models.CASCADE, verbose_name=_("page"), related_name='workflow_states')
    workflow = models.ForeignKey('Workflow', on_delete=models.CASCADE, verbose_name=_('workflow'), related_name='workflow_states')
    status = models.fields.CharField(choices=STATUS_CHOICES, verbose_name=_("status"), max_length=50, default=STATUS_IN_PROGRESS)
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("created at"))
    requested_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                     verbose_name=_('requested by'),
                                     null=True,
                                     blank=True,
                                     editable=True,
                                     on_delete=models.SET_NULL,
                                     related_name='requested_workflows')
    # The TaskState currently awaiting action; None while between tasks.
    current_task_state = models.OneToOneField('TaskState', on_delete=models.SET_NULL, null=True, blank=True,
                                              verbose_name=_("current task state"))
    # allows a custom function to be called on finishing the Workflow successfully.
    on_finish = import_string(getattr(settings, 'WAGTAIL_FINISH_WORKFLOW_ACTION', 'wagtail.core.workflows.publish_workflow_state'))
    objects = WorkflowStateManager()
    def clean(self):
        """Reject a second active workflow state on the same page."""
        super().clean()
        if self.status in (self.STATUS_IN_PROGRESS, self.STATUS_NEEDS_CHANGES):
            # The unique constraint is conditional, and so not supported on the MySQL backend - so an additional check is done here
            if WorkflowState.objects.active().filter(page=self.page).exclude(pk=self.pk).exists():
                raise ValidationError(_('There may only be one in progress or needs changes workflow state per page.'))
    def save(self, *args, **kwargs):
        # full_clean() on every save so the uniqueness rule above is always enforced.
        self.full_clean()
        return super().save(*args, **kwargs)
    def __str__(self):
        return _("Workflow '{0}' on Page '{1}': {2}").format(self.workflow, self.page, self.status)
    def resume(self, user=None):
        """Restart a 'needs changes' run at the task that was rejected.

        Raises PermissionDenied unless the run is in STATUS_NEEDS_CHANGES.
        """
        if self.status != self.STATUS_NEEDS_CHANGES:
            raise PermissionDenied
        revision = self.current_task_state.page_revision
        current_task_state = self.current_task_state
        # Clear the pointer before saving so update() starts a fresh TaskState.
        self.current_task_state = None
        self.status = self.STATUS_IN_PROGRESS
        self.save()
        PageLogEntry.objects.log_action(
            instance=self.page.specific,
            action='wagtail.workflow.resume',
            data={
                'workflow': {
                    'id': self.workflow_id,
                    'title': self.workflow.name,
                    'status': self.status,
                    'task_state_id': current_task_state.id,
                    'task': {
                        'id': current_task_state.task.id,
                        'title': current_task_state.task.name,
                    },
                }
            },
            revision=revision,
            user=user,
        )
        return self.update(user=user, next_task=current_task_state.task)
    def user_can_cancel(self, user):
        """True if *user* may cancel this run: the requester, the page owner,
        or someone able to approve the current in-progress task — provided
        the page is not locked by somebody else."""
        if self.page.locked and self.page.locked_by != user:
            return False
        return user == self.requested_by or user == self.page.owner or (self.current_task_state and self.current_task_state.status == self.current_task_state.STATUS_IN_PROGRESS and 'approve' in [action[0] for action in self.current_task_state.task.get_actions(self.page, user)])
    def update(self, user=None, next_task=None):
        """Advance the run: react to a rejection, start the next task, or
        finish when no tasks remain. No-op unless the run is in progress."""
        if self.status != self.STATUS_IN_PROGRESS:
            # Updating a completed or cancelled workflow should have no effect
            return
        try:
            current_status = self.current_task_state.status
        except AttributeError:
            # No current task state at all.
            current_status = None
        if current_status == TaskState.STATUS_REJECTED:
            self.status = self.STATUS_NEEDS_CHANGES
            self.save()
            workflow_rejected.send(sender=self.__class__, instance=self, user=user)
        else:
            if not next_task:
                next_task = self.get_next_task()
            if next_task:
                if (not self.current_task_state) or self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS:
                    # if not on a task, or the next task to move to is not the current task (ie current task's status is
                    # no longer STATUS_IN_PROGRESS), start the next task
                    self.current_task_state = next_task.specific.start(self, user=user)
                    self.save()
                    # Recurse: the freshly started task may have resolved itself
                    # immediately (e.g. auto-approved), so keep advancing.
                    if self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS:
                        self.update(user=user)
            else:
                # No tasks left to run: the workflow has passed.
                self.finish(user=user)
    @property
    def successful_task_states(self):
        """TaskStates that passed (approved or skipped); optionally limited to
        the latest revision when re-approval on edit is required."""
        successful_task_states = self.task_states.filter(
            Q(status=TaskState.STATUS_APPROVED) | Q(status=TaskState.STATUS_SKIPPED)
        )
        if getattr(settings, "WAGTAIL_WORKFLOW_REQUIRE_REAPPROVAL_ON_EDIT", False):
            successful_task_states = successful_task_states.filter(page_revision=self.page.get_latest_revision())
        return successful_task_states
    def get_next_task(self):
        """First active workflow task without a successful TaskState, in
        workflow order; None when every task has passed."""
        return (
            Task.objects.filter(workflow_tasks__workflow=self.workflow, active=True)
            .exclude(
                task_states__in=self.successful_task_states
            ).order_by('workflow_tasks__sort_order').first()
        )
    def cancel(self, user=None):
        """Cancel the run and every in-progress TaskState belonging to it."""
        if self.status not in (self.STATUS_IN_PROGRESS, self.STATUS_NEEDS_CHANGES):
            raise PermissionDenied
        self.status = self.STATUS_CANCELLED
        self.save()
        PageLogEntry.objects.log_action(
            instance=self.page.specific,
            action='wagtail.workflow.cancel',
            data={
                'workflow': {
                    'id': self.workflow_id,
                    'title': self.workflow.name,
                    'status': self.status,
                    'task_state_id': self.current_task_state.id,
                    'task': {
                        'id': self.current_task_state.task.id,
                        'title': self.current_task_state.task.name,
                    },
                }
            },
            revision=self.current_task_state.page_revision,
            user=user,
        )
        for state in self.task_states.filter(status=TaskState.STATUS_IN_PROGRESS):
            state.specific.cancel(user=user)
        workflow_cancelled.send(sender=self.__class__, instance=self, user=user)
    @transaction.atomic
    def finish(self, user=None):
        """Mark the run approved and invoke the configured on_finish action."""
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_APPROVED
        self.save()
        self.on_finish(user=user)
        workflow_approved.send(sender=self.__class__, instance=self, user=user)
    def copy_approved_task_states_to_revision(self, revision):
        """Duplicate this run's approved TaskStates onto *revision* so its
        approvals carry over."""
        approved_states = TaskState.objects.filter(workflow_state=self, status=TaskState.STATUS_APPROVED)
        for state in approved_states:
            state.copy(update_attrs={'page_revision': revision})
    def revisions(self):
        """All page revisions that were submitted to this workflow run."""
        return PageRevision.objects.filter(
            page_id=self.page_id,
            id__in=self.task_states.values_list('page_revision_id', flat=True)
        ).defer('content_json')
    def _get_applicable_task_states(self):
        """TaskStates relevant for reporting; limited to the most recent
        revision when re-approval on edit is required."""
        task_states = TaskState.objects.filter(workflow_state_id=self.id)
        if getattr(settings, "WAGTAIL_WORKFLOW_REQUIRE_REAPPROVAL_ON_EDIT", False):
            latest_revision_id = self.revisions().order_by('-created_at', '-id').values_list('id', flat=True).first()
            task_states = task_states.filter(page_revision_id=latest_revision_id)
        return task_states
    def all_tasks_with_status(self):
        """The workflow's tasks annotated with each task's most recent status
        (``task.status`` / ``task.status_display``), defaulting to 'Not started'."""
        task_states = self._get_applicable_task_states()
        tasks = list(
            self.workflow.tasks.annotate(
                status=Subquery(
                    task_states.filter(
                        task_id=OuterRef('id'),
                    ).order_by(
                        '-started_at', '-id'
                    ).values('status')[:1]
                ),
            )
        )
        # Manually annotate the display value: the Subquery annotation
        # bypasses the model field's get_status_display machinery.
        status_choices = dict(TaskState.STATUS_CHOICES)
        for task in tasks:
            task.status_display = status_choices.get(task.status, _("Not started"))
        return tasks
    def all_tasks_with_state(self):
        """The workflow's tasks annotated with their most recent TaskState
        object as ``task.task_state`` (possibly None)."""
        task_states = self._get_applicable_task_states()
        tasks = list(
            self.workflow.tasks.annotate(
                task_state_id=Subquery(
                    task_states.filter(
                        task_id=OuterRef('id'),
                    ).order_by(
                        '-started_at', '-id'
                    ).values('id')[:1]
                ),
            )
        )
        task_states = {task_state.id: task_state for task_state in task_states}
        for task in tasks:
            task.task_state = task_states.get(task.task_state_id)
        return tasks
    @property
    def is_active(self):
        """True while the run is neither approved nor cancelled."""
        return self.status not in [self.STATUS_APPROVED, self.STATUS_CANCELLED]
    @property
    def is_at_final_task(self):
        """True when the next task to run is also the last remaining one."""
        last_task = Task.objects.filter(workflow_tasks__workflow=self.workflow, active=True)\
            .exclude(task_states__in=self.successful_task_states)\
            .order_by('workflow_tasks__sort_order').last()
        return self.get_next_task() == last_task
    class Meta:
        verbose_name = _('Workflow state')
        verbose_name_plural = _('Workflow states')
        # Conditional uniqueness: at most one active run per page. Not
        # enforceable on MySQL — clean() performs the equivalent check.
        constraints = [
            models.UniqueConstraint(fields=['page'], condition=Q(status__in=('in_progress', 'needs_changes')), name='unique_in_progress_workflow')
        ]
class TaskStateManager(models.Manager):
    def reviewable_by(self, user):
        """
        Return a queryset of all TaskStates that *user* may moderate, built
        by OR-ing together each active task's own moderatable states.
        """
        tasks = Task.objects.filter(active=True)
        states = TaskState.objects.none()
        for task in tasks:
            # Each task subclass decides which of its states the user can act on.
            states = states | task.specific.get_task_states_user_can_moderate(user=user)
        return states
class TaskState(models.Model):
    """Tracks the status of a single Task attempt on a page revision within a
    WorkflowState. Subclassable via ``content_type``/``specific`` in the same
    way as Task itself."""
    # Lifecycle values stored in ``status``.
    STATUS_IN_PROGRESS = 'in_progress'
    STATUS_APPROVED = 'approved'
    STATUS_REJECTED = 'rejected'
    STATUS_SKIPPED = 'skipped'
    STATUS_CANCELLED = 'cancelled'
    STATUS_CHOICES = (
        (STATUS_IN_PROGRESS, _("In progress")),
        (STATUS_APPROVED, _("Approved")),
        (STATUS_REJECTED, _("Rejected")),
        (STATUS_SKIPPED, _("Skipped")),
        (STATUS_CANCELLED, _("Cancelled")),
    )
    workflow_state = models.ForeignKey('WorkflowState', on_delete=models.CASCADE, verbose_name=_('workflow state'), related_name='task_states')
    page_revision = models.ForeignKey('PageRevision', on_delete=models.CASCADE, verbose_name=_('page revision'), related_name='task_states')
    task = models.ForeignKey('Task', on_delete=models.CASCADE, verbose_name=_('task'), related_name='task_states')
    status = models.fields.CharField(choices=STATUS_CHOICES, verbose_name=_("status"), max_length=50, default=STATUS_IN_PROGRESS)
    started_at = models.DateTimeField(verbose_name=_('started at'), auto_now_add=True)
    finished_at = models.DateTimeField(verbose_name=_('finished at'), blank=True, null=True)
    finished_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_('finished by'),
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='finished_task_states'
    )
    comment = models.TextField(blank=True)
    # Records the most specific model class of this instance, to support
    # subclassed task states (see ``specific`` below).
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='wagtail_task_states',
        on_delete=models.CASCADE
    )
    # Field names excluded when copy()-ing an instance.
    exclude_fields_in_copy = []
    default_exclude_fields_in_copy = ['id']
    objects = TaskStateManager()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.id:
            # Newly created (not loaded from the database): record the
            # concrete model class so ``specific`` can resolve it later.
            if not self.content_type_id:
                self.content_type = ContentType.objects.get_for_model(self)
    def __str__(self):
        return _("Task '{0}' on Page Revision '{1}': {2}").format(self.task, self.page_revision, self.status)
    @cached_property
    def specific(self):
        """
        Return this TaskState in its most specific subclassed form.
        """
        content_type = ContentType.objects.get_for_id(self.content_type_id)
        model_class = content_type.model_class()
        if model_class is None:
            # The model for this content type no longer exists (e.g. after
            # switching branches); if so, the best we can do is return the page
            # unchanged.
            return self
        elif isinstance(self, model_class):
            # self is already an instance of the most specific class
            return self
        else:
            return content_type.get_object_for_this_type(id=self.id)
    @transaction.atomic
    def approve(self, user=None, update=True, comment=''):
        """Approve this task state and, unless ``update=False``, advance the
        parent workflow. Raises PermissionDenied unless in progress."""
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_APPROVED
        self.finished_at = timezone.now()
        self.finished_by = user
        self.comment = comment
        self.save()
        self.log_state_change_action(user, 'approve')
        if update:
            self.workflow_state.update(user=user)
        task_approved.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
    @transaction.atomic
    def reject(self, user=None, update=True, comment=''):
        """Reject this task state and, unless ``update=False``, advance the
        parent workflow. Raises PermissionDenied unless in progress."""
        if self.status != self.STATUS_IN_PROGRESS:
            raise PermissionDenied
        self.status = self.STATUS_REJECTED
        self.finished_at = timezone.now()
        self.finished_by = user
        self.comment = comment
        self.save()
        self.log_state_change_action(user, 'reject')
        if update:
            self.workflow_state.update(user=user)
        task_rejected.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
    @cached_property
    def task_type_started_at(self):
        """The ``started_at`` of the earliest consecutive attempt on the same
        task as this state (walking back from the most recent attempt)."""
        task_states = TaskState.objects.filter(workflow_state=self.workflow_state).order_by('-started_at').select_related('task')
        started_at = None
        for task_state in task_states:
            if task_state.task == self.task:
                started_at = task_state.started_at
            elif started_at:
                # We have passed the run of attempts on our task; earlier
                # states belong to a previous pass through the workflow.
                break
        return started_at
    @transaction.atomic
    def cancel(self, user=None, resume=False, comment=''):
        """Cancel this task state; with ``resume=True`` the same task is
        restarted, otherwise the workflow simply moves on."""
        self.status = self.STATUS_CANCELLED
        self.finished_at = timezone.now()
        self.comment = comment
        self.finished_by = user
        self.save()
        if resume:
            self.workflow_state.update(user=user, next_task=self.task.specific)
        else:
            self.workflow_state.update(user=user)
        task_cancelled.send(sender=self.specific.__class__, instance=self.specific, user=user)
        return self
    def copy(self, update_attrs=None, exclude_fields=None):
        """
        Copy this task state, excluding the attributes in ``exclude_fields``
        and overriding any attributes given in the ``update_attrs`` dict.
        """
        exclude_fields = self.default_exclude_fields_in_copy + self.exclude_fields_in_copy + (exclude_fields or [])
        instance, child_object_map = _copy(self.specific, exclude_fields, update_attrs)
        instance.save()
        _copy_m2m_relations(self, instance, exclude_fields=exclude_fields)
        return instance
    def get_comment(self):
        """
        Returns the text shown in workflow history for this state.

        Subclasses may override this to generate a comment.
        """
        return self.comment
    def log_state_change_action(self, user, action):
        """Record an approve/reject action against the page's audit log."""
        page = self.page_revision.as_page_object()
        next_task = self.workflow_state.get_next_task()
        next_task_data = None
        if next_task:
            next_task_data = {
                'id': next_task.id,
                'title': next_task.name
            }
        PageLogEntry.objects.log_action(
            instance=page,
            action='wagtail.workflow.{}'.format(action),
            user=user,
            data={
                'workflow': {
                    'id': self.workflow_state.workflow.id,
                    'title': self.workflow_state.workflow.name,
                    'status': self.status,
                    'task_state_id': self.id,
                    'task': {
                        'id': self.task.id,
                        'title': self.task.name,
                    },
                    'next': next_task_data,
                },
                'comment': self.get_comment()
            },
            revision=self.page_revision
        )
    class Meta:
        verbose_name = _('Task state')
        verbose_name_plural = _('Task states')
class PageLogEntryManager(BaseLogEntryManager):
    """Manager for PageLogEntry: ties each log entry to its page."""
    def get_instance_title(self, instance):
        """Display title of the logged page, as shown in the admin."""
        return instance.specific_deferred.get_admin_display_title()
    def log_action(self, instance, action, **kwargs):
        """Record *action* against *instance*, attaching it as the entry's page."""
        kwargs['page'] = instance
        return super().log_action(instance, action, **kwargs)
class PageLogEntry(BaseLogEntry):
    """Log entry specialised for page actions, linked to the page and an
    optional page revision."""
    # DO_NOTHING + db_constraint=False: no FK constraint is created, so log
    # entries can outlive the page they refer to.
    page = models.ForeignKey(
        'wagtailcore.Page',
        on_delete=models.DO_NOTHING,
        db_constraint=False,
        related_name='+'
    )
    # Pointer to a specific page revision
    revision = models.ForeignKey(
        'wagtailcore.PageRevision',
        null=True,
        blank=True,
        on_delete=models.DO_NOTHING,
        db_constraint=False,
        related_name='+',
    )
    objects = PageLogEntryManager()
    action_registry = page_log_action_registry
    class Meta:
        # Newest entries first.
        ordering = ['-timestamp', '-id']
        verbose_name = _('page log entry')
        verbose_name_plural = _('page log entries')
    def __str__(self):
        return "PageLogEntry %d: '%s' on '%s' with id %s" % (
            self.pk, self.action, self.object_verbose_name(), self.page_id
        )
    @cached_property
    def object_id(self):
        # Generic BaseLogEntry API: for page entries the object id is simply
        # the page's primary key.
        return self.page_id
class Comment(ClusterableModel):
    """
    A user comment attached to a particular field (or streamfield block
    field) of a page.
    """
    page = ParentalKey(Page, on_delete=models.CASCADE, related_name='comments')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comments')
    text = models.TextField()
    contentpath = models.TextField()
    # This stores the field or field within a streamfield block that the comment is applied on, in the form: 'field', or 'field.block_id.field'
    # This must be unchanging across all revisions, so we will not support (current-format) ListBlock or the contents of InlinePanels initially.
    position = models.TextField(blank=True)
    # This stores the position within a field, to be interpreted by the field's frontend widget. It may change between revisions
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # The revision on which the comment was first made.
    revision_created = models.ForeignKey(PageRevision, on_delete=models.CASCADE, related_name='created_comments', null=True, blank=True)
    resolved_at = models.DateTimeField(null=True, blank=True)
    resolved_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='comments_resolved',
        null=True,
        blank=True
    )
    class Meta:
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
    def __str__(self):
        return "Comment on Page '{0}', left by {1}: '{2}'".format(self.page, self.user, self.text)
    def save(self, update_position=False, **kwargs):
        """Save the comment, but never persist ``position`` unless
        ``update_position=True`` or 'position' is listed in ``update_fields``."""
        update_fields = kwargs.pop('update_fields', None)
        if not update_position and (not update_fields or 'position' not in update_fields):
            if self.id:
                # The instance is already saved; we can use `update_fields`
                update_fields = update_fields if update_fields else self._meta.get_fields()
                update_fields = [field.name for field in update_fields if field.name not in {'position', 'id'}]
            else:
                # This is a new instance, we have to preserve and then restore the position via a variable
                position = self.position
                result = super().save(**kwargs)
                self.position = position
                return result
        return super().save(update_fields=update_fields, **kwargs)
    def _log(self, action, page_revision=None, user=None):
        # Shared helper for the log_* methods below: records the comment's
        # id, contentpath and text against the page's audit log.
        PageLogEntry.objects.log_action(
            instance=self.page,
            action=action,
            user=user,
            revision=page_revision,
            data={
                'comment': {
                    'id': self.pk,
                    'contentpath': self.contentpath,
                    'text': self.text,
                }
            }
        )
    def log_create(self, **kwargs):
        self._log('wagtail.comments.create', **kwargs)
    def log_edit(self, **kwargs):
        self._log('wagtail.comments.edit', **kwargs)
    def log_resolve(self, **kwargs):
        self._log('wagtail.comments.resolve', **kwargs)
    def log_delete(self, **kwargs):
        self._log('wagtail.comments.delete', **kwargs)
class CommentReply(models.Model):
    """A reply in the thread under a Comment."""
    comment = ParentalKey(Comment, on_delete=models.CASCADE, related_name='replies')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comment_replies')
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = _('comment reply')
        verbose_name_plural = _('comment replies')
    def __str__(self):
        return "CommentReply left by '{0}': '{1}'".format(self.user, self.text)
    def _log(self, action, page_revision=None, user=None):
        # Shared helper for the log_* methods below: records both the parent
        # comment and this reply in the log entry's data payload.
        PageLogEntry.objects.log_action(
            instance=self.comment.page,
            action=action,
            user=user,
            revision=page_revision,
            data={
                'comment': {
                    'id': self.comment.pk,
                    'contentpath': self.comment.contentpath,
                    'text': self.comment.text,
                },
                'reply': {
                    'id': self.pk,
                    'text': self.text,
                }
            }
        )
    def log_create(self, **kwargs):
        self._log('wagtail.comments.create_reply', **kwargs)
    def log_edit(self, **kwargs):
        self._log('wagtail.comments.edit_reply', **kwargs)
    def log_delete(self, **kwargs):
        self._log('wagtail.comments.delete_reply', **kwargs)
class PageSubscription(models.Model):
    """Per-user notification preferences for a page."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='page_subscriptions')
    page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='subscribers')
    # Whether the user receives notifications about comments on this page.
    comment_notifications = models.BooleanField()
    class Meta:
        # One subscription record per (page, user) pair.
        unique_together = [
            ('page', 'user'),
        ]
| true | true |
1c346f8c3402e5acc7704116e8651576d52dd5b9 | 141 | py | Python | glim_extensions/db/utils.py | aacanakin/glim-extensions | 75cf1e857abd717645db85f273650c0d883c55f2 | [
"MIT"
] | 2 | 2015-01-06T19:21:44.000Z | 2019-06-14T13:04:51.000Z | glim_extensions/db/utils.py | aacanakin/glim-extensions | 75cf1e857abd717645db85f273650c0d883c55f2 | [
"MIT"
] | 2 | 2015-02-20T07:40:47.000Z | 2015-02-20T07:44:42.000Z | glim_extensions/db/utils.py | aacanakin/glim-extensions | 75cf1e857abd717645db85f273650c0d883c55f2 | [
"MIT"
] | null | null | null | import os
def touch(path):
    """Emulate the Unix ``touch`` command.

    Creates the file at *path* if it does not exist (opened in append mode so
    existing contents are preserved) and updates its access/modification
    times to now.

    Returns True if *path* refers to a regular file afterwards.
    """
    # The with-statement replaces the manual try/finally close and guarantees
    # the handle is released even if os.utime raises.
    with open(path, 'a'):
        os.utime(path, None)
    return os.path.isfile(path)
| 14.1 | 28 | 0.673759 | import os
def touch(path):
    """Unix-style touch: ensure *path* exists and bump its timestamps.

    Returns True when *path* is a regular file afterwards.
    """
    with open(path, 'a'):
        os.utime(path, None)
    return os.path.isfile(path)
| true | true |
1c34700a448f47b1743fd71647ab3fbfaa3323ec | 2,400 | py | Python | test.py | speedcell4/pytorch-noreward-rl | b889d78b7b2115feb80198c90e75e35956eae284 | [
"MIT"
] | null | null | null | test.py | speedcell4/pytorch-noreward-rl | b889d78b7b2115feb80198c90e75e35956eae284 | [
"MIT"
] | null | null | null | test.py | speedcell4/pytorch-noreward-rl | b889d78b7b2115feb80198c90e75e35956eae284 | [
"MIT"
] | null | null | null | import pickle
import time
from collections import deque
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import env_wrapper
from model import ActorCritic
def test(rank, args, shared_model):
    """Evaluation loop for the A3C agent.

    Repeatedly plays episodes with the latest weights pulled from
    ``shared_model``, selecting actions greedily, and prints/pickles each
    episode's reward. Runs forever; intended to be launched as a separate
    process alongside the training workers.
    """
    torch.manual_seed(args.seed + rank)
    env = env_wrapper.create_doom(args.record, outdir=args.outdir)
    model = ActorCritic(env.observation_space.shape[0], env.action_space)
    model.eval()
    state = env.reset()
    state = torch.from_numpy(state)
    reward_sum = 0
    done = True
    start_time = time.time()
    # a quick hack to prevent the agent from stucking
    actions = deque(maxlen=2100)
    episode_length = 0
    # (episode reward, elapsed seconds) tuples; re-pickled after each episode.
    result = []
    while True:
        episode_length += 1
        # Sync with the shared model
        if done:
            model.load_state_dict(shared_model.state_dict())
            # Fresh LSTM hidden/cell state at episode start.
            # NOTE(review): volatile=True is the pre-0.4 PyTorch inference
            # flag; modern PyTorch would use torch.no_grad() instead.
            cx = Variable(torch.zeros(1, 256), volatile=True)
            hx = Variable(torch.zeros(1, 256), volatile=True)
        else:
            # Re-wrap .data so the recurrent state carries no history graph.
            cx = Variable(cx.data, volatile=True)
            hx = Variable(hx.data, volatile=True)
        value, logit, (hx, cx) = model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)),
            icm=False
        )
        prob = F.softmax(logit)
        # Greedy action: take the argmax rather than sampling.
        action = prob.max(1)[1].data.numpy()
        state, reward, done, _ = env.step(action[0, 0])
        state = torch.from_numpy(state)
        done = done or episode_length >= args.max_episode_length
        reward_sum += reward
        # a quick hack to prevent the agent from stucking
        actions.append(action[0, 0])
        if actions.count(actions[0]) == actions.maxlen:
            done = True
        if done:
            end_time = time.time()
            print("Time {}, episode reward {}, episode length {}".format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(end_time - start_time)),
                reward_sum, episode_length))
            result.append((reward_sum, end_time - start_time))
            # NOTE(review): text-mode 'w' works for pickle only on Python 2;
            # Python 3 requires 'wb' here — confirm the target interpreter.
            f = open('output/result.pickle', 'w')
            pickle.dump(result, f)
            f.close()
            torch.save(model.state_dict(), 'output/{}.pth'.format((end_time - start_time)))
            reward_sum = 0
            episode_length = 0
            actions.clear()
            state = env.reset()
            state = torch.from_numpy(state)
            # Pause before starting the next evaluation episode.
            time.sleep(60)
| 30 | 91 | 0.585833 | import pickle
import time
from collections import deque
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import env_wrapper
from model import ActorCritic
def test(rank, args, shared_model):
    """Endless evaluation loop: play episodes greedily with the latest
    shared A3C weights, printing and pickling each episode's reward."""
    torch.manual_seed(args.seed + rank)
    env = env_wrapper.create_doom(args.record, outdir=args.outdir)
    model = ActorCritic(env.observation_space.shape[0], env.action_space)
    model.eval()
    state = env.reset()
    state = torch.from_numpy(state)
    reward_sum = 0
    done = True
    start_time = time.time()
    # Bounded action history used to detect a stuck agent (see below).
    actions = deque(maxlen=2100)
    episode_length = 0
    result = []
    while True:
        episode_length += 1
        if done:
            # New episode: refresh weights and reset the LSTM state.
            # NOTE(review): volatile=True is the pre-0.4 PyTorch inference
            # flag; modern PyTorch would use torch.no_grad().
            model.load_state_dict(shared_model.state_dict())
            cx = Variable(torch.zeros(1, 256), volatile=True)
            hx = Variable(torch.zeros(1, 256), volatile=True)
        else:
            # Re-wrap .data so the recurrent state carries no history graph.
            cx = Variable(cx.data, volatile=True)
            hx = Variable(hx.data, volatile=True)
        value, logit, (hx, cx) = model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)),
            icm=False
        )
        prob = F.softmax(logit)
        # Greedy (argmax) action selection during evaluation.
        action = prob.max(1)[1].data.numpy()
        state, reward, done, _ = env.step(action[0, 0])
        state = torch.from_numpy(state)
        done = done or episode_length >= args.max_episode_length
        reward_sum += reward
        actions.append(action[0, 0])
        # If the window is full of one repeated action, treat the agent as stuck.
        if actions.count(actions[0]) == actions.maxlen:
            done = True
        if done:
            end_time = time.time()
            print("Time {}, episode reward {}, episode length {}".format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(end_time - start_time)),
                reward_sum, episode_length))
            result.append((reward_sum, end_time - start_time))
            # NOTE(review): pickle to a text-mode file is Python-2 only;
            # Python 3 would need 'wb' — confirm the target interpreter.
            f = open('output/result.pickle', 'w')
            pickle.dump(result, f)
            f.close()
            torch.save(model.state_dict(), 'output/{}.pth'.format((end_time - start_time)))
            reward_sum = 0
            episode_length = 0
            actions.clear()
            state = env.reset()
            state = torch.from_numpy(state)
            # Pause before the next evaluation episode.
            time.sleep(60)
| true | true |
1c3470d827c40a69d453b9d1c08c8a9036f3fde5 | 703 | py | Python | benchmarks/benchmark_msgpackrpc.py | brglng/aiorpc | 575a898e54e61cd73ec5cf2b48348e70cfaa5b41 | [
"WTFPL"
] | 66 | 2016-10-17T19:16:44.000Z | 2022-02-26T01:10:06.000Z | benchmarks/benchmark_msgpackrpc.py | webclinic017/aiorpc | a46929d70f17a6a98ee8f071012656f57bcd073b | [
"WTFPL"
] | 25 | 2018-05-13T03:14:43.000Z | 2022-03-03T03:29:04.000Z | benchmarks/benchmark_msgpackrpc.py | webclinic017/aiorpc | a46929d70f17a6a98ee8f071012656f57bcd073b | [
"WTFPL"
] | 20 | 2017-09-13T17:04:21.000Z | 2022-02-03T12:26:25.000Z | # -*- coding: utf-8 -*-
import time
import msgpackrpc
import multiprocessing
NUM_CALLS = 10000
def run_sum_server():
    """Run a blocking msgpack-RPC server on localhost:6000 exposing sum()."""
    class SumServer(object):
        def sum(self, x, y):
            return x + y
    rpc_server = msgpackrpc.Server(SumServer())
    rpc_server.listen(msgpackrpc.Address("localhost", 6000))
    rpc_server.start()
def call():
    """Issue NUM_CALLS synchronous 'sum' RPCs and print the achieved QPS."""
    client = msgpackrpc.Client(msgpackrpc.Address("localhost", 6000))
    start = time.time()
    for _ in range(NUM_CALLS):
        client.call('sum', 1, 2)
    print('call: %d qps' % (NUM_CALLS / (time.time() - start)))
if __name__ == '__main__':
    # Benchmark driver: start the RPC server in a child process, hammer it
    # from this process, then tear it down.
    p = multiprocessing.Process(target=run_sum_server)
    p.start()
    time.sleep(1)  # give the server a moment to bind its port
    call()
    p.terminate()
| 19 | 69 | 0.633001 |
import time
import msgpackrpc
import multiprocessing
NUM_CALLS = 10000  # number of RPC round-trips per benchmark run
def run_sum_server():
    """Start a blocking msgpack-RPC server on localhost:6000 exposing sum()."""
    class SumServer(object):
        def sum(self, x, y):
            return x + y
    server = msgpackrpc.Server(SumServer())
    server.listen(msgpackrpc.Address("localhost", 6000))
    server.start()
def call():
    """Issue NUM_CALLS synchronous 'sum' RPCs and print the achieved QPS."""
    client = msgpackrpc.Client(msgpackrpc.Address("localhost", 6000))
    start = time.time()
    # List comprehension used purely for its side effect of issuing calls.
    [client.call('sum', 1, 2) for _ in range(NUM_CALLS)]
    print('call: %d qps' % (NUM_CALLS / (time.time() - start)))
if __name__ == '__main__':
    # Run the server in a child process so this process can benchmark it.
    p = multiprocessing.Process(target=run_sum_server)
    p.start()
    time.sleep(1)  # wait for the server to start listening
    call()
    p.terminate()
| true | true |
1c34723774ef88f3e523e0d9e0ebd06168f81247 | 4,183 | py | Python | nypdbot/dotplacer.py | artdent/nypdbot | 6b2cc459aa9fa326dbb5297836eb6b3e92e53397 | [
"Apache-2.0"
] | null | null | null | nypdbot/dotplacer.py | artdent/nypdbot | 6b2cc459aa9fa326dbb5297836eb6b3e92e53397 | [
"Apache-2.0"
] | null | null | null | nypdbot/dotplacer.py | artdent/nypdbot | 6b2cc459aa9fa326dbb5297836eb6b3e92e53397 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Jacob Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pure Data object placer that uses graphviz to lay out the patch.
"""
import cgi
import html
import tempfile

import pygraphviz as pgv
_TABLE_HTML = """<
<table cellspacing="0" cellborder="0">
<tr>%s</tr>
<tr><td colspan="%d">%s</td></tr>
<tr>%s</tr>
</table>
>"""
class DotPlacer(object):
    """Computes (x, y) coordinates for Pd boxes by laying the patch out with
    graphviz's 'dot' engine. Each box becomes an HTML-table node whose top
    and bottom rows provide inlet/outlet connection ports."""

    def __init__(self):
        self.node_id = 0      # counter for minting unique graphviz node names
        self.node_names = {}  # maps each box to its graphviz node name
        self.graph = pgv.AGraph(directed=True, ordering='out', ranksep=0.1)

    def _format_arg(self, arg):
        """Render one box argument for display: floats get two decimal
        places, everything else is HTML-escaped."""
        if isinstance(arg, float):
            return '%0.2f' % arg
        # html.escape with quote=False matches the old cgi.escape behaviour
        # (escape &, <, > only); cgi.escape is deprecated and the cgi module
        # was removed from the standard library in Python 3.13.
        return html.escape(str(arg), quote=False)

    def _box_content(self, box):
        """The space-separated, display-formatted arguments of the box."""
        return ' '.join([self._format_arg(arg) for arg in box.args])

    def _label(self, box):
        """Build the graphviz HTML-like table label for a box, with one port
        cell per inlet on the top row and per outlet on the bottom row."""
        if box.inlet_count():
            inlets = ''.join('<td port="i%d" height="0"></td>' % i
                             for i in range(box.inlet_count()))
        else:
            inlets = '<td></td>'
        if box.outlet_count():
            outlets = ''.join('<td port="o%d" height="0"></td>' % i
                              for i in range(box.outlet_count()))
        else:
            outlets = '<td></td>'
        max_cell_count = max(1, box.inlet_count(), box.outlet_count())
        return _TABLE_HTML % (inlets,
                              max_cell_count, self._box_content(box),
                              outlets)

    def _parse_coord(self, node):
        """Extract the laid-out (x, y) position of a graphviz node as ints."""
        x, y = node.attr['pos'].split(',')
        return int(float(x)), int(float(y))

    def _add_nodes(self, boxes):
        """Create one graphviz node per box and remember its name."""
        # TODO: place all inlet and outlet nodes in their own respective
        # subgraphs so that their left-to-right ordering is preserved.
        # Or just punt and have pdctl place those nodes itself.
        for box in boxes:
            name = 'node%d' % self.node_id
            self.node_id += 1
            # Fudge factor to translate height from pixels to inches.
            self.graph.add_node(name, label=self._label(box), shape='none',
                                fontsize=10, height=box.HEIGHT / 40.0)
            self.node_names[box] = name

    def _add_edges(self, boxes):
        """Create one edge per patch connection, anchored on the port cells.
        Likely-audio-rate connections get extra weight."""
        for box in boxes:
            for conn in box.outgoing():
                weight = 2 if self._might_be_audio_rate(conn) else 1
                self.graph.add_edge(
                    self.node_names[box], self.node_names[conn.inlet.box],
                    headport='i%d:n' % conn.inlet.idx,
                    tailport='o%d:s' % conn.outlet.idx,
                    arrowhead='tee', weight=weight)

    def _might_be_audio_rate(self, conn):
        """Heuristically decide whether *conn* carries an audio-rate signal."""
        # For canvases, we know exactly which ports are audio rate.
        # TODO: it would be clear if the patch method would note
        # if it is connecting an audio-rate port.
        from_box = conn.outlet.box
        if from_box.outlets and from_box.outlets[conn.outlet.idx]:
            return True
        to_box = conn.inlet.box
        if to_box.inlets and to_box.inlets[conn.inlet.idx]:
            return True
        # For other boxes, guess that two audio-rate boxes are connected
        # by an audio-rate signal.
        return from_box.audio_rate and to_box.audio_rate

    def place_all(self, boxes):
        """Lay out all boxes and return a dict mapping box -> (x, y)."""
        self._add_nodes(boxes)
        self._add_edges(boxes)
        # Invert the y-axis to match pd.
        self.graph.layout(prog='dot', args='-y')
        # For debugging:
        #self.graph.draw(tempfile.mkstemp(suffix='.dot')[1])
        #self.graph.draw(tempfile.mkstemp(suffix='.png')[1])
        return dict(
            (box, self._parse_coord(self.graph.get_node(self.node_names[box])))
            for box in boxes)
| 36.373913 | 79 | 0.593354 |
import cgi
import tempfile
import pygraphviz as pgv
_TABLE_HTML = """<
<table cellspacing="0" cellborder="0">
<tr>%s</tr>
<tr><td colspan="%d">%s</td></tr>
<tr>%s</tr>
</table>
>"""
class DotPlacer(object):
    """Lays out Pd boxes with graphviz 'dot' and reports their coordinates."""
    def __init__(self):
        self.node_id = 0      # counter for unique graphviz node names
        self.node_names = {}  # box -> graphviz node name
        self.graph = pgv.AGraph(directed=True, ordering='out', ranksep=0.1)
    def _format_arg(self, arg):
        """Format one box argument; floats get two decimal places."""
        if isinstance(arg, float):
            return '%0.2f' % arg
        # NOTE(review): cgi.escape is deprecated and the cgi module was
        # removed in Python 3.13; html.escape(str(arg), quote=False) is the
        # drop-in replacement — confirm the target Python version.
        return cgi.escape(str(arg))
    def _box_content(self, box):
        """Space-separated, display-formatted arguments of the box."""
        return ' '.join([self._format_arg(arg) for arg in box.args])
    def _label(self, box):
        """Build the HTML-like table label, one port cell per inlet/outlet."""
        if box.inlet_count():
            inlets = ''.join('<td port="i%d" height="0"></td>' % i
                             for i in range(box.inlet_count()))
        else:
            inlets = '<td></td>'
        if box.outlet_count():
            outlets = ''.join('<td port="o%d" height="0"></td>' % i
                              for i in range(box.outlet_count()))
        else:
            outlets = '<td></td>'
        max_cell_count = max(1, box.inlet_count(), box.outlet_count())
        return _TABLE_HTML % (inlets,
                              max_cell_count, self._box_content(box),
                              outlets)
    def _parse_coord(self, node):
        """Return the node's laid-out (x, y) position as ints."""
        x, y = node.attr['pos'].split(',')
        return int(float(x)), int(float(y))
    def _add_nodes(self, boxes):
        """Create one graphviz node per box."""
        for box in boxes:
            name = 'node%d' % self.node_id
            self.node_id += 1
            # height is scaled from pixels to graphviz inches.
            self.graph.add_node(name, label=self._label(box), shape='none',
                                fontsize=10, height=box.HEIGHT / 40.0)
            self.node_names[box] = name
    def _add_edges(self, boxes):
        """Create one edge per connection, anchored on the port cells."""
        for box in boxes:
            for conn in box.outgoing():
                # Heavier edges for likely-audio connections.
                weight = 2 if self._might_be_audio_rate(conn) else 1
                self.graph.add_edge(
                    self.node_names[box], self.node_names[conn.inlet.box],
                    headport='i%d:n' % conn.inlet.idx,
                    tailport='o%d:s' % conn.outlet.idx,
                    arrowhead='tee', weight=weight)
    def _might_be_audio_rate(self, conn):
        """Heuristic: does *conn* probably carry an audio-rate signal?"""
        from_box = conn.outlet.box
        if from_box.outlets and from_box.outlets[conn.outlet.idx]:
            return True
        to_box = conn.inlet.box
        if to_box.inlets and to_box.inlets[conn.inlet.idx]:
            return True
        # Otherwise guess from the boxes themselves.
        return from_box.audio_rate and to_box.audio_rate
    def place_all(self, boxes):
        """Lay out all boxes; return a dict mapping box -> (x, y)."""
        self._add_nodes(boxes)
        self._add_edges(boxes)
        # args='-y' inverts the y-axis.
        self.graph.layout(prog='dot', args='-y')
        return dict(
            (box, self._parse_coord(self.graph.get_node(self.node_names[box])))
            for box in boxes)
| true | true |
1c3473d2b9d25fbb8ba2ff4fdf3423f69a3d79d6 | 369 | py | Python | pkg/auth/schema.py | Krishap-s/Encrypt-Everywhere | cf1f6f32b856685e3d29679dbf66e20876d30313 | [
"MIT"
] | null | null | null | pkg/auth/schema.py | Krishap-s/Encrypt-Everywhere | cf1f6f32b856685e3d29679dbf66e20876d30313 | [
"MIT"
] | null | null | null | pkg/auth/schema.py | Krishap-s/Encrypt-Everywhere | cf1f6f32b856685e3d29679dbf66e20876d30313 | [
"MIT"
] | null | null | null | from pydantic import BaseModel, EmailStr
class AddUserSchema(BaseModel):
    """Request body for registering a new user."""
    name:str
    email:EmailStr
    salt:str
    # The master password is only ever stored encrypted; the derived key is
    # what the server verifies against.
    encrypted_master_password:str
    derived_key:str
class SignInSchema(BaseModel):
    """Request body for signing in with a client-side derived key."""
    email:EmailStr
    derived_key:str
class GetUserSchema(BaseModel):
    """Response body describing a user record.

    NOTE(review): pydantic treats underscore-prefixed attributes such as
    ``_id`` as private, so ``_id`` will not behave as a model field — confirm
    this is intended.
    """
    _id:str
    name:str
    email:EmailStr
    encrypted_master_password:str
    token:str
| 18.45 | 40 | 0.739837 | from pydantic import BaseModel, EmailStr
class AddUserSchema(BaseModel):
    """Payload accepted when creating a user account."""
    name:str
    email:EmailStr
    salt:str
    encrypted_master_password:str
    derived_key:str
class SignInSchema(BaseModel):
    """Payload accepted when a user signs in."""
    email:EmailStr
    derived_key:str
class GetUserSchema(BaseModel):
    """Shape of a user record returned to clients.

    NOTE(review): pydantic ignores underscore-prefixed names, so ``_id``
    is not treated as a model field — verify against the callers.
    """
    _id:str
    name:str
    email:EmailStr
    encrypted_master_password:str
    token:str
| true | true |
1c34748d620a653ca09649749f7210e101fd1278 | 1,234 | py | Python | allennlp/training/metrics/average.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 1 | 2022-01-06T02:06:23.000Z | 2022-01-06T02:06:23.000Z | allennlp/training/metrics/average.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 52 | 2020-11-11T13:08:25.000Z | 2021-12-16T13:04:30.000Z | allennlp/training/metrics/average.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | null | null | null | from allennlp.training.metrics.metric import Metric
from allennlp.nn.util import dist_reduce_sum
@Metric.register("average")
class Average(Metric):
"""
This [`Metric`](./metric.md) breaks with the typical `Metric` API and just stores values that were
computed in some fashion outside of a `Metric`. If you have some external code that computes
the metric for you, for instance, you can use this to report the average result using our
`Metric` API.
"""
def __init__(self) -> None:
self._total_value = 0.0
self._count = 0
def __call__(self, value):
"""
# Parameters
value : `float`
The value to average.
"""
self._count += dist_reduce_sum(1)
self._total_value += dist_reduce_sum(float(list(self.detach_tensors(value))[0]))
def get_metric(self, reset: bool = False):
"""
# Returns
The average of all values that were passed to `__call__`.
"""
average_value = self._total_value / self._count if self._count > 0 else 0.0
if reset:
self.reset()
return float(average_value)
def reset(self):
self._total_value = 0.0
self._count = 0
| 28.697674 | 102 | 0.622366 | from allennlp.training.metrics.metric import Metric
from allennlp.nn.util import dist_reduce_sum
@Metric.register("average")
class Average(Metric):
def __init__(self) -> None:
self._total_value = 0.0
self._count = 0
def __call__(self, value):
self._count += dist_reduce_sum(1)
self._total_value += dist_reduce_sum(float(list(self.detach_tensors(value))[0]))
def get_metric(self, reset: bool = False):
average_value = self._total_value / self._count if self._count > 0 else 0.0
if reset:
self.reset()
return float(average_value)
def reset(self):
self._total_value = 0.0
self._count = 0
| true | true |
1c3474e689134df0fd4ac4bf9c158084911a2b25 | 12,265 | py | Python | inference-engine/ie_bridges/python/tests/test_IENetwork.py | NikDemoShow/openvino | 31907e51e96f1603753dc69811bdf738374ca5e6 | [
"Apache-2.0"
] | 1 | 2022-02-10T08:05:09.000Z | 2022-02-10T08:05:09.000Z | inference-engine/ie_bridges/python/tests/test_IENetwork.py | NikDemoShow/openvino | 31907e51e96f1603753dc69811bdf738374ca5e6 | [
"Apache-2.0"
] | 105 | 2020-06-04T00:23:29.000Z | 2022-02-21T13:04:33.000Z | inference-engine/ie_bridges/python/tests/test_IENetwork.py | NikDemoShow/openvino | 31907e51e96f1603753dc69811bdf738374ca5e6 | [
"Apache-2.0"
] | 3 | 2021-04-25T06:52:41.000Z | 2021-05-07T02:01:44.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
import warnings
from openvino.inference_engine import IECore, IENetwork, DataPtr, InputInfoPtr, PreProcessInfo
from conftest import model_path
test_net_xml, test_net_bin = model_path()
def test_create_ie_network_deprecated():
with warnings.catch_warnings(record=True) as w:
net = IENetwork(model=test_net_xml, weights=test_net_bin)
assert isinstance(net, IENetwork)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_xml_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model="./model.xml", weights=test_net_bin)
assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_bin_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model=test_net_xml, weights="./model.bin")
assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_name():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.name == "test_model"
def test_inputs_deprecated():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with warnings.catch_warnings(record=True) as w:
inp = net.inputs
assert isinstance(inp['data'], DataPtr)
assert inp['data'].layout == "NCHW"
assert inp['data'].precision == "FP32"
assert inp['data'].shape == [1, 3, 32, 32]
assert len(w) == 1
assert "'inputs' property of IENetwork class is deprecated. " \
"To access DataPtrs user need to use 'input_data' property " \
"of InputInfoPtr objects which " \
"can be accessed by 'input_info' property." in str(w[-1].message)
def test_input_info():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert isinstance(net.input_info['data'], InputInfoPtr)
assert net.input_info['data'].layout == "NCHW"
assert net.input_info['data'].precision == "FP32"
assert isinstance(net.input_info['data'].input_data, DataPtr)
assert isinstance(net.input_info['data'].preprocess_info, PreProcessInfo)
def test_input_info_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.input_info['data'].layout == "NCHW"
net.input_info['data'].layout = "NHWC"
assert net.input_info['data'].layout == "NHWC"
def test_input_input_info_layout_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.input_info['data'].precision == "FP32"
net.input_info['data'].precision = "I8"
assert net.input_info['data'].precision == "I8"
def test_input_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.input_info['data'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_input_unsupported_layout_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.input_info['data'].layout = "BLA"
assert "Unsupported layout BLA! List of supported layouts: " in str(e.value)
def test_outputs():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert isinstance(net.outputs['fc_out'], DataPtr)
assert net.outputs['fc_out'].layout == "NC"
assert net.outputs['fc_out'].precision == "FP32"
assert net.outputs['fc_out'].shape == [1, 10]
def test_output_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.outputs['fc_out'].precision == "FP32"
net.outputs['fc_out'].precision = "I8"
assert net.outputs['fc_out'].precision == "I8"
def test_output_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.outputs['fc_out'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_add_ouputs():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs('28/Reshape')
net.add_outputs(['29/WithoutBiases'])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_add_outputs_with_port():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs(('28/Reshape', 0))
net.add_outputs([('29/WithoutBiases', 0)])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_add_outputs_with_and_without_port():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs('28/Reshape')
net.add_outputs([('29/WithoutBiases', 0)])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_batch_size_getter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.batch_size == 1
def test_batch_size_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.batch_size = 4
assert net.batch_size == 4
assert net.input_info['data'].input_data.shape == [4, 3, 32, 32]
def test_batch_size_after_reshape():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.reshape({'data': [4, 3, 32, 32]})
assert net.batch_size == 4
assert net.input_info['data'].input_data.shape == [4, 3, 32, 32]
net.reshape({'data': [8, 3, 32, 32]})
assert net.batch_size == 8
assert net.input_info['data'].input_data.shape == [8, 3, 32, 32]
def test_serialize(device):
ie = IECore()
if device == "CPU":
if ie.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to ngraph")
import ngraph as ng
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.serialize("./serialized_net.xml", "./serialized_net.bin")
serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
func_net = ng.function_from_cnn(net)
ops_net = func_net.get_ordered_ops()
ops_net_names = [op.friendly_name for op in ops_net]
func_serialized_net = ng.function_from_cnn(serialized_net)
ops_serialized_net = func_serialized_net.get_ordered_ops()
ops_serialized_net_names = [op.friendly_name for op in ops_serialized_net]
assert ops_serialized_net_names == ops_net_names
os.remove("./serialized_net.xml")
os.remove("./serialized_net.bin")
def test_reshape():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.reshape({"data": (2, 3, 32, 32)})
def test_read_net_from_buffer_deprecated():
with warnings.catch_warnings(record=True) as w:
with open(test_net_bin, 'rb') as f:
bin = f.read()
with open(test_net_xml, 'rb') as f:
xml = f.read()
net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
assert isinstance(net, IENetwork)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_net_from_buffer_valid_deprecated():
ie = IECore()
with open(test_net_bin, 'rb') as f:
bin = f.read()
with open(model_path()[0], 'rb') as f:
xml = f.read()
net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
ref_net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.name == ref_net.name
assert net.batch_size == ref_net.batch_size
ii_net = net.input_info
ii_net2 = ref_net.input_info
o_net = net.outputs
o_net2 = ref_net.outputs
assert ii_net.keys() == ii_net2.keys()
assert o_net.keys() == o_net2.keys()
def test_multi_out_data():
# Regression test 23965
# Check that DataPtr for all output layers not copied between outputs map items
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs(['28/Reshape'])
assert "28/Reshape" in net.outputs and "fc_out" in net.outputs
assert isinstance(net.outputs["28/Reshape"], DataPtr)
assert isinstance(net.outputs["fc_out"], DataPtr)
assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
pass
def test_tensor_names():
model = """
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
"""
ie = IECore()
weights = b''
net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True)
assert net.get_ov_name_for_tensor("relu_t") == "activation"
assert net.get_ov_name_for_tensor("identity_t") == "activation"
assert net.get_ov_name_for_tensor("input") == "in1"
| 39.310897 | 106 | 0.613208 |
import os
import pytest
import warnings
from openvino.inference_engine import IECore, IENetwork, DataPtr, InputInfoPtr, PreProcessInfo
from conftest import model_path
test_net_xml, test_net_bin = model_path()
def test_create_ie_network_deprecated():
with warnings.catch_warnings(record=True) as w:
net = IENetwork(model=test_net_xml, weights=test_net_bin)
assert isinstance(net, IENetwork)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_xml_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model="./model.xml", weights=test_net_bin)
assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_bin_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model=test_net_xml, weights="./model.bin")
assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_name():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.name == "test_model"
def test_inputs_deprecated():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with warnings.catch_warnings(record=True) as w:
inp = net.inputs
assert isinstance(inp['data'], DataPtr)
assert inp['data'].layout == "NCHW"
assert inp['data'].precision == "FP32"
assert inp['data'].shape == [1, 3, 32, 32]
assert len(w) == 1
assert "'inputs' property of IENetwork class is deprecated. " \
"To access DataPtrs user need to use 'input_data' property " \
"of InputInfoPtr objects which " \
"can be accessed by 'input_info' property." in str(w[-1].message)
def test_input_info():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert isinstance(net.input_info['data'], InputInfoPtr)
assert net.input_info['data'].layout == "NCHW"
assert net.input_info['data'].precision == "FP32"
assert isinstance(net.input_info['data'].input_data, DataPtr)
assert isinstance(net.input_info['data'].preprocess_info, PreProcessInfo)
def test_input_info_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.input_info['data'].layout == "NCHW"
net.input_info['data'].layout = "NHWC"
assert net.input_info['data'].layout == "NHWC"
def test_input_input_info_layout_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.input_info['data'].precision == "FP32"
net.input_info['data'].precision = "I8"
assert net.input_info['data'].precision == "I8"
def test_input_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.input_info['data'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_input_unsupported_layout_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.input_info['data'].layout = "BLA"
assert "Unsupported layout BLA! List of supported layouts: " in str(e.value)
def test_outputs():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert isinstance(net.outputs['fc_out'], DataPtr)
assert net.outputs['fc_out'].layout == "NC"
assert net.outputs['fc_out'].precision == "FP32"
assert net.outputs['fc_out'].shape == [1, 10]
def test_output_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.outputs['fc_out'].precision == "FP32"
net.outputs['fc_out'].precision = "I8"
assert net.outputs['fc_out'].precision == "I8"
def test_output_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
with pytest.raises(ValueError) as e:
net.outputs['fc_out'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_add_ouputs():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs('28/Reshape')
net.add_outputs(['29/WithoutBiases'])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_add_outputs_with_port():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs(('28/Reshape', 0))
net.add_outputs([('29/WithoutBiases', 0)])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_add_outputs_with_and_without_port():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs('28/Reshape')
net.add_outputs([('29/WithoutBiases', 0)])
assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
def test_batch_size_getter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.batch_size == 1
def test_batch_size_setter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.batch_size = 4
assert net.batch_size == 4
assert net.input_info['data'].input_data.shape == [4, 3, 32, 32]
def test_batch_size_after_reshape():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.reshape({'data': [4, 3, 32, 32]})
assert net.batch_size == 4
assert net.input_info['data'].input_data.shape == [4, 3, 32, 32]
net.reshape({'data': [8, 3, 32, 32]})
assert net.batch_size == 8
assert net.input_info['data'].input_data.shape == [8, 3, 32, 32]
def test_serialize(device):
ie = IECore()
if device == "CPU":
if ie.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to ngraph")
import ngraph as ng
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.serialize("./serialized_net.xml", "./serialized_net.bin")
serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
func_net = ng.function_from_cnn(net)
ops_net = func_net.get_ordered_ops()
ops_net_names = [op.friendly_name for op in ops_net]
func_serialized_net = ng.function_from_cnn(serialized_net)
ops_serialized_net = func_serialized_net.get_ordered_ops()
ops_serialized_net_names = [op.friendly_name for op in ops_serialized_net]
assert ops_serialized_net_names == ops_net_names
os.remove("./serialized_net.xml")
os.remove("./serialized_net.bin")
def test_reshape():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.reshape({"data": (2, 3, 32, 32)})
def test_read_net_from_buffer_deprecated():
with warnings.catch_warnings(record=True) as w:
with open(test_net_bin, 'rb') as f:
bin = f.read()
with open(test_net_xml, 'rb') as f:
xml = f.read()
net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
assert isinstance(net, IENetwork)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_net_from_buffer_valid_deprecated():
ie = IECore()
with open(test_net_bin, 'rb') as f:
bin = f.read()
with open(model_path()[0], 'rb') as f:
xml = f.read()
net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
ref_net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.name == ref_net.name
assert net.batch_size == ref_net.batch_size
ii_net = net.input_info
ii_net2 = ref_net.input_info
o_net = net.outputs
o_net2 = ref_net.outputs
assert ii_net.keys() == ii_net2.keys()
assert o_net.keys() == o_net2.keys()
def test_multi_out_data():
# Regression test 23965
# Check that DataPtr for all output layers not copied between outputs map items
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.add_outputs(['28/Reshape'])
assert "28/Reshape" in net.outputs and "fc_out" in net.outputs
assert isinstance(net.outputs["28/Reshape"], DataPtr)
assert isinstance(net.outputs["fc_out"], DataPtr)
assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
pass
def test_tensor_names():
model = """
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
"""
ie = IECore()
weights = b''
net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True)
assert net.get_ov_name_for_tensor("relu_t") == "activation"
assert net.get_ov_name_for_tensor("identity_t") == "activation"
assert net.get_ov_name_for_tensor("input") == "in1"
| true | true |
1c34782fa214c3c817dce5a5206ad0051feb3f7b | 5,311 | py | Python | pettingzoo/classic/tictactoe/tictactoe.py | AbhijeetKrishnan/PettingZoo | d1a68923cef108b92012bfaaf2f083c839213d9f | [
"Apache-2.0"
] | 1 | 2021-05-27T05:30:10.000Z | 2021-05-27T05:30:10.000Z | pettingzoo/classic/tictactoe/tictactoe.py | AbhijeetKrishnan/PettingZoo | d1a68923cef108b92012bfaaf2f083c839213d9f | [
"Apache-2.0"
] | null | null | null | pettingzoo/classic/tictactoe/tictactoe.py | AbhijeetKrishnan/PettingZoo | d1a68923cef108b92012bfaaf2f083c839213d9f | [
"Apache-2.0"
] | null | null | null | from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector
from gym import spaces
import numpy as np
import warnings
from pettingzoo.utils import wrappers
from .board import Board
def env():
env = raw_env()
env = wrappers.CaptureStdoutWrapper(env)
env = wrappers.TerminateIllegalWrapper(env, illegal_reward=-1)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
class raw_env(AECEnv):
metadata = {'render.modes': ['human'], "name": "tictactoe_v3"}
def __init__(self):
super().__init__()
self.board = Board()
self.agents = ["player_1", "player_2"]
self.possible_agents = self.agents[:]
self.action_spaces = {i: spaces.Discrete(9) for i in self.agents}
self.observation_spaces = {i: spaces.Dict({
'observation': spaces.Box(low=0, high=1, shape=(3, 3, 2), dtype=np.int8),
'action_mask': spaces.Box(low=0, high=1, shape=(9,), dtype=np.int8)
}) for i in self.agents}
self.rewards = {i: 0 for i in self.agents}
self.dones = {i: False for i in self.agents}
self.infos = {i: {'legal_moves': list(range(0, 9))} for i in self.agents}
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
# Key
# ----
# blank space = 0
# agent 0 = 1
# agent 1 = 2
# An observation is list of lists, where each list represents a row
#
# [[0,0,2]
# [1,2,1]
# [2,1,0]]
def observe(self, agent):
board_vals = np.array(self.board.squares).reshape(3, 3)
cur_player = self.possible_agents.index(agent)
opp_player = (cur_player + 1) % 2
cur_p_board = np.equal(board_vals, cur_player + 1)
opp_p_board = np.equal(board_vals, opp_player + 1)
observation = np.stack([cur_p_board, opp_p_board], axis=2).astype(np.int8)
legal_moves = self._legal_moves() if agent == self.agent_selection else []
action_mask = np.zeros(9, int)
for i in legal_moves:
action_mask[i] = 1
return {'observation': observation, 'action_mask': action_mask}
def _legal_moves(self):
return [i for i in range(len(self.board.squares)) if self.board.squares[i] == 0]
# action in this case is a value from 0 to 8 indicating position to move on tictactoe board
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
# check if input action is a valid move (0 == empty spot)
assert (self.board.squares[action] == 0), "played illegal move"
# play turn
self.board.play_turn(self.agents.index(self.agent_selection), action)
# update infos
# list of valid actions (indexes in board)
# next_agent = self.agents[(self.agents.index(self.agent_selection) + 1) % len(self.agents)]
next_agent = self._agent_selector.next()
if self.board.check_game_over():
winner = self.board.check_for_winner()
if winner == -1:
# tie
pass
elif winner == 1:
# agent 0 won
self.rewards[self.agents[0]] += 1
self.rewards[self.agents[1]] -= 1
else:
# agent 1 won
self.rewards[self.agents[1]] += 1
self.rewards[self.agents[0]] -= 1
# once either play wins or there is a draw, game over, both players are done
self.dones = {i: True for i in self.agents}
# Switch selection to next agents
self._cumulative_rewards[self.agent_selection] = 0
self.agent_selection = next_agent
self._accumulate_rewards()
def reset(self):
# reset environment
self.board = Board()
self.agents = self.possible_agents[:]
self.rewards = {i: 0 for i in self.agents}
self._cumulative_rewards = {i: 0 for i in self.agents}
self.dones = {i: False for i in self.agents}
self.infos = {i: {} for i in self.agents}
# selects the first agent
self._agent_selector.reinit(self.agents)
self._agent_selector.reset()
self.agent_selection = self._agent_selector.reset()
def render(self, mode='human'):
def getSymbol(input):
if input == 0:
return '-'
elif input == 1:
return 'X'
else:
return 'O'
board = list(map(getSymbol, self.board.squares))
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[0]} " + "|" + f" {board[3]} " + "|" + f" {board[6]} ")
print("_" * 5 + "|" + "_" * 5 + "|" + "_" * 5)
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[1]} " + "|" + f" {board[4]} " + "|" + f" {board[7]} ")
print("_" * 5 + "|" + "_" * 5 + "|" + "_" * 5)
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[2]} " + "|" + f" {board[5]} " + "|" + f" {board[8]} ")
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
def close(self):
pass
| 35.172185 | 113 | 0.548296 | from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector
from gym import spaces
import numpy as np
import warnings
from pettingzoo.utils import wrappers
from .board import Board
def env():
env = raw_env()
env = wrappers.CaptureStdoutWrapper(env)
env = wrappers.TerminateIllegalWrapper(env, illegal_reward=-1)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
class raw_env(AECEnv):
metadata = {'render.modes': ['human'], "name": "tictactoe_v3"}
def __init__(self):
super().__init__()
self.board = Board()
self.agents = ["player_1", "player_2"]
self.possible_agents = self.agents[:]
self.action_spaces = {i: spaces.Discrete(9) for i in self.agents}
self.observation_spaces = {i: spaces.Dict({
'observation': spaces.Box(low=0, high=1, shape=(3, 3, 2), dtype=np.int8),
'action_mask': spaces.Box(low=0, high=1, shape=(9,), dtype=np.int8)
}) for i in self.agents}
self.rewards = {i: 0 for i in self.agents}
self.dones = {i: False for i in self.agents}
self.infos = {i: {'legal_moves': list(range(0, 9))} for i in self.agents}
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
def observe(self, agent):
board_vals = np.array(self.board.squares).reshape(3, 3)
cur_player = self.possible_agents.index(agent)
opp_player = (cur_player + 1) % 2
cur_p_board = np.equal(board_vals, cur_player + 1)
opp_p_board = np.equal(board_vals, opp_player + 1)
observation = np.stack([cur_p_board, opp_p_board], axis=2).astype(np.int8)
legal_moves = self._legal_moves() if agent == self.agent_selection else []
action_mask = np.zeros(9, int)
for i in legal_moves:
action_mask[i] = 1
return {'observation': observation, 'action_mask': action_mask}
def _legal_moves(self):
return [i for i in range(len(self.board.squares)) if self.board.squares[i] == 0]
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
assert (self.board.squares[action] == 0), "played illegal move"
self.board.play_turn(self.agents.index(self.agent_selection), action)
next_agent = self._agent_selector.next()
if self.board.check_game_over():
winner = self.board.check_for_winner()
if winner == -1:
pass
elif winner == 1:
self.rewards[self.agents[0]] += 1
self.rewards[self.agents[1]] -= 1
else:
self.rewards[self.agents[1]] += 1
self.rewards[self.agents[0]] -= 1
self.dones = {i: True for i in self.agents}
self._cumulative_rewards[self.agent_selection] = 0
self.agent_selection = next_agent
self._accumulate_rewards()
def reset(self):
self.board = Board()
self.agents = self.possible_agents[:]
self.rewards = {i: 0 for i in self.agents}
self._cumulative_rewards = {i: 0 for i in self.agents}
self.dones = {i: False for i in self.agents}
self.infos = {i: {} for i in self.agents}
self._agent_selector.reinit(self.agents)
self._agent_selector.reset()
self.agent_selection = self._agent_selector.reset()
def render(self, mode='human'):
def getSymbol(input):
if input == 0:
return '-'
elif input == 1:
return 'X'
else:
return 'O'
board = list(map(getSymbol, self.board.squares))
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[0]} " + "|" + f" {board[3]} " + "|" + f" {board[6]} ")
print("_" * 5 + "|" + "_" * 5 + "|" + "_" * 5)
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[1]} " + "|" + f" {board[4]} " + "|" + f" {board[7]} ")
print("_" * 5 + "|" + "_" * 5 + "|" + "_" * 5)
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
print(f" {board[2]} " + "|" + f" {board[5]} " + "|" + f" {board[8]} ")
print(" " * 5 + "|" + " " * 5 + "|" + " " * 5)
def close(self):
pass
| true | true |
1c3478685033008557db52af634886c3a839281b | 12,397 | py | Python | src/oci/bds/models/bds_metastore_configuration.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/bds/models/bds_metastore_configuration.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/bds/models/bds_metastore_configuration.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BdsMetastoreConfiguration(object):
    """The metastore configuration information attached to a cluster.

    Field values arrive as keyword arguments (wired up by the
    ``init_model_state_from_kwargs`` decorator) and are exposed through
    properties. Unrecognized enum values for ``metastore_type`` or
    ``lifecycle_state`` are coerced to ``'UNKNOWN_ENUM_VALUE'``.
    """

    # Allowed values of the ``metastore_type`` property.
    METASTORE_TYPE_LOCAL = "LOCAL"
    METASTORE_TYPE_EXTERNAL = "EXTERNAL"

    # Allowed values of the ``lifecycle_state`` property.
    LIFECYCLE_STATE_CREATING = "CREATING"
    LIFECYCLE_STATE_ACTIVATING = "ACTIVATING"
    LIFECYCLE_STATE_ACTIVE = "ACTIVE"
    LIFECYCLE_STATE_INACTIVE = "INACTIVE"
    LIFECYCLE_STATE_UPDATING = "UPDATING"
    LIFECYCLE_STATE_FAILED = "FAILED"
    LIFECYCLE_STATE_DELETING = "DELETING"
    LIFECYCLE_STATE_DELETED = "DELETED"

    def __init__(self, **kwargs):
        """Initializes a new BdsMetastoreConfiguration.

        Supported keyword arguments: ``id``, ``display_name``,
        ``metastore_type``, ``metastore_id``, ``bds_api_key_id``,
        ``lifecycle_state``, ``time_created``, ``time_updated``.
        """
        # (python attribute, JSON wire name, swagger type) for each field.
        field_specs = [
            ("id", "id", "str"),
            ("display_name", "displayName", "str"),
            ("metastore_type", "metastoreType", "str"),
            ("metastore_id", "metastoreId", "str"),
            ("bds_api_key_id", "bdsApiKeyId", "str"),
            ("lifecycle_state", "lifecycleState", "str"),
            ("time_created", "timeCreated", "datetime"),
            ("time_updated", "timeUpdated", "datetime"),
        ]
        self.swagger_types = {attr: stype for attr, _, stype in field_specs}
        self.attribute_map = {attr: wire for attr, wire, _ in field_specs}
        for attr, _, _ in field_specs:
            setattr(self, "_" + attr, None)

    @property
    def id(self):
        """**[Required]** The ID of the metastore configuration (str)."""
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def display_name(self):
        """**[Required]** The display name of the metastore configuration (str)."""
        return self._display_name

    @display_name.setter
    def display_name(self, value):
        self._display_name = value

    @property
    def metastore_type(self):
        """**[Required]** The type of the metastore: "LOCAL", "EXTERNAL",
        or 'UNKNOWN_ENUM_VALUE' for anything unrecognized."""
        return self._metastore_type

    @metastore_type.setter
    def metastore_type(self, value):
        # Unknown values from newer service versions degrade gracefully
        # instead of raising.
        if not value_allowed_none_or_none_sentinel(value, ["LOCAL", "EXTERNAL"]):
            value = 'UNKNOWN_ENUM_VALUE'
        self._metastore_type = value

    @property
    def metastore_id(self):
        """OCID of the Data Catalog metastore; set only for EXTERNAL type (str)."""
        return self._metastore_id

    @metastore_id.setter
    def metastore_id(self, value):
        self._metastore_id = value

    @property
    def bds_api_key_id(self):
        """ID of the BDS API key used; set only for EXTERNAL type (str)."""
        return self._bds_api_key_id

    @bds_api_key_id.setter
    def bds_api_key_id(self, value):
        self._bds_api_key_id = value

    @property
    def lifecycle_state(self):
        """**[Required]** Lifecycle state of the configuration; one of the
        LIFECYCLE_STATE_* constants or 'UNKNOWN_ENUM_VALUE'."""
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, value):
        # Same graceful degradation as metastore_type.
        if not value_allowed_none_or_none_sentinel(
                value,
                ["CREATING", "ACTIVATING", "ACTIVE", "INACTIVE", "UPDATING",
                 "FAILED", "DELETING", "DELETED"]):
            value = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = value

    @property
    def time_created(self):
        """**[Required]** Creation time, RFC 3339 formatted (datetime)."""
        return self._time_created

    @time_created.setter
    def time_created(self, value):
        self._time_created = value

    @property
    def time_updated(self):
        """Last-update time, RFC 3339 formatted (datetime)."""
        return self._time_updated

    @time_updated.setter
    def time_updated(self, value):
        self._time_updated = value

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| 36.037791 | 245 | 0.674034 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BdsMetastoreConfiguration(object):
    """The metastore configuration information.

    Field values arrive as keyword arguments (handled by the
    ``init_model_state_from_kwargs`` decorator) and are exposed via
    properties below.
    """

    # Allowed values of the metastore_type property.
    METASTORE_TYPE_LOCAL = "LOCAL"
    METASTORE_TYPE_EXTERNAL = "EXTERNAL"

    # Allowed values of the lifecycle_state property.
    LIFECYCLE_STATE_CREATING = "CREATING"
    LIFECYCLE_STATE_ACTIVATING = "ACTIVATING"
    LIFECYCLE_STATE_ACTIVE = "ACTIVE"
    LIFECYCLE_STATE_INACTIVE = "INACTIVE"
    LIFECYCLE_STATE_UPDATING = "UPDATING"
    LIFECYCLE_STATE_FAILED = "FAILED"
    LIFECYCLE_STATE_DELETING = "DELETING"
    LIFECYCLE_STATE_DELETED = "DELETED"

    def __init__(self, **kwargs):
        """Initializes a new BdsMetastoreConfiguration from keyword arguments:
        id, display_name, metastore_type, metastore_id, bds_api_key_id,
        lifecycle_state, time_created, time_updated.
        """
        # Swagger type of each python attribute (used for (de)serialization).
        self.swagger_types = {
            'id': 'str',
            'display_name': 'str',
            'metastore_type': 'str',
            'metastore_id': 'str',
            'bds_api_key_id': 'str',
            'lifecycle_state': 'str',
            'time_created': 'datetime',
            'time_updated': 'datetime'
        }
        # JSON wire name of each python attribute.
        self.attribute_map = {
            'id': 'id',
            'display_name': 'displayName',
            'metastore_type': 'metastoreType',
            'metastore_id': 'metastoreId',
            'bds_api_key_id': 'bdsApiKeyId',
            'lifecycle_state': 'lifecycleState',
            'time_created': 'timeCreated',
            'time_updated': 'timeUpdated'
        }
        self._id = None
        self._display_name = None
        self._metastore_type = None
        self._metastore_id = None
        self._bds_api_key_id = None
        self._lifecycle_state = None
        self._time_created = None
        self._time_updated = None

    @property
    def id(self):
        """**[Required]** The ID of the metastore configuration (str)."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def display_name(self):
        """**[Required]** The display name of the metastore configuration (str)."""
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        self._display_name = display_name

    @property
    def metastore_type(self):
        """**[Required]** Type of the metastore: "LOCAL", "EXTERNAL", or
        'UNKNOWN_ENUM_VALUE' for unrecognized service values."""
        return self._metastore_type

    @metastore_type.setter
    def metastore_type(self, metastore_type):
        """Sets metastore_type; unknown values are coerced to 'UNKNOWN_ENUM_VALUE'."""
        allowed_values = ["LOCAL", "EXTERNAL"]
        if not value_allowed_none_or_none_sentinel(metastore_type, allowed_values):
            metastore_type = 'UNKNOWN_ENUM_VALUE'
        self._metastore_type = metastore_type

    @property
    def metastore_id(self):
        """OCID of the Data Catalog metastore; set only when type is EXTERNAL (str)."""
        return self._metastore_id

    @metastore_id.setter
    def metastore_id(self, metastore_id):
        self._metastore_id = metastore_id

    @property
    def bds_api_key_id(self):
        """ID of the BDS API key used; set only when type is EXTERNAL (str)."""
        return self._bds_api_key_id

    @bds_api_key_id.setter
    def bds_api_key_id(self, bds_api_key_id):
        self._bds_api_key_id = bds_api_key_id

    @property
    def lifecycle_state(self):
        """**[Required]** Lifecycle state; one of the LIFECYCLE_STATE_*
        constants or 'UNKNOWN_ENUM_VALUE'."""
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """Sets lifecycle_state; unknown values are coerced to 'UNKNOWN_ENUM_VALUE'."""
        allowed_values = ["CREATING", "ACTIVATING", "ACTIVE", "INACTIVE", "UPDATING", "FAILED", "DELETING", "DELETED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state

    @property
    def time_created(self):
        """**[Required]** Creation time, RFC 3339 formatted (datetime)."""
        return self._time_created

    @time_created.setter
    def time_created(self, time_created):
        self._time_created = time_created

    @property
    def time_updated(self):
        """Last-update time, RFC 3339 formatted (datetime)."""
        return self._time_updated

    @time_updated.setter
    def time_updated(self, time_updated):
        self._time_updated = time_updated

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| true | true |
1c3478bbe14778e56f347b0fc81f273cc23619f8 | 37,447 | py | Python | run_classifier.py | kunde122/bert | def0a6534b77de915c5d39b2ffd05fd19ac3f2f2 | [
"Apache-2.0"
] | null | null | null | run_classifier.py | kunde122/bert | def0a6534b77de915c5d39b2ffd05fd19ac3f2f2 | [
"Apache-2.0"
] | null | null | null | run_classifier.py | kunde122/bert | def0a6534b77de915c5d39b2ffd05fd19ac3f2f2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
# Command-line flag registry; FLAGS is populated when absl parses argv in main.
flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

# TPU-only flags; ignored when use_tpu is False.
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
  """A single training/test example for simple sequence classification."""

  def __init__(self, guid, text_a, text_b=None, label=None):
    """Stores the raw (untokenized) fields of one example.

    Args:
      guid: Unique id for the example.
      text_a: string. Untokenized text of the first (or only) sequence.
      text_b: (Optional) string. Untokenized second sequence; required only
        for sequence-pair tasks.
      label: (Optional) string. Gold label; present for train/dev examples,
        absent for test examples.
    """
    self.guid, self.text_a = guid, text_a
    self.text_b, self.label = text_b, label
class PaddingInputExample(object):
  """Fake example used to pad a batch out to a fixed size.

  TPU eval/predict requires every batch to have exactly the same size, so the
  number of examples is padded up to a multiple of the batch size rather than
  dropping the final partial batch (which would lose output rows). A dedicated
  class is used instead of `None` so that accidentally treating padding as a
  real example fails loudly instead of silently.
  """
class InputFeatures(object):
  """A single set of numeric features for one example."""

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    """Stores the padded token ids, mask, segment ids and label id.

    `is_real_example` is False only for batch-padding placeholders.
    """
    for name, value in (("input_ids", input_ids),
                        ("input_mask", input_mask),
                        ("segment_ids", segment_ids),
                        ("label_id", label_id),
                        ("is_real_example", is_real_example)):
      setattr(self, name, value)
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""

  def get_train_examples(self, data_dir):
    """Returns `InputExample`s for the train split; subclasses must override."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Returns `InputExample`s for the dev split; subclasses must override."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Returns `InputExample`s for prediction; subclasses must override."""
    raise NotImplementedError()

  def get_labels(self):
    """Returns the list of label strings; subclasses must override."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file into a list of row lists."""
    with tf.gfile.Open(input_file, "r") as f:
      return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""

  def __init__(self):
    # Only this language's rows are used from the multilingual files.
    self.language = "zh"

  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for i, line in enumerate(lines):
      if i == 0:  # header row
        continue
      label = tokenization.convert_to_unicode(line[2])
      # Normalize the nonstandard "contradictory" label.
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(InputExample(
          guid="train-%d" % i,
          text_a=tokenization.convert_to_unicode(line[0]),
          text_b=tokenization.convert_to_unicode(line[1]),
          label=label))
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    wanted = tokenization.convert_to_unicode(self.language)
    examples = []
    for i, line in enumerate(lines):
      if i == 0:  # header row
        continue
      # The dev file mixes languages; keep only the configured one.
      if tokenization.convert_to_unicode(line[0]) != wanted:
        continue
      examples.append(InputExample(
          guid="dev-%d" % i,
          text_a=tokenization.convert_to_unicode(line[6]),
          text_b=tokenization.convert_to_unicode(line[7]),
          label=tokenization.convert_to_unicode(line[1])))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i, line in enumerate(lines):
      if i == 0:  # header row
        continue
      # Test rows have no gold label; a dummy class keeps the pipeline happy.
      label = ("contradiction" if set_type == "test"
               else tokenization.convert_to_unicode(line[-1]))
      examples.append(InputExample(
          guid="%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])),
          text_a=tokenization.convert_to_unicode(line[8]),
          text_b=tokenization.convert_to_unicode(line[9]),
          label=label))
    return examples
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i, line in enumerate(lines):
      if i == 0:  # header row
        continue
      # Test rows have no gold label; substitute a dummy "0".
      label = ("0" if set_type == "test"
               else tokenization.convert_to_unicode(line[0]))
      examples.append(InputExample(
          guid="%s-%s" % (set_type, i),
          text_a=tokenization.convert_to_unicode(line[3]),
          text_b=tokenization.convert_to_unicode(line[4]),
          label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i, line in enumerate(lines):
      # Only the test split carries a header row.
      if set_type == "test" and i == 0:
        continue
      if set_type == "test":
        # Test rows: sentence in column 1, dummy label.
        text_a, label = tokenization.convert_to_unicode(line[1]), "0"
      else:
        # Train/dev rows: sentence in column 3, label in column 1.
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(InputExample(
          guid="%s-%s" % (set_type, i), text_a=text_a, text_b=None,
          label=label))
    return examples
class SsProcessor(DataProcessor):
  """Processor for a generic single-sentence binary classification data set.

  Train/dev files are tab-separated with the sentence in column 0 and the
  label in column 1. Test files carry a header row (skipped) with the
  sentence in column 1; a dummy "0" label is substituted.

  NOTE: the previous docstring said "CoLA data set" — a copy-paste error
  from `ColaProcessor`, whose column layout (sentence in column 3) differs.
  """

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets.

    Train/dev: text in column 0, label in column 1.
    Test: header row skipped; text in column 1, label fixed to "0".
    """
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[0])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: int index of the example (first five are logged).
    example: `InputExample` or `PaddingInputExample`.
    label_list: list of all label strings; position defines the label id.
    max_seq_length: int; outputs are truncated/padded to exactly this length.
    tokenizer: object exposing `tokenize` and `convert_tokens_to_ids`.

  Returns:
    An `InputFeatures`; all-zero with `is_real_example=False` for padding.
  """
  if isinstance(example, PaddingInputExample):
    # Batch-padding placeholder: all-zero features, flagged as not real.
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  label_map = {label: i for i, label in enumerate(label_list)}

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

  if tokens_b:
    # Pair input is laid out as [CLS] a [SEP] b [SEP]; reserve 3 slots for
    # the special tokens and trim both sequences in place.
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Single input is [CLS] a [SEP]; reserve 2 slots.
    tokens_a = tokens_a[0:(max_seq_length - 2)]

  # segment_ids ("type_ids") mark which sequence each token belongs to; the
  # type-0/type-1 embeddings were learned during pre-training. The [CLS]
  # vector is what the classifier reads, which only works because the whole
  # model is fine-tuned.
  tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
  segment_ids = [0] * len(tokens)
  if tokens_b:
    tokens = tokens + tokens_b + ["[SEP]"]
    segment_ids = segment_ids + [1] * (len(tokens_b) + 1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)
  # Mask is 1 for real tokens, 0 for padding; only real tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad every output up to the fixed sequence length.
  pad_len = max_seq_length - len(input_ids)
  input_ids = input_ids + [0] * pad_len
  input_mask = input_mask + [0] * pad_len
  segment_ids = segment_ids + [0] * pad_len

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  return InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file at `output_file`."""

  def _int64_feature(values):
    """Wraps an iterable of ints as a tf.train int64 Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  writer = tf.python_io.TFRecordWriter(output_file)
  for ex_index, example in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    features = collections.OrderedDict([
        ("input_ids", _int64_feature(feature.input_ids)),
        ("input_mask", _int64_feature(feature.input_mask)),
        ("segment_ids", _int64_feature(feature.segment_ids)),
        ("label_ids", _int64_feature([feature.label_id])),
        ("is_real_example", _int64_feature([int(feature.is_real_example)])),
    ])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a serialized tf.Example into a dict of int32 tensors."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only stores tf.int64, but the TPU only supports tf.int32,
    # so every int64 tensor is downcast.
    for name in list(example.keys()):
      tensor = example[name]
      if tensor.dtype == tf.int64:
        tensor = tf.to_int32(tensor)
      example[name] = tensor
    return example

  def input_fn(params):
    """The actual input function; `params["batch_size"]` is set by the estimator."""
    batch_size = params["batch_size"]

    dataset = tf.data.TFRecordDataset(input_file)
    if is_training:
      # Training repeats the data indefinitely and shuffles it. The shuffle
      # buffer holds the next `buffer_size` records in order; each output
      # element is drawn at random from the buffer, which is then refilled
      # from the stream. Eval uses neither repeat nor shuffle.
      dataset = dataset.repeat()
      dataset = dataset.shuffle(buffer_size=100)

    return dataset.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model.

  Args:
    bert_config: `modeling.BertConfig` describing the encoder.
    is_training: If True, dropout is applied to the pooled output.
    input_ids: int Tensor of token ids, shape [batch, seq_length].
    input_mask: int Tensor marking real tokens vs. padding.
    segment_ids: int Tensor of sentence A/B segment ids.
    labels: int Tensor of gold label ids, shape [batch].
    num_labels: Number of output classes.
    use_one_hot_embeddings: Use one-hot embedding lookup (preferred on TPU).

  Returns:
    Tuple of (mean loss, per-example loss, logits, softmax probabilities).
  """
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)
  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()
  hidden_size = output_layer.shape[-1].value
  # Task-specific classification head on top of the pooled output.
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())
  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    # Standard softmax cross-entropy computed from one-hot labels.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  Args:
    bert_config: `modeling.BertConfig` for the encoder.
    num_labels: Number of output classes.
    init_checkpoint: Optional pre-trained checkpoint to warm-start from.
    learning_rate: Learning rate passed to the optimizer.
    num_train_steps: Total number of training steps.
    num_warmup_steps: Number of warmup steps.
    use_tpu: Whether TPU-specific ops should be built.
    use_one_hot_embeddings: Use one-hot embedding lookup (preferred on TPU).
  """
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      # Padding examples carry weight 0.0 so they are excluded from metrics.
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)
    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      # Warm-start every trainable variable whose name matches the checkpoint.
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        """Computes weighted eval accuracy and mean loss."""
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # PREDICT mode: only the class probabilities are emitted.
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec
  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Holds every feature in memory as constant tensors, so it only suits small
  data sets (see `file_based_input_fn_builder` for the scalable path).

  Args:
    features: List of `InputFeatures`.
    seq_length: Fixed length each feature vector was padded to.
    is_training: If True, repeat the dataset indefinitely and shuffle it.
    drop_remainder: Whether to drop the final partial batch (required on TPU).
  """
  # Transpose the per-example feature objects into parallel column lists.
  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []
  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)
    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d
  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""
  total = len(examples)
  features = []
  for idx, ex in enumerate(examples):
    # Progress logging every 10k examples.
    if idx % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (idx, total))
    features.append(
        convert_single_example(idx, ex, label_list, max_seq_length,
                               tokenizer))
  return features
def set_flags(flags):
    """Populate *flags* in place with the MRPC fine-tuning configuration.

    Returns the same flags object for convenience.
    """
    BERT_BASE_DIR = '../uncased_L-12_H-768_A-12'
    print(os.path.abspath(BERT_BASE_DIR))
    GLUE_DIR = 'glue_data'
    # Collected in one mapping so the configuration reads as a table.
    settings = {
        'task_name': 'MRPC',
        'do_train': True,
        'do_eval': True,
        'data_dir': GLUE_DIR + '/MRPC',
        'vocab_file': BERT_BASE_DIR + '/vocab.txt',
        'bert_config_file': BERT_BASE_DIR + '/bert_config.json',
        'init_checkpoint': BERT_BASE_DIR + '/bert_model.ckpt',
        'max_seq_length': 128,
        'train_batch_size': 32,
        'learning_rate': 2e-5,
        'num_train_epochs': 3.0,
        'output_dir': 'tmp/mrpc_output/',
    }
    for name, value in settings.items():
        setattr(flags, name, value)
    return flags
def set_flags_ss(flags):
    """Populate *flags* in place with the Chinese 'ssadr' task configuration.

    Returns the same flags object for convenience.
    """
    BERT_BASE_DIR = '../chinese_L-12_H-768_A-12'
    print(os.path.abspath(BERT_BASE_DIR))
    GLUE_DIR = 'my_data'
    # Collected in one mapping so the configuration reads as a table.
    settings = {
        'task_name': 'ssadr',
        'do_train': True,
        'do_eval': True,
        'data_dir': GLUE_DIR,
        'vocab_file': BERT_BASE_DIR + '/vocab.txt',
        'bert_config_file': BERT_BASE_DIR + '/bert_config.json',
        'init_checkpoint': BERT_BASE_DIR + '/bert_model.ckpt',
        'max_seq_length': 128,
        'train_batch_size': 32,
        'learning_rate': 2e-5,
        'num_train_epochs': 3.0,
        'output_dir': 'tmp/ss_output/',
    }
    for name, value in settings.items():
        setattr(flags, name, value)
    return flags
def main(_):
  """Fine-tunes, evaluates and/or runs prediction per FLAGS settings."""
  tf.logging.set_verbosity(tf.logging.INFO)
  # Registry mapping --task_name (lower-cased) to its DataProcessor class.
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "ssadr":SsProcessor,
  }
  # Fail fast if do_lower_case contradicts the chosen checkpoint's casing.
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  # The positional-embedding table caps the usable sequence length.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
  tf.gfile.MakeDirs(FLAGS.output_dir)
  task_name = FLAGS.task_name.lower()
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
  processor = processors[task_name]()
  label_list = processor.get_labels()
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    # Total optimizer steps = examples / batch_size * epochs; warmup is the
    # configured fraction of those steps.
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
  if FLAGS.do_train:
    # Serialize training examples to a TFRecord file once, then stream it.
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())
    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    # Metrics are both logged and written to eval_results.txt.
    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)
    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)
    result = estimator.predict(input_fn=predict_input_fn)
    # One tab-separated row of class probabilities per real test example.
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        # Stop before the padding examples appended for TPU batch alignment.
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  # Flags are populated programmatically by set_flags_ss (the 'ssadr' task
  # configuration) rather than parsed from the command line.
  flags.FLAGS = set_flags_ss(flags.FLAGS)
  tf.app.run()
| 35.194549 | 82 | 0.681603 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
ing(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
class InputFeatures(object):
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_test_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class SsProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
#每次从数据源中按顺序取buffer_size个样本,并打乱。
# 每次从中取一个样本放入batch中,填充buffer_size,。。。,直至达到batchsize
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Holds every feature tensor in memory as a tf.constant, so this is only
  suitable for small data sets; large corpora should use the tf_record-based
  input_fn builder instead.
  """
  all_input_ids = [f.input_ids for f in features]
  all_input_mask = [f.input_mask for f in features]
  all_segment_ids = [f.segment_ids for f in features]
  all_label_ids = [f.label_id for f in features]

  def input_fn(params):
    """The actual input function handed to the estimator."""
    batch_size = params["batch_size"]
    num_examples = len(features)

    dataset = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      # Repeat forever and shuffle within a small buffer, matching the
      # original training pipeline behavior.
      dataset = dataset.repeat()
      dataset = dataset.shuffle(buffer_size=100)

    return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder)

  return input_fn
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Converts a list of `InputExample`s to a list of feature objects.

  Thin loop over `convert_single_example`, logging progress every 10k
  examples. Returns the features in the same order as `examples`.
  """
  features = []
  for ex_index, example in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    features.append(
        convert_single_example(ex_index, example, label_list, max_seq_length,
                               tokenizer))
  return features
def set_flags(flags):
  """Populate `flags` with the default MRPC fine-tuning configuration.

  Mirrors the command-line invocation from the BERT README (uncased base
  model, GLUE MRPC task). Mutates and returns the passed-in flags object.
  """
  bert_base_dir = '../uncased_L-12_H-768_A-12'
  print(os.path.abspath(bert_base_dir))
  glue_dir = 'glue_data'

  flags.task_name = 'MRPC'
  flags.do_train = True
  flags.do_eval = True
  flags.data_dir = glue_dir + '/MRPC'
  flags.vocab_file = bert_base_dir + '/vocab.txt'
  flags.bert_config_file = bert_base_dir + '/bert_config.json'
  flags.init_checkpoint = bert_base_dir + '/bert_model.ckpt'
  flags.max_seq_length = 128
  flags.train_batch_size = 32
  flags.learning_rate = 2e-5
  flags.num_train_epochs = 3.0
  flags.output_dir = 'tmp/mrpc_output/'
  return flags
def set_flags_ss(flags):
  """Populate `flags` with the 'ssadr' run configuration.

  Uses the Chinese base model and a local 'my_data' directory. Mutates and
  returns the passed-in flags object.
  """
  bert_base_dir = '../chinese_L-12_H-768_A-12'
  print(os.path.abspath(bert_base_dir))
  glue_dir = 'my_data'

  flags.task_name = 'ssadr'
  flags.do_train = True
  flags.do_eval = True
  flags.data_dir = glue_dir
  flags.vocab_file = bert_base_dir + '/vocab.txt'
  flags.bert_config_file = bert_base_dir + '/bert_config.json'
  flags.init_checkpoint = bert_base_dir + '/bert_model.ckpt'
  flags.max_seq_length = 128
  flags.train_batch_size = 32
  flags.learning_rate = 2e-5
  flags.num_train_epochs = 3.0
  flags.output_dir = 'tmp/ss_output/'
  return flags
def main(_):
  """Driver: fine-tune, evaluate, and/or predict with BERT per FLAGS.

  Builds the task processor, tokenizer, and TPUEstimator, then runs each of
  the do_train / do_eval / do_predict phases that is enabled. Outputs go to
  FLAGS.output_dir (checkpoints, eval_results.txt, test_results.tsv).
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  # Map of lowercase task name -> data processor class.
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "ssadr":SsProcessor,
  }

  # Sanity check: do_lower_case must agree with the cased/uncased checkpoint.
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  # BERT's learned position embeddings cap the usable sequence length.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    # Total optimizer steps = (examples / batch) * epochs; warmup is a
    # fraction of that total.
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    # Serialize the training examples to a tf_record file, then train.
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    # Write all eval metrics (sorted by name) to eval_results.txt.
    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    # Write one tab-separated probability row per real (non-padding) example.
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        # Stop once the padding examples are reached.
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  # These flags have no defaults; marking them required makes absl abort
  # early with a clear message if they are missing.
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  # Hard-code the 'ssadr' (Chinese model) run configuration in code instead
  # of reading it from the command line.
  flags.FLAGS = set_flags_ss(flags.FLAGS)
  tf.app.run()
| true | true |
1c34792b7c909732c8e6e2de13ded7b83a1de10b | 35,897 | py | Python | scripts/bicorr_plot.py | pfschus/fission_bicorrelation | 103d1d6e93f722c73e33a9af773dd7ebbf4c6f25 | [
"MIT"
] | 1 | 2018-02-26T00:40:29.000Z | 2018-02-26T00:40:29.000Z | scripts/bicorr_plot.py | pfschus/fission_bicorrelation | 103d1d6e93f722c73e33a9af773dd7ebbf4c6f25 | [
"MIT"
] | null | null | null | scripts/bicorr_plot.py | pfschus/fission_bicorrelation | 103d1d6e93f722c73e33a9af773dd7ebbf4c6f25 | [
"MIT"
] | null | null | null | """
Plotting functions for Bicorr project
Moving them here to keep the bicorr.py file cleaner
PFS, March 2018
Changelog:
2018_03_15: Move a few functions here
"""
import matplotlib
#matplotlib.use('agg') # for flux
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import seaborn as sns
sns.set(style='ticks')

import sys
import os
import os.path
import scipy.io as sio
import time
import numpy as np
# Print entire matrices. np.nan was never a valid integer threshold and is
# rejected with a ValueError by numpy >= 1.24; sys.maxsize is the documented
# way to disable summarization.
np.set_printoptions(threshold=sys.maxsize)
import pandas as pd
from tqdm import *

# Don't import any bicorr modules here
# Other modules will import bicorr_plot, but not the other way around
def save_fig_to_folder(fig_filename, fig_folder='fig', extensions=('png', 'pdf'), dpi=300):
    """
    Save the current matplotlib figure to fig_folder/fig_filename in one or
    more formats, creating fig_folder (and parents) if it does not exist.

    Parameters
    ----------
    fig_filename : str
        Filename (without extension) to use for saving the figure.
        If fig_folder is None, used verbatim (caller supplies any extension).
    fig_folder : str, optional
        Folder where to save the image, relative to cwd
    extensions : iterable of str, optional
        File save formats. If several, produce all.
    dpi : int, optional
        Figure resolution in dots per inch

    Returns
    -------
    n/a
    """
    # Don't cut off labels
    plt.tight_layout()

    if fig_folder is None:
        # Save to the working directory as-is
        plt.savefig(fig_filename)
    else:
        # Replaces the old os.stat/bare-except pattern: also handles nested
        # paths, is race-free, and no longer swallows unrelated OSErrors.
        os.makedirs(fig_folder, exist_ok=True)
        for extension in extensions:
            plt.savefig(fig_folder + '/' + fig_filename + '.' + extension, dpi=dpi)
def histogram_metrics(values, xlabel = 'x', ylabel = 'y'):
    """
    Draw a seaborn distribution plot of `values` with vertical markers at the
    mean (black), mean +/- one standard deviation (thin black), and median
    (thin red), then show the figure.

    Parameters
    ----------
    values : array-like
        Values for the histogram
    xlabel : str, optional
    ylabel : str, optional
    """
    mean_val = np.mean(values)
    std_val = np.std(values)
    median_val = np.median(values)

    plt.figure(figsize=(4, 3))
    sns.distplot(values, rug=True)
    # Mean, one-sigma band, then median (drawn last, on top)
    plt.axvline(mean_val, color='k', linewidth=1)
    plt.axvline(mean_val - std_val, color='k', linewidth=.5)
    plt.axvline(mean_val + std_val, color='k', linewidth=.5)
    plt.axvline(median_val, color='r', linewidth=.5)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    sns.despine(right=False)
    plt.show()
def step_plot(edges, y, linewidth=.5, color='k', zorder=1):
    """
    Draw histogram data as a step (outline) plot. Meant for use with
    histogram data generated by:

        counts, bin_edges = np.histogram(x_samples, bin_edges)
        step_plot(bin_edges, counts)

    Parameters
    ----------
    edges : ndarray
        Bin edges (length len(y) + 1)
    y : ndarray
        Bin counts
    linewidth : float, optional
        Width of step lines
    color : str, optional
        Color of lines
    zorder : int, optional
        Order of layer. Lower integer = farther back

    Returns
    -------
    n/a
    """
    # Bin tops: one horizontal segment per bin
    for level, left, right in zip(y, edges[:-1], edges[1:]):
        plt.hlines(level, left, right, linewidth=linewidth, color=color, zorder=zorder)
    # Risers between adjacent bins (interior edges only)
    for edge, lo, hi in zip(edges[1:-1], y[:-1], y[1:]):
        plt.vlines(edge, lo, hi, linewidth=linewidth, color=color, zorder=zorder)
##################### EXPERIMENTAL SETUP STUFF ###########################
def plot_det_df(det_df, which = ['index','angle'], cmap='viridis', title_flag = True, save_flag = False, fig_folder = 'fig', show_flag = True, clear_flag = True):
    """
    Make some plots to visualize the data in det_df, which can be loaded using `load_det_df`.

    Each enabled plot is a square scatter of detector-1 channel vs detector-2
    channel, colored either by the pair's row index or by its opening angle.

    Parameters
    ----------
    det_df : pandas dataFrame
        dataFrame of detector pair indices and angles
        (uses columns 'd1', 'd2', 'angle' and the dataFrame index)
    which : list of str, optional
        Which plots to show? Options include 'index', 'angle'
    cmap : str, optional
        Colormap
    title_flag : bool, optional
    save_flag : bool, optional
        save plots to file
    fig_folder : str, optional
        where to save plots
    show_flag : bool, optional
        display plots
    clear_flag : bool, optional
        whether to clear matplotlib figure

    Returns
    -------
    n/a
    """
    if 'index' in which:
        # Detector pair indices
        # Color each (d1, d2) point by its row index in det_df
        plt.figure(figsize=(4,4))
        ax = plt.gca()
        sc = ax.scatter(det_df['d1'],det_df['d2'],s=13,marker='s',edgecolor='none',c=det_df.index.values,cmap=cmap)
        ax.grid(True, which='both')
        plt.xlim([0,48]); plt.ylim([0,48])
        plt.xlabel('Detector 1 channel'); plt.ylabel('Detector 2 channel')
        cbar =plt.colorbar(sc, fraction = 0.043, pad=0.1)
        cbar.set_label('Detector pair index value')
        if title_flag: plt.title('Detector pair indices\n')
        ax.set_aspect('equal')
        if save_flag: save_fig_to_folder('det_df_ch_to_index',fig_folder=fig_folder)
        if show_flag: plt.show()
        if clear_flag: plt.clf()
    if 'angle' in which:
        # Detector pair angles
        # Same layout, but color each point by the pair's opening angle
        plt.figure(figsize=(4,4))
        ax = plt.gca()
        sc = ax.scatter(det_df['d1'],det_df['d2'],c=det_df['angle'],s=18,marker='s',edgecolor='none',cmap=cmap)
        plt.xlim([0,48]); plt.ylim([0,48])
        plt.xlabel('Detector 1 channel'); plt.ylabel('Detector 2 channel')
        cbar = plt.colorbar(sc,fraction = 0.043, pad=0.1)
        cbar.set_label('Angle (degrees)')
        if title_flag: plt.title('Angle between all detector pairs (degrees)\n')
        ax.set_aspect('equal')
        if save_flag: save_fig_to_folder('det_df_ch_to_angle',fig_folder=fig_folder)
        if show_flag: plt.show()
        if clear_flag: plt.clf()
##################### GENERATING BICORR FILE ###########################
def bicorr_checkpoint_plots(bicorr_data, fig_folder = 'fig', show_flag = False):
    """
    Construct and store checkpoint plots from the bicorr_data matrix.

    Parameters
    ----------
    bicorr_data : ndarray
        Structured array with fields 'det1ch', 'det2ch', and 'event'
    fig_folder : str or int, optional
        If a str: folder to store images in.
        If an int: interpreted as a folder number; images go to '#/bicorr_fig'.
    show_flag : bool, optional
        Display each plot interactively

    Returns
    -------
    n/a
    """
    # Make a subfolder to store the checkpoint plots
    if not isinstance(fig_folder, str):
        fig_folder = str(fig_folder) + '/bicorr_fig'
    # Create the folder if it doesn't exist yet (replaces os.stat/bare-except)
    os.makedirs(fig_folder, exist_ok=True)

    # NOTE: filenames below no longer carry a '.png' suffix --
    # save_fig_to_folder appends the extension itself, so the old arguments
    # produced files named like 'bicorr_pairs_scatter.png.png'.

    # Which detector pairs fired?
    plt.plot(bicorr_data['det1ch'], bicorr_data['det2ch'], '.k')
    plt.xlabel('Detector 1 channel')
    plt.ylabel('Detector 2 channel')
    plt.title('Detector pairs with bicorrelation events')
    save_fig_to_folder('bicorr_pairs_scatter', fig_folder)
    if show_flag: plt.show()
    plt.clf()

    # Plot count rate for each detector pair
    plt.figure(figsize=(7,6))
    plt.hist2d(bicorr_data['det1ch'], bicorr_data['det2ch'], bins=np.arange(-0.5,46.5,1), cmin=1, cmap='viridis')
    plt.ylim([-.5,46.5])
    plt.colorbar()
    plt.grid(True, which='both')
    plt.xticks([i for i in np.arange(0,46,4)])
    plt.yticks([i for i in np.arange(0,46,4)])
    plt.xlabel('Detector 1 channel')
    plt.ylabel('Detector 2 channel')
    plt.title('Frequency of detector pair interactions')
    save_fig_to_folder('bicorr_pairs_2dhist', fig_folder)
    if show_flag: plt.show()
    plt.clf()

    # Plot event number vs. line in
    plt.plot(bicorr_data['event'])
    plt.xlabel('Line number')
    plt.ylabel('Event number')
    plt.title('Event number vs. line number')
    save_fig_to_folder('bicorr_all_evnum', fig_folder)
    if show_flag: plt.show()
    plt.clf()
################# SINGLES_HIST ########################
def plot_singles_hist(singles_hist,dt_bin_edges,
                     save_flag = False, fig_folder ='fig',
                     show_flag = False):
    """
    Plot singles TOF distribution from singles_hist, summed across all
    channels, with separate curves for neutrons and gammas.

    Future development option: incorporate a channel rather than summing
    across all.

    Parameters
    ----------
    singles_hist : ndarray
        Histogram of singles timing information
        Dimension 0: particle type, 0=n, 1=g
        Dimension 1: detector channel
        Dimension 2: dt bin
    dt_bin_edges : ndarray
        Time bin edges array
    save_flag : bool, optional
        save plots to file
    fig_folder : str, optional
        where to save plots
    show_flag : bool, optional
        display plots

    Returns
    -------
    n/a
    """
    plt.figure(figsize=(4,3))
    dt_bin_centers = (dt_bin_edges[:-1]+dt_bin_edges[1:])/2
    # Sum over detector channels; first index selects particle type
    plt.plot(dt_bin_centers,np.sum(singles_hist[0,:,:],axis=(0)))
    plt.plot(dt_bin_centers,np.sum(singles_hist[1,:,:],axis=(0)))
    plt.xlabel('Time (ns)')
    plt.ylabel('Number of events')
    plt.title('Singles TOF distribution, all channels')
    plt.legend(['N','G'])
    plt.yscale('log')
    sns.despine(right=False)
    # Pass the bare filename: save_fig_to_folder appends the extension, so
    # the old 'singles_TOF_dist.png' argument saved 'singles_TOF_dist.png.png'.
    if save_flag: save_fig_to_folder('singles_TOF_dist',fig_folder)
    if show_flag: plt.show()
    plt.clf()
def plot_singles_hist_e_n(singles_hist_e_n, e_bin_edges,
                          save_flag = False, fig_folder ='fig',
                          show_flag = False, clear_flag = True):
    """
    Plot the singles energy distribution summed over all detector channels
    on a log y-scale.

    Parameters
    ----------
    singles_hist_e_n : ndarray
        Histogram of singles energies
        Dimension 0: detector channel (summed over below)
        Dimension 1: energy bin
        (assumed 2-D layout based on the indexing here -- TODO confirm
        against the producer of this array)
    e_bin_edges : ndarray
        Energy bin edges array
    save_flag : bool, optional
        save plots to file
    fig_folder : str, optional
        where to save plots
    show_flag : bool, optional
        display plots
    clear_flag : bool, optional
        clear the matplotlib figure afterwards

    Returns
    -------
    n/a
    """
    plt.figure(figsize=(4, 3))
    bin_mids = (e_bin_edges[:-1] + e_bin_edges[1:]) / 2
    counts_all_channels = np.sum(singles_hist_e_n[:, :], axis=(0))
    plt.plot(bin_mids, counts_all_channels)
    plt.xlabel('Energy (MeV)')
    plt.ylabel('Number of events')
    plt.title('Singles energy distribution, all channels')
    plt.yscale('log')
    if save_flag:
        save_fig_to_folder('singles_e_dist', fig_folder)
    if show_flag:
        plt.show()
    if clear_flag:
        plt.clf()
def Sd_vs_ch_all(singles_df, show_flag = True, save_flag = True,
                 fig_folder = 'fig', normalized = False):
    """
    Plot the background-subtracted singles neutron sum (Sd) with error bars
    against detector channel number.

    Parameters
    ----------
    singles_df : pandas dataFrame
        singles dataframe with 'ch', 'Sd', and 'Sd_err' columns filled in
    show_flag : bool, optional
        display the plot
    save_flag : bool, optional
        save the plot to fig_folder
    fig_folder : str, optional
        destination folder for saved images
    normalized : bool, optional
        accepted for interface compatibility; not used in this plot

    Returns
    -------
    n/a
    """
    plt.figure(figsize=(4, 3))
    marker_style = dict(fmt='.', markersize=5, elinewidth=.5)
    plt.errorbar(singles_df['ch'], singles_df['Sd'],
                 yerr=singles_df['Sd_err'], **marker_style)
    plt.xlabel('detector channel')
    plt.ylabel('Sd (counts)')
    plt.title('br-subtracted $n$ sum')
    sns.despine(right=False)
    if save_flag:
        save_fig_to_folder('Sd_vs_angle_raw', fig_folder, extensions=['png', 'pdf'])
    if show_flag:
        plt.show()
    plt.clf()
################## BHP ##########################
def bhp_plot(bicorr_hist_plot, dt_bin_edges, title = None,
             vmin = None, vmax = None,
             save_flag = False, save_filename = 'bicorr',
             save_folder = 'fig', extensions = ('png', 'pdf'),
             show_flag = False, clear = True):
    """
    Creates 2d bicorr hist plot (log-scale pcolormesh of dt1 vs dt2).

    Parameters
    ----------
    bicorr_hist_plot : ndarray
        Array to plot. Two-dimensional with axes sizes corresponding to dt_bin_edges x dt_bin_edges.
    dt_bin_edges : ndarray
        One-dimensional array of time bin edges
    title : str, optional
    vmin : float, optional
        Minimum of colorbar range
    vmax : float, optional
        Maximum of colorbar range
    save_flag : bool, optional
        Do you want to save to disk using function save_fig_to_folder
    save_filename : str, optional
        Filename for bicorrelation image (extension will be added)
    save_folder : str, optional
        Destination folder location for storing bicorrelation image
    extensions : iterable of str, optional
        File save format. If several, produce all.
        (default changed from a mutable list to an equivalent tuple)
    show_flag : bool, optional
        Display plot to current session with plt.show()
    clear : bool, optional
        Clear matplotlib after creating bicorr plot. (If set to False, you can add more plots before showing, saving, or clearing the figure)

    Returns
    -------
    ax : matplotlib Axes
    """
    fig = plt.figure(figsize=[4,3])
    ax = plt.gca()
    mesh = ax.pcolormesh(dt_bin_edges, dt_bin_edges, bicorr_hist_plot.T,
                         norm=matplotlib.colors.LogNorm(),
                         vmin = vmin, vmax = vmax, cmap="viridis")
    cbar = plt.colorbar(mesh, ax=ax, fraction = 0.043, pad=0.1)
    if np.max(bicorr_hist_plot) >= 1:  # absolute counts
        cbar.set_label('counts')
    else:  # normalized
        # Raw strings: '\c' is an invalid escape sequence in a plain literal
        # (SyntaxWarning on modern Python); rendered text is unchanged.
        cbar.set_label(r'counts / (fission$\cdot$ns$^2$$\cdot$pair)')
    ax.set_xlabel(r'$\Delta t_1$ (ns)')
    ax.set_ylabel(r'$\Delta t_2$ (ns)')

    # Set up ticks
    ax.tick_params(axis='both',
                   which='major',
                   direction='inout',
                   length=6,
                   color='k',
                   bottom=True, right=True, top=True, left=True)
    ax.tick_params(axis='both',
                   which='minor',
                   direction='in',
                   length=3,
                   bottom=True, right=True, top=True, left=True)
    # Major
    ax.xaxis.set_major_locator(MultipleLocator(50))
    ax.yaxis.set_major_locator(MultipleLocator(50))
    # Minor
    ax.xaxis.set_minor_locator(MultipleLocator(10))
    ax.yaxis.set_minor_locator(MultipleLocator(10))

    if title is not None: ax.set_title(title)
    ax.set_aspect('equal')
    plt.tight_layout()

    if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
    if show_flag: plt.show()
    if clear: plt.clf()
    return ax
########################## BHP_E #########################
def bhp_e_plot(bhp_e, e_bin_edges, title = None,
               vmin = None, vmax = None, zoom_range = None,
               save_flag = False, save_filename = 'bicorr_e',
               save_folder = 'fig', extensions = ('png', 'pdf'),
               show_flag = False, clear_flag = True):
    """
    Creates 2d bicorr_e hist plot (log-scale pcolormesh of E1 vs E2).

    Parameters
    ----------
    bhp_e : ndarray
        Two-dimensional bicorrelation histogram in energy space, with axes
        sizes corresponding to e_bin_edges x e_bin_edges.
        (docstring previously described the 4-D master histogram `bhm_e`;
        this function plots a single already-projected 2-D slice of it)
    e_bin_edges : ndarray
        One-dimensional array of energy bin edges
    title : str, optional
    vmin : float, optional
        Minimum of colorbar range
    vmax : float, optional
        Maximum of colorbar range
    zoom_range : list, optional
        Range of x and y axes. Ex: [0,6] for 0 to 6 MeV
    save_flag : bool, optional
        Do you want to save to disk using function save_fig_to_folder
    save_filename : str, optional
        Filename for bicorrelation image (extension will be added)
    save_folder : str, optional
        Destination folder location for storing bicorrelation image
    extensions : iterable of str, optional
        File save format. If several, produce all.
        (default changed from a mutable list to an equivalent tuple)
    show_flag : bool, optional
        Display plot to current session with plt.show()
    clear_flag : bool, optional
        Clear matplotlib after creating bicorr plot. (If set to False, you can add more plots before showing, saving, or clearing the figure)

    Returns
    -------
    ax : matplotlib Axes
    """
    fig = plt.figure(figsize=[4,3])
    ax = plt.gca()
    mesh = plt.pcolormesh(e_bin_edges, e_bin_edges, bhp_e.T,
                          norm=matplotlib.colors.LogNorm(),
                          vmin = vmin, vmax = vmax, cmap="inferno")
    cbar = plt.colorbar(mesh, ax=ax, fraction = 0.043, pad=0.1)
    if np.max(bhp_e) >= 1:  # absolute counts
        cbar.set_label('counts')
    else:  # normalized
        # Raw string: '\c' is an invalid escape sequence in a plain literal
        # (SyntaxWarning on modern Python); rendered text is unchanged.
        cbar.set_label(r'counts / (fission$\cdot$MeV$^2$$\cdot$pair)')
    ax.set_xlabel('$E_1$ (MeV)')
    ax.set_ylabel('$E_2$ (MeV)')

    if title is not None: plt.title(title)
    if zoom_range is not None:
        ax.set_xlim(zoom_range)
        ax.set_ylim(zoom_range)
    ax.set_aspect('equal')

    # Set up ticks
    ax.tick_params(axis='both',
                   which='major',
                   direction='inout',
                   length=6,
                   color='k',
                   bottom=True, right=True, top=True, left=True)
    ax.tick_params(axis='both',
                   which='minor',
                   direction='in',
                   length=3,
                   bottom=True, right=True, top=True, left=True)
    # Major
    ax.xaxis.set_major_locator(MultipleLocator(1))
    ax.yaxis.set_major_locator(MultipleLocator(1))
    # Minor
    ax.xaxis.set_minor_locator(MultipleLocator(.2))
    ax.yaxis.set_minor_locator(MultipleLocator(.2))

    plt.tight_layout()

    if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
    if show_flag: plt.show()
    if clear_flag: plt.clf()
    return ax
############# COUNTS VS. ANGLE #################################
def counts_vs_angle_all(det_df, show_flag = True, save_flag = True,
                        fig_folder = 'fig', normalized = False, t_flag=False):
    """
    Generate plots of counts vs. angle for all pairs separately.

    Parameters
    ----------
    det_df : pandas dataFrame
        detector pair dataframe with counts columns filled in
        ('angle', 'Cd', 'Cd_err'; plus 'Cp'/'Cn' when t_flag, and
        'Nd'/'Nd_err' when normalized)
    show_flag : bool, optional
        display plots
    save_flag : bool, optional
        save plots to fig_folder
    fig_folder : str, optional
        destination folder for saved images
    normalized : bool, optional
        option to plot the normalized Nd column
    t_flag : bool, optional
        option to also plot the raw positive (Cp) and negative (Cn) sums

    Returns
    -------
    n/a
    """
    def _errorbar_vs_angle(counts, yerr, ylabel, title, fname, color):
        # One errorbar plot of a counts column vs. pair angle.
        # color=None keeps matplotlib's default color cycle (Nd plot).
        plt.figure(figsize=(4,3))
        if color is None:
            plt.errorbar(det_df['angle'], counts, yerr=yerr,
                         fmt='.', markersize=5, elinewidth=.5)
        else:
            plt.errorbar(det_df['angle'], counts, yerr=yerr,
                         fmt='.', markersize=5, elinewidth=.5, color=color)
        plt.xlabel('Angle (degrees)')
        plt.ylabel(ylabel)
        plt.title(title)
        sns.despine(right=False)
        if save_flag: save_fig_to_folder(fname, fig_folder, extensions=['png','pdf'])
        if show_flag: plt.show()
        plt.clf()

    if t_flag:
        # Raw positive and negative time-range sums; error taken as sqrt(N)
        _errorbar_vs_angle(det_df['Cp'], det_df['Cp']**.5, 'Cp (counts)',
                           'positive $nn$ sum', 'Cp_vs_angle_raw', 'k')
        _errorbar_vs_angle(det_df['Cn'], det_df['Cn']**.5, 'Cn (counts)',
                           'negative $nn$ sum', 'Cn_vs_angle_raw', 'k')

    # Background-subtracted difference counts
    _errorbar_vs_angle(det_df['Cd'], det_df['Cd_err'], 'Cd (counts)',
                       '$nn$ sum', 'Cd_vs_angle_raw', 'k')

    if normalized:
        # (Removed a leftover debug print('yes') that fired on this branch.)
        _errorbar_vs_angle(det_df['Nd'], det_df['Nd_err'], 'Nd (counts/fission)',
                           'normalized br-subtracted $nn$ sum',
                           'Nd_vs_angle_raw', None)
def W_vs_angle_all(det_df, show_flag = True, save_flag = True, clf_flag = True,
                   fig_folder = 'fig'):
    """
    Plot W (relative doubles counts) vs. angle, one error-bar point per
    detector pair.

    Parameters
    ----------
    det_df : pandas dataFrame
        detector pair dataframe with 'angle', 'W', and 'W_err' columns
        calculated
    show_flag : bool, optional
    save_flag : bool, optional
    clf_flag : bool, optional
    fig_folder : str, optional

    Returns
    -------
    n/a
    """
    plt.figure(figsize=(4, 3))
    plt.errorbar(det_df['angle'], det_df['W'], yerr=det_df['W_err'],
                 fmt='.', markersize=5, elinewidth=.5, zorder=1)
    plt.xlabel('Angle (degrees)')
    plt.ylabel('W (relative doubles counts)')
    sns.despine(right=False)
    if save_flag:
        save_fig_to_folder('W_vs_angle', fig_folder, extensions=['png', 'pdf'])
    if show_flag:
        plt.show()
    if clf_flag:
        plt.clf()
def W_vs_angle_binned(by_angle_df, show_flag = True, save_flag = True, clf_flag = True,
                      fig_folder = 'fig'):
    """
    Plot angle-binned W values: error bars at the bin centers plus a step
    outline over the bin edges.

    Parameters
    ----------
    by_angle_df : pandas dataFrame
        Condensed by-angle dataframe with 'W' and 'std W' calculated
    show_flag : bool, optional
    save_flag : bool, optional
    clf_flag : bool, optional
    fig_folder : str, optional

    Returns
    -------
    n/a
    """
    # Rebuild the full edge list: min of the first bin, then every bin max
    bin_maxes = by_angle_df['angle_bin_max'].values.tolist()
    angle_bin_edges = [by_angle_df.loc[0, 'angle_bin_min']] + bin_maxes

    plt.figure(figsize=(4, 3))
    plt.errorbar(by_angle_df['angle_bin_centers'], by_angle_df['W'],
                 yerr=by_angle_df['std W'], fmt='.', color='k', zorder=3)
    step_plot(angle_bin_edges, by_angle_df['W'], linewidth=1, zorder=2)
    plt.xlabel('Angle (degrees)')
    plt.ylabel('W (relative doubles counts)')
    sns.despine(right=False)
    if save_flag:
        save_fig_to_folder('W_vs_angle_binned', fig_folder, extensions=['png', 'pdf'])
    if show_flag:
        plt.show()
    if clf_flag:
        plt.clf()
def W_vs_angle(det_df, by_angle_df, show_flag = True, save_flag = True, clf_flag = True,
               fig_folder = 'fig'):
    """
    Plot W vs. angle for all detector pairs (red, drawn underneath) overlaid
    with the angle-binned values (black error bars plus step outline).

    Parameters
    ----------
    det_df : pandas dataFrame
        per-pair dataframe with 'angle', 'W', 'W_err'
    by_angle_df : pandas dataFrame
        condensed by-angle dataframe with 'W' and 'std W'
    show_flag : bool, optional
    save_flag : bool, optional
    clf_flag : bool, optional
    fig_folder : str, optional

    Returns
    -------
    n/a
    """
    # Rebuild the full edge list: min of the first bin, then every bin max
    bin_maxes = by_angle_df['angle_bin_max'].values.tolist()
    angle_bin_edges = [by_angle_df.loc[0, 'angle_bin_min']] + bin_maxes

    plt.figure(figsize=(4, 3))
    # Per-pair points first (zorder=1, behind everything)
    plt.errorbar(det_df['angle'], det_df['W'], yerr=det_df['W_err'],
                 fmt='.', color='r', markersize=5, elinewidth=.5, zorder=1)
    # Binned summary on top
    plt.errorbar(by_angle_df['angle_bin_centers'], by_angle_df['W'],
                 yerr=by_angle_df['std W'], fmt='.', color='k', zorder=3)
    step_plot(angle_bin_edges, by_angle_df['W'], linewidth=1, zorder=2)
    plt.xlabel('Angle (degrees)')
    plt.ylabel('W (relative doubles counts)')
    sns.despine(right=False)
    if save_flag:
        save_fig_to_folder('W_vs_angle_all', fig_folder, extensions=['png', 'pdf'])
    if show_flag:
        plt.show()
    if clf_flag:
        plt.clf()
######################### SLICES ############################
def plot_bhp_slice(bhp_slice, bin_edges, bin_units = 'time',
                   slice_range = None, normalized = None,
                   c = 'k', title = False, show_flag = False,
                   save_flag = False, save_filename = 'bhp_slice', save_folder = 'fig', new_fig = True, clear = True, msize=5,
                   norm_range = None, extensions = ('png', 'pdf')):
    """
    Plot bhp slice.

    Parameters
    ----------
    bhp_slice : ndarray
        Slice through bhp at delta_tj_min, produce with slice_bhp()
    bin_edges : ndarray
        One-dimensional array of bin edges
    bin_units : str, optional
        Units for labels. 'time' or 'energy'
    slice_range : array or float, optional
        Range of time or energy values over which slice was taken. Primarily used for creating a title or legend
        if None: not provided
        if array: Min and max of slice range, ex: [slice_dt_min, slice_dt_max]
        if float: Slice position, ex: slice_dt_middle
    normalized : str, optional
        None: Don't normalize
        'int': Normalize by integral
        'max': Normalize by height
    c : str, optional
        Color of step plot
    title : bool or str, optional
        if True, build a title from slice_range
        if False or None, no title printed
        if a str, use custom title
    show_flag : bool
        Option to show figure
    save_flag : bool
        Option to save figure to file
    save_filename : str
        filename where to save figure
    save_folder : str
        foldername where to save figure
    new_fig : bool, optional
        option to open new fig (if False, plots on existing axes)
    clear : bool, optional
        Clear matplotlib after creating bicorr plot. (If set to False, you can add more plots before showing, saving, or clearing the figure)
    msize : int, optional
        Marker size
    norm_range : list of floats, optional
        Range of bin edges for normalization. Ex [15,150]
        Not yet available for energy units
    extensions : iterable of str, optional
        File save formats (new parameter; the save call previously referenced
        an undefined name `extensions` and raised NameError when save_flag=True)

    Returns
    -------
    n/a
    """
    if new_fig: plt.figure(figsize=(4,4))
    ax = plt.gca()

    # Indices of the normalization window within the histogram
    if norm_range is not None:
        imin = np.digitize(norm_range[0], bin_edges) - 1
        imax = np.digitize(norm_range[1], bin_edges) - 1
    else:
        imin = 0
        imax = len(bin_edges)

    # String comparisons use == (the old `is` relied on CPython interning and
    # raises SyntaxWarning on modern Python).
    if normalized == 'max':
        step_plot(bin_edges, bhp_slice/np.max(bhp_slice[imin:imax]), linewidth=.5, color = c)
        ax.set_ylabel('Counts normalized by maximum')
    elif normalized == 'int':
        step_plot(bin_edges, bhp_slice/np.sum(bhp_slice[imin:imax]), linewidth=.5, color = c)
        ax.set_ylabel('Counts normalized by integral')
    else:
        step_plot(bin_edges, bhp_slice, linewidth=.5)
        # NOTE(review): calc_centers is not defined or imported in this file;
        # presumably it must exist at module scope at call time -- confirm.
        ax.plot(calc_centers(bin_edges), bhp_slice, '.-', markersize=msize, linewidth = .5, color = c)
        ax.set_ylabel('Counts')

    if bin_units == 'time': ax.set_xlabel(r'$\Delta t_i$')
    elif bin_units == 'energy': ax.set_xlabel(r'$\Delta E_i$')

    if title is True:  # Make a title according to slice_range
        if type(slice_range) is list:  # Min and max boundaries
            ax.set_title(r'$\Delta t_j$ = {} to {}'.format(slice_range[0], slice_range[1]))
        else:  # float
            ax.set_title(r'$\Delta t_j$ = {}'.format(slice_range))
    elif title is False:
        pass
    elif title is not None:  # print custom title
        ax.set_title(title)

    # Set up ticks
    ax.tick_params(axis='both',
                   which='major',
                   direction='inout',
                   length=6,
                   color='k',
                   bottom=True, right=True, top=True, left=True)
    ax.tick_params(axis='both',
                   which='minor',
                   direction='in',
                   length=3,
                   bottom=True, right=True, top=True, left=True)
    # Major
    ax.xaxis.set_major_locator(MultipleLocator(50))
    ax.yaxis.set_major_locator(MultipleLocator(50))
    # Minor
    ax.xaxis.set_minor_locator(MultipleLocator(10))
    ax.yaxis.set_minor_locator(MultipleLocator(10))

    # plt.axes().set_aspect('equal')
    if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
    if show_flag: plt.show()
    if clear: plt.clf()
def plot_bhp_slices(bhp_slices,bin_edges,bin_units='time',slice_range = None,new_fig=True,show_flag=True, log_flag = False):
    '''
    Plot bhp_slices on the same axes, each normalized by its integral.

    Parameters
    ----------
    bhp_slices : ndarray
        Array of bhp slices. Dimensions: # slices x len(dt_bin_centers)
    bin_edges : ndarray
        One-dimensional array of bin edges, time or energy
    bin_units : str, optional
        Units for labels. 'time' or 'energy'
    slice_range : ndarray, optional
        Array of slice ranges. Dimensions: # slices x 2 (min, max)
        Either time or energy. If None, no legend labels are generated.
    new_fig : bool, optional
        Option to start new figure
    show_flag : bool, optional
        Option to display
    log_flag : bool, optional
        Currently unused; retained for backward compatibility of the signature.

    Returns
    -------
    legend_text : list of str
        One 'min to max' label per slice (empty if slice_range is None)
    '''
    if new_fig: plt.figure(figsize=(4,3))
    legend_text = []
    n_slices = bhp_slices.shape[0]
    colors = cm.rainbow(np.linspace(0, 1, n_slices))  # One color per slice
    for i in range(n_slices):
        # BUG FIX: the original indexed slice_range[i,:] before checking for
        # None, so the documented default slice_range=None raised TypeError.
        this_range = slice_range[i,:] if slice_range is not None else None
        plot_bhp_slice(bhp_slices[i,:], bin_edges, bin_units, this_range,
                       normalized='int', c=colors[i], clear=False,
                       new_fig=False, title=False)
        if this_range is not None:
            legend_text.append('{:04.2f} to {:04.2f}'.format(np.min(this_range), np.max(this_range)))
    plt.legend(legend_text)
    plt.title('Slices normalized by integral')
    # Hack legend: recolor the legend handles to match the per-slice colors
    leg = plt.gca().get_legend()
    for i in range(n_slices):
        leg.legendHandles[i].set_color(colors[i])
    if show_flag: plt.show()
    return legend_text
######################### SLICES IN ENERGY ############################
def plot_bhp_e_slice(bhp_e_slice, e_bin_edges,
                     slice_e_range = None, normalized = None,
                     c = 'k', title = True, show_flag = False,
                     save_flag = False, save_filename = 'bhp_e_slice', save_folder = 'fig', new_fig = True, clear = True, msize=5,
                     norm_range = None):
    """
    Plot a single energy slice through bhp_e.

    Parameters
    ----------
    bhp_e_slice : ndarray
        Slice through bhp_e at delta_E_min, produce with slice_bhp_e()
    e_bin_edges : ndarray
        One-dimensional array of bin edges
    slice_e_range : array or float, optional
        Range of time or energy values over which slice was taken. Primarily used for creating a title or legend
        if None: not provided
        if array: Min and max of slice range, ex: [slice_dt_min, slice_dt_max]
        if float: Slice position, ex: slice_dt_middle
    normalized : str, optional
        None: Don't normalize
        'int': Normalize by integral
        'max': Normalize by height
    c : str, optional
        Color of step plot
    title : str, optional
        Title for plot. Ex: '$E_j$ = {}'.format(e_bin_centers[i])
        if default True, print according to slice_e_range
        if None or False, no title printed
        if a str, use custom title
    show_flag : bool
        Option to show figure
    save_flag : bool
        Option to save figure to file
    save_filename : str
        filename where to save figure
    save_folder : str
        foldername where to save figure
    new_fig : bool, optional
        option to open new fig (if False, plots on existing axes)
    clear : bool, optional
        Clear matplotlib after creating bicorr plot. (If set to False, you can add more plots before showing, saving, or clearing the figure)
    msize : int, optional
        Marker size (currently unused; retained for interface compatibility)
    norm_range : list of floats, optional
        Range of bin edges for normalization. Ex [15,150]

    Returns
    -------
    n/a
    """
    if new_fig: plt.figure(figsize=(6,4))
    # Bin-index range used for normalization (defaults to the whole slice)
    if norm_range is not None:
        imin = np.digitize(norm_range[0], e_bin_edges) - 1
        imax = np.digitize(norm_range[1], e_bin_edges) - 1
    else:
        imin = 0
        imax = len(e_bin_edges)
    # BUG FIX: compare strings with '==', not 'is'. Identity comparison with a
    # literal only worked by accident (CPython interning) and raises
    # SyntaxWarning on modern Python.
    if normalized == 'max':
        step_plot(e_bin_edges, bhp_e_slice/np.max(bhp_e_slice[imin:imax]), linewidth=.5, color = c)
        plt.ylabel('Counts normalized by maximum')
    elif normalized == 'int':
        step_plot(e_bin_edges, bhp_e_slice/np.sum(bhp_e_slice[imin:imax]), linewidth=.5, color = c)
        plt.ylabel('Counts normalized by integral')
    else:
        step_plot(e_bin_edges, bhp_e_slice, linewidth=.5)
        plt.ylabel('Counts')
    plt.xlabel(r'$\Delta E_i$')  # raw string avoids the invalid '\D' escape
    if title is True:  # Make a title according to slice_e_range
        if type(slice_e_range) is list:  # Min and max boundaries
            plt.title('$E_j$ = {} to {}'.format(slice_e_range[0], slice_e_range[1]))
        else:  # float
            plt.title('$E_j$ = {}'.format(slice_e_range))
    elif title is False:
        pass
    elif title is not None:  # CONSISTENCY FIX: skip None instead of printing
        plt.title(title)     # it, matching the docstring and plot_bhp_slice
    sns.despine(right=False)
    # BUG FIX: the original passed the undefined free name 'extensions' here,
    # raising NameError whenever save_flag=True. Rely on save_fig_to_folder's
    # default extensions (['png','pdf']) instead.
    if save_flag: save_fig_to_folder(save_filename, save_folder)
    if show_flag: plt.show()
    if clear: plt.clf()
def plot_bhp_e_slices(bhp_e_slices,e_bin_edges,slice_e_ranges = None,
                      E_min = None, E_max = None, title = None,
                      new_fig=True,show_flag=True,
                      log_flag = False, clear = False,
                      save_flag = True, save_filename = 'bhp_e_slices'):
    '''
    Plot bhp_e slices on the same axes, each normalized by its integral.

    Parameters
    ----------
    bhp_e_slices : ndarray
        Array of bhp_e slices. Dimensions: # slices x len(e_bin_centers)
    e_bin_edges : ndarray
        One-dimensional array of bin edges
    slice_e_ranges : ndarray, optional
        Array of slice ranges. Dimensions: # slices x 2 (min, max)
        If None, no legend labels are generated.
    E_min, E_max : float, optional
        If given, draw a red vertical line at that energy
    title : str, optional
        Plot title (omitted if None)
    new_fig : bool, optional
        Option to start new figure
    show_flag : bool, optional
        Option to display
    log_flag : bool, optional
        Currently unused; retained for backward compatibility of the signature.
    clear : bool, optional
        Option to clear axes
    save_flag : bool, optional
        Option to save figure to file
    save_filename : str, optional
        Filename to save figure to (in folder 'fig')

    Returns
    -------
    legend_text : list of str
        One 'min to max' label per slice (empty if slice_e_ranges is None)
    '''
    if new_fig: plt.figure(figsize=(6,4))
    legend_text = []
    n_slices = bhp_e_slices.shape[0]
    colors = cm.rainbow(np.linspace(0, 1, n_slices))  # One color per slice
    for i in range(n_slices):
        # BUG FIX: the original indexed slice_e_ranges[i,:] before any None
        # check (so the default slice_e_ranges=None raised TypeError), and its
        # guard 'slice_e_ranges[i,:] is not None' was always True for a slice.
        this_range = slice_e_ranges[i,:] if slice_e_ranges is not None else None
        plot_bhp_e_slice(bhp_e_slices[i,:], e_bin_edges, this_range,
                         normalized='int', c=colors[i], clear=False,
                         new_fig=False, title=False)
        if this_range is not None:
            legend_text.append('{:04.2f} to {:04.2f}'.format(np.min(this_range), np.max(this_range)))
    if E_min is not None: plt.axvline(E_min, c='r')
    if E_max is not None: plt.axvline(E_max, c='r')
    plt.legend(legend_text)
    if title is not None: plt.title(title)
    # Hack legend: recolor the legend handles to match the per-slice colors
    leg = plt.gca().get_legend()
    for i in range(n_slices):
        leg.legendHandles[i].set_color(colors[i])
    if save_flag: save_fig_to_folder(save_filename, 'fig')
    if show_flag: plt.show()
    if clear: plt.clf()
    return legend_text
def plot_Eave_vs_Ej(Eave, Eave_err, Ej, log_flag = False, title = None,
                    y_range = None,
                    save_flag = False, save_filename = 'Eave_vs_Ej',
                    show_flag = True, clear = False):
    """
    Plot average energies as calculated from slices.

    Parameters
    ----------
    Eave : ndarray
        Average energies calculated
    Eave_err : ndarray
        1-sigma error calculated in Eave
    Ej : ndarray
        Dependent neutron energies
    log_flag : bool, optional
        Option for log-scale x-axis
    title : str, optional
        Plot title (omitted if None)
    y_range : list, optional
        Two-element list for y-range on plot.
    save_flag : bool, optional
        Option to save figure to file
    save_filename : str, optional
        Filename to save figure to (in folder 'fig')
    show_flag : bool, optional
        Option to display
    clear : bool, optional
        Option to clear the figure afterwards

    Returns
    -------
    n/a
    """
    # CLEANUP: dropped unused locals 'fig' and 'ax' from the original.
    plt.figure(figsize=(4,3))
    plt.errorbar(Ej, Eave, yerr=Eave_err, fmt='.')
    plt.xlabel('$E_j$ (MeV)')
    plt.ylabel('Average $E_i$ (MeV)')
    if y_range is not None: plt.ylim(y_range)
    if title is not None: plt.title(title)
    if log_flag: plt.xscale('log')
    if save_flag: save_fig_to_folder(save_filename, 'fig')
    if show_flag: plt.show()
    if clear: plt.clf()
import matplotlib
tplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import seaborn as sns
sns.set(style='ticks')
import sys
import os
import os.path
import scipy.io as sio
import time
import numpy as np
np.set_printoptions(threshold=np.nan)
import pandas as pd
from tqdm import *
# Other modules will import bicorr_plot, but not the other way around
############### SOME GENERAL FUNCTIONS TO KEEP AROUND ########################
def save_fig_to_folder(fig_filename,fig_folder='fig',extensions=['png','pdf'],dpi=300):
# Don't cut off labels
plt.tight_layout()
if fig_folder is None:
plt.savefig(fig_filename)
else:
try:
os.stat(fig_folder)
except:
os.mkdir(fig_folder)
for extension in extensions:
plt.savefig(fig_folder+'/'+fig_filename+'.'+extension,dpi=dpi)
def histogram_metrics(values, xlabel = 'x', ylabel = 'y'):
mu = np.mean(values)
sigma = np.std(values)
med = np.median(values)
plt.figure(figsize=(4,3))
sns.distplot(values, rug=True)
plt.axvline(mu,color='k',linewidth=1)
plt.axvline(mu-sigma,color='k',linewidth=.5)
plt.axvline(mu+sigma,color='k',linewidth=.5)
plt.axvline(med,color='r',linewidth=.5)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
sns.despine(right=False)
plt.show()
def step_plot(edges,y, linewidth=.5, color='k', zorder = 1):
for i in range(len(y)):
plt.hlines(y[i],edges[i],edges[i+1],linewidth=linewidth,color=color,zorder=zorder)
for i in range(len(y)-1):
plt.vlines(edges[i+1],y[i],y[i+1],linewidth=linewidth,color=color,zorder=zorder)
,
show_flag = False):
plt.figure(figsize=(4,3))
dt_bin_centers = (dt_bin_edges[:-1]+dt_bin_edges[1:])/2
plt.plot(dt_bin_centers,np.sum(singles_hist[0,:,:],axis=(0)))
plt.plot(dt_bin_centers,np.sum(singles_hist[1,:,:],axis=(0)))
plt.xlabel('Time (ns)')
plt.ylabel('Number of events')
plt.title('Singles TOF distribution, all channels')
plt.legend(['N','G'])
plt.yscale('log')
sns.despine(right=False)
if save_flag: save_fig_to_folder('singles_TOF_dist.png',fig_folder)
if show_flag: plt.show()
plt.clf()
def plot_singles_hist_e_n(singles_hist_e_n,e_bin_edges,
save_flag = False, fig_folder ='fig',
show_flag = False, clear_flag = True):
plt.figure(figsize=(4,3))
e_bin_centers = (e_bin_edges[:-1]+e_bin_edges[1:])/2
plt.plot(e_bin_centers, np.sum(singles_hist_e_n[:,:],axis=(0)))
plt.xlabel('Energy (MeV)')
plt.ylabel('Number of events')
plt.title('Singles energy distribution, all channels')
plt.yscale('log')
if save_flag: save_fig_to_folder('singles_e_dist',fig_folder)
if show_flag: plt.show()
if clear_flag: plt.clf()
def Sd_vs_ch_all(singles_df, show_flag = True, save_flag = True,
fig_folder = 'fig', normalized = False):
plt.figure(figsize=(4,3));
plt.errorbar(singles_df['ch'],singles_df['Sd'],yerr=singles_df['Sd_err'],
fmt='.',markersize=5,elinewidth=.5)
plt.xlabel('detector channel')
plt.ylabel('Sd (counts)')
plt.title('br-subtracted $n$ sum')
sns.despine(right=False)
if save_flag: save_fig_to_folder('Sd_vs_angle_raw',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
plt.clf()
################## BHP ##########################
def bhp_plot(bicorr_hist_plot, dt_bin_edges, title = None,
vmin = None, vmax = None,
save_flag = False, save_filename = 'bicorr',
save_folder = 'fig', extensions = ['png','pdf'],
show_flag = False, clear = True):
fig = plt.figure(figsize=[4,3])
ax = plt.gca()
mesh = ax.pcolormesh(dt_bin_edges, dt_bin_edges, bicorr_hist_plot.T,
norm=matplotlib.colors.LogNorm(),
vmin = vmin, vmax = vmax, cmap="viridis")
cbar = plt.colorbar(mesh, ax=ax, fraction = 0.043, pad=0.1)
if np.max(bicorr_hist_plot) >=1: # absolute counts
cbar.set_label('counts')
else: # normalized
cbar.set_label('counts / (fission$\cdot$ns$^2$$\cdot$pair)')
ax.set_xlabel('$\Delta t_1$ (ns)')
ax.set_ylabel('$\Delta t_2$ (ns)')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(50))
ax.yaxis.set_major_locator(MultipleLocator(50))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(10))
ax.yaxis.set_minor_locator(MultipleLocator(10))
if title is not None: ax.set_title(title)
ax.set_aspect('equal')
plt.tight_layout()
if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
if show_flag: plt.show()
if clear: plt.clf()
return ax
########################## BHP_E #########################
def bhp_e_plot(bhp_e, e_bin_edges, title = None,
vmin = None, vmax = None, zoom_range = None,
save_flag = False, save_filename = 'bicorr_e',
save_folder = 'fig', extensions = ['png','pdf'],
show_flag = False, clear_flag = True):
fig = plt.figure(figsize=[4,3])
ax = plt.gca()
mesh = plt.pcolormesh(e_bin_edges, e_bin_edges, bhp_e.T,
norm=matplotlib.colors.LogNorm(),
vmin = vmin, vmax = vmax, cmap="inferno")
cbar = plt.colorbar(mesh, ax=ax, fraction = 0.043, pad=0.1)
if np.max(bhp_e) >=1: # absolute counts
cbar.set_label('counts')
else: # normalized
cbar.set_label('counts / (fission$\cdot$MeV$^2$$\cdot$pair)')
ax.set_xlabel('$E_1$ (MeV)')
ax.set_ylabel('$E_2$ (MeV)')
if title is not None: plt.title(title)
if zoom_range is not None:
ax.set_xlim(zoom_range)
ax.set_ylim(zoom_range)
ax.set_aspect('equal')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(.2))
ax.yaxis.set_minor_locator(MultipleLocator(.2))
plt.tight_layout()
if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
if show_flag: plt.show()
if clear_flag: plt.clf()
return ax
############# COUNTS VS. ANGLE #################################
def counts_vs_angle_all(det_df, show_flag = True, save_flag = True,
fig_folder = 'fig', normalized = False, t_flag=False):
if t_flag:
# Positive counts vs. angle
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['Cp'],yerr=det_df['Cp']**.5,
fmt='.',markersize=5,elinewidth=.5,color='k')
plt.xlabel('Angle (degrees)')
plt.ylabel('Cp (counts)')
plt.title('positive $nn$ sum')
sns.despine(right=False)
if save_flag: save_fig_to_folder('Cp_vs_angle_raw',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
plt.clf()
# Negative counts vs. angle
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['Cn'],yerr=det_df['Cn']**.5,
fmt='.',markersize=5,elinewidth=.5,color='k')
plt.xlabel('Angle (degrees)')
plt.ylabel('Cn (counts)')
plt.title('negative $nn$ sum')
sns.despine(right=False)
if save_flag: save_fig_to_folder('Cn_vs_angle_raw',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
plt.clf()
# Diff counts vs. angle
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['Cd'],yerr=det_df['Cd_err'],
fmt='.',markersize=5,elinewidth=.5,color='k')
plt.xlabel('Angle (degrees)')
plt.ylabel('Cd (counts)')
plt.title('$nn$ sum')
sns.despine(right=False)
if save_flag: save_fig_to_folder('Cd_vs_angle_raw',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
plt.clf()
if normalized:
print('yes')
# Negative counts vs. angle
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['Nd'],yerr=det_df['Nd_err'],
fmt='.',markersize=5,elinewidth=.5)
plt.xlabel('Angle (degrees)')
plt.ylabel('Nd (counts/fission)')
plt.title('normalized br-subtracted $nn$ sum')
sns.despine(right=False)
if save_flag: save_fig_to_folder('Nd_vs_angle_raw',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
plt.clf()
def W_vs_angle_all(det_df, show_flag = True, save_flag = True, clf_flag = True,
fig_folder = 'fig'):
# Positive counts vs. angle
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['W'],yerr=det_df['W_err'],
fmt='.',markersize=5,elinewidth=.5,zorder=1)
plt.xlabel('Angle (degrees)')
plt.ylabel('W (relative doubles counts)')
sns.despine(right=False)
if save_flag: save_fig_to_folder('W_vs_angle',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
if clf_flag: plt.clf()
def W_vs_angle_binned(by_angle_df, show_flag = True, save_flag = True, clf_flag = True,
fig_folder = 'fig'):
angle_bin_edges = [by_angle_df.loc[0,'angle_bin_min']]+by_angle_df['angle_bin_max'].values.tolist()
plt.figure(figsize=(4,3))
plt.errorbar(by_angle_df['angle_bin_centers'],by_angle_df['W'],yerr=by_angle_df['std W'],fmt='.',color='k',zorder=3)
step_plot(angle_bin_edges,by_angle_df['W'],linewidth=1,zorder=2)
plt.xlabel('Angle (degrees)')
plt.ylabel('W (relative doubles counts)')
sns.despine(right=False)
if save_flag: save_fig_to_folder('W_vs_angle_binned',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
if clf_flag: plt.clf()
def W_vs_angle(det_df, by_angle_df, show_flag = True, save_flag = True, clf_flag = True,
fig_folder = 'fig'):
angle_bin_edges = [by_angle_df.loc[0,'angle_bin_min']]+by_angle_df['angle_bin_max'].values.tolist()
plt.figure(figsize=(4,3))
plt.errorbar(det_df['angle'],det_df['W'],yerr=det_df['W_err'],fmt='.',color='r', markersize=5,elinewidth=.5,zorder=1)
plt.errorbar(by_angle_df['angle_bin_centers'],by_angle_df['W'],yerr=by_angle_df['std W'],fmt='.',color='k',zorder=3)
step_plot(angle_bin_edges,by_angle_df['W'],linewidth=1,zorder=2)
plt.xlabel('Angle (degrees)')
plt.ylabel('W (relative doubles counts)')
sns.despine(right=False)
if save_flag: save_fig_to_folder('W_vs_angle_all',fig_folder,extensions=['png','pdf'])
if show_flag: plt.show()
if clf_flag: plt.clf()
######################### SLICES ############################
def plot_bhp_slice(bhp_slice, bin_edges, bin_units = 'time',
slice_range = None, normalized = None,
c = 'k', title = False, show_flag = False,
save_flag = False, save_filename = 'bhp_slice', save_folder = 'fig', new_fig = True, clear = True, msize=5,
norm_range = None):
if new_fig: plt.figure(figsize=(4,4))
ax = plt.gca()
if norm_range is not None:
imin = np.digitize(norm_range[0],bin_edges)-1
imax = np.digitize(norm_range[1],bin_edges)-1
else:
imin = 0
imax = len(bin_edges)
if normalized is 'max':
step_plot(bin_edges, bhp_slice/np.max(bhp_slice[imin:imax]), linewidth=.5, color = c)
ax.set_ylabel('Counts normalized by maximum')
elif normalized is 'int':
step_plot(bin_edges, bhp_slice/np.sum(bhp_slice[imin:imax]), linewidth=.5, color = c)
ax.set_ylabel('Counts normalized by integral')
else:
step_plot(bin_edges, bhp_slice, linewidth=.5)
ax.plot(calc_centers(bin_edges),bhp_slice,'.-',markersize=msize,linewidth = .5, color = c)
ax.set_ylabel('Counts')
if bin_units is 'time': ax.set_xlabel('$\Delta t_i$')
elif bin_units is 'energy': ax.set_xlabel('$\Delta E_i$')
if title is True: # Make a title according to slice_range
if type(slice_range) is list: # Min and max boundaries
ax.set_title('$\Delta t_j$ = {} to {}'.format(slice_range[0],slice_range[1]))
else: # float
ax.set_title('$\Delta t_j$ = {}'.format(slice_range))
elif title is False:
pass
elif title is not None: # print custom title
ax.set_title(title)
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(50))
ax.yaxis.set_major_locator(MultipleLocator(50))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(10))
ax.yaxis.set_minor_locator(MultipleLocator(10))
# plt.axes().set_aspect('equal')
if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
if show_flag: plt.show()
if clear: plt.clf()
def plot_bhp_slices(bhp_slices,bin_edges,bin_units='time',slice_range = None,new_fig=True,show_flag=True, log_flag = False):
if new_fig: plt.figure(figsize=(4,3))
legend_text = []
color = iter(cm.rainbow(np.linspace(0,1,bhp_slices.shape[0]))) # Set up colors for plotting
for i in range(bhp_slices.shape[0]): # Loop through slices
c = next(color);
plot_bhp_slice(bhp_slices[i,:],bin_edges,bin_units,slice_range[i,:],normalized='int',c=c,clear=False,new_fig=False,title=False)
if slice_range is not None: legend_text.append('{:04.2f} to {:04.2f}'.format(np.min(slice_range[i,:]),np.max(slice_range[i,:])))
plt.legend(legend_text)
plt.title('Slices normalized by integral')
# Hack legend
ax = plt.gca()
leg = ax.get_legend()
color = iter(cm.rainbow(np.linspace(0,1,bhp_slices.shape[0]))) # Reset colors
for i in range(bhp_slices.shape[0]): # Make legend
c = next(color)
leg.legendHandles[i].set_color(c)
if show_flag: plt.show()
return legend_text
######################### SLICES IN ENERGY ############################
def plot_bhp_e_slice(bhp_e_slice, e_bin_edges,
slice_e_range = None, normalized = None,
c = 'k', title = True, show_flag = False,
save_flag = False, save_filename = 'bhp_e_slice', save_folder = 'fig', new_fig = True, clear = True, msize=5,
norm_range = None):
if new_fig: plt.figure(figsize=(6,4))
if norm_range is not None:
imin = np.digitize(norm_range[0],e_bin_edges)-1
imax = np.digitize(norm_range[1],e_bin_edges)-1
else:
imin = 0
imax = len(e_bin_edges)
if normalized is 'max':
step_plot(e_bin_edges, bhp_e_slice/np.max(bhp_e_slice[imin:imax]), linewidth=.5, color = c)
plt.ylabel('Counts normalized by maximum')
elif normalized is 'int':
step_plot(e_bin_edges, bhp_e_slice/np.sum(bhp_e_slice[imin:imax]), linewidth=.5, color = c)
plt.ylabel('Counts normalized by integral')
else:
step_plot(e_bin_edges, bhp_e_slice, linewidth=.5)
plt.ylabel('Counts')
plt.xlabel('$\Delta E_i$')
if title is True: # Make a title according to slice_range
if type(slice_e_range) is list: # Min and max boundaries
plt.title('$E_j$ = {} to {}'.format(slice_e_range[0],slice_e_range[1]))
else: # float
plt.title('$E_j$ = {}'.format(slice_e_range))
elif title is False:
pass
else: # print custom title
plt.title(title)
sns.despine(right=False)
# plt.axes().set_aspect('equal')
if save_flag: save_fig_to_folder(save_filename, save_folder, extensions)
if show_flag: plt.show()
if clear: plt.clf()
def plot_bhp_e_slices(bhp_e_slices,e_bin_edges,slice_e_ranges = None,
E_min = None, E_max = None, title = None,
new_fig=True,show_flag=True,
log_flag = False, clear = False,
save_flag = True, save_filename = 'bhp_e_slices'):
if new_fig: plt.figure(figsize=(6,4))
legend_text = []
color = iter(cm.rainbow(np.linspace(0,1,bhp_e_slices.shape[0]))) # Set up colors for plotting
for i in range(bhp_e_slices.shape[0]): # Loop through slices
c = next(color);
plot_bhp_e_slice(bhp_e_slices[i,:],e_bin_edges,slice_e_ranges[i,:],normalized='int',c=c,clear=False,new_fig=False,title=False)
if slice_e_ranges[i,:] is not None: legend_text.append('{:04.2f} to {:04.2f}'.format(np.min(slice_e_ranges[i,:]),np.max(slice_e_ranges[i,:])))
if E_min is not None: plt.axvline(E_min, c='r')
if E_max is not None: plt.axvline(E_max, c='r')
plt.legend(legend_text)
if title is not None: plt.title(title)
# Hack legend
ax = plt.gca()
leg = ax.get_legend()
color = iter(cm.rainbow(np.linspace(0,1,bhp_e_slices.shape[0]))) # Reset colors
for i in range(bhp_e_slices.shape[0]): # Make legend
c = next(color)
leg.legendHandles[i].set_color(c)
if save_flag: save_fig_to_folder(save_filename, 'fig')
if show_flag: plt.show()
if clear: plt.clf()
return legend_text
def plot_Eave_vs_Ej(Eave, Eave_err, Ej, log_flag = False, title = None,
y_range = None,
save_flag = False, save_filename = 'Eave_vs_Ej',
show_flag = True, clear = False):
fig = plt.figure(figsize=(4,3))
ax = plt.gca()
plt.errorbar(Ej, Eave, yerr=Eave_err, fmt='.')
plt.xlabel('$E_j$ (MeV)')
plt.ylabel('Average $E_i$ (MeV)')
if y_range is not None: plt.ylim(y_range)
if title is not None: plt.title(title)
if log_flag: plt.xscale('log')
if save_flag: save_fig_to_folder(save_filename, 'fig')
if show_flag: plt.show()
if clear: plt.clf() | true | true |
1c3479b242c45478096faa4c288a0868f284cab4 | 960 | py | Python | ICHSACTF2021/Crypto/Baby_Homework.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | null | null | null | ICHSACTF2021/Crypto/Baby_Homework.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | null | null | null | ICHSACTF2021/Crypto/Baby_Homework.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | 3 | 2021-06-28T09:52:07.000Z | 2021-09-22T03:28:40.000Z | # AES ECB -- One byte at a time attack
from pwn import *
def main(data):
host = 'baby_homework.ichsa.ctf.today'
port = 8010
t = remote(host, port)
t.sendline(data)
t.recvuntil("Hello! What do you want to encrypt today?\n")
a = t.recvline()[36:38]
return a
if __name__ == '__main__':
# restored flags
flag ="d0n7_7ruzt_DeF4uL7_V4lu3z"
flag1 ="d0n7_7ruzt_DeF4u"
flag2 ="L7_V4lu3z"
count = 13
while True:
for i in range(33,125):
print(i)
input1 = "A" * count
a = main(input1)
input2 = "A" * count + flag1 + flag2 + chr(i)
b = main(input2)
if a == b:
print("yes flag is %s " % chr(i))
flag2 = flag2 + chr(i)
print(flag2)
count = count - 1
break
if count == -1:
print("restored flag %s" % (flag1 + flag2))
break | 26.666667 | 62 | 0.496875 |
from pwn import *
def main(data):
host = 'baby_homework.ichsa.ctf.today'
port = 8010
t = remote(host, port)
t.sendline(data)
t.recvuntil("Hello! What do you want to encrypt today?\n")
a = t.recvline()[36:38]
return a
if __name__ == '__main__':
flag ="d0n7_7ruzt_DeF4uL7_V4lu3z"
flag1 ="d0n7_7ruzt_DeF4u"
flag2 ="L7_V4lu3z"
count = 13
while True:
for i in range(33,125):
print(i)
input1 = "A" * count
a = main(input1)
input2 = "A" * count + flag1 + flag2 + chr(i)
b = main(input2)
if a == b:
print("yes flag is %s " % chr(i))
flag2 = flag2 + chr(i)
print(flag2)
count = count - 1
break
if count == -1:
print("restored flag %s" % (flag1 + flag2))
break | true | true |
1c3479bd4399e34cb2e02d3a0bdb6bf445aa0d20 | 2,367 | py | Python | compressor/utils/__init__.py | gatherhealth/django-compressor | 2eea7b1a71855cfc8e44f12301d85057f2bb70e6 | [
"Apache-2.0"
] | 10 | 2016-09-14T21:58:51.000Z | 2019-01-28T21:56:37.000Z | compressor/utils/__init__.py | gatherhealth/django-compressor | 2eea7b1a71855cfc8e44f12301d85057f2bb70e6 | [
"Apache-2.0"
] | 6 | 2020-06-05T18:44:19.000Z | 2022-01-13T00:48:56.000Z | compressor/utils/__init__.py | gatherhealth/django-compressor | 2eea7b1a71855cfc8e44f12301d85057f2bb70e6 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.utils import six
from compressor.exceptions import FilterError
def get_class(class_string, exception=FilterError):
"""
Convert a string version of a function name to the callable object.
"""
if not hasattr(class_string, '__bases__'):
try:
class_string = str(class_string)
mod_name, class_name = get_mod_func(class_string)
if class_name:
return getattr(__import__(mod_name, {}, {}, [str('')]), class_name)
except (ImportError, AttributeError):
raise exception('Failed to import %s' % class_string)
raise exception("Invalid class path '%s'" % class_string)
def get_mod_func(callback):
"""
Converts 'django.views.news.stories.story_detail' to
('django.views.news.stories', 'story_detail')
"""
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
def get_pathext(default_pathext=None):
"""
Returns the path extensions from environment or a default
"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
return os.environ.get('PATHEXT', default_pathext)
def find_command(cmd, paths=None, pathext=None):
"""
Searches the PATH for the given command and returns its path
"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, six.string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
return None
| 31.986486 | 83 | 0.634981 |
from __future__ import unicode_literals
import os
from django.utils import six
from compressor.exceptions import FilterError
def get_class(class_string, exception=FilterError):
if not hasattr(class_string, '__bases__'):
try:
class_string = str(class_string)
mod_name, class_name = get_mod_func(class_string)
if class_name:
return getattr(__import__(mod_name, {}, {}, [str('')]), class_name)
except (ImportError, AttributeError):
raise exception('Failed to import %s' % class_string)
raise exception("Invalid class path '%s'" % class_string)
def get_mod_func(callback):
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
def get_pathext(default_pathext=None):
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
return os.environ.get('PATHEXT', default_pathext)
def find_command(cmd, paths=None, pathext=None):
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, six.string_types):
paths = [paths]
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep)]
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
return None
| true | true |
1c347a5748f1cbdeca0be005d1cc1b785ac0a408 | 1,305 | py | Python | tools/perf/measurements/skpicture_printer_unittest.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | tools/perf/measurements/skpicture_printer_unittest.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | tools/perf/measurements/skpicture_printer_unittest.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from measurements import skpicture_printer
class SkpicturePrinterUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._skp_outdir = tempfile.mkdtemp('_skp_test')
def tearDown(self):
shutil.rmtree(self._skp_outdir)
@decorators.Disabled('android')
def testSkpicturePrinter(self):
ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
measurement = skpicture_printer.SkpicturePrinter(self._skp_outdir)
results = self.RunMeasurement(measurement, ps, options=self._options)
# Picture printing is not supported on all platforms.
if results.failures:
assert 'not supported' in results.failures[0].exc_info[1].message
return
saved_picture_count = results.FindAllPageSpecificValuesNamed(
'saved_picture_count')
self.assertEquals(len(saved_picture_count), 1)
self.assertGreater(saved_picture_count[0].GetRepresentativeNumber(), 0)
| 33.461538 | 75 | 0.783908 |
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from measurements import skpicture_printer
class SkpicturePrinterUnitTest(page_test_test_case.PageTestTestCase):
  """Smoke test for the SkpicturePrinter measurement (uncommented copy)."""
  def setUp(self):
    # One fresh options copy and a temp .skp output directory per test.
    self._options = options_for_unittests.GetCopy()
    self._skp_outdir = tempfile.mkdtemp('_skp_test')
  def tearDown(self):
    shutil.rmtree(self._skp_outdir)
  @decorators.Disabled('android')
  def testSkpicturePrinter(self):
    """Runs the measurement on blank.html and expects one positive count."""
    ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
    measurement = skpicture_printer.SkpicturePrinter(self._skp_outdir)
    results = self.RunMeasurement(measurement, ps, options=self._options)
    # Picture printing is not supported on every platform; tolerate that.
    if results.failures:
      assert 'not supported' in results.failures[0].exc_info[1].message
      return
    saved_picture_count = results.FindAllPageSpecificValuesNamed(
        'saved_picture_count')
    self.assertEquals(len(saved_picture_count), 1)
    self.assertGreater(saved_picture_count[0].GetRepresentativeNumber(), 0)
| true | true |
1c347aeb2574f0b090ef4fa205fee79639bf5b68 | 1,312 | py | Python | Semana8/Vehiculos/controllers/vehiculo.py | GuidoTorres/codigo8 | 7fdff4f677f048de7d7877b96ec3a688d3dde163 | [
"MIT"
] | null | null | null | Semana8/Vehiculos/controllers/vehiculo.py | GuidoTorres/codigo8 | 7fdff4f677f048de7d7877b96ec3a688d3dde163 | [
"MIT"
] | 40 | 2021-03-10T16:58:17.000Z | 2022-03-26T01:55:17.000Z | Semana8/Vehiculos/controllers/vehiculo.py | GuidoTorres/codigo8 | 7fdff4f677f048de7d7877b96ec3a688d3dde163 | [
"MIT"
] | null | null | null | from flask_restful import Resource, reqparse
from Vehiculos.models.vehiculo import VehiculoModel
class Vehiculo(Resource):
    """REST resource for vehicles: lookup by brand (GET) and creation (POST)."""

    # Both payload fields are mandatory strings; `help` is the message echoed
    # back to the client when a field is missing (kept verbatim).
    parser = reqparse.RequestParser()
    parser.add_argument(
        'marca_vehiculo',
        type= str,
        required = True,
        help = "Falta la marca_vehiculo"
    )
    parser.add_argument(
        'modelo_vehiculo',
        type= str,
        required = True,
        help = "Falta modelo"
    )

    def get(self, marca):
        """Return the first vehicle whose `desc` column equals `marca`, or 404.

        NOTE(review): filtering the `desc` column by a brand value looks
        suspicious -- confirm the column name on VehiculoModel
        (marca_vehiculo?).
        """
        vehiculo = VehiculoModel.query.filter_by(desc = marca).first()
        if vehiculo:
            return vehiculo.devolverjson()
        return {'message' : 'No existe el vehiculo'}, 404

    def post(self):
        """Create a vehicle from the parsed payload and persist it."""
        data = Vehiculo.parser.parse_args()
        vehiculo = VehiculoModel(data['marca_vehiculo'], data['modelo_vehiculo'])
        try:
            # BUG FIX: the original called `producto.guardar_en_bd()`, but no
            # `producto` exists in this scope -- every POST raised NameError,
            # which the bare `except:` then hid behind a generic 500.
            vehiculo.guardar_en_bd()
        except Exception:
            return{'message': 'Hubo un error al guardar en la base de datos'}, 500
        return {'message': 'Se guardo el vehiculo exitosamente', 'vehiculo' : data['marca_vehiculo']}
from Vehiculos.models.vehiculo import VehiculoModel
class Vehiculo(Resource):
    """Vehicle REST resource (duplicate copy): GET by brand, POST to create."""

    # Required string arguments for POST; help texts are client-facing and
    # kept byte-identical.
    parser = reqparse.RequestParser()
    parser.add_argument(
        'marca_vehiculo',
        type= str,
        required = True,
        help = "Falta la marca_vehiculo"
    )
    parser.add_argument(
        'modelo_vehiculo',
        type= str,
        required = True,
        help = "Falta modelo"
    )

    def get(self, marca):
        """Look up a vehicle by brand; 404 when not found.

        NOTE(review): `desc` as the filter column is unverified here.
        """
        vehiculo = VehiculoModel.query.filter_by(desc = marca).first()
        if vehiculo:
            return vehiculo.devolverjson()
        return {'message' : 'No existe el vehiculo'}, 404

    def post(self):
        """Parse the payload, build a VehiculoModel and store it."""
        data = Vehiculo.parser.parse_args()
        vehiculo = VehiculoModel(data['marca_vehiculo'], data['modelo_vehiculo'])
        try:
            # FIX: original referenced the undefined name `producto`, so the
            # save never ran and the bare except turned the NameError into
            # a misleading 500 response.
            vehiculo.guardar_en_bd()
        except Exception:
            return{'message': 'Hubo un error al guardar en la base de datos'}, 500
        return {'message': 'Se guardo el vehiculo exitosamente', 'vehiculo' : data['marca_vehiculo']}
| true | true |
1c347af37a69df9363c6020ad91cc40569857713 | 323 | py | Python | vulcan/builder/__init__.py | exrny/vulcan-builder | b0b397202e2a82acc2794a01fc2029e61f411f1c | [
"MIT"
] | null | null | null | vulcan/builder/__init__.py | exrny/vulcan-builder | b0b397202e2a82acc2794a01fc2029e61f411f1c | [
"MIT"
] | null | null | null | vulcan/builder/__init__.py | exrny/vulcan-builder | b0b397202e2a82acc2794a01fc2029e61f411f1c | [
"MIT"
] | null | null | null | '''
Lightweight Python Build Tool
'''
from vulcan.builder.common import nsh, dump, dumps, safe_cd
from ._vb import task, async_task, main
import sh
import pkgutil
# Allow this package to be split across multiple directories on sys.path
# (classic pkgutil-style namespace package).
__path__ = pkgutil.extend_path(__path__, __name__)
# Public API re-exported by `from vulcan.builder import *`.
__all__ = [
    'task', 'async_task',
    'main',
    'nsh', 'sh',
    'dump', 'dumps',
    'safe_cd'
]
| 17 | 59 | 0.662539 |
from vulcan.builder.common import nsh, dump, dumps, safe_cd
from ._vb import task, async_task, main
import sh
import pkgutil
# pkgutil-style namespace package: merge same-named packages found elsewhere
# on sys.path into this one.
__path__ = pkgutil.extend_path(__path__, __name__)
# Names exported on star-import; keep in sync with the imports above.
__all__ = [
    'task', 'async_task',
    'main',
    'nsh', 'sh',
    'dump', 'dumps',
    'safe_cd'
]
| true | true |
1c347b4c2c1741836278dc153755824a8a53fc7f | 6,954 | py | Python | sdk/python/kubeflow/training/models/v1_tf_job_spec.py | pingsutw/tf-operator | abfecef0ac5d84ba62705de556f392e9b6f60027 | [
"Apache-2.0"
] | null | null | null | sdk/python/kubeflow/training/models/v1_tf_job_spec.py | pingsutw/tf-operator | abfecef0ac5d84ba62705de556f392e9b6f60027 | [
"Apache-2.0"
] | null | null | null | sdk/python/kubeflow/training/models/v1_tf_job_spec.py | pingsutw/tf-operator | abfecef0ac5d84ba62705de556f392e9b6f60027 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
tensorflow
Python SDK for tensorflow # noqa: E501
The version of the OpenAPI document: v1.3.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubeflow.training.configuration import Configuration
class V1TFJobSpec(object):
    """OpenAPI model for a TFJob spec.

    ``openapi_types`` declares each attribute's schema type and
    ``attribute_map`` maps attribute names to their JSON keys.
    ``run_policy`` and ``tf_replica_specs`` are required (validated in the
    setters when client-side validation is enabled).
    """

    openapi_types = {
        'enable_dynamic_worker': 'bool',
        'run_policy': 'V1RunPolicy',
        'success_policy': 'str',
        'tf_replica_specs': 'dict(str, V1ReplicaSpec)'
    }

    attribute_map = {
        'enable_dynamic_worker': 'enableDynamicWorker',
        'run_policy': 'runPolicy',
        'success_policy': 'successPolicy',
        'tf_replica_specs': 'tfReplicaSpecs'
    }

    def __init__(self, enable_dynamic_worker=None, run_policy=None,
                 success_policy=None, tf_replica_specs=None,
                 local_vars_configuration=None):
        """Build the model; a ``None`` config falls back to a default one."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._enable_dynamic_worker = None
        self._run_policy = None
        self._success_policy = None
        self._tf_replica_specs = None
        self.discriminator = None

        # Optional fields are only assigned when provided; required fields go
        # through their validating setters unconditionally.
        if enable_dynamic_worker is not None:
            self.enable_dynamic_worker = enable_dynamic_worker
        self.run_policy = run_policy
        if success_policy is not None:
            self.success_policy = success_policy
        self.tf_replica_specs = tf_replica_specs

    @property
    def enable_dynamic_worker(self):
        """bool: switch enabling dynamic workers (optional)."""
        return self._enable_dynamic_worker

    @enable_dynamic_worker.setter
    def enable_dynamic_worker(self, enable_dynamic_worker):
        self._enable_dynamic_worker = enable_dynamic_worker

    @property
    def run_policy(self):
        """V1RunPolicy: execution policy for the job (required)."""
        return self._run_policy

    @run_policy.setter
    def run_policy(self, run_policy):
        validating = self.local_vars_configuration.client_side_validation
        if validating and run_policy is None:
            raise ValueError("Invalid value for `run_policy`, must not be `None`")
        self._run_policy = run_policy

    @property
    def success_policy(self):
        """str: policy marking the TFJob as succeeded; '' means default rules."""
        return self._success_policy

    @success_policy.setter
    def success_policy(self, success_policy):
        self._success_policy = success_policy

    @property
    def tf_replica_specs(self):
        """dict(str, V1ReplicaSpec): TF cluster layout, e.g. {'PS': ..., 'Worker': ...}."""
        return self._tf_replica_specs

    @tf_replica_specs.setter
    def tf_replica_specs(self, tf_replica_specs):
        validating = self.local_vars_configuration.client_side_validation
        if validating and tf_replica_specs is None:
            raise ValueError("Invalid value for `tf_replica_specs`, must not be `None`")
        self._tf_replica_specs = tf_replica_specs

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into sub-models."""
        def _convert(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal iff their serialized dicts are equal."""
        if not isinstance(other, V1TFJobSpec):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1TFJobSpec):
            return True
        return self.to_dict() != other.to_dict()
| 33.594203 | 190 | 0.628415 |
import pprint
import re
import six
from kubeflow.training.configuration import Configuration
class V1TFJobSpec(object):
    """OpenAPI model for a TFJob spec (uncommented duplicate copy)."""
    # Attribute name -> declared schema type.
    openapi_types = {
        'enable_dynamic_worker': 'bool',
        'run_policy': 'V1RunPolicy',
        'success_policy': 'str',
        'tf_replica_specs': 'dict(str, V1ReplicaSpec)'
    }
    # Attribute name -> JSON key in the serialized document.
    attribute_map = {
        'enable_dynamic_worker': 'enableDynamicWorker',
        'run_policy': 'runPolicy',
        'success_policy': 'successPolicy',
        'tf_replica_specs': 'tfReplicaSpecs'
    }
    def __init__(self, enable_dynamic_worker=None, run_policy=None, success_policy=None, tf_replica_specs=None, local_vars_configuration=None):
        """Build the spec; a None config falls back to a default Configuration."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._enable_dynamic_worker = None
        self._run_policy = None
        self._success_policy = None
        self._tf_replica_specs = None
        self.discriminator = None
        # Optional fields assigned only when given; required fields always go
        # through their validating setters.
        if enable_dynamic_worker is not None:
            self.enable_dynamic_worker = enable_dynamic_worker
        self.run_policy = run_policy
        if success_policy is not None:
            self.success_policy = success_policy
        self.tf_replica_specs = tf_replica_specs
    @property
    def enable_dynamic_worker(self):
        """bool: switch enabling dynamic workers (optional)."""
        return self._enable_dynamic_worker
    @enable_dynamic_worker.setter
    def enable_dynamic_worker(self, enable_dynamic_worker):
        self._enable_dynamic_worker = enable_dynamic_worker
    @property
    def run_policy(self):
        """V1RunPolicy: execution policy (required)."""
        return self._run_policy
    @run_policy.setter
    def run_policy(self, run_policy):
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and run_policy is None:
            raise ValueError("Invalid value for `run_policy`, must not be `None`")
        self._run_policy = run_policy
    @property
    def success_policy(self):
        """str: policy marking the TFJob as succeeded; '' means default rules."""
        return self._success_policy
    @success_policy.setter
    def success_policy(self, success_policy):
        self._success_policy = success_policy
    @property
    def tf_replica_specs(self):
        """dict(str, V1ReplicaSpec): TF cluster layout (required)."""
        return self._tf_replica_specs
    @tf_replica_specs.setter
    def tf_replica_specs(self, tf_replica_specs):
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and tf_replica_specs is None:
            raise ValueError("Invalid value for `tf_replica_specs`, must not be `None`")
        self._tf_replica_specs = tf_replica_specs
    def to_dict(self):
        """Serialize the model to a plain dict, recursing into sub-models."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Equal iff both serialize to the same dict."""
        if not isinstance(other, V1TFJobSpec):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1TFJobSpec):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
1c347c16656536df69e4c8817e1ab4b095cd912d | 6,220 | py | Python | MoodleMediaConverter.py | MichaelMueller/MoodleMediaConverter | f6087942146d312088417badf406aacca95764fb | [
"Apache-2.0"
] | null | null | null | MoodleMediaConverter.py | MichaelMueller/MoodleMediaConverter | f6087942146d312088417badf406aacca95764fb | [
"Apache-2.0"
] | null | null | null | MoodleMediaConverter.py | MichaelMueller/MoodleMediaConverter | f6087942146d312088417badf406aacca95764fb | [
"Apache-2.0"
] | null | null | null | import argparse
import datetime
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
from time import sleep, time
import time
from zipfile import ZipFile
import xml.etree.ElementTree as ET
def find_file(name, path):
    """Return the full path of the first file called *name* under *path*.

    The tree is walked top-down; ``None`` is returned when no match exists.
    """
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
def hash(file):
BUF_SIZE = 65536
md5 = hashlib.md5()
with open(file, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
f.close()
return "{0}".format(md5.hexdigest())
def replace_in_files(dir, subject, replace, exts=[".xml", ".txt"]):
    """Apply replace_in_file() to every file under *dir* whose extension is in *exts*.

    NOTE(review): the mutable default for *exts* is never mutated here, so it
    is harmless, but callers should not rely on receiving a fresh list.
    """
    for dirpath, _dirnames, filenames in os.walk(dir):
        for filename in filenames:
            _, ext = os.path.splitext(filename)
            if ext in exts:
                replace_in_file(os.path.join(dirpath, filename), subject, replace)
def replace_in_file(file, subject, replace):
    """Rewrite *file* in place, replacing each occurrence of *subject* with *replace*.

    The result is written line by line to a temporary file first, then moved
    over the original, so a crash mid-write cannot truncate the target.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_path = os.path.join(tmp_dir, os.path.basename(file))
        with open(file, "rt") as src, open(tmp_path, "wt") as dst:
            for line in src:
                dst.write(line.replace(subject, replace))
        shutil.move(tmp_path, file)
def run_cmd(cmd, raise_exception=True):
    """Run *cmd*, capturing combined stdout/stderr; return (returncode, output).

    Raises a generic Exception on a non-zero exit when *raise_exception* is
    true; otherwise the caller inspects the returned code itself.
    """
    print("running command {}".format(cmd))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured, _ = proc.communicate()
    code = proc.returncode
    if code != 0 and raise_exception:
        raise Exception("error running command {}. output was: {}".format(cmd, captured))
    return code, captured
def process_file(file: ET.Element, vlc_path, moodle_dir):
    """Convert one <file> entry of the Moodle backup from audio/ogg to mp3.

    Transcodes the content-hash file with an external VLC process, then
    rewrites the entry's metadata (filename, filesize, timemodified,
    mimetype) and patches every reference to the old filename across the
    backup tree. Any error is logged and swallowed so that a single bad
    file does not abort the whole conversion.

    NOTE(review): the contenthash element is intentionally left untouched
    (see the commented-out lines below) even though the file content
    changes -- confirm Moodle accepts a backup whose hash no longer
    matches the content.
    """
    try:
        # check if we have a convertable media file
        if file.find("mimetype").text == "audio/ogg":
            # get content hash and its corresponding file
            content_hash = file.find("contenthash").text
            content_hash_path = find_file(content_hash, moodle_dir)
            if not os.path.exists(content_hash_path):
                raise Exception("file {} does not exist. skipping.".format(content_hash_path))
            content_hash_basename = os.path.basename(content_hash_path)
            content_hash_dir = os.path.dirname(content_hash_path)
            # build vlc command for conversion of file
            mp3_path = content_hash_basename + ".mp3"
            print("converting {} to {} in {}".format(content_hash_basename, mp3_path, content_hash_dir))
            cmd = vlc_path + " -I dummy " + content_hash_basename
            cmd = cmd + " --sout=#transcode{acodec=mp3,channels=2,samplerate=44100}:standard{"
            cmd = cmd + "access=file,mux=raw,dst=" + mp3_path + "} vlc://quit"
            # cd to dir to run the command (cmd uses relative file names)
            os.chdir(content_hash_dir)
            if os.path.exists(mp3_path):
                os.remove(mp3_path)
            ret, _ = run_cmd(cmd)
            # overwrite the original content file with the converted audio
            shutil.move(mp3_path, content_hash_basename)
            mp3_path = content_hash_basename
            # modify the current file ElementTree Item
            #mp3_content_hash = hash(mp3_path)
            #file.find("contenthash").text = mp3_content_hash
            file_name_before = file.find("filename").text
            new_file_name = os.path.splitext(file_name_before)[0] + ".mp3"
            file.find("filename").text = new_file_name
            size = os.path.getsize(mp3_path)
            file.find("filesize").text = str(size)
            file.find("timemodified").text = str(int(time.time()))
            file.find("mimetype").text = "audio/mp3"
            # actually move the item
            # shutil.move(mp3_path, mp3_content_hash)
            # replace the occurence in all files
            replace_in_files(moodle_dir, file_name_before, new_file_name)
    except Exception as e:
        # best-effort: log and continue with the next file
        print("exception while processing: {}".format(str(e)))
def zipdir(path, ziph):
    """Add every file under *path* to the open ZipFile handle *ziph*.

    Archive names are made relative to the parent of *path*, so the zip
    contains one top-level folder named after *path*.
    """
    base = os.path.join(path, '..')
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            full = os.path.join(dirpath, filename)
            ziph.write(full, os.path.relpath(full, base))
if __name__ == "__main__":
    # args: one positional backup file plus optional VLC path / cleanup flag.
    parser = argparse.ArgumentParser(
        description='A utility to convert moodle backup files')
    parser.add_argument('moodle_backup_file', type=str, help='moodle_backup_file')
    parser.add_argument('--vlc', type=str, default=None, help='path to the vlc executable')
    # NOTE(review): help text below looks copy-pasted from --vlc.
    parser.add_argument('--no_clean', action='store_true', default=False, help='path to the vlc executable')
    args = parser.parse_args()
    # extract moodle data: derive working dir and archive base name
    bkp_file = args.moodle_backup_file
    bkp_file_dir = os.path.abspath(os.path.dirname(bkp_file))
    bkp_file_basename = os.path.basename(bkp_file)
    bkp_file_name = os.path.splitext(bkp_file_basename)[0]
    # extract the .mbz (tar) archive into a folder named after it
    # NOTE(review): run_cmd passes the command as a single string without
    # shell=True -- confirm this resolves on the target platform.
    os.chdir(bkp_file_dir)
    if not os.path.exists(bkp_file_name):
        os.makedirs(bkp_file_name)
    run_cmd("tar -xvf "+bkp_file_basename+" -C "+bkp_file_name)
    sleep(2)
    moodle_dir = os.path.abspath(bkp_file_name)
    # parse the files xml file listing every stored file of the backup
    os.chdir(moodle_dir)
    tree = ET.parse("files.xml")
    vlc_path = args.vlc
    # fall back to the common Windows VLC install locations
    if vlc_path is None:
        if os.path.exists('C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe'):
            vlc_path = '"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe"'
        elif os.path.exists('C:\\Program Files\\VideoLAN\\VLC\\vlc.exe'):
            vlc_path = '"C:\\Program Files\\VideoLAN\\VLC\\vlc.exe"'
    # convert every ogg entry in place
    for file in tree.getroot():
        process_file(file, vlc_path, moodle_dir)
    # write the file again and repack the backup next to the original
    # NOTE(review): the '*' glob below needs a shell to expand -- verify.
    os.chdir(moodle_dir)
    tree.write("files.xml")
    run_cmd("tar -cvzf " + bkp_file_name + ".mbz *")
    shutil.move(bkp_file_name + ".mbz", "../"+bkp_file_name + ".mbz")
    os.chdir(os.path.dirname(bkp_file_dir))
    # clean up the extracted tree unless --no_clean was given
    if args.no_clean == False:
        shutil.rmtree(moodle_dir)
| 36.162791 | 117 | 0.632958 | import argparse
import datetime
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
from time import sleep, time
import time
from zipfile import ZipFile
import xml.etree.ElementTree as ET
def find_file(name, path):
    """Return the full path of the first file named *name* under *path*, else None."""
    for root, dirs, files in os.walk(path):
        if name in files:
            return os.path.join(root, name)
    return None
def hash(file):
    """Return the hex MD5 digest of *file* (64 KiB chunks).

    NOTE: the name shadows the builtin ``hash``.
    """
    BUF_SIZE = 65536
    md5 = hashlib.md5()
    with open(file, 'rb') as f:
        while True:
            data = f.read(BUF_SIZE)
            if not data:
                break
            md5.update(data)
    # redundant: the with-block above already closed the handle
    f.close()
    return "{0}".format(md5.hexdigest())
def replace_in_files(dir, subject, replace, exts=[".xml", ".txt"]):
    """Apply replace_in_file() to every file under *dir* with an extension in *exts*."""
    for root, dirs, files in os.walk(dir):
        for file in files:
            if os.path.splitext(file)[1] in exts:
                replace_in_file(os.path.join(root, file), subject, replace)
def replace_in_file(file, subject, replace):
    """Rewrite *file* in place, replacing *subject* with *replace* line by line."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fin = open(file, "rt")
        # write the substituted copy to a temp file, then move it over the original
        path = os.path.join(tmp_dir, os.path.basename(file))
        fout = open(path, "wt")
        for line in fin:
            fout.write(line.replace(subject, replace))
        fin.close()
        fout.close()
        shutil.move(path, file)
def run_cmd(cmd, raise_exception=True):
    """Run *cmd*, return (returncode, combined stdout/stderr); raise on failure if asked."""
    print("running command {}".format(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    ret = process.returncode
    if raise_exception and ret != 0:
        raise Exception("error running command {}. output was: {}".format(cmd, output))
    return ret, output
def process_file(file: ET.Element, vlc_path, moodle_dir):
    """Convert one audio/ogg <file> entry to mp3 via VLC and patch its metadata.

    Errors are logged and swallowed so one bad file does not stop the run.
    NOTE(review): the contenthash element is not updated although the file
    content changes -- confirm Moodle tolerates the mismatch.
    """
    try:
        if file.find("mimetype").text == "audio/ogg":
            content_hash = file.find("contenthash").text
            content_hash_path = find_file(content_hash, moodle_dir)
            if not os.path.exists(content_hash_path):
                raise Exception("file {} does not exist. skipping.".format(content_hash_path))
            content_hash_basename = os.path.basename(content_hash_path)
            content_hash_dir = os.path.dirname(content_hash_path)
            mp3_path = content_hash_basename + ".mp3"
            print("converting {} to {} in {}".format(content_hash_basename, mp3_path, content_hash_dir))
            # VLC headless transcode command; uses relative names, so chdir first
            cmd = vlc_path + " -I dummy " + content_hash_basename
            cmd = cmd + " --sout=#transcode{acodec=mp3,channels=2,samplerate=44100}:standard{"
            cmd = cmd + "access=file,mux=raw,dst=" + mp3_path + "} vlc://quit"
            os.chdir(content_hash_dir)
            if os.path.exists(mp3_path):
                os.remove(mp3_path)
            ret, _ = run_cmd(cmd)
            # overwrite the original content file with the converted audio
            shutil.move(mp3_path, content_hash_basename)
            mp3_path = content_hash_basename
            # update the XML entry: filename, size, mtime, mimetype
            file_name_before = file.find("filename").text
            new_file_name = os.path.splitext(file_name_before)[0] + ".mp3"
            file.find("filename").text = new_file_name
            size = os.path.getsize(mp3_path)
            file.find("filesize").text = str(size)
            file.find("timemodified").text = str(int(time.time()))
            file.find("mimetype").text = "audio/mp3"
            # patch every reference to the old filename across the backup
            replace_in_files(moodle_dir, file_name_before, new_file_name)
    except Exception as e:
        # best-effort: log and continue with the next file
        print("exception while processing: {}".format(str(e)))
def zipdir(path, ziph):
    """Add every file under *path* to ZipFile *ziph*, rooted at *path*'s parent."""
    for root, dirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
if __name__ == "__main__":
    # CLI: positional backup file, optional VLC path and cleanup switch.
    parser = argparse.ArgumentParser(
        description='A utility to convert moodle backup files')
    parser.add_argument('moodle_backup_file', type=str, help='moodle_backup_file')
    parser.add_argument('--vlc', type=str, default=None, help='path to the vlc executable')
    # NOTE(review): help text below looks copy-pasted from --vlc.
    parser.add_argument('--no_clean', action='store_true', default=False, help='path to the vlc executable')
    args = parser.parse_args()
    # derive working directory and archive base name from the input path
    bkp_file = args.moodle_backup_file
    bkp_file_dir = os.path.abspath(os.path.dirname(bkp_file))
    bkp_file_basename = os.path.basename(bkp_file)
    bkp_file_name = os.path.splitext(bkp_file_basename)[0]
    # unpack the .mbz (tar) backup into a folder named after it
    os.chdir(bkp_file_dir)
    if not os.path.exists(bkp_file_name):
        os.makedirs(bkp_file_name)
    run_cmd("tar -xvf "+bkp_file_basename+" -C "+bkp_file_name)
    sleep(2)
    moodle_dir = os.path.abspath(bkp_file_name)
    # files.xml lists every stored file of the backup
    os.chdir(moodle_dir)
    tree = ET.parse("files.xml")
    vlc_path = args.vlc
    # fall back to the common Windows VLC install locations
    if vlc_path is None:
        if os.path.exists('C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe'):
            vlc_path = '"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe"'
        elif os.path.exists('C:\\Program Files\\VideoLAN\\VLC\\vlc.exe'):
            vlc_path = '"C:\\Program Files\\VideoLAN\\VLC\\vlc.exe"'
    for file in tree.getroot():
        process_file(file, vlc_path, moodle_dir)
    # write the updated index and repack next to the original backup
    # NOTE(review): the '*' glob needs a shell to expand -- verify on target OS.
    os.chdir(moodle_dir)
    tree.write("files.xml")
    run_cmd("tar -cvzf " + bkp_file_name + ".mbz *")
    shutil.move(bkp_file_name + ".mbz", "../"+bkp_file_name + ".mbz")
    os.chdir(os.path.dirname(bkp_file_dir))
    # remove the extracted tree unless --no_clean was given
    if args.no_clean == False:
        shutil.rmtree(moodle_dir)
| true | true |
1c347ce204585efb3f6cd25b73a53ec550c91616 | 10,256 | py | Python | docs/source/conf.py | Shray64/pytorch_connectomics | d6c814f11ac2f8418ede5ae220a93016f50214fc | [
"MIT"
] | null | null | null | docs/source/conf.py | Shray64/pytorch_connectomics | d6c814f11ac2f8418ede5ae220a93016f50214fc | [
"MIT"
] | null | null | null | docs/source/conf.py | Shray64/pytorch_connectomics | d6c814f11ac2f8418ede5ae220a93016f50214fc | [
"MIT"
] | null | null | null | # Based on https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
import sphinx_rtd_theme
import doctest
import connectomics
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# Building this documentation requires Sphinx >= 3.0.
needs_sphinx = '3.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx_rtd_theme',
    'rst2pdf.pdfbuilder',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'Zudi Lin and Donglai Wei'
project = u'connectomics'
# Copyright year is computed at build time from the current date.
copyright = u'{}, {}'.format(datetime.datetime.now().year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'collapse_navigation': False,
    'display_version': True,
    'logo_only': True,  # show only the logo image in the sidebar header
    'style_nav_header_background': "#FFFFFF",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'test vtest'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '_static/img/logo_text.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# NOTE(review): injecting css_files via html_context is a legacy mechanism;
# newer Sphinx releases prefer `html_css_files` -- confirm before upgrading.
html_context = {'css_files': ['_static/css/custom.css']}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'connectomicsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'connectomics.tex', u'PyTorch Connectomics Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'connectomics', u'PyTorch Connectomics Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'connectomics', u'PyTorch Connectomics Documentation',
author, 'connectomics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
pdf_documents = [('index', u'connectomics', u'PyTorch Connectomics Documentation', author),]
def setup(app):
def skip(app, what, name, obj, skip, options):
members = [
'__init__',
'__repr__',
'__weakref__',
'__dict__',
'__module__',
]
return True if name in members else skip
app.connect('autodoc-skip-member', skip) | 29.136364 | 92 | 0.699883 |
import sys, os
import datetime
import sphinx_rtd_theme
import doctest
import connectomics
# Minimum Sphinx version required to build these docs.
needs_sphinx = '3.0'

# Sphinx extension modules, including the RTD theme and the rst2pdf builder.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx_rtd_theme',
    'rst2pdf.pdfbuilder',
]

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Source file suffix and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata; `copyright` picks up the current year automatically.
author = 'Zudi Lin and Donglai Wei'
project = 'connectomics'
copyright = '{}, {}'.format(datetime.datetime.now().year, author)

# Short X.Y version and full release string (kept in lockstep here).
version = 'latest'
release = 'latest'

# Patterns (relative to the source dir) ignored when looking for sources.
# These also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Pygments (syntax highlighting) style for code samples.
pygments_style = 'sphinx'

# Do not render output for `todo` / `todoList` directives.
todo_include_todos = False

# -- HTML output -----------------------------------------------------------

html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'collapse_navigation': False,
    'display_version': True,
    'logo_only': True,
    'style_nav_header_background': "#FFFFFF",
}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Logo displayed at the top of the sidebar.
html_logo = '_static/img/logo_text.png'

# Custom static files; copied after the builtin static files, so a file
# named "default.css" would overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {'css_files': ['_static/css/custom.css']}

# Output file base name for the HTML help builder.
htmlhelp_basename = 'connectomicsdoc'

# -- LaTeX output ----------------------------------------------------------

# All LaTeX customization options (paper size, point size, preamble,
# figure alignment) are left at their defaults.
latex_elements = {
}

# One tuple per LaTeX document:
# (source start file, target name, title, author, document class).
latex_documents = [
    (master_doc, 'connectomics.tex', 'PyTorch Connectomics Documentation',
     author, 'manual'),
]

# -- Manual page output ----------------------------------------------------

# One tuple per man page:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'connectomics', 'PyTorch Connectomics Documentation',
     [author], 1),
]

# -- Texinfo output --------------------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'connectomics', 'PyTorch Connectomics Documentation',
     author, 'connectomics', 'One line description of project.',
     'Miscellaneous'),
]

# -- PDF output (rst2pdf) --------------------------------------------------

pdf_documents = [
    ('index', 'connectomics', 'PyTorch Connectomics Documentation', author),
]
def setup(app):
def skip(app, what, name, obj, skip, options):
members = [
'__init__',
'__repr__',
'__weakref__',
'__dict__',
'__module__',
]
return True if name in members else skip
app.connect('autodoc-skip-member', skip) | true | true |
1c347ce2892aa8cfa6ed998db4ec47574d239ba8 | 43,747 | py | Python | tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/cashflow_streams.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 1 | 2021-03-04T01:07:48.000Z | 2021-03-04T01:07:48.000Z | tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/cashflow_streams.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | null | null | null | tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/cashflow_streams.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | 1 | 2021-02-16T12:08:41.000Z | 2021-02-16T12:08:41.000Z | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cashflow streams objects."""
from typing import Optional, Tuple, Callable, Any, List, Union
import numpy as np
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types as curve_types_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import rate_curve
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils as market_data_utils
from tf_quant_finance.experimental.pricing_platform.framework.rate_instruments import coupon_specs
from tf_quant_finance.experimental.pricing_platform.instrument_protos import period_pb2
from tf_quant_finance.math import pad
# Short alias for the curve type used in annotations throughout this module.
_CurveType = curve_types_lib.CurveType
class FixedCashflowStream:
  """Represents a batch of fixed streams of cashflows."""

  def __init__(self,
               coupon_spec: coupon_specs.FixedCouponSpecs,
               discount_curve_type: Union[_CurveType, List[_CurveType]],
               start_date: types.DateTensor = None,
               end_date: types.DateTensor = None,
               discount_curve_mask: types.IntTensor = None,
               first_coupon_date: Optional[types.DateTensor] = None,
               penultimate_coupon_date: Optional[types.DateTensor] = None,
               schedule_fn: Optional[Callable[..., Any]] = None,
               schedule: Optional[types.DateTensor] = None,
               dtype: Optional[types.Dtype] = None,
               name: Optional[str] = None):
    """Initializes a batch of fixed cashflow streams.

    Args:
      coupon_spec: An instance of `FixedCouponSpecs` specifying the
        details of the coupon payment for the cashflow stream.
      discount_curve_type: An instance of `CurveType` or a list of those.
        If supplied as a list and `discount_curve_mask` is not supplied,
        the size of the list should be the same as the number of priced
        instruments. Defines discount curves for the instruments.
      start_date: A `DateTensor` of `batch_shape` specifying the starting
        dates of the accrual of the first coupon of the cashflow stream. The
        shape of the input corresponds to the number of streams being
        created. When passed as an integer `Tensor`, should be of shape
        `batch_shape + [3]` and contain `[year, month, day]` for each date.
        Either this or `schedule` should be supplied.
        Default value: `None`
      end_date: A `DateTensor` of `batch_shape` specifying the end dates for
        accrual of the last coupon in each cashflow stream. The shape of the
        input should be the same as that of `start_date`.
        Either this or `schedule` should be supplied.
        When passed as an integer `Tensor`, should be of shape
        `batch_shape + [3]` and contain `[year, month, day]` for each date.
        Default value: `None`
      discount_curve_mask: An optional integer `Tensor` of values ranging
        from `0` to `len(discount_curve_type) - 1` and of shape
        `batch_shape`. Identifies a mapping between `discount_curve_type`
        list and the underlying instruments.
        Default value: `None`.
      first_coupon_date: An optional `DateTensor` specifying the payment
        dates of the first coupon of the cashflow stream. Use this input for
        cashflows with irregular first coupon. Should be of the same shape
        as `start_date`.
        When passed as an integer `Tensor`, should be of shape
        `batch_shape + [3]` and contain `[year, month, day]` for each date.
        Default value: None which implies regular first coupon.
      penultimate_coupon_date: An optional `DateTensor` specifying the
        payment dates of the penultimate (next to last) coupon of the
        cashflow stream. Use this input for cashflows with irregular last
        coupon. Should be of the same shape as `end_date`.
        When passed as an integer `Tensor`, should be of shape
        `batch_shape + [3]` and contain `[year, month, day]` for each date.
        Default value: None which implies regular last coupon.
      schedule_fn: A callable that accepts `start_date`, `end_date`,
        `coupon_frequency`, `settlement_days`, `first_coupon_date`, and
        `penultimate_coupon_date` as `Tensor`s and returns coupon payment
        days.
        Default value: `None`.
      schedule: A `DateTensor` of coupon payment dates including the start
        and end dates of the cashflows.
        Default value: `None`.
      dtype: `tf.Dtype` of the input and output real `Tensor`s.
        Default value: None which maps to the default dtype inferred by
        TensorFlow.
      name: Python str. The name to give to the ops created by this class.
        Default value: `None` which maps to 'fixed_cashflow_stream'.
    """
    self._name = name or "fixed_cashflow_stream"

    with tf.name_scope(self._name):
      curve_list = to_list(discount_curve_type)
      [
          self._discount_curve_type,
          self._mask
      ] = process_curve_types(curve_list, discount_curve_mask)

      if schedule is None:
        if (start_date is None) or (end_date is None):
          raise ValueError("If `schedule` is not supplied both "
                           "`start_date` and `end_date` should be supplied")
        if isinstance(start_date, tf.Tensor):
          self._start_date = dateslib.dates_from_tensor(start_date)
        else:
          self._start_date = dateslib.convert_to_date_tensor(start_date)
        # BUGFIX: test `end_date` itself. The original tested `start_date`,
        # which broke conversion when the two dates had different types.
        if isinstance(end_date, tf.Tensor):
          self._end_date = dateslib.dates_from_tensor(end_date)
        else:
          self._end_date = dateslib.convert_to_date_tensor(end_date)
        self._first_coupon_date = first_coupon_date
        self._penultimate_coupon_date = penultimate_coupon_date
        if self._first_coupon_date is not None:
          # BUGFIX: test `first_coupon_date` (the original tested
          # `start_date`).
          if isinstance(first_coupon_date, tf.Tensor):
            self._first_coupon_date = dateslib.dates_from_tensor(
                first_coupon_date)
          else:
            self._first_coupon_date = dateslib.convert_to_date_tensor(
                first_coupon_date)
        if self._penultimate_coupon_date is not None:
          # BUGFIX: test `penultimate_coupon_date` (the original tested
          # `start_date`).
          if isinstance(penultimate_coupon_date, tf.Tensor):
            self._penultimate_coupon_date = dateslib.dates_from_tensor(
                penultimate_coupon_date)
          else:
            self._penultimate_coupon_date = dateslib.convert_to_date_tensor(
                penultimate_coupon_date)

      # Normalize the coupon frequency to a `PeriodTensor`.
      coupon_frequency = _get_attr(coupon_spec, "coupon_frequency")
      if isinstance(coupon_frequency, period_pb2.Period):
        coupon_frequency = market_data_utils.get_period(coupon_frequency)
      if isinstance(coupon_frequency, (list, tuple)):
        coupon_frequency = market_data_utils.period_from_list(
            *coupon_frequency)
      if isinstance(coupon_frequency, dict):
        coupon_frequency = market_data_utils.period_from_dict(
            coupon_frequency)

      # Use `_get_attr` for proto/dataclass compatibility, consistent with
      # `FloatingCashflowStream` (was a direct attribute access).
      businessday_rule = _get_attr(coupon_spec, "businessday_rule")
      # Business day roll convention and the end of month flag.
      roll_convention, eom = market_data_utils.get_business_day_convention(
          businessday_rule)

      notional = tf.convert_to_tensor(
          _get_attr(coupon_spec, "notional_amount"),
          dtype=dtype,
          name="notional")
      self._dtype = dtype or notional.dtype
      fixed_rate = tf.convert_to_tensor(
          _get_attr(coupon_spec, "fixed_rate"),
          dtype=self._dtype,
          name="fixed_rate")
      daycount_fn = market_data_utils.get_daycount_fn(
          _get_attr(coupon_spec, "daycount_convention"), self._dtype)

      self._settlement_days = tf.convert_to_tensor(
          _get_attr(coupon_spec, "settlement_days"),
          dtype=tf.int32,
          name="settlement_days")

      if schedule is not None:
        if isinstance(schedule, tf.Tensor):
          coupon_dates = dateslib.dates_from_tensor(schedule)
        else:
          coupon_dates = dateslib.convert_to_date_tensor(schedule)
        # Extract starting date for the cashflow.
        self._start_date = coupon_dates[..., 0]
      elif schedule_fn is None:
        # TODO(b/160446193): Calendar is ignored and weekends only is used.
        calendar = dateslib.create_holiday_calendar(
            weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)
        self._calendar = calendar
        coupon_dates = _generate_schedule(
            start_date=self._start_date,
            end_date=self._end_date,
            coupon_frequency=coupon_frequency,
            roll_convention=roll_convention,
            calendar=calendar,
            settlement_days=self._settlement_days,
            end_of_month=eom,
            first_coupon_date=self._first_coupon_date,
            penultimate_coupon_date=self._penultimate_coupon_date)
        # Extract starting date for the cashflow.
        self._start_date = coupon_dates[..., 0]
      else:
        if first_coupon_date is not None:
          first_coupon_date = self._first_coupon_date.to_tensor()
        if penultimate_coupon_date is not None:
          penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()
        coupon_dates = schedule_fn(
            start_date=self._start_date.to_tensor(),
            end_date=self._end_date.to_tensor(),
            coupon_frequency=coupon_frequency.quantity(),
            settlement_days=self._settlement_days,
            first_coupon_date=first_coupon_date,
            penultimate_coupon_date=penultimate_coupon_date)
        # Convert to DateTensor if the result comes from a tf.function.
        coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)

      self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]

      payment_dates = coupon_dates[..., 1:]
      daycount_fractions = daycount_fn(
          start_date=coupon_dates[..., :-1],
          end_date=coupon_dates[..., 1:])
      # Broadcast the per-stream rate over the cashflow dimension.
      coupon_rate = tf.expand_dims(fixed_rate, axis=-1)

      self._num_cashflows = tf.shape(payment_dates.ordinal())[-1]
      self._payment_dates = payment_dates
      self._notional = notional
      self._daycount_fractions = daycount_fractions
      self._coupon_rate = coupon_rate
      # `fixed_rate` is already a `Tensor` of dtype `self._dtype`.
      self._fixed_rate = fixed_rate
      self._daycount_fn = daycount_fn

  def daycount_fn(self) -> Callable[..., Any]:
    """Returns the daycount fraction function used by the stream."""
    return self._daycount_fn

  @property
  def daycount_fractions(self) -> types.FloatTensor:
    """Daycount fractions of shape `batch_shape + [num_cashflows]`."""
    return self._daycount_fractions

  @property
  def fixed_rate(self) -> types.FloatTensor:
    """Fixed coupon rates of the streams."""
    return self._fixed_rate

  @property
  def notional(self) -> types.FloatTensor:
    """Notional amounts of the streams."""
    return self._notional

  @property
  def discount_curve_type(self) -> _CurveType:
    """Discount curve type(s) used to price the streams."""
    return self._discount_curve_type

  @property
  def batch_shape(self) -> types.IntTensor:
    # Annotation fixed: this is an integer shape tensor, not a string tensor.
    """Batch shape of the streams as an integer `Tensor`."""
    return self._batch_shape

  @property
  def cashflow_dates(self) -> types.DateTensor:
    """Payment dates of the cashflows."""
    return self._payment_dates

  def cashflows(self,
                market: pmd.ProcessedMarketData,
                name: Optional[str] = None
                ) -> Tuple[types.DateTensor, types.FloatTensor]:
    """Returns cashflows for the fixed leg.

    Args:
      market: An instance of `ProcessedMarketData`.
      name: Python str. The name to give to the ops created by this function.
        Default value: `None` which maps to 'cashflows'.

    Returns:
      A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and
      containing the dates and the corresponding cashflows price for each
      stream based on the input market data.
    """
    name = name or (self._name + "_cashflows")
    with tf.name_scope(name):
      valuation_date = dateslib.convert_to_date_tensor(market.date)
      # Zero out the cashflows that have already been paid.
      future_cashflows = tf.cast(self._payment_dates >= valuation_date,
                                 dtype=self._dtype)
      # `self._notional` has shape `batch_shape`; broadcast it over the
      # cashflow dimension.
      notional = tf.expand_dims(self._notional, axis=-1)
      cashflows = notional * (
          future_cashflows * self._daycount_fractions * self._coupon_rate)
      return self._payment_dates, cashflows

  def price(self,
            market: pmd.ProcessedMarketData,
            name: Optional[str] = None):
    """Returns the present value of the stream on the valuation date.

    Args:
      market: An instance of `ProcessedMarketData`.
      name: Python str. The name to give to the ops created by this function.
        Default value: `None` which maps to 'price'.

    Returns:
      A `Tensor` of shape `batch_shape` containing the modeled price of each
      stream based on the input market data.
    """
    name = name or (self._name + "_price")
    with tf.name_scope(name):
      discount_curve = get_discount_curve(
          self._discount_curve_type, market, self._mask)
      discount_factors = discount_curve.discount_factor(self._payment_dates)
      _, cashflows = self.cashflows(market)
      # Present value of each cashflow.
      cashflow_pvs = cashflows * discount_factors
      # Sum over the cashflow axis. `axis=-1` (instead of `axis=1`)
      # generalizes to multi-dimensional batch shapes while being identical
      # for the common 1-D batch case.
      return tf.math.reduce_sum(cashflow_pvs, axis=-1)
class FloatingCashflowStream:
"""Represents a batch of cashflows indexed to a floating rate."""
def __init__(self,
coupon_spec: coupon_specs.FloatCouponSpecs,
discount_curve_type: Union[_CurveType, List[_CurveType]],
start_date: types.DateTensor = None,
end_date: types.DateTensor = None,
discount_curve_mask: types.IntTensor = None,
rate_index_curves: Union[
curve_types_lib.RateIndexCurve,
List[curve_types_lib.RateIndexCurve]] = None,
reference_mask: types.IntTensor = None,
first_coupon_date: Optional[types.DateTensor] = None,
penultimate_coupon_date: Optional[types.DateTensor] = None,
schedule_fn: Optional[Callable[..., Any]] = None,
schedule: Optional[types.DateTensor] = None,
past_fixing: Optional[types.FloatTensor] = None,
dtype: Optional[types.Dtype] = None,
name: Optional[str] = None):
"""Initializes a batch of floating cashflow streams.
Args:
coupon_spec: An instance of `FloatCouponSpecs` specifying the
details of the coupon payment for the cashflow stream.
discount_curve_type: An instance of `CurveType` or a list of those.
If supplied as a list and `discount_curve_mask` is not supplied,
the size of the list should be the same as the number of priced
instruments. Defines discount curves for the instruments.
start_date: A `DateTensor` of `batch_shape` specifying the starting dates
of the accrual of the first coupon of the cashflow stream. The shape of
the input correspond to the number of streams being created.
Either this of `schedule` should be supplied.
When passed as an integet `Tensor`, should be of shape
`batch_shape + [3]` and contain `[year, month, day]` for each date.
Default value: `None`
end_date: A `DateTensor` of `batch_shape`specifying the end dates for
accrual of the last coupon in each cashflow stream. The shape of the
input should be the same as that of `start_date`.
Either this of `schedule` should be supplied.
When passed as an integet `Tensor`, should be of shape
`batch_shape + [3]` and contain `[year, month, day]` for each date.
Default value: `None`
discount_curve_mask: An optional integer `Tensor` of values ranging from
`0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.
Identifies a mapping between `discount_curve_type` list and the
underlying instruments.
Default value: `None`.
rate_index_curves: An instance of `RateIndexCurve` or a list of those.
If supplied as a list and `reference_mask` is not supplid,
the size of the list should be the same as the number of priced
instruments. Defines the index curves for each instrument. If not
supplied, `coupon_spec.floating_rate_type` is used to identify the
curves.
Default value: `None`.
reference_mask: An optional integer `Tensor` of values ranging from
`0` to `len(rate_index_curves) - 1` and of shape `batch_shape`.
Identifies a mapping between `rate_index_curves` list and the underlying
instruments.
Default value: `None`.
first_coupon_date: An optional `DateTensor` specifying the payment dates
of the first coupon of the cashflow stream. Use this input for cashflows
with irregular first coupon. Should be of the same shape as
`start_date`.
When passed as an integet `Tensor`, should be of shape
`batch_shape + [3]` and contain `[year, month, day]` for each date.
Default value: None which implies regular first coupon.
penultimate_coupon_date: An optional `DateTensor` specifying the payment
dates of the penultimate (next to last) coupon of the cashflow
stream. Use this input for cashflows with irregular last coupon.
Should be of the same shape as `end_date`.
When passed as an integet `Tensor`, should be of shape
`batch_shape + [3]` and contain `[year, month, day]` for each date.
Default value: None which implies regular last coupon.
schedule_fn: A callable that accepts `start_date`, `end_date`,
`coupon_frequency`, `settlement_days`, `first_coupon_date`, and
`penultimate_coupon_date` as `Tensor`s and returns coupon payment
days.
Default value: `None`.
schedule: A `DateTensor` of coupon payment dates including the start and
end dates of the cashflows.
Default value: `None`.
past_fixing: An optional `Tensor` of shape compatible with
`batch_shape + [1]`. Represents the fixings for the cashflows as
observed at `market.date`.
dtype: `tf.Dtype` of the input and output real `Tensor`s.
Default value: None which maps to the default dtype inferred by
TensorFlow.
name: Python str. The name to give to the ops created by this class.
Default value: `None` which maps to 'floating_cashflow_stream'.
"""
self._name = name or "floating_cashflow_stream"
with tf.name_scope(self._name):
curve_list = to_list(discount_curve_type)
[
self._discount_curve_type,
self._mask
] = process_curve_types(curve_list, discount_curve_mask)
self._first_coupon_date = None
self._penultimate_coupon_date = None
if schedule is None:
if (start_date is None) or (end_date is None):
raise ValueError("If `schedule` is not supplied both "
"`start_date` and `end_date` should be supplied")
if schedule is None:
if isinstance(start_date, tf.Tensor):
self._start_date = dateslib.dates_from_tensor(
start_date)
else:
self._start_date = dateslib.convert_to_date_tensor(
start_date)
if isinstance(start_date, tf.Tensor):
self._end_date = dateslib.dates_from_tensor(
end_date)
else:
self._end_date = dateslib.convert_to_date_tensor(
end_date)
self._first_coupon_date = first_coupon_date
self._penultimate_coupon_date = penultimate_coupon_date
if self._first_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._first_coupon_date = dateslib.dates_from_tensor(
first_coupon_date)
else:
self._first_coupon_date = dateslib.convert_to_date_tensor(
first_coupon_date)
if self._penultimate_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._penultimate_coupon_date = dateslib.dates_from_tensor(
penultimate_coupon_date)
else:
self._penultimate_coupon_date = dateslib.convert_to_date_tensor(
penultimate_coupon_date)
# Convert coupon and reset frequencies to PeriodTensor
coupon_frequency = _get_attr(coupon_spec, "coupon_frequency")
# Update coupon frequency
if isinstance(coupon_frequency, period_pb2.Period):
coupon_frequency = market_data_utils.get_period(
_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, (list, tuple)):
coupon_frequency = market_data_utils.period_from_list(
*_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, dict):
coupon_frequency = market_data_utils.period_from_dict(
_get_attr(coupon_spec, "coupon_frequency"))
# Update reset frequency
reset_frequency = _get_attr(coupon_spec, "reset_frequency")
if isinstance(reset_frequency, period_pb2.Period):
reset_frequency = market_data_utils.get_period(
_get_attr(coupon_spec, "reset_frequency"))
if isinstance(reset_frequency, (list, tuple)):
reset_frequency = market_data_utils.period_from_list(
*_get_attr(coupon_spec, "reset_frequency"))
if isinstance(reset_frequency, dict):
reset_frequency = market_data_utils.period_from_dict(
_get_attr(coupon_spec, "reset_frequency"))
self._reset_frequency = reset_frequency
businessday_rule = _get_attr(coupon_spec, "businessday_rule")
roll_convention, eom = market_data_utils.get_business_day_convention(
businessday_rule)
notional = tf.convert_to_tensor(
_get_attr(coupon_spec, "notional_amount"),
dtype=dtype,
name="notional")
self._dtype = dtype or notional.dtype
daycount_convention = _get_attr(coupon_spec, "daycount_convention")
daycount_fn = market_data_utils.get_daycount_fn(
_get_attr(coupon_spec, "daycount_convention"), self._dtype)
self._daycount_convention = daycount_convention
self._settlement_days = tf.convert_to_tensor(
_get_attr(coupon_spec, "settlement_days"),
dtype=tf.int32,
name="settlement_days")
spread = tf.convert_to_tensor(_get_attr(coupon_spec, "spread"),
dtype=self._dtype,
name="spread")
if schedule is not None:
if isinstance(schedule, tf.Tensor):
coupon_dates = dateslib.dates_from_tensor(schedule)
else:
coupon_dates = dateslib.convert_to_date_tensor(schedule)
# Extract starting date for the cashflow
self._start_date = coupon_dates[..., 0]
elif schedule_fn is None:
# TODO(b/160446193): Calendar is ignored and weekends only is used
calendar = dateslib.create_holiday_calendar(
weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)
self._calendar = calendar
coupon_dates = _generate_schedule(
start_date=self._start_date,
end_date=self._end_date,
coupon_frequency=coupon_frequency,
roll_convention=roll_convention,
calendar=calendar,
settlement_days=self._settlement_days,
end_of_month=eom,
first_coupon_date=self._first_coupon_date,
penultimate_coupon_date=self._penultimate_coupon_date)
# Extract starting date for the cashflow
self._start_date = coupon_dates[..., 0]
else:
if first_coupon_date is not None:
first_coupon_date = self._first_coupon_date.to_tensor()
if penultimate_coupon_date is not None:
penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()
coupon_dates = schedule_fn(
start_date=self._start_date.to_tensor(),
end_date=self._end_date.to_tensor(),
coupon_frequency=coupon_frequency.quantity(),
settlement_days=self._settlement_days,
first_coupon_date=first_coupon_date,
penultimate_coupon_date=penultimate_coupon_date)
# Convert to DateTensor if the result comes from a tf.function
coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)
# Extract batch shape
self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]
accrual_start_dates = coupon_dates[..., :-1]
coupon_start_dates = coupon_dates[..., :-1]
coupon_end_dates = coupon_dates[..., 1:]
accrual_end_dates = accrual_start_dates + reset_frequency.expand_dims(
axis=-1)
# Adjust for irregular coupons
accrual_end_dates = dateslib.DateTensor.concat(
[coupon_end_dates[..., :1],
accrual_end_dates[..., 1:-1],
coupon_end_dates[..., -1:]], axis=-1)
daycount_fractions = daycount_fn(
start_date=coupon_start_dates,
end_date=coupon_end_dates)
self._num_cashflows = tf.shape(daycount_fractions)[-1]
self._coupon_start_dates = coupon_start_dates
self._coupon_end_dates = coupon_end_dates
self._accrual_start_date = accrual_start_dates
self._accrual_end_date = accrual_end_dates
self._notional = notional
self._daycount_fractions = daycount_fractions
self._spread = spread
self._currency = _get_attr(coupon_spec, "currency")
self._daycount_fn = daycount_fn
# Construct the reference curve object
# Extract all rate_curves
self._floating_rate_type = to_list(
_get_attr(coupon_spec, "floating_rate_type"))
self._currency = to_list(self._currency)
if rate_index_curves is None:
rate_index_curves = []
for currency, floating_rate_type in zip(self._currency,
self._floating_rate_type):
rate_index_curves.append(curve_types_lib.RateIndexCurve(
currency=currency, index=floating_rate_type))
[
self._reference_curve_type,
self._reference_mask
] = process_curve_types(rate_index_curves, reference_mask)
self._past_fixing = past_fixing
def daycount_fn(self) -> Callable[..., Any]:
  """Returns the day count function used to accrue coupon interest."""
  return self._daycount_fn

@property
def notional(self) -> types.FloatTensor:
  """Notional amounts of the underlying cashflows."""
  return self._notional

@property
def discount_curve_type(self) -> _CurveType:
  """Curve type(s) used to discount the cashflows."""
  return self._discount_curve_type

@property
def reference_curve_type(self) -> _CurveType:
  """Curve type(s) used to compute the floating reference rates."""
  return self._reference_curve_type

@property
def batch_shape(self) -> types.StringTensor:
  """Batch shape of the stream (shape excluding the cashflow axis)."""
  return self._batch_shape

@property
def daycount_fractions(self) -> types.FloatTensor:
  """Day count fractions for each coupon accrual period."""
  return self._daycount_fractions

@property
def cashflow_dates(self) -> types.DateTensor:
  """Payment dates of the cashflows (the coupon end dates)."""
  return self._coupon_end_dates

@property
def coupon_start_dates(self) -> types.DateTensor:
  """Start dates of each coupon period."""
  return self._coupon_start_dates

@property
def coupon_end_dates(self) -> types.DateTensor:
  """End dates of each coupon period."""
  return self._coupon_end_dates
def forward_rates(self,
                  market: pmd.ProcessedMarketData,
                  past_fixing: Optional[types.FloatTensor] = None,
                  name: Optional[str] = None
                  ) -> Tuple[types.DateTensor, types.FloatTensor]:
  """Returns forward rates for the floating leg.

  Args:
    market: An instance of `ProcessedMarketData`.
    past_fixing: An optional `Tensor` of shape compatible with
      `batch_shape + [1]`. Represents the fixings for the cashflows as
      observed at `market.date`.
    name: Python str. The name to give to the ops created by this function.
      Default value: `None` which maps to 'forward_rates'.

  Returns:
    A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]`
    containing the dates and the corresponding forward rates for each stream
    based on the input market data.
  """
  name = name or (self._name + "_forward_rates")
  with tf.name_scope(name):
    reference_curve = get_discount_curve(
        self._reference_curve_type, market, self._reference_mask)
    valuation_date = dateslib.convert_to_date_tensor(market.date)
    # Previous fixing date: work in date ordinals so that tf.searchsorted
    # and tf.gather can be used directly.
    coupon_start_date_ord = self._coupon_start_dates.ordinal()
    coupon_end_date_ord = self._coupon_end_dates.ordinal()
    valuation_date_ord = valuation_date.ordinal()
    batch_shape = tf.shape(coupon_start_date_ord)[:-1]
    # Broadcast valuation date batch shape for tf.searchsorted
    valuation_date_ord += tf.expand_dims(
        tf.zeros(batch_shape, dtype=tf.int32), axis=-1)
    # Index of the coupon period containing the valuation date (clipped
    # at 0 for valuation dates before the first coupon start).
    ind = tf.maximum(tf.searchsorted(coupon_start_date_ord,
                                     valuation_date_ord) - 1, 0)
    # Fixings are assumed to be the same as coupon start dates
    # TODO(b/177047910): add fixing settlement dates.
    # Shape `batch_shape + [1]`
    fixing_dates_ord = tf.gather(
        coupon_start_date_ord, ind,
        batch_dims=len(coupon_start_date_ord.shape) - 1)
    fixing_end_dates_ord = tf.gather(
        coupon_end_date_ord, ind,
        batch_dims=len(coupon_start_date_ord.shape) - 1)
    fixing_dates = dateslib.dates_from_ordinals(fixing_dates_ord)
    fixing_end_dates = dateslib.dates_from_ordinals(fixing_end_dates_ord)
    # Get fixings. Shape batch_shape + [1]
    if past_fixing is None:
      past_fixing = _get_fixings(
          fixing_dates,
          fixing_end_dates,
          self._reference_curve_type,
          self._reference_mask,
          market)
    else:
      past_fixing = tf.convert_to_tensor(past_fixing, dtype=self._dtype,
                                         name="past_fixing")
    forward_rates = reference_curve.forward_rate(
        self._accrual_start_date,
        self._accrual_end_date,
        day_count_fraction=self._daycount_fractions)
    # Shape batch_shape + [num_cashflows].
    # Zero-length accrual periods contribute a zero rate.
    forward_rates = tf.where(self._daycount_fractions > 0., forward_rates,
                             tf.zeros_like(forward_rates))
    # If coupon end date is before the valuation date, the payment is in the
    # past. If valuation date is between coupon start date and coupon end
    # date, then the rate has been fixed but not paid. Otherwise the rate is
    # not fixed and should be read from the curve.
    # Shape batch_shape + [num_cashflows]
    forward_rates = tf.where(
        self._coupon_end_dates < valuation_date,
        tf.constant(0, dtype=self._dtype),
        tf.where(self._coupon_start_dates >= valuation_date,
                 forward_rates, past_fixing))
    return self._coupon_end_dates, forward_rates
def cashflows(self,
              market: pmd.ProcessedMarketData,
              past_fixing: Optional[types.FloatTensor] = None,
              name: Optional[str] = None
              ) -> Tuple[types.DateTensor, types.FloatTensor]:
  """Returns cashflows for the floating leg.

  Args:
    market: An instance of `ProcessedMarketData`.
    past_fixing: An optional `Tensor` of shape compatible with
      `batch_shape + [1]`. Represents the fixings for the cashflows as
      observed at `market.date`.
    name: Python str. The name to give to the ops created by this function.
      Default value: `None` which maps to 'cashflows'.

  Returns:
    A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and
    containing the dates and the corresponding cashflows price for each
    stream based on the input market data.
  """
  name = name or (self._name + "_cashflows")
  with tf.name_scope(name):
    # Forward rates already zero out paid coupons and apply past fixings.
    _, forward_rates = self.forward_rates(market, past_fixing=past_fixing)
    coupon_rate = forward_rates + tf.expand_dims(
        self._spread, axis=-1)
    # self._notional is of shape [batch_shape], so broadcasting is needed
    notional = tf.expand_dims(self._notional, axis=-1)
    # Cashflow = notional * accrual fraction * (forward rate + spread).
    cashflows = notional * (
        self._daycount_fractions * coupon_rate)
    return self._coupon_end_dates, cashflows
def price(self,
          market: pmd.ProcessedMarketData,
          name: Optional[str] = None) -> types.FloatTensor:
  """Returns the present value of the stream on the valuation date.

  Args:
    market: An instance of `ProcessedMarketData`.
    name: Python str. The name to give to the ops created by this function.
      Default value: `None` which maps to 'price'.

  Returns:
    A `Tensor` of shape `batch_shape` containing the modeled price of each
    stream based on the input market data.
  """
  name = name or (self._name + "_price")
  with tf.name_scope(name):
    discount_curve = get_discount_curve(
        self._discount_curve_type, market, self._mask)
    discount_factors = discount_curve.discount_factor(self._coupon_end_dates)
    # Fixings supplied at construction time (if any) are used here.
    _, cashflows = self.cashflows(market, past_fixing=self._past_fixing)
    # Cashflows present values
    cashflow_pvs = cashflows * discount_factors
    # NOTE(review): reduces over axis=1, which assumes a single batch
    # dimension -- confirm behavior for higher-rank batch shapes.
    return tf.math.reduce_sum(cashflow_pvs, axis=1)
def _generate_schedule(
    start_date: dateslib.DateTensor,
    end_date: dateslib.DateTensor,
    coupon_frequency: dateslib.PeriodTensor,
    calendar: dateslib.HolidayCalendar,
    roll_convention: dateslib.BusinessDayConvention,
    settlement_days: tf.Tensor,
    end_of_month: bool = False,
    first_coupon_date: Optional[dateslib.DateTensor] = None,
    penultimate_coupon_date: Optional[dateslib.DateTensor] = None) -> tf.Tensor:
  """Method to generate coupon dates.

  Args:
    start_date: Starting dates of schedule.
    end_date: End dates of the schedule.
    coupon_frequency: A `PeriodTensor` specifying the frequency of coupon
      payments.
    calendar: An instance of `dateslib.HolidayCalendar`.
    roll_convention: Business day roll convention of the schedule.
    settlement_days: An integer `Tensor` with the shape compatible with
      `start_date` and `end_date` specifying the number of settlement days.
    end_of_month: Python `bool`. If `True`, shifts all dates in schedule to
      the ends of corresponding months, if `start_date` or `end_date` (
      depending on `backward`) is at the end of a month. The shift is applied
      before applying `roll_convention`.
    first_coupon_date: First day of the irregular coupon, if any.
    penultimate_coupon_date: Penultimate day of the coupon, if any.

  Returns:
    A `DateTensor` containing the generated date schedule of shape
    `batch_shape + [max_num_coupon_days]`, where `max_num_coupon_days` is the
    number of coupon days for the longest living swap in the batch. The coupon
    days for the rest of the swaps are padded with their final coupon day.

  Raises:
    ValueError: If both `first_coupon_date` and `penultimate_coupon_date`
      are supplied (at most one irregular stub is allowed).
  """
  if first_coupon_date is not None and penultimate_coupon_date is not None:
    raise ValueError("Only first or last coupon dates can be specified "
                     " for an irregular coupon.")
  # A first irregular coupon replaces the schedule's start date.
  start_date = first_coupon_date or start_date
  # Adjust with settlement days
  start_date = calendar.add_business_days(
      start_date, settlement_days,
      roll_convention=roll_convention)
  # A penultimate irregular coupon forces backward schedule generation.
  if penultimate_coupon_date is None:
    backward = False
  else:
    backward = True
    end_date = end_date or penultimate_coupon_date
    # Adjust with settlement days
    end_date = calendar.add_business_days(
        end_date, settlement_days,
        roll_convention=roll_convention)
  coupon_dates = dateslib.PeriodicSchedule(
      start_date=start_date,
      end_date=end_date,
      tenor=coupon_frequency,
      roll_convention=roll_convention,
      backward=backward,
      end_of_month=end_of_month).dates()
  # Add the regular coupons
  coupon_dates = dateslib.DateTensor.concat(
      [start_date.expand_dims(-1),
       coupon_dates,
       end_date.expand_dims(-1)], axis=-1)
  return coupon_dates
def get_discount_curve(
    discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve,
                                     curve_types_lib.RateIndexCurve]],
    market: pmd.ProcessedMarketData,
    mask: List[int]) -> rate_curve.RateCurve:
  """Builds a batched discount curve.

  Given a list of discount curves and an integer mask, creates a discount
  curve object to compute discount factors against the list of discount
  curves.

  #### Example
  ```none
  curve_types = [RiskFreeCurve("USD"), RiskFreeCurve("AUD")]
  # A mask to price a batch of 7 instruments with the corresponding discount
  # curves ["USD", "AUD", "AUD", "AUD", "USD", "USD", "AUD"].
  mask = [0, 1, 1, 1, 0, 0, 1]
  market = MarketDataDict(...)
  get_discount_curve(curve_types, market, mask)
  # Returns a RateCurve object that can compute discount factors for a
  # batch of 7 dates.
  ```

  Args:
    discount_curve_types: A list of curve types.
    market: an instance of the processed market data.
    mask: An integer mask.

  Returns:
    An instance of `RateCurve`.
  """
  discount_curves = [market.yield_curve(curve_type)
                     for curve_type in discount_curve_types]
  discounts = []
  dates = []
  interpolation_method = None
  interpolate_rates = None
  for curve in discount_curves:
    discount, date = curve.discount_factors_and_dates()
    discounts.append(discount)
    dates.append(date)
    interpolation_method = curve.interpolation_method
    interpolate_rates = curve.interpolate_rates
  # Pad discounts and dates to a common number of curve nodes so that they
  # can be stacked into single batched tensors.
  all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)
  # Bug fix: stack the *padded* dates. Previously the padded result was
  # discarded and the unpadded `dates` were stacked, which is inconsistent
  # with the padded `all_discounts` (and fails when curves have different
  # numbers of nodes).
  padded_dates = pad.pad_date_tensors(dates)
  all_dates = dateslib.DateTensor.stack(padded_dates, axis=0)
  # Select per-instrument curve nodes according to the mask.
  prepare_discounts = tf.gather(all_discounts, mask)
  prepare_dates = dateslib.dates_from_ordinals(
      tf.gather(all_dates.ordinal(), mask))
  # All curves are assumed to have the same interpolation method
  # TODO(b/168411153): Extend to the case with multiple curve configs.
  discount_curve = rate_curve.RateCurve(
      prepare_dates, prepare_discounts, market.date,
      interpolator=interpolation_method,
      interpolate_rates=interpolate_rates)
  return discount_curve
def _get_fixings(start_dates,
                 end_dates,
                 reference_curve_types,
                 reference_mask,
                 market):
  """Computes fixings for a list of reference curves.

  Args:
    start_dates: A `DateTensor` of fixing period start dates.
    end_dates: A `DateTensor` of fixing period end dates.
    reference_curve_types: A list of unique reference curve types.
    reference_mask: An integer mask mapping each cashflow to its curve in
      `reference_curve_types`.
    market: An instance of the processed market data.

  Returns:
    A `Tensor` of fixings (fixing rate times the accrual year fraction),
    one per input cashflow, restored to the original cashflow order.
  """
  num_curves = len(reference_curve_types)
  if num_curves > 1:
    # For each curve get corresponding cashflow indices
    split_indices = [tf.squeeze(tf.where(tf.equal(reference_mask, i)), -1)
                     for i in range(num_curves)]
  else:
    # Single curve: no splitting needed; this is just a placeholder index.
    split_indices = [0]
  fixings = []
  start_dates_ordinal = start_dates.ordinal()
  end_dates_ordinal = end_dates.ordinal()
  for idx, reference_curve_type in zip(split_indices, reference_curve_types):
    if num_curves > 1:
      # Get all dates corresponding to the reference curve
      start_date = dateslib.dates_from_ordinals(
          tf.gather(start_dates_ordinal, idx))
      end_date = dateslib.dates_from_ordinals(
          tf.gather(end_dates_ordinal, idx))
    else:
      start_date = start_dates
      end_date = end_dates
    fixing, fixing_daycount = market.fixings(start_date, reference_curve_type)
    if fixing_daycount is not None:
      fixing_daycount = market_data_utils.get_daycount_fn(
          fixing_daycount, dtype=market.dtype)
      year_fraction = fixing_daycount(start_date=start_date, end_date=end_date)
    else:
      # NOTE(review): a zero year fraction zeroes the fixing contribution
      # below when the market supplies no fixing daycount -- presumably
      # intentional; confirm against callers.
      year_fraction = 0.0
    fixings.append(
        fixing * year_fraction)
  fixings = pad.pad_tensors(fixings)
  all_indices = tf.concat(split_indices, axis=0)
  all_fixings = tf.concat(fixings, axis=0)
  if num_curves > 1:
    # Undo the per-curve split to restore the original cashflow order.
    return tf.gather(all_fixings, tf.argsort(all_indices))
  else:
    return all_fixings
def process_curve_types(
    curve_types: List[Union[curve_types_lib.RiskFreeCurve,
                            curve_types_lib.RateIndexCurve]],
    mask=None
    ) -> Tuple[
        List[Union[curve_types_lib.RiskFreeCurve,
                   curve_types_lib.RateIndexCurve]],
        List[int]]:
  """Extracts unique curves and computes an integer mask.

  #### Example
  ```python
  curve_types = [RiskFreeCurve("USD"), RiskFreeCurve("AUD"),
                 RiskFreeCurve("USD")]
  process_curve_types(curve_types)
  # Returns [RiskFreeCurve("AUD"), RiskFreeCurve("USD")], [1, 0, 1]
  ```

  Args:
    curve_types: A list of either `RiskFreeCurve` or `RateIndexCurve`.
    mask: An optional integer mask for the sorted curve type sequence. If
      supplied, the function performs no processing and returns
      `(curve_types, mask)` as is.

  Returns:
    A Tuple of `(curve_list, mask)` where `curve_list` is a list of unique
    curves in `curve_types` and `mask` is a list of integers which is the
    mask for `curve_types`.

  Raises:
    ValueError: If a curve in `curve_types` is neither a `RiskFreeCurve`
      nor a `RateIndexCurve`.
  """
  def _get_signature(curve):
    """Converts curve information to a unique string key."""
    if isinstance(curve, curve_types_lib.RiskFreeCurve):
      return curve.currency.value
    elif isinstance(curve, curve_types_lib.RateIndexCurve):
      return (curve.currency.value + "_" + curve.index.type.value
              + "_" + "_".join(curve.index.source)
              + "_" + "_".join(curve.index.name))
    else:
      raise ValueError(f"{type(curve)} is not supported.")
  curve_list = to_list(curve_types)
  if mask is not None:
    return curve_list, mask
  curve_hash = [_get_signature(curve_type) for curve_type in curve_list]
  # Map each unique signature back to its curve object.
  hash_discount_map = {
      _get_signature(curve_type): curve_type for curve_type in curve_list}
  mask, mask_map, num_unique_discounts = create_mask(curve_hash)
  # Recover the unique curves in the sorted order implied by the mask.
  discount_curve_types = [
      hash_discount_map[mask_map[i]]
      for i in range(num_unique_discounts)]
  return discount_curve_types, mask
def create_mask(x):
  """Given a list of objects, creates an integer mask for its unique values.

  For example, ``create_mask(["USD", "AUD", "USD"])`` returns the mask
  ``[1, 0, 1]``, the map ``{0: "AUD", 1: "USD"}`` and the number of unique
  elements, which is ``2``. Note that the unique elements are sorted.

  Args:
    x: 1-d numpy array.

  Returns:
    A tuple of three objects:
      * A list of integers that is the mask for `x`,
      * A dictionary map between entries of `x` and the list
      * The number of unique elements.
  """
  unique_vals = np.unique(x)
  index_of = {val: pos for pos, val in enumerate(unique_vals)}
  mask_map = {pos: val for pos, val in enumerate(unique_vals)}
  return [index_of[item] for item in x], mask_map, len(unique_vals)
def to_list(x):
  """Wraps `x` in a list unless it is already a list or tuple."""
  return x if isinstance(x, (list, tuple)) else [x]
def _get_attr(obj, key):
if isinstance(obj, dict):
return obj[key]
else:
return obj.__getattribute__(key)
__all__ = ["FixedCashflowStream", "FloatingCashflowStream"]
| 43.058071 | 107 | 0.68135 |
from typing import Optional, Tuple, Callable, Any, List, Union
import numpy as np
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types as curve_types_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import rate_curve
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils as market_data_utils
from tf_quant_finance.experimental.pricing_platform.framework.rate_instruments import coupon_specs
from tf_quant_finance.experimental.pricing_platform.instrument_protos import period_pb2
from tf_quant_finance.math import pad
_CurveType = curve_types_lib.CurveType
class FixedCashflowStream:
def __init__(self,
coupon_spec: coupon_specs.FixedCouponSpecs,
discount_curve_type: Union[_CurveType, List[_CurveType]],
start_date: types.DateTensor = None,
end_date: types.DateTensor = None,
discount_curve_mask: types.IntTensor = None,
first_coupon_date: Optional[types.DateTensor] = None,
penultimate_coupon_date: Optional[types.DateTensor] = None,
schedule_fn: Optional[Callable[..., Any]] = None,
schedule: Optional[types.DateTensor] = None,
dtype: Optional[types.Dtype] = None,
name: Optional[str] = None):
self._name = name or "fixed_cashflow_stream"
with tf.name_scope(self._name):
curve_list = to_list(discount_curve_type)
[
self._discount_curve_type,
self._mask
] = process_curve_types(curve_list, discount_curve_mask)
if schedule is None:
if (start_date is None) or (end_date is None):
raise ValueError("If `schedule` is not supplied both "
"`start_date` and `end_date` should be supplied")
if isinstance(start_date, tf.Tensor):
self._start_date = dateslib.dates_from_tensor(
start_date)
else:
self._start_date = dateslib.convert_to_date_tensor(
start_date)
if isinstance(start_date, tf.Tensor):
self._end_date = dateslib.dates_from_tensor(
end_date)
else:
self._end_date = dateslib.convert_to_date_tensor(
end_date)
self._first_coupon_date = first_coupon_date
self._penultimate_coupon_date = penultimate_coupon_date
if self._first_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._first_coupon_date = dateslib.dates_from_tensor(
first_coupon_date)
else:
self._first_coupon_date = dateslib.convert_to_date_tensor(
first_coupon_date)
if self._penultimate_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._penultimate_coupon_date = dateslib.dates_from_tensor(
penultimate_coupon_date)
else:
self._penultimate_coupon_date = dateslib.convert_to_date_tensor(
penultimate_coupon_date)
coupon_frequency = _get_attr(coupon_spec, "coupon_frequency")
if isinstance(coupon_frequency, period_pb2.Period):
coupon_frequency = market_data_utils.get_period(
_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, (list, tuple)):
coupon_frequency = market_data_utils.period_from_list(
*_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, dict):
coupon_frequency = market_data_utils.period_from_dict(
_get_attr(coupon_spec, "coupon_frequency"))
businessday_rule = coupon_spec.businessday_rule
roll_convention, eom = market_data_utils.get_business_day_convention(
businessday_rule)
notional = tf.convert_to_tensor(
_get_attr(coupon_spec, "notional_amount"),
dtype=dtype,
name="notional")
self._dtype = dtype or notional.dtype
fixed_rate = tf.convert_to_tensor(_get_attr(coupon_spec, "fixed_rate"),
dtype=self._dtype,
name="fixed_rate")
daycount_fn = market_data_utils.get_daycount_fn(
_get_attr(coupon_spec, "daycount_convention"), self._dtype)
self._settlement_days = tf.convert_to_tensor(
_get_attr(coupon_spec, "settlement_days"),
dtype=tf.int32,
name="settlement_days")
if schedule is not None:
if isinstance(schedule, tf.Tensor):
coupon_dates = dateslib.dates_from_tensor(schedule)
else:
coupon_dates = dateslib.convert_to_date_tensor(schedule)
self._start_date = coupon_dates[..., 0]
elif schedule_fn is None:
calendar = dateslib.create_holiday_calendar(
weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)
self._calendar = calendar
coupon_dates = _generate_schedule(
start_date=self._start_date,
end_date=self._end_date,
coupon_frequency=coupon_frequency,
roll_convention=roll_convention,
calendar=calendar,
settlement_days=self._settlement_days,
end_of_month=eom,
first_coupon_date=self._first_coupon_date,
penultimate_coupon_date=self._penultimate_coupon_date)
self._start_date = coupon_dates[..., 0]
else:
if first_coupon_date is not None:
first_coupon_date = self._first_coupon_date.to_tensor()
if penultimate_coupon_date is not None:
penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()
coupon_dates = schedule_fn(
start_date=self._start_date.to_tensor(),
end_date=self._end_date.to_tensor(),
coupon_frequency=coupon_frequency.quantity(),
settlement_days=self._settlement_days,
first_coupon_date=first_coupon_date,
penultimate_coupon_date=penultimate_coupon_date)
coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)
self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]
payment_dates = coupon_dates[..., 1:]
daycount_fractions = daycount_fn(
start_date=coupon_dates[..., :-1],
end_date=coupon_dates[..., 1:])
coupon_rate = tf.expand_dims(fixed_rate, axis=-1)
self._num_cashflows = tf.shape(payment_dates.ordinal())[-1]
self._payment_dates = payment_dates
self._notional = notional
self._daycount_fractions = daycount_fractions
self._coupon_rate = coupon_rate
self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)
self._daycount_fn = daycount_fn
def daycount_fn(self) -> Callable[..., Any]:
return self._daycount_fn
@property
def daycount_fractions(self) -> types.FloatTensor:
return self._daycount_fractions
@property
def fixed_rate(self) -> types.FloatTensor:
return self._fixed_rate
@property
def notional(self) -> types.FloatTensor:
return self._notional
@property
def discount_curve_type(self) -> _CurveType:
return self._discount_curve_type
@property
def batch_shape(self) -> types.StringTensor:
return self._batch_shape
@property
def cashflow_dates(self) -> types.DateTensor:
return self._payment_dates
def cashflows(self,
market: pmd.ProcessedMarketData,
name: Optional[str] = None
) -> Tuple[types.DateTensor, types.FloatTensor]:
name = name or (self._name + "_cashflows")
with tf.name_scope(name):
valuation_date = dateslib.convert_to_date_tensor(market.date)
future_cashflows = tf.cast(self._payment_dates >= valuation_date,
dtype=self._dtype)
notional = tf.expand_dims(self._notional, axis=-1)
cashflows = notional * (
future_cashflows * self._daycount_fractions * self._coupon_rate)
return self._payment_dates, cashflows
def price(self,
market: pmd.ProcessedMarketData,
name: Optional[str] = None):
name = name or (self._name + "_price")
with tf.name_scope(name):
discount_curve = get_discount_curve(
self._discount_curve_type, market, self._mask)
discount_factors = discount_curve.discount_factor(
self._payment_dates)
_, cashflows = self.cashflows(market)
cashflow_pvs = (cashflows * discount_factors)
return tf.math.reduce_sum(cashflow_pvs, axis=1)
class FloatingCashflowStream:
def __init__(self,
coupon_spec: coupon_specs.FloatCouponSpecs,
discount_curve_type: Union[_CurveType, List[_CurveType]],
start_date: types.DateTensor = None,
end_date: types.DateTensor = None,
discount_curve_mask: types.IntTensor = None,
rate_index_curves: Union[
curve_types_lib.RateIndexCurve,
List[curve_types_lib.RateIndexCurve]] = None,
reference_mask: types.IntTensor = None,
first_coupon_date: Optional[types.DateTensor] = None,
penultimate_coupon_date: Optional[types.DateTensor] = None,
schedule_fn: Optional[Callable[..., Any]] = None,
schedule: Optional[types.DateTensor] = None,
past_fixing: Optional[types.FloatTensor] = None,
dtype: Optional[types.Dtype] = None,
name: Optional[str] = None):
self._name = name or "floating_cashflow_stream"
with tf.name_scope(self._name):
curve_list = to_list(discount_curve_type)
[
self._discount_curve_type,
self._mask
] = process_curve_types(curve_list, discount_curve_mask)
self._first_coupon_date = None
self._penultimate_coupon_date = None
if schedule is None:
if (start_date is None) or (end_date is None):
raise ValueError("If `schedule` is not supplied both "
"`start_date` and `end_date` should be supplied")
if schedule is None:
if isinstance(start_date, tf.Tensor):
self._start_date = dateslib.dates_from_tensor(
start_date)
else:
self._start_date = dateslib.convert_to_date_tensor(
start_date)
if isinstance(start_date, tf.Tensor):
self._end_date = dateslib.dates_from_tensor(
end_date)
else:
self._end_date = dateslib.convert_to_date_tensor(
end_date)
self._first_coupon_date = first_coupon_date
self._penultimate_coupon_date = penultimate_coupon_date
if self._first_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._first_coupon_date = dateslib.dates_from_tensor(
first_coupon_date)
else:
self._first_coupon_date = dateslib.convert_to_date_tensor(
first_coupon_date)
if self._penultimate_coupon_date is not None:
if isinstance(start_date, tf.Tensor):
self._penultimate_coupon_date = dateslib.dates_from_tensor(
penultimate_coupon_date)
else:
self._penultimate_coupon_date = dateslib.convert_to_date_tensor(
penultimate_coupon_date)
coupon_frequency = _get_attr(coupon_spec, "coupon_frequency")
if isinstance(coupon_frequency, period_pb2.Period):
coupon_frequency = market_data_utils.get_period(
_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, (list, tuple)):
coupon_frequency = market_data_utils.period_from_list(
*_get_attr(coupon_spec, "coupon_frequency"))
if isinstance(coupon_frequency, dict):
coupon_frequency = market_data_utils.period_from_dict(
_get_attr(coupon_spec, "coupon_frequency"))
reset_frequency = _get_attr(coupon_spec, "reset_frequency")
if isinstance(reset_frequency, period_pb2.Period):
reset_frequency = market_data_utils.get_period(
_get_attr(coupon_spec, "reset_frequency"))
if isinstance(reset_frequency, (list, tuple)):
reset_frequency = market_data_utils.period_from_list(
*_get_attr(coupon_spec, "reset_frequency"))
if isinstance(reset_frequency, dict):
reset_frequency = market_data_utils.period_from_dict(
_get_attr(coupon_spec, "reset_frequency"))
self._reset_frequency = reset_frequency
businessday_rule = _get_attr(coupon_spec, "businessday_rule")
roll_convention, eom = market_data_utils.get_business_day_convention(
businessday_rule)
notional = tf.convert_to_tensor(
_get_attr(coupon_spec, "notional_amount"),
dtype=dtype,
name="notional")
self._dtype = dtype or notional.dtype
daycount_convention = _get_attr(coupon_spec, "daycount_convention")
daycount_fn = market_data_utils.get_daycount_fn(
_get_attr(coupon_spec, "daycount_convention"), self._dtype)
self._daycount_convention = daycount_convention
self._settlement_days = tf.convert_to_tensor(
_get_attr(coupon_spec, "settlement_days"),
dtype=tf.int32,
name="settlement_days")
spread = tf.convert_to_tensor(_get_attr(coupon_spec, "spread"),
dtype=self._dtype,
name="spread")
if schedule is not None:
if isinstance(schedule, tf.Tensor):
coupon_dates = dateslib.dates_from_tensor(schedule)
else:
coupon_dates = dateslib.convert_to_date_tensor(schedule)
self._start_date = coupon_dates[..., 0]
elif schedule_fn is None:
calendar = dateslib.create_holiday_calendar(
weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)
self._calendar = calendar
coupon_dates = _generate_schedule(
start_date=self._start_date,
end_date=self._end_date,
coupon_frequency=coupon_frequency,
roll_convention=roll_convention,
calendar=calendar,
settlement_days=self._settlement_days,
end_of_month=eom,
first_coupon_date=self._first_coupon_date,
penultimate_coupon_date=self._penultimate_coupon_date)
self._start_date = coupon_dates[..., 0]
else:
if first_coupon_date is not None:
first_coupon_date = self._first_coupon_date.to_tensor()
if penultimate_coupon_date is not None:
penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()
coupon_dates = schedule_fn(
start_date=self._start_date.to_tensor(),
end_date=self._end_date.to_tensor(),
coupon_frequency=coupon_frequency.quantity(),
settlement_days=self._settlement_days,
first_coupon_date=first_coupon_date,
penultimate_coupon_date=penultimate_coupon_date)
coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)
self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]
accrual_start_dates = coupon_dates[..., :-1]
coupon_start_dates = coupon_dates[..., :-1]
coupon_end_dates = coupon_dates[..., 1:]
accrual_end_dates = accrual_start_dates + reset_frequency.expand_dims(
axis=-1)
accrual_end_dates = dateslib.DateTensor.concat(
[coupon_end_dates[..., :1],
accrual_end_dates[..., 1:-1],
coupon_end_dates[..., -1:]], axis=-1)
daycount_fractions = daycount_fn(
start_date=coupon_start_dates,
end_date=coupon_end_dates)
self._num_cashflows = tf.shape(daycount_fractions)[-1]
self._coupon_start_dates = coupon_start_dates
self._coupon_end_dates = coupon_end_dates
self._accrual_start_date = accrual_start_dates
self._accrual_end_date = accrual_end_dates
self._notional = notional
self._daycount_fractions = daycount_fractions
self._spread = spread
self._currency = _get_attr(coupon_spec, "currency")
self._daycount_fn = daycount_fn
self._floating_rate_type = to_list(
_get_attr(coupon_spec, "floating_rate_type"))
self._currency = to_list(self._currency)
if rate_index_curves is None:
rate_index_curves = []
for currency, floating_rate_type in zip(self._currency,
self._floating_rate_type):
rate_index_curves.append(curve_types_lib.RateIndexCurve(
currency=currency, index=floating_rate_type))
[
self._reference_curve_type,
self._reference_mask
] = process_curve_types(rate_index_curves, reference_mask)
self._past_fixing = past_fixing
def daycount_fn(self) -> Callable[..., Any]:
return self._daycount_fn
@property
def notional(self) -> types.FloatTensor:
return self._notional
@property
def discount_curve_type(self) -> _CurveType:
return self._discount_curve_type
@property
def reference_curve_type(self) -> _CurveType:
return self._reference_curve_type
@property
def batch_shape(self) -> types.StringTensor:
return self._batch_shape
@property
def daycount_fractions(self) -> types.FloatTensor:
return self._daycount_fractions
@property
def cashflow_dates(self) -> types.DateTensor:
return self._coupon_end_dates
@property
def coupon_start_dates(self) -> types.DateTensor:
return self._coupon_start_dates
@property
def coupon_end_dates(self) -> types.DateTensor:
return self._coupon_end_dates
def forward_rates(self,
market: pmd.ProcessedMarketData,
past_fixing: Optional[types.FloatTensor] = None,
name: Optional[str] = None
) -> Tuple[types.DateTensor, types.FloatTensor]:
name = name or (self._name + "_forward_rates")
with tf.name_scope(name):
reference_curve = get_discount_curve(
self._reference_curve_type, market, self._reference_mask)
valuation_date = dateslib.convert_to_date_tensor(market.date)
coupon_start_date_ord = self._coupon_start_dates.ordinal()
coupon_end_date_ord = self._coupon_end_dates.ordinal()
valuation_date_ord = valuation_date.ordinal()
batch_shape = tf.shape(coupon_start_date_ord)[:-1]
valuation_date_ord += tf.expand_dims(
tf.zeros(batch_shape, dtype=tf.int32), axis=-1)
ind = tf.maximum(tf.searchsorted(coupon_start_date_ord,
valuation_date_ord) - 1, 0)
fixing_dates_ord = tf.gather(
coupon_start_date_ord, ind,
batch_dims=len(coupon_start_date_ord.shape) - 1)
fixing_end_dates_ord = tf.gather(
coupon_end_date_ord, ind,
batch_dims=len(coupon_start_date_ord.shape) - 1)
fixing_dates = dateslib.dates_from_ordinals(fixing_dates_ord)
fixing_end_dates = dateslib.dates_from_ordinals(fixing_end_dates_ord)
if past_fixing is None:
past_fixing = _get_fixings(
fixing_dates,
fixing_end_dates,
self._reference_curve_type,
self._reference_mask,
market)
else:
past_fixing = tf.convert_to_tensor(past_fixing, dtype=self._dtype,
name="past_fixing")
forward_rates = reference_curve.forward_rate(
self._accrual_start_date,
self._accrual_end_date,
day_count_fraction=self._daycount_fractions)
forward_rates = tf.where(self._daycount_fractions > 0., forward_rates,
tf.zeros_like(forward_rates))
forward_rates = tf.where(
self._coupon_end_dates < valuation_date,
tf.constant(0, dtype=self._dtype),
tf.where(self._coupon_start_dates >= valuation_date,
forward_rates, past_fixing))
return self._coupon_end_dates, forward_rates
def cashflows(self,
market: pmd.ProcessedMarketData,
past_fixing: Optional[types.FloatTensor] = None,
name: Optional[str] = None
) -> Tuple[types.DateTensor, types.FloatTensor]:
name = name or (self._name + "_cashflows")
with tf.name_scope(name):
_, forward_rates = self.forward_rates(market, past_fixing=past_fixing)
coupon_rate = forward_rates + tf.expand_dims(
self._spread, axis=-1)
notional = tf.expand_dims(self._notional, axis=-1)
cashflows = notional * (
self._daycount_fractions * coupon_rate)
return self._coupon_end_dates, cashflows
def price(self,
market: pmd.ProcessedMarketData,
name: Optional[str] = None) -> types.FloatTensor:
name = name or (self._name + "_price")
with tf.name_scope(name):
discount_curve = get_discount_curve(
self._discount_curve_type, market, self._mask)
discount_factors = discount_curve.discount_factor(self._coupon_end_dates)
_, cashflows = self.cashflows(market, past_fixing=self._past_fixing)
cashflow_pvs = cashflows * discount_factors
return tf.math.reduce_sum(cashflow_pvs, axis=1)
def _generate_schedule(
    start_date: dateslib.DateTensor,
    end_date: dateslib.DateTensor,
    coupon_frequency: dateslib.PeriodTensor,
    calendar: dateslib.HolidayCalendar,
    roll_convention: dateslib.BusinessDayConvention,
    settlement_days: tf.Tensor,
    end_of_month: bool = False,
    first_coupon_date: Optional[dateslib.DateTensor] = None,
    penultimate_coupon_date: Optional[dateslib.DateTensor] = None) -> tf.Tensor:
  """Generates a periodic coupon schedule between start and end dates.

  Args:
    start_date: A `DateTensor` of accrual start dates.
    end_date: A `DateTensor` of accrual end dates.
    coupon_frequency: A `PeriodTensor` specifying the coupon period.
    calendar: A `HolidayCalendar` used for business day adjustments.
    roll_convention: Business day roll convention applied to the dates.
    settlement_days: An integer `Tensor` of settlement days; both the start
      and end dates are shifted forward by this many business days.
    end_of_month: Python bool. Whether the end-of-month rule of the
      periodic schedule applies.
      Default value: False.
    first_coupon_date: Optional `DateTensor` overriding the first coupon
      date (irregular first coupon). Mutually exclusive with
      `penultimate_coupon_date`.
      Default value: None.
    penultimate_coupon_date: Optional `DateTensor` of the penultimate
      coupon date (irregular last coupon). When supplied, the schedule is
      generated backward from this date.
      Default value: None.

  Returns:
    A `DateTensor` of coupon dates, with the (settlement-adjusted) start
    and end dates prepended/appended.

  Raises:
    ValueError: If both `first_coupon_date` and `penultimate_coupon_date`
      are supplied.
  """
  if first_coupon_date is not None and penultimate_coupon_date is not None:
    raise ValueError("Only first or last coupon dates can be specified "
                     " for an irregular coupon.")
  start_date = first_coupon_date or start_date
  # Shift to the settlement date and roll to a valid business day.
  start_date = calendar.add_business_days(
      start_date, settlement_days,
      roll_convention=roll_convention)
  # An irregular last coupon requires generating the schedule backward
  # from the penultimate coupon date.
  if penultimate_coupon_date is None:
    backward = False
  else:
    backward = True
  end_date = end_date or penultimate_coupon_date
  end_date = calendar.add_business_days(
      end_date, settlement_days,
      roll_convention=roll_convention)
  coupon_dates = dateslib.PeriodicSchedule(
      start_date=start_date,
      end_date=end_date,
      tenor=coupon_frequency,
      roll_convention=roll_convention,
      backward=backward,
      end_of_month=end_of_month).dates()
  # Bracket the periodic schedule with the adjusted start and end dates.
  coupon_dates = dateslib.DateTensor.concat(
      [start_date.expand_dims(-1),
       coupon_dates,
       end_date.expand_dims(-1)], axis=-1)
  return coupon_dates
def get_discount_curve(
    discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve,
                                     curve_types_lib.RateIndexCurve]],
    market: pmd.ProcessedMarketData,
    mask: List[int]) -> rate_curve.RateCurve:
  """Builds a batched discount curve for a batch of instruments.

  Args:
    discount_curve_types: A list of the unique curve types used by the
      instruments in the batch.
    market: An instance of `ProcessedMarketData` supplying the yield
      curves.
    mask: A list of integers mapping each instrument in the batch to its
      curve in `discount_curve_types`.

  Returns:
    An instance of `RateCurve` whose batch shape matches the length of
    `mask`.
  """
  discount_curves = [market.yield_curve(curve_type)
                     for curve_type in discount_curve_types]
  discounts = []
  dates = []
  interpolation_method = None
  interpolate_rates = None
  for curve in discount_curves:
    discount, date = curve.discount_factors_and_dates()
    discounts.append(discount)
    dates.append(date)
    # NOTE(review): the settings of the last curve win here; this assumes
    # all curves share the same interpolation settings -- TODO confirm.
    interpolation_method = curve.interpolation_method
    interpolate_rates = curve.interpolate_rates
  # Pad all curves to a common number of nodes so they can be stacked.
  all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)
  padded_dates = pad.pad_date_tensors(dates)
  # Bug fix: stack the *padded* dates. Previously the unpadded `dates`
  # list was stacked, silently discarding the padding computed on the
  # line above (and failing for curves with different node counts).
  all_dates = dateslib.DateTensor.stack(padded_dates, axis=0)
  # Expand per-curve data to per-instrument data via the mask.
  prepare_discounts = tf.gather(all_discounts, mask)
  prepare_dates = dateslib.dates_from_ordinals(
      tf.gather(all_dates.ordinal(), mask))
  discount_curve = rate_curve.RateCurve(
      prepare_dates, prepare_discounts, market.date,
      interpolator=interpolation_method,
      interpolate_rates=interpolate_rates)
  return discount_curve
def _get_fixings(start_dates,
                 end_dates,
                 reference_curve_types,
                 reference_mask,
                 market):
  """Computes coupon fixings, splitting the batch by reference curve.

  Args:
    start_dates: A `DateTensor` of fixing (accrual start) dates.
    end_dates: A `DateTensor` of the corresponding accrual end dates.
    reference_curve_types: A list of unique reference curve types used by
      the batch.
    reference_mask: Integer mask mapping each batch element to its curve
      in `reference_curve_types`.
    market: An instance of `ProcessedMarketData` supplying the fixings.

  Returns:
    A real `Tensor` of fixings multiplied by the fixing day count
    fractions, restored to the original batch order.
  """
  num_curves = len(reference_curve_types)
  if num_curves > 1:
    # For each curve, the indices of the batch elements that reference it.
    split_indices = [tf.squeeze(tf.where(tf.equal(reference_mask, i)), -1)
                     for i in range(num_curves)]
  else:
    split_indices = [0]
  fixings = []
  start_dates_ordinal = start_dates.ordinal()
  end_dates_ordinal = end_dates.ordinal()
  for idx, reference_curve_type in zip(split_indices, reference_curve_types):
    if num_curves > 1:
      # Gather only the dates belonging to this reference curve.
      start_date = dateslib.dates_from_ordinals(
          tf.gather(start_dates_ordinal, idx))
      end_date = dateslib.dates_from_ordinals(
          tf.gather(end_dates_ordinal, idx))
    else:
      start_date = start_dates
      end_date = end_dates
    fixing, fixing_daycount = market.fixings(start_date, reference_curve_type)
    if fixing_daycount is not None:
      fixing_daycount = market_data_utils.get_daycount_fn(
          fixing_daycount, dtype=market.dtype)
      year_fraction = fixing_daycount(start_date=start_date, end_date=end_date)
    else:
      # No day count supplied: fixings contribute zero.
      year_fraction = 0.0
    fixings.append(
        fixing * year_fraction)
  fixings = pad.pad_tensors(fixings)
  all_indices = tf.concat(split_indices, axis=0)
  all_fixings = tf.concat(fixings, axis=0)
  if num_curves > 1:
    # Scatter the per-curve results back into the original batch order.
    return tf.gather(all_fixings, tf.argsort(all_indices))
  else:
    return all_fixings
def process_curve_types(
    curve_types: List[Union[curve_types_lib.RiskFreeCurve,
                            curve_types_lib.RateIndexCurve]],
    mask=None
    ) -> Tuple[
        List[Union[curve_types_lib.RiskFreeCurve,
                   curve_types_lib.RateIndexCurve]],
        List[int]]:
  """Extracts unique curve types and an integer mask into them.

  Args:
    curve_types: A curve type or a list of curve types.
    mask: Optional precomputed integer mask. If supplied, `curve_types`
      (as a list) and `mask` are returned unchanged.

  Returns:
    A tuple of the list of unique curve types and a list of integers
    mapping each input curve type to its position in that list.
  """
  def _get_signature(curve):
    """Builds a hashable string signature identifying `curve`."""
    if isinstance(curve, curve_types_lib.RiskFreeCurve):
      return curve.currency.value
    elif isinstance(curve, curve_types_lib.RateIndexCurve):
      # NOTE(review): assumes `index.source` and `index.name` are
      # iterables of strings -- TODO confirm against curve_types_lib.
      return (curve.currency.value + "_" + curve.index.type.value
              + "_" + "_".join(curve.index.source)
              + "_" + "_".join(curve.index.name))
    else:
      raise ValueError(f"{type(curve)} is not supported.")
  curve_list = to_list(curve_types)
  if mask is not None:
    return curve_list, mask
  curve_hash = [_get_signature(curve_type) for curve_type in curve_list]
  hash_discount_map = {
      _get_signature(curve_type): curve_type for curve_type in curve_list}
  mask, mask_map, num_unique_discounts = create_mask(curve_hash)
  # Recover one representative curve type per unique signature, ordered
  # by the ids assigned in `create_mask`.
  discount_curve_types = [
      hash_discount_map[mask_map[i]]
      for i in range(num_unique_discounts)]
  return discount_curve_types, mask
def create_mask(x):
  """Assigns each element of `x` an integer id of its unique value.

  Ids follow the sorted order of the unique values, as produced by
  `np.unique`.

  Args:
    x: A sequence of hashable, sortable elements.

  Returns:
    A tuple of (list of integer ids, dict mapping id -> unique value,
    number of unique values).
  """
  distinct_values = np.unique(x)
  value_to_id = {value: idx for idx, value in enumerate(distinct_values)}
  id_to_value = {idx: value for idx, value in enumerate(distinct_values)}
  mask = [value_to_id[element] for element in x]
  return mask, id_to_value, len(distinct_values)
def to_list(x):
  """Returns `x` unchanged if it is a list or tuple, else wraps it in a list."""
  return x if isinstance(x, (list, tuple)) else [x]
def _get_attr(obj, key):
if isinstance(obj, dict):
return obj[key]
else:
return obj.__getattribute__(key)
__all__ = ["FixedCashflowStream", "FloatingCashflowStream"]
| true | true |
1c347d0430732c04a85547fa02506a3e4316f01c | 22,159 | py | Python | external/devlib/devlib/module/cpufreq.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | null | null | null | external/devlib/devlib/module/cpufreq.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | null | null | null | external/devlib/devlib/module/cpufreq.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | 1 | 2021-01-27T05:21:15.000Z | 2021-01-27T05:21:15.000Z | # Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import contextmanager
from devlib.module import Module
from devlib.exception import TargetStableError
from devlib.utils.misc import memoized
# a dict of governor name and a list of it tunables that can't be read
WRITE_ONLY_TUNABLES = {
'interactive': ['boostpulse']
}
class CpufreqModule(Module):
name = 'cpufreq'
@staticmethod
def probe(target):
# x86 with Intel P-State driver
if target.abi == 'x86_64':
path = '/sys/devices/system/cpu/intel_pstate'
if target.file_exists(path):
return True
# Generic CPUFreq support (single policy)
path = '/sys/devices/system/cpu/cpufreq/policy0'
if target.file_exists(path):
return True
# Generic CPUFreq support (per CPU policy)
path = '/sys/devices/system/cpu/cpu0/cpufreq'
return target.file_exists(path)
def __init__(self, target):
super(CpufreqModule, self).__init__(target)
self._governor_tunables = {}
@memoized
def list_governors(self, cpu):
"""Returns a list of governors supported by the cpu."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
output = self.target.read_value(sysfile)
return output.strip().split()
def get_governor(self, cpu):
"""Returns the governor currently set for the specified CPU."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
return self.target.read_value(sysfile)
def set_governor(self, cpu, governor, **kwargs):
"""
Set the governor for the specified CPU.
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
:param cpu: The CPU for which the governor is to be set. This must be
the full name as it appears in sysfs, e.g. "cpu0".
:param governor: The name of the governor to be used. This must be
supported by the specific device.
Additional keyword arguments can be used to specify governor tunables for
governors that support them.
:note: On big.LITTLE all cores in a cluster must be using the same governor.
Setting the governor on any core in a cluster will also set it on all
other cores in that cluster.
:raises: TargetStableError if governor is not supported by the CPU, or if,
for some reason, the governor could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
supported = self.list_governors(cpu)
if governor not in supported:
raise TargetStableError('Governor {} not supported for cpu {}'.format(governor, cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
self.target.write_value(sysfile, governor)
self.set_governor_tunables(cpu, governor, **kwargs)
@contextmanager
def use_governor(self, governor, cpus=None, **kwargs):
"""
Use a given governor, then restore previous governor(s)
:param governor: Governor to use on all targeted CPUs (see :meth:`set_governor`)
:type governor: str
:param cpus: CPUs affected by the governor change (all by default)
:type cpus: list
:Keyword Arguments: Governor tunables, See :meth:`set_governor_tunables`
"""
if not cpus:
cpus = self.target.list_online_cpus()
# Setting a governor & tunables for a cpu will set them for all cpus
# in the same clock domain, so only manipulating one cpu per domain
# is enough
domains = set(self.get_affected_cpus(cpu)[0] for cpu in cpus)
prev_governors = {cpu : (self.get_governor(cpu), self.get_governor_tunables(cpu))
for cpu in domains}
# Special case for userspace, frequency is not seen as a tunable
userspace_freqs = {}
for cpu, (prev_gov, _) in prev_governors.items():
if prev_gov == "userspace":
userspace_freqs[cpu] = self.get_frequency(cpu)
for cpu in domains:
self.set_governor(cpu, governor, **kwargs)
try:
yield
finally:
for cpu, (prev_gov, tunables) in prev_governors.items():
self.set_governor(cpu, prev_gov, **tunables)
if prev_gov == "userspace":
self.set_frequency(cpu, userspace_freqs[cpu])
def list_governor_tunables(self, cpu):
"""Returns a list of tunables available for the governor on the specified CPU."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_governor(cpu)
if governor not in self._governor_tunables:
try:
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
except TargetStableError: # probably an older kernel
try:
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
except TargetStableError: # governor does not support tunables
self._governor_tunables[governor] = []
return self._governor_tunables[governor]
def get_governor_tunables(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_governor(cpu)
tunables = {}
for tunable in self.list_governor_tunables(cpu):
if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
try:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
tunables[tunable] = self.target.read_value(path)
except TargetStableError: # May be an older kernel
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
tunables[tunable] = self.target.read_value(path)
return tunables
def set_governor_tunables(self, cpu, governor=None, **kwargs):
"""
Set tunables for the specified governor. Tunables should be specified as
keyword arguments. Which tunables and values are valid depends on the
governor.
:param cpu: The cpu for which the governor will be set. ``int`` or
full cpu name as it appears in sysfs, e.g. ``cpu0``.
:param governor: The name of the governor. Must be all lower case.
The rest should be keyword parameters mapping tunable name onto the value to
be set for it.
:raises: TargetStableError if governor specified is not a valid governor name, or if
a tunable specified is not valid for the governor, or if could not set
tunable.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
if governor is None:
governor = self.get_governor(cpu)
valid_tunables = self.list_governor_tunables(cpu)
for tunable, value in kwargs.items():
if tunable in valid_tunables:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
try:
self.target.write_value(path, value)
except TargetStableError:
if self.target.file_exists(path):
# File exists but we did something wrong
raise
# Expected file doesn't exist, try older sysfs layout.
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
self.target.write_value(path, value)
else:
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
message += 'Available tunables are: {}'.format(valid_tunables)
raise TargetStableError(message)
@memoized
def list_frequencies(self, cpu):
"""Returns a sorted list of frequencies supported by the cpu or an empty list
if not could be found."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
output = self.target.execute(cmd)
available_frequencies = list(map(int, output.strip().split())) # pylint: disable=E1103
except TargetStableError:
# On some devices scaling_frequencies is not generated.
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
# Fall back to parsing stats/time_in_state
path = '/sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
try:
out_iter = iter(self.target.read_value(path).split())
except TargetStableError:
if not self.target.file_exists(path):
# Probably intel_pstate. Can't get available freqs.
return []
raise
available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
return sorted(available_frequencies)
@memoized
def get_max_available_frequency(self, cpu):
"""
Returns the maximum available frequency for a given core or None if
could not be found.
"""
freqs = self.list_frequencies(cpu)
return max(freqs) if freqs else None
@memoized
def get_min_available_frequency(self, cpu):
"""
Returns the minimum available frequency for a given core or None if
could not be found.
"""
freqs = self.list_frequencies(cpu)
return min(freqs) if freqs else None
def get_min_frequency(self, cpu):
"""
Returns the min frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the minimum frequency and the following exception will be
raised ::
:raises: TargetStableError if for some reason the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_min_frequency(self, cpu, frequency, exact=True):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer. The Value must also be
supported by the device. The available frequencies can be obtained by calling
get_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_frequencies
on the device.
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
some reason, frequency could not be set.
:raises: ValueError if ``frequency`` is not an integer.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_frequencies(cpu)
try:
value = int(frequency)
if exact and available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
self.target.write_value(sysfile, value)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def get_frequency(self, cpu):
"""
Returns the current frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the current frequency and the following exception will be
raised ::
:raises: TargetStableError if for some reason the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_frequency(self, cpu, frequency, exact=True):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer.
If ``exact`` flag is set (the default), the Value must also be supported by
the device. The available frequencies can be obtained by calling
get_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_frequencies
on the device (if it exists).
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
some reason, frequency could not be set.
:raises: ValueError if ``frequency`` is not an integer.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
value = int(frequency)
if exact:
available_frequencies = self.list_frequencies(cpu)
if available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
if self.get_governor(cpu) != 'userspace':
raise TargetStableError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
self.target.write_value(sysfile, value, verify=False)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def get_max_frequency(self, cpu):
"""
Returns the max frequency currently set for the specified CPU.
Warning, this method does not check if the cpu is online or not. It will
try to read the maximum frequency and the following exception will be
raised ::
:raises: TargetStableError if for some reason the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_max_frequency(self, cpu, frequency, exact=True):
"""
Set's the minimum value for CPU frequency. Actual frequency will
depend on the Governor used and may vary during execution. The value should be
either an int or a string representing an integer. The Value must also be
supported by the device. The available frequencies can be obtained by calling
get_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_frequencies
on the device.
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
some reason, frequency could not be set.
:raises: ValueError if ``frequency`` is not an integer.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_frequencies(cpu)
try:
value = int(frequency)
if exact and available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
self.target.write_value(sysfile, value)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def set_governor_for_cpus(self, cpus, governor, **kwargs):
"""
Set the governor for the specified list of CPUs.
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
:param cpus: The list of CPU for which the governor is to be set.
"""
for cpu in cpus:
self.set_governor(cpu, governor, **kwargs)
def set_frequency_for_cpus(self, cpus, freq, exact=False):
"""
Set the frequency for the specified list of CPUs.
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
:param cpus: The list of CPU for which the frequency has to be set.
"""
for cpu in cpus:
self.set_frequency(cpu, freq, exact)
def set_all_frequencies(self, freq):
"""
Set the specified (minimum) frequency for all the (online) CPUs
"""
# pylint: disable=protected-access
return self.target._execute_util(
'cpufreq_set_all_frequencies {}'.format(freq),
as_root=True)
def get_all_frequencies(self):
"""
Get the current frequency for all the (online) CPUs
"""
# pylint: disable=protected-access
output = self.target._execute_util(
'cpufreq_get_all_frequencies', as_root=True)
frequencies = {}
for x in output.splitlines():
kv = x.split(' ')
if kv[0] == '':
break
frequencies[kv[0]] = kv[1]
return frequencies
def set_all_governors(self, governor):
"""
Set the specified governor for all the (online) CPUs
"""
try:
# pylint: disable=protected-access
return self.target._execute_util(
'cpufreq_set_all_governors {}'.format(governor),
as_root=True)
except TargetStableError as e:
if ("echo: I/O error" in str(e) or
"write error: Invalid argument" in str(e)):
cpus_unsupported = [c for c in self.target.list_online_cpus()
if governor not in self.list_governors(c)]
raise TargetStableError("Governor {} unsupported for CPUs {}".format(
governor, cpus_unsupported))
else:
raise
def get_all_governors(self):
"""
Get the current governor for all the (online) CPUs
"""
# pylint: disable=protected-access
output = self.target._execute_util(
'cpufreq_get_all_governors', as_root=True)
governors = {}
for x in output.splitlines():
kv = x.split(' ')
if kv[0] == '':
break
governors[kv[0]] = kv[1]
return governors
def trace_frequencies(self):
"""
Report current frequencies on trace file
"""
# pylint: disable=protected-access
return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)
def get_affected_cpus(self, cpu):
"""
Get the online CPUs that share a frequency domain with the given CPU
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/affected_cpus'.format(cpu)
return [int(c) for c in self.target.read_value(sysfile).split()]
@memoized
def get_related_cpus(self, cpu):
"""
Get the CPUs that share a frequency domain with the given CPU
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/related_cpus'.format(cpu)
return [int(c) for c in self.target.read_value(sysfile).split()]
@memoized
def get_driver(self, cpu):
"""
Get the name of the driver used by this cpufreq policy.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_driver'.format(cpu)
return self.target.read_value(sysfile).strip()
def iter_domains(self):
"""
Iterate over the frequency domains in the system
"""
cpus = set(range(self.target.number_of_cpus))
while cpus:
cpu = next(iter(cpus)) # pylint: disable=stop-iteration-return
domain = self.target.cpufreq.get_related_cpus(cpu)
yield domain
cpus = cpus.difference(domain)
| 41.574109 | 115 | 0.601381 |
from contextlib import contextmanager
from devlib.module import Module
from devlib.exception import TargetStableError
from devlib.utils.misc import memoized
WRITE_ONLY_TUNABLES = {
'interactive': ['boostpulse']
}
class CpufreqModule(Module):
name = 'cpufreq'
@staticmethod
def probe(target):
# x86 with Intel P-State driver
if target.abi == 'x86_64':
path = '/sys/devices/system/cpu/intel_pstate'
if target.file_exists(path):
return True
# Generic CPUFreq support (single policy)
path = '/sys/devices/system/cpu/cpufreq/policy0'
if target.file_exists(path):
return True
# Generic CPUFreq support (per CPU policy)
path = '/sys/devices/system/cpu/cpu0/cpufreq'
return target.file_exists(path)
def __init__(self, target):
super(CpufreqModule, self).__init__(target)
self._governor_tunables = {}
@memoized
def list_governors(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
output = self.target.read_value(sysfile)
return output.strip().split()
def get_governor(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
return self.target.read_value(sysfile)
def set_governor(self, cpu, governor, **kwargs):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
supported = self.list_governors(cpu)
if governor not in supported:
raise TargetStableError('Governor {} not supported for cpu {}'.format(governor, cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
self.target.write_value(sysfile, governor)
self.set_governor_tunables(cpu, governor, **kwargs)
@contextmanager
def use_governor(self, governor, cpus=None, **kwargs):
if not cpus:
cpus = self.target.list_online_cpus()
# Setting a governor & tunables for a cpu will set them for all cpus
# in the same clock domain, so only manipulating one cpu per domain
# is enough
domains = set(self.get_affected_cpus(cpu)[0] for cpu in cpus)
prev_governors = {cpu : (self.get_governor(cpu), self.get_governor_tunables(cpu))
for cpu in domains}
# Special case for userspace, frequency is not seen as a tunable
userspace_freqs = {}
for cpu, (prev_gov, _) in prev_governors.items():
if prev_gov == "userspace":
userspace_freqs[cpu] = self.get_frequency(cpu)
for cpu in domains:
self.set_governor(cpu, governor, **kwargs)
try:
yield
finally:
for cpu, (prev_gov, tunables) in prev_governors.items():
self.set_governor(cpu, prev_gov, **tunables)
if prev_gov == "userspace":
self.set_frequency(cpu, userspace_freqs[cpu])
def list_governor_tunables(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_governor(cpu)
if governor not in self._governor_tunables:
try:
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
except TargetStableError: # probably an older kernel
try:
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
except TargetStableError: # governor does not support tunables
self._governor_tunables[governor] = []
return self._governor_tunables[governor]
def get_governor_tunables(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_governor(cpu)
tunables = {}
for tunable in self.list_governor_tunables(cpu):
if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
try:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
tunables[tunable] = self.target.read_value(path)
except TargetStableError: # May be an older kernel
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
tunables[tunable] = self.target.read_value(path)
return tunables
def set_governor_tunables(self, cpu, governor=None, **kwargs):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
if governor is None:
governor = self.get_governor(cpu)
valid_tunables = self.list_governor_tunables(cpu)
for tunable, value in kwargs.items():
if tunable in valid_tunables:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
try:
self.target.write_value(path, value)
except TargetStableError:
if self.target.file_exists(path):
# File exists but we did something wrong
raise
# Expected file doesn't exist, try older sysfs layout.
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
self.target.write_value(path, value)
else:
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
message += 'Available tunables are: {}'.format(valid_tunables)
raise TargetStableError(message)
@memoized
def list_frequencies(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
output = self.target.execute(cmd)
available_frequencies = list(map(int, output.strip().split()))
except TargetStableError:
path = '/sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
try:
out_iter = iter(self.target.read_value(path).split())
except TargetStableError:
if not self.target.file_exists(path):
return []
raise
available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
return sorted(available_frequencies)
@memoized
def get_max_available_frequency(self, cpu):
freqs = self.list_frequencies(cpu)
return max(freqs) if freqs else None
@memoized
def get_min_available_frequency(self, cpu):
freqs = self.list_frequencies(cpu)
return min(freqs) if freqs else None
def get_min_frequency(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_min_frequency(self, cpu, frequency, exact=True):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_frequencies(cpu)
try:
value = int(frequency)
if exact and available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
self.target.write_value(sysfile, value)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def get_frequency(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_frequency(self, cpu, frequency, exact=True):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
try:
value = int(frequency)
if exact:
available_frequencies = self.list_frequencies(cpu)
if available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
if self.get_governor(cpu) != 'userspace':
raise TargetStableError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
self.target.write_value(sysfile, value, verify=False)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def get_max_frequency(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
return self.target.read_int(sysfile)
def set_max_frequency(self, cpu, frequency, exact=True):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_frequencies(cpu)
try:
value = int(frequency)
if exact and available_frequencies and value not in available_frequencies:
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
self.target.write_value(sysfile, value)
except ValueError:
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
def set_governor_for_cpus(self, cpus, governor, **kwargs):
for cpu in cpus:
self.set_governor(cpu, governor, **kwargs)
def set_frequency_for_cpus(self, cpus, freq, exact=False):
for cpu in cpus:
self.set_frequency(cpu, freq, exact)
def set_all_frequencies(self, freq):
# pylint: disable=protected-access
return self.target._execute_util(
'cpufreq_set_all_frequencies {}'.format(freq),
as_root=True)
def get_all_frequencies(self):
# pylint: disable=protected-access
output = self.target._execute_util(
'cpufreq_get_all_frequencies', as_root=True)
frequencies = {}
for x in output.splitlines():
kv = x.split(' ')
if kv[0] == '':
break
frequencies[kv[0]] = kv[1]
return frequencies
def set_all_governors(self, governor):
try:
# pylint: disable=protected-access
return self.target._execute_util(
'cpufreq_set_all_governors {}'.format(governor),
as_root=True)
except TargetStableError as e:
if ("echo: I/O error" in str(e) or
"write error: Invalid argument" in str(e)):
cpus_unsupported = [c for c in self.target.list_online_cpus()
if governor not in self.list_governors(c)]
raise TargetStableError("Governor {} unsupported for CPUs {}".format(
governor, cpus_unsupported))
else:
raise
def get_all_governors(self):
# pylint: disable=protected-access
output = self.target._execute_util(
'cpufreq_get_all_governors', as_root=True)
governors = {}
for x in output.splitlines():
kv = x.split(' ')
if kv[0] == '':
break
governors[kv[0]] = kv[1]
return governors
def trace_frequencies(self):
# pylint: disable=protected-access
return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)
def get_affected_cpus(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/affected_cpus'.format(cpu)
return [int(c) for c in self.target.read_value(sysfile).split()]
@memoized
def get_related_cpus(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/related_cpus'.format(cpu)
return [int(c) for c in self.target.read_value(sysfile).split()]
@memoized
def get_driver(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_driver'.format(cpu)
return self.target.read_value(sysfile).strip()
def iter_domains(self):
cpus = set(range(self.target.number_of_cpus))
while cpus:
cpu = next(iter(cpus)) # pylint: disable=stop-iteration-return
domain = self.target.cpufreq.get_related_cpus(cpu)
yield domain
cpus = cpus.difference(domain)
| true | true |
1c347d42305e260a0aaab0ad7a76123148f6e3b1 | 45,320 | py | Python | irrd/integration_tests/run.py | irrdnet/irrd | 6ba27f3bea4fa179525f9b1af68b2fa631d0b644 | [
"BSD-2-Clause"
] | 94 | 2015-02-03T22:50:51.000Z | 2022-03-16T08:24:44.000Z | irrd/integration_tests/run.py | irrdnet/irrd | 6ba27f3bea4fa179525f9b1af68b2fa631d0b644 | [
"BSD-2-Clause"
] | 286 | 2015-02-08T15:16:35.000Z | 2022-03-31T22:38:38.000Z | irrd/integration_tests/run.py | irrdnet/irrd | 6ba27f3bea4fa179525f9b1af68b2fa631d0b644 | [
"BSD-2-Clause"
] | 33 | 2015-02-03T22:50:57.000Z | 2022-03-30T00:46:07.000Z | # flake8: noqa: W293
import sys
import time
import unittest
import ujson
import base64
import email
import os
import requests
import signal
import socket
import sqlalchemy as sa
import subprocess
import textwrap
import yaml
from alembic import command, config
from pathlib import Path
from python_graphql_client import GraphqlClient
from irrd.conf import config_init, PASSWORD_HASH_DUMMY_VALUE
from irrd.utils.rpsl_samples import (SAMPLE_MNTNER, SAMPLE_PERSON, SAMPLE_KEY_CERT, SIGNED_PERSON_UPDATE_VALID,
SAMPLE_AS_SET, SAMPLE_AUT_NUM, SAMPLE_DOMAIN, SAMPLE_FILTER_SET, SAMPLE_INET_RTR,
SAMPLE_INET6NUM, SAMPLE_INETNUM, SAMPLE_PEERING_SET, SAMPLE_ROLE, SAMPLE_ROUTE,
SAMPLE_ROUTE_SET, SAMPLE_ROUTE6, SAMPLE_RTR_SET, SAMPLE_AS_BLOCK)
from irrd.utils.whois_client import whois_query, whois_query_irrd
from .constants import (EMAIL_SMTP_PORT, EMAIL_DISCARD_MSGS_COMMAND, EMAIL_RETURN_MSGS_COMMAND, EMAIL_SEPARATOR,
EMAIL_END)
from ..storage import translate_url
# Root of the IRRd source tree; appended to sys.path so subprocesses (such as
# the twisted mailserver started by the test) can import the test helpers.
IRRD_ROOT_PATH = str(Path(__file__).resolve().parents[2])
sys.path.append(IRRD_ROOT_PATH)
# An as-set referencing another set plus a plain AS number, used to exercise
# recursive set resolving (!a / !i queries and GraphQL membersObjs).
AS_SET_REFERRING_OTHER_SET = """as-set: AS65537:AS-TESTREF
descr: description
members: AS65537:AS-SETTEST, AS65540
mbrs-by-ref: TEST-MNT
tech-c: PERSON-TEST
admin-c: PERSON-TEST
notify: notify@example.com
mnt-by: TEST-MNT
changed: changed@example.com 20190701 # comment
source: TEST
remarks: remark
"""
# SAMPLE_MNTNER without its dangling OTHER1/OTHER2 mnt-by references, so the
# object can be created without broken-reference errors.
SAMPLE_MNTNER_CLEAN = SAMPLE_MNTNER.replace('mnt-by: OTHER1-MNT,OTHER2-MNT\n', '')
# One large submission containing a sample of every supported object class.
# NOTE: the test's expected NRTM serial numbers depend on the order and count
# of the objects below — do not reorder or add entries casually.
LARGE_UPDATE = '\n\n'.join([
    SAMPLE_AS_BLOCK,
    SAMPLE_AS_SET,
    SAMPLE_AUT_NUM,
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65538'),
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65539'),
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65540'),
    SAMPLE_DOMAIN,
    SAMPLE_FILTER_SET,
    SAMPLE_INET_RTR,
    SAMPLE_INET6NUM,
    SAMPLE_INETNUM,
    SAMPLE_KEY_CERT,
    SAMPLE_PEERING_SET,
    SAMPLE_PERSON.replace('PERSON-TEST', 'DUMY2-TEST'),
    SAMPLE_ROLE,
    SAMPLE_ROUTE,
    SAMPLE_ROUTE_SET,
    SAMPLE_ROUTE6,
    SAMPLE_RTR_SET,
    AS_SET_REFERRING_OTHER_SET,
])
class TestIntegration:
"""
This integration test will start two instances of IRRd, one mirroring off the
other, and an email server that captures all mail. It will then run a series
of updates and queries, verify the contents of mails, the state of the
databases, mirroring, utf-8 handling and run all basic types of queries.
Note that this test will not be included in the default py.test discovery,
this is intentional.
"""
port_http1 = 6080
port_whois1 = 6043
port_http2 = 6081
port_whois2 = 6044
    def test_irrd_integration(self, tmpdir):
        """
        Run the full end-to-end scenario: start the mail-capturing SMTP server
        and both IRRd instances, then walk through object creation and
        authentication failures, PGP-signed updates, deletions, NRTM mirroring
        between the two instances, RPKI validity transitions (including the
        resulting journal entries and notification mails), and finally the
        HTTP status and GraphQL interfaces.

        The expected serial numbers and mail counts asserted below depend on
        the exact sequence of submissions; changes to any step ripple through
        later assertions.
        """
        # Borrow unittest's order-insensitive list comparison; used later in
        # check_graphql().
        self.assertCountEqual = unittest.TestCase().assertCountEqual
        # IRRD_DATABASE_URL and IRRD_REDIS_URL override the yaml config, so should be removed
        if 'IRRD_DATABASE_URL' in os.environ:
            del os.environ['IRRD_DATABASE_URL']
        if 'IRRD_REDIS_URL' in os.environ:
            del os.environ['IRRD_REDIS_URL']
        # PYTHONPATH needs to contain the twisted plugin path to support the mailserver.
        os.environ['PYTHONPATH'] = IRRD_ROOT_PATH
        os.environ['IRRD_SCHEDULER_TIMER_OVERRIDE'] = '1'
        self.tmpdir = tmpdir
        self._start_mailserver()
        self._start_irrds()
        # Attempt to load a mntner with valid auth, but broken references.
        self._submit_update(self.config_path1, SAMPLE_MNTNER + '\n\noverride: override-password')
        messages = self._retrieve_mails()
        assert len(messages) == 1
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'FAILED: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nCreate FAILED: [mntner] TEST-MNT\n' in mail_text
        assert '\nERROR: Object PERSON-TEST referenced in field admin-c not found in database TEST - must reference one of role, person.\n' in mail_text
        assert '\nERROR: Object OTHER1-MNT referenced in field mnt-by not found in database TEST - must reference mntner.\n' in mail_text
        assert '\nERROR: Object OTHER2-MNT referenced in field mnt-by not found in database TEST - must reference mntner.\n' in mail_text
        assert 'email footer' in mail_text
        assert 'Generated by IRRd version ' in mail_text
        # Load a regular valid mntner and person into the DB, and verify
        # the contents of the result.
        self._submit_update(self.config_path1,
                            SAMPLE_MNTNER_CLEAN + '\n\n' + SAMPLE_PERSON + '\n\noverride: override-password')
        messages = self._retrieve_mails()
        assert len(messages) == 1
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nCreate succeeded: [mntner] TEST-MNT\n' in mail_text
        assert '\nCreate succeeded: [person] PERSON-TEST\n' in mail_text
        assert 'email footer' in mail_text
        assert 'Generated by IRRd version ' in mail_text
        # Check whether the objects can be queried from irrd #1,
        # whether the hash is masked, and whether encoding is correct.
        mntner_text = whois_query('127.0.0.1', self.port_whois1, 'TEST-MNT')
        assert 'TEST-MNT' in mntner_text
        assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
        assert 'unįcöde tæst 🌈🦄' in mntner_text
        assert 'PERSON-TEST' in mntner_text
        # After three seconds, a new export should have been generated by irrd #1,
        # loaded by irrd #2, and the objects should be available in irrd #2
        time.sleep(3)
        mntner_text = whois_query('127.0.0.1', self.port_whois2, 'TEST-MNT')
        assert 'TEST-MNT' in mntner_text
        assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
        assert 'unįcöde tæst 🌈🦄' in mntner_text
        assert 'PERSON-TEST' in mntner_text
        # Load a key-cert. This should cause notifications to mnt-nfy (2x).
        # Change is authenticated by valid password.
        self._submit_update(self.config_path1, SAMPLE_KEY_CERT + '\npassword: md5-password')
        messages = self._retrieve_mails()
        assert len(messages) == 3
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert 'Create succeeded: [key-cert] PGPKEY-80F238C6' in self._extract_message_body(messages[0])
        self._check_recipients_in_mails(messages[1:], [
            'mnt-nfy@example.net', 'mnt-nfy2@example.net'
        ])
        self._check_text_in_mails(messages[1:], [
            '\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
            '\nCreate succeeded for object below: [key-cert] PGPKEY-80F238C6:\n',
            'email footer',
            'Generated by IRRd version ',
        ])
        for message in messages[1:]:
            assert message['Subject'] == 'Notification of TEST database changes'
            assert message['From'] == 'from@example.com'
        # Use the new PGP key to make an update to PERSON-TEST. Should
        # again trigger mnt-nfy messages, and a mail to the notify address
        # of PERSON-TEST.
        self._submit_update(self.config_path1, SIGNED_PERSON_UPDATE_VALID)
        messages = self._retrieve_mails()
        assert len(messages) == 4
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nModify succeeded: [person] PERSON-TEST\n' in mail_text
        self._check_recipients_in_mails(messages[1:], [
            'mnt-nfy@example.net', 'mnt-nfy2@example.net', 'notify@example.com',
        ])
        self._check_text_in_mails(messages[1:], [
            '\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
            '\nModify succeeded for object below: [person] PERSON-TEST:\n',
            '\n@@ -1,4 +1,4 @@\n',
            '\nNew version of this object:\n',
        ])
        for message in messages[1:]:
            assert message['Subject'] == 'Notification of TEST database changes'
            assert message['From'] == 'from@example.com'
        # Check that the person is updated on irrd #1
        person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
        assert 'PERSON-TEST' in person_text
        assert 'Test person changed by PGP signed update' in person_text
        # After 2s, NRTM from irrd #2 should have picked up the change.
        time.sleep(2)
        person_text = whois_query('127.0.0.1', self.port_whois2, 'PERSON-TEST')
        assert 'PERSON-TEST' in person_text
        assert 'Test person changed by PGP signed update' in person_text
        # Submit an update back to the original person object, with an invalid
        # password and invalid override. Should trigger notification to upd-to.
        self._submit_update(self.config_path1, SAMPLE_PERSON + '\npassword: invalid\noverride: invalid\n')
        messages = self._retrieve_mails()
        assert len(messages) == 2
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'FAILED: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nModify FAILED: [person] PERSON-TEST\n' in mail_text
        assert '\nERROR: Authorisation for person PERSON-TEST failed: must by authenticated by one of: TEST-MNT\n' in mail_text
        mail_text = self._extract_message_body(messages[1])
        assert messages[1]['Subject'] == 'Notification of TEST database changes'
        assert messages[1]['From'] == 'from@example.com'
        assert messages[1]['To'] == 'upd-to@example.net'
        assert '\nModify FAILED AUTHORISATION for object below: [person] PERSON-TEST:\n' in mail_text
        # Object should not have changed by latest update.
        person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
        assert 'PERSON-TEST' in person_text
        assert 'Test person changed by PGP signed update' in person_text
        # Submit a delete with a valid password for PERSON-TEST.
        # This should be rejected, because it creates a dangling reference.
        # No mail should be sent to upd-to.
        self._submit_update(self.config_path1, SAMPLE_PERSON + 'password: md5-password\ndelete: delete\n')
        messages = self._retrieve_mails()
        assert len(messages) == 1
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'FAILED: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nDelete FAILED: [person] PERSON-TEST\n' in mail_text
        assert '\nERROR: Object PERSON-TEST to be deleted, but still referenced by mntner TEST-MNT\n' in mail_text
        assert '\nERROR: Object PERSON-TEST to be deleted, but still referenced by key-cert PGPKEY-80F238C6\n' in mail_text
        # Object should not have changed by latest update.
        person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
        assert 'PERSON-TEST' in person_text
        assert 'Test person changed by PGP signed update' in person_text
        # Submit a valid delete for all our new objects.
        self._submit_update(self.config_path1,
                            f'{SAMPLE_PERSON}delete: delete\n\n{SAMPLE_KEY_CERT}delete: delete\n\n' +
                            f'{SAMPLE_MNTNER_CLEAN}delete: delete\npassword: crypt-password\n')
        messages = self._retrieve_mails()
        # Expected mails are status, mnt-nfy on mntner (2x), and notify on mntner
        # (notify on PERSON-TEST was removed in the PGP signed update)
        assert len(messages) == 4
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nDelete succeeded: [person] PERSON-TEST\n' in mail_text
        assert '\nDelete succeeded: [mntner] TEST-MNT\n' in mail_text
        assert '\nDelete succeeded: [key-cert] PGPKEY-80F238C6\n' in mail_text
        self._check_recipients_in_mails(messages[1:], [
            'mnt-nfy@example.net', 'mnt-nfy2@example.net', 'notify@example.net',
        ])
        mnt_nfy_msgs = [msg for msg in messages if msg['To'] in ['mnt-nfy@example.net', 'mnt-nfy2@example.net']]
        self._check_text_in_mails(mnt_nfy_msgs, [
            '\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
            '\nDelete succeeded for object below: [person] PERSON-TEST:\n',
            '\nDelete succeeded for object below: [mntner] TEST-MNT:\n',
            '\nDelete succeeded for object below: [key-cert] PGPKEY-80F238C6:\n',
            'unįcöde tæst 🌈🦄\n',
            # The object submitted to be deleted has the original name,
            # but when sending delete notifications, they should include the
            # object as currently in the DB, not as submitted in the email.
            'Test person changed by PGP signed update\n',
        ])
        for message in messages[1:]:
            assert message['Subject'] == 'Notification of TEST database changes'
            assert message['From'] == 'from@example.com'
        # Notify attribute mails are only about the objects concerned.
        notify_msg = [msg for msg in messages if msg['To'] == 'notify@example.net'][0]
        mail_text = self._extract_message_body(notify_msg)
        assert notify_msg['Subject'] == 'Notification of TEST database changes'
        assert notify_msg['From'] == 'from@example.com'
        assert '\n> Message-ID: <1325754288.4989.6.camel@hostname>\n' in mail_text
        assert '\nDelete succeeded for object below: [person] PERSON-TEST:\n' not in mail_text
        assert '\nDelete succeeded for object below: [mntner] TEST-MNT:\n' in mail_text
        assert '\nDelete succeeded for object below: [key-cert] PGPKEY-80F238C6:\n' not in mail_text
        # Object should be deleted
        person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
        assert 'No entries found for the selected source(s)' in person_text
        assert 'PERSON-TEST' not in person_text
        # Object should be deleted from irrd #2 as well through NRTM.
        time.sleep(2)
        person_text = whois_query('127.0.0.1', self.port_whois2, 'PERSON-TEST')
        assert 'No entries found for the selected source(s)' in person_text
        assert 'PERSON-TEST' not in person_text
        # Load the mntner and person again, using the override password
        # Note that the route/route6 objects are RPKI valid on IRRd #1,
        # and RPKI-invalid on IRRd #2
        self._submit_update(self.config_path1,
                            SAMPLE_MNTNER_CLEAN + '\n\n' + SAMPLE_PERSON + '\n\noverride: override-password')
        messages = self._retrieve_mails()
        assert len(messages) == 1
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nCreate succeeded: [mntner] TEST-MNT\n' in mail_text
        assert '\nCreate succeeded: [person] PERSON-TEST\n' in mail_text
        assert 'email footer' in mail_text
        assert 'Generated by IRRd version ' in mail_text
        # Load samples of all known objects, using the mntner password
        self._submit_update(self.config_path1, LARGE_UPDATE + '\n\npassword: md5-password')
        messages = self._retrieve_mails()
        assert len(messages) == 3
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'SUCCESS: my subject'
        assert messages[0]['From'] == 'from@example.com'
        assert messages[0]['To'] == 'Sasha <sasha@example.com>'
        assert '\nINFO: AS number as065537 was reformatted as AS65537\n' in mail_text
        assert '\nCreate succeeded: [filter-set] FLTR-SETTEST\n' in mail_text
        assert '\nINFO: Address range 192.0.2.0 - 192.0.02.255 was reformatted as 192.0.2.0 - 192.0.2.255\n' in mail_text
        assert '\nINFO: Address prefix 192.0.02.0/24 was reformatted as 192.0.2.0/24\n' in mail_text
        assert '\nINFO: Route set member 2001:0dB8::/48 was reformatted as 2001:db8::/48\n' in mail_text
        # Check whether the objects can be queried from irrd #1,
        # and whether the hash is masked.
        mntner_text = whois_query('127.0.0.1', self.port_whois1, 'TEST-MNT')
        assert 'TEST-MNT' in mntner_text
        assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
        assert 'unįcöde tæst 🌈🦄' in mntner_text
        assert 'PERSON-TEST' in mntner_text
        # (This is the first instance of an object with unicode chars
        # appearing on the NRTM stream.)
        time.sleep(3)
        mntner_text = whois_query('127.0.0.1', self.port_whois2, 'TEST-MNT')
        assert 'TEST-MNT' in mntner_text
        assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
        assert 'unįcöde tæst 🌈🦄' in mntner_text
        assert 'PERSON-TEST' in mntner_text
        # These queries have different responses on #1 than #2,
        # as all IPv4 routes are RPKI invalid on #2.
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!gAS65537')
        assert query_result == '192.0.2.0/24'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!gAS65547')
        assert query_result == '192.0.2.0/32' # Pseudo-IRR object from RPKI
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!6AS65537')
        assert query_result == '2001:db8::/48'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!iRS-TEST')
        assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48', 'RS-OTHER-SET'}
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!aAS65537:AS-SETTEST')
        assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48'}
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!aAS65537:AS-TESTREF')
        assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48'}
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!a4AS65537:AS-TESTREF')
        assert query_result == '192.0.2.0/24'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!a6AS65537:AS-TESTREF')
        assert query_result == '2001:db8::/48'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24')
        assert 'example route' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/25,l')
        assert 'example route' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,L')
        assert 'example route' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/23,M')
        assert 'example route' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,M')
        assert 'RPKI' in query_result # Does not match the /24, does match the RPKI pseudo-IRR /32
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,o')
        assert query_result == 'AS65537'
        query_result = whois_query('127.0.0.1', self.port_whois1, '-x 192.0.02.0/24')
        assert 'example route' in query_result
        query_result = whois_query('127.0.0.1', self.port_whois1, '-l 192.0.02.0/25')
        assert 'example route' in query_result
        query_result = whois_query('127.0.0.1', self.port_whois1, '-L 192.0.02.0/24')
        assert 'example route' in query_result
        query_result = whois_query('127.0.0.1', self.port_whois1, '-M 192.0.02.0/23')
        assert 'example route' in query_result
        query_result = whois_query('127.0.0.1', self.port_whois1, '-i member-of RS-test')
        assert 'example route' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
        assert not query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!6AS65537')
        assert query_result == '2001:db8::/48'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!iRS-TEST')
        assert query_result == '2001:db8::/48 RS-OTHER-SET'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!aAS65537:AS-SETTEST')
        assert query_result == '2001:db8::/48'
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!aAS65537:AS-TESTREF')
        assert query_result == '2001:db8::/48'
        query_result = whois_query('127.0.0.1', self.port_whois2, '-x 192.0.02.0/24')
        assert 'example route' not in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!r192.0.2.0/24,L')
        assert 'RPKI' in query_result # Pseudo-IRR object 0/0 from RPKI
        # RPKI invalid object should not be in journal
        query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:1-LAST')
        assert 'route:192.0.2.0/24' not in query_result.replace(' ', '')
        # These queries should produce identical answers on both instances.
        for port in self.port_whois1, self.port_whois2:
            query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-SETTEST')
            assert set(query_result.split(' ')) == {'AS65537', 'AS65538', 'AS65539', 'AS-OTHERSET'}
            query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-TESTREF')
            assert set(query_result.split(' ')) == {'AS65537:AS-SETTEST', 'AS65540'}
            query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-TESTREF,1')
            assert set(query_result.split(' ')) == {'AS65537', 'AS65538', 'AS65539', 'AS65540'}
            query_result = whois_query_irrd('127.0.0.1', port, '!maut-num,as65537')
            assert 'AS65537' in query_result
            assert 'TEST-AS' in query_result
            query_result = whois_query_irrd('127.0.0.1', port, '!oTEST-MNT')
            assert 'AS65537' in query_result
            assert 'TEST-AS' in query_result
            assert 'AS65536 - AS65538' in query_result
            assert 'rtrs-settest' in query_result
            query_result = whois_query('127.0.0.1', port, '-T route6 -i member-of RS-TEST')
            assert 'No entries found for the selected source(s)' in query_result
            query_result = whois_query('127.0.0.1', port, 'dashcare')
            assert 'ROLE-TEST' in query_result
        # Check the mirroring status
        query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!J-*')
        result = ujson.loads(query_result)
        assert result['TEST']['serial_newest_journal'] == 29
        assert result['TEST']['serial_last_export'] == 29
        assert result['TEST']['serial_newest_mirror'] is None
        # irrd #2 missed the first update from NRTM, as they were done at
        # the same time and loaded from the full export, and one RPKI-invalid object
        # was not recorded in the journal, so its local serial should
        # is lower by three
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!J-*')
        result = ujson.loads(query_result)
        assert result['TEST']['serial_newest_journal'] == 26
        assert result['TEST']['serial_last_export'] == 26
        assert result['TEST']['serial_newest_mirror'] == 29
        # Make the v4 route in irrd2 valid
        with open(self.roa_source2, 'w') as roa_file:
            ujson.dump({'roas': [{'prefix': '198.51.100.0/24', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
        time.sleep(3)
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
        assert query_result == '192.0.2.0/24'
        # RPKI invalid object should now be added in the journal
        query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:27-27')
        assert 'ADD 27' in query_result
        assert '192.0.2.0/24' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!J-*')
        result = ujson.loads(query_result)
        assert result['TEST']['serial_newest_journal'] == 27
        assert result['TEST']['serial_last_export'] == 27
        # This was a local journal update from RPKI status change,
        # so serial_newest_mirror did not update.
        assert result['TEST']['serial_newest_mirror'] == 29
        # Make the v4 route in irrd2 invalid again
        with open(self.roa_source2, 'w') as roa_file:
            ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
        time.sleep(3)
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
        assert not query_result
        # RPKI invalid object should now be deleted in the journal
        query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:28-28')
        assert 'DEL 28' in query_result
        assert '192.0.2.0/24' in query_result
        query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!J-*')
        result = ujson.loads(query_result)
        assert result['TEST']['serial_newest_journal'] == 28
        assert result['TEST']['serial_last_export'] == 28
        assert result['TEST']['serial_newest_mirror'] == 29
        # Make the v4 route in irrd1 invalid, triggering a mail
        with open(self.roa_source1, 'w') as roa_file:
            ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
        # irrd1 is authoritative for the now invalid v4 route, should have sent mail
        time.sleep(2)
        messages = self._retrieve_mails()
        assert len(messages) == 3
        mail_text = self._extract_message_body(messages[0])
        assert messages[0]['Subject'] == 'route(6) objects in TEST marked RPKI invalid'
        expected_recipients = {'email@example.com', 'mnt-nfy@example.net', 'mnt-nfy2@example.net'}
        assert {m['To'] for m in messages} == expected_recipients
        assert '192.0.2.0/24' in mail_text
        self.check_http()
        self.check_graphql()
def check_http(self):
status1 = requests.get(f'http://127.0.0.1:{self.port_http1}/v1/status/')
status2 = requests.get(f'http://127.0.0.1:{self.port_http2}/v1/status/')
assert status1.status_code == 200
assert status2.status_code == 200
assert 'IRRD version' in status1.text
assert 'IRRD version' in status2.text
assert 'TEST' in status1.text
assert 'TEST' in status2.text
assert 'RPKI' in status1.text
assert 'RPKI' in status2.text
assert 'Authoritative: Yes' in status1.text
assert 'Authoritative: Yes' not in status2.text
    def check_graphql(self):
        """
        Exercise the GraphQL endpoint of IRRd #1: rpslObjects queries with
        journal retrieval and reference resolving, IP-based search,
        databaseStatus, and the asnPrefixes / asSetPrefixes /
        recursiveSetMembers resolving queries.
        """
        client = GraphqlClient(endpoint=f"http://127.0.0.1:{self.port_http1}/graphql/")
        # Regular rpslObjects query including journal and several references
        query = """query {
            rpslObjects(rpslPk: "PERSON-TEST") {
                rpslPk
                ... on RPSLContact {
                    mntBy
                }
                mntByObjs {
                    rpslPk
                    adminCObjs {
                        ... on RPSLContact {
                            rpslPk
                        }
                    }
                    adminCObjs {
                        ... on RPSLContact {
                            rpslPk
                        }
                    }
                }
                journal {
                    serialNrtm
                    operation
                    origin
                }
            }
        }
        """
        result = client.execute(query=query)
        assert result['data']['rpslObjects'] == [{
            'rpslPk': 'PERSON-TEST',
            'mntBy': ['TEST-MNT'],
            'mntByObjs': [{'rpslPk': 'TEST-MNT', 'adminCObjs': [{'rpslPk': 'PERSON-TEST'}]}],
            'journal': [
                {'serialNrtm': 2, 'operation': 'add_or_update', 'origin': 'auth_change'},
                {'serialNrtm': 4, 'operation': 'add_or_update', 'origin': 'auth_change'},
                {'serialNrtm': 5, 'operation': 'delete', 'origin': 'auth_change'},
                {'serialNrtm': 9, 'operation': 'add_or_update', 'origin': 'auth_change'}
            ]
        }]
        # Test memberOfObjs resolving and IP search
        query = """query {
            rpslObjects(ipLessSpecificOneLevel: "192.0.2.1" rpkiStatus:[invalid,valid,not_found]) {
                rpslPk
                ... on RPSLRoute {
                    memberOfObjs {
                        rpslPk
                    }
                }
            }
        }
        """
        result = client.execute(query=query)
        self.assertCountEqual(result['data']['rpslObjects'], [
            {'rpslPk': '192.0.2.0/24AS65537', 'memberOfObjs': [{'rpslPk': 'RS-TEST'}]},
            {'rpslPk': '192.0.2.0 - 192.0.2.255'}
        ])
        # Test membersObjs and mbrsByRefObjs resolving
        query = """query {
            rpslObjects(rpslPk: ["AS65537:AS-TESTREF", "DOESNOTEXIST"]) {
                rpslPk
                ... on RPSLAsSet {
                    membersObjs {
                        rpslPk
                    }
                    mbrsByRefObjs {
                        rpslPk
                    }
                }
            }
        }
        """
        result = client.execute(query=query)
        assert result['data']['rpslObjects'] == [{
            'rpslPk': 'AS65537:AS-TESTREF',
            'membersObjs': [{'rpslPk': 'AS65537:AS-SETTEST'}],
            'mbrsByRefObjs': [{'rpslPk': 'TEST-MNT'}],
        }]
        # Test databaseStatus query
        query = """query {
            databaseStatus {
                source
                authoritative
                serialOldestJournal
                serialNewestJournal
                serialNewestMirror
            }
        }
        """
        result = client.execute(query=query)
        self.assertCountEqual(result['data']['databaseStatus'], [
            {
                'source': 'TEST',
                'authoritative': True,
                'serialOldestJournal': 1,
                'serialNewestJournal': 30,
                'serialNewestMirror': None
            }, {
                'source': 'RPKI',
                'authoritative': False,
                'serialOldestJournal': None,
                'serialNewestJournal': None,
                'serialNewestMirror': None
            }
        ])
        # Test asnPrefixes query
        query = """query {
            asnPrefixes(asns: [65537]) {
                asn
                prefixes
            }
        }
        """
        result = client.execute(query=query)
        asnPrefixes = result['data']['asnPrefixes']
        assert len(asnPrefixes) == 1
        assert asnPrefixes[0]['asn'] == 65537
        assert set(asnPrefixes[0]['prefixes']) == {'2001:db8::/48'}
        # Test asSetPrefixes query
        query = """query {
            asSetPrefixes(setNames: ["AS65537:AS-TESTREF"]) {
                rpslPk
                prefixes
            }
        }
        """
        result = client.execute(query=query)
        asSetPrefixes = result['data']['asSetPrefixes']
        assert len(asSetPrefixes) == 1
        assert asSetPrefixes[0]['rpslPk'] == 'AS65537:AS-TESTREF'
        assert set(asSetPrefixes[0]['prefixes']) == {'2001:db8::/48'}
        # Test recursiveSetMembers query
        query = """query {
            recursiveSetMembers(setNames: ["AS65537:AS-TESTREF"]) {
                rpslPk
                rootSource
                members
            }
        }
        """
        result = client.execute(query=query)
        recursiveSetMembers = result['data']['recursiveSetMembers']
        assert len(recursiveSetMembers) == 1
        assert recursiveSetMembers[0]['rpslPk'] == 'AS65537:AS-TESTREF'
        assert recursiveSetMembers[0]['rootSource'] == 'TEST'
        assert set(recursiveSetMembers[0]['members']) == {
            'AS65537', 'AS65538', 'AS65539', 'AS65540'
        }
    def _start_mailserver(self):
        """
        Start the mailserver through twisted. This special SMTP server is
        configured as the SMTP server for both IRRd instances.
        It keeps mails in memory, and _retrieve_mails() can retrieve them
        using special SMTP commands.
        """
        # twistd daemonizes itself; pid/log files live in the test tmpdir.
        self.pidfile_mailserver = str(self.tmpdir) + '/mailserver.pid'
        self.logfile_mailserver = str(self.tmpdir) + '/mailserver.log'
        mailserver_path = IRRD_ROOT_PATH + '/irrd/integration_tests/mailserver.tac'
        # A zero exit status from twistd means the daemon started successfully.
        assert not subprocess.call(['twistd', f'--pidfile={self.pidfile_mailserver}',
                                    f'--logfile={self.logfile_mailserver}', '-y', mailserver_path])
# noinspection PyTypeChecker
    def _start_irrds(self):
        """
        Configure and start two independent instances of IRRd.
        IRRd #1 has an authoritative database, IRRd #2 mirrors that database
        from #1.
        """
        # Database/redis endpoints are injected by the CI environment so the
        # two instances use fully separate backing stores.
        self.database_url1 = os.environ['IRRD_DATABASE_URL_INTEGRATION_1']
        self.database_url2 = os.environ['IRRD_DATABASE_URL_INTEGRATION_2']
        self.redis_url1 = os.environ['IRRD_REDIS_URL_INTEGRATION_1']
        self.redis_url2 = os.environ['IRRD_REDIS_URL_INTEGRATION_2']
        # All runtime artifacts (configs, logs, ROA files, exports, pidfiles)
        # live under the per-test tmpdir so teardown can find them.
        self.config_path1 = str(self.tmpdir) + '/irrd1_config.yaml'
        self.config_path2 = str(self.tmpdir) + '/irrd2_config.yaml'
        self.logfile1 = str(self.tmpdir) + '/irrd1.log'
        self.logfile2 = str(self.tmpdir) + '/irrd2.log'
        self.roa_source1 = str(self.tmpdir) + '/roa1.json'
        self.roa_source2 = str(self.tmpdir) + '/roa2.json'
        self.export_dir1 = str(self.tmpdir) + '/export1/'
        self.export_dir2 = str(self.tmpdir) + '/export2/'
        self.piddir1 = str(self.tmpdir) + '/piddir1/'
        self.piddir2 = str(self.tmpdir) + '/piddir2/'
        self.pidfile1 = self.piddir1 + 'irrd.pid'
        self.pidfile2 = self.piddir2 + 'irrd.pid'
        os.mkdir(self.export_dir1)
        os.mkdir(self.export_dir2)
        os.mkdir(self.piddir1)
        os.mkdir(self.piddir2)
        print(textwrap.dedent(f"""
            Preparing to start IRRd for integration test.
            IRRd #1 running on HTTP port {self.port_http1}, whois port {self.port_whois1}
            Config in: {self.config_path1}
            Database URL: {self.database_url1}
            PID file: {self.pidfile1}
            Logfile: {self.logfile1}
            IRRd #2 running on HTTP port {self.port_http2}, whois port {self.port_whois2}
            Config in: {self.config_path2}
            Database URL: {self.database_url2}
            PID file: {self.pidfile2}
            Logfile: {self.logfile2}
        """))
        # Instance #1 starts with one covering ROA; instance #2 starts with a
        # ROA that matches nothing relevant (AS0 over 128/1).
        with open(self.roa_source1, 'w') as roa_file:
            ujson.dump({'roas': [{'prefix': '192.0.2.0/32', 'asn': 'AS65547', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
        with open(self.roa_source2, 'w') as roa_file:
            ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '1', 'ta': 'TA'}]}, roa_file)
        # Settings shared by both instances; per-instance values are filled
        # in below before each config is dumped to YAML.
        base_config = {
            'irrd': {
                'access_lists': {
                    'localhost': ['::/32', '127.0.0.1']
                },
                'server': {
                    'http': {
                        'status_access_list': 'localhost',
                        'interface': '::1',
                        'port': 8080
                    },
                    'whois': {
                        'interface': '::1',
                        'max_connections': 10,
                        'port': 8043
                    },
                },
                'rpki':{
                    'roa_import_timer': 1,
                    'notify_invalid_enabled': True,
                },
                'auth': {
                    'gnupg_keyring': None,
                    'override_password': '$1$J6KycItM$MbPaBU6iFSGFV299Rk7Di0',
                },
                'email': {
                    'footer': 'email footer',
                    'from': 'from@example.com',
                    'smtp': f'localhost:{EMAIL_SMTP_PORT}',
                },
                'log': {
                    'logfile_path': None,
                    'level': 'DEBUG',
                },
                'sources': {}
            }
        }
        # NOTE(review): .copy() is a *shallow* copy, so config1 and config2
        # share all nested dicts. This only works because config1 is dumped
        # to YAML before config2 mutates the shared sections below — confirm
        # before reordering anything here.
        config1 = base_config.copy()
        config1['irrd']['piddir'] = self.piddir1
        config1['irrd']['database_url'] = self.database_url1
        config1['irrd']['redis_url'] = self.redis_url1
        config1['irrd']['server']['http']['interface'] = '127.0.0.1'  # #306
        config1['irrd']['server']['http']['port'] = self.port_http1
        config1['irrd']['server']['whois']['interface'] = '127.0.0.1'
        config1['irrd']['server']['whois']['port'] = self.port_whois1
        config1['irrd']['auth']['gnupg_keyring'] = str(self.tmpdir) + '/gnupg1'
        config1['irrd']['log']['logfile_path'] = self.logfile1
        config1['irrd']['rpki']['roa_source'] = 'file://' + self.roa_source1
        # Instance #1 is authoritative for TEST and exports it for #2.
        config1['irrd']['sources']['TEST'] = {
            'authoritative': True,
            'keep_journal': True,
            'export_destination': self.export_dir1,
            'export_timer': '1',
            'nrtm_access_list': 'localhost',
        }
        with open(self.config_path1, 'w') as yaml_file:
            yaml.safe_dump(config1, yaml_file)
        config2 = base_config.copy()
        config2['irrd']['piddir'] = self.piddir2
        config2['irrd']['database_url'] = self.database_url2
        config2['irrd']['redis_url'] = self.redis_url2
        config2['irrd']['server']['http']['port'] = self.port_http2
        config2['irrd']['server']['whois']['port'] = self.port_whois2
        config2['irrd']['auth']['gnupg_keyring'] = str(self.tmpdir) + '/gnupg2'
        config2['irrd']['log']['logfile_path'] = self.logfile2
        config2['irrd']['rpki']['roa_source'] = 'file://' + self.roa_source2
        # Instance #2 mirrors TEST: initial load from #1's file export, then
        # continuous updates over NRTM from #1's whois port.
        config2['irrd']['sources']['TEST'] = {
            'keep_journal': True,
            'import_serial_source': f'file://{self.export_dir1}/TEST.CURRENTSERIAL',
            'import_source': f'file://{self.export_dir1}/test.db.gz',
            'export_destination': self.export_dir2,
            'import_timer': '1',
            'export_timer': '1',
            'nrtm_host': '127.0.0.1',
            'nrtm_port': str(self.port_whois1),
            'nrtm_access_list': 'localhost',
        }
        with open(self.config_path2, 'w') as yaml_file:
            yaml.safe_dump(config2, yaml_file)
        self._prepare_database()
        # Start both daemons; main.py returns 0 on successful daemonization.
        assert not subprocess.call(['irrd/daemon/main.py', f'--config={self.config_path1}'])
        assert not subprocess.call(['irrd/daemon/main.py', f'--config={self.config_path2}'])
def _prepare_database(self):
"""
Prepare the databases for IRRd #1 and #2. This includes running
migrations to create tables, and *wiping existing content*.
"""
config_init(self.config_path1)
alembic_cfg = config.Config()
alembic_cfg.set_main_option('script_location', f'{IRRD_ROOT_PATH}/irrd/storage/alembic')
command.upgrade(alembic_cfg, 'head')
connection = sa.create_engine(translate_url(self.database_url1)).connect()
connection.execute('DELETE FROM rpsl_objects')
connection.execute('DELETE FROM rpsl_database_journal')
connection.execute('DELETE FROM database_status')
connection.execute('DELETE FROM roa_object')
config_init(self.config_path2)
alembic_cfg = config.Config()
alembic_cfg.set_main_option('script_location', f'{IRRD_ROOT_PATH}/irrd/storage/alembic')
command.upgrade(alembic_cfg, 'head')
connection = sa.create_engine(translate_url(self.database_url2)).connect()
connection.execute('DELETE FROM rpsl_objects')
connection.execute('DELETE FROM rpsl_database_journal')
connection.execute('DELETE FROM database_status')
connection.execute('DELETE FROM roa_object')
def _submit_update(self, config_path, request):
"""
Submit an update to an IRRd by calling the email submission process
with a specific config path. Request is the raw RPSL update, possibly
signed with inline PGP.
"""
email = textwrap.dedent("""
From submitter@example.com@localhost Thu Jan 5 10:04:48 2018
Received: from [127.0.0.1] (localhost.localdomain [127.0.0.1])
by hostname (Postfix) with ESMTPS id 740AD310597
for <irrd@example.com>; Thu, 5 Jan 2018 10:04:48 +0100 (CET)
Message-ID: <1325754288.4989.6.camel@hostname>
Subject: my subject
Subject: not my subject
From: Sasha <sasha@example.com>
To: sasha@localhost
Date: Thu, 05 Jan 2018 10:04:48 +0100
X-Mailer: Python 3.7
Content-Transfer-Encoding: base64
Content-Type: text/plain; charset=utf-8
Mime-Version: 1.0
""").lstrip().encode('utf-8')
email += base64.b64encode(request.encode('utf-8'))
script = IRRD_ROOT_PATH + '/irrd/scripts/submit_email.py'
p = subprocess.Popen([script, f'--config={config_path}'],
stdin=subprocess.PIPE)
p.communicate(email)
p.wait()
def _retrieve_mails(self):
"""
Retrieve all mails kept in storage by the special integration test
SMTP server. Returns a list of email.Message objects.
Will only return new mails since the last call.
"""
s = socket.socket()
s.settimeout(5)
s.connect(('localhost', EMAIL_SMTP_PORT))
s.sendall(f'{EMAIL_RETURN_MSGS_COMMAND}\r\n'.encode('ascii'))
buffer = b''
while EMAIL_END not in buffer:
data = s.recv(1024 * 1024)
buffer += data
buffer = buffer.split(b'\n', 1)[1]
buffer = buffer.split(EMAIL_END, 1)[0]
s.sendall(f'{EMAIL_DISCARD_MSGS_COMMAND}\r\n'.encode('ascii'))
messages = [email.message_from_string(m.strip().decode('ascii')) for m in buffer.split(EMAIL_SEPARATOR.encode('ascii'))]
return messages
def _extract_message_body(self, message):
"""
Convenience method to extract the main body from a non-multipart
email.Message object.
"""
charset = message.get_content_charset(failobj='ascii')
return message.get_payload(decode=True).decode(charset, 'backslashreplace') # type: ignore
def _check_text_in_mails(self, messages, expected_texts):
"""
Check a list of email.Message objects for each of a list of
expected texts. I.e. every message should contain every text.
"""
for expected_text in expected_texts:
for message in messages:
message_text = self._extract_message_body(message)
assert expected_text in message_text, f'Missing text {expected_text} in mail:\n{message_text}'
def _check_recipients_in_mails(self, messages, expected_recipients):
"""
Check whether a list of email.Message objects match a list of
expected email recipients, in any order.
Order may very due to unordered data structures being used when
generating some notifications.
"""
assert len(messages) == len(expected_recipients)
original_expected_recipients = set(expected_recipients)
leftover_expected_recipients = original_expected_recipients.copy()
for message in messages:
for recipient in original_expected_recipients:
if message['To'] == recipient:
leftover_expected_recipients.remove(recipient)
assert not leftover_expected_recipients
def teardown_method(self, method):
"""
This teardown method is always called after tests complete, whether
or not they succeed. It is used to kill any leftover IRRd or SMTP
server processes.
"""
print('\n')
for pidfile in self.pidfile1, self.pidfile2, self.pidfile_mailserver:
try:
with open(pidfile) as fh:
pid = int(fh.read())
print(f'Terminating PID {pid} from {pidfile}')
os.kill(pid, signal.SIGTERM)
except (FileNotFoundError, ProcessLookupError, ValueError) as exc:
print(f'Failed to kill: {pidfile}: {exc}')
pass
| 46.915114 | 152 | 0.610989 |
import sys
import time
import unittest
import ujson
import base64
import email
import os
import requests
import signal
import socket
import sqlalchemy as sa
import subprocess
import textwrap
import yaml
from alembic import command, config
from pathlib import Path
from python_graphql_client import GraphqlClient
from irrd.conf import config_init, PASSWORD_HASH_DUMMY_VALUE
from irrd.utils.rpsl_samples import (SAMPLE_MNTNER, SAMPLE_PERSON, SAMPLE_KEY_CERT, SIGNED_PERSON_UPDATE_VALID,
SAMPLE_AS_SET, SAMPLE_AUT_NUM, SAMPLE_DOMAIN, SAMPLE_FILTER_SET, SAMPLE_INET_RTR,
SAMPLE_INET6NUM, SAMPLE_INETNUM, SAMPLE_PEERING_SET, SAMPLE_ROLE, SAMPLE_ROUTE,
SAMPLE_ROUTE_SET, SAMPLE_ROUTE6, SAMPLE_RTR_SET, SAMPLE_AS_BLOCK)
from irrd.utils.whois_client import whois_query, whois_query_irrd
from .constants import (EMAIL_SMTP_PORT, EMAIL_DISCARD_MSGS_COMMAND, EMAIL_RETURN_MSGS_COMMAND, EMAIL_SEPARATOR,
EMAIL_END)
from ..storage import translate_url
# Repository root, two levels above this file; added to sys.path so the
# helper scripts started as subprocesses can import irrd.
IRRD_ROOT_PATH = str(Path(__file__).resolve().parents[2])
sys.path.append(IRRD_ROOT_PATH)
# An as-set referring to another set plus a plain ASN, used to exercise
# recursive set resolution (!i / recursiveSetMembers).
AS_SET_REFERRING_OTHER_SET = """as-set: AS65537:AS-TESTREF
descr: description
members: AS65537:AS-SETTEST, AS65540
mbrs-by-ref: TEST-MNT
tech-c: PERSON-TEST
admin-c: PERSON-TEST
notify: notify@example.com
mnt-by: TEST-MNT
changed: changed@example.com 20190701 # comment
source: TEST
remarks: remark
"""
# SAMPLE_MNTNER without the extra mnt-by line, so it only references itself.
SAMPLE_MNTNER_CLEAN = SAMPLE_MNTNER.replace('mnt-by: OTHER1-MNT,OTHER2-MNT\n', '')
# One big multi-object submission covering every RPSL object class.
LARGE_UPDATE = '\n\n'.join([
    SAMPLE_AS_BLOCK,
    SAMPLE_AS_SET,
    SAMPLE_AUT_NUM,
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65538'),
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65539'),
    SAMPLE_AUT_NUM.replace('aut-num: as065537', 'aut-num: as65540'),
    SAMPLE_DOMAIN,
    SAMPLE_FILTER_SET,
    SAMPLE_INET_RTR,
    SAMPLE_INET6NUM,
    SAMPLE_INETNUM,
    SAMPLE_KEY_CERT,
    SAMPLE_PEERING_SET,
    SAMPLE_PERSON.replace('PERSON-TEST', 'DUMY2-TEST'),
    SAMPLE_ROLE,
    SAMPLE_ROUTE,
    SAMPLE_ROUTE_SET,
    SAMPLE_ROUTE6,
    SAMPLE_RTR_SET,
    AS_SET_REFERRING_OTHER_SET,
])
class TestIntegration:
    # Ports for the two IRRd instances under test: #1 is authoritative for
    # source TEST, #2 mirrors it from #1.
    port_http1 = 6080
    port_whois1 = 6043
    port_http2 = 6081
    port_whois2 = 6044
def test_irrd_integration(self, tmpdir):
self.assertCountEqual = unittest.TestCase().assertCountEqual
if 'IRRD_DATABASE_URL' in os.environ:
del os.environ['IRRD_DATABASE_URL']
if 'IRRD_REDIS_URL' in os.environ:
del os.environ['IRRD_REDIS_URL']
os.environ['PYTHONPATH'] = IRRD_ROOT_PATH
os.environ['IRRD_SCHEDULER_TIMER_OVERRIDE'] = '1'
self.tmpdir = tmpdir
self._start_mailserver()
self._start_irrds()
self._submit_update(self.config_path1, SAMPLE_MNTNER + '\n\noverride: override-password')
messages = self._retrieve_mails()
assert len(messages) == 1
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'FAILED: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nCreate FAILED: [mntner] TEST-MNT\n' in mail_text
assert '\nERROR: Object PERSON-TEST referenced in field admin-c not found in database TEST - must reference one of role, person.\n' in mail_text
assert '\nERROR: Object OTHER1-MNT referenced in field mnt-by not found in database TEST - must reference mntner.\n' in mail_text
assert '\nERROR: Object OTHER2-MNT referenced in field mnt-by not found in database TEST - must reference mntner.\n' in mail_text
assert 'email footer' in mail_text
assert 'Generated by IRRd version ' in mail_text
self._submit_update(self.config_path1,
SAMPLE_MNTNER_CLEAN + '\n\n' + SAMPLE_PERSON + '\n\noverride: override-password')
messages = self._retrieve_mails()
assert len(messages) == 1
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nCreate succeeded: [mntner] TEST-MNT\n' in mail_text
assert '\nCreate succeeded: [person] PERSON-TEST\n' in mail_text
assert 'email footer' in mail_text
assert 'Generated by IRRd version ' in mail_text
mntner_text = whois_query('127.0.0.1', self.port_whois1, 'TEST-MNT')
assert 'TEST-MNT' in mntner_text
assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
assert 'unįcöde tæst 🌈🦄' in mntner_text
assert 'PERSON-TEST' in mntner_text
uery('127.0.0.1', self.port_whois2, 'TEST-MNT')
assert 'TEST-MNT' in mntner_text
assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
assert 'unįcöde tæst 🌈🦄' in mntner_text
assert 'PERSON-TEST' in mntner_text
self._submit_update(self.config_path1, SAMPLE_KEY_CERT + '\npassword: md5-password')
messages = self._retrieve_mails()
assert len(messages) == 3
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert 'Create succeeded: [key-cert] PGPKEY-80F238C6' in self._extract_message_body(messages[0])
self._check_recipients_in_mails(messages[1:], [
'mnt-nfy@example.net', 'mnt-nfy2@example.net'
])
self._check_text_in_mails(messages[1:], [
'\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
'\nCreate succeeded for object below: [key-cert] PGPKEY-80F238C6:\n',
'email footer',
'Generated by IRRd version ',
])
for message in messages[1:]:
assert message['Subject'] == 'Notification of TEST database changes'
assert message['From'] == 'from@example.com'
self._submit_update(self.config_path1, SIGNED_PERSON_UPDATE_VALID)
messages = self._retrieve_mails()
assert len(messages) == 4
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nModify succeeded: [person] PERSON-TEST\n' in mail_text
self._check_recipients_in_mails(messages[1:], [
'mnt-nfy@example.net', 'mnt-nfy2@example.net', 'notify@example.com',
])
self._check_text_in_mails(messages[1:], [
'\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
'\nModify succeeded for object below: [person] PERSON-TEST:\n',
'\n@@ -1,4 +1,4 @@\n',
'\nNew version of this object:\n',
])
for message in messages[1:]:
assert message['Subject'] == 'Notification of TEST database changes'
assert message['From'] == 'from@example.com'
person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
assert 'PERSON-TEST' in person_text
assert 'Test person changed by PGP signed update' in person_text
n_text = whois_query('127.0.0.1', self.port_whois2, 'PERSON-TEST')
assert 'PERSON-TEST' in person_text
assert 'Test person changed by PGP signed update' in person_text
self._submit_update(self.config_path1, SAMPLE_PERSON + '\npassword: invalid\noverride: invalid\n')
messages = self._retrieve_mails()
assert len(messages) == 2
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'FAILED: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nModify FAILED: [person] PERSON-TEST\n' in mail_text
assert '\nERROR: Authorisation for person PERSON-TEST failed: must by authenticated by one of: TEST-MNT\n' in mail_text
mail_text = self._extract_message_body(messages[1])
assert messages[1]['Subject'] == 'Notification of TEST database changes'
assert messages[1]['From'] == 'from@example.com'
assert messages[1]['To'] == 'upd-to@example.net'
assert '\nModify FAILED AUTHORISATION for object below: [person] PERSON-TEST:\n' in mail_text
person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
assert 'PERSON-TEST' in person_text
assert 'Test person changed by PGP signed update' in person_text
self._submit_update(self.config_path1, SAMPLE_PERSON + 'password: md5-password\ndelete: delete\n')
messages = self._retrieve_mails()
assert len(messages) == 1
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'FAILED: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nDelete FAILED: [person] PERSON-TEST\n' in mail_text
assert '\nERROR: Object PERSON-TEST to be deleted, but still referenced by mntner TEST-MNT\n' in mail_text
assert '\nERROR: Object PERSON-TEST to be deleted, but still referenced by key-cert PGPKEY-80F238C6\n' in mail_text
person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
assert 'PERSON-TEST' in person_text
assert 'Test person changed by PGP signed update' in person_text
self._submit_update(self.config_path1,
f'{SAMPLE_PERSON}delete: delete\n\n{SAMPLE_KEY_CERT}delete: delete\n\n' +
f'{SAMPLE_MNTNER_CLEAN}delete: delete\npassword: crypt-password\n')
messages = self._retrieve_mails()
assert len(messages) == 4
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nDelete succeeded: [person] PERSON-TEST\n' in mail_text
assert '\nDelete succeeded: [mntner] TEST-MNT\n' in mail_text
assert '\nDelete succeeded: [key-cert] PGPKEY-80F238C6\n' in mail_text
self._check_recipients_in_mails(messages[1:], [
'mnt-nfy@example.net', 'mnt-nfy2@example.net', 'notify@example.net',
])
mnt_nfy_msgs = [msg for msg in messages if msg['To'] in ['mnt-nfy@example.net', 'mnt-nfy2@example.net']]
self._check_text_in_mails(mnt_nfy_msgs, [
'\n> Message-ID: <1325754288.4989.6.camel@hostname>\n',
'\nDelete succeeded for object below: [person] PERSON-TEST:\n',
'\nDelete succeeded for object below: [mntner] TEST-MNT:\n',
'\nDelete succeeded for object below: [key-cert] PGPKEY-80F238C6:\n',
'unįcöde tæst 🌈🦄\n',
'Test person changed by PGP signed update\n',
])
for message in messages[1:]:
assert message['Subject'] == 'Notification of TEST database changes'
assert message['From'] == 'from@example.com'
notify_msg = [msg for msg in messages if msg['To'] == 'notify@example.net'][0]
mail_text = self._extract_message_body(notify_msg)
assert notify_msg['Subject'] == 'Notification of TEST database changes'
assert notify_msg['From'] == 'from@example.com'
assert '\n> Message-ID: <1325754288.4989.6.camel@hostname>\n' in mail_text
assert '\nDelete succeeded for object below: [person] PERSON-TEST:\n' not in mail_text
assert '\nDelete succeeded for object below: [mntner] TEST-MNT:\n' in mail_text
assert '\nDelete succeeded for object below: [key-cert] PGPKEY-80F238C6:\n' not in mail_text
person_text = whois_query('127.0.0.1', self.port_whois1, 'PERSON-TEST')
assert 'No entries found for the selected source(s)' in person_text
assert 'PERSON-TEST' not in person_text
person_text = whois_query('127.0.0.1', self.port_whois2, 'PERSON-TEST')
assert 'No entries found for the selected source(s)' in person_text
assert 'PERSON-TEST' not in person_text
self._submit_update(self.config_path1,
SAMPLE_MNTNER_CLEAN + '\n\n' + SAMPLE_PERSON + '\n\noverride: override-password')
messages = self._retrieve_mails()
assert len(messages) == 1
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nCreate succeeded: [mntner] TEST-MNT\n' in mail_text
assert '\nCreate succeeded: [person] PERSON-TEST\n' in mail_text
assert 'email footer' in mail_text
assert 'Generated by IRRd version ' in mail_text
self._submit_update(self.config_path1, LARGE_UPDATE + '\n\npassword: md5-password')
messages = self._retrieve_mails()
assert len(messages) == 3
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'SUCCESS: my subject'
assert messages[0]['From'] == 'from@example.com'
assert messages[0]['To'] == 'Sasha <sasha@example.com>'
assert '\nINFO: AS number as065537 was reformatted as AS65537\n' in mail_text
assert '\nCreate succeeded: [filter-set] FLTR-SETTEST\n' in mail_text
assert '\nINFO: Address range 192.0.2.0 - 192.0.02.255 was reformatted as 192.0.2.0 - 192.0.2.255\n' in mail_text
assert '\nINFO: Address prefix 192.0.02.0/24 was reformatted as 192.0.2.0/24\n' in mail_text
assert '\nINFO: Route set member 2001:0dB8::/48 was reformatted as 2001:db8::/48\n' in mail_text
mntner_text = whois_query('127.0.0.1', self.port_whois1, 'TEST-MNT')
assert 'TEST-MNT' in mntner_text
assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
assert 'unįcöde tæst 🌈🦄' in mntner_text
assert 'PERSON-TEST' in mntner_text
time.sleep(3)
mntner_text = whois_query('127.0.0.1', self.port_whois2, 'TEST-MNT')
assert 'TEST-MNT' in mntner_text
assert PASSWORD_HASH_DUMMY_VALUE in mntner_text
assert 'unįcöde tæst 🌈🦄' in mntner_text
assert 'PERSON-TEST' in mntner_text
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!gAS65537')
assert query_result == '192.0.2.0/24'
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!gAS65547')
assert query_result == '192.0.2.0/32'
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!6AS65537')
assert query_result == '2001:db8::/48'
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!iRS-TEST')
assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48', 'RS-OTHER-SET'}
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!aAS65537:AS-SETTEST')
assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48'}
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!aAS65537:AS-TESTREF')
assert set(query_result.split(' ')) == {'192.0.2.0/24', '2001:db8::/48'}
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!a4AS65537:AS-TESTREF')
assert query_result == '192.0.2.0/24'
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!a6AS65537:AS-TESTREF')
assert query_result == '2001:db8::/48'
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24')
assert 'example route' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/25,l')
assert 'example route' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,L')
assert 'example route' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/23,M')
assert 'example route' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,M')
assert 'RPKI' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!r192.0.2.0/24,o')
assert query_result == 'AS65537'
query_result = whois_query('127.0.0.1', self.port_whois1, '-x 192.0.02.0/24')
assert 'example route' in query_result
query_result = whois_query('127.0.0.1', self.port_whois1, '-l 192.0.02.0/25')
assert 'example route' in query_result
query_result = whois_query('127.0.0.1', self.port_whois1, '-L 192.0.02.0/24')
assert 'example route' in query_result
query_result = whois_query('127.0.0.1', self.port_whois1, '-M 192.0.02.0/23')
assert 'example route' in query_result
query_result = whois_query('127.0.0.1', self.port_whois1, '-i member-of RS-test')
assert 'example route' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
assert not query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!6AS65537')
assert query_result == '2001:db8::/48'
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!iRS-TEST')
assert query_result == '2001:db8::/48 RS-OTHER-SET'
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!aAS65537:AS-SETTEST')
assert query_result == '2001:db8::/48'
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!aAS65537:AS-TESTREF')
assert query_result == '2001:db8::/48'
query_result = whois_query('127.0.0.1', self.port_whois2, '-x 192.0.02.0/24')
assert 'example route' not in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!r192.0.2.0/24,L')
assert 'RPKI' in query_result
query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:1-LAST')
assert 'route:192.0.2.0/24' not in query_result.replace(' ', '')
for port in self.port_whois1, self.port_whois2:
query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-SETTEST')
assert set(query_result.split(' ')) == {'AS65537', 'AS65538', 'AS65539', 'AS-OTHERSET'}
query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-TESTREF')
assert set(query_result.split(' ')) == {'AS65537:AS-SETTEST', 'AS65540'}
query_result = whois_query_irrd('127.0.0.1', port, '!iAS65537:AS-TESTREF,1')
assert set(query_result.split(' ')) == {'AS65537', 'AS65538', 'AS65539', 'AS65540'}
query_result = whois_query_irrd('127.0.0.1', port, '!maut-num,as65537')
assert 'AS65537' in query_result
assert 'TEST-AS' in query_result
query_result = whois_query_irrd('127.0.0.1', port, '!oTEST-MNT')
assert 'AS65537' in query_result
assert 'TEST-AS' in query_result
assert 'AS65536 - AS65538' in query_result
assert 'rtrs-settest' in query_result
query_result = whois_query('127.0.0.1', port, '-T route6 -i member-of RS-TEST')
assert 'No entries found for the selected source(s)' in query_result
query_result = whois_query('127.0.0.1', port, 'dashcare')
assert 'ROLE-TEST' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois1, '!J-*')
result = ujson.loads(query_result)
assert result['TEST']['serial_newest_journal'] == 29
assert result['TEST']['serial_last_export'] == 29
assert result['TEST']['serial_newest_mirror'] is None
uery_irrd('127.0.0.1', self.port_whois2, '!J-*')
result = ujson.loads(query_result)
assert result['TEST']['serial_newest_journal'] == 26
assert result['TEST']['serial_last_export'] == 26
assert result['TEST']['serial_newest_mirror'] == 29
with open(self.roa_source2, 'w') as roa_file:
ujson.dump({'roas': [{'prefix': '198.51.100.0/24', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
time.sleep(3)
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
assert query_result == '192.0.2.0/24'
query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:27-27')
assert 'ADD 27' in query_result
assert '192.0.2.0/24' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!J-*')
result = ujson.loads(query_result)
assert result['TEST']['serial_newest_journal'] == 27
assert result['TEST']['serial_last_export'] == 27
assert result['TEST']['serial_newest_mirror'] == 29
with open(self.roa_source2, 'w') as roa_file:
ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
time.sleep(3)
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!gAS65537')
assert not query_result
query_result = whois_query('127.0.0.1', self.port_whois2, '-g TEST:3:28-28')
assert 'DEL 28' in query_result
assert '192.0.2.0/24' in query_result
query_result = whois_query_irrd('127.0.0.1', self.port_whois2, '!J-*')
result = ujson.loads(query_result)
assert result['TEST']['serial_newest_journal'] == 28
assert result['TEST']['serial_last_export'] == 28
assert result['TEST']['serial_newest_mirror'] == 29
with open(self.roa_source1, 'w') as roa_file:
ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
time.sleep(2)
messages = self._retrieve_mails()
assert len(messages) == 3
mail_text = self._extract_message_body(messages[0])
assert messages[0]['Subject'] == 'route(6) objects in TEST marked RPKI invalid'
expected_recipients = {'email@example.com', 'mnt-nfy@example.net', 'mnt-nfy2@example.net'}
assert {m['To'] for m in messages} == expected_recipients
assert '192.0.2.0/24' in mail_text
self.check_http()
self.check_graphql()
def check_http(self):
status1 = requests.get(f'http://127.0.0.1:{self.port_http1}/v1/status/')
status2 = requests.get(f'http://127.0.0.1:{self.port_http2}/v1/status/')
assert status1.status_code == 200
assert status2.status_code == 200
assert 'IRRD version' in status1.text
assert 'IRRD version' in status2.text
assert 'TEST' in status1.text
assert 'TEST' in status2.text
assert 'RPKI' in status1.text
assert 'RPKI' in status2.text
assert 'Authoritative: Yes' in status1.text
assert 'Authoritative: Yes' not in status2.text
    def check_graphql(self):
        """Run GraphQL queries against instance #1 and verify the results."""
        client = GraphqlClient(endpoint=f"http://127.0.0.1:{self.port_http1}/graphql/")
        # Test rpslObjects with nested object resolving and journal entries.
        # NOTE(review): the adminCObjs selection appears twice in this query;
        # GraphQL merges duplicate fields, but one copy looks redundant.
        query = """query {
            rpslObjects(rpslPk: "PERSON-TEST") {
                rpslPk
                ... on RPSLContact {
                    mntBy
                }
                mntByObjs {
                    rpslPk
                    adminCObjs {
                        ... on RPSLContact {
                            rpslPk
                        }
                    }
                    adminCObjs {
                        ... on RPSLContact {
                            rpslPk
                        }
                    }
                }
                journal {
                    serialNrtm
                    operation
                    origin
                }
            }
        }
        """
        result = client.execute(query=query)
        assert result['data']['rpslObjects'] == [{
            'rpslPk': 'PERSON-TEST',
            'mntBy': ['TEST-MNT'],
            'mntByObjs': [{'rpslPk': 'TEST-MNT', 'adminCObjs': [{'rpslPk': 'PERSON-TEST'}]}],
            'journal': [
                {'serialNrtm': 2, 'operation': 'add_or_update', 'origin': 'auth_change'},
                {'serialNrtm': 4, 'operation': 'add_or_update', 'origin': 'auth_change'},
                {'serialNrtm': 5, 'operation': 'delete', 'origin': 'auth_change'},
                {'serialNrtm': 9, 'operation': 'add_or_update', 'origin': 'auth_change'}
            ]
        }]
        # Test less-specific lookup across all RPKI statuses.
        query = """query {
            rpslObjects(ipLessSpecificOneLevel: "192.0.2.1" rpkiStatus:[invalid,valid,not_found]) {
                rpslPk
                ... on RPSLRoute {
                    memberOfObjs {
                        rpslPk
                    }
                }
            }
        }
        """
        result = client.execute(query=query)
        self.assertCountEqual(result['data']['rpslObjects'], [
            {'rpslPk': '192.0.2.0/24AS65537', 'memberOfObjs': [{'rpslPk': 'RS-TEST'}]},
            {'rpslPk': '192.0.2.0 - 192.0.2.255'}
        ])
        # Test multiple PKs where one does not exist; as-set sub-objects.
        query = """query {
            rpslObjects(rpslPk: ["AS65537:AS-TESTREF", "DOESNOTEXIST"]) {
                rpslPk
                ... on RPSLAsSet {
                    membersObjs {
                        rpslPk
                    }
                    mbrsByRefObjs {
                        rpslPk
                    }
                }
            }
        }
        """
        result = client.execute(query=query)
        assert result['data']['rpslObjects'] == [{
            'rpslPk': 'AS65537:AS-TESTREF',
            'membersObjs': [{'rpslPk': 'AS65537:AS-SETTEST'}],
            'mbrsByRefObjs': [{'rpslPk': 'TEST-MNT'}],
        }]
        # Test databaseStatus query for both sources.
        query = """query {
            databaseStatus {
                source
                authoritative
                serialOldestJournal
                serialNewestJournal
                serialNewestMirror
            }
        }
        """
        result = client.execute(query=query)
        self.assertCountEqual(result['data']['databaseStatus'], [
            {
                'source': 'TEST',
                'authoritative': True,
                'serialOldestJournal': 1,
                'serialNewestJournal': 30,
                'serialNewestMirror': None
            }, {
                'source': 'RPKI',
                'authoritative': False,
                'serialOldestJournal': None,
                'serialNewestJournal': None,
                'serialNewestMirror': None
            }
        ])
        # Test asnPrefixes query.
        query = """query {
            asnPrefixes(asns: [65537]) {
                asn
                prefixes
            }
        }
        """
        result = client.execute(query=query)
        asnPrefixes = result['data']['asnPrefixes']
        assert len(asnPrefixes) == 1
        assert asnPrefixes[0]['asn'] == 65537
        assert set(asnPrefixes[0]['prefixes']) == {'2001:db8::/48'}
        # Test asSetPrefixes query.
        query = """query {
            asSetPrefixes(setNames: ["AS65537:AS-TESTREF"]) {
                rpslPk
                prefixes
            }
        }
        """
        result = client.execute(query=query)
        asSetPrefixes = result['data']['asSetPrefixes']
        assert len(asSetPrefixes) == 1
        assert asSetPrefixes[0]['rpslPk'] == 'AS65537:AS-TESTREF'
        assert set(asSetPrefixes[0]['prefixes']) == {'2001:db8::/48'}
        # Test recursiveSetMembers query.
        query = """query {
            recursiveSetMembers(setNames: ["AS65537:AS-TESTREF"]) {
                rpslPk
                rootSource
                members
            }
        }
        """
        result = client.execute(query=query)
        recursiveSetMembers = result['data']['recursiveSetMembers']
        assert len(recursiveSetMembers) == 1
        assert recursiveSetMembers[0]['rpslPk'] == 'AS65537:AS-TESTREF'
        assert recursiveSetMembers[0]['rootSource'] == 'TEST'
        assert set(recursiveSetMembers[0]['members']) == {
            'AS65537', 'AS65538', 'AS65539', 'AS65540'
        }
def _start_mailserver(self):
self.pidfile_mailserver = str(self.tmpdir) + '/mailserver.pid'
self.logfile_mailserver = str(self.tmpdir) + '/mailserver.log'
mailserver_path = IRRD_ROOT_PATH + '/irrd/integration_tests/mailserver.tac'
assert not subprocess.call(['twistd', f'--pidfile={self.pidfile_mailserver}',
f'--logfile={self.logfile_mailserver}', '-y', mailserver_path])
def _start_irrds(self):
self.database_url1 = os.environ['IRRD_DATABASE_URL_INTEGRATION_1']
self.database_url2 = os.environ['IRRD_DATABASE_URL_INTEGRATION_2']
self.redis_url1 = os.environ['IRRD_REDIS_URL_INTEGRATION_1']
self.redis_url2 = os.environ['IRRD_REDIS_URL_INTEGRATION_2']
self.config_path1 = str(self.tmpdir) + '/irrd1_config.yaml'
self.config_path2 = str(self.tmpdir) + '/irrd2_config.yaml'
self.logfile1 = str(self.tmpdir) + '/irrd1.log'
self.logfile2 = str(self.tmpdir) + '/irrd2.log'
self.roa_source1 = str(self.tmpdir) + '/roa1.json'
self.roa_source2 = str(self.tmpdir) + '/roa2.json'
self.export_dir1 = str(self.tmpdir) + '/export1/'
self.export_dir2 = str(self.tmpdir) + '/export2/'
self.piddir1 = str(self.tmpdir) + '/piddir1/'
self.piddir2 = str(self.tmpdir) + '/piddir2/'
self.pidfile1 = self.piddir1 + 'irrd.pid'
self.pidfile2 = self.piddir2 + 'irrd.pid'
os.mkdir(self.export_dir1)
os.mkdir(self.export_dir2)
os.mkdir(self.piddir1)
os.mkdir(self.piddir2)
print(textwrap.dedent(f"""
Preparing to start IRRd for integration test.
IRRd #1 running on HTTP port {self.port_http1}, whois port {self.port_whois1}
Config in: {self.config_path1}
Database URL: {self.database_url1}
PID file: {self.pidfile1}
Logfile: {self.logfile1}
IRRd #2 running on HTTP port {self.port_http2}, whois port {self.port_whois2}
Config in: {self.config_path2}
Database URL: {self.database_url2}
PID file: {self.pidfile2}
Logfile: {self.logfile2}
"""))
with open(self.roa_source1, 'w') as roa_file:
ujson.dump({'roas': [{'prefix': '192.0.2.0/32', 'asn': 'AS65547', 'maxLength': '32', 'ta': 'TA'}]}, roa_file)
with open(self.roa_source2, 'w') as roa_file:
ujson.dump({'roas': [{'prefix': '128/1', 'asn': 'AS0', 'maxLength': '1', 'ta': 'TA'}]}, roa_file)
base_config = {
'irrd': {
'access_lists': {
'localhost': ['::/32', '127.0.0.1']
},
'server': {
'http': {
'status_access_list': 'localhost',
'interface': '::1',
'port': 8080
},
'whois': {
'interface': '::1',
'max_connections': 10,
'port': 8043
},
},
'rpki':{
'roa_import_timer': 1,
'notify_invalid_enabled': True,
},
'auth': {
'gnupg_keyring': None,
'override_password': '$1$J6KycItM$MbPaBU6iFSGFV299Rk7Di0',
},
'email': {
'footer': 'email footer',
'from': 'from@example.com',
'smtp': f'localhost:{EMAIL_SMTP_PORT}',
},
'log': {
'logfile_path': None,
'level': 'DEBUG',
},
'sources': {}
}
}
config1 = base_config.copy()
config1['irrd']['piddir'] = self.piddir1
config1['irrd']['database_url'] = self.database_url1
config1['irrd']['redis_url'] = self.redis_url1
config1['irrd']['server']['http']['interface'] = '127.0.0.1' config1['irrd']['server']['http']['port'] = self.port_http1
config1['irrd']['server']['whois']['interface'] = '127.0.0.1'
config1['irrd']['server']['whois']['port'] = self.port_whois1
config1['irrd']['auth']['gnupg_keyring'] = str(self.tmpdir) + '/gnupg1'
config1['irrd']['log']['logfile_path'] = self.logfile1
config1['irrd']['rpki']['roa_source'] = 'file://' + self.roa_source1
config1['irrd']['sources']['TEST'] = {
'authoritative': True,
'keep_journal': True,
'export_destination': self.export_dir1,
'export_timer': '1',
'nrtm_access_list': 'localhost',
}
with open(self.config_path1, 'w') as yaml_file:
yaml.safe_dump(config1, yaml_file)
config2 = base_config.copy()
config2['irrd']['piddir'] = self.piddir2
config2['irrd']['database_url'] = self.database_url2
config2['irrd']['redis_url'] = self.redis_url2
config2['irrd']['server']['http']['port'] = self.port_http2
config2['irrd']['server']['whois']['port'] = self.port_whois2
config2['irrd']['auth']['gnupg_keyring'] = str(self.tmpdir) + '/gnupg2'
config2['irrd']['log']['logfile_path'] = self.logfile2
config2['irrd']['rpki']['roa_source'] = 'file://' + self.roa_source2
config2['irrd']['sources']['TEST'] = {
'keep_journal': True,
'import_serial_source': f'file://{self.export_dir1}/TEST.CURRENTSERIAL',
'import_source': f'file://{self.export_dir1}/test.db.gz',
'export_destination': self.export_dir2,
'import_timer': '1',
'export_timer': '1',
'nrtm_host': '127.0.0.1',
'nrtm_port': str(self.port_whois1),
'nrtm_access_list': 'localhost',
}
with open(self.config_path2, 'w') as yaml_file:
yaml.safe_dump(config2, yaml_file)
self._prepare_database()
assert not subprocess.call(['irrd/daemon/main.py', f'--config={self.config_path1}'])
assert not subprocess.call(['irrd/daemon/main.py', f'--config={self.config_path2}'])
def _prepare_database(self):
config_init(self.config_path1)
alembic_cfg = config.Config()
alembic_cfg.set_main_option('script_location', f'{IRRD_ROOT_PATH}/irrd/storage/alembic')
command.upgrade(alembic_cfg, 'head')
connection = sa.create_engine(translate_url(self.database_url1)).connect()
connection.execute('DELETE FROM rpsl_objects')
connection.execute('DELETE FROM rpsl_database_journal')
connection.execute('DELETE FROM database_status')
connection.execute('DELETE FROM roa_object')
config_init(self.config_path2)
alembic_cfg = config.Config()
alembic_cfg.set_main_option('script_location', f'{IRRD_ROOT_PATH}/irrd/storage/alembic')
command.upgrade(alembic_cfg, 'head')
connection = sa.create_engine(translate_url(self.database_url2)).connect()
connection.execute('DELETE FROM rpsl_objects')
connection.execute('DELETE FROM rpsl_database_journal')
connection.execute('DELETE FROM database_status')
connection.execute('DELETE FROM roa_object')
    def _submit_update(self, config_path, request):
        """Pipe *request* to irrd's submit_email.py as a base64-encoded mail.

        The canned RFC822 headers below deliberately include a duplicate
        Subject: header; the base64-encoded *request* is appended directly
        after the headers.
        """
        email = textwrap.dedent("""
            From submitter@example.com@localhost Thu Jan 5 10:04:48 2018
            Received: from [127.0.0.1] (localhost.localdomain [127.0.0.1])
            by hostname (Postfix) with ESMTPS id 740AD310597
            for <irrd@example.com>; Thu, 5 Jan 2018 10:04:48 +0100 (CET)
            Message-ID: <1325754288.4989.6.camel@hostname>
            Subject: my subject
            Subject: not my subject
            From: Sasha <sasha@example.com>
            To: sasha@localhost
            Date: Thu, 05 Jan 2018 10:04:48 +0100
            X-Mailer: Python 3.7
            Content-Transfer-Encoding: base64
            Content-Type: text/plain; charset=utf-8
            Mime-Version: 1.0
            """).lstrip().encode('utf-8')
        # Body: the update request, base64-encoded to match the declared CTE.
        email += base64.b64encode(request.encode('utf-8'))
        script = IRRD_ROOT_PATH + '/irrd/scripts/submit_email.py'
        # Feed the raw mail to the submission script over stdin and wait.
        p = subprocess.Popen([script, f'--config={config_path}'],
                             stdin=subprocess.PIPE)
        p.communicate(email)
        p.wait()
def _retrieve_mails(self):
s = socket.socket()
s.settimeout(5)
s.connect(('localhost', EMAIL_SMTP_PORT))
s.sendall(f'{EMAIL_RETURN_MSGS_COMMAND}\r\n'.encode('ascii'))
buffer = b''
while EMAIL_END not in buffer:
data = s.recv(1024 * 1024)
buffer += data
buffer = buffer.split(b'\n', 1)[1]
buffer = buffer.split(EMAIL_END, 1)[0]
s.sendall(f'{EMAIL_DISCARD_MSGS_COMMAND}\r\n'.encode('ascii'))
messages = [email.message_from_string(m.strip().decode('ascii')) for m in buffer.split(EMAIL_SEPARATOR.encode('ascii'))]
return messages
def _extract_message_body(self, message):
charset = message.get_content_charset(failobj='ascii')
return message.get_payload(decode=True).decode(charset, 'backslashreplace')
def _check_text_in_mails(self, messages, expected_texts):
for expected_text in expected_texts:
for message in messages:
message_text = self._extract_message_body(message)
assert expected_text in message_text, f'Missing text {expected_text} in mail:\n{message_text}'
def _check_recipients_in_mails(self, messages, expected_recipients):
assert len(messages) == len(expected_recipients)
original_expected_recipients = set(expected_recipients)
leftover_expected_recipients = original_expected_recipients.copy()
for message in messages:
for recipient in original_expected_recipients:
if message['To'] == recipient:
leftover_expected_recipients.remove(recipient)
assert not leftover_expected_recipients
def teardown_method(self, method):
print('\n')
for pidfile in self.pidfile1, self.pidfile2, self.pidfile_mailserver:
try:
with open(pidfile) as fh:
pid = int(fh.read())
print(f'Terminating PID {pid} from {pidfile}')
os.kill(pid, signal.SIGTERM)
except (FileNotFoundError, ProcessLookupError, ValueError) as exc:
print(f'Failed to kill: {pidfile}: {exc}')
pass
| true | true |
1c347e036313375a8dd918e7c672b537554f1142 | 625 | py | Python | cultr/database/__init__.py | TrixiS/cultr | fe059fdf7838ad250bcdad7db5a88e3c3e789d9c | [
"MIT"
] | null | null | null | cultr/database/__init__.py | TrixiS/cultr | fe059fdf7838ad250bcdad7db5a88e3c3e789d9c | [
"MIT"
] | null | null | null | cultr/database/__init__.py | TrixiS/cultr | fe059fdf7838ad250bcdad7db5a88e3c3e789d9c | [
"MIT"
] | null | null | null | from typing import Generator
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from . import db_models
from ..config import settings
# One application-wide async engine; echo=True logs every emitted SQL statement.
engine = create_async_engine(settings.DATABASE_URI, echo=True)
# Session factory bound to the engine; expire_on_commit=False keeps ORM
# objects usable after commit without triggering a refresh.
async_session = sessionmaker(
    engine, expire_on_commit=False, class_=AsyncSession)
async def init_database():
    """Create all tables declared on db_models.Base (no-op if they exist)."""
    async with engine.begin() as conn:
        # run_sync bridges the synchronous metadata.create_all call onto
        # the async engine's connection.
        await conn.run_sync(db_models.Base.metadata.create_all)
async def get_session() -> Generator:
    """Yield a database session, guaranteeing it is closed afterwards.

    Dependency-style generator (e.g. for FastAPI's Depends). The session is
    created *before* the try block: in the original, a failure inside
    ``async_session()`` left ``session`` unbound, so the ``finally`` clause
    raised NameError and masked the real error.
    """
    session = async_session()
    try:
        yield session
    finally:
        await session.close()
| 25 | 68 | 0.7504 | from typing import Generator
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from . import db_models
from ..config import settings
engine = create_async_engine(settings.DATABASE_URI, echo=True)
async_session = sessionmaker(
engine, expire_on_commit=False, class_=AsyncSession)
async def init_database():
async with engine.begin() as conn:
await conn.run_sync(db_models.Base.metadata.create_all)
async def get_session() -> Generator:
try:
session = async_session()
yield session
finally:
await session.close()
| true | true |
1c347ea6db0106869ce0b9be812e2121fd128eed | 6,417 | py | Python | venv/lib/python3.6/site-packages/kubernetes/client/models/v1beta2_stateful_set_list.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/client/models/v1beta2_stateful_set_list.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/client/models/v1beta2_stateful_set_list.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta2StatefulSetList(object):
    """OpenAPI model: a list of V1beta2StatefulSet objects.

    NOTE: This class was originally generated by OpenAPI Generator
    (https://openapi-generator.tech); prefer regenerating over further manual
    edits when the upstream spec changes. The Python-2-only ``six``
    dependency has been dropped: this package targets Python 3, where
    ``dict.items()`` replaces ``six.iteritems``.
    """

    # Attribute name -> declared OpenAPI type, iterated by to_dict().
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta2StatefulSet]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):  # noqa: E501
        """V1beta2StatefulSetList - a model defined in OpenAPI.

        :param api_version: versioned schema identifier string.
        :param items: list of V1beta2StatefulSet; required, must not be None.
        :param kind: REST resource kind in CamelCase.
        :param metadata: V1ListMeta standard list metadata.
        """
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # items is required: the setter raises ValueError on None.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this representation
        of an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1beta2StatefulSetList."""
        self._api_version = api_version

    @property
    def items(self):
        """list[V1beta2StatefulSet]: the StatefulSets in this list (required)."""
        return self._items

    @items.setter
    def items(self, items):
        """Set items; raise ValueError when None (items is required)."""
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. In CamelCase; cannot be updated."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1beta2StatefulSetList."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: standard list metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1beta2StatefulSetList."""
        self._metadata = metadata

    def to_dict(self):
        """Return the model's properties as a dict, recursively serializing
        any nested value that exposes to_dict()."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal state."""
        if not isinstance(other, V1beta2StatefulSetList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 32.739796 | 295 | 0.620228 |
import pprint
import re
import six
class V1beta2StatefulSetList(object):
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta2StatefulSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def items(self):
return self._items
@items.setter
def items(self, items):
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = metadata
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta2StatefulSetList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c347f5388c0292a8711ba961b56081b3d0bf22a | 15,766 | py | Python | view/ui/plaster.py | cmh1027/everytimeUtility | 3d274113a6fd212a3f5d7ee957411ca11a93e960 | [
"MIT"
] | null | null | null | view/ui/plaster.py | cmh1027/everytimeUtility | 3d274113a6fd212a3f5d7ee957411ca11a93e960 | [
"MIT"
] | 4 | 2018-07-11T04:57:54.000Z | 2020-10-12T14:23:54.000Z | view/ui/plaster.py | cmh1027/everytimeUtility | 3d274113a6fd212a3f5d7ee957411ca11a93e960 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'plaster.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """UI definition for the 'plaster' window (originally generated by pyuic5
    from plaster.ui).

    The generated code repeated the same size-policy / font / button /
    checkbox boilerplate for every widget; that boilerplate is factored into
    private helpers below. The widget tree, object names and all properties
    are unchanged.
    """

    def setupUi(self, Form):
        """Build the widget tree on *Form* and apply all static properties."""
        Form.setObjectName("Form")
        Form.resize(231, 223)
        # Container widget hosting the whole vertical layout.
        self.verticalLayoutWidget = QtWidgets.QWidget(Form)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 231, 221))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(5, 0, 0, 0)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")

        # Row 1: label showing the searched nickname.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setContentsMargins(11, -1, -1, -1)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.searchednicknameLabel = QtWidgets.QLabel(self.verticalLayoutWidget)
        self._applySizePolicy(self.searchednicknameLabel,
                              QtWidgets.QSizePolicy.Preferred,
                              QtWidgets.QSizePolicy.Fixed)
        self.searchednicknameLabel.setMinimumSize(QtCore.QSize(0, 28))
        self.searchednicknameLabel.setFont(self._boldFont("맑은 고딕", point_size=11))
        self.searchednicknameLabel.setText("")
        self.searchednicknameLabel.setAlignment(
            QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
        self.searchednicknameLabel.setObjectName("searchednicknameLabel")
        self.verticalLayout_2.addWidget(self.searchednicknameLabel)
        self.verticalLayout.addLayout(self.verticalLayout_2)

        # Row 2: board-selection / word buttons.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setContentsMargins(10, 2, 15, 2)
        self.horizontalLayout.setSpacing(10)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.selectboardButton = self._makeButton("selectboardButton", max_width=80)
        self.horizontalLayout.addWidget(self.selectboardButton)
        self.plasterWordButton = self._makeButton("plasterWordButton", max_width=60)
        self.plasterWordButton.setMinimumSize(QtCore.QSize(0, 0))
        self.horizontalLayout.addWidget(self.plasterWordButton)
        self.verticalLayout.addLayout(self.horizontalLayout)

        # Row 3: what to plaster (articles / comments).
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(15, -1, -1, -1)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.articleplasterCheckBox = self._makeCheckBox("articleplasterCheckBox")
        self.horizontalLayout_3.addWidget(self.articleplasterCheckBox)
        self.commentplasterCheckBox = self._makeCheckBox("commentplasterCheckBox")
        self.horizontalLayout_3.addWidget(self.commentplasterCheckBox)
        self.verticalLayout.addLayout(self.horizontalLayout_3)

        # Row 4: immediate-removal / anonymous options.
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setContentsMargins(15, -1, -1, -1)
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.promptremoveCheckBox = self._makeCheckBox("promptremoveCheckBox")
        self.horizontalLayout_6.addWidget(self.promptremoveCheckBox)
        self.isanonymFlag = self._makeCheckBox("isanonymFlag")
        self.horizontalLayout_6.addWidget(self.isanonymFlag)
        self.verticalLayout.addLayout(self.horizontalLayout_6)

        # Row 5: retry-count entry.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(5, -1, 0, -1)
        self.horizontalLayout_4.setSpacing(12)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.label_3 = self._makeLabel("label_3", QtWidgets.QSizePolicy.Fixed)
        self.horizontalLayout_4.addWidget(self.label_3)
        self.retryLineEdit = self._makeLineEdit("retryLineEdit", max_length=2)
        self.horizontalLayout_4.addWidget(self.retryLineEdit)
        self.label_4 = self._makeLabel("label_4", QtWidgets.QSizePolicy.Preferred)
        self.horizontalLayout_4.addWidget(self.label_4)
        self.verticalLayout.addLayout(self.horizontalLayout_4)

        # Row 6: iteration-count entry.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setContentsMargins(5, -1, 0, -1)
        self.horizontalLayout_5.setSpacing(12)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.label_5 = self._makeLabel("label_5", QtWidgets.QSizePolicy.Fixed)
        self.horizontalLayout_5.addWidget(self.label_5)
        self.iterationLineEdit = self._makeLineEdit("iterationLineEdit", max_length=14)
        self.horizontalLayout_5.addWidget(self.iterationLineEdit)
        self.label_6 = self._makeLabel("label_6", QtWidgets.QSizePolicy.Preferred)
        self.horizontalLayout_5.addWidget(self.label_6)
        self.verticalLayout.addLayout(self.horizontalLayout_5)

        # Row 7: cycle-mode radio buttons, exclusive via cycleGroup.
        self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_7.setContentsMargins(4, -1, -1, -1)
        self.horizontalLayout_7.setObjectName("horizontalLayout_7")
        self.articleRadioButton = QtWidgets.QRadioButton(self.verticalLayoutWidget)
        self._applySizePolicy(self.articleRadioButton,
                              QtWidgets.QSizePolicy.Fixed,
                              QtWidgets.QSizePolicy.Fixed)
        self.articleRadioButton.setChecked(True)
        self.articleRadioButton.setObjectName("articleRadioButton")
        self.cycleGroup = QtWidgets.QButtonGroup(Form)
        self.cycleGroup.setObjectName("cycleGroup")
        self.cycleGroup.addButton(self.articleRadioButton)
        self.horizontalLayout_7.addWidget(self.articleRadioButton)
        self.stringRadioButton = QtWidgets.QRadioButton(self.verticalLayoutWidget)
        self.stringRadioButton.setObjectName("stringRadioButton")
        self.cycleGroup.addButton(self.stringRadioButton)
        self.horizontalLayout_7.addWidget(self.stringRadioButton)
        self.verticalLayout.addLayout(self.horizontalLayout_7)

        # Row 8: start / cancel buttons.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.startplatsterButton = self._makeButton("startplatsterButton", max_width=50)
        self.horizontalLayout_2.addWidget(self.startplatsterButton)
        self.cancelplasterButton = self._makeButton("cancelplasterButton", max_width=50)
        self.horizontalLayout_2.addWidget(self.cancelplasterButton)
        self.verticalLayout.addLayout(self.horizontalLayout_2)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    # --- helpers factored out of the generated boilerplate -------------------

    def _applySizePolicy(self, widget, horizontal, vertical):
        """Install a zero-stretch QSizePolicy, preserving heightForWidth."""
        policy = QtWidgets.QSizePolicy(horizontal, vertical)
        policy.setHorizontalStretch(0)
        policy.setVerticalStretch(0)
        policy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
        widget.setSizePolicy(policy)

    def _boldFont(self, family, point_size=None):
        """Return a bold (weight 75) QFont of *family*."""
        font = QtGui.QFont()
        font.setFamily(family)
        if point_size is not None:
            font.setPointSize(point_size)
        font.setBold(True)
        font.setWeight(75)
        return font

    def _makeButton(self, object_name, max_width):
        """Create one of the grey, fixed-size push buttons."""
        button = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self._applySizePolicy(button, QtWidgets.QSizePolicy.Fixed,
                              QtWidgets.QSizePolicy.Fixed)
        button.setMaximumSize(QtCore.QSize(max_width, 16777215))
        button.setFont(self._boldFont("맑은 고딕"))
        button.setStyleSheet("background-color: rgb(200, 200, 200)")
        button.setFlat(False)
        button.setObjectName(object_name)
        return button

    def _makeCheckBox(self, object_name):
        """Create one of the pre-checked option checkboxes."""
        box = QtWidgets.QCheckBox(self.verticalLayoutWidget)
        self._applySizePolicy(box, QtWidgets.QSizePolicy.Preferred,
                              QtWidgets.QSizePolicy.Fixed)
        font = QtGui.QFont()
        font.setFamily("굴림")
        box.setFont(font)
        box.setChecked(True)
        box.setObjectName(object_name)
        return box

    def _makeLabel(self, object_name, horizontal_policy):
        """Create a fixed-height label with the given horizontal policy."""
        label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self._applySizePolicy(label, horizontal_policy, QtWidgets.QSizePolicy.Fixed)
        label.setObjectName(object_name)
        return label

    def _makeLineEdit(self, object_name, max_length):
        """Create a 35px-wide numeric entry field."""
        edit = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        edit.setMinimumSize(QtCore.QSize(35, 0))
        edit.setMaximumSize(QtCore.QSize(35, 16777215))
        edit.setMaxLength(max_length)
        edit.setObjectName(object_name)
        return edit

    def retranslateUi(self, Form):
        """Apply all translatable user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.selectboardButton.setText(_translate("Form", "게시판 선택"))
        self.plasterWordButton.setText(_translate("Form", "문자열"))
        self.articleplasterCheckBox.setText(_translate("Form", "게시글"))
        self.commentplasterCheckBox.setText(_translate("Form", "댓글"))
        self.promptremoveCheckBox.setText(_translate("Form", "즉시 삭제"))
        self.isanonymFlag.setText(_translate("Form", "익명"))
        self.label_3.setText(_translate("Form", "실패시 재시도 횟수"))
        self.retryLineEdit.setText(_translate("Form", "1"))
        self.label_4.setText(_translate("Form", "번"))
        self.label_5.setText(_translate("Form", "반복 횟수"))
        self.iterationLineEdit.setText(_translate("Form", "4"))
        self.label_6.setText(_translate("Form", "번"))
        self.articleRadioButton.setText(_translate("Form", "글 기준"))
        self.stringRadioButton.setText(_translate("Form", "문자열 기준"))
        self.startplatsterButton.setText(_translate("Form", "Go!"))
        self.cancelplasterButton.setText(_translate("Form", "중단"))
if __name__ == "__main__":
    # Manual test entry point: show the generated form standalone.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| 56.307143 | 114 | 0.736141 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(231, 223)
self.verticalLayoutWidget = QtWidgets.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 231, 221))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(5, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setContentsMargins(11, -1, -1, -1)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.searchednicknameLabel = QtWidgets.QLabel(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.searchednicknameLabel.sizePolicy().hasHeightForWidth())
self.searchednicknameLabel.setSizePolicy(sizePolicy)
self.searchednicknameLabel.setMinimumSize(QtCore.QSize(0, 28))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.searchednicknameLabel.setFont(font)
self.searchednicknameLabel.setText("")
self.searchednicknameLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.searchednicknameLabel.setObjectName("searchednicknameLabel")
self.verticalLayout_2.addWidget(self.searchednicknameLabel)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(10, 2, 15, 2)
self.horizontalLayout.setSpacing(10)
self.horizontalLayout.setObjectName("horizontalLayout")
self.selectboardButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.selectboardButton.sizePolicy().hasHeightForWidth())
self.selectboardButton.setSizePolicy(sizePolicy)
self.selectboardButton.setMaximumSize(QtCore.QSize(80, 16777215))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setBold(True)
font.setWeight(75)
self.selectboardButton.setFont(font)
self.selectboardButton.setStyleSheet("background-color: rgb(200, 200, 200)")
self.selectboardButton.setFlat(False)
self.selectboardButton.setObjectName("selectboardButton")
self.horizontalLayout.addWidget(self.selectboardButton)
self.plasterWordButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plasterWordButton.sizePolicy().hasHeightForWidth())
self.plasterWordButton.setSizePolicy(sizePolicy)
self.plasterWordButton.setMinimumSize(QtCore.QSize(0, 0))
self.plasterWordButton.setMaximumSize(QtCore.QSize(60, 16777215))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setBold(True)
font.setWeight(75)
self.plasterWordButton.setFont(font)
self.plasterWordButton.setStyleSheet("background-color: rgb(200, 200, 200)")
self.plasterWordButton.setFlat(False)
self.plasterWordButton.setObjectName("plasterWordButton")
self.horizontalLayout.addWidget(self.plasterWordButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(15, -1, -1, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.articleplasterCheckBox = QtWidgets.QCheckBox(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.articleplasterCheckBox.sizePolicy().hasHeightForWidth())
self.articleplasterCheckBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("굴림")
self.articleplasterCheckBox.setFont(font)
self.articleplasterCheckBox.setChecked(True)
self.articleplasterCheckBox.setObjectName("articleplasterCheckBox")
self.horizontalLayout_3.addWidget(self.articleplasterCheckBox)
self.commentplasterCheckBox = QtWidgets.QCheckBox(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.commentplasterCheckBox.sizePolicy().hasHeightForWidth())
self.commentplasterCheckBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("굴림")
self.commentplasterCheckBox.setFont(font)
self.commentplasterCheckBox.setChecked(True)
self.commentplasterCheckBox.setObjectName("commentplasterCheckBox")
self.horizontalLayout_3.addWidget(self.commentplasterCheckBox)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(15, -1, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.promptremoveCheckBox = QtWidgets.QCheckBox(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.promptremoveCheckBox.sizePolicy().hasHeightForWidth())
self.promptremoveCheckBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("굴림")
self.promptremoveCheckBox.setFont(font)
self.promptremoveCheckBox.setChecked(True)
self.promptremoveCheckBox.setObjectName("promptremoveCheckBox")
self.horizontalLayout_6.addWidget(self.promptremoveCheckBox)
self.isanonymFlag = QtWidgets.QCheckBox(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.isanonymFlag.sizePolicy().hasHeightForWidth())
self.isanonymFlag.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("굴림")
self.isanonymFlag.setFont(font)
self.isanonymFlag.setChecked(True)
self.isanonymFlag.setObjectName("isanonymFlag")
self.horizontalLayout_6.addWidget(self.isanonymFlag)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(5, -1, 0, -1)
self.horizontalLayout_4.setSpacing(12)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
self.retryLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.retryLineEdit.setMinimumSize(QtCore.QSize(35, 0))
self.retryLineEdit.setMaximumSize(QtCore.QSize(35, 16777215))
self.retryLineEdit.setMaxLength(2)
self.retryLineEdit.setObjectName("retryLineEdit")
self.horizontalLayout_4.addWidget(self.retryLineEdit)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setObjectName("label_4")
self.horizontalLayout_4.addWidget(self.label_4)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(5, -1, 0, -1)
self.horizontalLayout_5.setSpacing(12)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setObjectName("label_5")
self.horizontalLayout_5.addWidget(self.label_5)
self.iterationLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.iterationLineEdit.setMinimumSize(QtCore.QSize(35, 0))
self.iterationLineEdit.setMaximumSize(QtCore.QSize(35, 16777215))
self.iterationLineEdit.setMaxLength(14)
self.iterationLineEdit.setObjectName("iterationLineEdit")
self.horizontalLayout_5.addWidget(self.iterationLineEdit)
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setObjectName("label_6")
self.horizontalLayout_5.addWidget(self.label_6)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(4, -1, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.articleRadioButton = QtWidgets.QRadioButton(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.articleRadioButton.sizePolicy().hasHeightForWidth())
self.articleRadioButton.setSizePolicy(sizePolicy)
self.articleRadioButton.setChecked(True)
self.articleRadioButton.setObjectName("articleRadioButton")
self.cycleGroup = QtWidgets.QButtonGroup(Form)
self.cycleGroup.setObjectName("cycleGroup")
self.cycleGroup.addButton(self.articleRadioButton)
self.horizontalLayout_7.addWidget(self.articleRadioButton)
self.stringRadioButton = QtWidgets.QRadioButton(self.verticalLayoutWidget)
self.stringRadioButton.setObjectName("stringRadioButton")
self.cycleGroup.addButton(self.stringRadioButton)
self.horizontalLayout_7.addWidget(self.stringRadioButton)
self.verticalLayout.addLayout(self.horizontalLayout_7)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.startplatsterButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startplatsterButton.sizePolicy().hasHeightForWidth())
self.startplatsterButton.setSizePolicy(sizePolicy)
self.startplatsterButton.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setBold(True)
font.setWeight(75)
self.startplatsterButton.setFont(font)
self.startplatsterButton.setStyleSheet("background-color: rgb(200, 200, 200)")
self.startplatsterButton.setFlat(False)
self.startplatsterButton.setObjectName("startplatsterButton")
self.horizontalLayout_2.addWidget(self.startplatsterButton)
self.cancelplasterButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cancelplasterButton.sizePolicy().hasHeightForWidth())
self.cancelplasterButton.setSizePolicy(sizePolicy)
self.cancelplasterButton.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setFamily("맑은 고딕")
font.setBold(True)
font.setWeight(75)
self.cancelplasterButton.setFont(font)
self.cancelplasterButton.setStyleSheet("background-color: rgb(200, 200, 200)")
self.cancelplasterButton.setFlat(False)
self.cancelplasterButton.setObjectName("cancelplasterButton")
self.horizontalLayout_2.addWidget(self.cancelplasterButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.selectboardButton.setText(_translate("Form", "게시판 선택"))
self.plasterWordButton.setText(_translate("Form", "문자열"))
self.articleplasterCheckBox.setText(_translate("Form", "게시글"))
self.commentplasterCheckBox.setText(_translate("Form", "댓글"))
self.promptremoveCheckBox.setText(_translate("Form", "즉시 삭제"))
self.isanonymFlag.setText(_translate("Form", "익명"))
self.label_3.setText(_translate("Form", "실패시 재시도 횟수"))
self.retryLineEdit.setText(_translate("Form", "1"))
self.label_4.setText(_translate("Form", "번"))
self.label_5.setText(_translate("Form", "반복 횟수"))
self.iterationLineEdit.setText(_translate("Form", "4"))
self.label_6.setText(_translate("Form", "번"))
self.articleRadioButton.setText(_translate("Form", "글 기준"))
self.stringRadioButton.setText(_translate("Form", "문자열 기준"))
self.startplatsterButton.setText(_translate("Form", "Go!"))
self.cancelplasterButton.setText(_translate("Form", "중단"))
if __name__ == "__main__":
    # Manual preview entry point: show the generated form standalone.
    # (This module is pyuic5 output; the block is only for quick visual checks.)
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    # exec_() blocks until the window closes; its exit code is forwarded.
    sys.exit(app.exec_())
| true | true |
1c347f8817ee448d054612a79d5d81ae0a082ff1 | 4,500 | py | Python | nengo/tests/test_simulator.py | ConorPQuinn/NengoDecimal | ef798db409417b23da6dcda761654b93a2b44342 | [
"BSD-2-Clause"
] | null | null | null | nengo/tests/test_simulator.py | ConorPQuinn/NengoDecimal | ef798db409417b23da6dcda761654b93a2b44342 | [
"BSD-2-Clause"
] | null | null | null | nengo/tests/test_simulator.py | ConorPQuinn/NengoDecimal | ef798db409417b23da6dcda761654b93a2b44342 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import numpy as np
import pytest

import nengo
import nengo.simulator
from nengo.builder import Model
from nengo.builder.node import build_pyfunc
from nengo.builder.operator import Copy, Reset, DotInc, SimNoise
from nengo.builder.signal import Signal
from nengo.utils.compat import range, iteritems
def test_steps(RefSimulator):
    """n_steps starts at zero and advances by exactly one per step() call."""
    sim = RefSimulator(nengo.Network(label="test_steps"))
    assert sim.n_steps == 0
    for expected in (1, 2):
        sim.step()
        assert sim.n_steps == expected
def test_time_steps(RefSimulator):
    """The built-in __time__ signal advances by dt (0.001) on every step."""
    sim = RefSimulator(nengo.Network(label="test_time_steps"))
    assert np.allclose(sim.signals["__time__"], 0.00)
    for expected_time in (0.001, 0.002):
        sim.step()
        assert np.allclose(sim.signals["__time__"], expected_time)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_dtype(RefSimulator, seed, dtype):
    """Every built signal should carry the dtype requested of the simulator."""
    with nengo.Network() as model:
        stim = nengo.Node([0.5, -0.4])
        ens = nengo.Ensemble(10, 2)
        nengo.Connection(stim, ens)
        nengo.Probe(ens)

    sim = RefSimulator(model, dtype=dtype)
    for name, signal in iteritems(sim.signals):
        assert signal.dtype == dtype, "Signal '%s', wrong dtype" % name
def test_time_absolute(Simulator):
    """trange() reports absolute times, starting one dt after zero."""
    sim = Simulator(nengo.Network())
    sim.run(0.003)
    assert np.allclose(sim.trange(), [0.001, 0.002, 0.003])
def test_trange_with_probes(Simulator):
    """len(trange(sample_every)) must match each probe's collected data."""
    dt = 1e-3
    sample_periods = dt * np.arange(1, 21)
    model = nengo.Network()
    with model:
        source = nengo.Node(output=np.sin)
        probes = [nengo.Probe(source, sample_every=p, synapse=5 * p)
                  for p in sample_periods]

    sim = Simulator(model, dt=dt)
    sim.run(0.333)
    for probe, period in zip(probes, sample_periods):
        assert len(sim.trange(period)) == len(sim.data[probe])
def test_signal_indexing_1(RefSimulator):
    """Sliced Signal views must read from (and track updates to) their base.

    `three` is reversed each step via `tmp`; `one` and `two` are rebuilt from
    slices of `three` every step, so their values flip as `three` reverses.
    """
    one = Signal(np.zeros(1), name="a")
    two = Signal(np.zeros(2), name="b")
    three = Signal(np.zeros(3), name="c")
    tmp = Signal(np.zeros(3), name="tmp")
    m = Model(dt=0.001)
    m.operators += [
        Reset(one), Reset(two), Reset(tmp),
        # one <- 1 * three[0];  two <- 2 * three[1:]
        DotInc(Signal(1, name="A1"), three[:1], one),
        DotInc(Signal(2.0, name="A2"), three[1:], two),
        # tmp <- anti-diagonal matrix @ three, i.e. three reversed ...
        DotInc(
            Signal([[0, 0, 1], [0, 1, 0], [1, 0, 0]], name="A3"), three, tmp),
        # ... then written back into three at the end of the step.
        Copy(src=tmp, dst=three, as_update=True),
    ]
    sim = RefSimulator(None, model=m)
    sim.signals[three] = np.asarray([1, 2, 3])
    sim.step()
    assert np.all(sim.signals[one] == 1)
    assert np.all(sim.signals[two] == [4, 6])
    assert np.all(sim.signals[three] == [3, 2, 1])  # reversed by the update
    sim.step()
    assert np.all(sim.signals[one] == 3)
    assert np.all(sim.signals[two] == [4, 2])
    assert np.all(sim.signals[three] == [1, 2, 3])  # reversed back
def test_simple_pyfunc(RefSimulator):
    """A built python-function op computes sin(t) while `time` accrues dt."""
    dt = 0.001
    time = Signal(np.zeros(1), name="time")
    sig = Signal(np.zeros(1), name="sig")
    m = Model(dt=dt)
    # Build an operator evaluating the lambda (1-d input, 1-d output);
    # returns the signals feeding into / coming out of the function.
    sig_in, sig_out = build_pyfunc(m, lambda t, x: np.sin(x), True, 1, 1, None)
    m.operators += [
        Reset(sig),
        DotInc(Signal([[1.0]]), time, sig_in),   # feed the current time in
        DotInc(Signal([[1.0]]), sig_out, sig),   # copy sin(t) into `sig`
        DotInc(Signal(dt), Signal(1), time, as_update=True),  # time += dt
    ]
    sim = RefSimulator(None, model=m)
    for i in range(5):
        sim.step()
        t = i * dt
        # The function sees the pre-step time; `time` is already advanced.
        assert np.allclose(sim.signals[sig], np.sin(t))
        assert np.allclose(sim.signals[time], t + dt)
def test_probedict():
    """ProbeDict exposes raw values as arrays through both [] and .get()."""
    raw = {"scalar": 5, "list": [2, 4, 6]}
    wrapped = nengo.simulator.ProbeDict(raw)
    for key in raw:
        assert np.all(wrapped[key] == np.asarray(raw[key]))
    assert np.all(wrapped.get("list") == np.asarray(raw.get("list")))
def test_noise(RefSimulator, seed):
    """Make sure that we can generate noise properly.

    Runs a SimNoise op driven by a Gaussian process for 100 steps and checks
    the empirical distribution of all samples against the target pdf.
    """
    n = 1000
    mean, std = 0.1, 0.8
    noise = Signal(np.zeros(n), name="noise")
    process = nengo.processes.StochasticProcess(
        nengo.dists.Gaussian(mean, std))
    m = Model(dt=0.001)
    m.operators += [Reset(noise), SimNoise(noise, process)]
    sim = RefSimulator(None, model=m, seed=seed)
    samples = np.zeros((100, n))
    for i in range(100):
        sim.step()
        samples[i] = sim.signals[noise]
    # Histogram of all 100*n samples vs. the analytic Gaussian density.
    h, xedges = np.histogram(samples.flat, bins=51)
    x = 0.5 * (xedges[:-1] + xedges[1:])  # bin centres
    dx = np.diff(xedges)                  # bin widths
    z = 1./np.sqrt(2 * np.pi * std**2) * np.exp(-0.5 * (x - mean)**2 / std**2)
    y = h / float(h.sum()) / dx           # normalise histogram to a density
    # Loose atol: this is a statistical check, not an exact one.
    assert np.allclose(y, z, atol=0.02)
| 30.405405 | 79 | 0.606 | import numpy as np
import nengo
import nengo.simulator
from nengo.builder import Model
from nengo.builder.node import build_pyfunc
from nengo.builder.operator import Copy, Reset, DotInc, SimNoise
from nengo.builder.signal import Signal
from nengo.utils.compat import range, iteritems
def test_steps(RefSimulator):
m = nengo.Network(label="test_steps")
sim = RefSimulator(m)
assert sim.n_steps == 0
sim.step()
assert sim.n_steps == 1
sim.step()
assert sim.n_steps == 2
def test_time_steps(RefSimulator):
m = nengo.Network(label="test_time_steps")
sim = RefSimulator(m)
assert np.allclose(sim.signals["__time__"], 0.00)
sim.step()
assert np.allclose(sim.signals["__time__"], 0.001)
sim.step()
assert np.allclose(sim.signals["__time__"], 0.002)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_dtype(RefSimulator, seed, dtype):
with nengo.Network() as model:
u = nengo.Node([0.5, -0.4])
a = nengo.Ensemble(10, 2)
nengo.Connection(u, a)
nengo.Probe(a)
sim = RefSimulator(model, dtype=dtype)
for k, v in iteritems(sim.signals):
assert v.dtype == dtype, "Signal '%s', wrong dtype" % k
def test_time_absolute(Simulator):
m = nengo.Network()
sim = Simulator(m)
sim.run(0.003)
assert np.allclose(sim.trange(), [0.001, 0.002, 0.003])
def test_trange_with_probes(Simulator):
dt = 1e-3
m = nengo.Network()
periods = dt * np.arange(1, 21)
with m:
u = nengo.Node(output=np.sin)
probes = [nengo.Probe(u, sample_every=p, synapse=5*p) for p in periods]
sim = Simulator(m, dt=dt)
sim.run(0.333)
for i, p in enumerate(periods):
assert len(sim.trange(p)) == len(sim.data[probes[i]])
def test_signal_indexing_1(RefSimulator):
one = Signal(np.zeros(1), name="a")
two = Signal(np.zeros(2), name="b")
three = Signal(np.zeros(3), name="c")
tmp = Signal(np.zeros(3), name="tmp")
m = Model(dt=0.001)
m.operators += [
Reset(one), Reset(two), Reset(tmp),
DotInc(Signal(1, name="A1"), three[:1], one),
DotInc(Signal(2.0, name="A2"), three[1:], two),
DotInc(
Signal([[0, 0, 1], [0, 1, 0], [1, 0, 0]], name="A3"), three, tmp),
Copy(src=tmp, dst=three, as_update=True),
]
sim = RefSimulator(None, model=m)
sim.signals[three] = np.asarray([1, 2, 3])
sim.step()
assert np.all(sim.signals[one] == 1)
assert np.all(sim.signals[two] == [4, 6])
assert np.all(sim.signals[three] == [3, 2, 1])
sim.step()
assert np.all(sim.signals[one] == 3)
assert np.all(sim.signals[two] == [4, 2])
assert np.all(sim.signals[three] == [1, 2, 3])
def test_simple_pyfunc(RefSimulator):
dt = 0.001
time = Signal(np.zeros(1), name="time")
sig = Signal(np.zeros(1), name="sig")
m = Model(dt=dt)
sig_in, sig_out = build_pyfunc(m, lambda t, x: np.sin(x), True, 1, 1, None)
m.operators += [
Reset(sig),
DotInc(Signal([[1.0]]), time, sig_in),
DotInc(Signal([[1.0]]), sig_out, sig),
DotInc(Signal(dt), Signal(1), time, as_update=True),
]
sim = RefSimulator(None, model=m)
for i in range(5):
sim.step()
t = i * dt
assert np.allclose(sim.signals[sig], np.sin(t))
assert np.allclose(sim.signals[time], t + dt)
def test_probedict():
raw = {"scalar": 5,
"list": [2, 4, 6]}
probedict = nengo.simulator.ProbeDict(raw)
assert np.all(probedict["scalar"] == np.asarray(raw["scalar"]))
assert np.all(probedict.get("list") == np.asarray(raw.get("list")))
def test_noise(RefSimulator, seed):
n = 1000
mean, std = 0.1, 0.8
noise = Signal(np.zeros(n), name="noise")
process = nengo.processes.StochasticProcess(
nengo.dists.Gaussian(mean, std))
m = Model(dt=0.001)
m.operators += [Reset(noise), SimNoise(noise, process)]
sim = RefSimulator(None, model=m, seed=seed)
samples = np.zeros((100, n))
for i in range(100):
sim.step()
samples[i] = sim.signals[noise]
h, xedges = np.histogram(samples.flat, bins=51)
x = 0.5 * (xedges[:-1] + xedges[1:])
dx = np.diff(xedges)
z = 1./np.sqrt(2 * np.pi * std**2) * np.exp(-0.5 * (x - mean)**2 / std**2)
y = h / float(h.sum()) / dx
assert np.allclose(y, z, atol=0.02)
| true | true |
1c3480f4bf36cf025a44cc3f87ffafe292096841 | 464 | py | Python | mapshader/tests/test_transforms.py | SapirLastimoza-Dooley/mapshader | 9a7a893dd3fdfa7e20666d32c3788003393ffa10 | [
"MIT"
] | 1 | 2021-02-01T18:03:49.000Z | 2021-02-01T18:03:49.000Z | mapshader/tests/test_transforms.py | SapirLastimoza-Dooley/mapshader | 9a7a893dd3fdfa7e20666d32c3788003393ffa10 | [
"MIT"
] | null | null | null | mapshader/tests/test_transforms.py | SapirLastimoza-Dooley/mapshader | 9a7a893dd3fdfa7e20666d32c3788003393ffa10 | [
"MIT"
] | null | null | null | import json
from os import path
from io import BytesIO
import pytest
import xarray as xr
from datashader.transfer_functions import Image
from mapshader.sources import MapSource
from mapshader.core import render_map
from mapshader.core import render_geojson
from mapshader.sources import get_user_datasets
from mapshader.sources import elevation_source
from mapshader.tests.data import DEFAULT_SOURCES_FUNCS
# TODO: add transform tests (test_transforms.py)
| 21.090909 | 54 | 0.846983 | import json
from os import path
from io import BytesIO
import pytest
import xarray as xr
from datashader.transfer_functions import Image
from mapshader.sources import MapSource
from mapshader.core import render_map
from mapshader.core import render_geojson
from mapshader.sources import get_user_datasets
from mapshader.sources import elevation_source
from mapshader.tests.data import DEFAULT_SOURCES_FUNCS
| true | true |
1c34810189be8eac3c587ba7a479a0f467bda3e4 | 3,773 | py | Python | auth/views.py | zand-yasin/BlogBook-Backend | 01eebe2353f06261ab5045e481e10ec291b852ea | [
"MIT"
] | 3 | 2020-08-25T18:40:16.000Z | 2020-10-20T03:51:49.000Z | auth/views.py | zand-yasin/BlogBook-Backend | 01eebe2353f06261ab5045e481e10ec291b852ea | [
"MIT"
] | 12 | 2021-07-05T09:23:28.000Z | 2021-07-30T03:47:41.000Z | auth/views.py | Nandan-unni/KeyBlogs-Django-Backend | 4031e5e22fb27bf777f5f43a7faa1ed1389284dd | [
"MIT"
] | null | null | null | from rest_framework import views, status
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.utils.http import urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.shortcuts import redirect
from django.conf import settings
from django.urls import reverse
from writers.serializers import WriterSerializer, SignupSerializer
from writers.views import message
from auth.token import email_auth_token
from auth.utils import send_email
import jwt
class SignUpView(views.APIView):
    """Register a new writer and send them an email-verification link."""

    def post(self, request, *args, **kwargs):
        serializer = SignupSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.save()
            user.name = user.name.title()
            user.is_active = True
            user.save()
            message(f"{user.name} ({user.pk}) created an account.")
            # Build a signed verification link from a short-lived access
            # token and email it to the new account's address.
            token = RefreshToken.for_user(user).access_token
            link = f"""{settings.API_URL}{reverse("verify_email")}?token={token}"""
            status_code = send_email(
                {
                    "email_subject": "Confirm your email",
                    "email_file": "mails/confirm_mail.html",
                    "email_data": {"token_link": link},
                },
                user,
                "Email auth",
            )
            # Relay the mailer's status code straight to the client.
            return Response(status=status_code)
        # NOTE(review): validation failures answer with HTTP 203, not 400 —
        # consistent with the other views in this module; confirm the client
        # expects that.
        message(serializer.errors)
        return Response(
            data=serializer.errors, status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION
        )
class SignInView(views.APIView):
    """Log a writer in, provided their email address has been verified."""

    def post(self, request, *args, **kwargs):
        credentials = request.data
        user = authenticate(
            username=credentials.get("email", None),
            password=credentials.get("password", None),
        )
        if user is None:
            # Bad credentials (or inactive account) — no session is created.
            message("User not found.")
            return Response(status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION)
        if not user.is_email_verified:
            return Response(
                status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION,
                data={
                    "msg": "A verification mail is send to your email address. Please verify your email address to Login."
                },
            )
        login(request, user)
        message(f"{user.name} ({user.pk}) logged in.")
        return Response(status=status.HTTP_200_OK, data=WriterSerializer(user).data)
class SignOutView(views.APIView):
    """Log the writer identified by the URL's pk out of the session."""

    def get(self, request, **kwargs):
        writer = get_user_model().objects.get(pk=kwargs["pk"])
        message(f"{writer.name} ({writer.pk}) logged out. ")
        logout(request)
        return Response(status=status.HTTP_200_OK)
class VerifyEmailView(views.APIView):
    """Activate an account from the token embedded in the emailed link."""

    def get(self, request, *args, **kwargs):
        token = request.GET.get("token")
        try:
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
            # NOTE(review): reads claim "user_pk", but simplejwt's default
            # claim is "user_id" — confirm SIMPLE_JWT["USER_ID_CLAIM"] is
            # configured as "user_pk" so SignUpView's tokens carry it.
            user = get_user_model().objects.get(pk=payload["user_pk"])
        except (jwt.exceptions.InvalidSignatureError, get_user_model().DoesNotExist):
            # NOTE(review): expired or malformed tokens raise other jwt
            # errors (ExpiredSignatureError, DecodeError) that are NOT
            # caught here and would surface as a 500 — confirm intended.
            user = None
        if user is not None:
            user.is_email_verified = True
            message(f"{user.name} ({user.pk}) activated their account.")
            user.save()
            # Send the browser to the client app's success page.
            link = f"{settings.CLIENT_URL}/emailconfirmation/success/{user.pk}/"
            return redirect(link)
        message("Invalid email verification link recieved.")  # sic: typo kept (log-only string)
        link = f"{settings.CLIENT_URL}/emailconfirmation/failure/"
        return redirect(link)
| 38.111111 | 122 | 0.627087 | from rest_framework import views, status
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.utils.http import urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.shortcuts import redirect
from django.conf import settings
from django.urls import reverse
from writers.serializers import WriterSerializer, SignupSerializer
from writers.views import message
from auth.token import email_auth_token
from auth.utils import send_email
import jwt
class SignUpView(views.APIView):
def post(self, request, *args, **kwargs):
serializer = SignupSerializer(data=request.data)
if serializer.is_valid():
user = serializer.save()
user.name = user.name.title()
user.is_active = True
user.save()
message(f"{user.name} ({user.pk}) created an account.")
token = RefreshToken.for_user(user).access_token
link = f"""{settings.API_URL}{reverse("verify_email")}?token={token}"""
status_code = send_email(
{
"email_subject": "Confirm your email",
"email_file": "mails/confirm_mail.html",
"email_data": {"token_link": link},
},
user,
"Email auth",
)
return Response(status=status_code)
message(serializer.errors)
return Response(
data=serializer.errors, status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION
)
class SignInView(views.APIView):
def post(self, request, *args, **kwargs):
data = request.data
user = authenticate(
username=data.get("email", None), password=data.get("password", None)
)
if user is not None:
if user.is_email_verified:
login(request, user)
message(f"{user.name} ({user.pk}) logged in.")
serializer = WriterSerializer(user)
return Response(status=status.HTTP_200_OK, data=serializer.data)
return Response(
status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION,
data={
"msg": "A verification mail is send to your email address. Please verify your email address to Login."
},
)
message("User not found.")
return Response(status=status.HTTP_203_NON_AUTHORITATIVE_INFORMATION)
class SignOutView(views.APIView):
def get(self, request, **kwargs):
user = get_user_model().objects.get(pk=kwargs["pk"])
message(f"{user.name} ({user.pk}) logged out. ")
logout(request)
return Response(status=status.HTTP_200_OK)
class VerifyEmailView(views.APIView):
def get(self, request, *args, **kwargs):
token = request.GET.get("token")
try:
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
user = get_user_model().objects.get(pk=payload["user_pk"])
except (jwt.exceptions.InvalidSignatureError, get_user_model().DoesNotExist):
user = None
if user is not None:
user.is_email_verified = True
message(f"{user.name} ({user.pk}) activated their account.")
user.save()
link = f"{settings.CLIENT_URL}/emailconfirmation/success/{user.pk}/"
return redirect(link)
message("Invalid email verification link recieved.")
link = f"{settings.CLIENT_URL}/emailconfirmation/failure/"
return redirect(link)
| true | true |
1c3481d8f7be27a6d80eacb0aadf14080eca9bc0 | 2,665 | py | Python | tests/ons-mock/server.py | uk-gov-mirror/alphagov.govuk-shielded-vulnerable-people-service | 5b191980dec554155e9d431a514a945072032e7c | [
"MIT"
] | 3 | 2020-08-16T19:36:26.000Z | 2020-10-29T14:35:01.000Z | tests/ons-mock/server.py | uk-gov-mirror/alphagov.govuk-shielded-vulnerable-people-service | 5b191980dec554155e9d431a514a945072032e7c | [
"MIT"
] | 101 | 2020-09-03T11:10:00.000Z | 2021-10-01T03:03:46.000Z | tests/ons-mock/server.py | alphagov-mirror/govuk-shielded-vulnerable-people-service | f9cb4ae9046fc402f0878503733a23d42546cc53 | [
"MIT"
] | 6 | 2020-07-28T09:03:20.000Z | 2021-04-10T18:04:56.000Z | from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import re
from fake_os_places_api_entry import FakeOSPlacesAPIEntry
# Known test postcodes mapped to UPRNs.
# NOTE(review): currently unreferenced by OnsMockHandler below — presumably
# kept for future mock responses; confirm before deleting.
_postcode_to_uprn = {"LS287TQ": 10000000,
                     "BB11TA": 1000000,
                     "LE674AY": 1000,
                     "L244AD": 2000,
                     "LU11AA": 10000001,
                     "QJ57VC": 3000}
class OnsMockHandler(BaseHTTPRequestHandler):
    """Minimal mock of the OS Places postcode-lookup API.

    Any GET with a ``postcode=...`` query parameter returns a single fake
    LPI result; the special postcode ``QJ57VC`` answers HTTP 400 with no
    body, to exercise callers' error handling.
    """

    def _set_response(self):
        # Standard headers for a successful JSON response.
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        self.end_headers()

    def do_GET(self):
        # Extract the postcode from the query string,
        # e.g. ".../postcode?postcode=LS287TQ&dataset=LPI".
        postcode_re = re.compile("postcode=([A-Za-z0-9 ]*)&")
        postcode = postcode_re.search(self.path).group(1)
        if postcode == "QJ57VC":
            # Simulated upstream failure: headers only.
            # BUGFIX: previously `data` stayed None and fell through to the
            # write below, so a 400 response carried a literal "null" body.
            self.send_response(400)
            self.send_header('Content-type', 'text/json')
            self.end_headers()
            return
        self._set_response()
        data = {
            "header": {
                "uri": f'https://api.ordnancesurvey.co.uk/places/v1/addresses/postcode?postcode={postcode}&dataset=LPI',  # noqa E501
                "query": f'postcode={postcode}',
                "offset": 0,
                "totalresults": 1,
                "format": "JSON",
                "dataset": "LPI",
                "lr": "EN,CY",
                "maxresults": 100,
                "epoch": "78",
                "output_srs": "EPSG:27700"
            },
            "results": [FakeOSPlacesAPIEntry(
                postcode=postcode,
                city="London",
                street="Carnegie Street",
                door_number="1",
                building_type="Terraced",
                uprn=10000000,
                usrn=10000000,
                postal_address_code="D",
                lpi_key="1111A111111111",
                x_coordinate="000000.0",
                y_coordinate="000000.0",
                local_custodian_code=1000,
                topography_layer_toid='osgb01234567891234',
                last_update_date='01/02/1942',
                entry_date='01/02/1942',
                blpu_state_date='01/02/1942'
            ).to_json()]
        }
        self.wfile.write(json.dumps(data).encode('utf-8'))
# Serve the mock on all interfaces, port 8000, until the process is killed.
server_address = ('', 8000)
httpd = HTTPServer(server_address, OnsMockHandler)
httpd.serve_forever()
| 38.071429 | 141 | 0.454034 | from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import re
from fake_os_places_api_entry import FakeOSPlacesAPIEntry
# Postcode -> UPRN fixtures.  NOTE(review): unused by the handler below.
_postcode_to_uprn = {"LS287TQ": 10000000,
                     "BB11TA": 1000000,
                     "LE674AY": 1000,
                     "L244AD": 2000,
                     "LU11AA": 10000001,
                     "QJ57VC": 3000}
class OnsMockHandler(BaseHTTPRequestHandler):
    """Mock OS Places postcode-lookup endpoint for tests.

    GET requests with ``postcode=<value>&`` in the query string get a
    canned single-address payload; the postcode ``QJ57VC`` simulates an
    upstream 400 error (whose body is the JSON literal ``null``).
    """

    def _set_response(self):
        """Emit a 200 status line plus the JSON content-type header."""
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        self.end_headers()

    def do_GET(self):
        """Serve one canned response for the postcode in the query string."""
        requested = re.search("postcode=([A-Za-z0-9 ]*)&", self.path).group(1)
        if requested == "QJ57VC":
            # Simulated upstream failure: 400, body becomes JSON `null`.
            self.send_response(400)
            self.send_header('Content-type', 'text/json')
            self.end_headers()
            payload = None
        else:
            self._set_response()
            payload = {
                "header": {
                    "uri": f'https://api.ordnancesurvey.co.uk/places/v1/addresses/postcode?postcode={requested}&dataset=LPI',
                    "query": f'postcode={requested}',
                    "offset": 0,
                    "totalresults": 1,
                    "format": "JSON",
                    "dataset": "LPI",
                    "lr": "EN,CY",
                    "maxresults": 100,
                    "epoch": "78",
                    "output_srs": "EPSG:27700"
                },
                "results": [self._fake_entry(requested).to_json()],
            }
        self.wfile.write(json.dumps(payload).encode('utf-8'))

    @staticmethod
    def _fake_entry(postcode):
        """Build the single fixed fake address entry for `postcode`."""
        return FakeOSPlacesAPIEntry(
            postcode=postcode,
            city="London",
            street="Carnegie Street",
            door_number="1",
            building_type="Terraced",
            uprn=10000000,
            usrn=10000000,
            postal_address_code="D",
            lpi_key="1111A111111111",
            x_coordinate="000000.0",
            y_coordinate="000000.0",
            local_custodian_code=1000,
            topography_layer_toid='osgb01234567891234',
            last_update_date='01/02/1942',
            entry_date='01/02/1942',
            blpu_state_date='01/02/1942',
        )
# Entry point: serve the mock on port 8000 until killed.
server_address = ('', 8000)
httpd = HTTPServer(server_address, OnsMockHandler)
httpd.serve_forever()
| true | true |
1c3481fdb8ef31e875f8f06ce2d01a73abf4bb77 | 7,882 | py | Python | tensorflow/contrib/keras/python/keras/utils/layer_utils.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 384 | 2017-02-21T18:38:04.000Z | 2022-02-22T07:30:25.000Z | tensorflow/contrib/keras/python/keras/utils/layer_utils.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 15 | 2017-03-01T20:18:43.000Z | 2020-05-07T10:33:51.000Z | udacity-car/lib/python2.7/site-packages/tensorflow/contrib/keras/python/keras/utils/layer_utils.py | 808brick/CarND-Capstone | f9e536b4a9d96322d7e971073602c8969dbd9369 | [
"MIT"
] | 81 | 2017-02-21T19:31:19.000Z | 2022-02-22T07:30:24.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.conv_utils import convert_kernel
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a summary of a model.
  Arguments:
      model: Keras model instance.
      line_length: Total length of printed lines
          (e.g. set this to adapt the display to different
          terminal window sizes).
      positions: Relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
      print_fn: Print function to use (defaults to `print`).
          It will be called on each line of the summary.
          You can set it to a custom function
          in order to capture the string summary.
  """
  if print_fn is None:
    print_fn = print
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  else:
    # Assume sequential until a branching node proves otherwise.
    sequential_like = True
    for v in model.nodes_by_depth.values():
      if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
        # If the model has multiple nodes or if the nodes have
        # multiple inbound_layers, the model is no longer sequential.
        sequential_like = False
        break
  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      # Relative positions: scale into absolute column offsets.
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 100
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
  relevant_nodes = []
  for v in model.nodes_by_depth.values():
    relevant_nodes += v
  def print_row(fields, positions):
    # Render one table row, truncating/padding each field to its column.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        # Drop the last pad char so adjacent columns keep one separator space.
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print_fn(line)
  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)
  def print_layer_summary(layer):
    # Row for a layer of a sequential-like model (no connection column).
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)
  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer.
    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer.inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # node is not part of the current network
        continue
      for i in range(len(node.inbound_layers)):
        inbound_layer = node.inbound_layers[i].name
        inbound_node_index = node.node_indices[i]
        inbound_tensor_index = node.tensor_indices[i]
        connections.append(inbound_layer + '[' + str(inbound_node_index) + ']['
                           + str(inbound_tensor_index) + ']')
    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), first_connection
    ]
    print_row(fields, positions)
    if len(connections) > 1:
      # Extra connections get their own rows with empty leading columns.
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)
  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)
  # set() deduplicates weights shared between layers before counting.
  trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
  non_trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)
def convert_all_kernels_in_model(model):
  """Convert every convolution kernel in `model` between Theano and
  TensorFlow layouts.

  The transformation is its own inverse, so it also converts from
  TensorFlow back to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # SeparableConvolution is deliberately omitted: it is TF-only, so its
  # kernels never need a Theano<->TF flip.
  convertible = ('Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose')
  updates = [
      (layer.kernel, convert_kernel(K.get_value(layer.kernel)))
      for layer in model.layers
      if layer.__class__.__name__ in convertible
  ]
  K.batch_set_value(updates)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.

  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer (applied to the last
  convolutional feature map) followed by a `Dense` layer, the weights of
  that `Dense` layer should be updated to reflect the new dimension
  ordering.

  Arguments:
      dense: The target `Dense` layer.
      previous_feature_map_shape: A shape tuple of 3 integers,
          e.g. `(512, 7, 7)`. The shape of the convolutional
          feature map right before the `Flatten` layer that
          came before the target `Dense` layer.
      target_data_format: One of "channels_last", "channels_first".
          Set it "channels_last" if converting a "channels_first"
          model to "channels_last", or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  flat_size = (np.prod(previous_feature_map_shape),)
  for col in range(kernel.shape[1]):
    # Reinterpret this kernel column in the *source* layout, permute the
    # axes into the target layout, then flatten it back in place.
    if target_data_format == 'channels_first':
      c, h, w = previous_feature_map_shape
      source_shape = (h, w, c)
      axes = (2, 0, 1)  # channels-last -> channels-first
    else:
      h, w, c = previous_feature_map_shape
      source_shape = (c, h, w)
      axes = (1, 2, 0)  # channels-first -> channels-last
    permuted = np.transpose(kernel[:, col].reshape(source_shape), axes)
    kernel[:, col] = np.reshape(permuted, flat_size)
  dense.set_weights([kernel, bias])
| 35.827273 | 81 | 0.65897 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.conv_utils import convert_kernel
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a table summarizing `model`'s layers, shapes and param counts.

  Arguments:
      model: Keras model instance.
      line_length: total width of printed lines (terminal adaptation).
      positions: relative or absolute column positions per line;
          defaults depend on whether the model is sequential-like.
      print_fn: print function to use (defaults to `print`); called once
          per output line, so it can be used to capture the summary.
  """
  if print_fn is None:
    print_fn = print
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  else:
    # Assume sequential until a branching node proves otherwise.
    sequential_like = True
    for v in model.nodes_by_depth.values():
      if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
        # Multiple nodes per depth, or multiple inbound layers per node,
        # means the graph is not a simple chain.
        sequential_like = False
        break
  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      # Relative positions: scale into absolute column offsets.
      positions = [int(line_length * p) for p in positions]
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 100
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
  relevant_nodes = []
  for v in model.nodes_by_depth.values():
    relevant_nodes += v
  def print_row(fields, positions):
    # Render one table row, truncating/padding each field to its column.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print_fn(line)
  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)
  def print_layer_summary(layer):
    # Row for a layer of a sequential-like model (no connection column).
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)
  def print_layer_summary_with_connections(layer):
    # Row(s) for a layer of a graph model, including inbound connections.
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer.inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # Node is not part of the current network.
        continue
      for i in range(len(node.inbound_layers)):
        inbound_layer = node.inbound_layers[i].name
        inbound_node_index = node.node_indices[i]
        inbound_tensor_index = node.tensor_indices[i]
        connections.append(inbound_layer + '[' + str(inbound_node_index) + ']['
                           + str(inbound_tensor_index) + ']')
    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), first_connection
    ]
    print_row(fields, positions)
    if len(connections) > 1:
      # Extra connections get their own rows with empty leading columns.
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)
  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)
  # set() deduplicates weights shared between layers before counting.
  trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
  non_trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)
def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in `model` from Theano to TensorFlow
  layout (the transform is its own inverse, so TF->Theano also works).

  Arguments:
      model: target model for the conversion.
  """
  # SeparableConvolution omitted: it is TF-only, so no conversion needed.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Reorders a Dense layer's kernel after a convnet `data_format` change.

  When a `Flatten` layer feeds the target `Dense` layer, each kernel column
  must be permuted to match the new channel ordering of the flattened
  feature map.

  Arguments:
      dense: the target `Dense` layer (must expose get/set_weights).
      previous_feature_map_shape: 3-int shape of the feature map right
          before the `Flatten`, e.g. `(512, 7, 7)`.
      target_data_format: "channels_last" or "channels_first" — the
          layout being converted *to*.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  for i in range(kernel.shape[1]):
    # Reinterpret the column in the source layout, permute, flatten back.
    if target_data_format == 'channels_first':
      c, h, w = previous_feature_map_shape
      original_fm_shape = (h, w, c)
      ki = kernel[:, i].reshape(original_fm_shape)
      ki = np.transpose(ki, (2, 0, 1))  # channels-last -> channels-first
    else:
      h, w, c = previous_feature_map_shape
      original_fm_shape = (c, h, w)
      ki = kernel[:, i].reshape(original_fm_shape)
      ki = np.transpose(ki, (1, 2, 0))  # channels-first -> channels-last
    kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
  dense.set_weights([kernel, bias])
| true | true |
1c34825a8ee5c966c486000b4d04fc9340575a66 | 1,903 | py | Python | Commands/doublexp/doublexp.py | Chromeilion/kyoshi | ee7a448dde73831edbd0bc5e829cdf77f3a9a20d | [
"MIT"
] | 1 | 2021-07-18T15:21:09.000Z | 2021-07-18T15:21:09.000Z | Commands/doublexp/doublexp.py | Chromeilion/kyoshi | ee7a448dde73831edbd0bc5e829cdf77f3a9a20d | [
"MIT"
] | 6 | 2021-07-18T14:37:07.000Z | 2021-09-27T15:51:13.000Z | Commands/doublexp/doublexp.py | Chromeilion/kyoshi | ee7a448dde73831edbd0bc5e829cdf77f3a9a20d | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from Systems.levelsys import levelling
import os
from Systems.gettext_init import GettextInit
# Set up environment variables:
# Raises KeyError at import time if any of these variables is unset —
# presumably intentional fail-fast; confirm against deployment docs.
PREFIX = os.environ["BOT_PREFIX"]
ERROR_EMB_COLOUR = discord.Colour(int(os.environ["ERROR_EMB_COLOUR"]))
SUCCESS_EMB_COLOUR = discord.Colour(int(os.environ["SUCCESS_EMB_COLOUR"]))
# Set up gettext
_ = GettextInit(__file__).generate()
# Spam system class
class doublexp(commands.Cog):
    """Admin-only cog that stores which role earns double XP for a server.

    The role name is persisted in the ``levelling`` Mongo collection under
    the ``double_xp_role`` key of the server's document.
    """
    def __init__(self, client):
        self.client = client
    # Double-XP role command
    @commands.command()
    @commands.has_permissions(administrator=True)
    async def doublexp(self, ctx, *, role=None):
        stats = levelling.find_one({"server": ctx.guild.id})
        if stats is None:
            # First use on this server: create the record, then fall through
            # so the supplied role is applied and the user gets a reply.
            # (Previously this branch returned silently and dropped `role`.)
            levelling.insert_one({"server": ctx.guild.id, "double_xp_role": " "})
        if role is None:
            embed2 = discord.Embed(title=_(":x: SETUP FAILED"),
                                   description=_("You need to enter a role name!"),
                                   colour=ERROR_EMB_COLOUR)
            embed2.add_field(name=_("Example:"), value=PREFIX + _("doublexp <rolename>"))
            await ctx.send(embed=embed2)
        elif role:
            levelling.update_one({"server": ctx.guild.id}, {"$set": {"double_xp_role": role}})
            embed = discord.Embed(title=_(":white_check_mark: DOUBLE XP ROLE!"),
                                  description=_("The new Double XP Role:") + role,
                                  colour=SUCCESS_EMB_COLOUR)
            await ctx.send(embed=embed)
    doublexp.__doc__ = _('''\ndoublexp <rolename> \n\nAbout:\nThe DoubleXP command will let you set what role will earn
x2 XP *Admin Only*''')
# Sets-up the cog for help
def setup(client):
    """Extension entry point — presumably called by ``bot.load_extension``;
    registers the doublexp cog on the client."""
    client.add_cog(doublexp(client))
| 36.596154 | 116 | 0.612717 | import discord
from discord.ext import commands
from Systems.levelsys import levelling
import os
from Systems.gettext_init import GettextInit
# Configuration from environment (fails fast with KeyError if unset).
PREFIX = os.environ["BOT_PREFIX"]
ERROR_EMB_COLOUR = discord.Colour(int(os.environ["ERROR_EMB_COLOUR"]))
SUCCESS_EMB_COLOUR = discord.Colour(int(os.environ["SUCCESS_EMB_COLOUR"]))
# Translation helper.
_ = GettextInit(__file__).generate()
class doublexp(commands.Cog):
    """Admin-only cog that stores which role earns double XP for a server."""
    def __init__(self, client):
        self.client = client
    @commands.command()
    @commands.has_permissions(administrator=True)
    async def doublexp(self, ctx, *, role=None):
        # NOTE(review): when no record exists yet, this inserts a placeholder
        # and returns without using `role` or replying — likely a bug; the
        # role is only stored on the *second* invocation.  Confirm intent.
        stats = levelling.find_one({"server": ctx.guild.id})
        if stats is None:
            newserver = {"server": ctx.guild.id, "double_xp_role": " "}
            levelling.insert_one(newserver)
        else:
            if role is None:
                # No role supplied: reply with usage help.
                embed2 = discord.Embed(title=_(":x: SETUP FAILED"),
                                       description=_("You need to enter a role name!"),
                                       colour=ERROR_EMB_COLOUR)
                embed2.add_field(name=_("Example:"), value=PREFIX + _("doublexp <rolename>"))
                await ctx.send(embed=embed2)
            elif role:
                # Persist the new double-XP role and confirm to the user.
                levelling.update_one({"server": ctx.guild.id}, {"$set": {"double_xp_role": role}})
                embed = discord.Embed(title=_(":white_check_mark: DOUBLE XP ROLE!"),
                                      description=_("The new Double XP Role:") + role,
                                      colour=SUCCESS_EMB_COLOUR)
                await ctx.send(embed=embed)
    # Help text shown by the bot's help command.
    doublexp.__doc__ = _('''\ndoublexp <rolename> \n\nAbout:\nThe DoubleXP command will let you set what role will earn
x2 XP *Admin Only*''')
def setup(client):
    """Extension entry point: registers the doublexp cog on the client."""
    client.add_cog(doublexp(client))
| true | true |
1c3482a0f1bb99d8764898b5f1cd9e655b4f5b36 | 3,567 | py | Python | gobiko/apns/exceptions.py | belkka/python-apns | 35b0962eb50faf99d678d42ccec8cc3013a60eac | [
"MIT"
] | null | null | null | gobiko/apns/exceptions.py | belkka/python-apns | 35b0962eb50faf99d678d42ccec8cc3013a60eac | [
"MIT"
] | null | null | null | gobiko/apns/exceptions.py | belkka/python-apns | 35b0962eb50faf99d678d42ccec8cc3013a60eac | [
"MIT"
] | 1 | 2018-08-27T04:04:02.000Z | 2018-08-27T04:04:02.000Z |
class APNsException(Exception):
    """Base class for every error raised by the APNs client."""
    pass
class InternalException(APNsException):
    """An unexpected internal client-side failure."""
    pass
class ImproperlyConfigured(APNsException):
    """The client was constructed with invalid or missing settings."""
    pass
class BadCollapseId(APNsException):
    "The collapse identifier exceeds the maximum allowed size"
    pass
class BadDeviceToken(APNsException):
    "The specified device token was bad. Verify that the request contains a valid token and that the token matches the environment."
    pass
class BadExpirationDate(APNsException):
    "The apns-expiration value is bad."
    pass
class BadMessageId(APNsException):
    "The apns-id value is bad."
    pass
class PartialBulkMessage(APNsException):
    """A bulk send partially failed.

    Attributes:
        bad_registration_ids: the device tokens the message was not sent to.
    """
    def __init__(self, message, bad_registration_ids):
        # Fix: the original targeted APNsException in super(), which skips
        # APNsException itself in the MRO; target this class instead.
        super(PartialBulkMessage, self).__init__(message)
        self.bad_registration_ids = bad_registration_ids
# One exception per APNs error reason string / HTTP status condition.
class BadPriority(APNsException):
    """The apns-priority value is bad."""
class BadTopic(APNsException):
    """The apns-topic was invalid."""
class DeviceTokenNotForTopic(APNsException):
    """The device token does not match the specified topic."""
class DuplicateHeaders(APNsException):
    """One or more headers were repeated."""
class IdleTimeout(APNsException):
    """Idle time out."""
class MissingDeviceToken(APNsException):
    """The device token is not specified in the request :path. Verify that the :path header contains the device token."""
class MissingTopic(APNsException):
    """The apns-topic header of the request was not specified and was required. The apns-topic header is mandatory when the client is connected using a certificate that supports multiple topics."""
class PayloadEmpty(APNsException):
    """The message payload was empty."""
class TopicDisallowed(APNsException):
    """Pushing to this topic is not allowed."""
class BadCertificate(APNsException):
    """The certificate was bad."""
class BadCertificateEnvironment(APNsException):
    """The client certificate was for the wrong environment."""
class ExpiredProviderToken(APNsException):
    """The provider token is stale and a new token should be generated."""
class Forbidden(APNsException):
    """The specified action is not allowed."""
class InvalidProviderToken(APNsException):
    """The provider token is not valid or the token signature could not be verified."""
class MissingProviderToken(APNsException):
    """No provider certificate was used to connect to APNs and Authorization header was missing or no provider token was specified."""
class BadPath(APNsException):
    """The request contained a bad :path value."""
class MethodNotAllowed(APNsException):
    """The specified :method was not POST."""
class Unregistered(APNsException):
    """The device token is inactive for the specified topic. Expected HTTP/2 status code is 410; see Table 8-4."""
class PayloadTooLarge(APNsException):
    """The message payload was too large. See Creating the Remote Notification Payload for details on maximum payload size."""
class TooManyProviderTokenUpdates(APNsException):
    """The provider token is being updated too often."""
class TooManyRequests(APNsException):
    """Too many requests were made consecutively to the same device token."""
class InternalServerError(APNsException):
    """An internal server error occurred."""
class ServiceUnavailable(APNsException):
    """The service is unavailable."""
class Shutdown(APNsException):
    """The server is shutting down."""
| 22.575949 | 193 | 0.746285 |
class APNsException(Exception):
    """Base class for every error raised by the APNs client."""
class InternalException(APNsException):
    """An unexpected internal client-side failure."""
class ImproperlyConfigured(APNsException):
    """The client was constructed with invalid or missing settings."""
class BadCollapseId(APNsException):
    """The collapse identifier exceeds the maximum allowed size."""
class BadDeviceToken(APNsException):
    """The specified device token was bad; verify it matches the environment."""
class BadExpirationDate(APNsException):
    """The apns-expiration value is bad."""
class BadMessageId(APNsException):
    """The apns-id value is bad."""
class PartialBulkMessage(APNsException):
    """A bulk send partially failed.

    Attributes:
        bad_registration_ids: the device tokens the message was not sent to.
    """
    def __init__(self, message, bad_registration_ids):
        # Fix: the original targeted APNsException in super(), which skips
        # APNsException itself in the MRO; target this class instead.
        super(PartialBulkMessage, self).__init__(message)
        self.bad_registration_ids = bad_registration_ids
# One exception per APNs error reason string / HTTP status condition.
class BadPriority(APNsException):
    """The apns-priority value is bad."""
class BadTopic(APNsException):
    """The apns-topic was invalid."""
class DeviceTokenNotForTopic(APNsException):
    """The device token does not match the specified topic."""
class DuplicateHeaders(APNsException):
    """One or more headers were repeated."""
class IdleTimeout(APNsException):
    """Idle time out."""
class MissingDeviceToken(APNsException):
    """The device token is not specified in the request :path."""
class MissingTopic(APNsException):
    """The apns-topic header was required but not specified."""
class PayloadEmpty(APNsException):
    """The message payload was empty."""
class TopicDisallowed(APNsException):
    """Pushing to this topic is not allowed."""
class BadCertificate(APNsException):
    """The certificate was bad."""
class BadCertificateEnvironment(APNsException):
    """The client certificate was for the wrong environment."""
class ExpiredProviderToken(APNsException):
    """The provider token is stale and a new token should be generated."""
class Forbidden(APNsException):
    """The specified action is not allowed."""
class InvalidProviderToken(APNsException):
    """The provider token is not valid or its signature could not be verified."""
class MissingProviderToken(APNsException):
    """Neither a provider certificate nor a provider token was supplied."""
class BadPath(APNsException):
    """The request contained a bad :path value."""
class MethodNotAllowed(APNsException):
    """The specified :method was not POST."""
class Unregistered(APNsException):
    """The device token is inactive for the specified topic (HTTP 410)."""
class PayloadTooLarge(APNsException):
    """The message payload was too large."""
class TooManyProviderTokenUpdates(APNsException):
    """The provider token is being updated too often."""
class TooManyRequests(APNsException):
    """Too many requests were made consecutively to the same device token."""
class InternalServerError(APNsException):
    """An internal server error occurred."""
class ServiceUnavailable(APNsException):
    """The service is unavailable."""
class Shutdown(APNsException):
    """The server is shutting down."""
| true | true |
1c348353cae1e5d2994c4be5b943e32ee0ffda79 | 9,868 | py | Python | tests/providers/microsoft/azure/transfers/test_sftp_to_wasb.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 27 | 2019-02-25T14:20:36.000Z | 2022-03-22T09:35:13.000Z | tests/providers/microsoft/azure/transfers/test_sftp_to_wasb.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 200 | 2019-01-09T15:33:06.000Z | 2022-01-12T09:13:42.000Z | tests/providers/microsoft/azure/transfers/test_sftp_to_wasb.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 14 | 2019-06-25T17:08:29.000Z | 2022-03-29T13:25:53.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow import AirflowException
from airflow.providers.microsoft.azure.transfers.sftp_to_wasb import SftpFile, SFTPToWasbOperator
# Shared fixtures for the SFTP -> WASB operator tests.
# NOTE(review): TASK_ID says "gcs-to-sftp" — looks like a copy-paste from
# another transfer test; harmless, but confirm before renaming.
TASK_ID = "test-gcs-to-sftp-operator"
WASB_CONN_ID = "wasb_default"
SFTP_CONN_ID = "ssh_default"
CONTAINER_NAME = "test-container"
WILDCARD_PATH = "main_dir/*"
WILDCARD_FILE_NAME = "main_dir/test_object*.json"
SOURCE_PATH_NO_WILDCARD = "main_dir/"
SOURCE_OBJECT_MULTIPLE_WILDCARDS = "main_dir/csv/*/test_*.csv"
BLOB_PREFIX = "sponge-bob"
EXPECTED_BLOB_NAME = "test_object3.json"
EXPECTED_FILES = [SOURCE_PATH_NO_WILDCARD + EXPECTED_BLOB_NAME]
class TestSFTPToWasbOperator(unittest.TestCase):
    """Unit tests for SFTPToWasbOperator with SFTPHook/WasbHook mocked out.

    Note on @mock.patch ordering: decorators apply bottom-up, so the
    SFTPHook mock is always the first injected argument (`sftp_hook`) and
    the WasbHook mock the second (`mock_hook`).
    """
    def test_init(self):
        # Constructor should store all connection/path settings verbatim.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            blob_prefix=BLOB_PREFIX,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        assert operator.sftp_source_path == SOURCE_PATH_NO_WILDCARD
        assert operator.sftp_conn_id == SFTP_CONN_ID
        assert operator.container_name == CONTAINER_NAME
        assert operator.wasb_conn_id == WASB_CONN_ID
        assert operator.blob_prefix == BLOB_PREFIX
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook', autospec=True)
    def test_execute_more_than_one_wildcard_exception(self, mock_hook):
        # More than one '*' in the source path must be rejected.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            blob_prefix=BLOB_PREFIX,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        with self.assertRaises(AirflowException) as cm:
            operator.check_wildcards_limit()
        err = cm.exception
        assert "Only one wildcard '*' is allowed" in str(err)
    def test_get_sftp_tree_behavior(self):
        # A trailing-'*' path splits into (base dir, prefix, empty delimiter).
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=WILDCARD_PATH,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
        assert sftp_complete_path == 'main_dir', "not matched at expected complete path"
        assert prefix == 'main_dir/', "Prefix must be EQUAL TO wildcard"
        assert delimiter == "", "Delimiter must be empty"
    def test_get_sftp_tree_behavior_without_wildcard(self):
        # A plain directory path yields no prefix and no delimiter.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
        assert sftp_complete_path == 'main_dir/', "not matched at expected complete path"
        assert prefix is None, "Prefix must be NONE when no wildcard"
        assert delimiter is None, "Delimiter must be none"
    def test_source_path_contains_wildcard(self):
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=WILDCARD_PATH,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        output = operator.source_path_contains_wildcard
        assert output is True, "This path contains a wildpath"
    def test_source_path_not_contains_wildcard(self):
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        output = operator.source_path_contains_wildcard
        assert output is False, "This path does not contains a wildpath"
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
    def test_get_sftp_files_map_no_wildcard(self, sftp_hook, mock_hook):
        # get_tree_map returns (files, dirs, unknowns); only files matter here.
        sftp_hook.return_value.get_tree_map.return_value = [
            EXPECTED_FILES,
            [],
            [],
        ]
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=True,
        )
        files = operator.get_sftp_files_map()
        assert len(files) == 1, "no matched at expected found files"
        assert files[0].blob_name == EXPECTED_BLOB_NAME, "expected blob name not matched"
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
    def test_copy_files_to_wasb(self, sftp_hook, mock_hook):
        # Each SftpFile should be retrieved over SFTP and uploaded via WasbHook.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=True,
        )
        sftp_files = [SftpFile(EXPECTED_FILES[0], EXPECTED_BLOB_NAME)]
        files = operator.copy_files_to_wasb(sftp_files)
        operator.sftp_hook.retrieve_file.assert_has_calls([mock.call("main_dir/test_object3.json", mock.ANY)])
        mock_hook.return_value.load_file.assert_called_once_with(
            mock.ANY, CONTAINER_NAME, EXPECTED_BLOB_NAME, overwrite=False
        )
        assert len(files) == 1, "no matched at expected uploaded files"
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
    def test_delete_files(self, sftp_hook):
        # delete_files must remove each given remote path via the SFTP hook.
        sftp_mock = sftp_hook.return_value
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=SOURCE_PATH_NO_WILDCARD,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=True,
        )
        sftp_file_paths = EXPECTED_FILES
        operator.delete_files(sftp_file_paths)
        sftp_mock.delete_file.assert_has_calls([mock.call(EXPECTED_FILES[0])])
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
    def test_execute(self, sftp_hook, mock_hook):
        # End-to-end copy (move_object=False): upload happens, no deletion.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=WILDCARD_FILE_NAME,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=False,
        )
        sftp_hook.return_value.get_tree_map.return_value = [
            ["main_dir/test_object.json"],
            [],
            [],
        ]
        operator.execute(None)
        sftp_hook.return_value.get_tree_map.assert_called_with(
            "main_dir", prefix="main_dir/test_object", delimiter=".json"
        )
        sftp_hook.return_value.retrieve_file.assert_has_calls(
            [mock.call("main_dir/test_object.json", mock.ANY)]
        )
        mock_hook.return_value.load_file.assert_called_once_with(
            mock.ANY, CONTAINER_NAME, "test_object.json", overwrite=False
        )
        sftp_hook.return_value.delete_file.assert_not_called()
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
    @mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
    def test_execute_moved_files(self, sftp_hook, mock_hook):
        # End-to-end move (move_object=True): upload with prefix, then delete.
        operator = SFTPToWasbOperator(
            task_id=TASK_ID,
            sftp_source_path=WILDCARD_FILE_NAME,
            sftp_conn_id=SFTP_CONN_ID,
            container_name=CONTAINER_NAME,
            wasb_conn_id=WASB_CONN_ID,
            move_object=True,
            blob_prefix=BLOB_PREFIX,
        )
        sftp_hook.return_value.get_tree_map.return_value = [
            ["main_dir/test_object.json"],
            [],
            [],
        ]
        operator.execute(None)
        sftp_hook.return_value.get_tree_map.assert_called_with(
            "main_dir", prefix="main_dir/test_object", delimiter=".json"
        )
        sftp_hook.return_value.retrieve_file.assert_has_calls(
            [mock.call("main_dir/test_object.json", mock.ANY)]
        )
        mock_hook.return_value.load_file.assert_called_once_with(
            mock.ANY, CONTAINER_NAME, BLOB_PREFIX + "test_object.json", overwrite=False
        )
        assert sftp_hook.return_value.delete_file.called is True, "File must be moved"
| 38.396887 | 110 | 0.681901 |
import unittest
from unittest import mock
from airflow import AirflowException
from airflow.providers.microsoft.azure.transfers.sftp_to_wasb import SftpFile, SFTPToWasbOperator
# Shared fixtures for the SFTP -> WASB operator tests.
TASK_ID = "test-gcs-to-sftp-operator"
WASB_CONN_ID = "wasb_default"
SFTP_CONN_ID = "ssh_default"
CONTAINER_NAME = "test-container"
WILDCARD_PATH = "main_dir/*"
WILDCARD_FILE_NAME = "main_dir/test_object*.json"
SOURCE_PATH_NO_WILDCARD = "main_dir/"
SOURCE_OBJECT_MULTIPLE_WILDCARDS = "main_dir/csv/*/test_*.csv"
BLOB_PREFIX = "sponge-bob"
EXPECTED_BLOB_NAME = "test_object3.json"
EXPECTED_FILES = [SOURCE_PATH_NO_WILDCARD + EXPECTED_BLOB_NAME]
class TestSFTPToWasbOperator(unittest.TestCase):
def test_init(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
blob_prefix=BLOB_PREFIX,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
assert operator.sftp_source_path == SOURCE_PATH_NO_WILDCARD
assert operator.sftp_conn_id == SFTP_CONN_ID
assert operator.container_name == CONTAINER_NAME
assert operator.wasb_conn_id == WASB_CONN_ID
assert operator.blob_prefix == BLOB_PREFIX
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook', autospec=True)
def test_execute_more_than_one_wildcard_exception(self, mock_hook):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
blob_prefix=BLOB_PREFIX,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
with self.assertRaises(AirflowException) as cm:
operator.check_wildcards_limit()
err = cm.exception
assert "Only one wildcard '*' is allowed" in str(err)
def test_get_sftp_tree_behavior(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_PATH,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
assert sftp_complete_path == 'main_dir', "not matched at expected complete path"
assert prefix == 'main_dir/', "Prefix must be EQUAL TO wildcard"
assert delimiter == "", "Delimiter must be empty"
def test_get_sftp_tree_behavior_without_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
assert sftp_complete_path == 'main_dir/', "not matched at expected complete path"
assert prefix is None, "Prefix must be NONE when no wildcard"
assert delimiter is None, "Delimiter must be none"
def test_source_path_contains_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_PATH,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
output = operator.source_path_contains_wildcard
assert output is True, "This path contains a wildpath"
def test_source_path_not_contains_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
output = operator.source_path_contains_wildcard
assert output is False, "This path does not contains a wildpath"
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
def test_get_sftp_files_map_no_wildcard(self, sftp_hook, mock_hook):
sftp_hook.return_value.get_tree_map.return_value = [
EXPECTED_FILES,
[],
[],
]
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
)
files = operator.get_sftp_files_map()
assert len(files) == 1, "no matched at expected found files"
assert files[0].blob_name == EXPECTED_BLOB_NAME, "expected blob name not matched"
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
def test_copy_files_to_wasb(self, sftp_hook, mock_hook):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
)
sftp_files = [SftpFile(EXPECTED_FILES[0], EXPECTED_BLOB_NAME)]
files = operator.copy_files_to_wasb(sftp_files)
operator.sftp_hook.retrieve_file.assert_has_calls([mock.call("main_dir/test_object3.json", mock.ANY)])
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, EXPECTED_BLOB_NAME, overwrite=False
)
assert len(files) == 1, "no matched at expected uploaded files"
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
def test_delete_files(self, sftp_hook):
sftp_mock = sftp_hook.return_value
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
)
sftp_file_paths = EXPECTED_FILES
operator.delete_files(sftp_file_paths)
sftp_mock.delete_file.assert_has_calls([mock.call(EXPECTED_FILES[0])])
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
def test_execute(self, sftp_hook, mock_hook):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_FILE_NAME,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
sftp_hook.return_value.get_tree_map.return_value = [
["main_dir/test_object.json"],
[],
[],
]
operator.execute(None)
sftp_hook.return_value.get_tree_map.assert_called_with(
"main_dir", prefix="main_dir/test_object", delimiter=".json"
)
sftp_hook.return_value.retrieve_file.assert_has_calls(
[mock.call("main_dir/test_object.json", mock.ANY)]
)
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, "test_object.json", overwrite=False
)
sftp_hook.return_value.delete_file.assert_not_called()
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook')
@mock.patch('airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook')
def test_execute_moved_files(self, sftp_hook, mock_hook):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_FILE_NAME,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
blob_prefix=BLOB_PREFIX,
)
sftp_hook.return_value.get_tree_map.return_value = [
["main_dir/test_object.json"],
[],
[],
]
operator.execute(None)
sftp_hook.return_value.get_tree_map.assert_called_with(
"main_dir", prefix="main_dir/test_object", delimiter=".json"
)
sftp_hook.return_value.retrieve_file.assert_has_calls(
[mock.call("main_dir/test_object.json", mock.ANY)]
)
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, BLOB_PREFIX + "test_object.json", overwrite=False
)
assert sftp_hook.return_value.delete_file.called is True, "File must be moved"
| true | true |
1c34836839975671d78d43f89aec68536e835df9 | 654 | py | Python | character.py | Aposhian/mystery | 23429f0d5c207ce531edca1480455aedd15cf811 | [
"BSD-2-Clause"
] | 1 | 2020-07-27T23:54:27.000Z | 2020-07-27T23:54:27.000Z | character.py | Aposhian/mystery | 23429f0d5c207ce531edca1480455aedd15cf811 | [
"BSD-2-Clause"
] | null | null | null | character.py | Aposhian/mystery | 23429f0d5c207ce531edca1480455aedd15cf811 | [
"BSD-2-Clause"
] | 1 | 2021-11-09T19:54:33.000Z | 2021-11-09T19:54:33.000Z | from eliza import Eliza
class Character:
def __init__(self, name, avatar, sprite, scriptfile):
self.name = name
self.coordinates = (0,0)
self.avatar = avatar
self.sprite = sprite
self.eliza = Eliza()
#self.outputbox = OutputBox()
#self.inputbox = InputBox()
self.leadinfulfilled = False
with open(scriptfile) as character_script:
content = character_script.read()
self.eliza.combined_script += content
def load(self):
self.eliza.load()
def initiateDialogue(self, gameState):
# Put main function of textbox here
pass | 29.727273 | 57 | 0.608563 | from eliza import Eliza
class Character:
def __init__(self, name, avatar, sprite, scriptfile):
self.name = name
self.coordinates = (0,0)
self.avatar = avatar
self.sprite = sprite
self.eliza = Eliza()
self.leadinfulfilled = False
with open(scriptfile) as character_script:
content = character_script.read()
self.eliza.combined_script += content
def load(self):
self.eliza.load()
def initiateDialogue(self, gameState):
pass | true | true |
1c3484259cce61701ac3aec64e03dc08151fe4b5 | 17,771 | py | Python | onmt/utils/loss.py | USE-sum/usesum | eaf6dae0c451459551f728c0a8866777c20ed707 | [
"MIT"
] | null | null | null | onmt/utils/loss.py | USE-sum/usesum | eaf6dae0c451459551f728c0a8866777c20ed707 | [
"MIT"
] | 1 | 2020-08-16T13:47:24.000Z | 2020-08-16T13:47:24.000Z | onmt/utils/loss.py | USE-sum/usesum | eaf6dae0c451459551f728c0a8866777c20ed707 | [
"MIT"
] | null | null | null | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
import onmt.inputters as inputters
from onmt.modules.sparse_losses import SparsemaxLoss
from math import isnan
def build_loss_compute(model, tgt_vocab, opt, train=True):
"""
This returns user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, opt.copy_attn_force,
opt.copy_loss_by_seqlength, focal_gamma=opt.focal_gamma)
elif opt.model_type=="vector":
sequential_target = False
if opt.decoder_type=="vecdif_multi":
sequential_target=True
compute = AcosLoss(model.generator, tgt_vocab, model.decoder.hidden_size, device, sequential_target=sequential_target) #model.generator
else:
compute = NMTLossCompute(
model.generator, tgt_vocab,
label_smoothing=opt.label_smoothing if train else 0.0)
compute.to(device)
return compute
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
sharding next step predictions and accumulating mutiple
loss computations
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
tgt_vocab (:obj:`Vocab`) :
torchtext vocab object representing the target output
normalzation (str): normalize by "sents" or "tokens"
"""
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[inputters.PAD_WORD]
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics
"""
range_ = (0, batch.tgt.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
to_compare = batch.src[0, :1, :]
shard_state["to_compare"] = to_compare
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats
def monolithic_compute_loss_multivec(self, batch, output):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics
"""
stats = None
i = 0
for o in output:
range_ = (i, i+1)
shard_state = self._make_shard_state(batch, o, range_, None)
to_compare = batch.src[:, i, :] # to compare makes no point in validation.
shard_state["to_compare"] = to_compare
_, batch_stats = self._compute_loss(batch, **shard_state)
if stats is None:
stats = batch_stats
else:
stats.update(batch_stats)
i+=1
return stats
def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization, to_compare=None):
"""Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
to_compare (vector) - sources used for current prediction - used only in vecdiff
Returns:
:obj:`onmt.utils.Statistics`: validation loss statistics
"""
batch_stats = onmt.utils.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self._make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size):
if to_compare is not None:
shard["to_compare"]=to_compare
loss, stats = self._compute_loss(batch, **shard)
#try:
loss.div(float(normalization)).backward()
# except Exception as e:
# print("PROBLEM "+str(e))
batch_stats.update(stats)
return batch_stats
def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum() \
.item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)
def _stats_vec(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
# equal = scores.eq(target).sum().item()
# pred = scores.max(1)[1]
# non_padding = target.ne(self.padding_idx)
# num_correct = pred.eq(target) \
# .masked_select(non_padding) \
# .sum() \
# .item()
# num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), 1 ,1 ) # equal, target.size()[1])
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
assert 0.0 < label_smoothing <= 1.0
self.padding_idx = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.padding_idx] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum')
class AcosLoss(LossComputeBase):
"""
arcus cosine loss
"""
def __init__(self, generator, tgt_vocab, output_size, device, sequential_target=False):
super(AcosLoss, self).__init__(generator, tgt_vocab)
self.zero_vec = torch.zeros(1,output_size, device=device)
self.filled_vec = torch.zeros(1, output_size, device=device).fill_(0.0001)
#self.prev_vec = torch.zeros(1,output_size, device=device)
self.prev_distance = None # torch.zeros(1, 1, device=device)
self.sequential_target=sequential_target
self.lrelu = nn.LeakyReLU(0.01)
def _compute_loss(self, batch, output, target, to_compare):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
if self.generator is not None:
output = torch.squeeze(output, dim=0)
output = self.generator(output)
while len(output.size()) < len(target.size()):
output = output.unsqueeze(0)
v1 = F.cosine_similarity(output, target, dim=(len(target.size())-1) ) #torch.abs()
v2 = torch.acos(v1)
vstat = v2.clone()
if self.prev_distance is None:
self.prev_distance = torch.ones_like(v2) *1.5
if v2.size()[0]> self.prev_distance.size()[0]: # in such case,
v2 = v2[:self.prev_distance.size()[0]]
elif v2.size()[0]< self.prev_distance.size()[0]: # in such case,
self.prev_distance = self.prev_distance[:v2.size()[0]]
v3 = v2 - self.prev_distance[:v2.size()[0]] # v2/10 + F.relu remove relu ?
if self.sequential_target:
optimal_improvement = torch.abs(F.cosine_similarity(to_compare, target, dim=(len(target.size()) - 1)))
optimal_improvement = torch.acos(optimal_improvement)
if v2.size()[0] > optimal_improvement.size()[0]: # in such case,
v2 = v2[:optimal_improvement.size()[0]]
elif v2.size()[0] < optimal_improvement.size()[0]: # in such case,
optimal_improvement = optimal_improvement[:v2.size()[0]]
if v2.size()[0] != optimal_improvement.size()[0]:
print("v2 "+str(v2.size))
print("optimal_improvement " + str(optimal_improvement.size))
v3a = v2 - optimal_improvement
v4 = v3a + F.relu(v3)
else:
v4 = v3
#print(str(v2)+" \n v3="+str(v3)+" \n v3a="+str(v3a)+" \n v4="+str(v4)+"\n sum= "+str(v4.sum())+" \n\n" )
self.prev_distance = v2.detach()
#print("targe " + str(target[0,0:5]) + " outout= " + str(output[0,0:5]) + " loss = " + str(v2.item())+" final loss = "+str(v3))
stats = self._stats_vec(vstat.sum()/vstat.size()[0], output, target)
return v4.sum(), stats
def _make_shard_state(self, batch, output, range_, attns=None):
if self.sequential_target:
return {
"output": output,
"target": batch.tgt[:,range_[0]: range_[1],:].squeeze(1),
}
return {
"output": output,
"target": batch.tgt[range_[0]: range_[1]],
}
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, generator, tgt_vocab, normalization="sents",
label_smoothing=0.0):
super(NMTLossCompute, self).__init__(generator, tgt_vocab)
self.sparse = not isinstance(generator[1], nn.LogSoftmax)
self.vector = not isinstance(generator[1], nn.Sigmoid)
if label_smoothing > 0:
self.criterion = LabelSmoothingLoss(
label_smoothing, len(tgt_vocab), ignore_index=self.padding_idx
)
elif self.sparse:
self.criterion = SparsemaxLoss(
ignore_index=self.padding_idx, size_average=False
)
elif self.vector:
self.criterion = SparsemaxLoss(
ignore_index=self.padding_idx, size_average=False
)
else:
self.criterion = nn.NLLLoss(
ignore_index=self.padding_idx, reduction='sum'
)
def _make_shard_state(self, batch, output, range_, attns=None):
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
}
def _compute_loss(self, batch, output, target):
bottled_output = self._bottle(output)
if self.sparse:
# for sparsemax loss, the loss function operates on the raw output
# vector, not a probability vector. Hence it's only necessary to
# apply the first part of the generator here.
scores = self.generator[0](bottled_output)
else:
scores = self.generator(bottled_output)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
stats = self._stats(loss.clone(), scores, gtruth)
return loss, stats
def filter_shard_state(state, shard_size=None):
""" ? """
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
| 39.057143 | 143 | 0.606381 | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
import onmt.inputters as inputters
from onmt.modules.sparse_losses import SparsemaxLoss
from math import isnan
def build_loss_compute(model, tgt_vocab, opt, train=True):
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, opt.copy_attn_force,
opt.copy_loss_by_seqlength, focal_gamma=opt.focal_gamma)
elif opt.model_type=="vector":
sequential_target = False
if opt.decoder_type=="vecdif_multi":
sequential_target=True
compute = AcosLoss(model.generator, tgt_vocab, model.decoder.hidden_size, device, sequential_target=sequential_target)
else:
compute = NMTLossCompute(
model.generator, tgt_vocab,
label_smoothing=opt.label_smoothing if train else 0.0)
compute.to(device)
return compute
class LossComputeBase(nn.Module):
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[inputters.PAD_WORD]
def _make_shard_state(self, batch, output, range_, attns=None):
return NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns):
range_ = (0, batch.tgt.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
to_compare = batch.src[0, :1, :]
shard_state["to_compare"] = to_compare
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats
def monolithic_compute_loss_multivec(self, batch, output):
stats = None
i = 0
for o in output:
range_ = (i, i+1)
shard_state = self._make_shard_state(batch, o, range_, None)
to_compare = batch.src[:, i, :]
shard_state["to_compare"] = to_compare
_, batch_stats = self._compute_loss(batch, **shard_state)
if stats is None:
stats = batch_stats
else:
stats.update(batch_stats)
i+=1
return stats
def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization, to_compare=None):
batch_stats = onmt.utils.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self._make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size):
if to_compare is not None:
shard["to_compare"]=to_compare
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return batch_stats
def _stats(self, loss, scores, target):
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum() \
.item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)
def _stats_vec(self, loss, scores, target):
return onmt.utils.Statistics(loss.item(), 1 ,1 )
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
assert 0.0 < label_smoothing <= 1.0
self.padding_idx = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.padding_idx] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum')
class AcosLoss(LossComputeBase):
def __init__(self, generator, tgt_vocab, output_size, device, sequential_target=False):
super(AcosLoss, self).__init__(generator, tgt_vocab)
self.zero_vec = torch.zeros(1,output_size, device=device)
self.filled_vec = torch.zeros(1, output_size, device=device).fill_(0.0001)
self.prev_distance = None
self.sequential_target=sequential_target
self.lrelu = nn.LeakyReLU(0.01)
def _compute_loss(self, batch, output, target, to_compare):
if self.generator is not None:
output = torch.squeeze(output, dim=0)
output = self.generator(output)
while len(output.size()) < len(target.size()):
output = output.unsqueeze(0)
v1 = F.cosine_similarity(output, target, dim=(len(target.size())-1) )
v2 = torch.acos(v1)
vstat = v2.clone()
if self.prev_distance is None:
self.prev_distance = torch.ones_like(v2) *1.5
if v2.size()[0]> self.prev_distance.size()[0]:
v2 = v2[:self.prev_distance.size()[0]]
elif v2.size()[0]< self.prev_distance.size()[0]:
self.prev_distance = self.prev_distance[:v2.size()[0]]
v3 = v2 - self.prev_distance[:v2.size()[0]]
if self.sequential_target:
optimal_improvement = torch.abs(F.cosine_similarity(to_compare, target, dim=(len(target.size()) - 1)))
optimal_improvement = torch.acos(optimal_improvement)
if v2.size()[0] > optimal_improvement.size()[0]:
v2 = v2[:optimal_improvement.size()[0]]
elif v2.size()[0] < optimal_improvement.size()[0]:
optimal_improvement = optimal_improvement[:v2.size()[0]]
if v2.size()[0] != optimal_improvement.size()[0]:
print("v2 "+str(v2.size))
print("optimal_improvement " + str(optimal_improvement.size))
v3a = v2 - optimal_improvement
v4 = v3a + F.relu(v3)
else:
v4 = v3
self.prev_distance = v2.detach()
stats = self._stats_vec(vstat.sum()/vstat.size()[0], output, target)
return v4.sum(), stats
def _make_shard_state(self, batch, output, range_, attns=None):
if self.sequential_target:
return {
"output": output,
"target": batch.tgt[:,range_[0]: range_[1],:].squeeze(1),
}
return {
"output": output,
"target": batch.tgt[range_[0]: range_[1]],
}
class NMTLossCompute(LossComputeBase):
def __init__(self, generator, tgt_vocab, normalization="sents",
label_smoothing=0.0):
super(NMTLossCompute, self).__init__(generator, tgt_vocab)
self.sparse = not isinstance(generator[1], nn.LogSoftmax)
self.vector = not isinstance(generator[1], nn.Sigmoid)
if label_smoothing > 0:
self.criterion = LabelSmoothingLoss(
label_smoothing, len(tgt_vocab), ignore_index=self.padding_idx
)
elif self.sparse:
self.criterion = SparsemaxLoss(
ignore_index=self.padding_idx, size_average=False
)
elif self.vector:
self.criterion = SparsemaxLoss(
ignore_index=self.padding_idx, size_average=False
)
else:
self.criterion = nn.NLLLoss(
ignore_index=self.padding_idx, reduction='sum'
)
def _make_shard_state(self, batch, output, range_, attns=None):
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
}
def _compute_loss(self, batch, output, target):
bottled_output = self._bottle(output)
if self.sparse:
# apply the first part of the generator here.
scores = self.generator[0](bottled_output)
else:
scores = self.generator(bottled_output)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
stats = self._stats(loss.clone(), scores, gtruth)
return loss, stats
def filter_shard_state(state, shard_size=None):
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
| true | true |
1c34857d0c2d2e15ec8e14ed43ddedd917058814 | 200 | py | Python | pykrita/glTF_editor/bu_glTF/material/__init__.py | akirfin/krita_python_fun | 74173d140b39f7f80f43f9474381e4adfa3b5f01 | [
"MIT"
] | 1 | 2021-10-01T00:25:43.000Z | 2021-10-01T00:25:43.000Z | pykrita/glTF_editor/bu_glTF/material/__init__.py | akirfin/krita_python_fun | 74173d140b39f7f80f43f9474381e4adfa3b5f01 | [
"MIT"
] | null | null | null | pykrita/glTF_editor/bu_glTF/material/__init__.py | akirfin/krita_python_fun | 74173d140b39f7f80f43f9474381e4adfa3b5f01 | [
"MIT"
] | null | null | null | from .normalTextureInfo import NormalTextureInfo
from .occlusionTextureInfo import OcclusionTextureInfo
from .pbrMetallicRoughness import PbrMetallicRoughness
class Material(object):
    """glTF material node.

    Placeholder: imports the texture-info / PBR sub-objects above but
    defines no fields yet -- presumably to be filled in later.
    """
| 25 | 54 | 0.815 | from .normalTextureInfo import NormalTextureInfo
from .occlusionTextureInfo import OcclusionTextureInfo
from .pbrMetallicRoughness import PbrMetallicRoughness
class Material(object):
| true | true |
1c3486b4d3ab68bff3a647585d6669d171d9367f | 286 | py | Python | traiders/backend/api/views/token.py | rdilruba/bounswe2019group2 | b373908a4a8e92481f359297aba07245f0a23c1c | [
"Apache-2.0"
] | 11 | 2019-02-15T12:08:32.000Z | 2019-11-14T19:25:09.000Z | traiders/backend/api/views/token.py | bounswe/bounswe2019group2 | 05d41cf7b6bc1b3f994e82495d2a885a6eaa7cf3 | [
"Apache-2.0"
] | 279 | 2019-02-13T14:57:39.000Z | 2022-03-12T00:02:30.000Z | traiders/backend/api/views/token.py | rdilruba/bounswe2019group2 | b373908a4a8e92481f359297aba07245f0a23c1c | [
"Apache-2.0"
] | 13 | 2019-03-20T08:30:55.000Z | 2021-01-31T16:49:14.000Z | from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from ..serializers import TokenSerializer
class TokenViewSet(mixins.CreateModelMixin, GenericViewSet):
    """
    Create or get an already created auth token.

    Only POST is exposed (CreateModelMixin); creation vs. lookup of an
    existing token is handled by TokenSerializer -- presumably it returns
    the existing token when one exists (verify against the serializer).
    """
    serializer_class = TokenSerializer
| 23.833333 | 60 | 0.79021 | from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from ..serializers import TokenSerializer
class TokenViewSet(mixins.CreateModelMixin, GenericViewSet):
serializer_class = TokenSerializer
| true | true |
1c3486e574d48b1b6e56d62abd94484044dca39e | 64,507 | py | Python | ffiwrappers/src/arlwrap.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | [
"Apache-2.0"
] | null | null | null | ffiwrappers/src/arlwrap.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | [
"Apache-2.0"
] | null | null | null | ffiwrappers/src/arlwrap.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | [
"Apache-2.0"
] | null | null | null | # Author: Bojan Nikolic <b.nikolic@mrao.cam.ac.uk>
# ARL Wrapping Interface
# In this file we declare the functions that need to be exposed to C (wrapped) --with the callback modifier
# and the needed structs -- with cdef
import numpy
import collections
import sys
from astropy.coordinates import SkyCoord
from astropy import units as u
from processing_components.calibration.operations import apply_gaintable, create_gaintable_from_blockvisibility, qa_gaintable
from processing_components.visibility.base import create_visibility, copy_visibility
from data_models.memory_data_models import ReceptorFrame
from processing_components.image.deconvolution import deconvolve_cube, restore_cube
from processing_components.imaging.base import create_image_from_visibility, predict_2d, invert_2d
from processing_components.imaging.base import advise_wide_field
from processing_components.simulation.testing_support import create_named_configuration, create_test_image, create_low_test_image_from_gleam, simulate_gaintable
from data_models.polarisation import PolarisationFrame
from processing_components.visibility.base import create_blockvisibility
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow
from processing_components.image.operations import qa_image
from processing_components.visibility.coalesce import convert_visibility_to_blockvisibility, convert_blockvisibility_to_visibility
from processing_components.calibration.calibration import solve_gaintable
from workflows.serial.pipelines.pipeline_serial import ical_list_serial_workflow
from data_models.data_model_helpers import export_image_to_hdf5
from ffiwrappers.src.arlwrap_support import *
import logging
import os
# Output directory for result files; created up front so callbacks can
# write to it unconditionally.
results_dir = './results'
os.makedirs(results_dir, exist_ok=True)
# Root logger at INFO, echoed to stdout so the C host process sees it.
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
# Module-level error flag: 0 while everything is fine, -1 after any
# wrapped callback has raised.  Polled from C via arl_handle_error.
arl_error = 0

def handle_error(*args):
    """cffi ``onerror`` hook: record a callback failure and print it.

    ``args`` is the (exc_type, exc_value, traceback) triple cffi passes
    when a callback raises.  A falsy/empty first element is ignored.
    """
    global arl_error
    exc_type = args[0]
    if exc_type == "":
        return
    arl_error = -1
    print(exc_type, "\n", args[1], "\n", args[2])
# C-side declarations mirrored by the wrapper.  The struct layouts must
# match the C headers byte-for-byte; do not edit the strings casually.

# Flat view of an ARL Visibility/BlockVisibility: a data buffer plus the
# serialized phase centre.
ff.cdef("""
typedef struct {
size_t nvis;
int npol;
void *data;
char *phasecentre;
} ARLVis;
""")

# Gain table buffer (rows of gain/weight/residual/time data).
ff.cdef("""
typedef struct {
size_t nrows;
void *data;
} ARLGt;
""")

# Observation configuration shared by most callbacks: telescope name,
# phase centre (deg), time/frequency/bandwidth arrays with their lengths,
# array geometry counts and the polarisation frame name.
ff.cdef("""
typedef struct {
char *confname;
double pc_ra;
double pc_dec;
double *times;
int ntimes;
double *freqs;
int nfreqs;
double *channel_bandwidth;
int nchanwidth;
int nbases;
int nant;
int npol;
int nrec;
double rmax;
char *polframe;
} ARLConf;
""")

# In/out parameter block for arl_advise_wide_field: guard_band_image,
# delA and wprojection_planes go in; vis_slices, npixel and cellsize
# come back out.
ff.cdef("""
typedef struct {
int vis_slices;
int npixel;
double cellsize;
double guard_band_image;
double delA;
int wprojection_planes;
} ARLadvice ;
""")
#@ff.callback("void (*)(const ARLVis *, ARLVis *, bool)")
#def arl_copy_visibility_ffi(visin, visout, zero):
# """
# Wrap of arl.visibility.base.copy_visibility
# """
# # Extra comments becasue this is an example.
# #
# # Convert the input visibilities into the ARL structure
# nvisin=cARLVis(visin)
#
# # Call the ARL function
# tvis=copy_visibility(nvisin, zero=zero)
#
# # Copy the result into the output buffer
# visout.npol=visin.npol
# visout.nvis=visin.nvis
# nvisout=cARLVis(visout)
# numpy.copyto(nvisout, tvis)
#
#
#arl_copy_visibility=collections.namedtuple("FFIX", "address")
#arl_copy_visibility.address=int(ff.cast("size_t", arl_copy_visibility_ffi))
@ff.callback("int (*)()")
def arl_handle_error_ffi():
    """Expose the module-level error flag (0 ok, -1 failed) to C."""
    return arl_error


arl_handle_error = collections.namedtuple("FFIX", "address")
arl_handle_error.address = int(ff.cast("size_t", arl_handle_error_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_copy_visibility_ffi(lowconfig, vis_in, vis_out, zero_in):
    """Copy a C-backed Visibility via ARL copy_visibility.

    :param lowconfig: ARLConf with configuration name, axis buffers and
        polarisation frame.
    :param vis_in: input C ARLVis (read only).
    :param vis_out: output C ARLVis; data, npol, nvis and phasecentre are
        overwritten.
    :param zero_in: NOTE(review): zero_in == 0 maps to zero=True (copy
        with zeroed visibilities) -- inverted relative to C truthiness;
        confirm the convention with the C callers.
    """
    if zero_in == 0:
        zero = True
    else:
        zero = False
    # Create configuration object
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Rebuild the axis arrays as views over the C buffers (no copy).
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    # NOTE(review): c_visin is an ARLVis but is wrapped with the
    # *blockvisibility* helper here; sibling functions wrap ARLVis with
    # helper_create_visibility_object -- confirm this is intentional.
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Call the ARL function
    py_visout=copy_visibility(py_visin, zero=zero)
    # Copy the result into the output buffer
    vis_out.npol=vis_in.npol
    vis_out.nvis=vis_in.nvis
    py_vis_out = cARLVis(vis_out)
    numpy.copyto(py_vis_out, py_visout.data)
    store_phasecentre(vis_out.phasecentre, py_visin.phasecentre)

arl_copy_visibility=collections.namedtuple("FFIX", "address")
arl_copy_visibility.address=int(ff.cast("size_t", arl_copy_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_copy_blockvisibility_ffi(lowconfig, blockvis_in, blockvis_out, zero_in):
    """Copy a C-backed BlockVisibility via ARL copy_visibility.

    :param lowconfig: ARLConf with configuration name, axis buffers and
        polarisation frame.
    :param blockvis_in: input C block visibility (read only).
    :param blockvis_out: output C block visibility; data, npol, nvis and
        phasecentre are overwritten.
    :param zero_in: NOTE(review): zero_in == 0 maps to zero=True -- same
        inverted mapping as arl_copy_visibility; confirm with C callers.
    """
    if zero_in == 0:
        zero = True
    else:
        zero = False
    # Create configuration object
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-create input blockvisibility object around the C buffers
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Call the ARL function
    py_blockvisout=copy_visibility(py_blockvisin, zero=zero)
    # Copy the result into the output buffer
    blockvis_out.npol=blockvis_in.npol
    blockvis_out.nvis=blockvis_in.nvis
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)

arl_copy_blockvisibility=collections.namedtuple("FFIX", "address")
arl_copy_blockvisibility.address=int(ff.cast("size_t", arl_copy_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_set_visibility_data_to_zero_ffi(lowconfig, vis_in):
    """Zero the 'vis' column of a C-backed Visibility in place.

    :param lowconfig: ARLConf supplying the configuration name and
        polarisation frame.
    :param vis_in: C ARLVis whose data buffer is modified in place.

    Fix: removed three dead numpy.frombuffer views (times, frequency,
    channel_bandwidth) that were computed but never used.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Wrap the C buffer as a numpy recarray and rebuild the ARL object
    # around it so the write below lands in the caller's memory.
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # In-place write through the shared buffer.
    py_visin.data['vis'][...] = 0.0

arl_set_visibility_data_to_zero=collections.namedtuple("FFIX", "address")
arl_set_visibility_data_to_zero.address=int(ff.cast("size_t", arl_set_visibility_data_to_zero_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_manipulate_visibility_data_ffi(lowconfig, vis1_in, vis2_in, vis_out, operation):
    """Element-wise arithmetic on two C-backed Visibilities.

    :param vis1_in, vis2_in: input C ARLVis operands (read only).
    :param vis_out: output C ARLVis; its 'vis' column receives the result.
    :param operation: opcode -- 0 add, 1 subtract, 2 multiply, 3 divide;
        any other value zeroes the output.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # times/frequency/channel_bandwidth are unused below -- presumably
    # kept for symmetry with the other callbacks (review candidates).
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Wrap the first operand.
    c_vis1in = cARLVis(vis1_in)
    py_vis1in = helper_create_visibility_object(c_vis1in)
    py_vis1in.phasecentre = load_phasecentre(vis1_in.phasecentre)
    py_vis1in.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_vis1in.polarisation_frame = PolarisationFrame(polframe)
    # Wrap the second operand.
    c_vis2in = cARLVis(vis2_in)
    py_vis2in = helper_create_visibility_object(c_vis2in)
    py_vis2in.phasecentre = load_phasecentre(vis2_in.phasecentre)
    py_vis2in.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_vis2in.polarisation_frame = PolarisationFrame(polframe)
    # Wrap the output; writes below go straight into the C buffer.
    c_visout = cARLVis(vis_out)
    py_visout = helper_create_visibility_object(c_visout)
    py_visout.phasecentre = load_phasecentre(vis_out.phasecentre)
    py_visout.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visout.polarisation_frame = PolarisationFrame(polframe)
    print("arl_manipulate_visibility_data opcode: ", operation)
    if operation == 0: # Add
        print("arl_manipulate_visibility_data: adding")
        py_visout.data['vis'] = py_vis1in.data['vis'] + py_vis2in.data['vis']
    elif operation == 1: # Subtract
        print("arl_manipulate_visibility_data: subtracting")
        py_visout.data['vis'] = py_vis1in.data['vis'] - py_vis2in.data['vis']
    elif operation == 2: # Multiply
        print("arl_manipulate_visibility_data: multiplying")
        py_visout.data['vis'] = py_vis1in.data['vis'] * py_vis2in.data['vis']
    elif operation == 3: # Divide
        print("arl_manipulate_visibility_data: dividing")
        py_visout.data['vis'] = py_vis1in.data['vis'] / py_vis2in.data['vis']
    else:
        # Unknown opcode: zero the output rather than leaving stale data.
        py_visout.data['vis'][...] = 0.0
    print("arl_manipulate_visibility_data np.sum(vis.data): ", numpy.sum(py_visout.data['vis']), numpy.sum(py_vis1in.data['vis']), numpy.sum(py_vis2in.data['vis']))

arl_manipulate_visibility_data=collections.namedtuple("FFIX", "address")
arl_manipulate_visibility_data.address=int(ff.cast("size_t", arl_manipulate_visibility_data_ffi))
# C-side view of an ARL Image: pixel buffer, its 4-D shape, plus the
# serialized WCS and polarisation frame.  Layout must match the C header.
ff.cdef("""
typedef struct {
size_t size;
int data_shape[4];
void *data;
char *wcs;
char *polarisation_frame;
} Image;
""")
@ff.callback("void (*)(Image*, Image*)")
def arl_add_to_model_ffi(model, res):
    """Accumulate the residual image into the model image, in place."""
    model_pixels = cImage(model)
    residual_pixels = cImage(res)
    model_pixels.data += residual_pixels.data


arl_add_to_model = collections.namedtuple("FFIX", "address")
arl_add_to_model.address = int(ff.cast("size_t", arl_add_to_model_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_create_visibility_ffi(lowconfig, c_res_vis):
    """Create an ARL Visibility from lowconfig and copy it into c_res_vis.

    :param lowconfig: ARLConf with configuration name, rmax, phase centre,
        time/frequency/bandwidth buffers and polarisation frame.
    :param c_res_vis: output C ARLVis; data and phasecentre are written.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    # Temp fix for ffi_demo: rmax ~ 0 means "no rmax cut requested".
    if lowconfig.rmax < 1.0e-5 :
        lowcore = create_named_configuration(lowcore_name)
    else:
        lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Views over the C axis buffers (no copy).
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    print(lowcore_name)
    print("Times: ", times)
    print("Freqs: ", frequency)
    print("BW : ", channel_bandwidth)
    print("PCentre: ", lowconfig.pc_ra, lowconfig.pc_dec)
    phasecentre = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
                           equinox='J2000')
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    vt = create_visibility(lowcore, times, frequency,
                           channel_bandwidth=channel_bandwidth, weight=1.0,
                           phasecentre=phasecentre,
                           polarisation_frame=PolarisationFrame(polframe))
    # Copy the freshly created data into the caller-owned buffer.
    py_res_vis = cARLVis(c_res_vis)
    numpy.copyto(py_res_vis, vt.data)
    store_phasecentre(c_res_vis.phasecentre, phasecentre)

arl_create_visibility=collections.namedtuple("FFIX", "address")
arl_create_visibility.address=int(ff.cast("size_t", arl_create_visibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_create_blockvisibility_ffi(lowconfig, c_res_vis):
    """Create an ARL BlockVisibility and copy it into c_res_vis.

    :param lowconfig: ARLConf; NOTE this callback also writes back
        lowconfig.nrec (receptor count derived from the polarisation
        frame) for later gaintable sizing.
    :param c_res_vis: output C block visibility; data and phasecentre
        are written.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    print(lowconfig.rmax)
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Views over the C axis buffers (no copy).
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    print(lowcore_name)
    print("Times: ", times)
    print("Freqs: ", frequency)
    print("BW : ", channel_bandwidth)
    print("PCentre: ", lowconfig.pc_ra, lowconfig.pc_dec)
    phasecentre = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
                           equinox='J2000')
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    print("Polarisation frame: ", polframe)
    vt = create_blockvisibility(lowcore, times, frequency=frequency,
                                channel_bandwidth=channel_bandwidth, weight=1.0,
                                phasecentre=phasecentre,
                                polarisation_frame=PolarisationFrame(polframe))
    py_res_vis = cARLBlockVis(c_res_vis, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_res_vis, vt.data)
    store_phasecentre(c_res_vis.phasecentre, phasecentre)
    # Report the receptor count back to the C side via lowconfig.
    receptor_frame = ReceptorFrame(vt.polarisation_frame.type)
    lowconfig.nrec = receptor_frame.nrec

arl_create_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_blockvisibility.address=int(ff.cast("size_t", arl_create_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, long long int *, ARLVis *)", onerror=handle_error)
def arl_convert_visibility_to_blockvisibility_ffi(lowconfig, vis_in, blockvis_in, cindex_in, blockvis_out):
    """Convert a coalesced Visibility back into a BlockVisibility.

    :param vis_in: input C ARLVis (the coalesced visibility).
    :param blockvis_in: C block visibility template attached as
        vis.blockvis for the conversion.
    :param cindex_in: C int64 index buffer (nant*nant*nfreqs*ntimes
        entries) attached as vis.cindex.
    :param blockvis_out: output C block visibility; data and phasecentre
        are written.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # View over the C cindex buffer (8 bytes per entry).
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_in, 8*cindex_size), dtype='int', count=cindex_size)
    # Rebuild the Visibility around the C buffer and attach cindex.
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    py_visin.cindex = py_cindex
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Rebuild the BlockVisibility template and attach it to the vis.
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    py_visin.blockvis = py_blockvisin
    py_blockvisout = convert_visibility_to_blockvisibility(py_visin)
    print("convert_visibility_to_blockvisibility np.sum(block_vis.data): ", numpy.sum(py_blockvisout.data['vis']))
    # Copy the converted data into the caller-owned output buffer.
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)

arl_convert_visibility_to_blockvisibility=collections.namedtuple("FFIX", "address")
arl_convert_visibility_to_blockvisibility.address=int(ff.cast("size_t", arl_convert_visibility_to_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, long long int *, ARLVis *)", onerror=handle_error)
def arl_convert_blockvisibility_to_visibility_ffi(lowconfig, blockvis_in, vis_out, cindex_out, blockvis_out):
    """Coalesce a BlockVisibility into a Visibility.

    Writes three caller-owned buffers: the coalesced visibility
    (vis_out), the accompanying blockvis snapshot (blockvis_out) and
    the coalescence index (cindex_out).
    """
    # Create configuration object
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Link cindex memory objects (8 bytes per int64 entry).
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_out, 8*cindex_size), dtype='int', count=cindex_size)
    # Re-create input blockvisibility object around the C buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Call arl.coalesce::convert_blockvisibility_to_visibility()
    vis = convert_blockvisibility_to_visibility(py_blockvisin)
    # Copy vis.data to C visibility vis_out.data
    py_vis = cARLVis(vis_out)
    numpy.copyto(py_vis, vis.data)
    store_phasecentre(vis_out.phasecentre, py_blockvisin.phasecentre)
    # Copy vis.blockvis.data to C blockvisibility blockvis_out.data
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, vis.blockvis.data)
    # Copy vis.cindex to cindex_out
    numpy.copyto(py_cindex, vis.cindex)
    print("convert_blockvisibility_to_visibility np.sum(vis.data): ", numpy.sum(vis.data['vis']))

arl_convert_blockvisibility_to_visibility=collections.namedtuple("FFIX", "address")
arl_convert_blockvisibility_to_visibility.address=int(ff.cast("size_t", arl_convert_blockvisibility_to_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLGt *)", onerror=handle_error)
def arl_create_gaintable_from_blockvisibility_ffi(lowconfig, blockvis_in, gt_out):
    """Create an empty GainTable sized for blockvis_in; copy it to gt_out.

    :param blockvis_in: input C block visibility (read only).
    :param gt_out: output C ARLGt buffer (nant x nfreqs x nrec layout,
        per cARLGt) receiving the new gaintable data.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Rebuild the BlockVisibility around the C buffer.
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    py_gt = create_gaintable_from_blockvisibility(py_blockvisin)
    # Copy the new gaintable into the caller-owned buffer.
    c_gt_out = cARLGt(gt_out, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    numpy.copyto(c_gt_out, py_gt.data)

arl_create_gaintable_from_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_gaintable_from_blockvisibility.address=int(ff.cast("size_t", arl_create_gaintable_from_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLGt *)", onerror=handle_error)
def arl_simulate_gaintable_ffi(lowconfig, gt):
    """Fill a C-backed GainTable with simulated gains, in place.

    Uses a fixed phase_error of 1.0 (amplitude errors left at the ARL
    default); the result is copied back into the gt buffer.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    # Receptor frame follows from the configured polarisation frame.
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    polarisation_frame = PolarisationFrame(polframe)
    receptor_frame = ReceptorFrame(polarisation_frame.type)
    # Rebuild the GainTable around the C buffer.
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    py_gt = simulate_gaintable(py_gt, phase_error = 1.0)
    numpy.copyto(c_gt, py_gt.data)

arl_simulate_gaintable=collections.namedtuple("FFIX", "address")
arl_simulate_gaintable.address=int(ff.cast("size_t", arl_simulate_gaintable_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLGt *, ARLVis *, int )", onerror=handle_error)
def arl_apply_gaintable_ffi(lowconfig, blockvis_in, gt, blockvis_out, inverse_in):
    """Apply a GainTable to a BlockVisibility; write result to blockvis_out.

    :param inverse_in: NOTE(review): inverse_in == 0 maps to inverse=True
        -- inverted relative to C truthiness, but consistent with zero_in
        in the copy callbacks; confirm the convention with the C callers.
    """
    if inverse_in == 0:
        inverse = True
    else:
        inverse = False
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Re-creating the input BlockVisibility object
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating GainTable object
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    # Calling apply_gaintable() function
    py_blockvisout = apply_gaintable(py_blockvisin, py_gt, inverse=inverse)
    # Copy resulting data into the caller-owned output buffer.
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)

arl_apply_gaintable=collections.namedtuple("FFIX", "address")
arl_apply_gaintable.address=int(ff.cast("size_t", arl_apply_gaintable_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, ARLGt *, int )", onerror=handle_error)
def arl_apply_gaintable_ical_ffi(lowconfig, blockvis_in, gt, inverse_in):
    """ICAL variant of arl_apply_gaintable: result overwrites blockvis_in.

    :param inverse_in: same inverted 0->True mapping as
        arl_apply_gaintable -- NOTE(review): confirm with the C callers.
    """
    if inverse_in == 0:
        inverse = True
    else:
        inverse = False
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Re-creating the input BlockVisibility object
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating GainTable object
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    # Calling apply_gaintable() function
    py_blockvisout = apply_gaintable(py_blockvisin, py_gt, inverse=inverse)
    # In-place update: result goes back into the input buffer.
    numpy.copyto(c_blockvisin, py_blockvisout.data)

arl_apply_gaintable_ical=collections.namedtuple("FFIX", "address")
arl_apply_gaintable_ical.address=int(ff.cast("size_t", arl_apply_gaintable_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, ARLGt *, int )", onerror=handle_error)
def arl_solve_gaintable_ical_ffi(lowconfig, blockvis_in, blockvis_pred, gt, vis_slices):
    """Solve for a GainTable from observed vs. predicted blockvisibilities.

    :param blockvis_in: observed C block visibility (read only).
    :param blockvis_pred: model/predicted C block visibility (read only).
    :param gt: C ARLGt buffer; overwritten with the solved gaintable.
    :param vis_slices: slicing parameter forwarded to solve_gaintable.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Re-creating the input BlockVisibility object
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating the input BlockVisibility_pred object
    c_blockvispred = cARLBlockVis(blockvis_pred, lowconfig.nant, lowconfig.nfreqs)
    py_blockvispred = helper_create_blockvisibility_object(c_blockvispred, frequency, channel_bandwidth, lowcore)
    py_blockvispred.phasecentre = load_phasecentre(blockvis_pred.phasecentre)
    py_blockvispred.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvispred.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating GainTable object
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    # Calling solve_gaintable(); the deconvolution-style kwargs are
    # forwarded through **kwargs -- presumably tuned for the ICAL
    # pipeline (see workflows.serial.pipelines).
    gt_out = solve_gaintable(py_blockvisin, py_blockvispred,
            vis_slices=vis_slices, timeslice='auto',
            algorithm='hogbom', niter=1000, fractional_threshold=0.1, threshold=0.1,
            nmajor=5, gain=0.1, first_selfcal=1,
            global_solution=False)
    log.info(qa_gaintable(gt_out, context='Gaintable for selfcal cycle'))
    # Copy the solved gaintable back into the caller-owned buffer.
    numpy.copyto(c_gt, gt_out.data)

arl_solve_gaintable_ical=collections.namedtuple("FFIX", "address")
arl_solve_gaintable_ical.address=int(ff.cast("size_t", arl_solve_gaintable_ical_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, ARLadvice *)", onerror=handle_error)
def arl_advise_wide_field_ffi(lowconfig, vis_in, adv):
    """Run ARL advise_wide_field and write the advice back into adv.

    Inputs come from adv (guard_band_image, delA, wprojection_planes);
    outputs written back are adv.cellsize, adv.vis_slices, adv.npixel.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Rebuild the BlockVisibility around the C buffer.
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    print("Index :", py_visin.data['index'])
    advice=advise_wide_field(py_visin, guard_band_image=adv.guard_band_image, delA=adv.delA,
                             wprojection_planes=adv.wprojection_planes)
    print(advice['vis_slices'], advice['npixels2'], advice['cellsize'])
    # Return the advice through the in/out struct.
    adv.cellsize = advice['cellsize']
    adv.vis_slices = advice['vis_slices']
    adv.npixel = advice['npixels2']

arl_advise_wide_field=collections.namedtuple("FFIX", "address")
arl_advise_wide_field.address=int(ff.cast("size_t", arl_advise_wide_field_ffi))
# Small out-parameter struct for the helper_get_nbases* callbacks:
# antenna count and distinct-baseline count.
ff.cdef("""
typedef struct {int nant, nbases;} ant_t;
""")
# Get the number of baselines for the given configuration
# WARING!!! rmax is missing ! -ToDo
@ff.callback("void (*) (char*, ant_t *)", onerror=handle_error)
def helper_get_nbases_ffi(config_name, nbases_in):
    """Fill *nbases_in* with the antenna and baseline counts of a named configuration."""
    name = str(ff.string(config_name), 'utf-8')
    configuration = create_named_configuration(name)
    nant = len(configuration.xyz)
    nbases_in.nant = nant
    # nant antennas give nant*(nant-1)/2 baselines.
    nbases_in.nbases = int(nant * (nant - 1) / 2)
    print(name, nbases_in.nant, nbases_in.nbases)
helper_get_nbases = collections.namedtuple("FFIX", "address")
helper_get_nbases.address = int(ff.cast("size_t", helper_get_nbases_ffi))
# Get the number of antennas and baselines for the given configuration,
# restricting the array to stations within rmax metres of the centre.
@ff.callback("void (*) (char*, double, ant_t *)", onerror=handle_error)
def helper_get_nbases_rmax_ffi(config_name, rmax, nbases_in):
    """Fill *nbases_in* with antenna/baseline counts for a named configuration.

    Same as helper_get_nbases_ffi but honours the rmax cut-off.  The
    onerror=handle_error hook was missing here although every sibling
    callback registers it; added for consistent error reporting to C.
    """
    tconfig_name = str(ff.string(config_name), 'utf-8')
    lowcore = create_named_configuration(tconfig_name, rmax=rmax)
    nant = len(lowcore.xyz)
    nbases_in.nant = nant
    # n*(n-1)//2 baselines; integer arithmetic avoids float rounding.
    nbases_in.nbases = nant * (nant - 1) // 2
    print(tconfig_name, nbases_in.nant, nbases_in.nbases)
helper_get_nbases_rmax = collections.namedtuple("FFIX", "address")
helper_get_nbases_rmax.address = int(ff.cast("size_t", helper_get_nbases_rmax_ffi))
@ff.callback("void (*)(ARLConf *, double, int, int *)", onerror=handle_error)
def helper_get_image_shape_multifreq_ffi(lowconfig, cellsize, npixel, c_shape):
    """Write the 4-element (nchan, npol, ny, nx) shape of a GLEAM test image into *c_shape*."""
    freqs = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8 * lowconfig.nfreqs),
                             dtype='f8', count=lowconfig.nfreqs)
    chan_bw = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8 * lowconfig.nchanwidth),
                               dtype='f8', count=lowconfig.nchanwidth)
    print("About to start create_low_test_image_from_gleam with flux_limit = 10. to get a shape of the image")
    image = create_low_test_image_from_gleam(npixel=npixel, frequency=freqs,
                                             channel_bandwidth=chan_bw,
                                             cellsize=cellsize, flux_limit = 10.)
    # Copy the shape straight into the caller-supplied int[4] buffer.
    out_view = numpy.frombuffer(ff.buffer(c_shape, 4 * 4), dtype='i4', count=4)
    numpy.copyto(out_view, list(image.data.shape))
helper_get_image_shape_multifreq = collections.namedtuple("FFIX", "address")
helper_get_image_shape_multifreq.address = int(ff.cast("size_t", helper_get_image_shape_multifreq_ffi))
# TODO temporary until better solution found
@ff.callback("void (*)(const double *, double, int *)", onerror=handle_error)
def helper_get_image_shape_ffi(freq, cellsize, c_shape):
    """Write the 4-element (nchan, npol, ny, nx) shape of a standard test image into *c_shape*."""
    image_shape = list(create_test_image(freq, cellsize).data.shape)
    # TODO fix ugly: the shape is copied straight into the caller's int[4] buffer.
    out_view = numpy.frombuffer(ff.buffer(c_shape, 4 * 4), dtype='i4', count=4)
    numpy.copyto(out_view, image_shape)
helper_get_image_shape = collections.namedtuple("FFIX", "address")
helper_get_image_shape.address = int(ff.cast("size_t", helper_get_image_shape_ffi))
# TODO properly implement this routine - shouldn't be within create_test_image
#@ff.callback("void (*)(const ARLVis *, Image *)")
#def helper_set_image_params_ffi(vis, image):
# phasecentre = load_phasecentre(vis.phasecentre)
#
# py_image = cImage(image)
#
# py_image.wcs.wcs.crval[0] = phasecentre.ra.deg
# py_image.wcs.wcs.crval[1] = phasecentre.dec.deg
# py_image.wcs.wcs.crpix[0] = float(nx // 2)
# py_image.wcs.wcs.crpix[1] = float(ny // 2)
#
#helper_set_image_params=collections.namedtuple("FFIX", "address")
#helper_set_image_params.address=int(ff.cast("size_t", helper_set_image_params_ffi))
@ff.callback("void (*)(const double *, double, char*, Image *)", onerror=handle_error)
def arl_create_test_image_ffi(frequency, cellsize, c_phasecentre, out_img):
    """Create the standard ARL test image and store it in the C image struct *out_img*."""
    c_image = cImage(out_img, new=True)
    test_image = create_test_image(frequency, cellsize)
    # Decoded but not applied to the image here (the WCS updates were commented out upstream).
    phasecentre = load_phasecentre(c_phasecentre)
    nchan, npol, ny, nx = test_image.data.shape
    store_image_in_c(c_image, test_image)
arl_create_test_image = collections.namedtuple("FFIX", "address")
arl_create_test_image.address = int(ff.cast("size_t", arl_create_test_image_ffi))
@ff.callback("void (*)(ARLConf *, double, int, char*, Image *)", onerror=handle_error)
def arl_create_low_test_image_from_gleam_ffi(lowconfig, cellsize, npixel, c_phasecentre, out_img):
    """Create a GLEAM-based LOW test sky image and store it in *out_img*."""
    py_outimg = cImage(out_img, new=True)
    # Frequency axes arrive as raw double buffers (8 bytes per element).
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    phasecentre = load_phasecentre(c_phasecentre)
    print("About to start create_low_test_image_from_gleam")
    res = create_low_test_image_from_gleam(npixel=npixel, frequency=frequency,
        channel_bandwidth=channel_bandwidth, cellsize=cellsize, flux_limit = 1.0, phasecentre=phasecentre, applybeam=True)
    # NOTE(review): the same image is dumped twice to nearly identical HDF5
    # files -- looks like debugging leftovers; confirm both are still needed.
    export_image_to_hdf5(res, '%s/gleam_model_res.hdf'%(results_dir))
    nchan, npol, ny, nx = res.data.shape
    export_image_to_hdf5(res, '%s/gleam_model_res1.hdf'%(results_dir))
    store_image_in_c(py_outimg, res)
arl_create_low_test_image_from_gleam=collections.namedtuple("FFIX", "address")
arl_create_low_test_image_from_gleam.address=int(ff.cast("size_t", arl_create_low_test_image_from_gleam_ffi))
@ff.callback("void (*)(const ARLVis *, const Image *, ARLVis *)", onerror=handle_error)
def arl_predict_2d_ffi(vis_in, img, vis_out):
    """Predict model visibilities from *img* with the 2D transform and store them in *vis_out*."""
    py_vis = helper_create_visibility_object(cARLVis(vis_in))
    py_vis.phasecentre = load_phasecentre(vis_in.phasecentre)
    model = cImage(img)
    predicted = predict_2d(py_vis, model)
    # Mirror the input dimensions on the output struct before wrapping it.
    vis_out.nvis = vis_in.nvis
    vis_out.npol = vis_in.npol
    out_data = cARLVis(vis_out)
    numpy.copyto(out_data, predicted.data)
    store_phasecentre(vis_out.phasecentre, predicted.phasecentre)
arl_predict_2d = collections.namedtuple("FFIX", "address")
arl_predict_2d.address = int(ff.cast("size_t", arl_predict_2d_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const Image *, ARLVis *, ARLVis *, long long int *)", onerror=handle_error)
def arl_predict_function_ffi(lowconfig, vis_in, img, vis_out, blockvis_out, cindex_out):
    """Predict visibilities from *img* with the wstack serial workflow.

    Writes the coalesced visibilities into *vis_out*, the parent block
    visibilities into *blockvis_out* and the coalescence index into
    *cindex_out* (vis_slices is hard-coded to 51 here).
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    # cindex_out is declared 'long long int *' on the C side and allocated as
    # 8*cindex_size bytes: use the fixed-width 'i8' dtype instead of the
    # platform-dependent 'int' (only 4 bytes on some platforms).
    py_cindex = numpy.frombuffer(ff.buffer(cindex_out, 8*cindex_size), dtype='i8', count=cindex_size)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    c_img = cImage(img)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    res = predict_list_serial_workflow(py_visin, c_img, vis_slices=51, context='wstack')
    # NOTE(review): vis_out.nvis is not set here although arl_predict_2d_ffi
    # does set it -- presumably the caller pre-fills it; confirm.
    vis_out.npol = vis_in.npol
    c_visout = cARLVis(vis_out)
    numpy.copyto(c_visout, res.data)
    store_phasecentre(vis_out.phasecentre, res.phasecentre)
    numpy.copyto(py_cindex, res.cindex)
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, res.blockvis.data)
    store_phasecentre(blockvis_out.phasecentre, res.phasecentre)
arl_predict_function = collections.namedtuple("FFIX", "address")
arl_predict_function.address = int(ff.cast("size_t", arl_predict_function_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, const Image *)", onerror=handle_error)
def arl_predict_function_blockvis_ffi(lowconfig, vis_in, img):
    """Predict block visibilities from *img* in-place into *vis_in*.

    The BlockVisibility is rebuilt from the C buffers, the wstack serial
    predict workflow is run (vis_slices hard-coded to 51) and the predicted
    data is copied straight back over the input's data array.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    # Frequency axes arrive as raw double buffers (8 bytes per element).
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    c_img = cImage(img)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    log.info(qa_image(c_img, context='arl_predict_function'))
    py_blockvis = predict_list_serial_workflow(py_visin, c_img, vis_slices=51, context='wstack')
    # Write the predicted visibilities back into the C-side buffer in place.
    numpy.copyto(c_visin, py_blockvis.data)
arl_predict_function_blockvis=collections.namedtuple("FFIX", "address")
arl_predict_function_blockvis.address=int(ff.cast("size_t", arl_predict_function_blockvis_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, const Image *, ARLVis *, long long int *, int)", onerror=handle_error)
def arl_predict_function_ical_ffi(lowconfig, vis_inout, img, blockvis_inout, cindex_inout, vis_slices):
    """Predict model visibilities in-place for the ICAL pipeline.

    Rebuilds the coalesced Visibility, its parent BlockVisibility and the
    coalescence index from the C buffers, runs the wstack serial predict
    workflow and copies the results back into the same C structures.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    # cindex_inout is 'long long int *' on the C side and allocated as
    # 8*cindex_size bytes: use the fixed-width 'i8' dtype instead of the
    # platform-dependent 'int' (only 4 bytes on some platforms).
    py_cindex = numpy.frombuffer(ff.buffer(cindex_inout, 8*cindex_size), dtype='i8', count=cindex_size)
    c_visinout = cARLVis(vis_inout)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visinout = helper_create_visibility_object(c_visinout)
    py_visinout.configuration = lowcore
    py_visinout.phasecentre = load_phasecentre(vis_inout.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visinout.polarisation_frame = PolarisationFrame(polframe)
    py_blockvis_inout = cARLBlockVis(blockvis_inout, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisinout = helper_create_blockvisibility_object(py_blockvis_inout, frequency, channel_bandwidth, lowcore)
    # Attach the block visibility and coalescence index the workflow expects.
    py_visinout.blockvis = py_blockvisinout
    py_visinout.cindex = py_cindex
    c_img = cImage(img)
    res = predict_list_serial_workflow(py_visinout, c_img, vis_slices=vis_slices, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # Copy results back through the in/out C structures.
    numpy.copyto(c_visinout, res.data)
    store_phasecentre(vis_inout.phasecentre, res.phasecentre)
    numpy.copyto(py_cindex, res.cindex)
    numpy.copyto(py_blockvis_inout, res.blockvis.data)
    store_phasecentre(blockvis_inout.phasecentre, res.phasecentre)
arl_predict_function_ical = collections.namedtuple("FFIX", "address")
arl_predict_function_ical.address = int(ff.cast("size_t", arl_predict_function_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """Invert visibilities into a dirty image using the wstack serial workflow."""
    # Configuration
    conf_name = str(ff.string(lowconfig.confname), 'utf-8')
    configuration = create_named_configuration(conf_name, rmax=lowconfig.rmax)
    # Rehydrate the Visibility object from the raw C-side buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8 * lowconfig.ntimes),
                             dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8 * lowconfig.nfreqs),
                                 dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8 * lowconfig.nchanwidth),
                                         dtype='f8', count=lowconfig.nchanwidth)
    visibility = helper_create_visibility_object(cARLVis(vis_in))
    visibility.phasecentre = load_phasecentre(vis_in.phasecentre)
    visibility.configuration = configuration
    visibility.polarisation_frame = PolarisationFrame(str(ff.string(lowconfig.polframe), 'utf-8'))
    # Images: input model, freshly-allocated output dirty image.
    model = cImage(img)
    dirty_out = cImage(img_dirty, new=True)
    dirty, sumwt = invert_list_serial_workflow(visibility, model, vis_slices=vis_slices,
                                               dopsf=False, context='wstack')
    nchan, npol, ny, nx = dirty.data.shape
    # Copy the Python dirty image into the C image struct.
    store_image_in_c(dirty_out, dirty)
arl_invert_function = collections.namedtuple("FFIX", "address")
arl_invert_function.address = int(ff.cast("size_t", arl_invert_function_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_blockvis_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """Invert a BlockVisibility into a dirty image (wstack serial workflow)."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object from the raw C buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)  # NOTE(review): unused in this function
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images: input model and freshly-allocated output dirty image.
    py_img = cImage(img)
    py_img_dirty = cImage(img_dirty, new=True)
    dirty, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, dopsf=False, context='wstack')
    # Shape unpack retained from the original; the values are unused below.
    nchan, npol, ny, nx = dirty.data.shape
    # Copy Python dirty image into C image
    store_image_in_c(py_img_dirty, dirty)
arl_invert_function_blockvis=collections.namedtuple("FFIX", "address")
arl_invert_function_blockvis.address=int(ff.cast("size_t", arl_invert_function_blockvis_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_ical_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """Invert visibilities into a residual/dirty image for the ICAL pipeline."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating Visibility object from the raw C buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)  # NOTE(review): unused in this function
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images
    py_img = cImage(img)
    py_img_dirty = cImage(img_dirty, new=True)
    # NOTE(review): the deconvolution-style keywords below (algorithm, niter,
    # nmajor, ...) are not obvious invert parameters -- presumably forwarded
    # via **kwargs; confirm they are intentional.
    dirty, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # Shape unpack retained from the original; the values are unused below.
    nchan, npol, ny, nx = dirty.data.shape
    # Copy Python dirty image into C image
    store_image_in_c(py_img_dirty, dirty)
    log.info("Maximum in residual image is %.6f" % (numpy.max(numpy.abs(dirty.data))))
arl_invert_function_ical=collections.namedtuple("FFIX", "address")
arl_invert_function_ical.address=int(ff.cast("size_t", arl_invert_function_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_psf_ffi(lowconfig, vis_in, img, vis_slices, img_psf):
    """Invert visibilities with dopsf=True to produce the point spread function."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating Visibility object from the raw C buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)  # NOTE(review): unused in this function
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images
    py_img = cImage(img)
    py_img_psf = cImage(img_psf, new=True)
    # dopsf=True selects PSF generation rather than a dirty image.
    psf, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, dopsf=True, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # Shape unpack retained from the original; the values are unused below.
    nchan, npol, ny, nx = psf.data.shape
    # Copy the Python PSF image into the C image struct.
    store_image_in_c(py_img_psf, psf)
arl_invert_function_psf=collections.namedtuple("FFIX", "address")
arl_invert_function_psf.address=int(ff.cast("size_t", arl_invert_function_psf_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *, Image *, Image *)", onerror=handle_error)
def arl_ical_ffi(lowconfig, blockvis_in, img_model, vis_slices, img_deconvolved, img_residual, img_restored):
    """Run the serial ICAL pipeline and store its three output images.

    Rebuilds a BlockVisibility and model image from the C structs, runs
    ical_list_serial_workflow, and copies the deconvolved, residual and
    restored images back into the corresponding C image structs.
    """
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object from the raw C buffers.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)  # NOTE(review): unused in this function
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images: input model plus the three freshly-allocated outputs.
    py_model = cImage(img_model)
    py_img_deconvolved = cImage(img_deconvolved, new=True)
    py_img_residual = cImage(img_residual, new=True)
    py_img_restored = cImage(img_restored, new=True)
    # Calling ical_list_serial_workflow()
    deconvolved, residual, restored = ical_list_serial_workflow(block_vis=py_blockvisin, model=py_model, vis_slices=vis_slices,
        timeslice='auto',
        algorithm='hogbom', niter=1000, fractional_threshold=0.1, threshold=0.1,
        context='wstack', nmajor=5, gain=0.1, first_selfcal=1,
        global_solution=False)
    # Preparing deconvolved (shape unpacks retained from the original; values unused)
    nchan, npol, ny, nx = deconvolved.data.shape
    store_image_in_c(py_img_deconvolved, deconvolved)
    # Preparing residual
    nchan, npol, ny, nx = residual.data.shape
    store_image_in_c(py_img_residual, residual)
    # Preparing restored
    nchan, npol, ny, nx = restored.data.shape
    store_image_in_c(py_img_restored, restored)
arl_ical=collections.namedtuple("FFIX", "address")
arl_ical.address=int(ff.cast("size_t", arl_ical_ffi))
# onerror=handle_error was missing here although every sibling callback
# registers it; added for consistent error reporting back to the C side.
@ff.callback("void (*)(const ARLVis *, const Image *, bool dopsf, Image *, double *)", onerror=handle_error)
def arl_invert_2d_ffi(invis, in_image, dopsf, out_image, sumwt):
    """Invert *invis* into *out_image* with the simple 2D transform.

    *dopsf* selects PSF generation instead of a dirty image.
    """
    py_visin = helper_create_visibility_object(cARLVis(invis))
    py_visin.phasecentre = load_phasecentre(invis.phasecentre)
    c_in_img = cImage(in_image)
    c_out_img = cImage(out_image, new=True)
    # Single call replaces the original duplicated if/else; the original also
    # shadowed the C 'sumwt' out-parameter with the Python return value.
    out, py_sumwt = invert_2d(py_visin, c_in_img, dopsf=bool(dopsf))
    # NOTE(review): the C 'double *sumwt' out-parameter is never written back
    # (the original discarded it via shadowing) -- confirm whether the caller
    # expects the sum of weights to be stored there.
    store_image_in_c_2(c_out_img, out)
arl_invert_2d = collections.namedtuple("FFIX", "address")
arl_invert_2d.address = int(ff.cast("size_t", arl_invert_2d_ffi))
@ff.callback("void (*)(const ARLVis *, Image *)", onerror=handle_error)
def arl_create_image_from_visibility_ffi(vis_in, img_in):
    """Create a template image matched to *vis_in* and store it in *img_in*."""
    c_img = cImage(img_in, new=True)
    # Build a temporary Visibility object purely so create_image_from_visibility
    # has real metadata to work from (the cARLVis wrapper alone is not enough).
    tvis = helper_create_visibility_object(cARLVis(vis_in))
    tvis.phasecentre = load_phasecentre(vis_in.phasecentre)
    # Default args for now
    image = create_image_from_visibility(tvis, cellsize=0.001, npixel=256)
    # WCS and polarisation frame travel pickled inside the C struct.
    store_image_in_c(c_img, image)
arl_create_image_from_visibility = collections.namedtuple("FFIX", "address")
arl_create_image_from_visibility.address = int(
    ff.cast("size_t", arl_create_image_from_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, double, int, char*, Image *)", onerror=handle_error)
def arl_create_image_from_blockvisibility_ffi(lowconfig, blockvis_in, cellsize, npixel, c_phasecentre, img_out):
    """Create a single-channel model image matched to a BlockVisibility."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object from the raw C buffers.
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    # Copying phasecentre and other metadata
    phasecentre = load_phasecentre(c_phasecentre)
    py_blockvisin.phasecentre = phasecentre
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # NOTE(review): the image is built around phasecentre1 (derived from
    # lowconfig.pc_ra/pc_dec), not the phasecentre decoded from c_phasecentre
    # above -- confirm the two are meant to be identical.
    phasecentre1 = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
        equinox='J2000')
    # Re-creating Image object
    py_outimg = cImage(img_out, new=True);
    # Construct a single-channel model: average frequency, summed bandwidth.
    res = create_image_from_visibility(py_blockvisin, npixel=npixel, frequency=[numpy.average(frequency)], nchan=1,
        channel_bandwidth=[numpy.sum(channel_bandwidth)], cellsize=cellsize, phasecentre=phasecentre1)
    # Shape unpack retained from the original; the values are unused below.
    nchan, npol, ny, nx = res.data.shape
    store_image_in_c(py_outimg, res)
arl_create_image_from_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_image_from_blockvisibility.address=int(ff.cast("size_t",
    arl_create_image_from_blockvisibility_ffi))
@ff.callback("void (*)(Image *, Image *, Image *, Image *)", onerror=handle_error)
def arl_deconvolve_cube_ffi(dirty, psf, restored, residual):
    """Deconvolve *dirty* with *psf* and store the two result cubes in the C structs."""
    in_dirty = cImage(dirty)
    in_psf = cImage(psf)
    out_residual = cImage(residual, new=True)
    out_restored = cImage(restored, new=True)
    comp, resid = deconvolve_cube(in_dirty, in_psf,
                                  niter=1000, threshold=0.001, fracthresh=0.01,
                                  window_shape='quarter', gain=0.7, scales=[0, 3, 10, 30])
    store_image_in_c(out_restored, comp)
    store_image_in_c(out_residual, resid)
arl_deconvolve_cube = collections.namedtuple("FFIX", "address")
arl_deconvolve_cube.address = int(ff.cast("size_t", arl_deconvolve_cube_ffi))
@ff.callback("void (*)(Image *, Image *, Image *, Image *)", onerror=handle_error)
def arl_deconvolve_cube_ical_ffi(dirty, psf, restored, residual):
    """Deconvolve *dirty* with *psf* for the ICAL pipeline variant.

    NOTE(review): the keyword arguments below (timeslice, nmajor,
    first_selfcal, ...) look like pipeline-level options rather than
    deconvolve_cube parameters -- presumably absorbed by its **kwargs;
    confirm they have the intended effect.
    """
    c_dirty = cImage(dirty)
    c_psf = cImage(psf)
    c_residual = cImage(residual, new=True)
    c_restored = cImage(restored, new=True)
    py_restored, py_residual = deconvolve_cube(c_dirty, c_psf,
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    store_image_in_c(c_restored,py_restored)
    store_image_in_c(c_residual,py_residual)
arl_deconvolve_cube_ical=collections.namedtuple("FFIX", "address")
arl_deconvolve_cube_ical.address=int(ff.cast("size_t", arl_deconvolve_cube_ical_ffi))
@ff.callback("void (*)(Image *, Image *, Image*, Image*)", onerror=handle_error)
def arl_restore_cube_ffi(model, psf, residual, restored):
    """Restore a cube from *model*, *psf* and optional *residual* into *restored*."""
    py_model = cImage(model)
    py_psf = cImage(psf)
    # A NULL residual pointer on the C side means "no residual supplied".
    py_residual = cImage(residual) if residual else None
    out_restored = cImage(restored, new=True)
    result = restore_cube(py_model, py_psf, py_residual)
    # Copy the Python result back into the C result struct.
    store_image_in_c(out_restored, result)
arl_restore_cube = collections.namedtuple("FFIX", "address")
arl_restore_cube.address = int(ff.cast("size_t", arl_restore_cube_ffi))
@ff.callback("void (*)(Image *, Image *, Image*, Image*)", onerror=handle_error)
def arl_restore_cube_ical_ffi(model, psf, residual, restored):
    """Restore a cube for the ICAL pipeline variant.

    NOTE(review): the extra keywords below (timeslice, algorithm, ...) are
    pipeline-level options, not obvious restore_cube parameters -- presumably
    absorbed by **kwargs; confirm they have any effect.
    """
    # Cast C Image structs to Python objects
    c_model = cImage(model)
    c_psf = cImage(psf)
    # A NULL residual pointer on the C side means "no residual supplied".
    if residual:
        c_residual = cImage(residual)
    else:
        c_residual = None
    c_restored = cImage(restored, new=True)
    # Calculate
    py_restored = restore_cube(c_model, c_psf, c_residual,
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # Copy Python result to C result struct
    store_image_in_c(c_restored,py_restored)
arl_restore_cube_ical=collections.namedtuple("FFIX", "address")
arl_restore_cube_ical.address=int(ff.cast("size_t", arl_restore_cube_ical_ffi))
| 47.853858 | 165 | 0.750508 |
import numpy
import collections
import sys
from astropy.coordinates import SkyCoord
from astropy import units as u
from processing_components.calibration.operations import apply_gaintable, create_gaintable_from_blockvisibility, qa_gaintable
from processing_components.visibility.base import create_visibility, copy_visibility
from data_models.memory_data_models import ReceptorFrame
from processing_components.image.deconvolution import deconvolve_cube, restore_cube
from processing_components.imaging.base import create_image_from_visibility, predict_2d, invert_2d
from processing_components.imaging.base import advise_wide_field
from processing_components.simulation.testing_support import create_named_configuration, create_test_image, create_low_test_image_from_gleam, simulate_gaintable
from data_models.polarisation import PolarisationFrame
from processing_components.visibility.base import create_blockvisibility
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow
from processing_components.image.operations import qa_image
from processing_components.visibility.coalesce import convert_visibility_to_blockvisibility, convert_blockvisibility_to_visibility
from processing_components.calibration.calibration import solve_gaintable
from workflows.serial.pipelines.pipeline_serial import ical_list_serial_workflow
from data_models.data_model_helpers import export_image_to_hdf5
from ffiwrappers.src.arlwrap_support import *
import logging
import os
# Directory used by the callbacks for diagnostic image/visibility dumps.
results_dir = './results'
os.makedirs(results_dir, exist_ok=True)
# Root logger echoing to stdout so log output is visible when driven from C.
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
# Global error flag read back by the C side: 0 = OK, -1 = a callback raised.
arl_error = 0
def handle_error(*args):
    """cffi onerror hook: flag the failure and echo the exception details.

    args is presumably the (exception type, value, traceback) triple that
    cffi passes to onerror handlers -- confirm against the cffi docs.
    """
    global arl_error
    if(args[0] != ""):
        arl_error = -1
        print(args[0],"\n",args[1],"\n",args[2])
ff.cdef("""
typedef struct {
size_t nvis;
int npol;
void *data;
char *phasecentre;
} ARLVis;
""")
ff.cdef("""
typedef struct {
size_t nrows;
void *data;
} ARLGt;
""")
ff.cdef("""
typedef struct {
char *confname;
double pc_ra;
double pc_dec;
double *times;
int ntimes;
double *freqs;
int nfreqs;
double *channel_bandwidth;
int nchanwidth;
int nbases;
int nant;
int npol;
int nrec;
double rmax;
char *polframe;
} ARLConf;
""")
ff.cdef("""
typedef struct {
int vis_slices;
int npixel;
double cellsize;
double guard_band_image;
double delA;
int wprojection_planes;
} ARLadvice ;
""")
# Wrap of arl.visibility.base.copy_visibility
# """
# NOTE(review): the namedtuple line below was truncated in this copy of the
# file (only the fragment `"address")` survived).  Reconstructed from the
# FFIX registration pattern used after every other callback in this module;
# confirm against the upstream source that `arl_handle_error_ffi` is the
# intended callback object.
arl_handle_error=collections.namedtuple("FFIX", "address")
arl_handle_error.address=int(ff.cast("size_t", arl_handle_error_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_copy_visibility_ffi(lowconfig, vis_in, vis_out, zero_in):
    """C entry point: copy visibility *vis_in* into *vis_out*.

    ``zero_in`` uses the inverted convention seen throughout this file:
    0 requests a zeroed copy, any other value keeps the data.
    """
    if zero_in == 0:
        zero = True
    else:
        zero = False
    # Rebuild the telescope configuration and sampling arrays from ARLConf.
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    # NOTE(review): a cARLVis is wrapped here with the *blockvisibility*
    # helper, whereas sibling wrappers use helper_create_visibility_object
    # for cARLVis data -- confirm this is intentional.
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    py_visout=copy_visibility(py_visin, zero=zero)
    # Mirror the shape metadata, then copy the result back into C memory.
    vis_out.npol=vis_in.npol
    vis_out.nvis=vis_in.nvis
    py_vis_out = cARLVis(vis_out)
    numpy.copyto(py_vis_out, py_visout.data)
    store_phasecentre(vis_out.phasecentre, py_visin.phasecentre)
arl_copy_visibility=collections.namedtuple("FFIX", "address")
arl_copy_visibility.address=int(ff.cast("size_t", arl_copy_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_copy_blockvisibility_ffi(lowconfig, blockvis_in, blockvis_out, zero_in):
    """C entry point: copy block visibility *blockvis_in* into *blockvis_out*.

    ``zero_in`` uses the inverted convention: 0 requests a zeroed copy.
    """
    if zero_in == 0:
        zero = True
    else:
        zero = False
    # Rebuild configuration and sampling arrays from the ARLConf struct.
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    py_blockvisout=copy_visibility(py_blockvisin, zero=zero)
    # Mirror shape metadata, then copy the result back into C memory.
    blockvis_out.npol=blockvis_in.npol
    blockvis_out.nvis=blockvis_in.nvis
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)
arl_copy_blockvisibility=collections.namedtuple("FFIX", "address")
arl_copy_blockvisibility.address=int(ff.cast("size_t", arl_copy_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_set_visibility_data_to_zero_ffi(lowconfig, vis_in):
    """C entry point: zero the 'vis' column of *vis_in*, in place."""
    config_name = str(ff.string(lowconfig.confname), 'utf-8')
    telescope = create_named_configuration(config_name, rmax=lowconfig.rmax)
    # Unpack the sampling arrays for parity with the sibling wrappers,
    # even though only the configuration is attached below.
    t_arr = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    f_arr = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    bw_arr = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    vis_c = cARLVis(vis_in)
    vis_py = helper_create_visibility_object(vis_c)
    vis_py.phasecentre = load_phasecentre(vis_in.phasecentre)
    vis_py.configuration = telescope
    frame_name = str(ff.string(lowconfig.polframe), 'utf-8')
    vis_py.polarisation_frame = PolarisationFrame(frame_name)
    # The Python object presumably shares the C buffer, so this writes
    # straight through to the caller's data.
    vis_py.data['vis'][...] = 0.0
arl_set_visibility_data_to_zero=collections.namedtuple("FFIX", "address")
arl_set_visibility_data_to_zero.address=int(ff.cast("size_t", arl_set_visibility_data_to_zero_ffi))
def _wrap_visibility_from_c(c_struct, lowcore, polframe):
    """Wrap a C ARLVis struct as an ARL Visibility sharing the C buffer."""
    c_data = cARLVis(c_struct)
    py_vis = helper_create_visibility_object(c_data)
    py_vis.phasecentre = load_phasecentre(c_struct.phasecentre)
    py_vis.configuration = lowcore
    py_vis.polarisation_frame = PolarisationFrame(polframe)
    return py_vis
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, ARLVis *, int)", onerror=handle_error)
def arl_manipulate_visibility_data_ffi(lowconfig, vis1_in, vis2_in, vis_out, operation):
    """C entry point: element-wise combine two visibility data sets.

    ``operation``: 0 add, 1 subtract, 2 multiply, 3 divide; any other
    opcode zeroes the output.  Result is written to vis_out's 'vis' column.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Decode the polarisation frame once; the original decoded it three times.
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    # The three operands previously went through identical, duplicated
    # wrapping code; factored into _wrap_visibility_from_c.
    py_vis1in = _wrap_visibility_from_c(vis1_in, lowcore, polframe)
    py_vis2in = _wrap_visibility_from_c(vis2_in, lowcore, polframe)
    py_visout = _wrap_visibility_from_c(vis_out, lowcore, polframe)
    print("arl_manipulate_visibility_data opcode: ", operation)
    if operation == 0:
        print("arl_manipulate_visibility_data: adding")
        py_visout.data['vis'] = py_vis1in.data['vis'] + py_vis2in.data['vis']
    elif operation == 1:
        print("arl_manipulate_visibility_data: subtracting")
        py_visout.data['vis'] = py_vis1in.data['vis'] - py_vis2in.data['vis']
    elif operation == 2:
        print("arl_manipulate_visibility_data: multiplying")
        py_visout.data['vis'] = py_vis1in.data['vis'] * py_vis2in.data['vis']
    elif operation == 3:
        print("arl_manipulate_visibility_data: dividing")
        py_visout.data['vis'] = py_vis1in.data['vis'] / py_vis2in.data['vis']
    else:
        # Unknown opcode: zero the output rather than leave stale data.
        py_visout.data['vis'][...] = 0.0
    print("arl_manipulate_visibility_data np.sum(vis.data): ", numpy.sum(py_visout.data['vis']), numpy.sum(py_vis1in.data['vis']), numpy.sum(py_vis2in.data['vis']))
arl_manipulate_visibility_data=collections.namedtuple("FFIX", "address")
arl_manipulate_visibility_data.address=int(ff.cast("size_t", arl_manipulate_visibility_data_ffi))
ff.cdef("""
typedef struct {
size_t size;
int data_shape[4];
void *data;
char *wcs;
char *polarisation_frame;
} Image;
""")
# Consistency fix: this was the only callback registered without
# onerror=handle_error, so exceptions raised here went unreported.
@ff.callback("void (*)(Image*, Image*)", onerror=handle_error)
def arl_add_to_model_ffi(model, res):
    """C entry point: accumulate *res* into *model* (model += res)."""
    c_model = cImage(model)
    c_res = cImage(res)
    # cImage presumably wraps the C pixel buffer, so += mutates the
    # caller's model data in place.
    c_model.data += c_res.data
arl_add_to_model=collections.namedtuple("FFIX", "address")
arl_add_to_model.address=int(ff.cast("size_t", arl_add_to_model_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_create_visibility_ffi(lowconfig, c_res_vis):
    """C entry point: create a Visibility from ARLConf and write it to *c_res_vis*."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    # rmax ~ 0 is treated as "no cut": build the full named configuration.
    if lowconfig.rmax < 1.0e-5 :
        lowcore = create_named_configuration(lowcore_name)
    else:
        lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    print(lowcore_name)
    print("Times: ", times)
    print("Freqs: ", frequency)
    print("BW : ", channel_bandwidth)
    print("PCentre: ", lowconfig.pc_ra, lowconfig.pc_dec)
    # Phase centre comes in as RA/Dec in degrees (ICRS, J2000).
    phasecentre = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
                           equinox='J2000')
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    vt = create_visibility(lowcore, times, frequency,
                           channel_bandwidth=channel_bandwidth, weight=1.0,
                           phasecentre=phasecentre,
                           polarisation_frame=PolarisationFrame(polframe))
    # Copy the generated table into the C struct and persist the centre.
    py_res_vis = cARLVis(c_res_vis)
    numpy.copyto(py_res_vis, vt.data)
    store_phasecentre(c_res_vis.phasecentre, phasecentre)
arl_create_visibility=collections.namedtuple("FFIX", "address")
arl_create_visibility.address=int(ff.cast("size_t", arl_create_visibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *)", onerror=handle_error)
def arl_create_blockvisibility_ffi(lowconfig, c_res_vis):
    """C entry point: create a BlockVisibility from ARLConf, write it to
    *c_res_vis*, and report the receptor count back via lowconfig.nrec."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    print(lowconfig.rmax)
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    print(lowcore_name)
    print("Times: ", times)
    print("Freqs: ", frequency)
    print("BW : ", channel_bandwidth)
    print("PCentre: ", lowconfig.pc_ra, lowconfig.pc_dec)
    phasecentre = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
                           equinox='J2000')
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    print("Polarisation frame: ", polframe)
    vt = create_blockvisibility(lowcore, times, frequency=frequency,
                                channel_bandwidth=channel_bandwidth, weight=1.0,
                                phasecentre=phasecentre,
                                polarisation_frame=PolarisationFrame(polframe))
    py_res_vis = cARLBlockVis(c_res_vis, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_res_vis, vt.data)
    store_phasecentre(c_res_vis.phasecentre, phasecentre)
    # Write the receptor count back into the (mutable) config struct so the
    # C side can size gain tables correctly.
    receptor_frame = ReceptorFrame(vt.polarisation_frame.type)
    lowconfig.nrec = receptor_frame.nrec
arl_create_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_blockvisibility.address=int(ff.cast("size_t", arl_create_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, long long int *, ARLVis *)", onerror=handle_error)
def arl_convert_visibility_to_blockvisibility_ffi(lowconfig, vis_in, blockvis_in, cindex_in, blockvis_out):
    """C entry point: convert a Visibility (plus its parent BlockVisibility
    and coalescence index) back into a BlockVisibility."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Coalescence index: one entry per (ant1, ant2, channel, time).
    # NOTE(review): dtype='int' with an 8-byte buffer assumes a 64-bit
    # platform int -- confirm for portability.
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_in, 8*cindex_size), dtype='int', count=cindex_size)
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    py_visin.cindex = py_cindex
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Attach the parent block visibility the conversion folds data into.
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    py_visin.blockvis = py_blockvisin
    py_blockvisout = convert_visibility_to_blockvisibility(py_visin)
    print("convert_visibility_to_blockvisibility np.sum(block_vis.data): ", numpy.sum(py_blockvisout.data['vis']))
    # Copy the converted table back into C memory.
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)
arl_convert_visibility_to_blockvisibility=collections.namedtuple("FFIX", "address")
arl_convert_visibility_to_blockvisibility.address=int(ff.cast("size_t", arl_convert_visibility_to_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLVis *, long long int *, ARLVis *)", onerror=handle_error)
def arl_convert_blockvisibility_to_visibility_ffi(lowconfig, blockvis_in, vis_out, cindex_out, blockvis_out):
    """C entry point: flatten a BlockVisibility into a Visibility, also
    returning the coalescence index and the carried-along block data."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Output index buffer, one entry per (ant1, ant2, channel, time).
    # NOTE(review): dtype='int' with an 8-byte buffer assumes 64-bit ints.
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_out, 8*cindex_size), dtype='int', count=cindex_size)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    vis = convert_blockvisibility_to_visibility(py_blockvisin)
    # Copy the three outputs (flat vis, block vis, index) back into C memory.
    py_vis = cARLVis(vis_out)
    numpy.copyto(py_vis, vis.data)
    store_phasecentre(vis_out.phasecentre, py_blockvisin.phasecentre)
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, vis.blockvis.data)
    numpy.copyto(py_cindex, vis.cindex)
    print("convert_blockvisibility_to_visibility np.sum(vis.data): ", numpy.sum(vis.data['vis']))
arl_convert_blockvisibility_to_visibility=collections.namedtuple("FFIX", "address")
arl_convert_blockvisibility_to_visibility.address=int(ff.cast("size_t", arl_convert_blockvisibility_to_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLGt *)", onerror=handle_error)
def arl_create_gaintable_from_blockvisibility_ffi(lowconfig, blockvis_in, gt_out):
    """C entry point: build an empty gain table sized for *blockvis_in*
    and copy it into *gt_out*."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    py_gt = create_gaintable_from_blockvisibility(py_blockvisin)
    # Copy the freshly created table into the C gain-table struct.
    c_gt_out = cARLGt(gt_out, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    numpy.copyto(c_gt_out, py_gt.data)
arl_create_gaintable_from_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_gaintable_from_blockvisibility.address=int(ff.cast("size_t", arl_create_gaintable_from_blockvisibility_ffi))
@ff.callback("void (*)(ARLConf *, ARLGt *)", onerror=handle_error)
def arl_simulate_gaintable_ffi(lowconfig, gt):
    """C entry point: fill *gt* with simulated gains (phase_error=1.0)."""
    config_name = str(ff.string(lowconfig.confname), 'utf-8')
    telescope = create_named_configuration(config_name, rmax=lowconfig.rmax)
    # Unpack sampling arrays; only the frequencies feed the gain table.
    t_arr = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    f_arr = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    frame_name = str(ff.string(lowconfig.polframe), 'utf-8')
    pol_frame = PolarisationFrame(frame_name)
    rec_frame = ReceptorFrame(pol_frame.type)
    gt_c = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    gt_py = helper_create_gaintable_object(gt_c, f_arr, rec_frame)
    gt_py.receptor_frame = rec_frame
    gt_py = simulate_gaintable(gt_py, phase_error = 1.0)
    # Write the simulated gains back into the C struct.
    numpy.copyto(gt_c, gt_py.data)
arl_simulate_gaintable=collections.namedtuple("FFIX", "address")
arl_simulate_gaintable.address=int(ff.cast("size_t", arl_simulate_gaintable_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, ARLGt *, ARLVis *, int )", onerror=handle_error)
def arl_apply_gaintable_ffi(lowconfig, blockvis_in, gt, blockvis_out, inverse_in):
    """C entry point: apply gain table *gt* to *blockvis_in*, result in
    *blockvis_out*.

    ``inverse_in`` uses the inverted convention: 0 means inverse=True.
    """
    if inverse_in == 0:
        inverse = True
    else:
        inverse = False
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Rebuild the gain table from the C struct.
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    py_blockvisout = apply_gaintable(py_blockvisin, py_gt, inverse=inverse)
    # Copy the calibrated data into the output C struct.
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, py_blockvisout.data)
    store_phasecentre(blockvis_out.phasecentre, py_blockvisin.phasecentre)
arl_apply_gaintable=collections.namedtuple("FFIX", "address")
arl_apply_gaintable.address=int(ff.cast("size_t", arl_apply_gaintable_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, ARLGt *, int )", onerror=handle_error)
def arl_apply_gaintable_ical_ffi(lowconfig, blockvis_in, gt, inverse_in):
    """C entry point (ICAL variant): apply *gt* to *blockvis_in* IN PLACE --
    the calibrated data overwrites the input struct.

    ``inverse_in`` uses the inverted convention: 0 means inverse=True.
    """
    if inverse_in == 0:
        inverse = True
    else:
        inverse = False
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    py_blockvisout = apply_gaintable(py_blockvisin, py_gt, inverse=inverse)
    # In-place: the result lands in the input struct's buffer.
    numpy.copyto(c_blockvisin, py_blockvisout.data)
arl_apply_gaintable_ical=collections.namedtuple("FFIX", "address")
arl_apply_gaintable_ical.address=int(ff.cast("size_t", arl_apply_gaintable_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const ARLVis *, ARLGt *, int )", onerror=handle_error)
def arl_solve_gaintable_ical_ffi(lowconfig, blockvis_in, blockvis_pred, gt, vis_slices):
    """C entry point (ICAL variant): solve for gains that map
    *blockvis_pred* (model) onto *blockvis_in* (observed); result into *gt*."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # Observed block visibility.
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    py_blockvisin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Predicted (model) block visibility.
    c_blockvispred = cARLBlockVis(blockvis_pred, lowconfig.nant, lowconfig.nfreqs)
    py_blockvispred = helper_create_blockvisibility_object(c_blockvispred, frequency, channel_bandwidth, lowcore)
    py_blockvispred.phasecentre = load_phasecentre(blockvis_pred.phasecentre)
    py_blockvispred.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvispred.polarisation_frame = PolarisationFrame(polframe)
    receptor_frame = ReceptorFrame(py_blockvisin.polarisation_frame.type)
    c_gt = cARLGt(gt, lowconfig.nant, lowconfig.nfreqs, lowconfig.nrec)
    py_gt = helper_create_gaintable_object(c_gt, frequency, receptor_frame)
    py_gt.receptor_frame = receptor_frame
    # NOTE(review): the deconvolution-style kwargs below (algorithm, niter,
    # fractional_threshold, nmajor, gain, ...) look inherited from a
    # deconvolve/ICAL call and are presumably absorbed by **kwargs in
    # solve_gaintable -- confirm they are intended here.
    gt_out = solve_gaintable(py_blockvisin, py_blockvispred,
                             vis_slices=vis_slices, timeslice='auto',
                             algorithm='hogbom', niter=1000, fractional_threshold=0.1, threshold=0.1,
                             nmajor=5, gain=0.1, first_selfcal=1,
                             global_solution=False)
    log.info(qa_gaintable(gt_out, context='Gaintable for selfcal cycle'))
    # Copy the solved gains back into the C struct.
    numpy.copyto(c_gt, gt_out.data)
arl_solve_gaintable_ical=collections.namedtuple("FFIX", "address")
arl_solve_gaintable_ical.address=int(ff.cast("size_t", arl_solve_gaintable_ical_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, ARLadvice *)", onerror=handle_error)
def arl_advise_wide_field_ffi(lowconfig, vis_in, adv):
    """C entry point: run advise_wide_field on *vis_in* and write the chosen
    cellsize / vis_slices / npixel back into the ARLadvice struct *adv*."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    print("Index :", py_visin.data['index'])
    # Tuning parameters come in through adv; results go back out through it.
    advice=advise_wide_field(py_visin, guard_band_image=adv.guard_band_image, delA=adv.delA,
                             wprojection_planes=adv.wprojection_planes)
    print(advice['vis_slices'], advice['npixels2'], advice['cellsize'])
    adv.cellsize = advice['cellsize']
    adv.vis_slices = advice['vis_slices']
    adv.npixel = advice['npixels2']
arl_advise_wide_field=collections.namedtuple("FFIX", "address")
arl_advise_wide_field.address=int(ff.cast("size_t", arl_advise_wide_field_ffi))
# Antenna/baseline count pair returned by the helper_get_nbases* callbacks.
ff.cdef("""
typedef struct {int nant, nbases;} ant_t;
""")
@ff.callback("void (*) (char*, ant_t *)", onerror=handle_error)
def helper_get_nbases_ffi(config_name, nbases_in):
    """C entry point: fill *nbases_in* with the antenna and baseline counts
    of the named telescope configuration (no rmax cut)."""
    name = str(ff.string(config_name), 'utf-8')
    telescope = create_named_configuration(name)
    nant = len(telescope.xyz)
    nbases_in.nant = nant
    # Unordered antenna pairs: n*(n-1)/2.
    nbases_in.nbases = int(nant * (nant - 1) / 2)
    print(name, nbases_in.nant, nbases_in.nbases)
helper_get_nbases=collections.namedtuple("FFIX", "address")
helper_get_nbases.address=int(ff.cast("size_t", helper_get_nbases_ffi))
# Consistency fix: registered with onerror=handle_error like its sibling
# helper_get_nbases_ffi, so exceptions are reported instead of lost.
@ff.callback("void (*) (char*, double, ant_t *)", onerror=handle_error)
def helper_get_nbases_rmax_ffi(config_name, rmax, nbases_in):
    """C entry point: fill *nbases_in* with antenna/baseline counts of the
    named configuration restricted to antennas within *rmax*."""
    tconfig_name = str(ff.string(config_name), 'utf-8')
    lowcore = create_named_configuration(tconfig_name, rmax=rmax)
    nbases_in.nant = len(lowcore.xyz)
    # Unordered antenna pairs: n*(n-1)/2.
    nbases_in.nbases = int(len(lowcore.xyz)*(len(lowcore.xyz)-1)/2)
    print(tconfig_name,nbases_in.nant, nbases_in.nbases )
helper_get_nbases_rmax=collections.namedtuple("FFIX", "address")
helper_get_nbases_rmax.address=int(ff.cast("size_t", helper_get_nbases_rmax_ffi))
@ff.callback("void (*)(ARLConf *, double, int, int *)", onerror=handle_error)
def helper_get_image_shape_multifreq_ffi(lowconfig, cellsize, npixel, c_shape):
    """C entry point: report the 4-element shape of a multi-frequency GLEAM
    test image into the int buffer *c_shape* (so C can size allocations)."""
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    # flux_limit=10. keeps the component count tiny -- only the shape is needed.
    print("About to start create_low_test_image_from_gleam with flux_limit = 10. to get a shape of the image")
    res = create_low_test_image_from_gleam(npixel=npixel, frequency=frequency,
                                           channel_bandwidth=channel_bandwidth, cellsize=cellsize, flux_limit = 10.)
    shape = list(res.data.shape)
    # Write the 4 x int32 shape into the caller's buffer.
    numpy.copyto(numpy.frombuffer(ff.buffer(c_shape,4*4),dtype='i4',count=4), shape)
helper_get_image_shape_multifreq=collections.namedtuple("FFIX", "address")
helper_get_image_shape_multifreq.address=int(ff.cast("size_t", helper_get_image_shape_multifreq_ffi))
@ff.callback("void (*)(const double *, double, int *)", onerror=handle_error)
def helper_get_image_shape_ffi(freq, cellsize, c_shape):
    """C entry point: report the 4-element shape of a standard test image
    into the int buffer *c_shape*."""
    test_img = create_test_image(freq, cellsize)
    # View the caller's 4 x int32 buffer as a numpy array and fill it.
    out_view = numpy.frombuffer(ff.buffer(c_shape, 4*4), dtype='i4', count=4)
    numpy.copyto(out_view, list(test_img.data.shape))
helper_get_image_shape=collections.namedtuple("FFIX", "address")
helper_get_image_shape.address=int(ff.cast("size_t", helper_get_image_shape_ffi))
#@ff.callback("void (*)(const ARLVis *, Image *)")
#def helper_set_image_params_ffi(vis, image):
# phasecentre = load_phasecentre(vis.phasecentre)
#
# py_image = cImage(image)
#
# py_image.wcs.wcs.crval[0] = phasecentre.ra.deg
# py_image.wcs.wcs.crval[1] = phasecentre.dec.deg
# py_image.wcs.wcs.crpix[0] = float(nx // 2)
# py_image.wcs.wcs.crpix[1] = float(ny // 2)
#
#helper_set_image_params=collections.namedtuple("FFIX", "address")
#helper_set_image_params.address=int(ff.cast("size_t", helper_set_image_params_ffi))
@ff.callback("void (*)(const double *, double, char*, Image *)", onerror=handle_error)
def arl_create_test_image_ffi(frequency, cellsize, c_phasecentre, out_img):
    """C entry point: create a standard test image and store it in *out_img*."""
    py_outimg = cImage(out_img, new=True)
    res = create_test_image(frequency, cellsize)
    phasecentre = load_phasecentre(c_phasecentre)
    nchan, npol, ny, nx = res.data.shape
    # WCS re-centering intentionally disabled (kept for reference):
#    res.wcs.wcs.crval[0] = phasecentre.ra.deg
#    res.wcs.wcs.crval[1] = phasecentre.dec.deg
#    res.wcs.wcs.crpix[0] = float(nx // 2)
#    res.wcs.wcs.crpix[1] = float(ny // 2)
    store_image_in_c(py_outimg, res)
arl_create_test_image=collections.namedtuple("FFIX", "address")
arl_create_test_image.address=int(ff.cast("size_t", arl_create_test_image_ffi))
@ff.callback("void (*)(ARLConf *, double, int, char*, Image *)", onerror=handle_error)
def arl_create_low_test_image_from_gleam_ffi(lowconfig, cellsize, npixel, c_phasecentre, out_img):
    """C entry point: build a GLEAM-based LOW test sky model image (flux
    limit 1.0 Jy, primary beam applied) and store it in *out_img*."""
    py_outimg = cImage(out_img, new=True)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    phasecentre = load_phasecentre(c_phasecentre)
    print("About to start create_low_test_image_from_gleam")
    res = create_low_test_image_from_gleam(npixel=npixel, frequency=frequency,
                                           channel_bandwidth=channel_bandwidth, cellsize=cellsize, flux_limit = 1.0, phasecentre=phasecentre, applybeam=True)
    # Debug snapshot of the model before/after the (disabled) WCS tweak.
    export_image_to_hdf5(res, '%s/gleam_model_res.hdf'%(results_dir))
    nchan, npol, ny, nx = res.data.shape
    # WCS re-centering intentionally disabled (kept for reference):
#    res.wcs.wcs.crval[0] = phasecentre.ra.deg
#    res.wcs.wcs.crval[1] = phasecentre.dec.deg
#    res.wcs.wcs.crpix[0] = float(nx // 2)
#    res.wcs.wcs.crpix[1] = float(ny // 2)
    export_image_to_hdf5(res, '%s/gleam_model_res1.hdf'%(results_dir))
    store_image_in_c(py_outimg, res)
arl_create_low_test_image_from_gleam=collections.namedtuple("FFIX", "address")
arl_create_low_test_image_from_gleam.address=int(ff.cast("size_t", arl_create_low_test_image_from_gleam_ffi))
@ff.callback("void (*)(const ARLVis *, const Image *, ARLVis *)", onerror=handle_error)
def arl_predict_2d_ffi(vis_in, img, vis_out):
    """C entry point: 2D (no w-term) prediction of model image *img* onto
    the uv-coverage of *vis_in*; predicted visibilities go to *vis_out*."""
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    c_img = cImage(img)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    res = predict_2d(py_visin, c_img)
    # Mirror shape metadata, then copy the predicted data into C memory.
    vis_out.nvis = vis_in.nvis
    vis_out.npol = vis_in.npol
    c_visout = cARLVis(vis_out)
    numpy.copyto(c_visout, res.data)
    store_phasecentre(vis_out.phasecentre, res.phasecentre)
    #arl_copy_visibility(py_visin, c_visout, False)
arl_predict_2d=collections.namedtuple("FFIX", "address")
arl_predict_2d.address=int(ff.cast("size_t", arl_predict_2d_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, const Image *, ARLVis *, ARLVis *, long long int *)", onerror=handle_error)
def arl_predict_function_ffi(lowconfig, vis_in, img, vis_out, blockvis_out, cindex_out):
    """C entry point: w-stack prediction (51 slices) of model *img* onto the
    block visibility *vis_in*; outputs the flat visibility, the block
    visibility and the coalescence index."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Output coalescence index buffer (NOTE: dtype='int' with 8-byte slots
    # assumes a 64-bit platform int).
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_out, 8*cindex_size), dtype='int', count=cindex_size)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    c_img = cImage(img)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
#    print("--------------------> predict_list_serial_workflow Phasecentre : ", py_visin.phasecentre.ra.deg, py_visin.phasecentre.dec.deg)
    res = predict_list_serial_workflow(py_visin, c_img, vis_slices=51, context='wstack')
#    print("--------------------> predict_list_serial_workflow sizeof(py_visin.data), sizeof(res.data)", sys.getsizeof(py_visin.data[:]), sys.getsizeof(res.data[:]))
#    print("--------------------> predict_list_serial_workflow cindex", type(res.cindex), type(res.cindex[0]), len(res.cindex))
#    print("--------------------> predict_list_serial_workflow sys.getsizeof(res.cindex)", sys.getsizeof(res.cindex))
#    print("--------------------> predict_list_serial_workflow np.sum(predicted_vis.data): ", numpy.sum(res.data['vis']))
#    print("--------------------> predict_list_serial_workflow predicted_vis.data: ", res.data)
#    print("--------------------> predict_list_serial_workflow py_visin.data): ", py_visin.data)
#    print("predict_list_serial_workflow np.sum(predicted_vis.data): ", numpy.sum(res.data['vis']))
    # NOTE(review): unlike the copy wrappers, only npol is mirrored here --
    # vis_out.nvis is left untouched; confirm the C side pre-sets it.
    vis_out.npol = vis_in.npol
    c_visout = cARLVis(vis_out)
    numpy.copyto(c_visout, res.data)
    store_phasecentre(vis_out.phasecentre, res.phasecentre)
    numpy.copyto(py_cindex, res.cindex)
    py_blockvis_out = cARLBlockVis(blockvis_out, lowconfig.nant, lowconfig.nfreqs)
    numpy.copyto(py_blockvis_out, res.blockvis.data)
    store_phasecentre(blockvis_out.phasecentre, res.phasecentre)
arl_predict_function=collections.namedtuple("FFIX", "address")
arl_predict_function.address=int(ff.cast("size_t", arl_predict_function_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, const Image *)", onerror=handle_error)
def arl_predict_function_blockvis_ffi(lowconfig, vis_in, img):
    """Predict model visibilities from *img* into the C BlockVisibility *vis_in*.

    *vis_in* is both input and output: the prediction result is copied back
    into the same C-owned buffer via numpy.copyto at the end.
    """
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Zero-copy wrap of the C-side BlockVisibility buffer.
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    # 8 bytes per element: the C arrays hold double precision ('f8') values.
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    c_img = cImage(img)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    log.info(qa_image(c_img, context='arl_predict_function'))
    # export_image_to_fits(c_img, '%s/imaging-blockvis_model_in_predicted_function.fits'%(results_dir))
    # export_blockvisibility_to_hdf5(py_visin, '%s/py_visin.hdf'%(results_dir))
    # export_image_to_hdf5(c_img, '%s/gleam_model_c_img.hdf'%(results_dir))
    # NOTE(review): vis_slices is hard-coded to 51 here, unlike the ical
    # variant which receives it as a parameter — confirm this is intended.
    py_blockvis = predict_list_serial_workflow(py_visin, c_img, vis_slices=51, context='wstack')
    # export_blockvisibility_to_hdf5(py_blockvis, '%s/py_blockvis.hdf'%(results_dir))
    # print(qa_visibility(py_blockvis, context='arl_predict_function_blockvis py_blockvis'))
    # print("arl_predict_function_blockvis :", py_visin, py_blockvis)
    # Write the predicted data back into the shared C buffer.
    numpy.copyto(c_visin, py_blockvis.data)
    # store_phasecentre(vis_out.phasecentre, res.phasecentre)
    # print("arl_predict_function_blockvis np.sum(py_blockvis.data): ", numpy.sum(py_blockvis.data['vis']))
    # print("arl_predict_function_blockvis nchan npol nants ", py_blockvis.nchan, py_blockvis.npol, py_blockvis.nants)
    # print("arl_predict_function_blockvis sum(uvw) ", numpy.sum(py_blockvis.uvw))
    # print("arl_predict_function_blockvis sum(vis) ", numpy.sum(py_blockvis.vis))
    # print("arl_predict_function_blockvis sum(weight) ", numpy.sum(py_blockvis.weight))
    # print("arl_predict_function_blockvis time", py_blockvis.time, numpy.sum(py_blockvis.time))
    # print("arl_predict_function_blockvis integration_time", py_blockvis.integration_time, numpy.sum(py_blockvis.integration_time))
    # print("arl_predict_function_blockvis nvis, size", py_blockvis.nvis, py_blockvis.size())
# Export the callback address for the C side.
arl_predict_function_blockvis=collections.namedtuple("FFIX", "address")
arl_predict_function_blockvis.address=int(ff.cast("size_t", arl_predict_function_blockvis_ffi))
@ff.callback("void (*)(ARLConf *, ARLVis *, const Image *, ARLVis *, long long int *, int)", onerror=handle_error)
def arl_predict_function_ical_ffi(lowconfig, vis_inout, img, blockvis_inout, cindex_inout, vis_slices):
    """ICAL-flavoured predict: fills *vis_inout*, *blockvis_inout* and the
    coalescence index *cindex_inout* (all C-owned, updated in place)."""
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # One cindex entry per (antenna pair, channel, time) sample; 8 bytes each.
    cindex_size = lowconfig.nant*lowconfig.nant*lowconfig.nfreqs*lowconfig.ntimes
    py_cindex = numpy.frombuffer(ff.buffer(cindex_inout, 8*cindex_size), dtype='int', count=cindex_size)
    c_visinout = cARLVis(vis_inout)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_visinout = helper_create_visibility_object(c_visinout)
    py_visinout.configuration = lowcore
    py_visinout.phasecentre = load_phasecentre(vis_inout.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visinout.polarisation_frame = PolarisationFrame(polframe)
    # Attach the block visibility and coalescence index that the coalesced
    # Visibility refers back to.
    py_blockvis_inout = cARLBlockVis(blockvis_inout, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisinout = helper_create_blockvisibility_object(py_blockvis_inout, frequency, channel_bandwidth, lowcore)
    py_visinout.blockvis = py_blockvisinout
    py_visinout.cindex = py_cindex
    c_img = cImage(img)
    res = predict_list_serial_workflow(py_visinout, c_img, vis_slices=vis_slices, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # print("####################> arl_predict_function_ical: ", type(res))
    # Copy every result component back into the shared C buffers.
    numpy.copyto(c_visinout, res.data)
    store_phasecentre(vis_inout.phasecentre, res.phasecentre)
    numpy.copyto(py_cindex, res.cindex)
    numpy.copyto(py_blockvis_inout, res.blockvis.data)
    store_phasecentre(blockvis_inout.phasecentre, res.phasecentre)
    # print("predict_function_ical np.sum(res.data): ", numpy.sum(res.data['vis']))
    # print("predict_function_ical np.sum(res.blockvis.data): ", numpy.sum(res.blockvis.data['vis']))
# Export the callback address for the C side.
arl_predict_function_ical=collections.namedtuple("FFIX", "address")
arl_predict_function_ical.address=int(ff.cast("size_t", arl_predict_function_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """Invert (grid + FFT) the visibility *vis_in* onto the model image grid
    of *img*, storing the resulting dirty image into the C image *img_dirty*.

    Fix: removed dead locals the original computed but never used
    (``times``, ``frequency``, ``channel_bandwidth`` and the unpacked image
    shape) — ``helper_create_visibility_object`` only needs the raw buffer.
    """
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating Visibility object from the C-owned buffer (zero-copy).
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images: input model is wrapped, output is freshly allocated.
    py_img = cImage(img)
    py_img_dirty = cImage(img_dirty, new=True)
    # export_blockvisibility_to_hdf5(py_visin, '%s/py_visin_invert_function.hdf'%(results_dir))
    # export_image_to_hdf5(py_img, '%s/model_invert_function.hdf'%(results_dir))
    dirty, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, dopsf=False, context='wstack')
    # Copy Python dirty image into C image
    store_image_in_c(py_img_dirty, dirty)
# Export the callback address for the C side.
arl_invert_function=collections.namedtuple("FFIX", "address")
arl_invert_function.address=int(ff.cast("size_t", arl_invert_function_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_blockvis_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """BlockVisibility variant of the invert callback: grids *vis_in* onto the
    model grid of *img* and writes the dirty image into *img_dirty*.

    Fix: removed dead locals the original computed but never used
    (``times`` and the unpacked dirty-image shape).
    """
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object; frequency/bandwidth come from the
    # C config arrays (8 bytes per 'f8' element).
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLBlockVis(vis_in, lowconfig.nant, lowconfig.nfreqs)
    py_visin = helper_create_blockvisibility_object(c_visin, frequency, channel_bandwidth, lowcore)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images: input model is wrapped, output is freshly allocated.
    py_img = cImage(img)
    py_img_dirty = cImage(img_dirty, new=True)
    # export_blockvisibility_to_hdf5(py_visin, '%s/py_visin_invert_function.hdf'%(results_dir))
    # export_image_to_hdf5(py_img, '%s/model_invert_function.hdf'%(results_dir))
    dirty, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, dopsf=False, context='wstack')
    # Copy Python dirty image into C image
    store_image_in_c(py_img_dirty, dirty)
# Export the callback address for the C side.
arl_invert_function_blockvis=collections.namedtuple("FFIX", "address")
arl_invert_function_blockvis.address=int(ff.cast("size_t", arl_invert_function_blockvis_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_ical_ffi(lowconfig, vis_in, img, vis_slices, img_dirty):
    """ICAL-flavoured invert: produces the residual/dirty image for *vis_in*
    and stores it into the C image *img_dirty*."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating Visibility object
    # NOTE(review): times/frequency/channel_bandwidth and the shape unpack
    # below appear unused in this function — candidates for removal.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images
    py_img = cImage(img)
    py_img_dirty = cImage(img_dirty, new=True)
    # Calling invert_list_serial_workflow()
    # NOTE(review): several kwargs here (niter, nmajor, gain, ...) look like
    # deconvolution parameters, not invert parameters — confirm the callee
    # accepts/ignores them.
    dirty, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    nchan, npol, ny, nx = dirty.data.shape
    # dirty.wcs.wcs.crval[0] = py_visin.phasecentre.ra.deg
    # dirty.wcs.wcs.crval[1] = py_visin.phasecentre.dec.deg
    # dirty.wcs.wcs.crpix[0] = float(nx // 2)
    # dirty.wcs.wcs.crpix[1] = float(ny // 2)
    # Copy Python dirty image into C image
    store_image_in_c(py_img_dirty, dirty)
    log.info("Maximum in residual image is %.6f" % (numpy.max(numpy.abs(dirty.data))))
# Export the callback address for the C side.
arl_invert_function_ical=collections.namedtuple("FFIX", "address")
arl_invert_function_ical.address=int(ff.cast("size_t", arl_invert_function_ical_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *)", onerror=handle_error)
def arl_invert_function_psf_ffi(lowconfig, vis_in, img, vis_slices, img_psf):
    """PSF variant of the invert callback (dopsf=True): computes the point
    spread function for *vis_in* and stores it into the C image *img_psf*."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating Visibility object
    # NOTE(review): times/frequency/channel_bandwidth and the shape unpack
    # below appear unused in this function — candidates for removal.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_visin = cARLVis(vis_in)
    py_visin = helper_create_visibility_object(c_visin)
    py_visin.phasecentre = load_phasecentre(vis_in.phasecentre)
    py_visin.configuration = lowcore
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_visin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images
    py_img = cImage(img)
    py_img_psf = cImage(img_psf, new=True)
    # Calling invert_list_serial_workflow() with dopsf=True (PSF mode)
    psf, sumwt = invert_list_serial_workflow(py_visin, py_img, vis_slices=vis_slices, dopsf=True, context='wstack',
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    nchan, npol, ny, nx = psf.data.shape
    # psf.wcs.wcs.crval[0] = py_visin.phasecentre.ra.deg
    # psf.wcs.wcs.crval[1] = py_visin.phasecentre.dec.deg
    # psf.wcs.wcs.crpix[0] = float(nx // 2)
    # psf.wcs.wcs.crpix[1] = float(ny // 2)
    # Copy Python PSF image into C image
    store_image_in_c(py_img_psf, psf)
# Export the callback address for the C side.
arl_invert_function_psf=collections.namedtuple("FFIX", "address")
arl_invert_function_psf.address=int(ff.cast("size_t", arl_invert_function_psf_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, Image *, int, Image *, Image *, Image *)", onerror=handle_error)
def arl_ical_ffi(lowconfig, blockvis_in, img_model, vis_slices, img_deconvolved, img_residual, img_restored):
    """Run the full ICAL (iterative calibration + imaging) loop over the
    BlockVisibility *blockvis_in*, writing the deconvolved, residual and
    restored images into the three C output images."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object (C arrays are 'f8', 8 bytes each).
    # NOTE(review): ``times`` appears unused below — candidate for removal.
    times = numpy.frombuffer(ff.buffer(lowconfig.times, 8*lowconfig.ntimes), dtype='f8', count=lowconfig.ntimes)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # Re-creating images: model is wrapped, the three outputs are allocated.
    py_model = cImage(img_model)
    py_img_deconvolved = cImage(img_deconvolved, new=True)
    py_img_residual = cImage(img_residual, new=True)
    py_img_restored = cImage(img_restored, new=True)
    # Calling ical_list_serial_workflow()
    deconvolved, residual, restored = ical_list_serial_workflow(block_vis=py_blockvisin, model=py_model, vis_slices=vis_slices,
        timeslice='auto',
        algorithm='hogbom', niter=1000, fractional_threshold=0.1, threshold=0.1,
        context='wstack', nmajor=5, gain=0.1, first_selfcal=1,
        global_solution=False)
    # Preparing deconvolved
    # NOTE(review): the shape unpacks below only feed the commented-out WCS
    # fix-ups and are otherwise unused.
    nchan, npol, ny, nx = deconvolved.data.shape
    # deconvolved.wcs.wcs.crval[0] = py_blockvisin.phasecentre.ra.deg
    # deconvolved.wcs.wcs.crval[1] = py_blockvisin.phasecentre.dec.deg
    # deconvolved.wcs.wcs.crpix[0] = float(nx // 2)
    # deconvolved.wcs.wcs.crpix[1] = float(ny // 2)
    store_image_in_c(py_img_deconvolved, deconvolved)
    # Preparing residual
    nchan, npol, ny, nx = residual.data.shape
    # residual.wcs.wcs.crval[0] = py_blockvisin.phasecentre.ra.deg
    # residual.wcs.wcs.crval[1] = py_blockvisin.phasecentre.dec.deg
    # residual.wcs.wcs.crpix[0] = float(nx // 2)
    # residual.wcs.wcs.crpix[1] = float(ny // 2)
    store_image_in_c(py_img_residual, residual)
    # Preparing restored
    nchan, npol, ny, nx = restored.data.shape
    # restored.wcs.wcs.crval[0] = py_blockvisin.phasecentre.ra.deg
    # restored.wcs.wcs.crval[1] = py_blockvisin.phasecentre.dec.deg
    # restored.wcs.wcs.crpix[0] = float(nx // 2)
    # restored.wcs.wcs.crpix[1] = float(ny // 2)
    store_image_in_c(py_img_restored, restored)
# Export the callback address for the C side.
arl_ical=collections.namedtuple("FFIX", "address")
arl_ical.address=int(ff.cast("size_t", arl_ical_ffi))
# Fix: this was the only callback in the module registered without
# onerror=handle_error, so exceptions raised inside it would be swallowed by
# CFFI's default behaviour instead of being routed through the shared handler.
@ff.callback("void (*)(const ARLVis *, const Image *, bool dopsf, Image *, double *)", onerror=handle_error)
def arl_invert_2d_ffi(invis, in_image, dopsf, out_image, sumwt):
    """Simple 2-D invert of *invis* onto the grid of *in_image*; the result
    is stored into the C image *out_image*.

    NOTE(review): the C-side ``sumwt`` output pointer is shadowed by the
    Python local returned from invert_2d and is never written back — confirm
    whether the C caller expects the sum of weights to be filled in.
    """
    py_visin = helper_create_visibility_object(cARLVis(invis))
    c_in_img = cImage(in_image)
    c_out_img = cImage(out_image, new=True)
    py_visin.phasecentre = load_phasecentre(invis.phasecentre)
    if dopsf:
        out, sumwt = invert_2d(py_visin, c_in_img, dopsf=True)
    else:
        out, sumwt = invert_2d(py_visin, c_in_img)
    store_image_in_c_2(c_out_img, out)
# Export the callback address for the C side.
arl_invert_2d=collections.namedtuple("FFIX", "address")
arl_invert_2d.address=int(ff.cast("size_t", arl_invert_2d_ffi))
@ff.callback("void (*)(const ARLVis *, Image *)", onerror=handle_error)
def arl_create_image_from_visibility_ffi(vis_in, img_in):
    """Create a blank model image matched to *vis_in* and store it into the
    (newly allocated) C image *img_in*.

    NOTE(review): cellsize and npixel are hard-coded here, unlike the
    blockvisibility variant which takes them as parameters.
    """
    c_vis = cARLVis(vis_in)
    c_img = cImage(img_in, new=True);
    # We need a proper Visibility object - not this, and not a cARLVis
    # This is temporary - just so we have some data to pass to
    # the create_... routine
    tvis = helper_create_visibility_object(c_vis)
    tvis.phasecentre = load_phasecentre(vis_in.phasecentre)
    # Default args for now
    image = create_image_from_visibility(tvis, cellsize=0.001, npixel=256)
    #numpy.copyto(c_img.data, image.data)
    # Pickle WCS and polframe, until better way is found to handle these data
    # structures
    #store_image_pickles(c_img, image)
    store_image_in_c(c_img, image)
# Export the callback address for the C side.
arl_create_image_from_visibility=collections.namedtuple("FFIX", "address")
arl_create_image_from_visibility.address=int(ff.cast("size_t",
    arl_create_image_from_visibility_ffi))
@ff.callback("void (*)(ARLConf *, const ARLVis *, double, int, char*, Image *)", onerror=handle_error)
def arl_create_image_from_blockvisibility_ffi(lowconfig, blockvis_in, cellsize, npixel, c_phasecentre, img_out):
    """Create a single-channel model image matched to *blockvis_in* with the
    given cellsize/npixel and store it into the C image *img_out*."""
    # Creating configuration
    lowcore_name = str(ff.string(lowconfig.confname), 'utf-8')
    lowcore = create_named_configuration(lowcore_name, rmax=lowconfig.rmax)
    # Re-creating BlockVisibility object
    c_blockvisin = cARLBlockVis(blockvis_in, lowconfig.nant, lowconfig.nfreqs)
    frequency = numpy.frombuffer(ff.buffer(lowconfig.freqs, 8*lowconfig.nfreqs), dtype='f8', count=lowconfig.nfreqs)
    channel_bandwidth = numpy.frombuffer(ff.buffer(lowconfig.channel_bandwidth, 8*lowconfig.nchanwidth), dtype='f8', count=lowconfig.nchanwidth)
    py_blockvisin = helper_create_blockvisibility_object(c_blockvisin, frequency, channel_bandwidth, lowcore)
    # py_blockvisin.phasecentre = load_phasecentre(blockvis_in.phasecentre)
    # Copying phasecentre and other metadata
    phasecentre = load_phasecentre(c_phasecentre)
    py_blockvisin.phasecentre = phasecentre
    polframe = str(ff.string(lowconfig.polframe), 'utf-8')
    py_blockvisin.polarisation_frame = PolarisationFrame(polframe)
    # NOTE(review): phasecentre1 is rebuilt from the config's ra/dec rather
    # than reusing the pickled phasecentre above — confirm both agree.
    phasecentre1 = SkyCoord(ra=lowconfig.pc_ra * u.deg, dec=lowconfig.pc_dec*u.deg, frame='icrs',
        equinox='J2000')
    # Re-creating Image object
    py_outimg = cImage(img_out, new=True);
    # Construct a model from py_blockvisin: one channel covering the average
    # frequency with the summed bandwidth.
    res = create_image_from_visibility(py_blockvisin, npixel=npixel, frequency=[numpy.average(frequency)], nchan=1,
        channel_bandwidth=[numpy.sum(channel_bandwidth)], cellsize=cellsize, phasecentre=phasecentre1)
    #numpy.copyto(c_img.data, image.data)
    # Pickle WCS and polframe, until better way is found to handle these data
    # structures
    #store_image_pickles(c_img, image)
    nchan, npol, ny, nx = res.data.shape
    # res.wcs.wcs.crval[0] = phasecentre1.ra.deg
    # res.wcs.wcs.crval[1] = phasecentre1.dec.deg
    # res.wcs.wcs.crpix[0] = float(nx // 2)
    # res.wcs.wcs.crpix[1] = float(ny // 2)
    store_image_in_c(py_outimg, res)
# Export the callback address for the C side.
arl_create_image_from_blockvisibility=collections.namedtuple("FFIX", "address")
arl_create_image_from_blockvisibility.address=int(ff.cast("size_t",
    arl_create_image_from_blockvisibility_ffi))
@ff.callback("void (*)(Image *, Image *, Image *, Image *)", onerror=handle_error)
def arl_deconvolve_cube_ffi(dirty, psf, restored, residual):
    """Deconvolve *dirty* with *psf*, storing the two result images into the
    C images *restored* and *residual*.

    NOTE(review): deconvolve_cube conventionally returns
    (deconvolved_component, residual); the first element is stored into the
    parameter named ``restored`` — the naming may be misleading but the
    store targets are consistent with the C signature.
    """
    c_dirty = cImage(dirty)
    c_psf = cImage(psf)
    # Output images are freshly allocated on the Python side.
    c_residual = cImage(residual, new=True)
    c_restored = cImage(restored, new=True)
    py_restored, py_residual = deconvolve_cube(c_dirty, c_psf,
        niter=1000,threshold=0.001, fracthresh=0.01, window_shape='quarter',
        gain=0.7, scales=[0,3,10,30])
    store_image_in_c(c_restored,py_restored)
    store_image_in_c(c_residual,py_residual)
# Export the callback address for the C side.
arl_deconvolve_cube=collections.namedtuple("FFIX", "address")
arl_deconvolve_cube.address=int(ff.cast("size_t", arl_deconvolve_cube_ffi))
@ff.callback("void (*)(Image *, Image *, Image *, Image *)", onerror=handle_error)
def arl_deconvolve_cube_ical_ffi(dirty, psf, restored, residual):
    """ICAL-flavoured deconvolution of *dirty* with *psf* into *restored*
    and *residual*.

    NOTE(review): several kwargs here (timeslice, nmajor, first_selfcal,
    global_solution) look like pipeline parameters rather than
    deconvolve_cube parameters — confirm the callee accepts/ignores them.
    """
    c_dirty = cImage(dirty)
    c_psf = cImage(psf)
    # Output images are freshly allocated on the Python side.
    c_residual = cImage(residual, new=True)
    c_restored = cImage(restored, new=True)
    py_restored, py_residual = deconvolve_cube(c_dirty, c_psf,
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    store_image_in_c(c_restored,py_restored)
    store_image_in_c(c_residual,py_residual)
# Export the callback address for the C side.
arl_deconvolve_cube_ical=collections.namedtuple("FFIX", "address")
arl_deconvolve_cube_ical.address=int(ff.cast("size_t", arl_deconvolve_cube_ical_ffi))
@ff.callback("void (*)(Image *, Image *, Image*, Image*)", onerror=handle_error)
def arl_restore_cube_ffi(model, psf, residual, restored):
    """Restore a clean image from *model*, *psf* and an optional *residual*,
    storing the result into the C image *restored*."""
    # Wrap the C-side input images as Python Image objects.
    model_img = cImage(model)
    psf_img = cImage(psf)
    # The residual pointer may be NULL on the C side.
    residual_img = cImage(residual) if residual else None
    restored_img = cImage(restored, new=True)
    # Run the restoration and copy the result back into the C struct.
    result = restore_cube(model_img, psf_img, residual_img)
    store_image_in_c(restored_img, result)
# Export the callback address for the C side.
arl_restore_cube=collections.namedtuple("FFIX", "address")
arl_restore_cube.address=int(ff.cast("size_t", arl_restore_cube_ffi))
@ff.callback("void (*)(Image *, Image *, Image*, Image*)", onerror=handle_error)
def arl_restore_cube_ical_ffi(model, psf, residual, restored):
    """ICAL-flavoured restore of *model* with *psf* and optional *residual*
    into the C image *restored*.

    NOTE(review): the extra kwargs passed to restore_cube (timeslice,
    nmajor, first_selfcal, ...) look like pipeline parameters — confirm the
    callee accepts/ignores them.
    """
    # Cast C Image structs to Python objects
    c_model = cImage(model)
    c_psf = cImage(psf)
    # The residual pointer may be NULL on the C side.
    if residual:
        c_residual = cImage(residual)
    else:
        c_residual = None
    c_restored = cImage(restored, new=True)
    # Calculate
    py_restored = restore_cube(c_model, c_psf, c_residual,
        timeslice='auto', algorithm='hogbom', niter=1000, fractional_threshold=0.1,
        threshold=0.1, nmajor=5, gain=0.1, first_selfcal=1, global_solution=False)
    # Copy Python result to C result struct
    store_image_in_c(c_restored,py_restored)
# Export the callback address for the C side.
arl_restore_cube_ical=collections.namedtuple("FFIX", "address")
arl_restore_cube_ical.address=int(ff.cast("size_t", arl_restore_cube_ical_ffi))
| true | true |
1c3486e97720206517862fb40985bad2ec8551e4 | 577 | py | Python | test.py | tnemelck/kmeans | c1095c6bfc134f4fc9e2c79a781b42d5ee38620f | [
"OML"
] | null | null | null | test.py | tnemelck/kmeans | c1095c6bfc134f4fc9e2c79a781b42d5ee38620f | [
"OML"
] | null | null | null | test.py | tnemelck/kmeans | c1095c6bfc134f4fc9e2c79a781b42d5ee38620f | [
"OML"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 00:42:53 2018
@author: elvex
"""
import numpy as np
import numpy.random as npr
import random
def init_board(N, mini = -1, maxi = 1):
    """Return an (N, 2) array of points sampled uniformly from [mini, maxi)."""
    # Bug fix: the original read ``npr.uniform(mini, maxi (N, 2))`` — a
    # missing comma turned the upper bound into a (failing) function call.
    X = npr.uniform(mini, maxi, (N, 2))
    return X
def init_board_gauss(N, k, mini = -1, maxi = 1, ecart_min = 0.05, ecart_max = 0.10):
    """Return ~N points grouped into k Gaussian clusters.

    Each cluster centre is drawn uniformly from [mini, maxi) and its standard
    deviation uniformly from [ecart_min, ecart_max]. Returns an
    (k * (N // k), 2) array (integer division, so up to k-1 points fewer
    than N).

    Bug fix: the defaults were ``mini = -1, maxi = -1`` and the centre was
    drawn from ``uniform(-mini, maxi, 2)``; for the default arguments this
    accidentally produced U(-1, 1), but any explicit bounds were negated.
    The centre is now drawn directly from [mini, maxi), matching init_board.
    """
    n = N // k
    X = []
    for i in range(k):
        centre, s = npr.uniform(mini, maxi, 2), random.uniform(ecart_min, ecart_max)
        x = npr.normal(centre, s, (n, 2))
        X.append(x)
    X = np.vstack(X)
    return X
import numpy as np
import numpy.random as npr
import random
def init_board(N, mini = -1, maxi = 1):
    """Return an (N, 2) array of points sampled uniformly from [mini, maxi)."""
    # Bug fix: the original read ``npr.uniform(mini, maxi (N, 2))`` — a
    # missing comma turned the upper bound into a (failing) function call.
    X = npr.uniform(mini, maxi, (N, 2))
    return X
def init_board_gauss(N, k, mini = -1, maxi = 1, ecart_min = 0.05, ecart_max = 0.10):
    """Return ~N points grouped into k Gaussian clusters (see init_board for
    the sampling range convention).

    Bug fix: the defaults were ``mini = -1, maxi = -1`` and the centre was
    drawn from ``uniform(-mini, maxi, 2)``, negating any explicit lower
    bound; the centre is now drawn directly from [mini, maxi).
    """
    n = N // k
    X = []
    for i in range(k):
        centre, s = npr.uniform(mini, maxi, 2), random.uniform(ecart_min, ecart_max)
        x = npr.normal(centre, s, (n, 2))
        X.append(x)
    X = np.vstack(X)
    return X
1c348712057f34dba1eac147defbb4d6ce2a05b4 | 25,054 | py | Python | awx/main/models/projects.py | SysBind/awx | 2e0dd61bb63d729054e97b9cf3560b3f6bc63d4f | [
"Apache-2.0"
] | 1 | 2021-05-13T17:38:03.000Z | 2021-05-13T17:38:03.000Z | awx/main/models/projects.py | SysBind/awx | 2e0dd61bb63d729054e97b9cf3560b3f6bc63d4f | [
"Apache-2.0"
] | 11 | 2021-04-20T15:03:55.000Z | 2021-07-14T21:34:16.000Z | awx/main/models/projects.py | TinLe/awx | 73d8c12e3bf5b193305ed1202549331ea00088c1 | [
"Apache-2.0"
] | 1 | 2021-08-30T02:41:32.000Z | 2021-08-30T02:41:32.000Z | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import datetime
import os
import urllib.parse as urlparse
# Django
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str, smart_text
from django.utils.text import slugify
from django.core.exceptions import ValidationError
from django.utils.timezone import now, make_aware, get_default_timezone
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
from awx.main.models.events import ProjectUpdateEvent, UnpartitionedProjectUpdateEvent
from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.models.unified_jobs import (
UnifiedJob,
UnifiedJobTemplate,
)
from awx.main.models.jobs import Job
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin, RelatedJobsMixin
from awx.main.utils import update_scm_url, polymorphic
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.utils.execution_environments import get_control_plane_execution_environment
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.fields import JSONField
__all__ = ['Project', 'ProjectUpdate']
class ProjectOptions(models.Model):
    """Abstract model mixin holding the SCM-related fields and validation
    shared by Project and ProjectUpdate."""

    SCM_TYPE_CHOICES = [
        ('', _('Manual')),
        ('git', _('Git')),
        ('svn', _('Subversion')),
        ('insights', _('Red Hat Insights')),
        ('archive', _('Remote Archive')),
    ]

    class Meta:
        abstract = True

    # Project files must be available on the server in folders directly
    # beneath the path specified by settings.PROJECTS_ROOT. There is no way
    # via the API to upload/update a project or its playbooks; this must be
    # done by other means for now.

    @classmethod
    def get_local_path_choices(cls):
        # Offer only directories under PROJECTS_ROOT that are not hidden
        # ('.'), not internal ('_') and not already claimed by a project.
        if os.path.exists(settings.PROJECTS_ROOT):
            paths = [
                x
                for x in os.listdir(settings.PROJECTS_ROOT)
                if (os.path.isdir(os.path.join(settings.PROJECTS_ROOT, x)) and not x.startswith('.') and not x.startswith('_'))
            ]
            qs = Project.objects
            used_paths = qs.values_list('local_path', flat=True)
            return [x for x in paths if x not in used_paths]
        else:
            return []

    local_path = models.CharField(
        max_length=1024, blank=True, help_text=_('Local path (relative to PROJECTS_ROOT) containing ' 'playbooks and related files for this project.')
    )

    scm_type = models.CharField(
        max_length=8,
        choices=SCM_TYPE_CHOICES,
        blank=True,
        default='',
        verbose_name=_('SCM Type'),
        help_text=_("Specifies the source control system used to store the project."),
    )

    scm_url = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        verbose_name=_('SCM URL'),
        help_text=_("The location where the project is stored."),
    )

    scm_branch = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name=_('SCM Branch'),
        help_text=_('Specific branch, tag or commit to checkout.'),
    )

    scm_refspec = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        verbose_name=_('SCM refspec'),
        help_text=_('For git projects, an additional refspec to fetch.'),
    )

    scm_clean = models.BooleanField(
        default=False,
        help_text=_('Discard any local changes before syncing the project.'),
    )

    scm_delete_on_update = models.BooleanField(
        default=False,
        help_text=_('Delete the project before syncing.'),
    )

    scm_track_submodules = models.BooleanField(
        default=False,
        help_text=_('Track submodules latest commits on defined branch.'),
    )

    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )

    timeout = models.IntegerField(
        blank=True,
        default=0,
        help_text=_("The amount of time (in seconds) to run before the task is canceled."),
    )

    def clean_scm_type(self):
        # Normalize None to the empty string ('' means a manual project).
        return self.scm_type or ''

    def clean_scm_url(self):
        # Insights projects always use the configured Insights base URL.
        if self.scm_type == 'insights':
            self.scm_url = settings.INSIGHTS_URL_BASE
        scm_url = str(self.scm_url or '')
        if not self.scm_type:
            return ''
        try:
            scm_url = update_scm_url(self.scm_type, scm_url, check_special_cases=False)
        except ValueError as e:
            raise ValidationError((e.args or (_('Invalid SCM URL.'),))[0])
        scm_url_parts = urlparse.urlsplit(scm_url)
        if self.scm_type and not any(scm_url_parts):
            raise ValidationError(_('SCM URL is required.'))
        return str(self.scm_url or '')

    def clean_credential(self):
        # Manual projects never use a credential.
        if not self.scm_type:
            return None
        cred = self.credential
        if not cred and self.scm_type == 'insights':
            raise ValidationError(_("Insights Credential is required for an Insights Project."))
        elif cred:
            # The credential kind must match the project type.
            if self.scm_type == 'insights':
                if cred.kind != 'insights':
                    raise ValidationError(_("Credential kind must be 'insights'."))
            elif cred.kind != 'scm':
                raise ValidationError(_("Credential kind must be 'scm'."))
        try:
            if self.scm_type == 'insights':
                self.scm_url = settings.INSIGHTS_URL_BASE
            scm_url = update_scm_url(self.scm_type, self.scm_url, check_special_cases=False)
            scm_url_parts = urlparse.urlsplit(scm_url)
            # Prefer the username/password in the URL, if provided.
            scm_username = scm_url_parts.username or cred.get_input('username', default='')
            if scm_url_parts.password or cred.has_input('password'):
                scm_password = '********'
            else:
                scm_password = ''
            try:
                # Re-validate the URL with the credential applied.
                update_scm_url(self.scm_type, self.scm_url, scm_username, scm_password)
            except ValueError as e:
                raise ValidationError((e.args or (_('Invalid credential.'),))[0])
        except ValueError:
            # An unparsable URL is reported by clean_scm_url, not here.
            pass
        return cred

    def resolve_execution_environment(self):
        """
        Project updates, themselves, will use the control plane execution environment.
        Jobs using the project can use the default_environment, but the project updates
        are not flexible enough to allow customizing the image they use.
        """
        return get_control_plane_execution_environment()

    def get_project_path(self, check_if_exists=True):
        # Returns None for empty or hidden ('.'-prefixed) local paths, or
        # (when check_if_exists) for paths missing on disk.
        local_path = os.path.basename(self.local_path)
        if local_path and not local_path.startswith('.'):
            proj_path = os.path.join(settings.PROJECTS_ROOT, local_path)
            if not check_if_exists or os.path.exists(smart_str(proj_path)):
                return proj_path

    def get_cache_path(self):
        # Per-project cache directory under a shared hidden cache root.
        local_path = os.path.basename(self.local_path)
        if local_path:
            return os.path.join(settings.PROJECTS_ROOT, '.__awx_cache', local_path)

    @property
    def playbooks(self):
        # Case-insensitively sorted relative paths of playbook candidates
        # found anywhere under the project directory.
        results = []
        project_path = self.get_project_path()
        if project_path:
            for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=settings.AWX_SHOW_PLAYBOOK_LINKS):
                if skip_directory(dirpath):
                    continue
                for filename in filenames:
                    playbook = could_be_playbook(project_path, dirpath, filename)
                    if playbook is not None:
                        results.append(smart_text(playbook))
        return sorted(results, key=lambda x: smart_str(x).lower())

    @property
    def inventories(self):
        # Case-insensitively sorted relative paths that *could* be Ansible
        # inventory files found under the project directory.
        results = []
        project_path = self.get_project_path()
        if project_path:
            # Cap the number of results, because it could include lots
            max_inventory_listing = 50
            for dirpath, dirnames, filenames in os.walk(smart_str(project_path)):
                if skip_directory(dirpath):
                    continue
                for filename in filenames:
                    inv_path = could_be_inventory(project_path, dirpath, filename)
                    if inv_path is not None:
                        results.append(smart_text(inv_path))
                        if len(results) > max_inventory_listing:
                            break
                if len(results) > max_inventory_listing:
                    break
        return sorted(results, key=lambda x: smart_str(x).lower())

    def get_lock_file(self):
        """
        We want the project path in name only, we don't care if it exists or
        not. This method will just append .lock onto the full directory path.
        """
        proj_path = self.get_project_path(check_if_exists=False)
        if not proj_path:
            return None
        return proj_path + '.lock'
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin):
    """
    A project represents a playbook git repo that can access a set of inventories
    """

    # Uniqueness of (name, organization) is enforced at the application
    # level for polymorphic unified job templates.
    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials']
    FIELDS_TO_DISCARD_AT_COPY = ['local_path']
    # Changing any of these fields triggers a new project update.
    FIELDS_TRIGGER_UPDATE = frozenset(['scm_url', 'scm_branch', 'scm_type', 'scm_refspec'])

    class Meta:
        app_label = 'main'
        ordering = ('id',)

    default_environment = models.ForeignKey(
        'ExecutionEnvironment',
        null=True,
        blank=True,
        default=None,
        on_delete=polymorphic.SET_NULL,
        related_name='+',
        help_text=_('The default execution environment for jobs run using this project.'),
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_('Update the project when a job is launched that uses the project.'),
    )
    scm_update_cache_timeout = models.PositiveIntegerField(
        default=0,
        blank=True,
        help_text=_('The number of seconds after the last project update ran that a new ' 'project update will be launched as a job dependency.'),
    )
    allow_override = models.BooleanField(
        default=False,
        help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
    )

    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_('The last revision fetched by a project update'),
    )

    playbook_files = JSONField(
        blank=True,
        default=[],
        editable=False,
        verbose_name=_('Playbook Files'),
        help_text=_('List of playbooks found in the project'),
    )

    inventory_files = JSONField(
        blank=True,
        default=[],
        editable=False,
        verbose_name=_('Inventory Files'),
        help_text=_('Suggested list of content that could be Ansible inventory in the project'),
    )

    # RBAC role hierarchy: admin implies use/update, which (with org
    # auditors and system auditors) imply read.
    admin_role = ImplicitRoleField(
        parent_role=[
            'organization.project_admin_role',
            'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
        ]
    )
    use_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    update_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    read_role = ImplicitRoleField(
        parent_role=[
            'organization.auditor_role',
            'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
            'use_role',
            'update_role',
        ]
    )
@classmethod
def _get_unified_job_class(cls):
return ProjectUpdate
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in ProjectOptions._meta.fields) | set(['name', 'description', 'organization'])
def clean_organization(self):
if self.pk:
old_org_id = getattr(self, '_prior_values_store', {}).get('organization_id', None)
if self.organization_id != old_org_id and self.jobtemplates.exists():
raise ValidationError({'organization': _('Organization cannot be changed when in use by job templates.')})
return self.organization
def save(self, *args, **kwargs):
new_instance = not bool(self.pk)
pre_save_vals = getattr(self, '_prior_values_store', {})
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
skip_update = bool(kwargs.pop('skip_update', False))
# Create auto-generated local path if project uses SCM.
if self.pk and self.scm_type and not self.local_path.startswith('_'):
slug_name = slugify(str(self.name)).replace(u'-', u'_')
self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
if 'local_path' not in update_fields:
update_fields.append('local_path')
# Do the actual save.
super(Project, self).save(*args, **kwargs)
if new_instance:
update_fields = []
# Generate local_path for SCM after initial save (so we have a PK).
if self.scm_type and not self.local_path.startswith('_'):
update_fields.append('local_path')
if update_fields:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.save(update_fields=update_fields)
# If we just created a new project with SCM, start the initial update.
# also update if certain fields have changed
relevant_change = any(pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None) for fd_name in self.FIELDS_TRIGGER_UPDATE)
if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
self.update()
def _get_current_status(self):
if self.scm_type:
if self.current_job and self.current_job.status:
return self.current_job.status
elif not self.last_job:
return 'never updated'
# inherit the child job status on failure
elif self.last_job_failed:
return self.last_job.status
# Return the successful status
else:
return self.last_job.status
elif not self.get_project_path():
return 'missing'
else:
return 'ok'
def _get_last_job_run(self):
if self.scm_type and self.last_job:
return self.last_job.finished
else:
project_path = self.get_project_path()
if project_path:
try:
mtime = os.path.getmtime(smart_str(project_path))
dt = datetime.datetime.fromtimestamp(mtime)
return make_aware(dt, get_default_timezone())
except os.error:
pass
def _can_update(self):
return bool(self.scm_type)
def create_project_update(self, **kwargs):
return self.create_unified_job(**kwargs)
@property
def cache_timeout_blocked(self):
if not self.last_job_run:
return False
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) > now():
return True
return False
@property
def needs_update_on_launch(self):
if self.scm_type and self.scm_update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
return True
return False
@property
def cache_id(self):
return str(self.last_job_id)
@property
def notification_templates(self):
base_notification_templates = NotificationTemplate.objects
error_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_errors=self))
started_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_started=self))
success_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_success=self))
# Get Organization NotificationTemplates
if self.organization is not None:
error_notification_templates = set(
error_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_errors=self.organization))
)
started_notification_templates = set(
started_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_started=self.organization))
)
success_notification_templates = set(
success_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_success=self.organization))
)
return dict(error=list(error_notification_templates), started=list(started_notification_templates), success=list(success_notification_templates))
def get_absolute_url(self, request=None):
return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request)
'''
RelatedJobsMixin
'''
def _get_related_jobs(self):
return UnifiedJob.objects.non_polymorphic().filter(models.Q(job__project=self) | models.Q(projectupdate__project=self))
def delete(self, *args, **kwargs):
paths_to_delete = (self.get_project_path(check_if_exists=False), self.get_cache_path())
r = super(Project, self).delete(*args, **kwargs)
for path_to_delete in paths_to_delete:
if self.scm_type and path_to_delete: # non-manual, concrete path
from awx.main.tasks import delete_project_files
delete_project_files.delay(path_to_delete)
return r
class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManagerProjectUpdateMixin):
    """
    Internal job for tracking project updates from SCM.
    """

    class Meta:
        app_label = 'main'

    # Parent Project this update run belongs to; deleting the project
    # cascades to its updates.
    project = models.ForeignKey(
        'Project',
        related_name='project_updates',
        on_delete=models.CASCADE,
        editable=False,
    )
    # 'check' (default) fetches/verifies; see PROJECT_UPDATE_JOB_TYPE_CHOICES
    # for the full set of update job types.
    job_type = models.CharField(
        max_length=64,
        choices=PROJECT_UPDATE_JOB_TYPE_CHOICES,
        default='check',
    )
    job_tags = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        help_text=_('Parts of the project update playbook that will be run.'),
    )
    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_('The SCM Revision discovered by this update for the given project and branch.'),
    )

    def _get_parent_field_name(self):
        # UnifiedJob hook: the parent template field on this model is 'project'.
        return 'project'

    def _update_parent_instance(self):
        """Propagate this update's status onto the parent Project, except for
        sync-type runs that are not the project's first/current update."""
        if not self.project:
            return  # no parent instance to update
        if self.job_type == PERM_INVENTORY_DEPLOY:
            # Do not update project status if this is sync job
            # unless no other updates have happened or started
            first_update = False
            if self.project.status == 'never updated' and self.status == 'running':
                first_update = True
            elif self.project.current_job == self:
                first_update = True
            if not first_update:
                return
        return super(ProjectUpdate, self)._update_parent_instance()

    @classmethod
    def _get_task_class(cls):
        """Return the Celery-style task class that executes this job type."""
        from awx.main.tasks import RunProjectUpdate

        return RunProjectUpdate

    def _global_timeout_setting(self):
        # Name of the settings key providing the default timeout for updates.
        return 'DEFAULT_PROJECT_UPDATE_TIMEOUT'

    def is_blocked_by(self, obj):
        """Task-manager hook: an update is blocked by another update or a job
        running against the same project.

        NOTE(review): exact type() comparisons appear deliberate here (these
        are polymorphic models) — do not replace with isinstance.
        """
        if type(obj) == ProjectUpdate:
            if self.project == obj.project:
                return True
        if type(obj) == Job:
            if self.project == obj.project:
                return True
        return False

    def websocket_emit_data(self):
        # Include the parent project id in websocket status payloads.
        websocket_data = super(ProjectUpdate, self).websocket_emit_data()
        websocket_data.update(dict(project_id=self.project.id))
        return websocket_data

    @property
    def can_run_on_control_plane(self):
        # Project updates always run on the control plane.
        return True

    @property
    def event_class(self):
        # Events may predate table partitioning; pick the matching model.
        if self.has_unpartitioned_events:
            return UnpartitionedProjectUpdateEvent
        return ProjectUpdateEvent

    @property
    def task_impact(self):
        # 'run' type updates are treated as free; others count as 1 unit.
        return 0 if self.job_type == 'run' else 1

    @property
    def result_stdout(self):
        # Redacted, ASCII-escaped stdout for display.
        return self._result_stdout_raw(redact_sensitive=True, escape_ascii=True)

    @property
    def result_stdout_raw(self):
        # Redacted but otherwise raw stdout.
        return self._result_stdout_raw(redact_sensitive=True)

    @property
    def branch_override(self):
        """Whether a branch other than the project default is used."""
        if not self.project:
            return True
        return bool(self.scm_branch and self.scm_branch != self.project.scm_branch)

    @property
    def cache_id(self):
        # Branch overrides and check-type runs get their own cache entry;
        # otherwise share the parent project's cache id.
        if self.branch_override or self.job_type == 'check' or (not self.project):
            return str(self.id)
        return self.project.cache_id

    def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True):
        # Line-ranged variant of result_stdout_raw.
        return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive=redact_sensitive)

    def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=True):
        # Line-ranged, ASCII-escaped variant of result_stdout.
        return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive=redact_sensitive, escape_ascii=True)

    def get_absolute_url(self, request=None):
        return reverse('api:project_update_detail', kwargs={'pk': self.pk}, request=request)

    def get_ui_url(self):
        # Deep link into the UI's job detail view for this update.
        return urlparse.urljoin(settings.TOWER_URL_BASE, "/#/jobs/project/{}".format(self.pk))

    def cancel(self, job_explanation=None, is_chain=False):
        """Cancel this update; for non-sync launches also cancel any running
        SCM inventory updates that were sourced from it."""
        res = super(ProjectUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
        if res and self.launch_type != 'sync':
            for inv_src in self.scm_inventory_updates.filter(status='running'):
                inv_src.cancel(job_explanation='Source project update `{}` was canceled.'.format(self.name))
        return res

    '''
    JobNotificationMixin
    '''

    def get_notification_templates(self):
        # Notifications are configured on the parent project.
        return self.project.notification_templates

    def get_notification_friendly_name(self):
        return "Project Update"

    @property
    def preferred_instance_groups(self):
        """Instance groups to run on: template groups plus organization
        groups, padded with the control plane group when every candidate is a
        container group, falling back to the global groups when empty."""
        if self.organization is not None:
            organization_groups = [x for x in self.organization.instance_groups.all()]
        else:
            organization_groups = []
        template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
        selected_groups = template_groups + organization_groups
        if not any([not group.is_container_group for group in selected_groups]):
            selected_groups = selected_groups + list(self.control_plane_instance_group)
        if not selected_groups:
            return self.global_instance_groups
        return selected_groups

    def save(self, *args, **kwargs):
        """Save, normalizing ``job_tags``: default tags when unset, and add or
        remove the 'delete' tag to match ``scm_delete_on_update``."""
        added_update_fields = []
        if not self.job_tags:
            job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']
            self.job_tags = ','.join(job_tags)
            added_update_fields.append('job_tags')
        if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':
            self.job_tags = ','.join([self.job_tags, 'delete'])
            added_update_fields.append('job_tags')
        elif (not self.scm_delete_on_update) and 'delete' in self.job_tags:
            job_tags = self.job_tags.split(',')
            job_tags.remove('delete')
            self.job_tags = ','.join(job_tags)
            added_update_fields.append('job_tags')
        if 'update_fields' in kwargs:
            # Keep caller-specified partial saves aware of our tag changes.
            kwargs['update_fields'].extend(added_update_fields)
        return super(ProjectUpdate, self).save(*args, **kwargs)
| 38.485407 | 156 | 0.646963 |
import datetime
import os
import urllib.parse as urlparse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str, smart_text
from django.utils.text import slugify
from django.core.exceptions import ValidationError
from django.utils.timezone import now, make_aware, get_default_timezone
from awx.api.versioning import reverse
from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
from awx.main.models.events import ProjectUpdateEvent, UnpartitionedProjectUpdateEvent
from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.models.unified_jobs import (
UnifiedJob,
UnifiedJobTemplate,
)
from awx.main.models.jobs import Job
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin, RelatedJobsMixin
from awx.main.utils import update_scm_url, polymorphic
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.utils.execution_environments import get_control_plane_execution_environment
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.fields import JSONField
__all__ = ['Project', 'ProjectUpdate']
class ProjectOptions(models.Model):
SCM_TYPE_CHOICES = [
('', _('Manual')),
('git', _('Git')),
('svn', _('Subversion')),
('insights', _('Red Hat Insights')),
('archive', _('Remote Archive')),
]
class Meta:
abstract = True
@classmethod
def get_local_path_choices(cls):
if os.path.exists(settings.PROJECTS_ROOT):
paths = [
x
for x in os.listdir(settings.PROJECTS_ROOT)
if (os.path.isdir(os.path.join(settings.PROJECTS_ROOT, x)) and not x.startswith('.') and not x.startswith('_'))
]
qs = Project.objects
used_paths = qs.values_list('local_path', flat=True)
return [x for x in paths if x not in used_paths]
else:
return []
local_path = models.CharField(
max_length=1024, blank=True, help_text=_('Local path (relative to PROJECTS_ROOT) containing ' 'playbooks and related files for this project.')
)
scm_type = models.CharField(
max_length=8,
choices=SCM_TYPE_CHOICES,
blank=True,
default='',
verbose_name=_('SCM Type'),
help_text=_("Specifies the source control system used to store the project."),
)
scm_url = models.CharField(
max_length=1024,
blank=True,
default='',
verbose_name=_('SCM URL'),
help_text=_("The location where the project is stored."),
)
scm_branch = models.CharField(
max_length=256,
blank=True,
default='',
verbose_name=_('SCM Branch'),
help_text=_('Specific branch, tag or commit to checkout.'),
)
scm_refspec = models.CharField(
max_length=1024,
blank=True,
default='',
verbose_name=_('SCM refspec'),
help_text=_('For git projects, an additional refspec to fetch.'),
)
scm_clean = models.BooleanField(
default=False,
help_text=_('Discard any local changes before syncing the project.'),
)
scm_delete_on_update = models.BooleanField(
default=False,
help_text=_('Delete the project before syncing.'),
)
scm_track_submodules = models.BooleanField(
default=False,
help_text=_('Track submodules latest commits on defined branch.'),
)
credential = models.ForeignKey(
'Credential',
related_name='%(class)ss',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
timeout = models.IntegerField(
blank=True,
default=0,
help_text=_("The amount of time (in seconds) to run before the task is canceled."),
)
def clean_scm_type(self):
return self.scm_type or ''
def clean_scm_url(self):
if self.scm_type == 'insights':
self.scm_url = settings.INSIGHTS_URL_BASE
scm_url = str(self.scm_url or '')
if not self.scm_type:
return ''
try:
scm_url = update_scm_url(self.scm_type, scm_url, check_special_cases=False)
except ValueError as e:
raise ValidationError((e.args or (_('Invalid SCM URL.'),))[0])
scm_url_parts = urlparse.urlsplit(scm_url)
if self.scm_type and not any(scm_url_parts):
raise ValidationError(_('SCM URL is required.'))
return str(self.scm_url or '')
def clean_credential(self):
if not self.scm_type:
return None
cred = self.credential
if not cred and self.scm_type == 'insights':
raise ValidationError(_("Insights Credential is required for an Insights Project."))
elif cred:
if self.scm_type == 'insights':
if cred.kind != 'insights':
raise ValidationError(_("Credential kind must be 'insights'."))
elif cred.kind != 'scm':
raise ValidationError(_("Credential kind must be 'scm'."))
try:
if self.scm_type == 'insights':
self.scm_url = settings.INSIGHTS_URL_BASE
scm_url = update_scm_url(self.scm_type, self.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
scm_username = scm_url_parts.username or cred.get_input('username', default='')
if scm_url_parts.password or cred.has_input('password'):
scm_password = '********'
else:
scm_password = ''
try:
update_scm_url(self.scm_type, self.scm_url, scm_username, scm_password)
except ValueError as e:
raise ValidationError((e.args or (_('Invalid credential.'),))[0])
except ValueError:
pass
return cred
def resolve_execution_environment(self):
return get_control_plane_execution_environment()
def get_project_path(self, check_if_exists=True):
local_path = os.path.basename(self.local_path)
if local_path and not local_path.startswith('.'):
proj_path = os.path.join(settings.PROJECTS_ROOT, local_path)
if not check_if_exists or os.path.exists(smart_str(proj_path)):
return proj_path
def get_cache_path(self):
local_path = os.path.basename(self.local_path)
if local_path:
return os.path.join(settings.PROJECTS_ROOT, '.__awx_cache', local_path)
@property
def playbooks(self):
results = []
project_path = self.get_project_path()
if project_path:
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=settings.AWX_SHOW_PLAYBOOK_LINKS):
if skip_directory(dirpath):
continue
for filename in filenames:
playbook = could_be_playbook(project_path, dirpath, filename)
if playbook is not None:
results.append(smart_text(playbook))
return sorted(results, key=lambda x: smart_str(x).lower())
@property
def inventories(self):
results = []
project_path = self.get_project_path()
if project_path:
max_inventory_listing = 50
for dirpath, dirnames, filenames in os.walk(smart_str(project_path)):
if skip_directory(dirpath):
continue
for filename in filenames:
inv_path = could_be_inventory(project_path, dirpath, filename)
if inv_path is not None:
results.append(smart_text(inv_path))
if len(results) > max_inventory_listing:
break
if len(results) > max_inventory_listing:
break
return sorted(results, key=lambda x: smart_str(x).lower())
def get_lock_file(self):
proj_path = self.get_project_path(check_if_exists=False)
if not proj_path:
return None
return proj_path + '.lock'
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin):
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials']
FIELDS_TO_DISCARD_AT_COPY = ['local_path']
FIELDS_TRIGGER_UPDATE = frozenset(['scm_url', 'scm_branch', 'scm_type', 'scm_refspec'])
class Meta:
app_label = 'main'
ordering = ('id',)
default_environment = models.ForeignKey(
'ExecutionEnvironment',
null=True,
blank=True,
default=None,
on_delete=polymorphic.SET_NULL,
related_name='+',
help_text=_('The default execution environment for jobs run using this project.'),
)
scm_update_on_launch = models.BooleanField(
default=False,
help_text=_('Update the project when a job is launched that uses the project.'),
)
scm_update_cache_timeout = models.PositiveIntegerField(
default=0,
blank=True,
help_text=_('The number of seconds after the last project update ran that a new ' 'project update will be launched as a job dependency.'),
)
allow_override = models.BooleanField(
default=False,
help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
)
scm_revision = models.CharField(
max_length=1024,
blank=True,
default='',
editable=False,
verbose_name=_('SCM Revision'),
help_text=_('The last revision fetched by a project update'),
)
playbook_files = JSONField(
blank=True,
default=[],
editable=False,
verbose_name=_('Playbook Files'),
help_text=_('List of playbooks found in the project'),
)
inventory_files = JSONField(
blank=True,
default=[],
editable=False,
verbose_name=_('Inventory Files'),
help_text=_('Suggested list of content that could be Ansible inventory in the project'),
)
admin_role = ImplicitRoleField(
parent_role=[
'organization.project_admin_role',
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
]
)
use_role = ImplicitRoleField(
parent_role='admin_role',
)
update_role = ImplicitRoleField(
parent_role='admin_role',
)
read_role = ImplicitRoleField(
parent_role=[
'organization.auditor_role',
'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
'use_role',
'update_role',
]
)
@classmethod
def _get_unified_job_class(cls):
return ProjectUpdate
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in ProjectOptions._meta.fields) | set(['name', 'description', 'organization'])
def clean_organization(self):
if self.pk:
old_org_id = getattr(self, '_prior_values_store', {}).get('organization_id', None)
if self.organization_id != old_org_id and self.jobtemplates.exists():
raise ValidationError({'organization': _('Organization cannot be changed when in use by job templates.')})
return self.organization
def save(self, *args, **kwargs):
new_instance = not bool(self.pk)
pre_save_vals = getattr(self, '_prior_values_store', {})
update_fields = kwargs.get('update_fields', [])
skip_update = bool(kwargs.pop('skip_update', False))
if self.pk and self.scm_type and not self.local_path.startswith('_'):
slug_name = slugify(str(self.name)).replace(u'-', u'_')
self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
if 'local_path' not in update_fields:
update_fields.append('local_path')
super(Project, self).save(*args, **kwargs)
if new_instance:
update_fields = []
if self.scm_type and not self.local_path.startswith('_'):
update_fields.append('local_path')
if update_fields:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.save(update_fields=update_fields)
relevant_change = any(pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None) for fd_name in self.FIELDS_TRIGGER_UPDATE)
if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
self.update()
def _get_current_status(self):
if self.scm_type:
if self.current_job and self.current_job.status:
return self.current_job.status
elif not self.last_job:
return 'never updated'
elif self.last_job_failed:
return self.last_job.status
else:
return self.last_job.status
elif not self.get_project_path():
return 'missing'
else:
return 'ok'
def _get_last_job_run(self):
if self.scm_type and self.last_job:
return self.last_job.finished
else:
project_path = self.get_project_path()
if project_path:
try:
mtime = os.path.getmtime(smart_str(project_path))
dt = datetime.datetime.fromtimestamp(mtime)
return make_aware(dt, get_default_timezone())
except os.error:
pass
def _can_update(self):
return bool(self.scm_type)
def create_project_update(self, **kwargs):
return self.create_unified_job(**kwargs)
@property
def cache_timeout_blocked(self):
if not self.last_job_run:
return False
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) > now():
return True
return False
@property
def needs_update_on_launch(self):
if self.scm_type and self.scm_update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
return True
return False
@property
def cache_id(self):
return str(self.last_job_id)
@property
def notification_templates(self):
base_notification_templates = NotificationTemplate.objects
error_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_errors=self))
started_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_started=self))
success_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_success=self))
if self.organization is not None:
error_notification_templates = set(
error_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_errors=self.organization))
)
started_notification_templates = set(
started_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_started=self.organization))
)
success_notification_templates = set(
success_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_success=self.organization))
)
return dict(error=list(error_notification_templates), started=list(started_notification_templates), success=list(success_notification_templates))
def get_absolute_url(self, request=None):
return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request)
def _get_related_jobs(self):
return UnifiedJob.objects.non_polymorphic().filter(models.Q(job__project=self) | models.Q(projectupdate__project=self))
def delete(self, *args, **kwargs):
paths_to_delete = (self.get_project_path(check_if_exists=False), self.get_cache_path())
r = super(Project, self).delete(*args, **kwargs)
for path_to_delete in paths_to_delete:
if self.scm_type and path_to_delete:
from awx.main.tasks import delete_project_files
delete_project_files.delay(path_to_delete)
return r
class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManagerProjectUpdateMixin):
class Meta:
app_label = 'main'
project = models.ForeignKey(
'Project',
related_name='project_updates',
on_delete=models.CASCADE,
editable=False,
)
job_type = models.CharField(
max_length=64,
choices=PROJECT_UPDATE_JOB_TYPE_CHOICES,
default='check',
)
job_tags = models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Parts of the project update playbook that will be run.'),
)
scm_revision = models.CharField(
max_length=1024,
blank=True,
default='',
editable=False,
verbose_name=_('SCM Revision'),
help_text=_('The SCM Revision discovered by this update for the given project and branch.'),
)
def _get_parent_field_name(self):
return 'project'
def _update_parent_instance(self):
if not self.project:
return
if self.job_type == PERM_INVENTORY_DEPLOY:
first_update = False
if self.project.status == 'never updated' and self.status == 'running':
first_update = True
elif self.project.current_job == self:
first_update = True
if not first_update:
return
return super(ProjectUpdate, self)._update_parent_instance()
@classmethod
def _get_task_class(cls):
from awx.main.tasks import RunProjectUpdate
return RunProjectUpdate
def _global_timeout_setting(self):
return 'DEFAULT_PROJECT_UPDATE_TIMEOUT'
def is_blocked_by(self, obj):
if type(obj) == ProjectUpdate:
if self.project == obj.project:
return True
if type(obj) == Job:
if self.project == obj.project:
return True
return False
def websocket_emit_data(self):
websocket_data = super(ProjectUpdate, self).websocket_emit_data()
websocket_data.update(dict(project_id=self.project.id))
return websocket_data
@property
def can_run_on_control_plane(self):
return True
@property
def event_class(self):
if self.has_unpartitioned_events:
return UnpartitionedProjectUpdateEvent
return ProjectUpdateEvent
@property
def task_impact(self):
return 0 if self.job_type == 'run' else 1
@property
def result_stdout(self):
return self._result_stdout_raw(redact_sensitive=True, escape_ascii=True)
@property
def result_stdout_raw(self):
return self._result_stdout_raw(redact_sensitive=True)
@property
def branch_override(self):
if not self.project:
return True
return bool(self.scm_branch and self.scm_branch != self.project.scm_branch)
@property
def cache_id(self):
if self.branch_override or self.job_type == 'check' or (not self.project):
return str(self.id)
return self.project.cache_id
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True):
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive=redact_sensitive)
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=True):
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive=redact_sensitive, escape_ascii=True)
def get_absolute_url(self, request=None):
return reverse('api:project_update_detail', kwargs={'pk': self.pk}, request=request)
def get_ui_url(self):
return urlparse.urljoin(settings.TOWER_URL_BASE, "/#/jobs/project/{}".format(self.pk))
def cancel(self, job_explanation=None, is_chain=False):
res = super(ProjectUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
if res and self.launch_type != 'sync':
for inv_src in self.scm_inventory_updates.filter(status='running'):
inv_src.cancel(job_explanation='Source project update `{}` was canceled.'.format(self.name))
return res
def get_notification_templates(self):
return self.project.notification_templates
def get_notification_friendly_name(self):
return "Project Update"
@property
def preferred_instance_groups(self):
if self.organization is not None:
organization_groups = [x for x in self.organization.instance_groups.all()]
else:
organization_groups = []
template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
selected_groups = template_groups + organization_groups
if not any([not group.is_container_group for group in selected_groups]):
selected_groups = selected_groups + list(self.control_plane_instance_group)
if not selected_groups:
return self.global_instance_groups
return selected_groups
def save(self, *args, **kwargs):
added_update_fields = []
if not self.job_tags:
job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']
self.job_tags = ','.join(job_tags)
added_update_fields.append('job_tags')
if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':
self.job_tags = ','.join([self.job_tags, 'delete'])
added_update_fields.append('job_tags')
elif (not self.scm_delete_on_update) and 'delete' in self.job_tags:
job_tags = self.job_tags.split(',')
job_tags.remove('delete')
self.job_tags = ','.join(job_tags)
added_update_fields.append('job_tags')
if 'update_fields' in kwargs:
kwargs['update_fields'].extend(added_update_fields)
return super(ProjectUpdate, self).save(*args, **kwargs)
| true | true |
1c3487ce1eb2d05b3106d67969bad7dcec987da5 | 28,051 | py | Python | tests/mxnet/test_nn.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | tests/mxnet/test_nn.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | tests/mxnet/test_nn.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | import mxnet as mx
import networkx as nx
import numpy as np
import scipy as sp
import pytest
import dgl
import dgl.nn.mxnet as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from test_utils import parametrize_idtype
from mxnet import autograd, gluon, nd
def check_close(a, b):
    """Assert two NDArray-like values are element-wise equal within loose tolerances."""
    lhs, rhs = a.asnumpy(), b.asnumpy()
    assert np.allclose(lhs, rhs, rtol=1e-4, atol=1e-4)
def _AXWb(A, X, W, b):
    """Dense reference computation A @ (X W) + b used to validate GraphConv output."""
    projected = mx.nd.dot(X, W.data(X.context))
    flat = projected.reshape(projected.shape[0], -1)
    aggregated = mx.nd.dot(A, flat).reshape(projected.shape)
    return aggregated + b.data(X.context)
@parametrize_idtype
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv(idtype, out_dim):
    """GraphConv smoke test on a 3-node path graph.

    Checks the no-norm output against a dense A X W + b reference, that the
    layer leaves graph ndata/edata untouched, and that existing node features
    are not overwritten.
    """
    g = dgl.from_networkx(nx.path_graph(3))
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)

    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
    conv.initialize(ctx=ctx)
    # test#1: no-norm output matches the dense reference on 2-D features
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: same check with higher-dimensional (3-D) features
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, out_dim)
    conv.initialize(ctx=ctx)

    # test#3: default-norm layer runs on 2-D features (shape/side-effect check only)
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: default-norm layer runs on 3-D features
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, out_dim)
    conv.initialize(ctx=ctx)

    with autograd.train_mode():
        # test#5: forward under autograd training mode, 2-D features
        h0 = F.ones((3, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0
        # test#6: forward under autograd training mode, 3-D features
        h0 = F.ones((3, 5, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0

    # test#7: the layer must not override pre-existing node features
    g.ndata["h"] = 2 * F.ones((3, 1))
    h1 = conv(g, h0)
    assert len(g.ndata) == 1
    assert len(g.edata) == 0
    assert "h" in g.ndata
    check_close(g.ndata['h'], 2 * F.ones((3, 1)))
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2(idtype, g, norm, weight, bias, out_dim):
    """GraphConv over homo/block-bipartite graphs: one output row per dst node."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
    layer.initialize(ctx=F.ctx())
    external_weight = F.randn((5, out_dim)).as_in_context(F.ctx())
    num_src = g.number_of_src_nodes()
    num_dst = g.number_of_dst_nodes()
    feat = F.randn((num_src, 5)).as_in_context(F.ctx())
    # The layer owns its weight when `weight`; otherwise pass one explicitly.
    out = layer(g, feat) if weight else layer(g, feat, external_weight)
    assert out.shape == (num_dst, out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2_bi(idtype, g, norm, weight, bias, out_dim):
    """GraphConv on bipartite graphs fed with a (src, dst) feature pair."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
    layer.initialize(ctx=F.ctx())
    external_weight = F.randn((5, out_dim)).as_in_context(F.ctx())
    num_src = g.number_of_src_nodes()
    num_dst = g.number_of_dst_nodes()
    src_feat = F.randn((num_src, 5)).as_in_context(F.ctx())
    dst_feat = F.randn((num_dst, out_dim)).as_in_context(F.ctx())
    pair = (src_feat, dst_feat)
    out = layer(g, pair) if weight else layer(g, pair, external_weight)
    assert out.shape == (num_dst, out_dim)
def _S2AXWb(A, N, X, W, b):
    """Dense reference for 2-hop TAGConv.

    Computes concat([X, S X, S^2 X]) W + b where S = N A N (N is the
    per-node D^-1/2 scaling passed in by the caller).
    """
    hop1 = mx.nd.dot(A, (X * N).reshape(X.shape[0], -1)) * N
    hop2 = mx.nd.dot(A, (hop1 * N).reshape(hop1.shape[0], -1)) * N
    stacked = mx.nd.concat(X, hop1, hop2, dim=-1)
    return mx.nd.dot(stacked, W) + b
@pytest.mark.parametrize('out_dim', [1, 2])
def test_tagconv(out_dim):
    """TAGConv smoke test: checks the biased layer against the dense
    reference _S2AXWb (2 hops, symmetric D^-1/2 normalization)."""
    g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)
    norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
    conv = nn.TAGConv(5, out_dim, bias=True)
    conv.initialize(ctx=ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # Broadcast the per-node norm over the trailing feature dims.
    shp = norm.shape + (1,) * (h0.ndim - 1)
    norm = norm.reshape(shp).as_in_context(h0.context)
    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.data(ctx), conv.h_bias.data(ctx)))
    conv = nn.TAGConv(5, out_dim)
    conv.initialize(ctx=ctx)
    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == out_dim
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 20])
@pytest.mark.parametrize('num_heads', [1, 5])
def test_gat_conv(g, idtype, out_dim, num_heads):
    """GATConv: output is (n_dst, heads, out_dim); attention is (n_edges, heads, 1)."""
    g = g.astype(idtype).to(F.ctx())
    dev = F.ctx()
    layer = nn.GATConv(10, out_dim, num_heads)
    layer.initialize(ctx=dev)
    print(layer)
    feat = F.randn((g.number_of_src_nodes(), 10))
    out = layer(g, feat)
    assert out.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, attn = layer(g, feat, True)
    assert attn.shape == (g.number_of_edges(), num_heads, 1)
    # The residual variant must also run on the same features.
    residual_layer = nn.GATConv(10, out_dim, num_heads, residual=True)
    residual_layer.initialize(ctx=dev)
    residual_layer(g, feat)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv_bi(g, idtype, out_dim, num_heads):
    """GATConv on bipartite graphs fed with a (src, dst) feature pair."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GATConv(5, out_dim, num_heads)
    layer.initialize(ctx=F.ctx())
    pair = (F.randn((g.number_of_src_nodes(), 5)),
            F.randn((g.number_of_dst_nodes(), 5)))
    out = layer(g, pair)
    assert out.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, attn = layer(g, pair, True)
    assert attn.shape == (g.number_of_edges(), num_heads, 1)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 10])
def test_sage_conv(idtype, g, aggre_type, out_dim):
    """SAGEConv produces out_dim features for every aggregator type."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.SAGEConv(5, out_dim, aggre_type)
    layer.initialize(ctx=F.ctx())
    feat = F.randn((g.number_of_src_nodes(), 5))
    out = layer(g, feat)
    assert out.shape[-1] == out_dim
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
    """Bipartite SAGEConv; 'gcn' aggregation needs matching src/dst widths."""
    g = g.astype(idtype).to(F.ctx())
    # 'gcn' folds src features into the dst aggregate, so dims must agree.
    dst_dim = 10 if aggre_type == 'gcn' else 5
    layer = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
    layer.initialize(ctx=F.ctx())
    pair = (F.randn((g.number_of_src_nodes(), 10)),
            F.randn((g.number_of_dst_nodes(), dst_dim)))
    out = layer(g, pair)
    assert out.shape[-1] == out_dim
    assert out.shape[0] == g.number_of_dst_nodes()
@parametrize_idtype
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi2(idtype, aggre_type, out_dim):
    """SAGEConv on a bipartite graph with zero edges still yields output
    of the right shape for every aggregator type."""
    # Test the case for graphs without edges
    g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage.initialize(ctx=ctx)
    h = sage(g, feat)
    assert h.shape[-1] == out_dim
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool']:
        sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage.initialize(ctx=ctx)
        h = sage(g, feat)
        assert h.shape[-1] == out_dim
        assert h.shape[0] == 3
def test_gg_conv():
    """GatedGraphConv: 10-d inputs -> 20-d outputs over 3 steps / 4 etypes."""
    g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
    dev = F.ctx()
    layer = nn.GatedGraphConv(10, 20, 3, 4)  # out=20, 3 steps, 4 edge types
    layer.initialize(ctx=dev)
    print(layer)
    feat = F.randn((20, 10))
    edge_types = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(dev)
    out = layer(g, feat, edge_types)
    assert out.shape == (20, 20)
@pytest.mark.parametrize('out_dim', [1, 20])
def test_cheb_conv(out_dim):
    """ChebConv (k=3) smoke test on a random Erdos-Renyi graph."""
    g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
    layer = nn.ChebConv(10, out_dim, 3)  # Chebyshev order k=3
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((20, 10)))
    assert out.shape == (20, out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_agnn_conv(g, idtype):
    """AGNNConv keeps the feature width (10) and emits one row per dst node."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.AGNNConv(0.1, True)
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((g.number_of_src_nodes(), 10)))
    assert out.shape == (g.number_of_dst_nodes(), 10)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_agnn_conv_bi(g, idtype):
    """AGNNConv on bipartite graphs fed with a (src, dst) feature pair."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.AGNNConv(0.1, True)
    layer.initialize(ctx=F.ctx())
    print(layer)
    pair = (F.randn((g.number_of_src_nodes(), 5)),
            F.randn((g.number_of_dst_nodes(), 5)))
    out = layer(g, pair)
    assert out.shape == (g.number_of_dst_nodes(), 5)
def test_appnp_conv():
    """APPNPConv (k=3, alpha=0.1) preserves the input feature shape."""
    g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
    layer = nn.APPNPConv(3, 0.1, 0)
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((20, 10)))
    assert out.shape == (20, 10)
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_cheb_conv(out_dim):
    """DenseChebConv must agree with sparse ChebConv for k = 1..3 once the
    weights/bias are copied across (lambda_max fixed at 2.0)."""
    for k in range(1, 4):
        ctx = F.ctx()
        g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
        adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
        cheb = nn.ChebConv(5, out_dim, k)
        dense_cheb = nn.DenseChebConv(5, out_dim, k)
        cheb.initialize(ctx=ctx)
        dense_cheb.initialize(ctx=ctx)
        # Copy the sparse layer's per-hop weights into the dense layer.
        for i in range(len(cheb.fc)):
            dense_cheb.fc[i].weight.set_data(
                cheb.fc[i].weight.data())
        if cheb.bias is not None:
            dense_cheb.bias.set_data(
                cheb.bias.data())
        feat = F.randn((100, 5))
        out_cheb = cheb(g, feat, [2.0])
        out_dense_cheb = dense_cheb(adj, feat, 2.0)
        assert F.allclose(out_cheb, out_dense_cheb)
@parametrize_idtype
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_graph_conv(idtype, g, norm_type, out_dim):
    """DenseGraphConv must match sparse GraphConv for every norm type
    once the weight and bias are copied across."""
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
    conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
    dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
    conv.initialize(ctx=ctx)
    dense_conv.initialize(ctx=ctx)
    # Share parameters so both layers compute the same function.
    dense_conv.weight.set_data(
        conv.weight.data())
    dense_conv.bias.set_data(
        conv.bias.data())
    feat = F.randn((g.number_of_src_nodes(), 5))
    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite', 'block-bipartite']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_sage_conv(idtype, g, out_dim):
    """DenseSAGEConv must match sparse SAGEConv ('gcn' aggregation) once
    the neighbor-FC parameters are copied across."""
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
    sage = nn.SAGEConv(5, out_dim, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, out_dim)
    sage.initialize(ctx=ctx)
    dense_sage.initialize(ctx=ctx)
    # Share parameters so both layers compute the same function.
    dense_sage.fc.weight.set_data(
        sage.fc_neigh.weight.data())
    dense_sage.fc.bias.set_data(
        sage.fc_neigh.bias.data())
    # Bipartite cases take a (src, dst) feature pair.
    if len(g.ntypes) == 2:
        feat = (
            F.randn((g.number_of_src_nodes(), 5)),
            F.randn((g.number_of_dst_nodes(), 5))
        )
    else:
        feat = F.randn((g.number_of_nodes(), 5))
    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv(g, idtype, out_dim):
    """EdgeConv maps 5-d node features to out_dim per destination node."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.EdgeConv(5, out_dim)
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((g.number_of_src_nodes(), 5)))
    assert out.shape == (g.number_of_dst_nodes(), out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv_bi(g, idtype, out_dim):
    """EdgeConv on bipartite graphs fed with a (src, dst) feature pair."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.EdgeConv(5, out_dim)
    layer.initialize(ctx=F.ctx())
    print(layer)
    src_feat = F.randn((g.number_of_src_nodes(), 5))
    dst_feat = F.randn((g.number_of_dst_nodes(), 5))
    out = layer(g, (src_feat, dst_feat))
    assert out.shape == (g.number_of_dst_nodes(), out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
    """GINConv with an identity apply-function keeps the feature width."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GINConv(lambda x: x, aggregator_type, 0.1)
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((g.number_of_src_nodes(), 5)))
    assert out.shape == (g.number_of_dst_nodes(), 5)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv_bi(g, idtype, aggregator_type):
    """Bipartite GINConv with an identity apply-function.

    Bugfix: the shape check previously used ``return`` instead of
    ``assert``, so the comparison result was silently discarded and the
    test could never fail.
    """
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gin_conv = nn.GINConv(lambda x: x, aggregator_type, 0.1)
    gin_conv.initialize(ctx=ctx)
    print(gin_conv)
    # test #2: bipartite
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    h = gin_conv(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), 5)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_gmm_conv(g, idtype):
    """GMMConv with 3 kernels, 5-d pseudo-coordinates, max aggregation."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GMMConv(5, 2, 5, 3, 'max')
    layer.initialize(ctx=F.ctx())
    node_feat = F.randn((g.number_of_src_nodes(), 5))
    coords = F.randn((g.number_of_edges(), 5))
    out = layer(g, node_feat, coords)
    assert out.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_gmm_conv_bi(g, idtype):
    """GMMConv on bipartite graphs with distinct src (5-d) / dst (4-d) widths."""
    g = g.astype(idtype).to(F.ctx())
    layer = nn.GMMConv((5, 4), 2, 5, 3, 'max')
    layer.initialize(ctx=F.ctx())
    src_feat = F.randn((g.number_of_src_nodes(), 5))
    dst_feat = F.randn((g.number_of_dst_nodes(), 4))
    coords = F.randn((g.number_of_edges(), 5))
    out = layer(g, (src_feat, dst_feat), coords)
    assert out.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
def test_nn_conv(g, idtype):
    """NNConv with an Embedding edge-function and max aggregation."""
    g = g.astype(idtype).to(F.ctx())
    dev = F.ctx()
    layer = nn.NNConv(5, 2, gluon.nn.Embedding(3, 5 * 2), 'max')
    layer.initialize(ctx=dev)
    node_feat = F.randn((g.number_of_src_nodes(), 5))
    edge_types = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(dev)
    out = layer(g, node_feat, edge_types)
    assert out.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
def test_nn_conv_bi(g, idtype):
    """NNConv on bipartite graphs with distinct src (5-d) / dst (4-d) widths."""
    g = g.astype(idtype).to(F.ctx())
    dev = F.ctx()
    layer = nn.NNConv((5, 4), 2, gluon.nn.Embedding(3, 5 * 2), 'max')
    layer.initialize(ctx=dev)
    src_feat = F.randn((g.number_of_src_nodes(), 5))
    dst_feat = F.randn((g.number_of_dst_nodes(), 4))
    edge_types = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(dev)
    out = layer(g, (src_feat, dst_feat), edge_types)
    assert out.shape == (g.number_of_dst_nodes(), 2)
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sg_conv(out_dim):
    """SGConv (k=2) smoke test on a self-looped random graph."""
    g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
    g = dgl.add_self_loop(g)  # SGConv needs nonzero in-degree everywhere
    layer = nn.SGConv(5, out_dim, 2)
    layer.initialize(ctx=F.ctx())
    print(layer)
    out = layer(g, F.randn((g.number_of_nodes(), 5)))
    assert out.shape == (g.number_of_nodes(), out_dim)
def test_set2set():
    """Set2Set pools node features into one 2*hidden vector per graph."""
    g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
    pool = nn.Set2Set(5, 3, 3)  # hidden=5, 3 iterations, 3 LSTM layers
    pool.initialize(ctx=F.ctx())
    print(pool)
    # Single graph: one readout row of width 2 * 5.
    out = pool(g, F.randn((g.number_of_nodes(), 5)))
    assert out.ndim == 2 and out.shape == (1, 10)
    # Batched graphs: one readout row per component graph.
    batch = dgl.batch([g, g, g])
    out = pool(batch, F.randn((batch.number_of_nodes(), 5)))
    assert out.ndim == 2 and out.shape == (3, 10)
def test_glob_att_pool():
    """GlobalAttentionPooling yields one 10-d readout row per graph."""
    g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
    pool = nn.GlobalAttentionPooling(gluon.nn.Dense(1), gluon.nn.Dense(10))
    pool.initialize(ctx=F.ctx())
    print(pool)
    # Single graph.
    out = pool(g, F.randn((g.number_of_nodes(), 5)))
    assert out.ndim == 2 and out.shape == (1, 10)
    # Batched graphs: one readout row per component graph.
    batch = dgl.batch([g, g, g, g])
    out = pool(batch, F.randn((batch.number_of_nodes(), 5)))
    assert out.ndim == 2 and out.shape == (4, 10)
def test_simple_pool():
    """Sum/Avg/Max/Sort pooling on a single graph and on a batched graph.

    The batched case checks the segment-wise reductions against manually
    sliced per-graph reductions (node counts 15, 5, 15, 5, 15).
    """
    g = dgl.from_networkx(nx.path_graph(15)).to(F.ctx())
    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10) # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)
    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = sum_pool(g, h0)
    check_close(F.squeeze(h1, 0), F.sum(h0, 0))
    h1 = avg_pool(g, h0)
    check_close(F.squeeze(h1, 0), F.mean(h0, 0))
    h1 = max_pool(g, h0)
    check_close(F.squeeze(h1, 0), F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.ndim == 2
    # test#2: batched graph
    g_ = dgl.from_networkx(nx.path_graph(5)).to(F.ctx())
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = sum_pool(bg, h0)
    truth = mx.nd.stack(F.sum(h0[:15], 0),
                        F.sum(h0[15:20], 0),
                        F.sum(h0[20:35], 0),
                        F.sum(h0[35:40], 0),
                        F.sum(h0[40:55], 0), axis=0)
    check_close(h1, truth)
    h1 = avg_pool(bg, h0)
    truth = mx.nd.stack(F.mean(h0[:15], 0),
                        F.mean(h0[15:20], 0),
                        F.mean(h0[20:35], 0),
                        F.mean(h0[35:40], 0),
                        F.mean(h0[40:55], 0), axis=0)
    check_close(h1, truth)
    h1 = max_pool(bg, h0)
    truth = mx.nd.stack(F.max(h0[:15], 0),
                        F.max(h0[15:20], 0),
                        F.max(h0[20:35], 0),
                        F.max(h0[35:40], 0),
                        F.max(h0[40:55], 0), axis=0)
    check_close(h1, truth)
    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.ndim == 2
@pytest.mark.parametrize('O', [1, 2, 8])
def test_rgcn(O):
    """RelGraphConv smoke tests: 'basis' and (when O % B == 0) 'bdd'
    regularizers, with and without an edge norm, plus node-id input."""
    ctx = F.ctx()
    etype = []
    g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)).to(F.ctx())
    # 5 etypes
    R = 5
    for i in range(g.number_of_edges()):
        etype.append(i % 5)
    B = 2
    I = 10
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
    rgc_basis.initialize(ctx=ctx)
    h = nd.random.randn(100, I, ctx=ctx)
    r = nd.array(etype, ctx=ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]
    # 'bdd' needs the output dim divisible by the number of blocks B.
    if O % B == 0:
        rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
        rgc_bdd.initialize(ctx=ctx)
        h = nd.random.randn(100, I, ctx=ctx)
        r = nd.array(etype, ctx=ctx)
        h_new = rgc_bdd(g, h, r)
        assert list(h_new.shape) == [100, O]
    # with norm
    norm = nd.zeros((g.number_of_edges(), 1), ctx=ctx)
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
    rgc_basis.initialize(ctx=ctx)
    h = nd.random.randn(100, I, ctx=ctx)
    r = nd.array(etype, ctx=ctx)
    h_new = rgc_basis(g, h, r, norm)
    assert list(h_new.shape) == [100, O]
    if O % B == 0:
        rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
        rgc_bdd.initialize(ctx=ctx)
        h = nd.random.randn(100, I, ctx=ctx)
        r = nd.array(etype, ctx=ctx)
        h_new = rgc_bdd(g, h, r, norm)
        assert list(h_new.shape) == [100, O]
    # id input
    rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
    rgc_basis.initialize(ctx=ctx)
    h = nd.random.randint(0, I, (100,), ctx=ctx)
    r = nd.array(etype, ctx=ctx)
    h_new = rgc_basis(g, h, r)
    assert list(h_new.shape) == [100, O]
def test_sequential():
    """nn.Sequential chains graph layers, threading (graph, features) through.

    Case 1 applies three identical layers to one graph; case 2 feeds a
    different graph to each layer (list of graphs), halving node count
    at each step via the reshape-and-sum in the example layer.
    """
    ctx = F.ctx()
    # test single graph
    class ExampleLayer(gluon.nn.Block):
        """Adds neighbor-summed features to nodes and u+v sums to edges."""
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
        def forward(self, graph, n_feat, e_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            graph.apply_edges(fn.u_add_v('h', 'h', 'e'))
            e_feat += graph.edata['e']
            return n_feat, e_feat
    g = dgl.graph(([], [])).to(F.ctx())
    g.add_nodes(3)
    g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2])
    net = nn.Sequential()
    net.add(ExampleLayer())
    net.add(ExampleLayer())
    net.add(ExampleLayer())
    net.initialize(ctx=ctx)
    n_feat = F.randn((3, 4))
    e_feat = F.randn((9, 4))
    n_feat, e_feat = net(g, n_feat, e_feat)
    assert n_feat.shape == (3, 4)
    assert e_feat.shape == (9, 4)
    # test multiple graphs
    class ExampleLayer(gluon.nn.Block):
        """Aggregates neighbors then pools node pairs, halving node count."""
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
        def forward(self, graph, n_feat):
            graph = graph.local_var()
            graph.ndata['h'] = n_feat
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            n_feat += graph.ndata['h']
            return n_feat.reshape(graph.number_of_nodes() // 2, 2, -1).sum(1)
    g1 = dgl.from_networkx(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
    g2 = dgl.from_networkx(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
    g3 = dgl.from_networkx(nx.erdos_renyi_graph(8, 0.8)).to(F.ctx())
    net = nn.Sequential()
    net.add(ExampleLayer())
    net.add(ExampleLayer())
    net.add(ExampleLayer())
    net.initialize(ctx=ctx)
    n_feat = F.randn((32, 4))
    n_feat = net([g1, g2, g3], n_feat)
    assert n_feat.shape == (4, 4)
def myagg(alist, dsttype):
    """Custom cross-type aggregator: weighted sum where entry i gets weight i + 1.

    ``dsttype`` is part of the HeteroGraphConv aggregator signature and is
    unused here.
    """
    total = alist[0]
    for weight, item in enumerate(alist[1:], start=2):
        total = total + weight * item
    return total
@parametrize_idtype
@pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
def test_hetero_conv(agg, idtype):
    """HeteroGraphConv over a 3-relation heterograph.

    Checks output keys/shapes for every cross-type aggregator (including
    the custom ``myagg``), block (bipartite-pair) inputs, per-module
    positional args via ``mod_args``, and the all-edges-removed case.
    """
    g = dgl.heterograph({
        ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
        ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
        ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
        idtype=idtype, device=F.ctx())
    conv = nn.HeteroGraphConv({
        'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
        'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
        'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
        agg)
    conv.initialize(ctx=F.ctx())
    print(conv)
    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))
    h = conv(g, {'user': uf, 'store': sf, 'game': gf})
    assert set(h.keys()) == {'user', 'game'}
    # 'stack' keeps one slice per incoming relation; others reduce them.
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)
    block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [0, 1, 2, 3], 'store': []}).to(F.ctx())
    h = conv(block, ({'user': uf, 'game': gf, 'store': sf}, {'user': uf, 'game': gf, 'store': sf[0:0]}))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)
    h = conv(block, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)
    # test with mod args
    class MyMod(mx.gluon.nn.Block):
        """Stub module counting how often it receives an extra positional arg."""
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.s1 = s1
            self.s2 = s2
        def forward(self, g, h, arg1=None): # mxnet does not support kwargs
            if arg1 is not None:
                self.carg1 += 1
            return F.zeros((g.number_of_dst_nodes(), self.s2))
    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3},
        agg)
    conv.initialize(ctx=F.ctx())
    mod_args = {'follows' : (1,), 'plays' : (1,)}
    h = conv(g, {'user' : uf, 'store' : sf, 'game': gf}, mod_args)
    assert mod1.carg1 == 1
    assert mod2.carg1 == 1
    assert mod3.carg1 == 0
    #conv on graph without any edges
    for etype in g.etypes:
        g = dgl.remove_edges(g, g.edges(form='eid', etype=etype), etype=etype)
    assert g.num_edges() == 0
    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [
        0, 1, 2, 3], 'store': []}).to(F.ctx())
    h = conv(block, ({'user': uf, 'game': gf, 'store': sf},
                     {'user': uf, 'game': gf, 'store': sf[0:0]}))
    assert set(h.keys()) == {'user', 'game'}
if __name__ == '__main__':
    # NOTE(review): most of the functions below are pytest-parametrized and
    # require injected arguments (idtype, g, out_dim, ...), so these bare
    # calls raise TypeError when the file is run as a script. Presumably a
    # leftover from before parametrization — run via pytest instead; confirm
    # before relying on this entry point.
    test_graph_conv()
    test_gat_conv()
    test_sage_conv()
    test_gg_conv()
    test_cheb_conv()
    test_agnn_conv()
    test_appnp_conv()
    test_dense_cheb_conv()
    test_dense_graph_conv()
    test_dense_sage_conv()
    test_edge_conv()
    test_gin_conv()
    test_gmm_conv()
    test_nn_conv()
    test_sg_conv()
    test_set2set()
    test_glob_att_pool()
    test_simple_pool()
    test_rgcn()
    test_sequential()
    test_hetero_conv()
| 33.960048 | 110 | 0.593597 | import mxnet as mx
import networkx as nx
import numpy as np
import scipy as sp
import pytest
import dgl
import dgl.nn.mxnet as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from test_utils import parametrize_idtype
from mxnet import autograd, gluon, nd
def check_close(a, b):
assert np.allclose(a.asnumpy(), b.asnumpy(), rtol=1e-4, atol=1e-4)
def _AXWb(A, X, W, b):
X = mx.nd.dot(X, W.data(X.context))
Y = mx.nd.dot(A, X.reshape(X.shape[0], -1)).reshape(X.shape)
return Y + b.data(X.context)
@parametrize_idtype
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv(idtype, out_dim):
g = dgl.from_networkx(nx.path_graph(3))
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=True, ctx=ctx)
conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
conv.initialize(ctx=ctx)
F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
conv = nn.GraphConv(5, out_dim)
conv.initialize(ctx=ctx)
F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
conv = nn.GraphConv(5, out_dim)
conv.initialize(ctx=ctx)
with autograd.train_mode():
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
h0 = F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
g.ndata["h"] = 2 * F.ones((3, 1))
h1 = conv(g, h0)
assert len(g.ndata) == 1
assert len(g.edata) == 0
assert "h" in g.ndata
check_close(g.ndata['h'], 2 * F.ones((3, 1)))
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2(idtype, g, norm, weight, bias, out_dim):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
conv.initialize(ctx=F.ctx())
ext_w = F.randn((5, out_dim)).as_in_context(F.ctx())
nsrc = g.number_of_src_nodes()
ndst = g.number_of_dst_nodes()
h = F.randn((nsrc, 5)).as_in_context(F.ctx())
if weight:
h_out = conv(g, h)
else:
h_out = conv(g, h, ext_w)
assert h_out.shape == (ndst, out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2_bi(idtype, g, norm, weight, bias, out_dim):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
conv.initialize(ctx=F.ctx())
ext_w = F.randn((5, out_dim)).as_in_context(F.ctx())
nsrc = g.number_of_src_nodes()
ndst = g.number_of_dst_nodes()
h = F.randn((nsrc, 5)).as_in_context(F.ctx())
h_dst = F.randn((ndst, out_dim)).as_in_context(F.ctx())
if weight:
h_out = conv(g, (h, h_dst))
else:
h_out = conv(g, (h, h_dst), ext_w)
assert h_out.shape == (ndst, out_dim)
def _S2AXWb(A, N, X, W, b):
X1 = X * N
X1 = mx.nd.dot(A, X1.reshape(X1.shape[0], -1))
X1 = X1 * N
X2 = X1 * N
X2 = mx.nd.dot(A, X2.reshape(X2.shape[0], -1))
X2 = X2 * N
X = mx.nd.concat(X, X1, X2, dim=-1)
Y = mx.nd.dot(X, W)
return Y + b
@pytest.mark.parametrize('out_dim', [1, 2])
def test_tagconv(out_dim):
g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=True, ctx=ctx)
norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
conv = nn.TAGConv(5, out_dim, bias=True)
conv.initialize(ctx=ctx)
print(conv)
F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
shp = norm.shape + (1,) * (h0.ndim - 1)
norm = norm.reshape(shp).as_in_context(h0.context)
assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.data(ctx), conv.h_bias.data(ctx)))
conv = nn.TAGConv(5, out_dim)
conv.initialize(ctx=ctx)
F.ones((3, 5))
h1 = conv(g, h0)
assert h1.shape[-1] == out_dim
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 20])
@pytest.mark.parametrize('num_heads', [1, 5])
def test_gat_conv(g, idtype, out_dim, num_heads):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(10, out_dim, num_heads)
gat.initialize(ctx=ctx)
print(gat)
feat = F.randn((g.number_of_src_nodes(), 10))
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
gat = nn.GATConv(10, out_dim, num_heads, residual=True)
gat.initialize(ctx=ctx)
h = gat(g, feat)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv_bi(g, idtype, out_dim, num_heads):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(5, out_dim, num_heads)
gat.initialize(ctx=ctx)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 10])
def test_sage_conv(idtype, g, aggre_type, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
sage = nn.SAGEConv(5, out_dim, aggre_type)
feat = F.randn((g.number_of_src_nodes(), 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == out_dim
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == g.number_of_dst_nodes()
@parametrize_idtype
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi2(idtype, aggre_type, out_dim):
g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
feat = (F.randn((5, 3)), F.randn((3, 3)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == 3
for aggre_type in ['mean', 'pool']:
sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
feat = (F.randn((5, 3)), F.randn((3, 1)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == 3
def test_gg_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
gg_conv = nn.GatedGraphConv(10, 20, 3, 4)
gg_conv.initialize(ctx=ctx)
print(gg_conv)
F.randn((20, 10))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = gg_conv(g, h0, etypes)
assert h1.shape == (20, 20)
@pytest.mark.parametrize('out_dim', [1, 20])
def test_cheb_conv(out_dim):
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
cheb = nn.ChebConv(10, out_dim, 3)
cheb.initialize(ctx=ctx)
print(cheb)
F.randn((20, 10))
h1 = cheb(g, h0)
assert h1.shape == (20, out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_agnn_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
agnn_conv = nn.AGNNConv(0.1, True)
agnn_conv.initialize(ctx=ctx)
print(agnn_conv)
feat = F.randn((g.number_of_src_nodes(), 10))
h = agnn_conv(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 10)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_agnn_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
agnn_conv = nn.AGNNConv(0.1, True)
agnn_conv.initialize(ctx=ctx)
print(agnn_conv)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = agnn_conv(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 5)
def test_appnp_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
appnp_conv = nn.APPNPConv(3, 0.1, 0)
appnp_conv.initialize(ctx=ctx)
print(appnp_conv)
F.randn((20, 10))
h1 = appnp_conv(g, h0)
assert h1.shape == (20, 10)
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_cheb_conv(out_dim):
for k in range(1, 4):
ctx = F.ctx()
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
cheb = nn.ChebConv(5, out_dim, k)
dense_cheb = nn.DenseChebConv(5, out_dim, k)
cheb.initialize(ctx=ctx)
dense_cheb.initialize(ctx=ctx)
for i in range(len(cheb.fc)):
dense_cheb.fc[i].weight.set_data(
cheb.fc[i].weight.data())
if cheb.bias is not None:
dense_cheb.bias.set_data(
cheb.bias.data())
feat = F.randn((100, 5))
out_cheb = cheb(g, feat, [2.0])
out_dense_cheb = dense_cheb(adj, feat, 2.0)
assert F.allclose(out_cheb, out_dense_cheb)
@parametrize_idtype
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_graph_conv(idtype, g, norm_type, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
conv.initialize(ctx=ctx)
dense_conv.initialize(ctx=ctx)
dense_conv.weight.set_data(
conv.weight.data())
dense_conv.bias.set_data(
conv.bias.data())
feat = F.randn((g.number_of_src_nodes(), 5))
out_conv = conv(g, feat)
out_dense_conv = dense_conv(adj, feat)
assert F.allclose(out_conv, out_dense_conv)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite', 'block-bipartite']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_sage_conv(idtype, g, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype('default')
sage = nn.SAGEConv(5, out_dim, 'gcn')
dense_sage = nn.DenseSAGEConv(5, out_dim)
sage.initialize(ctx=ctx)
dense_sage.initialize(ctx=ctx)
dense_sage.fc.weight.set_data(
sage.fc_neigh.weight.data())
dense_sage.fc.bias.set_data(
sage.fc_neigh.bias.data())
if len(g.ntypes) == 2:
feat = (
F.randn((g.number_of_src_nodes(), 5)),
F.randn((g.number_of_dst_nodes(), 5))
)
else:
feat = F.randn((g.number_of_nodes(), 5))
out_sage = sage(g, feat)
out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv(g, idtype, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, out_dim)
edge_conv.initialize(ctx=ctx)
print(edge_conv)
F.randn((g.number_of_src_nodes(), 5))
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_dst_nodes(), out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv_bi(g, idtype, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, out_dim)
edge_conv.initialize(ctx=ctx)
print(edge_conv)
F.randn((g.number_of_src_nodes(), 5))
x0 = F.randn((g.number_of_dst_nodes(), 5))
h1 = edge_conv(g, (h0, x0))
assert h1.shape == (g.number_of_dst_nodes(), out_dim)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gin_conv = nn.GINConv(lambda x: x, aggregator_type, 0.1)
gin_conv.initialize(ctx=ctx)
print(gin_conv)
= F.randn((g.number_of_src_nodes(), 5))
h = gin_conv(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 5)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv_bi(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gin_conv = nn.GINConv(lambda x: x, aggregator_type, 0.1)
gin_conv.initialize(ctx=ctx)
print(gin_conv)
F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gin_conv(g, feat)
return h.shape == (g.number_of_dst_nodes(), 5)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_gmm_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gmm_conv = nn.GMMConv(5, 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
h0 = F.randn((g.number_of_src_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, h0, pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_gmm_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gmm_conv = nn.GMMConv((5, 4), 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, (h0, hd), pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
def test_nn_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
nn_conv = nn.NNConv(5, 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
F.randn((g.number_of_src_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, h0, etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
def test_nn_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
nn_conv = nn.NNConv((5, 4), 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, (h0, hd), etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sg_conv(out_dim):
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
g = dgl.add_self_loop(g)
ctx = F.ctx()
sgc = nn.SGConv(5, out_dim, 2)
sgc.initialize(ctx=ctx)
print(sgc)
F.randn((g.number_of_nodes(), 5))
h1 = sgc(g, h0)
assert h1.shape == (g.number_of_nodes(), out_dim)
def test_set2set():
g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
ctx = F.ctx()
s2s = nn.Set2Set(5, 3, 3)
s2s.initialize(ctx=ctx)
print(s2s)
F.randn((g.number_of_nodes(), 5))
h1 = s2s(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
ch([g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = s2s(bg, h0)
assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.ndim == 2
def test_glob_att_pool():
g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
ctx = F.ctx()
gap = nn.GlobalAttentionPooling(gluon.nn.Dense(1), gluon.nn.Dense(10))
gap.initialize(ctx=ctx)
print(gap)
F.randn((g.number_of_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
ch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
def test_simple_pool():
g = dgl.from_networkx(nx.path_graph(15)).to(F.ctx())
sum_pool = nn.SumPooling()
avg_pool = nn.AvgPooling()
max_pool = nn.MaxPooling()
sort_pool = nn.SortPooling(10)
print(sum_pool, avg_pool, max_pool, sort_pool)
F.randn((g.number_of_nodes(), 5))
h1 = sum_pool(g, h0)
check_close(F.squeeze(h1, 0), F.sum(h0, 0))
h1 = avg_pool(g, h0)
check_close(F.squeeze(h1, 0), F.mean(h0, 0))
h1 = max_pool(g, h0)
check_close(F.squeeze(h1, 0), F.max(h0, 0))
h1 = sort_pool(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.ndim == 2
m_networkx(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = mx.nd.stack(F.sum(h0[:15], 0),
F.sum(h0[15:20], 0),
F.sum(h0[20:35], 0),
F.sum(h0[35:40], 0),
F.sum(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = avg_pool(bg, h0)
truth = mx.nd.stack(F.mean(h0[:15], 0),
F.mean(h0[15:20], 0),
F.mean(h0[20:35], 0),
F.mean(h0[35:40], 0),
F.mean(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = max_pool(bg, h0)
truth = mx.nd.stack(F.max(h0[:15], 0),
F.max(h0[15:20], 0),
F.max(h0[20:35], 0),
F.max(h0[35:40], 0),
F.max(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = sort_pool(bg, h0)
assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.ndim == 2
@pytest.mark.parametrize('O', [1, 2, 8])
def test_rgcn(O):
ctx = F.ctx()
etype = []
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)).to(F.ctx())
R = 5
for i in range(g.number_of_edges()):
etype.append(i % 5)
B = 2
I = 10
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r)
assert list(h_new.shape) == [100, O]
if O % B == 0:
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_bdd(g, h, r)
assert list(h_new.shape) == [100, O]
norm = nd.zeros((g.number_of_edges(), 1), ctx=ctx)
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r, norm)
assert list(h_new.shape) == [100, O]
if O % B == 0:
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_bdd(g, h, r, norm)
assert list(h_new.shape) == [100, O]
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randint(0, I, (100,), ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r)
assert list(h_new.shape) == [100, O]
def test_sequential():
ctx = F.ctx()
class ExampleLayer(gluon.nn.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, graph, n_feat, e_feat):
graph = graph.local_var()
graph.ndata['h'] = n_feat
graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
n_feat += graph.ndata['h']
graph.apply_edges(fn.u_add_v('h', 'h', 'e'))
e_feat += graph.edata['e']
return n_feat, e_feat
g = dgl.graph(([], [])).to(F.ctx())
g.add_nodes(3)
g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2])
net = nn.Sequential()
net.add(ExampleLayer())
net.add(ExampleLayer())
net.add(ExampleLayer())
net.initialize(ctx=ctx)
n_feat = F.randn((3, 4))
e_feat = F.randn((9, 4))
n_feat, e_feat = net(g, n_feat, e_feat)
assert n_feat.shape == (3, 4)
assert e_feat.shape == (9, 4)
class ExampleLayer(gluon.nn.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, graph, n_feat):
graph = graph.local_var()
graph.ndata['h'] = n_feat
graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
n_feat += graph.ndata['h']
return n_feat.reshape(graph.number_of_nodes() // 2, 2, -1).sum(1)
g1 = dgl.from_networkx(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
g2 = dgl.from_networkx(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
g3 = dgl.from_networkx(nx.erdos_renyi_graph(8, 0.8)).to(F.ctx())
net = nn.Sequential()
net.add(ExampleLayer())
net.add(ExampleLayer())
net.add(ExampleLayer())
net.initialize(ctx=ctx)
n_feat = F.randn((32, 4))
n_feat = net([g1, g2, g3], n_feat)
assert n_feat.shape == (4, 4)
def myagg(alist, dsttype):
rst = alist[0]
for i in range(1, len(alist)):
rst = rst + (i + 1) * alist[i]
return rst
@parametrize_idtype
@pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
def test_hetero_conv(agg, idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
idtype=idtype, device=F.ctx())
conv = nn.HeteroGraphConv({
'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
agg)
conv.initialize(ctx=F.ctx())
print(conv)
uf = F.randn((4, 2))
gf = F.randn((4, 4))
sf = F.randn((2, 3))
h = conv(g, {'user': uf, 'store': sf, 'game': gf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [0, 1, 2, 3], 'store': []}).to(F.ctx())
h = conv(block, ({'user': uf, 'game': gf, 'store': sf}, {'user': uf, 'game': gf, 'store': sf[0:0]}))
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
h = conv(block, {'user': uf, 'game': gf, 'store': sf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
class MyMod(mx.gluon.nn.Block):
def __init__(self, s1, s2):
super(MyMod, self).__init__()
self.carg1 = 0
self.s1 = s1
self.s2 = s2
def forward(self, g, h, arg1=None):
if arg1 is not None:
self.carg1 += 1
return F.zeros((g.number_of_dst_nodes(), self.s2))
mod1 = MyMod(2, 3)
mod2 = MyMod(2, 4)
mod3 = MyMod(3, 4)
conv = nn.HeteroGraphConv({
'follows': mod1,
'plays': mod2,
'sells': mod3},
agg)
conv.initialize(ctx=F.ctx())
mod_args = {'follows' : (1,), 'plays' : (1,)}
h = conv(g, {'user' : uf, 'store' : sf, 'game': gf}, mod_args)
assert mod1.carg1 == 1
assert mod2.carg1 == 1
assert mod3.carg1 == 0
for etype in g.etypes:
g = dgl.remove_edges(g, g.edges(form='eid', etype=etype), etype=etype)
assert g.num_edges() == 0
h = conv(g, {'user': uf, 'game': gf, 'store': sf})
assert set(h.keys()) == {'user', 'game'}
block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [
0, 1, 2, 3], 'store': []}).to(F.ctx())
h = conv(block, ({'user': uf, 'game': gf, 'store': sf},
{'user': uf, 'game': gf, 'store': sf[0:0]}))
assert set(h.keys()) == {'user', 'game'}
if __name__ == '__main__':
test_graph_conv()
test_gat_conv()
test_sage_conv()
test_gg_conv()
test_cheb_conv()
test_agnn_conv()
test_appnp_conv()
test_dense_cheb_conv()
test_dense_graph_conv()
test_dense_sage_conv()
test_edge_conv()
test_gin_conv()
test_gmm_conv()
test_nn_conv()
test_sg_conv()
test_set2set()
test_glob_att_pool()
test_simple_pool()
test_rgcn()
test_sequential()
test_hetero_conv()
| true | true |
1c3487cecc72f7c00dd34da8362cdb9ba0a14b65 | 2,127 | py | Python | comparison/eval/metrics.py | killianlevacher/defenseInvGAN-src | 8fa398536773c5bc00c906562d2d9359572b8157 | [
"MIT"
] | 14 | 2019-12-12T11:28:18.000Z | 2022-03-09T11:56:04.000Z | comparison/eval/metrics.py | killianlevacher/defenseInvGAN-src | 8fa398536773c5bc00c906562d2d9359572b8157 | [
"MIT"
] | 7 | 2019-12-16T22:20:01.000Z | 2022-02-10T00:45:21.000Z | comparison/eval/metrics.py | killianlevacher/defenseInvGAN-src | 8fa398536773c5bc00c906562d2d9359572b8157 | [
"MIT"
] | 2 | 2020-04-01T09:02:00.000Z | 2021-08-01T14:27:11.000Z | import cPickle
import tensorflow as tf
from classifiers.cifar_model import Model as CIFARModel
import utils
import numpy as np
import inception
import fid
def ComputeClassificationAccuracy(images, recons, labels, args, debug=True):
model_paths = {'CIFAR': 'classifiers/model/cifar-10',
'CelebA': 'classifiers/model/celeba'}
batch_size = 50
dset = utils.data_loader(images, recons, labels, batch_size)
# normalization, accuracy
sess = tf.Session()
if args.dataset == 'CIFAR':
model = CIFARModel(model_paths[args.dataset], tiny=False, mode='eval', sess=sess)
# TODO: Write CelebA model class
n_data = 0
n_correct_orig = 0
n_correct = 0
total = 0
for images, recons, labels in dset:
total += 1
n_correct_orig += sess.run(model.num_correct, feed_dict={model.x_input: images, model.y_input: labels})
n_correct += sess.run(model.num_correct, feed_dict={model.x_input: recons, model.y_input: labels})
n_data += len(images)
acc_orig = float(n_correct_orig) / n_data
acc = float(n_correct) / n_data
print('Original acc: {}'.format(acc_orig))
print('Accuracy: {}'.format(acc))
return acc
def ComputeMSE(reconstructions, images):
recons = np.reshape(reconstructions, (reconstructions.shape[0], -1))
img = np.reshape(images, (images.shape[0], -1))
mse = ((recons - img)**2).mean(axis=1)
mse_avg = np.mean(mse)
mse_std = np.std(mse)
return (mse_avg, mse_std)
def ComputeInception(images):
images = ((images + 1) / 2.0)*255.0
images = images.astype(np.uint8)
IS = inception.get_inception_score(images)
return IS
def ComputeFID(reconstructions, images):
reconstructions = ((reconstructions + 1) / 2.0)*255.0
reconstructions = reconstructions.astype(np.uint8)
images = ((images + 1) / 2.0)*255.0
images = images.astype(np.uint8)
images = np.transpose(images, (0, 3, 1, 2))
reconstructions = np.transpose(reconstructions, (0, 3, 1, 2))
FID = fid.get_fid(images, reconstructions)
return FID
| 29.957746 | 111 | 0.657734 | import cPickle
import tensorflow as tf
from classifiers.cifar_model import Model as CIFARModel
import utils
import numpy as np
import inception
import fid
def ComputeClassificationAccuracy(images, recons, labels, args, debug=True):
model_paths = {'CIFAR': 'classifiers/model/cifar-10',
'CelebA': 'classifiers/model/celeba'}
batch_size = 50
dset = utils.data_loader(images, recons, labels, batch_size)
sess = tf.Session()
if args.dataset == 'CIFAR':
model = CIFARModel(model_paths[args.dataset], tiny=False, mode='eval', sess=sess)
n_data = 0
n_correct_orig = 0
n_correct = 0
total = 0
for images, recons, labels in dset:
total += 1
n_correct_orig += sess.run(model.num_correct, feed_dict={model.x_input: images, model.y_input: labels})
n_correct += sess.run(model.num_correct, feed_dict={model.x_input: recons, model.y_input: labels})
n_data += len(images)
acc_orig = float(n_correct_orig) / n_data
acc = float(n_correct) / n_data
print('Original acc: {}'.format(acc_orig))
print('Accuracy: {}'.format(acc))
return acc
def ComputeMSE(reconstructions, images):
recons = np.reshape(reconstructions, (reconstructions.shape[0], -1))
img = np.reshape(images, (images.shape[0], -1))
mse = ((recons - img)**2).mean(axis=1)
mse_avg = np.mean(mse)
mse_std = np.std(mse)
return (mse_avg, mse_std)
def ComputeInception(images):
images = ((images + 1) / 2.0)*255.0
images = images.astype(np.uint8)
IS = inception.get_inception_score(images)
return IS
def ComputeFID(reconstructions, images):
reconstructions = ((reconstructions + 1) / 2.0)*255.0
reconstructions = reconstructions.astype(np.uint8)
images = ((images + 1) / 2.0)*255.0
images = images.astype(np.uint8)
images = np.transpose(images, (0, 3, 1, 2))
reconstructions = np.transpose(reconstructions, (0, 3, 1, 2))
FID = fid.get_fid(images, reconstructions)
return FID
| true | true |
1c34888dd7c4c965f8b0a566fa7ca6256d71885e | 1,905 | py | Python | src/rozbieznosci_dyscyplin/models.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | src/rozbieznosci_dyscyplin/models.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/rozbieznosci_dyscyplin/models.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.db.models import DO_NOTHING
from bpp.fields import YearField
from bpp.models import BazaModeluOdpowiedzialnosciAutorow, TupleField
class RozbieznosciViewBase(models.Model):
id = TupleField(models.IntegerField(), size=3, primary_key=True)
rekord = models.ForeignKey("bpp.Rekord", DO_NOTHING, related_name="+")
rok = YearField()
autor = models.ForeignKey("bpp.Autor", DO_NOTHING, related_name="+")
dyscyplina_rekordu = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
dyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+"
)
subdyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
class Meta:
managed = False
abstract = True
class BrakPrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznePrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznosciView(RozbieznosciViewBase):
# Uwaga: w sytuacji, gdy praca będzie miała jednego i tego samego autora (np w roli redaoktora
# oraz autora) to ten model i funkcja get_wydawnictwo_autor_obj zawiedzie.
class Meta:
managed = False
verbose_name = "rozbieżność rekordu i dyscyplin"
verbose_name_plural = "rozbieżności rekordów i dyscyplin"
def get_wydawnictwo_autor_obj(self) -> BazaModeluOdpowiedzialnosciAutorow:
# Uwaga: w sytuacji, gdy praca będzie miała jednego i tego samego autora (np w roli redaoktora
# oraz autora) to ten model i funkcja get_wydawnictwo_autor_obj zawiedzie (zwraca wyłacznie pierwszy
# rekord z powiazaniem autora + rekordu)
return self.rekord.original.autorzy_set.filter(autor=self.autor).first()
| 37.352941 | 108 | 0.728609 | from django.db import models
from django.db.models import DO_NOTHING
from bpp.fields import YearField
from bpp.models import BazaModeluOdpowiedzialnosciAutorow, TupleField
class RozbieznosciViewBase(models.Model):
id = TupleField(models.IntegerField(), size=3, primary_key=True)
rekord = models.ForeignKey("bpp.Rekord", DO_NOTHING, related_name="+")
rok = YearField()
autor = models.ForeignKey("bpp.Autor", DO_NOTHING, related_name="+")
dyscyplina_rekordu = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
dyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+"
)
subdyscyplina_autora = models.ForeignKey(
"bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True
)
class Meta:
managed = False
abstract = True
class BrakPrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznePrzypisaniaView(RozbieznosciViewBase):
class Meta:
managed = False
class RozbieznosciView(RozbieznosciViewBase):
class Meta:
managed = False
verbose_name = "rozbieżność rekordu i dyscyplin"
verbose_name_plural = "rozbieżności rekordów i dyscyplin"
def get_wydawnictwo_autor_obj(self) -> BazaModeluOdpowiedzialnosciAutorow:
return self.rekord.original.autorzy_set.filter(autor=self.autor).first()
| true | true |
1c348a468a745f1994d97461eb60b2dee436b18a | 9,194 | py | Python | sovrin/common/txn.py | sovrin-foundation/old-sovrin | d4e705054b7252c62fea00114060035c6eb314a4 | [
"Apache-2.0"
] | 3 | 2017-07-19T14:26:31.000Z | 2020-05-16T16:09:37.000Z | sovrin/common/txn.py | sovrin-foundation/old-sovrin | d4e705054b7252c62fea00114060035c6eb314a4 | [
"Apache-2.0"
] | null | null | null | sovrin/common/txn.py | sovrin-foundation/old-sovrin | d4e705054b7252c62fea00114060035c6eb314a4 | [
"Apache-2.0"
] | 3 | 2017-10-28T08:19:00.000Z | 2021-06-06T10:48:55.000Z | import json
from collections import OrderedDict
from plenum.common.txn import TXN_TYPE, TARGET_NYM, ORIGIN, DATA, TXN_ID, TXN_TIME, \
RAW, ENC, HASH, NAME, VERSION, TYPE, POOL_TXN_TYPES, ALIAS, \
STEWARD, NYM, VERKEY
from plenum.common.types import f, TaggedTuple
ROLE = 'role'
NONCE = 'nonce'
ATTRIBUTES = "attributes"
ATTR_NAMES = "attr_names"
ACTION = 'action'
SCHEDULE = 'schedule'
TIMEOUT = 'timeout'
SHA256 = 'sha256'
START = 'start'
CANCEL = 'cancel'
COMPLETE = 'complete'
FAIL = 'fail'
NIL = '<nil>'
OWNER = '<owner>'
LAST_TXN = "lastTxn"
TXNS = "Txns"
ENC_TYPE = "encType"
SKEY = "secretKey"
REF = "ref"
PRIMARY = "primary"
REVOCATION = "revocation"
allOpKeys = (TXN_TYPE, TARGET_NYM, VERKEY, ORIGIN, ROLE, DATA, NONCE, REF, RAW,
ENC, HASH, ALIAS, ACTION, SCHEDULE, TIMEOUT, SHA256, START, CANCEL,
NAME, VERSION)
reqOpKeys = (TXN_TYPE,)
# Attribute Names
ENDPOINT = "endpoint"
# client transaction types
NYM = NYM
ATTRIB = "ATTRIB"
IDPROOF = "IDPROOF"
ASSIGN_AGENT = "ASSIGN_AGENT"
ADD_SPONSOR = "ADD_SPONSOR"
ADD_AGENT = "ADD_AGENT"
DISCLO = "DISCLO"
GET_ATTR = "GET_ATTR"
GET_NYM = "GET_NYM"
GET_TXNS = "GET_TXNS"
GET_TXN = "GET_TXN"
CLAIM_DEF = "CLAIM_DEF"
GET_CLAIM_DEF = "GET_CLAIM_DEF"
ADD_PKI = "ADD_PKI"
REQ_CRED = "REQ_CRED"
GET_NONCE = "GET_NONCE"
VER_PRF = "VER_PRF"
ISSUER_KEY = "ISSUER_KEY"
GET_ISSUER_KEY = "GET_ISSUER_KEY"
POOL_UPGRADE = 'POOL_UPGRADE'
NODE_UPGRADE = 'NODE_UPGRADE'
# Temp for demo
GEN_CRED = "GEN_CRED"
openTxns = (GET_NYM, GET_ATTR, GET_CLAIM_DEF, GET_ISSUER_KEY)
# TXN_TYPE -> (requireds, optionals)
fields = {NYM: ([TARGET_NYM], [ROLE]),
ATTRIB: ([], [RAW, ENC, HASH]),
CLAIM_DEF: ([NAME, VERSION, ATTR_NAMES], [TYPE, ]),
GET_CLAIM_DEF: ([], []),
ISSUER_KEY: ([REF, DATA]),
GET_ISSUER_KEY: ([REF, ORIGIN])
}
CONFIG_TXN_TYPES = {POOL_UPGRADE, NODE_UPGRADE}
IDENTITY_TXN_TYPES = {NYM,
ATTRIB,
IDPROOF,
DISCLO,
GET_ATTR,
GET_NYM,
GET_TXNS,
CLAIM_DEF,
GET_CLAIM_DEF,
ISSUER_KEY,
GET_ISSUER_KEY}
validTxnTypes = set()
validTxnTypes.update(POOL_TXN_TYPES)
validTxnTypes.update(IDENTITY_TXN_TYPES)
validTxnTypes.update(CONFIG_TXN_TYPES)
def AddNym(target, role=None):
return newTxn(txnType=NYM, target=target, role=role)
def AddAttr(target, attrData, role=None):
return newTxn(txnType=ATTRIB, target=target, role=role,
enc=attrData)
def GetAttr(target, attrName, role=None):
queryData = json.dumps({"name": attrName})
return newTxn(txnType=GET_ATTR, target=target, role=role,
data=queryData)
# TODO: Change name to txn or some thing else after discussion
def newTxn(txnType, target=None, data=None, enc=None, raw=None,
hash=None, role=None):
txn = {
TXN_TYPE: txnType
}
if target:
txn[TARGET_NYM] = target
if data:
txn[DATA] = data
if enc:
txn[ENC] = enc
if raw:
txn[RAW] = raw
if hash:
txn[HASH] = hash
if role:
txn[ROLE] = role
return txn
# TODO: Move them to a separate file
# ROLE types
STEWARD = STEWARD
SPONSOR = "SPONSOR"
TRUSTEE = "TRUSTEE"
TGB = "TGB"
def getGenesisTxns():
return [
{ALIAS: "Trustee1", TARGET_NYM: "9XNVHKtucEZWh7GrS9S8nRWtVuFQwYLfzGD7pQ7Scjtc", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4a", TXN_TYPE: NYM, ROLE: TRUSTEE},
{ALIAS: "Steward1", TARGET_NYM: "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward2", TARGET_NYM: "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward3", TARGET_NYM: "CECeGXDi6EHuhpwz19uyjjEnsRGNXodFYqCRgdLmLRkt", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4d", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward4", TARGET_NYM: "3znAGhp6Tk4kmebhXnk9K3jaTMffu82PJfEG91AeRkq2", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4e", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward5", TARGET_NYM: "4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4f", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward6", TARGET_NYM: "4Yk9HoDSfJv9QcmJbLcXdWVgS7nfvdUqiVcvbSu8VBru", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b50", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward7", TARGET_NYM: "FR5pWwinRBn35GNhg7bsvw8Q13kRept2pm561DwZCQzT", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b51", TXN_TYPE: NYM, ROLE: STEWARD},
{TXN_TYPE: NYM, TARGET_NYM: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9', ROLE: STEWARD, TXN_ID: '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b'},
{TXN_TYPE: NYM, f.IDENTIFIER.nm: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9', TARGET_NYM: 'C2AafyXuDBbcdiHJ8pdJ14PJ17X5KEBjbyfPPJWZFA4b', ROLE: SPONSOR, TXN_ID: '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c'},
{TXN_TYPE: NYM, TARGET_NYM: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3', TXN_ID: '50c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', ROLE: SPONSOR, f.IDENTIFIER.nm: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9'},
{TXN_TYPE: NYM, TARGET_NYM: 'adityastaging', TXN_ID: '77c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', f.IDENTIFIER.nm: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3'},
{TXN_TYPE: NYM, TARGET_NYM: 'iosstaging', TXN_ID: '91c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', f.IDENTIFIER.nm: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3'},
{ALIAS: "Steward8", TARGET_NYM: "6vAQkuCgTm7Jeki3vVhZm1FTAQYCeLE5mSvVRQdiwt1w", TXN_ID: "4770beb7e45bf623bd9987af4bd6d6d8eb8b68a4d00fa2a4c6b6f3f0c1c036f8", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward9", TARGET_NYM: "6hbecbh36EMK6yAi5NZ9bLZEuRsWFt6qLa2SyMQGXs7H", TXN_ID: "4770beb7e45bf623bd9987af4bd6d6d8eb8b68a4d00fa2a4c6b6f3f0c1c036f9", TXN_TYPE: NYM, ROLE: STEWARD},
]
def getGenesisTxnsForLocal():
return [{ALIAS: "Steward1",
TARGET_NYM: "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward2",
TARGET_NYM: "3NhxuJKShrpnhxG8VYGkum6mv3HeXWUDfj7ktn5NbeymHoDX",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward3",
TARGET_NYM: "CECeGXDi6EHuhpwz19uyjjEnsRGNXodFYqCRgdLmLRkt",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4d",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward4",
TARGET_NYM: "3znAGhp6Tk4kmebhXnk9K3jaTMffu82PJfEG91AeRkq2",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4e",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Alice",
TARGET_NYM: "4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919683",
TXN_TYPE: NYM},
{ALIAS: "Jason",
TARGET_NYM: "46Kq4hASUdvUbwR7s7Pie3x8f4HRB3NLay7Z9jh9eZsB",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919684",
TXN_TYPE: NYM},
{ALIAS: "John",
TARGET_NYM: "3wpYnGqceZ8DzN3guiTd9rrYkWTwTHCChBSuo6cvkXTG",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919685",
TXN_TYPE: NYM},
{ALIAS: "Les",
TARGET_NYM: "4Yk9HoDSfJv9QcmJbLcXdWVgS7nfvdUqiVcvbSu8VBru",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID: "e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919686",
TXN_TYPE: NYM}]
def getTxnOrderedFields():
return OrderedDict([
(f.IDENTIFIER.nm, (str, str)),
(f.REQ_ID.nm, (str, int)),
(TXN_ID, (str, str)),
(TXN_TIME, (str, int)),
(TXN_TYPE, (str, str)),
(TARGET_NYM, (str, str)),
(VERKEY, (str, str)),
(DATA, (str, str)),
(ALIAS, (str, str)),
(RAW, (str, str)),
(ENC, (str, str)),
(HASH, (str, str)),
(ROLE, (str, str)),
(REF, (str, str))
])
| 40.148472 | 240 | 0.684359 | import json
from collections import OrderedDict
from plenum.common.txn import TXN_TYPE, TARGET_NYM, ORIGIN, DATA, TXN_ID, TXN_TIME, \
RAW, ENC, HASH, NAME, VERSION, TYPE, POOL_TXN_TYPES, ALIAS, \
STEWARD, NYM, VERKEY
from plenum.common.types import f, TaggedTuple
ROLE = 'role'
NONCE = 'nonce'
ATTRIBUTES = "attributes"
ATTR_NAMES = "attr_names"
ACTION = 'action'
SCHEDULE = 'schedule'
TIMEOUT = 'timeout'
SHA256 = 'sha256'
START = 'start'
CANCEL = 'cancel'
COMPLETE = 'complete'
FAIL = 'fail'
NIL = '<nil>'
OWNER = '<owner>'
LAST_TXN = "lastTxn"
TXNS = "Txns"
ENC_TYPE = "encType"
SKEY = "secretKey"
REF = "ref"
PRIMARY = "primary"
REVOCATION = "revocation"
allOpKeys = (TXN_TYPE, TARGET_NYM, VERKEY, ORIGIN, ROLE, DATA, NONCE, REF, RAW,
ENC, HASH, ALIAS, ACTION, SCHEDULE, TIMEOUT, SHA256, START, CANCEL,
NAME, VERSION)
reqOpKeys = (TXN_TYPE,)
ENDPOINT = "endpoint"
NYM = NYM
ATTRIB = "ATTRIB"
IDPROOF = "IDPROOF"
ASSIGN_AGENT = "ASSIGN_AGENT"
ADD_SPONSOR = "ADD_SPONSOR"
ADD_AGENT = "ADD_AGENT"
DISCLO = "DISCLO"
GET_ATTR = "GET_ATTR"
GET_NYM = "GET_NYM"
GET_TXNS = "GET_TXNS"
GET_TXN = "GET_TXN"
CLAIM_DEF = "CLAIM_DEF"
GET_CLAIM_DEF = "GET_CLAIM_DEF"
ADD_PKI = "ADD_PKI"
REQ_CRED = "REQ_CRED"
GET_NONCE = "GET_NONCE"
VER_PRF = "VER_PRF"
ISSUER_KEY = "ISSUER_KEY"
GET_ISSUER_KEY = "GET_ISSUER_KEY"
POOL_UPGRADE = 'POOL_UPGRADE'
NODE_UPGRADE = 'NODE_UPGRADE'
GEN_CRED = "GEN_CRED"
openTxns = (GET_NYM, GET_ATTR, GET_CLAIM_DEF, GET_ISSUER_KEY)
fields = {NYM: ([TARGET_NYM], [ROLE]),
ATTRIB: ([], [RAW, ENC, HASH]),
CLAIM_DEF: ([NAME, VERSION, ATTR_NAMES], [TYPE, ]),
GET_CLAIM_DEF: ([], []),
ISSUER_KEY: ([REF, DATA]),
GET_ISSUER_KEY: ([REF, ORIGIN])
}
CONFIG_TXN_TYPES = {POOL_UPGRADE, NODE_UPGRADE}
IDENTITY_TXN_TYPES = {NYM,
ATTRIB,
IDPROOF,
DISCLO,
GET_ATTR,
GET_NYM,
GET_TXNS,
CLAIM_DEF,
GET_CLAIM_DEF,
ISSUER_KEY,
GET_ISSUER_KEY}
validTxnTypes = set()
validTxnTypes.update(POOL_TXN_TYPES)
validTxnTypes.update(IDENTITY_TXN_TYPES)
validTxnTypes.update(CONFIG_TXN_TYPES)
def AddNym(target, role=None):
return newTxn(txnType=NYM, target=target, role=role)
def AddAttr(target, attrData, role=None):
return newTxn(txnType=ATTRIB, target=target, role=role,
enc=attrData)
def GetAttr(target, attrName, role=None):
queryData = json.dumps({"name": attrName})
return newTxn(txnType=GET_ATTR, target=target, role=role,
data=queryData)
def newTxn(txnType, target=None, data=None, enc=None, raw=None,
hash=None, role=None):
txn = {
TXN_TYPE: txnType
}
if target:
txn[TARGET_NYM] = target
if data:
txn[DATA] = data
if enc:
txn[ENC] = enc
if raw:
txn[RAW] = raw
if hash:
txn[HASH] = hash
if role:
txn[ROLE] = role
return txn
STEWARD = STEWARD
SPONSOR = "SPONSOR"
TRUSTEE = "TRUSTEE"
TGB = "TGB"
def getGenesisTxns():
return [
{ALIAS: "Trustee1", TARGET_NYM: "9XNVHKtucEZWh7GrS9S8nRWtVuFQwYLfzGD7pQ7Scjtc", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4a", TXN_TYPE: NYM, ROLE: TRUSTEE},
{ALIAS: "Steward1", TARGET_NYM: "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward2", TARGET_NYM: "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward3", TARGET_NYM: "CECeGXDi6EHuhpwz19uyjjEnsRGNXodFYqCRgdLmLRkt", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4d", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward4", TARGET_NYM: "3znAGhp6Tk4kmebhXnk9K3jaTMffu82PJfEG91AeRkq2", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4e", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward5", TARGET_NYM: "4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4f", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward6", TARGET_NYM: "4Yk9HoDSfJv9QcmJbLcXdWVgS7nfvdUqiVcvbSu8VBru", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b50", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward7", TARGET_NYM: "FR5pWwinRBn35GNhg7bsvw8Q13kRept2pm561DwZCQzT", TXN_ID: "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b51", TXN_TYPE: NYM, ROLE: STEWARD},
{TXN_TYPE: NYM, TARGET_NYM: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9', ROLE: STEWARD, TXN_ID: '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b'},
{TXN_TYPE: NYM, f.IDENTIFIER.nm: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9', TARGET_NYM: 'C2AafyXuDBbcdiHJ8pdJ14PJ17X5KEBjbyfPPJWZFA4b', ROLE: SPONSOR, TXN_ID: '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c'},
{TXN_TYPE: NYM, TARGET_NYM: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3', TXN_ID: '50c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', ROLE: SPONSOR, f.IDENTIFIER.nm: 'EGRf6ho37aqg5ZZpAyD2mesS6XrNUeSkoVUAbpL6bmJ9'},
{TXN_TYPE: NYM, TARGET_NYM: 'adityastaging', TXN_ID: '77c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', f.IDENTIFIER.nm: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3'},
{TXN_TYPE: NYM, TARGET_NYM: 'iosstaging', TXN_ID: '91c2f66f7fda2ece684d1befc667e894b4460cb782f5387d864fa7d5f14c4066', f.IDENTIFIER.nm: '4qU9QRZ79CbWuDKUtTvpDUnUiDnkLkwd1i8p2B3gJNU3'},
{ALIAS: "Steward8", TARGET_NYM: "6vAQkuCgTm7Jeki3vVhZm1FTAQYCeLE5mSvVRQdiwt1w", TXN_ID: "4770beb7e45bf623bd9987af4bd6d6d8eb8b68a4d00fa2a4c6b6f3f0c1c036f8", TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward9", TARGET_NYM: "6hbecbh36EMK6yAi5NZ9bLZEuRsWFt6qLa2SyMQGXs7H", TXN_ID: "4770beb7e45bf623bd9987af4bd6d6d8eb8b68a4d00fa2a4c6b6f3f0c1c036f9", TXN_TYPE: NYM, ROLE: STEWARD},
]
def getGenesisTxnsForLocal():
return [{ALIAS: "Steward1",
TARGET_NYM: "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward2",
TARGET_NYM: "3NhxuJKShrpnhxG8VYGkum6mv3HeXWUDfj7ktn5NbeymHoDX",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4c",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward3",
TARGET_NYM: "CECeGXDi6EHuhpwz19uyjjEnsRGNXodFYqCRgdLmLRkt",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4d",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Steward4",
TARGET_NYM: "3znAGhp6Tk4kmebhXnk9K3jaTMffu82PJfEG91AeRkq2",
TXN_ID:
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4e",
TXN_TYPE: NYM, ROLE: STEWARD},
{ALIAS: "Alice",
TARGET_NYM: "4AdS22kC7xzb4bcqg9JATuCfAMNcQYcZa1u5eWzs6cSJ",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919683",
TXN_TYPE: NYM},
{ALIAS: "Jason",
TARGET_NYM: "46Kq4hASUdvUbwR7s7Pie3x8f4HRB3NLay7Z9jh9eZsB",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919684",
TXN_TYPE: NYM},
{ALIAS: "John",
TARGET_NYM: "3wpYnGqceZ8DzN3guiTd9rrYkWTwTHCChBSuo6cvkXTG",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID:
"e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919685",
TXN_TYPE: NYM},
{ALIAS: "Les",
TARGET_NYM: "4Yk9HoDSfJv9QcmJbLcXdWVgS7nfvdUqiVcvbSu8VBru",
"identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC",
TXN_ID: "e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919686",
TXN_TYPE: NYM}]
def getTxnOrderedFields():
return OrderedDict([
(f.IDENTIFIER.nm, (str, str)),
(f.REQ_ID.nm, (str, int)),
(TXN_ID, (str, str)),
(TXN_TIME, (str, int)),
(TXN_TYPE, (str, str)),
(TARGET_NYM, (str, str)),
(VERKEY, (str, str)),
(DATA, (str, str)),
(ALIAS, (str, str)),
(RAW, (str, str)),
(ENC, (str, str)),
(HASH, (str, str)),
(ROLE, (str, str)),
(REF, (str, str))
])
| true | true |
1c348a5acb86adb0a976856e0be07a0ff3b78da9 | 504 | py | Python | cnn_code/cuda.py | neurocaience/deepfreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | 1 | 2020-11-17T06:41:10.000Z | 2020-11-17T06:41:10.000Z | cnn_code/cuda.py | neurocaience/DeepFreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | null | null | null | cnn_code/cuda.py | neurocaience/DeepFreeze | 2a8c7da7519df2bacb640917695bd7d226e8d4f4 | [
"MIT"
] | 1 | 2020-06-18T04:25:48.000Z | 2020-06-18T04:25:48.000Z | """=============================================================================
Manage CUDA-related utility functions.
============================================================================="""
import torch
# ------------------------------------------------------------------------------
def device():
"""Return current CUDA device if on GPUs else CPU device.
"""
if torch.cuda.is_available():
return torch.cuda.current_device()
else:
return torch.device('cpu')
| 31.5 | 80 | 0.343254 |
import torch
def device():
if torch.cuda.is_available():
return torch.cuda.current_device()
else:
return torch.device('cpu')
| true | true |
1c348b2a617346f4892a06a93923aa29bbc60222 | 121 | py | Python | app/multiplication.py | magicalcarpet/python_modules_and_packages | 663a957674c41d0dc33e3f6ca7eefe4c808606b4 | [
"MIT"
] | null | null | null | app/multiplication.py | magicalcarpet/python_modules_and_packages | 663a957674c41d0dc33e3f6ca7eefe4c808606b4 | [
"MIT"
] | null | null | null | app/multiplication.py | magicalcarpet/python_modules_and_packages | 663a957674c41d0dc33e3f6ca7eefe4c808606b4 | [
"MIT"
] | null | null | null | def multiply(x, y):
'''
Multiply two numbers x and y
'''
print('multiplying x: {} * y: {}'.format(x, y))
| 20.166667 | 51 | 0.512397 | def multiply(x, y):
print('multiplying x: {} * y: {}'.format(x, y))
| true | true |
1c348d45c3fb17732c03fd82af2a1c1cdf2c030f | 415 | py | Python | acmicpc/9506/9506.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/9506/9506.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/9506/9506.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | def get_divisor(k):
divisors = []
for i in range(1, k):
if k % i == 0:
divisors.append(i)
return divisors
while True:
n = int(input())
if n == -1:
break
divisors = get_divisor(n)
if n == sum(divisors):
print(f'{n}', end=' = ')
print(' + '.join(list(map(str, divisors))))
elif n != sum(divisors):
print(f'{n} is NOT perfect.')
| 23.055556 | 51 | 0.489157 | def get_divisor(k):
divisors = []
for i in range(1, k):
if k % i == 0:
divisors.append(i)
return divisors
while True:
n = int(input())
if n == -1:
break
divisors = get_divisor(n)
if n == sum(divisors):
print(f'{n}', end=' = ')
print(' + '.join(list(map(str, divisors))))
elif n != sum(divisors):
print(f'{n} is NOT perfect.')
| true | true |
1c348da64d28394f354e57927ffd94baebc17e4a | 2,973 | py | Python | src/data/traffic_data.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 3 | 2020-07-06T08:26:12.000Z | 2021-04-20T05:31:38.000Z | src/data/traffic_data.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 8 | 2021-06-01T03:49:28.000Z | 2022-03-18T02:27:43.000Z | src/data/traffic_data.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 1 | 2021-04-20T05:30:46.000Z | 2021-04-20T05:30:46.000Z | import datetime
from typing import Optional
from sqlalchemy.sql import text
from data.base_data import BaseModel, BaseData
class TrafficMonthlyModel(BaseModel):
"""
Note: date is the first day of the month.
"""
_table = "traffic_monthly"
_pk_field = "id"
_columns = ["id", "date", "unique_pageviews", "total_pageviews"]
class TrafficDailyModel(BaseModel):
_table = "traffic_daily"
_pk_field = "id"
_columns = ["id", "date", "unique_pageviews", "total_pageviews", "net_subscribers"]
class TrafficData(BaseData):
def get_monthly_traffic_by_range(
self, start_date: datetime.date, end_date: datetime.date
) -> list[TrafficMonthlyModel]:
"""Gets the monthly traffic between the dates specified (inclusive)."""
start_date_str = start_date.isoformat()
end_date_str = end_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_monthly
WHERE date >= :start_date and date <= :end_date;
"""
)
result_rows = self.execute(sql, start_date=start_date_str, end_date=end_date_str)
if not result_rows:
return []
return [TrafficMonthlyModel(row) for row in result_rows]
def get_daily_traffic_by_range(self, start_date: datetime.date, end_date: datetime.date) -> list[TrafficDailyModel]:
"""Gets the daily traffic between the dates specified (inclusive)."""
start_date_str = start_date.isoformat()
end_date_str = end_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_daily
WHERE date >= :start_date and date <= :end_date;
"""
)
result_rows = self.execute(sql, start_date=start_date_str, end_date=end_date_str)
if not result_rows:
return []
return [TrafficDailyModel(row) for row in result_rows]
def get_monthly_traffic_by_datetime(self, target_date: datetime.date) -> Optional[TrafficMonthlyModel]:
"""Gets the monthly traffic for the date, rounding down from the provided target_date
to the start of the month."""
target_date_str = target_date.replace(day=1).isoformat()
sql = text(
"""
SELECT * FROM traffic_monthly
WHERE date = :date;
"""
)
result_rows = self.execute(sql, date=target_date_str)
if not result_rows:
return None
return TrafficMonthlyModel(result_rows[0])
def get_daily_traffic_by_datetime(self, target_date: datetime.date) -> Optional[TrafficDailyModel]:
"""Gets the daily traffic for the date."""
target_date_str = target_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_daily
WHERE date = :date;
"""
)
result_rows = self.execute(sql, date=target_date_str)
if not result_rows:
return None
return TrafficDailyModel(result_rows[0])
| 29.147059 | 120 | 0.636394 | import datetime
from typing import Optional
from sqlalchemy.sql import text
from data.base_data import BaseModel, BaseData
class TrafficMonthlyModel(BaseModel):
_table = "traffic_monthly"
_pk_field = "id"
_columns = ["id", "date", "unique_pageviews", "total_pageviews"]
class TrafficDailyModel(BaseModel):
_table = "traffic_daily"
_pk_field = "id"
_columns = ["id", "date", "unique_pageviews", "total_pageviews", "net_subscribers"]
class TrafficData(BaseData):
def get_monthly_traffic_by_range(
self, start_date: datetime.date, end_date: datetime.date
) -> list[TrafficMonthlyModel]:
start_date_str = start_date.isoformat()
end_date_str = end_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_monthly
WHERE date >= :start_date and date <= :end_date;
"""
)
result_rows = self.execute(sql, start_date=start_date_str, end_date=end_date_str)
if not result_rows:
return []
return [TrafficMonthlyModel(row) for row in result_rows]
def get_daily_traffic_by_range(self, start_date: datetime.date, end_date: datetime.date) -> list[TrafficDailyModel]:
start_date_str = start_date.isoformat()
end_date_str = end_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_daily
WHERE date >= :start_date and date <= :end_date;
"""
)
result_rows = self.execute(sql, start_date=start_date_str, end_date=end_date_str)
if not result_rows:
return []
return [TrafficDailyModel(row) for row in result_rows]
def get_monthly_traffic_by_datetime(self, target_date: datetime.date) -> Optional[TrafficMonthlyModel]:
target_date_str = target_date.replace(day=1).isoformat()
sql = text(
"""
SELECT * FROM traffic_monthly
WHERE date = :date;
"""
)
result_rows = self.execute(sql, date=target_date_str)
if not result_rows:
return None
return TrafficMonthlyModel(result_rows[0])
def get_daily_traffic_by_datetime(self, target_date: datetime.date) -> Optional[TrafficDailyModel]:
target_date_str = target_date.isoformat()
sql = text(
"""
SELECT * FROM traffic_daily
WHERE date = :date;
"""
)
result_rows = self.execute(sql, date=target_date_str)
if not result_rows:
return None
return TrafficDailyModel(result_rows[0])
| true | true |
1c348ea078265e308375f04362e7559419d8dd01 | 3,788 | py | Python | homeassistant/components/mobile_app/__init__.py | headcode/home-assistant | ef338fa8803c9691c545cb335503723d271c652c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mobile_app/__init__.py | headcode/home-assistant | ef338fa8803c9691c545cb335503723d271c652c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mobile_app/__init__.py | headcode/home-assistant | ef338fa8803c9691c545cb335503723d271c652c | [
"Apache-2.0"
] | null | null | null | """Integrates Native Apps to Home Assistant."""
from homeassistant import config_entries
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.components.webhook import async_register as webhook_register
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (ATTR_APP_COMPONENT, ATTR_DEVICE_ID, ATTR_DEVICE_NAME,
ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION,
DATA_CONFIG_ENTRIES, DATA_DELETED_IDS, DATA_DEVICES,
DATA_STORE, DOMAIN, STORAGE_KEY, STORAGE_VERSION)
from .http_api import RegistrationsView
from .webhook import handle_webhook
from .websocket_api import register_websocket_handlers
DEPENDENCIES = ['device_tracker', 'http', 'webhook']
REQUIREMENTS = ['PyNaCl==1.3.0']
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the mobile app component."""
hass.data[DOMAIN] = {
DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: [], DATA_DEVICES: {},
}
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
app_config = await store.async_load()
if app_config is None:
app_config = {
DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: [], DATA_DEVICES: {},
}
hass.data[DOMAIN] = app_config
hass.data[DOMAIN][DATA_STORE] = store
hass.http.register_view(RegistrationsView())
register_websocket_handlers(hass)
for deleted_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
try:
webhook_register(hass, DOMAIN, "Deleted Webhook", deleted_id,
handle_webhook)
except ValueError:
pass
return True
async def async_setup_entry(hass, entry):
"""Set up a mobile_app entry."""
registration = entry.data
webhook_id = registration[CONF_WEBHOOK_ID]
hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] = entry
device_registry = await dr.async_get_registry(hass)
identifiers = {
(ATTR_DEVICE_ID, registration[ATTR_DEVICE_ID]),
(CONF_WEBHOOK_ID, registration[CONF_WEBHOOK_ID])
}
device = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers=identifiers,
manufacturer=registration[ATTR_MANUFACTURER],
model=registration[ATTR_MODEL],
name=registration[ATTR_DEVICE_NAME],
sw_version=registration[ATTR_OS_VERSION]
)
hass.data[DOMAIN][DATA_DEVICES][webhook_id] = device
registration_name = 'Mobile App: {}'.format(registration[ATTR_DEVICE_NAME])
webhook_register(hass, DOMAIN, registration_name, webhook_id,
handle_webhook)
if ATTR_APP_COMPONENT in registration:
load_platform(hass, registration[ATTR_APP_COMPONENT], DOMAIN, {},
{DOMAIN: {}})
return True
@config_entries.HANDLERS.register(DOMAIN)
class MobileAppFlowHandler(config_entries.ConfigFlow):
"""Handle a Mobile App config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
placeholders = {
'apps_url':
'https://www.home-assistant.io/components/mobile_app/#apps'
}
return self.async_abort(reason='install_app',
description_placeholders=placeholders)
async def async_step_registration(self, user_input=None):
"""Handle a flow initialized during registration."""
return self.async_create_entry(title=user_input[ATTR_DEVICE_NAME],
data=user_input)
| 34.436364 | 79 | 0.693506 | from homeassistant import config_entries
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.components.webhook import async_register as webhook_register
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (ATTR_APP_COMPONENT, ATTR_DEVICE_ID, ATTR_DEVICE_NAME,
ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION,
DATA_CONFIG_ENTRIES, DATA_DELETED_IDS, DATA_DEVICES,
DATA_STORE, DOMAIN, STORAGE_KEY, STORAGE_VERSION)
from .http_api import RegistrationsView
from .webhook import handle_webhook
from .websocket_api import register_websocket_handlers
DEPENDENCIES = ['device_tracker', 'http', 'webhook']
REQUIREMENTS = ['PyNaCl==1.3.0']
async def async_setup(hass: HomeAssistantType, config: ConfigType):
hass.data[DOMAIN] = {
DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: [], DATA_DEVICES: {},
}
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
app_config = await store.async_load()
if app_config is None:
app_config = {
DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: [], DATA_DEVICES: {},
}
hass.data[DOMAIN] = app_config
hass.data[DOMAIN][DATA_STORE] = store
hass.http.register_view(RegistrationsView())
register_websocket_handlers(hass)
for deleted_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
try:
webhook_register(hass, DOMAIN, "Deleted Webhook", deleted_id,
handle_webhook)
except ValueError:
pass
return True
async def async_setup_entry(hass, entry):
registration = entry.data
webhook_id = registration[CONF_WEBHOOK_ID]
hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] = entry
device_registry = await dr.async_get_registry(hass)
identifiers = {
(ATTR_DEVICE_ID, registration[ATTR_DEVICE_ID]),
(CONF_WEBHOOK_ID, registration[CONF_WEBHOOK_ID])
}
device = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers=identifiers,
manufacturer=registration[ATTR_MANUFACTURER],
model=registration[ATTR_MODEL],
name=registration[ATTR_DEVICE_NAME],
sw_version=registration[ATTR_OS_VERSION]
)
hass.data[DOMAIN][DATA_DEVICES][webhook_id] = device
registration_name = 'Mobile App: {}'.format(registration[ATTR_DEVICE_NAME])
webhook_register(hass, DOMAIN, registration_name, webhook_id,
handle_webhook)
if ATTR_APP_COMPONENT in registration:
load_platform(hass, registration[ATTR_APP_COMPONENT], DOMAIN, {},
{DOMAIN: {}})
return True
@config_entries.HANDLERS.register(DOMAIN)
class MobileAppFlowHandler(config_entries.ConfigFlow):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
async def async_step_user(self, user_input=None):
placeholders = {
'apps_url':
'https://www.home-assistant.io/components/mobile_app/#apps'
}
return self.async_abort(reason='install_app',
description_placeholders=placeholders)
async def async_step_registration(self, user_input=None):
return self.async_create_entry(title=user_input[ATTR_DEVICE_NAME],
data=user_input)
| true | true |
1c348ea9086c7076385dadaf15b2832eea654037 | 14,939 | py | Python | tests/manage/pv_services/test_dynamic_pvc_accessmodes_with_reclaim_policies.py | tiffanyn108/ocs-ci | 30350e0958d14100edeadbbc5f3fe557954a76b8 | [
"MIT"
] | null | null | null | tests/manage/pv_services/test_dynamic_pvc_accessmodes_with_reclaim_policies.py | tiffanyn108/ocs-ci | 30350e0958d14100edeadbbc5f3fe557954a76b8 | [
"MIT"
] | null | null | null | tests/manage/pv_services/test_dynamic_pvc_accessmodes_with_reclaim_policies.py | tiffanyn108/ocs-ci | 30350e0958d14100edeadbbc5f3fe557954a76b8 | [
"MIT"
] | null | null | null | import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1, tier3, acceptance
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.retry import retry
from tests import helpers
from tests.fixtures import (
create_ceph_block_pool, create_rbd_secret, create_cephfs_secret,
create_project
)
logger = logging.getLogger(__name__)
class BaseDynamicPvc(ManageTest):
"""
Base class for Dynamic PVC creation tests
"""
access_mode = None
storage_type = None
expected_pod_failure = None
expected_pvc_failure = None
pvc_size = '10Gi'
io_size = '512M'
def dynamic_pvc_base(self, interface_type, reclaim_policy):
"""
Base function for Dynamic PVC creation tests
Fetches the worker nodes name list, creates StorageClass and PVC
"""
self.interface_type = interface_type
self.reclaim_policy = reclaim_policy
self.worker_nodes_list = helpers.get_worker_nodes()
if self.interface_type == constants.CEPHBLOCKPOOL:
self.interface_name = self.cbp_obj.name
self.secret_name = self.rbd_secret_obj.name
elif self.interface_type == constants.CEPHFILESYSTEM:
self.interface_name = helpers.get_cephfs_data_pool_name()
self.secret_name = self.cephfs_secret_obj.name
logger.info(
f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
)
self.sc_obj = helpers.create_storage_class(
interface_type=self.interface_type,
interface_name=self.interface_name,
secret_name=self.secret_name,
reclaim_policy=self.reclaim_policy
)
logger.info(f"Creating PVC with accessModes: {self.access_mode}")
self.pvc_obj = helpers.create_pvc(
sc_name=self.sc_obj.name, namespace=self.namespace,
size=self.pvc_size, access_mode=self.access_mode
)
helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
self.pvc_obj.reload()
logger.info(
f"Creating first pod on node: {self.worker_nodes_list[0]}"
f" with pvc {self.pvc_obj.name}"
)
self.pod_obj1 = helpers.create_pod(
interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
namespace=self.namespace, node_name=self.worker_nodes_list[0],
pod_dict_path=constants.NGINX_POD_YAML
)
helpers.wait_for_resource_state(self.pod_obj1, constants.STATUS_RUNNING)
self.pod_obj1.reload()
@retry(UnexpectedBehaviour, tries=10, delay=5, backoff=1)
def verify_expected_failure_event(self, ocs_obj, failure_str):
"""
Checks for the expected failure event message in oc describe command
"""
if failure_str in ocs_obj.describe():
logger.info(
f"Failure string {failure_str} is present in oc describe"
f" command"
)
return True
else:
raise UnexpectedBehaviour(
f"Failure string {failure_str} is not found in oc describe"
f" command"
)
def cleanup(self):
"""
Removes resources created during test execution and verifies
the reclaim policy is honored
"""
pod_objs = pod.get_all_pods(namespace=self.namespace)
if len(pod_objs) > 0:
for pod_obj in pod_objs:
pod_obj.delete()
pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
if hasattr(self, 'pvc_obj'):
pv_obj = self.pvc_obj.backed_pv_obj
self.pvc_obj.delete()
try:
assert helpers.validate_pv_delete(pv_obj.name)
except AssertionError:
if self.reclaim_policy == constants.RECLAIM_POLICY_RETAIN:
helpers.wait_for_resource_state(
pv_obj, constants.STATUS_RELEASED
)
# TODO: deletion of ceph rbd image, blocked by BZ#1723656
pv_obj.delete()
else:
raise UnexpectedBehaviour(
f"PV {pv_obj.name} is not deleted after deleting PVC"
)
if hasattr(self, 'sc_obj'):
self.sc_obj.delete()
@acceptance
@tier1
@pytest.mark.usefixtures(
create_ceph_block_pool.__name__,
create_rbd_secret.__name__,
create_cephfs_secret.__name__,
create_project.__name__
)
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-530"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-533"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-525"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-526"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
)
]
)
class TestRWODynamicPvc(BaseDynamicPvc):
"""
Automates the following test cases
OCS-530 - RBD Based RWO Dynamic PVC creation with Reclaim policy set
to Retain
OCS-533 - RBD Based RWO Dynamic PVC creation with Reclaim policy set
to Delete
OCS-525 - CephFS Based RWO Dynamic PVC creation with Reclaim policy set
to Retain
OCS-526 - CephFS Based RWO Dynamic PVC creation with Reclaim policy set
to Delete
"""
access_mode = constants.ACCESS_MODE_RWO
storage_type = 'fs'
expected_pod_failure = 'Multi-Attach error for volume'
@pytest.fixture()
def setup_base(self, request, interface_type, reclaim_policy):
def finalizer():
self.cleanup()
request.addfinalizer(finalizer)
self.dynamic_pvc_base(interface_type, reclaim_policy)
def test_rwo_dynamic_pvc(self, setup_base):
"""
RWO Dynamic PVC creation tests with Reclaim policy set to Delete/Retain
"""
logger.info(
f"Creating second pod on node: {self.worker_nodes_list[1]}"
)
pod_obj2 = helpers.create_pod(
interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
do_reload=False, namespace=self.namespace,
node_name=self.worker_nodes_list[1],
pod_dict_path=constants.NGINX_POD_YAML
)
node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
node_pod2 = pod_obj2.get().get('spec').get('nodeName')
assert node_pod1 != node_pod2, 'Both pods are on the same node'
logger.info(f"Running IO on pod {self.pod_obj1.name}")
file_name = self.pod_obj1.name
self.pod_obj1.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=file_name
)
pod.get_fio_rw_iops(self.pod_obj1)
md5sum_pod1_data = pod.cal_md5sum(
pod_obj=self.pod_obj1, file_name=file_name
)
# Verify that second pod is still in ContainerCreating state and not able to
# attain Running state due to expected failure
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_CONTAINER_CREATING
)
self.verify_expected_failure_event(
ocs_obj=pod_obj2, failure_str=self.expected_pod_failure
)
logger.info(
f"Deleting first pod so that second pod can attach"
f" {self.pvc_obj.name}"
)
self.pod_obj1.delete()
self.pod_obj1.ocp.wait_for_delete(resource_name=self.pod_obj1.name)
# Wait for second pod to be in Running state
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=240
)
assert pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
pod_obj2.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=pod_obj2.name
)
pod.get_fio_rw_iops(pod_obj2)
# Again verify data integrity
assert pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
class TestRWXDynamicPvc(BaseDynamicPvc):
    """
    Automates the following test cases
    OCS-542 - CephFS Based RWX Dynamic PVC creation with Reclaim policy set
    to Retain
    OCS-529 - CephFS Based RWX Dynamic PVC creation with Reclaim policy set
    to Delete
    OCS-547 - RBD Based RWX Dynamic PVC creation with Reclaim policy set
    to Retain
    OCS-538 - RBD Based RWX Dynamic PVC creation with Reclaim policy set
    to Delete
    """
    # ReadWriteMany: the same PVC is mounted by two pods on different nodes
    access_mode = constants.ACCESS_MODE_RWX
    storage_type = 'fs'
    @pytest.fixture()
    def setup_base(self, request, interface_type, reclaim_policy):
        """Provision SC/PVC/first pod via the base class and register cleanup as a finalizer."""
        def finalizer():
            self.cleanup()
        request.addfinalizer(finalizer)
        self.dynamic_pvc_base(interface_type, reclaim_policy)
    @acceptance
    @tier1
    @pytest.mark.bugzilla("1750916")
    @pytest.mark.bugzilla("1751866")
    @pytest.mark.usefixtures(
        create_cephfs_secret.__name__,
        create_project.__name__
    )
    @pytest.mark.parametrize(
        argnames=["interface_type", "reclaim_policy"],
        argvalues=[
            pytest.param(
                *[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
                marks=pytest.mark.polarion_id("OCS-542")
            ),
            pytest.param(
                *[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
                marks=pytest.mark.polarion_id("OCS-529")
            )
        ]
    )
    def test_rwx_dynamic_pvc(self, setup_base):
        """
        RWX Dynamic PVC creation tests with Reclaim policy set to Delete/Retain
        """
        logger.info(f"CephFS RWX test")
        logger.info(
            f"Creating second pod on node: {self.worker_nodes_list[1]} "
            f"with pvc {self.pvc_obj.name}"
        )
        # Second pod mounts the same PVC created in setup_base, on another node
        pod_obj2 = helpers.create_pod(
            interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
            namespace=self.namespace, node_name=self.worker_nodes_list[1],
            pod_dict_path=constants.NGINX_POD_YAML
        )
        helpers.wait_for_resource_state(pod_obj2, constants.STATUS_RUNNING)
        pod_obj2.reload()
        # Sanity: the two pods really landed on different nodes
        node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
        node_pod2 = pod_obj2.get().get('spec').get('nodeName')
        assert node_pod1 != node_pod2, 'Both pods are on the same node'
        # Run IO on both the pods
        logger.info(f"Running IO on pod {self.pod_obj1.name}")
        file_name1 = self.pod_obj1.name
        logger.info(file_name1)
        self.pod_obj1.run_io(
            storage_type=self.storage_type, size=self.io_size, runtime=30,
            fio_filename=file_name1
        )
        logger.info(f"Running IO on pod {pod_obj2.name}")
        file_name2 = pod_obj2.name
        pod_obj2.run_io(
            storage_type=self.storage_type, size=self.io_size, runtime=30,
            fio_filename=file_name2
        )
        # Check IO and calculate md5sum of files
        pod.get_fio_rw_iops(self.pod_obj1)
        md5sum_pod1_data = pod.cal_md5sum(
            pod_obj=self.pod_obj1, file_name=file_name1
        )
        pod.get_fio_rw_iops(pod_obj2)
        md5sum_pod2_data = pod.cal_md5sum(
            pod_obj=pod_obj2, file_name=file_name2
        )
        logger.info(f"verify data from alternate pods")
        # Each pod must see the file written by the other pod, unchanged
        assert pod.verify_data_integrity(
            pod_obj=pod_obj2, file_name=file_name1,
            original_md5sum=md5sum_pod1_data
        )
        assert pod.verify_data_integrity(
            pod_obj=self.pod_obj1, file_name=file_name2,
            original_md5sum=md5sum_pod2_data
        )
        # Verify that data is mutable from any pod
        logger.info(f"Perform modification of files from alternate pod")
        # Access and rename file written by pod-2 from pod-1
        file_path2 = pod.get_file_path(pod_obj2, file_name2)
        logger.info(file_path2)
        self.pod_obj1.exec_cmd_on_pod(
            command=f"bash -c \"mv {file_path2} {file_path2}-renamed\"",
            out_yaml_format=False
        )
        # Access and rename file written by pod-1 from pod-2
        file_path1 = pod.get_file_path(self.pod_obj1, file_name1)
        logger.info(file_path1)
        pod_obj2.exec_cmd_on_pod(
            command=f"bash -c \"mv {file_path1} {file_path1}-renamed\"",
            out_yaml_format=False
        )
        logger.info(f"Verify presence of renamed files from both pods")
        file_names = [f"{file_path1}-renamed", f"{file_path2}-renamed"]
        for file in file_names:
            assert pod.check_file_existence(self.pod_obj1, file), (
                f"File {file} doesn't exist"
            )
            logger.info(f"File {file} exists in {self.pod_obj1.name} ")
            assert pod.check_file_existence(pod_obj2, file), (
                f"File {file} doesn't exist"
            )
            logger.info(f"File {file} exists in {pod_obj2.name}")
    @tier3
    @pytest.mark.usefixtures(
        create_ceph_block_pool.__name__,
        create_rbd_secret.__name__,
    )
    @pytest.mark.parametrize(
        argnames=["interface_type", "reclaim_policy"],
        argvalues=[
            pytest.param(
                *[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN],
                marks=pytest.mark.polarion_id("OCS-547")
            ),
            pytest.param(
                *[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_DELETE],
                marks=pytest.mark.polarion_id("OCS-538")
            )
        ]
    )
    def rwx_dynamic_pvc_rbd(self, setup_base):
        """Placeholder for the RBD RWX flow; no `test_` prefix so pytest does not collect it."""
        logger.info('RWX RBD Test')
    # TODO
    # ROX Dynamic PVC creation tests not supported in 4.2
    # BZ 1727004
| 34.501155 | 84 | 0.624674 | import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1, tier3, acceptance
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.retry import retry
from tests import helpers
from tests.fixtures import (
create_ceph_block_pool, create_rbd_secret, create_cephfs_secret,
create_project
)
logger = logging.getLogger(__name__)
class BaseDynamicPvc(ManageTest):
access_mode = None
storage_type = None
expected_pod_failure = None
expected_pvc_failure = None
pvc_size = '10Gi'
io_size = '512M'
def dynamic_pvc_base(self, interface_type, reclaim_policy):
self.interface_type = interface_type
self.reclaim_policy = reclaim_policy
self.worker_nodes_list = helpers.get_worker_nodes()
if self.interface_type == constants.CEPHBLOCKPOOL:
self.interface_name = self.cbp_obj.name
self.secret_name = self.rbd_secret_obj.name
elif self.interface_type == constants.CEPHFILESYSTEM:
self.interface_name = helpers.get_cephfs_data_pool_name()
self.secret_name = self.cephfs_secret_obj.name
logger.info(
f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
)
self.sc_obj = helpers.create_storage_class(
interface_type=self.interface_type,
interface_name=self.interface_name,
secret_name=self.secret_name,
reclaim_policy=self.reclaim_policy
)
logger.info(f"Creating PVC with accessModes: {self.access_mode}")
self.pvc_obj = helpers.create_pvc(
sc_name=self.sc_obj.name, namespace=self.namespace,
size=self.pvc_size, access_mode=self.access_mode
)
helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
self.pvc_obj.reload()
logger.info(
f"Creating first pod on node: {self.worker_nodes_list[0]}"
f" with pvc {self.pvc_obj.name}"
)
self.pod_obj1 = helpers.create_pod(
interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
namespace=self.namespace, node_name=self.worker_nodes_list[0],
pod_dict_path=constants.NGINX_POD_YAML
)
helpers.wait_for_resource_state(self.pod_obj1, constants.STATUS_RUNNING)
self.pod_obj1.reload()
@retry(UnexpectedBehaviour, tries=10, delay=5, backoff=1)
def verify_expected_failure_event(self, ocs_obj, failure_str):
if failure_str in ocs_obj.describe():
logger.info(
f"Failure string {failure_str} is present in oc describe"
f" command"
)
return True
else:
raise UnexpectedBehaviour(
f"Failure string {failure_str} is not found in oc describe"
f" command"
)
def cleanup(self):
pod_objs = pod.get_all_pods(namespace=self.namespace)
if len(pod_objs) > 0:
for pod_obj in pod_objs:
pod_obj.delete()
pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
if hasattr(self, 'pvc_obj'):
pv_obj = self.pvc_obj.backed_pv_obj
self.pvc_obj.delete()
try:
assert helpers.validate_pv_delete(pv_obj.name)
except AssertionError:
if self.reclaim_policy == constants.RECLAIM_POLICY_RETAIN:
helpers.wait_for_resource_state(
pv_obj, constants.STATUS_RELEASED
)
pv_obj.delete()
else:
raise UnexpectedBehaviour(
f"PV {pv_obj.name} is not deleted after deleting PVC"
)
if hasattr(self, 'sc_obj'):
self.sc_obj.delete()
@acceptance
@tier1
@pytest.mark.usefixtures(
create_ceph_block_pool.__name__,
create_rbd_secret.__name__,
create_cephfs_secret.__name__,
create_project.__name__
)
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-530"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-533"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
marks=[
pytest.mark.polarion_id("OCS-525"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
marks=[
pytest.mark.polarion_id("OCS-526"),
pytest.mark.bugzilla("1751866"),
pytest.mark.bugzilla("1750916"),
pytest.mark.bugzilla("1772990")
]
)
]
)
class TestRWODynamicPvc(BaseDynamicPvc):
access_mode = constants.ACCESS_MODE_RWO
storage_type = 'fs'
expected_pod_failure = 'Multi-Attach error for volume'
@pytest.fixture()
def setup_base(self, request, interface_type, reclaim_policy):
def finalizer():
self.cleanup()
request.addfinalizer(finalizer)
self.dynamic_pvc_base(interface_type, reclaim_policy)
def test_rwo_dynamic_pvc(self, setup_base):
logger.info(
f"Creating second pod on node: {self.worker_nodes_list[1]}"
)
pod_obj2 = helpers.create_pod(
interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
do_reload=False, namespace=self.namespace,
node_name=self.worker_nodes_list[1],
pod_dict_path=constants.NGINX_POD_YAML
)
node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
node_pod2 = pod_obj2.get().get('spec').get('nodeName')
assert node_pod1 != node_pod2, 'Both pods are on the same node'
logger.info(f"Running IO on pod {self.pod_obj1.name}")
file_name = self.pod_obj1.name
self.pod_obj1.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=file_name
)
pod.get_fio_rw_iops(self.pod_obj1)
md5sum_pod1_data = pod.cal_md5sum(
pod_obj=self.pod_obj1, file_name=file_name
)
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_CONTAINER_CREATING
)
self.verify_expected_failure_event(
ocs_obj=pod_obj2, failure_str=self.expected_pod_failure
)
logger.info(
f"Deleting first pod so that second pod can attach"
f" {self.pvc_obj.name}"
)
self.pod_obj1.delete()
self.pod_obj1.ocp.wait_for_delete(resource_name=self.pod_obj1.name)
helpers.wait_for_resource_state(
resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=240
)
assert pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
pod_obj2.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=pod_obj2.name
)
pod.get_fio_rw_iops(pod_obj2)
assert pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name,
original_md5sum=md5sum_pod1_data
)
class TestRWXDynamicPvc(BaseDynamicPvc):
access_mode = constants.ACCESS_MODE_RWX
storage_type = 'fs'
@pytest.fixture()
def setup_base(self, request, interface_type, reclaim_policy):
def finalizer():
self.cleanup()
request.addfinalizer(finalizer)
self.dynamic_pvc_base(interface_type, reclaim_policy)
@acceptance
@tier1
@pytest.mark.bugzilla("1750916")
@pytest.mark.bugzilla("1751866")
@pytest.mark.usefixtures(
create_cephfs_secret.__name__,
create_project.__name__
)
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_RETAIN],
marks=pytest.mark.polarion_id("OCS-542")
),
pytest.param(
*[constants.CEPHFILESYSTEM, constants.RECLAIM_POLICY_DELETE],
marks=pytest.mark.polarion_id("OCS-529")
)
]
)
def test_rwx_dynamic_pvc(self, setup_base):
logger.info(f"CephFS RWX test")
logger.info(
f"Creating second pod on node: {self.worker_nodes_list[1]} "
f"with pvc {self.pvc_obj.name}"
)
pod_obj2 = helpers.create_pod(
interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
namespace=self.namespace, node_name=self.worker_nodes_list[1],
pod_dict_path=constants.NGINX_POD_YAML
)
helpers.wait_for_resource_state(pod_obj2, constants.STATUS_RUNNING)
pod_obj2.reload()
node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
node_pod2 = pod_obj2.get().get('spec').get('nodeName')
assert node_pod1 != node_pod2, 'Both pods are on the same node'
logger.info(f"Running IO on pod {self.pod_obj1.name}")
file_name1 = self.pod_obj1.name
logger.info(file_name1)
self.pod_obj1.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=file_name1
)
logger.info(f"Running IO on pod {pod_obj2.name}")
file_name2 = pod_obj2.name
pod_obj2.run_io(
storage_type=self.storage_type, size=self.io_size, runtime=30,
fio_filename=file_name2
)
pod.get_fio_rw_iops(self.pod_obj1)
md5sum_pod1_data = pod.cal_md5sum(
pod_obj=self.pod_obj1, file_name=file_name1
)
pod.get_fio_rw_iops(pod_obj2)
md5sum_pod2_data = pod.cal_md5sum(
pod_obj=pod_obj2, file_name=file_name2
)
logger.info(f"verify data from alternate pods")
assert pod.verify_data_integrity(
pod_obj=pod_obj2, file_name=file_name1,
original_md5sum=md5sum_pod1_data
)
assert pod.verify_data_integrity(
pod_obj=self.pod_obj1, file_name=file_name2,
original_md5sum=md5sum_pod2_data
)
logger.info(f"Perform modification of files from alternate pod")
file_path2 = pod.get_file_path(pod_obj2, file_name2)
logger.info(file_path2)
self.pod_obj1.exec_cmd_on_pod(
command=f"bash -c \"mv {file_path2} {file_path2}-renamed\"",
out_yaml_format=False
)
file_path1 = pod.get_file_path(self.pod_obj1, file_name1)
logger.info(file_path1)
pod_obj2.exec_cmd_on_pod(
command=f"bash -c \"mv {file_path1} {file_path1}-renamed\"",
out_yaml_format=False
)
logger.info(f"Verify presence of renamed files from both pods")
file_names = [f"{file_path1}-renamed", f"{file_path2}-renamed"]
for file in file_names:
assert pod.check_file_existence(self.pod_obj1, file), (
f"File {file} doesn't exist"
)
logger.info(f"File {file} exists in {self.pod_obj1.name} ")
assert pod.check_file_existence(pod_obj2, file), (
f"File {file} doesn't exist"
)
logger.info(f"File {file} exists in {pod_obj2.name}")
@tier3
@pytest.mark.usefixtures(
create_ceph_block_pool.__name__,
create_rbd_secret.__name__,
)
@pytest.mark.parametrize(
argnames=["interface_type", "reclaim_policy"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_RETAIN],
marks=pytest.mark.polarion_id("OCS-547")
),
pytest.param(
*[constants.CEPHBLOCKPOOL, constants.RECLAIM_POLICY_DELETE],
marks=pytest.mark.polarion_id("OCS-538")
)
]
)
def rwx_dynamic_pvc_rbd(self, setup_base):
logger.info('RWX RBD Test')
| true | true |
1c34901541213febf90ddc158ba76566cb2e4c41 | 1,192 | py | Python | fastai2/callback/data.py | mrT23/fastai2 | 7eaa4a6a10a8836fbbb90360a7df92d170d1bba3 | [
"Apache-2.0"
] | null | null | null | fastai2/callback/data.py | mrT23/fastai2 | 7eaa4a6a10a8836fbbb90360a7df92d170d1bba3 | [
"Apache-2.0"
] | null | null | null | fastai2/callback/data.py | mrT23/fastai2 | 7eaa4a6a10a8836fbbb90360a7df92d170d1bba3 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/14a_callback.data.ipynb (unless otherwise specified).
__all__ = ['CollectDataCallback', 'WeightedDL', 'weighted_databunch']
# Cell
from ..basics import *
# Cell
class CollectDataCallback(Callback):
    "Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
    def begin_fit(self):
        # fresh accumulator at the start of every fit
        self.data = L()
    def after_batch(self):
        record = to_detach((self.xb, self.yb, self.pred, self.loss))
        self.data.append(record)
# Cell
@delegates()
class WeightedDL(TfmdDL):
    "A `TfmdDL` that samples shuffled batches according to per-item weights `wgts`."
    def __init__(self, dataset=None, bs=None, wgts=None, **kwargs):
        super().__init__(dataset=dataset, bs=bs, **kwargs)
        # default to uniform weights, then normalize to a probability distribution
        if wgts is None:
            wgts = [1.] * len(dataset)
        wgts = array(wgts)
        self.wgts = wgts / wgts.sum()
    def get_idxs(self):
        # nothing to sample from an empty dataset
        if self.n == 0:
            return []
        # without shuffling keep the parent's (sequential) index order
        if not self.shuffle:
            return super().get_idxs()
        # weighted sampling with replacement over all indices
        return list(np.random.choice(self.n, self.n, p=self.wgts))
# Cell
@patch
@delegates(Datasets.dataloaders)
def weighted_databunch(self:Datasets, wgts, bs=64, **kwargs):
    "Build dataloaders whose first (training) split is a `WeightedDL` using `wgts`."
    # only the first subset receives the weights; the remaining subsets get no extra kwargs
    xtra_kwargs = [{}] * (self.n_subsets - 1)
    dl_kwargs = ({'wgts': wgts}, *xtra_kwargs)
    return self.dataloaders(bs=bs, dl_type=WeightedDL, dl_kwargs=dl_kwargs, **kwargs)
__all__ = ['CollectDataCallback', 'WeightedDL', 'weighted_databunch']
from ..basics import *
class CollectDataCallback(Callback):
def begin_fit(self): self.data = L()
def after_batch(self): self.data.append(to_detach((self.xb,self.yb,self.pred,self.loss)))
@delegates()
class WeightedDL(TfmdDL):
def __init__(self, dataset=None, bs=None, wgts=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
wgts = array([1.]*len(dataset) if wgts is None else wgts)
self.wgts = wgts/wgts.sum()
def get_idxs(self):
if self.n==0: return []
if not self.shuffle: return super().get_idxs()
return list(np.random.choice(self.n, self.n, p=self.wgts))
@patch
@delegates(Datasets.dataloaders)
def weighted_databunch(self:Datasets, wgts, bs=64, **kwargs):
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=WeightedDL, dl_kwargs=({'wgts':wgts}, *xtra_kwargs), **kwargs) | true | true |
1c3490a494e5f7b00a2da5b5586d9f7d65b58fbd | 336 | py | Python | backend/base/urls/order_urls.py | drcan94/Dj-React-eCommerce | 498395c2f03528bce8348e5f0aa88221a01b9df8 | [
"MIT"
] | 1 | 2022-01-08T14:11:03.000Z | 2022-01-08T14:11:03.000Z | backend/base/urls/order_urls.py | drcan94/Dj-React-eCommerce | 498395c2f03528bce8348e5f0aa88221a01b9df8 | [
"MIT"
] | null | null | null | backend/base/urls/order_urls.py | drcan94/Dj-React-eCommerce | 498395c2f03528bce8348e5f0aa88221a01b9df8 | [
"MIT"
] | null | null | null | from django.urls import path
from base.views import order_views as views
urlpatterns = [
path('add/', views.addOrderItems, name="order-add"),
path('myorders/', views.getMyOrders, name="myorders"),
path('<str:pk>/', views.getOrderItem, name="user-order"),
path('<str:pk>/pay/', views.updateOrderToPaid, name="pay"),
]
| 28 | 63 | 0.681548 | from django.urls import path
from base.views import order_views as views
urlpatterns = [
path('add/', views.addOrderItems, name="order-add"),
path('myorders/', views.getMyOrders, name="myorders"),
path('<str:pk>/', views.getOrderItem, name="user-order"),
path('<str:pk>/pay/', views.updateOrderToPaid, name="pay"),
]
| true | true |
1c3492f1e73b1b96940d9fdf764f48a4114cc549 | 41,729 | py | Python | tests/accelerators/test_accelerator_connector.py | JanSellner/pytorch-lightning | 0e0da8c3fc2c6d5e7ac54900621a82d213f8ebbf | [
"Apache-2.0"
] | null | null | null | tests/accelerators/test_accelerator_connector.py | JanSellner/pytorch-lightning | 0e0da8c3fc2c6d5e7ac54900621a82d213f8ebbf | [
"Apache-2.0"
] | null | null | null | tests/accelerators/test_accelerator_connector.py | JanSellner/pytorch-lightning | 0e0da8c3fc2c6d5e7ac54900621a82d213f8ebbf | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
from typing import Optional
from unittest import mock
from unittest.mock import Mock
import pytest
import torch
import torch.distributed
import pytorch_lightning
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.gpu import GPUAccelerator
from pytorch_lightning.plugins import DoublePrecisionPlugin, LayerSync, NativeSyncBatchNorm, PrecisionPlugin
from pytorch_lightning.plugins.environments import (
KubeflowEnvironment,
LightningEnvironment,
SLURMEnvironment,
TorchElasticEnvironment,
)
from pytorch_lightning.plugins.io import TorchCheckpointIO
from pytorch_lightning.strategies import (
DataParallelStrategy,
DDP2Strategy,
DDPShardedStrategy,
DDPSpawnShardedStrategy,
DDPSpawnStrategy,
DDPStrategy,
DeepSpeedStrategy,
ParallelStrategy,
SingleDeviceStrategy,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.runif import RunIf
# TODO: please modify/sunset any test that has accelerator=ddp/ddp2/ddp_cpu/ddp_spawn @daniellepintz
def test_accelerator_choice_cpu(tmpdir):
    """With no accelerator/strategy flags, the Trainer selects CPU + single-device strategy."""
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, SingleDeviceStrategy)
@pytest.mark.parametrize(("devices", "num_nodes"), ([(1, 1), (1, 2), (2, 1), (2, 2)]))
def test_accelerator_choice_ddp_cpu(tmpdir, devices: int, num_nodes: int):
    """`accelerator="ddp_cpu"` maps to the CPU accelerator with a DDP(-spawn) strategy."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=devices, num_nodes=num_nodes)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    # only the single-local-device, multi-node combination avoids spawning
    no_spawn = devices == 1 and num_nodes > 1
    assert isinstance(trainer.strategy, DDPStrategy if no_spawn else DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
    """Deprecated `accelerator="ddp"` resolves to GPU + DDPStrategy (CUDA mocked as available)."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
    """Deprecated `accelerator="ddp_spawn"` resolves to GPU + DDPSpawnStrategy."""
    with pytest.deprecated_call(match=r"accelerator='ddp_spawn'\)` has been deprecated"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp_spawn", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(
    os.environ,
    {
        # emulate a 2-task SLURM job where this process is local/global rank 1
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_slurm(*_):
    """Under SLURM env vars, deprecated `accelerator="ddp"` selects the SLURM cluster environment."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    # ranks must come from SLURM_LOCALID above
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        # emulate a 2-task SLURM job where this process is local/global rank 1
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp2_slurm(*_):
    """Under SLURM env vars, deprecated `accelerator="ddp2"` selects DDP2 + SLURM environment."""
    with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        # emulate a TorchElastic launch where this process is local/global rank 1
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",  # present for torch >= 1.9.1
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_te(*_):
    """Under torchelastic env vars, deprecated `accelerator="ddp"` picks TorchElasticEnvironment."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        # emulate a TorchElastic launch where this process is local/global rank 1
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp2_te(*_):
    """Under torchelastic env vars, deprecated `accelerator="ddp2"` picks DDP2 + TorchElastic."""
    with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        # torchelastic env vars only; no CUDA devices (device_count mocked to 0)
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_te(*_):
    """`accelerator="ddp_cpu"` under torchelastic picks CPU + DDP + TorchElasticEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        # KUBERNETES_PORT marks a Kubeflow-managed pod; MASTER_* locate rank 0
        "CUDA_VISIBLE_DEVICES": "0",
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_kubeflow(*_):
    """Under Kubeflow env vars, deprecated `accelerator="ddp"` picks KubeflowEnvironment."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        # Kubeflow env vars without CUDA devices (device_count mocked to 0)
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_kubeflow(*_):
    """`accelerator="ddp_cpu"` under Kubeflow picks CPU + DDP + KubeflowEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=1)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        # emulate a 2-task SLURM job; this process is rank 0
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_slurm(*_):
    """`accelerator="ddp_cpu"` under SLURM picks CPU + DDP + SLURMEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.local_rank == 0
@RunIf(skip_windows=True, standalone=True)
def test_accelerator_choice_ddp_cpu_and_strategy(tmpdir):
    """Test that accelerator="ddp_cpu" can work together with an instance of DDPStrategy."""
    # shared body lives in _test_accelerator_choice_ddp_cpu_and_strategy
    _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPStrategy)
@RunIf(skip_windows=True, skip_49370=True)
def test_accelerator_choice_ddp_cpu_and_strategy_spawn(tmpdir):
    """Test that accelerator="ddp_cpu" can work together with an instance of DDPPSpawnPlugin."""
    # shared body lives in _test_accelerator_choice_ddp_cpu_and_strategy
    _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPSpawnStrategy)
def _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class):
    """Shared body: an explicit strategy instance must be kept and paired with the CPU accelerator."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=ddp_strategy_class(find_unused_parameters=True),
        fast_dev_run=True,
        accelerator="ddp_cpu",
        devices=2,
    )
    assert isinstance(trainer.strategy, ddp_strategy_class)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    # two CPU "devices" requested above
    assert trainer.strategy.num_processes == 2
    assert trainer.strategy.parallel_devices == [torch.device("cpu")] * 2
@mock.patch.dict(
    os.environ,
    {
        # SLURM env vars are set on purpose; the custom plugin must still win
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
def test_accelerator_choice_ddp_cpu_custom_cluster(_, tmpdir):
    """Test that we choose the custom cluster even when SLURM or TE flags are around."""
    class CustomCluster(LightningEnvironment):
        @property
        def main_address(self):
            return "asdf"
        @property
        def creates_processes_externally(self) -> bool:
            return True
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[CustomCluster()], fast_dev_run=True, accelerator="ddp_cpu", devices=2
    )
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, CustomCluster)
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_custom_accelerator(device_count_mock, setup_distributed_mock):
    """User-defined Accelerator/Precision/Strategy subclasses are used verbatim by the Trainer."""
    class Accel(Accelerator):
        @staticmethod
        def parse_devices(devices):
            return devices
        @staticmethod
        def get_parallel_devices(devices):
            return [torch.device("cpu")] * devices
        @staticmethod
        def auto_device_count() -> int:
            return 1
        @staticmethod
        def is_available() -> bool:
            return True
        @staticmethod
        def name() -> str:
            return "custom_acc_name"
    class Prec(PrecisionPlugin):
        pass
    class Strat(SingleDeviceStrategy):
        pass
    strategy = Strat(device=torch.device("cpu"), accelerator=Accel(), precision_plugin=Prec())
    trainer = Trainer(strategy=strategy, fast_dev_run=True, devices=2)
    assert isinstance(trainer.accelerator, Accel)
    assert isinstance(trainer.strategy, Strat)
    assert isinstance(trainer.precision_plugin, Prec)
    assert trainer._accelerator_connector.strategy is strategy
    # repeat with a parallel (DDP-based) strategy subclass
    class Strat(DDPStrategy):
        pass
    strategy = Strat(accelerator=Accel(), precision_plugin=Prec())
    trainer = Trainer(strategy=strategy, fast_dev_run=True, devices=2)
    assert isinstance(trainer.accelerator, Accel)
    assert isinstance(trainer.strategy, Strat)
    assert isinstance(trainer.precision_plugin, Prec)
    assert trainer._accelerator_connector.strategy is strategy
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_dist_backend_accelerator_mapping(*_):
    """`strategy="ddp_spawn"` + `accelerator="cpu"` maps to CPUAccelerator with a DDP strategy."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert trainer.strategy.local_rank == 0
@mock.patch("torch.cuda.device_count", return_value=2)
def test_ipython_incompatible_backend_error(_, monkeypatch):
    """Interactive (IPython) sessions must reject strategies that are not interactive-compatible."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"):
        Trainer(strategy="ddp", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp2'\)`.*is not compatible"):
        Trainer(strategy="ddp2", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_spawn'\)`.*is not compatible"):
        Trainer(strategy="ddp_spawn", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_sharded_spawn'\)`.*is not compatible"):
        Trainer(strategy="ddp_sharded_spawn", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"):
        # Edge case: AcceleratorConnector maps dp to ddp if accelerator != gpu
        Trainer(strategy="dp")
@mock.patch("torch.cuda.device_count", return_value=2)
def test_ipython_compatible_dp_strategy_gpu(_, monkeypatch):
    """DP on GPU must remain usable in interactive (notebook) sessions."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    trainer = Trainer(strategy="dp", accelerator="gpu")
    assert trainer.strategy.launcher is None or trainer.strategy.launcher.is_interactive_compatible
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.is_available", return_value=True)
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.parse_devices", return_value=8)
def test_ipython_compatible_strategy_tpu(mock_devices, mock_tpu_acc_avail, monkeypatch):
    """The strategy chosen for a (mocked) TPU accelerator must remain usable in interactive sessions."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    trainer = Trainer(accelerator="tpu")
    assert trainer.strategy.launcher is None or trainer.strategy.launcher.is_interactive_compatible
@pytest.mark.parametrize(["accelerator", "plugin"], [("ddp_spawn", "ddp_sharded"), (None, "ddp_sharded")])
def test_plugin_accelerator_choice(accelerator: Optional[str], plugin: str):
    """Ensure that when a plugin and accelerator is passed in, that the plugin takes precedent."""
    if accelerator is None:
        # Only the deprecated strategy-via-`plugins` warning fires when no accelerator is passed.
        with pytest.deprecated_call(match="Passing .* `strategy` to the `plugins`"):
            trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
    else:
        # Passing a strategy name through `accelerator` is itself deprecated.
        with pytest.deprecated_call(match=r"accelerator=.*\)` has been deprecated"):
            trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
    assert isinstance(trainer.strategy, DDPShardedStrategy)
    with pytest.deprecated_call(match="Passing .* `strategy` to the `plugins`"):
        trainer = Trainer(plugins=plugin, accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, DDPShardedStrategy)
@pytest.mark.parametrize(
    ["accelerator", "plugin"],
    [
        ("ddp", DDPStrategy),
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_sharded", DDPShardedStrategy),
        ("ddp_sharded_spawn", DDPSpawnShardedStrategy),
        pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)),
    ],
)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("torch.cuda.device_count", return_value=2)
@pytest.mark.parametrize("devices", [1, 2])
def test_accelerator_choice_multi_node_gpu(
    mock_is_available, mock_device_count, tmpdir, accelerator: str, plugin: ParallelStrategy, devices: int
):
    """Deprecated strategy-in-`accelerator` spellings still map to the matching strategy class in multi-node runs."""
    with pytest.deprecated_call(match=r"accelerator=.*\)` has been deprecated"):
        trainer = Trainer(default_root_dir=tmpdir, num_nodes=2, accelerator=accelerator, devices=devices)
    assert isinstance(trainer.strategy, plugin)
@mock.patch("torch.cuda.is_available", return_value=False)
def test_accelerator_cpu(_):
    """With CUDA unavailable, CPU selection works and any GPU request raises a MisconfigurationException."""
    trainer = Trainer(accelerator="cpu")
    assert isinstance(trainer.accelerator, CPUAccelerator)
    with pytest.raises(MisconfigurationException, match="You requested gpu:"):
        trainer = Trainer(gpus=1)
    with pytest.raises(
        MisconfigurationException,
        match="GPUAccelerator can not run on your system since the accelerator is not available.",
    ):
        trainer = Trainer(accelerator="gpu")
    with pytest.raises(MisconfigurationException, match="You requested gpu:"):
        # `gpus` wins over (conflicts with) `accelerator="cpu"` and still triggers the GPU check.
        trainer = Trainer(accelerator="cpu", gpus=1)
@mock.patch("torch.cuda.is_available", return_value=False)
@pytest.mark.parametrize("devices", ["0", 0, []])
def test_passing_zero_and_empty_list_to_devices_flag(_, devices):
    """Zero-ish `devices` values ("0", 0, []) with an unavailable GPU accelerator must raise."""
    with pytest.raises(
        MisconfigurationException, match="can not run on your system since the accelerator is not available."
    ):
        Trainer(accelerator="gpu", devices=devices)
@RunIf(min_gpus=1)
def test_accelerator_gpu():
    """On a machine with a GPU, both explicit "gpu" and "auto" selection yield GPUAccelerator."""
    trainer = Trainer(accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    # No `devices` given: accelerator choice alone is enough.
    trainer = Trainer(accelerator="gpu")
    assert isinstance(trainer.accelerator, GPUAccelerator)
    trainer = Trainer(accelerator="auto", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@pytest.mark.parametrize(["devices", "plugin"], [(1, SingleDeviceStrategy), (5, DDPSpawnStrategy)])
def test_accelerator_cpu_with_devices(devices, plugin):
    """CPU accelerator with a device count selects SingleDevice (1) or DDPSpawn (>1) and reports the count."""
    trainer = Trainer(accelerator="cpu", devices=devices)
    assert trainer.num_devices == devices
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, CPUAccelerator)
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
    ["devices", "plugin"], [(1, SingleDeviceStrategy), ([1], SingleDeviceStrategy), (2, DDPSpawnStrategy)]
)
def test_accelerator_gpu_with_devices(devices, plugin):
    """GPU accelerator with `devices` selects the matching strategy and reports the device count.

    Fix: the previous `assert a == b if isinstance(...) else devices` parsed as
    `(a == b) if isinstance(...) else devices`, so for int `devices` the assert
    only evaluated the truthiness of `devices` and could never fail. The expected
    count is now computed explicitly before asserting.
    """
    trainer = Trainer(accelerator="gpu", devices=devices)
    # A list picks specific device indices; an int is already a count.
    expected_num_devices = len(devices) if isinstance(devices, list) else devices
    assert trainer.num_devices == expected_num_devices
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@RunIf(min_gpus=1)
def test_accelerator_auto_with_devices_gpu():
    """`accelerator="auto"` with one device picks the GPU on a GPU machine."""
    trainer = Trainer(accelerator="auto", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert trainer.num_devices == 1
def test_validate_accelerator_and_devices():
    """`accelerator="ddp_cpu"` plus a device count yields a CPU accelerator reporting that count."""
    ddp_cpu_trainer = Trainer(accelerator="ddp_cpu", devices=2)
    assert ddp_cpu_trainer.num_devices == 2
    assert isinstance(ddp_cpu_trainer.accelerator, CPUAccelerator)
def test_set_devices_if_none_cpu():
    """An explicit `devices=3` on the CPU accelerator is reported back as the device count."""
    cpu_trainer = Trainer(accelerator="cpu", devices=3)
    assert cpu_trainer.num_devices == 3
def test_devices_with_cpu_only_supports_integer():
    """A non-int `devices` string on CPU warns and falls back to a single device."""
    with pytest.warns(UserWarning, match="The flag `devices` must be an int"):
        trainer = Trainer(accelerator="cpu", devices="1,3")
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert trainer.num_devices == 1
@pytest.mark.parametrize("training_type", ["ddp2", "dp"])
def test_unsupported_strategy_types_on_cpu(training_type):
    """GPU-only strategies ("ddp2"/"dp") passed via `accelerator` warn and fall back to DDP on CPU."""
    with pytest.warns(UserWarning, match="is not supported on CPUs, hence setting `strategy='ddp"):
        trainer = Trainer(accelerator=training_type, num_processes=2)
    assert isinstance(trainer.strategy, DDPStrategy)
def test_accelerator_ddp_for_cpu(tmpdir):
    """Deprecated `accelerator="ddp"` with `num_processes` resolves to CPUAccelerator + DDPStrategy."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated"):
        trainer = Trainer(accelerator="ddp", num_processes=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
def test_exception_when_strategy_used_with_accelerator():
    """Passing a strategy name through both `accelerator` and `strategy` raises (and the spelling is deprecated)."""
    with pytest.raises(MisconfigurationException, match="but have also passed"), pytest.deprecated_call(
        match=r"accelerator='ddp'\)` has been deprecated"
    ):
        Trainer(accelerator="ddp", strategy="ddp_spawn")
def test_exception_when_strategy_used_with_plugins():
    """Passing a strategy through both `plugins` and `strategy` raises (and the `plugins` spelling is deprecated)."""
    with pytest.raises(MisconfigurationException, match="only specify one strategy, but you have passed"):
        with pytest.deprecated_call(match=r"`strategy` to the `plugins` flag in Trainer has been deprecated"):
            Trainer(plugins="ddp_find_unused_parameters_false", strategy="ddp_spawn")
def test_exception_invalid_strategy():
    """Names reserved for accelerator-style flags ("ddp_cpu", "tpu_spawn") are rejected as strategies."""
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_cpu'\)` is not a valid"):
        Trainer(strategy="ddp_cpu")
    with pytest.raises(MisconfigurationException, match=r"strategy='tpu_spawn'\)` is not a valid"):
        Trainer(strategy="tpu_spawn")
@pytest.mark.parametrize(
    ["strategy", "plugin"],
    [
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy),
        ("ddp", DDPStrategy),
        ("ddp_find_unused_parameters_false", DDPStrategy),
    ],
)
def test_strategy_choice_cpu_str(tmpdir, strategy, plugin):
    """String strategy names on CPU map to the expected strategy classes."""
    trainer = Trainer(strategy=strategy, accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_strategy_choice_cpu_plugin(tmpdir, plugin):
    """Strategy instances passed directly are kept as-is on CPU."""
    trainer = Trainer(strategy=plugin(), accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
    ["strategy", "plugin"],
    [
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy),
        ("ddp", DDPStrategy),
        ("ddp_find_unused_parameters_false", DDPStrategy),
        ("ddp2", DDP2Strategy),
        ("dp", DataParallelStrategy),
        ("ddp_sharded", DDPShardedStrategy),
        ("ddp_sharded_spawn", DDPSpawnShardedStrategy),
        pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)),
    ],
)
def test_strategy_choice_gpu_str(tmpdir, strategy, plugin):
    """String strategy names on GPU map to the expected strategy classes."""
    trainer = Trainer(strategy=strategy, accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_strategy_choice_gpu_plugin(tmpdir, plugin):
    """Strategy instances passed directly are kept as-is on GPU."""
    trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_device_type_when_training_plugin_gpu_passed(tmpdir, plugin):
    """A strategy instance combined with `accelerator="gpu"` keeps the strategy and selects GPUAccelerator."""
    trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@pytest.mark.parametrize("precision", [1, 12, "invalid"])
def test_validate_precision_type(tmpdir, precision):
    """Unsupported `precision` values are rejected at Trainer construction."""
    with pytest.raises(MisconfigurationException, match=f"Precision {repr(precision)} is invalid"):
        Trainer(precision=precision)
def test_amp_level_raises_error_with_native():
    """`amp_level` is an apex-only option; combining it with the native AMP backend raises."""
    with pytest.raises(MisconfigurationException, match="O2'` but it's only supported with `amp_backend='apex'`"):
        _ = Trainer(amp_level="O2", amp_backend="native", precision=16)
def test_strategy_choice_ddp_spawn_cpu(tmpdir):
    """`strategy="ddp_spawn"` on CPU selects DDPSpawnStrategy with the default LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_strategy_choice_ddp(cuda_available_mock, device_count_mock):
    """`strategy="ddp"` on (mocked) GPU selects DDPStrategy with the default LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
    """`strategy="ddp_spawn"` on (mocked) GPU selects DDPSpawnStrategy with the default LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@RunIf(min_gpus=2)
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
    """DDP (by name or instance) under a mocked SLURM env uses SLURMEnvironment with the env's local rank."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="gpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    # SLURM_LOCALID=1 must propagate to both the environment and the strategy.
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
@pytest.mark.parametrize("strategy", ["ddp2", DDP2Strategy()])
def test_strategy_choice_ddp2_slurm(
    set_device_mock, device_count_mock, setup_distributed_mock, is_available_mock, strategy
):
    """DDP2 (by name or instance) under a mocked SLURM env uses SLURMEnvironment with the env's local rank."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="gpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_te(*_):
    """Under a TorchElastic env, `strategy="ddp"` on GPU selects DDPStrategy + TorchElasticEnvironment.

    Fix: `torch.cuda.is_available` was patched twice in the decorator stack; the
    duplicate was removed. The function absorbs all injected mocks via `*_`, so
    the changed mock count is transparent to the body.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    # LOCAL_RANK=1 from the mocked env must propagate to environment and strategy.
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp2_te(*_):
    """Under a TorchElastic env, `strategy="ddp2"` on GPU selects DDP2Strategy + TorchElasticEnvironment.

    Fix: `torch.cuda.is_available` was patched twice in the decorator stack; the
    duplicate was removed. The function absorbs all injected mocks via `*_`.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp2", accelerator="gpu", devices=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_te(*_):
    """Under a TorchElastic env, CPU `ddp_spawn` resolves to DDPStrategy + TorchElasticEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0",
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_kubeflow(*_):
    """Under a Kubeflow env, `strategy="ddp"` on GPU selects DDPStrategy + KubeflowEnvironment.

    Fix: `torch.cuda.is_available` was patched twice in the decorator stack; the
    duplicate was removed. The function absorbs all injected mocks via `*_`.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    # Kubeflow has no LOCAL_RANK; the environment reports local rank 0.
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_kubeflow(*_):
    """Under a Kubeflow env, CPU `ddp_spawn` resolves to DDPStrategy + KubeflowEnvironment (local rank 0)."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock, strategy):
    """DDP (by name or instance) on CPU under a mocked SLURM env uses SLURMEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.local_rank == 0
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.is_available", return_value=True)
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.parse_devices", return_value=8)
def test_unsupported_tpu_choice(mock_devices, mock_tpu_acc_avail):
    """Invalid precision/strategy combinations with a (mocked) TPU accelerator raise."""
    with pytest.raises(MisconfigurationException, match=r"accelerator='tpu', precision=64\)` is not implemented"):
        Trainer(accelerator="tpu", precision=64)
    # if user didn't set strategy, AcceleratorConnector will choose the TPUSingleStrategy or TPUSpawnStrategy
    with pytest.raises(ValueError, match="TPUAccelerator` can only be used with a `SingleTPUStrategy`"):
        with pytest.warns(UserWarning, match=r"accelerator='tpu', precision=16\)` but native AMP is not supported"):
            Trainer(accelerator="tpu", precision=16, strategy="ddp")
    with pytest.raises(ValueError, match="TPUAccelerator` can only be used with a `SingleTPUStrategy`"):
        with pytest.warns(UserWarning, match=r"accelerator='tpu', precision=16\)` but apex AMP is not supported"):
            Trainer(accelerator="tpu", precision=16, amp_backend="apex", strategy="single_device")
@mock.patch("pytorch_lightning.accelerators.ipu.IPUAccelerator.is_available", return_value=True)
def test_unsupported_ipu_choice(mock_ipu_acc_avail, monkeypatch):
    """Unsupported precision values ("bf16", 64) with a (mocked) IPU accelerator raise."""
    import pytorch_lightning.strategies.ipu as ipu
    import pytorch_lightning.utilities.imports as imports
    # Force IPU availability at both lookup sites.
    monkeypatch.setattr(imports, "_IPU_AVAILABLE", True)
    monkeypatch.setattr(ipu, "_IPU_AVAILABLE", True)
    with pytest.raises(ValueError, match=r"accelerator='ipu', precision='bf16'\)` is not supported"):
        Trainer(accelerator="ipu", precision="bf16")
    with pytest.raises(ValueError, match=r"accelerator='ipu', precision=64\)` is not supported"):
        Trainer(accelerator="ipu", precision=64)
@mock.patch("torch.cuda.is_available", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._TPU_AVAILABLE", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._IPU_AVAILABLE", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._HPU_AVAILABLE", return_value=False)
def test_devices_auto_choice_cpu(
    hpu_available_mock, ipu_available_mock, tpu_available_mock, cuda_available_mock
):
    """With no accelerator available, `accelerator="auto"` falls back to CPU with one device.

    Fix: parameter names now follow mock's bottom-up injection order — the
    innermost decorator (`_HPU_AVAILABLE`) supplies the first argument; the old
    names mapped the mocks to the wrong patches.
    NOTE(review): `mock.patch` on the `_*_AVAILABLE` module constants replaces
    each constant with a (truthy) MagicMock regardless of `return_value`;
    presumably the connector consults other availability checks here — confirm.
    """
    trainer = Trainer(accelerator="auto", devices="auto")
    assert trainer.num_devices == 1
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("torch.cuda.device_count", return_value=2)
def test_devices_auto_choice_gpu(is_gpu_available_mock, device_count_mock):
    """With (mocked) GPUs present, `devices="auto"` selects the GPU accelerator and all visible devices."""
    trainer = Trainer(accelerator="auto", devices="auto")
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert trainer.num_devices == 2
@pytest.mark.parametrize(
    ["parallel_devices", "accelerator"],
    [([torch.device("cpu")], "gpu"), ([torch.device("cuda", i) for i in range(8)], ("tpu"))],
)
def test_parallel_devices_in_strategy_confilict_with_accelerator(parallel_devices, accelerator):
    """`parallel_devices` set on the strategy must agree with the `accelerator` flag, else raise."""
    # NOTE: "confilict" in the test name is a typo, kept since the name is the test's public id.
    with pytest.raises(MisconfigurationException, match=r"parallel_devices set through"):
        Trainer(strategy=DDPStrategy(parallel_devices=parallel_devices), accelerator=accelerator)
@pytest.mark.parametrize("deterministic", [True, False])
def test_deterministic_init(deterministic):
    """The `deterministic` flag is stored and, when on, sets the CUBLAS/Horovod env knobs."""
    trainer = Trainer(accelerator="auto", deterministic=deterministic)
    assert trainer._accelerator_connector.deterministic == deterministic
    if deterministic:
        assert os.environ.get("CUBLAS_WORKSPACE_CONFIG") == ":4096:8"
        assert os.environ.get("HOROVOD_FUSION_THRESHOLD") == "0"
@pytest.mark.parametrize(
    "sync_batchnorm,plugins,expected",
    [
        (False, [], type(None)),
        (True, [], NativeSyncBatchNorm),
        (False, [NativeSyncBatchNorm()], NativeSyncBatchNorm),
        (True, [NativeSyncBatchNorm()], NativeSyncBatchNorm),
        (False, [Mock(spec=LayerSync)], LayerSync),
    ],
)
def test_sync_batchnorm_set(tmpdir, sync_batchnorm, plugins, expected):
    """Test valid combinations of the sync_batchnorm Trainer flag and the plugins list of layer-sync plugins."""
    trainer = Trainer(sync_batchnorm=sync_batchnorm, plugins=plugins, strategy="ddp")
    # The resolved layer-sync plugin must land on both the connector and the strategy.
    assert isinstance(trainer._accelerator_connector._layer_sync, expected)
    assert isinstance(trainer.strategy._layer_sync, expected)
def test_sync_batchnorm_invalid_choice(tmpdir):
    """Test that a conflicting specification of enabled sync batchnorm and a custom plugin leads to an error."""
    custom = Mock(spec=LayerSync)
    with pytest.raises(
        MisconfigurationException,
        match=r"You set `Trainer\(sync_batchnorm=True\)` and provided a `LayerSync` plugin, but this is not allowed",
    ):
        Trainer(sync_batchnorm=True, plugins=[custom])
@RunIf(skip_windows=True)
def test_sync_batchnorm_set_in_custom_strategy(tmpdir):
    """Tests if layer_sync is automatically set for custom strategy."""
    class CustomParallelStrategy(DDPStrategy):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Set to None so it will be overwritten by the accelerator connector.
            self._layer_sync = None
    strategy = CustomParallelStrategy()
    assert strategy._layer_sync is None
    # Constructing the Trainer mutates the strategy in place.
    Trainer(strategy=strategy, sync_batchnorm=True)
    assert isinstance(strategy._layer_sync, NativeSyncBatchNorm)
@pytest.mark.parametrize(
    ["plugins", "expected"],
    [
        ([LightningEnvironment(), SLURMEnvironment()], "ClusterEnvironment"),
        ([TorchCheckpointIO(), TorchCheckpointIO()], "CheckpointIO"),
        (
            [PrecisionPlugin(), DoublePrecisionPlugin(), LightningEnvironment(), SLURMEnvironment()],
            "PrecisionPlugin, ClusterEnvironment",
        ),
    ],
)
def test_plugin_only_one_instance_for_one_type(plugins, expected):
    """At most one plugin per plugin category may be passed; duplicates raise listing the offending types."""
    with pytest.raises(MisconfigurationException, match=f"Received multiple values for {expected}"):
        Trainer(plugins=plugins)
| 40.592412 | 117 | 0.733231 |
import os
from typing import Optional
from unittest import mock
from unittest.mock import Mock
import pytest
import torch
import torch.distributed
import pytorch_lightning
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.gpu import GPUAccelerator
from pytorch_lightning.plugins import DoublePrecisionPlugin, LayerSync, NativeSyncBatchNorm, PrecisionPlugin
from pytorch_lightning.plugins.environments import (
KubeflowEnvironment,
LightningEnvironment,
SLURMEnvironment,
TorchElasticEnvironment,
)
from pytorch_lightning.plugins.io import TorchCheckpointIO
from pytorch_lightning.strategies import (
DataParallelStrategy,
DDP2Strategy,
DDPShardedStrategy,
DDPSpawnShardedStrategy,
DDPSpawnStrategy,
DDPStrategy,
DeepSpeedStrategy,
ParallelStrategy,
SingleDeviceStrategy,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.runif import RunIf
def test_accelerator_choice_cpu(tmpdir):
    """Default Trainer settings select CPUAccelerator with SingleDeviceStrategy."""
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, SingleDeviceStrategy)
@pytest.mark.parametrize(("devices", "num_nodes"), ([(1, 1), (1, 2), (2, 1), (2, 2)]))
def test_accelerator_choice_ddp_cpu(tmpdir, devices: int, num_nodes: int):
    """`accelerator="ddp_cpu"` picks DDP for 1 device on >1 nodes, DDPSpawn otherwise."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=devices, num_nodes=num_nodes)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    no_spawn = devices == 1 and num_nodes > 1
    assert isinstance(trainer.strategy, DDPStrategy if no_spawn else DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
    """Deprecated `accelerator="ddp"` with `gpus` resolves to GPUAccelerator + DDPStrategy."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
    """Deprecated `accelerator="ddp_spawn"` with `gpus` resolves to GPUAccelerator + DDPSpawnStrategy."""
    with pytest.deprecated_call(match=r"accelerator='ddp_spawn'\)` has been deprecated"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp_spawn", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_slurm(*_):
    """Deprecated `accelerator="ddp"` under a mocked SLURM env uses SLURMEnvironment with the env's local rank."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp2_slurm(*_):
    """Deprecated `accelerator="ddp2"` under a mocked SLURM env uses SLURMEnvironment with the env's local rank."""
    with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_te(*_):
    """Deprecated `accelerator="ddp"` under a TorchElastic env uses TorchElasticEnvironment."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp2_te(*_):
    """Deprecated `accelerator="ddp2"` under a TorchElastic env uses TorchElasticEnvironment."""
    with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_te(*_):
    """Deprecated `accelerator="ddp_cpu"` under a TorchElastic env uses TorchElasticEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0",
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_accelerator_choice_ddp_kubeflow(*_):
    """Deprecated `accelerator="ddp"` under a Kubeflow env uses KubeflowEnvironment (local rank 0)."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
        trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_kubeflow(*_):
    """`accelerator="ddp_cpu"` inside a Kubeflow pod (no GPUs) selects CPUAccelerator +
    DDPStrategy with a KubeflowEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=1)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_slurm(*_):
    """`accelerator="ddp_cpu"` under SLURM is detected as SLURM-managed and selects
    CPUAccelerator + DDPStrategy with a SLURMEnvironment."""
    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.local_rank == 0
@RunIf(skip_windows=True, standalone=True)
def test_accelerator_choice_ddp_cpu_and_strategy(tmpdir):
    """A pre-built `DDPStrategy` instance passed via `strategy=` is honored with ddp_cpu."""
    _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPStrategy)
@RunIf(skip_windows=True, skip_49370=True)
def test_accelerator_choice_ddp_cpu_and_strategy_spawn(tmpdir):
    """Same as the non-spawn variant, but for a pre-built `DDPSpawnStrategy` instance."""
    _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPSpawnStrategy)
def _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class):
    """Shared helper: a strategy instance keeps its class, runs on CPUAccelerator,
    and is configured with 2 processes / 2 CPU parallel devices."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        strategy=ddp_strategy_class(find_unused_parameters=True),
        fast_dev_run=True,
        accelerator="ddp_cpu",
        devices=2,
    )
    assert isinstance(trainer.strategy, ddp_strategy_class)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert trainer.strategy.num_processes == 2
    assert trainer.strategy.parallel_devices == [torch.device("cpu")] * 2
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
def test_accelerator_choice_ddp_cpu_custom_cluster(_, tmpdir):
    """A user-supplied ClusterEnvironment plugin overrides auto-detection, even when
    SLURM env vars are present."""
    class CustomCluster(LightningEnvironment):
        # Fixed address + externally-created processes mark this as a managed cluster.
        @property
        def main_address(self):
            return "asdf"
        @property
        def creates_processes_externally(self) -> bool:
            return True
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[CustomCluster()], fast_dev_run=True, accelerator="ddp_cpu", devices=2
    )
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, CustomCluster)
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_custom_accelerator(device_count_mock, setup_distributed_mock):
    """User-defined Accelerator/PrecisionPlugin/Strategy subclasses passed via a
    strategy instance are used verbatim by the Trainer (single-device and DDP)."""
    class Accel(Accelerator):
        @staticmethod
        def parse_devices(devices):
            return devices
        @staticmethod
        def get_parallel_devices(devices):
            return [torch.device("cpu")] * devices
        @staticmethod
        def auto_device_count() -> int:
            return 1
        @staticmethod
        def is_available() -> bool:
            return True
        @staticmethod
        def name() -> str:
            return "custom_acc_name"
    class Prec(PrecisionPlugin):
        pass
    class Strat(SingleDeviceStrategy):
        pass
    # Single-device custom strategy: all three custom components must survive.
    strategy = Strat(device=torch.device("cpu"), accelerator=Accel(), precision_plugin=Prec())
    trainer = Trainer(strategy=strategy, fast_dev_run=True, devices=2)
    assert isinstance(trainer.accelerator, Accel)
    assert isinstance(trainer.strategy, Strat)
    assert isinstance(trainer.precision_plugin, Prec)
    assert trainer._accelerator_connector.strategy is strategy
    # Same check with a DDP-based custom strategy.
    class Strat(DDPStrategy):
        pass
    strategy = Strat(accelerator=Accel(), precision_plugin=Prec())
    trainer = Trainer(strategy=strategy, fast_dev_run=True, devices=2)
    assert isinstance(trainer.accelerator, Accel)
    assert isinstance(trainer.strategy, Strat)
    assert isinstance(trainer.precision_plugin, Prec)
    assert trainer._accelerator_connector.strategy is strategy
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_dist_backend_accelerator_mapping(*_):
    """`strategy="ddp_spawn"` on CPU under SLURM resolves to CPUAccelerator + DDPStrategy."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert trainer.strategy.local_rank == 0
@mock.patch("torch.cuda.device_count", return_value=2)
def test_ipython_incompatible_backend_error(_, monkeypatch):
    """In an interactive (IPython) session, non-fork strategies must raise a
    MisconfigurationException."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"):
        Trainer(strategy="ddp", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp2'\)`.*is not compatible"):
        Trainer(strategy="ddp2", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_spawn'\)`.*is not compatible"):
        Trainer(strategy="ddp_spawn", accelerator="gpu", devices=2)
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_sharded_spawn'\)`.*is not compatible"):
        Trainer(strategy="ddp_sharded_spawn", accelerator="gpu", devices=2)
    # NOTE(review): this passes strategy="dp" but expects "strategy='ddp'" in the
    # message -- presumably "dp" without a GPU accelerator is remapped to ddp by the
    # connector; confirm against the connector's fallback logic.
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"):
        Trainer(strategy="dp")
@mock.patch("torch.cuda.device_count", return_value=2)
def test_ipython_compatible_dp_strategy_gpu(_, monkeypatch):
    """DP on GPU is allowed in an interactive session (no process launcher, or an
    interactive-compatible one)."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    trainer = Trainer(strategy="dp", accelerator="gpu")
    assert trainer.strategy.launcher is None or trainer.strategy.launcher.is_interactive_compatible
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.is_available", return_value=True)
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.parse_devices", return_value=8)
def test_ipython_compatible_strategy_tpu(mock_devices, mock_tpu_acc_avail, monkeypatch):
    """The TPU strategy selected by default is interactive-session compatible."""
    monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
    trainer = Trainer(accelerator="tpu")
    assert trainer.strategy.launcher is None or trainer.strategy.launcher.is_interactive_compatible
@pytest.mark.parametrize(["accelerator", "plugin"], [("ddp_spawn", "ddp_sharded"), (None, "ddp_sharded")])
def test_plugin_accelerator_choice(accelerator: Optional[str], plugin: str):
    """Passing a strategy name through the deprecated `plugins=` flag still resolves to
    DDPShardedStrategy, with the appropriate deprecation warnings."""
    if accelerator is None:
        with pytest.deprecated_call(match="Passing .* `strategy` to the `plugins`"):
            trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
    else:
        with pytest.deprecated_call(match=r"accelerator=.*\)` has been deprecated"):
            trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
    assert isinstance(trainer.strategy, DDPShardedStrategy)
    with pytest.deprecated_call(match="Passing .* `strategy` to the `plugins`"):
        trainer = Trainer(plugins=plugin, accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, DDPShardedStrategy)
@pytest.mark.parametrize(
    ["accelerator", "plugin"],
    [
        ("ddp", DDPStrategy),
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_sharded", DDPShardedStrategy),
        ("ddp_sharded_spawn", DDPSpawnShardedStrategy),
        pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)),
    ],
)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("torch.cuda.device_count", return_value=2)
@pytest.mark.parametrize("devices", [1, 2])
def test_accelerator_choice_multi_node_gpu(
    mock_is_available, mock_device_count, tmpdir, accelerator: str, plugin: ParallelStrategy, devices: int
):
    """Each deprecated `accelerator=<strategy-name>` value maps to the expected strategy
    class in a multi-node GPU setup."""
    with pytest.deprecated_call(match=r"accelerator=.*\)` has been deprecated"):
        trainer = Trainer(default_root_dir=tmpdir, num_nodes=2, accelerator=accelerator, devices=devices)
    assert isinstance(trainer.strategy, plugin)
@mock.patch("torch.cuda.is_available", return_value=False)
def test_accelerator_cpu(_):
    """With CUDA unavailable, CPU is selected, and any GPU request raises a
    MisconfigurationException."""
    trainer = Trainer(accelerator="cpu")
    assert isinstance(trainer.accelerator, CPUAccelerator)
    with pytest.raises(MisconfigurationException, match="You requested gpu:"):
        trainer = Trainer(gpus=1)
    with pytest.raises(
        MisconfigurationException,
        match="GPUAccelerator can not run on your system since the accelerator is not available.",
    ):
        trainer = Trainer(accelerator="gpu")
    with pytest.raises(MisconfigurationException, match="You requested gpu:"):
        trainer = Trainer(accelerator="cpu", gpus=1)
@mock.patch("torch.cuda.is_available", return_value=False)
@pytest.mark.parametrize("devices", ["0", 0, []])
def test_passing_zero_and_empty_list_to_devices_flag(_, devices):
    """Zero-like `devices` values with `accelerator="gpu"` raise when no GPU exists."""
    with pytest.raises(
        MisconfigurationException, match="can not run on your system since the accelerator is not available."
    ):
        Trainer(accelerator="gpu", devices=devices)
@RunIf(min_gpus=1)
def test_accelerator_gpu():
    """On a GPU machine, `accelerator="gpu"` (with/without devices) and
    `accelerator="auto"` all select GPUAccelerator."""
    trainer = Trainer(accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    trainer = Trainer(accelerator="gpu")
    assert isinstance(trainer.accelerator, GPUAccelerator)
    trainer = Trainer(accelerator="auto", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@pytest.mark.parametrize(["devices", "plugin"], [(1, SingleDeviceStrategy), (5, DDPSpawnStrategy)])
def test_accelerator_cpu_with_devices(devices, plugin):
    """CPU device count maps to the expected strategy: 1 -> single device, >1 -> spawn."""
    trainer = Trainer(accelerator="cpu", devices=devices)
    assert trainer.num_devices == devices
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, CPUAccelerator)
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
    ["devices", "plugin"], [(1, SingleDeviceStrategy), ([1], SingleDeviceStrategy), (2, DDPSpawnStrategy)]
)
def test_accelerator_gpu_with_devices(devices, plugin):
    """GPU `devices` (int or list of indices) sets the device count and strategy."""
    trainer = Trainer(accelerator="gpu", devices=devices)
    # BUG FIX: the conditional expression must be parenthesized. Without parentheses
    # `a == b if cond else devices` parses as `(a == b) if cond else devices`, so for
    # an int `devices` the assert only checked the truthiness of `devices` itself and
    # never compared it against `trainer.num_devices`.
    assert trainer.num_devices == (len(devices) if isinstance(devices, list) else devices)
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@RunIf(min_gpus=1)
def test_accelerator_auto_with_devices_gpu():
    """`accelerator="auto"` with an explicit device count picks the GPU."""
    trainer = Trainer(accelerator="auto", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert trainer.num_devices == 1
def test_validate_accelerator_and_devices():
    """Legacy `accelerator="ddp_cpu"` with `devices` yields CPU with that device count."""
    trainer = Trainer(accelerator="ddp_cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert trainer.num_devices == 2
def test_set_devices_if_none_cpu():
    """An explicit CPU device count is preserved as `num_devices`."""
    trainer = Trainer(accelerator="cpu", devices=3)
    assert trainer.num_devices == 3
def test_devices_with_cpu_only_supports_integer():
    """A non-integer `devices` string on CPU warns and falls back to a single device."""
    with pytest.warns(UserWarning, match="The flag `devices` must be an int"):
        trainer = Trainer(accelerator="cpu", devices="1,3")
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert trainer.num_devices == 1
@pytest.mark.parametrize("training_type", ["ddp2", "dp"])
def test_unsupported_strategy_types_on_cpu(training_type):
    """GPU-only strategies ("ddp2"/"dp") requested on CPU warn and fall back to DDP."""
    with pytest.warns(UserWarning, match="is not supported on CPUs, hence setting `strategy='ddp"):
        trainer = Trainer(accelerator=training_type, num_processes=2)
    assert isinstance(trainer.strategy, DDPStrategy)
def test_accelerator_ddp_for_cpu(tmpdir):
    """Deprecated `accelerator="ddp"` with `num_processes` resolves to CPU + DDP."""
    with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated"):
        trainer = Trainer(accelerator="ddp", num_processes=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
def test_exception_when_strategy_used_with_accelerator():
    """Passing a strategy via both `accelerator=` (deprecated) and `strategy=` raises."""
    with pytest.raises(MisconfigurationException, match="but have also passed"), pytest.deprecated_call(
        match=r"accelerator='ddp'\)` has been deprecated"
    ):
        Trainer(accelerator="ddp", strategy="ddp_spawn")
def test_exception_when_strategy_used_with_plugins():
    """Passing a strategy via both `plugins=` (deprecated) and `strategy=` raises."""
    with pytest.raises(MisconfigurationException, match="only specify one strategy, but you have passed"):
        with pytest.deprecated_call(match=r"`strategy` to the `plugins` flag in Trainer has been deprecated"):
            Trainer(plugins="ddp_find_unused_parameters_false", strategy="ddp_spawn")
def test_exception_invalid_strategy():
    """Strategy names reserved for the `accelerator` flag are rejected by `strategy=`."""
    with pytest.raises(MisconfigurationException, match=r"strategy='ddp_cpu'\)` is not a valid"):
        Trainer(strategy="ddp_cpu")
    with pytest.raises(MisconfigurationException, match=r"strategy='tpu_spawn'\)` is not a valid"):
        Trainer(strategy="tpu_spawn")
@pytest.mark.parametrize(
    ["strategy", "plugin"],
    [
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy),
        ("ddp", DDPStrategy),
        ("ddp_find_unused_parameters_false", DDPStrategy),
    ],
)
def test_strategy_choice_cpu_str(tmpdir, strategy, plugin):
    """Each DDP strategy-name string maps to the expected strategy class on CPU."""
    trainer = Trainer(strategy=strategy, accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_strategy_choice_cpu_plugin(tmpdir, plugin):
    """A strategy instance passed to `strategy=` keeps its class on CPU."""
    trainer = Trainer(strategy=plugin(), accelerator="cpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
    ["strategy", "plugin"],
    [
        ("ddp_spawn", DDPSpawnStrategy),
        ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy),
        ("ddp", DDPStrategy),
        ("ddp_find_unused_parameters_false", DDPStrategy),
        ("ddp2", DDP2Strategy),
        ("dp", DataParallelStrategy),
        ("ddp_sharded", DDPShardedStrategy),
        ("ddp_sharded_spawn", DDPSpawnShardedStrategy),
        pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)),
    ],
)
def test_strategy_choice_gpu_str(tmpdir, strategy, plugin):
    """Each strategy-name string maps to the expected strategy class on GPU."""
    trainer = Trainer(strategy=strategy, accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_strategy_choice_gpu_plugin(tmpdir, plugin):
    """A strategy instance passed to `strategy=` keeps its class on GPU."""
    trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
@RunIf(min_gpus=2)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_device_type_when_training_plugin_gpu_passed(tmpdir, plugin):
    """A strategy instance plus `accelerator="gpu"` yields GPUAccelerator."""
    trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2)
    assert isinstance(trainer.strategy, plugin)
    assert isinstance(trainer.accelerator, GPUAccelerator)
@pytest.mark.parametrize("precision", [1, 12, "invalid"])
def test_validate_precision_type(tmpdir, precision):
    """Unsupported `precision` values raise a MisconfigurationException."""
    with pytest.raises(MisconfigurationException, match=f"Precision {repr(precision)} is invalid"):
        Trainer(precision=precision)
def test_amp_level_raises_error_with_native():
    """`amp_level` is apex-only; combining it with the native AMP backend raises."""
    with pytest.raises(MisconfigurationException, match="O2'` but it's only supported with `amp_backend='apex'`"):
        _ = Trainer(amp_level="O2", amp_backend="native", precision=16)
def test_strategy_choice_ddp_spawn_cpu(tmpdir):
    """`strategy="ddp_spawn"` on CPU uses DDPSpawnStrategy with a LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_strategy_choice_ddp(cuda_available_mock, device_count_mock):
    """`strategy="ddp"` with mocked GPUs uses DDPStrategy + LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
    """`strategy="ddp_spawn"` with mocked GPUs uses DDPSpawnStrategy + LightningEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPSpawnStrategy)
    assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@RunIf(min_gpus=2)
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
    """DDP (name or instance) under SLURM is detected as SLURM-managed and picks up
    the local rank from SLURM_LOCALID."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="gpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "SLURM_PROCID": "1",
        "SLURM_LOCALID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@mock.patch("torch.cuda.is_available", return_value=True)
@pytest.mark.parametrize("strategy", ["ddp2", DDP2Strategy()])
def test_strategy_choice_ddp2_slurm(
    set_device_mock, device_count_mock, setup_distributed_mock, is_available_mock, strategy
):
    """DDP2 (name or instance) under SLURM is SLURM-managed with SLURMEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="gpu", devices=2)
    assert trainer._accelerator_connector._is_slurm_managing_tasks()
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_te(*_):
    """`strategy="ddp"` in a TorchElastic environment uses DDPStrategy with a
    TorchElasticEnvironment and the local rank from the env vars.

    Cleanup: the `torch.cuda.is_available` patch was applied twice (once above and
    once below `setup_distributed`); the redundant duplicate has been removed —
    `*_` absorbs the remaining mock arguments either way.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp2_te(*_):
    """`strategy="ddp2"` in a TorchElastic environment uses DDP2Strategy with a
    TorchElasticEnvironment and the local rank from the env vars.

    Cleanup: removed a duplicated `torch.cuda.is_available` patch decorator —
    `*_` absorbs the remaining mock arguments either way.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp2", accelerator="gpu", devices=2)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDP2Strategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "WORLD_SIZE": "2",
        "LOCAL_WORLD_SIZE": "2",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "GROUP_RANK": "0",
        "TORCHELASTIC_RUN_ID": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_te(*_):
    """`strategy="ddp_spawn"` on CPU in a TorchElastic environment resolves to
    DDPStrategy with a TorchElasticEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 1
    assert trainer.strategy.local_rank == 1
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0",
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_kubeflow(*_):
    """`strategy="ddp"` with one GPU inside a Kubeflow pod uses DDPStrategy with a
    KubeflowEnvironment (local rank fixed to 0).

    Cleanup: removed a duplicated `torch.cuda.is_available` patch decorator —
    `*_` absorbs the remaining mock arguments either way.
    """
    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "KUBERNETES_PORT": "tcp://127.0.0.1:443",
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "500",
        "WORLD_SIZE": "20",
        "RANK": "1",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_kubeflow(*_):
    """`strategy="ddp_spawn"` on CPU inside a Kubeflow pod resolves to DDPStrategy
    with a KubeflowEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
    assert trainer.strategy.cluster_environment.local_rank() == 0
    assert trainer.strategy.local_rank == 0
@mock.patch.dict(
    os.environ,
    {
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=0)
@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock, strategy):
    """DDP (name or instance) on CPU under SLURM uses a SLURMEnvironment."""
    trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="cpu", devices=2)
    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.strategy, DDPStrategy)
    assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment)
    assert trainer.strategy.local_rank == 0
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.is_available", return_value=True)
@mock.patch("pytorch_lightning.accelerators.tpu.TPUAccelerator.parse_devices", return_value=8)
def test_unsupported_tpu_choice(mock_devices, mock_tpu_acc_avail):
    """Unsupported TPU combinations (precision=64, non-TPU strategies, 16-bit AMP)
    raise or warn as appropriate."""
    with pytest.raises(MisconfigurationException, match=r"accelerator='tpu', precision=64\)` is not implemented"):
        Trainer(accelerator="tpu", precision=64)
    # if user didn't set strategy, AcceleratorConnector will choose the TPUSingleStrategy or TPUSpawnStrategy
    with pytest.raises(ValueError, match="TPUAccelerator` can only be used with a `SingleTPUStrategy`"):
        with pytest.warns(UserWarning, match=r"accelerator='tpu', precision=16\)` but native AMP is not supported"):
            Trainer(accelerator="tpu", precision=16, strategy="ddp")
    with pytest.raises(ValueError, match="TPUAccelerator` can only be used with a `SingleTPUStrategy`"):
        with pytest.warns(UserWarning, match=r"accelerator='tpu', precision=16\)` but apex AMP is not supported"):
            Trainer(accelerator="tpu", precision=16, amp_backend="apex", strategy="single_device")
@mock.patch("pytorch_lightning.accelerators.ipu.IPUAccelerator.is_available", return_value=True)
def test_unsupported_ipu_choice(mock_ipu_acc_avail, monkeypatch):
    """IPU only supports 16/32-bit precision; bf16 and 64-bit raise ValueError."""
    import pytorch_lightning.strategies.ipu as ipu
    import pytorch_lightning.utilities.imports as imports
    # Force the IPU availability flags on so the connector reaches the precision check.
    monkeypatch.setattr(imports, "_IPU_AVAILABLE", True)
    monkeypatch.setattr(ipu, "_IPU_AVAILABLE", True)
    with pytest.raises(ValueError, match=r"accelerator='ipu', precision='bf16'\)` is not supported"):
        Trainer(accelerator="ipu", precision="bf16")
    with pytest.raises(ValueError, match=r"accelerator='ipu', precision=64\)` is not supported"):
        Trainer(accelerator="ipu", precision=64)
@mock.patch("torch.cuda.is_available", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._TPU_AVAILABLE", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._IPU_AVAILABLE", return_value=False)
@mock.patch("pytorch_lightning.utilities.imports._HPU_AVAILABLE", return_value=False)
def test_devices_auto_choice_cpu(
    is_ipu_available_mock, is_tpu_available_mock, is_gpu_available_mock, is_hpu_available_mock
):
    """`accelerator="auto"`/`devices="auto"` falls back to one CPU device when no
    hardware accelerator is available."""
    # NOTE(review): patching the `_*_AVAILABLE` module constants with
    # `return_value=False` replaces them with MagicMock objects (which are truthy),
    # not with `False`; `mock.patch(..., False)` would be the usual way to patch a
    # constant -- confirm how the connector actually consumes these flags.
    # NOTE(review): the parameter names do not match the decorator order
    # (decorators are applied bottom-up), but all arguments are unused mocks.
    trainer = Trainer(accelerator="auto", devices="auto")
    assert trainer.num_devices == 1
@mock.patch("torch.cuda.is_available", return_value=True)
@mock.patch("torch.cuda.device_count", return_value=2)
def test_devices_auto_choice_gpu(is_gpu_available_mock, device_count_mock):
    """`accelerator="auto"`/`devices="auto"` picks all visible GPUs when CUDA exists."""
    trainer = Trainer(accelerator="auto", devices="auto")
    assert isinstance(trainer.accelerator, GPUAccelerator)
    assert trainer.num_devices == 2
@pytest.mark.parametrize(
    ["parallel_devices", "accelerator"],
    [([torch.device("cpu")], "gpu"), ([torch.device("cuda", i) for i in range(8)], ("tpu"))],
)
def test_parallel_devices_in_strategy_confilict_with_accelerator(parallel_devices, accelerator):
    """A strategy's `parallel_devices` that contradicts the `accelerator` flag raises.

    NOTE(review): "confilict" in the name is a typo for "conflict"; kept because
    renaming would change the test's identity for external selection/reporting.
    """
    with pytest.raises(MisconfigurationException, match=r"parallel_devices set through"):
        Trainer(strategy=DDPStrategy(parallel_devices=parallel_devices), accelerator=accelerator)
@pytest.mark.parametrize("deterministic", [True, False])
def test_deterministic_init(deterministic):
    """`deterministic=True` is stored on the connector and sets the cuBLAS/Horovod
    env vars needed for reproducible runs."""
    trainer = Trainer(accelerator="auto", deterministic=deterministic)
    assert trainer._accelerator_connector.deterministic == deterministic
    if deterministic:
        assert os.environ.get("CUBLAS_WORKSPACE_CONFIG") == ":4096:8"
        assert os.environ.get("HOROVOD_FUSION_THRESHOLD") == "0"
@pytest.mark.parametrize(
    "sync_batchnorm,plugins,expected",
    [
        (False, [], type(None)),
        (True, [], NativeSyncBatchNorm),
        (False, [NativeSyncBatchNorm()], NativeSyncBatchNorm),
        (True, [NativeSyncBatchNorm()], NativeSyncBatchNorm),
        (False, [Mock(spec=LayerSync)], LayerSync),
    ],
)
def test_sync_batchnorm_set(tmpdir, sync_batchnorm, plugins, expected):
    """The `sync_batchnorm` flag and/or a LayerSync plugin produce the expected
    `_layer_sync` on both the connector and the strategy."""
    trainer = Trainer(sync_batchnorm=sync_batchnorm, plugins=plugins, strategy="ddp")
    assert isinstance(trainer._accelerator_connector._layer_sync, expected)
    assert isinstance(trainer.strategy._layer_sync, expected)
def test_sync_batchnorm_invalid_choice(tmpdir):
    """`sync_batchnorm=True` together with a custom LayerSync plugin is rejected."""
    custom = Mock(spec=LayerSync)
    with pytest.raises(
        MisconfigurationException,
        match=r"You set `Trainer\(sync_batchnorm=True\)` and provided a `LayerSync` plugin, but this is not allowed",
    ):
        Trainer(sync_batchnorm=True, plugins=[custom])
@RunIf(skip_windows=True)
def test_sync_batchnorm_set_in_custom_strategy(tmpdir):
    """`sync_batchnorm=True` injects a NativeSyncBatchNorm layer-sync into a custom
    strategy that starts without one."""
    class CustomParallelStrategy(DDPStrategy):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Start with no layer sync so the Trainer has to set it.
            self._layer_sync = None
    strategy = CustomParallelStrategy()
    assert strategy._layer_sync is None
    Trainer(strategy=strategy, sync_batchnorm=True)
    assert isinstance(strategy._layer_sync, NativeSyncBatchNorm)
@pytest.mark.parametrize(
    ["plugins", "expected"],
    [
        ([LightningEnvironment(), SLURMEnvironment()], "ClusterEnvironment"),
        ([TorchCheckpointIO(), TorchCheckpointIO()], "CheckpointIO"),
        (
            [PrecisionPlugin(), DoublePrecisionPlugin(), LightningEnvironment(), SLURMEnvironment()],
            "PrecisionPlugin, ClusterEnvironment",
        ),
    ],
)
def test_plugin_only_one_instance_for_one_type(plugins, expected):
    """Passing more than one plugin of the same category raises, naming the category."""
    with pytest.raises(MisconfigurationException, match=f"Received multiple values for {expected}"):
        Trainer(plugins=plugins)
| true | true |
1c3493c81efda6be8fd6097e672472ea11706c75 | 2,360 | py | Python | examples/federated_learning/yolov5_coco128_mistnet/train.py | davedavedavid/sedna | 7ba3da9f85559ee842ba28d6785f885d38ca49fb | [
"Apache-2.0"
] | null | null | null | examples/federated_learning/yolov5_coco128_mistnet/train.py | davedavedavid/sedna | 7ba3da9f85559ee842ba28d6785f885d38ca49fb | [
"Apache-2.0"
] | null | null | null | examples/federated_learning/yolov5_coco128_mistnet/train.py | davedavedavid/sedna | 7ba3da9f85559ee842ba28d6785f885d38ca49fb | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from interface import mistnet, s3_transmitter
from interface import Dataset, Estimator_edge
from sedna.common.config import BaseConfig
from sedna.core.federated_learning import FederatedLearningV2
from examples.ms_nnrt.ms_nnrt_models.ms_acl_inference import Inference
from examples.ms_nnrt.ms_nnrt_trainer_yolo import Trainer
from examples.ms_nnrt.ms_nnrt_algorithms.ms_mistnet import Algorithm
def main():
    """Federated-learning client entry point: build the dataset/estimator, fetch the
    client-side model from S3 if configured, and start MistNet federated training."""
    data = Dataset()
    estimator = Estimator_edge()
    # Derive the dataset root from the configured train URL (strip the file name).
    data.parameters["data_path"] = BaseConfig.train_dataset_url.replace("robot.txt", "")
    data.parameters["train_path"] = os.path.join(data.parameters["data_path"], "./coco128/train2017/")
    # Test set reuses the training images.
    data.parameters["test_path"] = data.parameters["train_path"]
    data.parameters["train_annFile"] = os.path.join(data.parameters["data_path"], "./coco128/annotations/instances_train2017.json")
    if "s3_endpoint_url" in s3_transmitter.parameters:
        from plato.utils import s3
        s3_client = s3.S3(s3_transmitter.parameters["s3_endpoint_url"], s3_transmitter.parameters["access_key"],
                          s3_transmitter.parameters["secret_key"], s3_transmitter.parameters["s3_bucket"])
        s3_client.download_from_s3("model/client_model/network_f.om", "./network_f.om")
    # Cut-layer inference model; input 1*3*640*640 -> output 1*12*320*320.
    estimator.model = Inference(0, "./network_f.om", 320, 320)
    estimator.trainer = Trainer(model=estimator.model)
    estimator.algorithm = Algorithm(estimator.trainer)
    fl_model = FederatedLearningV2(
        data=data,
        estimator=estimator,
        aggregation=mistnet,
        transmitter=s3_transmitter)
    fl_model.train()
# Script entry point: run the federated-learning client.
if __name__ == '__main__':
    main()
| 46.27451 | 131 | 0.749576 |
import os
from interface import mistnet, s3_transmitter
from interface import Dataset, Estimator_edge
from sedna.common.config import BaseConfig
from sedna.core.federated_learning import FederatedLearningV2
from examples.ms_nnrt.ms_nnrt_models.ms_acl_inference import Inference
from examples.ms_nnrt.ms_nnrt_trainer_yolo import Trainer
from examples.ms_nnrt.ms_nnrt_algorithms.ms_mistnet import Algorithm
def main():
    """Configure the edge-side dataset and estimator, then launch MistNet
    federated training via Sedna's FederatedLearningV2 job."""
    data = Dataset()
    estimator = Estimator_edge()
    params = data.parameters
    params["data_path"] = BaseConfig.train_dataset_url.replace("robot.txt", "")
    params["train_path"] = os.path.join(params["data_path"], "./coco128/train2017/")
    params["test_path"] = params["train_path"]
    params["train_annFile"] = os.path.join(params["data_path"], "./coco128/annotations/instances_train2017.json")
    s3_params = s3_transmitter.parameters
    if "s3_endpoint_url" in s3_params:
        # Pull the client-side (cut-layer) model from object storage first.
        from plato.utils import s3
        client = s3.S3(s3_params["s3_endpoint_url"], s3_params["access_key"],
                       s3_params["secret_key"], s3_params["s3_bucket"])
        client.download_from_s3("model/client_model/network_f.om", "./network_f.om")
    estimator.model = Inference(0, "./network_f.om", 320, 320)
    estimator.trainer = Trainer(model=estimator.model)
    estimator.algorithm = Algorithm(estimator.trainer)
    job = FederatedLearningV2(
        data=data,
        estimator=estimator,
        aggregation=mistnet,
        transmitter=s3_transmitter)
    job.train()


if __name__ == '__main__':
    main()
| true | true |
1c349402dc067fdf30894110cdcb17c3fa320b58 | 2,343 | py | Python | meta_policy_search/envs/point_envs/point_env_2d.py | behzadhaghgoo/cml | e659c7ae10a52bbe1cbabf9d359aea43af19eb12 | [
"MIT"
] | 210 | 2018-10-17T01:04:48.000Z | 2022-03-09T16:17:06.000Z | meta_policy_search/envs/point_envs/point_env_2d.py | behzadhaghgoo/cml | e659c7ae10a52bbe1cbabf9d359aea43af19eb12 | [
"MIT"
] | 13 | 2018-10-25T20:01:09.000Z | 2022-01-24T13:11:24.000Z | meta_policy_search/envs/point_envs/point_env_2d.py | behzadhaghgoo/cml | e659c7ae10a52bbe1cbabf9d359aea43af19eb12 | [
"MIT"
] | 55 | 2018-10-18T22:00:51.000Z | 2021-11-24T00:06:31.000Z | from meta_policy_search.envs.base import MetaEnv
import numpy as np
from gym.spaces import Box
class MetaPointEnv(MetaEnv):
    """2-D point-mass environment: move toward the origin with bounded steps."""

    def step(self, action):
        """Advance one timestep by applying the (clipped) action.

        Returns the (observation, reward, done, info) tuple; reward is the
        negative Euclidean distance of the new state from the origin.
        """
        last_state = self._state
        self._state = last_state + np.clip(action, -0.1, 0.1)
        rew = self.reward(last_state, action, self._state)
        terminal = self.done(self._state)
        return np.copy(self._state), rew, terminal, {}

    def reset(self):
        """Sample a fresh start state uniformly from [-2, 2]^2 and return a copy."""
        self._state = np.random.uniform(-2, 2, size=(2,))
        return np.copy(self._state)

    @property
    def observation_space(self):
        return Box(low=-np.inf, high=np.inf, shape=(2,))

    @property
    def action_space(self):
        return Box(low=-0.1, high=0.1, shape=(2,))

    def done(self, obs):
        # Terminal once both coordinates are within 0.01 of the origin;
        # supports a single state (1-D) or a batch of states (2-D).
        if obs.ndim == 1:
            return abs(obs[0]) < 0.01 and abs(obs[1]) < 0.01
        elif obs.ndim == 2:
            return np.logical_and(np.abs(obs[:, 0]) < 0.01, np.abs(obs[:, 1]) < 0.01)

    def reward(self, obs, act, obs_next):
        # Negative distance of the successor state from the origin.
        if obs_next.ndim == 1:
            return - np.sqrt(obs_next[0] ** 2 + obs_next[1] ** 2)
        elif obs_next.ndim == 2:
            return - np.sqrt(obs_next[:, 0] ** 2 + obs_next[:, 1] ** 2)

    def log_diagnostics(self, paths):
        pass

    def sample_tasks(self, n_tasks):
        # Single-task environment: every "task" is the same empty spec.
        return [{}] * n_tasks

    def set_task(self, task):
        pass

    def get_task(self):
        return {}
import numpy as np
from gym.spaces import Box
class MetaPointEnv(MetaEnv):
def step(self, action):
prev_state = self._state
self._state = prev_state + np.clip(action, -0.1, 0.1)
reward = self.reward(prev_state, action, self._state)
done = self.done(self._state)
next_observation = np.copy(self._state)
return next_observation, reward, done, {}
def reset(self):
self._state = np.random.uniform(-2, 2, size=(2,))
observation = np.copy(self._state)
return observation
@property
def observation_space(self):
return Box(low=-np.inf, high=np.inf, shape=(2,))
@property
def action_space(self):
return Box(low=-0.1, high=0.1, shape=(2,))
def done(self, obs):
if obs.ndim == 1:
return abs(obs[0]) < 0.01 and abs(obs[1]) < 0.01
elif obs.ndim == 2:
return np.logical_and(np.abs(obs[:, 0]) < 0.01, np.abs(obs[:, 1]) < 0.01)
def reward(self, obs, act, obs_next):
if obs_next.ndim == 1:
return - np.sqrt(obs_next[0]**2 + obs_next[1]**2)
elif obs_next.ndim == 2:
return - np.sqrt(obs_next[:, 0] ** 2 + obs_next[:, 1] ** 2)
def log_diagnostics(self, paths):
pass
def sample_tasks(self, n_tasks):
return [{}] * n_tasks
def set_task(self, task):
pass
def get_task(self):
return {} | true | true |
1c34945971e73b95ec4287a210d713f7431f69e6 | 3,570 | py | Python | Hybrid_Neuron_Simulation.py | emdgroup/brain_waves_for_planning_problems | 4b4356f40470d8ecfb6152960d9c4f25a7a11b46 | [
"Apache-2.0"
] | null | null | null | Hybrid_Neuron_Simulation.py | emdgroup/brain_waves_for_planning_problems | 4b4356f40470d8ecfb6152960d9c4f25a7a11b46 | [
"Apache-2.0"
] | null | null | null | Hybrid_Neuron_Simulation.py | emdgroup/brain_waves_for_planning_problems | 4b4356f40470d8ecfb6152960d9c4f25a7a11b46 | [
"Apache-2.0"
] | null | null | null | """
Attractor Network for 2DoF Robot Arm
Author: Henry Powell and Mathias Winkel
"""
import sys
import numpy as np
from graphics import Graphics
from ContinuousAttractorLayer import ContinuousAttractorLayer
from WavePropagationLayer import WavePropagationLayer
from setups import SETUPS
# Select a setup by name from the command line (default: 's_maze').
if len(sys.argv) > 1:
    selected_setup = sys.argv[1]
else:
    selected_setup = 's_maze'

try:
    setup = SETUPS[selected_setup]
except KeyError as e:
    raise ValueError('Selected setup "{}" does not exist. Chose one of \n\t{}'.format(selected_setup, '\n\t'.join(SETUPS.keys()))) from e

# Model parameters.
J = 12                    # continuous attractor synaptic connection strength
T = 0.05                  # continuous attractor Gaussian shift
σ = 0.03                  # continuous attractor Gaussian width
τ = 0.8                   # continuous attractor stabilization strength
R = setup.get('R', 12)    # continuous attractor movement recovery period
I = 25                    # DC current driving selected wave-propagation neurons
dt = 1                    # simulation timestep

shape = setup['size']
wave_propagation_layer = WavePropagationLayer(shape, setup['randomize_neurons'], setup['randomize_synapses'])
continuous_attractor_layer = ContinuousAttractorLayer(shape, J, T, σ, τ)
graphics = Graphics(shape, selected_setup, setup['blocked'], setup['target_neurons'])

# Blocked regions are impassable in both layers.
for region in setup['blocked']:
    continuous_attractor_layer.block_region(region)
    wave_propagation_layer.block_region(region)

continuous_attractor_layer.set_activation(setup['start_neuron'])

Δ = np.array([0, 0])                        # current movement direction vector
thalamic_input = np.zeros((2, *shape))
direc_update_delay = 0                      # remaining recovery time
coords = np.asarray(np.meshgrid(range(shape[0]), range(shape[1]))).T

for t in range(setup['t_max']):
    # Optional random thalamic background noise.
    if setup['thalamic_input']:
        thalamic_input = np.random.uniform(0, 1, (2, *shape))
    # Constant drive on the target neurons.
    for target_neuron in setup['target_neurons']:
        thalamic_input[(0, *reversed(target_neuron))] = I
    # Step both layers; keep the attractor peak for the direction update.
    place_cell_peak = continuous_attractor_layer.update(Δ / np.asarray(shape))
    spiking_fired = wave_propagation_layer.update(dt, thalamic_input)
    if direc_update_delay <= 0:
        # Outside the recovery period: derive a direction from the overlap
        # of the attractor bump with the wave front.
        overlap = continuous_attractor_layer.A * spiking_fired[0]
        total = np.sum(overlap)
        if total > 0:
            distance = coords - place_cell_peak[np.newaxis, np.newaxis, :]
            Δ = np.sum(distance * overlap[..., np.newaxis], axis=(0, 1)) / total
            direc_update_delay = R
        else:
            Δ = np.array([0, 0])
    else:
        # Still recovering: no new direction this step.
        direc_update_delay -= dt
        Δ = np.array([0, 0])
    # Render/dump; stop if the figure was closed manually.
    if not graphics.update(t, place_cell_peak, Δ, spiking_fired, wave_propagation_layer.v, continuous_attractor_layer.A, overlap):
        print('Figure closed. Finalizing simulation.')
        break
    # Stop once the attractor bump reaches any target neuron.
    if tuple(place_cell_peak) in setup['target_neurons']:
        print('Reached target. Finalizing simulation.')
        break

graphics.save_video(fps=8, keep_frame_images=False)
| 36.804124 | 137 | 0.713165 | import sys
import numpy as np
from graphics import Graphics
from ContinuousAttractorLayer import ContinuousAttractorLayer
from WavePropagationLayer import WavePropagationLayer
from setups import SETUPS
if len(sys.argv) > 1:
selected_setup = sys.argv[1]
else:
selected_setup = 's_maze'
try:
setup = SETUPS[selected_setup]
except KeyError as e:
raise ValueError('Selected setup "{}" does not exist. Chose one of \n\t{}'.format(selected_setup, '\n\t'.join(SETUPS.keys()))) from e
J = 12
T = 0.05
σ = 0.03
τ = 0.8
R = setup.get('R', 12)
I = 25
dt = 1
shape = setup['size']
wave_propagation_layer = WavePropagationLayer(shape, setup['randomize_neurons'], setup['randomize_synapses'])
continuous_attractor_layer = ContinuousAttractorLayer(shape, J, T, σ, τ)
graphics = Graphics(shape, selected_setup, setup['blocked'], setup['target_neurons'])
for region in setup['blocked']:
continuous_attractor_layer.block_region(region)
wave_propagation_layer.block_region(region)
continuous_attractor_layer.set_activation(setup['start_neuron'])
Δ = np.array([0, 0])
thalamic_input = np.zeros((2, *shape))
direc_update_delay = 0
coords = np.asarray(np.meshgrid(range(shape[0]), range(shape[1]))).T
for t in range(setup['t_max']):
if setup['thalamic_input']:
thalamic_input = np.random.uniform(0, 1, (2, *shape))
for target_neuron in setup['target_neurons']:
thalamic_input[(0, *reversed(target_neuron))] = I
place_cell_peak = continuous_attractor_layer.update(Δ / np.asarray(shape))
spiking_fired = wave_propagation_layer.update(dt, thalamic_input)
if direc_update_delay <= 0:
overlap = continuous_attractor_layer.A * spiking_fired[0]
total = np.sum(overlap)
if total > 0:
distance = coords - place_cell_peak[np.newaxis, np.newaxis, :]
Δ = np.sum(distance * overlap[..., np.newaxis], axis=(0, 1)) / total
direc_update_delay = R
else:
Δ = np.array([0, 0])
else:
direc_update_delay -= dt
Δ = np.array([0, 0])
if not graphics.update(t, place_cell_peak, Δ, spiking_fired, wave_propagation_layer.v, continuous_attractor_layer.A, overlap):
print('Figure closed. Finalizing simulation.')
break
if tuple(place_cell_peak) in setup['target_neurons']:
print('Reached target. Finalizing simulation.')
break
graphics.save_video(fps=8, keep_frame_images=False)
| true | true |
1c34962ea4c82ad76bb790502dd26afe10c37022 | 6,345 | py | Python | pecos/simulators/sparsesim/logical_sign.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 15 | 2019-04-11T16:02:38.000Z | 2022-03-15T16:56:36.000Z | pecos/simulators/sparsesim/logical_sign.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 4 | 2018-10-04T19:30:09.000Z | 2019-03-12T19:00:34.000Z | pecos/simulators/sparsesim/logical_sign.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 3 | 2020-10-07T16:47:16.000Z | 2022-02-01T05:34:54.000Z | # ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
"""
Functions:
find_logical_signs
logical_flip
"""
def find_logical_signs(state, logical_circuit, delogical_circuit=None):
    """Find the sign carried by a logical operator in the stabilizer state.

    Args:
        state: stabilizer state exposing ``stabs`` / ``destabs`` tableaux.
        logical_circuit: one-tick circuit of X/Y/Z Paulis defining the
            logical operator (must be in the stabilizer group).
        delogical_circuit: optional one-tick Pauli circuit expected to
            anti-commute with the logical operator.

    Returns:
        The accumulated (-1) exponent of the logical operator's sign.

    Raises:
        Exception: on malformed circuits, non-anti-commuting delogical,
            a logical operator not in the stabilizer group, or an
            imaginary overall sign.
    """

    def _collect_paulis(circuit, xs, zs):
        # Split a one-tick Pauli circuit into the qubit sets acted on by
        # X and Z (a Y contributes to both).
        for symbol, gate_locations in circuit.items(params=False):
            if symbol == 'X':
                xs.update(gate_locations)
            elif symbol == 'Z':
                zs.update(gate_locations)
            elif symbol == 'Y':
                xs.update(gate_locations)
                zs.update(gate_locations)
            else:
                raise Exception('Can not currently handle logical operator with operator "%s"!' % symbol)

    if len(logical_circuit) != 1:
        raise Exception('Logical operators are expected to only have one tick.')
    if delogical_circuit and len(delogical_circuit) != 1:
        raise Exception('Delogical operators are expected to only have one tick.')

    stabs = state.stabs
    destabs = state.destabs

    logical_xs, logical_zs = set(), set()
    _collect_paulis(logical_circuit, logical_xs, logical_zs)

    if delogical_circuit:
        # Verify that the supplied delogical anti-commutes with the logical.
        delogical_xs, delogical_zs = set(), set()
        _collect_paulis(delogical_circuit, delogical_xs, delogical_zs)
        anticom_x = len(logical_xs & delogical_zs) % 2
        anticom_z = len(logical_zs & delogical_xs) % 2
        if not ((anticom_x + anticom_z) % 2):
            print('logical Xs: %s logical Zs: %s' % (logical_xs, logical_zs))
            print('delogical Xs: %s delogical Zs: %s' % (delogical_xs, delogical_zs))
            raise Exception("Logical and delogical operators supplied do not anti-commute!")

    # Stabilizers whose destabilizer partners anti-commute with the logical
    # operator (even anti-commutation counts cancel through ^=).
    build_stabs = set()
    for q in logical_xs:
        build_stabs ^= destabs.col_z[q]
    for q in logical_zs:
        build_stabs ^= destabs.col_x[q]

    # Confirm that the product of those stabilizers reproduces the logical op.
    test_x, test_z = set(), set()
    for stab in build_stabs:
        test_x ^= stabs.row_x[stab]
        test_z ^= stabs.row_z[stab]
    test_x ^= logical_xs
    test_z ^= logical_zs
    if len(test_x) != 0 or len(test_z) != 0:
        print(('Logical op: xs - %s and zs - %s' % (logical_xs, logical_zs)))
        raise Exception('Failure due to not finding logical op! x... %s z... %s' %
                        (str(test_x ^ logical_xs), str(test_z ^ logical_zs)))

    # Sign bookkeeping: count -1 and i factors contributed by the stabilizers.
    logical_minus = len(build_stabs & stabs.signs_minus)
    logical_i = len(build_stabs & stabs.signs_i)

    # Each W on a qubit with both X and Z is really -i(iW) = -iY: one extra
    # -1 and one extra +i per such qubit.
    num_ys = len(logical_xs & logical_zs)
    logical_minus += num_ys
    logical_i += num_ys

    logical_minus %= 2
    logical_i %= 4
    # i^2 = -1 and i^3 = -i: fold the real part into the minus count.
    if logical_i == 2:
        logical_i = 0
        logical_minus += 1
    elif logical_i == 3:
        logical_i = 1
        logical_minus += 1

    if logical_i != 0:
        raise Exception('Logical operator has an imaginary sign... Not allowed if logical state is stabilized '
                        'by logical op!')

    return logical_minus
| 37.544379 | 118 | 0.622695 |
def find_logical_signs(state, logical_circuit, delogical_circuit=None):
if len(logical_circuit) != 1:
raise Exception('Logical operators are expected to only have one tick.')
if delogical_circuit and len(delogical_circuit) != 1:
raise Exception('Delogical operators are expected to only have one tick.')
stabs = state.stabs
destabs = state.destabs
logical_xs = set([])
logical_zs = set([])
delogical_xs = set([])
delogical_zs = set([])
for symbol, gate_locations in logical_circuit.items(params=False):
if symbol == 'X':
logical_xs.update(gate_locations)
elif symbol == 'Z':
logical_zs.update(gate_locations)
elif symbol == 'Y':
logical_xs.update(gate_locations)
logical_zs.update(gate_locations)
else:
raise Exception('Can not currently handle logical operator with operator "%s"!' % symbol)
if delogical_circuit:
for symbol, gate_locations in delogical_circuit.items(params=False):
if symbol == 'X':
delogical_xs.update(gate_locations)
elif symbol == 'Z':
delogical_zs.update(gate_locations)
elif symbol == 'Y':
delogical_xs.update(gate_locations)
delogical_zs.update(gate_locations)
else:
raise Exception('Can not currently handle logical operator with operator "%s"!' % symbol)
anticom_x = len(logical_xs & delogical_zs) % 2
anticom_z = len(logical_zs & delogical_xs) % 2
if not ((anticom_x + anticom_z) % 2):
print('logical Xs: %s logical Zs: %s' % (logical_xs, logical_zs))
print('delogical Xs: %s delogical Zs: %s' % (delogical_xs, delogical_zs))
raise Exception("Logical and delogical operators supplied do not anti-commute!")
# The following two function calls are wasteful because we will need some of what they discover... such as all the
# stabilizers that have destabilizers that anti-commute with the logical operator...
# But it is assumed that the user is not calling this function that often... so we can be wasteful...
# Check logical is a stabilizer (we want to remove it from the stabilizers)
# Find the anti-commuting destabilizers => stabilizers to give the logical operator
# --------------------------
build_stabs = set()
for q in logical_xs: # For qubits that have Xs in for the logical operator...
build_stabs ^= destabs.col_z[q] # Add in stabilizers that anti-commute for the logical operator's Xs
for q in logical_zs:
build_stabs ^= destabs.col_x[q]
# If a stabilizer anticommutes an even number of times for the X and/or Z Paulis... it will not appear due to ^=
# Confirm that the stabilizers chosen give the logical operator. If not... return with a failure = 1
# --------------------------
test_x = set()
test_z = set()
for stab in build_stabs:
test_x ^= stabs.row_x[stab]
test_z ^= stabs.row_z[stab]
# Compare with logical operator
test_x ^= logical_xs
test_z ^= logical_zs
if len(test_x) != 0 or len(test_z) != 0:
# for stab in build_stabs:
# print('stab ... ', stab)
print(('Logical op: xs - %s and zs - %s' % (logical_xs, logical_zs)))
raise Exception('Failure due to not finding logical op! x... %s z... %s' %
(str(test_x ^ logical_xs), str(test_z ^ logical_zs)))
# Get the sign of the logical operator
# --------------------------
# First, the minus sign
logical_minus = len(build_stabs & stabs.signs_minus)
# Second, the number of imaginary numbers
logical_i = len(build_stabs & stabs.signs_i)
# Translate the Ws to Ys... W = -i(iW) = -iY => For each Y add another -1 and +i.
logical_ws = logical_xs & logical_zs
num_ys = len(logical_ws)
logical_minus += num_ys
logical_i += num_ys
# Do (-1)^even = 1 -> 0, (-1)^odd = -1 -> 1
logical_minus %= 2
# Reinterpret number of is
logical_i %= 4
# num_is %4 = 0 => +1 => logical_i = 0, logical_minus += 0
# num_is %4 = 1 => +i => logical_i = 1, logical_minus += 0
if logical_i == 2: # num_is %4 = 2 => -1 => logical_i = 0, logical_minus += 1
logical_i = 0
logical_minus += 1
elif logical_i == 3: # num_is %4 = 3 => -i => logical_i = 1, logical_minus += 1
logical_i = 1
logical_minus += 1
if logical_i != 0:
raise Exception('Logical operator has an imaginary sign... Not allowed if logical state is stabilized '
'by logical op!')
return logical_minus
| true | true |
1c3496b8cbe1d3ec4b9afe9a121970b48f4fb661 | 20,937 | py | Python | main/Sapphire/Post_Process/DistFuncs.py | JonesRobM/SAPPHIRE | 64fd62634279800642d21b959d0e8f2efd360ad4 | [
"MIT"
] | null | null | null | main/Sapphire/Post_Process/DistFuncs.py | JonesRobM/SAPPHIRE | 64fd62634279800642d21b959d0e8f2efd360ad4 | [
"MIT"
] | 2 | 2022-03-30T12:33:42.000Z | 2022-03-30T12:34:41.000Z | main/Sapphire/Post_Process/DistFuncs.py | JonesRobM/Sapphire | fba875af56e48e2c5a4a3cf6788f51f359f63800 | [
"MIT"
] | null | null | null | import numpy as np
import os
def distance(a, b):
    """Euclidean separation between two 3-D points ``a`` and ``b``."""
    dx, dy, dz = (abs(a[k] - b[k]) for k in range(3))
    return np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def CoMDist(positions, CoM=None, homo=False, specie=None, elements=None):
    """Distance of each atom from a centre of mass.

    homo == False: distances of every atom in ``positions`` from ``CoM``.
    homo truthy:   distances of the ``specie`` sub-cluster's atoms from that
                   sub-cluster's own centre of mass (``CoM`` is ignored).
    """
    if homo == False:
        return [distance(pos, CoM) for pos in positions]
    elif homo:
        sub_positions = get_subspecieslist(specie, elements, positions)
        sub_com = get_CoM(sub_positions)
        return [distance(pos, sub_com) for pos in sub_positions]
def get_CoM(positions):
    """Geometric centre (unweighted mean position) of the given coordinates."""
    return np.mean(positions, axis=0)
def get_subspecieslist(specie, elements, positions):
    """Return the coordinates of all atoms of chemical ``specie``.

    Args:
        specie: element symbol to select (e.g. 'Au').
        elements: per-atom element symbols, same length/order as positions.
        positions: (N, 3) atomic coordinates.

    Returns:
        float64 array of shape (M, 3) with the matching atoms' coordinates
        (empty array when no atom matches).

    Fix: the previous implementation stacked the coordinates with the
    element symbols via ``np.column_stack``, casting every float through a
    string and back; it also crashed when no atom of ``specie`` was present.
    Filtering directly avoids the string round-trip and handles zero matches.
    """
    matched = [pos for el, pos in zip(elements, positions) if el == specie]
    return np.array(matched, dtype=np.float64)
def Euc_Dist(positions, homo=False, specie=None, elements=None):
    """All unique pairwise Euclidean distances between atoms.

    Args:
        positions: (N, 3) atomic coordinates of the full cluster.
        homo: when truthy, restrict the calculation to atoms of ``specie``.
        specie, elements: required only when ``homo`` is truthy.

    Returns:
        list of N*(N-1)/2 distances, or None when fewer than two atoms of
        the requested specie exist.

    Fix: the two branches previously duplicated the identical double loop,
    and the single-specie guard used the ``(...) is False`` anti-idiom.
    """
    if homo == False:
        pts = positions
    elif homo:
        pts = get_subspecieslist(specie, elements, positions)
        # Fewer than two atoms of this specie -> no pair distances exist.
        if len(pts) < 2:
            return None
    else:
        # Falsy-but-not-False flag (e.g. None): keep the original defensive
        # failure mode so misconfigured callers are still surfaced loudly.
        print("Variables used were:\n%s\n%s\n%s\n" % (homo, specie, (elements[0], elements[1])))
        raise TypeError("Euc_Dist function has encountered an error.\n")
    distances = []
    for i in range(len(pts) - 1):
        for j in range(i + 1, len(pts)):
            distances.append(distance(pts[i], pts[j]))
    return distances
def Hetero(positions, species, elements):
    """Distances between every atom of species[0] and every atom of species[1].

    Only mono/bimetallic systems are supported, so exactly the two entries of
    ``species`` are considered.

    Returns:
        list of lists (one row per species[0] atom) when both species have
        multiple atoms; a flat list / single-element rows when one side has
        only one atom; None when both species have a single atom each.

    Fix: the original's warning ``print`` calls sat after ``return``
    statements and so never executed; the nested try/except-IndexError
    shape probing is replaced with explicit dimensionality checks.
    """
    TempA = get_subspecieslist(species[0], elements, positions)
    TempB = get_subspecieslist(species[1], elements, positions)
    # A 2-D result means several atoms of that species; 1-D means a single atom.
    a_many = np.ndim(TempA) == 2
    b_many = np.ndim(TempB) == 2
    if a_many and b_many:
        return [[distance(a, b) for b in TempB] for a in TempA]
    if a_many:
        # Only one atom of species[1].
        print("You have only one of a specific atom type in your simulation. I hope that this is correct.", "\n")
        return [[distance(a, TempB)] for a in TempA]
    if b_many:
        # Only one atom of species[0].
        print("You have only one of a specific atom type in your simulation. I hope that this is correct.", "\n")
        return [distance(TempA, b) for b in TempB]
    print("You only have two atoms.\nIs this correct?", "\n")
    return None
class CoM_Dist():
    """Centre-of-mass distance analysis for a single trajectory frame.

    Depending on ``Type``, computes and appends to the Sapphire output files:
      * 'Full': the cluster CoM and every atom's distance from it.
      * 'Homo': the same restricted to one chemical ``Specie``, plus the
        distances from that sub-cluster's own CoM.
    """

    def __init__(self, System, Positions, CoM=None, Type=False, Specie=None,
                 Elements=None, Frame=None):
        # System: dict holding at least 'base_dir' for output paths.
        self.System = System
        self.Positions = Positions
        # Bug fix: ``self.CoM`` was previously assigned ``Positions``, so the
        # CoM argument was silently ignored and the distances were computed
        # against a whole coordinate array.  Use the supplied centre of mass,
        # falling back to the frame's geometric centre when none is given.
        self.CoM = CoM if CoM is not None else get_CoM(Positions)
        self.Type = Type
        self.Specie = Specie
        self.Elements = Elements
        self.Frame = Frame
        self.calculate()
        self.write()

    def ensure_dir(self, base_dir='', file_path=''):
        """Create ``base_dir + file_path`` if it does not already exist."""
        directory = base_dir + file_path
        if not os.path.exists(directory):
            os.makedirs(directory)

    def MakeFile(self, Attributes):
        """Create (empty) the output file described by ``Attributes`` if absent."""
        self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
        if not os.path.isfile(self.out):
            with open(self.out, 'w') as out:
                out.close()

    def calculate(self):
        """Populate ``self.Dist`` (and for 'Homo' also ``self.CoM``/``self.MidDist``)."""
        if self.Type == 'Full':
            self.Dist = np.array([distance(x, self.CoM) for x in self.Positions])
        elif self.Type == 'Homo':
            Temp = get_subspecieslist(self.Specie, self.Elements, self.Positions)
            # Distances of the sub-species from the global centre of mass ...
            self.Dist = np.array([distance(x, self.CoM) for x in Temp])
            # ... then from the sub-cluster's own centre of mass (self.CoM is
            # re-bound so write() records the sub-cluster CoM, as before).
            self.CoM = get_CoM(Temp)
            self.MidDist = np.array([distance(x, self.CoM) for x in Temp])

    def _append_row(self, Attributes, values, suffix=''):
        # One line per frame: "<frame> v1 v2 ...".  Note that MakeFile
        # intentionally keeps the historical behaviour of touching the
        # un-suffixed path.
        OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File'] + suffix
        self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
        self.MakeFile(Attributes)
        with open(OutFile, 'a') as outfile:
            outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in values) + '\n')

    def write(self):
        """Append this frame's results to the appropriate Sapphire output files."""
        if self.Type == 'Full':
            from Sapphire.IO import OutputInfoFull as Out  # Case 1
            self._append_row(getattr(Out, 'com'), self.CoM)
            self._append_row(getattr(Out, 'comdist'), self.Dist)
        elif self.Type == 'Homo':
            from Sapphire.IO import OutputInfoHomo as Out  # Case 2
            self._append_row(getattr(Out, 'hocom'), self.CoM, suffix=self.Specie)
            self._append_row(getattr(Out, 'hocomdist'), self.Dist, suffix=self.Specie)
            self._append_row(getattr(Out, 'homidcomdist'), self.MidDist, suffix=self.Specie)
class RDF():
    """Unnormalised radial distribution function g(r) for a single frame.

    On construction the histogram is accumulated (``calculate``) and the
    result appended to the Sapphire output files (``write``).

    Attributes after construction:
        Radii -- the ``Res`` shell radii spanning [0, R_Cut] (x axis).
        G     -- pair counts rescaled by the enclosing shell volumes (y axis).
    """

    def __init__(self, System, Positions, Res=100, R_Cut=10.0, Type=None,
                 Species=None, Elements=None, Frame=None):
        """Store the inputs, build the radial grid and run the analysis.

        Res   -- number of radial bins (grid fineness, typically ~100).
        R_Cut -- maximum radius considered; good practice is ~half the
                 cluster diameter (tested with 10 Angstroms).
        Type  -- 'Full', 'Homo' or 'Hetero': which pairs are binned and
                 which output files receive the result.
        """
        self.R_Cut = R_Cut
        self.System = System
        self.Res = Res
        self.Positions = Positions
        self.Type = Type
        self.Species = Species
        self.Elements = Elements
        self.Frame = Frame
        self.dr = self.R_Cut / self.Res              # shell growth increment
        self.Radii = np.linspace(0, self.R_Cut, self.Res)
        self.Volumes = np.zeros(self.Res)
        self.G = np.zeros(self.Res)
        self.calculate()
        self.write()

    def ensure_dir(self, base_dir='', file_path=''):
        """Create ``base_dir + file_path`` if it does not already exist."""
        directory = base_dir + file_path
        if not os.path.exists(directory):
            os.makedirs(directory)

    def MakeFile(self, Attributes):
        """Create (empty) the output file described by ``Attributes`` if absent."""
        self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
        if not os.path.isfile(self.out):
            with open(self.out, 'w') as out:
                out.close()

    def calculate(self):
        """Accumulate shell volumes and pair counts into ``Volumes`` / ``G``."""
        if self.Type == 'Hetero':
            # Bin only A-B pairs between the two species.
            origins = get_subspecieslist(self.Species[0], self.Elements, self.Positions)
            partners = get_subspecieslist(self.Species[1], self.Elements, self.Positions)
            pairings = [(atom, partners) for atom in origins]
        else:
            # All unique pairs (the self-pair lands in bin 0, discarded below).
            pairings = [(atom, self.Positions[i:]) for i, atom in enumerate(self.Positions)]
        for atom1, partners in pairings:
            # Each origin atom contributes one full set of shell volumes.
            for j in range(self.Res):
                r1 = j * self.dr            # inner radius of the shell
                r2 = r1 + self.dr           # outer radius, grown by dr
                v1 = 4.0 / 3.0 * np.pi * r1 ** 3
                v2 = 4.0 / 3.0 * np.pi * r2 ** 3
                self.Volumes[j] += v2 - v1
            for atom2 in partners:
                # Kept on the instance (not a local) as in the original API.
                self.Distance = distance(atom1, atom2)
                index = int(self.Distance / self.dr)
                if 0 < index < self.Res:
                    self.G[index] += 2      # an atom pair sits at this radius
        # Rescale the counts by the enclosing shell volume.
        for k, count in enumerate(self.G):
            self.G[k] = count / self.Volumes[k]

    def _write_pair(self, Out, g_key, r_key, suffix=''):
        # Append this frame's distribution and its radial grid to the two
        # output files named by g_key / r_key.
        for key, values in ((g_key, self.G), (r_key, self.Radii)):
            Attributes = getattr(Out, key)
            OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File'] + suffix
            self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
            self.MakeFile(Attributes)
            with open(OutFile, 'a') as outfile:
                outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in values) + '\n')

    def write(self):
        """Route the result to the Full / Homo / Hetero Sapphire output files."""
        if self.Type == 'Full':
            from Sapphire.IO import OutputInfoFull as Out  # Case 1
            self._write_pair(Out, 'rdf', 'rdfspace')
        elif self.Type == 'Homo':
            from Sapphire.IO import OutputInfoHomo as Out  # Case 2
            self._write_pair(Out, 'hordf', 'hordfspace', suffix=self.Species)
        elif self.Type == 'Hetero':
            from Sapphire.IO import OutputInfoHetero as Out  # Case 3
            self._write_pair(Out, 'herdf', 'herdfspace')
class Pair_Dist():
def __init__(self, System, Positions, Type = None, Specie = None, Elements = None, Frame = None):
self.System = System
self.Positions = Positions
self.Type = Type
self.Specie = Specie
self.Elements = Elements
self.Frame = Frame
self.calculate()
self.write()
def ensure_dir(self, base_dir='', file_path=''):
"""
Robert:
A simple script to verify the existence of a directory
given the path to it. If it does not exist, will create it.
"""
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def calculate(self):
if self.Type == 'Homo':
try:
self.distances = Euc_Dist(self.Positions, True, self.Specie, self.Elements)
#(positions, homo = False, specie = None, elements = None)
except Exception as e:
pass
elif self.Type == 'Hetero':
try:
self.distances = Hetero(self.Positions, self.Specie, self.Elements)
except Exception as e:
pass
else:
self.distances = Euc_Dist(self.Positions)
self.bins = int(round(200/(1+20*np.exp(-len(self.distances)/1000)))) #Wait, what the fuck???
self.a, b = np.histogram(self.distances, self.bins)
bin_width = b[1]-b[0]
self.bin_cents = [ b[i]+ bin_width for i in range(len(b)-1) ]
#bin_cents, a
def write(self):
if self.Type == 'Full':
from Sapphire.IO import OutputInfoFull as Out # Case 1
Attributes = getattr(Out, str('pair_distance')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('pair_distancespace')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n')
elif self.Type == 'Homo':
from Sapphire.IO import OutputInfoHomo as Out # Case 2
Attributes = getattr(Out, str('hopair_distance')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('hopair_distancespace')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n')
elif self.Type == 'Hetero':
from Sapphire.IO import OutputInfoHetero as Out # Case 3
Attributes = getattr(Out, str('hepair_distance')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('hepair_distancespace')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n') | 44.264271 | 132 | 0.558151 | import numpy as np
import os
def distance(a, b):
dx = abs(a[0] - b[0])
dy = abs(a[1] - b[1])
dz = abs(a[2] - b[2])
return np.sqrt(dx**2 + dy**2 + dz**2)
def CoMDist(positions, CoM = None, homo = False, specie = None, elements = None):
if homo == False:
return [distance(x, CoM) for x in positions]
elif homo:
Temp = get_subspecieslist(specie, elements, positions)
CoM = get_CoM(Temp)
return [distance(x, CoM) for x in Temp]
def get_CoM(positions):
return (np.average(positions, axis = 0))
def get_subspecieslist(specie, elements, positions):
Temp = np.column_stack((elements,positions))
Temp = [x for x in Temp if x[0] == specie]
return np.array(np.delete(Temp,0,1), dtype = np.float64)
def Euc_Dist(positions, homo = False, specie = None, elements = None):
if homo == False:
Distances=[]
for i in range(len(positions)-1):
for j in range(i+1,len(positions)):
Euc = distance(positions[i],positions[j])
Distances.append(Euc)
return Distances
elif homo:
Distances = []
Temp = get_subspecieslist(specie, elements, positions)
if (len(Temp)>1) is False:
return None
else:
for i in range(len(Temp)-1):
for j in range(i+1,len(Temp)):
Euc = distance(Temp[i],Temp[j])
Distances.append(Euc)
return Distances
else:
print("Variables used were:\n%s\n%s\n%s\n" %(homo, specie, (elements[0], elements[1])))
raise TypeError("Euc_Dist function has encountered an error.\n")
def Hetero(positions, species, elements):
TempA = get_subspecieslist(species[0], elements, positions)
TempB = get_subspecieslist(species[1], elements, positions)
try:
np.shape(TempA)[1]
try:
np.shape(TempB)[1]
Dist=[]
for a in TempA:
Temp = [ distance(a,b) for b in TempB]
Dist.append(Temp)
return Dist
except IndexError:
Dist=[]
for x in TempA:
Dist.append( [distance(x, TempB) ])
return Dist
print("You have only one of a specific atom type in your simulation. I hope that this is correct.", "\n")
except IndexError:
try:
np.shape(TempB)[1]
return [ distance(TempA, b) for b in TempB ]
print("You have only one of a specific atom type in your simulation. I hope that this is correct.", "\n")
except IndexError:
print("You only have two atoms.\nIs this correct?", "\n")
return None
class CoM_Dist():
def __init__(self, System, Positions, CoM = None, Type = False, Specie = None, Elements = None, Frame = None):
self.System = System
self.Positions = Positions
self.CoM =Positions
self.Type = Type
self.Specie= Specie
self.Elements = Elements
self.Frame = Frame
self.calculate()
self.write()
def ensure_dir(self, base_dir='', file_path=''):
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def calculate(self):
if self.Type == 'Full':
self.Dist = np.array([distance(x, self.CoM) for x in self.Positions])
elif self.Type == 'Homo':
Temp = get_subspecieslist(self.Specie, self.Elements, self.Positions)
self.Dist = np.array([distance(x, self.CoM) for x in Temp])
self.CoM = get_CoM(Temp)
self.MidDist = np.array([distance(x, self.CoM) for x in Temp])
def write(self):
if self.Type == 'Full':
from Sapphire.IO import OutputInfoFull as Out
Attributes = getattr(Out, str('com'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.CoM) +'\n')
Attributes = getattr(Out, str('comdist'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Dist) +'\n')
elif self.Type == 'Homo':
from Sapphire.IO import OutputInfoHomo as Out
Attributes = getattr(Out, str('hocom'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.CoM) +'\n')
Attributes = getattr(Out, str('hocomdist'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Dist) +'\n')
Attributes = getattr(Out, str('homidcomdist'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.MidDist) +'\n')
class RDF():
def __init__(self, System, Positions, Res=100, R_Cut=10.0, Type = None, Species = None, Elements = None, Frame = None):
self.R_Cut = R_Cut
self.System = System
self.Res = Res
self.Positions = Positions
self.Type = Type
self.Species = Species
self.Elements = Elements
self.Frame = Frame
self.dr = self.R_Cut / self.Res
self.Radii = np.linspace(0, self.R_Cut, self.Res)
self.Volumes=np.zeros(self.Res)
self.G=np.zeros(self.Res)
self.calculate()
self.write()
def ensure_dir(self, base_dir='', file_path=''):
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def calculate(self):
if not self.Type == 'Hetero':
for i, atom1 in enumerate(self.Positions):
for j in range(self.Res):
r1 = j * self.dr
r2 = r1 + self.dr
v1 = 4.0 / 3.0 * np.pi * r1**3
v2 = 4.0 / 3.0 * np.pi * r2**3
self.Volumes[j] += v2 - v1
for atom2 in self.Positions[i:]:
self.Distance = distance(atom1, atom2)
index = int(self.Distance / self.dr)
if 0 < index < self.Res:
self.G[index] += 2
for i, value in enumerate(self.G):
self.G[i] = value / self.Volumes[i]
elif self.Type == 'Hetero':
TempA = get_subspecieslist(self.Species[0], self.Elements, self.Positions)
TempB = get_subspecieslist(self.Species[1], self.Elements, self.Positions)
for i, atom1 in enumerate(TempA):
for j in range(self.Res):
r1 = j * self.dr
r2 = r1 + self.dr
v1 = 4.0 / 3.0 * np.pi * r1**3
v2 = 4.0 / 3.0 * np.pi * r2**3
self.Volumes[j] += v2 - v1
for atom2 in TempB:
self.Distance = distance(atom1, atom2)
index = int(self.Distance / self.dr)
if 0 < index < self.Res:
self.G[index] += 2
for i, value in enumerate(self.G):
self.G[i] = value / self.Volumes[i]
def write(self):
if self.Type == 'Full':
from Sapphire.IO import OutputInfoFull as Out
Attributes = getattr(Out, str('rdf'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.G) +'\n')
Attributes = getattr(Out, str('rdfspace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Radii) +'\n')
elif self.Type == 'Homo':
from Sapphire.IO import OutputInfoHomo as Out
Attributes = getattr(Out, str('hordf'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Species
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.G) +'\n')
Attributes = getattr(Out, str('hordfspace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Species
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Radii) +'\n')
elif self.Type == 'Hetero':
from Sapphire.IO import OutputInfoHetero as Out
Attributes = getattr(Out, str('herdf'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.G) +'\n')
Attributes = getattr(Out, str('herdfspace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Radii) +'\n')
class Pair_Dist():
def __init__(self, System, Positions, Type = None, Specie = None, Elements = None, Frame = None):
self.System = System
self.Positions = Positions
self.Type = Type
self.Specie = Specie
self.Elements = Elements
self.Frame = Frame
self.calculate()
self.write()
def ensure_dir(self, base_dir='', file_path=''):
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def calculate(self):
if self.Type == 'Homo':
try:
self.distances = Euc_Dist(self.Positions, True, self.Specie, self.Elements)
except Exception as e:
pass
elif self.Type == 'Hetero':
try:
self.distances = Hetero(self.Positions, self.Specie, self.Elements)
except Exception as e:
pass
else:
self.distances = Euc_Dist(self.Positions)
self.bins = int(round(200/(1+20*np.exp(-len(self.distances)/1000))))
self.a, b = np.histogram(self.distances, self.bins)
bin_width = b[1]-b[0]
self.bin_cents = [ b[i]+ bin_width for i in range(len(b)-1) ]
def write(self):
if self.Type == 'Full':
from Sapphire.IO import OutputInfoFull as Out
Attributes = getattr(Out, str('pair_distance'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('pair_distancespace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n')
elif self.Type == 'Homo':
from Sapphire.IO import OutputInfoHomo as Out
Attributes = getattr(Out, str('hopair_distance'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('hopair_distancespace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']+self.Specie
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n')
elif self.Type == 'Hetero':
from Sapphire.IO import OutputInfoHetero as Out
Attributes = getattr(Out, str('hepair_distance'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.a) +'\n')
Attributes = getattr(Out, str('hepair_distancespace'))
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.bin_cents) +'\n') | true | true |
1c3497047196a31028998ae4617a866c66a753ef | 4,399 | py | Python | pygasus/model/decorators/lazy_property.py | talismud/pygasus | fb01c8bd51003b5a008b572182a96bad86ef769f | [
"BSD-3-Clause"
] | 2 | 2021-11-18T09:35:10.000Z | 2021-11-18T14:46:32.000Z | pygasus/model/decorators/lazy_property.py | talismud/pygasus | fb01c8bd51003b5a008b572182a96bad86ef769f | [
"BSD-3-Clause"
] | null | null | null | pygasus/model/decorators/lazy_property.py | talismud/pygasus | fb01c8bd51003b5a008b572182a96bad86ef769f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Lazy property, to optimize getting and setting data.
The lazy property descriptor is used similarly to a property,
but it caches the data it retrieves the first time it's called
and then will only return this cached data, unless a setter
is called in the meantime.
"""
_MISSING = object()
class LazyPropertyDescriptor:
"""
Delays loading of property until first access.
Although extended, this was inspired by Evennia's utility
(wwww.evennia.com), itself based on the iplementation in the
werkzeug suite:
http://werkzeug.pocoo.org/docs/utils/#werkzeug.utils.cached_property
A lazy property should be used as a decorator over the getter method,
just like a property. The difference is that a lazy property will
call the getter method only once, the first time for this object, and
then cache the result for following queries. This allows for fast-access
to handlers that are not re-created each time the property is called:
```python
class SomeTest(Model):
@lazy_property
def db(self):
return AttributeHandler(self)
@db.setter
def db(self, handler):
raise ValueError("you can't change that")
```
Once initialized, the `AttributeHandler` will be available as a
property "db" on the object.
"""
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
self.memory = {}
def __get__(self, instance, owner=None):
if instance is None:
return self
# The value might be cached in `memory`
try:
identifier = hash(instance)
except TypeError:
identifier = None
attr = self.fget.__name__
cached_attr = f"_cached_{attr}"
if identifier:
value = self.memory.get(identifier, _MISSING)
else:
value = getattr(instance, cached_attr, _MISSING)
if value is _MISSING:
value = self.fget(instance)
if identifier:
self.memory[identifier] = value
else:
setattr(instance, cached_attr, value)
return value
def __set__(self, instance, value):
if not self.fset:
raise AttributeError("can't set attribute")
try:
identifier = hash(instance)
except TypeError:
identifier = None
attr = self.fget.__name__
cached_attr = f"_cached_{attr}"
self.fset(instance, value)
if identifier:
self.memory[identifier] = value
else:
setattr(instance, cached_attr, value)
def setter(self, func):
self.fset = func
return self
def lazy_property(func):
return LazyPropertyDescriptor(func)
| 33.838462 | 78 | 0.674926 |
_MISSING = object()
class LazyPropertyDescriptor:
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
self.memory = {}
def __get__(self, instance, owner=None):
if instance is None:
return self
try:
identifier = hash(instance)
except TypeError:
identifier = None
attr = self.fget.__name__
cached_attr = f"_cached_{attr}"
if identifier:
value = self.memory.get(identifier, _MISSING)
else:
value = getattr(instance, cached_attr, _MISSING)
if value is _MISSING:
value = self.fget(instance)
if identifier:
self.memory[identifier] = value
else:
setattr(instance, cached_attr, value)
return value
def __set__(self, instance, value):
if not self.fset:
raise AttributeError("can't set attribute")
try:
identifier = hash(instance)
except TypeError:
identifier = None
attr = self.fget.__name__
cached_attr = f"_cached_{attr}"
self.fset(instance, value)
if identifier:
self.memory[identifier] = value
else:
setattr(instance, cached_attr, value)
def setter(self, func):
self.fset = func
return self
def lazy_property(func):
return LazyPropertyDescriptor(func)
| true | true |
1c34970ee35e01abe21bae0efd3466ad024f3479 | 298 | py | Python | Lab 1. Routes/polyeditor/setup.py | Panda-Lewandowski/Software-engineering | f514c31bc665a54e4894bc6fab39f5cb4b2cbd70 | [
"MIT"
] | 1 | 2019-03-15T12:16:07.000Z | 2019-03-15T12:16:07.000Z | Lab 1. Routes/polyeditor/setup.py | Panda-Lewandowski/Software-engineering | f514c31bc665a54e4894bc6fab39f5cb4b2cbd70 | [
"MIT"
] | null | null | null | Lab 1. Routes/polyeditor/setup.py | Panda-Lewandowski/Software-engineering | f514c31bc665a54e4894bc6fab39f5cb4b2cbd70 | [
"MIT"
] | 1 | 2019-10-19T11:33:03.000Z | 2019-10-19T11:33:03.000Z | from setuptools import setup, find_packages
setup(
name='polyeditor',
version='polyeditor.__version__',
packages=find_packages(),
entry_points={
'console_scripts':
['polyeditor = main:run_editor']
},
install_requires=[
'PyQt5==5.10'
]
)
| 19.866667 | 44 | 0.607383 | from setuptools import setup, find_packages
setup(
name='polyeditor',
version='polyeditor.__version__',
packages=find_packages(),
entry_points={
'console_scripts':
['polyeditor = main:run_editor']
},
install_requires=[
'PyQt5==5.10'
]
)
| true | true |
1c349711beedf8b2182208c0042e23eada19e095 | 1,942 | py | Python | old_python/ClassExpoSin.py | ChrisAndre/expsin | ab6960c009894989d668d13ab48f2517abf377a7 | [
"MIT"
] | null | null | null | old_python/ClassExpoSin.py | ChrisAndre/expsin | ab6960c009894989d668d13ab48f2517abf377a7 | [
"MIT"
] | null | null | null | old_python/ClassExpoSin.py | ChrisAndre/expsin | ab6960c009894989d668d13ab48f2517abf377a7 | [
"MIT"
] | 1 | 2020-04-10T10:24:01.000Z | 2020-04-10T10:24:01.000Z | import math
from ExpoSin import ExpoSin
class ClassExpoSin(object):
"""
Represents the class of sinusoids defined by S_k2[r1, r2, psi, N].
An ExpoSin object can be constructed with this class using an
initial tan(y1).
"""
def __init__(self, k2, r1, r2, angle, N=0):
self.k2 = float(k2)
self.r1 = float(r1)
self.r2 = float(r2)
self.N = N
self.psi = 2 * math.pi * N + angle
def tany1Range(self):
"""Calculate the allowable range for tan(y1)."""
# unpack for easy reading
k2 = self.k2
r1 = self.r1
r2 = self.r2
psi = self.psi
logr1r2 = math.log(r1 / r2)
cosk2O = math.cos(k2 * psi)
delta = 2*(1-cosk2O)/k2**4 - logr1r2**2
if delta < 0: # no feasible trajectories
return None
tany1min = k2/2 * (-logr1r2 / math.tan(k2*psi/2) - math.sqrt(delta))
tany1max = k2/2 * (-logr1r2 / math.tan(k2*psi/2) + math.sqrt(delta))
return tany1min, tany1max
def createExpoSin(self, tany1):
"""Return a single, fully-constrained exponential sinusoid object."""
# unpack for easy reading
k2 = self.k2
r1 = self.r1
r2 = self.r2
psi = self.psi
mintany1, maxtany1 = self.tany1Range()
if tany1 > maxtany1 or tany1 < mintany1:
raise Exception('Cannot create ExpoSin with given tany1; out of legal range.')
logr1r2 = math.log(r1 / r2)
sink2O = math.sin(k2 * psi)
cosk2O = math.cos(k2 * psi)
k1_sqr = ((logr1r2 + tany1 / k2 * sink2O)/(1 - cosk2O))**2 + (tany1 / k2)**2
k1_sign = (logr1r2 + tany1 / k2 * sink2O)/(1 - cosk2O)
if k1_sign < 0:
k1 = -math.sqrt(k1_sqr)
else:
k1 = math.sqrt(k1_sqr)
phi = math.acos(tany1/k1/k2)
k0 = r1/math.exp(k1*math.sin(phi))
return ExpoSin(k0, k1, k2, phi)
| 28.558824 | 90 | 0.553038 | import math
from ExpoSin import ExpoSin
class ClassExpoSin(object):
def __init__(self, k2, r1, r2, angle, N=0):
self.k2 = float(k2)
self.r1 = float(r1)
self.r2 = float(r2)
self.N = N
self.psi = 2 * math.pi * N + angle
def tany1Range(self):
k2 = self.k2
r1 = self.r1
r2 = self.r2
psi = self.psi
logr1r2 = math.log(r1 / r2)
cosk2O = math.cos(k2 * psi)
delta = 2*(1-cosk2O)/k2**4 - logr1r2**2
if delta < 0:
return None
tany1min = k2/2 * (-logr1r2 / math.tan(k2*psi/2) - math.sqrt(delta))
tany1max = k2/2 * (-logr1r2 / math.tan(k2*psi/2) + math.sqrt(delta))
return tany1min, tany1max
def createExpoSin(self, tany1):
k2 = self.k2
r1 = self.r1
r2 = self.r2
psi = self.psi
mintany1, maxtany1 = self.tany1Range()
if tany1 > maxtany1 or tany1 < mintany1:
raise Exception('Cannot create ExpoSin with given tany1; out of legal range.')
logr1r2 = math.log(r1 / r2)
sink2O = math.sin(k2 * psi)
cosk2O = math.cos(k2 * psi)
k1_sqr = ((logr1r2 + tany1 / k2 * sink2O)/(1 - cosk2O))**2 + (tany1 / k2)**2
k1_sign = (logr1r2 + tany1 / k2 * sink2O)/(1 - cosk2O)
if k1_sign < 0:
k1 = -math.sqrt(k1_sqr)
else:
k1 = math.sqrt(k1_sqr)
phi = math.acos(tany1/k1/k2)
k0 = r1/math.exp(k1*math.sin(phi))
return ExpoSin(k0, k1, k2, phi)
| true | true |
1c3499010d88c6042fb15ac3dc48e94eb92db3d9 | 2,915 | py | Python | pluginbase.py | haizaar/iris | 1efe07181cb0ec2307b1385d65160b534b40f9a7 | [
"MIT"
] | 50 | 2018-05-29T13:49:41.000Z | 2022-03-31T03:19:14.000Z | pluginbase.py | haizaar/iris | 1efe07181cb0ec2307b1385d65160b534b40f9a7 | [
"MIT"
] | 22 | 2018-06-25T13:39:53.000Z | 2021-02-02T08:30:55.000Z | pluginbase.py | haizaar/iris | 1efe07181cb0ec2307b1385d65160b534b40f9a7 | [
"MIT"
] | 7 | 2018-08-12T06:02:59.000Z | 2021-02-05T05:01:29.000Z | # a simple Python plugin loading system
# see http://stackoverflow.com/questions/14510286/plugin-architecture-plugin
# -manager-vs-inspecting-from-plugins-import
import logging
from utils import utils
class PluginMount(type):
"""
A plugin mount point derived from:
http://martyalchin.com/2008/jan/10/simple-plugin-framework/
Acts as a metaclass which creates anything inheriting from Plugin
"""
def __init__(cls, name, bases, attrs):
"""Called when a Plugin derived class is imported"""
if not hasattr(cls, 'plugins'):
# Called when the metaclass is first instantiated
cls.plugins = []
else:
# Called when a plugin class is imported
cls.register_plugin(cls)
def register_plugin(cls, plugin):
"""Add the plugin to the plugin list and perform any registration
logic"""
# create a plugin instance and store it
# optionally you could just store the plugin class and lazily
# instantiate
instance = plugin()
# save the plugin reference
cls.plugins.append(instance)
# apply plugin logic - in this case connect the plugin to blinker
# signals
# this must be defined in the derived class
instance.register_signals()
class Plugin(object):
"""A plugin which must provide a register_signals() method"""
__metaclass__ = PluginMount
def __init__(self):
self.counter = 0
self.tags = []
self.on_demand = []
self.batch = None
def set_tags(self, tags):
self.tags = tags
def set_on_demand(self, on_demand):
self.on_demand = on_demand
def gen_labels(self, gcp_object):
labels = {}
for tag in self.tags:
f = "_get_" + tag
if f in dir(self):
res = getattr(self, f)(gcp_object)
if res is not None:
labels[utils.get_prfeix() + '_' + tag] = res
return labels
def batch_callback(self, request_id, response, exception):
if exception is not None:
logging.error(
'Error in Request Id: {0} Response: {1} Exception: {2}'.format(
response, request_id,
exception))
def is_on_demand(self):
for od in self.on_demand:
if self.__class__.__name__.lower() == od.lower():
return True
return False
def do_batch(self):
self.batch.execute()
self.counter = 0
def do_tag(self, project_id):
raise NotImplementedError
def get_gcp_object(self, data):
raise NotImplementedError
def tag_one(self, gcp_object, project_id):
raise NotImplementedError
def api_name(self):
raise NotImplementedError
def methodsNames(self):
raise NotImplementedError
| 24.70339 | 79 | 0.606518 |
import logging
from utils import utils
class PluginMount(type):
def __init__(cls, name, bases, attrs):
if not hasattr(cls, 'plugins'):
cls.plugins = []
else:
cls.register_plugin(cls)
def register_plugin(cls, plugin):
instance = plugin()
cls.plugins.append(instance)
instance.register_signals()
class Plugin(object):
    """Base class for resource-labelling plugins.

    Subclasses are auto-registered through the PluginMount metaclass and
    implement the do_tag/get_gcp_object/tag_one/api_name/methodsNames hooks.
    """
    __metaclass__ = PluginMount  # Python 2 metaclass hook (inert on Python 3)

    def __init__(self):
        self.counter = 0     # requests queued in the current batch
        self.tags = []       # label keys this plugin should generate
        self.on_demand = []  # plugin names that run on demand only
        self.batch = None    # batch request object, set up by subclasses

    def set_tags(self, tags):
        """Set the list of label keys to generate."""
        self.tags = tags

    def set_on_demand(self, on_demand):
        """Set the list of plugin names that run on demand only."""
        self.on_demand = on_demand

    def gen_labels(self, gcp_object):
        """Build a labels dict for *gcp_object* from the configured tags.

        Each tag is resolved through an optional ``_get_<tag>`` method on
        the plugin; tags without a getter, or whose getter returns None,
        are skipped.
        """
        labels = {}
        for tag in self.tags:
            f = "_get_" + tag
            if f in dir(self):
                res = getattr(self, f)(gcp_object)
                if res is not None:
                    # NOTE: get_prfeix is the (misspelled) upstream utils API.
                    labels[utils.get_prfeix() + '_' + tag] = res
        return labels

    def batch_callback(self, request_id, response, exception):
        """Batch-request callback; logs any per-request failure.

        Bug fix: arguments were previously passed as (response, request_id,
        exception), swapping the values under the message's labels.
        """
        if exception is not None:
            logging.error(
                'Error in Request Id: {0} Response: {1} Exception: {2}'.format(
                    request_id, response,
                    exception))

    def is_on_demand(self):
        """Return True if this plugin is configured as on-demand only."""
        name = self.__class__.__name__.lower()
        return any(name == od.lower() for od in self.on_demand)

    def do_batch(self):
        """Execute the accumulated batch request and reset the counter."""
        self.batch.execute()
        self.counter = 0

    def do_tag(self, project_id):
        """Subclass hook: process/tag all relevant objects in *project_id*."""
        raise NotImplementedError

    def get_gcp_object(self, data):
        """Subclass hook: extract the GCP object described by *data*."""
        raise NotImplementedError

    def tag_one(self, gcp_object, project_id):
        """Subclass hook: apply labels to a single *gcp_object*."""
        raise NotImplementedError

    def api_name(self):
        """Subclass hook: return the Google API service name used."""
        raise NotImplementedError

    def methodsNames(self):
        """Subclass hook: return the relevant API method names."""
        raise NotImplementedError
| true | true |
1c349a8db700b8c7abfa0a3b61b22bc079dd4091 | 313 | py | Python | electrum_mona/plugins/coldcard/__init__.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 61 | 2017-08-06T08:51:49.000Z | 2021-12-28T06:25:36.000Z | electrum_mona/plugins/coldcard/__init__.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 15 | 2017-09-12T07:15:01.000Z | 2021-12-28T06:25:15.000Z | electrum_mona/plugins/coldcard/__init__.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 27 | 2017-08-18T19:40:30.000Z | 2021-03-01T11:16:02.000Z | from electrum_mona.i18n import _
fullname = 'Coldcard Wallet'
description = 'Provides support for the Coldcard hardware wallet from Coinkite'
requires = [('ckcc-protocol', 'github.com/Coldcard/ckcc-protocol')]
registers_keystore = ('hardware', 'coldcard', _("Coldcard Wallet"))
available_for = ['qt', 'cmdline']
| 39.125 | 79 | 0.753994 | from electrum_mona.i18n import _
fullname = 'Coldcard Wallet'
description = 'Provides support for the Coldcard hardware wallet from Coinkite'
requires = [('ckcc-protocol', 'github.com/Coldcard/ckcc-protocol')]
registers_keystore = ('hardware', 'coldcard', _("Coldcard Wallet"))
available_for = ['qt', 'cmdline']
| true | true |
1c349bd13a11bf740063716d405c8f522ae73dfc | 23,198 | py | Python | tests/test_ext.py | iomintz/jinja | 6b9eb6df5a7804ec4210bf449296aae71eb5cd3e | [
"BSD-3-Clause"
] | 1 | 2020-07-06T05:53:18.000Z | 2020-07-06T05:53:18.000Z | tests/test_ext.py | iomintz/jinja | 6b9eb6df5a7804ec4210bf449296aae71eb5cd3e | [
"BSD-3-Clause"
] | null | null | null | tests/test_ext.py | iomintz/jinja | 6b9eb6df5a7804ec4210bf449296aae71eb5cd3e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.ext
~~~~~~~~~~~~~~~~~~~~
Tests for the extensions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import pytest
from jinja2 import Environment, DictLoader, contextfunction, nodes
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2._compat import BytesIO, itervalues, text_type
# Imported by ExampleExtension at template runtime via nodes.ImportedName.
importable_object = 23
# Matches _( ... ) spans; used by StreamFilterExtension to find translations.
_gettext_re = re.compile(r'_\((.*?)\)', re.DOTALL)
# Templates for the classic (old-style gettext) i18n tests.
i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'plural2.html': '{% trans user_count=get_user_count() %}{{ user_count }}s'
                    '{% pluralize %}{{ user_count }}p{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}'
}
# Templates for the newstyle-gettext tests.
newstyle_i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}',
    'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
    'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}'
                          '{{ num }} apples{% endtrans %}',
    'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}',
    'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}',
    'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}',
    'novars.html': '{% trans %}%(hello)s{% endtrans %}',
    'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}',
    'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}'
}
# Fake message catalogs keyed by language code; only 'de' is populated.
languages = {
    'de': {
        'missing': u'fehlend',
        'watch out': u'pass auf',
        'One user online': u'Ein Benutzer online',
        '%(user_count)s users online': u'%(user_count)s Benutzer online',
        'User: %(num)s': u'Benutzer: %(num)s',
        'User: %(count)s': u'Benutzer: %(count)s',
        '%(num)s apple': u'%(num)s Apfel',
        '%(num)s apples': u'%(num)s Äpfel'
    }
}
@contextfunction
def gettext(context, string):
    """Translate *string* using the LANGUAGE value from the template context,
    falling back to the string itself when no translation exists."""
    lang = context.get('LANGUAGE', 'en')
    catalog = languages.get(lang, {})
    return catalog.get(string, string)
@contextfunction
def ngettext(context, s, p, n):
    """Translate singular *s* / plural *p* depending on the count *n*,
    using the LANGUAGE value from the template context."""
    lang = context.get('LANGUAGE', 'en')
    catalog = languages.get(lang, {})
    message = s if n == 1 else p
    return catalog.get(message, message)
# Old-style i18n environment: gettext callables exposed as plain globals.
i18n_env = Environment(
    loader=DictLoader(i18n_templates),
    extensions=['jinja2.ext.i18n']
)
i18n_env.globals.update({
    '_': gettext,
    'gettext': gettext,
    'ngettext': ngettext
})
# Same setup but with the whitespace-trimming policy enabled.
i18n_env_trimmed = Environment(extensions=['jinja2.ext.i18n'])
i18n_env_trimmed.policies['ext.i18n.trimmed'] = True
i18n_env_trimmed.globals.update({
    '_': gettext,
    'gettext': gettext,
    'ngettext': ngettext
})
# Newstyle environment: callables installed via install_gettext_callables.
newstyle_i18n_env = Environment(
    loader=DictLoader(newstyle_i18n_templates),
    extensions=['jinja2.ext.i18n']
)
newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True)
class ExampleExtension(Extension):
    """Extension used by the tests to exercise the custom-node machinery."""
    tags = set(['test'])  # handles the {% test %} tag
    ext_attr = 42
    def parse(self, parser):
        """Parse {% test %} into an Output node calling _dump at render time."""
        return nodes.Output([self.call_method('_dump', [
            nodes.EnvironmentAttribute('sandboxed'),
            self.attr('ext_attr'),
            nodes.ImportedName(__name__ + '.importable_object'),
            nodes.ContextReference()
        ])]).set_lineno(next(parser.stream).lineno)
    def _dump(self, sandboxed, ext_attr, imported_object, context):
        # Joins the four probed values so tests can assert on a single string.
        return '%s|%s|%s|%s' % (
            sandboxed,
            ext_attr,
            imported_object,
            context.blocks
        )
class PreprocessorExtension(Extension):
    """Extension whose preprocess hook rewrites a placeholder marker."""
    def preprocess(self, source, name, filename=None):
        """Replace every [[TEST]] marker before the source is lexed."""
        rewritten = source.replace('[[TEST]]', '({{ foo }})')
        return rewritten
class StreamFilterExtension(Extension):
    """Extension rewriting _("...") spans in literal data into gettext calls."""
    def filter_stream(self, stream):
        # Only 'data' tokens can contain _(...) markers; everything else is
        # forwarded untouched.
        for token in stream:
            if token.type == 'data':
                for t in self.interpolate(token):
                    yield t
            else:
                yield token
    def interpolate(self, token):
        """Split one data token around _(...) markers, yielding the token
        sequence of an equivalent {{ gettext('...') }} expression in place
        of each marker."""
        pos = 0
        end = len(token.value)
        lineno = token.lineno
        while 1:
            match = _gettext_re.search(token.value, pos)
            if match is None:
                break
            # Literal text before the marker stays a plain data token.
            value = token.value[pos:match.start()]
            if value:
                yield Token(lineno, 'data', value)
            lineno += count_newlines(token.value)
            yield Token(lineno, 'variable_begin', None)
            yield Token(lineno, 'name', 'gettext')
            yield Token(lineno, 'lparen', None)
            yield Token(lineno, 'string', match.group(1))
            yield Token(lineno, 'rparen', None)
            yield Token(lineno, 'variable_end', None)
            pos = match.end()
        if pos < end:
            # Trailing literal text after the last match.
            yield Token(lineno, 'data', token.value[pos:])
@pytest.mark.ext
class TestExtensions(object):
def test_extend_late(self):
env = Environment()
env.add_extension('jinja2.ext.autoescape')
t = env.from_string(
'{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
assert t.render() == '<test>'
def test_loop_controls(self):
env = Environment(extensions=['jinja2.ext.loopcontrols'])
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item % 2 == 0 %}{% continue %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '13'
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item > 2 %}{% break %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '12'
def test_do(self):
env = Environment(extensions=['jinja2.ext.do'])
tmpl = env.from_string('''
{%- set items = [] %}
{%- for char in "foo" %}
{%- do items.append(loop.index0 ~ char) %}
{%- endfor %}{{ items|join(', ') }}''')
assert tmpl.render() == '0f, 1o, 2o'
def test_extension_nodes(self):
env = Environment(extensions=[ExampleExtension])
tmpl = env.from_string('{% test %}')
assert tmpl.render() == 'False|42|23|{}'
def test_identifier(self):
assert ExampleExtension.identifier == __name__ + '.ExampleExtension'
def test_rebinding(self):
original = Environment(extensions=[ExampleExtension])
overlay = original.overlay()
for env in original, overlay:
for ext in itervalues(env.extensions):
assert ext.environment is env
def test_preprocessor_extension(self):
env = Environment(extensions=[PreprocessorExtension])
tmpl = env.from_string('{[[TEST]]}')
assert tmpl.render(foo=42) == '{(42)}'
def test_streamfilter_extension(self):
env = Environment(extensions=[StreamFilterExtension])
env.globals['gettext'] = lambda x: x.upper()
tmpl = env.from_string('Foo _(bar) Baz')
out = tmpl.render()
assert out == 'Foo BAR Baz'
def test_extension_ordering(self):
class T1(Extension):
priority = 1
class T2(Extension):
priority = 2
env = Environment(extensions=[T1, T2])
ext = list(env.iter_extensions())
assert ext[0].__class__ is T1
assert ext[1].__class__ is T2
def test_debug(self):
env = Environment(extensions=['jinja2.ext.debug'])
t = env.from_string('Hello\n{% debug %}\nGoodbye')
out = t.render()
for value in ("context", "cycler", "filters", "abs", "tests", "!="):
assert "'{}'".format(value) in out
@pytest.mark.ext
class TestInternationalization(object):
    """Tests for the classic (old-style gettext) i18n extension."""
    def test_trans(self):
        tmpl = i18n_env.get_template('child.html')
        assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
    def test_trans_plural(self):
        tmpl = i18n_env.get_template('plural.html')
        assert tmpl.render(LANGUAGE='de', user_count=1) \
            == 'Ein Benutzer online'
        assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
    def test_trans_plural_with_functions(self):
        tmpl = i18n_env.get_template('plural2.html')
        # the count expression must be evaluated exactly once
        def get_user_count():
            get_user_count.called += 1
            return 1
        get_user_count.called = 0
        assert tmpl.render(LANGUAGE='de', get_user_count=get_user_count) \
            == '1s'
        assert get_user_count.called == 1
    def test_complex_plural(self):
        tmpl = i18n_env.from_string(
            '{% trans foo=42, count=2 %}{{ count }} item{% '
            'pluralize count %}{{ count }} items{% endtrans %}')
        assert tmpl.render() == '2 items'
        # pluralizing by a name that was never bound must be rejected
        pytest.raises(TemplateAssertionError, i18n_env.from_string,
                      '{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
    def test_trans_stringformatting(self):
        tmpl = i18n_env.get_template('stringformat.html')
        assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
    def test_trimmed(self):
        tmpl = i18n_env.from_string(
            '{%- trans trimmed %} hello\n world {% endtrans -%}')
        assert tmpl.render() == 'hello world'
    def test_trimmed_policy(self):
        s = '{%- trans %} hello\n world {% endtrans -%}'
        tmpl = i18n_env.from_string(s)
        trimmed_tmpl = i18n_env_trimmed.from_string(s)
        assert tmpl.render() == ' hello\n world '
        assert trimmed_tmpl.render() == 'hello world'
    def test_trimmed_policy_override(self):
        tmpl = i18n_env_trimmed.from_string(
            '{%- trans notrimmed %} hello\n world {% endtrans -%}')
        assert tmpl.render() == ' hello\n world '
    def test_trimmed_vars(self):
        tmpl = i18n_env.from_string(
            '{%- trans trimmed x="world" %} hello\n {{ x }} {% endtrans -%}')
        assert tmpl.render() == 'hello world'
    def test_trimmed_varname_trimmed(self):
        # unlikely variable name, but when used as a variable
        # it should not enable trimming
        tmpl = i18n_env.from_string(
            '{%- trans trimmed = "world" %} hello\n {{ trimmed }} '
            '{% endtrans -%}')
        assert tmpl.render() == ' hello\n world '
    def test_extract(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('ascii'))  # make python 3 happy
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'), [], {})) == [
            (2, 'gettext', u'Hello World', []),
            (3, 'gettext', u'Hello World', []),
            (4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
        ]
    def test_extract_trimmed(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {{ gettext(' Hello \n World') }}
        {% trans trimmed %} Hello \n World{% endtrans %}
        {% trans trimmed %}{{ users }} \n user
        {%- pluralize %}{{ users }} \n users{% endtrans %}
        '''.encode('ascii'))  # make python 3 happy
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'), [], {})) == [
            (2, 'gettext', u' Hello \n World', []),
            (4, 'gettext', u'Hello World', []),
            (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
        ]
    def test_extract_trimmed_option(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {{ gettext(' Hello \n World') }}
        {% trans %} Hello \n World{% endtrans %}
        {% trans %}{{ users }} \n user
        {%- pluralize %}{{ users }} \n users{% endtrans %}
        '''.encode('ascii'))  # make python 3 happy
        opts = {'trimmed': 'true'}
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'), [], opts)) == [
            (2, 'gettext', u' Hello \n World', []),
            (4, 'gettext', u'Hello World', []),
            (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
        ]
    def test_comment_extract(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {# trans first #}
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}{# trans second #}
        {#: third #}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('utf-8'))  # make python 3 happy
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'),
                                  ['trans', ':'], {})) == [
            (3, 'gettext', u'Hello World', ['first']),
            (4, 'gettext', u'Hello World', ['second']),
            (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None),
             ['third'])
        ]
@pytest.mark.ext
class TestScope(object):
    """Tests for nodes.Scope via a minimal custom extension."""
    def test_basic_scope_behavior(self):
        # This is what the old with statement compiled down to
        class ScopeExt(Extension):
            tags = set(['scope'])
            def parse(self, parser):
                node = nodes.Scope(lineno=next(parser.stream).lineno)
                assignments = []
                while parser.stream.current.type != 'block_end':
                    lineno = parser.stream.current.lineno
                    if assignments:
                        parser.stream.expect('comma')
                    target = parser.parse_assign_target()
                    parser.stream.expect('assign')
                    expr = parser.parse_expression()
                    assignments.append(nodes.Assign(target, expr, lineno=lineno))
                # scoped assignments run first, then the wrapped body
                node.body = assignments + \
                    list(parser.parse_statements(('name:endscope',),
                                                 drop_needle=True))
                return node
        env = Environment(extensions=[ScopeExt])
        tmpl = env.from_string('''\
        {%- scope a=1, b=2, c=b, d=e, e=5 -%}
            {{ a }}|{{ b }}|{{ c }}|{{ d }}|{{ e }}
        {%- endscope -%}
        ''')
        assert tmpl.render(b=3, e=4) == '1|2|2|4|5'
@pytest.mark.ext
class TestNewstyleInternationalization(object):
def test_trans(self):
tmpl = newstyle_i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) \
== 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string(
'{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
pytest.raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template('ngettext.html')
assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel'
assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel'
def test_autoescape_support(self):
env = Environment(extensions=['jinja2.ext.autoescape',
'jinja2.ext.i18n'])
env.install_gettext_callables(
lambda x: u'<strong>Wert: %(name)s</strong>',
lambda s, p, n: s, newstyle=True)
t = env.from_string('{% autoescape ae %}{{ gettext("foo", name='
'"<test>") }}{% endautoescape %}')
assert t.render(ae=True) == '<strong>Wert: <test></strong>'
assert t.render(ae=False) == '<strong>Wert: <test></strong>'
def test_autoescape_macros(self):
env = Environment(autoescape=False, extensions=['jinja2.ext.autoescape'])
template = (
'{% macro m() %}<html>{% endmacro %}'
'{% autoescape true %}{{ m() }}{% endautoescape %}'
)
assert env.from_string(template).render() == '<html>'
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template('ngettext_long.html')
assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel'
def test_num_called_num(self):
source = newstyle_i18n_env.compile('''
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
''', raw=True)
# quite hacky, but the only way to properly test that. The idea is
# that the generated code does not pass num twice (although that
# would work) for better performance. This only works on the
# newstyle gettext of course
assert re.search(r"u?'\%\(num\)s apple', u?'\%\(num\)s "
r"apples', 3", source) is not None
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template('transvars1.html')
t2 = newstyle_i18n_env.get_template('transvars2.html')
t3 = newstyle_i18n_env.get_template('transvars3.html')
assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1'
assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23'
assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42'
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template('novars.html')
assert t.render() == '%(hello)s'
t = newstyle_i18n_env.get_template('vars.html')
assert t.render(foo='42') == '42%(foo)s'
t = newstyle_i18n_env.get_template('explicitvars.html')
assert t.render() == '%(foo)s'
@pytest.mark.ext
class TestAutoEscape(object):
def test_scoped_setting(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape false %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=False)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape true %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
def test_nonvolatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
assert tmpl.render() == ' foo="<test>"'
tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render() == ' foo="&lt;test&gt;"'
def test_volatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render(foo=False) == ' foo="&lt;test&gt;"'
assert tmpl.render(foo=True) == ' foo="<test>"'
def test_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmpl = env.from_string(
'{% autoescape true %}{% set x = "<x>" %}{{ x }}'
'{% endautoescape %}{{ x }}{{ "<y>" }}')
assert tmpl.render(x=1) == '<x>1<y>'
def test_volatile_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmplsource = '''
{% autoescape val %}
{% macro foo(x) %}
[{{ x }}]
{% endmacro %}
{{ foo().__class__.__name__ }}
{% endautoescape %}
{{ '<testing>' }}
'''
tmpl = env.from_string(tmplsource)
assert tmpl.render(val=True).split()[0] == 'Markup'
assert tmpl.render(val=False).split()[0] == text_type.__name__
# looking at the source we should see <testing> there in raw
# (and then escaped as well)
env = Environment(extensions=['jinja2.ext.autoescape'])
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
def test_overlay_scopes(self):
class MagicScopeExtension(Extension):
tags = set(['overlay'])
def parse(self, parser):
node = nodes.OverlayScope(lineno=next(parser.stream).lineno)
node.body = list(parser.parse_statements(('name:endoverlay',),
drop_needle=True))
node.context = self.call_method('get_scope')
return node
def get_scope(self):
return {'x': [1, 2, 3]}
env = Environment(extensions=[MagicScopeExtension])
tmpl = env.from_string('''
{{- x }}|{% set z = 99 %}
{%- overlay %}
{{- y }}|{{ z }}|{% for item in x %}[{{ item }}]{% endfor %}
{%- endoverlay %}|
{{- x -}}
''')
assert tmpl.render(x=42, y=23) == '42|23|99|[1][2][3]|42'
| 38.857621 | 81 | 0.541254 |
import re
import pytest
from jinja2 import Environment, DictLoader, contextfunction, nodes
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2._compat import BytesIO, itervalues, text_type
importable_object = 23
_gettext_re = re.compile(r'_\((.*?)\)', re.DOTALL)
i18n_templates = {
'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
'{% block body %}{% endblock %}',
'child.html': '{% extends "master.html" %}{% block body %}'
'{% trans %}watch out{% endtrans %}{% endblock %}',
'plural.html': '{% trans user_count %}One user online{% pluralize %}'
'{{ user_count }} users online{% endtrans %}',
'plural2.html': '{% trans user_count=get_user_count() %}{{ user_count }}s'
'{% pluralize %}{{ user_count }}p{% endtrans %}',
'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}'
}
newstyle_i18n_templates = {
'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
'{% block body %}{% endblock %}',
'child.html': '{% extends "master.html" %}{% block body %}'
'{% trans %}watch out{% endtrans %}{% endblock %}',
'plural.html': '{% trans user_count %}One user online{% pluralize %}'
'{{ user_count }} users online{% endtrans %}',
'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}',
'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}'
'{{ num }} apples{% endtrans %}',
'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}',
'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}',
'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}',
'novars.html': '{% trans %}%(hello)s{% endtrans %}',
'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}',
'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}'
}
languages = {
'de': {
'missing': u'fehlend',
'watch out': u'pass auf',
'One user online': u'Ein Benutzer online',
'%(user_count)s users online': u'%(user_count)s Benutzer online',
'User: %(num)s': u'Benutzer: %(num)s',
'User: %(count)s': u'Benutzer: %(count)s',
'%(num)s apple': u'%(num)s Apfel',
'%(num)s apples': u'%(num)s Äpfel'
}
}
@contextfunction
def gettext(context, string):
language = context.get('LANGUAGE', 'en')
return languages.get(language, {}).get(string, string)
@contextfunction
def ngettext(context, s, p, n):
language = context.get('LANGUAGE', 'en')
if n != 1:
return languages.get(language, {}).get(p, p)
return languages.get(language, {}).get(s, s)
i18n_env = Environment(
loader=DictLoader(i18n_templates),
extensions=['jinja2.ext.i18n']
)
i18n_env.globals.update({
'_': gettext,
'gettext': gettext,
'ngettext': ngettext
})
i18n_env_trimmed = Environment(extensions=['jinja2.ext.i18n'])
i18n_env_trimmed.policies['ext.i18n.trimmed'] = True
i18n_env_trimmed.globals.update({
'_': gettext,
'gettext': gettext,
'ngettext': ngettext
})
newstyle_i18n_env = Environment(
loader=DictLoader(newstyle_i18n_templates),
extensions=['jinja2.ext.i18n']
)
newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True)
class ExampleExtension(Extension):
tags = set(['test'])
ext_attr = 42
def parse(self, parser):
return nodes.Output([self.call_method('_dump', [
nodes.EnvironmentAttribute('sandboxed'),
self.attr('ext_attr'),
nodes.ImportedName(__name__ + '.importable_object'),
nodes.ContextReference()
])]).set_lineno(next(parser.stream).lineno)
def _dump(self, sandboxed, ext_attr, imported_object, context):
return '%s|%s|%s|%s' % (
sandboxed,
ext_attr,
imported_object,
context.blocks
)
class PreprocessorExtension(Extension):
def preprocess(self, source, name, filename=None):
return source.replace('[[TEST]]', '({{ foo }})')
class StreamFilterExtension(Extension):
def filter_stream(self, stream):
for token in stream:
if token.type == 'data':
for t in self.interpolate(token):
yield t
else:
yield token
def interpolate(self, token):
pos = 0
end = len(token.value)
lineno = token.lineno
while 1:
match = _gettext_re.search(token.value, pos)
if match is None:
break
value = token.value[pos:match.start()]
if value:
yield Token(lineno, 'data', value)
lineno += count_newlines(token.value)
yield Token(lineno, 'variable_begin', None)
yield Token(lineno, 'name', 'gettext')
yield Token(lineno, 'lparen', None)
yield Token(lineno, 'string', match.group(1))
yield Token(lineno, 'rparen', None)
yield Token(lineno, 'variable_end', None)
pos = match.end()
if pos < end:
yield Token(lineno, 'data', token.value[pos:])
@pytest.mark.ext
class TestExtensions(object):
def test_extend_late(self):
env = Environment()
env.add_extension('jinja2.ext.autoescape')
t = env.from_string(
'{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
assert t.render() == '<test>'
def test_loop_controls(self):
env = Environment(extensions=['jinja2.ext.loopcontrols'])
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item % 2 == 0 %}{% continue %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '13'
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item > 2 %}{% break %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '12'
def test_do(self):
env = Environment(extensions=['jinja2.ext.do'])
tmpl = env.from_string('''
{%- set items = [] %}
{%- for char in "foo" %}
{%- do items.append(loop.index0 ~ char) %}
{%- endfor %}{{ items|join(', ') }}''')
assert tmpl.render() == '0f, 1o, 2o'
def test_extension_nodes(self):
env = Environment(extensions=[ExampleExtension])
tmpl = env.from_string('{% test %}')
assert tmpl.render() == 'False|42|23|{}'
def test_identifier(self):
assert ExampleExtension.identifier == __name__ + '.ExampleExtension'
def test_rebinding(self):
original = Environment(extensions=[ExampleExtension])
overlay = original.overlay()
for env in original, overlay:
for ext in itervalues(env.extensions):
assert ext.environment is env
def test_preprocessor_extension(self):
env = Environment(extensions=[PreprocessorExtension])
tmpl = env.from_string('{[[TEST]]}')
assert tmpl.render(foo=42) == '{(42)}'
def test_streamfilter_extension(self):
env = Environment(extensions=[StreamFilterExtension])
env.globals['gettext'] = lambda x: x.upper()
tmpl = env.from_string('Foo _(bar) Baz')
out = tmpl.render()
assert out == 'Foo BAR Baz'
def test_extension_ordering(self):
class T1(Extension):
priority = 1
class T2(Extension):
priority = 2
env = Environment(extensions=[T1, T2])
ext = list(env.iter_extensions())
assert ext[0].__class__ is T1
assert ext[1].__class__ is T2
def test_debug(self):
env = Environment(extensions=['jinja2.ext.debug'])
t = env.from_string('Hello\n{% debug %}\nGoodbye')
out = t.render()
for value in ("context", "cycler", "filters", "abs", "tests", "!="):
assert "'{}'".format(value) in out
@pytest.mark.ext
class TestInternationalization(object):
def test_trans(self):
tmpl = i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) \
== 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_trans_plural_with_functions(self):
tmpl = i18n_env.get_template('plural2.html')
def get_user_count():
get_user_count.called += 1
return 1
get_user_count.called = 0
assert tmpl.render(LANGUAGE='de', get_user_count=get_user_count) \
== '1s'
assert get_user_count.called == 1
def test_complex_plural(self):
tmpl = i18n_env.from_string(
'{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
pytest.raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_trimmed(self):
tmpl = i18n_env.from_string(
'{%- trans trimmed %} hello\n world {% endtrans -%}')
assert tmpl.render() == 'hello world'
def test_trimmed_policy(self):
s = '{%- trans %} hello\n world {% endtrans -%}'
tmpl = i18n_env.from_string(s)
trimmed_tmpl = i18n_env_trimmed.from_string(s)
assert tmpl.render() == ' hello\n world '
assert trimmed_tmpl.render() == 'hello world'
def test_trimmed_policy_override(self):
tmpl = i18n_env_trimmed.from_string(
'{%- trans notrimmed %} hello\n world {% endtrans -%}')
assert tmpl.render() == ' hello\n world '
def test_trimmed_vars(self):
tmpl = i18n_env.from_string(
'{%- trans trimmed x="world" %} hello\n {{ x }} {% endtrans -%}')
assert tmpl.render() == 'hello world'
def test_trimmed_varname_trimmed(self):
tmpl = i18n_env.from_string(
'{%- trans trimmed = "world" %} hello\n {{ trimmed }} '
'{% endtrans -%}')
assert tmpl.render() == ' hello\n world '
def test_extract(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
'''.encode('ascii'))
assert list(babel_extract(source,
('gettext', 'ngettext', '_'), [], {})) == [
(2, 'gettext', u'Hello World', []),
(3, 'gettext', u'Hello World', []),
(4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
]
def test_extract_trimmed(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{{ gettext(' Hello \n World') }}
{% trans trimmed %} Hello \n World{% endtrans %}
{% trans trimmed %}{{ users }} \n user
{%- pluralize %}{{ users }} \n users{% endtrans %}
'''.encode('ascii'))
assert list(babel_extract(source,
('gettext', 'ngettext', '_'), [], {})) == [
(2, 'gettext', u' Hello \n World', []),
(4, 'gettext', u'Hello World', []),
(6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
]
def test_extract_trimmed_option(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{{ gettext(' Hello \n World') }}
{% trans %} Hello \n World{% endtrans %}
{% trans %}{{ users }} \n user
{%- pluralize %}{{ users }} \n users{% endtrans %}
'''.encode('ascii'))
opts = {'trimmed': 'true'}
assert list(babel_extract(source,
('gettext', 'ngettext', '_'), [], opts)) == [
(2, 'gettext', u' Hello \n World', []),
(4, 'gettext', u'Hello World', []),
(6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
]
def test_comment_extract(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{# trans first #}
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}{# trans second #}
{#: third #}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
'''.encode('utf-8'))
assert list(babel_extract(source,
('gettext', 'ngettext', '_'),
['trans', ':'], {})) == [
(3, 'gettext', u'Hello World', ['first']),
(4, 'gettext', u'Hello World', ['second']),
(6, 'ngettext', (u'%(users)s user', u'%(users)s users', None),
['third'])
]
@pytest.mark.ext
class TestScope(object):
def test_basic_scope_behavior(self):
class ScopeExt(Extension):
tags = set(['scope'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endscope',),
drop_needle=True))
return node
env = Environment(extensions=[ScopeExt])
tmpl = env.from_string('''\
{%- scope a=1, b=2, c=b, d=e, e=5 -%}
{{ a }}|{{ b }}|{{ c }}|{{ d }}|{{ e }}
{%- endscope -%}
''')
assert tmpl.render(b=3, e=4) == '1|2|2|4|5'
@pytest.mark.ext
class TestNewstyleInternationalization(object):
def test_trans(self):
tmpl = newstyle_i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) \
== 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string(
'{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
pytest.raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template('ngettext.html')
assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel'
assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel'
def test_autoescape_support(self):
env = Environment(extensions=['jinja2.ext.autoescape',
'jinja2.ext.i18n'])
env.install_gettext_callables(
lambda x: u'<strong>Wert: %(name)s</strong>',
lambda s, p, n: s, newstyle=True)
t = env.from_string('{% autoescape ae %}{{ gettext("foo", name='
'"<test>") }}{% endautoescape %}')
assert t.render(ae=True) == '<strong>Wert: <test></strong>'
assert t.render(ae=False) == '<strong>Wert: <test></strong>'
def test_autoescape_macros(self):
env = Environment(autoescape=False, extensions=['jinja2.ext.autoescape'])
template = (
'{% macro m() %}<html>{% endmacro %}'
'{% autoescape true %}{{ m() }}{% endautoescape %}'
)
assert env.from_string(template).render() == '<html>'
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template('ngettext_long.html')
assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel'
def test_num_called_num(self):
source = newstyle_i18n_env.compile('''
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
''', raw=True)
assert re.search(r"u?'\%\(num\)s apple', u?'\%\(num\)s "
r"apples', 3", source) is not None
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template('transvars1.html')
t2 = newstyle_i18n_env.get_template('transvars2.html')
t3 = newstyle_i18n_env.get_template('transvars3.html')
assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1'
assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23'
assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42'
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template('novars.html')
assert t.render() == '%(hello)s'
t = newstyle_i18n_env.get_template('vars.html')
assert t.render(foo='42') == '42%(foo)s'
t = newstyle_i18n_env.get_template('explicitvars.html')
assert t.render() == '%(foo)s'
@pytest.mark.ext
class TestAutoEscape(object):
def test_scoped_setting(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape false %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=False)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape true %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
def test_nonvolatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
assert tmpl.render() == ' foo="<test>"'
tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render() == ' foo="&lt;test&gt;"'
def test_volatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render(foo=False) == ' foo="&lt;test&gt;"'
assert tmpl.render(foo=True) == ' foo="<test>"'
def test_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmpl = env.from_string(
'{% autoescape true %}{% set x = "<x>" %}{{ x }}'
'{% endautoescape %}{{ x }}{{ "<y>" }}')
assert tmpl.render(x=1) == '<x>1<y>'
def test_volatile_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmplsource = '''
{% autoescape val %}
{% macro foo(x) %}
[{{ x }}]
{% endmacro %}
{{ foo().__class__.__name__ }}
{% endautoescape %}
{{ '<testing>' }}
'''
tmpl = env.from_string(tmplsource)
assert tmpl.render(val=True).split()[0] == 'Markup'
assert tmpl.render(val=False).split()[0] == text_type.__name__
env = Environment(extensions=['jinja2.ext.autoescape'])
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
def test_overlay_scopes(self):
class MagicScopeExtension(Extension):
tags = set(['overlay'])
def parse(self, parser):
node = nodes.OverlayScope(lineno=next(parser.stream).lineno)
node.body = list(parser.parse_statements(('name:endoverlay',),
drop_needle=True))
node.context = self.call_method('get_scope')
return node
def get_scope(self):
return {'x': [1, 2, 3]}
env = Environment(extensions=[MagicScopeExtension])
tmpl = env.from_string('''
{{- x }}|{% set z = 99 %}
{%- overlay %}
{{- y }}|{{ z }}|{% for item in x %}[{{ item }}]{% endfor %}
{%- endoverlay %}|
{{- x -}}
''')
assert tmpl.render(x=42, y=23) == '42|23|99|[1][2][3]|42'
| true | true |
1c349c39467a3ca68dc775d9b3b1980ea1bd73a5 | 7,700 | py | Python | light_mappo-main/algorithms/algorithm/rMAPPOPolicy.py | daixiangxiang/Reinforcement_learning | 90aabba61c609c5afd445205b94ebd87a309ff7c | [
"MIT"
] | null | null | null | light_mappo-main/algorithms/algorithm/rMAPPOPolicy.py | daixiangxiang/Reinforcement_learning | 90aabba61c609c5afd445205b94ebd87a309ff7c | [
"MIT"
] | null | null | null | light_mappo-main/algorithms/algorithm/rMAPPOPolicy.py | daixiangxiang/Reinforcement_learning | 90aabba61c609c5afd445205b94ebd87a309ff7c | [
"MIT"
] | null | null | null | """
# @Time : 2021/7/1 6:53 下午
# @Author : hezhiqiang01
# @Email : hezhiqiang01@baidu.com
# @File : rMAPPOPolicy.py
"""
import torch
from algorithms.algorithm.r_actor_critic import R_Actor, R_Critic
from utils.util import update_linear_schedule
#策略网络,网络定义
#每一个智能体的观测obs_space为一个14维的向量,
# 有两个智能体,cent_obs_space为一个28纬的向量,
# 单个智能体的动作空间act_space为一个离散的5个维度的向量。
#cent_obs_space=n×obs_space其中n为智能体的个数,输出为一个V值,这个V值用于actor的更新。
class RMAPPOPolicy:
"""
MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions.
:param args: (argparse.Namespace) arguments containing relevant model and policy information.
:param obs_space: (gym.Space) observation space.
:param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).
:param action_space: (gym.Space) action space.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = cent_obs_space
self.act_space = act_space
self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)
self.critic = R_Critic(args, self.share_obs_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
lr=self.lr, eps=self.opti_eps,
weight_decay=self.weight_decay)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=self.critic_lr,
eps=self.opti_eps,
weight_decay=self.weight_decay)
def lr_decay(self, episode, episodes):
"""
Decay the actor and critic learning rates.
:param episode: (int) current training episode.
:param episodes: (int) total number of training episodes.
"""
update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)
update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)
def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,
deterministic=False):
#会调用actor去获取动作和动作的对数概率
"""
Compute actions and value function predictions for the given inputs.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
:return values: (torch.Tensor) value function predictions.
:return actions: (torch.Tensor) actions to take.
:return action_log_probs: (torch.Tensor) log probabilities of chosen actions.
:return rnn_states_actor: (torch.Tensor) updated actor network RNN states.
:return rnn_states_critic: (torch.Tensor) updated critic network RNN states.
"""
actions, action_log_probs, rnn_states_actor = self.actor(obs,
rnn_states_actor,
masks,
available_actions,
deterministic)
values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)
return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic
def get_values(self, cent_obs, rnn_states_critic, masks):
"""
Get value function predictions.
:param cent_obs (np.ndarray): centralized input to the critic.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:return values: (torch.Tensor) value function predictions.
"""
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values
def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,
available_actions=None, active_masks=None):
"""
Get action logprobs / entropy and value function predictions for actor update.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param action: (np.ndarray) actions whose log probabilites and entropy to compute.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return values: (torch.Tensor) value function predictions.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,
rnn_states_actor,
action,
masks,
available_actions,
active_masks)
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
#critic网络去获取对于cent_obs的状态值函数的输出:
#obs这里的shape是(5*2, 14),输出actions的shape, 和action_log_probs的shape都为(10 , 1)。
return values, action_log_probs, dist_entropy
def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):
"""
Compute actions using the given inputs.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
"""
actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return actions, rnn_states_actor
| 54.225352 | 120 | 0.623117 |
import torch
from algorithms.algorithm.r_actor_critic import R_Actor, R_Critic
from utils.util import update_linear_schedule
class RMAPPOPolicy:
def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = cent_obs_space
self.act_space = act_space
self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)
self.critic = R_Critic(args, self.share_obs_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
lr=self.lr, eps=self.opti_eps,
weight_decay=self.weight_decay)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=self.critic_lr,
eps=self.opti_eps,
weight_decay=self.weight_decay)
def lr_decay(self, episode, episodes):
update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)
update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)
def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,
deterministic=False):
actions, action_log_probs, rnn_states_actor = self.actor(obs,
rnn_states_actor,
masks,
available_actions,
deterministic)
values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)
return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic
def get_values(self, cent_obs, rnn_states_critic, masks):
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values
def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,
available_actions=None, active_masks=None):
action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,
rnn_states_actor,
action,
masks,
available_actions,
active_masks)
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values, action_log_probs, dist_entropy
def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):
actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return actions, rnn_states_actor
| true | true |
1c349d1c7cd02e86d19efc0c7a149bb9a4420182 | 160 | py | Python | scripts/class-3/espiral_quadrado_colorido.py | GabrielMMelo/python4teens | 287f79ada2f8ded669f6e26210e1407202e8ff80 | [
"CC-BY-4.0"
] | 2 | 2021-04-15T13:23:16.000Z | 2022-02-01T18:31:58.000Z | scripts/class-3/espiral_quadrado_colorido.py | GabrielMMelo/python4teens | 287f79ada2f8ded669f6e26210e1407202e8ff80 | [
"CC-BY-4.0"
] | null | null | null | scripts/class-3/espiral_quadrado_colorido.py | GabrielMMelo/python4teens | 287f79ada2f8ded669f6e26210e1407202e8ff80 | [
"CC-BY-4.0"
] | null | null | null | import turtle
t = turtle.Pen()
colors = ['red', 'yellow', 'blue', 'green']
for x in range(100):
t.pencolor(colors[x % 4])
t.forward(x)
t.left(91)
| 16 | 43 | 0.58125 | import turtle
t = turtle.Pen()
colors = ['red', 'yellow', 'blue', 'green']
for x in range(100):
t.pencolor(colors[x % 4])
t.forward(x)
t.left(91)
| true | true |
1c349db75db5924d77d96f3fb786e6ae7ddd3095 | 2,193 | py | Python | py/winnt/ntpfapi.py | gregzakh/sketches | acbc573b9e67228dac21a94b597d89e2ea5cd755 | [
"MIT"
] | 1 | 2022-01-07T13:18:51.000Z | 2022-01-07T13:18:51.000Z | py/winnt/ntpfapi.py | gregzakh/sketches | acbc573b9e67228dac21a94b597d89e2ea5cd755 | [
"MIT"
] | null | null | null | py/winnt/ntpfapi.py | gregzakh/sketches | acbc573b9e67228dac21a94b597d89e2ea5cd755 | [
"MIT"
] | 4 | 2020-02-11T01:00:11.000Z | 2022-01-07T14:24:38.000Z | import wintypes as nt
from enum import IntEnum
# ====================================================================================
PREFETCHER_INFORMATION_CLASS = IntEnum('PREFETCHER_INFORMATION_CLASS', (
'PrefetcherRetrieveTrace',
'PrefetcherSystemParameters',
'PrefetcherBootPhase',
'PrefetcherRetrieveBootLoaderTrace',
'PrefetcherBootControl',
), start=1)
class PREFETCHER_INFORMATION(nt.CStruct):
_fields_ = (
('Version', nt.ULONG),
('Magic', nt.ULONG), # kuhC
('_PrefetcherInformationClass', nt.ULONG),
('PrefetcherInformation', nt.PVOID),
('PrefetcherInformationLengh', nt.ULONG),
)
@property
def PrefetcherInformationClass(self):
return PREFETCHER_INFORMATION_CLASS(
self._PrefetcherInformationClass
).name if self._PrefetcherInformationClass else None
SUPERFETCH_INFORMATION_CLASS = IntEnum('SUPERFETCH_INFORMATION_CLASS', (
'SuperfetchRetrieveTrace',
'SuperfetchSystemParameters',
'SuperfetchLogEvent',
'SuperfetchGenerateTrace',
'SuperfetchPrefetch',
'SuperfetchPfnQuery',
'SuperfetchPfnSetPriority',
'SuperfetchPrivSourceQuery',
'SuperfetchSequenceNumberQuery',
'SuperfetchScenarioPhase',
'SuperfetchWorkerPriority',
'SuperfetchScenarioQuery',
'SuperfetchScenarioPrefetch',
'SuperfetchRobustnessControl',
'SuperfetchTimeControl',
'SuperfetchMemoryListQuery',
'SuperfetchMemoryRangesQuery',
'SuperfetchTracingControl',
'SuperfetchTrimWhileAgingControl',
'SuperfetchRepurposedByPrefetch',
'SuperfetchInformationMax',
), start=1)
class SUPERFETCH_INFORMATION(nt.CStruct):
_fields_ = (
('Version', nt.ULONG),
('Magic', nt.ULONG), # kuhC
('_SuperfetchInformationClass', nt.ULONG),
('SuperfetchInformation', nt.PVOID),
('SuperfetchInformationLength', nt.ULONG),
)
@property
def InfoClass(self):
return SUPERFETCH_INFORMATION_CLASS(
self._SuperfetchInformationClass
).name if self._SuperfetchInformationClass else None
| 34.265625 | 87 | 0.652987 | import wintypes as nt
from enum import IntEnum
PREFETCHER_INFORMATION_CLASS = IntEnum('PREFETCHER_INFORMATION_CLASS', (
'PrefetcherRetrieveTrace',
'PrefetcherSystemParameters',
'PrefetcherBootPhase',
'PrefetcherRetrieveBootLoaderTrace',
'PrefetcherBootControl',
), start=1)
class PREFETCHER_INFORMATION(nt.CStruct):
_fields_ = (
('Version', nt.ULONG),
('Magic', nt.ULONG),
('_PrefetcherInformationClass', nt.ULONG),
('PrefetcherInformation', nt.PVOID),
('PrefetcherInformationLengh', nt.ULONG),
)
@property
def PrefetcherInformationClass(self):
return PREFETCHER_INFORMATION_CLASS(
self._PrefetcherInformationClass
).name if self._PrefetcherInformationClass else None
SUPERFETCH_INFORMATION_CLASS = IntEnum('SUPERFETCH_INFORMATION_CLASS', (
'SuperfetchRetrieveTrace',
'SuperfetchSystemParameters',
'SuperfetchLogEvent',
'SuperfetchGenerateTrace',
'SuperfetchPrefetch',
'SuperfetchPfnQuery',
'SuperfetchPfnSetPriority',
'SuperfetchPrivSourceQuery',
'SuperfetchSequenceNumberQuery',
'SuperfetchScenarioPhase',
'SuperfetchWorkerPriority',
'SuperfetchScenarioQuery',
'SuperfetchScenarioPrefetch',
'SuperfetchRobustnessControl',
'SuperfetchTimeControl',
'SuperfetchMemoryListQuery',
'SuperfetchMemoryRangesQuery',
'SuperfetchTracingControl',
'SuperfetchTrimWhileAgingControl',
'SuperfetchRepurposedByPrefetch',
'SuperfetchInformationMax',
), start=1)
class SUPERFETCH_INFORMATION(nt.CStruct):
_fields_ = (
('Version', nt.ULONG),
('Magic', nt.ULONG),
('_SuperfetchInformationClass', nt.ULONG),
('SuperfetchInformation', nt.PVOID),
('SuperfetchInformationLength', nt.ULONG),
)
@property
def InfoClass(self):
return SUPERFETCH_INFORMATION_CLASS(
self._SuperfetchInformationClass
).name if self._SuperfetchInformationClass else None
| true | true |
1c349e0ca0b1a13e7b3972f3338a8e065eead2a7 | 4,430 | py | Python | scripts/linreg_2d_bayes_demo.py | GSxiongkun/pyprobml | 71b2ce90632b80206760f93ab2a1926ce6c8c490 | [
"MIT"
] | 1 | 2020-03-01T09:01:37.000Z | 2020-03-01T09:01:37.000Z | scripts/linreg_2d_bayes_demo.py | etarakci-hvl/pyprobml | a3fe8086844ae0885e3f21d30be5f2e6448cdeba | [
"MIT"
] | null | null | null | scripts/linreg_2d_bayes_demo.py | etarakci-hvl/pyprobml | a3fe8086844ae0885e3f21d30be5f2e6448cdeba | [
"MIT"
] | null | null | null | #Bayesian inference for simple linear regression with known noise variance
#The goal is to reproduce fig 3.7 from Bishop's book.
#We fit the linear model f(x,w) = w0 + w1*x and plot the posterior over w.
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = os.path.join(os.environ["PYPROBML"], "figures")
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from scipy.stats import uniform, norm, multivariate_normal
np.random.seed(0)
#Number of samples to draw from posterior distribution of parameters.
NSamples = 10
#Each of these corresponds to a row in the graphic and an amount of data the posterior will reflect.
#First one must be zero, for the prior.
DataIndices = [0,1,2,100]
#True regression parameters that we wish to recover. Do not set these outside the range of [-1,1]
a0 = -0.3
a1 = 0.5
NPoints = 100 #Number of (x,y) training points
noiseSD = 0.2 #True noise standard deviation
priorPrecision = 2.0 #Fix the prior precision, alpha. We will use a zero-mean isotropic Gaussian.
likelihoodSD = noiseSD # Assume the likelihood precision, beta, is known.
likelihoodPrecision = 1.0/(likelihoodSD**2)
#Because of how axises are set up, x and y values should be in the same range as the coefficients.
x = 2*uniform().rvs(NPoints) - 1
y = a0 + a1*x + norm(0, noiseSD).rvs(NPoints)
def MeanCovPost(x, y):
#Given data vectors x and y, this returns the posterior mean and covariance.
X = np.array([[1,x1] for x1 in x])
Precision = np.diag([priorPrecision]*2) + likelihoodPrecision*X.T.dot(X)
Cov = np.linalg.inv(Precision)
Mean = likelihoodPrecision*Cov.dot(X.T.dot(y))
return {'Mean':Mean,'Cov':Cov}
def GaussPdfMaker(mean,cov):
#For a given (mean, cov) pair, this returns a vectorized pdf function.
def out(w1,w2):
return multivariate_normal.pdf([w1,w2],mean=mean,cov=cov)
return np.vectorize(out)
def LikeFMaker(x0,y0):
#For a given (x,y) pair, this returns a vectorized likelhood function.
def out(w1,w2):
err = y0 - (w1 + w2*x0)
return norm.pdf(err,loc=0,scale=likelihoodSD)
return np.vectorize(out)
#Grid space for which values will be determined, which is shared between the coefficient space and data space.
grid = np.linspace(-1,1,50)
Xg = np.array([[1,g] for g in grid])
G1, G2 = np.meshgrid(grid,grid)
#If we have many samples of lines, we make them a bit transparent.
alph = 5.0/NSamples if NSamples>50 else 1.0
#A function to make some common adjustments to our subplots.
def adjustgraph(whitemark):
if whitemark:
plt.ylabel(r'$w_1$')
plt.xlabel(r'$w_0$')
plt.scatter(a0,a1,marker='+',color='white',s=100)
else:
plt.ylabel('y')
plt.xlabel('x')
plt.ylim([-1,1])
plt.xlim([-1,1])
plt.xticks([-1,0,1])
plt.yticks([-1,0,1])
return None
figcounter = 1
fig = plt.figure(figsize=(10,10))
#Top left plot only has a title.
ax = fig.add_subplot(len(DataIndices),3,figcounter)
ax.set_title('likelihood')
plt.axis('off')
#This builds the graph one row at a time.
for di in DataIndices:
if di == 0:
postM = [0,0]
postCov = np.diag([1.0/priorPrecision]*2)
else:
Post = MeanCovPost(x[:di],y[:di])
postM = Post['Mean']
postCov = Post['Cov']
#Left graph
figcounter += 1
fig.add_subplot(len(DataIndices),3,figcounter)
likfunc = LikeFMaker(x[di-1],y[di-1])
plt.contourf(G1, G2, likfunc(G1,G2), 100)
adjustgraph(True)
#Middle graph
postfunc = GaussPdfMaker(postM,postCov)
figcounter += 1
ax = fig.add_subplot(len(DataIndices),3,figcounter)
plt.contourf(G1, G2, postfunc(G1,G2), 100)
adjustgraph(True)
#Set title if this is the top middle graph
if figcounter == 2:
ax.set_title('prior/posterior')
#Right graph
Samples = multivariate_normal(postM,postCov).rvs(NSamples)
Lines = Xg.dot(Samples.T)
figcounter += 1
ax = fig.add_subplot(len(DataIndices),3,figcounter)
if di != 0:
plt.scatter(x[:di],y[:di], s=140, facecolors='none', edgecolors='b')
for j in range(Lines.shape[1]):
plt.plot(grid,Lines[:,j],linewidth=2,color='r',alpha=alph)
#Set title if this is the top right graph
if figcounter == 3:
ax.set_title('data space')
adjustgraph(False)
fig.tight_layout()
plt.show()
save_fig('bayesLinRegPlot2dB.pdf')
| 33.308271 | 110 | 0.669074 |
#We fit the linear model f(x,w) = w0 + w1*x and plot the posterior over w.
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = os.path.join(os.environ["PYPROBML"], "figures")
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from scipy.stats import uniform, norm, multivariate_normal
np.random.seed(0)
#Number of samples to draw from posterior distribution of parameters.
NSamples = 10
#Each of these corresponds to a row in the graphic and an amount of data the posterior will reflect.
#First one must be zero, for the prior.
DataIndices = [0,1,2,100]
#True regression parameters that we wish to recover. Do not set these outside the range of [-1,1]
a0 = -0.3
a1 = 0.5
NPoints = 100 #Number of (x,y) training points
noiseSD = 0.2 #True noise standard deviation
priorPrecision = 2.0 #Fix the prior precision, alpha. We will use a zero-mean isotropic Gaussian.
likelihoodSD = noiseSD # Assume the likelihood precision, beta, is known.
likelihoodPrecision = 1.0/(likelihoodSD**2)
#Because of how axises are set up, x and y values should be in the same range as the coefficients.
x = 2*uniform().rvs(NPoints) - 1
y = a0 + a1*x + norm(0, noiseSD).rvs(NPoints)
def MeanCovPost(x, y):
#Given data vectors x and y, this returns the posterior mean and covariance.
X = np.array([[1,x1] for x1 in x])
Precision = np.diag([priorPrecision]*2) + likelihoodPrecision*X.T.dot(X)
Cov = np.linalg.inv(Precision)
Mean = likelihoodPrecision*Cov.dot(X.T.dot(y))
return {'Mean':Mean,'Cov':Cov}
def GaussPdfMaker(mean,cov):
#For a given (mean, cov) pair, this returns a vectorized pdf function.
def out(w1,w2):
return multivariate_normal.pdf([w1,w2],mean=mean,cov=cov)
return np.vectorize(out)
def LikeFMaker(x0,y0):
#For a given (x,y) pair, this returns a vectorized likelhood function.
def out(w1,w2):
err = y0 - (w1 + w2*x0)
return norm.pdf(err,loc=0,scale=likelihoodSD)
return np.vectorize(out)
#Grid space for which values will be determined, which is shared between the coefficient space and data space.
grid = np.linspace(-1,1,50)
Xg = np.array([[1,g] for g in grid])
G1, G2 = np.meshgrid(grid,grid)
#If we have many samples of lines, we make them a bit transparent.
alph = 5.0/NSamples if NSamples>50 else 1.0
#A function to make some common adjustments to our subplots.
def adjustgraph(whitemark):
if whitemark:
plt.ylabel(r'$w_1$')
plt.xlabel(r'$w_0$')
plt.scatter(a0,a1,marker='+',color='white',s=100)
else:
plt.ylabel('y')
plt.xlabel('x')
plt.ylim([-1,1])
plt.xlim([-1,1])
plt.xticks([-1,0,1])
plt.yticks([-1,0,1])
return None
figcounter = 1
fig = plt.figure(figsize=(10,10))
#Top left plot only has a title.
ax = fig.add_subplot(len(DataIndices),3,figcounter)
ax.set_title('likelihood')
plt.axis('off')
#This builds the graph one row at a time.
for di in DataIndices:
if di == 0:
postM = [0,0]
postCov = np.diag([1.0/priorPrecision]*2)
else:
Post = MeanCovPost(x[:di],y[:di])
postM = Post['Mean']
postCov = Post['Cov']
#Left graph
figcounter += 1
fig.add_subplot(len(DataIndices),3,figcounter)
likfunc = LikeFMaker(x[di-1],y[di-1])
plt.contourf(G1, G2, likfunc(G1,G2), 100)
adjustgraph(True)
#Middle graph
postfunc = GaussPdfMaker(postM,postCov)
figcounter += 1
ax = fig.add_subplot(len(DataIndices),3,figcounter)
plt.contourf(G1, G2, postfunc(G1,G2), 100)
adjustgraph(True)
#Set title if this is the top middle graph
if figcounter == 2:
ax.set_title('prior/posterior')
#Right graph
Samples = multivariate_normal(postM,postCov).rvs(NSamples)
Lines = Xg.dot(Samples.T)
figcounter += 1
ax = fig.add_subplot(len(DataIndices),3,figcounter)
if di != 0:
plt.scatter(x[:di],y[:di], s=140, facecolors='none', edgecolors='b')
for j in range(Lines.shape[1]):
plt.plot(grid,Lines[:,j],linewidth=2,color='r',alpha=alph)
#Set title if this is the top right graph
if figcounter == 3:
ax.set_title('data space')
adjustgraph(False)
fig.tight_layout()
plt.show()
save_fig('bayesLinRegPlot2dB.pdf')
| true | true |
1c349ebd61d9069a9e48f5a9811d7b1aa8425dc1 | 2,095 | py | Python | api/app/resources/bookings/booking/booking_recurring_delete.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | [
"Apache-2.0"
] | null | null | null | api/app/resources/bookings/booking/booking_recurring_delete.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | [
"Apache-2.0"
] | null | null | null | api/app/resources/bookings/booking/booking_recurring_delete.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | [
"Apache-2.0"
] | null | null | null | '''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from flask import abort, g, request
from flask_restx import Resource
from app.models.bookings import Booking
from app.schemas.bookings import BookingSchema
from app.models.theq import CSR
from qsystem import api, db, oidc
from datetime import datetime, timedelta, date
import pytz
from app.utilities.auth_util import Role, has_any_role
@api.route("/bookings/recurring/<string:id>", methods=["DELETE"])
class BookingRecurringDelete(Resource):
booking_schema = BookingSchema
timezone = pytz.timezone("US/Pacific")
@oidc.accept_token(require_token=True)
@has_any_role(roles=[Role.internal_user.value])
def delete(self, id):
today = datetime.today()
string_today = today.strftime('%Y-%m-%d')
print("==> In the python DELETE /bookings/recurring/<id> endpoint")
csr = CSR.find_by_username(g.oidc_token_info['username'])
bookings = Booking.query.filter_by(recurring_uuid=id)\
.filter(db.func.date(Booking.start_time) >= string_today)\
.all()
for booking in bookings:
if booking.office_id != csr.office_id and csr.liaison_designate != 1:
abort(404)
if booking.start_time.year == today.year and booking.start_time.month == today.month \
and booking.start_time.day == today.day and booking.start_time.hour <= 5:
continue
db.session.delete(booking)
db.session.commit()
return {},204 | 36.12069 | 98 | 0.692601 |
from flask import abort, g, request
from flask_restx import Resource
from app.models.bookings import Booking
from app.schemas.bookings import BookingSchema
from app.models.theq import CSR
from qsystem import api, db, oidc
from datetime import datetime, timedelta, date
import pytz
from app.utilities.auth_util import Role, has_any_role
@api.route("/bookings/recurring/<string:id>", methods=["DELETE"])
class BookingRecurringDelete(Resource):
booking_schema = BookingSchema
timezone = pytz.timezone("US/Pacific")
@oidc.accept_token(require_token=True)
@has_any_role(roles=[Role.internal_user.value])
def delete(self, id):
today = datetime.today()
string_today = today.strftime('%Y-%m-%d')
print("==> In the python DELETE /bookings/recurring/<id> endpoint")
csr = CSR.find_by_username(g.oidc_token_info['username'])
bookings = Booking.query.filter_by(recurring_uuid=id)\
.filter(db.func.date(Booking.start_time) >= string_today)\
.all()
for booking in bookings:
if booking.office_id != csr.office_id and csr.liaison_designate != 1:
abort(404)
if booking.start_time.year == today.year and booking.start_time.month == today.month \
and booking.start_time.day == today.day and booking.start_time.hour <= 5:
continue
db.session.delete(booking)
db.session.commit()
return {},204 | true | true |
1c349f75e2a32387801b8a5e9f6d1335ce053c3a | 1,139 | py | Python | docs/examples/tar_and_transfer.py | rohithj494/gladier | 00fc1cfd0a05f6f18b94b8afd9fef2503d2d3189 | [
"Apache-2.0"
] | null | null | null | docs/examples/tar_and_transfer.py | rohithj494/gladier | 00fc1cfd0a05f6f18b94b8afd9fef2503d2d3189 | [
"Apache-2.0"
] | null | null | null | docs/examples/tar_and_transfer.py | rohithj494/gladier | 00fc1cfd0a05f6f18b94b8afd9fef2503d2d3189 | [
"Apache-2.0"
] | null | null | null | from gladier import GladierBaseClient, generate_flow_definition
from pprint import pprint
@generate_flow_definition
class TarAndTransfer(GladierBaseClient):
    """Gladier client whose flow tars a directory and then transfers the archive.

    The flow definition is generated automatically from the two chained tools.
    """
    gladier_tools = [
        'gladier_tools.posix.Tar',
        'gladier_tools.globus.Transfer',
    ]
if __name__ == '__main__':
    # Runtime inputs for the flow; the empty strings must be filled in by the user.
    payload = {
        'input': {
            'tar_input': '',
            # funcx endpoint on which the Tar tool will run
            'funcx_endpoint_compute': '',
            # Globus endpoint where the freshly created archive lives
            'transfer_source_endpoint_id': '',
            # Destination defaults to Globus Tutorial Endpoint 1
            'transfer_destination_endpoint_id': 'ddb59aef-6d04-11e5-ba46-22000b92c6ec',
            'transfer_source_path': '',
            'transfer_destination_path': '',
            'transfer_recursive': False,
        }
    }
    client = TarAndTransfer()
    # Show the auto-generated flow definition before starting the run.
    pprint(client.flow_definition)
    flow = client.run_flow(flow_input=payload)
    action_id = flow['action_id']
    # Follow progress until the flow finishes, then report its final status.
    client.progress(action_id)
    pprint(client.get_status(action_id))
| 33.5 | 88 | 0.652327 | from gladier import GladierBaseClient, generate_flow_definition
from pprint import pprint
@generate_flow_definition
class TarAndTransfer(GladierBaseClient):
    """Gladier client whose flow tars a directory and then transfers the archive.

    The flow definition is generated automatically from the two chained tools.
    """
    gladier_tools = [
        'gladier_tools.posix.Tar',
        'gladier_tools.globus.Transfer',
    ]
if __name__ == '__main__':
    # Runtime inputs for the flow; the empty strings must be filled in by the user.
    payload = {
        'input': {
            'tar_input': '',
            # funcx endpoint on which the Tar tool will run
            'funcx_endpoint_compute': '',
            # Globus endpoint where the freshly created archive lives
            'transfer_source_endpoint_id': '',
            # Destination defaults to Globus Tutorial Endpoint 1
            'transfer_destination_endpoint_id': 'ddb59aef-6d04-11e5-ba46-22000b92c6ec',
            'transfer_source_path': '',
            'transfer_destination_path': '',
            'transfer_recursive': False,
        }
    }
    client = TarAndTransfer()
    # Show the auto-generated flow definition before starting the run.
    pprint(client.flow_definition)
    flow = client.run_flow(flow_input=payload)
    action_id = flow['action_id']
    # Follow progress until the flow finishes, then report its final status.
    client.progress(action_id)
    pprint(client.get_status(action_id))
| true | true |
1c349fa995d70055f33e2ce41fb93d7724b0fda2 | 6,569 | py | Python | CenterNet/src/lib/datasets/sample/ctdet.py | Kalana304/KORSAL | b7a0c7cf5428f632e99d2ca5c5e10a8288f10cc0 | [
"MIT"
] | null | null | null | CenterNet/src/lib/datasets/sample/ctdet.py | Kalana304/KORSAL | b7a0c7cf5428f632e99d2ca5c5e10a8288f10cc0 | [
"MIT"
] | null | null | null | CenterNet/src/lib/datasets/sample/ctdet.py | Kalana304/KORSAL | b7a0c7cf5428f632e99d2ca5c5e10a8288f10cc0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class CTDetDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _transform_to_coco(self, bboxs, labels):
anns = []
for t in range(len(labels)):
bbox = bboxs[t, :]
bbox[2] = bbox[2] - bbox[0]
bbox[3] = bbox[3] - bbox[1]
label = labels[t]
anns.append({'bbox': bbox, 'category_id': label + 1})
return anns
def _scale_bbox(self, bbox, i_h, i_w, h, w):
bbox[0] = float(bbox[0])*i_w/w
bbox[2] = float(bbox[2])*i_w/w
bbox[1] = float(bbox[1])*i_h/h
bbox[3] = float(bbox[3])*i_h/h
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
  def __getitem__(self, index):
    """Load one video frame and build the CenterNet training targets for it.

    Returns a dict with the normalized input image ('input'), the per-class
    centre heatmap ('hm'), width/height and offset regression targets plus
    their index/mask bookkeeping, and (in debug/eval mode) a 'meta' entry.
    """
    # self.ids entries look like (video_id, frame_num, labels, bboxes).
    annot_info = self.ids[index]
    frame_num = annot_info[1]
    video_id = annot_info[0]
    videoname = self.video_list[video_id]
    img_name = os.path.join(self._imgpath, videoname, '{:05d}{}'.format(frame_num, self.extension))
    # ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    # anns = self.coco.loadAnns(ids=ann_ids)
    anns = self._transform_to_coco(annot_info[3], annot_info[2])
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_name)
    height, width = img.shape[0], img.shape[1]
    # c: crop centre, s: crop scale -- inputs to the affine warp below.
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    if self.opt.keep_res:
      # Bitwise-OR padding; with pad = 2**k - 1 this rounds the size up to a
      # multiple of 2**k -- confirm the value of self.opt.pad.
      input_h = (height | self.opt.pad) + 1
      input_w = (width | self.opt.pad) + 1
      s = np.array([input_w, input_h], dtype=np.float32)
    else:
      s = max(img.shape[0], img.shape[1]) * 1.0
      input_h, input_w = self.opt.input_h, self.opt.input_w
    flipped = False
    if self.split == 'train':
      if not self.opt.not_rand_crop:
        # Random scale plus a random crop centre kept away from the borders.
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        # Gaussian jitter of centre and scale instead of random cropping.
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.flip:
        # Horizontal flip; mirror the crop centre to match.
        flipped = True
        img = img[:, ::-1, :]
        c[0] = width - c[0] - 1
    # Warp the image to the network input resolution.
    trans_input = get_affine_transform(
      c, s, 0, [input_w, input_h])
    inp = cv2.warpAffine(img, trans_input,
                         (input_w, input_h),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    # Normalize and convert HWC -> CHW.
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_h = input_h // self.opt.down_ratio
    output_w = input_w // self.opt.down_ratio
    num_classes = self.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
    # Per-sample target tensors at output (heatmap) resolution.
    hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
    cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      bbox = self._coco_box_to_bbox(ann['bbox'])
      # bbox = self._scale_bbox(bbox, input_h, input_w, height, width)
      # cat_ids maps the one-based annotation category to a heatmap channel.
      cls_id = int(self.cat_ids[ann['category_id']])
      if flipped:
        bbox[[0, 2]] = width - bbox[[2, 0]] - 1
      # Map box corners into output coordinates and clamp to the feature map.
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
      bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if h > 0 and w > 0:
        # Splat a Gaussian around the (integer) object centre on its class map.
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        radius = self.opt.hm_gauss if self.opt.mse_loss else radius
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        draw_gaussian(hm[cls_id], ct_int, radius)
        wh[k] = 1. * w, 1. * h
        # Flattened centre position, used to gather predictions at train time.
        ind[k] = ct_int[1] * output_w + ct_int[0]
        # Sub-pixel offset lost by the int cast above.
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
        cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
        if self.opt.dense_wh:
          draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                       ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'index':index, 'wh': wh}
    if self.opt.dense_wh:
      # Dense wh regression replaces the sparse per-object 'wh' entry.
      hm_a = hm.max(axis=0, keepdims=True)
      dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
      ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
      del ret['wh']
    elif self.opt.cat_spec_wh:
      ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
      del ret['wh']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.debug > 0 or not self.split == 'train':
      # Evaluation/debug extras: raw detections plus geometry metadata.
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 6), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': index, 'out_height':output_h, 'out_width':output_w}
      ret['meta'] = meta
return ret | 39.812121 | 109 | 0.601309 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class CTDetDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _transform_to_coco(self, bboxs, labels):
anns = []
for t in range(len(labels)):
bbox = bboxs[t, :]
bbox[2] = bbox[2] - bbox[0]
bbox[3] = bbox[3] - bbox[1]
label = labels[t]
anns.append({'bbox': bbox, 'category_id': label + 1})
return anns
def _scale_bbox(self, bbox, i_h, i_w, h, w):
bbox[0] = float(bbox[0])*i_w/w
bbox[2] = float(bbox[2])*i_w/w
bbox[1] = float(bbox[1])*i_h/h
bbox[3] = float(bbox[3])*i_h/h
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
  def __getitem__(self, index):
    """Load one video frame and build the CenterNet training targets for it.

    Returns a dict with the normalized input image ('input'), the per-class
    centre heatmap ('hm'), width/height and offset regression targets plus
    their index/mask bookkeeping, and (in debug/eval mode) a 'meta' entry.
    """
    # self.ids entries look like (video_id, frame_num, labels, bboxes).
    annot_info = self.ids[index]
    frame_num = annot_info[1]
    video_id = annot_info[0]
    videoname = self.video_list[video_id]
    img_name = os.path.join(self._imgpath, videoname, '{:05d}{}'.format(frame_num, self.extension))
    anns = self._transform_to_coco(annot_info[3], annot_info[2])
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_name)
    height, width = img.shape[0], img.shape[1]
    # c: crop centre, s: crop scale -- inputs to the affine warp below.
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    if self.opt.keep_res:
      # Bitwise-OR padding; with pad = 2**k - 1 this rounds the size up to a
      # multiple of 2**k -- confirm the value of self.opt.pad.
      input_h = (height | self.opt.pad) + 1
      input_w = (width | self.opt.pad) + 1
      s = np.array([input_w, input_h], dtype=np.float32)
    else:
      s = max(img.shape[0], img.shape[1]) * 1.0
      input_h, input_w = self.opt.input_h, self.opt.input_w
    flipped = False
    if self.split == 'train':
      if not self.opt.not_rand_crop:
        # Random scale plus a random crop centre kept away from the borders.
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        # Gaussian jitter of centre and scale instead of random cropping.
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.flip:
        # Horizontal flip; mirror the crop centre to match.
        flipped = True
        img = img[:, ::-1, :]
        c[0] = width - c[0] - 1
    # Warp the image to the network input resolution.
    trans_input = get_affine_transform(
      c, s, 0, [input_w, input_h])
    inp = cv2.warpAffine(img, trans_input,
                         (input_w, input_h),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    # Normalize and convert HWC -> CHW.
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_h = input_h // self.opt.down_ratio
    output_w = input_w // self.opt.down_ratio
    num_classes = self.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
    # Per-sample target tensors at output (heatmap) resolution.
    hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
    cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      bbox = self._coco_box_to_bbox(ann['bbox'])
      # cat_ids maps the one-based annotation category to a heatmap channel.
      cls_id = int(self.cat_ids[ann['category_id']])
      if flipped:
        bbox[[0, 2]] = width - bbox[[2, 0]] - 1
      # Map box corners into output coordinates and clamp to the feature map.
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
      bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if h > 0 and w > 0:
        # Splat a Gaussian around the (integer) object centre on its class map.
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        radius = self.opt.hm_gauss if self.opt.mse_loss else radius
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        draw_gaussian(hm[cls_id], ct_int, radius)
        wh[k] = 1. * w, 1. * h
        # Flattened centre position, used to gather predictions at train time.
        ind[k] = ct_int[1] * output_w + ct_int[0]
        # Sub-pixel offset lost by the int cast above.
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
        cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
        if self.opt.dense_wh:
          draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                       ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'index':index, 'wh': wh}
    if self.opt.dense_wh:
      # Dense wh regression replaces the sparse per-object 'wh' entry.
      hm_a = hm.max(axis=0, keepdims=True)
      dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
      ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
      del ret['wh']
    elif self.opt.cat_spec_wh:
      ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
      del ret['wh']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.debug > 0 or not self.split == 'train':
      # Evaluation/debug extras: raw detections plus geometry metadata.
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 6), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': index, 'out_height':output_h, 'out_width':output_w}
      ret['meta'] = meta
return ret | true | true |
1c349fb946612a0b2377a7062161d9e4e668d838 | 11,090 | py | Python | src/pymordemos/parabolic_mor.py | pdiercks/pymor | e94f05714d666a929113543c49e88f8f494d64e1 | [
"Unlicense"
] | null | null | null | src/pymordemos/parabolic_mor.py | pdiercks/pymor | e94f05714d666a929113543c49e88f8f494d64e1 | [
"Unlicense"
] | 4 | 2022-03-17T10:07:38.000Z | 2022-03-30T12:41:06.000Z | src/pymordemos/parabolic_mor.py | pdiercks/pymor | e94f05714d666a929113543c49e88f8f494d64e1 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""Reduced basis approximation of the heat equation.
Usage:
parabolic_mor.py BACKEND ALG SNAPSHOTS RBSIZE TEST
Arguments:
BACKEND Discretization toolkit to use (pymor, fenics).
ALG The model reduction algorithm to use
(greedy, adaptive_greedy, pod).
SNAPSHOTS greedy/pod: number of training set parameters
adaptive_greedy: size of validation set.
RBSIZE Size of the reduced basis.
TEST Number of test parameters for reduction error estimation.
"""
from functools import partial # fix parameters of given function
import numpy as np
from pymor.basic import * # most common pyMOR functions and classes
from pymor.algorithms.timestepping import ImplicitEulerTimeStepper
# parameters for high-dimensional models
GRID_INTERVALS = 100
FENICS_ORDER = 2
NT = 100
DT = 1. / NT
####################################################################################################
# High-dimensional models #
####################################################################################################
def discretize_pymor():
    """Discretize the analytical heat-equation problem with pyMOR's builtin CG FEM.

    The diffusion field is a constant background plus a fixed high-diffusion
    block and two channels whose diffusivity is controlled by the 'top'
    parameter; the right-hand side oscillates in time.
    """
    # setup analytical problem
    problem = InstationaryProblem(
        StationaryProblem(
            domain=RectDomain(top='dirichlet', bottom='neumann'),
            diffusion=LincombFunction(
                [ConstantFunction(1., dim_domain=2),
                 ExpressionFunction('(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * (x[..., 1] < 0.7) * 1.',
                                    dim_domain=2),
                 ExpressionFunction('(x[..., 0] > 0.35) * (x[..., 0] < 0.40) * (x[..., 1] > 0.3) * 1. + '
                                    '(x[..., 0] > 0.60) * (x[..., 0] < 0.65) * (x[..., 1] > 0.3) * 1.',
                                    dim_domain=2)],
                [1.,
                 100. - 1.,
                 ExpressionParameterFunctional('top - 1.', {'top': 0})]
            ),
            rhs=ConstantFunction(value=100., dim_domain=2) * ExpressionParameterFunctional('sin(10*pi*_t)', {'_t': ()}),
            dirichlet_data=ConstantFunction(value=0., dim_domain=2),
            neumann_data=ExpressionFunction('(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * -1000.',
                                            dim_domain=2),
        ),
        T=1.,
        initial_data=ExpressionFunction('(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * (x[..., 1] < 0.7) * 10.',
                                        dim_domain=2),
        parameter_space=CubicParameterSpace({'top': 0}, minimum=1, maximum=100.)
    )
    # discretize using continuous finite elements
    fom, _ = discretize_instationary_cg(analytical_problem=problem, diameter=1./GRID_INTERVALS, nt=NT)
    # Cache solution trajectories on disk so repeated solves are cheap.
    fom.enable_caching('disk')
    return fom
def discretize_fenics():
    """Build the FEniCS full-order model, MPI-wrapping it when run in parallel."""
    from pymor.tools import mpi
    if not mpi.parallel:
        return _discretize_fenics()
    # Under MPI every rank holds part of the model; expose it behind a proxy.
    from pymor.models.mpi import mpi_wrap_model
    return mpi_wrap_model(_discretize_fenics, use_with=True, pickle_local_spaces=False)
def _discretize_fenics():
    """Assemble the heat-equation system matrices with FEniCS and wrap them as
    a pyMOR InstationaryModel (parameter-separated via LincombOperator)."""
    # assemble system matrices - FEniCS code
    ########################################
    import dolfin as df
    # discrete function space
    mesh = df.UnitSquareMesh(GRID_INTERVALS, GRID_INTERVALS, 'crossed')
    V = df.FunctionSpace(mesh, 'Lagrange', FENICS_ORDER)
    u = df.TrialFunction(V)
    v = df.TestFunction(V)
    # data functions
    bottom_diffusion = df.Expression('(x[0] > 0.45) * (x[0] < 0.55) * (x[1] < 0.7) * 1.',
                                     element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    top_diffusion = df.Expression('(x[0] > 0.35) * (x[0] < 0.40) * (x[1] > 0.3) * 1. +'
                                  '(x[0] > 0.60) * (x[0] < 0.65) * (x[1] > 0.3) * 1.',
                                  element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    initial_data = df.Expression('(x[0] > 0.45) * (x[0] < 0.55) * (x[1] < 0.7) * 10.',
                                 element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    neumann_data = df.Expression('(x[0] > 0.45) * (x[0] < 0.55) * 1000.',
                                 element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    # assemble matrices and vectors
    l2_mat = df.assemble(df.inner(u, v) * df.dx)
    l2_0_mat = l2_mat.copy()
    h1_mat = df.assemble(df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    h1_0_mat = h1_mat.copy()
    # mat0 carries only the Dirichlet rows (zeroed copy + bc.apply below).
    mat0 = h1_mat.copy()
    mat0.zero()
    bottom_mat = df.assemble(bottom_diffusion * df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    top_mat = df.assemble(top_diffusion * df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    u0 = df.project(initial_data, V).vector()
    f = df.assemble(neumann_data * v * df.ds)
    # boundary treatment
    def dirichlet_boundary(x, on_boundary):
        # Dirichlet boundary: left, right and top edges of the unit square.
        tol = 1e-14
        return on_boundary and (abs(x[0]) < tol or abs(x[0] - 1) < tol or abs(x[1] - 1) < tol)
    bc = df.DirichletBC(V, df.Constant(0.), dirichlet_boundary)
    bc.apply(l2_0_mat)
    bc.apply(h1_0_mat)
    bc.apply(mat0)
    # zero (not apply) on the parametric parts so the Dirichlet rows are
    # contributed exactly once, by mat0.
    bc.zero(bottom_mat)
    bc.zero(top_mat)
    bc.apply(f)
    bc.apply(u0)
    # wrap everything as a pyMOR model
    ##################################
    from pymor.bindings.fenics import FenicsVectorSpace, FenicsMatrixOperator, FenicsVisualizer
    fom = InstationaryModel(
        T=1.,
        initial_data=FenicsVectorSpace(V).make_array([u0]),
        operator=LincombOperator([FenicsMatrixOperator(mat0, V, V),
                                  FenicsMatrixOperator(h1_0_mat, V, V),
                                  FenicsMatrixOperator(bottom_mat, V, V),
                                  FenicsMatrixOperator(top_mat, V, V)],
                                 [1.,
                                  1.,
                                  100. - 1.,
                                  ExpressionParameterFunctional('top - 1.', {'top': 0})]),
        rhs=VectorOperator(FenicsVectorSpace(V).make_array([f])),
        mass=FenicsMatrixOperator(l2_0_mat, V, V, name='l2'),
        products={'l2': FenicsMatrixOperator(l2_mat, V, V, name='l2'),
                  'l2_0': FenicsMatrixOperator(l2_0_mat, V, V, name='l2_0'),
                  'h1': FenicsMatrixOperator(h1_mat, V, V, name='h1'),
                  'h1_0_semi': FenicsMatrixOperator(h1_0_mat, V, V, name='h1_0_semi')},
        time_stepper=ImplicitEulerTimeStepper(nt=NT),
        parameter_space=CubicParameterSpace({'top': 0}, minimum=1, maximum=100.),
        visualizer=FenicsVisualizer(FenicsVectorSpace(V))
    )
    return fom
####################################################################################################
# Reduction algorithms #
####################################################################################################
def reduce_greedy(fom, reductor, snapshots, basis_size):
    """Weak greedy basis generation over a uniformly sampled training set."""
    mus = fom.parameter_space.sample_uniformly(snapshots)
    result = rb_greedy(fom, reductor, mus,
                       max_extensions=basis_size, pool=new_parallel_pool())
    return result['rom']
def reduce_adaptive_greedy(fom, reductor, validation_mus, basis_size):
    """Adaptive greedy basis generation with an adaptively refined training set."""
    result = rb_adaptive_greedy(fom, reductor, validation_mus=validation_mus,
                                max_extensions=basis_size, pool=new_parallel_pool())
    return result['rom']
def reduce_pod(fom, reductor, snapshots, basis_size):
    """POD basis generation from trajectories at uniformly sampled parameters."""
    mus = fom.parameter_space.sample_uniformly(snapshots)
    # Collect the full-order solution trajectory for every training parameter.
    U = fom.operator.source.empty()
    for mu in mus:
        U.append(fom.solve(mu))
    basis, _ = pod(U, modes=basis_size, product=fom.h1_0_semi_product)
    reductor.extend_basis(basis, method='trivial')
    return reductor.reduce()
####################################################################################################
# Main script #
####################################################################################################
def main(BACKEND, ALG, SNAPSHOTS, RBSIZE, TEST):
    """Run the demo: discretize, reduce, analyse the reduction error, visualize.

    Args (already validated/converted by the __main__ block):
        BACKEND: 'pymor' or 'fenics' -- discretization toolkit.
        ALG: 'greedy', 'adaptive_greedy' or 'pod' -- reduction algorithm.
        SNAPSHOTS: training-set size (greedy/pod) or validation-set size.
        RBSIZE: size of the reduced basis.
        TEST: number of random test parameters for the error study.
    """
    # discretize
    ############
    if BACKEND == 'pymor':
        fom = discretize_pymor()
    elif BACKEND == 'fenics':
        fom = discretize_fenics()
    else:
        raise NotImplementedError

    # select reduction algorithm with error estimator
    #################################################
    # The problem's coercivity constant is 1, so a constant estimator suffices.
    coercivity_estimator = ExpressionParameterFunctional('1.', fom.parameter_type)
    reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if ALG == 'greedy':
        rom = reduce_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, SNAPSHOTS, RBSIZE)
    elif ALG == 'pod':
        rom = reduce_pod(fom, reductor, SNAPSHOTS, RBSIZE)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    # l^2-in-time / h^1-in-space norm of the trajectory (initial step excluded).
    results = reduction_error_analysis(
        rom, fom=fom, reductor=reductor, estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(fom.h1_0_semi_norm(U)[1:]**2))],
        error_norm_names=['l^2-h^1'],
        condition=False, test_mus=TEST, random_seed=999, plot=True
    )

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot as plt
    # NOTE(review): pyplot.show() does not take a figure argument in current
    # matplotlib; this relies on older behaviour -- confirm.
    plt.show(results['figure'])

    # write results to disk
    #######################
    # NOTE(review): the file handles passed to dump() are never closed.
    from pymor.core.pickle import dump
    dump(rom, open('reduced_model.out', 'wb'))
    results.pop('figure') # matplotlib figures cannot be serialized
    dump(results, open('results.out', 'wb'))

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    if BACKEND == 'fenics': # right now the fenics visualizer does not support time trajectories
        U = U[len(U) - 1].copy()
        U_RB = U_RB[len(U_RB) - 1].copy()
    fom.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                  separate_colorbars=True)

    return results
if __name__ == '__main__':
    import sys
    # Exactly five positional arguments are required (see module docstring).
    if len(sys.argv) != 6:
        print(__doc__)
        sys.exit(1)
    backend_arg, alg_arg, snapshots_arg, rbsize_arg, test_arg = sys.argv[1:]
    main(backend_arg.lower(), alg_arg.lower(),
         int(snapshots_arg), int(rbsize_arg), int(test_arg))
| 36.966667 | 120 | 0.543823 |
from functools import partial
import numpy as np
from pymor.basic import *
from pymor.algorithms.timestepping import ImplicitEulerTimeStepper
# Parameters for the high-dimensional (full-order) models.
GRID_INTERVALS = 100  # grid resolution per spatial direction
FENICS_ORDER = 2      # polynomial order of the FEniCS Lagrange space
NT = 100              # number of implicit Euler time steps
DT = 1. / NT          # resulting time-step size on [0, 1]
| true | true |
1c34a019bc2a84af2cf1508a4ea7650b0bff1654 | 1,918 | py | Python | python/leetcode/92.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | python/leetcode/92.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | python/leetcode/92.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
from typing import List
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None
    def __str__(self):
        """Render the list reachable from this node as 'v1 -> v2 -> ...'."""
        parts = []
        node = self
        while node is not None:
            parts.append(str(node.val))
            node = node.next
        return " -> ".join(parts)
def buildList(list: List[int]) -> ListNode:
    """Build a linked list from the given values; an empty sequence yields None."""
    if len(list) == 0:
        return None
    # Append behind a dummy head so no special case is needed for the first node.
    dummy = ListNode(0)
    tail = dummy
    for value in list:
        tail.next = ListNode(value)
        tail = tail.next
    return dummy.next
class Solution:
    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        """Reverse the nodes at positions m..n (1-based, inclusive) of a linked list.

        Single pass with O(1) extra space; the previous implementation detached
        every node of the sublist onto a stack, using O(n - m) extra memory.

        Args:
            head: first node of the list (may be None).
            m: 1-based index of the first node to reverse.
            n: 1-based index of the last node to reverse; when m >= n the list
               is returned unchanged (matches the old behaviour for m == n and
               the degenerate 0/0 call on an empty list).

        Returns:
            The head of the modified list.
        """
        if head is None or m >= n:
            return head
        # Walk to the node just before position m (prev stays None when m == 1).
        prev = None
        cur = head
        for _ in range(m - 1):
            prev = cur
            cur = cur.next
        # Reverse the n - m + 1 nodes starting at cur via pointer rotation.
        sublist_tail = cur          # this node ends up last in the reversed run
        reversed_head = None
        for _ in range(n - m + 1):
            nxt = cur.next
            cur.next = reversed_head
            reversed_head = cur
            cur = nxt
        # Reconnect: reversed run, then the untouched remainder of the list.
        sublist_tail.next = cur
        if prev is None:
            return reversed_head
        prev.next = reversed_head
        return head
if __name__ == '__main__':
    # Ad-hoc smoke tests: a middle reversal, a full reversal of a two-node
    # list, an empty list (prints None) and a single-element list.
    head = buildList([1, 2, 3, 4, 5, 6, 7, 8])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 2, 6)
    print(l)
    head = buildList([3, 5])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 1, 2)
    print(l)
    head = buildList([])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 0, 0)
    print(l)
    head = buildList([1])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 1, 1)
print(l) | 23.679012 | 73 | 0.48488 |
from typing import List
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None
    def __str__(self):
        """Render the list reachable from this node as 'v1 -> v2 -> ...'."""
        parts = []
        node = self
        while node is not None:
            parts.append(str(node.val))
            node = node.next
        return " -> ".join(parts)
def buildList(list: List[int]) -> ListNode:
    """Build a linked list from the given values; an empty sequence yields None."""
    if len(list) == 0:
        return None
    # Append behind a dummy head so no special case is needed for the first node.
    dummy = ListNode(0)
    tail = dummy
    for value in list:
        tail.next = ListNode(value)
        tail = tail.next
    return dummy.next
class Solution:
    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        """Reverse the nodes at positions m..n (1-based, inclusive) of a linked list.

        Single pass with O(1) extra space; the previous implementation detached
        every node of the sublist onto a stack, using O(n - m) extra memory.

        Args:
            head: first node of the list (may be None).
            m: 1-based index of the first node to reverse.
            n: 1-based index of the last node to reverse; when m >= n the list
               is returned unchanged (matches the old behaviour for m == n and
               the degenerate 0/0 call on an empty list).

        Returns:
            The head of the modified list.
        """
        if head is None or m >= n:
            return head
        # Walk to the node just before position m (prev stays None when m == 1).
        prev = None
        cur = head
        for _ in range(m - 1):
            prev = cur
            cur = cur.next
        # Reverse the n - m + 1 nodes starting at cur via pointer rotation.
        sublist_tail = cur          # this node ends up last in the reversed run
        reversed_head = None
        for _ in range(n - m + 1):
            nxt = cur.next
            cur.next = reversed_head
            reversed_head = cur
            cur = nxt
        # Reconnect: reversed run, then the untouched remainder of the list.
        sublist_tail.next = cur
        if prev is None:
            return reversed_head
        prev.next = reversed_head
        return head
if __name__ == '__main__':
    # Ad-hoc smoke tests: a middle reversal, a full reversal of a two-node
    # list, an empty list (prints None) and a single-element list.
    head = buildList([1, 2, 3, 4, 5, 6, 7, 8])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 2, 6)
    print(l)
    head = buildList([3, 5])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 1, 2)
    print(l)
    head = buildList([])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 0, 0)
    print(l)
    head = buildList([1])
    print(head)
    sol = Solution()
    l = sol.reverseBetween(head, 1, 1)
print(l) | true | true |
1c34a0cb90a2d100b1f2c378ba1603f1e5f1e482 | 4,443 | py | Python | ethereumetl/mappers/transaction_mapper.py | spicehq/ethereum-etl | ab76507fa32e9c89620b158b5448696daa87c6f4 | [
"MIT"
] | null | null | null | ethereumetl/mappers/transaction_mapper.py | spicehq/ethereum-etl | ab76507fa32e9c89620b158b5448696daa87c6f4 | [
"MIT"
] | 1 | 2022-03-29T07:21:53.000Z | 2022-03-29T07:21:53.000Z | ethereumetl/mappers/transaction_mapper.py | spicehq/ethereum-etl | ab76507fa32e9c89620b158b5448696daa87c6f4 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ethereumetl.domain.transaction import EthTransaction
from ethereumetl.utils import hex_to_dec, to_normalized_address
class EthTransactionMapper:
    """Converts JSON-RPC transaction dicts to EthTransaction objects and back."""

    @staticmethod
    def json_dict_to_transaction(json_dict, **kwargs):
        """Build an EthTransaction from a JSON-RPC transaction dict.

        Hex-encoded quantity fields are decoded to ints, addresses are
        normalized, and receipt fields are filled in when a 'receipt' entry
        is present. ``block_timestamp`` may be supplied via ``kwargs``.
        """
        tx = EthTransaction()
        tx.hash = json_dict.get('hash')
        tx.nonce = hex_to_dec(json_dict.get('nonce'))
        tx.transaction_index = hex_to_dec(json_dict.get('transactionIndex'))
        tx.from_address = to_normalized_address(json_dict.get('from'))
        tx.to_address = to_normalized_address(json_dict.get('to'))
        tx.value = hex_to_dec(json_dict.get('value'))
        tx.gas = hex_to_dec(json_dict.get('gas'))
        tx.gas_price = hex_to_dec(json_dict.get('gasPrice'))
        tx.input = json_dict.get('input')
        tx.block_timestamp = kwargs.get('block_timestamp')
        tx.block_number = hex_to_dec(json_dict.get('blockNumber'))
        tx.block_hash = json_dict.get('blockHash')
        tx.max_fee_per_gas = hex_to_dec(json_dict.get('maxFeePerGas'))
        tx.max_priority_fee_per_gas = hex_to_dec(json_dict.get('maxPriorityFeePerGas'))
        tx.transaction_type = hex_to_dec(json_dict.get('type'))
        if 'receipt' in json_dict:
            receipt = json_dict['receipt']
            tx.receipt_cumulative_gas_used = hex_to_dec(receipt.get('cumulativeGasUsed'))
            tx.receipt_gas_used = hex_to_dec(receipt.get('gasUsed'))
            tx.receipt_contract_address = to_normalized_address(receipt.get('contractAddress'))
            tx.receipt_root = receipt.get('root')
            tx.receipt_status = hex_to_dec(receipt.get('status'))
            tx.receipt_effective_gas_price = hex_to_dec(receipt.get('effectiveGasPrice'))
        return tx

    @staticmethod
    def transaction_to_dict(transaction: EthTransaction):
        """Flatten an EthTransaction into the dict format used for export."""
        return {
            'type': 'transaction',
            'hash': transaction.hash,
            'nonce': transaction.nonce,
            'transaction_index': transaction.transaction_index,
            'from_address': transaction.from_address,
            'to_address': transaction.to_address,
            'value': transaction.value,
            'gas': transaction.gas,
            'gas_price': transaction.gas_price,
            'input': transaction.input,
            'receipt_cumulative_gas_used': transaction.receipt_cumulative_gas_used,
            'receipt_gas_used': transaction.receipt_gas_used,
            'receipt_contract_address': transaction.receipt_contract_address,
            'receipt_root': transaction.receipt_root,
            'receipt_status': transaction.receipt_status,
            'receipt_effective_gas_price': transaction.receipt_effective_gas_price,
            'block_timestamp': transaction.block_timestamp,
            'block_number': transaction.block_number,
            'block_hash': transaction.block_hash,
            'max_fee_per_gas': transaction.max_fee_per_gas,
            'max_priority_fee_per_gas': transaction.max_priority_fee_per_gas,
            'transaction_type': transaction.transaction_type
        }
| 53.53012 | 109 | 0.720684 |
from ethereumetl.domain.transaction import EthTransaction
from ethereumetl.utils import hex_to_dec, to_normalized_address
class EthTransactionMapper:
    """Converts JSON-RPC transaction dicts to EthTransaction objects and back."""
    @staticmethod
    def json_dict_to_transaction(json_dict, **kwargs):
        """Build an EthTransaction from a JSON-RPC transaction dict.

        Hex-encoded quantity fields are decoded to ints, addresses are
        normalized, and receipt fields are filled in when a 'receipt' entry
        is present. ``block_timestamp`` may be supplied via ``kwargs``.
        """
        transaction = EthTransaction()
        transaction.hash = json_dict.get('hash')
        transaction.nonce = hex_to_dec(json_dict.get('nonce'))
        transaction.transaction_index = hex_to_dec(json_dict.get('transactionIndex'))
        transaction.from_address = to_normalized_address(json_dict.get('from'))
        transaction.to_address = to_normalized_address(json_dict.get('to'))
        transaction.value = hex_to_dec(json_dict.get('value'))
        transaction.gas = hex_to_dec(json_dict.get('gas'))
        transaction.gas_price = hex_to_dec(json_dict.get('gasPrice'))
        transaction.input = json_dict.get('input')
        transaction.block_timestamp = kwargs.get('block_timestamp')
        transaction.block_number = hex_to_dec(json_dict.get('blockNumber'))
        transaction.block_hash = json_dict.get('blockHash')
        transaction.max_fee_per_gas = hex_to_dec(json_dict.get('maxFeePerGas'))
        transaction.max_priority_fee_per_gas = hex_to_dec(json_dict.get('maxPriorityFeePerGas'))
        transaction.transaction_type = hex_to_dec(json_dict.get('type'))
        if 'receipt' in json_dict:
            receipt_dict = json_dict.get('receipt')
            transaction.receipt_cumulative_gas_used = hex_to_dec(receipt_dict.get('cumulativeGasUsed'))
            transaction.receipt_gas_used = hex_to_dec(receipt_dict.get('gasUsed'))
            transaction.receipt_contract_address = to_normalized_address(receipt_dict.get('contractAddress'))
            transaction.receipt_root = receipt_dict.get('root')
            transaction.receipt_status = hex_to_dec(receipt_dict.get('status'))
            transaction.receipt_effective_gas_price = hex_to_dec(receipt_dict.get('effectiveGasPrice'))
        return transaction
    @staticmethod
    def transaction_to_dict(transaction: EthTransaction):
        """Flatten an EthTransaction into the dict format used for export."""
        return {
            'type': 'transaction',
            'hash': transaction.hash,
            'nonce': transaction.nonce,
            'transaction_index': transaction.transaction_index,
            'from_address': transaction.from_address,
            'to_address': transaction.to_address,
            'value': transaction.value,
            'gas': transaction.gas,
            'gas_price': transaction.gas_price,
            'input': transaction.input,
            'receipt_cumulative_gas_used': transaction.receipt_cumulative_gas_used,
            'receipt_gas_used': transaction.receipt_gas_used,
            'receipt_contract_address': transaction.receipt_contract_address,
            'receipt_root': transaction.receipt_root,
            'receipt_status': transaction.receipt_status,
            'receipt_effective_gas_price': transaction.receipt_effective_gas_price,
            'block_timestamp': transaction.block_timestamp,
            'block_number': transaction.block_number,
            'block_hash': transaction.block_hash,
            'max_fee_per_gas': transaction.max_fee_per_gas,
            'max_priority_fee_per_gas': transaction.max_priority_fee_per_gas,
            'transaction_type': transaction.transaction_type
        }
| true | true |
1c34a0f87d311d932eeb2628cecf25e5e20da33e | 1,619 | py | Python | tests/calc_area_of_bbox.py | hitfee01/rtm3d | 9e872c1bf857234d17c8863be6006722d4aab283 | [
"MIT"
] | 2 | 2021-01-22T01:21:24.000Z | 2021-04-14T02:46:29.000Z | tests/calc_area_of_bbox.py | hitfee01/rtm3d | 9e872c1bf857234d17c8863be6006722d4aab283 | [
"MIT"
] | 5 | 2021-01-14T03:18:44.000Z | 2021-05-26T02:24:45.000Z | tests/calc_area_of_bbox.py | hitfee01/rtm3d | 9e872c1bf857234d17c8863be6006722d4aab283 | [
"MIT"
] | 2 | 2021-04-14T02:46:35.000Z | 2021-08-09T01:49:11.000Z | import argparse
from utils import utils
import yaml
from datasets.dataset_reader import DatasetReader
import os
from preprocess.data_preprocess import TestTransform
import random
import cv2
import numpy as np
import tqdm
from models.configs.detault import CONFIGS as config
from datasets.data.kitti.devkit_object import utils as kitti_utils
from fvcore.common.config import CfgNode
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model-config', type=str, default='./models/configs/rtm3d_dla34_kitti.yaml')
args = parser.parse_args()
# opt.config = utils.check_file(opt.config) # check file
cfg = config.clone()
if len(args.model_config) > 0:
cfg.merge_from_file(args.model_config)
opt = CfgNode(args.__dict__)
cfg.merge_from_other_cfg(opt)
brg_mean = config.DATASET.MEAN
dr = DatasetReader(config.DATASET.PATH, cfg, TestTransform(cfg.INPUT_SIZE[0], mean=brg_mean))
batch_size = min(1, len(dr))
names = cfg.DATASET.OBJs
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
bboxes_merge = []
for img, target, path, _ in tqdm.tqdm(dr):
bboxes_3d_array = target.numpy()
bboxes = bboxes_3d_array.get_field('bbox')
bboxes_merge.append(bboxes)
bboxes = np.concatenate(bboxes_merge, axis=0)
w = (bboxes[:, 2] - bboxes[:, 0])
h = (bboxes[:, 3] - bboxes[:, 1])
areas = w * h
max_area = np.amax(areas)
min_area = np.amin(areas)
indx = np.argmax(areas)
bbox = bboxes[indx]
print('max area: %s, min area: %s' % (max_area, min_area))
| 34.446809 | 102 | 0.696109 | import argparse
from utils import utils
import yaml
from datasets.dataset_reader import DatasetReader
import os
from preprocess.data_preprocess import TestTransform
import random
import cv2
import numpy as np
import tqdm
from models.configs.detault import CONFIGS as config
from datasets.data.kitti.devkit_object import utils as kitti_utils
from fvcore.common.config import CfgNode
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model-config', type=str, default='./models/configs/rtm3d_dla34_kitti.yaml')
args = parser.parse_args()
onfig.clone()
if len(args.model_config) > 0:
cfg.merge_from_file(args.model_config)
opt = CfgNode(args.__dict__)
cfg.merge_from_other_cfg(opt)
brg_mean = config.DATASET.MEAN
dr = DatasetReader(config.DATASET.PATH, cfg, TestTransform(cfg.INPUT_SIZE[0], mean=brg_mean))
batch_size = min(1, len(dr))
names = cfg.DATASET.OBJs
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
bboxes_merge = []
for img, target, path, _ in tqdm.tqdm(dr):
bboxes_3d_array = target.numpy()
bboxes = bboxes_3d_array.get_field('bbox')
bboxes_merge.append(bboxes)
bboxes = np.concatenate(bboxes_merge, axis=0)
w = (bboxes[:, 2] - bboxes[:, 0])
h = (bboxes[:, 3] - bboxes[:, 1])
areas = w * h
max_area = np.amax(areas)
min_area = np.amin(areas)
indx = np.argmax(areas)
bbox = bboxes[indx]
print('max area: %s, min area: %s' % (max_area, min_area))
| true | true |
1c34a32c354ce518d6cd601a25f2b12820f04509 | 9,167 | py | Python | docs/conf.py | ttutko/python_oidc | d090e29278533a367dfd2a91f8ecca0fa53fc5e2 | [
"MIT"
] | null | null | null | docs/conf.py | ttutko/python_oidc | d090e29278533a367dfd2a91f8ecca0fa53fc5e2 | [
"MIT"
] | null | null | null | docs/conf.py | ttutko/python_oidc | d090e29278533a367dfd2a91f8ecca0fa53fc5e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/python_oidc")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python_oidc'
copyright = u'2020, Tom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebar_width': '300px',
'page_width': '1200px'
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from python_oidc import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python_oidc-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'python_oidc Documentation',
u'Tom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| 33.578755 | 85 | 0.703502 |
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.join(__location__, '../src'))
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/python_oidc")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python_oidc'
copyright = u'2020, Tom'
# The version info for the project you're documenting, acts as replacement for
version = ''
release = ''
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'sidebar_width': '300px',
'page_width': '1200px'
}
try:
from python_oidc import __version__ as version
except ImportError:
pass
else:
release = version
html_static_path = ['_static']
htmlhelp_basename = 'python_oidc-doc'
latex_elements = {
}
latex_documents = [
('index', 'user_guide.tex', u'python_oidc Documentation',
u'Tom', 'manual'),
]
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| true | true |
1c34a3939c7708c6b8b63b555e8b62e83c9f1c76 | 3,179 | py | Python | hacksec_cli/mechanism/upcoming/upcoming.py | hacksec-in/hacksec-cli | 18c1c350c21fcab9c5d1c1d799ffda80ac655251 | [
"MIT"
] | 4 | 2021-08-30T16:02:05.000Z | 2022-01-05T14:49:05.000Z | hacksec_cli/mechanism/upcoming/upcoming.py | ScRiPt1337/hacksec-cli | 18c1c350c21fcab9c5d1c1d799ffda80ac655251 | [
"MIT"
] | 1 | 2021-09-11T07:35:28.000Z | 2021-09-11T16:09:30.000Z | hacksec_cli/mechanism/upcoming/upcoming.py | ScRiPt1337/hacksec-cli | 18c1c350c21fcab9c5d1c1d799ffda80ac655251 | [
"MIT"
] | 2 | 2021-09-03T02:40:49.000Z | 2022-01-05T14:49:08.000Z | from rich.console import Console
from rich.table import Table
import os
console = Console()
class upcoming_machine():
"""upcoming machines class"""
def get_data(self, request):
"""fetch upcoming machines data"""
data = request.get(endpoint="/machines/upcoming")
return data[0], data[1]
def show_upcoming_machines(self, data):
"""Formate upcoming machines data to fit in tables and return table"""
table = Table(show_header=True, header_style="bold green")
table.add_column("machine_id", style="green")
table.add_column("machine_name", style="green")
table.add_column("host", style="green")
table.add_column("hint", style="green")
table.add_column("point", style="green")
table.add_column("tottal_own", style="green")
table.add_column("owned", style="green")
for i in data:
table.add_row(str(i["machine_id"]), str(i["machine_name"]), str(i["host"]),
str(i["hint"]), str(i["point"]), str(i["tottal_own"]), str(i["owned"]))
console.print(table)
console.print(
"You can upload your own weblab using this command below 👇\nExample : upload_lab or ul", style="bold green")
def upload_machine(self, interface, request):
"""Upload machine to upcoming machines"""
console.print("Upload Web-lab", style="bold blue")
machine_name = interface.get_prompt(
label="Enter your weblab name: ")
point = interface.get_prompt(
label="Enter your weblab point: ")
file_location = interface.get_prompt(
label="Enter your weblab file location: ")
file_name = os.path.basename(file_location)
with console.status("[bold green]Uploading machine please wait...\n") as status:
data, status_code = request.post(endpoint="/machines/upload/machine", payload={
"machine_name": machine_name, "point": point, "filename": file_name})
if status_code == 200:
try:
with open(file_location, 'rb') as f:
_, status_code = request.upload(
endpoint="/machines/upload/zip", file=f)
if status_code == 200:
console.print(data["data"], style="bold green")
except FileNotFoundError:
console.print(
"Error : File not found please recheck you file location", style="bold red")
else:
console.print(
"Upload failed please contact with our support team", style="bold red")
def generate_table(self, request):
"""Generate table for upcoming machines"""
console.print("Upcoming weblab", style="bold blue")
with console.status("[bold green]please wait...\n") as status:
data, status = self.get_data(request)
if status == 200:
self.show_upcoming_machines(data["data"])
else:
pass
| 46.072464 | 122 | 0.568732 | from rich.console import Console
from rich.table import Table
import os
console = Console()
class upcoming_machine():
def get_data(self, request):
data = request.get(endpoint="/machines/upcoming")
return data[0], data[1]
def show_upcoming_machines(self, data):
table = Table(show_header=True, header_style="bold green")
table.add_column("machine_id", style="green")
table.add_column("machine_name", style="green")
table.add_column("host", style="green")
table.add_column("hint", style="green")
table.add_column("point", style="green")
table.add_column("tottal_own", style="green")
table.add_column("owned", style="green")
for i in data:
table.add_row(str(i["machine_id"]), str(i["machine_name"]), str(i["host"]),
str(i["hint"]), str(i["point"]), str(i["tottal_own"]), str(i["owned"]))
console.print(table)
console.print(
"You can upload your own weblab using this command below 👇\nExample : upload_lab or ul", style="bold green")
def upload_machine(self, interface, request):
console.print("Upload Web-lab", style="bold blue")
machine_name = interface.get_prompt(
label="Enter your weblab name: ")
point = interface.get_prompt(
label="Enter your weblab point: ")
file_location = interface.get_prompt(
label="Enter your weblab file location: ")
file_name = os.path.basename(file_location)
with console.status("[bold green]Uploading machine please wait...\n") as status:
data, status_code = request.post(endpoint="/machines/upload/machine", payload={
"machine_name": machine_name, "point": point, "filename": file_name})
if status_code == 200:
try:
with open(file_location, 'rb') as f:
_, status_code = request.upload(
endpoint="/machines/upload/zip", file=f)
if status_code == 200:
console.print(data["data"], style="bold green")
except FileNotFoundError:
console.print(
"Error : File not found please recheck you file location", style="bold red")
else:
console.print(
"Upload failed please contact with our support team", style="bold red")
def generate_table(self, request):
console.print("Upcoming weblab", style="bold blue")
with console.status("[bold green]please wait...\n") as status:
data, status = self.get_data(request)
if status == 200:
self.show_upcoming_machines(data["data"])
else:
pass
| true | true |
1c34a504c04161e81ae8c9a241e05d32bdea3088 | 335 | py | Python | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/config_s.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | 1 | 2022-01-18T11:47:29.000Z | 2022-01-18T11:47:29.000Z | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/config_s.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | null | null | null | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/config_s.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | null | null | null |
init_1h=90
init_1s=90
init_2h=90
init_2s=90
init_3h=90
init_3s=90
init_4h=90
init_4s=90
l1=80
l2=69
l=142
b=92.8
w=108
speed=0.05
h=30
Kp_H=0.06
pit_Kp_G=0.04
pit_Kd_G=0.6
rol_Kp_G=0.04
rol_Kd_G=0.35
tran_mov_kp=0.1
CC_M=0
walk_h=50
walk_speed=0.02
ma_case=0
trot_cg_f=4
trot_cg_b=4
trot_cg_t=2
in_y=17
| 5.403226 | 15 | 0.704478 |
init_1h=90
init_1s=90
init_2h=90
init_2s=90
init_3h=90
init_3s=90
init_4h=90
init_4s=90
l1=80
l2=69
l=142
b=92.8
w=108
speed=0.05
h=30
Kp_H=0.06
pit_Kp_G=0.04
pit_Kd_G=0.6
rol_Kp_G=0.04
rol_Kd_G=0.35
tran_mov_kp=0.1
CC_M=0
walk_h=50
walk_speed=0.02
ma_case=0
trot_cg_f=4
trot_cg_b=4
trot_cg_t=2
in_y=17
| true | true |
1c34a549cd57e117fc5dda9459bde878e19744b6 | 2,581 | py | Python | astrodom/gui/dashBoardWindowGui.py | fenriques/AstroDom | 84b54d3299cf591c39b214248339a201ae8ae6ca | [
"MIT"
] | 8 | 2020-05-17T14:57:08.000Z | 2020-12-20T12:29:43.000Z | astrodom/gui/dashBoardWindowGui.py | fenriques/AstroDom | 84b54d3299cf591c39b214248339a201ae8ae6ca | [
"MIT"
] | 2 | 2020-06-04T20:49:09.000Z | 2020-09-04T12:35:07.000Z | astrodom/gui/dashBoardWindowGui.py | fenriques/AstroDom | 84b54d3299cf591c39b214248339a201ae8ae6ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dashBoardWindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(865, 741)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox = QtWidgets.QGroupBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.tableViewDashboardCount = QtWidgets.QTableView(self.groupBox)
self.tableViewDashboardCount.setObjectName("tableViewDashboardCount")
self.verticalLayout.addWidget(self.tableViewDashboardCount)
self.verticalLayout_3.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.tableViewDashboardTime = QtWidgets.QTableView(self.groupBox_2)
self.tableViewDashboardTime.setObjectName("tableViewDashboardTime")
self.verticalLayout_2.addWidget(self.tableViewDashboardTime)
self.verticalLayout_3.addWidget(self.groupBox_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.groupBox.setTitle(_translate("Dialog", "Exposure Count"))
self.groupBox_2.setTitle(_translate("Dialog", "Exposure Integration Time"))
| 47.796296 | 108 | 0.74351 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(865, 741)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox = QtWidgets.QGroupBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.tableViewDashboardCount = QtWidgets.QTableView(self.groupBox)
self.tableViewDashboardCount.setObjectName("tableViewDashboardCount")
self.verticalLayout.addWidget(self.tableViewDashboardCount)
self.verticalLayout_3.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.tableViewDashboardTime = QtWidgets.QTableView(self.groupBox_2)
self.tableViewDashboardTime.setObjectName("tableViewDashboardTime")
self.verticalLayout_2.addWidget(self.tableViewDashboardTime)
self.verticalLayout_3.addWidget(self.groupBox_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.groupBox.setTitle(_translate("Dialog", "Exposure Count"))
self.groupBox_2.setTitle(_translate("Dialog", "Exposure Integration Time"))
| true | true |
1c34a628700e2282d78c8d67525397d4f4fbeb16 | 1,558 | py | Python | core/migrations/0021_pumping.py | Alberdi/babybuddy | b2c228fac9d8a7d3abfaf284b37174594493a185 | [
"BSD-2-Clause"
] | 922 | 2017-10-26T13:15:40.000Z | 2020-02-05T19:06:13.000Z | core/migrations/0021_pumping.py | Alberdi/babybuddy | b2c228fac9d8a7d3abfaf284b37174594493a185 | [
"BSD-2-Clause"
] | 109 | 2017-10-26T14:00:30.000Z | 2020-02-05T23:37:11.000Z | core/migrations/0021_pumping.py | Alberdi/babybuddy | b2c228fac9d8a7d3abfaf284b37174594493a185 | [
"BSD-2-Clause"
] | 87 | 2017-10-26T13:15:54.000Z | 2020-01-25T12:49:46.000Z | # Generated by Django 4.0.3 on 2022-04-04 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0020_bmi_tags_diaperchange_tags_feeding_tags_and_more"),
]
operations = [
migrations.CreateModel(
name="Pumping",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", models.FloatField(verbose_name="Amount")),
("time", models.DateTimeField(verbose_name="Time")),
(
"notes",
models.TextField(blank=True, null=True, verbose_name="Notes"),
),
(
"child",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pumping",
to="core.child",
verbose_name="Child",
),
),
],
options={
"verbose_name": "Pumping",
"verbose_name_plural": "Pumping",
"ordering": ["-time"],
"default_permissions": ("view", "add", "change", "delete"),
},
),
]
| 31.16 | 82 | 0.425546 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0020_bmi_tags_diaperchange_tags_feeding_tags_and_more"),
]
operations = [
migrations.CreateModel(
name="Pumping",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", models.FloatField(verbose_name="Amount")),
("time", models.DateTimeField(verbose_name="Time")),
(
"notes",
models.TextField(blank=True, null=True, verbose_name="Notes"),
),
(
"child",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pumping",
to="core.child",
verbose_name="Child",
),
),
],
options={
"verbose_name": "Pumping",
"verbose_name_plural": "Pumping",
"ordering": ["-time"],
"default_permissions": ("view", "add", "change", "delete"),
},
),
]
| true | true |
1c34a6291a643e3bbac29e4c5019e462216187d0 | 7,879 | py | Python | scaffold/scaffold.py | q0w/stg44 | 47fa2d9a5161b4e9165aa916eee24782f46679b1 | [
"MIT"
] | null | null | null | scaffold/scaffold.py | q0w/stg44 | 47fa2d9a5161b4e9165aa916eee24782f46679b1 | [
"MIT"
] | 21 | 2020-11-13T17:06:52.000Z | 2020-12-06T15:40:30.000Z | scaffold/scaffold.py | q0w/manny | 47fa2d9a5161b4e9165aa916eee24782f46679b1 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
from django.core.management import CommandError
from scaffold.kit.colors import TermColor
from scaffold.kit.templates import (
FieldTemplate,
ModelTemplate,
SerializerTemplate,
UrlTemplate,
ViewTemplate,
CommonTemplate,
)
from scaffold.kit.utils import Walker
class Scaffold:
    """Generates DRF boilerplate (model, serializers, urls, views) for one app.

    Each ``create_*`` method renders a scaffold.kit template, appends to (or,
    for urls, overwrites) the target source file inside the app package, and
    then re-formats that file by shelling out to ``black``.
    """
    def __init__(
        self,
        proj_settings,
        app_config,
        new_model,
        fields,
        serializers,
        urls,
        views,
    ):
        # proj_settings: dotted path of the Django settings module.
        # app_config: AppConfig instance of the target app.
        # new_model: model name to create (falsy -> skip model creation).
        # fields: iterable of colon-separated field specs, split in create_model().
        # serializers / views: model-name lists; [] means "all models",
        #   None means "skip this step" (see execute()).
        # urls: truthy -> regenerate urls.py.
        self.proj_settings = proj_settings
        self.new_model = new_model
        self.app_config = app_config
        self.models = self.get_model_names()
        self.fields = fields
        self.serializers = serializers
        self.urls = urls
        self.views = views
    def get_model_names(self):
        """Return the class names of every model registered in the app."""
        return [m.__name__ for m in self.app_config.get_models()]
    def get_content(self, context, template: CommonTemplate):
        """Render ``context`` through ``template`` and return the source text."""
        return template.convert(context)
    def check_models(self, models):
        """Return the names in ``models`` that are not models of this app."""
        missing_models = [x for x in models if x not in set(self.get_model_names())]
        return missing_models
    def check_sv(self, file, sv):
        """Return the serializer/view names in ``sv`` already defined in ``file``.

        Returns None when ``file`` does not exist yet (nothing can clash).
        """
        if not os.path.isfile(file):
            return None
        existing_sv = Walker(file).get_sv()
        excess_sv = [x for x in sv if x in existing_sv]
        return excess_sv
    def create_model(self):
        """Append the new model definition to models.py and re-format it."""
        if self.new_model in self.get_model_names():
            raise CommandError(f"model {self.new_model} already exists...")
        fields = []
        for field in self.fields:
            # Field specs are colon-separated, e.g. "name:type:...".
            new_field = self.get_content(field.split(":"), FieldTemplate())
            fields.append(new_field)
        with open(self.app_config.models_module.__file__, "a") as mf:
            content = self.get_content(
                {"name": self.new_model, "fields": fields}, ModelTemplate()
            )
            mf.write(content)
        subprocess.call(["black", self.app_config.models_module.__file__, "-q"])
        print(f"{TermColor.OK}model: {self.new_model} has been created{TermColor.ENDC}")
    def check_imports(self, filename, imports):
        """Return the subset of ``imports`` ({module: [names]}) missing from ``filename``."""
        if not os.path.isfile(filename):
            return imports
        existing_imports = Walker(file=filename).get_imports()
        missing_imports = {}
        for key, value in imports.items():
            missing_values = [
                x for x in value if x not in set(existing_imports.get(key, []))
            ]
            if missing_values:
                missing_imports[key] = missing_values
        return missing_imports
    def create_serializers(self):
        """Append serializers for the requested (or all) models to serializers.py."""
        serializer_file_path = f"{self.app_config.module.__path__[0]}/serializers.py"
        serializers = (
            self.get_model_names() if not self.serializers else self.serializers
        )
        missing_models = self.check_models(serializers)
        if missing_models:
            error = (
                f'{" ".join(missing_models)} do not exist...'
                if len(missing_models) > 1
                else f'{" ".join(missing_models)} does not exist...'
            )
            raise CommandError(error)
        excess_serializers = self.check_sv(serializer_file_path, serializers)
        if excess_serializers:
            # Drop already-existing names and only warn; abort when nothing is left.
            serializers = [m for m in serializers if m not in excess_serializers]
            if not serializers:
                raise CommandError("all serializers already exist...")
            # NOTE(review): the singular branch is missing the TermColor.ERROR
            # prefix that the plural branch has — confirm whether intended.
            error = (
                f'{TermColor.ERROR}{" ".join(excess_serializers)} already exist...{TermColor.ENDC}'
                if len(excess_serializers) > 1
                else f'{" ".join(excess_serializers)} already exists...{TermColor.ENDC}'
            )
            print(error)
        missing_imports = self.check_imports(
            serializer_file_path,
            {"rest_framework": ["serializers"], f"{self.app_config.name}": ["models"]},
        )
        with open(serializer_file_path, "a") as sf:
            content = self.get_content(
                {"models": serializers, "imports": missing_imports},
                SerializerTemplate(),
            )
            sf.write(content)
        subprocess.call(["black", serializer_file_path, "-q"])
        print(
            f"{TermColor.OK}serializers: {' '.join(serializers)} have been created{TermColor.ENDC}"
        ) if len(serializers) > 1 else print(
            f"{TermColor.OK}serializer: {' '.join(serializers)} has been created{TermColor.ENDC}"
        )
    def create_urls(self):
        """Overwrite urls.py with a SimpleRouter covering every model in the app."""
        url_file_path = f"{self.app_config.module.__path__[0]}/urls.py"
        existing_models = self.get_model_names()
        with open(url_file_path, "w+") as uf:
            content = self.get_content(
                {"app": self.app_config.name, "models": existing_models}, UrlTemplate()
            )
            uf.write(content)
        subprocess.call(["black", url_file_path, "-q"])
        print(
            f"{TermColor.OK}urls: SimpleRouter for all models has been created{TermColor.ENDC}"
        )
    def create_views(self):
        """Append views for the requested (or all) models to views.py."""
        view_file_path = f"{self.app_config.module.__path__[0]}/views.py"
        views = self.get_model_names() if not self.views else self.views
        missing_models = self.check_models(views)
        if missing_models:
            raise CommandError(f'{" ".join(missing_models)} do/does not exist...')
        excess_views = self.check_sv(view_file_path, views)
        if excess_views:
            views = [m for m in views if m not in excess_views]
            if not views:
                raise CommandError("all views already exist...")
            error = (
                f'{TermColor.ERROR}{" ".join(excess_views)} already exist...{TermColor.ENDC}'
                if len(excess_views) > 1
                else f'{TermColor.ERROR}{" ".join(excess_views)} already exists...{TermColor.ENDC}'
            )
            print(error)
        missing_imports = self.check_imports(
            view_file_path,
            {
                "django.shortcuts": ["get_object_or_404"],
                "rest_framework": ["viewsets", "response"],
                f"{self.app_config.name}": ["models", "serializers"],
            },
        )
        with open(view_file_path, "a") as wf:
            content = self.get_content(
                {"models": views, "imports": missing_imports}, ViewTemplate()
            )
            wf.write(content)
        subprocess.call(["black", view_file_path, "-q"])
        print(
            f"{TermColor.OK}views: {' '.join(views)} have been created{TermColor.ENDC}"
        ) if len(views) > 1 else print(
            f"{TermColor.OK}view: {' '.join(views)} has been created{TermColor.ENDC}"
        )
    def execute(self):
        """Run the requested generation steps.

        ``urls`` is tested for truthiness, while serializers/views compare
        against None so an empty list still means "generate for all models".
        """
        if self.new_model:
            self.create_model()
        if self.urls:
            self.create_urls()
        if self.serializers is not None:
            self.create_serializers()
        if self.views is not None:
            self.create_views()
class ScaffoldApp:
    """Creates new Django apps and registers them in INSTALLED_APPS."""

    def __init__(self, proj_settings, new_apps):
        # proj_settings: dotted settings module name; new_apps: app names to create.
        self.apps = new_apps
        self.proj_settings = proj_settings

    def create_app(self):
        """Run ``manage.py startapp`` for each app, then add them to INSTALLED_APPS."""
        for app_name in self.apps:
            try:
                subprocess.call(["python", "manage.py", "startapp", app_name])
            except Exception as exc:
                # Best-effort: report the failure and keep creating the rest.
                print(exc)
        settings_file = sys.modules[self.proj_settings].__file__
        walker = Walker(
            file=settings_file,
            options={"variable": "INSTALLED_APPS", "variable_values": self.apps},
        )
        walker.mutate()
        joined = ' '.join(self.apps)
        if len(self.apps) > 1:
            print(f"{TermColor.OK}apps: {joined} have been created{TermColor.ENDC}")
        else:
            print(f"{TermColor.OK}app: {joined} has been created{TermColor.ENDC}")

    def execute(self):
        """Entry point: create apps only when any were requested."""
        if self.apps:
            self.create_app()
| 36.308756 | 99 | 0.584846 | import os
import subprocess
import sys
from django.core.management import CommandError
from scaffold.kit.colors import TermColor
from scaffold.kit.templates import (
FieldTemplate,
ModelTemplate,
SerializerTemplate,
UrlTemplate,
ViewTemplate,
CommonTemplate,
)
from scaffold.kit.utils import Walker
class Scaffold:
    """Generates DRF boilerplate (model, serializers, urls, views) for one app.

    Each ``create_*`` method renders a scaffold.kit template, appends to (or,
    for urls, overwrites) the target source file inside the app package, and
    then re-formats that file by shelling out to ``black``.
    """
    def __init__(
        self,
        proj_settings,
        app_config,
        new_model,
        fields,
        serializers,
        urls,
        views,
    ):
        # proj_settings: dotted path of the Django settings module.
        # app_config: AppConfig instance of the target app.
        # new_model: model name to create (falsy -> skip model creation).
        # fields: iterable of colon-separated field specs, split in create_model().
        # serializers / views: model-name lists; [] means "all models",
        #   None means "skip this step" (see execute()).
        # urls: truthy -> regenerate urls.py.
        self.proj_settings = proj_settings
        self.new_model = new_model
        self.app_config = app_config
        self.models = self.get_model_names()
        self.fields = fields
        self.serializers = serializers
        self.urls = urls
        self.views = views
    def get_model_names(self):
        """Return the class names of every model registered in the app."""
        return [m.__name__ for m in self.app_config.get_models()]
    def get_content(self, context, template: CommonTemplate):
        """Render ``context`` through ``template`` and return the source text."""
        return template.convert(context)
    def check_models(self, models):
        """Return the names in ``models`` that are not models of this app."""
        missing_models = [x for x in models if x not in set(self.get_model_names())]
        return missing_models
    def check_sv(self, file, sv):
        """Return the serializer/view names in ``sv`` already defined in ``file``.

        Returns None when ``file`` does not exist yet (nothing can clash).
        """
        if not os.path.isfile(file):
            return None
        existing_sv = Walker(file).get_sv()
        excess_sv = [x for x in sv if x in existing_sv]
        return excess_sv
    def create_model(self):
        """Append the new model definition to models.py and re-format it."""
        if self.new_model in self.get_model_names():
            raise CommandError(f"model {self.new_model} already exists...")
        fields = []
        for field in self.fields:
            # Field specs are colon-separated, e.g. "name:type:...".
            new_field = self.get_content(field.split(":"), FieldTemplate())
            fields.append(new_field)
        with open(self.app_config.models_module.__file__, "a") as mf:
            content = self.get_content(
                {"name": self.new_model, "fields": fields}, ModelTemplate()
            )
            mf.write(content)
        subprocess.call(["black", self.app_config.models_module.__file__, "-q"])
        print(f"{TermColor.OK}model: {self.new_model} has been created{TermColor.ENDC}")
    def check_imports(self, filename, imports):
        """Return the subset of ``imports`` ({module: [names]}) missing from ``filename``."""
        if not os.path.isfile(filename):
            return imports
        existing_imports = Walker(file=filename).get_imports()
        missing_imports = {}
        for key, value in imports.items():
            missing_values = [
                x for x in value if x not in set(existing_imports.get(key, []))
            ]
            if missing_values:
                missing_imports[key] = missing_values
        return missing_imports
    def create_serializers(self):
        """Append serializers for the requested (or all) models to serializers.py."""
        serializer_file_path = f"{self.app_config.module.__path__[0]}/serializers.py"
        serializers = (
            self.get_model_names() if not self.serializers else self.serializers
        )
        missing_models = self.check_models(serializers)
        if missing_models:
            error = (
                f'{" ".join(missing_models)} do not exist...'
                if len(missing_models) > 1
                else f'{" ".join(missing_models)} does not exist...'
            )
            raise CommandError(error)
        excess_serializers = self.check_sv(serializer_file_path, serializers)
        if excess_serializers:
            # Drop already-existing names and only warn; abort when nothing is left.
            serializers = [m for m in serializers if m not in excess_serializers]
            if not serializers:
                raise CommandError("all serializers already exist...")
            # NOTE(review): the singular branch is missing the TermColor.ERROR
            # prefix that the plural branch has — confirm whether intended.
            error = (
                f'{TermColor.ERROR}{" ".join(excess_serializers)} already exist...{TermColor.ENDC}'
                if len(excess_serializers) > 1
                else f'{" ".join(excess_serializers)} already exists...{TermColor.ENDC}'
            )
            print(error)
        missing_imports = self.check_imports(
            serializer_file_path,
            {"rest_framework": ["serializers"], f"{self.app_config.name}": ["models"]},
        )
        with open(serializer_file_path, "a") as sf:
            content = self.get_content(
                {"models": serializers, "imports": missing_imports},
                SerializerTemplate(),
            )
            sf.write(content)
        subprocess.call(["black", serializer_file_path, "-q"])
        print(
            f"{TermColor.OK}serializers: {' '.join(serializers)} have been created{TermColor.ENDC}"
        ) if len(serializers) > 1 else print(
            f"{TermColor.OK}serializer: {' '.join(serializers)} has been created{TermColor.ENDC}"
        )
    def create_urls(self):
        """Overwrite urls.py with a SimpleRouter covering every model in the app."""
        url_file_path = f"{self.app_config.module.__path__[0]}/urls.py"
        existing_models = self.get_model_names()
        with open(url_file_path, "w+") as uf:
            content = self.get_content(
                {"app": self.app_config.name, "models": existing_models}, UrlTemplate()
            )
            uf.write(content)
        subprocess.call(["black", url_file_path, "-q"])
        print(
            f"{TermColor.OK}urls: SimpleRouter for all models has been created{TermColor.ENDC}"
        )
    def create_views(self):
        """Append views for the requested (or all) models to views.py."""
        view_file_path = f"{self.app_config.module.__path__[0]}/views.py"
        views = self.get_model_names() if not self.views else self.views
        missing_models = self.check_models(views)
        if missing_models:
            raise CommandError(f'{" ".join(missing_models)} do/does not exist...')
        excess_views = self.check_sv(view_file_path, views)
        if excess_views:
            views = [m for m in views if m not in excess_views]
            if not views:
                raise CommandError("all views already exist...")
            error = (
                f'{TermColor.ERROR}{" ".join(excess_views)} already exist...{TermColor.ENDC}'
                if len(excess_views) > 1
                else f'{TermColor.ERROR}{" ".join(excess_views)} already exists...{TermColor.ENDC}'
            )
            print(error)
        missing_imports = self.check_imports(
            view_file_path,
            {
                "django.shortcuts": ["get_object_or_404"],
                "rest_framework": ["viewsets", "response"],
                f"{self.app_config.name}": ["models", "serializers"],
            },
        )
        with open(view_file_path, "a") as wf:
            content = self.get_content(
                {"models": views, "imports": missing_imports}, ViewTemplate()
            )
            wf.write(content)
        subprocess.call(["black", view_file_path, "-q"])
        print(
            f"{TermColor.OK}views: {' '.join(views)} have been created{TermColor.ENDC}"
        ) if len(views) > 1 else print(
            f"{TermColor.OK}view: {' '.join(views)} has been created{TermColor.ENDC}"
        )
    def execute(self):
        """Run the requested generation steps.

        ``urls`` is tested for truthiness, while serializers/views compare
        against None so an empty list still means "generate for all models".
        """
        if self.new_model:
            self.create_model()
        if self.urls:
            self.create_urls()
        if self.serializers is not None:
            self.create_serializers()
        if self.views is not None:
            self.create_views()
class ScaffoldApp:
    """Creates new Django apps and registers them in INSTALLED_APPS."""

    def __init__(self, proj_settings, new_apps):
        # proj_settings: dotted settings module name; new_apps: app names to create.
        self.apps = new_apps
        self.proj_settings = proj_settings

    def create_app(self):
        """Run ``manage.py startapp`` for each app, then add them to INSTALLED_APPS."""
        for app_name in self.apps:
            try:
                subprocess.call(["python", "manage.py", "startapp", app_name])
            except Exception as exc:
                # Best-effort: report the failure and keep creating the rest.
                print(exc)
        settings_file = sys.modules[self.proj_settings].__file__
        walker = Walker(
            file=settings_file,
            options={"variable": "INSTALLED_APPS", "variable_values": self.apps},
        )
        walker.mutate()
        joined = ' '.join(self.apps)
        if len(self.apps) > 1:
            print(f"{TermColor.OK}apps: {joined} have been created{TermColor.ENDC}")
        else:
            print(f"{TermColor.OK}app: {joined} has been created{TermColor.ENDC}")

    def execute(self):
        """Entry point: create apps only when any were requested."""
        if self.apps:
            self.create_app()
| true | true |
1c34a696474756e5c7ec2ad619cef2ac54d11268 | 1,077 | py | Python | ddb/feature/ytt/__init__.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 4 | 2020-06-11T20:54:47.000Z | 2020-09-22T13:07:17.000Z | ddb/feature/ytt/__init__.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 113 | 2019-11-07T00:40:36.000Z | 2021-01-18T12:50:16.000Z | ddb/feature/ytt/__init__.py | inetum-orleans/docker-devbox-ddb | 20c713cf7bfcaf289226a17a9648c17d16003b4d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import ClassVar, Iterable
from dotty_dict import Dotty
from ddb.action import Action
from ddb.feature import Feature
from .actions import YttAction
from .schema import YttSchema
from ...utils.file import TemplateFinder
class YttFeature(Feature):
    """Feature that renders template files through ytt (https://get-ytt.io/)."""

    @property
    def name(self) -> str:
        """Unique feature identifier."""
        return "ytt"

    @property
    def dependencies(self) -> Iterable[str]:
        """Features that must be available before this one."""
        return ["core", "file"]

    @property
    def schema(self) -> ClassVar[YttSchema]:
        """Configuration schema class for this feature."""
        return YttSchema

    @property
    def actions(self) -> Iterable[Action]:
        """Actions registered by this feature."""
        return (YttAction(),)

    def _configure_defaults(self, feature_config: Dotty):
        """Derive default ``includes`` patterns when none are configured."""
        if feature_config.get("includes") is not None:
            return
        feature_config["includes"] = TemplateFinder.build_default_includes_from_suffixes(
            feature_config["suffixes"],
            feature_config["extensions"],
        )
| 24.477273 | 75 | 0.630455 |
from typing import ClassVar, Iterable
from dotty_dict import Dotty
from ddb.action import Action
from ddb.feature import Feature
from .actions import YttAction
from .schema import YttSchema
from ...utils.file import TemplateFinder
class YttFeature(Feature):
    """Render template files with ytt (https://get-ytt.io/)."""
    @property
    def name(self) -> str:
        """Unique feature identifier."""
        return "ytt"
    @property
    def dependencies(self) -> Iterable[str]:
        """Features that must be available before this one."""
        return ["core", "file"]
    @property
    def schema(self) -> ClassVar[YttSchema]:
        """Configuration schema class for this feature."""
        return YttSchema
    @property
    def actions(self) -> Iterable[Action]:
        """Actions registered by this feature."""
        return (
            YttAction(),
        )
    def _configure_defaults(self, feature_config: Dotty):
        """Derive default "includes" patterns from suffixes/extensions when unset."""
        includes = feature_config.get("includes")
        if includes is None:
            includes = TemplateFinder.build_default_includes_from_suffixes(
                feature_config["suffixes"],
                feature_config["extensions"]
            )
            feature_config["includes"] = includes
| true | true |
1c34a90004311f67906c6ddffd962fb446f1d3c1 | 12,670 | py | Python | python/fetch_stats.py | IFTS/ads-platform-tools | e6a1a4bcc2e3bbfc902565bfea9004a2ec80c0b8 | [
"Apache-2.0"
] | null | null | null | python/fetch_stats.py | IFTS/ads-platform-tools | e6a1a4bcc2e3bbfc902565bfea9004a2ec80c0b8 | [
"Apache-2.0"
] | null | null | null | python/fetch_stats.py | IFTS/ads-platform-tools | e6a1a4bcc2e3bbfc902565bfea9004a2ec80c0b8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# import requests
import oauth2 as oauth
import yaml
# import urllib
import json
import os
import time
# import pytz
import datetime
import argparse
import re
import sys
# Ads API host; all resource paths below are joined onto this.
DOMAIN = 'https://ads-api.twitter.com'
# Global verbosity level (0 = quiet, 1 = verbose, 2 = very verbose); set in main().
VERBOSE = 0
# Segmentation types that can be requested without extra sub-parameters.
NON_SUB_PARAM_SEGMENTATION_TYPES = ['PLATFORMS', 'LOCATIONS', 'GENDER', 'INTERESTS', 'KEYWORDS']
def main(options):
    """Pull a 7-day, hour-granularity stats report for one ads account.

    Walks the account hierarchy (funding instruments -> campaigns ->
    line items -> promoted tweets), filters out entities that could not have
    served during the window, fetches analytics for the survivors (optionally
    segmented), and prints query counts plus total request cost.

    Args:
        options: argparse namespace produced by input().
    """
    global VERBOSE
    account = options.account_id
    headers = options.headers
    if options.veryverbose:
        VERBOSE = 2
    elif options.verbose:
        VERBOSE = 1
    # FIX: time.clock() measured CPU time on Unix and was removed in
    # Python 3.8; time.time() reports the intended wall-clock duration.
    start = time.time()
    user_twurl = twurlauth()
    print("Best practices stats check for :account_id %s" % account)
    linesep()
    # Reporting window: last 7 days snapped to whole hours; end_time backs
    # off one second so it stays inside the final hour bucket.
    now = datetime.datetime.utcnow()
    start_time = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    start_time = start_time.replace(minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.utcnow()
    end_time = end_time.replace(minute=0, second=0, microsecond=0)
    end_time -= datetime.timedelta(seconds=1)
    print('Current time:\t%s' % now)
    print('Start time:\t%s' % start_time)
    print('End time:\t%s' % end_time)
    linesep()
    # Check that we have access to this :account_id before doing anything else.
    resource_path = '/0/accounts/%s' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    if len(data) == 0:
        print('ERROR: Could not locate :account_id %s' % account)
        sys.exit(0)
    # Fetch and filter each entity level, constraining children to the
    # parents that survived filtering.
    resource_path = '/0/accounts/%s/funding_instruments?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    funding_instruments = check(data, start_time, end_time)
    print("Funding instruments:\t\t%s" % len(funding_instruments))
    resource_path = '/0/accounts/%s/campaigns?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    campaigns = check(data, start_time, end_time, 'funding_instrument_id', funding_instruments)
    print("Campaigns:\t\t\t%s" % len(campaigns))
    resource_path = '/0/accounts/%s/line_items?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    line_items = check(data, start_time, end_time, 'campaign_id', campaigns)
    print("Line items:\t\t\t%s" % len(line_items))
    resource_path = '/0/accounts/%s/promoted_tweets?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    promoted_tweets = check(data, start_time, end_time, 'line_item_id', line_items)
    print("Promoted Tweets:\t\t%s" % len(promoted_tweets))
    total_query_count = 0
    total_request_cost = 0
    total_rate_limited_query_count = 0
    segmented_query_count = 0
    segmented_request_cost = 0
    if len(line_items) > 0:
        print("\tfetching stats for %s line items" % len(line_items))
        (query_count,
         cost_total,
         rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'line_items',
                                                  start_time, end_time, line_items)
        total_query_count += query_count
        total_request_cost += cost_total
        # FIX: this branch previously dropped its rate-limited count, so the
        # "Queries Rate Limited" total under-reported.
        total_rate_limited_query_count += rate_limited_query_count
    if len(promoted_tweets) > 0:
        print("\tfetching stats for %s promoted tweets" % len(promoted_tweets))
        (query_count,
         cost_total,
         rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'promoted_tweets',
                                                  start_time, end_time, promoted_tweets)
        total_query_count += query_count
        total_request_cost += cost_total
        total_rate_limited_query_count += rate_limited_query_count
    # Segmentation queries: one extra pass per segmentation type that needs
    # no additional sub-parameters.
    if options.segmentation:
        if len(line_items) > 0:
            print("\tfetching segmentation stats for %s line items" % len(line_items))
            for seg_type in NON_SUB_PARAM_SEGMENTATION_TYPES:
                (query_count,
                 cost_total,
                 rate_limited_query_count) = gather_stats(user_twurl, headers, account,
                                                          'line_items', start_time, end_time,
                                                          line_items, seg_type)
                total_query_count += query_count
                total_request_cost += cost_total
                total_rate_limited_query_count += rate_limited_query_count
                segmented_query_count += query_count
                segmented_request_cost += cost_total
        if len(promoted_tweets) > 0:
            print("\tfetching segmentation stats for %s promoted tweets" % len(promoted_tweets))
            for seg_type in NON_SUB_PARAM_SEGMENTATION_TYPES:
                (query_count,
                 cost_total,
                 rate_limited_query_count) = gather_stats(user_twurl, headers, account,
                                                          'promoted_tweets', start_time, end_time,
                                                          promoted_tweets, seg_type)
                total_query_count += query_count
                total_request_cost += cost_total
                total_rate_limited_query_count += rate_limited_query_count
                segmented_query_count += query_count
                segmented_request_cost += cost_total
    linesep()
    if options.segmentation:
        print("Non-Seg Stats Req Cost:\t\t%s" % (total_request_cost - segmented_request_cost))
        print("Segmented Stats Req Cost:\t%s" % segmented_request_cost)
        linesep()
    print("Total Stats Queries:\t\t%s" % total_query_count)
    print("Total Stats Request Cost:\t%s" % total_request_cost)
    if VERBOSE > 0:
        print("Avg Cost per Query:\t\t%s" % str(total_request_cost / total_query_count))
    print("Queries Rate Limited:\t\t%s" % total_rate_limited_query_count)
    linesep()
    elapsed = (time.time() - start)
    print('Time elapsed:\t\t\t%s' % elapsed)
def input():
    """Parse command-line options for the stats fetcher.

    NOTE: shadows the ``input`` builtin; the name is kept because the
    ``__main__`` entry point calls it as ``input()``.
    """
    parser = argparse.ArgumentParser(description='Fetch Twitter Ads Account Stats')
    parser.add_argument('-a', '--account', dest='account_id', required=True,
                        help='Ads Account ID')
    parser.add_argument('-A', '--header', action='append', dest='headers',
                        help='HTTP headers to include')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                        help='Verbose outputs cost avgs')
    parser.add_argument('-vv', '--very-verbose', action='store_true', dest='veryverbose',
                        help='Very verbose outputs API queries made')
    parser.add_argument('-s', '--segmentation', action='store_true', dest='segmentation',
                        help='Pull segmentation stats')
    return parser.parse_args()
def twurlauth():
    """Read OAuth credentials for the default twurl profile from ``~/.twurlrc``.

    Returns:
        tuple: (consumer_key, consumer_secret, oauth_token, oauth_token_secret)
    """
    # FIX: safe_load avoids arbitrary object construction from the YAML file
    # and works on PyYAML >= 5, where a plain yaml.load() without a Loader is
    # deprecated (and a TypeError on PyYAML 6).  The redundant f.close()
    # inside the with-block is dropped.
    with open(os.path.expanduser('~/.twurlrc'), 'r') as f:
        contents = yaml.safe_load(f)
    default_user = contents["configuration"]["default_profile"][0]
    CONSUMER_KEY = contents["configuration"]["default_profile"][1]
    CONSUMER_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["consumer_secret"]
    USER_OAUTH_TOKEN = contents["profiles"][default_user][CONSUMER_KEY]["token"]
    USER_OAUTH_TOKEN_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["secret"]
    return CONSUMER_KEY, CONSUMER_SECRET, USER_OAUTH_TOKEN, USER_OAUTH_TOKEN_SECRET
def request(user_twurl, http_method, headers, url):
    """Issue a signed OAuth1 request and return (response headers, parsed JSON).

    Args:
        user_twurl: (consumer_key, consumer_secret, token, token_secret) tuple.
        http_method: HTTP verb, e.g. 'GET'.
        headers: iterable of 'Key: value' strings, or None.
        url: full request URL.

    Returns:
        tuple: (response, data) where ``data`` is the decoded JSON body, or
        None when the body is not valid JSON.
    """
    consumer_key, consumer_secret, oauth_token, oauth_token_secret = user_twurl
    consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
    token = oauth.Token(key=oauth_token, secret=oauth_token_secret)
    client = oauth.Client(consumer, token)
    header_list = {}
    if headers:
        for i in headers:
            (key, value) = i.split(': ')
            if key and value:
                header_list[key] = value
    response, content = client.request(url, method=http_method, headers=header_list)
    try:
        data = json.loads(content)
    # FIX: was a bare except, which also swallowed KeyboardInterrupt /
    # SystemExit; only JSON-decode failures should yield None.
    except (ValueError, TypeError):
        data = None
    return response, data
def get_data(user_twurl, http_method, headers, url):
    """Fetch every page of an Ads API collection and return the combined data list.

    Follows ``next_cursor`` pagination; exits the process when the initial
    request does not come back with HTTP status 200.
    """
    collected = []
    res_headers, response = request(user_twurl, http_method, headers, url)
    if res_headers['status'] != '200':
        print('ERROR: query failed, cannot continue: %s' % url)
        sys.exit(0)
    if response and 'data' in response:
        collected.extend(response['data'])
    while 'next_cursor' in response and response['next_cursor'] is not None:
        paged_url = '%s&cursor=%s' % (url, response['next_cursor'])
        res_headers, response = request(user_twurl, http_method, headers, paged_url)
        if response and 'data' in response:
            collected.extend(response['data'])
    return collected
def gather_stats(user_twurl, headers, account_id, entity_type, start_time, end_time, input_entities,
                 segmentation=None):
    """Fetch hourly stats for a set of entity ids, batching 20 ids per request.

    Batches that come back 429 are retried after sleeping until the cost
    rate limit resets; 503 batches are retried immediately; any other
    non-200 status aborts the process.

    Args:
        user_twurl: OAuth credential tuple from twurlauth().
        headers: extra 'Key: value' header strings, or None.
        account_id: ads account id.
        entity_type: plural path segment, e.g. 'line_items' or 'promoted_tweets'.
        start_time/end_time: datetime window bounds (sent as ISO-8601 + 'Z').
        input_entities: entity id list; copied so the caller's list is untouched.
        segmentation: optional segmentation_type value.

    Returns:
        tuple: (successful query count, summed x-request-cost,
        rate-limited query count).
    """
    entities = list(input_entities)
    resource_url = DOMAIN + "/0/stats/accounts/%s/%s" % (account_id, entity_type)
    param_data = (start_time.isoformat(), end_time.isoformat())
    query_params = '?granularity=HOUR&start_time=%sZ&end_time=%sZ' % param_data
    # e.g. 'line_items' -> 'line_item_ids' query parameter name.
    query_param_entity_name = re.sub(r's$', '_ids', entity_type)
    if segmentation:
        query_params += '&segmentation_type=%s' % segmentation
    query_count = 0
    cost_total = 0
    rate_limited_query_count = 0
    limit_exceeded_sleep = 0
    while entities:
        # Honor the rate-limit backoff computed for the previous batch.
        if limit_exceeded_sleep > 0:
            print('\t! sleeping for %s' % limit_exceeded_sleep)
            time.sleep(limit_exceeded_sleep)
            limit_exceeded_sleep = 0
        # Take the next batch of up to 20 ids off the front of the queue.
        query_entities = []
        limit = 20
        if len(entities) < limit:
            limit = len(entities)
        for _ in range(limit):
            query_entities.append(entities.pop(0))
        url_entites = '&%s=%s' % (query_param_entity_name, ','.join(query_entities))
        stats_url = resource_url + query_params + url_entites
        res_headers, res_data = request(user_twurl, 'GET', headers, stats_url)
        if 'x-request-cost' in res_headers:
            cost_total += int(res_headers['x-request-cost'])
            reset_at = int(res_headers['x-cost-rate-limit-reset'])
            # When the cost budget is exhausted, sleep until the advertised
            # reset before issuing the next batch.
            if (('x-cost-rate-limit-remaining' in res_headers and
                 int(res_headers['x-cost-rate-limit-remaining']) == 0) and
                    res_headers['status'] == '429'):
                limit_exceeded_sleep = reset_at - int(time.time())
        if res_headers['status'] == '200':
            query_count += 1
            if VERBOSE > 1:
                print('VERBOSE:\tStats Query:\t%s' % stats_url)
        elif res_headers['status'] == '429':
            # Rate limited: requeue the whole batch for a later retry.
            print("RATE LIMITED! adding entities back to queue")
            rate_limited_query_count += 1
            entities.extend(query_entities)
        elif res_headers['status'] == '503':
            # Timeout: requeue the batch without counting it as rate limited.
            print("TIMEOUT!")
            print(stats_url)
            entities.extend(query_entities)
        else:
            print("ERROR %s" % res_headers['status'])
            print(res_headers)
            sys.exit(0)
    if VERBOSE > 0:
        if segmentation:
            print('VERBOSE:\tSegmentation type:\t%s' % segmentation)
        print('VERBOSE:\tAvg cost per query:\t%s' % str(cost_total / query_count))
    return query_count, cost_total, rate_limited_query_count
def check(data, start_time, end_time, filter_field=None, filter_data=()):
    """Filter raw entity records down to the ids worth querying stats for.

    An entity is kept unless it finished before ``start_time``, starts after
    ``end_time``, or is deleted/paused and was last updated before
    ``start_time``.  When ``filter_field`` is given, the entity's value for
    that field must also appear in ``filter_data`` (surviving parent ids).

    Args:
        data: list of entity dicts from the Ads API (may be None or empty).
        start_time: datetime lower bound of the reporting window.
        end_time: datetime upper bound of the reporting window.
        filter_field: optional key naming the parent-id field to filter on.
        filter_data: allowed values for ``filter_field``.  FIX: the default
            is now an immutable tuple instead of a shared mutable list.

    Returns:
        list: ids of entities that may have stats in the window.
    """
    def _parse(timestamp):
        # API timestamps are UTC strings like '2015-01-01T12:00:00Z'.
        return datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')

    kept = []
    if not data:
        return kept
    for entity in data:
        if entity.get('end_time') and _parse(entity['end_time']) < start_time:
            continue
        if entity.get('start_time') and _parse(entity['start_time']) > end_time:
            continue
        if entity['deleted'] and _parse(entity['updated_at']) < start_time:
            continue
        if entity['paused'] and _parse(entity['updated_at']) < start_time:
            continue
        if filter_field and entity[filter_field] not in filter_data:
            continue
        kept.append(entity['id'])
    return kept
def format_timestamp(timestamp):
    """Parse an API timestamp like '2015-01-01T12:00:00Z' into a naive datetime."""
    parsed = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
    return parsed
def linesep():
    """Print a horizontal separator line to stdout."""
    separator = '-----------------------------------------------'
    print(separator)
# Script entry point: parse CLI options, then run the stats fetch.
if __name__ == '__main__':
    options = input()
    main(options)
| 36.2 | 100 | 0.627309 |
import oauth2 as oauth
import yaml
import json
import os
import time
import datetime
import argparse
import re
import sys
# Ads API host; all resource paths below are joined onto this.
DOMAIN = 'https://ads-api.twitter.com'
# Global verbosity level (0 = quiet, 1 = verbose, 2 = very verbose); set in main().
VERBOSE = 0
# Segmentation types that can be requested without extra sub-parameters.
NON_SUB_PARAM_SEGMENTATION_TYPES = ['PLATFORMS', 'LOCATIONS', 'GENDER', 'INTERESTS', 'KEYWORDS']
def main(options):
    """Pull a 7-day, hour-granularity stats report for one ads account.

    Walks the account hierarchy (funding instruments -> campaigns ->
    line items -> promoted tweets), filters out entities that could not have
    served during the window, fetches analytics for the survivors (optionally
    segmented), and prints query counts plus total request cost.

    Args:
        options: argparse namespace produced by input().
    """
    global VERBOSE
    account = options.account_id
    headers = options.headers
    if options.veryverbose:
        VERBOSE = 2
    elif options.verbose:
        VERBOSE = 1
    # FIX: time.clock() measured CPU time on Unix and was removed in
    # Python 3.8; time.time() reports the intended wall-clock duration.
    start = time.time()
    user_twurl = twurlauth()
    print("Best practices stats check for :account_id %s" % account)
    linesep()
    # Reporting window: last 7 days snapped to whole hours; end_time backs
    # off one second so it stays inside the final hour bucket.
    now = datetime.datetime.utcnow()
    start_time = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    start_time = start_time.replace(minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.utcnow()
    end_time = end_time.replace(minute=0, second=0, microsecond=0)
    end_time -= datetime.timedelta(seconds=1)
    print('Current time:\t%s' % now)
    print('Start time:\t%s' % start_time)
    print('End time:\t%s' % end_time)
    linesep()
    # Check that we have access to this :account_id before doing anything else.
    resource_path = '/0/accounts/%s' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    if len(data) == 0:
        print('ERROR: Could not locate :account_id %s' % account)
        sys.exit(0)
    # Fetch and filter each entity level, constraining children to the
    # parents that survived filtering.
    resource_path = '/0/accounts/%s/funding_instruments?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    funding_instruments = check(data, start_time, end_time)
    print("Funding instruments:\t\t%s" % len(funding_instruments))
    resource_path = '/0/accounts/%s/campaigns?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    campaigns = check(data, start_time, end_time, 'funding_instrument_id', funding_instruments)
    print("Campaigns:\t\t\t%s" % len(campaigns))
    resource_path = '/0/accounts/%s/line_items?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    line_items = check(data, start_time, end_time, 'campaign_id', campaigns)
    print("Line items:\t\t\t%s" % len(line_items))
    resource_path = '/0/accounts/%s/promoted_tweets?with_deleted=true&count=1000' % account
    data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
    print("Pre-filtered data:\t\t%s" % len(data))
    promoted_tweets = check(data, start_time, end_time, 'line_item_id', line_items)
    print("Promoted Tweets:\t\t%s" % len(promoted_tweets))
    total_query_count = 0
    total_request_cost = 0
    total_rate_limited_query_count = 0
    segmented_query_count = 0
    segmented_request_cost = 0
    if len(line_items) > 0:
        print("\tfetching stats for %s line items" % len(line_items))
        (query_count,
         cost_total,
         rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'line_items',
                                                  start_time, end_time, line_items)
        total_query_count += query_count
        total_request_cost += cost_total
        # FIX: this branch previously dropped its rate-limited count, so the
        # "Queries Rate Limited" total under-reported.
        total_rate_limited_query_count += rate_limited_query_count
    if len(promoted_tweets) > 0:
        print("\tfetching stats for %s promoted tweets" % len(promoted_tweets))
        (query_count,
         cost_total,
         rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'promoted_tweets',
                                                  start_time, end_time, promoted_tweets)
        total_query_count += query_count
        total_request_cost += cost_total
        total_rate_limited_query_count += rate_limited_query_count
    # Segmentation queries: one extra pass per segmentation type that needs
    # no additional sub-parameters.
    if options.segmentation:
        if len(line_items) > 0:
            print("\tfetching segmentation stats for %s line items" % len(line_items))
            for seg_type in NON_SUB_PARAM_SEGMENTATION_TYPES:
                (query_count,
                 cost_total,
                 rate_limited_query_count) = gather_stats(user_twurl, headers, account,
                                                          'line_items', start_time, end_time,
                                                          line_items, seg_type)
                total_query_count += query_count
                total_request_cost += cost_total
                total_rate_limited_query_count += rate_limited_query_count
                segmented_query_count += query_count
                segmented_request_cost += cost_total
        if len(promoted_tweets) > 0:
            print("\tfetching segmentation stats for %s promoted tweets" % len(promoted_tweets))
            for seg_type in NON_SUB_PARAM_SEGMENTATION_TYPES:
                (query_count,
                 cost_total,
                 rate_limited_query_count) = gather_stats(user_twurl, headers, account,
                                                          'promoted_tweets', start_time, end_time,
                                                          promoted_tweets, seg_type)
                total_query_count += query_count
                total_request_cost += cost_total
                total_rate_limited_query_count += rate_limited_query_count
                segmented_query_count += query_count
                segmented_request_cost += cost_total
    linesep()
    if options.segmentation:
        print("Non-Seg Stats Req Cost:\t\t%s" % (total_request_cost - segmented_request_cost))
        print("Segmented Stats Req Cost:\t%s" % segmented_request_cost)
        linesep()
    print("Total Stats Queries:\t\t%s" % total_query_count)
    print("Total Stats Request Cost:\t%s" % total_request_cost)
    if VERBOSE > 0:
        print("Avg Cost per Query:\t\t%s" % str(total_request_cost / total_query_count))
    print("Queries Rate Limited:\t\t%s" % total_rate_limited_query_count)
    linesep()
    elapsed = (time.time() - start)
    print('Time elapsed:\t\t\t%s' % elapsed)
def input():
    """Parse command-line options for the stats fetcher.

    NOTE: shadows the ``input`` builtin; the name is kept because the
    ``__main__`` entry point calls it as ``input()``.
    """
    parser = argparse.ArgumentParser(description='Fetch Twitter Ads Account Stats')
    parser.add_argument('-a', '--account', dest='account_id', required=True,
                        help='Ads Account ID')
    parser.add_argument('-A', '--header', action='append', dest='headers',
                        help='HTTP headers to include')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                        help='Verbose outputs cost avgs')
    parser.add_argument('-vv', '--very-verbose', action='store_true', dest='veryverbose',
                        help='Very verbose outputs API queries made')
    parser.add_argument('-s', '--segmentation', action='store_true', dest='segmentation',
                        help='Pull segmentation stats')
    return parser.parse_args()
def twurlauth():
    """Read OAuth credentials for the default twurl profile from ``~/.twurlrc``.

    Returns:
        tuple: (consumer_key, consumer_secret, oauth_token, oauth_token_secret)
    """
    # FIX: safe_load avoids arbitrary object construction from the YAML file
    # and works on PyYAML >= 5, where a plain yaml.load() without a Loader is
    # deprecated (and a TypeError on PyYAML 6).  The redundant f.close()
    # inside the with-block is dropped.
    with open(os.path.expanduser('~/.twurlrc'), 'r') as f:
        contents = yaml.safe_load(f)
    default_user = contents["configuration"]["default_profile"][0]
    CONSUMER_KEY = contents["configuration"]["default_profile"][1]
    CONSUMER_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["consumer_secret"]
    USER_OAUTH_TOKEN = contents["profiles"][default_user][CONSUMER_KEY]["token"]
    USER_OAUTH_TOKEN_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["secret"]
    return CONSUMER_KEY, CONSUMER_SECRET, USER_OAUTH_TOKEN, USER_OAUTH_TOKEN_SECRET
def request(user_twurl, http_method, headers, url):
    """Issue a signed OAuth1 request and return (response headers, parsed JSON).

    Args:
        user_twurl: (consumer_key, consumer_secret, token, token_secret) tuple.
        http_method: HTTP verb, e.g. 'GET'.
        headers: iterable of 'Key: value' strings, or None.
        url: full request URL.

    Returns:
        tuple: (response, data) where ``data`` is the decoded JSON body, or
        None when the body is not valid JSON.
    """
    consumer_key, consumer_secret, oauth_token, oauth_token_secret = user_twurl
    consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
    token = oauth.Token(key=oauth_token, secret=oauth_token_secret)
    client = oauth.Client(consumer, token)
    header_list = {}
    if headers:
        for i in headers:
            (key, value) = i.split(': ')
            if key and value:
                header_list[key] = value
    response, content = client.request(url, method=http_method, headers=header_list)
    try:
        data = json.loads(content)
    # FIX: was a bare except, which also swallowed KeyboardInterrupt /
    # SystemExit; only JSON-decode failures should yield None.
    except (ValueError, TypeError):
        data = None
    return response, data
def get_data(user_twurl, http_method, headers, url):
    """Fetch every page of an Ads API collection and return the combined data list.

    Follows ``next_cursor`` pagination; exits the process when the initial
    request does not come back with HTTP status 200.
    """
    collected = []
    res_headers, response = request(user_twurl, http_method, headers, url)
    if res_headers['status'] != '200':
        print('ERROR: query failed, cannot continue: %s' % url)
        sys.exit(0)
    if response and 'data' in response:
        collected.extend(response['data'])
    while 'next_cursor' in response and response['next_cursor'] is not None:
        paged_url = '%s&cursor=%s' % (url, response['next_cursor'])
        res_headers, response = request(user_twurl, http_method, headers, paged_url)
        if response and 'data' in response:
            collected.extend(response['data'])
    return collected
def gather_stats(user_twurl, headers, account_id, entity_type, start_time, end_time, input_entities,
                 segmentation=None):
    """Fetch hourly analytics for a set of entities, batching and retrying.

    Entities are queried in batches of up to 20 ids per request.
    Rate-limited (429) and timed-out (503) batches are re-queued; any other
    non-200 status aborts the process.

    Args:
      user_twurl: OAuth credential tuple from twurlauth().
      headers: Extra 'Name: value' header strings, or None.
      account_id: Ads account id string.
      entity_type: Plural entity collection name, e.g. 'campaigns'.
      start_time: datetime lower bound of the stats window (rendered as UTC).
      end_time: datetime upper bound of the stats window (rendered as UTC).
      input_entities: Iterable of entity id strings to query.
      segmentation: Optional segmentation type to request.

    Returns:
      Tuple of (successful query count, total request cost,
      rate-limited query count).
    """
    entities = list(input_entities)
    resource_url = DOMAIN + "/0/stats/accounts/%s/%s" % (account_id, entity_type)
    param_data = (start_time.isoformat(), end_time.isoformat())
    query_params = '?granularity=HOUR&start_time=%sZ&end_time=%sZ' % param_data
    # e.g. 'campaigns' -> 'campaign_ids' for the query string parameter name.
    query_param_entity_name = re.sub(r's$', '_ids', entity_type)
    if segmentation:
        query_params += '&segmentation_type=%s' % segmentation
    query_count = 0
    cost_total = 0
    rate_limited_query_count = 0
    limit_exceeded_sleep = 0
    while entities:
        if limit_exceeded_sleep > 0:
            print('\t! sleeping for %s' % limit_exceeded_sleep)
            time.sleep(limit_exceeded_sleep)
            limit_exceeded_sleep = 0
        # Pull the next batch of up to 20 entity ids off the queue.
        limit = min(20, len(entities))
        query_entities = [entities.pop(0) for _ in range(limit)]
        url_entites = '&%s=%s' % (query_param_entity_name, ','.join(query_entities))
        stats_url = resource_url + query_params + url_entites
        res_headers, res_data = request(user_twurl, 'GET', headers, stats_url)
        if 'x-request-cost' in res_headers:
            cost_total += int(res_headers['x-request-cost'])
            reset_at = int(res_headers['x-cost-rate-limit-reset'])
            if (('x-cost-rate-limit-remaining' in res_headers and
                 int(res_headers['x-cost-rate-limit-remaining']) == 0) and
                    res_headers['status'] == '429'):
                # Cost budget exhausted: sleep until the reported reset time.
                limit_exceeded_sleep = reset_at - int(time.time())
        if res_headers['status'] == '200':
            query_count += 1
            if VERBOSE > 1:
                print('VERBOSE:\tStats Query:\t%s' % stats_url)
        elif res_headers['status'] == '429':
            print("RATE LIMITED! adding entities back to queue")
            rate_limited_query_count += 1
            entities.extend(query_entities)
        elif res_headers['status'] == '503':
            print("TIMEOUT!")
            print(stats_url)
            entities.extend(query_entities)
        else:
            print("ERROR %s" % res_headers['status'])
            print(res_headers)
            sys.exit(0)
    if VERBOSE > 0:
        if segmentation:
            print('VERBOSE:\tSegmentation type:\t%s' % segmentation)
        # Guard the average: if every batch was rate limited (or the input
        # was empty), query_count is zero and the division would raise
        # ZeroDivisionError.
        if query_count:
            print('VERBOSE:\tAvg cost per query:\t%s' % str(cost_total / query_count))
    return query_count, cost_total, rate_limited_query_count
def check(data, start_time, end_time, filter_field=None, filter_data=None):
    """Filter entity records down to the ids relevant for a stats window.

    An entity is skipped when it ended before the window started, starts
    after the window ends, was deleted or paused before the window started,
    or (when ``filter_field`` is given) its value for that field is not in
    ``filter_data``.

    Args:
      data: List of entity dicts as returned by the API, or None.
      start_time: datetime lower bound of the reporting window.
      end_time: datetime upper bound of the reporting window.
      filter_field: Optional dict key to filter entities on.
      filter_data: Allowed values for ``filter_field``; defaults to empty.

    Returns:
      List of entity id strings that pass every check.
    """
    # None (not []) as the default avoids Python's shared mutable default
    # argument pitfall.
    if filter_data is None:
        filter_data = []
    d = []
    if data:
        for i in data:
            if 'end_time' in i and i['end_time'] and format_timestamp(i['end_time']) < start_time:
                continue
            elif ('start_time' in i and i['start_time'] and
                  format_timestamp(i['start_time']) > end_time):
                continue
            elif i['deleted'] and format_timestamp(i['updated_at']) < start_time:
                continue
            elif i['paused'] and format_timestamp(i['updated_at']) < start_time:
                continue
            elif filter_field and i[filter_field] not in filter_data:
                continue
            else:
                d.append(i['id'])
    return d
def format_timestamp(timestamp):
    """Parse an API timestamp like '2020-01-02T03:04:05Z' into a datetime."""
    iso_zulu_format = '%Y-%m-%dT%H:%M:%SZ'
    return datetime.datetime.strptime(timestamp, iso_zulu_format)
def linesep():
    """Print a horizontal separator line to stdout."""
    separator = '-----------------------------------------------'
    print(separator)
if __name__ == '__main__':
    # Entry point: a single raw line is read from stdin and handed to main()
    # unchanged.  NOTE(review): the argparse helper earlier in this file
    # suggests parse_args() may have been intended here instead of input() —
    # confirm against main()'s expected argument type.
    options = input()
    main(options)
| true | true |
1c34a93ae2bd9277a4dc8c36d811381ee461f571 | 20,333 | py | Python | tensorflow/python/keras/legacy_tf_layers/pooling.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 190,993 | 2015-11-09T13:17:30.000Z | 2022-03-31T23:05:27.000Z | tensorflow/python/keras/legacy_tf_layers/pooling.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 48,461 | 2015-11-09T14:21:11.000Z | 2022-03-31T23:17:33.000Z | tensorflow/python/keras/legacy_tf_layers/pooling.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 104,981 | 2015-11-09T13:40:17.000Z | 2022-03-31T19:51:54.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=g-classes-have-attributes
"""Contains the pooling layer classes and their functional aliases."""
import warnings
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.keras.legacy_tf_layers import base
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling1D'])
@tf_export(v1=['layers.AveragePooling1D'])
class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):
  """Average Pooling layer for 1D inputs.

  Args:
    pool_size: Integer or tuple/list of a single integer: size of the
      pooling window.
    strides: Integer or tuple/list of a single integer: stride of the
      pooling operation.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, length, channels)` inputs or
      'channels_first' for `(batch, channels, length)` inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling1d'])
@tf_export(v1=['layers.average_pooling1d'])
def average_pooling1d(inputs, pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Apply 1D average pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 3.
    pool_size: Integer or tuple/list of a single integer: window size.
    strides: Integer or tuple/list of a single integer: pooling stride.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, length, channels)` inputs or
      'channels_first' for `(batch, channels, length)` inputs.
    name: A string, the name of the layer.

  Returns:
    The output tensor, of rank 3.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.average_pooling1d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.AveragePooling1D` instead.')
  pooling_layer = AveragePooling1D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling1D'])
@tf_export(v1=['layers.MaxPooling1D'])
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):
  """Max Pooling layer for 1D inputs.

  Args:
    pool_size: Integer or tuple/list of a single integer: size of the
      pooling window.
    strides: Integer or tuple/list of a single integer: stride of the
      pooling operation.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, length, channels)` inputs or
      'channels_first' for `(batch, channels, length)` inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling1d'])
@tf_export(v1=['layers.max_pooling1d'])
def max_pooling1d(inputs, pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Apply 1D max pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 3.
    pool_size: Integer or tuple/list of a single integer: window size.
    strides: Integer or tuple/list of a single integer: pooling stride.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, length, channels)` inputs or
      'channels_first' for `(batch, channels, length)` inputs.
    name: A string, the name of the layer.

  Returns:
    The output tensor, of rank 3.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.max_pooling1d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.MaxPooling1D` instead.')
  pooling_layer = MaxPooling1D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling2D'])
@tf_export(v1=['layers.AveragePooling2D'])
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):
  """Average pooling layer for 2D inputs (e.g. images).

  Args:
    pool_size: Integer or tuple/list of 2 integers (pool_height, pool_width)
      giving the pooling window size; a single integer applies to both
      spatial dimensions.
    strides: Integer or tuple/list of 2 integers: pooling strides; a single
      integer applies to both spatial dimensions.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, height, width, channels)`
      inputs or 'channels_first' for `(batch, channels, height, width)`
      inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling2d'])
@tf_export(v1=['layers.average_pooling2d'])
def average_pooling2d(inputs,
                      pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Apply 2D average pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 4.
    pool_size: Integer or tuple/list of 2 integers (pool_height, pool_width)
      giving the window size; a single integer applies to both dimensions.
    strides: Integer or tuple/list of 2 integers: pooling strides; a single
      integer applies to both dimensions.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, height, width, channels)`
      inputs or 'channels_first' for `(batch, channels, height, width)`
      inputs.
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.average_pooling2d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.AveragePooling2D` instead.')
  pooling_layer = AveragePooling2D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling2D'])
@tf_export(v1=['layers.MaxPooling2D'])
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):
  """Max pooling layer for 2D inputs (e.g. images).

  Args:
    pool_size: Integer or tuple/list of 2 integers (pool_height, pool_width)
      giving the pooling window size; a single integer applies to both
      spatial dimensions.
    strides: Integer or tuple/list of 2 integers: pooling strides; a single
      integer applies to both spatial dimensions.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, height, width, channels)`
      inputs or 'channels_first' for `(batch, channels, height, width)`
      inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling2d'])
@tf_export(v1=['layers.max_pooling2d'])
def max_pooling2d(inputs,
                  pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Apply 2D max pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 4.
    pool_size: Integer or tuple/list of 2 integers (pool_height, pool_width)
      giving the window size; a single integer applies to both dimensions.
    strides: Integer or tuple/list of 2 integers: pooling strides; a single
      integer applies to both dimensions.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for `(batch, height, width, channels)`
      inputs or 'channels_first' for `(batch, channels, height, width)`
      inputs.
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.max_pooling2d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.MaxPooling2D` instead.')
  pooling_layer = MaxPooling2D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling3D'])
@tf_export(v1=['layers.AveragePooling3D'])
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):
  """Average pooling layer for 3D inputs (e.g. volumes).

  Args:
    pool_size: Integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width) giving the pooling window size;
      a single integer applies to all spatial dimensions.
    strides: Integer or tuple/list of 3 integers: pooling strides; a single
      integer applies to all spatial dimensions.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for
      `(batch, depth, height, width, channels)` inputs or 'channels_first'
      for `(batch, channels, depth, height, width)` inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling3d'])
@tf_export(v1=['layers.average_pooling3d'])
def average_pooling3d(inputs,
                      pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Apply 3D average pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 5.
    pool_size: Integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width) giving the window size; a single
      integer applies to all spatial dimensions.
    strides: Integer or tuple/list of 3 integers: pooling strides; a single
      integer applies to all spatial dimensions.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for
      `(batch, depth, height, width, channels)` inputs or 'channels_first'
      for `(batch, channels, depth, height, width)` inputs.
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.average_pooling3d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.AveragePooling3D` instead.')
  pooling_layer = AveragePooling3D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling3D'])
@tf_export(v1=['layers.MaxPooling3D'])
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
  """Max pooling layer for 3D inputs (e.g. volumes).

  Args:
    pool_size: Integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width) giving the pooling window size;
      a single integer applies to all spatial dimensions.
    strides: Integer or tuple/list of 3 integers: pooling strides; a single
      integer applies to all spatial dimensions.  Must not be None.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for
      `(batch, depth, height, width, channels)` inputs or 'channels_first'
      for `(batch, channels, depth, height, width)` inputs.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    if strides is None:
      raise ValueError('Argument `strides` must not be None.')
    super().__init__(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling3d'])
@tf_export(v1=['layers.max_pooling3d'])
def max_pooling3d(inputs,
                  pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Apply 3D max pooling via the legacy functional wrapper.

  Args:
    inputs: The tensor over which to pool. Must have rank 5.
    pool_size: Integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width) giving the window size; a single
      integer applies to all spatial dimensions.
    strides: Integer or tuple/list of 3 integers: pooling strides; a single
      integer applies to all spatial dimensions.
    padding: 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' for
      `(batch, depth, height, width, channels)` inputs or 'channels_first'
      for `(batch, channels, depth, height, width)` inputs.
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  warnings.warn(
      '`tf.layers.max_pooling3d` is deprecated and will be removed in a '
      'future version. Please use `tf.keras.layers.MaxPooling3D` instead.')
  pooling_layer = MaxPooling3D(
      pool_size=pool_size, strides=strides, padding=padding,
      data_format=data_format, name=name)
  return pooling_layer.apply(inputs)
# Aliases
# Short names kept for backward compatibility with existing tf.layers v1
# call sites; they are plain references to the classes/functions above.
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
| 41.837449 | 80 | 0.680962 |
import warnings
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.keras.legacy_tf_layers import base
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling1D'])
@tf_export(v1=['layers.AveragePooling1D'])
class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling1D, self).__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling1d'])
@tf_export(v1=['layers.average_pooling1d'])
def average_pooling1d(inputs, pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.average_pooling1d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.AveragePooling1D` instead.')
layer = AveragePooling1D(pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling1D'])
@tf_export(v1=['layers.MaxPooling1D'])
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling1D, self).__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling1d'])
@tf_export(v1=['layers.max_pooling1d'])
def max_pooling1d(inputs, pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.max_pooling1d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.MaxPooling1D` instead.')
layer = MaxPooling1D(pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling2D'])
@tf_export(v1=['layers.AveragePooling2D'])
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling2D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling2d'])
@tf_export(v1=['layers.average_pooling2d'])
def average_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.average_pooling2d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.AveragePooling2D` instead.')
layer = AveragePooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling2D'])
@tf_export(v1=['layers.MaxPooling2D'])
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling2D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling2d'])
@tf_export(v1=['layers.max_pooling2d'])
def max_pooling2d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.max_pooling2d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.MaxPooling2D` instead.')
layer = MaxPooling2D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.AveragePooling3D'])
@tf_export(v1=['layers.AveragePooling3D'])
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling3D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.average_pooling3d'])
@tf_export(v1=['layers.average_pooling3d'])
def average_pooling3d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.average_pooling3d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.AveragePooling3D` instead.')
layer = AveragePooling3D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
@keras_export(v1=['keras.__internal__.legacy.layers.MaxPooling3D'])
@tf_export(v1=['layers.MaxPooling3D'])
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(MaxPooling3D, self).__init__(
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, name=name, **kwargs)
@keras_export(v1=['keras.__internal__.legacy.layers.max_pooling3d'])
@tf_export(v1=['layers.max_pooling3d'])
def max_pooling3d(inputs,
pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
warnings.warn('`tf.layers.max_pooling3d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.MaxPooling3D` instead.')
layer = MaxPooling3D(pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format,
name=name)
return layer.apply(inputs)
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
| true | true |
1c34a9d14522f5765d93c55935a02b61f39cd5a8 | 4,949 | py | Python | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_02_01/resource_management_client.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_02_01/resource_management_client.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_02_01/resource_management_client.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.deployments_operations import DeploymentsOperations
from .operations.providers_operations import ProvidersOperations
from .operations.resources_operations import ResourcesOperations
from .operations.resource_groups_operations import ResourceGroupsOperations
from .operations.tags_operations import TagsOperations
from .operations.deployment_operations import DeploymentOperations
from . import models
class ResourceManagementClientConfiguration(AzureConfiguration):
    """Configuration for ResourceManagementClient.

    All parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        # Default to the public Azure Resource Manager endpoint.
        super(ResourceManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('azure-mgmt-resource/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class ResourceManagementClient(SDKClient):
    """Provides operations for working with resources and resource groups.

    :ivar config: Configuration for client.
    :vartype config: ResourceManagementClientConfiguration
    :ivar deployments: Deployments operations
    :vartype deployments: azure.mgmt.resource.resources.v2018_02_01.operations.DeploymentsOperations
    :ivar providers: Providers operations
    :vartype providers: azure.mgmt.resource.resources.v2018_02_01.operations.ProvidersOperations
    :ivar resources: Resources operations
    :vartype resources: azure.mgmt.resource.resources.v2018_02_01.operations.ResourcesOperations
    :ivar resource_groups: ResourceGroups operations
    :vartype resource_groups: azure.mgmt.resource.resources.v2018_02_01.operations.ResourceGroupsOperations
    :ivar tags: Tags operations
    :vartype tags: azure.mgmt.resource.resources.v2018_02_01.operations.TagsOperations
    :ivar deployment_operations: DeploymentOperations operations
    :vartype deployment_operations: azure.mgmt.resource.resources.v2018_02_01.operations.DeploymentOperations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        self.config = ResourceManagementClientConfiguration(
            credentials, subscription_id, base_url)
        super(ResourceManagementClient, self).__init__(
            self.config.credentials, self.config)

        # Register every generated model class for (de)serialization.
        client_models = {
            name: cls for name, cls in models.__dict__.items()
            if isinstance(cls, type)}
        self.api_version = '2018-02-01'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Every operation group shares the same client/config/serializer
        # wiring.
        op_args = (self._client, self.config, self._serialize,
                   self._deserialize)
        self.deployments = DeploymentsOperations(*op_args)
        self.providers = ProvidersOperations(*op_args)
        self.resources = ResourcesOperations(*op_args)
        self.resource_groups = ResourceGroupsOperations(*op_args)
        self.tags = TagsOperations(*op_args)
        self.deployment_operations = DeploymentOperations(*op_args)
| 46.252336 | 109 | 0.736108 |
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.deployments_operations import DeploymentsOperations
from .operations.providers_operations import ProvidersOperations
from .operations.resources_operations import ResourcesOperations
from .operations.resource_groups_operations import ResourceGroupsOperations
from .operations.tags_operations import TagsOperations
from .operations.deployment_operations import DeploymentOperations
from . import models
class ResourceManagementClientConfiguration(AzureConfiguration):
    """Configuration for :class:`ResourceManagementClient`.

    All parameters passed here are saved as instance attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :param subscription_id: The ID of the target subscription.
    :param str base_url: Service URL; defaults to the public ARM endpoint.
    :raises ValueError: if credentials or subscription_id is None.
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):
        # Both required parameters must be supplied; fail fast otherwise.
        for name, value in (('credentials', credentials),
                            ('subscription_id', subscription_id)):
            if value is None:
                raise ValueError("Parameter '{}' must not be None.".format(name))

        # Fall back to the public Azure Resource Manager endpoint.
        super(ResourceManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('azure-mgmt-resource/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class ResourceManagementClient(SDKClient):
    """Provides operations for working with resources and resource groups.

    Exposes one operations-group attribute per ARM resource area
    (deployments, providers, resources, resource_groups, tags,
    deployment_operations), all bound to API version 2018-02-01.

    :param credentials: Credentials needed for the client to connect to Azure.
    :param subscription_id: The ID of the target subscription.
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        self.config = ResourceManagementClientConfiguration(credentials, subscription_id, base_url)
        super(ResourceManagementClient, self).__init__(self.config.credentials, self.config)

        # Collect every model class exported by the generated models module so
        # the (de)serializers know about them.
        client_models = {
            name: obj for name, obj in models.__dict__.items() if isinstance(obj, type)}
        self.api_version = '2018-02-01'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Every operations group takes the same four constructor arguments.
        operation_args = (self._client, self.config, self._serialize, self._deserialize)
        self.deployments = DeploymentsOperations(*operation_args)
        self.providers = ProvidersOperations(*operation_args)
        self.resources = ResourcesOperations(*operation_args)
        self.resource_groups = ResourceGroupsOperations(*operation_args)
        self.tags = TagsOperations(*operation_args)
        self.deployment_operations = DeploymentOperations(*operation_args)
| true | true |
1c34aa2b27f5bb6516cbd5dc6fc230dfc8b8ad9b | 260 | py | Python | lang/py/rfc/20/multiprocessing/connection_client_20_3_6.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/rfc/20/multiprocessing/connection_client_20_3_6.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/rfc/20/multiprocessing/connection_client_20_3_6.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:UTF-8
from multiprocessing.connection import Client
conn = Client(('45.77.93.132', 15000), authkey=b'12345')
conn.send((3, 5))
r = conn.recv()
print(r)
conn.send(("hello", "world"))
r = conn.recv()
print(r)
conn.close()
| 16.25 | 56 | 0.657692 |
from multiprocessing.connection import Client

# Exchange two request/reply pairs with the remote connection server.
connection = Client(('45.77.93.132', 15000), authkey=b'12345')
for payload in [(3, 5), ("hello", "world")]:
    connection.send(payload)
    reply = connection.recv()
    print(reply)
connection.close()
| true | true |
1c34aa6b9a93347f198a6efead0a8d1dfecf2e32 | 2,671 | py | Python | jarviscli/plugins/quote.py | qwireq/Jarvis | 7d8aafd7e7c94ecc0eab2a09fa6484ae599606b8 | [
"MIT"
] | 1 | 2019-10-03T13:39:19.000Z | 2019-10-03T13:39:19.000Z | jarviscli/plugins/quote.py | qwireq/Jarvis | 7d8aafd7e7c94ecc0eab2a09fa6484ae599606b8 | [
"MIT"
] | null | null | null | jarviscli/plugins/quote.py | qwireq/Jarvis | 7d8aafd7e7c94ecc0eab2a09fa6484ae599606b8 | [
"MIT"
] | null | null | null | import requests
import bs4
from six.moves import input
import json
from plugin import plugin, require
@require(network=True)
@plugin('quote')
class Quote():
    """
    quote prints quote for the day for you or quotes based on a given keyword
    """

    def __call__(self, jarvis, s):
        """Entry point: ask which mode the user wants and dispatch to it."""
        prompt = 'Press 1 to get the quote of the day \n or 2 to get quotes based on a keyword: '
        user_input = self.get_input(prompt, jarvis)
        if user_input == 1:
            self.get_quote_of_the_day(jarvis)
        else:
            text = 'Enter the keyword based on which you want to see quotes: '
            keyword = input(text)
            self.get_keyword_quotes(jarvis, keyword)

    def get_quote_of_the_day(self, jarvis):
        """Scrape brainyquote.com and print today's quote."""
        res = requests.get('https://www.brainyquote.com/quotes_of_the_day.html')
        soup = bs4.BeautifulSoup(res.text, 'lxml')
        # The quote text lives in the alt attribute of the daily image.
        quote = soup.find('img', {'class': 'p-qotd'})
        jarvis.say(quote['alt'])

    def get_keyword_quotes(self, jarvis, keyword):
        """
        shows quotes based on a keyword given by the user
        """
        res = requests.get('https://talaikis.com/api/quotes')
        quotes = json.loads(res.text)
        found_any = False
        line = 1
        for quote in quotes:
            # Fix: contains_word used to be called twice per quote with the
            # first result discarded; call it once and use the result.
            if self.contains_word(quote['quote'], keyword):
                jarvis.say(str(line) + '. ' + quote['quote'])
                line += 1
                found_any = True
        if not found_any:
            # Fix: corrected typos in the user-facing message
            # ("inlcude" -> "include", "PLease" -> "Please").
            jarvis.say('No quotes include this word. Please try one more time.\n')
        else:
            jarvis.say('')
        # Both outcomes offer another round, so hoist the duplicated call.
        self.try_again(keyword, jarvis)

    def try_again(self, keyword, jarvis):
        """Offer to repeat the keyword search; anything but 'again' exits."""
        again = input('Enter -again- to get more quotes or -exit- to leave: ')
        if again.lower() == "again":
            self.get_keyword_quotes(jarvis, keyword)

    def contains_word(self, s, keyword):
        """Heuristic keyword match: space-prefixed lowercase or capitalized
        form appearing anywhere in *s* (original matching rules kept)."""
        return (' ' + keyword.lower()) in s or (keyword.capitalize()) in s

    def get_input(self, prompt, jarvis):
        """
        checks if the input the user gave is valid(either 1 or 2)
        """
        while True:
            try:
                response = int(input(prompt))
                jarvis.say('')
            except ValueError:
                jarvis.say("\nSorry, I didn't understand that.")
                continue
            if response not in (1, 2):
                jarvis.say("\nSorry, your response is not valid.")
                continue
            break
        return response
| 31.797619 | 97 | 0.565331 | import requests
import bs4
from six.moves import input
import json
from plugin import plugin, require
@require(network=True)
@plugin('quote')
class Quote():
    """Print the quote of the day, or quotes matching a user-given keyword."""

    def __call__(self, jarvis, s):
        """Entry point: ask which mode the user wants and dispatch to it."""
        prompt = 'Press 1 to get the quote of the day \n or 2 to get quotes based on a keyword: '
        user_input = self.get_input(prompt, jarvis)
        if user_input == 1:
            self.get_quote_of_the_day(jarvis)
        else:
            text = 'Enter the keyword based on which you want to see quotes: '
            keyword = input(text)
            self.get_keyword_quotes(jarvis, keyword)

    def get_quote_of_the_day(self, jarvis):
        """Scrape brainyquote.com and print today's quote."""
        res = requests.get('https://www.brainyquote.com/quotes_of_the_day.html')
        soup = bs4.BeautifulSoup(res.text, 'lxml')
        # The quote text lives in the alt attribute of the daily image.
        quote = soup.find('img', {'class': 'p-qotd'})
        jarvis.say(quote['alt'])

    def get_keyword_quotes(self, jarvis, keyword):
        """Fetch quotes from the API and print those containing *keyword*."""
        res = requests.get('https://talaikis.com/api/quotes')
        quotes = json.loads(res.text)
        found_any = False
        line = 1
        for quote in quotes:
            # Fix: contains_word used to be called twice per quote with the
            # first result discarded; call it once and use the result.
            if self.contains_word(quote['quote'], keyword):
                jarvis.say(str(line) + '. ' + quote['quote'])
                line += 1
                found_any = True
        if not found_any:
            # Fix: corrected typos in the user-facing message
            # ("inlcude" -> "include", "PLease" -> "Please").
            jarvis.say('No quotes include this word. Please try one more time.\n')
        else:
            jarvis.say('')
        # Both outcomes offer another round, so hoist the duplicated call.
        self.try_again(keyword, jarvis)

    def try_again(self, keyword, jarvis):
        """Offer to repeat the keyword search; anything but 'again' exits."""
        again = input('Enter -again- to get more quotes or -exit- to leave: ')
        if again.lower() == "again":
            self.get_keyword_quotes(jarvis, keyword)

    def contains_word(self, s, keyword):
        """Heuristic keyword match: space-prefixed lowercase or capitalized
        form appearing anywhere in *s* (original matching rules kept)."""
        return (' ' + keyword.lower()) in s or (keyword.capitalize()) in s

    def get_input(self, prompt, jarvis):
        """Prompt until the user enters a valid integer choice (1 or 2)."""
        while True:
            try:
                response = int(input(prompt))
                jarvis.say('')
            except ValueError:
                jarvis.say("\nSorry, I didn't understand that.")
                continue
            if response not in (1, 2):
                jarvis.say("\nSorry, your response is not valid.")
                continue
            break
        return response
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.