| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
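Rows with this schema can be consumed programmatically; below is a minimal sketch using the Hugging Face `datasets` library. The dataset path is a placeholder, since this dump does not name the repository it came from.

```python
from datasets import load_dataset  # pip install datasets

# Placeholder path; substitute the actual dataset repository name.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Peek at a few rows and their derived per-file statistics.
for row in ds.take(3):
    print(row["max_stars_repo_path"], row["size"],
          row["avg_line_length"], row["alphanum_fraction"])
```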
hexsha: f70b82a64651b669501101e2383b4a201ac4b9ba | size: 5,305 | ext: py | lang: Python
path: tests/test_content_download.py | repo: easydo-cn/edo_client | head: 775f185c54f2eeda6a7dd6482de8228ca9ad89b0 | licenses: ["Apache-2.0"]
stars / issues / forks: counts and event datetimes all null; the max_stars_*, max_issues_*, and max_forks_* path, name, head, and licenses fields all repeat the values above
content:
```python
# coding: utf-8
import io
import os
import shutil
import tempfile
import unittest

from edo_client import WoClient


class ContentApi_DownloadTestCase(unittest.TestCase):
    '''
    Basically this is to ensure that all the facilities related to
    HTTP Range headers are working properly.
    '''

    @classmethod
    def setUpClass(cls):
        cls.file_size = 10 * (2 ** 20)
        cls.download_url = 'http://192.168.1.115/docker/unittest/10mb.test'
        cls.api_url = 'https://httpbin.org/redirect-to?url={}'.format(
            cls.download_url
        )
        cls.empty_file_url = 'http://192.168.1.115/docker/unittest/empty_file.bin'
        # We're just testing some basic util functions,
        # and don't want a real WoClient instance
        cls.client = WoClient(
            cls.api_url + '#',
            '', '', '', '',
            account='', instance=''
        )
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    def test_01_get_download_url(self):
        self.assertEqual(
            self.client.content.get_download_url(uid=''),
            self.download_url,
            'Should be able to extract direct download URL from 302 redirect'
        )

    def test_11_download_to_stream_all(self):
        '''Test: download the whole file into a stream'''
        stream = io.BytesIO()
        self.client.content.download_to_stream(
            stream, url=self.download_url
        )
        self.assertEqual(
            self.file_size,
            stream.tell(),
            'Cursor should be at the end of stream after download'
        )
        stream.seek(0, os.SEEK_SET)
        self.assertEqual(
            self.file_size,
            len(stream.read()),
            'File length should be 10 MiB'
        )

    def test_12_download_stream_first_byte(self):
        '''Test: download the first byte into a stream'''
        stream = io.BytesIO()
        self.client.content.download_to_stream(
            stream, url=self.download_url, start=0, end=0,
        )
        self.assertEqual(1, stream.tell(), 'Download first byte of file')

    def test_13_download_stream_head_part(self):
        '''Test: download a leading part of the file into a stream'''
        stream = io.BytesIO()
        self.client.content.download_to_stream(
            stream, url=self.download_url, start=0, end=(5 * (2 ** 20) - 1),
        )
        self.assertEqual(5 * (2 ** 20), stream.tell())

    def test_14_download_stream_tail_part(self):
        '''Test: starting from the middle, download the second half of the file into a stream'''
        stream = io.BytesIO()
        self.client.content.download_to_stream(
            stream, url=self.download_url, start=(5 * (2 ** 20)), end=None,
        )
        self.assertEqual(5 * (2 ** 20), stream.tell())

    def test_15_download_partial(self):
        '''Test: starting from the middle, download a part of the file into a stream'''
        stream = io.BytesIO()
        start, end = 1234, 54321
        self.client.content.download_to_stream(
            stream, url=self.download_url, start=start, end=end,
        )
        self.assertEqual(stream.tell(), end - start + 1)

    def test_21_get_data_full_size(self):
        '''Test: read the full file content'''
        self.assertEqual(
            self.file_size,
            len(self.client.content.get_data(url=self.download_url)),
            '.get_data should be able to download the whole file by default',
        )

    def test_22_get_data_first_byte(self):
        '''Test: read the first byte of the file'''
        self.assertEqual(
            1,
            len(self.client.content.get_data(url=self.download_url, size=1)),
            '.get_data should be able to download the 1st byte of given file',
        )

    def test_23_get_data_head_part(self):
        '''Test: read a leading part of the file'''
        size = 5432
        self.assertEqual(
            size,
            len(self.client.content.get_data(url=self.download_url, size=size)),  # noqa: E501
            '.get_data should download the first {} bytes'.format(size),
        )

    def test_24_get_data_tail_part(self):
        '''Test: starting from the middle, read the tail of the file'''
        start = 12345
        size = self.file_size - start
        self.assertEqual(
            size,
            len(self.client.content.get_data(
                url=self.download_url,
                offset=start, size=size
            )),
            '.get_data should download last {} bytes'.format(size),
        )

    def test_25_get_data_partial(self):
        '''Test: starting from the middle, read a part of the file'''
        start = 23451
        size = self.file_size - start
        self.assertEqual(
            size,
            len(self.client.content.get_data(
                url=self.download_url,
                offset=start, size=size,
            )),
            '.get_data should download {} bytes starting from offset {}'.format(size, start),  # noqa: E501
        )

    def test_31_download_to_file(self):
        '''Test: download the whole file to local disk'''
        fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
        os.close(fd)
        self.client.content.download_to_file(destination=fpath, url=self.download_url)
        self.assertEqual(self.file_size, os.stat(fpath).st_size)

    def test_41_download_empty_file(self):
        '''Test: download an empty file to local disk'''
        fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
        os.close(fd)
        self.client.content.download_to_file(destination=fpath, url=self.empty_file_url)
        self.assertEqual(0, os.stat(fpath).st_size)
```
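The assertions above hinge on HTTP Range semantics: `bytes=start-end` is inclusive on both ends, so a successful partial response carries `end - start + 1` bytes. A standalone sketch of the same arithmetic, assuming `requests` is available and the server honors Range requests:

```python
import requests

# Test fixture URL taken from the setUpClass above.
url = 'http://192.168.1.115/docker/unittest/10mb.test'
start, end = 1234, 54321
resp = requests.get(url, headers={'Range': 'bytes={}-{}'.format(start, end)})
assert resp.status_code == 206               # 206 Partial Content
assert len(resp.content) == end - start + 1  # both ends are inclusive
```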
avg_line_length: 32.746914 | max_line_length: 106 | alphanum_fraction: 0.590575
content_no_comment: duplicate of content with docstrings and comments stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
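The three derived columns can be recomputed from `content`. A rough sketch follows; the dataset's exact definitions (line splitting, handling of trailing newlines, rounding) are assumptions here:

```python
def file_stats(text):
    """Approximate the dataset's derived per-file statistics."""
    lines = text.split('\n')
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / len(text)
    return avg_line_length, max_line_length, alphanum_fraction
```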
hexsha: f70b82c0df0d88c5e8c371dcea1b15a28a5a37fd | size: 321 | ext: py | lang: Python
path: answers/Anuraj Pariya/Day 4/question 1.py | repo: arc03/30-DaysOfCode-March-2021 | head: 6d6e11bf70280a578113f163352fa4fa8408baf6 | licenses: ["MIT"]
stars: 22 (2021-03-16T14:07:47.000Z to 2021-08-13T08:52:50.000Z) | issues: 174 (2021-03-16T21:16:40.000Z to 2021-06-12T05:19:51.000Z) | forks: 135 (2021-03-16T16:47:12.000Z to 2021-06-27T14:22:38.000Z)
content:
```python
n = int(input('enter no.'))
factors = []
# Strip out all factors of 2 first, so the trial divisor below
# only needs to step through odd numbers.
while n % 2 == 0:
    factors.append(2)
    n //= 2
divisor = 3
while n != 1 and divisor <= n:
    if n % divisor == 0:
        factors.append(divisor)
        n //= divisor
    else:
        divisor += 2
print('prime factors are')
for factor in factors:
    print(factor, end=" ")
```
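A quick self-check of the same trial-division logic, wrapped as a function with example inputs chosen here:

```python
def prime_factors(n):
    """Return the prime factorization of n, e.g. 60 -> [2, 2, 3, 5]."""
    factors = []
    while n % 2 == 0:  # strip factors of 2 first
        factors.append(2)
        n //= 2
    divisor = 3
    while n != 1 and divisor <= n:  # odd trial divisors only
        if n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        else:
            divisor += 2
    return factors

assert prime_factors(60) == [2, 2, 3, 5]
assert prime_factors(97) == [97]  # a prime factors as itself
```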
avg_line_length: 18.882353 | max_line_length: 31 | alphanum_fraction: 0.560748
content_no_comment: identical to content (this file has no comments to strip) | is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70b82ff4fbb8ab82c3cc5110fdd6e662a84733a | size: 9,806 | ext: py | lang: Python
path: salt/cli/caller.py | repo: martin-helmich/salt | head: eed588f65b6c7e3b1fbd73bf618eba1d85b7cdb7 | licenses: ["Apache-2.0"]
stars / issues / forks: counts and event datetimes all null
content:
```python
# -*- coding: utf-8 -*-
'''
The caller module is used as a front-end to manage direct calls to the salt
minion modules.
'''

# Import python libs
from __future__ import print_function
import os
import sys
import logging
import datetime
import traceback

# Import salt libs
import salt.exitcodes
import salt.loader
import salt.minion
import salt.output
import salt.payload
import salt.transport
import salt.utils.args
from salt._compat import string_types
from salt.log import LOG_LEVELS
from salt.utils import print_cli

log = logging.getLogger(__name__)

try:
    from raet import raeting, nacling
    from raet.lane.stacking import LaneStack
    from raet.lane.yarding import RemoteYard
except ImportError:
    # Don't die on missing transport libs since only one transport is required
    pass

# Custom exceptions
from salt.exceptions import (
    SaltClientError,
    CommandNotFoundError,
    CommandExecutionError,
    SaltInvocationError,
)


class Caller(object):
    '''
    Factory class to create salt-call callers for different transports
    '''
    @staticmethod
    def factory(opts, **kwargs):
        # Default to ZeroMQ for now
        ttype = 'zeromq'
        # determine the ttype
        if 'transport' in opts:
            ttype = opts['transport']
        elif 'transport' in opts.get('pillar', {}).get('master', {}):
            ttype = opts['pillar']['master']['transport']
        # switch on available ttypes
        if ttype == 'zeromq':
            return ZeroMQCaller(opts, **kwargs)
        elif ttype == 'raet':
            return RAETCaller(opts, **kwargs)
        else:
            raise Exception('Callers are only defined for ZeroMQ and raet')
            # return NewKindOfCaller(opts, **kwargs)


class ZeroMQCaller(object):
    '''
    Object to wrap the calling of local salt modules for the salt-call command
    '''
    def __init__(self, opts):
        '''
        Pass in the command line options
        '''
        self.opts = opts
        self.opts['caller'] = True
        self.serial = salt.payload.Serial(self.opts)
        # Handle this here so other deeper code which might
        # be imported as part of the salt api doesn't do a
        # nasty sys.exit() and tick off our developer users
        try:
            self.minion = salt.minion.SMinion(opts)
        except SaltClientError as exc:
            raise SystemExit(str(exc))

    def call(self):
        '''
        Call the module
        '''
        # raet channel here
        ret = {}
        fun = self.opts['fun']
        ret['jid'] = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
        if fun not in self.minion.functions:
            sys.stderr.write('Function {0} is not available\n'.format(fun))
            sys.exit(-1)
        try:
            sdata = {
                'fun': fun,
                'pid': os.getpid(),
                'jid': ret['jid'],
                'tgt': 'salt-call'}
            args, kwargs = salt.minion.load_args_and_kwargs(
                self.minion.functions[fun],
                salt.utils.args.parse_input(self.opts['arg']),
                data=sdata)
            try:
                with salt.utils.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(self.serial.dumps(sdata))
            except NameError:
                # Don't require msgpack with local
                pass
            except IOError:
                sys.stderr.write(
                    'Cannot write to process directory. '
                    'Do you have permissions to '
                    'write to {0} ?\n'.format(proc_fn))
            func = self.minion.functions[fun]
            try:
                ret['return'] = func(*args, **kwargs)
            except TypeError as exc:
                trace = traceback.format_exc()
                sys.stderr.write('Passed invalid arguments: {0}\n'.format(exc))
                active_level = LOG_LEVELS.get(
                    self.opts['log_level'].lower(), logging.ERROR)
                if active_level <= logging.DEBUG:
                    sys.stderr.write(trace)
                sys.exit(salt.exitcodes.EX_GENERIC)
            try:
                ret['retcode'] = sys.modules[
                    func.__module__].__context__.get('retcode', 0)
            except AttributeError:
                ret['retcode'] = 1
        except CommandExecutionError as exc:
            msg = 'Error running \'{0}\': {1}\n'
            active_level = LOG_LEVELS.get(
                self.opts['log_level'].lower(), logging.ERROR)
            if active_level <= logging.DEBUG:
                sys.stderr.write(traceback.format_exc())
            sys.stderr.write(msg.format(fun, str(exc)))
            sys.exit(salt.exitcodes.EX_GENERIC)
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found: {1}\n'
            sys.stderr.write(msg.format(fun, str(exc)))
            sys.exit(salt.exitcodes.EX_GENERIC)
        try:
            os.remove(proc_fn)
        except (IOError, OSError):
            pass
        if hasattr(self.minion.functions[fun], '__outputter__'):
            oput = self.minion.functions[fun].__outputter__
            if isinstance(oput, string_types):
                ret['out'] = oput
        is_local = self.opts['local'] or self.opts.get(
            'file_client', False) == 'local'
        returners = self.opts.get('return', '').split(',')
        if (not is_local) or returners:
            ret['id'] = self.opts['id']
            ret['fun'] = fun
            ret['fun_args'] = self.opts['arg']
        for returner in returners:
            try:
                ret['success'] = True
                self.minion.returners['{0}.returner'.format(returner)](ret)
            except Exception:
                pass
        # return the job infos back up to the respective minion's master
        if not is_local:
            try:
                mret = ret.copy()
                mret['jid'] = 'req'
                self.return_pub(mret)
            except Exception:
                pass
        # close raet channel here
        return ret

    def return_pub(self, ret):
        '''
        Return the data up to the master
        '''
        channel = salt.transport.Channel.factory(self.opts, usage='salt_call')
        load = {'cmd': '_return', 'id': self.opts['id']}
        for key, value in ret.items():
            load[key] = value
        channel.send(load)

    def print_docs(self):
        '''
        Pick up the documentation for all of the modules and print it out.
        '''
        docs = {}
        for name, func in self.minion.functions.items():
            if name not in docs:
                if func.__doc__:
                    docs[name] = func.__doc__
        for name in sorted(docs):
            if name.startswith(self.opts.get('fun', '')):
                print_cli('{0}:\n{1}\n'.format(name, docs[name]))

    def print_grains(self):
        '''
        Print out the grains
        '''
        grains = salt.loader.grains(self.opts)
        salt.output.display_output({'local': grains}, 'grains', self.opts)

    def run(self):
        '''
        Execute the salt call logic
        '''
        try:
            ret = self.call()
            out = ret.get('out', 'nested')
            if self.opts['metadata']:
                print_ret = ret
                out = 'nested'
            else:
                print_ret = ret.get('return', {})
            salt.output.display_output(
                {'local': print_ret},
                out,
                self.opts)
            if self.opts.get('retcode_passthrough', False):
                sys.exit(ret['retcode'])
        except SaltInvocationError as err:
            raise SystemExit(err)


class RAETCaller(ZeroMQCaller):
    '''
    Object to wrap the calling of local salt modules for the salt-call command
    when the transport is raet
    '''
    def __init__(self, opts):
        '''
        Pass in the command line options
        '''
        self.stack = self._setup_caller_stack(opts)
        salt.transport.jobber_stack = self.stack
        super(RAETCaller, self).__init__(opts)

    def run(self):
        '''
        Execute the salt call logic
        '''
        try:
            ret = self.call()
            self.stack.server.close()
            salt.transport.jobber_stack = None
            if self.opts['metadata']:
                print_ret = ret
            else:
                print_ret = ret.get('return', {})
            salt.output.display_output(
                {'local': print_ret},
                ret.get('out', 'nested'),
                self.opts)
            if self.opts.get('retcode_passthrough', False):
                sys.exit(ret['retcode'])
        except SaltInvocationError as err:
            raise SystemExit(err)

    def _setup_caller_stack(self, opts):
        '''
        Set up and return the LaneStack and Yard used by the channel when one
        is not already set up globally, such as in salt-call, to communicate
        to and from the minion
        '''
        mid = opts['id']
        sockdirpath = opts['sock_dir']
        uid = nacling.uuid(size=18)
        name = 'caller' + uid
        stack = LaneStack(name=name,
                          lanename=mid,
                          sockdirpath=sockdirpath)
        stack.Pk = raeting.packKinds.pack
        stack.addRemote(RemoteYard(stack=stack,
                                   name='manor',
                                   lanename=mid,
                                   dirpath=sockdirpath))
        log.debug("Created Caller Jobber Stack {0}\n".format(stack.name))
        return stack
```
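One small detail worth isolating from `call()` above: the job id is a pure timestamp rendered through a format spec. A standalone sketch of the same expression:

```python
import datetime

# Same jid format as ZeroMQCaller.call(): a timestamp down to microseconds,
# e.g. '20140715093045123456'; always 20 characters (4+2+2+2+2+2+6).
jid = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
assert len(jid) == 20
print(jid)
```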
avg_line_length: 32.795987 | max_line_length: 80 | alphanum_fraction: 0.538344
content_no_comment: duplicate of content with docstrings and most comments stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70b831d2289ee6bccaec8a8ac8e8f483a6803be | size: 17,388 | ext: py | lang: Python
path: PyFlow/UI/Widgets/PropertiesFramework.py | repo: Kochera/PyFlow | head: 0f59c7127be696c514da276c003d2444cd3a1f9c | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 1 (2020-06-14T19:50:12.000Z to 2020-06-14T19:50:12.000Z)
content:
```python
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##     http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

from nine import str

from PyFlow.UI.Canvas.UICommon import clearLayout
from PyFlow.UI.Widgets.EditPropertiesWidget import EditPropertiesTreeWidget
from PyFlow.UI.Widgets.EditSecurityRatingWidget import EditSecurityRatingTreeWidget

from Qt import QtWidgets
from Qt import QtCore, QtGui


# Framework
class HeadButton(QtWidgets.QPushButton):
    """docstring for HeadButton."""
    def __init__(self, parent=None, maxHeight=25):
        super(HeadButton, self).__init__(parent)
        self.setObjectName(self.__class__.__name__)
        self.setDefault(True)
        self.setMaximumHeight(maxHeight)


class CollapsibleWidget(QtWidgets.QWidget):
    """Has a content widget and a button on top to hide or show the content"""
    def __init__(self, parent=None, headName="Collapse", noSpacer=True, collapsed=False):
        super(CollapsibleWidget, self).__init__(parent)
        self.setObjectName(self.__class__.__name__)
        self.setupUi()
        self.connectUi()
        self.setButtonName(headName)
        if noSpacer:
            self.removeSpacer()
        self.setCollapsed(collapsed)

    def filterContent(self, pattern):
        pass

    def title(self):
        return self.pbHead.text()

    def setReadOnly(self, bReadOnly=True):
        self.ContentWidget.setEnabled(not bReadOnly)

    def connectUi(self):
        self.pbHead.clicked.connect(self.toggleCollapsed)

    def setupUi(self):
        self.resize(400, 300)
        self.mainVLayout = QtWidgets.QVBoxLayout(self)
        self.mainVLayout.setSpacing(2)
        self.mainVLayout.setContentsMargins(2, 2, 2, 2)
        self.mainVLayout.setObjectName("mainVLayout")
        self.mainVLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
        self.pbHead = HeadButton(self)
        self.mainVLayout.addWidget(self.pbHead)
        self.setMinimumHeight(30)
        self.ContentWidget = QtWidgets.QWidget(self)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ContentWidget.sizePolicy().hasHeightForWidth())
        self.ContentWidget.setSizePolicy(sizePolicy)
        self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred))
        self.ContentWidget.setObjectName("ContentWidget")
        self.ContentWidget.setContentsMargins(10, 0, 0, 0)
        self.mainVLayout.addWidget(self.ContentWidget)
        self.spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.mainVLayout.addItem(self.spacerItem)
        self.setWindowTitle(self.objectName())
        self.pbHead.setStyleSheet(self.pbHead.styleSheet() + "\nText-align:left;")
        self.contentHiddenIcon = self.pbHead.style().standardIcon(QtWidgets.QStyle.SP_TitleBarUnshadeButton)
        self.contentVisibleIcon = self.pbHead.style().standardIcon(QtWidgets.QStyle.SP_TitleBarShadeButton)
        self.updateIcon()

    def addWidget(self, widget):
        self.mainVLayout.addWidget(widget)

    def removeSpacer(self):
        if self.spacerItem is not None:
            self.mainVLayout.removeItem(self.spacerItem)
            del self.spacerItem
            self.spacerItem = None

    def setContentHiddenIcon(self, icon):
        self.contentHiddenIcon = icon

    def setContentVisibleIcon(self, icon):
        self.contentVisibleIcon = icon

    def toggleCollapsed(self):
        if self.ContentWidget.isVisible():
            self.setCollapsed(True)
        else:
            self.setCollapsed(False)

    def setButtonName(self, name):
        self.pbHead.setText(name)

    def isCollapsed(self):
        return self.ContentWidget.isHidden()

    def updateIcon(self):
        if self.isCollapsed():
            self.pbHead.setIcon(self.contentHiddenIcon)
        else:
            self.pbHead.setIcon(self.contentVisibleIcon)

    def setCollapsed(self, bCollapsed=False):
        self.ContentWidget.setVisible(not bCollapsed)
        self.updateIcon()


class PropertyEntry(QtWidgets.QWidget):
    """docstring for PropertyEntry."""
    def __init__(self, label, widget, parent=None, hideLabel=False, maxLabelWidth=None, toolTip=""):
        super(PropertyEntry, self).__init__(parent)
        self.label = label
        self.layout = QtWidgets.QHBoxLayout(self)
        self.layout.setContentsMargins(1, 1, 1, 1)
        if not hideLabel:
            label = QtWidgets.QLabel(label + ":")
            label.setStyleSheet("font: bold")
            label.setToolTip(toolTip)
            if not maxLabelWidth:
                label.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred))
            else:
                label.setMaximumWidth(maxLabelWidth)
            self.layout.addWidget(label)
        self.layout.addWidget(widget)
        self.index = -1

    def getLabel(self):
        return self.label


class CollapsibleFormWidget(CollapsibleWidget):
    def __init__(self, parent=None, headName="Collapse", noSpacer=True, collapsed=False, hideLabels=False):
        super(CollapsibleFormWidget, self).__init__(parent, headName=headName, noSpacer=noSpacer, collapsed=collapsed)
        self.hideLabels = hideLabels
        self.Layout = QtWidgets.QVBoxLayout(self.ContentWidget)
        self.Layout.setObjectName("CollapseWidgetFormLayout")
        self.Layout.setSpacing(2)
        self.Layout.setContentsMargins(0, 0, 0, 5)
        self.propertyNames = {}
        self.entryNames = {}
        self.updateIcon()
        self.groups = {}

    def setSpacing(self, spacing=2):
        self.Layout.setSpacing(spacing)

    def isAllWidgetsHidden(self):
        count = self.Layout.count()
        hidden = 0
        for i in range(count):
            widget = self.Layout.itemAt(i).widget()
            if widget.isHidden():
                hidden += 1
        return count == hidden

    def filterContent(self, pattern):
        count = self.Layout.count()
        for key, value in self.entryNames.items():
            if isinstance(value, PropertyEntry):
                value.setVisible(pattern.lower() in value.getLabel().lower())
        for key, value in self.groups.items():
            if isinstance(value, CollapSibleGoupBox):
                if value.isAllWidgetsHidden():
                    value.hide()
                else:
                    value.show()
                    value.setCollapsed(False)

    def insertWidget(self, index=0, label=None, widget=None, maxLabelWidth=None, group=None):
        if widget is None or isinstance(widget, CollapsibleWidget):
            return False
        if group is not None and group != "":
            if group in self.groups:
                groupW = self.groups[group]
            else:
                groupW = CollapSibleGoupBox(group)
                self.groups[group] = groupW
        entry = PropertyEntry(str(label), widget, hideLabel=self.hideLabels, maxLabelWidth=maxLabelWidth)
        self.propertyNames[label] = widget
        self.entryNames[label] = entry
        if group is None or group == "":
            self.Layout.insertWidget(index, entry)
        else:
            groupW.insertWidget(index, entry)
            self.Layout.addWidget(groupW)
        return True

    def addWidget(self, label=None, widget=None, maxLabelWidth=None, group=None):
        if widget is None or isinstance(widget, CollapsibleWidget):
            return False
        if group is not None and group != "":
            if group in self.groups:
                groupW = self.groups[group]
            else:
                groupW = CollapSibleGoupBox(group)
                self.groups[group] = groupW
        self.propertyNames[label] = widget
        entry = PropertyEntry(str(label), widget, hideLabel=self.hideLabels, maxLabelWidth=maxLabelWidth, toolTip=widget.toolTip())
        self.entryNames[label] = entry
        if group is None or group == "":
            self.Layout.addWidget(entry)
        else:
            groupW.addWidget(entry)
            self.Layout.addWidget(groupW)
        return True

    def getWidgetByName(self, name):
        if name in self.propertyNames:
            return self.propertyNames[name]
        else:
            return None


class CollapSibleGoupBox(QtWidgets.QWidget):
    def __init__(self, name):
        super(CollapSibleGoupBox, self).__init__()
        # widgets
        self.controlGroup = QtWidgets.QGroupBox()
        self.controlGroup.setTitle(name)
        self.controlGroup.setCheckable(True)
        self.controlGroup.setChecked(True)
        # groupbox layout
        self.groupLayout = QtWidgets.QVBoxLayout(self.controlGroup)
        self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
        # signals
        self.controlGroup.toggled.connect(
            lambda: self.toggleCollapsed())
        # layout
        self.mainLayout = QtWidgets.QGridLayout(self)
        self.mainLayout.addWidget(self.controlGroup)

    def isAllWidgetsHidden(self):
        count = self.groupLayout.count()
        hidden = 0
        for i in range(count):
            widget = self.groupLayout.itemAt(i).widget()
            if widget.isHidden():
                hidden += 1
        return count == hidden

    def insertWidget(self, index, widget):
        self.groupLayout.insertWidget(index, widget)
        self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())

    def addWidget(self, widget):
        self.groupLayout.addWidget(widget)
        self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())

    def toggleCollapsed(self):
        state = self.controlGroup.isChecked()
        if state:
            self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
        else:
            self.controlGroup.setFixedHeight(30)

    def setCollapsed(self, bCollapsed=False):
        self.controlGroup.setChecked(not bCollapsed)
        if not bCollapsed:
            self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
        else:
            self.controlGroup.setFixedHeight(30)


class PropertiesWidget(QtWidgets.QWidget):
    """docstring for PropertiesWidget."""
    spawnDuplicate = QtCore.Signal()

    def __init__(self, parent=None, searchByHeaders=False):
        super(PropertiesWidget, self).__init__(parent)
        self.setWindowTitle("Properties view")
        self.mainLayout = QtWidgets.QVBoxLayout(self)
        self.mainLayout.setObjectName("propertiesMainLayout")
        self.mainLayout.setContentsMargins(2, 2, 2, 2)
        self.searchBox = QtWidgets.QLineEdit(self)
        self.searchBox.setObjectName("lineEdit")
        self.searchBox.setPlaceholderText(str("search..."))
        self.searchBox.textChanged.connect(self.filterByHeaders if searchByHeaders else self.filterByHeadersAndFields)
        self.searchBoxWidget = QtWidgets.QWidget()
        self.searchBoxLayout = QtWidgets.QHBoxLayout(self.searchBoxWidget)
        self.searchBoxLayout.setContentsMargins(1, 1, 1, 1)
        self.searchBoxLayout.addWidget(self.searchBox)
        # self.settingsButton = QtWidgets.QToolButton()
        # self.settingsButton.setIcon(QtGui.QIcon(":/settings.png"))
        # self.settingsMenu = QtWidgets.QMenu()
        # self.editPropertiesAction = QtWidgets.QAction("Edit Parameter Interface", None)
        # self.settingsMenu.addAction(self.editPropertiesAction)
        # self.settingsButton.setMenu(self.settingsMenu)
        # self.editPropertiesAction.triggered.connect(self.showPropertyEditor)
        # self.settingsButton.clicked.connect(self.spawnDuplicate.emit)
        # self.settingsButton.setPopupMode(QtWidgets.QToolButton.InstantPopup)
        self.lockCheckBox = QtWidgets.QToolButton()
        self.lockCheckBox.setCheckable(True)
        self.lockCheckBox.setIcon(QtGui.QIcon(':/unlocked.png'))
        self.lockCheckBox.toggled.connect(self.changeLockIcon)
        self.searchBoxLayout.addWidget(self.lockCheckBox)
        self.tearOffCopy = QtWidgets.QToolButton()
        self.tearOffCopy.setIcon(QtGui.QIcon(":/tear_off_copy_bw.png"))
        self.tearOffCopy.clicked.connect(self.spawnDuplicate.emit)
        self.searchBoxLayout.addWidget(self.tearOffCopy)
        self.mainLayout.addWidget(self.searchBoxWidget)
        self.searchBoxWidget.hide()
        self.contentLayout = QtWidgets.QVBoxLayout()
        self.contentLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
        self.mainLayout.addLayout(self.contentLayout)
        self.spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.mainLayout.addItem(self.spacerItem)
        self.mainLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
        self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))

    def changeLockIcon(self, checked):
        if checked:
            self.lockCheckBox.setIcon(QtGui.QIcon(':/locked.png'))
        else:
            self.lockCheckBox.setIcon(QtGui.QIcon(':/unlocked.png'))

    def setLockCheckBoxVisible(self, bVisible):
        self.lockCheckBox.setVisible(bVisible)

    def setTearOffCopyVisible(self, bVisible):
        self.tearOffCopy.setVisible(bVisible)

    def setSearchBoxVisible(self, bVisible):
        self.searchBox.setVisible(bVisible)

    def filterByHeaders(self, text):
        count = self.contentLayout.count()
        for i in range(count):
            item = self.contentLayout.itemAt(i)
            w = item.widget()
            if w:
                if text.lower() in w.title().lower():
                    w.show()
                else:
                    w.hide()

    def filterByHeadersAndFields(self, text):
        count = self.contentLayout.count()
        for i in range(count):
            item = self.contentLayout.itemAt(i)
            w = item.widget()
            if w:
                w.filterContent(text)
                if w.isAllWidgetsHidden():
                    w.hide()
                else:
                    w.show()
                    w.setCollapsed(False)

    def isLocked(self):
        return self.lockCheckBox.isChecked()

    def clear(self):
        if not self.isLocked():
            clearLayout(self.contentLayout)
            self.searchBoxWidget.hide()
            self.lockCheckBox.setChecked(False)

    def insertWidget(self, collapsibleWidget, index):
        if not self.isLocked():
            if isinstance(collapsibleWidget, CollapsibleFormWidget):
                self.searchBoxWidget.show()
            self.contentLayout.insertWidget(index, collapsibleWidget)
        return True

    def addWidget(self, collapsibleWidget):
        if not self.isLocked():
            if isinstance(collapsibleWidget, CollapsibleFormWidget):
                self.searchBoxWidget.show()
            self.contentLayout.insertWidget(-1, collapsibleWidget)
        return True

    def showPropertyEditor(self):
        tree = EditPropertiesTreeWidget()
        count = self.contentLayout.count()
        folders = {}
        for i in range(count):
            item = self.contentLayout.itemAt(i)
            w = item.widget()
            if w:
                if w.title() in ["Inputs"]:
                    for key, group in w.groups.items():
                        if key not in folders:
                            folders[key] = {}
                        # for e in range(group.groupLayout.count()):
                        #     w = group.groupLayout.itemAt(e).widget()
                        #     folders[key][w.getLabel()] = group.groupLayout.itemAt(e).widget()
        for fold in folders:
            folder = tree.addFolder(fold)
            # for widg in folders[fold]:
            #     child = tree.addNormal(widg, folder)
        d = QtWidgets.QDialog()
        d.setLayout(QtWidgets.QHBoxLayout())
        d.layout().addWidget(tree)
        d.exec_()
        newOrder = tree.model_to_dict()


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    s = QtWidgets.QScrollArea()
    pw = PropertiesWidget()
    rootWidget = CollapsibleFormWidget(headName="Settings", noSpacer=True)
    rootWidget.addWidget("test", QtWidgets.QPushButton("ss"))
    rootWidget.addWidget("foo", QtWidgets.QPushButton(""))
    rootWidget.addWidget("bar", QtWidgets.QPushButton(""))
    rootWidget2 = CollapsibleFormWidget(headName="Test", noSpacer=True)
    rootWidget2.addWidget("test2", QtWidgets.QPushButton("aa"))
    pw.addWidget(rootWidget)
    pw.addWidget(rootWidget2)
    s.setWidget(pw)
    s.show()
    pw.clear()
    sys.exit(app.exec_())
```
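The `__main__` demo above only exercises flat entries; `addWidget` also accepts a `group` keyword that routes the entry into a shared `CollapSibleGoupBox`. A small sketch (to run inside a `QApplication` as in the demo; the widget choices here are illustrative):

```python
# Grouped entries: both spin boxes land inside one "Position" group box.
w = CollapsibleFormWidget(headName="Transform")
w.addWidget("x", QtWidgets.QDoubleSpinBox(), group="Position")
w.addWidget("y", QtWidgets.QDoubleSpinBox(), group="Position")
```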
avg_line_length: 38.8125 | max_line_length: 131 | alphanum_fraction: 0.654762
content_no_comment: duplicate of content with comments stripped (truncated in this dump) | is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70b8423fc991d0b95cd0f26021b8c0e980bec5c | size: 31,695 | ext: py | lang: Python
path: docusign_esign/models/email_address.py | repo: pivotal-energy-solutions/docusign-python-client | head: f3edd0b82e57999bc8848a63a0477712714ee437 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-04-26T20:52:45.000Z to 2021-04-26T20:52:45.000Z)
content:
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EmailAddress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, anchor_case_sensitive=None, anchor_horizontal_alignment=None, anchor_ignore_if_not_present=None, anchor_match_whole_word=None, anchor_string=None, anchor_units=None, anchor_x_offset=None, anchor_y_offset=None, bold=None, conditional_parent_label=None, conditional_parent_value=None, custom_tab_id=None, document_id=None, error_details=None, font=None, font_color=None, font_size=None, italic=None, merge_field=None, name=None, page_number=None, recipient_id=None, status=None, tab_group_labels=None, tab_id=None, tab_label=None, tab_order=None, template_locked=None, template_required=None, tooltip=None, underline=None, value=None, x_position=None, y_position=None):
"""
EmailAddress - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'anchor_case_sensitive': 'str',
'anchor_horizontal_alignment': 'str',
'anchor_ignore_if_not_present': 'str',
'anchor_match_whole_word': 'str',
'anchor_string': 'str',
'anchor_units': 'str',
'anchor_x_offset': 'str',
'anchor_y_offset': 'str',
'bold': 'str',
'conditional_parent_label': 'str',
'conditional_parent_value': 'str',
'custom_tab_id': 'str',
'document_id': 'str',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_size': 'str',
'italic': 'str',
'merge_field': 'MergeField',
'name': 'str',
'page_number': 'str',
'recipient_id': 'str',
'status': 'str',
'tab_group_labels': 'list[str]',
'tab_id': 'str',
'tab_label': 'str',
'tab_order': 'str',
'template_locked': 'str',
'template_required': 'str',
'tooltip': 'str',
'underline': 'str',
'value': 'str',
'x_position': 'str',
'y_position': 'str'
}
self.attribute_map = {
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_string': 'anchorString',
'anchor_units': 'anchorUnits',
'anchor_x_offset': 'anchorXOffset',
'anchor_y_offset': 'anchorYOffset',
'bold': 'bold',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_value': 'conditionalParentValue',
'custom_tab_id': 'customTabId',
'document_id': 'documentId',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_size': 'fontSize',
'italic': 'italic',
'merge_field': 'mergeField',
'name': 'name',
'page_number': 'pageNumber',
'recipient_id': 'recipientId',
'status': 'status',
'tab_group_labels': 'tabGroupLabels',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_order': 'tabOrder',
'template_locked': 'templateLocked',
'template_required': 'templateRequired',
'tooltip': 'tooltip',
'underline': 'underline',
'value': 'value',
'x_position': 'xPosition',
'y_position': 'yPosition'
}
self._anchor_case_sensitive = anchor_case_sensitive
self._anchor_horizontal_alignment = anchor_horizontal_alignment
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
self._anchor_match_whole_word = anchor_match_whole_word
self._anchor_string = anchor_string
self._anchor_units = anchor_units
self._anchor_x_offset = anchor_x_offset
self._anchor_y_offset = anchor_y_offset
self._bold = bold
self._conditional_parent_label = conditional_parent_label
self._conditional_parent_value = conditional_parent_value
self._custom_tab_id = custom_tab_id
self._document_id = document_id
self._error_details = error_details
self._font = font
self._font_color = font_color
self._font_size = font_size
self._italic = italic
self._merge_field = merge_field
self._name = name
self._page_number = page_number
self._recipient_id = recipient_id
self._status = status
self._tab_group_labels = tab_group_labels
self._tab_id = tab_id
self._tab_label = tab_label
self._tab_order = tab_order
self._template_locked = template_locked
self._template_required = template_required
self._tooltip = tooltip
self._underline = underline
self._value = value
self._x_position = x_position
self._y_position = y_position
@property
def anchor_case_sensitive(self):
"""
Gets the anchor_case_sensitive of this EmailAddress.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**.
:return: The anchor_case_sensitive of this EmailAddress.
:rtype: str
"""
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
"""
Sets the anchor_case_sensitive of this EmailAddress.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**.
:param anchor_case_sensitive: The anchor_case_sensitive of this EmailAddress.
:type: str
"""
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_horizontal_alignment(self):
"""
Gets the anchor_horizontal_alignment of this EmailAddress.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**.
:return: The anchor_horizontal_alignment of this EmailAddress.
:rtype: str
"""
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
"""
Sets the anchor_horizontal_alignment of this EmailAddress.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**.
:param anchor_horizontal_alignment: The anchor_horizontal_alignment of this EmailAddress.
:type: str
"""
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_ignore_if_not_present(self):
"""
Gets the anchor_ignore_if_not_present of this EmailAddress.
When set to **true**, this tab is ignored if anchorString is not found in the document.
:return: The anchor_ignore_if_not_present of this EmailAddress.
:rtype: str
"""
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
"""
Sets the anchor_ignore_if_not_present of this EmailAddress.
When set to **true**, this tab is ignored if anchorString is not found in the document.
:param anchor_ignore_if_not_present: The anchor_ignore_if_not_present of this EmailAddress.
:type: str
"""
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_match_whole_word(self):
"""
Gets the anchor_match_whole_word of this EmailAddress.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**.
:return: The anchor_match_whole_word of this EmailAddress.
:rtype: str
"""
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
"""
Sets the anchor_match_whole_word of this EmailAddress.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**.
:param anchor_match_whole_word: The anchor_match_whole_word of this EmailAddress.
:type: str
"""
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_string(self):
"""
Gets the anchor_string of this EmailAddress.
        Specifies the string to find in the document, which is used as the basis for tab placement.
:return: The anchor_string of this EmailAddress.
:rtype: str
"""
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
"""
Sets the anchor_string of this EmailAddress.
        Specifies the string to find in the document, which is used as the basis for tab placement.
:param anchor_string: The anchor_string of this EmailAddress.
:type: str
"""
self._anchor_string = anchor_string
@property
def anchor_units(self):
"""
Gets the anchor_units of this EmailAddress.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches.
:return: The anchor_units of this EmailAddress.
:rtype: str
"""
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
"""
Sets the anchor_units of this EmailAddress.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches.
:param anchor_units: The anchor_units of this EmailAddress.
:type: str
"""
self._anchor_units = anchor_units
@property
def anchor_x_offset(self):
"""
Gets the anchor_x_offset of this EmailAddress.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString.
:return: The anchor_x_offset of this EmailAddress.
:rtype: str
"""
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
"""
Sets the anchor_x_offset of this EmailAddress.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString.
:param anchor_x_offset: The anchor_x_offset of this EmailAddress.
:type: str
"""
self._anchor_x_offset = anchor_x_offset
@property
def anchor_y_offset(self):
"""
Gets the anchor_y_offset of this EmailAddress.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString.
:return: The anchor_y_offset of this EmailAddress.
:rtype: str
"""
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
"""
Sets the anchor_y_offset of this EmailAddress.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString.
:param anchor_y_offset: The anchor_y_offset of this EmailAddress.
:type: str
"""
self._anchor_y_offset = anchor_y_offset
@property
def bold(self):
"""
Gets the bold of this EmailAddress.
When set to **true**, the information in the tab is bold.
:return: The bold of this EmailAddress.
:rtype: str
"""
return self._bold
@bold.setter
def bold(self, bold):
"""
Sets the bold of this EmailAddress.
When set to **true**, the information in the tab is bold.
:param bold: The bold of this EmailAddress.
:type: str
"""
self._bold = bold
@property
def conditional_parent_label(self):
"""
Gets the conditional_parent_label of this EmailAddress.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility.
:return: The conditional_parent_label of this EmailAddress.
:rtype: str
"""
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
"""
Sets the conditional_parent_label of this EmailAddress.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility.
:param conditional_parent_label: The conditional_parent_label of this EmailAddress.
:type: str
"""
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_value(self):
"""
Gets the conditional_parent_value of this EmailAddress.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active.
:return: The conditional_parent_value of this EmailAddress.
:rtype: str
"""
return self._conditional_parent_value
@conditional_parent_value.setter
def conditional_parent_value(self, conditional_parent_value):
"""
Sets the conditional_parent_value of this EmailAddress.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active.
:param conditional_parent_value: The conditional_parent_value of this EmailAddress.
:type: str
"""
self._conditional_parent_value = conditional_parent_value
@property
def custom_tab_id(self):
"""
Gets the custom_tab_id of this EmailAddress.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties.
:return: The custom_tab_id of this EmailAddress.
:rtype: str
"""
return self._custom_tab_id
@custom_tab_id.setter
def custom_tab_id(self, custom_tab_id):
"""
Sets the custom_tab_id of this EmailAddress.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties.
:param custom_tab_id: The custom_tab_id of this EmailAddress.
:type: str
"""
self._custom_tab_id = custom_tab_id
@property
def document_id(self):
"""
Gets the document_id of this EmailAddress.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:return: The document_id of this EmailAddress.
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""
Sets the document_id of this EmailAddress.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:param document_id: The document_id of this EmailAddress.
:type: str
"""
self._document_id = document_id
@property
def error_details(self):
"""
Gets the error_details of this EmailAddress.
:return: The error_details of this EmailAddress.
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this EmailAddress.
:param error_details: The error_details of this EmailAddress.
:type: ErrorDetails
"""
self._error_details = error_details
@property
def font(self):
"""
Gets the font of this EmailAddress.
        The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default.
:return: The font of this EmailAddress.
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""
Sets the font of this EmailAddress.
        The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default.
:param font: The font of this EmailAddress.
:type: str
"""
self._font = font
@property
def font_color(self):
"""
Gets the font_color of this EmailAddress.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White.
:return: The font_color of this EmailAddress.
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""
Sets the font_color of this EmailAddress.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White.
:param font_color: The font_color of this EmailAddress.
:type: str
"""
self._font_color = font_color
@property
def font_size(self):
"""
Gets the font_size of this EmailAddress.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72.
:return: The font_size of this EmailAddress.
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""
Sets the font_size of this EmailAddress.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72.
:param font_size: The font_size of this EmailAddress.
:type: str
"""
self._font_size = font_size
@property
def italic(self):
"""
Gets the italic of this EmailAddress.
When set to **true**, the information in the tab is italic.
:return: The italic of this EmailAddress.
:rtype: str
"""
return self._italic
@italic.setter
def italic(self, italic):
"""
Sets the italic of this EmailAddress.
When set to **true**, the information in the tab is italic.
:param italic: The italic of this EmailAddress.
:type: str
"""
self._italic = italic
@property
def merge_field(self):
"""
Gets the merge_field of this EmailAddress.
:return: The merge_field of this EmailAddress.
:rtype: MergeField
"""
return self._merge_field
@merge_field.setter
def merge_field(self, merge_field):
"""
Sets the merge_field of this EmailAddress.
:param merge_field: The merge_field of this EmailAddress.
:type: MergeField
"""
self._merge_field = merge_field
@property
def name(self):
"""
Gets the name of this EmailAddress.
:return: The name of this EmailAddress.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this EmailAddress.
:param name: The name of this EmailAddress.
:type: str
"""
self._name = name
@property
def page_number(self):
"""
Gets the page_number of this EmailAddress.
Specifies the page number on which the tab is located.
:return: The page_number of this EmailAddress.
:rtype: str
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""
Sets the page_number of this EmailAddress.
Specifies the page number on which the tab is located.
:param page_number: The page_number of this EmailAddress.
:type: str
"""
self._page_number = page_number
@property
def recipient_id(self):
"""
Gets the recipient_id of this EmailAddress.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:return: The recipient_id of this EmailAddress.
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""
Sets the recipient_id of this EmailAddress.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:param recipient_id: The recipient_id of this EmailAddress.
:type: str
"""
self._recipient_id = recipient_id
@property
def status(self):
"""
Gets the status of this EmailAddress.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:return: The status of this EmailAddress.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this EmailAddress.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:param status: The status of this EmailAddress.
:type: str
"""
self._status = status
@property
def tab_group_labels(self):
"""
Gets the tab_group_labels of this EmailAddress.
:return: The tab_group_labels of this EmailAddress.
:rtype: list[str]
"""
return self._tab_group_labels
@tab_group_labels.setter
def tab_group_labels(self, tab_group_labels):
"""
Sets the tab_group_labels of this EmailAddress.
:param tab_group_labels: The tab_group_labels of this EmailAddress.
:type: list[str]
"""
self._tab_group_labels = tab_group_labels
@property
def tab_id(self):
"""
Gets the tab_id of this EmailAddress.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call].
:return: The tab_id of this EmailAddress.
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""
Sets the tab_id of this EmailAddress.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call].
:param tab_id: The tab_id of this EmailAddress.
:type: str
"""
self._tab_id = tab_id
@property
def tab_label(self):
"""
Gets the tab_label of this EmailAddress.
The label string associated with the tab.
:return: The tab_label of this EmailAddress.
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""
Sets the tab_label of this EmailAddress.
The label string associated with the tab.
:param tab_label: The tab_label of this EmailAddress.
:type: str
"""
self._tab_label = tab_label
@property
def tab_order(self):
"""
Gets the tab_order of this EmailAddress.
:return: The tab_order of this EmailAddress.
:rtype: str
"""
return self._tab_order
@tab_order.setter
def tab_order(self, tab_order):
"""
Sets the tab_order of this EmailAddress.
:param tab_order: The tab_order of this EmailAddress.
:type: str
"""
self._tab_order = tab_order
@property
def template_locked(self):
"""
Gets the template_locked of this EmailAddress.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.
:return: The template_locked of this EmailAddress.
:rtype: str
"""
return self._template_locked
@template_locked.setter
def template_locked(self, template_locked):
"""
Sets the template_locked of this EmailAddress.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.
:param template_locked: The template_locked of this EmailAddress.
:type: str
"""
self._template_locked = template_locked
@property
def template_required(self):
"""
Gets the template_required of this EmailAddress.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.
:return: The template_required of this EmailAddress.
:rtype: str
"""
return self._template_required
@template_required.setter
def template_required(self, template_required):
"""
Sets the template_required of this EmailAddress.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.
:param template_required: The template_required of this EmailAddress.
:type: str
"""
self._template_required = template_required
@property
def tooltip(self):
"""
Gets the tooltip of this EmailAddress.
:return: The tooltip of this EmailAddress.
:rtype: str
"""
return self._tooltip
@tooltip.setter
def tooltip(self, tooltip):
"""
Sets the tooltip of this EmailAddress.
:param tooltip: The tooltip of this EmailAddress.
:type: str
"""
self._tooltip = tooltip
@property
def underline(self):
"""
Gets the underline of this EmailAddress.
When set to **true**, the information in the tab is underlined.
:return: The underline of this EmailAddress.
:rtype: str
"""
return self._underline
@underline.setter
def underline(self, underline):
"""
Sets the underline of this EmailAddress.
When set to **true**, the information in the tab is underlined.
:param underline: The underline of this EmailAddress.
:type: str
"""
self._underline = underline
@property
def value(self):
"""
Gets the value of this EmailAddress.
Specifies the value of the tab.
:return: The value of this EmailAddress.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this EmailAddress.
Specifies the value of the tab.
:param value: The value of this EmailAddress.
:type: str
"""
self._value = value
@property
def x_position(self):
"""
Gets the x_position of this EmailAddress.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position.
:return: The x_position of this EmailAddress.
:rtype: str
"""
return self._x_position
@x_position.setter
def x_position(self, x_position):
"""
Sets the x_position of this EmailAddress.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position.
:param x_position: The x_position of this EmailAddress.
:type: str
"""
self._x_position = x_position
@property
def y_position(self):
"""
Gets the y_position of this EmailAddress.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position.
:return: The y_position of this EmailAddress.
:rtype: str
"""
return self._y_position
@y_position.setter
def y_position(self, y_position):
"""
Sets the y_position of this EmailAddress.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position.
:param y_position: The y_position of this EmailAddress.
:type: str
"""
self._y_position = y_position
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
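# --- Editor's illustrative sketch (not part of the generated SDK) ---
# A minimal round-trip through the model defined above; every value below is a
# hypothetical example, and the __main__ guard keeps the sketch inert on import.
if __name__ == '__main__':
    example = EmailAddress(
        anchor_string='Email:',     # place the tab relative to this text
        anchor_units='pixels',
        anchor_x_offset='10',
        anchor_y_offset='0',
        recipient_id='1',
        tab_label='signer_email',
    )
    # to_dict() walks swagger_types and serializes the populated attributes.
    print(example.to_dict())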
| 32.708978
| 690
| 0.629405
|
from pprint import pformat
from six import iteritems
import re
class EmailAddress(object):
def __init__(self, anchor_case_sensitive=None, anchor_horizontal_alignment=None, anchor_ignore_if_not_present=None, anchor_match_whole_word=None, anchor_string=None, anchor_units=None, anchor_x_offset=None, anchor_y_offset=None, bold=None, conditional_parent_label=None, conditional_parent_value=None, custom_tab_id=None, document_id=None, error_details=None, font=None, font_color=None, font_size=None, italic=None, merge_field=None, name=None, page_number=None, recipient_id=None, status=None, tab_group_labels=None, tab_id=None, tab_label=None, tab_order=None, template_locked=None, template_required=None, tooltip=None, underline=None, value=None, x_position=None, y_position=None):
self.swagger_types = {
'anchor_case_sensitive': 'str',
'anchor_horizontal_alignment': 'str',
'anchor_ignore_if_not_present': 'str',
'anchor_match_whole_word': 'str',
'anchor_string': 'str',
'anchor_units': 'str',
'anchor_x_offset': 'str',
'anchor_y_offset': 'str',
'bold': 'str',
'conditional_parent_label': 'str',
'conditional_parent_value': 'str',
'custom_tab_id': 'str',
'document_id': 'str',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_size': 'str',
'italic': 'str',
'merge_field': 'MergeField',
'name': 'str',
'page_number': 'str',
'recipient_id': 'str',
'status': 'str',
'tab_group_labels': 'list[str]',
'tab_id': 'str',
'tab_label': 'str',
'tab_order': 'str',
'template_locked': 'str',
'template_required': 'str',
'tooltip': 'str',
'underline': 'str',
'value': 'str',
'x_position': 'str',
'y_position': 'str'
}
self.attribute_map = {
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_string': 'anchorString',
'anchor_units': 'anchorUnits',
'anchor_x_offset': 'anchorXOffset',
'anchor_y_offset': 'anchorYOffset',
'bold': 'bold',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_value': 'conditionalParentValue',
'custom_tab_id': 'customTabId',
'document_id': 'documentId',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_size': 'fontSize',
'italic': 'italic',
'merge_field': 'mergeField',
'name': 'name',
'page_number': 'pageNumber',
'recipient_id': 'recipientId',
'status': 'status',
'tab_group_labels': 'tabGroupLabels',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_order': 'tabOrder',
'template_locked': 'templateLocked',
'template_required': 'templateRequired',
'tooltip': 'tooltip',
'underline': 'underline',
'value': 'value',
'x_position': 'xPosition',
'y_position': 'yPosition'
}
self._anchor_case_sensitive = anchor_case_sensitive
self._anchor_horizontal_alignment = anchor_horizontal_alignment
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
self._anchor_match_whole_word = anchor_match_whole_word
self._anchor_string = anchor_string
self._anchor_units = anchor_units
self._anchor_x_offset = anchor_x_offset
self._anchor_y_offset = anchor_y_offset
self._bold = bold
self._conditional_parent_label = conditional_parent_label
self._conditional_parent_value = conditional_parent_value
self._custom_tab_id = custom_tab_id
self._document_id = document_id
self._error_details = error_details
self._font = font
self._font_color = font_color
self._font_size = font_size
self._italic = italic
self._merge_field = merge_field
self._name = name
self._page_number = page_number
self._recipient_id = recipient_id
self._status = status
self._tab_group_labels = tab_group_labels
self._tab_id = tab_id
self._tab_label = tab_label
self._tab_order = tab_order
self._template_locked = template_locked
self._template_required = template_required
self._tooltip = tooltip
self._underline = underline
self._value = value
self._x_position = x_position
self._y_position = y_position
@property
def anchor_case_sensitive(self):
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_horizontal_alignment(self):
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_ignore_if_not_present(self):
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_match_whole_word(self):
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_string(self):
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
self._anchor_string = anchor_string
@property
def anchor_units(self):
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
self._anchor_units = anchor_units
@property
def anchor_x_offset(self):
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
self._anchor_x_offset = anchor_x_offset
@property
def anchor_y_offset(self):
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
self._anchor_y_offset = anchor_y_offset
@property
def bold(self):
return self._bold
@bold.setter
def bold(self, bold):
self._bold = bold
@property
def conditional_parent_label(self):
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_value(self):
return self._conditional_parent_value
@conditional_parent_value.setter
def conditional_parent_value(self, conditional_parent_value):
self._conditional_parent_value = conditional_parent_value
@property
def custom_tab_id(self):
return self._custom_tab_id
@custom_tab_id.setter
def custom_tab_id(self, custom_tab_id):
self._custom_tab_id = custom_tab_id
@property
def document_id(self):
return self._document_id
@document_id.setter
def document_id(self, document_id):
self._document_id = document_id
@property
def error_details(self):
return self._error_details
@error_details.setter
def error_details(self, error_details):
self._error_details = error_details
@property
def font(self):
return self._font
@font.setter
def font(self, font):
self._font = font
@property
def font_color(self):
return self._font_color
@font_color.setter
def font_color(self, font_color):
self._font_color = font_color
@property
def font_size(self):
return self._font_size
@font_size.setter
def font_size(self, font_size):
self._font_size = font_size
@property
def italic(self):
return self._italic
@italic.setter
def italic(self, italic):
self._italic = italic
@property
def merge_field(self):
return self._merge_field
@merge_field.setter
def merge_field(self, merge_field):
self._merge_field = merge_field
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def page_number(self):
return self._page_number
@page_number.setter
def page_number(self, page_number):
self._page_number = page_number
@property
def recipient_id(self):
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
self._recipient_id = recipient_id
@property
def status(self):
return self._status
@status.setter
def status(self, status):
self._status = status
@property
def tab_group_labels(self):
return self._tab_group_labels
@tab_group_labels.setter
def tab_group_labels(self, tab_group_labels):
self._tab_group_labels = tab_group_labels
@property
def tab_id(self):
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
self._tab_id = tab_id
@property
def tab_label(self):
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
self._tab_label = tab_label
@property
def tab_order(self):
return self._tab_order
@tab_order.setter
def tab_order(self, tab_order):
self._tab_order = tab_order
@property
def template_locked(self):
return self._template_locked
@template_locked.setter
def template_locked(self, template_locked):
self._template_locked = template_locked
@property
def template_required(self):
return self._template_required
@template_required.setter
def template_required(self, template_required):
self._template_required = template_required
@property
def tooltip(self):
return self._tooltip
@tooltip.setter
def tooltip(self, tooltip):
self._tooltip = tooltip
@property
def underline(self):
return self._underline
@underline.setter
def underline(self, underline):
self._underline = underline
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def x_position(self):
return self._x_position
@x_position.setter
def x_position(self, x_position):
self._x_position = x_position
@property
def y_position(self):
return self._y_position
@y_position.setter
def y_position(self, y_position):
self._y_position = y_position
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70b85c39ef0540db908890e3580c1d06caf96f5
| 70
|
py
|
Python
|
rltrain.py
|
leopd/jasrlp
|
4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8
|
[
"MIT"
] | 2
|
2019-12-02T04:32:36.000Z
|
2019-12-03T03:17:40.000Z
|
rltrain.py
|
leopd/jasrlp
|
4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8
|
[
"MIT"
] | null | null | null |
rltrain.py
|
leopd/jasrlp
|
4ebc0a91bd0a5533aeb9b2d136612c862ec8f6a8
|
[
"MIT"
] | null | null | null |
from rldqn import DQN, FCNet, RandomLearner
from rlddpg import DDPG
| 14
| 43
| 0.8
|
from rldqn import DQN, FCNet, RandomLearner
from rlddpg import DDPG
| true
| true
|
f70b86ad326a4d97bea318ce1998d3afe340e4cd
| 368
|
py
|
Python
|
scripts/ex_concat.py
|
spisakt/PUMI
|
bea29696aa90e5581f08919e1a2cd9f569284984
|
[
"BSD-3-Clause"
] | 5
|
2018-06-12T08:17:13.000Z
|
2022-02-25T20:07:00.000Z
|
scripts/ex_concat.py
|
spisakt/PUMI
|
bea29696aa90e5581f08919e1a2cd9f569284984
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/ex_concat.py
|
spisakt/PUMI
|
bea29696aa90e5581f08919e1a2cd9f569284984
|
[
"BSD-3-Clause"
] | 2
|
2020-10-19T15:27:28.000Z
|
2021-06-04T17:02:27.000Z
|
#!/usr/bin/env python
import PUMI.utils.Concat as conc
conc=conc.concat_workflow(2)
conc.inputs.inputspec.par1="abc"
conc.inputs.inputspec.par2="def"
conc.write_graph('graph-orig.dot', graph2use='orig', simple_form=True);
conc.write_graph('graph-exec-detailed.dot', graph2use='exec', simple_form=False);
conc.write_graph('graph.dot', graph2use='colored');
conc.run()
| 33.454545
| 81
| 0.769022
|
import PUMI.utils.Concat as conc
conc=conc.concat_workflow(2)
conc.inputs.inputspec.par1="abc"
conc.inputs.inputspec.par2="def"
conc.write_graph('graph-orig.dot', graph2use='orig', simple_form=True);
conc.write_graph('graph-exec-detailed.dot', graph2use='exec', simple_form=False);
conc.write_graph('graph.dot', graph2use='colored');
conc.run()
| true
| true
|
f70b87c4ef72db99b9638adb5bff6118843e5de5
| 40,741
|
py
|
Python
|
tests/test_djangocache.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_djangocache.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_djangocache.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Most of this file was copied from:
# https://raw.githubusercontent.com/django/django/1.11.12/tests/cache/tests.py
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import io
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, mock, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
################################################################################
# Setup Django for models import.
################################################################################
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
############################################################################
# GrantJ 2017-03-27 Ignore deprecation warnings. Django's metaclass magic does
# not always play well with Python 3.6. Read
# http://stackoverflow.com/questions/41343263/ for details
############################################################################
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import django
django.setup()
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
def custom_key_func2(key, key_prefix, version):
"Another customized cache key function"
return '-'.join(['CUSTOM', key_prefix, str(version), key])
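# Note: for any given inputs the two custom key functions produce identical
# strings, e.g. custom_key_func('k', 'p', 1) == custom_key_func2('k', 'p', 1)
# == 'CUSTOM-p-1-k'; they differ only in how the 'CUSTOM' marker is joined in.
# test_custom_key_func below relies on this overlap when it checks that the
# 'custom_key' and 'custom_key2' caches can read each other's entries.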
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': custom_key_func2},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
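# Editor's sketch of the resulting merge, assuming a hypothetical locmem base:
#
#   caches_setting_for_tests(
#       base={'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
#       exclude={'cull', 'zero_cull'},
#       LOCATION='test-location',
#   )
#   # -> {'default': {'BACKEND': <locmem>, 'LOCATION': 'test-location'},
#   #     'v2': {'BACKEND': <locmem>, 'LOCATION': 'test-location', 'VERSION': 2},
#   #     ...}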
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1', timeout=2))
time.sleep(1)
self.assertTrue(cache.has_key('expire1'))
time.sleep(2)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1'))
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertFalse(cache.touch('nonexistent'))
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
        Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
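    # Background for test_long_timeout: memcached interprets an expiration of
    # up to 60 * 60 * 24 * 30 = 2,592,000 seconds (30 days) as a relative
    # offset; anything larger is treated as an absolute Unix timestamp, which
    # is why the backend must normalize values above that threshold.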
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
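    # Django's default key function composes the backend key as
    # '<key_prefix>:<version>:<key>', so 'answer1' stored at version 1 and at
    # version 2 occupy distinct slots, which is what the assertions above
    # exercise.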
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() doesn't store None in the cache.
self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaises(TypeError):
cache.get_or_set('brian')
with self.assertRaises(TypeError):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='diskcache.DjangoCache',
))
class DiskCacheTests(BaseCacheTests, TestCase):
"Specific test cases for diskcache.DjangoCache."
def setUp(self):
super(DiskCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Cache location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(DiskCacheTests, self).tearDown()
cache.close()
shutil.rmtree(self.dirname, ignore_errors=True)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
def test_cull(self):
cache.cull()
def test_zero_cull(self):
pass # DiskCache has its own cull strategy.
def test_invalid_key_characters(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_invalid_key_length(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_directory(self):
self.assertTrue('tmp' in cache.directory)
def test_read(self):
value = b'abcd' * 2 ** 20
result = cache.set(b'test-key', value)
self.assertTrue(result)
with cache.read(b'test-key') as reader:
self.assertEqual(reader.read(), value)
try:
with cache.read(b'dne') as reader:
error = False
except KeyError:
error = True
self.assertTrue(error)
def test_expire(self):
cache.clear()
cache.set(b'expire-key', 0, timeout=0.05)
time.sleep(0.1)
self.assertEqual(cache.expire(), 1)
self.assertEqual(cache.get(b'expire-key'), None)
def test_evict(self):
cache.clear()
for num in range(100):
cache.set(num, num, tag=(num % 4))
self.assertEqual(cache.evict(1), 25)
cache.create_tag_index()
self.assertEqual(cache.evict(2), 25)
cache.drop_tag_index()
self.assertEqual(cache.evict(3), 25)
for num in range(0, 100, 4):
self.assertEqual(cache.get(num), num)
def test_pop(self):
cache.clear()
for num in range(5):
cache.set(num, num, timeout=None)
self.assertEqual(cache.pop(0), 0)
self.assertEqual(cache.pop(0), None)
self.assertEqual(cache.pop(0, 1), 1)
self.assertEqual(cache.pop(0, default=1), 1)
self.assertEqual(cache.pop(1, expire_time=True), (1, None))
self.assertEqual(cache.pop(2, tag=True), (2, None))
self.assertEqual(cache.pop(3, expire_time=True, tag=True), (3, None, None))
self.assertEqual(cache.pop(4, retry=False), 4)
def test_pickle(self):
letters = 'abcde'
cache.clear()
for num, val in enumerate(letters):
cache.set(val, num)
data = pickle.dumps(cache)
other = pickle.loads(data)
for key in letters:
self.assertEqual(other.get(key), cache.get(key))
def test_cache(self):
subcache = cache.cache('test')
directory = os.path.join(cache.directory, 'cache', 'test')
self.assertEqual(subcache.directory, directory)
def test_deque(self):
deque = cache.deque('test')
directory = os.path.join(cache.directory, 'deque', 'test')
self.assertEqual(deque.directory, directory)
def test_index(self):
index = cache.index('test')
directory = os.path.join(cache.directory, 'index', 'test')
self.assertEqual(index.directory, directory)
def test_memoize(self):
with self.assertRaises(TypeError):
@cache.memoize # <-- Missing parens!
def test():
pass
count = 1000
def fibiter(num):
alpha, beta = 0, 1
for _ in range(num):
alpha, beta = beta, alpha + beta
return alpha
@cache.memoize()
def fibrec(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
return fibrec(num - 1) + fibrec(num - 2)
cache.stats(enable=True)
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits1, misses1 = cache.stats()
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits2, misses2 = cache.stats()
self.assertEqual(hits2, hits1 + count)
self.assertEqual(misses2, misses1)
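        # Hedged reading of the assertions above: the second sweep over `count`
        # values is served entirely from the memoize cache (hits grow by exactly
        # `count`) while misses stay flat, confirming no recomputation occurred.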
| 39.174038 | 114 | 0.628188 |
f70b881f1b2b4ed66c203c087f52e37d9971e9b5 | 2,571 | py | Python
| test_requests.py | OT022/Threading-OCR | 50379078c5885a0046cb3b0598306da0bd6f5a0a | ["MIT"] | null | null | null
| test_requests.py | OT022/Threading-OCR | 50379078c5885a0046cb3b0598306da0bd6f5a0a | ["MIT"] | null | null | null
| test_requests.py | OT022/Threading-OCR | 50379078c5885a0046cb3b0598306da0bd6f5a0a | ["MIT"] | null | null | null |
import pytest
import os
import logging
import requests_helper
@pytest.fixture
def valid_post_image():
    # Yield inside a context manager so the file handle is closed after the test.
    with open('_test/src/img001.jpg', 'rb') as image:
        yield image
@pytest.fixture
def valid_post_url():
return os.environ['COMPUTER_VISION_ENDPOINT'] + "/vision/v3.0/read/analyze"
@pytest.fixture
def valid_headers():
return {
'Ocp-Apim-Subscription-Key': os.environ['COMPUTER_VISION_KEY'],
'Content-Type': 'application/octet-stream'
}
@pytest.fixture
def valid_get_url():
return "operation-location"
class MockResponse:
def __init__(self, json_data, status_code, headers):
self.json_data = json_data
self.status_code = status_code
self.headers = headers
def json(self):
return self.json_data
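# Hedged illustration (not part of the original suite): MockResponse mimics only
# the three attributes of requests.Response that these tests exercise.
#   resp = MockResponse({"ok": True}, 200, {"Operation-Location": "a-valid-url"})
#   assert resp.json() == {"ok": True}
#   assert resp.status_code == 200
#   assert resp.headers["Operation-Location"] == "a-valid-url"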
def test_post_response_is_ok(mocker, valid_post_url, valid_headers, valid_post_image):
mock_post = mocker.patch('requests_helper.requests.post')
mock_post.return_value = MockResponse(None, 202, { "Operation-Location": "a-valid-url" })
response = requests_helper.post_image(valid_post_url, valid_headers, valid_post_image)
    assert response.headers["Operation-Location"] == "a-valid-url"
def test_post_response_handles_500_error(mocker, valid_post_url, valid_headers, valid_post_image):
mock_post = mocker.patch('requests_helper.requests.post')
mock_post.return_value = MockResponse({"error": {"code": "FailedToProcess", "message": "The analyze request could not be started due to a cluster-related issue. Please resubmit the document for processing."}}, 500, {})
response = requests_helper.post_image(valid_post_url, valid_headers, valid_post_image)
assert response == { "status_code": 500, "code": "FailedToProcess", "message": "The analyze request could not be started due to a cluster-related issue. Please resubmit the document for processing."}
def test_get_read_result_is_ok(mocker, valid_headers, valid_get_url):
mock_get = mocker.patch('requests_helper.requests.get')
mock_get.return_value = MockResponse( {"analyzeResult": { "lines": [{"text": "this is text"}]}}, 200, {})
response = requests_helper.get_read_result(valid_get_url, valid_headers)
assert response.json()["analyzeResult"] is not None
def test_get_read_result_handles_error(mocker, valid_headers, valid_get_url):
mock_get = mocker.patch('requests_helper.requests.get')
mock_get.return_value = MockResponse({"error": { "code": "fail", "message": "because"}}, 500, {})
response = requests_helper.get_read_result(valid_get_url, valid_headers)
assert response["code"] == "fail"
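# Behavior inferred from the assertions above (requests_helper itself is not in
# this file): post_image and get_read_result appear to return the raw response
# object on success, and a flattened dict carrying status_code/code/message when
# the service responds with an error payload.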
| 38.954545 | 222 | 0.736289 |
f70b8964b3beea5bf3a3e0e4b46e46f93cf419a2 | 51,566 | bzl | Python
| third_party/gpus/cuda_configure.bzl | parallelo/tensorflow-upstream | 41c9f4d4435707ed64b5a4fa5a964f73a5b99986 | ["Apache-2.0"] | null | null | null
| third_party/gpus/cuda_configure.bzl | parallelo/tensorflow-upstream | 41c9f4d4435707ed64b5a4fa5a964f73a5b99986 | ["Apache-2.0"] | null | null | null
| third_party/gpus/cuda_configure.bzl | parallelo/tensorflow-upstream | 41c9f4d4435707ed64b5a4fa5a964f73a5b99986 | ["Apache-2.0"] | null | null | null |
"""Repository rule for CUDA autoconfiguration.
`cuda_configure` depends on the following environment variables:
* `TF_NEED_CUDA`: Whether to enable building with CUDA.
* `GCC_HOST_COMPILER_PATH`: The GCC host compiler path
* `TF_CUDA_CLANG`: Whether to use clang as a cuda compiler.
* `CLANG_CUDA_COMPILER_PATH`: The clang compiler path that will be used for
both host and device code compilation if TF_CUDA_CLANG is 1.
* `TF_SYSROOT`: The sysroot to use when compiling.
* `TF_DOWNLOAD_CLANG`: Whether to download a recent release of clang
compiler and use it to build tensorflow. When this option is set
CLANG_CUDA_COMPILER_PATH is ignored.
* `TF_CUDA_PATHS`: The base paths to look for CUDA and cuDNN. Default is
`/usr/local/cuda,usr/`.
* `CUDA_TOOLKIT_PATH` (deprecated): The path to the CUDA toolkit. Default is
`/usr/local/cuda`.
* `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
use the system default.
* `TF_CUDNN_VERSION`: The version of the cuDNN library.
* `CUDNN_INSTALL_PATH` (deprecated): The path to the cuDNN library. Default is
`/usr/local/cuda`.
* `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
`3.5,5.2`.
* `PYTHON_BIN_PATH`: The python binary path
"""
load("//third_party/clang_toolchain:download_clang.bzl", "download_clang")
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"escape_string",
"get_env_var",
)
load(
"@bazel_tools//tools/cpp:windows_cc_configure.bzl",
"find_msvc_tool",
"find_vc_path",
"setup_vc_env_vars",
)
load(
"//third_party/remote_config:common.bzl",
"config_repo_label",
"err_out",
"execute",
"get_bash_bin",
"get_cpu_value",
"get_host_environ",
"get_python_bin",
"is_windows",
"raw_exec",
"read_dir",
"realpath",
"which",
)
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
_GCC_HOST_COMPILER_PREFIX = "GCC_HOST_COMPILER_PREFIX"
_CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
_TF_SYSROOT = "TF_SYSROOT"
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_TF_CUDA_VERSION = "TF_CUDA_VERSION"
_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
_TF_DOWNLOAD_CLANG = "TF_DOWNLOAD_CLANG"
_PYTHON_BIN_PATH = "PYTHON_BIN_PATH"
def to_list_of_strings(elements):
"""Convert the list of ["a", "b", "c"] into '"a", "b", "c"'.
This is to be used to put a list of strings into the bzl file templates
so it gets interpreted as list of strings in Starlark.
Args:
elements: list of string elements
Returns:
single string of elements wrapped in quotes separated by a comma."""
quoted_strings = ["\"" + element + "\"" for element in elements]
return ", ".join(quoted_strings)
def verify_build_defines(params):
"""Verify all variables that crosstool/BUILD.tpl expects are substituted.
Args:
params: dict of variables that will be passed to the BUILD.tpl template.
"""
missing = []
for param in [
"cxx_builtin_include_directories",
"extra_no_canonical_prefixes_flags",
"host_compiler_path",
"host_compiler_prefix",
"host_compiler_warnings",
"linker_bin_path",
"compiler_deps",
"msvc_cl_path",
"msvc_env_include",
"msvc_env_lib",
"msvc_env_path",
"msvc_env_tmp",
"msvc_lib_path",
"msvc_link_path",
"msvc_ml_path",
"unfiltered_compile_flags",
"win_compiler_deps",
]:
if ("%{" + param + "}") not in params:
missing.append(param)
if missing:
auto_configure_fail(
"BUILD.tpl template is missing these variables: " +
str(missing) +
".\nWe only got: " +
str(params) +
".",
)
def _get_nvcc_tmp_dir_for_windows(repository_ctx):
"""Return the Windows tmp directory for nvcc to generate intermediate source files."""
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
return escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
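# Illustration (assuming TMP=C:\Users\me\tmp): the function returns
# "C:\\Users\\me\\tmp\\nvcc_inter_files_tmp_dir"; backslashes are doubled so the
# value survives substitution into the generated build files.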
def _get_msvc_compiler(repository_ctx):
vc_path = find_vc_path(repository_ctx)
return find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
def _get_win_cuda_defines(repository_ctx):
"""Return CROSSTOOL defines for Windows"""
    # If we are not on Windows, return fake values for Windows-specific fields.
# This ensures the CROSSTOOL file parser is happy.
if not is_windows(repository_ctx):
return {
"%{msvc_env_tmp}": "msvc_not_used",
"%{msvc_env_path}": "msvc_not_used",
"%{msvc_env_include}": "msvc_not_used",
"%{msvc_env_lib}": "msvc_not_used",
"%{msvc_cl_path}": "msvc_not_used",
"%{msvc_ml_path}": "msvc_not_used",
"%{msvc_link_path}": "msvc_not_used",
"%{msvc_lib_path}": "msvc_not_used",
}
vc_path = find_vc_path(repository_ctx)
if not vc_path:
auto_configure_fail(
"Visual C++ build tools not found on your machine." +
"Please check your installation following https://docs.bazel.build/versions/master/windows.html#using",
)
return {}
env = setup_vc_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
msvc_cl_path = get_python_bin(repository_ctx)
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace(
"\\",
"/",
)
msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace(
"\\",
"/",
)
msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace(
"\\",
"/",
)
# nvcc will generate some temporary source files under %{nvcc_tmp_dir}
# The generated files are guaranteed to have unique name, so they can share
# the same tmp directory
escaped_cxx_include_directories = [
_get_nvcc_tmp_dir_for_windows(repository_ctx),
"C:\\\\botcode\\\\w",
]
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append(path)
return {
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": msvc_cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": msvc_link_path,
"%{msvc_lib_path}": msvc_lib_path,
"%{cxx_builtin_include_directories}": to_list_of_strings(
escaped_cxx_include_directories,
),
}
# TODO(dzc): Once these functions have been factored out of Bazel's
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
"""Find the C++ compiler."""
if is_windows(repository_ctx):
return _get_msvc_compiler(repository_ctx)
if _use_cuda_clang(repository_ctx):
target_cc_name = "clang"
cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
if _flag_enabled(repository_ctx, _TF_DOWNLOAD_CLANG):
return "extra_tools/bin/clang"
else:
target_cc_name = "gcc"
cc_path_envvar = _GCC_HOST_COMPILER_PATH
cc_name = target_cc_name
cc_name_from_env = get_host_environ(repository_ctx, cc_path_envvar)
if cc_name_from_env:
cc_name = cc_name_from_env
if cc_name.startswith("/"):
# Absolute path, maybe we should make this supported by our which function.
return cc_name
cc = which(repository_ctx, cc_name)
if cc == None:
fail(("Cannot find {}, either correct your path or set the {}" +
" environment variable").format(target_cc_name, cc_path_envvar))
return cc
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX adds " (framework directory)" at the end of the line; strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
"""Convert path returned by cc -E xc++ in a complete path."""
path = path.strip()
if path.endswith(_OSX_FRAMEWORK_SUFFIX):
path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
return path
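# Hedged example: on macOS the compiler may report
#   "/Library/Frameworks (framework directory)"
# which this helper trims to "/Library/Frameworks".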
def _normalize_include_path(repository_ctx, path):
"""Normalizes include paths before writing them to the crosstool.
If path points inside the 'crosstool' folder of the repository, a relative
path is returned.
If path points outside the 'crosstool' folder, an absolute path is returned.
"""
path = str(repository_ctx.path(path))
crosstool_folder = str(repository_ctx.path(".").get_child("crosstool"))
if path.startswith(crosstool_folder):
# We drop the path to "$REPO/crosstool" and a trailing path separator.
return path[len(crosstool_folder) + 1:]
return path
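# Hedged example, assuming the repository root is /repo:
#   /repo/crosstool/include/cuda  ->  include/cuda   (inside crosstool: relative)
#   /usr/include                  ->  /usr/include   (outside crosstool: absolute)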
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp, tf_sysroot):
"""Compute the list of default C or C++ include directories."""
if lang_is_cpp:
lang = "c++"
else:
lang = "c"
sysroot = []
if tf_sysroot:
sysroot += ["--sysroot", tf_sysroot]
result = raw_exec(repository_ctx, [cc, "-E", "-x" + lang, "-", "-v"] +
sysroot)
stderr = err_out(result)
index1 = stderr.find(_INC_DIR_MARKER_BEGIN)
if index1 == -1:
return []
index1 = stderr.find("\n", index1)
if index1 == -1:
return []
index2 = stderr.rfind("\n ")
if index2 == -1 or index2 < index1:
return []
index2 = stderr.find("\n", index2 + 1)
if index2 == -1:
inc_dirs = stderr[index1 + 1:]
else:
inc_dirs = stderr[index1 + 1:index2].strip()
return [
_normalize_include_path(repository_ctx, _cxx_inc_convert(p))
for p in inc_dirs.split("\n")
]
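# Representative shape of the `cc -E -x<lang> - -v` stderr being parsed above
# (illustrative, not captured output):
#   #include <...> search starts here:
#    /usr/lib/gcc/x86_64-linux-gnu/9/include
#    /usr/include
#   End of search list.
# The lines between the marker and the last space-indented entry become the
# include directories.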
def get_cxx_inc_directories(repository_ctx, cc, tf_sysroot):
"""Compute the list of default C and C++ include directories."""
# For some reason `clang -xc` sometimes returns include paths that are
# different from the ones from `clang -xc++`. (Symlink and a dir)
# So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
includes_cpp = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
True,
tf_sysroot,
)
includes_c = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
False,
tf_sysroot,
)
return includes_cpp + [
inc
for inc in includes_c
if inc not in includes_cpp
]
def auto_configure_fail(msg):
"""Output failure message when cuda configuration fails."""
red = "\033[0;31m"
no_color = "\033[0m"
fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
# END cc_configure common functions (see TODO above).
def _cuda_include_path(repository_ctx, cuda_config):
"""Generates the Starlark string with cuda include directories.
Args:
repository_ctx: The repository context.
cc: The path to the gcc host compiler.
Returns:
A list of the gcc host compiler include directories.
"""
nvcc_path = repository_ctx.path("%s/bin/nvcc%s" % (
cuda_config.cuda_toolkit_path,
".exe" if cuda_config.cpu_value == "Windows" else "",
))
# The expected exit code of this command is non-zero. Bazel remote execution
# only caches commands with zero exit code. So force a zero exit code.
cmd = "%s -v /dev/null -o /dev/null ; [ $? -eq 1 ]" % str(nvcc_path)
result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd])
target_dir = ""
for one_line in err_out(result).splitlines():
if one_line.startswith("#$ _TARGET_DIR_="):
target_dir = (
cuda_config.cuda_toolkit_path + "/" + one_line.replace(
"#$ _TARGET_DIR_=",
"",
) + "/include"
)
inc_entries = []
if target_dir != "":
inc_entries.append(realpath(repository_ctx, target_dir))
inc_entries.append(realpath(repository_ctx, cuda_config.cuda_toolkit_path + "/include"))
return inc_entries
def enable_cuda(repository_ctx):
"""Returns whether to build with CUDA support."""
return int(get_host_environ(repository_ctx, "TF_NEED_CUDA", False))
def matches_version(environ_version, detected_version):
"""Checks whether the user-specified version matches the detected version.
    This function performs a weak matching so that if the user specifies only
    the major, or the major and minor versions, the versions are still
    considered matching if the specified parts match. To illustrate:
environ_version detected_version result
-----------------------------------------
5.1.3 5.1.3 True
5.1 5.1.3 True
5 5.1 True
5.1.3 5.1 False
5.2.3 5.1.3 False
Args:
environ_version: The version specified by the user via environment
variables.
detected_version: The version autodetected from the CUDA installation on
the system.
Returns: True if user-specified version matches detected version and False
otherwise.
"""
environ_version_parts = environ_version.split(".")
detected_version_parts = detected_version.split(".")
if len(detected_version_parts) < len(environ_version_parts):
return False
for i, part in enumerate(detected_version_parts):
if i >= len(environ_version_parts):
break
if part != environ_version_parts[i]:
return False
return True
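# The docstring table, restated as hedged call examples:
#   matches_version("5.1", "5.1.3")   -> True   (user-specified prefix matches)
#   matches_version("5.1.3", "5.1")   -> False  (detected version is shorter)
#   matches_version("5.2.3", "5.1.3") -> False  (minor part differs)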
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
def compute_capabilities(repository_ctx):
"""Returns a list of strings representing cuda compute capabilities.
Args:
repository_ctx: the repo rule's context.
Returns: list of cuda architectures to compile for. 'compute_xy' refers to
both PTX and SASS, 'sm_xy' refers to SASS only.
"""
capabilities = get_host_environ(
repository_ctx,
_TF_CUDA_COMPUTE_CAPABILITIES,
"compute_35,compute_52",
).split(",")
# Map old 'x.y' capabilities to 'compute_xy'.
for i, capability in enumerate(capabilities):
parts = capability.split(".")
if len(parts) != 2:
continue
capabilities[i] = "compute_%s%s" % (parts[0], parts[1])
# Make list unique
capabilities = dict(zip(capabilities, capabilities)).keys()
# Validate capabilities.
for capability in capabilities:
if not capability.startswith(("compute_", "sm_")):
auto_configure_fail("Invalid compute capability: %s" % capability)
for prefix in ["compute_", "sm_"]:
if not capability.startswith(prefix):
continue
if len(capability) == len(prefix) + 2 and capability[-2:].isdigit():
continue
auto_configure_fail("Invalid compute capability: %s" % capability)
return capabilities
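# Hedged example: TF_CUDA_COMPUTE_CAPABILITIES="3.5,7.0,sm_75" becomes
# ["compute_35", "compute_70", "sm_75"] after the legacy 'x.y' rewrite,
# de-duplication, and the validation loop above.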
def lib_name(base_name, cpu_value, version = None, static = False):
"""Constructs the platform-specific name of a library.
Args:
base_name: The name of the library, such as "cudart"
cpu_value: The name of the host operating system.
version: The version of the library.
      static: True if the library is static, or False if it is a shared object.
Returns:
The platform-specific name of the library.
"""
version = "" if not version else "." + version
if cpu_value in ("Linux", "FreeBSD"):
if static:
return "lib%s.a" % base_name
return "lib%s.so%s" % (base_name, version)
elif cpu_value == "Windows":
return "%s.lib" % base_name
elif cpu_value == "Darwin":
if static:
return "lib%s.a" % base_name
return "lib%s%s.dylib" % (base_name, version)
else:
auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
def _lib_path(lib, cpu_value, basedir, version, static):
file_name = lib_name(lib, cpu_value, version, static)
return "%s/%s" % (basedir, file_name)
def _should_check_soname(version, static):
return version and not static
def _check_cuda_lib_params(lib, cpu_value, basedir, version, static = False):
return (
_lib_path(lib, cpu_value, basedir, version, static),
_should_check_soname(version, static),
)
def _check_cuda_libs(repository_ctx, script_path, libs):
python_bin = get_python_bin(repository_ctx)
contents = repository_ctx.read(script_path).splitlines()
cmd = "from os import linesep;"
cmd += "f = open('script.py', 'w');"
for line in contents:
cmd += "f.write('%s' + linesep);" % line
cmd += "f.close();"
cmd += "from os import system;"
args = " ".join(["\"" + path + "\" " + str(check) for path, check in libs])
cmd += "system('%s script.py %s');" % (python_bin, args)
all_paths = [path for path, _ in libs]
checked_paths = execute(repository_ctx, [python_bin, "-c", cmd]).stdout.splitlines()
# Filter out empty lines from splitting on '\r\n' on Windows
checked_paths = [path for path in checked_paths if len(path) > 0]
if all_paths != checked_paths:
auto_configure_fail("Error with installed CUDA libs. Expected '%s'. Actual '%s'." % (all_paths, checked_paths))
def _find_libs(repository_ctx, check_cuda_libs_script, cuda_config):
"""Returns the CUDA and cuDNN libraries on the system.
    Also verifies that the libraries actually exist at their locations.
Args:
repository_ctx: The repository context.
check_cuda_libs_script: The path to a script verifying that the cuda
libraries exist on the system.
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
Map of library names to structs of filename and path.
"""
cpu_value = cuda_config.cpu_value
stub_dir = "" if is_windows(repository_ctx) else "/stubs"
check_cuda_libs_params = {
"cuda": _check_cuda_lib_params(
"cuda",
cpu_value,
cuda_config.config["cuda_library_dir"] + stub_dir,
version = None,
static = False,
),
"cudart": _check_cuda_lib_params(
"cudart",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cudart_static": _check_cuda_lib_params(
"cudart_static",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = True,
),
"cublas": _check_cuda_lib_params(
"cublas",
cpu_value,
cuda_config.config["cublas_library_dir"],
cuda_config.cublas_version,
static = False,
),
"cusolver": _check_cuda_lib_params(
"cusolver",
cpu_value,
cuda_config.config["cusolver_library_dir"],
cuda_config.cusolver_version,
static = False,
),
"curand": _check_cuda_lib_params(
"curand",
cpu_value,
cuda_config.config["curand_library_dir"],
cuda_config.curand_version,
static = False,
),
"cufft": _check_cuda_lib_params(
"cufft",
cpu_value,
cuda_config.config["cufft_library_dir"],
cuda_config.cufft_version,
static = False,
),
"cudnn": _check_cuda_lib_params(
"cudnn",
cpu_value,
cuda_config.config["cudnn_library_dir"],
cuda_config.cudnn_version,
static = False,
),
"cupti": _check_cuda_lib_params(
"cupti",
cpu_value,
cuda_config.config["cupti_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cusparse": _check_cuda_lib_params(
"cusparse",
cpu_value,
cuda_config.config["cusparse_library_dir"],
cuda_config.cusparse_version,
static = False,
),
}
# Verify that the libs actually exist at their locations.
_check_cuda_libs(repository_ctx, check_cuda_libs_script, check_cuda_libs_params.values())
paths = {filename: v[0] for (filename, v) in check_cuda_libs_params.items()}
return paths
def _cudart_static_linkopt(cpu_value):
"""Returns additional platform-specific linkopts for cudart."""
return "" if cpu_value == "Darwin" else "\"-lrt\","
def _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries):
python_bin = get_python_bin(repository_ctx)
# If used with remote execution then repository_ctx.execute() can't
# access files from the source tree. A trick is to read the contents
# of the file in Starlark and embed them as part of the command. In
# this case the trick is not sufficient as the find_cuda_config.py
# script has more than 8192 characters. 8192 is the command length
# limit of cmd.exe on Windows. Thus we additionally need to compress
# the contents locally and decompress them as part of the execute().
compressed_contents = repository_ctx.read(script_path)
decompress_and_execute_cmd = (
"from zlib import decompress;" +
"from base64 import b64decode;" +
"from os import system;" +
"script = decompress(b64decode('%s'));" % compressed_contents +
"f = open('script.py', 'wb');" +
"f.write(script);" +
"f.close();" +
"system('\"%s\" script.py %s');" % (python_bin, " ".join(cuda_libraries))
)
return execute(repository_ctx, [python_bin, "-c", decompress_and_execute_cmd])
# TODO(csigg): Only call once instead of from here, tensorrt_configure.bzl,
# and nccl_configure.bzl.
def find_cuda_config(repository_ctx, script_path, cuda_libraries):
"""Returns CUDA config dictionary from running find_cuda_config.py"""
exec_result = _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries)
if exec_result.return_code:
auto_configure_fail("Failed to run find_cuda_config.py: %s" % err_out(exec_result))
# Parse the dict from stdout.
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()])
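# Sketch of the expected stdout format and the parsed result
# (hypothetical values):
#   "cuda_version: 11.2\ncuda_toolkit_path: /usr/local/cuda"
#   -> {"cuda_version": "11.2", "cuda_toolkit_path": "/usr/local/cuda"}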
def _get_cuda_config(repository_ctx, find_cuda_config_script):
"""Detects and returns information about the CUDA installation on the system.
    Args:
      repository_ctx: The repository context.
      find_cuda_config_script: The path to the compressed
        find_cuda_config.py script.
    Returns:
      A struct containing the following fields:
        cuda_toolkit_path: The CUDA toolkit installation directory.
        cuda_version: The version of CUDA on the system.
        cudnn_version: The version of cuDNN on the system.
        cublas_version, cusolver_version, curand_version, cufft_version,
          cusparse_version: The versions of the respective CUDA libraries.
        compute_capabilities: A list of the system's CUDA compute capabilities.
        cpu_value: The name of the host operating system.
        config: The raw dictionary returned by find_cuda_config.py.
    """
config = find_cuda_config(repository_ctx, find_cuda_config_script, ["cuda", "cudnn"])
cpu_value = get_cpu_value(repository_ctx)
toolkit_path = config["cuda_toolkit_path"]
is_windows = cpu_value == "Windows"
cuda_version = config["cuda_version"].split(".")
cuda_major = cuda_version[0]
cuda_minor = cuda_version[1]
cuda_version = ("64_%s%s" if is_windows else "%s.%s") % (cuda_major, cuda_minor)
cudnn_version = ("64_%s" if is_windows else "%s") % config["cudnn_version"]
if int(cuda_major) >= 11:
cublas_version = ("64_%s" if is_windows else "%s") % config["cublas_version"].split(".")[0]
cusolver_version = ("64_%s" if is_windows else "%s") % config["cusolver_version"].split(".")[0]
curand_version = ("64_%s" if is_windows else "%s") % config["curand_version"].split(".")[0]
cufft_version = ("64_%s" if is_windows else "%s") % config["cufft_version"].split(".")[0]
cusparse_version = ("64_%s" if is_windows else "%s") % config["cusparse_version"].split(".")[0]
elif (int(cuda_major), int(cuda_minor)) >= (10, 1):
# cuda_lib_version is for libraries like cuBLAS, cuFFT, cuSOLVER, etc.
# It changed from 'x.y' to just 'x' in CUDA 10.1.
cuda_lib_version = ("64_%s" if is_windows else "%s") % cuda_major
cublas_version = cuda_lib_version
cusolver_version = cuda_lib_version
curand_version = cuda_lib_version
cufft_version = cuda_lib_version
cusparse_version = cuda_lib_version
else:
cublas_version = cuda_version
cusolver_version = cuda_version
curand_version = cuda_version
cufft_version = cuda_version
cusparse_version = cuda_version
return struct(
cuda_toolkit_path = toolkit_path,
cuda_version = cuda_version,
cublas_version = cublas_version,
cusolver_version = cusolver_version,
curand_version = curand_version,
cufft_version = cufft_version,
cusparse_version = cusparse_version,
cudnn_version = cudnn_version,
compute_capabilities = compute_capabilities(repository_ctx),
cpu_value = cpu_value,
config = config,
)
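# Version-string examples for the branches above (hypothetical installs):
#   CUDA 11.2, Linux   -> cuda_version "11.2"; per-library majors, e.g. cublas "11"
#   CUDA 10.1, Linux   -> cuda_version "10.1"; shared libraries versioned "10"
#   CUDA 10.0, Linux   -> library versions fall back to the full "10.0"
#   CUDA 11.2, Windows -> cuda_version "64_112"; cuDNN 8 becomes "64_8"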
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl.replace(":", "/")
repository_ctx.template(
out,
Label("//third_party/gpus/%s.tpl" % tpl),
substitutions,
)
def _file(repository_ctx, label):
repository_ctx.template(
label.replace(":", "/"),
Label("//third_party/gpus/%s.tpl" % label),
{},
)
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
fail("ERROR: Building with --config=cuda but TensorFlow is not configured " +
"to build with GPU support. Please re-run ./configure and enter 'Y' " +
"at the prompt to build with GPU support.")
native.genrule(
name = "error_gen_crosstool",
outs = ["CROSSTOOL"],
cmd = "echo 'Should not be run.' && exit 1",
)
native.filegroup(
name = "crosstool",
srcs = [":CROSSTOOL"],
output_licenses = ["unencumbered"],
)
"""
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
def _create_dummy_repository(repository_ctx):
cpu_value = get_cpu_value(repository_ctx)
# Set up BUILD file for cuda/.
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "False",
"%{cuda_extra_copts}": "[]",
"%{cuda_gpu_architectures}": "[]",
},
)
_tpl(
repository_ctx,
"cuda:BUILD",
{
"%{cuda_driver_lib}": lib_name("cuda", cpu_value),
"%{cudart_static_lib}": lib_name(
"cudart_static",
cpu_value,
static = True,
),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
"%{cudart_lib}": lib_name("cudart", cpu_value),
"%{cublas_lib}": lib_name("cublas", cpu_value),
"%{cusolver_lib}": lib_name("cusolver", cpu_value),
"%{cudnn_lib}": lib_name("cudnn", cpu_value),
"%{cufft_lib}": lib_name("cufft", cpu_value),
"%{curand_lib}": lib_name("curand", cpu_value),
"%{cupti_lib}": lib_name("cupti", cpu_value),
"%{cusparse_lib}": lib_name("cusparse", cpu_value),
"%{copy_rules}": """
filegroup(name="cuda-include")
filegroup(name="cublas-include")
filegroup(name="cusolver-include")
filegroup(name="cufft-include")
filegroup(name="cusparse-include")
filegroup(name="curand-include")
filegroup(name="cudnn-include")
""",
},
)
# Create dummy files for the CUDA toolkit since they are still required by
# tensorflow/core/platform/default/build_config:cuda.
repository_ctx.file("cuda/cuda/include/cuda.h")
repository_ctx.file("cuda/cuda/include/cublas.h")
repository_ctx.file("cuda/cuda/include/cudnn.h")
repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h")
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cuda", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudart", cpu_value))
repository_ctx.file(
"cuda/cuda/lib/%s" % lib_name("cudart_static", cpu_value),
)
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cublas", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusolver", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudnn", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("curand", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cufft", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cupti", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusparse", cpu_value))
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
_tpl(
repository_ctx,
"cuda:cuda_config.h",
{
"%{cuda_version}": "",
"%{cublas_version}": "",
"%{cusolver_version}": "",
"%{curand_version}": "",
"%{cufft_version}": "",
"%{cusparse_version}": "",
"%{cudnn_version}": "",
"%{cuda_toolkit_path}": "",
},
"cuda/cuda/cuda_config.h",
)
# Set up cuda_config.py, which is used by gen_build_info to provide
# static build environment info to the API
_tpl(
repository_ctx,
"cuda:cuda_config.py",
_py_tmpl_dict({}),
"cuda/cuda/cuda_config.py",
)
# If cuda_configure is not configured to build with GPU support, and the user
# attempts to build with --config=cuda, add a dummy build rule to intercept
# this and fail with an actionable error message.
repository_ctx.file(
"crosstool/error_gpu_disabled.bzl",
_DUMMY_CROSSTOOL_BZL_FILE,
)
repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
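# Example: _norm_path("C:\\cuda\\include\\") -> "C:/cuda/include"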
def make_copy_files_rule(repository_ctx, name, srcs, outs):
"""Returns a rule to copy a set of files."""
cmds = []
# Copy files.
for src, out in zip(srcs, outs):
cmds.append('cp -f "%s" "$(location %s)"' % (src, out))
outs = [(' "%s",' % out) for out in outs]
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""%s \""",
)""" % (name, "\n".join(outs), " && \\\n".join(cmds))
def make_copy_dir_rule(repository_ctx, name, src_dir, out_dir, exceptions = None):
"""Returns a rule to recursively copy a directory.
If exceptions is not None, it must be a list of files or directories in
'src_dir'; these will be excluded from copying.
"""
src_dir = _norm_path(src_dir)
out_dir = _norm_path(out_dir)
outs = read_dir(repository_ctx, src_dir)
post_cmd = ""
if exceptions != None:
outs = [x for x in outs if not any([
x.startswith(src_dir + "/" + y)
for y in exceptions
])]
outs = [(' "%s",' % out.replace(src_dir, out_dir)) for out in outs]
# '@D' already contains the relative path for a single file, see
# http://docs.bazel.build/versions/master/be/make-variables.html#predefined_genrule_variables
out_dir = "$(@D)/%s" % out_dir if len(outs) > 1 else "$(@D)"
if exceptions != None:
for x in exceptions:
post_cmd += " ; rm -fR " + out_dir + "/" + x
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""cp -rLf "%s/." "%s/" %s\""",
)""" % (name, "\n".join(outs), src_dir, out_dir, post_cmd)
def _flag_enabled(repository_ctx, flag_name):
return get_host_environ(repository_ctx, flag_name) == "1"
def _use_cuda_clang(repository_ctx):
return _flag_enabled(repository_ctx, "TF_CUDA_CLANG")
def _tf_sysroot(repository_ctx):
return get_host_environ(repository_ctx, _TF_SYSROOT, "")
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
copts = []
for capability in compute_capabilities:
if capability.startswith("compute_"):
capability = capability.replace("compute_", "sm_")
copts.append("--cuda-include-ptx=%s" % capability)
copts.append("--cuda-gpu-arch=%s" % capability)
return str(copts)
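# Example (illustrative): ["compute_52", "sm_70"] ->
#   '["--cuda-include-ptx=sm_52", "--cuda-gpu-arch=sm_52", "--cuda-gpu-arch=sm_70"]'
# i.e. PTX is embedded only for capabilities requested with the compute_ prefix.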
def _tpl_path(repository_ctx, filename):
return repository_ctx.path(Label("//third_party/gpus/%s.tpl" % filename))
def _basename(repository_ctx, path_str):
"""Returns the basename of a path of type string.
This method is different from path.basename in that it also works if
    the host platform is different from the execution platform,
    e.g. a Linux host building for Windows targets.
"""
num_chars = len(path_str)
is_win = is_windows(repository_ctx)
for i in range(num_chars):
r_i = num_chars - 1 - i
if (is_win and path_str[r_i] == "\\") or path_str[r_i] == "/":
return path_str[r_i + 1:]
return path_str
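# Examples (illustrative): on a Windows host
#   _basename(repository_ctx, "C:\\cuda\\bin\\nvcc.exe") -> "nvcc.exe"
# while on any host
#   _basename(repository_ctx, "/usr/local/cuda/lib64/libcudart.so") -> "libcudart.so"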
def _create_local_cuda_repository(repository_ctx):
"""Creates the repository containing files set up to build with CUDA."""
# Resolve all labels before doing any real work. Resolving causes the
# function to be restarted with all previous state being lost. This
# can easily lead to a O(n^2) runtime in the number of labels.
# See https://github.com/tensorflow/tensorflow/commit/62bd3534525a036f07d9851b3199d68212904778
tpl_paths = {filename: _tpl_path(repository_ctx, filename) for filename in [
"cuda:build_defs.bzl",
"crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
"crosstool:windows/msvc_wrapper_for_nvcc.py",
"crosstool:BUILD",
"crosstool:cc_toolchain_config.bzl",
"cuda:cuda_config.h",
"cuda:cuda_config.py",
]}
tpl_paths["cuda:BUILD"] = _tpl_path(repository_ctx, "cuda:BUILD.windows" if is_windows(repository_ctx) else "cuda:BUILD")
find_cuda_config_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:find_cuda_config.py.gz.base64"))
cuda_config = _get_cuda_config(repository_ctx, find_cuda_config_script)
cuda_include_path = cuda_config.config["cuda_include_dir"]
cublas_include_path = cuda_config.config["cublas_include_dir"]
cudnn_header_dir = cuda_config.config["cudnn_include_dir"]
cupti_header_dir = cuda_config.config["cupti_include_dir"]
nvvm_libdevice_dir = cuda_config.config["nvvm_library_dir"]
# Create genrule to copy files from the installed CUDA toolkit into execroot.
copy_rules = [
make_copy_dir_rule(
repository_ctx,
name = "cuda-include",
src_dir = cuda_include_path,
out_dir = "cuda/include",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-nvvm",
src_dir = nvvm_libdevice_dir,
out_dir = "cuda/nvvm/libdevice",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-extras",
src_dir = cupti_header_dir,
out_dir = "cuda/extras/CUPTI/include",
),
]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cublas-include",
srcs = [
cublas_include_path + "/cublas.h",
cublas_include_path + "/cublas_v2.h",
cublas_include_path + "/cublas_api.h",
],
outs = [
"cublas/include/cublas.h",
"cublas/include/cublas_v2.h",
"cublas/include/cublas_api.h",
],
))
cusolver_include_path = cuda_config.config["cusolver_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusolver-include",
srcs = [
cusolver_include_path + "/cusolver_common.h",
cusolver_include_path + "/cusolverDn.h",
],
outs = [
"cusolver/include/cusolver_common.h",
"cusolver/include/cusolverDn.h",
],
))
cufft_include_path = cuda_config.config["cufft_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cufft-include",
srcs = [
cufft_include_path + "/cufft.h",
],
outs = [
"cufft/include/cufft.h",
],
))
cusparse_include_path = cuda_config.config["cusparse_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusparse-include",
srcs = [
cusparse_include_path + "/cusparse.h",
],
outs = [
"cusparse/include/cusparse.h",
],
))
curand_include_path = cuda_config.config["curand_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "curand-include",
srcs = [
curand_include_path + "/curand.h",
],
outs = [
"curand/include/curand.h",
],
))
check_cuda_libs_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:check_cuda_libs.py"))
cuda_libs = _find_libs(repository_ctx, check_cuda_libs_script, cuda_config)
cuda_lib_srcs = []
cuda_lib_outs = []
for path in cuda_libs.values():
cuda_lib_srcs.append(path)
cuda_lib_outs.append("cuda/lib/" + _basename(repository_ctx, path))
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-lib",
srcs = cuda_lib_srcs,
outs = cuda_lib_outs,
))
# copy files mentioned in third_party/nccl/build_defs.bzl.tpl
file_ext = ".exe" if is_windows(repository_ctx) else ""
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-bin",
srcs = [
cuda_config.cuda_toolkit_path + "/bin/" + "crt/link.stub",
cuda_config.cuda_toolkit_path + "/bin/" + "nvlink" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "fatbinary" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "bin2c" + file_ext,
],
outs = [
"cuda/bin/" + "crt/link.stub",
"cuda/bin/" + "nvlink" + file_ext,
"cuda/bin/" + "fatbinary" + file_ext,
"cuda/bin/" + "bin2c" + file_ext,
],
))
# Select the headers based on the cuDNN version (strip '64_' for Windows).
cudnn_headers = ["cudnn.h"]
    if cuda_config.cudnn_version.rsplit("_", 1)[-1] >= "8":
cudnn_headers += [
"cudnn_backend.h",
"cudnn_adv_infer.h",
"cudnn_adv_train.h",
"cudnn_cnn_infer.h",
"cudnn_cnn_train.h",
"cudnn_ops_infer.h",
"cudnn_ops_train.h",
"cudnn_version.h",
]
cudnn_srcs = []
cudnn_outs = []
for header in cudnn_headers:
cudnn_srcs.append(cudnn_header_dir + "/" + header)
cudnn_outs.append("cudnn/include/" + header)
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cudnn-include",
srcs = cudnn_srcs,
outs = cudnn_outs,
))
# Set up BUILD file for cuda/
repository_ctx.template(
"cuda/build_defs.bzl",
tpl_paths["cuda:build_defs.bzl"],
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
cuda_config.compute_capabilities,
),
"%{cuda_gpu_architectures}": str(cuda_config.compute_capabilities),
},
)
repository_ctx.template(
"cuda/BUILD",
tpl_paths["cuda:BUILD"],
{
"%{cuda_driver_lib}": _basename(repository_ctx, cuda_libs["cuda"]),
"%{cudart_static_lib}": _basename(repository_ctx, cuda_libs["cudart_static"]),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cuda_config.cpu_value),
"%{cudart_lib}": _basename(repository_ctx, cuda_libs["cudart"]),
"%{cublas_lib}": _basename(repository_ctx, cuda_libs["cublas"]),
"%{cusolver_lib}": _basename(repository_ctx, cuda_libs["cusolver"]),
"%{cudnn_lib}": _basename(repository_ctx, cuda_libs["cudnn"]),
"%{cufft_lib}": _basename(repository_ctx, cuda_libs["cufft"]),
"%{curand_lib}": _basename(repository_ctx, cuda_libs["curand"]),
"%{cupti_lib}": _basename(repository_ctx, cuda_libs["cupti"]),
"%{cusparse_lib}": _basename(repository_ctx, cuda_libs["cusparse"]),
"%{copy_rules}": "\n".join(copy_rules),
},
)
is_cuda_clang = _use_cuda_clang(repository_ctx)
tf_sysroot = _tf_sysroot(repository_ctx)
should_download_clang = is_cuda_clang and _flag_enabled(
repository_ctx,
_TF_DOWNLOAD_CLANG,
)
if should_download_clang:
download_clang(repository_ctx, "crosstool/extra_tools")
# Set up crosstool/
cc = find_cc(repository_ctx)
cc_fullpath = cc if not should_download_clang else "crosstool/" + cc
host_compiler_includes = get_cxx_inc_directories(
repository_ctx,
cc_fullpath,
tf_sysroot,
)
cuda_defines = {}
cuda_defines["%{builtin_sysroot}"] = tf_sysroot
cuda_defines["%{cuda_toolkit_path}"] = ""
cuda_defines["%{compiler}"] = "unknown"
if is_cuda_clang:
cuda_defines["%{cuda_toolkit_path}"] = cuda_config.config["cuda_toolkit_path"]
cuda_defines["%{compiler}"] = "clang"
host_compiler_prefix = get_host_environ(repository_ctx, _GCC_HOST_COMPILER_PREFIX)
if not host_compiler_prefix:
host_compiler_prefix = "/usr/bin"
cuda_defines["%{host_compiler_prefix}"] = host_compiler_prefix
# Bazel sets '-B/usr/bin' flag to workaround build errors on RHEL (see
# https://github.com/bazelbuild/bazel/issues/760).
# However, this stops our custom clang toolchain from picking the provided
# LLD linker, so we're only adding '-B/usr/bin' when using non-downloaded
# toolchain.
# TODO: when bazel stops adding '-B/usr/bin' by default, remove this
# flag from the CROSSTOOL completely (see
# https://github.com/bazelbuild/bazel/issues/5634)
if should_download_clang:
cuda_defines["%{linker_bin_path}"] = ""
else:
cuda_defines["%{linker_bin_path}"] = host_compiler_prefix
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = ""
cuda_defines["%{unfiltered_compile_flags}"] = ""
if is_cuda_clang:
cuda_defines["%{host_compiler_path}"] = str(cc)
cuda_defines["%{host_compiler_warnings}"] = """
# Some parts of the codebase set -Werror and hit this warning, so
# switch it off for now.
"-Wno-invalid-partial-specialization"
"""
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(host_compiler_includes)
cuda_defines["%{compiler_deps}"] = ":empty"
cuda_defines["%{win_compiler_deps}"] = ":empty"
repository_ctx.file(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
"",
)
repository_ctx.file("crosstool/windows/msvc_wrapper_for_nvcc.py", "")
else:
cuda_defines["%{host_compiler_path}"] = "clang/bin/crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{host_compiler_warnings}"] = ""
# nvcc has the system include paths built in and will automatically
# search them; we cannot work around that, so we add the relevant cuda
# system paths to the allowed compiler specific include paths.
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(
host_compiler_includes + _cuda_include_path(
repository_ctx,
cuda_config,
) + [cupti_header_dir, cudnn_header_dir],
)
# For gcc, do not canonicalize system header paths; some versions of gcc
# pick the shortest possible path for system includes when creating the
# .d file - given that includes that are prefixed with "../" multiple
        # times quickly grow longer than the root of the tree, this can lead to
# bazel's header check failing.
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = "\"-fno-canonical-system-headers\""
file_ext = ".exe" if is_windows(repository_ctx) else ""
nvcc_path = "%s/nvcc%s" % (cuda_config.config["cuda_binary_dir"], file_ext)
cuda_defines["%{compiler_deps}"] = ":crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{win_compiler_deps}"] = ":windows_msvc_wrapper_files"
wrapper_defines = {
"%{cpu_compiler}": str(cc),
"%{cuda_version}": cuda_config.cuda_version,
"%{nvcc_path}": nvcc_path,
"%{gcc_host_compiler_path}": str(cc),
"%{nvcc_tmp_dir}": _get_nvcc_tmp_dir_for_windows(repository_ctx),
}
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
tpl_paths["crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"],
wrapper_defines,
)
repository_ctx.template(
"crosstool/windows/msvc_wrapper_for_nvcc.py",
tpl_paths["crosstool:windows/msvc_wrapper_for_nvcc.py"],
wrapper_defines,
)
cuda_defines.update(_get_win_cuda_defines(repository_ctx))
verify_build_defines(cuda_defines)
# Only expand template variables in the BUILD file
repository_ctx.template(
"crosstool/BUILD",
tpl_paths["crosstool:BUILD"],
cuda_defines,
)
# No templating of cc_toolchain_config - use attributes and templatize the
# BUILD file.
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
tpl_paths["crosstool:cc_toolchain_config.bzl"],
{},
)
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
repository_ctx.template(
"cuda/cuda/cuda_config.h",
tpl_paths["cuda:cuda_config.h"],
{
"%{cuda_version}": cuda_config.cuda_version,
"%{cublas_version}": cuda_config.cublas_version,
"%{cusolver_version}": cuda_config.cusolver_version,
"%{curand_version}": cuda_config.curand_version,
"%{cufft_version}": cuda_config.cufft_version,
"%{cusparse_version}": cuda_config.cusparse_version,
"%{cudnn_version}": cuda_config.cudnn_version,
"%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
},
)
# Set up cuda_config.py, which is used by gen_build_info to provide
# static build environment info to the API
repository_ctx.template(
"cuda/cuda/cuda_config.py",
tpl_paths["cuda:cuda_config.py"],
_py_tmpl_dict({
"cuda_version": cuda_config.cuda_version,
"cudnn_version": cuda_config.cudnn_version,
"cuda_compute_capabilities": cuda_config.compute_capabilities,
"cpu_compiler": str(cc),
}),
)
def _py_tmpl_dict(d):
return {"%{cuda_config}": str(d)}
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
"""Creates pointers to a remotely configured repo set up to build with CUDA."""
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
compute_capabilities(repository_ctx),
),
},
)
repository_ctx.template(
"cuda/BUILD",
config_repo_label(remote_config_repo, "cuda:BUILD"),
{},
)
repository_ctx.template(
"cuda/build_defs.bzl",
config_repo_label(remote_config_repo, "cuda:build_defs.bzl"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.h",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.h"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.py",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.py"),
_py_tmpl_dict({}),
)
repository_ctx.template(
"crosstool/BUILD",
config_repo_label(remote_config_repo, "crosstool:BUILD"),
{},
)
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
config_repo_label(remote_config_repo, "crosstool:cc_toolchain_config.bzl"),
{},
)
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
config_repo_label(remote_config_repo, "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"),
{},
)
def _cuda_autoconf_impl(repository_ctx):
"""Implementation of the cuda_autoconf repository rule."""
if not enable_cuda(repository_ctx):
_create_dummy_repository(repository_ctx)
elif get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO) != None:
has_cuda_version = get_host_environ(repository_ctx, _TF_CUDA_VERSION) != None
has_cudnn_version = get_host_environ(repository_ctx, _TF_CUDNN_VERSION) != None
if not has_cuda_version or not has_cudnn_version:
auto_configure_fail("%s and %s must also be set if %s is specified" %
(_TF_CUDA_VERSION, _TF_CUDNN_VERSION, _TF_CUDA_CONFIG_REPO))
_create_remote_cuda_repository(
repository_ctx,
get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO),
)
else:
_create_local_cuda_repository(repository_ctx)
_ENVIRONS = [
_GCC_HOST_COMPILER_PATH,
_GCC_HOST_COMPILER_PREFIX,
_CLANG_CUDA_COMPILER_PATH,
"TF_NEED_CUDA",
"TF_CUDA_CLANG",
_TF_DOWNLOAD_CLANG,
_CUDA_TOOLKIT_PATH,
_CUDNN_INSTALL_PATH,
_TF_CUDA_VERSION,
_TF_CUDNN_VERSION,
_TF_CUDA_COMPUTE_CAPABILITIES,
"NVVMIR_LIBRARY_DIR",
_PYTHON_BIN_PATH,
"TMP",
"TMPDIR",
"TF_CUDA_PATHS",
]
remote_cuda_configure = repository_rule(
implementation = _create_local_cuda_repository,
environ = _ENVIRONS,
remotable = True,
attrs = {
"environ": attr.string_dict(),
},
)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
environ = _ENVIRONS + [_TF_CUDA_CONFIG_REPO],
)
"""Detects and configures the local CUDA toolchain.
Add the following to your WORKSPACE file:
```python
cuda_configure(name = "local_config_cuda")
```
Args:
name: A unique name for this workspace rule.
"""
| 36.468175
| 125
| 0.638134
|
load("//third_party/clang_toolchain:download_clang.bzl", "download_clang")
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"escape_string",
"get_env_var",
)
load(
"@bazel_tools//tools/cpp:windows_cc_configure.bzl",
"find_msvc_tool",
"find_vc_path",
"setup_vc_env_vars",
)
load(
"//third_party/remote_config:common.bzl",
"config_repo_label",
"err_out",
"execute",
"get_bash_bin",
"get_cpu_value",
"get_host_environ",
"get_python_bin",
"is_windows",
"raw_exec",
"read_dir",
"realpath",
"which",
)
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
_GCC_HOST_COMPILER_PREFIX = "GCC_HOST_COMPILER_PREFIX"
_CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
_TF_SYSROOT = "TF_SYSROOT"
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_TF_CUDA_VERSION = "TF_CUDA_VERSION"
_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
_TF_DOWNLOAD_CLANG = "TF_DOWNLOAD_CLANG"
_PYTHON_BIN_PATH = "PYTHON_BIN_PATH"
def to_list_of_strings(elements):
quoted_strings = ["\"" + element + "\"" for element in elements]
return ", ".join(quoted_strings)
def verify_build_defines(params):
missing = []
for param in [
"cxx_builtin_include_directories",
"extra_no_canonical_prefixes_flags",
"host_compiler_path",
"host_compiler_prefix",
"host_compiler_warnings",
"linker_bin_path",
"compiler_deps",
"msvc_cl_path",
"msvc_env_include",
"msvc_env_lib",
"msvc_env_path",
"msvc_env_tmp",
"msvc_lib_path",
"msvc_link_path",
"msvc_ml_path",
"unfiltered_compile_flags",
"win_compiler_deps",
]:
if ("%{" + param + "}") not in params:
missing.append(param)
if missing:
auto_configure_fail(
"BUILD.tpl template is missing these variables: " +
str(missing) +
".\nWe only got: " +
str(params) +
".",
)
def _get_nvcc_tmp_dir_for_windows(repository_ctx):
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
return escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
def _get_msvc_compiler(repository_ctx):
vc_path = find_vc_path(repository_ctx)
return find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
def _get_win_cuda_defines(repository_ctx):
if not is_windows(repository_ctx):
return {
"%{msvc_env_tmp}": "msvc_not_used",
"%{msvc_env_path}": "msvc_not_used",
"%{msvc_env_include}": "msvc_not_used",
"%{msvc_env_lib}": "msvc_not_used",
"%{msvc_cl_path}": "msvc_not_used",
"%{msvc_ml_path}": "msvc_not_used",
"%{msvc_link_path}": "msvc_not_used",
"%{msvc_lib_path}": "msvc_not_used",
}
vc_path = find_vc_path(repository_ctx)
if not vc_path:
auto_configure_fail(
"Visual C++ build tools not found on your machine." +
"Please check your installation following https://docs.bazel.build/versions/master/windows.html#using",
)
return {}
env = setup_vc_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
msvc_cl_path = get_python_bin(repository_ctx)
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace(
"\\",
"/",
)
msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace(
"\\",
"/",
)
msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace(
"\\",
"/",
)
escaped_cxx_include_directories = [
_get_nvcc_tmp_dir_for_windows(repository_ctx),
"C:\\\\botcode\\\\w",
]
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append(path)
return {
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": msvc_cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": msvc_link_path,
"%{msvc_lib_path}": msvc_lib_path,
"%{cxx_builtin_include_directories}": to_list_of_strings(
escaped_cxx_include_directories,
),
}
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
if is_windows(repository_ctx):
return _get_msvc_compiler(repository_ctx)
if _use_cuda_clang(repository_ctx):
target_cc_name = "clang"
cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
if _flag_enabled(repository_ctx, _TF_DOWNLOAD_CLANG):
return "extra_tools/bin/clang"
else:
target_cc_name = "gcc"
cc_path_envvar = _GCC_HOST_COMPILER_PATH
cc_name = target_cc_name
cc_name_from_env = get_host_environ(repository_ctx, cc_path_envvar)
if cc_name_from_env:
cc_name = cc_name_from_env
if cc_name.startswith("/"):
# Absolute path, maybe we should make this supported by our which function.
return cc_name
cc = which(repository_ctx, cc_name)
if cc == None:
fail(("Cannot find {}, either correct your path or set the {}" +
" environment variable").format(target_cc_name, cc_path_envvar))
return cc
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX add " (framework directory)" at the end of line, strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
path = path.strip()
if path.endswith(_OSX_FRAMEWORK_SUFFIX):
path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
return path
def _normalize_include_path(repository_ctx, path):
path = str(repository_ctx.path(path))
crosstool_folder = str(repository_ctx.path(".").get_child("crosstool"))
if path.startswith(crosstool_folder):
# We drop the path to "$REPO/crosstool" and a trailing path separator.
return path[len(crosstool_folder) + 1:]
return path
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp, tf_sysroot):
if lang_is_cpp:
lang = "c++"
else:
lang = "c"
sysroot = []
if tf_sysroot:
sysroot += ["--sysroot", tf_sysroot]
result = raw_exec(repository_ctx, [cc, "-E", "-x" + lang, "-", "-v"] +
sysroot)
stderr = err_out(result)
index1 = stderr.find(_INC_DIR_MARKER_BEGIN)
if index1 == -1:
return []
index1 = stderr.find("\n", index1)
if index1 == -1:
return []
index2 = stderr.rfind("\n ")
if index2 == -1 or index2 < index1:
return []
index2 = stderr.find("\n", index2 + 1)
if index2 == -1:
inc_dirs = stderr[index1 + 1:]
else:
inc_dirs = stderr[index1 + 1:index2].strip()
return [
_normalize_include_path(repository_ctx, _cxx_inc_convert(p))
for p in inc_dirs.split("\n")
]
def get_cxx_inc_directories(repository_ctx, cc, tf_sysroot):
# For some reason `clang -xc` sometimes returns include paths that are
# different from the ones from `clang -xc++`. (Symlink and a dir)
# So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
includes_cpp = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
True,
tf_sysroot,
)
includes_c = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
False,
tf_sysroot,
)
return includes_cpp + [
inc
for inc in includes_c
if inc not in includes_cpp
]
def auto_configure_fail(msg):
red = "\033[0;31m"
no_color = "\033[0m"
fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
# END cc_configure common functions (see TODO above).
def _cuda_include_path(repository_ctx, cuda_config):
nvcc_path = repository_ctx.path("%s/bin/nvcc%s" % (
cuda_config.cuda_toolkit_path,
".exe" if cuda_config.cpu_value == "Windows" else "",
))
# The expected exit code of this command is non-zero. Bazel remote execution
# only caches commands with zero exit code. So force a zero exit code.
cmd = "%s -v /dev/null -o /dev/null ; [ $? -eq 1 ]" % str(nvcc_path)
result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd])
target_dir = ""
for one_line in err_out(result).splitlines():
if one_line.startswith("#$ _TARGET_DIR_="):
target_dir = (
cuda_config.cuda_toolkit_path + "/" + one_line.replace(
"#$ _TARGET_DIR_=",
"",
) + "/include"
)
inc_entries = []
if target_dir != "":
inc_entries.append(realpath(repository_ctx, target_dir))
inc_entries.append(realpath(repository_ctx, cuda_config.cuda_toolkit_path + "/include"))
return inc_entries
def enable_cuda(repository_ctx):
return int(get_host_environ(repository_ctx, "TF_NEED_CUDA", False))
def matches_version(environ_version, detected_version):
environ_version_parts = environ_version.split(".")
detected_version_parts = detected_version.split(".")
if len(detected_version_parts) < len(environ_version_parts):
return False
for i, part in enumerate(detected_version_parts):
if i >= len(environ_version_parts):
break
if part != environ_version_parts[i]:
return False
return True
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
def compute_capabilities(repository_ctx):
capabilities = get_host_environ(
repository_ctx,
_TF_CUDA_COMPUTE_CAPABILITIES,
"compute_35,compute_52",
).split(",")
# Map old 'x.y' capabilities to 'compute_xy'.
for i, capability in enumerate(capabilities):
parts = capability.split(".")
if len(parts) != 2:
continue
capabilities[i] = "compute_%s%s" % (parts[0], parts[1])
# Make list unique
capabilities = dict(zip(capabilities, capabilities)).keys()
# Validate capabilities.
for capability in capabilities:
if not capability.startswith(("compute_", "sm_")):
auto_configure_fail("Invalid compute capability: %s" % capability)
for prefix in ["compute_", "sm_"]:
if not capability.startswith(prefix):
continue
if len(capability) == len(prefix) + 2 and capability[-2:].isdigit():
continue
auto_configure_fail("Invalid compute capability: %s" % capability)
return capabilities
def lib_name(base_name, cpu_value, version = None, static = False):
version = "" if not version else "." + version
if cpu_value in ("Linux", "FreeBSD"):
if static:
return "lib%s.a" % base_name
return "lib%s.so%s" % (base_name, version)
elif cpu_value == "Windows":
return "%s.lib" % base_name
elif cpu_value == "Darwin":
if static:
return "lib%s.a" % base_name
return "lib%s%s.dylib" % (base_name, version)
else:
auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
def _lib_path(lib, cpu_value, basedir, version, static):
file_name = lib_name(lib, cpu_value, version, static)
return "%s/%s" % (basedir, file_name)
def _should_check_soname(version, static):
return version and not static
def _check_cuda_lib_params(lib, cpu_value, basedir, version, static = False):
return (
_lib_path(lib, cpu_value, basedir, version, static),
_should_check_soname(version, static),
)
def _check_cuda_libs(repository_ctx, script_path, libs):
python_bin = get_python_bin(repository_ctx)
contents = repository_ctx.read(script_path).splitlines()
cmd = "from os import linesep;"
cmd += "f = open('script.py', 'w');"
for line in contents:
cmd += "f.write('%s' + linesep);" % line
cmd += "f.close();"
cmd += "from os import system;"
args = " ".join(["\"" + path + "\" " + str(check) for path, check in libs])
cmd += "system('%s script.py %s');" % (python_bin, args)
all_paths = [path for path, _ in libs]
checked_paths = execute(repository_ctx, [python_bin, "-c", cmd]).stdout.splitlines()
# Filter out empty lines from splitting on '\r\n' on Windows
checked_paths = [path for path in checked_paths if len(path) > 0]
if all_paths != checked_paths:
auto_configure_fail("Error with installed CUDA libs. Expected '%s'. Actual '%s'." % (all_paths, checked_paths))
def _find_libs(repository_ctx, check_cuda_libs_script, cuda_config):
cpu_value = cuda_config.cpu_value
stub_dir = "" if is_windows(repository_ctx) else "/stubs"
check_cuda_libs_params = {
"cuda": _check_cuda_lib_params(
"cuda",
cpu_value,
cuda_config.config["cuda_library_dir"] + stub_dir,
version = None,
static = False,
),
"cudart": _check_cuda_lib_params(
"cudart",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cudart_static": _check_cuda_lib_params(
"cudart_static",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = True,
),
"cublas": _check_cuda_lib_params(
"cublas",
cpu_value,
cuda_config.config["cublas_library_dir"],
cuda_config.cublas_version,
static = False,
),
"cusolver": _check_cuda_lib_params(
"cusolver",
cpu_value,
cuda_config.config["cusolver_library_dir"],
cuda_config.cusolver_version,
static = False,
),
"curand": _check_cuda_lib_params(
"curand",
cpu_value,
cuda_config.config["curand_library_dir"],
cuda_config.curand_version,
static = False,
),
"cufft": _check_cuda_lib_params(
"cufft",
cpu_value,
cuda_config.config["cufft_library_dir"],
cuda_config.cufft_version,
static = False,
),
"cudnn": _check_cuda_lib_params(
"cudnn",
cpu_value,
cuda_config.config["cudnn_library_dir"],
cuda_config.cudnn_version,
static = False,
),
"cupti": _check_cuda_lib_params(
"cupti",
cpu_value,
cuda_config.config["cupti_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cusparse": _check_cuda_lib_params(
"cusparse",
cpu_value,
cuda_config.config["cusparse_library_dir"],
cuda_config.cusparse_version,
static = False,
),
}
# Verify that the libs actually exist at their locations.
_check_cuda_libs(repository_ctx, check_cuda_libs_script, check_cuda_libs_params.values())
paths = {filename: v[0] for (filename, v) in check_cuda_libs_params.items()}
return paths
def _cudart_static_linkopt(cpu_value):
return "" if cpu_value == "Darwin" else "\"-lrt\","
def _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries):
python_bin = get_python_bin(repository_ctx)
# If used with remote execution then repository_ctx.execute() can't
compressed_contents = repository_ctx.read(script_path)
decompress_and_execute_cmd = (
"from zlib import decompress;" +
"from base64 import b64decode;" +
"from os import system;" +
"script = decompress(b64decode('%s'));" % compressed_contents +
"f = open('script.py', 'wb');" +
"f.write(script);" +
"f.close();" +
"system('\"%s\" script.py %s');" % (python_bin, " ".join(cuda_libraries))
)
return execute(repository_ctx, [python_bin, "-c", decompress_and_execute_cmd])
def find_cuda_config(repository_ctx, script_path, cuda_libraries):
exec_result = _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries)
if exec_result.return_code:
auto_configure_fail("Failed to run find_cuda_config.py: %s" % err_out(exec_result))
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()])
def _get_cuda_config(repository_ctx, find_cuda_config_script):
config = find_cuda_config(repository_ctx, find_cuda_config_script, ["cuda", "cudnn"])
cpu_value = get_cpu_value(repository_ctx)
toolkit_path = config["cuda_toolkit_path"]
is_windows = cpu_value == "Windows"
cuda_version = config["cuda_version"].split(".")
cuda_major = cuda_version[0]
cuda_minor = cuda_version[1]
cuda_version = ("64_%s%s" if is_windows else "%s.%s") % (cuda_major, cuda_minor)
cudnn_version = ("64_%s" if is_windows else "%s") % config["cudnn_version"]
if int(cuda_major) >= 11:
cublas_version = ("64_%s" if is_windows else "%s") % config["cublas_version"].split(".")[0]
cusolver_version = ("64_%s" if is_windows else "%s") % config["cusolver_version"].split(".")[0]
curand_version = ("64_%s" if is_windows else "%s") % config["curand_version"].split(".")[0]
cufft_version = ("64_%s" if is_windows else "%s") % config["cufft_version"].split(".")[0]
cusparse_version = ("64_%s" if is_windows else "%s") % config["cusparse_version"].split(".")[0]
elif (int(cuda_major), int(cuda_minor)) >= (10, 1):
cuda_lib_version = ("64_%s" if is_windows else "%s") % cuda_major
cublas_version = cuda_lib_version
cusolver_version = cuda_lib_version
curand_version = cuda_lib_version
cufft_version = cuda_lib_version
cusparse_version = cuda_lib_version
else:
cublas_version = cuda_version
cusolver_version = cuda_version
curand_version = cuda_version
cufft_version = cuda_version
cusparse_version = cuda_version
return struct(
cuda_toolkit_path = toolkit_path,
cuda_version = cuda_version,
cublas_version = cublas_version,
cusolver_version = cusolver_version,
curand_version = curand_version,
cufft_version = cufft_version,
cusparse_version = cusparse_version,
cudnn_version = cudnn_version,
compute_capabilities = compute_capabilities(repository_ctx),
cpu_value = cpu_value,
config = config,
)
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl.replace(":", "/")
repository_ctx.template(
out,
Label("//third_party/gpus/%s.tpl" % tpl),
substitutions,
)
def _file(repository_ctx, label):
repository_ctx.template(
label.replace(":", "/"),
Label("//third_party/gpus/%s.tpl" % label),
{},
)
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
fail("ERROR: Building with --config=cuda but TensorFlow is not configured " +
"to build with GPU support. Please re-run ./configure and enter 'Y' " +
"at the prompt to build with GPU support.")
native.genrule(
name = "error_gen_crosstool",
outs = ["CROSSTOOL"],
cmd = "echo 'Should not be run.' && exit 1",
)
native.filegroup(
name = "crosstool",
srcs = [":CROSSTOOL"],
output_licenses = ["unencumbered"],
)
"""
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
def _create_dummy_repository(repository_ctx):
cpu_value = get_cpu_value(repository_ctx)
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "False",
"%{cuda_extra_copts}": "[]",
"%{cuda_gpu_architectures}": "[]",
},
)
_tpl(
repository_ctx,
"cuda:BUILD",
{
"%{cuda_driver_lib}": lib_name("cuda", cpu_value),
"%{cudart_static_lib}": lib_name(
"cudart_static",
cpu_value,
static = True,
),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
"%{cudart_lib}": lib_name("cudart", cpu_value),
"%{cublas_lib}": lib_name("cublas", cpu_value),
"%{cusolver_lib}": lib_name("cusolver", cpu_value),
"%{cudnn_lib}": lib_name("cudnn", cpu_value),
"%{cufft_lib}": lib_name("cufft", cpu_value),
"%{curand_lib}": lib_name("curand", cpu_value),
"%{cupti_lib}": lib_name("cupti", cpu_value),
"%{cusparse_lib}": lib_name("cusparse", cpu_value),
"%{copy_rules}": """
filegroup(name="cuda-include")
filegroup(name="cublas-include")
filegroup(name="cusolver-include")
filegroup(name="cufft-include")
filegroup(name="cusparse-include")
filegroup(name="curand-include")
filegroup(name="cudnn-include")
""",
},
)
repository_ctx.file("cuda/cuda/include/cuda.h")
repository_ctx.file("cuda/cuda/include/cublas.h")
repository_ctx.file("cuda/cuda/include/cudnn.h")
repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h")
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cuda", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudart", cpu_value))
repository_ctx.file(
"cuda/cuda/lib/%s" % lib_name("cudart_static", cpu_value),
)
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cublas", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusolver", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudnn", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("curand", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cufft", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cupti", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusparse", cpu_value))
_tpl(
repository_ctx,
"cuda:cuda_config.h",
{
"%{cuda_version}": "",
"%{cublas_version}": "",
"%{cusolver_version}": "",
"%{curand_version}": "",
"%{cufft_version}": "",
"%{cusparse_version}": "",
"%{cudnn_version}": "",
"%{cuda_toolkit_path}": "",
},
"cuda/cuda/cuda_config.h",
)
_tpl(
repository_ctx,
"cuda:cuda_config.py",
_py_tmpl_dict({}),
"cuda/cuda/cuda_config.py",
)
repository_ctx.file(
"crosstool/error_gpu_disabled.bzl",
_DUMMY_CROSSTOOL_BZL_FILE,
)
repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
def _norm_path(path):
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def make_copy_files_rule(repository_ctx, name, srcs, outs):
cmds = []
for src, out in zip(srcs, outs):
cmds.append('cp -f "%s" "$(location %s)"' % (src, out))
outs = [(' "%s",' % out) for out in outs]
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""%s \""",
)""" % (name, "\n".join(outs), " && \\\n".join(cmds))
def make_copy_dir_rule(repository_ctx, name, src_dir, out_dir, exceptions = None):
src_dir = _norm_path(src_dir)
out_dir = _norm_path(out_dir)
outs = read_dir(repository_ctx, src_dir)
post_cmd = ""
if exceptions != None:
outs = [x for x in outs if not any([
x.startswith(src_dir + "/" + y)
for y in exceptions
])]
outs = [(' "%s",' % out.replace(src_dir, out_dir)) for out in outs]
ut_dir if len(outs) > 1 else "$(@D)"
if exceptions != None:
for x in exceptions:
post_cmd += " ; rm -fR " + out_dir + "/" + x
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""cp -rLf "%s/." "%s/" %s\""",
)""" % (name, "\n".join(outs), src_dir, out_dir, post_cmd)
def _flag_enabled(repository_ctx, flag_name):
return get_host_environ(repository_ctx, flag_name) == "1"
def _use_cuda_clang(repository_ctx):
return _flag_enabled(repository_ctx, "TF_CUDA_CLANG")
def _tf_sysroot(repository_ctx):
return get_host_environ(repository_ctx, _TF_SYSROOT, "")
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
copts = []
for capability in compute_capabilities:
if capability.startswith("compute_"):
capability = capability.replace("compute_", "sm_")
copts.append("--cuda-include-ptx=%s" % capability)
copts.append("--cuda-gpu-arch=%s" % capability)
return str(copts)
def _tpl_path(repository_ctx, filename):
return repository_ctx.path(Label("//third_party/gpus/%s.tpl" % filename))
def _basename(repository_ctx, path_str):
num_chars = len(path_str)
is_win = is_windows(repository_ctx)
for i in range(num_chars):
r_i = num_chars - 1 - i
if (is_win and path_str[r_i] == "\\") or path_str[r_i] == "/":
return path_str[r_i + 1:]
return path_str
def _create_local_cuda_repository(repository_ctx):
tpl_paths = {filename: _tpl_path(repository_ctx, filename) for filename in [
"cuda:build_defs.bzl",
"crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
"crosstool:windows/msvc_wrapper_for_nvcc.py",
"crosstool:BUILD",
"crosstool:cc_toolchain_config.bzl",
"cuda:cuda_config.h",
"cuda:cuda_config.py",
]}
tpl_paths["cuda:BUILD"] = _tpl_path(repository_ctx, "cuda:BUILD.windows" if is_windows(repository_ctx) else "cuda:BUILD")
find_cuda_config_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:find_cuda_config.py.gz.base64"))
cuda_config = _get_cuda_config(repository_ctx, find_cuda_config_script)
cuda_include_path = cuda_config.config["cuda_include_dir"]
cublas_include_path = cuda_config.config["cublas_include_dir"]
cudnn_header_dir = cuda_config.config["cudnn_include_dir"]
cupti_header_dir = cuda_config.config["cupti_include_dir"]
nvvm_libdevice_dir = cuda_config.config["nvvm_library_dir"]
copy_rules = [
make_copy_dir_rule(
repository_ctx,
name = "cuda-include",
src_dir = cuda_include_path,
out_dir = "cuda/include",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-nvvm",
src_dir = nvvm_libdevice_dir,
out_dir = "cuda/nvvm/libdevice",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-extras",
src_dir = cupti_header_dir,
out_dir = "cuda/extras/CUPTI/include",
),
]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cublas-include",
srcs = [
cublas_include_path + "/cublas.h",
cublas_include_path + "/cublas_v2.h",
cublas_include_path + "/cublas_api.h",
],
outs = [
"cublas/include/cublas.h",
"cublas/include/cublas_v2.h",
"cublas/include/cublas_api.h",
],
))
cusolver_include_path = cuda_config.config["cusolver_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusolver-include",
srcs = [
cusolver_include_path + "/cusolver_common.h",
cusolver_include_path + "/cusolverDn.h",
],
outs = [
"cusolver/include/cusolver_common.h",
"cusolver/include/cusolverDn.h",
],
))
cufft_include_path = cuda_config.config["cufft_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cufft-include",
srcs = [
cufft_include_path + "/cufft.h",
],
outs = [
"cufft/include/cufft.h",
],
))
cusparse_include_path = cuda_config.config["cusparse_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusparse-include",
srcs = [
cusparse_include_path + "/cusparse.h",
],
outs = [
"cusparse/include/cusparse.h",
],
))
curand_include_path = cuda_config.config["curand_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "curand-include",
srcs = [
curand_include_path + "/curand.h",
],
outs = [
"curand/include/curand.h",
],
))
check_cuda_libs_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:check_cuda_libs.py"))
cuda_libs = _find_libs(repository_ctx, check_cuda_libs_script, cuda_config)
cuda_lib_srcs = []
cuda_lib_outs = []
for path in cuda_libs.values():
cuda_lib_srcs.append(path)
cuda_lib_outs.append("cuda/lib/" + _basename(repository_ctx, path))
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-lib",
srcs = cuda_lib_srcs,
outs = cuda_lib_outs,
))
file_ext = ".exe" if is_windows(repository_ctx) else ""
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-bin",
srcs = [
cuda_config.cuda_toolkit_path + "/bin/" + "crt/link.stub",
cuda_config.cuda_toolkit_path + "/bin/" + "nvlink" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "fatbinary" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "bin2c" + file_ext,
],
outs = [
"cuda/bin/" + "crt/link.stub",
"cuda/bin/" + "nvlink" + file_ext,
"cuda/bin/" + "fatbinary" + file_ext,
"cuda/bin/" + "bin2c" + file_ext,
],
))
cudnn_headers = ["cudnn.h"]
if cuda_config.cudnn_version.rsplit("_", 1)[0] >= "8":
cudnn_headers += [
"cudnn_backend.h",
"cudnn_adv_infer.h",
"cudnn_adv_train.h",
"cudnn_cnn_infer.h",
"cudnn_cnn_train.h",
"cudnn_ops_infer.h",
"cudnn_ops_train.h",
"cudnn_version.h",
]
cudnn_srcs = []
cudnn_outs = []
for header in cudnn_headers:
cudnn_srcs.append(cudnn_header_dir + "/" + header)
cudnn_outs.append("cudnn/include/" + header)
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cudnn-include",
srcs = cudnn_srcs,
outs = cudnn_outs,
))
repository_ctx.template(
"cuda/build_defs.bzl",
tpl_paths["cuda:build_defs.bzl"],
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
cuda_config.compute_capabilities,
),
"%{cuda_gpu_architectures}": str(cuda_config.compute_capabilities),
},
)
repository_ctx.template(
"cuda/BUILD",
tpl_paths["cuda:BUILD"],
{
"%{cuda_driver_lib}": _basename(repository_ctx, cuda_libs["cuda"]),
"%{cudart_static_lib}": _basename(repository_ctx, cuda_libs["cudart_static"]),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cuda_config.cpu_value),
"%{cudart_lib}": _basename(repository_ctx, cuda_libs["cudart"]),
"%{cublas_lib}": _basename(repository_ctx, cuda_libs["cublas"]),
"%{cusolver_lib}": _basename(repository_ctx, cuda_libs["cusolver"]),
"%{cudnn_lib}": _basename(repository_ctx, cuda_libs["cudnn"]),
"%{cufft_lib}": _basename(repository_ctx, cuda_libs["cufft"]),
"%{curand_lib}": _basename(repository_ctx, cuda_libs["curand"]),
"%{cupti_lib}": _basename(repository_ctx, cuda_libs["cupti"]),
"%{cusparse_lib}": _basename(repository_ctx, cuda_libs["cusparse"]),
"%{copy_rules}": "\n".join(copy_rules),
},
)
is_cuda_clang = _use_cuda_clang(repository_ctx)
tf_sysroot = _tf_sysroot(repository_ctx)
should_download_clang = is_cuda_clang and _flag_enabled(
repository_ctx,
_TF_DOWNLOAD_CLANG,
)
if should_download_clang:
download_clang(repository_ctx, "crosstool/extra_tools")
cc = find_cc(repository_ctx)
cc_fullpath = cc if not should_download_clang else "crosstool/" + cc
host_compiler_includes = get_cxx_inc_directories(
repository_ctx,
cc_fullpath,
tf_sysroot,
)
cuda_defines = {}
cuda_defines["%{builtin_sysroot}"] = tf_sysroot
cuda_defines["%{cuda_toolkit_path}"] = ""
cuda_defines["%{compiler}"] = "unknown"
if is_cuda_clang:
cuda_defines["%{cuda_toolkit_path}"] = cuda_config.config["cuda_toolkit_path"]
cuda_defines["%{compiler}"] = "clang"
host_compiler_prefix = get_host_environ(repository_ctx, _GCC_HOST_COMPILER_PREFIX)
if not host_compiler_prefix:
host_compiler_prefix = "/usr/bin"
cuda_defines["%{host_compiler_prefix}"] = host_compiler_prefix
# toolchain.
# TODO: when bazel stops adding '-B/usr/bin' by default, remove this
# flag from the CROSSTOOL completely (see
# https://github.com/bazelbuild/bazel/issues/5634)
if should_download_clang:
cuda_defines["%{linker_bin_path}"] = ""
else:
cuda_defines["%{linker_bin_path}"] = host_compiler_prefix
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = ""
cuda_defines["%{unfiltered_compile_flags}"] = ""
if is_cuda_clang:
cuda_defines["%{host_compiler_path}"] = str(cc)
cuda_defines["%{host_compiler_warnings}"] = """
# Some parts of the codebase set -Werror and hit this warning, so
# switch it off for now.
"-Wno-invalid-partial-specialization"
"""
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(host_compiler_includes)
cuda_defines["%{compiler_deps}"] = ":empty"
cuda_defines["%{win_compiler_deps}"] = ":empty"
repository_ctx.file(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
"",
)
repository_ctx.file("crosstool/windows/msvc_wrapper_for_nvcc.py", "")
else:
cuda_defines["%{host_compiler_path}"] = "clang/bin/crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{host_compiler_warnings}"] = ""
# nvcc has the system include paths built in and will automatically
# search them; we cannot work around that, so we add the relevant cuda
# system paths to the allowed compiler specific include paths.
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(
host_compiler_includes + _cuda_include_path(
repository_ctx,
cuda_config,
) + [cupti_header_dir, cudnn_header_dir],
)
# For gcc, do not canonicalize system header paths; some versions of gcc
# pick the shortest possible path for system includes when creating the
# .d file - given that includes that are prefixed with "../" multiple
# time quickly grow longer than the root of the tree, this can lead to
# bazel's header check failing.
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = "\"-fno-canonical-system-headers\""
file_ext = ".exe" if is_windows(repository_ctx) else ""
nvcc_path = "%s/nvcc%s" % (cuda_config.config["cuda_binary_dir"], file_ext)
cuda_defines["%{compiler_deps}"] = ":crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{win_compiler_deps}"] = ":windows_msvc_wrapper_files"
wrapper_defines = {
"%{cpu_compiler}": str(cc),
"%{cuda_version}": cuda_config.cuda_version,
"%{nvcc_path}": nvcc_path,
"%{gcc_host_compiler_path}": str(cc),
"%{nvcc_tmp_dir}": _get_nvcc_tmp_dir_for_windows(repository_ctx),
}
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
tpl_paths["crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"],
wrapper_defines,
)
repository_ctx.template(
"crosstool/windows/msvc_wrapper_for_nvcc.py",
tpl_paths["crosstool:windows/msvc_wrapper_for_nvcc.py"],
wrapper_defines,
)
cuda_defines.update(_get_win_cuda_defines(repository_ctx))
verify_build_defines(cuda_defines)
repository_ctx.template(
"crosstool/BUILD",
tpl_paths["crosstool:BUILD"],
cuda_defines,
)
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
tpl_paths["crosstool:cc_toolchain_config.bzl"],
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.h",
tpl_paths["cuda:cuda_config.h"],
{
"%{cuda_version}": cuda_config.cuda_version,
"%{cublas_version}": cuda_config.cublas_version,
"%{cusolver_version}": cuda_config.cusolver_version,
"%{curand_version}": cuda_config.curand_version,
"%{cufft_version}": cuda_config.cufft_version,
"%{cusparse_version}": cuda_config.cusparse_version,
"%{cudnn_version}": cuda_config.cudnn_version,
"%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
},
)
repository_ctx.template(
"cuda/cuda/cuda_config.py",
tpl_paths["cuda:cuda_config.py"],
_py_tmpl_dict({
"cuda_version": cuda_config.cuda_version,
"cudnn_version": cuda_config.cudnn_version,
"cuda_compute_capabilities": cuda_config.compute_capabilities,
"cpu_compiler": str(cc),
}),
)
def _py_tmpl_dict(d):
return {"%{cuda_config}": str(d)}
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
compute_capabilities(repository_ctx),
),
},
)
repository_ctx.template(
"cuda/BUILD",
config_repo_label(remote_config_repo, "cuda:BUILD"),
{},
)
repository_ctx.template(
"cuda/build_defs.bzl",
config_repo_label(remote_config_repo, "cuda:build_defs.bzl"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.h",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.h"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.py",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.py"),
_py_tmpl_dict({}),
)
repository_ctx.template(
"crosstool/BUILD",
config_repo_label(remote_config_repo, "crosstool:BUILD"),
{},
)
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
config_repo_label(remote_config_repo, "crosstool:cc_toolchain_config.bzl"),
{},
)
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
config_repo_label(remote_config_repo, "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"),
{},
)
def _cuda_autoconf_impl(repository_ctx):
if not enable_cuda(repository_ctx):
_create_dummy_repository(repository_ctx)
elif get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO) != None:
has_cuda_version = get_host_environ(repository_ctx, _TF_CUDA_VERSION) != None
has_cudnn_version = get_host_environ(repository_ctx, _TF_CUDNN_VERSION) != None
if not has_cuda_version or not has_cudnn_version:
auto_configure_fail("%s and %s must also be set if %s is specified" %
(_TF_CUDA_VERSION, _TF_CUDNN_VERSION, _TF_CUDA_CONFIG_REPO))
_create_remote_cuda_repository(
repository_ctx,
get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO),
)
else:
_create_local_cuda_repository(repository_ctx)
_ENVIRONS = [
_GCC_HOST_COMPILER_PATH,
_GCC_HOST_COMPILER_PREFIX,
_CLANG_CUDA_COMPILER_PATH,
"TF_NEED_CUDA",
"TF_CUDA_CLANG",
_TF_DOWNLOAD_CLANG,
_CUDA_TOOLKIT_PATH,
_CUDNN_INSTALL_PATH,
_TF_CUDA_VERSION,
_TF_CUDNN_VERSION,
_TF_CUDA_COMPUTE_CAPABILITIES,
"NVVMIR_LIBRARY_DIR",
_PYTHON_BIN_PATH,
"TMP",
"TMPDIR",
"TF_CUDA_PATHS",
]
remote_cuda_configure = repository_rule(
implementation = _create_local_cuda_repository,
environ = _ENVIRONS,
remotable = True,
attrs = {
"environ": attr.string_dict(),
},
)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
environ = _ENVIRONS + [_TF_CUDA_CONFIG_REPO],
)
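# A minimal usage sketch (illustrative; the load path below is an assumption,
# not taken from this file):
#
#   load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
#   cuda_configure(name = "local_config_cuda")
#
# With CUDA disabled this generates the dummy repository; with the
# TF_CUDA_CONFIG_REPO environment variable set it reuses a pre-generated
# remote configuration instead of probing the local toolchain.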
f70b8a2bb9b965788aeed7882a1db5c0a0a6b4de | 40,693 | py | Python | forms/forms_func.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | ["MIT"] | null | null | null
import datetime
import zlib
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from django.db.models import Q
from clients.models import Document, DispensaryReg, Card
from directions.models import Napravleniya, Issledovaniya, ParaclinicResult, IstochnikiFinansirovaniya, PersonContract
from directory.models import Researches
from laboratory import utils
from laboratory.utils import strdate
from api.stationar.stationar_func import hosp_get_data_direction, check_transfer_epicrisis
from api.stationar.sql_func import get_result_value_iss
from utils.dates import normalize_date
def get_all_doc(docs: [Document]):
"""
возвращает словарь словарей documents. Данные о документах: паспорт : номер: серия, полис: номер, снислс: номер
"""
documents = {
'passport': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
'polis': {'serial': "", 'num': "", 'issued': ""},
'snils': {'num': ""},
'bc': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
}
for d in docs:
if d.document_type.title == "СНИЛС":
documents["snils"]["num"] = d.number
if d.document_type.title == 'Паспорт гражданина РФ':
documents["passport"]["num"] = d.number
documents["passport"]["serial"] = d.serial
documents["passport"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Полис ОМС':
documents["polis"]["num"] = d.number
documents["polis"]["serial"] = d.serial
documents["polis"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Свидетельство о рождении':
documents["bc"]["num"] = d.number
documents["bc"]["serial"] = d.serial
documents["bc"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["bc"]["issued"] = d.who_give
return documents
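# Minimal usage sketch (assumes an Individual with active documents; names are
# illustrative):
#   docs = Document.objects.filter(individual=ind, is_active=True)
#   get_all_doc(docs)['polis']['num']  # -> OMS policy number, or '' if absent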
def get_coast_from_issledovanie(dir_research_loc):
"""
При печати листа на оплату возвращает (цены из записанных в Исследования)
На основании прайса, услуг возвращает Для листа на оплату {
направление: {услуга:[цена, скидка, количество],услуга:[цена, скидка, количество]},
направление: {услуга:[цена, скидка, количество],услуга:[цена, скидка, количество]},
направление: {услуга:[цена, скидка, количество],услуга:[цена, скидка, количество]},
}
"""
    if isinstance(dir_research_loc, dict):
        dict_coast = {}
        for k, v in dir_research_loc.items():
            dict_coast[k] = {
                research_pk: [coast, discount, how_many]
                for research_pk, coast, discount, how_many in Issledovaniya.objects.filter(
                    napravleniye=k, research__in=v, coast__isnull=False
                ).values_list('research_id', 'coast', 'discount', 'how_many')
            }
        return dict_coast
    else:
        return 0
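# Illustrative return shape (hypothetical values):
#   {<Napravleniya 101>: {5: [Decimal('500.00'), -10, 1]}}
# i.e. direction -> {research_id: [price, discount in %, quantity]}.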
def get_research_by_dir(dir_temp_l):
"""
Получить словаь: {направление1:[услуга1, услуга2, услуга3],направление2:[услуга1].....}
:param dir_temp_l:
:return:
"""
dict_research_dir = {}
for i in dir_temp_l:
        # If at least one service in the direction has already been saved by a doctor, skip it
if any([x.doc_save is not None for x in Issledovaniya.objects.filter(napravleniye=i)]):
continue
else:
research_l = [x.research_id for x in Issledovaniya.objects.filter(napravleniye=i)]
dict_research_dir[i] = research_l
return dict_research_dir
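# Illustrative: get_research_by_dir([<Napravleniya 101>]) -> {<Napravleniya 101>: [5, 7]}
# (hypothetical pks); directions that already have a doctor-saved study are skipped.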
def get_final_data(research_price_loc):
"""
Получить итоговую структуру данных: код услуги, напрвление, услуга, цена, скидка/наценка, цена со скидкой, кол-во, сумма
Направление указывается один раз для нескольких строк
"""
total_sum = 0
tmp_data = []
z = ""
x = ""
tmp_napr = []
for k, v in research_price_loc.items():
research_attr = [s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title', 'internal_code')]
research_attr_list = [list(z) for z in research_attr]
for research_id, research_coast in v.items():
h = []
for j in research_attr_list:
if research_id == j[0]:
if k != 0:
h.append(k)
k = 0
else:
h.append("")
h.extend([j[2], j[1]])
h.append("{:,.2f}".format(research_coast[0]).replace(",", " "))
coast_with_discount = research_coast[0] + (research_coast[0] * research_coast[1] / 100)
                    if research_coast[1] != 0:
                        z = "+"
                    x = "+" if research_coast[1] > 0 else ""
                    h.append(x + str(research_coast[1]))
h.append("{:,.2f}".format(coast_with_discount).replace(",", " "))
h.append(research_coast[2])
research_sum = coast_with_discount * research_coast[2]
h.append("{:,.2f}".format(research_sum).replace(",", " "))
h[0], h[1] = h[1], h[0]
total_sum += research_sum
research_attr_list.remove(j)
tmp_data.append(h)
if h[1]:
tmp_napr.append(h[1])
if h:
break
res_lis = []
for t in tmp_data:
tmp_d = list(map(str, t))
res_lis.append(tmp_d)
total_data = []
total_data.append(res_lis)
total_data.append("{:,.2f}".format(total_sum).replace(",", " "))
if z == "+":
total_data.append("is_discount")
else:
total_data.append("no_discount")
total_data.append(tmp_napr)
    # total_data: [rows, formatted total, discount flag, direction numbers]
return total_data
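# Illustrative result (hypothetical numbers; the direction pk is shown as '101'):
#   [[['A01', '101', 'Blood test', '500.00', '-10', '450.00', '1', '450.00']],
#    '450.00', 'is_discount', ['101']]
# i.e. [rows, formatted total, discount flag, direction numbers].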
def get_data_individual(card_object):
"""
Получает на входе объект Карта
возвращает словарь атрибутов по карте и Физ.лицу(Индивидуалу)
:param card_object:
:return:
"""
ind_data = {'ind': card_object.individual}
ind_data['age'] = ind_data['ind'].age()
ind_data['doc'] = Document.objects.filter(individual=ind_data['ind'], is_active=True)
ind_data['fio'] = ind_data['ind'].fio()
ind_data['born'] = ind_data['ind'].bd()
ind_data['main_address'] = "____________________________________________________" if not card_object.main_address else card_object.main_address
ind_data['fact_address'] = "____________________________________________________" if not card_object.fact_address else card_object.fact_address
ind_documents = get_all_doc(ind_data['doc'])
ind_data['passport_num'] = ind_documents['passport']['num']
ind_data['passport_serial'] = ind_documents['passport']['serial']
ind_data['passport_date_start'] = ind_documents['passport']['date_start']
ind_data['passport_issued'] = ind_documents['passport']['issued']
ind_data['bc_num'] = ind_documents['bc']['num']
ind_data['bc_serial'] = ind_documents['bc']['serial']
ind_data['bc_date_start'] = ind_documents['bc']['date_start']
ind_data['bc_issued'] = ind_documents['bc']['issued']
ind_data['snils'] = ind_documents["snils"]["num"]
ind_data['oms'] = {}
ind_data['oms']['polis_num'] = ind_documents["polis"]["num"]
ind_data['oms']['polis_serial'] = ind_documents["polis"]["serial"]
ind_data['oms']['polis_issued'] = ind_documents["polis"]["issued"]
return ind_data
def form_notfound():
"""
В случае не верной настройки форм по типам и функциям или переданным аргументам в параметры
:return:
"""
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER
import os.path
from io import BytesIO
from laboratory.settings import FONTS_FOLDER
buffer = BytesIO()
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
doc = SimpleDocTemplate(
buffer, pagesize=A4, leftMargin=10 * mm, rightMargin=10 * mm, topMargin=10 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("Паспорт здоровья")
)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifBold"
style.fontSize = 16
style.leading = 15
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
objs = [
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Ая-я-я-я-я-я-я-яй!</font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Что-то Администраторы не верно настроили с типами форм! </font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">А-та-та-та им!</font>', styleCenter),
]
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
def get_doc_results(doc_obj, date_result):
"""
возвращает результаты врача за определенную дату. ***** Ни в коем случае не переделывать на диапозон дат
"""
doc_results = Issledovaniya.objects.filter(doc_confirmation=doc_obj, time_confirmation__date=date_result, napravleniye__isnull=False)
return doc_results
def get_finaldata_talon(doc_result_obj):
"""
Вход результаты врача за определенную дату
Выход: стр-ра данных {'№п.п':'номер', 'ФИО пациента':'Иванов Иван Иванович', '№ карты (тип)':'1212 (L2)',
'Данные полиса':'номер;Компаня', 'цель посещения': '(код)', 'первичны прием':'Нет',
'Диагноз по МКБ': '(код)', 'Впервые':'Да', 'Результат обращения':'код',
'Исход':'Код', 'Д-стоит':'коды', 'Д-взят':'коды', 'Д-снят':'коды'
'причина снятия':'', 'Онкоподозрение':'Да'
"""
fin_oms = 'омс'
fin_dms = 'дмс'
fin_pay = 'платно'
fin_medexam = 'медосмотр'
fin_disp = 'диспансеризация'
fin_budget = 'бюджет'
fin_source = OrderedDict()
fin_source[fin_oms] = OrderedDict()
fin_source[fin_pay] = OrderedDict()
fin_source[fin_dms] = OrderedDict()
fin_source[fin_medexam] = OrderedDict()
fin_source[fin_disp] = OrderedDict()
fin_source[fin_budget] = OrderedDict()
fin_source_iss = OrderedDict()
fin_source_iss[fin_oms] = OrderedDict()
fin_source_iss[fin_pay] = OrderedDict()
fin_source_iss[fin_dms] = OrderedDict()
fin_source_iss[fin_medexam] = OrderedDict()
fin_source_iss[fin_disp] = OrderedDict()
fin_source_iss[fin_budget] = OrderedDict()
oms_count = 0
dms_count = 0
pay_count = 0
disp_count = 0
medexam_count = 0
budget_count = 0
empty = '-'
today = utils.timezone.now().date()
for i in doc_result_obj:
napr_attr = Napravleniya.get_attr(i.napravleniye)
temp_dict = OrderedDict()
temp_dict_iss = OrderedDict()
dict_fsourcce = ''
order = ''
if napr_attr['istochnik_f'] in ['омс', '']:
oms_count += 1
dict_fsourcce = fin_oms
order = oms_count
elif napr_attr['istochnik_f'] == 'платно':
pay_count += 1
dict_fsourcce = fin_pay
order = pay_count
elif napr_attr['istochnik_f'] == 'дмс':
dms_count += 1
dict_fsourcce = fin_dms
order = dms_count
elif napr_attr['istochnik_f'] == 'медосмотр':
medexam_count += 1
dict_fsourcce = fin_medexam
order = medexam_count
elif napr_attr['istochnik_f'] == 'диспансеризация':
disp_count += 1
dict_fsourcce = fin_disp
order = disp_count
elif napr_attr['istochnik_f'] == 'бюджет':
budget_count += 1
dict_fsourcce = fin_budget
order = budget_count
else:
continue
polis_who_giv = empty if not napr_attr['polis_who_give'] else napr_attr['polis_who_give']
polis_num = empty if not napr_attr['polis_n'] else napr_attr['polis_n']
temp_dict['client_fio'] = napr_attr['client_fio'] + ', ' + napr_attr['client_bd']
temp_dict['med_exam'] = strdate(i.medical_examination) + ', ' + str(i.napravleniye_id)
num_poliklinika = f'\n({napr_attr["number_poliklinika"]})' if napr_attr['number_poliklinika'] else ''
temp_dict['card_num'] = napr_attr['card_num'] + num_poliklinika
temp_dict['polis_data'] = '<u>' + polis_num + '</u>' + '<br/>' + polis_who_giv
temp_dict_iss = temp_dict.copy()
temp_dict_iss['research_code'] = i.research.code
temp_dict_iss['research_title'] = i.research.title
temp_dict['purpose'] = empty if not i.purpose else i.purpose
temp_dict['is_first_reception'] = 'Да' if i.research.is_first_reception else 'Нет'
temp_dict['diagnos'] = empty if not i.diagnos else i.diagnos
temp_dict['first_time'] = 'Да' if i.first_time else 'Нет'
temp_dict['result_reception'] = empty if not i.result_reception else i.result_reception
temp_dict['outcome_illness'] = empty if not i.outcome_illness else i.outcome_illness
        # Dispensary registration (D-register) data
disp = DispensaryReg.objects.filter(Q(card=i.napravleniye.client), (Q(date_end=None) | Q(date_end=today)))
d_stand = []
d_take = []
d_stop = []
d_whystop = []
if disp:
for d in disp:
if d.date_end is None and d.date_start != i.time_confirmation.date():
date_start = strdate(d.date_start, short_year=True)
date_start = normalize_date(date_start)
d_stand.append(f'{d.diagnos}<br/>{date_start}<br/>')
elif d.date_end is None and d.date_start == i.time_confirmation.date():
d_take.append(d.diagnos)
elif d.date_end == i.time_confirmation.date():
d_stop.append(d.diagnos)
d_whystop.append(d.why_stop)
temp_dict['d_stand'] = '' if not d_stand else ''.join(d_stand)
temp_dict['d_take'] = '' if not d_take else ', '.join(d_take)
        temp_dict['d_stop'] = '' if not d_stop else ', '.join(d_stop)
temp_dict['d_whystop'] = '' if not d_whystop else ', '.join(d_whystop)
temp_dict['maybe_onco'] = 'Да' if i.maybe_onco else ''
fin_source[dict_fsourcce].update({order: temp_dict})
fin_source_iss[dict_fsourcce].update({order: temp_dict_iss})
if Issledovaniya.objects.filter(parent=i).exists():
temp_dict_iss_copy = deepcopy(temp_dict_iss)
add_iss_dict = OrderedDict()
for iss in Issledovaniya.objects.filter(parent=i):
temp_dict_iss_copy['research_code'] = iss.research.code
temp_dict_iss_copy['research_title'] = iss.research.title
order = Decimal(str(order)) + Decimal('0.1')
add_iss_dict[order] = deepcopy(temp_dict_iss_copy)
fin_source_iss[dict_fsourcce].update(add_iss_dict)
return [fin_source, fin_source_iss]
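# Illustrative shape of the returned pair (hypothetical content): fin_source
# maps a funding source to an OrderedDict of numbered visit rows, e.g.
#   fin_source['омс'] == OrderedDict({1: {'client_fio': '...', 'card_num': '...', ...}})
# fin_source_iss mirrors it per service; child services of a study get
# fractional order keys (Decimal('1.1'), Decimal('1.2'), ...).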
def primary_reception_get_data(hosp_first_num):
    # Fetch data from the primary admission record
hosp_primary_receptions = hosp_get_data_direction(hosp_first_num, site_type=0, type_service='None', level=2)
hosp_primary_iss, primary_research_id = None, None
if hosp_primary_receptions:
hosp_primary_iss = hosp_primary_receptions[0].get('iss')
primary_research_id = hosp_primary_receptions[0].get('research_id')
titles_field = [
'Дата поступления',
'Время поступления',
'Виды транспортировки',
'Побочное действие лекарств (непереносимость)',
'Кем направлен больной',
'Вид госпитализации',
'Время через, которое доставлен после начала заболевания, получения травмы',
'Диагноз направившего учреждения',
'Диагноз при поступлении',
'Госпитализирован по поводу данного заболевания',
'Общее состояние',
'Социальный статус',
'Категория льготности',
'Всего госпитализаций',
'Вид травмы',
'Группа крови',
'Резус принадлежность',
'Вес',
]
list_values = None
if titles_field and hosp_primary_receptions:
list_values = get_result_value_iss(hosp_primary_iss, primary_research_id, titles_field)
date_entered_value, time_entered_value, type_transport, medicament_allergy = '', '', '', ''
who_directed, plan_hospital, extra_hospital, type_hospital = '', '', '', ''
time_start_ill, diagnos_who_directed, diagnos_entered = '', '', ''
what_time_hospitalized, state, social_status, category_privilege = '', '', '', ''
all_hospitalized, type_trauma, blood_group, resus_factor = '', '', '', ''
weight = ''
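    # Each row returned by get_result_value_iss is addressed positionally in the
    # loop below: row[2] holds the field value and row[3] the field title; the
    # other positions are not used here.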
if list_values:
for i in list_values:
if i[3] == 'Дата поступления':
date_entered_value = normalize_date(i[2])
continue
if i[3] == 'Время поступления':
time_entered_value = i[2]
continue
if i[3] == 'Виды транспортировки':
type_transport = i[2]
continue
if i[3] == 'Побочное действие лекарств (непереносимость)':
medicament_allergy = i[2]
continue
if i[3] == 'Кем направлен больной':
who_directed = i[2]
continue
if i[3] == 'Вид госпитализации':
type_hospital = i[2]
if type_hospital.lower() == 'экстренная':
time_start_ill_obj = get_result_value_iss(hosp_primary_iss, primary_research_id, ['Время через, которое доставлен после начала заболевания, получения травмы'])
if time_start_ill_obj:
time_start_ill = time_start_ill_obj[0][2]
extra_hospital = "Да"
plan_hospital = "Нет"
else:
plan_hospital = "Да"
extra_hospital = "Нет"
time_start_ill = ''
if i[3] == 'Диагноз направившего учреждения':
diagnos_who_directed = i[2]
continue
if i[3] == 'Диагноз при поступлении':
diagnos_entered = i[2]
continue
if i[3] == 'Госпитализирован по поводу данного заболевания':
what_time_hospitalized = i[2]
continue
if i[3] == 'Общее состояние':
state = i[2]
continue
if i[3] == 'Социальный статус':
social_status = i[2]
continue
if i[3] == 'Категория льготности':
category_privilege = i[2]
continue
if i[3] == 'Всего госпитализаций':
all_hospitalized = i[2]
continue
if i[3] == 'Вид травмы':
type_trauma = i[2]
continue
if i[3] == 'Группа крови':
blood_group = i[2]
continue
if i[3] == 'Резус принадлежность':
resus_factor = i[2]
continue
if i[3] == 'Вес':
weight = i[2]
continue
return {
'date_entered_value': date_entered_value,
'time_entered_value': time_entered_value,
'type_transport': type_transport,
'medicament_allergy': medicament_allergy,
'who_directed': who_directed,
'plan_hospital': plan_hospital,
'extra_hospital': extra_hospital,
'type_hospital': type_hospital,
'time_start_ill': time_start_ill,
'diagnos_who_directed': diagnos_who_directed,
'diagnos_entered': diagnos_entered,
'what_time_hospitalized': what_time_hospitalized,
'state': state,
'social_status': social_status,
'category_privilege': category_privilege,
'all_hospitalized': all_hospitalized,
'type_trauma': type_trauma,
'blood_group': blood_group,
'resus_factor': resus_factor,
'weight': weight,
}
def hosp_extract_get_data(hosp_last_num):
    # Fetch data from the discharge summary
hosp_extract = hosp_get_data_direction(hosp_last_num, site_type=7, type_service='None', level=2)
if not hosp_extract:
return {}
hosp_extract_iss, extract_research_id, doc_confirm = None, None, None
if hosp_extract:
hosp_extract_iss = hosp_extract[0].get('iss')
doc_confirm = Issledovaniya.objects.get(pk=hosp_extract_iss).doc_confirmation
if not doc_confirm:
return {}
extract_research_id = hosp_extract[0].get('research_id')
titles_field = [
'Время выписки',
'Дата выписки',
'Основной диагноз (описание)',
'Основной диагноз по МКБ',
'Осложнение основного диагноза (описание)',
'Осложнение основного диагноза по МКБ',
'Сопутствующий диагноз (описание)',
'Сопутствующий диагноз по МКБ',
'Исход госпитализации',
'Результат госпитализации',
'Проведено койко-дней',
'Заведующий отделением',
'Палата №',
]
list_values = None
if titles_field and hosp_extract:
list_values = get_result_value_iss(hosp_extract_iss, extract_research_id, titles_field)
date_value, time_value = '', ''
final_diagnos, other_diagnos, near_diagnos, outcome, final_diagnos_mkb, other_diagnos_mkb, near_diagnos_mkb = '', '', '', '', '', '', ''
days_count, result_hospital, manager_depart, room_num = '', '', '', ''
if list_values:
for i in list_values:
if i[3] == 'Дата выписки':
date_value = normalize_date(i[2])
if i[3] == 'Время выписки':
time_value = i[2]
if i[3] == 'Основной диагноз (описание)':
final_diagnos = i[2]
if i[3] == 'Осложнение основного диагноза (описание)':
other_diagnos = i[2]
if i[3] == 'Сопутствующий диагноз (описание)':
near_diagnos = i[2]
if i[3] == 'Исход госпитализации':
outcome = i[2]
if i[3] == 'Результат госпитализации':
result_hospital = i[2]
if i[3] == 'Основной диагноз по МКБ':
final_diagnos_mkb = str(i[2])
if i[3] == 'Осложнение основного диагноза по МКБ':
other_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Сопутствующий диагноз по МКБ':
near_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Проведено койко-дней':
days_count = str(i[2])
if i[3] == 'Заведующий отделением':
manager_depart = str(i[2])
if i[3] == 'Палата №':
room_num = str(i[2])
doc_fio = doc_confirm.get_fio()
return {
'date_value': date_value,
'time_value': time_value,
'final_diagnos': final_diagnos,
'other_diagnos': other_diagnos,
'near_diagnos': near_diagnos,
'outcome': outcome,
'final_diagnos_mkb': final_diagnos_mkb,
'other_diagnos_mkb': other_diagnos_mkb,
'near_diagnos_mkb': near_diagnos_mkb,
'extract_iss': hosp_extract_iss,
'days_count': days_count,
'result_hospital': result_hospital,
'doc_fio': doc_fio,
'manager_depart': manager_depart,
'room_num': room_num,
}
def hosp_get_clinical_diagnos(hosp_obj):
clinic_diagnos = ''
tmp_clinic_diagnos = []
    for i in hosp_obj:
        hosp_diagnostic_epicris = hosp_get_data_direction(i['direction'], site_type=6, type_service='None', level=2)
        day_entries_iss = []
        day_entries_research_id = None
        if hosp_diagnostic_epicris:
            for entry in hosp_diagnostic_epicris:
                # find the diagnostic epicrises
                if entry.get('research_title').lower().find('диагностич') != -1:
                    day_entries_iss.append(entry.get('iss'))
                    if not day_entries_research_id:
                        day_entries_research_id = entry.get('research_id')
        titles_field = ['Диагноз клинический', 'Дата установления диагноза', 'Основной', 'Осложнение', 'Сопутствующий']
        list_values = []
        if titles_field and day_entries_iss:
            for iss_pk in day_entries_iss:
                list_values.append(get_result_value_iss(iss_pk, day_entries_research_id, titles_field))
if list_values:
for fields in list_values:
clinical_data = {'clinic_diagnos': '', 'main_diagnos': '', 'other_diagnos': '', 'near_diagnos': '', 'date': ''}
for i in fields:
if i[3] == 'Дата установления диагноза':
clinical_data['date'] = normalize_date(i[2])
continue
if i[3] == 'Диагноз клинический':
clinical_data['clinic_diagnos'] = i[2]
continue
if i[3] == 'Основной':
clinical_data['main_diagnos'] = f"Основной: {i[2]}"
continue
if i[3] == 'Осложнение':
clinical_data['other_diagnos'] = f"; Осложнение: {i[2]}"
continue
if i[3] == 'Сопутствующий':
clinical_data['near_diagnos'] = f"; Сопутствующий: {i[2]}"
continue
if clinical_data['date'] and (clinical_data['clinic_diagnos'] or clinical_data['main_diagnos']):
tmp_clinic_diagnos.append(clinical_data.copy())
for i in tmp_clinic_diagnos:
clinic_diagnos = f"{clinic_diagnos}{i['clinic_diagnos']} <u>{i['main_diagnos']}</u>{i['other_diagnos']}{i['near_diagnos']}; дата: {i['date']}<br/>"
return clinic_diagnos
def hosp_get_transfers_data(hosp_nums_obj):
titles_field = ['Дата перевода', 'Время перевода']
date_transfer_value, time_transfer_value = '', ''
transfers = []
list_values = None
for i in range(len(hosp_nums_obj)):
if i == 0:
continue
transfer_research_title = hosp_nums_obj[i].get('research_title')
        # for the current hosp_dir, get the epicrisis whose title contains "перевод" (transfer)
from_hosp_dir_transfer = hosp_nums_obj[i - 1].get('direction')
epicrisis_data = hosp_get_data_direction(from_hosp_dir_transfer, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
else:
continue
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_transfer_value = normalize_date(i[2])
continue
if i[3] == 'Время перевода':
time_transfer_value = i[2]
continue
transfers.append({'transfer_research_title': transfer_research_title, 'date_transfer_value': date_transfer_value, 'time_transfer_value': time_transfer_value})
return transfers
def hosp_patient_movement(hosp_nums_obj):
titles_field = ['Дата перевода']
patient_movement = []
list_values = None
for i in range(len(hosp_nums_obj)):
date_out, diagnos_mkb, doc_confirm_code = '', '', ''
bed_profile_research_title = hosp_nums_obj[i].get('research_title')
hosp_dir = hosp_nums_obj[i].get('direction')
primary_reception_data = primary_reception_get_data(hosp_dir)
hosp_extract_data = hosp_get_data_direction(hosp_dir, site_type=7, type_service='None', level=2)
if hosp_extract_data:
extract_data = hosp_extract_get_data(hosp_dir)
if extract_data:
date_out = extract_data['date_value']
diagnos_mkb = extract_data['final_diagnos_mkb']
                doc_confirm_code = Issledovaniya.objects.get(pk=extract_data['extract_iss']).doc_confirmation.personal_code
list_values = None
epicrisis_data = hosp_get_data_direction(hosp_dir, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_out = normalize_date(i[2])
if i[3] == 'Клинический диагноз по МКБ':
diagnos_mkb = i[2]
patient_movement.append(
{
'bed_profile_research_title': bed_profile_research_title,
'date_entered_value': primary_reception_data['date_entered_value'],
'date_oute': date_out,
'diagnos_mkb': diagnos_mkb,
'doc_confirm_code': doc_confirm_code,
}
)
return patient_movement
def hosp_get_operation_data(num_dir):
hosp_operation = hosp_get_data_direction(num_dir, site_type=3, type_service='None', level=-1)
operation_iss_research = []
if hosp_operation:
for i in hosp_operation:
            # find protocols of the operation/manipulation type
if (i.get('research_title').lower().find('операци') != -1 or i.get('research_title').lower().find('манипул') != -1) and i['date_confirm']:
operation_iss_research.append({'iss': i['iss'], 'research': i['research_id']})
titles_field = [
'Название операции',
'Дата проведения',
'Время начала',
'Время окончания',
'Метод обезболивания',
'Осложнения',
'Код операции',
'Код манипуляции',
'Оперативное вмешательство',
'Код анестезиолога',
'Категория сложности',
'Диагноз после оперативного лечения',
'МКБ 10',
'Оперировал',
'Код хирурга',
]
list_values = []
operation_result = []
if titles_field and operation_iss_research and hosp_operation:
for i in operation_iss_research:
list_values.append(get_result_value_iss(i['iss'], i['research'], titles_field))
operation_result = []
for fields_operation in list_values:
pk_iss_operation = fields_operation[0][1]
operation_data = {
'name_operation': '',
'date': '',
'time_start': '',
'time_end': '',
'anesthesia method': '',
'complications': '',
'doc_fio': '',
'code_operation': '',
'code_doc_anesthesia': '',
'plan_operation': '',
'diagnos_after_operation': '',
'mkb10': '',
'category_difficult': '',
'doc_code': '',
}
iss_obj = Issledovaniya.objects.filter(pk=pk_iss_operation).first()
        if not iss_obj or not iss_obj.time_confirmation:
continue
operation_data['doc_fio'] = iss_obj.doc_confirmation_fio
        operation_data['doc_code'] = iss_obj.doc_confirmation.personal_code
if operation_data['doc_code'] == 0:
operation_data['doc_code'] = ''
for field in fields_operation:
if field[3] == 'Название операции':
operation_data['name_operation'] = field[2]
continue
if field[3] == 'Дата проведения':
operation_data['date'] = normalize_date(field[2])
continue
if field[3] == 'Время начала':
operation_data['time_start'] = field[2]
continue
if field[3] == 'Время окончания':
operation_data['time_end'] = field[2]
continue
if field[3] == 'Метод обезболивания':
operation_data['anesthesia method'] = field[2]
continue
if field[3] == 'Осложнения':
operation_data['complications'] = field[2]
continue
if field[3] == 'Код операции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код манипуляции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код анестезиолога':
operation_data['code_doc_anesthesia'] = field[2]
continue
if field[3] == 'Оперативное вмешательство':
operation_data['plan_operation'] = field[2]
continue
if field[3] == 'Категория сложности':
operation_data['category_difficult'] = f"Сложность - {field[2]}"
continue
if field[3] == 'Диагноз после оперативного лечения':
operation_data['diagnos_after_operation'] = field[2]
continue
if field[3] == 'МКБ 10':
operation_data['mkb10'] = field[2]
continue
if field[3] == 'Оперировал':
if field[2]:
operation_data['doc_fio'] = field[2]
continue
if field[3] == 'Код хирурга':
if field[2]:
operation_data['doc_code'] = field[2]
continue
        operation_data['name_operation'] = f"{operation_data['name_operation']} {operation_data['category_difficult']}".strip()
operation_result.append(operation_data.copy())
return operation_result
def closed_bl(hosp_num_dir):
"""
    Is there a confirmed sick-leave protocol whose title contains the word "closing" among the sick notes?
"""
result_bl = hosp_get_data_direction(hosp_num_dir, site_type=8, type_service='None', level=-1)
num, who_get, who_care, start_date, end_date, start_work = '', '', '', '', '', ''
for i in result_bl:
if i['date_confirm'] is None:
continue
if i["research_title"].lower().find('закрыт') != -1:
data_closed_bl = ParaclinicResult.objects.filter(issledovaniye=i['iss'])
for b in data_closed_bl:
if b.field.title == "Лист нетрудоспособности №":
num = b.value
continue
if b.field.title == "Выдан кому":
who_get = b.value
continue
if b.field.title == "по уходу за":
who_care = b.value
continue
if b.field.title == "выдан с":
start_date = b.value
if start_date.find('-') != -1:
start_date = normalize_date(start_date)
continue
if b.field.title == "по":
end_date = b.value
if end_date.find('-') != -1:
end_date = normalize_date(end_date)
continue
if b.field.title == "к труду":
start_work = b.value
if start_work.find('-') != -1:
start_work = normalize_date(start_work)
continue
return {'is_closed': True, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
return {'is_closed': False, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
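# Minimal usage sketch (the direction pk is hypothetical):
#   bl = closed_bl(123456)
#   if bl['is_closed']:
#       print(bl['num'], bl['start_date'], '-', bl['end_date'])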
def create_contract(ind_dir, card_pk):
ind_card = Card.objects.get(pk=card_pk)
patient_data = ind_card.get_data_individual()
p_agent = None
if ind_card.who_is_agent:
p_agent = getattr(ind_card, ind_card.who_is_agent)
p_payer = None
if ind_card.payer:
p_payer = ind_card.payer
    # Get all funding sources whose title is "Платно" (paid)
ist_f = list(IstochnikiFinansirovaniya.objects.values_list('id').filter(title__exact='Платно'))
ist_f_list = [int(x[0]) for x in ist_f]
napr = Napravleniya.objects.filter(pk__in=ind_dir)
dir_temp = []
    # Check that all directions belong to the same card and have the "paid" funding source
num_contract_set = set()
for n in napr:
if n.istochnik_f_id in ist_f_list and n.client == ind_card:
num_contract_set.add(n.num_contract)
dir_temp.append(n.pk)
if not dir_temp:
return False
    # get the SERVICES for the directions (filtered by "paid", no saved studies) from Issledovaniya
research_direction = get_research_by_dir(dir_temp)
if not research_direction:
return False
    # get the price for each direction/service from Issledovaniya
research_price = get_coast_from_issledovanie(research_direction)
    # Build the final data structure
result_data = get_final_data(research_price)
sum_research = result_data[1]
    # Checksum calculation: the sequence of direction numbers + the total monetary sum
qr_napr = ','.join([str(elem) for elem in result_data[3]])
protect_val = sum_research.replace(' ', '')
bstr = (qr_napr + protect_val).encode()
protect_code = str(zlib.crc32(bstr))
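    # Example (hypothetical values): for directions [101, 102] and a total of
    # '1 500.00', the checked string is '101,1021500.00' and protect_code is
    # str(zlib.crc32(b'101,1021500.00')), an unsigned 32-bit integer.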
today = utils.current_time()
date_now1 = datetime.datetime.strftime(today, '%y%m%d%H%M%S%f')[:-3]
date_now_str = str(ind_card.pk) + str(date_now1)
    # Check whether a contract number and checksum are already recorded on the directions.
    # Rewrite the contract number if any direction has None, if the directions
    # carry different contracts, or if the checksums differ.
num_contract_set = set()
protect_code_set = set()
napr_end = Napravleniya.objects.filter(id__in=result_data[3])
for n in napr_end:
num_contract_set.add(n.num_contract)
protect_code_set.add(n.protect_code)
    if (len(num_contract_set) == 1 and None in num_contract_set) or None in protect_code_set:
PersonContract.person_contract_save(date_now_str, protect_code, qr_napr, sum_research, patient_data['fio'], ind_card, p_payer, p_agent)
Napravleniya.objects.filter(id__in=result_data[3]).update(num_contract=date_now_str, protect_code=protect_code)
return PersonContract.pk
import datetime
import zlib
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from django.db.models import Q
from clients.models import Document, DispensaryReg, Card
from directions.models import Napravleniya, Issledovaniya, ParaclinicResult, IstochnikiFinansirovaniya, PersonContract
from directory.models import Researches
from laboratory import utils
from laboratory.utils import strdate
from api.stationar.stationar_func import hosp_get_data_direction, check_transfer_epicrisis
from api.stationar.sql_func import get_result_value_iss
from utils.dates import normalize_date
def get_all_doc(docs: [Document]):
documents = {
'passport': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
'polis': {'serial': "", 'num': "", 'issued': ""},
'snils': {'num': ""},
'bc': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
}
for d in docs:
if d.document_type.title == "СНИЛС":
documents["snils"]["num"] = d.number
if d.document_type.title == 'Паспорт гражданина РФ':
documents["passport"]["num"] = d.number
documents["passport"]["serial"] = d.serial
documents["passport"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Полис ОМС':
documents["polis"]["num"] = d.number
documents["polis"]["serial"] = d.serial
documents["polis"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Свидетельство о рождении':
documents["bc"]["num"] = d.number
documents["bc"]["serial"] = d.serial
documents["bc"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["bc"]["issued"] = d.who_give
return documents
def get_coast_from_issledovanie(dir_research_loc):
d = tuple()
if type(dir_research_loc) == dict:
dict_coast = {}
for k, v in dir_research_loc.items():
d = {
r: [
s,
d,
h,
]
for r, s, d, h in Issledovaniya.objects.filter(napravleniye=k, research__in=v, coast__isnull=False).values_list('research_id', 'coast', 'discount', 'how_many')
}
dict_coast[k] = d
return dict_coast
else:
return 0
def get_research_by_dir(dir_temp_l):
dict_research_dir = {}
for i in dir_temp_l:
if any([x.doc_save is not None for x in Issledovaniya.objects.filter(napravleniye=i)]):
continue
else:
research_l = [x.research_id for x in Issledovaniya.objects.filter(napravleniye=i)]
dict_research_dir[i] = research_l
return dict_research_dir
def get_final_data(research_price_loc):
total_sum = 0
tmp_data = []
z = ""
x = ""
tmp_napr = []
for k, v in research_price_loc.items():
research_attr = [s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title', 'internal_code')]
research_attr_list = [list(z) for z in research_attr]
for research_id, research_coast in v.items():
h = []
for j in research_attr_list:
if research_id == j[0]:
if k != 0:
h.append(k)
k = 0
else:
h.append("")
h.extend([j[2], j[1]])
h.append("{:,.2f}".format(research_coast[0]).replace(",", " "))
coast_with_discount = research_coast[0] + (research_coast[0] * research_coast[1] / 100)
if research_coast[1] != 0:
z = "+"
if research_coast[1] > 0:
x = "+"
else:
x = ""
h.append(x + str(research_coast[1]))
h.append("{:,.2f}".format(coast_with_discount).replace(",", " "))
h.append(research_coast[2])
research_sum = coast_with_discount * research_coast[2]
h.append("{:,.2f}".format(research_sum).replace(",", " "))
h[0], h[1] = h[1], h[0]
total_sum += research_sum
research_attr_list.remove(j)
tmp_data.append(h)
if h[1]:
tmp_napr.append(h[1])
if h:
break
res_lis = []
for t in tmp_data:
tmp_d = list(map(str, t))
res_lis.append(tmp_d)
total_data = []
total_data.append(res_lis)
total_data.append("{:,.2f}".format(total_sum).replace(",", " "))
if z == "+":
total_data.append("is_discount")
else:
total_data.append("no_discount")
total_data.append(tmp_napr)
return total_data
def get_data_individual(card_object):
ind_data = {'ind': card_object.individual}
ind_data['age'] = ind_data['ind'].age()
ind_data['doc'] = Document.objects.filter(individual=ind_data['ind'], is_active=True)
ind_data['fio'] = ind_data['ind'].fio()
ind_data['born'] = ind_data['ind'].bd()
ind_data['main_address'] = "____________________________________________________" if not card_object.main_address else card_object.main_address
ind_data['fact_address'] = "____________________________________________________" if not card_object.fact_address else card_object.fact_address
ind_documents = get_all_doc(ind_data['doc'])
ind_data['passport_num'] = ind_documents['passport']['num']
ind_data['passport_serial'] = ind_documents['passport']['serial']
ind_data['passport_date_start'] = ind_documents['passport']['date_start']
ind_data['passport_issued'] = ind_documents['passport']['issued']
ind_data['bc_num'] = ind_documents['bc']['num']
ind_data['bc_serial'] = ind_documents['bc']['serial']
ind_data['bc_date_start'] = ind_documents['bc']['date_start']
ind_data['bc_issued'] = ind_documents['bc']['issued']
ind_data['snils'] = ind_documents["snils"]["num"]
ind_data['oms'] = {}
ind_data['oms']['polis_num'] = ind_documents["polis"]["num"]
ind_data['oms']['polis_serial'] = ind_documents["polis"]["serial"]
ind_data['oms']['polis_issued'] = ind_documents["polis"]["issued"]
return ind_data
def form_notfound():
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER
import os.path
from io import BytesIO
from laboratory.settings import FONTS_FOLDER
buffer = BytesIO()
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
doc = SimpleDocTemplate(
buffer, pagesize=A4, leftMargin=10 * mm, rightMargin=10 * mm, topMargin=10 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("Паспорт здоровья")
)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifBold"
style.fontSize = 16
style.leading = 15
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
objs = [
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Ая-я-я-я-я-я-я-яй!</font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Что-то Администраторы не верно настроили с типами форм! </font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">А-та-та-та им!</font>', styleCenter),
]
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
def get_doc_results(doc_obj, date_result):
doc_results = Issledovaniya.objects.filter(doc_confirmation=doc_obj, time_confirmation__date=date_result, napravleniye__isnull=False)
return doc_results
def get_finaldata_talon(doc_result_obj):
fin_oms = 'омс'
fin_dms = 'дмс'
fin_pay = 'платно'
fin_medexam = 'медосмотр'
fin_disp = 'диспансеризация'
fin_budget = 'бюджет'
fin_source = OrderedDict()
fin_source[fin_oms] = OrderedDict()
fin_source[fin_pay] = OrderedDict()
fin_source[fin_dms] = OrderedDict()
fin_source[fin_medexam] = OrderedDict()
fin_source[fin_disp] = OrderedDict()
fin_source[fin_budget] = OrderedDict()
fin_source_iss = OrderedDict()
fin_source_iss[fin_oms] = OrderedDict()
fin_source_iss[fin_pay] = OrderedDict()
fin_source_iss[fin_dms] = OrderedDict()
fin_source_iss[fin_medexam] = OrderedDict()
fin_source_iss[fin_disp] = OrderedDict()
fin_source_iss[fin_budget] = OrderedDict()
oms_count = 0
dms_count = 0
pay_count = 0
disp_count = 0
medexam_count = 0
budget_count = 0
empty = '-'
today = utils.timezone.now().date()
for i in doc_result_obj:
napr_attr = Napravleniya.get_attr(i.napravleniye)
temp_dict = OrderedDict()
temp_dict_iss = OrderedDict()
dict_fsourcce = ''
order = ''
if napr_attr['istochnik_f'] in ['омс', '']:
oms_count += 1
dict_fsourcce = fin_oms
order = oms_count
elif napr_attr['istochnik_f'] == 'платно':
pay_count += 1
dict_fsourcce = fin_pay
order = pay_count
elif napr_attr['istochnik_f'] == 'дмс':
dms_count += 1
dict_fsourcce = fin_dms
order = dms_count
elif napr_attr['istochnik_f'] == 'медосмотр':
medexam_count += 1
dict_fsourcce = fin_medexam
order = medexam_count
elif napr_attr['istochnik_f'] == 'диспансеризация':
disp_count += 1
dict_fsourcce = fin_disp
order = disp_count
elif napr_attr['istochnik_f'] == 'бюджет':
budget_count += 1
dict_fsourcce = fin_budget
order = budget_count
else:
continue
polis_who_giv = empty if not napr_attr['polis_who_give'] else napr_attr['polis_who_give']
polis_num = empty if not napr_attr['polis_n'] else napr_attr['polis_n']
temp_dict['client_fio'] = napr_attr['client_fio'] + ', ' + napr_attr['client_bd']
temp_dict['med_exam'] = strdate(i.medical_examination) + ', ' + str(i.napravleniye_id)
num_poliklinika = f'\n({napr_attr["number_poliklinika"]})' if napr_attr['number_poliklinika'] else ''
temp_dict['card_num'] = napr_attr['card_num'] + num_poliklinika
temp_dict['polis_data'] = '<u>' + polis_num + '</u>' + '<br/>' + polis_who_giv
temp_dict_iss = temp_dict.copy()
temp_dict_iss['research_code'] = i.research.code
temp_dict_iss['research_title'] = i.research.title
temp_dict['purpose'] = empty if not i.purpose else i.purpose
temp_dict['is_first_reception'] = 'Да' if i.research.is_first_reception else 'Нет'
temp_dict['diagnos'] = empty if not i.diagnos else i.diagnos
temp_dict['first_time'] = 'Да' if i.first_time else 'Нет'
temp_dict['result_reception'] = empty if not i.result_reception else i.result_reception
temp_dict['outcome_illness'] = empty if not i.outcome_illness else i.outcome_illness
disp = DispensaryReg.objects.filter(Q(card=i.napravleniye.client), (Q(date_end=None) | Q(date_end=today)))
d_stand = []
d_take = []
d_stop = []
d_whystop = []
if disp:
for d in disp:
if d.date_end is None and d.date_start != i.time_confirmation.date():
date_start = strdate(d.date_start, short_year=True)
date_start = normalize_date(date_start)
d_stand.append(f'{d.diagnos}<br/>{date_start}<br/>')
elif d.date_end is None and d.date_start == i.time_confirmation.date():
d_take.append(d.diagnos)
elif d.date_end == i.time_confirmation.date():
d_stop.append(d.diagnos)
d_whystop.append(d.why_stop)
temp_dict['d_stand'] = '' if not d_stand else ''.join(d_stand)
temp_dict['d_take'] = '' if not d_take else ', '.join(d_take)
temp_dict['d_stop'] = '' if not d_stand else ', '.join(d_stop)
temp_dict['d_whystop'] = '' if not d_whystop else ', '.join(d_whystop)
temp_dict['maybe_onco'] = 'Да' if i.maybe_onco else ''
fin_source[dict_fsourcce].update({order: temp_dict})
fin_source_iss[dict_fsourcce].update({order: temp_dict_iss})
if Issledovaniya.objects.filter(parent=i).exists():
temp_dict_iss_copy = deepcopy(temp_dict_iss)
add_iss_dict = OrderedDict()
for iss in Issledovaniya.objects.filter(parent=i):
temp_dict_iss_copy['research_code'] = iss.research.code
temp_dict_iss_copy['research_title'] = iss.research.title
order = Decimal(str(order)) + Decimal('0.1')
add_iss_dict[order] = deepcopy(temp_dict_iss_copy)
fin_source_iss[dict_fsourcce].update(add_iss_dict)
return [fin_source, fin_source_iss]
def primary_reception_get_data(hosp_first_num):
hosp_primary_receptions = hosp_get_data_direction(hosp_first_num, site_type=0, type_service='None', level=2)
hosp_primary_iss, primary_research_id = None, None
if hosp_primary_receptions:
hosp_primary_iss = hosp_primary_receptions[0].get('iss')
primary_research_id = hosp_primary_receptions[0].get('research_id')
titles_field = [
'Дата поступления',
'Время поступления',
'Виды транспортировки',
'Побочное действие лекарств (непереносимость)',
'Кем направлен больной',
'Вид госпитализации',
'Время через, которое доставлен после начала заболевания, получения травмы',
'Диагноз направившего учреждения',
'Диагноз при поступлении',
'Госпитализирован по поводу данного заболевания',
'Общее состояние',
'Социальный статус',
'Категория льготности',
'Всего госпитализаций',
'Вид травмы',
'Группа крови',
'Резус принадлежность',
'Вес',
]
list_values = None
if titles_field and hosp_primary_receptions:
list_values = get_result_value_iss(hosp_primary_iss, primary_research_id, titles_field)
date_entered_value, time_entered_value, type_transport, medicament_allergy = '', '', '', ''
who_directed, plan_hospital, extra_hospital, type_hospital = '', '', '', ''
time_start_ill, diagnos_who_directed, diagnos_entered = '', '', ''
what_time_hospitalized, state, social_status, category_privilege = '', '', '', ''
all_hospitalized, type_trauma, blood_group, resus_factor = '', '', '', ''
weight = ''
if list_values:
for i in list_values:
if i[3] == 'Дата поступления':
date_entered_value = normalize_date(i[2])
continue
if i[3] == 'Время поступления':
time_entered_value = i[2]
continue
if i[3] == 'Виды транспортировки':
type_transport = i[2]
continue
if i[3] == 'Побочное действие лекарств (непереносимость)':
medicament_allergy = i[2]
continue
if i[3] == 'Кем направлен больной':
who_directed = i[2]
continue
if i[3] == 'Вид госпитализации':
type_hospital = i[2]
if type_hospital.lower() == 'экстренная':
time_start_ill_obj = get_result_value_iss(hosp_primary_iss, primary_research_id, ['Время через, которое доставлен после начала заболевания, получения травмы'])
if time_start_ill_obj:
time_start_ill = time_start_ill_obj[0][2]
extra_hospital = "Да"
plan_hospital = "Нет"
else:
plan_hospital = "Да"
extra_hospital = "Нет"
time_start_ill = ''
if i[3] == 'Диагноз направившего учреждения':
diagnos_who_directed = i[2]
continue
if i[3] == 'Диагноз при поступлении':
diagnos_entered = i[2]
continue
if i[3] == 'Госпитализирован по поводу данного заболевания':
what_time_hospitalized = i[2]
continue
if i[3] == 'Общее состояние':
state = i[2]
continue
if i[3] == 'Социальный статус':
social_status = i[2]
continue
if i[3] == 'Категория льготности':
category_privilege = i[2]
continue
if i[3] == 'Всего госпитализаций':
all_hospitalized = i[2]
continue
if i[3] == 'Вид травмы':
type_trauma = i[2]
continue
if i[3] == 'Группа крови':
blood_group = i[2]
continue
if i[3] == 'Резус принадлежность':
resus_factor = i[2]
continue
if i[3] == 'Вес':
weight = i[2]
continue
return {
'date_entered_value': date_entered_value,
'time_entered_value': time_entered_value,
'type_transport': type_transport,
'medicament_allergy': medicament_allergy,
'who_directed': who_directed,
'plan_hospital': plan_hospital,
'extra_hospital': extra_hospital,
'type_hospital': type_hospital,
'time_start_ill': time_start_ill,
'diagnos_who_directed': diagnos_who_directed,
'diagnos_entered': diagnos_entered,
'what_time_hospitalized': what_time_hospitalized,
'state': state,
'social_status': social_status,
'category_privilege': category_privilege,
'all_hospitalized': all_hospitalized,
'type_trauma': type_trauma,
'blood_group': blood_group,
'resus_factor': resus_factor,
'weight': weight,
}
def hosp_extract_get_data(hosp_last_num):
hosp_extract = hosp_get_data_direction(hosp_last_num, site_type=7, type_service='None', level=2)
if not hosp_extract:
return {}
hosp_extract_iss, extract_research_id, doc_confirm = None, None, None
if hosp_extract:
hosp_extract_iss = hosp_extract[0].get('iss')
doc_confirm = Issledovaniya.objects.get(pk=hosp_extract_iss).doc_confirmation
if not doc_confirm:
return {}
extract_research_id = hosp_extract[0].get('research_id')
titles_field = [
'Время выписки',
'Дата выписки',
'Основной диагноз (описание)',
'Основной диагноз по МКБ',
'Осложнение основного диагноза (описание)',
'Осложнение основного диагноза по МКБ',
'Сопутствующий диагноз (описание)',
'Сопутствующий диагноз по МКБ',
'Исход госпитализации',
'Результат госпитализации',
'Проведено койко-дней',
'Заведующий отделением',
'Палата №',
]
list_values = None
if titles_field and hosp_extract:
list_values = get_result_value_iss(hosp_extract_iss, extract_research_id, titles_field)
date_value, time_value = '', ''
final_diagnos, other_diagnos, near_diagnos, outcome, final_diagnos_mkb, other_diagnos_mkb, near_diagnos_mkb = '', '', '', '', '', '', ''
days_count, result_hospital, manager_depart, room_num = '', '', '', ''
if list_values:
for i in list_values:
if i[3] == 'Дата выписки':
date_value = normalize_date(i[2])
if i[3] == 'Время выписки':
time_value = i[2]
if i[3] == 'Основной диагноз (описание)':
final_diagnos = i[2]
if i[3] == 'Осложнение основного диагноза (описание)':
other_diagnos = i[2]
if i[3] == 'Сопутствующий диагноз (описание)':
near_diagnos = i[2]
if i[3] == 'Исход госпитализации':
outcome = i[2]
if i[3] == 'Результат госпитализации':
result_hospital = i[2]
if i[3] == 'Основной диагноз по МКБ':
final_diagnos_mkb = str(i[2])
if i[3] == 'Осложнение основного диагноза по МКБ':
other_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Сопутствующий диагноз по МКБ':
near_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Проведено койко-дней':
days_count = str(i[2])
if i[3] == 'Заведующий отделением':
manager_depart = str(i[2])
if i[3] == 'Палата №':
room_num = str(i[2])
doc_fio = doc_confirm.get_fio()
return {
'date_value': date_value,
'time_value': time_value,
'final_diagnos': final_diagnos,
'other_diagnos': other_diagnos,
'near_diagnos': near_diagnos,
'outcome': outcome,
'final_diagnos_mkb': final_diagnos_mkb,
'other_diagnos_mkb': other_diagnos_mkb,
'near_diagnos_mkb': near_diagnos_mkb,
'extract_iss': hosp_extract_iss,
'days_count': days_count,
'result_hospital': result_hospital,
'doc_fio': doc_fio,
'manager_depart': manager_depart,
'room_num': room_num,
}
def hosp_get_clinical_diagnos(hosp_obj):
clinic_diagnos = ''
tmp_clinic_diagnos = []
for i in hosp_obj:
hosp_diagnostic_epicris = hosp_get_data_direction(i['direction'], site_type=6, type_service='None', level=2)
day_entries_iss = []
day_entries_research_id = None
if hosp_diagnostic_epicris:
for i in hosp_diagnostic_epicris:
if i.get('research_title').lower().find('диагностич') != -1:
day_entries_iss.append(i.get('iss'))
if not day_entries_research_id:
day_entries_research_id = i.get('research_id')
titles_field = ['Диагноз клинический', 'Дата установления диагноза', 'Основной', 'Осложнение', 'Сопутствующий']
list_values = []
if titles_field and day_entries_iss:
for i in day_entries_iss:
list_values.append(get_result_value_iss(i, day_entries_research_id, titles_field))
if list_values:
for fields in list_values:
clinical_data = {'clinic_diagnos': '', 'main_diagnos': '', 'other_diagnos': '', 'near_diagnos': '', 'date': ''}
for i in fields:
if i[3] == 'Дата установления диагноза':
clinical_data['date'] = normalize_date(i[2])
continue
if i[3] == 'Диагноз клинический':
clinical_data['clinic_diagnos'] = i[2]
continue
if i[3] == 'Основной':
clinical_data['main_diagnos'] = f"Основной: {i[2]}"
continue
if i[3] == 'Осложнение':
clinical_data['other_diagnos'] = f"; Осложнение: {i[2]}"
continue
if i[3] == 'Сопутствующий':
clinical_data['near_diagnos'] = f"; Сопутствующий: {i[2]}"
continue
if clinical_data['date'] and (clinical_data['clinic_diagnos'] or clinical_data['main_diagnos']):
tmp_clinic_diagnos.append(clinical_data.copy())
for i in tmp_clinic_diagnos:
clinic_diagnos = f"{clinic_diagnos}{i['clinic_diagnos']} <u>{i['main_diagnos']}</u>{i['other_diagnos']}{i['near_diagnos']}; дата: {i['date']}<br/>"
return clinic_diagnos
def hosp_get_transfers_data(hosp_nums_obj):
titles_field = ['Дата перевода', 'Время перевода']
date_transfer_value, time_transfer_value = '', ''
transfers = []
list_values = None
for i in range(len(hosp_nums_obj)):
if i == 0:
continue
transfer_research_title = hosp_nums_obj[i].get('research_title')
from_hosp_dir_transfer = hosp_nums_obj[i - 1].get('direction')
epicrisis_data = hosp_get_data_direction(from_hosp_dir_transfer, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
else:
continue
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_transfer_value = normalize_date(i[2])
continue
if i[3] == 'Время перевода':
time_transfer_value = i[2]
continue
transfers.append({'transfer_research_title': transfer_research_title, 'date_transfer_value': date_transfer_value, 'time_transfer_value': time_transfer_value})
return transfers
def hosp_patient_movement(hosp_nums_obj):
titles_field = ['Дата перевода']
patient_movement = []
list_values = None
for i in range(len(hosp_nums_obj)):
date_out, diagnos_mkb, doc_confirm_code = '', '', ''
bed_profile_research_title = hosp_nums_obj[i].get('research_title')
hosp_dir = hosp_nums_obj[i].get('direction')
primary_reception_data = primary_reception_get_data(hosp_dir)
hosp_extract_data = hosp_get_data_direction(hosp_dir, site_type=7, type_service='None', level=2)
if hosp_extract_data:
extract_data = hosp_extract_get_data(hosp_dir)
if extract_data:
date_out = extract_data['date_value']
diagnos_mkb = extract_data['final_diagnos_mkb']
                extract_iss_obj = Issledovaniya.objects.get(pk=extract_data['extract_iss'])
                doc_confirm_code = extract_iss_obj.doc_confirmation.personal_code
list_values = None
epicrisis_data = hosp_get_data_direction(hosp_dir, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
if list_values:
            for value in list_values:
                if value[3] == 'Дата перевода':
                    date_out = normalize_date(value[2])
                if value[3] == 'Клинический диагноз по МКБ':
                    diagnos_mkb = value[2]
patient_movement.append(
{
'bed_profile_research_title': bed_profile_research_title,
'date_entered_value': primary_reception_data['date_entered_value'],
'date_oute': date_out,
'diagnos_mkb': diagnos_mkb,
'doc_confirm_code': doc_confirm_code,
}
)
return patient_movement
def hosp_get_operation_data(num_dir):
hosp_operation = hosp_get_data_direction(num_dir, site_type=3, type_service='None', level=-1)
operation_iss_research = []
if hosp_operation:
for i in hosp_operation:
if (i.get('research_title').lower().find('операци') != -1 or i.get('research_title').lower().find('манипул') != -1) and i['date_confirm']:
operation_iss_research.append({'iss': i['iss'], 'research': i['research_id']})
titles_field = [
'Название операции',
'Дата проведения',
'Время начала',
'Время окончания',
'Метод обезболивания',
'Осложнения',
'Код операции',
'Код манипуляции',
'Оперативное вмешательство',
'Код анестезиолога',
'Категория сложности',
'Диагноз после оперативного лечения',
'МКБ 10',
'Оперировал',
'Код хирурга',
]
list_values = []
operation_result = []
if titles_field and operation_iss_research and hosp_operation:
for i in operation_iss_research:
list_values.append(get_result_value_iss(i['iss'], i['research'], titles_field))
operation_result = []
for fields_operation in list_values:
pk_iss_operation = fields_operation[0][1]
operation_data = {
'name_operation': '',
'date': '',
'time_start': '',
'time_end': '',
'anesthesia method': '',
'complications': '',
'doc_fio': '',
'code_operation': '',
'code_doc_anesthesia': '',
'plan_operation': '',
'diagnos_after_operation': '',
'mkb10': '',
'category_difficult': '',
'doc_code': '',
}
        iss_obj = Issledovaniya.objects.filter(pk=pk_iss_operation).first()
        if not iss_obj or not iss_obj.time_confirmation:
            continue
        operation_data['doc_fio'] = iss_obj.doc_confirmation_fio
        operation_data['doc_code'] = iss_obj.doc_confirmation.personal_code
if operation_data['doc_code'] == 0:
operation_data['doc_code'] = ''
category_difficult = ''
for field in fields_operation:
if field[3] == 'Название операции':
operation_data['name_operation'] = field[2]
continue
if field[3] == 'Дата проведения':
operation_data['date'] = normalize_date(field[2])
continue
if field[3] == 'Время начала':
operation_data['time_start'] = field[2]
continue
if field[3] == 'Время окончания':
operation_data['time_end'] = field[2]
continue
if field[3] == 'Метод обезболивания':
operation_data['anesthesia method'] = field[2]
continue
if field[3] == 'Осложнения':
operation_data['complications'] = field[2]
continue
if field[3] == 'Код операции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код манипуляции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код анестезиолога':
operation_data['code_doc_anesthesia'] = field[2]
continue
if field[3] == 'Оперативное вмешательство':
operation_data['plan_operation'] = field[2]
continue
            if field[3] == 'Категория сложности':
                category_difficult = f"Сложность - {field[2]}"
                operation_data['category_difficult'] = category_difficult
                continue
if field[3] == 'Диагноз после оперативного лечения':
operation_data['diagnos_after_operation'] = field[2]
continue
if field[3] == 'МКБ 10':
operation_data['mkb10'] = field[2]
continue
if field[3] == 'Оперировал':
if field[2]:
operation_data['doc_fio'] = field[2]
continue
if field[3] == 'Код хирурга':
if field[2]:
operation_data['doc_code'] = field[2]
continue
operation_data['name_operation'] = f"{operation_data['name_operation']} {category_difficult}"
operation_result.append(operation_data.copy())
return operation_result
def closed_bl(hosp_num_dir):
result_bl = hosp_get_data_direction(hosp_num_dir, site_type=8, type_service='None', level=-1)
num, who_get, who_care, start_date, end_date, start_work = '', '', '', '', '', ''
for i in result_bl:
if i['date_confirm'] is None:
continue
if i["research_title"].lower().find('закрыт') != -1:
data_closed_bl = ParaclinicResult.objects.filter(issledovaniye=i['iss'])
for b in data_closed_bl:
if b.field.title == "Лист нетрудоспособности №":
num = b.value
continue
if b.field.title == "Выдан кому":
who_get = b.value
continue
if b.field.title == "по уходу за":
who_care = b.value
continue
if b.field.title == "выдан с":
start_date = b.value
if start_date.find('-') != -1:
start_date = normalize_date(start_date)
continue
if b.field.title == "по":
end_date = b.value
if end_date.find('-') != -1:
end_date = normalize_date(end_date)
continue
if b.field.title == "к труду":
start_work = b.value
if start_work.find('-') != -1:
start_work = normalize_date(start_work)
continue
return {'is_closed': True, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
return {'is_closed': False, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
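# Sketch of the dict returned by closed_bl() for a closed sick-leave record
# (field values are hypothetical, shown only to document the shape):
#   {'is_closed': True, 'num': '123456789', 'who_get': 'Иванов И.И.',
#    'who_care': '', 'start_date': '01.02.2021', 'end_date': '10.02.2021',
#    'start_work': '11.02.2021'}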
def create_contract(ind_dir, card_pk):
ind_card = Card.objects.get(pk=card_pk)
patient_data = ind_card.get_data_individual()
p_agent = None
if ind_card.who_is_agent:
p_agent = getattr(ind_card, ind_card.who_is_agent)
p_payer = None
if ind_card.payer:
p_payer = ind_card.payer
ist_f = list(IstochnikiFinansirovaniya.objects.values_list('id').filter(title__exact='Платно'))
ist_f_list = [int(x[0]) for x in ist_f]
napr = Napravleniya.objects.filter(pk__in=ind_dir)
dir_temp = []
num_contract_set = set()
for n in napr:
if n.istochnik_f_id in ist_f_list and n.client == ind_card:
num_contract_set.add(n.num_contract)
dir_temp.append(n.pk)
if not dir_temp:
return False
research_direction = get_research_by_dir(dir_temp)
if not research_direction:
return False
research_price = get_coast_from_issledovanie(research_direction)
result_data = get_final_data(research_price)
sum_research = result_data[1]
qr_napr = ','.join([str(elem) for elem in result_data[3]])
protect_val = sum_research.replace(' ', '')
bstr = (qr_napr + protect_val).encode()
protect_code = str(zlib.crc32(bstr))
today = utils.current_time()
date_now1 = datetime.datetime.strftime(today, '%y%m%d%H%M%S%f')[:-3]
date_now_str = str(ind_card.pk) + str(date_now1)
num_contract_set = set()
protect_code_set = set()
napr_end = Napravleniya.objects.filter(id__in=result_data[3])
for n in napr_end:
num_contract_set.add(n.num_contract)
protect_code_set.add(n.protect_code)
    if (len(num_contract_set) == 1 and None in num_contract_set) or None in protect_code_set:
PersonContract.person_contract_save(date_now_str, protect_code, qr_napr, sum_research, patient_data['fio'], ind_card, p_payer, p_agent)
Napravleniya.objects.filter(id__in=result_data[3]).update(num_contract=date_now_str, protect_code=protect_code)
return PersonContract.pk
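# Worked example of the protect-code scheme used above (illustrative values
# only): for qr_napr = '101,102' and sum_research = '1 500.00', protect_val
# becomes '1500.00' and protect_code = str(zlib.crc32(b'101,1021500.00')).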
| true
| true
|
f70b8b74c1eeee8137644ecf3c06c9597b396f6b
| 10,428
|
py
|
Python
|
vb_simulation_pkgs/pkg_vb_sim/scripts/task3_spawn_models.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | 1
|
2021-07-13T07:05:29.000Z
|
2021-07-13T07:05:29.000Z
|
vb_simulation_pkgs/pkg_vb_sim/scripts/task3_spawn_models.py
|
TejasPhutane/Eyantra-2021-Vargi-Bots
|
ab84a1304101850be8c0f69cfe6de70d53c33189
|
[
"MIT"
] | 1
|
2021-06-05T07:58:03.000Z
|
2021-06-05T07:58:03.000Z
|
vb_simulation_pkgs/pkg_vb_sim/scripts/task3_spawn_models.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse
# from gazebo_msgs.srv import ApplyBodyWrench, GetModelProperties, GetWorldProperties, SetModelState
from copy import deepcopy
from tf.transformations import quaternion_from_euler
sdf_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>1.0</mu>
<mu2>1.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Blue</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_dummy_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/White</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_blue = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
        <geometry>
          <mesh>
            <uri>file://box_qr.obj</uri>
          </mesh>
        </geometry>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_green = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Green</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_red = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Red</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
def create_cube_request(sdf_model, modelname, px, py, pz, rr, rp, ry, sx, sy, sz):
"""Create a SpawnModelRequest with the parameters of the cube given.
modelname: name of the model for gazebo
px py pz: position of the cube (and it's collision cube)
rr rp ry: rotation (roll, pitch, yaw) of the model
sx sy sz: size of the cube"""
cube = deepcopy(sdf_model)
# Replace size of model
size_str = str(round(sx, 3)) + " " + \
str(round(sy, 3)) + " " + str(round(sz, 3))
cube = cube.replace('SIZEXYZ', size_str)
# Replace modelname
cube = cube.replace('MODELNAME', str(modelname))
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
if __name__ == '__main__':
rospy.init_node('spawn_models')
spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
rospy.loginfo("Waiting for /gazebo/spawn_sdf_model service...")
spawn_srv.wait_for_service()
rospy.loginfo("Connected to service!")
rospy.sleep(5)
# Spawn Box
req1 = create_cube_request(sdf_cube_red, "packagen1",
-0.8, 1.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
req2 = create_cube_request(sdf_cube_green, "packagen2",
-0.66, 2.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
req3 = create_cube_request(sdf_cube_blue, "packagen3",
-0.90, 3.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
rospy.sleep(1)
spawn_srv.call(req1)
rospy.sleep(1)
spawn_srv.call(req2)
rospy.sleep(1)
spawn_srv.call(req3)
rospy.sleep(1.0)
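# A minimal cleanup sketch (an assumption, not part of the original task:
# Gazebo's standard delete service is available next to the spawn service):
#   from gazebo_msgs.srv import DeleteModel
#   delete_srv = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
#   delete_srv.wait_for_service()
#   for name in ("packagen1", "packagen2", "packagen3"):
#       delete_srv(name)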
| 26.07
| 100
| 0.476122
|
import rospy
from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse
from copy import deepcopy
from tf.transformations import quaternion_from_euler
sdf_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>1.0</mu>
<mu2>1.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Blue</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_dummy_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/White</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_blue = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
        <geometry>
          <mesh>
            <uri>file://box_qr.obj</uri>
          </mesh>
        </geometry>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_green = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Green</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_red = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Red</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
def create_cube_request(sdf_model, modelname, px, py, pz, rr, rp, ry, sx, sy, sz):
cube = deepcopy(sdf_model)
size_str = str(round(sx, 3)) + " " + \
str(round(sy, 3)) + " " + str(round(sz, 3))
cube = cube.replace('SIZEXYZ', size_str)
cube = cube.replace('MODELNAME', str(modelname))
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
if __name__ == '__main__':
rospy.init_node('spawn_models')
spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
rospy.loginfo("Waiting for /gazebo/spawn_sdf_model service...")
spawn_srv.wait_for_service()
rospy.loginfo("Connected to service!")
rospy.sleep(5)
req1 = create_cube_request(sdf_cube_red, "packagen1",
-0.8, 1.80, 1.0,
0.0, 0.0, 0.0,
0.15, 0.15, 0.15)
req2 = create_cube_request(sdf_cube_green, "packagen2",
-0.66, 2.80, 1.0,
0.0, 0.0, 0.0,
0.15, 0.15, 0.15)
req3 = create_cube_request(sdf_cube_blue, "packagen3",
-0.90, 3.80, 1.0,
0.0, 0.0, 0.0,
0.15, 0.15, 0.15)
rospy.sleep(1)
spawn_srv.call(req1)
rospy.sleep(1)
spawn_srv.call(req2)
rospy.sleep(1)
spawn_srv.call(req3)
rospy.sleep(1.0)
| true
| true
|
f70b8bc855024eae8c8724cea7763f1ea53656f2
| 5,413
|
py
|
Python
|
test/test_expected_behaviors_configurations.py
|
michael7198/deeplenstronomy_tests
|
e310684669f403969e169843185255a468c299d9
|
[
"MIT"
] | 17
|
2020-11-13T17:39:28.000Z
|
2022-03-18T11:22:01.000Z
|
test/test_expected_behaviors_configurations.py
|
michael7198/deeplenstronomy_tests
|
e310684669f403969e169843185255a468c299d9
|
[
"MIT"
] | 23
|
2020-12-09T21:50:27.000Z
|
2022-01-11T17:26:17.000Z
|
test/test_expected_behaviors_configurations.py
|
michael7198/deeplenstronomy_tests
|
e310684669f403969e169843185255a468c299d9
|
[
"MIT"
] | 9
|
2020-11-11T19:15:19.000Z
|
2022-03-01T17:50:55.000Z
|
"""
Parsed Config File Produces Expected Behaviors - configurations
"""
import inspect
import os
import deeplenstronomy.deeplenstronomy as dl
doc = """
\tRunning tests from test_expected_behaviors_configurations.py
\tThe tests included in this module demonstrate that the properties of each
\tconfiguration were simulated as expected. These properties include the
\texpected size of each configuration, the objects and planes included, and
\twhether time-series functionalities appear as expected. The functions are:
\t\t- test_configuration_existence
\t\t\tTesting that all configurations present in the config file are found by
\t\t\tdeeplenstronomy and are present in the simulation outputs
\t\t- test_configuration_fractions
\t\t\tTesting that the FRACTION keyword for each configuration resulted in
\t\t\tthe expected number of images for that configuration being produced
\t\t- test_timeseries
\t\t\tTime-series functionalities, if present, get tested by the function
\t\t\ttest_configuration_fractions
\t\t- test_planes_and_objects
\t\t\tTesting that each specified object and plane was included in the
\t\t\tsimulation and is present in the metadata corresponding to its
\t\t\tconfiguration
"""
print(doc)
# Below are all of the possible operation modes
kwargs_sets = {0: {}, # default arguments
1: {'save_to_disk': True},
2: {'save_to_disk': True, 'image_file_format': 'h5'},
3: {'save_to_disk': True, 'skip_image_generation': True},
4: {'store_in_memory': False},
5: {'store_sample': True},
6: {'skip_image_generation': True, 'survey': 'des'},
7: {'solve_lens_equation': True},
8: {'return_planes': True}
}
with open('status.txt', 'r') as f:
    current_test = int(f.read().strip())
# Generate the dataset
kwargs_set = kwargs_sets[current_test]
config_filename = 'config.yaml'
dataset = dl.make_dataset(config_filename, **kwargs_set)
has_images = [hasattr(dataset, x + '_images') for x in dataset.configurations]
has_metadata = [hasattr(dataset, x + '_metadata')
for x in dataset.configurations]
has_planes = [hasattr(dataset, x + '_planes') for x in dataset.configurations]
images_exist = [os.path.exists(dataset.outdir +'/' + x + '_images.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
metadata_exist = [os.path.exists(dataset.outdir +'/' + x + '_metadata.csv')
for x in dataset.configurations]
planes_exist = [os.path.exists(dataset.outdir +'/' + x + '_planes.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
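# The module-level flags above are shared by every test below. A typical
# invocation (assuming status.txt selects the kwargs set, as read above):
#   echo 0 > status.txt && pytest test_expected_behaviors_configurations.py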
# Begin test functions
def test_configuration_existence():
for conf in dataset.configurations:
assert conf in dataset.config_dict['GEOMETRY'].keys()
def test_configuration_fractions():
for conf in dataset.configurations:
frac = dataset.config_dict['GEOMETRY'][conf]['FRACTION']
simulated_images = int(frac * dataset.size)
if all(has_images):
            assert getattr(dataset, f'{conf}_images').shape[0] == simulated_images
if all(has_metadata):
# not time-series
if 'TIMESERIES' not in dataset.config_dict['GEOMETRY'][conf].keys():
                assert len(getattr(dataset, f'{conf}_metadata')) == simulated_images
# time-series
else:
nites = dataset.config_dict['GEOMETRY'][conf]['TIMESERIES']['NITES']
md_rows = len(nites) * simulated_images
                assert md_rows == len(getattr(dataset, f'{conf}_metadata'))
def test_timeseries():
# already tested in test_configuration_fractions()
pass
def test_planes_and_objects():
for conf in dataset.configurations:
if all(has_metadata):
            md = getattr(dataset, f'{conf}_metadata')
else:
# this test requires metadata
return
number_of_planes = 0
for plane in dataset.config_dict['GEOMETRY'][conf].keys():
if plane.startswith('PLANE_'):
number_of_planes += 1
number_of_objects = 0
for obj in dataset.config_dict['GEOMETRY'][conf][plane].keys():
if obj.startswith('OBJECT_'):
number_of_objects += 1
if all(has_metadata):
for band in dataset.bands:
num_md_cols = 0
for col in md.columns:
if (col.startswith(f'{plane}-{obj}') and
col.endswith(band)):
num_md_cols += 1
# Plane and obj info in metadata for band
assert num_md_cols > 0
# expected number of objects in plane
for band in dataset.bands:
md_objects = md[plane + '-NUMBER_OF_OBJECTS-' + band].values
assert all(md_objects == number_of_objects)
# expected number of planes in configuration
for band in dataset.bands:
md_planes = md['NUMBER_OF_PLANES-' + band].values
assert all(md_planes == number_of_planes)
| 34.922581
| 84
| 0.616664
|
import inspect
import os
import deeplenstronomy.deeplenstronomy as dl
doc = """
\tRunning tests from test_expected_behaviors_configurations.py
\tThe tests included in this module demonstrate that the properties of each
\tconfiguration were simulated as expected. These properties include the
\texpected size of each configuration, the objects and planes included, and
\twhether time-series functionalities appear as expected. The functions are:
\t\t- test_configuration_existence
\t\t\tTesting that all configurations present in the config file are found by
\t\t\tdeeplenstronomy and are present in the simulation outputs
\t\t- test_configuration_fractions
\t\t\tTesting that the FRACTION keyword for each configuration resulted in
\t\t\tthe expected number of images for that configuration being produced
\t\t- test_timeseries
\t\t\tTime-series functionalities, if present, get tested by the function
\t\t\ttest_configuration_fractions
\t\t- test_planes_and_objects
\t\t\tTesting that each specified object and plane was included in the
\t\t\tsimulation and is present in the metadata corresponding to its
\t\t\tconfiguration
"""
print(doc)
kwargs_sets = {0: {},
1: {'save_to_disk': True},
2: {'save_to_disk': True, 'image_file_format': 'h5'},
3: {'save_to_disk': True, 'skip_image_generation': True},
4: {'store_in_memory': False},
5: {'store_sample': True},
6: {'skip_image_generation': True, 'survey': 'des'},
7: {'solve_lens_equation': True},
8: {'return_planes': True}
}
with open('status.txt', 'r') as f:
    current_test = int(f.read().strip())
kwargs_set = kwargs_sets[current_test]
config_filename = 'config.yaml'
dataset = dl.make_dataset(config_filename, **kwargs_set)
has_images = [hasattr(dataset, x + '_images') for x in dataset.configurations]
has_metadata = [hasattr(dataset, x + '_metadata')
for x in dataset.configurations]
has_planes = [hasattr(dataset, x + '_planes') for x in dataset.configurations]
images_exist = [os.path.exists(dataset.outdir +'/' + x + '_images.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
metadata_exist = [os.path.exists(dataset.outdir +'/' + x + '_metadata.csv')
for x in dataset.configurations]
planes_exist = [os.path.exists(dataset.outdir +'/' + x + '_planes.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
def test_configuration_existence():
for conf in dataset.configurations:
assert conf in dataset.config_dict['GEOMETRY'].keys()
def test_configuration_fractions():
for conf in dataset.configurations:
frac = dataset.config_dict['GEOMETRY'][conf]['FRACTION']
simulated_images = int(frac * dataset.size)
if all(has_images):
            assert getattr(dataset, f'{conf}_images').shape[0] == simulated_images
if all(has_metadata):
if 'TIMESERIES' not in dataset.config_dict['GEOMETRY'][conf].keys():
                assert len(getattr(dataset, f'{conf}_metadata')) == simulated_images
else:
nites = dataset.config_dict['GEOMETRY'][conf]['TIMESERIES']['NITES']
md_rows = len(nites) * simulated_images
                assert md_rows == len(getattr(dataset, f'{conf}_metadata'))
def test_timeseries():
pass
def test_planes_and_objects():
for conf in dataset.configurations:
if all(has_metadata):
            md = getattr(dataset, f'{conf}_metadata')
else:
return
number_of_planes = 0
for plane in dataset.config_dict['GEOMETRY'][conf].keys():
if plane.startswith('PLANE_'):
number_of_planes += 1
number_of_objects = 0
for obj in dataset.config_dict['GEOMETRY'][conf][plane].keys():
if obj.startswith('OBJECT_'):
number_of_objects += 1
if all(has_metadata):
for band in dataset.bands:
num_md_cols = 0
for col in md.columns:
if (col.startswith(f'{plane}-{obj}') and
col.endswith(band)):
num_md_cols += 1
assert num_md_cols > 0
for band in dataset.bands:
md_objects = md[plane + '-NUMBER_OF_OBJECTS-' + band].values
assert all(md_objects == number_of_objects)
for band in dataset.bands:
md_planes = md['NUMBER_OF_PLANES-' + band].values
assert all(md_planes == number_of_planes)
| true
| true
|
f70b8bc93123bb50d895e673ddd956f0d95d791d
| 1,299
|
py
|
Python
|
setup.py
|
lietu/twitch-bot
|
e1f3462a8851031bc2cbd5dffb6440edc2e45116
|
[
"MIT"
] | 6
|
2015-12-21T14:43:26.000Z
|
2019-09-08T12:56:36.000Z
|
setup.py
|
lietu/twitch-quote-bot
|
e1f3462a8851031bc2cbd5dffb6440edc2e45116
|
[
"MIT"
] | 5
|
2015-04-06T08:33:20.000Z
|
2016-02-09T03:28:39.000Z
|
setup.py
|
lietu/twitch-bot
|
e1f3462a8851031bc2cbd5dffb6440edc2e45116
|
[
"MIT"
] | 5
|
2015-12-03T17:54:51.000Z
|
2020-06-29T12:43:07.000Z
|
import sys
from cx_Freeze import setup, Executable
base = None
# Uncomment to disable the console on Windows, once the thing is stable
#if sys.platform == "win32":
# base = "Win32GUI"
config = {
'description': 'Twitch Bot',
'author': 'Janne Enberg',
'url': 'https://github.com/lietu/twitch-bot',
'download_url': 'https://github.com/lietu/twitch-bot',
'author_email': 'janne.enberg@lietu.net',
'version': '0.1',
'install_requires': [
# str(r.req) for r in parse_requirements("requirements.txt")
],
'packages': [
'bot'
],
'scripts': [],
'name': 'bot'
}
packages = ['irc', 'jaraco', 'packaging', 'PySide']
namespace_packages = ['zc.lockfile', 'yg.lockfile']
include_files = ['db_migrations/', 'lua/', 'ui/']
excludes = ["settings"] # Let's not distribute the local settings.py file
includes = []
setup(
name=config["description"],
version=config["version"],
description=config["description"],
options={
"build_exe": {
"packages": packages,
"namespace_packages": namespace_packages,
"include_files": include_files,
"includes": includes,
"excludes": excludes
}
},
executables=[
Executable("twitchbot.py", base=base),
]
)
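# Typical cx_Freeze invocation for this script (a sketch; exact output paths
# vary by platform and cx_Freeze version):
#   python setup.py build
# which writes the frozen executable under build/exe.<platform>-<pyversion>/.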
| 27.0625
| 74
| 0.602771
|
import sys
from cx_Freeze import setup, Executable
base = None
config = {
'description': 'Twitch Bot',
'author': 'Janne Enberg',
'url': 'https://github.com/lietu/twitch-bot',
'download_url': 'https://github.com/lietu/twitch-bot',
'author_email': 'janne.enberg@lietu.net',
'version': '0.1',
'install_requires': [
],
'packages': [
'bot'
],
'scripts': [],
'name': 'bot'
}
packages = ['irc', 'jaraco', 'packaging', 'PySide']
namespace_packages = ['zc.lockfile', 'yg.lockfile']
include_files = ['db_migrations/', 'lua/', 'ui/']
excludes = ["settings"]
includes = []
setup(
name=config["description"],
version=config["version"],
description=config["description"],
options={
"build_exe": {
"packages": packages,
"namespace_packages": namespace_packages,
"include_files": include_files,
"includes": includes,
"excludes": excludes
}
},
executables=[
Executable("twitchbot.py", base=base),
]
)
| true
| true
|
f70b8cb2129444e1f6211239a197af3e5f9f6cb3
| 14,418
|
py
|
Python
|
conveyor_2.py
|
bjnortier/ai-experiments-1
|
aff4496d84b059af6096f8f6b51d0ebcf6ed5c37
|
[
"CC0-1.0"
] | null | null | null |
conveyor_2.py
|
bjnortier/ai-experiments-1
|
aff4496d84b059af6096f8f6b51d0ebcf6ed5c37
|
[
"CC0-1.0"
] | null | null | null |
conveyor_2.py
|
bjnortier/ai-experiments-1
|
aff4496d84b059af6096f8f6b51d0ebcf6ed5c37
|
[
"CC0-1.0"
] | null | null | null |
import os
import glob
from pathlib import Path
import numpy as np
import random
import carb
from PIL import Image
from tensorflow import keras
from pxr import Usd, UsdGeom, Gf, UsdPhysics
import omni.kit
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.prims import create_prim, delete_prim
from omni.usd import get_context
from omni.kit.viewport import get_viewport_interface
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.syntheticdata import sensors
import omni.syntheticdata._syntheticdata as sd
def setColliderSubtree(prim, approximationShape="none", execute_command_fn=None):
pit = iter(Usd.PrimRange(prim))
for p in pit:
if p.GetMetadata("hide_in_stage_window"):
pit.PruneChildren()
continue
if p.IsA(UsdGeom.Gprim) or p.IsInstanceable():
if len(p.GetAttribute("faceVertexIndices").Get()) > 0:
omni.physx.scripts.utils.setCollider(p, approximationShape, execute_command_fn)
def setRigidBody(prim, approximationShape, kinematic, custom_execute_fn=None):
omni.physx.scripts.utils.setPhysics(prim, kinematic, custom_execute_fn)
if prim.IsA(UsdGeom.Xformable):
setColliderSubtree(prim, approximationShape, custom_execute_fn)
else:
omni.physx.scripts.utils.setCollider(prim, approximationShape, custom_execute_fn)
def create_light():
create_prim(
"/World/SphereLight",
"SphereLight",
position=np.array([0, 500, 500]),
attributes={
"radius": 150,
"intensity": 5e4
}
)
def create_classification_camera():
create_prim(
"/World/ClassificationCamera",
"Camera",
orientation=np.array([0.33, 0.197, 0.464, 0.794]),
position=np.array([151, 250, 135])
)
def find_usd_assets(shapenet_dir, categories, max_asset_size=50):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
assets up to split * len(num_assets) if `train=True`, otherwise select the
remainder.
"""
from omni.isaac.shapenet.utils import LABEL_TO_SYNSET
references = {}
for category in categories:
category_id = LABEL_TO_SYNSET[category]
all_assets = glob.glob(
os.path.join(shapenet_dir, category_id, "*/*.usd"),
recursive=True)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
carb.log_warn(
f"{a} skipped as it exceeded the max \
size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(
f"No USDs found for category {category} \
under max size {max_asset_size} MB.")
references[category] = assets_filtered
return references
def create_conveyor_anchor(plate_size):
size = 5
conveyor_anchor = create_prim(
"/World/Conveyor/Anchor",
"Cube",
position=np.array([0.0, -plate_size/2 - size, 0.0]),
scale=np.array([plate_size / 2, size, size]))
conveyor_anchor.GetAttribute("visibility").Set("invisible")
return conveyor_anchor
def create_conveyor_plate(stage, size, index):
plate_path = f"/World/Conveyor/Plates/Plate{index + 1}"
plate = DynamicCuboid(
prim_path=plate_path,
position=np.array([0, index * 100, 0.0]),
size=np.array([size - 5, size - 5, 10.0]),
color=np.array([0.28, 0.65, 1.0])
)
# prismatic joint
joint_path = f"/World/Conveyor/Joints/PrismaticJoint{index + 1}"
prismatic_joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
prismatic_joint.CreateAxisAttr("Y")
prismatic_joint.CreateBody0Rel().SetTargets(["/World/Conveyor/Anchor"])
prismatic_joint.CreateBody1Rel().SetTargets([plate_path])
prismatic_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 1.0, 0.0))
prismatic_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, -0.5, 0.0))
# add linear drive
driver = UsdPhysics.DriveAPI.Apply(
prismatic_joint.GetPrim(),
"linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateTargetVelocityAttr(200.0)
driver.CreateDampingAttr(1e10)
driver.CreateStiffnessAttr(0)
return plate
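# Note on the drive settings above: a PhysX linear drive applies
# force = stiffness * (target_pos - pos) + damping * (target_vel - vel),
# so stiffness 0 with very high damping yields a pure velocity servo that
# tracks the 200 units/s conveyor speed.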
def create_pusher(stage, plate_size, index):
actuator_path = f"/World/Pushers/Actuators/Actuator{index + 1}"
anchor_path = f"/World/Pushers/Anchors/Anchor{index + 1}"
depth = 10
anchor = create_prim(
anchor_path,
"Cube",
position=np.array([
-plate_size/2 - depth - 5,
(index + 2) * plate_size * 2,
20.0]),
scale=np.array([5, 5, 5]))
anchor.GetAttribute("visibility").Set("invisible")
pusher = DynamicCuboid(
prim_path=actuator_path,
position=np.array([
-plate_size/2 - 5,
(index + 2) * plate_size * 2,
20.0]),
size=np.array([depth, plate_size * 2, 30]),
color=np.array([0.1, 0.1, 0.5])
)
mass_api = UsdPhysics.MassAPI.Apply(pusher.prim)
mass_api.CreateMassAttr(1)
# Prismatic joint
joint_path = f"/World/Pushers/Joints/Joint{index + 1}"
joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
joint.CreateAxisAttr("X")
joint.CreateBody0Rel().SetTargets([anchor_path])
joint.CreateBody1Rel().SetTargets([actuator_path])
joint.CreateLocalPos0Attr().Set(Gf.Vec3f(1.0, 0.0, 0.0))
joint.CreateLocalPos1Attr().Set(Gf.Vec3f(-0.5, 0.0, 0.0))
# Linear drive. No position target is set, only activated when needed.
driver = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), "linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateDampingAttr(2e4)
driver.CreateStiffnessAttr(1e5)
return driver
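# The pusher drive is the mirror case: high stiffness with moderate damping
# makes a position servo. No target position is set here; sim_step_callback
# later sets it to 120.0 to fire the pusher once a widget is classified.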
def create_bucket(stage, plate_size, index):
bucket_path = f"/World/Buckets/Bucket{index + 1}"
width = plate_size * 2
depth = width
height = 20
a = create_prim(
f"{bucket_path}/a",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size - width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
b = create_prim(
f"{bucket_path}/b",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size + width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
c = create_prim(
f"{bucket_path}/c",
"Cube",
position=np.array([
plate_size/2 + 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
d = create_prim(
f"{bucket_path}/d",
"Cube",
position=np.array([
plate_size/2 + depth - 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
UsdPhysics.CollisionAPI.Apply(a)
UsdPhysics.CollisionAPI.Apply(b)
UsdPhysics.CollisionAPI.Apply(c)
UsdPhysics.CollisionAPI.Apply(d)
class Conveyor2(BaseSample):
def __init__(self) -> None:
super().__init__()
return
def setup_scene(self):
world = self.get_world()
self.model = keras.models.load_model("/home/bjnortier/isaac/sorting/save_at_30-augmented-3.h5")
self.categories = [
"bus", "car", "plane", "rocket", "watercraft"
]
shapenet_dir = Path(os.environ["SHAPENET_LOCAL_DIR"])
self.asset_references = find_usd_assets(
f"{shapenet_dir}_nomat",
self.categories)
self.num_classes = len(self.categories)
self.num_plates = self.num_classes * 2 + 4
plate_size = 100.0
self.max_plate_position = plate_size * self.num_plates
self.widget_index = 0
self.plate_reset_count = 0
stage = get_context().get_stage()
world.scene.add_ground_plane(z_position=-45.0)
create_light()
create_classification_camera()
create_conveyor_anchor(plate_size)
self.plates = []
for i in range(self.num_plates):
self.plates.append(create_conveyor_plate(stage, plate_size, i))
self.pushers = []
for i in range(self.num_classes):
self.pushers.append(create_pusher(stage, plate_size, i))
for i in range(self.num_classes):
create_bucket(stage, plate_size, i)
viewport_interface = get_viewport_interface()
viewport_handle = viewport_interface.create_instance()
vp = viewport_interface.get_viewport_window(viewport_handle)
vp.set_active_camera("/World/ClassificationCamera")
vp.set_texture_resolution(299, 299)
self.classification_viewport = vp
self.sd_interface = sd.acquire_syntheticdata_interface()
self.is_sensor_initialized = False
        # Create the first widget
self.drop_widget(y_position=100.0)
return
def drop_widget(self, y_position=0.0):
category = random.choice(self.categories)
asset_reference = random.choice(self.asset_references[category])
widget_path = f"/World/widget_{self.widget_index}"
widget_prim = create_prim(
widget_path,
"Xform",
scale=np.array([50.0, 50.0, 50.0]),
orientation=euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True),
position=np.array([0.0, y_position, 50.0]),
usd_path=asset_reference,
semantic_label=category)
self.current_widget_category = category
widget = XFormPrim(widget_path)
material = PreviewSurface(
prim_path="/World/Looks/ShapeMaterial",
color=np.array([0.1, 0.6, 0.1]))
widget.apply_visual_material(material)
# Determine bounds and translate to sit on the Z=0 plane
orientation_on_plane = euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True)
widget.set_local_pose(
np.array([0.0, 0.0, 0.0]),
orientation_on_plane)
bounds = UsdGeom.Mesh(widget_prim).ComputeWorldBound(0.0, "default")
new_position = np.array([0.0, 0.0, -bounds.GetBox().GetMin()[2] + 5.0])
widget.set_local_pose(new_position)
mass_api = UsdPhysics.MassAPI.Apply(widget_prim)
mass_api.CreateMassAttr(1)
setRigidBody(widget_prim, "convexHull", False)
self.widget = widget
self.widget_index += 1
self.widget_class = None
self.classification_requested = False
self.classification_complete = False
self.arm_activated = False
for pusher in self.pushers:
pusher.CreateTargetPositionAttr(0.0)
async def setup_post_load(self):
self._world = self.get_world()
self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_callback)
return
def sim_step_callback(self, step_size):
if not self.is_sensor_initialized:
print("Waiting for sensor to initialize")
sensor = sensors.create_or_retrieve_sensor(
self.classification_viewport, sd.SensorType.Rgb)
self.is_sensor_initialized = \
self.sd_interface.is_sensor_initialized(sensor)
if self.is_sensor_initialized:
print("Sensor initialized!")
for plate in self.plates:
            # When a plate reaches the end of the conveyor belt, reset its
            # position to the start. Once every plate has wrapped around,
            # drop a new widget.
plate_position, _ = plate.get_world_pose()
if plate_position[1] > self.max_plate_position:
plate_position[1] -= self.max_plate_position
plate.set_world_pose(plate_position)
self.plate_reset_count += 1
if self.plate_reset_count == self.num_plates:
self.plate_reset_count = 0
self.drop_widget()
# Classify the widget when it passes under the camera
if not self.classification_requested:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > 100:
self.capture_gt()
self.classification_requested = True
if self.classification_complete and not self.arm_activated:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > (self.widget_class + 1) * 200 + 100:
self.arm_activated = True
self.pushers[self.widget_class].CreateTargetPositionAttr(120.0)
def capture_gt(self):
rgb = sensors.get_rgb(self.classification_viewport)
# Discard alpha channel
rgb = rgb[:, :, :3]
        batch = np.expand_dims(rgb, axis=0)
        prediction = self.model.predict(batch)
self.widget_class = np.argmax(prediction)
print(f"actual:predicted {self.current_widget_category}:{self.categories[self.widget_class]}")
image = Image.fromarray(rgb)
image.save("/tmp/rgb.png")
self.classification_complete = True
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
def world_cleanup(self):
return
| 34.410501
| 103
| 0.619087
|
import os
import glob
from pathlib import Path
import numpy as np
import random
import carb
from PIL import Image
from tensorflow import keras
from pxr import Usd, UsdGeom, Gf, UsdPhysics
import omni.kit
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.prims import create_prim, delete_prim
from omni.usd import get_context
from omni.kit.viewport import get_viewport_interface
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.syntheticdata import sensors
import omni.syntheticdata._syntheticdata as sd
def setColliderSubtree(prim, approximationShape="none", execute_command_fn=None):
pit = iter(Usd.PrimRange(prim))
for p in pit:
if p.GetMetadata("hide_in_stage_window"):
pit.PruneChildren()
continue
if p.IsA(UsdGeom.Gprim) or p.IsInstanceable():
if len(p.GetAttribute("faceVertexIndices").Get()) > 0:
omni.physx.scripts.utils.setCollider(p, approximationShape, execute_command_fn)
def setRigidBody(prim, approximationShape, kinematic, custom_execute_fn=None):
omni.physx.scripts.utils.setPhysics(prim, kinematic, custom_execute_fn)
if prim.IsA(UsdGeom.Xformable):
setColliderSubtree(prim, approximationShape, custom_execute_fn)
else:
omni.physx.scripts.utils.setCollider(prim, approximationShape, custom_execute_fn)
def create_light():
create_prim(
"/World/SphereLight",
"SphereLight",
position=np.array([0, 500, 500]),
attributes={
"radius": 150,
"intensity": 5e4
}
)
def create_classification_camera():
create_prim(
"/World/ClassificationCamera",
"Camera",
orientation=np.array([0.33, 0.197, 0.464, 0.794]),
position=np.array([151, 250, 135])
)
def find_usd_assets(shapenet_dir, categories, max_asset_size=50):
from omni.isaac.shapenet.utils import LABEL_TO_SYNSET
references = {}
for category in categories:
category_id = LABEL_TO_SYNSET[category]
all_assets = glob.glob(
os.path.join(shapenet_dir, category_id, "*/*.usd"),
recursive=True)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
carb.log_warn(
f"{a} skipped as it exceeded the max \
size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(
f"No USDs found for category {category} \
under max size {max_asset_size} MB.")
references[category] = assets_filtered
return references
def create_conveyor_anchor(plate_size):
size = 5
conveyor_anchor = create_prim(
"/World/Conveyor/Anchor",
"Cube",
position=np.array([0.0, -plate_size/2 - size, 0.0]),
scale=np.array([plate_size / 2, size, size]))
conveyor_anchor.GetAttribute("visibility").Set("invisible")
return conveyor_anchor
def create_conveyor_plate(stage, size, index):
plate_path = f"/World/Conveyor/Plates/Plate{index + 1}"
plate = DynamicCuboid(
prim_path=plate_path,
position=np.array([0, index * 100, 0.0]),
size=np.array([size - 5, size - 5, 10.0]),
color=np.array([0.28, 0.65, 1.0])
)
joint_path = f"/World/Conveyor/Joints/PrismaticJoint{index + 1}"
prismatic_joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
prismatic_joint.CreateAxisAttr("Y")
prismatic_joint.CreateBody0Rel().SetTargets(["/World/Conveyor/Anchor"])
prismatic_joint.CreateBody1Rel().SetTargets([plate_path])
prismatic_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 1.0, 0.0))
prismatic_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, -0.5, 0.0))
driver = UsdPhysics.DriveAPI.Apply(
prismatic_joint.GetPrim(),
"linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateTargetVelocityAttr(200.0)
driver.CreateDampingAttr(1e10)
driver.CreateStiffnessAttr(0)
return plate
def create_pusher(stage, plate_size, index):
actuator_path = f"/World/Pushers/Actuators/Actuator{index + 1}"
anchor_path = f"/World/Pushers/Anchors/Anchor{index + 1}"
depth = 10
anchor = create_prim(
anchor_path,
"Cube",
position=np.array([
-plate_size/2 - depth - 5,
(index + 2) * plate_size * 2,
20.0]),
scale=np.array([5, 5, 5]))
anchor.GetAttribute("visibility").Set("invisible")
pusher = DynamicCuboid(
prim_path=actuator_path,
position=np.array([
-plate_size/2 - 5,
(index + 2) * plate_size * 2,
20.0]),
size=np.array([depth, plate_size * 2, 30]),
color=np.array([0.1, 0.1, 0.5])
)
mass_api = UsdPhysics.MassAPI.Apply(pusher.prim)
mass_api.CreateMassAttr(1)
joint_path = f"/World/Pushers/Joints/Joint{index + 1}"
joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
joint.CreateAxisAttr("X")
joint.CreateBody0Rel().SetTargets([anchor_path])
joint.CreateBody1Rel().SetTargets([actuator_path])
joint.CreateLocalPos0Attr().Set(Gf.Vec3f(1.0, 0.0, 0.0))
joint.CreateLocalPos1Attr().Set(Gf.Vec3f(-0.5, 0.0, 0.0))
driver = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), "linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateDampingAttr(2e4)
driver.CreateStiffnessAttr(1e5)
return driver
def create_bucket(stage, plate_size, index):
bucket_path = f"/World/Buckets/Bucket{index + 1}"
width = plate_size * 2
depth = width
height = 20
a = create_prim(
f"{bucket_path}/a",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size - width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
b = create_prim(
f"{bucket_path}/b",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size + width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
c = create_prim(
f"{bucket_path}/c",
"Cube",
position=np.array([
plate_size/2 + 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
d = create_prim(
f"{bucket_path}/d",
"Cube",
position=np.array([
plate_size/2 + depth - 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
UsdPhysics.CollisionAPI.Apply(a)
UsdPhysics.CollisionAPI.Apply(b)
UsdPhysics.CollisionAPI.Apply(c)
UsdPhysics.CollisionAPI.Apply(d)
class Conveyor2(BaseSample):
def __init__(self) -> None:
super().__init__()
return
def setup_scene(self):
world = self.get_world()
self.model = keras.models.load_model("/home/bjnortier/isaac/sorting/save_at_30-augmented-3.h5")
self.categories = [
"bus", "car", "plane", "rocket", "watercraft"
]
shapenet_dir = Path(os.environ["SHAPENET_LOCAL_DIR"])
self.asset_references = find_usd_assets(
f"{shapenet_dir}_nomat",
self.categories)
self.num_classes = len(self.categories)
self.num_plates = self.num_classes * 2 + 4
plate_size = 100.0
self.max_plate_position = plate_size * self.num_plates
self.widget_index = 0
self.plate_reset_count = 0
stage = get_context().get_stage()
world.scene.add_ground_plane(z_position=-45.0)
create_light()
create_classification_camera()
create_conveyor_anchor(plate_size)
self.plates = []
for i in range(self.num_plates):
self.plates.append(create_conveyor_plate(stage, plate_size, i))
self.pushers = []
for i in range(self.num_classes):
self.pushers.append(create_pusher(stage, plate_size, i))
for i in range(self.num_classes):
create_bucket(stage, plate_size, i)
viewport_interface = get_viewport_interface()
viewport_handle = viewport_interface.create_instance()
vp = viewport_interface.get_viewport_window(viewport_handle)
vp.set_active_camera("/World/ClassificationCamera")
vp.set_texture_resolution(299, 299)
self.classification_viewport = vp
self.sd_interface = sd.acquire_syntheticdata_interface()
self.is_sensor_initialized = False
        self.drop_widget(y_position=100.0)
return
def drop_widget(self, y_position=0.0):
category = random.choice(self.categories)
asset_reference = random.choice(self.asset_references[category])
widget_path = f"/World/widget_{self.widget_index}"
widget_prim = create_prim(
widget_path,
"Xform",
scale=np.array([50.0, 50.0, 50.0]),
orientation=euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True),
position=np.array([0.0, y_position, 50.0]),
usd_path=asset_reference,
semantic_label=category)
self.current_widget_category = category
widget = XFormPrim(widget_path)
material = PreviewSurface(
prim_path="/World/Looks/ShapeMaterial",
color=np.array([0.1, 0.6, 0.1]))
widget.apply_visual_material(material)
orientation_on_plane = euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True)
widget.set_local_pose(
np.array([0.0, 0.0, 0.0]),
orientation_on_plane)
bounds = UsdGeom.Mesh(widget_prim).ComputeWorldBound(0.0, "default")
new_position = np.array([0.0, 0.0, -bounds.GetBox().GetMin()[2] + 5.0])
widget.set_local_pose(new_position)
mass_api = UsdPhysics.MassAPI.Apply(widget_prim)
mass_api.CreateMassAttr(1)
setRigidBody(widget_prim, "convexHull", False)
self.widget = widget
self.widget_index += 1
self.widget_class = None
self.classification_requested = False
self.classification_complete = False
self.arm_activated = False
for pusher in self.pushers:
pusher.CreateTargetPositionAttr(0.0)
async def setup_post_load(self):
self._world = self.get_world()
self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_callback)
return
def sim_step_callback(self, step_size):
if not self.is_sensor_initialized:
print("Waiting for sensor to initialize")
sensor = sensors.create_or_retrieve_sensor(
self.classification_viewport, sd.SensorType.Rgb)
self.is_sensor_initialized = \
self.sd_interface.is_sensor_initialized(sensor)
if self.is_sensor_initialized:
print("Sensor initialized!")
for plate in self.plates:
plate_position, _ = plate.get_world_pose()
if plate_position[1] > self.max_plate_position:
plate_position[1] -= self.max_plate_position
plate.set_world_pose(plate_position)
self.plate_reset_count += 1
if self.plate_reset_count == self.num_plates:
self.plate_reset_count = 0
self.drop_widget()
if not self.classification_requested:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > 100:
self.capture_gt()
self.classification_requested = True
if self.classification_complete and not self.arm_activated:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > (self.widget_class + 1) * 200 + 100:
self.arm_activated = True
self.pushers[self.widget_class].CreateTargetPositionAttr(120.0)
def capture_gt(self):
rgb = sensors.get_rgb(self.classification_viewport)
rgb = rgb[:, :, :3]
        batch = np.expand_dims(rgb, axis=0)
        prediction = self.model.predict(batch)
self.widget_class = np.argmax(prediction)
print(f"actual:predicted {self.current_widget_category}:{self.categories[self.widget_class]}")
image = Image.fromarray(rgb)
image.save("/tmp/rgb.png")
self.classification_complete = True
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
def world_cleanup(self):
return
| true
| true
|
f70b8d0e2b5f41a42fe57c9b6a33830ab0c71fa9
| 305
|
py
|
Python
|
config.py
|
tensorush/Neural-Painter-Bot
|
420fd2d01a1a91b45553e3da07d4a5c18a60ec11
|
[
"MIT"
] | 1
|
2021-02-18T02:52:10.000Z
|
2021-02-18T02:52:10.000Z
|
config.py
|
tensorush/Neural-Painter-Bot
|
420fd2d01a1a91b45553e3da07d4a5c18a60ec11
|
[
"MIT"
] | null | null | null |
config.py
|
tensorush/Neural-Painter-Bot
|
420fd2d01a1a91b45553e3da07d4a5c18a60ec11
|
[
"MIT"
] | null | null | null |
import os
# Bot token
BOT_TOKEN = os.getenv('BOT_TOKEN')
# Web application setup
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = int(os.getenv('PORT'))
# Webhook setup
WEBHOOK_HOST = 'https://neural-painter-bot.herokuapp.com'
WEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
| 20.333333
| 57
| 0.734426
|
import os
BOT_TOKEN = os.getenv('BOT_TOKEN')
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = int(os.getenv('PORT'))
WEBHOOK_HOST = 'https://neural-painter-bot.herokuapp.com'
WEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
| true
| true
|
f70b8dd29628c3270d786ed902fdfe1bff153136
| 998
|
py
|
Python
|
two_factor/management/commands/two_factor_disable.py
|
ercpe/django-two-factor-auth
|
76866dd310903b3a34526becaa0a5012dea7debe
|
[
"MIT"
] | null | null | null |
two_factor/management/commands/two_factor_disable.py
|
ercpe/django-two-factor-auth
|
76866dd310903b3a34526becaa0a5012dea7debe
|
[
"MIT"
] | 1
|
2015-07-13T16:52:33.000Z
|
2015-07-16T20:24:59.000Z
|
two_factor/management/commands/two_factor_disable.py
|
ercpe/django-two-factor-auth
|
76866dd310903b3a34526becaa0a5012dea7debe
|
[
"MIT"
] | 1
|
2019-12-30T15:38:13.000Z
|
2019-12-30T15:38:13.000Z
|
from django.core.management.base import BaseCommand, CommandError
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from django_otp import devices_for_user
class Command(BaseCommand):
"""
Command for disabling two-factor authentication for certain users.
The command accepts any number of usernames, and will remove all OTP
devices for those users.
Example usage::
        manage.py two_factor_disable bouke steve
"""
args = '<username username ...>'
help = 'Disables two-factor authentication for the given users'
def handle(self, *args, **options):
for username in args:
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
raise CommandError('User "%s" does not exist' % username)
for device in devices_for_user(user):
device.delete()
| 28.514286
| 73
| 0.670341
|
from django.core.management.base import BaseCommand, CommandError
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from django_otp import devices_for_user
class Command(BaseCommand):
args = '<username username ...>'
help = 'Disables two-factor authentication for the given users'
def handle(self, *args, **options):
for username in args:
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
raise CommandError('User "%s" does not exist' % username)
for device in devices_for_user(user):
device.delete()
| true
| true
|
f70b8fac17e6bde268e662cd3401fce8726fa90e
| 599
|
py
|
Python
|
tests/test_comments.py
|
EugeneZnm/pitches
|
64edf62503f9195de2f1e11a7a7cf29e88fa00de
|
[
"Unlicense"
] | null | null | null |
tests/test_comments.py
|
EugeneZnm/pitches
|
64edf62503f9195de2f1e11a7a7cf29e88fa00de
|
[
"Unlicense"
] | null | null | null |
tests/test_comments.py
|
EugeneZnm/pitches
|
64edf62503f9195de2f1e11a7a7cf29e88fa00de
|
[
"Unlicense"
] | null | null | null |
import unittest
from app.models import Comments
class CommentsModelTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comments(comment='a')
def test_instance(self):
self.assertEqual(self.new_comment.comment, 'a')
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comments.query.all()) > 0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comment = Comments.get_comment(1)
self.assertTrue(len(got_comment) > 0)
if __name__ == '__main__':
unittest.main()
| 24.958333
| 55
| 0.686144
|
import unittest
from app.models import Comments
class CommentsModelTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comments(comment='a')
def test_instance(self):
self.assertEqual(self.new_comment.comment, 'a')
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comments.query.all()) > 0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comment = Comments.get_comment(1)
self.assertTrue(len(got_comment) > 0)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70b918593a9967c3c6a32aab2c0bf4d8d1dbaef
| 1,505
|
py
|
Python
|
dls_ade/dls_list_modules_test.py
|
hir12111/dls_ade
|
92449cc2a0fadc1af4c125d72cfc392df4763f2c
|
[
"Apache-2.0"
] | null | null | null |
dls_ade/dls_list_modules_test.py
|
hir12111/dls_ade
|
92449cc2a0fadc1af4c125d72cfc392df4763f2c
|
[
"Apache-2.0"
] | null | null | null |
dls_ade/dls_list_modules_test.py
|
hir12111/dls_ade
|
92449cc2a0fadc1af4c125d72cfc392df4763f2c
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env dls-python
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # Allows for Python 2/3 compatibility, 'builtins' is namespace for inbuilt functions
else:
import builtins
import unittest
from mock import patch, MagicMock
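# Patch dls_ade.Server before importing dls_list_modules, so the module-level
# server object created at import time is a mock; the patch is stopped once the
# import has run.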
p = patch('dls_ade.Server')
server_mock = MagicMock()
m = p.start()
m.return_value = server_mock
from dls_ade import dls_list_modules
p.stop()
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = dls_list_modules.make_parser()
def test_parser_understands_domain(self):
args = self.parser.parse_args("-i TS".split())
self.assertEqual(args.area, "ioc")
self.assertEqual(args.domain_name, "TS")
class PrintModuleListTest(unittest.TestCase):
def setUp(self):
self.server_mock = server_mock
def tearDown(self):
self.server_mock.reset_mock()
def test_server_repo_list_called(self):
source = "test/source"
dls_list_modules.get_module_list(source)
self.server_mock.get_server_repo_list.assert_called_once_with(source)
def test_given_valid_source_then_list_of_modules(self):
self.server_mock.get_server_repo_list.return_value = [
"test/source/module", "test/source/module2.git"
]
source = "test/source"
module_list = dls_list_modules.get_module_list(source)
self.assertIsNotNone(module_list)
self.assertListEqual(module_list, ['module', 'module2'])
| 26.403509
| 120
| 0.711628
|
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins
else:
import builtins
import unittest
from mock import patch, MagicMock
p = patch('dls_ade.Server')
server_mock = MagicMock()
m = p.start()
m.return_value = server_mock
from dls_ade import dls_list_modules
p.stop()
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = dls_list_modules.make_parser()
def test_parser_understands_domain(self):
args = self.parser.parse_args("-i TS".split())
self.assertEqual(args.area, "ioc")
self.assertEqual(args.domain_name, "TS")
class PrintModuleListTest(unittest.TestCase):
def setUp(self):
self.server_mock = server_mock
def tearDown(self):
self.server_mock.reset_mock()
def test_server_repo_list_called(self):
source = "test/source"
dls_list_modules.get_module_list(source)
self.server_mock.get_server_repo_list.assert_called_once_with(source)
def test_given_valid_source_then_list_of_modules(self):
self.server_mock.get_server_repo_list.return_value = [
"test/source/module", "test/source/module2.git"
]
source = "test/source"
module_list = dls_list_modules.get_module_list(source)
self.assertIsNotNone(module_list)
self.assertListEqual(module_list, ['module', 'module2'])
| true
| true
|
f70b928fea37b2c0df2781362cb19ba7188b7b27
| 1,220
|
py
|
Python
|
project/train.py
|
Lucklyric/hydra-pytorch-lightning-seed
|
2fd1ef2795c8705f03334f0af66e78aaa565a52e
|
[
"Apache-2.0"
] | 4
|
2021-05-03T14:00:12.000Z
|
2022-03-16T18:39:24.000Z
|
project/train.py
|
Lucklyric/dl-optimizer-poc
|
fd7ddc91e10f3d9e6fa6154221c960cc6ff6a8a7
|
[
"Apache-2.0"
] | null | null | null |
project/train.py
|
Lucklyric/dl-optimizer-poc
|
fd7ddc91e10f3d9e6fa6154221c960cc6ff6a8a7
|
[
"Apache-2.0"
] | 1
|
2021-09-07T13:15:51.000Z
|
2021-09-07T13:15:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : train.py
# Author: Alvin(Xinyao) Sun <xinyao1@ualberta.ca>
# Date : 02.05.2021
import logging
import os
import sys
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
log = logging.getLogger(__name__)
@hydra.main(config_path='config', config_name='train_config')
def main(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
pl.seed_everything(cfg.seed)
# ------------
# data
# ------------
data_module = hydra.utils.instantiate(cfg.data)
# ------------
# model
# ------------
model = hydra.utils.instantiate(cfg.model)
# ------------
# training
# ------------
trainer = pl.Trainer(**(cfg.pl_trainer), checkpoint_callback=True)
log.info('run training...')
train_dataloader = data_module.train_dataloader()
val_dataloader = data_module.val_dataloader()
trainer.fit(model,
train_dataloaders=train_dataloader,
val_dataloaders=[val_dataloader])
if __name__ == '__main__':
try:
main()
except Exception as e:
log.error(e)
exit(1)
| 23.461538
| 70
| 0.622951
|
import logging
import os
import sys
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
log = logging.getLogger(__name__)
@hydra.main(config_path='config', config_name='train_config')
def main(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
pl.seed_everything(cfg.seed)
data_module = hydra.utils.instantiate(cfg.data)
model = hydra.utils.instantiate(cfg.model)
trainer = pl.Trainer(**(cfg.pl_trainer), checkpoint_callback=True)
log.info('run training...')
train_dataloader = data_module.train_dataloader()
val_dataloader = data_module.val_dataloader()
trainer.fit(model,
train_dataloaders=train_dataloader,
val_dataloaders=[val_dataloader])
if __name__ == '__main__':
try:
main()
except Exception as e:
log.error(e)
exit(1)
| true
| true
|
f70b92b9aa2e2b3ff6177472d1bfaf0b814109aa
| 182
|
py
|
Python
|
src/python/gudhi/hera/__init__.py
|
m0baxter/gudhi-devel
|
6e14ef1f31e09f3875316440303450ff870d9881
|
[
"MIT"
] | 146
|
2019-03-15T14:10:31.000Z
|
2022-03-23T21:14:52.000Z
|
src/python/gudhi/hera/__init__.py
|
m0baxter/gudhi-devel
|
6e14ef1f31e09f3875316440303450ff870d9881
|
[
"MIT"
] | 398
|
2019-03-07T14:55:22.000Z
|
2022-03-31T14:50:40.000Z
|
src/python/gudhi/hera/__init__.py
|
m0baxter/gudhi-devel
|
6e14ef1f31e09f3875316440303450ff870d9881
|
[
"MIT"
] | 51
|
2019-03-08T15:58:48.000Z
|
2022-03-14T10:23:23.000Z
|
from .wasserstein import wasserstein_distance
from .bottleneck import bottleneck_distance
__author__ = "Marc Glisse"
__copyright__ = "Copyright (C) 2020 Inria"
__license__ = "MIT"
| 22.75
| 45
| 0.802198
|
from .wasserstein import wasserstein_distance
from .bottleneck import bottleneck_distance
__author__ = "Marc Glisse"
__copyright__ = "Copyright (C) 2020 Inria"
__license__ = "MIT"
| true
| true
|
f70b9516de8ebd320f979b8a39b117ab92fb9820
| 8,050
|
py
|
Python
|
docs/languages/en/conf.py
|
chrisoconnell/zf2-documentation
|
f7ea720801db65c82448128cb173944e81a10d82
|
[
"BSD-3-Clause"
] | null | null | null |
docs/languages/en/conf.py
|
chrisoconnell/zf2-documentation
|
f7ea720801db65c82448128cb173944e81a10d82
|
[
"BSD-3-Clause"
] | null | null | null |
docs/languages/en/conf.py
|
chrisoconnell/zf2-documentation
|
f7ea720801db65c82448128cb173944e81a10d82
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Zend Framework 2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 6 18:55:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zend Framework 2'
copyright = u'2012, Zend Technologies Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0rc1'
# The full version, including alpha/beta/rc tags.
release = '2.0.0rc1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../zf2_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendFramework2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendFramework2.tex', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendframework2', u'Zend Framework 2 Documentation',
[u'Zend Technologies Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendFramework2', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'ZendFramework2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Hack to render the php source code without the <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
| 32.459677
| 83
| 0.717143
|
import sys, os
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Zend Framework 2'
copyright = u'2012, Zend Technologies Ltd.'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0rc1'
# The full version, including alpha/beta/rc tags.
release = '2.0.0rc1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../zf2_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendFramework2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendFramework2.tex', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendframework2', u'Zend Framework 2 Documentation',
[u'Zend Technologies Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendFramework2', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'ZendFramework2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Hack to render the php source code without the <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
| true
| true
|
f70b959b58bee55c3001698484754add230dbb4e
| 18,187
|
py
|
Python
|
common/webapp/views/misc_views.py
|
jisantuc/labeller
|
11c7738d43b860fbdad660b459572843f873abce
|
[
"Apache-2.0"
] | 2
|
2021-12-02T08:42:31.000Z
|
2022-03-11T19:58:40.000Z
|
common/webapp/views/misc_views.py
|
jisantuc/labeller
|
11c7738d43b860fbdad660b459572843f873abce
|
[
"Apache-2.0"
] | null | null | null |
common/webapp/views/misc_views.py
|
jisantuc/labeller
|
11c7738d43b860fbdad660b459572843f873abce
|
[
"Apache-2.0"
] | 2
|
2021-12-03T17:49:49.000Z
|
2022-03-21T17:05:06.000Z
|
# Copyright 2014 SolidBuilds.com. All rights reserved #
# Authors: Ling Thio <ling.thio@gmail.com>
from datetime import datetime
from urllib.parse import quote  # used for the login redirect in register()
from flask import current_app, flash
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted
from flask_user.views import _get_safe_next_param, render, _send_registered_email, _endpoint_url, _do_login_user
from flask_user import signals
from webapp import db
from webapp.models.user_models import User, Role, AdminRegisterForm, EmployerRegisterForm, EmployeeRegisterForm
from webapp.models.user_models import AdminProfileForm, EmployerProfileForm, EmployeeProfileForm, SuspendUserForm
from webapp.models.user_models import TrainingVideoForm
from MappingCommon import MappingCommon
# When using a Flask app factory we must use a blueprint to avoid needing 'app' for '@app.route'
main_blueprint = Blueprint('main', __name__, template_folder='templates')
@main_blueprint.route('/')
def base_page():
return redirect(url_for('main.home_page'))
# The Home page is accessible to anyone
@main_blueprint.route('/home')
def home_page():
return render_template('pages/home_page.html')
# ----------------------------------------------------------------
# The Administrator page is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin')
@roles_accepted('admin')
@login_required
def admin_page():
return render_template('pages/admin_page.html')
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/list_admins_employers')
@roles_accepted('admin')
@login_required
def list_admins_employers():
# Get all users that are admins or employers.
users = User.query.filter(User.roles.any((Role.name=='admin') | (Role.name=='employer'))).all()
admin_list = []
employer_list = []
for user in users:
if user.get_roles_string() == 'admin':
admin_list.append((user.last_name, user.first_name, user.email))
elif user.get_roles_string() == 'employer':
employer_list.append((user.company_name, user.last_name, user.first_name, user.email))
admin_list.sort()
employer_list.sort()
return render_template('pages/list_admins_employers_page.html', admin_list=admin_list, employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role.
@main_blueprint.route('/employer/list_employees_by_admin')
@roles_accepted('admin')
@login_required
def list_employees_by_admin():
# Get all users that are employers.
employers = User.query.filter(User.roles.any(Role.name=='employer')).all()
employer_list = []
for employer in employers:
# Get all users invited by this employer.
users = User.query.filter(User.invited_by == employer.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer_list.append((employer.company_name, employee_list))
employer_list.sort()
return render_template('pages/list_employees_by_admin_page.html', employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/admin_employer_invite')
@roles_accepted('admin')
@login_required
def admin_employer_invite():
return redirect(url_for('user.invite'))
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/suspend_admin_employer_employee', methods=['GET', 'POST'])
@roles_accepted('admin')
@login_required
def suspend_admin_employer_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Validate the specified email address.
email = form.email.data
user = User.query.filter(User.email == email).first()
if not user:
flash("No such user", "error")
return redirect(url_for('main.suspend_admin_employer_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
# Save modified user record
db_adapter.commit()
flash('User has been successfully ' + verb, 'success')
# Process GET or invalid POST
return render_template('pages/suspend_admin_employer_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employer page is accessible to authenticated users with the 'employer' or 'admin' role.
@main_blueprint.route('/employer')
@roles_accepted('employer', 'admin')
@login_required
def employer_page():
return render_template('pages/employer_page.html')
# The Employer submenu is accessible to authenticated users with the 'employer' role.
@main_blueprint.route('/employer/list_employees_by_employer')
@roles_accepted('employer')
@login_required
def list_employees_by_employer():
# Get all users invited by this employer.
users = User.query.filter(User.invited_by == current_user.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer = User.query.filter(User.id == current_user.id).first()
return render_template('pages/list_employees_by_employer_page.html', company_name=employer.company_name, employee_list=employee_list)
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/employee_invite')
@roles_accepted('employer')
@login_required
def employee_invite():
return redirect(url_for('user.invite'))
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/suspend_employee', methods=['GET', 'POST'])
@roles_accepted('employer')
@login_required
def suspend_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Validate the specified email address.
email = form.email.data
user = User.query.filter((User.email == email) & (User.invited_by == current_user.id)).first()
if not user:
flash("No such employee", "error")
return redirect(url_for('main.suspend_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
# Save modified user record
db_adapter.commit()
flash('Employee has been successfully ' + verb, 'success')
# Process GET or invalid POST
return render_template('pages/suspend_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employee page is accessible to authenticated users with the 'employee' or 'admin' role.
@main_blueprint.route('/employee')
@roles_accepted('employee', 'admin')
@login_required # Limits access to authenticated users
def employee_page():
return render_template('pages/employee_page.html')
# The Employee submenu is accessible to authenticated users with the 'employee' role
@main_blueprint.route('/employee/training')
@roles_accepted('employee')
@login_required # Limits access to authenticated users
def training():
trainingForm = TrainingVideoForm(request.form)
mapc = MappingCommon()
# Read configuration parameters.
videoUrl = mapc.getConfiguration('VideoUrl')
introVideo = mapc.getConfiguration('QualTest_IntroVideo')
introWidth = mapc.getConfiguration('QualTest_IntroVideoWidth')
introHeight = mapc.getConfiguration('QualTest_IntroVideoHeight')
instructionalVideo = mapc.getConfiguration('QualTest_InstructionalVideo')
instructionalWidth = mapc.getConfiguration('QualTest_InstructionalVideoWidth')
instructionalHeight = mapc.getConfiguration('QualTest_InstructionalVideoHeight')
introUrl = "%s/%s" % (videoUrl, introVideo)
instructionalUrl = "%s/%s" % (videoUrl, instructionalVideo)
# Load up the training form.
trainingForm.introUrl.data = introUrl
trainingForm.introWidth.data = introWidth
trainingForm.introHeight.data = introHeight
trainingForm.instructionalUrl.data = instructionalUrl
trainingForm.instructionalWidth.data = instructionalWidth
trainingForm.instructionalHeight.data = instructionalHeight
return render_template('pages/training_page.html', form=trainingForm)
# ----------------------------------------------------------------
# The registration page is accessible to all users by invitation only.
def register():
""" Display registration form and create new User."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
# invite token used to determine validity of registeree
invite_token = request.values.get("token")
# require invite without a token should disallow the user from registering
if user_manager.require_invitation and not invite_token:
flash("Registration is invite only", "error")
return redirect(url_for('user.login'))
user_invite = None
if invite_token and db_adapter.UserInvitationClass:
user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
if user_invite is None:
flash("Invalid invitation token", "error")
return redirect(url_for('user.login'))
# Initialize form
login_form = user_manager.login_form() # for login_or_register.html
if user_invite.role == 'admin':
register_form = AdminRegisterForm(request.form)
elif user_invite.role == 'employer':
register_form = EmployerRegisterForm(request.form)
elif user_invite.role == 'employee':
register_form = EmployeeRegisterForm(request.form)
if user_invite:
register_form.invite_token.data = invite_token
if request.method!='POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if user_invite:
register_form.email.data = user_invite.email
if hasattr(db_adapter.UserInvitationClass, 'role'):
register_form.role.data = user_invite.role
# Process valid POST
if request.method=='POST' and register_form.validate():
# Create a User object using Form fields that have a corresponding User field
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {}
# Create a UserEmail object using Form fields that have a corresponding UserEmail field
if db_adapter.UserEmailClass:
UserEmail = db_adapter.UserEmailClass
user_email_class_fields = UserEmail.__dict__
user_email_fields = {}
# Create a UserAuth object using Form fields that have a corresponding UserAuth field
if db_adapter.UserAuthClass:
UserAuth = db_adapter.UserAuthClass
user_auth_class_fields = UserAuth.__dict__
user_auth_fields = {}
Role = db_adapter.RoleClass
role_class_fields = Role.__dict__
role_fields = {}
# Enable user account
if db_adapter.UserProfileClass:
if hasattr(db_adapter.UserProfileClass, 'active'):
user_auth_fields['active'] = True
elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
user_auth_fields['is_enabled'] = True
else:
user_auth_fields['is_active'] = True
else:
if hasattr(db_adapter.UserClass, 'active'):
user_fields['active'] = True
elif hasattr(db_adapter.UserClass, 'is_enabled'):
user_fields['is_enabled'] = True
else:
user_fields['is_active'] = True
# For all form fields
role = None
for field_name, field_value in register_form.data.items():
# Hash password field
if field_name=='password':
hashed_password = user_manager.hash_password(field_value)
if db_adapter.UserAuthClass:
user_auth_fields['password'] = hashed_password
else:
user_fields['password'] = hashed_password
elif field_name == 'role':
role = Role.query.filter(Role.name == field_value).first()
# Store corresponding Form fields into the User object and/or UserProfile object
else:
if field_name in user_class_fields:
user_fields[field_name] = field_value
if db_adapter.UserEmailClass:
if field_name in user_email_class_fields:
user_email_fields[field_name] = field_value
if db_adapter.UserAuthClass:
if field_name in user_auth_class_fields:
user_auth_fields[field_name] = field_value
if user_invite:
user_fields['invited_by'] = user_invite.invited_by
# Add User record using named arguments 'user_fields'
user = db_adapter.add_object(User, **user_fields)
if (role):
user.roles.append(role)
if db_adapter.UserProfileClass:
user_profile = user
# Add UserEmail record using named arguments 'user_email_fields'
if db_adapter.UserEmailClass:
user_email = db_adapter.add_object(UserEmail,
user=user,
is_primary=True,
**user_email_fields)
else:
user_email = None
# Add UserAuth record using named arguments 'user_auth_fields'
if db_adapter.UserAuthClass:
user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
if db_adapter.UserProfileClass:
user = user_auth
else:
user.user_auth = user_auth
require_email_confirmation = True
if user_invite:
if user_invite.email == register_form.email.data:
require_email_confirmation = False
db_adapter.update_object(user, confirmed_at=datetime.utcnow())
# Clear token so invite can only be used once.
user_invite.token = None
db_adapter.commit()
# Send 'registered' email and delete new User object if send fails
if user_manager.send_registered_email:
try:
# Send 'registered' email
_send_registered_email(user, user_email, require_email_confirmation)
except Exception as e:
# delete new User object if send fails
db_adapter.delete_object(user)
db_adapter.commit()
raise
# Send user_registered signal
signals.user_registered.send(current_app._get_current_object(),
user=user,
user_invite=user_invite)
# Redirect if USER_ENABLE_CONFIRM_EMAIL is set
if user_manager.enable_confirm_email and require_email_confirmation:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
return redirect(safe_reg_next)
# Auto-login after register or redirect to login page
if 'reg_next' in request.args:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
else:
safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_register:
return _do_login_user(user, safe_reg_next) # auto-login
else:
return redirect(url_for('user.login')+'?next='+quote(safe_reg_next)) # redirect to login page
# Process GET or invalid POST
return render(user_manager.register_template,
form=register_form,
login_form=login_form,
register_form=register_form)
# ----------------------------------------------------------------
@main_blueprint.route('/user/profile', methods=['GET', 'POST'])
@login_required
def user_profile():
# Initialize form
if current_user.has_role('admin'):
form = AdminProfileForm(request.form)
elif current_user.has_role('employer'):
form = EmployerProfileForm(request.form)
elif current_user.has_role('employee'):
form = EmployeeProfileForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Copy form fields to user_profile fields
form.populate_obj(current_user)
# Save user_profile
db.session.commit()
# Redirect to user_profile page
return redirect(url_for('main.user_profile'))
# Process GET or invalid POST
return render_template('pages/user_profile_page.html', form=form)
# ----------------------------------------------------------------
@main_blueprint.route('/select_role_page')
@login_required
def select_role_page():
if current_user.has_role('admin'):
return redirect(url_for('main.admin_page'))
elif current_user.has_role('employer'):
return redirect(url_for('main.employer_page'))
elif current_user.has_role('employee'):
return redirect(url_for('main.employee_page'))
return redirect(url_for('main.home_page'))
| 41.617849
| 137
| 0.675647
|
from datetime import datetime
from urllib.parse import quote
from flask import current_app, flash
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted
from flask_user.views import _get_safe_next_param, render, _send_registered_email, _endpoint_url, _do_login_user
from flask_user import signals
from webapp import db
from webapp.models.user_models import User, Role, AdminRegisterForm, EmployerRegisterForm, EmployeeRegisterForm
from webapp.models.user_models import AdminProfileForm, EmployerProfileForm, EmployeeProfileForm, SuspendUserForm
from webapp.models.user_models import TrainingVideoForm
from MappingCommon import MappingCommon
main_blueprint = Blueprint('main', __name__, template_folder='templates')
@main_blueprint.route('/')
def base_page():
return redirect(url_for('main.home_page'))
@main_blueprint.route('/home')
def home_page():
return render_template('pages/home_page.html')
@main_blueprint.route('/admin')
@roles_accepted('admin')
@login_required
def admin_page():
return render_template('pages/admin_page.html')
@main_blueprint.route('/admin/list_admins_employers')
@roles_accepted('admin')
@login_required
def list_admins_employers():
users = User.query.filter(User.roles.any((Role.name=='admin') | (Role.name=='employer'))).all()
admin_list = []
employer_list = []
for user in users:
if user.get_roles_string() == 'admin':
admin_list.append((user.last_name, user.first_name, user.email))
elif user.get_roles_string() == 'employer':
employer_list.append((user.company_name, user.last_name, user.first_name, user.email))
admin_list.sort()
employer_list.sort()
return render_template('pages/list_admins_employers_page.html', admin_list=admin_list, employer_list=employer_list)
@main_blueprint.route('/employer/list_employees_by_admin')
@roles_accepted('admin')
@login_required
def list_employees_by_admin():
employers = User.query.filter(User.roles.any(Role.name=='employer')).all()
employer_list = []
for employer in employers:
users = User.query.filter(User.invited_by == employer.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer_list.append((employer.company_name, employee_list))
employer_list.sort()
return render_template('pages/list_employees_by_admin_page.html', employer_list=employer_list)
@main_blueprint.route('/admin/admin_employer_invite')
@roles_accepted('admin')
@login_required
def admin_employer_invite():
return redirect(url_for('user.invite'))
@main_blueprint.route('/admin/suspend_admin_employer_employee', methods=['GET', 'POST'])
@roles_accepted('admin')
@login_required
def suspend_admin_employer_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
if request.method == 'POST' and form.validate():
email = form.email.data
user = User.query.filter(User.email == email).first()
if not user:
flash("No such user", "error")
return redirect(url_for('main.suspend_admin_employer_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
db_adapter.commit()
flash('User has been successfully ' + verb, 'success')
return render_template('pages/suspend_admin_employer_employee_page.html', form=form)
@main_blueprint.route('/employer')
@roles_accepted('employer', 'admin')
@login_required
def employer_page():
return render_template('pages/employer_page.html')
@main_blueprint.route('/employer/list_employees_by_employer')
@roles_accepted('employer')
@login_required
def list_employees_by_employer():
users = User.query.filter(User.invited_by == current_user.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer = User.query.filter(User.id == current_user.id).first()
return render_template('pages/list_employees_by_employer_page.html', company_name=employer.company_name, employee_list=employee_list)
@main_blueprint.route('/employer/employee_invite')
@roles_accepted('employer')
@login_required
def employee_invite():
return redirect(url_for('user.invite'))
@main_blueprint.route('/employer/suspend_employee', methods=['GET', 'POST'])
@roles_accepted('employer')
@login_required
def suspend_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
if request.method == 'POST' and form.validate():
email = form.email.data
user = User.query.filter((User.email == email) & (User.invited_by == current_user.id)).first()
if not user:
flash("No such employee", "error")
return redirect(url_for('main.suspend_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
db_adapter.commit()
flash('Employee has been successfully ' + verb, 'success')
return render_template('pages/suspend_employee_page.html', form=form)
@main_blueprint.route('/employee')
@roles_accepted('employee', 'admin')
@login_required
def employee_page():
return render_template('pages/employee_page.html')
@main_blueprint.route('/employee/training')
@roles_accepted('employee')
@login_required
def training():
trainingForm = TrainingVideoForm(request.form)
mapc = MappingCommon()
videoUrl = mapc.getConfiguration('VideoUrl')
introVideo = mapc.getConfiguration('QualTest_IntroVideo')
introWidth = mapc.getConfiguration('QualTest_IntroVideoWidth')
introHeight = mapc.getConfiguration('QualTest_IntroVideoHeight')
instructionalVideo = mapc.getConfiguration('QualTest_InstructionalVideo')
instructionalWidth = mapc.getConfiguration('QualTest_InstructionalVideoWidth')
instructionalHeight = mapc.getConfiguration('QualTest_InstructionalVideoHeight')
introUrl = "%s/%s" % (videoUrl, introVideo)
instructionalUrl = "%s/%s" % (videoUrl, instructionalVideo)
trainingForm.introUrl.data = introUrl
trainingForm.introWidth.data = introWidth
trainingForm.introHeight.data = introHeight
trainingForm.instructionalUrl.data = instructionalUrl
trainingForm.instructionalWidth.data = instructionalWidth
trainingForm.instructionalHeight.data = instructionalHeight
return render_template('pages/training_page.html', form=trainingForm)
def register():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
invite_token = request.values.get("token")
if user_manager.require_invitation and not invite_token:
flash("Registration is invite only", "error")
return redirect(url_for('user.login'))
user_invite = None
if invite_token and db_adapter.UserInvitationClass:
user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
if user_invite is None:
flash("Invalid invitation token", "error")
return redirect(url_for('user.login'))
login_form = user_manager.login_form()
if user_invite.role == 'admin':
register_form = AdminRegisterForm(request.form)
elif user_invite.role == 'employer':
register_form = EmployerRegisterForm(request.form)
elif user_invite.role == 'employee':
register_form = EmployeeRegisterForm(request.form)
if user_invite:
register_form.invite_token.data = invite_token
if request.method!='POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if user_invite:
register_form.email.data = user_invite.email
if hasattr(db_adapter.UserInvitationClass, 'role'):
register_form.role.data = user_invite.role
if request.method=='POST' and register_form.validate():
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {}
if db_adapter.UserEmailClass:
UserEmail = db_adapter.UserEmailClass
user_email_class_fields = UserEmail.__dict__
user_email_fields = {}
if db_adapter.UserAuthClass:
UserAuth = db_adapter.UserAuthClass
user_auth_class_fields = UserAuth.__dict__
user_auth_fields = {}
Role = db_adapter.RoleClass
role_class_fields = Role.__dict__
role_fields = {}
if db_adapter.UserProfileClass:
if hasattr(db_adapter.UserProfileClass, 'active'):
user_auth_fields['active'] = True
elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
user_auth_fields['is_enabled'] = True
else:
user_auth_fields['is_active'] = True
else:
if hasattr(db_adapter.UserClass, 'active'):
user_fields['active'] = True
elif hasattr(db_adapter.UserClass, 'is_enabled'):
user_fields['is_enabled'] = True
else:
user_fields['is_active'] = True
role = None
for field_name, field_value in register_form.data.items():
if field_name=='password':
hashed_password = user_manager.hash_password(field_value)
if db_adapter.UserAuthClass:
user_auth_fields['password'] = hashed_password
else:
user_fields['password'] = hashed_password
elif field_name == 'role':
role = Role.query.filter(Role.name == field_value).first()
else:
if field_name in user_class_fields:
user_fields[field_name] = field_value
if db_adapter.UserEmailClass:
if field_name in user_email_class_fields:
user_email_fields[field_name] = field_value
if db_adapter.UserAuthClass:
if field_name in user_auth_class_fields:
user_auth_fields[field_name] = field_value
if user_invite:
user_fields['invited_by'] = user_invite.invited_by
user = db_adapter.add_object(User, **user_fields)
if (role):
user.roles.append(role)
if db_adapter.UserProfileClass:
user_profile = user
if db_adapter.UserEmailClass:
user_email = db_adapter.add_object(UserEmail,
user=user,
is_primary=True,
**user_email_fields)
else:
user_email = None
if db_adapter.UserAuthClass:
user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
if db_adapter.UserProfileClass:
user = user_auth
else:
user.user_auth = user_auth
require_email_confirmation = True
if user_invite:
if user_invite.email == register_form.email.data:
require_email_confirmation = False
db_adapter.update_object(user, confirmed_at=datetime.utcnow())
user_invite.token = None
db_adapter.commit()
if user_manager.send_registered_email:
try:
_send_registered_email(user, user_email, require_email_confirmation)
except Exception as e:
db_adapter.delete_object(user)
db_adapter.commit()
raise
signals.user_registered.send(current_app._get_current_object(),
user=user,
user_invite=user_invite)
if user_manager.enable_confirm_email and require_email_confirmation:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
return redirect(safe_reg_next)
if 'reg_next' in request.args:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
else:
safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_register:
return _do_login_user(user, safe_reg_next)
else:
return redirect(url_for('user.login')+'?next='+quote(safe_reg_next))
return render(user_manager.register_template,
form=register_form,
login_form=login_form,
register_form=register_form)
@main_blueprint.route('/user/profile', methods=['GET', 'POST'])
@login_required
def user_profile():
if current_user.has_role('admin'):
form = AdminProfileForm(request.form)
elif current_user.has_role('employer'):
form = EmployerProfileForm(request.form)
elif current_user.has_role('employee'):
form = EmployeeProfileForm(request.form)
if request.method == 'POST' and form.validate():
form.populate_obj(current_user)
db.session.commit()
return redirect(url_for('main.user_profile'))
return render_template('pages/user_profile_page.html', form=form)
@main_blueprint.route('/select_role_page')
@login_required
def select_role_page():
if current_user.has_role('admin'):
return redirect(url_for('main.admin_page'))
elif current_user.has_role('employer'):
return redirect(url_for('main.employer_page'))
elif current_user.has_role('employee'):
return redirect(url_for('main.employee_page'))
return redirect(url_for('main.home_page'))
| true
| true
|
f70b95eb5cf834f80422d263ac7df828a5ca831d
| 73
|
py
|
Python
|
__init__.py
|
dshatz/unionfind
|
4c1f76b344e126ec9f08c5c992a34434ce1150ee
|
[
"MIT"
] | 51
|
2017-06-07T16:44:52.000Z
|
2022-02-12T21:49:18.000Z
|
__init__.py
|
dshatz/unionfind
|
4c1f76b344e126ec9f08c5c992a34434ce1150ee
|
[
"MIT"
] | 3
|
2018-06-14T04:04:05.000Z
|
2021-10-07T18:55:21.000Z
|
__init__.py
|
dshatz/unionfind
|
4c1f76b344e126ec9f08c5c992a34434ce1150ee
|
[
"MIT"
] | 26
|
2018-03-23T18:42:05.000Z
|
2021-09-07T11:29:11.000Z
|
"""
UnionFind disjoint sets data structure.
"""
from . import unionfind
| 12.166667
| 39
| 0.726027
|
from . import unionfind
| true
| true
|
f70b965627fb06acd42bbdb804a082cfe0104a24
| 280
|
py
|
Python
|
core/task/__init__.py
|
HyokaChen/DailyNewsSpider
|
ea70c69fb4cf10130a45e00a148246525571c013
|
[
"MIT"
] | 10
|
2020-07-30T14:46:43.000Z
|
2021-11-16T12:04:01.000Z
|
core/task/__init__.py
|
HyokaChen/DailyNewsSpider
|
ea70c69fb4cf10130a45e00a148246525571c013
|
[
"MIT"
] | null | null | null |
core/task/__init__.py
|
HyokaChen/DailyNewsSpider
|
ea70c69fb4cf10130a45e00a148246525571c013
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File    : __init__.py
@Time : 2020/3/27 22:36
@Author : Empty Chan
@Contact : chen19941018@gmail.com
@Description:
@License : (C) Copyright 2016-2020, iFuture Corporation Limited.
"""
from . import *
| 21.538462
| 68
| 0.603571
|
from . import *
| true
| true
|
f70b99540978fb2f332b286d95f514e1c656d922
| 8,634
|
py
|
Python
|
tests/test_myplex.py
|
jjlawren/python-plexapi
|
9f9e2350006da3c613a71f5bdee07d2c1181d89f
|
[
"BSD-3-Clause"
] | 1
|
2022-01-15T19:02:33.000Z
|
2022-01-15T19:02:33.000Z
|
tests/test_myplex.py
|
jjlawren/python-plexapi
|
9f9e2350006da3c613a71f5bdee07d2c1181d89f
|
[
"BSD-3-Clause"
] | 43
|
2021-07-27T01:31:21.000Z
|
2022-03-30T11:20:55.000Z
|
tests/test_myplex.py
|
Montellese/python-plexapi
|
bd7de4281ec599e096c8991bbd1e583a0d196d8d
|
[
"BSD-3-Clause"
] | 2
|
2020-09-08T21:09:26.000Z
|
2020-09-08T21:44:02.000Z
|
# -*- coding: utf-8 -*-
import pytest
from plexapi.exceptions import BadRequest, NotFound
from . import conftest as utils
def test_myplex_accounts(account, plex):
assert account, "Must specify username, password & resource to run this test."
print("MyPlexAccount:")
print("username: %s" % account.username)
print("email: %s" % account.email)
print("home: %s" % account.home)
print("queueEmail: %s" % account.queueEmail)
assert account.username, "Account has no username"
assert account.authenticationToken, "Account has no authenticationToken"
assert account.email, "Account has no email"
assert account.home is not None, "Account has no home"
assert account.queueEmail, "Account has no queueEmail"
account = plex.account()
print("Local PlexServer.account():")
print("username: %s" % account.username)
# print('authToken: %s' % account.authToken)
print("signInState: %s" % account.signInState)
assert account.username, "Account has no username"
assert account.authToken, "Account has no authToken"
assert account.signInState, "Account has no signInState"
def test_myplex_resources(account):
assert account, "Must specify username, password & resource to run this test."
resources = account.resources()
for resource in resources:
name = resource.name or "Unknown"
connections = [c.uri for c in resource.connections]
connections = ", ".join(connections) if connections else "None"
print("%s (%s): %s" % (name, resource.product, connections))
assert resources, "No resources found for account: %s" % account.name
def test_myplex_connect_to_resource(plex, account):
servername = plex.friendlyName
for resource in account.resources():
if resource.name == servername:
break
assert resource.connect(timeout=10)
def test_myplex_devices(account):
devices = account.devices()
for device in devices:
name = device.name or "Unknown"
connections = ", ".join(device.connections) if device.connections else "None"
print("%s (%s): %s" % (name, device.product, connections))
assert devices, "No devices found for account: %s" % account.name
def test_myplex_device(account, plex):
assert account.device(plex.friendlyName)
def _test_myplex_connect_to_device(account):
devices = account.devices()
for device in devices:
if device.name == "some client name" and len(device.connections):
break
client = device.connect()
assert client, "Unable to connect to device"
def test_myplex_users(account):
users = account.users()
if not len(users):
return pytest.skip("You have to add a shared account into your MyPlex")
print("Found %s users." % len(users))
user = account.user(users[0].title)
print("Found user: %s" % user)
assert user, "Could not find user %s" % users[0].title
assert (
len(users[0].servers[0].sections()) > 0
    ), "Couldn't get info about the shared libraries"
def test_myplex_resource(account, plex):
assert account.resource(plex.friendlyName)
def test_myplex_webhooks(account):
if account.subscriptionActive:
assert isinstance(account.webhooks(), list)
else:
with pytest.raises(BadRequest):
account.webhooks()
def test_myplex_addwebhooks(account):
if account.subscriptionActive:
assert "http://example.com" in account.addWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.addWebhook("http://example.com")
def test_myplex_deletewebhooks(account):
if account.subscriptionActive:
assert "http://example.com" not in account.deleteWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.deleteWebhook("http://example.com")
def test_myplex_optout(account_once):
def enabled():
ele = account_once.query("https://plex.tv/api/v2/user/privacy")
lib = ele.attrib.get("optOutLibraryStats")
play = ele.attrib.get("optOutPlayback")
return bool(int(lib)), bool(int(play))
account_once.optOut(library=True, playback=True)
utils.wait_until(lambda: enabled() == (True, True))
account_once.optOut(library=False, playback=False)
utils.wait_until(lambda: enabled() == (False, False))
@pytest.mark.authenticated
@pytest.mark.xfail(reason="Test account is missing online media sources?")
def test_myplex_onlineMediaSources_optOut(account):
onlineMediaSources = account.onlineMediaSources()
for optOut in onlineMediaSources:
if optOut.key == 'tv.plex.provider.news':
# News is no longer available
continue
optOutValue = optOut.value
optOut.optIn()
assert optOut.value == 'opt_in'
optOut.optOut()
assert optOut.value == 'opt_out'
if optOut.key == 'tv.plex.provider.music':
with pytest.raises(BadRequest):
optOut.optOutManaged()
else:
optOut.optOutManaged()
assert optOut.value == 'opt_out_managed'
# Reset original value
optOut._updateOptOut(optOutValue)
with pytest.raises(NotFound):
onlineMediaSources[0]._updateOptOut('unknown')
def test_myplex_inviteFriend_remove(account, plex, mocker):
inv_user = "hellowlol"
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
with utils.callable_http_patch():
account.inviteFriend(
inv_user,
plex,
secs,
allowSync=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
assert inv_user not in [u.title for u in account.users()]
with pytest.raises(NotFound):
with utils.callable_http_patch():
account.removeFriend(inv_user)
def test_myplex_updateFriend(account, plex, mocker, shared_username):
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
user = account.user(shared_username)
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
mocker.patch.object(account, "user", return_value=user)
with utils.callable_http_patch():
account.updateFriend(
shared_username,
plex,
secs,
allowSync=True,
removeSections=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
def test_myplex_createExistingUser(account, plex, shared_username):
user = account.user(shared_username)
url = "https://plex.tv/api/invites/requested/{}?friend=0&server=0&home=1".format(
user.id
)
account.createExistingUser(user, plex)
assert shared_username in [u.username for u in account.users() if u.home is True]
# Remove Home invite
account.query(url, account._session.delete)
# Confirm user was removed from home and has returned to friend
assert shared_username not in [
u.username for u in plex.myPlexAccount().users() if u.home is True
]
assert shared_username in [
u.username for u in plex.myPlexAccount().users() if u.home is False
]
@pytest.mark.skip(reason="broken test?")
def test_myplex_createHomeUser_remove(account, plex):
homeuser = "New Home User"
account.createHomeUser(homeuser, plex)
assert homeuser in [u.title for u in plex.myPlexAccount().users() if u.home is True]
account.removeHomeUser(homeuser)
assert homeuser not in [
u.title for u in plex.myPlexAccount().users() if u.home is True
]
def test_myplex_plexpass_attributes(account_plexpass):
assert account_plexpass.subscriptionActive
assert account_plexpass.subscriptionStatus == "Active"
assert account_plexpass.subscriptionPlan
assert "sync" in account_plexpass.subscriptionFeatures
assert "premium_music_metadata" in account_plexpass.subscriptionFeatures
assert "plexpass" in account_plexpass.roles
assert utils.ENTITLEMENTS <= set(account_plexpass.entitlements)
def test_myplex_claimToken(account):
assert account.claimToken().startswith("claim-")
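Several tests above poll through utils.wait_until; the conftest implementation is not included in this record, but a plausible polling helper looks like this (names and defaults are illustrative):
import time
def wait_until(condition, timeout=30.0, interval=0.5):
    # Poll `condition` until it returns truthy or `timeout` seconds elapse.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(interval)
    raise AssertionError("condition not met within %.1f seconds" % timeout)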
| 35.097561
| 88
| 0.677322
|
import pytest
from plexapi.exceptions import BadRequest, NotFound
from . import conftest as utils
def test_myplex_accounts(account, plex):
assert account, "Must specify username, password & resource to run this test."
print("MyPlexAccount:")
print("username: %s" % account.username)
print("email: %s" % account.email)
print("home: %s" % account.home)
print("queueEmail: %s" % account.queueEmail)
assert account.username, "Account has no username"
assert account.authenticationToken, "Account has no authenticationToken"
assert account.email, "Account has no email"
assert account.home is not None, "Account has no home"
assert account.queueEmail, "Account has no queueEmail"
account = plex.account()
print("Local PlexServer.account():")
print("username: %s" % account.username)
print("signInState: %s" % account.signInState)
assert account.username, "Account has no username"
assert account.authToken, "Account has no authToken"
assert account.signInState, "Account has no signInState"
def test_myplex_resources(account):
assert account, "Must specify username, password & resource to run this test."
resources = account.resources()
for resource in resources:
name = resource.name or "Unknown"
connections = [c.uri for c in resource.connections]
connections = ", ".join(connections) if connections else "None"
print("%s (%s): %s" % (name, resource.product, connections))
assert resources, "No resources found for account: %s" % account.name
def test_myplex_connect_to_resource(plex, account):
servername = plex.friendlyName
for resource in account.resources():
if resource.name == servername:
break
assert resource.connect(timeout=10)
def test_myplex_devices(account):
devices = account.devices()
for device in devices:
name = device.name or "Unknown"
connections = ", ".join(device.connections) if device.connections else "None"
print("%s (%s): %s" % (name, device.product, connections))
assert devices, "No devices found for account: %s" % account.name
def test_myplex_device(account, plex):
assert account.device(plex.friendlyName)
def _test_myplex_connect_to_device(account):
devices = account.devices()
for device in devices:
if device.name == "some client name" and len(device.connections):
break
client = device.connect()
assert client, "Unable to connect to device"
def test_myplex_users(account):
users = account.users()
if not len(users):
return pytest.skip("You have to add a shared account into your MyPlex")
print("Found %s users." % len(users))
user = account.user(users[0].title)
print("Found user: %s" % user)
assert user, "Could not find user %s" % users[0].title
assert (
len(users[0].servers[0].sections()) > 0
    ), "Couldn't get info about the shared libraries"
def test_myplex_resource(account, plex):
assert account.resource(plex.friendlyName)
def test_myplex_webhooks(account):
if account.subscriptionActive:
assert isinstance(account.webhooks(), list)
else:
with pytest.raises(BadRequest):
account.webhooks()
def test_myplex_addwebhooks(account):
if account.subscriptionActive:
assert "http://example.com" in account.addWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.addWebhook("http://example.com")
def test_myplex_deletewebhooks(account):
if account.subscriptionActive:
assert "http://example.com" not in account.deleteWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.deleteWebhook("http://example.com")
def test_myplex_optout(account_once):
def enabled():
ele = account_once.query("https://plex.tv/api/v2/user/privacy")
lib = ele.attrib.get("optOutLibraryStats")
play = ele.attrib.get("optOutPlayback")
return bool(int(lib)), bool(int(play))
account_once.optOut(library=True, playback=True)
utils.wait_until(lambda: enabled() == (True, True))
account_once.optOut(library=False, playback=False)
utils.wait_until(lambda: enabled() == (False, False))
@pytest.mark.authenticated
@pytest.mark.xfail(reason="Test account is missing online media sources?")
def test_myplex_onlineMediaSources_optOut(account):
onlineMediaSources = account.onlineMediaSources()
for optOut in onlineMediaSources:
if optOut.key == 'tv.plex.provider.news':
# News is no longer available
continue
optOutValue = optOut.value
optOut.optIn()
assert optOut.value == 'opt_in'
optOut.optOut()
assert optOut.value == 'opt_out'
if optOut.key == 'tv.plex.provider.music':
with pytest.raises(BadRequest):
optOut.optOutManaged()
else:
optOut.optOutManaged()
assert optOut.value == 'opt_out_managed'
# Reset original value
optOut._updateOptOut(optOutValue)
with pytest.raises(NotFound):
onlineMediaSources[0]._updateOptOut('unknown')
def test_myplex_inviteFriend_remove(account, plex, mocker):
inv_user = "hellowlol"
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
with utils.callable_http_patch():
account.inviteFriend(
inv_user,
plex,
secs,
allowSync=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
assert inv_user not in [u.title for u in account.users()]
with pytest.raises(NotFound):
with utils.callable_http_patch():
account.removeFriend(inv_user)
def test_myplex_updateFriend(account, plex, mocker, shared_username):
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
user = account.user(shared_username)
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
mocker.patch.object(account, "user", return_value=user)
with utils.callable_http_patch():
account.updateFriend(
shared_username,
plex,
secs,
allowSync=True,
removeSections=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
def test_myplex_createExistingUser(account, plex, shared_username):
user = account.user(shared_username)
url = "https://plex.tv/api/invites/requested/{}?friend=0&server=0&home=1".format(
user.id
)
account.createExistingUser(user, plex)
assert shared_username in [u.username for u in account.users() if u.home is True]
# Remove Home invite
account.query(url, account._session.delete)
# Confirm user was removed from home and has returned to friend
assert shared_username not in [
u.username for u in plex.myPlexAccount().users() if u.home is True
]
assert shared_username in [
u.username for u in plex.myPlexAccount().users() if u.home is False
]
@pytest.mark.skip(reason="broken test?")
def test_myplex_createHomeUser_remove(account, plex):
homeuser = "New Home User"
account.createHomeUser(homeuser, plex)
assert homeuser in [u.title for u in plex.myPlexAccount().users() if u.home is True]
account.removeHomeUser(homeuser)
assert homeuser not in [
u.title for u in plex.myPlexAccount().users() if u.home is True
]
def test_myplex_plexpass_attributes(account_plexpass):
assert account_plexpass.subscriptionActive
assert account_plexpass.subscriptionStatus == "Active"
assert account_plexpass.subscriptionPlan
assert "sync" in account_plexpass.subscriptionFeatures
assert "premium_music_metadata" in account_plexpass.subscriptionFeatures
assert "plexpass" in account_plexpass.roles
assert utils.ENTITLEMENTS <= set(account_plexpass.entitlements)
def test_myplex_claimToken(account):
assert account.claimToken().startswith("claim-")
| true
| true
|
f70b9a2e490b150981301c9d54d99efeb3e5f99f
| 1,970
|
py
|
Python
|
app/service/file_svc.py
|
FumblingBear/caldera
|
adef51b27ac04ab21bab33a3c988965ce69fb0f3
|
[
"Apache-2.0"
] | null | null | null |
app/service/file_svc.py
|
FumblingBear/caldera
|
adef51b27ac04ab21bab33a3c988965ce69fb0f3
|
[
"Apache-2.0"
] | null | null | null |
app/service/file_svc.py
|
FumblingBear/caldera
|
adef51b27ac04ab21bab33a3c988965ce69fb0f3
|
[
"Apache-2.0"
] | null | null | null |
import os
import uuid
from aiohttp import web
from app.utility.logger import Logger
class FileSvc:
def __init__(self, payload_dirs, exfil_dir):
self.payload_dirs = payload_dirs
self.log = Logger('file_svc')
self.exfil_dir = exfil_dir
async def download(self, request):
name = request.headers.get('file')
file_path, headers = await self.find_file(name)
if file_path:
self.log.debug('downloading %s...' % name)
return web.FileResponse(path=file_path, headers=headers)
return web.HTTPNotFound(body='File not found')
async def find_file(self, name):
for store in self.payload_dirs:
for root, dirs, files in os.walk(store):
if name in files:
headers = dict([('CONTENT-DISPOSITION', 'attachment; filename="%s"' % name)])
return os.path.join(root, name), headers
return None, None
async def upload(self, request):
try:
reader = await request.multipart()
exfil_dir = await self._create_unique_exfil_sub_directory()
while True:
field = await reader.next()
if not field:
break
filename = field.filename
with open(os.path.join(exfil_dir, filename), 'wb') as f:
while True:
chunk = await field.read_chunk()
if not chunk:
break
f.write(chunk)
self.log.debug('Uploaded file %s' % filename)
return web.Response()
except Exception as e:
self.log.debug('Exception uploading file %s' % e)
""" PRIVATE """
async def _create_unique_exfil_sub_directory(self):
dir_name = str(uuid.uuid4())
path = os.path.join(self.exfil_dir, dir_name)
os.makedirs(path)
return path
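FileSvc.find_file is an ordinary coroutine, so it can be exercised without running a web server. A short usage sketch, assuming the caldera package (and therefore app.utility.logger) is importable; the directory and file names are placeholders:
import asyncio
svc = FileSvc(payload_dirs=['/tmp/payloads'], exfil_dir='/tmp/exfil')
path, headers = asyncio.run(svc.find_file('payload.sh'))
if path:
    print('found', path, headers['CONTENT-DISPOSITION'])
else:
    print('no such payload')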
| 33.965517
| 97
| 0.553807
|
import os
import uuid
from aiohttp import web
from app.utility.logger import Logger
class FileSvc:
def __init__(self, payload_dirs, exfil_dir):
self.payload_dirs = payload_dirs
self.log = Logger('file_svc')
self.exfil_dir = exfil_dir
async def download(self, request):
name = request.headers.get('file')
file_path, headers = await self.find_file(name)
if file_path:
self.log.debug('downloading %s...' % name)
return web.FileResponse(path=file_path, headers=headers)
return web.HTTPNotFound(body='File not found')
async def find_file(self, name):
for store in self.payload_dirs:
for root, dirs, files in os.walk(store):
if name in files:
headers = dict([('CONTENT-DISPOSITION', 'attachment; filename="%s"' % name)])
return os.path.join(root, name), headers
return None, None
async def upload(self, request):
try:
reader = await request.multipart()
exfil_dir = await self._create_unique_exfil_sub_directory()
while True:
field = await reader.next()
if not field:
break
filename = field.filename
with open(os.path.join(exfil_dir, filename), 'wb') as f:
while True:
chunk = await field.read_chunk()
if not chunk:
break
f.write(chunk)
self.log.debug('Uploaded file %s' % filename)
return web.Response()
except Exception as e:
self.log.debug('Exception uploading file %s' % e)
async def _create_unique_exfil_sub_directory(self):
dir_name = str(uuid.uuid4())
path = os.path.join(self.exfil_dir, dir_name)
os.makedirs(path)
return path
| true
| true
|
f70b9c2b4ad81820ced65c41979ef8e1756fe72a
| 995
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/layers/protocol_acks/protocolentities/ack.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_acks/protocolentities/ack.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_acks/protocolentities/ack.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class AckProtocolEntity(ProtocolEntity):
'''
<ack class="{{receipt | message | ?}}" id="{{message_id}}">
</ack>
'''
def __init__(self, _id, _class):
super(AckProtocolEntity, self).__init__("ack")
self._id = _id
self._class = _class
def getId(self):
return self._id
def getClass(self):
return self._class
def toProtocolTreeNode(self):
attribs = {
"id" : self._id,
"class" : self._class,
}
return self._createProtocolTreeNode(attribs, None, data = None)
def __str__(self):
out = "ACK:\n"
out += "ID: %s\n" % self._id
out += "Class: %s\n" % self._class
return out
@staticmethod
def fromProtocolTreeNode(node):
return AckProtocolEntity(
node.getAttributeValue("id"),
node.getAttributeValue("class")
)
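A short usage sketch exercising the serialization round trip above, assuming the yowsup package is importable (toProtocolTreeNode relies on the ProtocolEntity base class); the id value is a placeholder:
ack = AckProtocolEntity("message-id-123", "receipt")
node = ack.toProtocolTreeNode()
same = AckProtocolEntity.fromProtocolTreeNode(node)
print(same)  # ACK: / ID: message-id-123 / Class: receipt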
| 24.875
| 71
| 0.554774
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class AckProtocolEntity(ProtocolEntity):
def __init__(self, _id, _class):
super(AckProtocolEntity, self).__init__("ack")
self._id = _id
self._class = _class
def getId(self):
return self._id
def getClass(self):
return self._class
def toProtocolTreeNode(self):
attribs = {
"id" : self._id,
"class" : self._class,
}
return self._createProtocolTreeNode(attribs, None, data = None)
def __str__(self):
out = "ACK:\n"
out += "ID: %s\n" % self._id
out += "Class: %s\n" % self._class
return out
@staticmethod
def fromProtocolTreeNode(node):
return AckProtocolEntity(
node.getAttributeValue("id"),
node.getAttributeValue("class")
)
| true
| true
|
f70b9ca9a7409b6984913947757c73688e40b12c
| 775
|
py
|
Python
|
HW01/sha256bruteforce.py
|
ideaPeng/UW-Madison-CS642
|
ae4ce979f9bd55a1807a0809ec84ccb679e71d5c
|
[
"MIT"
] | null | null | null |
HW01/sha256bruteforce.py
|
ideaPeng/UW-Madison-CS642
|
ae4ce979f9bd55a1807a0809ec84ccb679e71d5c
|
[
"MIT"
] | null | null | null |
HW01/sha256bruteforce.py
|
ideaPeng/UW-Madison-CS642
|
ae4ce979f9bd55a1807a0809ec84ccb679e71d5c
|
[
"MIT"
] | 1
|
2021-02-23T03:29:11.000Z
|
2021-02-23T03:29:11.000Z
|
#!/usr/bin/env python3
import hashlib
def main():
print(hashlib.sha256("hugh,13145820,20193833".encode("ascii")).hexdigest())
# 13145820
guess_flag = True
digits = 1
while guess_flag:
bound = 10**digits
guess = 0
while guess < bound:
guess_str = ("hugh,{:0" + str(digits) +
"d},20193833").format(guess)
print(guess_str, end='\r')
result = hashlib.sha256(guess_str.encode("ascii")).hexdigest()
if result == "ee688ca24c201a27fcc94ebd46e87ae6a7c4f54b445fccfc0727a70332353f7f":
print("Right! %s" % guess)
guess_flag = False
break
guess += 1
digits += 1
if __name__ == "__main__":
main()
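Once the loop above reports a hit, the preimage can be verified independently of the search. A one-off check, where the candidate below is a placeholder rather than the actual solution:
import hashlib
candidate = "hugh,0000042,20193833"  # placeholder zero-padded guess
digest = hashlib.sha256(candidate.encode("ascii")).hexdigest()
print(digest == "ee688ca24c201a27fcc94ebd46e87ae6a7c4f54b445fccfc0727a70332353f7f")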
| 28.703704
| 92
| 0.547097
|
import hashlib
def main():
print(hashlib.sha256("hugh,13145820,20193833".encode("ascii")).hexdigest())
guess_flag = True
digits = 1
while guess_flag:
bound = 10**digits
guess = 0
while guess < bound:
guess_str = ("hugh,{:0" + str(digits) +
"d},20193833").format(guess)
print(guess_str, end='\r')
result = hashlib.sha256(guess_str.encode("ascii")).hexdigest()
if result == "ee688ca24c201a27fcc94ebd46e87ae6a7c4f54b445fccfc0727a70332353f7f":
print("Right! %s" % guess)
guess_flag = False
break
guess += 1
digits += 1
if __name__ == "__main__":
main()
| true
| true
|
f70b9dfdf88a8fb2039774b40ccfcf8d12c02620
| 6,479
|
py
|
Python
|
turbinia/workers/analysis/jenkins.py
|
giovannt0/turbinia
|
6733eea42ba3a2442c49aaf933656ace45bd20e1
|
[
"Apache-2.0"
] | 1
|
2021-01-21T19:53:33.000Z
|
2021-01-21T19:53:33.000Z
|
turbinia/workers/analysis/jenkins.py
|
joachimmetz/turbinia
|
f69b34b7da72d9f9eb0d0c4a11e2b8d5443faab8
|
[
"Apache-2.0"
] | null | null | null |
turbinia/workers/analysis/jenkins.py
|
joachimmetz/turbinia
|
f69b34b7da72d9f9eb0d0c4a11e2b8d5443faab8
|
[
"Apache-2.0"
] | 1
|
2019-10-31T10:16:08.000Z
|
2019-10-31T10:16:08.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for analysing Jenkins."""
from __future__ import unicode_literals
import os
import re
from turbinia import TurbiniaException
from turbinia.evidence import ReportText
from turbinia.lib import text_formatter as fmt
from turbinia.workers import TurbiniaTask
from turbinia.workers import Priority
from turbinia.lib.utils import extract_files
from turbinia.lib.utils import bruteforce_password_hashes
class JenkinsAnalysisTask(TurbiniaTask):
"""Task to analyze a Jenkins install."""
def run(self, evidence, result):
"""Run the Jenkins worker.
Args:
evidence (Evidence object): The evidence to process
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
# Where to store the resulting output file.
output_file_name = 'jenkins_analysis.txt'
output_file_path = os.path.join(self.output_dir, output_file_name)
# What type of evidence we should output.
output_evidence = ReportText(source_path=output_file_path)
# TODO(aarontp): We should find a more optimal solution for this because
# this requires traversing the entire filesystem and extracting more files
# than we need. Tracked in https://github.com/google/turbinia/issues/402
try:
collected_artifacts = extract_files(
file_name='config.xml', disk_path=evidence.local_path,
output_dir=os.path.join(self.output_dir, 'artifacts'))
except TurbiniaException as e:
result.close(self, success=False, status=str(e))
return result
jenkins_artifacts = []
jenkins_re = re.compile(r'^.*jenkins[^\/]*(\/users\/[^\/]+)*\/config\.xml$')
for collected_artifact in collected_artifacts:
if re.match(jenkins_re, collected_artifact):
jenkins_artifacts.append(collected_artifact)
version = None
credentials = []
for filepath in jenkins_artifacts:
with open(filepath, 'r') as input_file:
config = input_file.read()
extracted_version = self._extract_jenkins_version(config)
extracted_credentials = self._extract_jenkins_credentials(config)
if extracted_version:
version = extracted_version
credentials.extend(extracted_credentials)
(report, priority, summary) = self.analyze_jenkins(version, credentials)
output_evidence.text_data = report
result.report_data = report
result.report_priority = priority
# Write the report to the output file.
with open(output_file_path, 'wb') as fh:
fh.write(output_evidence.text_data.encode('utf8'))
fh.write('\n'.encode('utf8'))
# Add the resulting evidence to the result object.
result.add_evidence(output_evidence, evidence.config)
result.close(self, success=True, status=summary)
return result
@staticmethod
def _extract_jenkins_version(config):
"""Extract version from Jenkins configuration files.
Args:
config (str): configuration file content.
Returns:
str: The version of Jenkins.
"""
version = None
version_re = re.compile('<version>(.*)</version>')
version_match = re.search(version_re, config)
if version_match:
version = version_match.group(1)
return version
@staticmethod
def _extract_jenkins_credentials(config):
"""Extract credentials from Jenkins configuration files.
Args:
config (str): configuration file content.
Returns:
list: of tuples with username and password hash.
"""
credentials = []
password_hash_re = re.compile('<passwordHash>#jbcrypt:(.*)</passwordHash>')
username_re = re.compile('<fullName>(.*)</fullName>')
password_hash_match = re.search(password_hash_re, config)
username_match = re.search(username_re, config)
if username_match and password_hash_match:
username = username_match.group(1)
password_hash = password_hash_match.group(1)
credentials.append((username, password_hash))
return credentials
@staticmethod
def analyze_jenkins(version, credentials):
"""Analyses a Jenkins configuration.
Args:
version (str): Version of Jenkins.
credentials (list): of tuples with username and password hash.
Returns:
Tuple(
report_text(str): The report data
report_priority(int): The priority of the report (0 - 100)
summary(str): A summary of the report (used for task status)
)
"""
report = []
summary = ''
priority = Priority.LOW
credentials_registry = {hash: username for username, hash in credentials}
# TODO: Add timeout parameter when dynamic configuration is ready.
# Ref: https://github.com/google/turbinia/issues/244
weak_passwords = bruteforce_password_hashes(credentials_registry.keys())
if not version:
version = 'Unknown'
report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))
if weak_passwords:
priority = Priority.CRITICAL
summary = 'Jenkins analysis found potential issues'
report.insert(0, fmt.heading4(fmt.bold(summary)))
line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
report.append(fmt.bullet(fmt.bold(line)))
for password_hash, plaintext in weak_passwords:
line = 'User "{0:s}" with password "{1:s}"'.format(
credentials_registry.get(password_hash), plaintext)
report.append(fmt.bullet(line, level=2))
elif credentials_registry or version != 'Unknown':
summary = (
'Jenkins version {0:s} found with {1:d} credentials, but no issues '
'detected'.format(version, len(credentials_registry)))
report.insert(0, fmt.heading4(summary))
priority = Priority.MEDIUM
else:
summary = 'No Jenkins instance found'
report.insert(0, fmt.heading4(summary))
report = '\n'.join(report)
return (report, priority, summary)
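Both extractors are static methods, so they can be exercised on a toy config without constructing a full Turbinia task (this assumes the turbinia package is importable; the XML below is fabricated test input):
sample = (
    '<user><version>2.138.1</version>'
    '<fullName>admin</fullName>'
    '<passwordHash>#jbcrypt:$2a$10$abcdefghijklmnopqrstuv</passwordHash></user>'
)
print(JenkinsAnalysisTask._extract_jenkins_version(sample))      # '2.138.1'
print(JenkinsAnalysisTask._extract_jenkins_credentials(sample))  # [('admin', '$2a$10$...')]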
| 33.921466
| 80
| 0.703812
|
from __future__ import unicode_literals
import os
import re
from turbinia import TurbiniaException
from turbinia.evidence import ReportText
from turbinia.lib import text_formatter as fmt
from turbinia.workers import TurbiniaTask
from turbinia.workers import Priority
from turbinia.lib.utils import extract_files
from turbinia.lib.utils import bruteforce_password_hashes
class JenkinsAnalysisTask(TurbiniaTask):
def run(self, evidence, result):
output_file_name = 'jenkins_analysis.txt'
output_file_path = os.path.join(self.output_dir, output_file_name)
output_evidence = ReportText(source_path=output_file_path)
try:
collected_artifacts = extract_files(
file_name='config.xml', disk_path=evidence.local_path,
output_dir=os.path.join(self.output_dir, 'artifacts'))
except TurbiniaException as e:
result.close(self, success=False, status=str(e))
return result
jenkins_artifacts = []
jenkins_re = re.compile(r'^.*jenkins[^\/]*(\/users\/[^\/]+)*\/config\.xml$')
for collected_artifact in collected_artifacts:
if re.match(jenkins_re, collected_artifact):
jenkins_artifacts.append(collected_artifact)
version = None
credentials = []
for filepath in jenkins_artifacts:
with open(filepath, 'r') as input_file:
config = input_file.read()
extracted_version = self._extract_jenkins_version(config)
extracted_credentials = self._extract_jenkins_credentials(config)
if extracted_version:
version = extracted_version
credentials.extend(extracted_credentials)
(report, priority, summary) = self.analyze_jenkins(version, credentials)
output_evidence.text_data = report
result.report_data = report
result.report_priority = priority
with open(output_file_path, 'wb') as fh:
fh.write(output_evidence.text_data.encode('utf8'))
fh.write('\n'.encode('utf8'))
result.add_evidence(output_evidence, evidence.config)
result.close(self, success=True, status=summary)
return result
@staticmethod
def _extract_jenkins_version(config):
version = None
version_re = re.compile('<version>(.*)</version>')
version_match = re.search(version_re, config)
if version_match:
version = version_match.group(1)
return version
@staticmethod
def _extract_jenkins_credentials(config):
credentials = []
password_hash_re = re.compile('<passwordHash>#jbcrypt:(.*)</passwordHash>')
username_re = re.compile('<fullName>(.*)</fullName>')
password_hash_match = re.search(password_hash_re, config)
username_match = re.search(username_re, config)
if username_match and password_hash_match:
username = username_match.group(1)
password_hash = password_hash_match.group(1)
credentials.append((username, password_hash))
return credentials
@staticmethod
def analyze_jenkins(version, credentials):
report = []
summary = ''
priority = Priority.LOW
credentials_registry = {hash: username for username, hash in credentials}
weak_passwords = bruteforce_password_hashes(credentials_registry.keys())
if not version:
version = 'Unknown'
report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))
if weak_passwords:
priority = Priority.CRITICAL
summary = 'Jenkins analysis found potential issues'
report.insert(0, fmt.heading4(fmt.bold(summary)))
line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
report.append(fmt.bullet(fmt.bold(line)))
for password_hash, plaintext in weak_passwords:
line = 'User "{0:s}" with password "{1:s}"'.format(
credentials_registry.get(password_hash), plaintext)
report.append(fmt.bullet(line, level=2))
elif credentials_registry or version != 'Unknown':
summary = (
'Jenkins version {0:s} found with {1:d} credentials, but no issues '
'detected'.format(version, len(credentials_registry)))
report.insert(0, fmt.heading4(summary))
priority = Priority.MEDIUM
else:
summary = 'No Jenkins instance found'
report.insert(0, fmt.heading4(summary))
report = '\n'.join(report)
return (report, priority, summary)
| true
| true
|
f70b9eda58e54f0a70a16842e2cf09a28ec76236
| 13,889
|
py
|
Python
|
tangleanalyzer/filter/time.py
|
bingyanglin/tangle-analyzer.py
|
70f43604aa13fdeaeeb15535508532da935e45d3
|
[
"MIT"
] | 1
|
2020-07-27T17:18:03.000Z
|
2020-07-27T17:18:03.000Z
|
tangleanalyzer/filter/time.py
|
bingyanglin/tangle-analyzer.py
|
70f43604aa13fdeaeeb15535508532da935e45d3
|
[
"MIT"
] | null | null | null |
tangleanalyzer/filter/time.py
|
bingyanglin/tangle-analyzer.py
|
70f43604aa13fdeaeeb15535508532da935e45d3
|
[
"MIT"
] | null | null | null |
from typing import Callable
from datetime import datetime, timezone
from time import mktime
from ..common.const import (
MILESTONES_USING_TIMESTAMP_ONLY,
TIMESTAMP_B,
TIMESTAMP_E,
ATCH_TIMESTAMP_B,
ATCH_TIMESTAMP_E
)
from ..common import tryte_to_int
import logging
__all__ = [
'TimeFilter',
]
class TimeFilter():
"""
    Time filter for transactions
    Attributes
    ----------
    min : int
        The earliest Unix epoch time bound for filtering (stored privately as _min)
    max : int
        The latest Unix epoch time bound for filtering (stored privately as _max)
Methods
-------
make_filter()
Return the built time filter
"""
def __init__(self, start_date: str, end_date: str) -> None:
"""
Parameters
----------
start_date : str
The start_date (%Y%m%d) of transaction to monitor (e.g., "20200101")
end_date : str
The end_date (%Y%m%d) of transaction to monitor (e.g., "20200201")
"""
try:
self._min = mktime(datetime.strptime(
start_date, "%Y%m%d").timetuple())
self._max = mktime(datetime.strptime(
end_date, "%Y%m%d").timetuple())
except:
logging.error("Dates {} and {} are not supported!".format(
start_date, end_date))
            logging.error("Please use \"%Y%m%d\" instead, e.g., \"20200101\"")
    def _get_transaction_dmp(self, timestamp: int, attachment_timestamp: int, milestone: str) -> int:
        if milestone in MILESTONES_USING_TIMESTAMP_ONLY:
            return timestamp
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
    def _get_transaction_time(self, timestamp: int, attachment_timestamp: int) -> int:
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
def _time_range_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _time_filter_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _time_filter_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
    def _time_equal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
    def _time_range_with_equal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _time_filter_equal_to_or_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _time_filter_equal_to_or_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _dmptime_range_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _dmptime_filter_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _dmptime_filter_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
    def _dmptime_equal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
    def _dmptime_range_with_equal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _dmptime_filter_equal_to_or_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _dmptime_filter_equal_to_or_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _time_range_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max and t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
    def _time_equal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t == self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
    def _time_range_with_equal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max and t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
    def make_filter(self, range_larger_smaller='R') -> Callable:
        """Time filter generation function.
        Parameters
        ----------
        range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._time_range_filter_str
elif range_larger_smaller == 'm':
return self._time_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._time_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
            return self._time_equal_filter_str
elif range_larger_smaller == 'RE':
            return self._time_range_with_equal_filter_str
elif range_larger_smaller == 'mE':
return self._time_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._time_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
    def make_dmp_filter(self, range_larger_smaller='R') -> Callable:
        """Time filter generation function for dmp data.
        When using this filter, the milestone for each transaction should be indicated.
        Parameters
        ----------
        range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._dmptime_range_filter_str
elif range_larger_smaller == 'm':
return self._dmptime_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._dmptime_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
            return self._dmptime_equal_filter_str
elif range_larger_smaller == 'RE':
            return self._dmptime_range_with_equal_filter_str
elif range_larger_smaller == 'mE':
return self._dmptime_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._dmptime_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
| 38.796089
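A short usage sketch for the TimeFilter class above, assuming the tangleanalyzer package is importable; it selects the inclusive-range mode that the make_filter docstring spells as 'RE':
tf = TimeFilter("20200101", "20200201")
in_range = tf.make_filter('RE')          # min <= time <= max, on raw trytes
in_dmp_range = tf.make_dmp_filter('RE')  # same bound, for dmp records
# in_range expects a transaction tryte string; in_dmp_range expects a
# (tryte_string, milestone) tuple, matching the private helpers above.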
| 101
| 0.606379
|
from typing import Callable
from datetime import datetime, timezone
from time import mktime
from ..common.const import (
MILESTONES_USING_TIMESTAMP_ONLY,
TIMESTAMP_B,
TIMESTAMP_E,
ATCH_TIMESTAMP_B,
ATCH_TIMESTAMP_E
)
from ..common import tryte_to_int
import logging
__all__ = [
'TimeFilter',
]
class TimeFilter():
def __init__(self, start_date: str, end_date: str) -> None:
try:
self._min = mktime(datetime.strptime(
start_date, "%Y%m%d").timetuple())
self._max = mktime(datetime.strptime(
end_date, "%Y%m%d").timetuple())
except:
logging.error("Dates {} and {} are not supported!".format(
start_date, end_date))
            logging.error("Please use \"%Y%m%d\" instead, e.g., \"20200101\"")
    def _get_transaction_dmp(self, timestamp: int, attachment_timestamp: int, milestone: str) -> int:
        if milestone in MILESTONES_USING_TIMESTAMP_ONLY:
            return timestamp
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
    def _get_transaction_time(self, timestamp: int, attachment_timestamp: int) -> int:
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
def _time_range_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _time_filter_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _time_filter_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
    def _time_equal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
    def _time_range_with_equal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _time_filter_equal_to_or_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _time_filter_equal_to_or_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _dmptime_range_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _dmptime_filter_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _dmptime_filter_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
    def _dmptime_equal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
    def _dmptime_range_with_equal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _dmptime_filter_equal_to_or_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _dmptime_filter_equal_to_or_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _time_range_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max and t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
    def _time_equal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t == self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
    def _time_range_with_equal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max and t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def make_filter(self, range_larger_smaller='R') -> Callable:
if range_larger_smaller == 'R':
return self._time_range_filter_str
elif range_larger_smaller == 'm':
return self._time_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._time_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
            return self._time_equal_filter_str
elif range_larger_smaller == 'RE':
            return self._time_range_with_equal_filter_str
elif range_larger_smaller == 'mE':
return self._time_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._time_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
def make_dmp_filter(self, range_larger_smaller='R') -> Callable:
if range_larger_smaller == 'R':
return self._dmptime_range_filter_str
elif range_larger_smaller == 'm':
return self._dmptime_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._dmptime_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
            return self._dmptime_equal_filter_str
elif range_larger_smaller == 'RE':
            return self._dmptime_range_with_equal_filter_str
elif range_larger_smaller == 'mE':
return self._dmptime_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._dmptime_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
| true
| true
|
f70b9f243dd1b2f97a048c91c57187db026d813e
| 4,939
|
py
|
Python
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_associate_group_with_project_permission_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_associate_group_with_project_permission_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_associate_group_with_project_permission_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class KeystoneAssociateGroupWithProjectPermissionRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'group_id': 'str',
'role_id': 'str'
}
attribute_map = {
'project_id': 'project_id',
'group_id': 'group_id',
'role_id': 'role_id'
}
def __init__(self, project_id=None, group_id=None, role_id=None):
"""KeystoneAssociateGroupWithProjectPermissionRequest - a model defined in huaweicloud sdk"""
self._project_id = None
self._group_id = None
self._role_id = None
self.discriminator = None
self.project_id = project_id
self.group_id = group_id
self.role_id = role_id
@property
def project_id(self):
"""Gets the project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        Project ID. For how to obtain it, see [Obtaining the Project Name and Project ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        Project ID. For how to obtain it, see [Obtaining the Project Name and Project ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param project_id: The project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._project_id = project_id
@property
def group_id(self):
"""Gets the group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        User group ID. For how to obtain it, see [Obtaining the User Group ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        User group ID. For how to obtain it, see [Obtaining the User Group ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param group_id: The group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._group_id = group_id
@property
def role_id(self):
"""Gets the role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        Permission ID. For how to obtain it, see [Obtaining the Permission Name and Permission ID](https://support.huaweicloud.com/api-iam/iam_10_0001.html).
:return: The role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._role_id
@role_id.setter
def role_id(self, role_id):
"""Sets the role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
        Permission ID. For how to obtain it, see [Obtaining the Permission Name and Permission ID](https://support.huaweicloud.com/api-iam/iam_10_0001.html).
:param role_id: The role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._role_id = role_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneAssociateGroupWithProjectPermissionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
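# Illustrative usage sketch (editor's addition, not part of the generated SDK file).
# The IDs below are hypothetical placeholders; real values come from the IAM console/API:
#
#   req = KeystoneAssociateGroupWithProjectPermissionRequest(
#       project_id='my-project-id',   # hypothetical
#       group_id='my-group-id',       # hypothetical
#       role_id='my-role-id')         # hypothetical
#   print(req.to_dict())   # {'project_id': 'my-project-id', 'group_id': 'my-group-id', 'role_id': 'my-role-id'}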
| 30.487654
| 101
| 0.617331
|
import pprint
import re
import six
class KeystoneAssociateGroupWithProjectPermissionRequest:
sensitive_list = []
openapi_types = {
'project_id': 'str',
'group_id': 'str',
'role_id': 'str'
}
attribute_map = {
'project_id': 'project_id',
'group_id': 'group_id',
'role_id': 'role_id'
}
def __init__(self, project_id=None, group_id=None, role_id=None):
self._project_id = None
self._group_id = None
self._role_id = None
self.discriminator = None
self.project_id = project_id
self.group_id = group_id
self.role_id = role_id
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, project_id):
self._project_id = project_id
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, group_id):
self._group_id = group_id
@property
def role_id(self):
return self._role_id
@role_id.setter
def role_id(self, role_id):
self._role_id = role_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, KeystoneAssociateGroupWithProjectPermissionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70ba056fcad84e17f9e4cbcc182c6fcaf5951d5
| 5,896
|
py
|
Python
|
tensorflow/python/ops/parallel_for/gradients.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 3
|
2019-11-19T14:07:27.000Z
|
2020-10-04T12:57:40.000Z
|
tensorflow/python/ops/parallel_for/gradients.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 4
|
2020-04-09T16:22:20.000Z
|
2021-12-15T13:57:36.000Z
|
tensorflow/python/ops/parallel_for/gradients.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 4
|
2022-01-13T11:23:44.000Z
|
2022-03-02T11:11:42.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
parallel_iterations: A knob to control how many iterations are vectorized and dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor or a nested structure of tensors with the same structure as
`inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
sparse (IndexedSlices), the jacobian function currently makes it dense and
returns a Tensor instead. This may change in the future.
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
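# Illustrative usage sketch (editor's addition, not part of the original file).
# `jacobian` relies on graph-mode `tf.gradients`, so this assumes TF1-style graph
# execution. For y = x * x with x of shape [3], the Jacobian is diag(2 * x):
#
#   import tensorflow.compat.v1 as tf1
#   tf1.disable_eager_execution()
#   x = tf1.placeholder(tf1.float32, [3])
#   y = x * x
#   j = jacobian(y, x)                           # shape [3, 3]
#   with tf1.Session() as sess:
#       print(sess.run(j, {x: [1., 2., 3.]}))    # diag([2., 4., 6.])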
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None, when use_pfor is
true, corresponds to vectorizing all the iterations. When use_pfor is
false, the default value of None corresponds to parallel_iterations=10.
This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError(f"Need first dimension of `output` shape ({output.shape}) "
f"and `inp` shape ({inp.shape}) to match.")
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
| 39.837838
| 80
| 0.690807
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError(f"Need first dimension of `output` shape ({output.shape}) "
f"and `inp` shape ({inp.shape}) to match.")
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
| true
| true
|
f70ba09090c619abec9379a4d93b0b6aea65b856
| 15,804
|
py
|
Python
|
classification/models/model23.py
|
anonymous2submit/Pointsformer
|
0eaa141b3d79d45cd925976bde6097b51e0d3819
|
[
"MIT"
] | null | null | null |
classification/models/model23.py
|
anonymous2submit/Pointsformer
|
0eaa141b3d79d45cd925976bde6097b51e0d3819
|
[
"MIT"
] | null | null | null |
classification/models/model23.py
|
anonymous2submit/Pointsformer
|
0eaa141b3d79d45cd925976bde6097b51e0d3819
|
[
"MIT"
] | null | null | null |
"""
Exactly equal to Model21 (the best results so far), but with different configurations.
Exactly based on Model10, but with ReLU replaced by GELU.
Based on Model8, adds dropout and a max/avg pooling combination.
Based on the Local model, adds residual connections.
The extraction is doubled for depth.
Learning Point Cloud with Progressively Local representation.
[B,3,N] - {[B,G,K,d]-[B,G,d]} - {[B,G',K,d]-[B,G',d]} -cls
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
from pointnet2_ops import pointnet2_utils
def square_distance(src, dst):
"""
Calculate Euclid distance between each two points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
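# Quick numerical check of the identity above (editor's illustration, not part of
# the original file): ||s - d||^2 = ||s||^2 + ||d||^2 - 2 * s . d
#
#   src, dst = torch.rand(2, 5, 3), torch.rand(2, 7, 3)
#   ref = ((src[:, :, None, :] - dst[:, None, :, :]) ** 2).sum(-1)   # [2, 5, 7]
#   assert torch.allclose(square_distance(src, dst), ref, atol=1e-5)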
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
distance = torch.min(distance, dist)
farthest = torch.max(distance, -1)[1]
return centroids
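# Shape sketch for FPS plus gathering (editor's illustration, not part of the original file):
#
#   xyz = torch.rand(4, 1024, 3)
#   idx = farthest_point_sample(xyz, 128)   # [4, 128] long indices into the point dimension
#   centers = index_points(xyz, idx)        # [4, 128, 3] sampled centroids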
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def knn_point(nsample, xyz, new_xyz):
"""
Input:
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
return group_idx
class LocalGrouper(nn.Module):
def __init__(self, groups, kneighbors, **kwargs):
"""
Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]
:param groups: groups number
:param kneighbors: k-neighbors
:param kwargs: others
"""
super(LocalGrouper, self).__init__()
self.groups = groups
self.kneighbors = kneighbors
def forward(self, xyz, points):
B, N, C = xyz.shape
S = self.groups
xyz = xyz.contiguous() # xyz [batch, points, xyz]
# fps_idx = farthest_point_sample(xyz, self.groups).long()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]
new_xyz = index_points(xyz, fps_idx)
new_points = index_points(points, fps_idx)
idx = knn_point(self.kneighbors, xyz, new_xyz)
# idx = query_ball_point(radius, nsample, xyz, new_xyz)
# grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_points = index_points(points, idx)
grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)
new_points = torch.cat([grouped_points_norm,
new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]
, dim=-1)
return new_xyz, new_points
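# Shape sketch (editor's illustration, not part of the original file;
# pointnet2_utils.furthest_point_sample is a CUDA op, so a GPU is assumed):
#
#   xyz = torch.rand(2, 1024, 3).cuda()
#   fea = torch.rand(2, 1024, 64).cuda()
#   grouper = LocalGrouper(groups=512, kneighbors=32)
#   new_xyz, new_fea = grouper(xyz, fea)    # [2, 512, 3], [2, 512, 32, 128] (2d = 128)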
class FCBNReLU1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):
super(FCBNReLU1D, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(out_channels),
nn.GELU()
)
def forward(self, x):
return self.net(x)
class FCBNReLU1DRes(nn.Module):
def __init__(self, channel, kernel_size=1, bias=False):
super(FCBNReLU1DRes, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel),
nn.GELU(),
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel)
)
def forward(self, x):
return F.gelu(self.net(x)+x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
# project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Conv1d(inner_dim, dim,1),
nn.BatchNorm1d(dim)
)
def forward(self, x):
x = x.permute(0,2,1)
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(self, dim, heads=8, dim_head=32, **kwargs):
"""
[b batch, d dimension, k points]
:param dim: input data dimension
:param heads: heads number
:param dim_head: dimension in each head
:param kwargs:
"""
super(TransformerBlock, self).__init__()
self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)
self.ffn = nn.Sequential(
nn.Conv1d(dim, dim, 1, bias=False),
nn.BatchNorm1d(dim)
)
def forward(self, x):
"""
:input x: [b batch, d dimension, p points,]
:return: [b batch, d dimension, p points,]
"""
att = self.attention(x)
att = F.gelu(att+x)
out = self.ffn(att)
out = F.gelu(att+out)
return out
class PreExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
input: [b,g,k,d]: output:[b,d,g]
:param channels:
:param blocks:
"""
super(PreExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = self.operation(x) # [b, d, k]
x = self.transformer(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class PosExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
input[b,d,g]; output[b,d,g]
:param channels:
:param blocks:
"""
super(PosExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x): # [b, d, k]
return self.transformer(self.operation(x))
class Model23(nn.Module):
def __init__(self, points=1024, class_num=40, embed_dim=64,
pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],
reducers=[2,2,2,2], **kwargs):
super(Model23, self).__init__()
self.stages = len(pre_blocks)
self.class_num = class_num
self.points=points
self.embedding = nn.Sequential(
FCBNReLU1D(3, embed_dim),
FCBNReLU1D(embed_dim, embed_dim)
)
assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \
"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers."
self.local_grouper_list = nn.ModuleList()
self.pre_blocks_list = nn.ModuleList()
self.pos_blocks_list = nn.ModuleList()
last_channel = embed_dim
anchor_points = self.points
for i in range(len(pre_blocks)):
out_channel = last_channel*2
pre_block_num=pre_blocks[i]
pos_block_num = pos_blocks[i]
kneighbor = k_neighbors[i]
reduce = reducers[i]
anchor_points = anchor_points//reduce
# append local_grouper_list
local_grouper = LocalGrouper(anchor_points, kneighbor) #[b,g,k,d]
self.local_grouper_list.append(local_grouper)
# append pre_block_list
pre_block_module = PreExtraction(out_channel, pre_block_num)
self.pre_blocks_list.append(pre_block_module)
# append pos_block_list
pos_block_module = PosExtraction(out_channel, pos_block_num)
self.pos_blocks_list.append(pos_block_module)
last_channel = out_channel
self.classifier = nn.Sequential(
nn.Linear(last_channel*2, 512),
nn.BatchNorm1d(512),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(256, self.class_num)
)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
x = self.embedding(x) # B,D,N
for i in range(self.stages):
xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]
x = self.pre_blocks_list[i](x) # [b,d,g]
x = self.pos_blocks_list[i](x) # [b,d,g]
x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)
x_mean = x.mean(dim=-1,keepdim=False)
x = torch.cat([x_max, x_mean], dim=-1)
x = self.classifier(x)
return x
def model23A(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23B(num_classes=40, **kwargs) -> Model23: # 19185MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23C(num_classes=40, **kwargs) -> Model23: # 19537MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],
reducers=[4,2,2], **kwargs)
def model23D(num_classes=40, **kwargs) -> Model23: # 31927MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],
reducers=[2,2,2], **kwargs)
def model23E(num_classes=40, **kwargs) -> Model23: # 19215MiB # 93.476% on vis server
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23F(num_classes=40, **kwargs) -> Model23: # 6437MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],
reducers=[4,4], **kwargs)
def model23G(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],
reducers=[4,4], **kwargs)
# don't train H, it is the same as model21H
def model23H(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB
return Model23(points=1024, class_num=num_classes, embed_dim=256,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
# Extremely large model, 101 layers in total.
def model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],
reducers=[4,2,2,2], **kwargs)
# Also an extremely large model, 101 layers in total.
def model23K(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
if __name__ == '__main__':
data = torch.rand(2,128,10)
att = Attention(128)
out = att(data)
print(out.shape)
batch, groups,neighbors,dim=2,512,32,16
x = torch.rand(batch,groups,neighbors,dim)
pre_extractor = PreExtraction(dim,3)
out = pre_extractor(x)
print(out.shape)
x = torch.rand(batch, dim, groups)
pos_extractor = PosExtraction(dim, 3)
out = pos_extractor(x)
print(out.shape)
data = torch.rand(2, 3, 1024)
print("===> testing model ...")
model = Model23()
out = model(data)
print(out.shape)
print("===> testing modelE ...")
model = model23E()
out = model(data)
print(out.shape)
| 35.198218
| 110
| 0.590926
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
from pointnet2_ops import pointnet2_utils
def square_distance(src, dst):
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
def index_points(points, idx):
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
distance = torch.min(distance, dist)
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def knn_point(nsample, xyz, new_xyz):
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
return group_idx
class LocalGrouper(nn.Module):
def __init__(self, groups, kneighbors, **kwargs):
super(LocalGrouper, self).__init__()
self.groups = groups
self.kneighbors = kneighbors
def forward(self, xyz, points):
B, N, C = xyz.shape
S = self.groups
xyz = xyz.contiguous()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long()
new_xyz = index_points(xyz, fps_idx)
new_points = index_points(points, fps_idx)
idx = knn_point(self.kneighbors, xyz, new_xyz)
grouped_points = index_points(points, idx)
grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)
new_points = torch.cat([grouped_points_norm,
new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]
, dim=-1)
return new_xyz, new_points
class FCBNReLU1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):
super(FCBNReLU1D, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(out_channels),
nn.GELU()
)
def forward(self, x):
return self.net(x)
class FCBNReLU1DRes(nn.Module):
def __init__(self, channel, kernel_size=1, bias=False):
super(FCBNReLU1DRes, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel),
nn.GELU(),
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel)
)
def forward(self, x):
return F.gelu(self.net(x)+x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Conv1d(inner_dim, dim,1),
nn.BatchNorm1d(dim)
)
def forward(self, x):
x = x.permute(0,2,1)
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(self, dim, heads=8, dim_head=32, **kwargs):
super(TransformerBlock, self).__init__()
self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)
self.ffn = nn.Sequential(
nn.Conv1d(dim, dim, 1, bias=False),
nn.BatchNorm1d(dim)
)
def forward(self, x):
att = self.attention(x)
att = F.gelu(att+x)
out = self.ffn(att)
out = F.gelu(att+out)
return out
class PreExtraction(nn.Module):
def __init__(self, channels, blocks=1):
super(PreExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x):
b, n, s, d = x.size()
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = self.operation(x)
x = self.transformer(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class PosExtraction(nn.Module):
def __init__(self, channels, blocks=1):
super(PosExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x):
return self.transformer(self.operation(x))
class Model23(nn.Module):
def __init__(self, points=1024, class_num=40, embed_dim=64,
pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],
reducers=[2,2,2,2], **kwargs):
super(Model23, self).__init__()
self.stages = len(pre_blocks)
self.class_num = class_num
self.points=points
self.embedding = nn.Sequential(
FCBNReLU1D(3, embed_dim),
FCBNReLU1D(embed_dim, embed_dim)
)
assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \
"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers."
self.local_grouper_list = nn.ModuleList()
self.pre_blocks_list = nn.ModuleList()
self.pos_blocks_list = nn.ModuleList()
last_channel = embed_dim
anchor_points = self.points
for i in range(len(pre_blocks)):
out_channel = last_channel*2
pre_block_num=pre_blocks[i]
pos_block_num = pos_blocks[i]
kneighbor = k_neighbors[i]
reduce = reducers[i]
anchor_points = anchor_points//reduce
local_grouper = LocalGrouper(anchor_points, kneighbor)
self.local_grouper_list.append(local_grouper)
pre_block_module = PreExtraction(out_channel, pre_block_num)
self.pre_blocks_list.append(pre_block_module)
pos_block_module = PosExtraction(out_channel, pos_block_num)
self.pos_blocks_list.append(pos_block_module)
last_channel = out_channel
self.classifier = nn.Sequential(
nn.Linear(last_channel*2, 512),
nn.BatchNorm1d(512),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(256, self.class_num)
)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
x = self.embedding(x)
for i in range(self.stages):
xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1))
x = self.pre_blocks_list[i](x)
x = self.pos_blocks_list[i](x)
x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)
x_mean = x.mean(dim=-1,keepdim=False)
x = torch.cat([x_max, x_mean], dim=-1)
x = self.classifier(x)
return x
def model23A(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23B(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23C(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],
reducers=[4,2,2], **kwargs)
def model23D(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],
reducers=[2,2,2], **kwargs)
def model23E(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23F(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],
reducers=[4,4], **kwargs)
def model23G(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],
reducers=[4,4], **kwargs)
def model23H(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB
return Model23(points=1024, class_num=num_classes, embed_dim=256,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
# Extremely large model, 101 layers in total.
def model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],
reducers=[4,2,2,2], **kwargs)
# Also an extremely large model, 101 layers in total.
def model23K(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
if __name__ == '__main__':
data = torch.rand(2,128,10)
att = Attention(128)
out = att(data)
print(out.shape)
batch, groups,neighbors,dim=2,512,32,16
x = torch.rand(batch,groups,neighbors,dim)
pre_extractor = PreExtraction(dim,3)
out = pre_extractor(x)
print(out.shape)
x = torch.rand(batch, dim, groups)
pos_extractor = PosExtraction(dim, 3)
out = pos_extractor(x)
print(out.shape)
data = torch.rand(2, 3, 1024)
print("===> testing model ...")
model = Model23()
out = model(data)
print(out.shape)
print("===> testing modelE ...")
model = model23E()
out = model(data)
print(out.shape)
| true
| true
|
f70ba0b9684f17eb9c083521f1cbfc758a822d2e
| 1,013
|
py
|
Python
|
source/data_preparation/00b-generate_assembly_nochain.py
|
hui2000ji/masif
|
70a76c5f4639f70c546d5603612c7cc9f47a35b8
|
[
"Apache-2.0"
] | null | null | null |
source/data_preparation/00b-generate_assembly_nochain.py
|
hui2000ji/masif
|
70a76c5f4639f70c546d5603612c7cc9f47a35b8
|
[
"Apache-2.0"
] | null | null | null |
source/data_preparation/00b-generate_assembly_nochain.py
|
hui2000ji/masif
|
70a76c5f4639f70c546d5603612c7cc9f47a35b8
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from SBI.structure import PDB
from default_config.masif_opts import masif_opts
print(masif_opts["ligand"]["assembly_dir"])
if not os.path.exists(masif_opts["ligand"]["assembly_dir"]):
os.mkdir(masif_opts["ligand"]["assembly_dir"])
def assemble(pdb_id):
# Reads and builds the biological assembly of a structure
print(os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)))
struct = PDB(
os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)), header=True
)
try:
struct_assembly = struct.apply_biomolecule_matrices()[0]
except Exception:
return 0
struct_assembly.write(
os.path.join(masif_opts["ligand"]["assembly_dir"], "{}.pdb".format(pdb_id))
)
return 1
pdb_id = sys.argv[1]
res = assemble(pdb_id)
if res:
print("Building assembly was successfull for {}".format(pdb_id))
else:
print("Building assembly was not successfull for {}".format(pdb_id))
| 30.69697
| 104
| 0.686081
|
import os
import sys
from SBI.structure import PDB
from default_config.masif_opts import masif_opts
print(masif_opts["ligand"]["assembly_dir"])
if not os.path.exists(masif_opts["ligand"]["assembly_dir"]):
os.mkdir(masif_opts["ligand"]["assembly_dir"])
def assemble(pdb_id):
print(os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)))
struct = PDB(
os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)), header=True
)
try:
struct_assembly = struct.apply_biomolecule_matrices()[0]
except Exception:
return 0
struct_assembly.write(
os.path.join(masif_opts["ligand"]["assembly_dir"], "{}.pdb".format(pdb_id))
)
return 1
pdb_id = sys.argv[1]
res = assemble(pdb_id)
if res:
print("Building assembly was successfull for {}".format(pdb_id))
else:
print("Building assembly was not successfull for {}".format(pdb_id))
| true
| true
|
f70ba0cd1ceec91a3daa39c0d4e996db44d6f725
| 13,271
|
py
|
Python
|
log_mito_act/model_57.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito_act/model_57.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito_act/model_57.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 14250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
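# Illustrative simulation sketch (editor's addition, not part of the exported model).
# PySB's Model() self-exports a module-level `model`; the time span and the observable
# chosen below are assumptions for demonstration only:
#
#   import numpy as np
#   from pysb.simulator import ScipyOdeSimulator
#   tspan = np.linspace(0, 20000, 101)
#   result = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(result.observables['ParpC_obs'][-1])   # cleaved PARP at the final time point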
| 79.467066
| 614
| 0.809736
|
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 14250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
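# --- Hedged usage sketch (comments only, not part of the generated model) ---
# The Parameter/Observable/Rule/Initial statements above assume an enclosing
# PySB Model() context defined earlier in the script. Assuming the module-level
# `model` object that PySB creates, a minimal simulation could look like this:
#
#   import numpy as np
#   from pysb.simulator import ScipyOdeSimulator
#
#   tspan = np.linspace(0, 20000, 101)  # illustrative time span in seconds
#   result = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(result.observables['ParpC_obs'][-1])  # cleaved PARP at end of run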
| true
| true
|
f70ba1c7f2cb64fa136136f269dd626b70b2a811
| 13,693
|
py
|
Python
|
tensorflow/python/framework/sparse_tensor_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/framework/sparse_tensor_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/framework/sparse_tensor_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.sparse_tensor."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.dense_shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
sp_value = self.evaluate(sp)
self.assertAllEqual(sp_value.indices, value.indices)
self.assertAllEqual(sp_value.values, value.values)
self.assertAllEqual(sp_value.dense_shape, value.dense_shape)
def testShape(self):
@def_function.function
def test_fn(tensor):
tensor = sparse_ops.sparse_transpose(tensor)
self.assertEqual(tensor.shape.rank, 2)
return tensor
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
test_fn(tensor)
def testIsSparse(self):
self.assertFalse(sparse_tensor.is_sparse(3))
self.assertFalse(sparse_tensor.is_sparse("foo"))
self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
self.assertTrue(
sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
self.assertTrue(
sparse_tensor.is_sparse(
sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testConsumers(self):
with context.graph_mode():
sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
self.assertEqual(len(sp.consumers()), 1)
self.assertEqual(sp.consumers()[0], out.op)
dense = sparse_ops.sparse_tensor_to_dense(sp)
self.assertEqual(len(sp.consumers()), 2)
self.assertIn(dense.op, sp.consumers())
self.assertIn(out.op, sp.consumers())
def testWithValues(self):
source = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
new_tensor = source.with_values([5.0, 1.0])
self.assertAllEqual(new_tensor.indices, source.indices)
self.assertAllEqual(new_tensor.values, [5.0, 1.0])
self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)
# ensure new value's shape is checked
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
source.with_values([[5.0, 1.0]])
class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
def test_convert_dense(self):
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, self.evaluate(from_value))
def test_convert_sparse(self):
indices = [[0, 1], [1, 0]]
values = [42, 43]
shape = [2, 2]
sparse_tensor_value = sparse_tensor.SparseTensorValue(
indices, values, shape)
st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
from_value = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))
from_tensor = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(st))
for convertee in [from_value, from_tensor]:
self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
self.assertAllEqual(sparse_tensor_value.values, convertee.values)
self.assertAllEqual(
sparse_tensor_value.dense_shape, convertee.dense_shape)
class SparseTensorShapeTest(test_util.TensorFlowTestCase):
def test_simple(self):
indices = [[0, 2]]
values = [1]
dense_shape = [5, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertIsInstance(sp.shape, tensor_shape.TensorShape)
self.assertIsInstance(sp.dense_shape, ops.Tensor)
self.assertEqual(sp.shape.as_list(), [5, 5])
def test_unknown_shape(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 2]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(
dtype=dtypes.int64, shape=[2,]))
def test_partial_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 2]]
values = [1]
y = ops.convert_to_tensor(3, dtype=dtypes.int64)
dense_shape = [x, y]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 3])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))
def test_neg_shape(self):
indices = [[0, 2]]
values = [1]
dense_shape = [-1, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 5])
def test_unknown_tensor_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 0]]
values = [1]
dense_shape = array_ops.shape(x)
dense_shape = math_ops.cast(dense_shape, dtypes.int64)
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))
def test_unknown_rank(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 0]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.rank, None)
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))
@test_util.run_all_in_graph_and_eager_modes
class SparseTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.shape.rank, None)
self.assertEqual(spec1.dtype, dtypes.float32)
spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)
self.assertEqual(spec2.shape.as_list(), [None, None])
self.assertEqual(spec2.dtype, dtypes.string)
def testValueType(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(),
(tensor_shape.TensorShape(None), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),
(tensor_shape.TensorShape(None), dtypes.int32)),
]) # pyformat: disable
def testSerialize(self, st_spec, expected):
serialization = st_spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [
tensor_spec.TensorSpec([None, None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64)
]),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [
tensor_spec.TensorSpec([None, 3], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.float32),
tensor_spec.TensorSpec([3], dtypes.int64)
]),
])
def testComponentSpecs(self, st_spec, expected):
self.assertEqual(st_spec._component_specs, expected)
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromComponents(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
actual_components = st_spec._to_components(st)
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = st_spec._from_components(actual_components)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("SparseTensorValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([[0], [8]])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec = sparse_tensor.SparseTensorSpec()
st = spec._from_components([indices, values, dense_shape])
self.assertIsInstance(st, sparse_tensor.SparseTensorValue)
self.assertAllEqual(st.indices, indices)
self.assertAllEqual(st.values, values)
self.assertAllEqual(st.dense_shape, dense_shape)
@parameterized.parameters([
sparse_tensor.SparseTensorSpec(dtype=dtypes.string),
sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
])
def testFlatTensorSpecs(self, st_spec):
self.assertEqual(st_spec._flat_tensor_specs,
[tensor_spec.TensorSpec(None, dtypes.variant)])
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromTensorList(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
tensor_list = st_spec._to_tensor_list(st)
st_reconstructed = st_spec._from_tensor_list(tensor_list)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),
])
def testBatch(self, spec, batch_size, expected):
self.assertEqual(spec._batch(batch_size), expected)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),
sparse_tensor.SparseTensorSpec([2], dtypes.float32)),
])
def testUnbatch(self, spec, expected):
self.assertEqual(spec._unbatch(), expected)
if __name__ == "__main__":
googletest.main()
| 38.463483
| 80
| 0.693274
|
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.dense_shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
sp_value = self.evaluate(sp)
self.assertAllEqual(sp_value.indices, value.indices)
self.assertAllEqual(sp_value.values, value.values)
self.assertAllEqual(sp_value.dense_shape, value.dense_shape)
def testShape(self):
@def_function.function
def test_fn(tensor):
tensor = sparse_ops.sparse_transpose(tensor)
self.assertEqual(tensor.shape.rank, 2)
return tensor
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
test_fn(tensor)
def testIsSparse(self):
self.assertFalse(sparse_tensor.is_sparse(3))
self.assertFalse(sparse_tensor.is_sparse("foo"))
self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
self.assertTrue(
sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
self.assertTrue(
sparse_tensor.is_sparse(
sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testConsumers(self):
with context.graph_mode():
sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
self.assertEqual(len(sp.consumers()), 1)
self.assertEqual(sp.consumers()[0], out.op)
dense = sparse_ops.sparse_tensor_to_dense(sp)
self.assertEqual(len(sp.consumers()), 2)
self.assertIn(dense.op, sp.consumers())
self.assertIn(out.op, sp.consumers())
def testWithValues(self):
source = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
new_tensor = source.with_values([5.0, 1.0])
self.assertAllEqual(new_tensor.indices, source.indices)
self.assertAllEqual(new_tensor.values, [5.0, 1.0])
self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
source.with_values([[5.0, 1.0]])
class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
def test_convert_dense(self):
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, self.evaluate(from_value))
def test_convert_sparse(self):
indices = [[0, 1], [1, 0]]
values = [42, 43]
shape = [2, 2]
sparse_tensor_value = sparse_tensor.SparseTensorValue(
indices, values, shape)
st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
from_value = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))
from_tensor = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(st))
for convertee in [from_value, from_tensor]:
self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
self.assertAllEqual(sparse_tensor_value.values, convertee.values)
self.assertAllEqual(
sparse_tensor_value.dense_shape, convertee.dense_shape)
class SparseTensorShapeTest(test_util.TensorFlowTestCase):
def test_simple(self):
indices = [[0, 2]]
values = [1]
dense_shape = [5, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertIsInstance(sp.shape, tensor_shape.TensorShape)
self.assertIsInstance(sp.dense_shape, ops.Tensor)
self.assertEqual(sp.shape.as_list(), [5, 5])
def test_unknown_shape(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 2]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(
dtype=dtypes.int64, shape=[2,]))
def test_partial_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 2]]
values = [1]
y = ops.convert_to_tensor(3, dtype=dtypes.int64)
dense_shape = [x, y]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 3])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))
def test_neg_shape(self):
indices = [[0, 2]]
values = [1]
dense_shape = [-1, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 5])
def test_unknown_tensor_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 0]]
values = [1]
dense_shape = array_ops.shape(x)
dense_shape = math_ops.cast(dense_shape, dtypes.int64)
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))
def test_unknown_rank(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 0]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.rank, None)
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))
@test_util.run_all_in_graph_and_eager_modes
class SparseTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.shape.rank, None)
self.assertEqual(spec1.dtype, dtypes.float32)
spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)
self.assertEqual(spec2.shape.as_list(), [None, None])
self.assertEqual(spec2.dtype, dtypes.string)
def testValueType(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(),
(tensor_shape.TensorShape(None), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),
(tensor_shape.TensorShape(None), dtypes.int32)),
]) # pyformat: disable
def testSerialize(self, st_spec, expected):
serialization = st_spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [
tensor_spec.TensorSpec([None, None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64)
]),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [
tensor_spec.TensorSpec([None, 3], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.float32),
tensor_spec.TensorSpec([3], dtypes.int64)
]),
])
def testComponentSpecs(self, st_spec, expected):
self.assertEqual(st_spec._component_specs, expected)
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromComponents(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
actual_components = st_spec._to_components(st)
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = st_spec._from_components(actual_components)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("SparseTensorValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([[0], [8]])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec = sparse_tensor.SparseTensorSpec()
st = spec._from_components([indices, values, dense_shape])
self.assertIsInstance(st, sparse_tensor.SparseTensorValue)
self.assertAllEqual(st.indices, indices)
self.assertAllEqual(st.values, values)
self.assertAllEqual(st.dense_shape, dense_shape)
@parameterized.parameters([
sparse_tensor.SparseTensorSpec(dtype=dtypes.string),
sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
])
def testFlatTensorSpecs(self, st_spec):
self.assertEqual(st_spec._flat_tensor_specs,
[tensor_spec.TensorSpec(None, dtypes.variant)])
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromTensorList(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
tensor_list = st_spec._to_tensor_list(st)
st_reconstructed = st_spec._from_tensor_list(tensor_list)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),
])
def testBatch(self, spec, batch_size, expected):
self.assertEqual(spec._batch(batch_size), expected)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),
sparse_tensor.SparseTensorSpec([2], dtypes.float32)),
])
def testUnbatch(self, spec, expected):
self.assertEqual(spec._unbatch(), expected)
if __name__ == "__main__":
googletest.main()
| true
| true
|
f70ba1fbd0b88f8e278bb8592c895d1b36e75c20
| 11,615
|
py
|
Python
|
devicely/empatica.py
|
DigitalBiomarkerDiscoveryPipeline/devicely
|
9773fead4d3969a32ca2760b8db4ae728c4d5d50
|
[
"MIT"
] | 13
|
2020-07-13T22:26:25.000Z
|
2022-03-18T17:40:56.000Z
|
devicely/empatica.py
|
DigitalBiomarkerDiscoveryPipeline/devicely
|
9773fead4d3969a32ca2760b8db4ae728c4d5d50
|
[
"MIT"
] | 26
|
2020-11-29T11:11:09.000Z
|
2022-01-12T11:34:19.000Z
|
devicely/empatica.py
|
DigitalBiomarkerDiscoveryPipeline/devicely
|
9773fead4d3969a32ca2760b8db4ae728c4d5d50
|
[
"MIT"
] | 5
|
2021-07-26T11:01:38.000Z
|
2022-02-22T18:23:57.000Z
|
"""
Empatica E4 is a wearable device that offers real-time physiological data
acquisition such as blood volume pulse, electrodermal activity (EDA), heart
rate, interbeat intervals, 3-axis acceleration and skin temperature.
"""
import os
import random
import numpy as np
import pandas as pd
class EmpaticaReader:
"""
Read, timeshift and write data generated by Empatica E4.
Attributes
    ----------
    start_times : dict
        Contains the timestamp of the first measurement for every
        measured signal (BVP, ACC, etc.).
    sample_freqs : dict
        Contains the sampling frequency of every measured signal
        in Hz.
    IBI : pandas.DataFrame
        Contains inter-beat interval data. The column
        "seconds_since_start" is the time in seconds since the start of
        measurement and the column "IBI" is the duration in seconds between
        consecutive beats.
    ACC : pandas.DataFrame
        Contains the data measured with the onboard MEMS type
        3-axis accelerometer, indexed by time of measurement.
    BVP : pandas.DataFrame
        Contains blood volume pulse data, indexed by time of
        measurement.
    EDA : pandas.DataFrame
        Contains data captured from the electrodermal activity
        sensor, indexed by time of measurement.
    HR : pandas.DataFrame
        Contains heart rate data, indexed by time of
        measurement.
    TEMP : pandas.DataFrame
        Contains temperature data, indexed by time of
        measurement.
    data : pandas.DataFrame
        Joined dataframe of the ACC, BVP, EDA, HR and TEMP
        dataframes (see above). May contain NaN values because sampling
        frequencies differ across signals.
"""
def __init__(self, path):
"""
Parse the csv files located in the specified directory into dataframes.
Parameters
----------
path : str
Path of the directory that contains the individual signal csv
files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,
IBI.csv and TEMP.csv. If present, the file tags.csv is also read.
"""
self.start_times = {}
self.sample_freqs = {}
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        if not files:  # os.listdir never returns None; an empty list means nothing to read
print('Empty directory. Nothing to read.')
return None
self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])
self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')
self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')
self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')
self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')
self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))
self.tags = self._read_tags(os.path.join(path, 'tags.csv'))
self.data = self._get_joined_dataframe()
def write(self, dir_path):
"""
Write the signal dataframes back to individual csv files formatted the
same way as they were read.
Parameters
----------
        dir_path : str
            Path of the directory in which the csv files are created.
            If the directory exists, the csv files are written using writing
            mode 'w', ignoring other files in the directory.
            If the directory does not exist, it will be created.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if self.ACC is not None:
self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')
if self.BVP is not None:
self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')
if self.EDA is not None:
self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')
if self.HR is not None:
self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')
if self.TEMP is not None:
self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')
if self.IBI is not None:
self._write_ibi(os.path.join(dir_path, 'IBI.csv'))
if self.tags is not None:
self._write_tags(os.path.join(dir_path, 'tags.csv'))
def _read_signal(self, path, signal_name, col_names=None):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time_str = file.readline().split(', ')[0]
self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')
sample_freq_str = file.readline().split(', ')[0]
self.sample_freqs[signal_name] = float(sample_freq_str)
col_names = [signal_name] if col_names is None else col_names
dataframe = pd.read_csv(file, header=None, names=col_names)
dataframe.index = pd.date_range(
start=self.start_times[signal_name],
freq=f"{1 / self.sample_freqs[signal_name]}S",
periods=len(dataframe))
if col_names is not None:
dataframe.rename(dict(enumerate(col_names)), inplace=True)
else:
dataframe.rename({0: signal_name}, inplace=True)
return dataframe.squeeze()
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_signal(self, path, dataframe, signal_name):
n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1
meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,
[self.sample_freqs[signal_name]] * n_cols])
with open(path, 'w') as file:
np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\n')
dataframe.to_csv(file, index=None, header=None, line_terminator='\n')
def _read_ibi(self, path):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')
self.start_times['IBI'] = start_time
df = pd.read_csv(file, names=['time', 'IBI'], header=None)
df['time'] = pd.to_timedelta(df['time'], unit='s')
df['time'] = start_time + df['time']
return df.set_index('time')
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_ibi(self, path):
with open(path, 'w') as file:
file.write(f"{self.start_times['IBI'].value // 1e9}, IBI\n")
write_df = self.IBI.copy()
write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9
write_df.to_csv(file, header=None, line_terminator='\n')
def _read_tags(self, path):
try:
if os.stat(path).st_size > 0:
return pd.read_csv(path, header=None,
parse_dates=[0],
date_parser=lambda x : pd.to_datetime(x, unit='s'),
names=['tags'],
squeeze=True)
else:
print(f"Not reading tags because the file {path} is empty.")
except OSError:
print(f"Not reading tags because the file {path} does not exist.")
return None
def _write_tags(self, path):
if self.tags is not None:
tags_write_series = self.tags.map(lambda x: x.value / 1e9)
tags_write_series.to_csv(path, header=None, index=None, line_terminator='\n')
def timeshift(self, shift='random'):
"""
        Timeshift all time-related columns as well as the start_times dict.
        Parameters
        ----------
        shift : None/'random', pd.Timestamp or pd.Timedelta
            If shift is not specified, shifts the data by a random time interval
            between one month and two years to the past.
            If shift is a timedelta, adds that timedelta to all time-related attributes.
            If shift is a timestamp, shifts the data such that the earliest entry
            has that timestamp. The remaining values will maintain the same
            time difference to the first entry.
"""
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
            self.timeshift(random_timedelta)
            return  # the recursive call has already applied the random shift
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP, self.data]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if isinstance(shift, pd.Timestamp):
min_start_time = min(self.start_times.values())
new_start_times = dict()
for signal_name, start_time in self.start_times.items():
new_start_times[signal_name] = shift + (start_time - min_start_time)
self.start_times = new_start_times
if self.tags is not None:
timedeltas = self.tags - self.tags.min()
self.tags = shift + timedeltas
for dataframe in dataframes:
timedeltas = dataframe.index - dataframe.index.min()
dataframe.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
for signal_name in self.start_times:
self.start_times[signal_name] += shift
if self.tags is not None:
self.tags += shift
for dataframe in dataframes:
dataframe.index += shift
def _get_joined_dataframe(self):
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if not dataframes:
print('No joined dataframe possible due to lack of data.')
return None
joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])
joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
joined_dataframe = pd.DataFrame(index=joined_idx)
if self.ACC is not None:
joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
if self.BVP is not None:
joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP
if self.EDA is not None:
joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA
if self.HR is not None:
joined_dataframe.loc[self.HR.index, 'HR'] = self.HR
if self.TEMP is not None:
joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP
return joined_dataframe
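# --- Hedged usage sketch (comments only; './E4_session' is a hypothetical path) ---
# Assuming a directory exported from an Empatica E4 session containing
# ACC.csv, BVP.csv, EDA.csv, HR.csv, IBI.csv, TEMP.csv and optionally tags.csv:
#
#   reader = EmpaticaReader('./E4_session')
#   print(reader.sample_freqs)            # e.g. {'ACC': 32.0, 'BVP': 64.0, ...}
#   print(reader.data.head())             # joined frame; NaNs where freqs differ
#   reader.timeshift()                    # de-identify timestamps (random shift)
#   reader.write('./E4_session_shifted')  # write back in the original format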
| 40.611888
| 101
| 0.584245
|
import os
import random
import numpy as np
import pandas as pd
class EmpaticaReader:
def __init__(self, path):
self.start_times = {}
self.sample_freqs = {}
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        if not files:
print('Empty directory. Nothing to read.')
return None
self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])
self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')
self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')
self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')
self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')
self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))
self.tags = self._read_tags(os.path.join(path, 'tags.csv'))
self.data = self._get_joined_dataframe()
def write(self, dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if self.ACC is not None:
self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')
if self.BVP is not None:
self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')
if self.EDA is not None:
self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')
if self.HR is not None:
self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')
if self.TEMP is not None:
self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')
if self.IBI is not None:
self._write_ibi(os.path.join(dir_path, 'IBI.csv'))
if self.tags is not None:
self._write_tags(os.path.join(dir_path, 'tags.csv'))
def _read_signal(self, path, signal_name, col_names=None):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time_str = file.readline().split(', ')[0]
self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')
sample_freq_str = file.readline().split(', ')[0]
self.sample_freqs[signal_name] = float(sample_freq_str)
col_names = [signal_name] if col_names is None else col_names
dataframe = pd.read_csv(file, header=None, names=col_names)
dataframe.index = pd.date_range(
start=self.start_times[signal_name],
freq=f"{1 / self.sample_freqs[signal_name]}S",
periods=len(dataframe))
if col_names is not None:
dataframe.rename(dict(enumerate(col_names)), inplace=True)
else:
dataframe.rename({0: signal_name}, inplace=True)
return dataframe.squeeze()
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_signal(self, path, dataframe, signal_name):
n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1
meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,
[self.sample_freqs[signal_name]] * n_cols])
with open(path, 'w') as file:
np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\n')
dataframe.to_csv(file, index=None, header=None, line_terminator='\n')
def _read_ibi(self, path):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')
self.start_times['IBI'] = start_time
df = pd.read_csv(file, names=['time', 'IBI'], header=None)
df['time'] = pd.to_timedelta(df['time'], unit='s')
df['time'] = start_time + df['time']
return df.set_index('time')
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_ibi(self, path):
with open(path, 'w') as file:
file.write(f"{self.start_times['IBI'].value // 1e9}, IBI\n")
write_df = self.IBI.copy()
write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9
write_df.to_csv(file, header=None, line_terminator='\n')
def _read_tags(self, path):
try:
if os.stat(path).st_size > 0:
return pd.read_csv(path, header=None,
parse_dates=[0],
date_parser=lambda x : pd.to_datetime(x, unit='s'),
names=['tags'],
squeeze=True)
else:
print(f"Not reading tags because the file {path} is empty.")
except OSError:
print(f"Not reading tags because the file {path} does not exist.")
return None
def _write_tags(self, path):
if self.tags is not None:
tags_write_series = self.tags.map(lambda x: x.value / 1e9)
tags_write_series.to_csv(path, header=None, index=None, line_terminator='\n')
def timeshift(self, shift='random'):
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
            self.timeshift(random_timedelta)
            return
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP, self.data]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if isinstance(shift, pd.Timestamp):
min_start_time = min(self.start_times.values())
new_start_times = dict()
for signal_name, start_time in self.start_times.items():
new_start_times[signal_name] = shift + (start_time - min_start_time)
self.start_times = new_start_times
if self.tags is not None:
timedeltas = self.tags - self.tags.min()
self.tags = shift + timedeltas
for dataframe in dataframes:
timedeltas = dataframe.index - dataframe.index.min()
dataframe.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
for signal_name in self.start_times:
self.start_times[signal_name] += shift
if self.tags is not None:
self.tags += shift
for dataframe in dataframes:
dataframe.index += shift
def _get_joined_dataframe(self):
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if not dataframes:
print('No joined dataframe possible due to lack of data.')
return None
joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])
joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
joined_dataframe = pd.DataFrame(index=joined_idx)
if self.ACC is not None:
joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
if self.BVP is not None:
joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP
if self.EDA is not None:
joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA
if self.HR is not None:
joined_dataframe.loc[self.HR.index, 'HR'] = self.HR
if self.TEMP is not None:
joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP
return joined_dataframe
| true
| true
|
f70ba28c0ff01d16d24b0d53e79a3c5456d834b5
| 7,925
|
py
|
Python
|
thor/filter_orbits.py
|
moeyensj/thor
|
ec1150e23ec69944e45f6beddf57cfb46e9e44dc
|
[
"BSD-3-Clause"
] | 11
|
2019-08-22T18:37:09.000Z
|
2022-02-28T22:49:25.000Z
|
thor/filter_orbits.py
|
moeyensj/thor
|
ec1150e23ec69944e45f6beddf57cfb46e9e44dc
|
[
"BSD-3-Clause"
] | 57
|
2019-08-20T19:57:14.000Z
|
2021-09-16T20:54:59.000Z
|
thor/filter_orbits.py
|
moeyensj/thor
|
ec1150e23ec69944e45f6beddf57cfb46e9e44dc
|
[
"BSD-3-Clause"
] | 7
|
2021-02-09T21:28:43.000Z
|
2022-02-01T08:55:29.000Z
|
import pandas as pd
from typing import Tuple
from .data_processing import UNKNOWN_ID_REGEX
from .utils import calcDeltas
__all__ = [
"filterKnownOrbits",
"filterOrbits"
]
def filterKnownOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
    Remove all observations of unknown objects, keeping only observations of objects with
    a known association. Any orbit left with fewer than min_obs observations after the
    unknown observations are removed is dropped as well.
This function will also set the provisional and permanent designation columns as required
by the ADES file format.
Parameters
----------
orbits : `~pandas.DataFrame`
DataFrame of orbits.
    orbit_observations : `~pandas.DataFrame`
        DataFrame of orbit observations with at least one column containing the orbit ID
        ('orbit_id') and one column containing the observation ID ('obs_id').
    associations : `~pandas.DataFrame`
        DataFrame of known associations, with one column containing the observation ID ('obs_id')
        and another column containing the association ('obj_id'). Any unknown objects should have
        been assigned an unknown ID. See preprocessObservations.
min_obs : int
The minimum number of observations for an object to be considered as recovered.
Returns
-------
known_orbits : `~pandas.DataFrame`
Orbits of previously known objects.
known_orbit_observations : `~pandas.DataFrame`
Observations of previously known objects, the constituent observations
to which the orbits were fit.
"""
# Merge associations with orbit observations
labeled_observations = orbit_observations.merge(associations[["obs_id", "obj_id"]], on="obs_id", how="left")
# Keep only observations of known objects
labeled_observations = labeled_observations[~labeled_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True)]
# Keep only known objects with at least min_obs observations
    occurrences = labeled_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
# Filter input orbits
orbits_mask = orbits["orbit_id"].isin(orbit_ids)
orbit_observations_mask = labeled_observations["orbit_id"].isin(orbit_ids)
known_orbits = orbits[orbits_mask].copy()
known_orbit_observations = labeled_observations[orbit_observations_mask].copy()
# Split into permanent and provisional designations
if len(known_orbit_observations) > 0:
known_orbit_observations.loc[:, "permID"] = ""
known_orbit_observations.loc[:, "provID"] = ""
else:
known_orbit_observations["permID"] = ""
known_orbit_observations["provID"] = ""
# Process permanent IDs first
# TODO : add an equivalent for Comets
perm_ids = known_orbit_observations["obj_id"].str.isnumeric()
known_orbit_observations.loc[perm_ids, "permID"] = known_orbit_observations[perm_ids]["obj_id"].values
# Identify provisional IDs next
prov_ids = (
(~known_orbit_observations["obj_id"].str.isnumeric())
& (~known_orbit_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True))
)
known_orbit_observations.loc[prov_ids, "provID"] = known_orbit_observations[prov_ids]["obj_id"].values
# Reorder the columns to put the labels toward the front
cols = known_orbit_observations.columns
first = ["orbit_id", "permID", "provID", "obj_id", "obs_id"]
cols_ordered = first + cols[~cols.isin(first)].tolist()
known_orbit_observations = known_orbit_observations[cols_ordered]
return known_orbits, known_orbit_observations
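# --- Hedged illustration (comments only, not part of the module) ---
# The permID/provID split above follows the ADES convention referenced in the
# docstring: purely numeric designations such as '433' are treated as
# permanent IDs, while non-numeric known designations such as '2010 AB1'
# are treated as provisional IDs. Observations whose obj_id matches
# UNKNOWN_ID_REGEX never reach this point because they are filtered earlier.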
def filterOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
min_time_separation: float = 30.,
delta_cols: list = ["mjd_utc", "mag", "RA_deg", "Dec_deg"]
) -> Tuple[Tuple[pd.DataFrame, pd.DataFrame], Tuple[pd.DataFrame, pd.DataFrame]]:
"""
Filter orbits into orbits of previously known objects and potential discovery candidates.
Parameters
----------
orbits : `~pandas.DataFrame`
DataFrame of orbits.
    orbit_observations : `~pandas.DataFrame`
        DataFrame of orbit observations with at least one column containing the orbit ID
        ('orbit_id') and one column containing the observation ID ('obs_id').
    associations : `~pandas.DataFrame`
        DataFrame of known associations, with one column containing the observation ID ('obs_id')
        and another column containing the association ('obj_id'). Any unknown objects should have
        been assigned an unknown ID. See preprocessObservations.
min_obs : int
The minimum number of observations for an object to be considered as recovered.
    min_time_separation : float
        The minimum time in minutes by which two observations should be separated. If any
        observations for a single orbit are separated by less than this amount then only
        the first observation is kept. This is useful to prevent stationary sources from
        biasing orbit fits, although it may decrease overall completeness.
delta_cols : list[str]
Columns for which to calculate deltas (must include mjd_utc).
Returns
-------
discovery_candidates : (`~pandas.DataFrame`, `~pandas.DataFrame`)
        DataFrame of discovery candidate orbits and discovery candidate observations.
known_orbits : (`~pandas.DataFrame`, `~pandas.DataFrame`)
DataFrame of known orbits and known orbit observations.
"""
# Calculate deltas of a variety of quantities (this returns the orbit_observations dataframe
# with the delta columns added)
deltas = calcDeltas(
orbit_observations,
groupby_cols=["orbit_id", "night_id"],
delta_cols=delta_cols
)
# Mark all observations within min_time of another as filtered
deltas.loc[:, "filtered"] = 1
deltas.loc[(deltas["dmjd_utc"].isna()) | (deltas["dmjd_utc"] >= min_time_separation / 60 / 24), "filtered"] = 0
orbits_ = orbits.copy()
orbit_observations_ = deltas.copy()
# Identify known orbits (also remove any observations of unknown objects from these orbits)
recovered_known_orbits, recovered_known_orbit_observations = filterKnownOrbits(
orbits_,
orbit_observations_,
associations,
min_obs=min_obs
)
# Remove the known orbits from the pool of orbits
# The remaining orbits are potential candidates
known_orbit_ids = recovered_known_orbits["orbit_id"].values
candidate_orbits = orbits_[~orbits_["orbit_id"].isin(known_orbit_ids)]
candidate_orbit_observations = orbit_observations_[~orbit_observations_["orbit_id"].isin(known_orbit_ids)]
    # Remove any observations of the candidate discoveries that are potentially
    # too close in time to each other (removes stationary sources that could bias results)
# Any orbits that now have fewer than min_obs observations are also removed
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["filtered"] == 0]
    occurrences = candidate_orbit_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
candidate_orbits = orbits[orbits["orbit_id"].isin(orbit_ids)]
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["orbit_id"].isin(orbit_ids)]
# Add a trkSub column to the discovery candidates
trk_subs = [f"t{i[0:4]}{i[-3:]}" for i in candidate_orbit_observations["orbit_id"].values]
candidate_orbit_observations.insert(1, "trkSub", trk_subs)
discovery_candidates = (candidate_orbits, candidate_orbit_observations)
known_orbits = (recovered_known_orbits, recovered_known_orbit_observations)
return discovery_candidates, known_orbits
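# --- Hedged usage sketch (comments only, not part of the module) ---
# Assuming `orbits`, `orbit_observations` (with 'orbit_id', 'obs_id' and
# 'night_id' columns) and `associations` DataFrames prepared upstream:
#
#   discovery_candidates, known_orbits = filterOrbits(
#       orbits,
#       orbit_observations,
#       associations,
#       min_obs=5,
#       min_time_separation=30.,
#   )
#   candidate_orbits, candidate_observations = discovery_candidates
#   recovered_orbits, recovered_observations = known_orbits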
| 45.285714
| 123
| 0.722145
|
import pandas as pd
from typing import Tuple
from .data_processing import UNKNOWN_ID_REGEX
from .utils import calcDeltas
__all__ = [
"filterKnownOrbits",
"filterOrbits"
]
def filterKnownOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
labeled_observations = orbit_observations.merge(associations[["obs_id", "obj_id"]], on="obs_id", how="left")
labeled_observations = labeled_observations[~labeled_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True)]
    occurrences = labeled_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
orbits_mask = orbits["orbit_id"].isin(orbit_ids)
orbit_observations_mask = labeled_observations["orbit_id"].isin(orbit_ids)
known_orbits = orbits[orbits_mask].copy()
known_orbit_observations = labeled_observations[orbit_observations_mask].copy()
if len(known_orbit_observations) > 0:
known_orbit_observations.loc[:, "permID"] = ""
known_orbit_observations.loc[:, "provID"] = ""
else:
known_orbit_observations["permID"] = ""
known_orbit_observations["provID"] = ""
perm_ids = known_orbit_observations["obj_id"].str.isnumeric()
known_orbit_observations.loc[perm_ids, "permID"] = known_orbit_observations[perm_ids]["obj_id"].values
prov_ids = (
(~known_orbit_observations["obj_id"].str.isnumeric())
& (~known_orbit_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True))
)
known_orbit_observations.loc[prov_ids, "provID"] = known_orbit_observations[prov_ids]["obj_id"].values
cols = known_orbit_observations.columns
first = ["orbit_id", "permID", "provID", "obj_id", "obs_id"]
cols_ordered = first + cols[~cols.isin(first)].tolist()
known_orbit_observations = known_orbit_observations[cols_ordered]
return known_orbits, known_orbit_observations
def filterOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
min_time_separation: float = 30.,
delta_cols: list = ["mjd_utc", "mag", "RA_deg", "Dec_deg"]
) -> Tuple[Tuple[pd.DataFrame]]:
deltas = calcDeltas(
orbit_observations,
groupby_cols=["orbit_id", "night_id"],
delta_cols=delta_cols
)
deltas.loc[:, "filtered"] = 1
deltas.loc[(deltas["dmjd_utc"].isna()) | (deltas["dmjd_utc"] >= min_time_separation / 60 / 24), "filtered"] = 0
orbits_ = orbits.copy()
orbit_observations_ = deltas.copy()
recovered_known_orbits, recovered_known_orbit_observations = filterKnownOrbits(
orbits_,
orbit_observations_,
associations,
min_obs=min_obs
)
known_orbit_ids = recovered_known_orbits["orbit_id"].values
candidate_orbits = orbits_[~orbits_["orbit_id"].isin(known_orbit_ids)]
candidate_orbit_observations = orbit_observations_[~orbit_observations_["orbit_id"].isin(known_orbit_ids)]
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["filtered"] == 0]
    occurrences = candidate_orbit_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
candidate_orbits = orbits[orbits["orbit_id"].isin(orbit_ids)]
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["orbit_id"].isin(orbit_ids)]
trk_subs = [f"t{i[0:4]}{i[-3:]}" for i in candidate_orbit_observations["orbit_id"].values]
candidate_orbit_observations.insert(1, "trkSub", trk_subs)
discovery_candidates = (candidate_orbits, candidate_orbit_observations)
known_orbits = (recovered_known_orbits, recovered_known_orbit_observations)
return discovery_candidates, known_orbits
| true
| true
|
f70ba298444fd5ee9e0963aa7594d931ec799e5c
| 2,064
|
py
|
Python
|
Archi/tests/test_esbn_model.py
|
Near32/Archi
|
0005713fa4e37c7cd9b34cd257c481d08928db8a
|
[
"MIT"
] | null | null | null |
Archi/tests/test_esbn_model.py
|
Near32/Archi
|
0005713fa4e37c7cd9b34cd257c481d08928db8a
|
[
"MIT"
] | null | null | null |
Archi/tests/test_esbn_model.py
|
Near32/Archi
|
0005713fa4e37c7cd9b34cd257c481d08928db8a
|
[
"MIT"
] | null | null | null |
import Archi
import yaml
def test_model_loading():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
assert 'KeyValueMemory' in model.modules.keys()
assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'CoreLSTM' in model.modules.keys()
assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()
assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
def test_model_forward():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
import torch
inputs_dict = {
'x':torch.rand(4,3,64,64),
}
output = model(**inputs_dict)
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
output1 = model(**inputs_dict)
assert 'lstm_output' in output['modules']['CoreLSTM']
assert 'processed_input' in output['modules']['Encoder']
assert 'processed_input' in output['modules']['ToGateFCN']
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0
assert len(dict(model.named_parameters())) != 0
for np, p in model.named_parameters():
print(np)
if __name__ == '__main__':
test_model_loading()
test_model_forward()
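The try/except above would leave config undefined if parsing ever failed; a more defensive sketch of the same yaml loading (standard PyYAML API, hypothetical path unchanged):

import yaml

def load_config(path="./esbn_model_test_config.yaml"):
    # Close the file handle deterministically and let parse errors
    # propagate instead of continuing with an undefined config.
    with open(path, "r") as f:
        return yaml.safe_load(f)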
| 32.761905
| 103
| 0.66376
|
import Archi
import yaml
def test_model_loading():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
assert 'KeyValueMemory' in model.modules.keys()
assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'CoreLSTM' in model.modules.keys()
assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()
assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
def test_model_forward():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
import torch
inputs_dict = {
'x':torch.rand(4,3,64,64),
}
output = model(**inputs_dict)
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
output1 = model(**inputs_dict)
assert 'lstm_output' in output['modules']['CoreLSTM']
assert 'processed_input' in output['modules']['Encoder']
assert 'processed_input' in output['modules']['ToGateFCN']
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0
assert len(dict(model.named_parameters())) != 0
for np, p in model.named_parameters():
print(np)
if __name__ == '__main__':
test_model_loading()
test_model_forward()
| true
| true
|
f70ba2dbbed027a097316912aef62f7e4fca727a
| 5,378
|
py
|
Python
|
airflow/operators/bash.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 2
|
2020-05-15T02:40:25.000Z
|
2020-06-08T04:30:00.000Z
|
airflow/operators/bash.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 33
|
2020-06-16T15:12:33.000Z
|
2021-07-23T07:04:35.000Z
|
airflow/operators/bash.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 2
|
2021-01-11T13:53:03.000Z
|
2021-10-02T05:06:34.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import signal
from subprocess import PIPE, STDOUT, Popen
from tempfile import TemporaryDirectory, gettempdir
from typing import Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
    will also be pushed to an XCom when the bash command completes.
:param bash_command: The command, set of commands or reference to a
        bash script (must be '.sh' or '.bash') to be executed. (templated)
:type bash_command: str
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param output_encoding: Output encoding of bash command
:type output_encoding: str
    On execution of this operator, the task will be up for retry
    when an exception is raised. However, if a sub-command exits with a non-zero
    value, Airflow will not recognize it as a failure unless the whole shell
    exits with a failure. The easiest way of achieving this is to prefix the
    command with ``set -e;``
Example:
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
@apply_defaults
def __init__(
self,
bash_command: str,
env: Optional[Dict[str, str]] = None,
output_encoding: str = 'utf-8',
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
self.sub_process = None
def execute(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
self.log.info('Tmp dir root location: \n %s', gettempdir())
# Prepare env for child process.
env = self.env
if env is None:
env = os.environ.copy()
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug('Exporting the following env vars:\n%s',
'\n'.join(["{}={}".format(k, v)
for k, v in airflow_context_vars.items()]))
env.update(airflow_context_vars)
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
def pre_exec():
# Restore default signal disposition and invoke setsid
for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
self.log.info('Running command: %s', self.bash_command)
self.sub_process = Popen( # pylint: disable=subprocess-popen-preexec-fn
['bash', "-c", self.bash_command],
stdout=PIPE,
stderr=STDOUT,
cwd=tmp_dir,
env=env,
preexec_fn=pre_exec)
self.log.info('Output:')
line = ''
for raw_line in iter(self.sub_process.stdout.readline, b''):
line = raw_line.decode(self.output_encoding).rstrip()
self.log.info("%s", line)
self.sub_process.wait()
self.log.info('Command exited with return code %s', self.sub_process.returncode)
if self.sub_process.returncode != 0:
raise AirflowException('Bash command failed. The command returned a non-zero exit code.')
return line
def on_kill(self):
self.log.info('Sending SIGTERM signal to bash process group')
if self.sub_process and hasattr(self.sub_process, 'pid'):
os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
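A minimal usage sketch, assuming an Airflow DAG context (the dag_id, dates, and the echoed command are illustrative):

from datetime import datetime

from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG(
    dag_id="example_bash",
    start_date=datetime(2020, 1, 1),
    schedule_interval=None,
) as dag:
    # Prefix with `set -e;` so a failing sub-command fails the whole task.
    run_this = BashOperator(
        task_id="run_this",
        bash_command="set -e; echo 'run date: {{ ds }}'",
    )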
| 38.141844
| 105
| 0.649498
|
import os
import signal
from subprocess import PIPE, STDOUT, Popen
from tempfile import TemporaryDirectory, gettempdir
from typing import Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
@apply_defaults
def __init__(
self,
bash_command: str,
env: Optional[Dict[str, str]] = None,
output_encoding: str = 'utf-8',
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
self.sub_process = None
def execute(self, context):
self.log.info('Tmp dir root location: \n %s', gettempdir())
env = self.env
if env is None:
env = os.environ.copy()
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug('Exporting the following env vars:\n%s',
'\n'.join(["{}={}".format(k, v)
for k, v in airflow_context_vars.items()]))
env.update(airflow_context_vars)
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
def pre_exec():
for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
self.log.info('Running command: %s', self.bash_command)
self.sub_process = Popen(
['bash', "-c", self.bash_command],
stdout=PIPE,
stderr=STDOUT,
cwd=tmp_dir,
env=env,
preexec_fn=pre_exec)
self.log.info('Output:')
line = ''
for raw_line in iter(self.sub_process.stdout.readline, b''):
line = raw_line.decode(self.output_encoding).rstrip()
self.log.info("%s", line)
self.sub_process.wait()
self.log.info('Command exited with return code %s', self.sub_process.returncode)
if self.sub_process.returncode != 0:
raise AirflowException('Bash command failed. The command returned a non-zero exit code.')
return line
def on_kill(self):
self.log.info('Sending SIGTERM signal to bash process group')
if self.sub_process and hasattr(self.sub_process, 'pid'):
os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
| true
| true
|
f70ba333ac2eec228f11aaf00c56987a66df3504
| 2,669
|
py
|
Python
|
api_tests/users/views/test_user_list.py
|
sf2ne/Playground
|
95b2d222d7ac43baca0249acbfc34e043d6a95b3
|
[
"Apache-2.0"
] | null | null | null |
api_tests/users/views/test_user_list.py
|
sf2ne/Playground
|
95b2d222d7ac43baca0249acbfc34e043d6a95b3
|
[
"Apache-2.0"
] | 13
|
2020-03-24T15:29:41.000Z
|
2022-03-11T23:15:28.000Z
|
api_tests/users/views/test_user_list.py
|
sf2ne/Playground
|
95b2d222d7ac43baca0249acbfc34e043d6a95b3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import urlparse
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory
from api.base.settings.defaults import API_BASE
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
| 32.54878
| 86
| 0.630948
|
import urlparse
from nose.tools import *
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory
from api.base.settings.defaults import API_BASE
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
| true
| true
|
f70ba36f5449e78d850667a6792ccd3d50af645e
| 155
|
py
|
Python
|
muteria/drivers/testgeneration/testcase_formats/python_unittest/__init__.py
|
muteria/muteria
|
2cb72ff04548b011bce9296833bceb295199ae8e
|
[
"MIT"
] | 5
|
2020-05-06T03:13:01.000Z
|
2021-12-09T22:39:26.000Z
|
muteria/drivers/testgeneration/testcase_formats/python_unittest/__init__.py
|
muteria/muteria
|
2cb72ff04548b011bce9296833bceb295199ae8e
|
[
"MIT"
] | 6
|
2019-11-27T18:38:09.000Z
|
2021-12-16T20:40:50.000Z
|
muteria/drivers/testgeneration/testcase_formats/python_unittest/__init__.py
|
muteria/muteria
|
2cb72ff04548b011bce9296833bceb295199ae8e
|
[
"MIT"
] | 4
|
2019-06-24T08:54:36.000Z
|
2022-03-31T15:38:35.000Z
|
from muteria.drivers.testgeneration.testcase_formats.python_unittest.unittest\
import *
| 77.5
| 78
| 0.496774
|
from muteria.drivers.testgeneration.testcase_formats.python_unittest.unittest\
import *
| true
| true
|
f70ba37efe5a4d8ded4bea670347e317978fe155
| 4,833
|
py
|
Python
|
pixelprint.py
|
optoisolator/pixel-print
|
99ed669bdc47d50c6f9785b7232e97d9b6653467
|
[
"MIT"
] | null | null | null |
pixelprint.py
|
optoisolator/pixel-print
|
99ed669bdc47d50c6f9785b7232e97d9b6653467
|
[
"MIT"
] | null | null | null |
pixelprint.py
|
optoisolator/pixel-print
|
99ed669bdc47d50c6f9785b7232e97d9b6653467
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
--------------------------------------------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2016 William Yang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------------------------------------
pixelprint.py
LED matrix printer
--------------------------------------------------------------------------------------------------------------
"""
# Raspberry Pi 2 GPIO
import time
import RPi.GPIO as GPIO
# Letters and Numbers
import alphanumeric
# 5x8 LED Matrix row pins
pin_r1 = 5
pin_r2 = 6
pin_r3 = 13
pin_r4 = 19
pin_r5 = 26
# 5x8 LED Matrix col pins
pin_c1 = 21
pin_c2 = 20
pin_c3 = 16
pin_c4 = 12
# Number of refresh cycles to show each character before advancing
PAUSE_INTERVAL = 300
# Time it takes to scan col
COL_SCAN = 0.0001
# Time it takes to scan a row
ROW_SCAN = 0.0008
# Number of cols
NUM_COLS = 8
"""
-------------------------------------------------------
Main LED Matrix class
-------------------------------------------------------
"""
class LEDMatrixControl:
def __init__(self):
"""
---------------------------------------------------------
constructor
---------------------------------------------------------
"""
self.row_ctrl = [pin_r1, pin_r2, pin_r3, pin_r4, pin_r5]
self.col_ctrl = [pin_c1, pin_c2, pin_c3, pin_c4]
GPIO.setmode(GPIO.BCM)
for each in self.row_ctrl:
GPIO.setup(each, GPIO.OUT)
for each in self.col_ctrl:
GPIO.setup(each, GPIO.OUT)
def _decToBinPadded(self, decimal):
"""
---------------------------------------------------------
private method to convert decimal to binary, then pad 0's
---------------------------------------------------------
"""
raw = str(bin(decimal))
part = raw[2:]
final = part.zfill(4)
a = True if final[0] == "1" else False
b = True if final[1] == "1" else False
c = True if final[2] == "1" else False
d = True if final[3] == "1" else False
return [a, b, c, d]
def matrixPrint(self, user_str):
"""
---------------------------------------------------------
main print function.
Use LEDMatrixControlObj.matrixPrint("YOUR TEXT 123.456")
---------------------------------------------------------
"""
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, True)
def matrixPrintRepeat(self, user_str):
"""
---------------------------------------------------------
main print function repeating.
Use LEDMatrixControlObj.matrixPrintRepeat("YOUR TEXT 123.456")
---------------------------------------------------------
"""
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, False)
def _printPipeline(self, chars, mode):
"""
---------------------------------------------------------
Internal printer pipeline
---------------------------------------------------------
"""
order = 0
count = 0
i = 0
repeat = True
while repeat:
current = chars[order]
for each in self.row_ctrl:
GPIO.output(each, True)
j = 0
if(count == PAUSE_INTERVAL and order < len(chars)):
count = 0
order = order + 1
if(order == len(chars)):
order = 0
if(mode):
repeat = False
count = count + 1
while(j < NUM_COLS):
answer = self._decToBinPadded(j)
for i in range(0, len(self.col_ctrl)):
GPIO.output(self.col_ctrl[i], answer[i])
for i in range(0, len(self.row_ctrl)):
if(i in current[len(current) - j - 1]):
GPIO.output(self.row_ctrl[i], False)
else:
GPIO.output(self.row_ctrl[i], True)
j += 1
time.sleep(COL_SCAN)
time.sleep(ROW_SCAN)
if(i == 4):
i = 0
else:
i += 1
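For reference, the padded binary conversion above can be exercised standalone (no GPIO needed); this sketch mirrors _decToBinPadded:

def dec_to_bin_padded(decimal):
    # Same idea as LEDMatrixControl._decToBinPadded: four bits, most
    # significant first, as booleans. bin(5) -> '0b101' -> '0101'.
    bits = bin(decimal)[2:].zfill(4)
    return [bit == "1" for bit in bits]

assert dec_to_bin_padded(5) == [False, True, False, True]
assert dec_to_bin_padded(0) == [False, False, False, False]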
| 26.124324
| 110
| 0.53652
|
import time
import RPi.GPIO as GPIO
import alphanumeric
pin_r1 = 5
pin_r2 = 6
pin_r3 = 13
pin_r4 = 19
pin_r5 = 26
pin_c1 = 21
pin_c2 = 20
pin_c3 = 16
pin_c4 = 12
PAUSE_INTERVAL = 300
COL_SCAN = 0.0001
ROW_SCAN = 0.0008
NUM_COLS = 8
class LEDMatrixControl:
def __init__(self):
self.row_ctrl = [pin_r1, pin_r2, pin_r3, pin_r4, pin_r5]
self.col_ctrl = [pin_c1, pin_c2, pin_c3, pin_c4]
GPIO.setmode(GPIO.BCM)
for each in self.row_ctrl:
GPIO.setup(each, GPIO.OUT)
for each in self.col_ctrl:
GPIO.setup(each, GPIO.OUT)
def _decToBinPadded(self, decimal):
raw = str(bin(decimal))
part = raw[2:]
final = part.zfill(4)
a = True if final[0] == "1" else False
b = True if final[1] == "1" else False
c = True if final[2] == "1" else False
d = True if final[3] == "1" else False
return [a, b, c, d]
def matrixPrint(self, user_str):
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, True)
def matrixPrintRepeat(self, user_str):
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, False)
def _printPipeline(self, chars, mode):
order = 0
count = 0
i = 0
repeat = True
while repeat:
current = chars[order]
for each in self.row_ctrl:
GPIO.output(each, True)
j = 0
if(count == PAUSE_INTERVAL and order < len(chars)):
count = 0
order = order + 1
if(order == len(chars)):
order = 0
if(mode):
repeat = False
count = count + 1
while(j < NUM_COLS):
answer = self._decToBinPadded(j)
for i in range(0, len(self.col_ctrl)):
GPIO.output(self.col_ctrl[i], answer[i])
for i in range(0, len(self.row_ctrl)):
if(i in current[len(current) - j - 1]):
GPIO.output(self.row_ctrl[i], False)
else:
GPIO.output(self.row_ctrl[i], True)
j += 1
time.sleep(COL_SCAN)
time.sleep(ROW_SCAN)
if(i == 4):
i = 0
else:
i += 1
| true
| true
|
f70ba4152c3dda319f435fa092eb83c1d673060f
| 9,175
|
py
|
Python
|
tests/utils/test_calculate_accuracies.py
|
RubensZimbres/pytorch-metric-learning
|
3ff3b9ae6065fdf470f7c19ea8c11f9850d697ea
|
[
"MIT"
] | 1
|
2020-12-22T01:11:46.000Z
|
2020-12-22T01:11:46.000Z
|
tests/utils/test_calculate_accuracies.py
|
marijnl/pytorch-metric-learning
|
41e06ef5af398c05d238e0a74ee6c42fa7bd574c
|
[
"MIT"
] | null | null | null |
tests/utils/test_calculate_accuracies.py
|
marijnl/pytorch-metric-learning
|
41e06ef5af398c05d238e0a74ee6c42fa7bd574c
|
[
"MIT"
] | null | null | null |
import unittest
from pytorch_metric_learning.utils import accuracy_calculator
import numpy as np
class TestCalculateAccuracies(unittest.TestCase):
def test_accuracy_calculator(self):
query_labels = np.array([1, 1, 2, 3, 4])
knn_labels1 = np.array(
[
[0, 1, 1, 2, 2],
[1, 0, 1, 1, 3],
[4, 4, 4, 4, 2],
[3, 1, 3, 1, 3],
[0, 0, 4, 2, 2],
]
)
label_counts1 = {1: 3, 2: 5, 3: 4, 4: 5}
knn_labels2 = knn_labels1 + 5
label_counts2 = {k + 5: v for k, v in label_counts1.items()}
for avg_of_avgs in [False, True]:
for i, (knn_labels, label_counts) in enumerate(
[(knn_labels1, label_counts1), (knn_labels2, label_counts2)]
):
AC = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=avg_of_avgs
)
kwargs = {
"query_labels": query_labels,
"label_counts": label_counts,
"knn_labels": knn_labels,
"not_lone_query_mask": np.ones(5).astype(np.bool)
if i == 0
else np.zeros(5).astype(np.bool),
}
function_dict = AC.get_function_dict()
for ecfss in [False, True]:
if ecfss:
kwargs["knn_labels"] = kwargs["knn_labels"][:, 1:]
kwargs["embeddings_come_from_same_source"] = ecfss
acc = AC._get_accuracy(function_dict, **kwargs)
if i == 1:
self.assertTrue(acc["precision_at_1"] == 0)
self.assertTrue(acc["r_precision"] == 0)
self.assertTrue(acc["mean_average_precision_at_r"] == 0)
self.assertTrue(acc["mean_average_precision"] == 0)
else:
self.assertTrue(
acc["precision_at_1"]
== self.correct_precision_at_1(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["r_precision"]
== self.correct_r_precision(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["mean_average_precision_at_r"]
== self.correct_mean_average_precision_at_r(
ecfss, avg_of_avgs
)
)
self.assertTrue(
acc["mean_average_precision"]
== self.correct_mean_average_precision(ecfss, avg_of_avgs)
)
def correct_precision_at_1(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
if not avg_of_avgs:
return 0.4
else:
return (0.5 + 0 + 1 + 0) / 4
else:
if not avg_of_avgs:
return 1.0 / 5
else:
return (0.5 + 0 + 0 + 0) / 4
def correct_r_precision(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
acc0 = 2.0 / 3
acc1 = 2.0 / 3
acc2 = 1.0 / 5
acc3 = 2.0 / 4
acc4 = 1.0 / 5
else:
acc0 = 1.0 / 1
acc1 = 1.0 / 2
acc2 = 1.0 / 4
acc3 = 1.0 / 3
acc4 = 1.0 / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision_at_r(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 3
acc1 = (1 + 2.0 / 3) / 3
acc2 = (1.0 / 5) / 5
acc3 = (1 + 2.0 / 3) / 4
acc4 = (1.0 / 3) / 5
else:
acc0 = 1
acc1 = (1.0 / 2) / 2
acc2 = (1.0 / 4) / 4
acc3 = (1.0 / 2) / 3
acc4 = (1.0 / 2) / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 2
acc1 = (1 + 2.0 / 3 + 3.0 / 4) / 3
acc2 = (1.0 / 5) / 1
acc3 = (1 + 2.0 / 3 + 3.0 / 5) / 3
acc4 = (1.0 / 3) / 1
else:
acc0 = 1
acc1 = (1.0 / 2 + 2.0 / 3) / 2
acc2 = 1.0 / 4
acc3 = (1.0 / 2 + 2.0 / 4) / 2
acc4 = 1.0 / 2
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def test_get_label_counts(self):
label_counts, num_k = accuracy_calculator.get_label_counts(
[0, 1, 3, 2, 3, 1, 3, 3, 4, 6, 5, 10, 4, 4, 4, 4, 6, 6, 5]
)
self.assertTrue(
label_counts == {0: 1, 1: 2, 2: 1, 3: 4, 4: 5, 5: 2, 6: 3, 10: 1}
)
self.assertTrue(num_k == 5)
def test_get_lone_query_labels(self):
query_labels = np.array([0, 1, 2, 3, 4, 5, 6])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 3, 4, 5, 6])
reference_label_counts, _ = accuracy_calculator.get_label_counts(
reference_labels
)
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, True
)
self.assertTrue(
np.all(np.unique(lone_query_labels) == np.array([1, 3, 4, 5, 6]))
)
query_labels = np.array([0, 1, 2, 3, 4])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 4, 5, 6])
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, False
)
self.assertTrue(np.all(np.unique(lone_query_labels) == np.array([3])))
class TestCalculateAccuraciesAndFaiss(unittest.TestCase):
def test_accuracy_calculator_and_faiss(self):
AC = accuracy_calculator.AccuracyCalculator(exclude=("NMI", "AMI"))
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
query_labels = np.arange(10).astype(np.int)
reference_labels = np.arange(10).astype(np.int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, False)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 1)
self.assertTrue(acc["mean_average_precision_at_r"] == 1)
reference = (np.arange(20) / 2.0)[:, None].astype(np.float32)
reference_labels = np.zeros(20).astype(np.int)
reference_labels[::2] = query_labels
reference_labels[1::2] = np.ones(10).astype(np.int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, True)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(
acc["mean_average_precision_at_r"]
== (1 + 2.0 / 2 + 3.0 / 5 + 4.0 / 7 + 5.0 / 9) / 10
)
def test_accuracy_calculator_and_faiss_avg_of_avgs(self):
AC_global_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=False
)
AC_per_class_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=True
)
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
query[-1] = 100
reference[0] = -100
query_labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
reference_labels = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
acc = AC_global_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.9)
self.assertTrue(acc["r_precision"] == 0.9)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.9)
acc = AC_per_class_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.5)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.5)
| 40.597345
| 87
| 0.500817
|
import unittest
from pytorch_metric_learning.utils import accuracy_calculator
import numpy as np
class TestCalculateAccuracies(unittest.TestCase):
def test_accuracy_calculator(self):
query_labels = np.array([1, 1, 2, 3, 4])
knn_labels1 = np.array(
[
[0, 1, 1, 2, 2],
[1, 0, 1, 1, 3],
[4, 4, 4, 4, 2],
[3, 1, 3, 1, 3],
[0, 0, 4, 2, 2],
]
)
label_counts1 = {1: 3, 2: 5, 3: 4, 4: 5}
knn_labels2 = knn_labels1 + 5
label_counts2 = {k + 5: v for k, v in label_counts1.items()}
for avg_of_avgs in [False, True]:
for i, (knn_labels, label_counts) in enumerate(
[(knn_labels1, label_counts1), (knn_labels2, label_counts2)]
):
AC = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=avg_of_avgs
)
kwargs = {
"query_labels": query_labels,
"label_counts": label_counts,
"knn_labels": knn_labels,
"not_lone_query_mask": np.ones(5).astype(np.bool)
if i == 0
else np.zeros(5).astype(np.bool),
}
function_dict = AC.get_function_dict()
for ecfss in [False, True]:
if ecfss:
kwargs["knn_labels"] = kwargs["knn_labels"][:, 1:]
kwargs["embeddings_come_from_same_source"] = ecfss
acc = AC._get_accuracy(function_dict, **kwargs)
if i == 1:
self.assertTrue(acc["precision_at_1"] == 0)
self.assertTrue(acc["r_precision"] == 0)
self.assertTrue(acc["mean_average_precision_at_r"] == 0)
self.assertTrue(acc["mean_average_precision"] == 0)
else:
self.assertTrue(
acc["precision_at_1"]
== self.correct_precision_at_1(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["r_precision"]
== self.correct_r_precision(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["mean_average_precision_at_r"]
== self.correct_mean_average_precision_at_r(
ecfss, avg_of_avgs
)
)
self.assertTrue(
acc["mean_average_precision"]
== self.correct_mean_average_precision(ecfss, avg_of_avgs)
)
def correct_precision_at_1(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
if not avg_of_avgs:
return 0.4
else:
return (0.5 + 0 + 1 + 0) / 4
else:
if not avg_of_avgs:
return 1.0 / 5
else:
return (0.5 + 0 + 0 + 0) / 4
def correct_r_precision(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
acc0 = 2.0 / 3
acc1 = 2.0 / 3
acc2 = 1.0 / 5
acc3 = 2.0 / 4
acc4 = 1.0 / 5
else:
acc0 = 1.0 / 1
acc1 = 1.0 / 2
acc2 = 1.0 / 4
acc3 = 1.0 / 3
acc4 = 1.0 / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision_at_r(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 3
acc1 = (1 + 2.0 / 3) / 3
acc2 = (1.0 / 5) / 5
acc3 = (1 + 2.0 / 3) / 4
acc4 = (1.0 / 3) / 5
else:
acc0 = 1
acc1 = (1.0 / 2) / 2
acc2 = (1.0 / 4) / 4
acc3 = (1.0 / 2) / 3
acc4 = (1.0 / 2) / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 2
acc1 = (1 + 2.0 / 3 + 3.0 / 4) / 3
acc2 = (1.0 / 5) / 1
acc3 = (1 + 2.0 / 3 + 3.0 / 5) / 3
acc4 = (1.0 / 3) / 1
else:
acc0 = 1
acc1 = (1.0 / 2 + 2.0 / 3) / 2
acc2 = 1.0 / 4
acc3 = (1.0 / 2 + 2.0 / 4) / 2
acc4 = 1.0 / 2
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def test_get_label_counts(self):
label_counts, num_k = accuracy_calculator.get_label_counts(
[0, 1, 3, 2, 3, 1, 3, 3, 4, 6, 5, 10, 4, 4, 4, 4, 6, 6, 5]
)
self.assertTrue(
label_counts == {0: 1, 1: 2, 2: 1, 3: 4, 4: 5, 5: 2, 6: 3, 10: 1}
)
self.assertTrue(num_k == 5)
def test_get_lone_query_labels(self):
query_labels = np.array([0, 1, 2, 3, 4, 5, 6])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 3, 4, 5, 6])
reference_label_counts, _ = accuracy_calculator.get_label_counts(
reference_labels
)
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, True
)
self.assertTrue(
np.all(np.unique(lone_query_labels) == np.array([1, 3, 4, 5, 6]))
)
query_labels = np.array([0, 1, 2, 3, 4])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 4, 5, 6])
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, False
)
self.assertTrue(np.all(np.unique(lone_query_labels) == np.array([3])))
class TestCalculateAccuraciesAndFaiss(unittest.TestCase):
def test_accuracy_calculator_and_faiss(self):
AC = accuracy_calculator.AccuracyCalculator(exclude=("NMI", "AMI"))
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
query_labels = np.arange(10).astype(np.int)
reference_labels = np.arange(10).astype(np.int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, False)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 1)
self.assertTrue(acc["mean_average_precision_at_r"] == 1)
reference = (np.arange(20) / 2.0)[:, None].astype(np.float32)
reference_labels = np.zeros(20).astype(np.int)
reference_labels[::2] = query_labels
reference_labels[1::2] = np.ones(10).astype(np.int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, True)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(
acc["mean_average_precision_at_r"]
== (1 + 2.0 / 2 + 3.0 / 5 + 4.0 / 7 + 5.0 / 9) / 10
)
def test_accuracy_calculator_and_faiss_avg_of_avgs(self):
AC_global_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=False
)
AC_per_class_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=True
)
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
query[-1] = 100
reference[0] = -100
query_labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
reference_labels = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
acc = AC_global_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.9)
self.assertTrue(acc["r_precision"] == 0.9)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.9)
acc = AC_per_class_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.5)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.5)
| true
| true
|
f70ba52887f184ad281e8caf21f7d1bf136269b9
| 51,226
|
py
|
Python
|
tensorflow/python/eager/backprop.py
|
piquark6046/tensorflow
|
57771c5d008f6d16fd147110213855d145a7e0bc
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/eager/backprop.py
|
piquark6046/tensorflow
|
57771c5d008f6d16fd147110213855d145a7e0bc
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/eager/backprop.py
|
piquark6046/tensorflow
|
57771c5d008f6d16fd147110213855d145a7e0bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
# TODO(b/159343581): Properly support CompositeTensor in all functions in this
# file.
import functools
import operator
import sys
import six
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor_gradient
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Note that we need to lazy load the following two modules to avoid creating
# circular dependencies.
# TODO(b/119775953): fix the circular dependencies.
pfor_ops = LazyLoader(
"pfor_ops", globals(),
"tensorflow.python.ops.parallel_for.control_flow_ops")
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
context.ensure_initialized()
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
  # pybind11 enums do not return the raw value like SWIG enums do. They compare
  # correctly against each other, but not against raw integers, which is what
  # most tests do.
# https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types
# TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons
# from integer value to class.
if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):
return dtypes.as_dtype(value)
if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:
return [dtypes.as_dtype(v) for v in value]
if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):
return tensor_shape.as_shape(value).as_proto()
if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
if isinstance(value, str):
return value.encode()
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
self.skip_input_indices = skip_input_indices
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads, skip_input_indices, forward_pass_name_scope):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
skip_input_indices: a tuple that is passed to the gradient function,
indicating which inputs to skip calculating the gradient for
forward_pass_name_scope: the namescope of the op in the forward pass.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
# This does not work with v1 TensorArrays.
if ops.executing_eagerly_outside_functions(
) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
gradient_name_scope = "gradient_tape/"
if forward_pass_name_scope:
gradient_name_scope += forward_pass_name_scope + "/"
with ops.name_scope(gradient_name_scope):
return grad_fn(mock_op, *out_grads)
else:
return grad_fn(mock_op, *out_grads)
pywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)
def _must_record_gradient():
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
@tf_export("__internal__.record_gradient", v1=[])
def record_gradient(op_name, inputs, attrs, outputs):
"""Explicitly record the gradient for a given op.
Args:
op_name: The op name as listed in the `OpDef` for the op.
inputs: A list of tensor inputs to the op.
attrs: The op attributes as a flattened list of alternating attribute names
and attribute values.
outputs: A list of tensor outputs from the op.
"""
pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,
ops.get_name_scope())
execute.must_record_gradient = _must_record_gradient
execute.record_gradient = record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
for s in sources:
if getattr(s, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getfullargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
if args[0] == "self":
return range(len(args) - 1)
else:
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of `f` with respect to all of `params`. The function takes an extra optional
keyword argument `dy`. Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and its vjp w.r.t.
params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
vjp() # the vjp function returns 6.0
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(arg) if i in parameter_positions else arg
for i, arg in enumerate(args)
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
if getattr(args[i], "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors"
"yet.")
sources.append(args[i])
tape.watch(this_tape, args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
this_tape, nest.flatten(result), sources, output_gradients=dy)
return result, vjp
return decorated
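# A usage sketch for `make_vjp` (exposition only, not part of the original
# module), showing a caller-supplied cotangent `dy`; calling `vjp()` with no
# argument is equivalent to passing a vector of ones. The helper name is
# hypothetical; `tf` is assumed to be TensorFlow running eagerly.
def _example_make_vjp_with_dy():
  import tensorflow as tf  # assumption: importable in the caller's environment
  def f(x):
    return x * x
  value, vjp = make_vjp(f)(tf.constant(3.0))
  assert value.numpy() == 9.0
  (grad,) = vjp()                       # ones cotangent: d(x*x)/dx = 6.0
  (scaled,) = vjp(dy=tf.constant(2.0))  # custom cotangent: 2.0 * 6.0 = 12.0
  assert grad.numpy() == 6.0 and scaled.numpy() == 12.0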
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, indexed_slices.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, indexed_slices.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return indexed_slices.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
if len(grads) == 1:
return grads[0]
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
# The following `_as_indexed_slices_list` casts ids of IndexedSlices into
# int64. It is to make sure the inputs of `concat` all have same the data
# type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = indexed_slices.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
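# A small sketch (exposition only, not part of the original module): two
# sparse gradients for the same variable are aggregated by concatenating
# their values and indices rather than densifying them. The helper name is
# hypothetical; it only uses modules already imported by this file.
def _example_aggregate_indexed_slices():
  a = indexed_slices.IndexedSlices(
      values=constant_op.constant([[1., 1.]]),
      indices=constant_op.constant([0], dtype=dtypes.int64))
  b = indexed_slices.IndexedSlices(
      values=constant_op.constant([[2., 2.]]),
      indices=constant_op.constant([3], dtype=dtypes.int64))
  agg = aggregate_indexed_slices_gradients([a, b])
  # agg.values == [[1., 1.], [2., 2.]] and agg.indices == [0, 3]; consumers
  # apply this as a scatter-add over the variable's rows.
  return agg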
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all(isinstance(g, ops.Tensor) for g in gradients):
return gen_math_ops.add_n(gradients)
else:
assert all(
isinstance(g, (ops.Tensor, indexed_slices.IndexedSlices))
for g in gradients)
return aggregate_indexed_slices_gradients(gradients)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
shape_tuple = grad._shape_tuple() # pylint: disable=protected-access
elif isinstance(grad, indexed_slices.IndexedSlices):
shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access
else:
raise ValueError("`grad` not a Tensor or IndexedSlices.")
if shape_tuple is None or None in shape_tuple:
return 0
return functools.reduce(operator.mul, shape_tuple, 1)
def _fast_fill(value, shape, dtype):
return array_ops.fill(
constant_op.constant(shape, dtype=dtypes.int32),
constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Helper to return (possibly cached) zero tensors in eager mode."""
# Note: variants will use _zeros_like
if dtype == dtypes.string or dtype == dtypes.resource:
return None
ctx = context.context()
if not ctx.executing_eagerly():
return array_ops.zeros(shape, dtype)
device = ctx.device_name
if tensor_util.is_tf_type(shape):
shape_key = shape.ref()
else:
shape_key = shape
cache_key = shape_key, dtype, device
cached = ctx.zeros_cache().get(cache_key)
if cached is None:
if dtypes.as_dtype(dtype).is_bool:
value = False
else:
value = 0
cached = _fast_fill(value, shape, dtype)
ctx.zeros_cache().put(cache_key, cached)
return cached
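# A small sketch (exposition only, not part of the original module) of the
# caching contract above: in eager mode, repeated requests for the same
# (shape, dtype, device) return the very same cached tensor object, avoiding
# redundant fills during backprop. The helper name is hypothetical.
def _example_zeros_cache():
  if not context.context().executing_eagerly():
    return
  first = _zeros((2, 2), dtypes.float32)
  second = _zeros((2, 2), dtypes.float32)
  assert first is second  # served from ctx.zeros_cache()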
def _ones(shape, dtype):
as_dtype = dtypes.as_dtype(dtype)
if as_dtype == dtypes.string:
return None
if not context.executing_eagerly():
return array_ops.ones(shape, dtype)
if as_dtype.is_bool:
value = True
else:
value = 1
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(value, dtype=dtype)
return _fast_fill(value, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
zeros_fn=_zeros,
ones_fn=_ones,
zeros_like_fn=default_gradient.zeros_like,
ones_like_fn=default_gradient.ones_like,
graph_shape_fn=gen_array_ops.shape)
pywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)
def _handle_or_self(x):
"""Unwrap resource variable/ndarray to return tensors."""
if resource_variable_ops.is_resource_variable(x):
return x.handle
return x
@tf_export("GradientTape", "autodiff.GradientTape", v1=["GradientTape"])
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,
where `trainable=True` is default in both cases) are automatically watched.
Tensors can be manually watched by invoking the `watch` method on this context
manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
>>> x = tf.constant(3.0)
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = x * x
>>> dy_dx = g.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(6.0, shape=(), dtype=float32)
GradientTapes can be nested to compute higher-order derivatives. For example,
>>> x = tf.constant(5.0)
>>> with tf.GradientTape() as g:
... g.watch(x)
... with tf.GradientTape() as gg:
... gg.watch(x)
... y = x * x
... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x
>>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2
>>> print(dy_dx)
tf.Tensor(10.0, shape=(), dtype=float32)
>>> print(d2y_dx2)
tf.Tensor(2.0, shape=(), dtype=float32)
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
>>> x = tf.constant(3.0)
>>> with tf.GradientTape(persistent=True) as g:
... g.watch(x)
... y = x * x
... z = y * y
>>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)
>>> print(dz_dx)
tf.Tensor(108.0, shape=(), dtype=float32)
>>> dy_dx = g.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(6.0, shape=(), dtype=float32)
By default GradientTape will automatically watch any trainable variables that
are accessed inside the context. If you want fine grained control over which
variables are watched you can disable automatic tracking by passing
`watch_accessed_variables=False` to the tape constructor:
>>> x = tf.Variable(2.0)
>>> w = tf.Variable(5.0)
>>> with tf.GradientTape(
... watch_accessed_variables=False, persistent=True) as tape:
... tape.watch(x)
... y = x ** 2 # Gradients will be available for `x`.
... z = w ** 3 # No gradients will be available as `w` isn't being watched.
>>> dy_dx = tape.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(4.0, shape=(), dtype=float32)
>>> # No gradients will be available as `w` isn't being watched.
>>> dz_dw = tape.gradient(z, w)
>>> print(dz_dw)
None
Note that when using models you should ensure that your variables exist when
using `watch_accessed_variables=False`. Otherwise it's quite easy to make your
first iteration not have any gradients:
```python
a = tf.keras.layers.Dense(32)
b = tf.keras.layers.Dense(32)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(a.variables) # Since `a.build` has not been called at this point
# `a.variables` will return an empty list and the
# tape will not be watching anything.
result = b(a(inputs))
tape.gradient(result, a.variables) # The result of this computation will be
# a list of `None`s since a's variables
# are not being watched.
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False, watch_accessed_variables=True):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
watch_accessed_variables: Boolean controlling whether the tape will
automatically `watch` any (trainable) variables accessed while the tape
is active. Defaults to True meaning gradients can be requested from any
result computed in the tape derived from reading a trainable `Variable`.
If False users must explicitly `watch` any `Variable`s they want to
request gradients from.
"""
self._tape = None
self._persistent = persistent
self._watch_accessed_variables = watch_accessed_variables
self._watched_variables = ()
self._recording = False
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self):
"""Pushes a new tape onto the tape stack."""
if self._recording:
raise ValueError("Tape is still recording, This can happen if you try to "
"re-enter an already-active tape.")
if self._tape is None:
self._tape = tape.push_new_tape(
persistent=self._persistent,
watch_accessed_variables=self._watch_accessed_variables)
else:
tape.push_tape(self._tape)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
@tf_contextlib.contextmanager
def _ensure_recording(self):
"""Ensures that this tape is recording."""
if not self._recording:
try:
self._push_tape()
yield
finally:
self._pop_tape()
else:
yield
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
Raises:
ValueError: if it encounters something that is not a tensor.
"""
for t in nest.flatten(tensor, expand_composites=True):
if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):
raise ValueError("Passed in object of type {}, not tf.Tensor".format(
type(t)))
if not backprop_util.IsTrainable(t):
logging.log_first_n(
logging.WARN, "The dtype of the watched tensor must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
if hasattr(t, "handle"):
# There are many variable-like objects, all of them currently have
# `handle` attribute that points to a tensor. If this changes, internals
# of watch_variable need to change as well.
tape.watch_variable(self._tape, t)
else:
tape.watch(self._tape, t)
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
>>> x = tf.constant(4.0)
>>> with tf.GradientTape() as tape:
... with tape.stop_recording():
... y = x ** 2
>>> dy_dx = tape.gradient(y, x)
>>> print(dy_dx)
None
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape()
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._tape = None
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
if self._tape is not None:
self._watched_variables = self._tape.watched_variables()
return self._watched_variables
def gradient(self,
target,
sources,
output_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Computes the gradient using operations recorded in context of this tape.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
In addition to Tensors, gradient also supports RaggedTensors. For example,
>>> x = tf.ragged.constant([[1.0, 2.0], [3.0]])
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = x * x
>>> g.gradient(y, x)
<tf.RaggedTensor [[2.0, 4.0], [6.0]]>
Args:
target: a list or nested structure of Tensors or Variables or
CompositeTensors to be differentiated.
sources: a list or nested structure of Tensors or Variables or
CompositeTensors. `target` will be differentiated against elements in
`sources`.
output_gradients: a list of gradients, one for each differentiable
element of target. Defaults to None.
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None, or
CompositeTensor), one for each element in `sources`. Returned structure
is the same as the structure of `sources`.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called inside the context of the tape.
TypeError: If the target is a None object.
ValueError: If the target is a variable or if unconnected gradients is
called with an unknown value.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(
logging.WARN, "Calling GradientTape.gradient on a persistent "
"tape inside its context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derivatives.", 1)
if target is None:
raise TypeError("Argument `target` should be a list or nested structure"
" of Tensors, Variables or CompositeTensors to be "
"differentiated, but received None.")
flat_targets = []
for t in nest.flatten(target):
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the target tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if resource_variable_ops.is_resource_variable(t):
with self:
t = ops.convert_to_tensor(t)
flat_targets.append(t)
flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_targets)
flat_sources = nest.flatten(sources)
for t in flat_sources:
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the source tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if getattr(t, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
flat_sources_raw = flat_sources
flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = nest.flatten(output_gradients)
output_gradients = (
composite_tensor_gradient.get_flat_tensors_for_gradients(
output_gradients))
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in output_gradients]
flat_grad = imperative_grad.imperative_grad(
self._tape,
flat_targets,
flat_sources,
output_gradients=output_gradients,
sources_raw=flat_sources_raw,
unconnected_gradients=unconnected_gradients)
if not self._persistent:
# Keep track of watched variables before setting tape to None
self._watched_variables = self._tape.watched_variables()
self._tape = None
flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients(
flat_sources_raw, flat_grad)
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
def jacobian(self,
target,
sources,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes the jacobian using operations recorded in context of this tape.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
Note: By default the jacobian implementation uses parallel for (pfor), which
creates a tf.function under the hood for each jacobian call. For better
performance, and to avoid recompilation and vectorization rewrites on each
call, enclose GradientTape code in @tf.function.
    See [wikipedia
article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)
for the definition of a Jacobian.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([1.0, 2.0])
g.watch(x)
y = x * x
jacobian = g.jacobian(y, x)
# jacobian value is [[2., 0.], [0., 4.]]
```
Args:
target: Tensor to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, vectorizes the jacobian computation. Else
falls back to a sequential while_loop. Vectorization can sometimes fail
or lead to excessive memory usage. This option can be used to disable
vectorization in such cases.
Returns:
A list or nested structure of Tensors (or None), one for each element in
`sources`. Returned structure is the same as the structure of `sources`.
Note if any gradient is sparse (IndexedSlices), jacobian function
currently makes it dense and returns a Tensor instead. This may change in
the future.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
flat_sources = nest.flatten(sources)
target_static_shape = target.shape
target_shape = array_ops.shape(target)
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
target = array_ops.reshape(target, [-1])
def loop_fn(i):
with self._ensure_recording():
y = array_ops.gather(target, i)
return self.gradient(y, flat_sources,
unconnected_gradients=unconnected_gradients)
try:
target_size = int(target.shape[0])
except TypeError:
target_size = array_ops.shape(target)[0]
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"jacobian computation. Vectorization can be disabled by setting"
" experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the jacobian with eager execution enabled and with "
" experimental_use_pfor set to False.")
output = pfor_ops.for_loop(
loop_fn, [target.dtype] * len(flat_sources), target_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(output):
if out is not None:
new_shape = array_ops.concat(
[target_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
if context.executing_eagerly():
out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))
output[i] = out
return nest.pack_sequence_as(sources, output)
def batch_jacobian(self,
target,
source,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes and stacks per-example jacobians.
See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)
for the definition of a Jacobian. This function is essentially an efficient
implementation of the following:
`tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.
Note that compared to `GradientTape.jacobian` which computes gradient of
each output value w.r.t each input value, this function is useful when
`target[i,...]` is independent of `source[j,...]` for `j != i`. This
assumption allows more efficient computation as compared to
`GradientTape.jacobian`. The output, as well as intermediate activations,
are lower dimensional and avoid a bunch of redundant zeros which would
result in the jacobian computation given the independence assumption.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
Note: By default the batch_jacobian implementation uses parallel for (pfor),
which creates a tf.function under the hood for each batch_jacobian call.
For better performance, and to avoid recompilation and vectorization
rewrites on each call, enclose GradientTape code in @tf.function.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
g.watch(x)
y = x * x
batch_jacobian = g.batch_jacobian(y, x)
# batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
```
Args:
target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].
`target[i,...]` should only depend on `source[i,...]`.
source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else
uses a tf.while_loop.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails or if first
dimension of `target` and `source` do not match.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to"
"compute one set of gradients (or jacobians)")
target_shape = target.shape
if target_shape.rank is None:
dim = tensor_shape.Dimension(None)
else:
dim = target_shape.dims[0]
if not (target_shape.with_rank_at_least(2) and
source.shape.with_rank_at_least(2) and
dim.is_compatible_with(source.shape[0])):
raise ValueError(
"Need first dimension of target shape (%s) and "
"source shape (%s) to match." % (target.shape, source.shape))
if target_shape.is_fully_defined():
batch_size = int(target_shape[0])
target_row_size = target_shape.num_elements() // batch_size
else:
target_shape = array_ops.shape(target)
batch_size = target_shape[0]
target_row_size = array_ops.size(target) // batch_size
source_shape = array_ops.shape(source)
# Flatten target to 2-D.
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, source_shape[0])]):
target = array_ops.reshape(target, [batch_size, target_row_size])
run_once = False
def loop_fn(i):
nonlocal run_once
if run_once and not self._persistent:
if parallel_iterations is not None:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with parallel_iterations.")
else:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian.")
run_once = True
with self._ensure_recording():
y = array_ops.gather(target, i, axis=1)
return self.gradient(y, source,
unconnected_gradients=unconnected_gradients)
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_row_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"batch_jacobian computation. Vectorization can be disabled by "
"setting experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with eager execution enabled and "
" with experimental_use_pfor set to False.")
output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,
parallel_iterations=parallel_iterations)
new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
if output is None:
# Note that this block is returning zeros when it could use `None` to
# represent unconnected gradients. This is to maintain compatibility with
# the previous behavior, which ignored `unconnected_gradients`.
output = array_ops.zeros(new_shape, target.dtype)
return output
else:
output = array_ops.reshape(output,
[target_row_size, batch_size, -1])
output = array_ops.transpose(output, [1, 0, 2])
output = array_ops.reshape(output, new_shape)
return output
| 36.986282
| 90
| 0.679011
|
import functools
import operator
import sys
import six
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor_gradient
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
pfor_ops = LazyLoader(
"pfor_ops", globals(),
"tensorflow.python.ops.parallel_for.control_flow_ops")
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
context.ensure_initialized()
h = context.context()._handle
attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
  if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):
return dtypes.as_dtype(value)
if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:
return [dtypes.as_dtype(v) for v in value]
if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):
return tensor_shape.as_shape(value).as_proto()
if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
if isinstance(value, str):
return value.encode()
return value
class _MockOp(object):
def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
self.skip_input_indices = skip_input_indices
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads, skip_input_indices, forward_pass_name_scope):
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
grad_fn = ops._gradient_registry.lookup(op_name)
if grad_fn is None:
return [None] * num_inputs
if ops.executing_eagerly_outside_functions(
) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
gradient_name_scope = "gradient_tape/"
if forward_pass_name_scope:
gradient_name_scope += forward_pass_name_scope + "/"
with ops.name_scope(gradient_name_scope):
return grad_fn(mock_op, *out_grads)
else:
return grad_fn(mock_op, *out_grads)
pywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)
def _must_record_gradient():
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
@tf_export("__internal__.record_gradient", v1=[])
def record_gradient(op_name, inputs, attrs, outputs):
pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,
ops.get_name_scope())
execute.must_record_gradient = _must_record_gradient
execute.record_gradient = record_gradient
def implicit_val_and_grad(f):
def grad_fn(*args, **kwds):
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
for s in sources:
if getattr(s, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
def grad_fn(*args, **kwds):
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
try:
args = tf_inspect.getfullargspec(f).args
except TypeError as e:
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
if args[0] == "self":
return range(len(args) - 1)
else:
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
def decorated(*args, **kwds):
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
def decorated(*args, **kwds):
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
def decorated(*args, **kwds):
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(arg) if i in parameter_positions else arg
for i, arg in enumerate(args)
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
if getattr(args[i], "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors"
"yet.")
sources.append(args[i])
tape.watch(this_tape, args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
this_tape, nest.flatten(result), sources, output_gradients=dy)
return result, vjp
return decorated
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, indexed_slices.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, indexed_slices.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return indexed_slices.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
if len(grads) < 1:
return None
if len(grads) == 1:
return grads[0]
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
# The following `_as_indexed_slices_list` casts ids of IndexedSlices into
# int64. It is to make sure the inputs of `concat` all have same the data
# type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = indexed_slices.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
def _aggregate_grads(gradients):
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all(isinstance(g, ops.Tensor) for g in gradients):
return gen_math_ops.add_n(gradients)
else:
assert all(
isinstance(g, (ops.Tensor, indexed_slices.IndexedSlices))
for g in gradients)
return aggregate_indexed_slices_gradients(gradients)
def _num_elements(grad):
if isinstance(grad, ops.Tensor):
shape_tuple = grad._shape_tuple() # pylint: disable=protected-access
elif isinstance(grad, indexed_slices.IndexedSlices):
shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access
else:
raise ValueError("`grad` not a Tensor or IndexedSlices.")
if shape_tuple is None or None in shape_tuple:
return 0
return functools.reduce(operator.mul, shape_tuple, 1)
def _fast_fill(value, shape, dtype):
return array_ops.fill(
constant_op.constant(shape, dtype=dtypes.int32),
constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
# Note: variants will use _zeros_like
if dtype == dtypes.string or dtype == dtypes.resource:
return None
ctx = context.context()
if not ctx.executing_eagerly():
return array_ops.zeros(shape, dtype)
device = ctx.device_name
if tensor_util.is_tf_type(shape):
shape_key = shape.ref()
else:
shape_key = shape
cache_key = shape_key, dtype, device
cached = ctx.zeros_cache().get(cache_key)
if cached is None:
if dtypes.as_dtype(dtype).is_bool:
value = False
else:
value = 0
cached = _fast_fill(value, shape, dtype)
ctx.zeros_cache().put(cache_key, cached)
return cached
def _ones(shape, dtype):
as_dtype = dtypes.as_dtype(dtype)
if as_dtype == dtypes.string:
return None
if not context.executing_eagerly():
return array_ops.ones(shape, dtype)
if as_dtype.is_bool:
value = True
else:
value = 1
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(value, dtype=dtype)
return _fast_fill(value, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
zeros_fn=_zeros,
ones_fn=_ones,
zeros_like_fn=default_gradient.zeros_like,
ones_like_fn=default_gradient.ones_like,
graph_shape_fn=gen_array_ops.shape)
pywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)
def _handle_or_self(x):
if resource_variable_ops.is_resource_variable(x):
return x.handle
return x
@tf_export("GradientTape", "autodiff.GradientTape", v1=["GradientTape"])
class GradientTape(object):
def __init__(self, persistent=False, watch_accessed_variables=True):
self._tape = None
self._persistent = persistent
self._watch_accessed_variables = watch_accessed_variables
self._watched_variables = ()
self._recording = False
def __enter__(self):
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
if self._recording:
self._pop_tape()
def _push_tape(self):
if self._recording:
raise ValueError("Tape is still recording, This can happen if you try to "
"re-enter an already-active tape.")
if self._tape is None:
self._tape = tape.push_new_tape(
persistent=self._persistent,
watch_accessed_variables=self._watch_accessed_variables)
else:
tape.push_tape(self._tape)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
@tf_contextlib.contextmanager
def _ensure_recording(self):
if not self._recording:
try:
self._push_tape()
yield
finally:
self._pop_tape()
else:
yield
def watch(self, tensor):
for t in nest.flatten(tensor, expand_composites=True):
if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):
raise ValueError("Passed in object of type {}, not tf.Tensor".format(
type(t)))
if not backprop_util.IsTrainable(t):
logging.log_first_n(
logging.WARN, "The dtype of the watched tensor must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
if hasattr(t, "handle"):
# There are many variable-like objects, all of them currently have
# `handle` attribute that points to a tensor. If this changes, internals
# of watch_variable need to change as well.
tape.watch_variable(self._tape, t)
else:
tape.watch(self._tape, t)
@tf_contextlib.contextmanager
def stop_recording(self):
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape()
def reset(self):
self._pop_tape()
self._tape = None
self._push_tape()
def watched_variables(self):
if self._tape is not None:
self._watched_variables = self._tape.watched_variables()
return self._watched_variables
def gradient(self,
target,
sources,
output_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(
logging.WARN, "Calling GradientTape.gradient on a persistent "
"tape inside its context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derivatives.", 1)
if target is None:
raise TypeError("Argument `target` should be a list or nested structure"
" of Tensors, Variables or CompositeTensors to be "
"differentiated, but received None.")
flat_targets = []
for t in nest.flatten(target):
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the target tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if resource_variable_ops.is_resource_variable(t):
with self:
t = ops.convert_to_tensor(t)
flat_targets.append(t)
flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_targets)
flat_sources = nest.flatten(sources)
for t in flat_sources:
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the source tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if getattr(t, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
flat_sources_raw = flat_sources
flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = nest.flatten(output_gradients)
output_gradients = (
composite_tensor_gradient.get_flat_tensors_for_gradients(
output_gradients))
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in output_gradients]
flat_grad = imperative_grad.imperative_grad(
self._tape,
flat_targets,
flat_sources,
output_gradients=output_gradients,
sources_raw=flat_sources_raw,
unconnected_gradients=unconnected_gradients)
if not self._persistent:
# Keep track of watched variables before setting tape to None
self._watched_variables = self._tape.watched_variables()
self._tape = None
flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients(
flat_sources_raw, flat_grad)
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
def jacobian(self,
target,
sources,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
flat_sources = nest.flatten(sources)
target_static_shape = target.shape
target_shape = array_ops.shape(target)
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
target = array_ops.reshape(target, [-1])
def loop_fn(i):
with self._ensure_recording():
y = array_ops.gather(target, i)
return self.gradient(y, flat_sources,
unconnected_gradients=unconnected_gradients)
try:
target_size = int(target.shape[0])
except TypeError:
target_size = array_ops.shape(target)[0]
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"jacobian computation. Vectorization can be disabled by setting"
" experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the jacobian with eager execution enabled and with "
" experimental_use_pfor set to False.")
output = pfor_ops.for_loop(
loop_fn, [target.dtype] * len(flat_sources), target_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(output):
if out is not None:
new_shape = array_ops.concat(
[target_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
if context.executing_eagerly():
out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))
output[i] = out
return nest.pack_sequence_as(sources, output)
def batch_jacobian(self,
target,
source,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to"
"compute one set of gradients (or jacobians)")
target_shape = target.shape
if target_shape.rank is None:
dim = tensor_shape.Dimension(None)
else:
dim = target_shape.dims[0]
if not (target_shape.with_rank_at_least(2) and
source.shape.with_rank_at_least(2) and
dim.is_compatible_with(source.shape[0])):
raise ValueError(
"Need first dimension of target shape (%s) and "
"source shape (%s) to match." % (target.shape, source.shape))
if target_shape.is_fully_defined():
batch_size = int(target_shape[0])
target_row_size = target_shape.num_elements() // batch_size
else:
target_shape = array_ops.shape(target)
batch_size = target_shape[0]
target_row_size = array_ops.size(target) // batch_size
source_shape = array_ops.shape(source)
# Flatten target to 2-D.
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, source_shape[0])]):
target = array_ops.reshape(target, [batch_size, target_row_size])
run_once = False
def loop_fn(i):
nonlocal run_once
if run_once and not self._persistent:
if parallel_iterations is not None:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with parallel_iterations.")
else:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian.")
run_once = True
with self._ensure_recording():
y = array_ops.gather(target, i, axis=1)
return self.gradient(y, source,
unconnected_gradients=unconnected_gradients)
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_row_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"batch_jacobian computation. Vectorization can be disabled by "
"setting experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with eager execution enabled and "
" with experimental_use_pfor set to False.")
output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,
parallel_iterations=parallel_iterations)
new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
if output is None:
# Note that this block is returning zeros when it could use `None` to
# represent unconnected gradients. This is to maintain compatibility with
# the previous behavior, which ignored `unconnected_gradients`.
output = array_ops.zeros(new_shape, target.dtype)
return output
else:
output = array_ops.reshape(output,
[target_row_size, batch_size, -1])
output = array_ops.transpose(output, [1, 0, 2])
output = array_ops.reshape(output, new_shape)
return output
| true
| true
|
f70ba52a2bd12e48541dace861de83c615f1e6a9
| 142862
|
py
|
Python
|
test/test_fx.py
|
ammar1510/pytorch
|
ec8d6777255821bed73b471eadddde068cd60c0b
|
[
"Intel"
] | 1
|
2022-02-23T08:20:59.000Z
|
2022-02-23T08:20:59.000Z
|
test/test_fx.py
|
ammar1510/pytorch
|
ec8d6777255821bed73b471eadddde068cd60c0b
|
[
"Intel"
] | null | null | null |
test/test_fx.py
|
ammar1510/pytorch
|
ec8d6777255821bed73b471eadddde068cd60c0b
|
[
"Intel"
] | null | null | null |
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
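# An illustrative sketch (not part of the original test file): wrapping makes
# symbolic tracing record the call as an opaque call_function node instead of
# tracing into the function body. The helper name is hypothetical.
def _example_wrap_usage():
    def uses_wrapped(x):
        return wrapped_via_decorator(x)
    gm = symbolic_trace(uses_wrapped)
    assert 'wrapped_via_decorator' in gm.code  # recorded as a single call
    assert bool(gm(torch.tensor(1.0)) == torch.tensor(2.0))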
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
        # Custom tracer that treats no modules as leaves, so everything
        # should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf, real_a_lifted_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf2, real_a_lifted_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + str(target)
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
    def test_tensor_attribute_coalesced(self):
        def count_attrs(fx_module):
            targets = set()
            for node in fx_module.graph.nodes:
                if node.op == 'get_attr':
                    targets.add(node.target)
            return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
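        # A tensor created inside the traced function becomes its own
        # `_tensor_constant*` attribute, so this variant should yield two
        # distinct get_attr targets.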
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
            neg_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(neg_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
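        # TensorPair is a plain class here: during tracing, x and y are
        # Proxies, so .add/.mul are recorded as call_method nodes that
        # dispatch to the real TensorPair methods at run time.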
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
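        # With ProxyableClassMeta, a TensorPair literal built from concrete
        # tensors can be embedded in the traced graph as a constant rather
        # than causing a trace error.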
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
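        # Constructing a ProxyableClassMeta class from a Proxy argument
        # (TensorPair(y, y) below) is itself recorded as a node in the graph.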
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
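        # ZeroTensor is proxyable, so ZeroTensor(x + y) is deferred to a graph
        # node during tracing; the input-dependent branch in __init__ only
        # runs when the traced module is called with real tensors.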
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
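        # For a leaf module returning an aggregate, tensor_meta mirrors the
        # output structure: element 0 is the literal 3, element 1 holds the
        # metadata of the scalar tensor.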
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
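        # Partial evaluation: seed the environment with a fixed output for the
        # `linear` node so the interpreter reuses it instead of executing the
        # node; only the downstream clamp still runs on it.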
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
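        # Interpreter should free intermediate values after their last use;
        # only the output entry is expected to survive in the environment.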
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
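        # Build a relu chain deeper than the recursion limit; deepcopy of the
        # Graph must not recurse once per node.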
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
                kwargs = node.kwargs.copy()
                # torch.neg has no in-place variant, so drop the `inplace` kwarg
                kwargs.pop('inplace')
                with rn18_traced.graph.inserting_before(node):
                    new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
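        # Moving a node relative to itself (or to where it already sits)
        # should leave the graph unchanged: still x, relu, and output.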
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
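# Illustrative sketch (not part of the test suite): fx stores Node args in
# immutable containers, so in-place list methods raise and rewrites must go
# through the Node API (e.g. reassigning node.args) instead.
def _example_immutable_args():
    from torch.fx.immutable_collections import immutable_list
    lst = immutable_list([1, 2])
    try:
        lst.append(3)  # any in-place mutation raises NotImplementedError
    except NotImplementedError:
        pass
    return immutable_list(list(lst) + [3])  # build a new list instead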
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
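# Illustrative sketch (not part of the test suite): `concrete_args` burns a
# Python value into the traced graph, and fx guards the specialization with
# a torch._assert node, as the test above verifies.
def _example_concrete_args_specialization():
    import torch
    from torch.fx import symbolic_trace

    def branchy(x, flag):
        return x * 2 if flag else x

    specialized = symbolic_trace(branchy, concrete_args={'flag': True})
    return specialized(torch.ones(2), True)  # tensor([2., 2.])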
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
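# Illustrative sketch (not part of the test suite): add_submodule creates any
# missing intermediate modules along a dotted path, and
# delete_all_unused_submodules drops submodules no call_module node references.
def _example_submodule_api():
    import torch
    gm = torch.fx.symbolic_trace(torch.nn.Sequential(torch.nn.Linear(4, 4)))
    gm.add_submodule("extra.relu", torch.nn.ReLU())  # nested path is created
    gm.delete_all_unused_submodules()                # unreferenced -> removed
    return gm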
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
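# Illustrative sketch (not part of the test suite): is_leaf_module decides
# whether a submodule becomes a single call_module node (a leaf) or is traced
# through and inlined into the parent graph.
def _example_leaf_tracer():
    import torch
    class KeepEverythingLeaf(torch.fx.Tracer):
        def is_leaf_module(self, m, qualname):
            return True  # never inline any submodule
    mod = torch.nn.Sequential(torch.nn.Linear(2, 2), torch.nn.ReLU())
    graph = KeepEverythingLeaf().trace(mod)
    return torch.fx.GraphModule(mod, graph)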
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
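# Illustrative sketch (not part of the test suite): pytree flattening is the
# mechanism behind the PH placeholders above -- nested containers are split
# into leaves plus a spec that can reassemble them.
def _example_pytree_roundtrip():
    import torch.utils._pytree as pytree
    leaves, spec = pytree.tree_flatten({'a': [1, 2], 'b': 3})
    assert leaves == [1, 2, 3]
    assert pytree.tree_unflatten(leaves, spec) == {'a': [1, 2], 'b': 3}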
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing, but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
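# Illustrative sketch (not part of the test suite): the schemas returned by
# get_signature_for_torch_op are ordinary inspect.Signature objects, one per
# registered overload, so they support bind()/apply_defaults() as above.
def _example_torch_op_signatures():
    import torch
    from torch.fx.operator_schemas import get_signature_for_torch_op
    schemas = get_signature_for_torch_op(torch.add)
    bound = schemas[0].bind(torch.ones(1), torch.ones(1))  # structural bind only
    bound.apply_defaults()
    return bound.args, bound.kwargs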
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing, but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
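# Illustrative sketch (not part of the test suite): str(inspect.Signature) is
# what the manual serialization above replaces, since its exact output is not
# stable across Python versions.
def _example_signature_str():
    import inspect
    def f(x: int, y: str = 'a') -> bool:
        return bool(x)
    return str(inspect.signature(f))  # e.g. "(x: int, y: str = 'a') -> bool"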
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing, but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# nn.functional callables that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature is available, or the annotation object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing, but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
| avg_line_length: 36.687725 | max_line_length: 128 | alphanum_fraction: 0.585348 |
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter
from fx.test_dce_pass import TestDCE
from fx.test_fx_const_fold import TestConstFold
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# Point is defined at module scope because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name and a function
# object directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
class Foo(object):
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs):
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
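    # `Graph.graph_copy` returns the value that the source graph's output node
    # referred to; wrapping that value in a `Proxy` lets new nodes be appended
    # with ordinary Python expressions (e.g. `t + t`) instead of explicit
    # create_node calls.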
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
mod = symbolic_trace(orig_mod)
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
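            # ===== Stage 1: Normalize the module to an instruction format =====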
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
                    assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
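            # ===== Stage 2: Load the instructions into the C++ interpreter =====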
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
            # FIXME: The following code could be greatly simplified by
            # symbolic_trace'ing the wrapper, but exposing `__call__` on the
            # TorchBind interpreter class messes up Python `hasattr` for some
            # reason. More digging into CPython's implementation of hasattr
            # is probably in order...
graph = torch.fx.Graph()
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
interpreter_node = graph.create_node('get_attr', 'interpreter')
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
graph.output(output_node)
graph.lint()
return GraphModule(wrapper, graph)
lowered = lower_to_elementwise_interpreter(msm)
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
    def test_tensor_attribute_coalesced(self):
        def count_attrs(fx_module):
            targets = set()
            for node in fx_module.graph.nodes:
                if node.op == 'get_attr':
                    targets.add(node.target)
            return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
        # Traced-in tensor constants are stored as `_tensor_constant*`
        # attributes on the GraphModule; make sure a module containing one can
        # still be scripted.
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
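    # With `torch.fx.ProxyableClassMeta` as the metaclass, constructing the
    # class during tracing is itself recorded as a call_function node instead
    # of being executed eagerly, so literal constructor calls show up in the
    # traced graph.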
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
traced = symbolic_trace(m)
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
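    # `ShapeProp` interprets a GraphModule with example inputs and records each
    # node's output shape, dtype, and stride under node.meta['tensor_meta'].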
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
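    # `Interpreter` executes a GraphModule node by node. Subclasses can
    # override `run_node` or the per-opcode methods (call_function,
    # call_method, ...) to observe or alter execution.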
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
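    # `Transformer` is an Interpreter that runs over Proxy values and emits a
    # new GraphModule from `.transform()`, so the same per-opcode overrides
    # rewrite the graph instead of executing it.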
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
                kwargs = node.kwargs.copy()
                # Neg doesn't have an in-place variant, so drop the kwarg
                kwargs.pop('inplace')
                with rn18_traced.graph.inserting_before(node):
                    new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
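    # `graph.inserting_before(node)` is a context manager that moves the graph's
    # insertion point, so nodes created inside the `with` block are placed
    # before `node` rather than appended at the end.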
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4]
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
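    # `concrete_args` specializes the trace to the given values: the argument is
    # baked into the traced code and torch._assert guards are inserted to check
    # that callers actually pass the value that was traced.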
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
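    # `RewritingTracer` rewrites the AST of the code being traced before
    # tracing it, e.g. turning Python `assert` statements into traceable
    # torch._assert calls.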
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
a.get_submodule("")
a.get_parameter("param")
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
a.delete_all_unused_submodules()
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
            def __call__(self, *args):
                self.called = True
                self.calling = True
                try:
                    return super(type(self), self).__call__(*args)
                finally:
                    self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
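        # The first lambda above flattens a Foo into (children, aux data) and
        # the second rebuilds one from those pieces; the flatten_spec
        # registration tells FX how to unpack a Foo argument into its leaves.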
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
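        # With ListCodeGen installed, the regenerated forward looks roughly
        # like this (illustration only):
        #     def forward(self, args_list: List[torch.Tensor]):
        #         a, b = args_list
        #         add = a + b
        #         return add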
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
            if v.default is not inspect.Signature.empty:
                # Keep the rendered default in its own variable to avoid
                # shadowing the default_val_str helper defined above.
                rendered_default = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
                maybe_default = f' = {rendered_default}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
contained = getattr(t, '__args__', None) or []
contained = t if isinstance(t, list) else contained
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
            # Detect Optional: Optional[X] is represented as Union[X, None],
            # i.e. a two-element Union where exactly one member is NoneType.
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
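    # For illustration, the stabilization above maps e.g.
    # typing.Optional[typing.List[torch.Tensor]] to
    # 'Optional[List[torch.Tensor]]'.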
def test_function_back_compat(self):
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged.
        # Enable it in testing, but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
                # inspect.signature() raises ValueError when no signature is
                # available for the object (e.g. some built-ins); skip it.
                except ValueError:
                    pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged.
        # Enable it in testing, but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
                exc, err = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
                exc, err = self.UNSCRIPTABLE_MODELS[name]
                with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
hexsha: f70ba6b87b169178ffb85b354aeb87156f54dfd1 | size: 3275 | ext: py | lang: Python | path: generator/verify/sim_sram.py | repo: VLSIDA/OpenCache | license: BSD-3-Clause | stars: 5 (2021-09-15 to 2022-03-26)
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
from policy import replacement_policy as rp
from globals import OPTS
class sim_sram:
"""
This is a simulation module for SRAMs.
It is used in sim_cache to read and write data.
"""
def __init__(self, num_words, num_ways, num_rows):
self.num_words = num_words
self.num_ways = num_ways
self.num_rows = num_rows
def reset(self):
""" Reset all arrays of the SRAM. """
self.valid_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.dirty_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.tag_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.data_array = [[[0] * self.num_words for _ in range(self.num_ways)] for _ in range(self.num_rows)]
if OPTS.replacement_policy == rp.FIFO:
self.fifo_array = [0] * self.num_rows
if OPTS.replacement_policy == rp.LRU:
self.lru_array = [[0] * self.num_ways for _ in range(self.num_rows)]
def read_valid(self, set, way):
""" Return the valid bit of given set and way. """
return self.valid_array[set][way]
def read_dirty(self, set, way):
""" Return the dirty bit of given set and way. """
return self.dirty_array[set][way]
def read_tag(self, set, way):
""" Return the tag of given set and way. """
return self.tag_array[set][way]
def read_fifo(self, set):
""" Return the FIFO bits of given set and way. """
return self.fifo_array[set]
def read_lru(self, set, way):
""" Return the LRU bits of given set and way. """
return self.lru_array[set][way]
def read_word(self, set, way, offset):
""" Return the data word of given set, way, and offset. """
return self.data_array[set][way][offset]
def read_line(self, set, way):
""" Return the data line of given set and way. """
return self.data_array[set][way].copy()
def write_valid(self, set, way, data):
""" Write the valid bit of given set and way. """
self.valid_array[set][way] = data
def write_dirty(self, set, way, data):
""" Write the dirty bit of given set and way. """
self.dirty_array[set][way] = data
def write_tag(self, set, way, data):
""" Write the tag of given set and way. """
self.tag_array[set][way] = data
def write_fifo(self, set, data):
""" Write the FIFO bits of given set and way. """
self.fifo_array[set] = data % self.num_ways
def write_lru(self, set, way, data):
""" Write the LRU bits of given set and way. """
self.lru_array[set][way] = data
def write_word(self, set, way, offset, data):
""" Write the data word of given set, way, and offset. """
self.data_array[set][way][offset] = data
def write_line(self, set, way, data):
""" Write the data line of given set and way. """
self.data_array[set][way] = data
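# Hedged usage sketch (editor's addition): driving the model the way sim_cache
# might. reset() consults OPTS.replacement_policy, so this assumes OPTS has
# been configured by the generator's setup code beforehand.
def _sim_sram_example():
    sram = sim_sram(num_words=4, num_ways=2, num_rows=8)
    sram.reset()
    sram.write_valid(0, 1, 1)
    sram.write_tag(0, 1, 0x2A)
    sram.write_word(0, 1, 3, 0xBEEF)
    assert sram.read_word(0, 1, 3) == 0xBEEF
    assert sram.read_line(0, 1)[3] == 0xBEEF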
hexsha: f70ba70746408cde2e2e445a071d007f2f2b62f8 | size: 1369 | ext: wsgi | lang: Python | path: files/config-files/maposmatic.wsgi | repo: chatelao/maposmatic-vagrant | license: Unlicense | stars: 25 (2016-03-24 to 2022-03-04)
# coding: utf-8
# maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey
# Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys
sys.path.append("/home/maposmatic/maposmatic")
sys.path.append("/home/maposmatic/ocitysmap")
os.environ["DJANGO_SETTINGS_MODULE"] = 'www.settings'
os.environ["MAPOSMATIC_LOG_FILE"] = "/home/maposmatic/maposmatic/logs/maposmatic-www.log"
os.environ["PGCONNECT_TIMEOUT"] = "1"
import django.core.handlers.wsgi
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
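# Hedged example (editor's addition): an Apache mod_wsgi stanza that could
# point at this script; the process options and paths are illustrative.
#
#   WSGIDaemonProcess maposmatic python-path=/home/maposmatic/maposmatic
#   WSGIProcessGroup maposmatic
#   WSGIScriptAlias / /home/maposmatic/maposmatic/maposmatic.wsgi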
hexsha: f70ba851bced003f1bd4ef374153c70502f27c10 | size: 2395 | ext: py | lang: Python | path: bam/task_specific/task.py | repo: deepneuralmachine/google-research | license: Apache-2.0 | stars: 23901 (2018-10-04 to 2022-03-31)
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import csv
import os
import tensorflow.compat.v1 as tf
class Example(object):
__metaclass__ = abc.ABCMeta
def __init__(self, task_name):
self.task_name = task_name
class Task(object):
"""Override this class to add a new task."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, long_sequences=False):
self.config = config
self.name = name
self.long_sequences = long_sequences
def get_examples(self, split):
return self.load_data(split + ".tsv", split)
def get_test_splits(self):
return ["test"]
def load_data(self, fname, split):
examples = self._create_examples(
read_tsv(os.path.join(self.config.raw_data_dir(self.name), fname),
max_lines=50 if self.config.debug else None),
split)
return examples
@abc.abstractmethod
def _create_examples(self, lines, split):
pass
@abc.abstractmethod
def get_scorer(self):
pass
@abc.abstractmethod
def get_feature_specs(self):
pass
@abc.abstractmethod
def featurize(self, example, is_training):
pass
@abc.abstractmethod
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
pass
def __repr__(self):
return "Task(" + self.name + ")"
def read_tsv(input_file, quotechar=None, max_lines=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for i, line in enumerate(reader):
if max_lines and i >= max_lines:
break
lines.append(line)
return lines
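# Hedged sketch (editor's addition): a minimal Task subclass showing the
# override points. The class name and method bodies below are illustrative,
# not part of BAM; real scorers and prediction modules depend on the rest of
# the codebase.
class _ExampleTask(Task):
  """Sketch only: turns each TSV line into a bare Example."""

  def _create_examples(self, lines, split):
    return [Example(self.name) for _ in lines]

  def get_scorer(self):
    raise NotImplementedError("illustrative sketch only")

  def get_feature_specs(self):
    return []

  def featurize(self, example, is_training):
    return {}

  def get_prediction_module(self, bert_model, features, is_training,
                            percent_done):
    raise NotImplementedError("illustrative sketch only")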
hexsha: f70ba8cd1beb723b680a53896c7f7d9b27f3178c | size: 3345 | ext: py | lang: Python | path: amazon_scraper/settings.py | repo: Samyak2/amazon-scraper | license: MIT | stars: 1 (2019-11-22)
# -*- coding: utf-8 -*-
# Scrapy settings for amazon_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amazon_scraper'
SPIDER_MODULES = ['amazon_scraper.spiders']
NEWSPIDER_MODULE = 'amazon_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'amazon_scraper_3 (+your@email.here)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 2
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperDownloaderMiddleware': 543,
#}
# DOWNLOADER_MIDDLEWARES = {
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
# 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
# }
# RANDOM_UA_TYPE = "desktop"
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'amazon_scraper.pipelines.AmazonScraperPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
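# Hedged note (editor's addition): these settings are picked up automatically
# when a spider is run from the project root with the Scrapy CLI, e.g.:
#
#   scrapy crawl <spider_name> -o items.json
#
# where `-o` writes the scraped items to a feed export file.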
hexsha: f70babde43bff0c8c248591d5b1a2c576b8fd8cb | size: 6101 | ext: py | lang: Python | path: chainer/functions/pooling/average_pooling_2d.py | repo: LuoYuanke/PrivChainer | license: MIT | forks: 1 (2022-02-20)
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
class AveragePooling2D(pooling_2d.Pooling2D):
"""Average pooling over a set of 2d planes."""
# TODO(beam2d): Support cover_all mode.
def forward_cpu(self, x):
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,
self.ph, self.pw)
y = col.mean(axis=(2, 3))
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto'):
self.retain_inputs((0,))
return super(AveragePooling2D, self).forward_gpu(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)
coeff = 1. / (self.kh * self.kw)
kern = cuda.elementwise(
'raw T in, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T out', '''
int c0 = i / (out_h * out_w);
int out_y = i / out_w % out_h;
int out_x = i % out_w;
int in_y_0 = max(0, out_y * sy - ph);
int in_y_1 = min(h, out_y * sy + kh - ph);
int in_x_0 = max(0, out_x * sx - pw);
int in_x_1 = min(w, out_x * sx + kw - pw);
T val = 0;
for (int y = in_y_0; y < in_y_1; ++y) {
int offset_y = w * (y + h * c0);
for (int x = in_x_0; x < in_x_1; ++x) {
val = val + in[x + offset_y];
}
}
out = val * coeff;
''', 'avg_pool_fwd')
kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff, y)
return y,
def backward(self, indexes, gy):
return AveragePooling2DGrad(self).apply(gy)
def create_pool_desc(self):
return cuda.cudnn.create_pooling_descriptor(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
class AveragePooling2DGrad(function_node.FunctionNode):
def __init__(self, apool2d):
self.kh = apool2d.kh
self.kw = apool2d.kw
self.sy = apool2d.sy
self.sx = apool2d.sx
self.ph = apool2d.ph
self.pw = apool2d.pw
self._used_cudnn = apool2d._used_cudnn
if not self._used_cudnn:
self._in_shape = apool2d._in_shape
self._in_dtype = apool2d._in_dtype
self.apool2d = apool2d
def forward_cpu(self, gy):
h, w = self._in_shape[2:]
gcol = numpy.tile(gy[0][:, :, None, None],
(1, 1, self.kh, self.kw, 1, 1))
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
gx /= self.kh * self.kw
return gx,
def forward_gpu(self, gy):
if self._used_cudnn:
x, = self.apool2d.get_retained_inputs()
return self.apool2d.backward_gpu((x.data,), gy)
n, c, h, w = self._in_shape
y_h, y_w = gy[0].shape[2:]
gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
coeff = 1. / (self.kh * self.kw)
cuda.elementwise(
'raw T gy, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T gx',
'''
int c0 = i / (h * w);
int y = i / w % h + ph;
int x = i % w + pw;
int out_y_0 = max(0, (y - kh + sy) / sy);
int out_y_1 = min(out_h, (y + sy) / sy);
int out_x_0 = max(0, (x - kw + sx) / sx);
int out_x_1 = min(out_w, (x + sx) / sx);
int hc0 = out_h * c0;
T val = 0;
for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {
for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {
val = val + gy[out_x + out_w * (out_y + hc0)];
}
}
gx = val * coeff;
''', 'avg_pool_bwd')(gy[0].reduced_view(),
h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff,
gx)
return gx,
def backward(self, indexes, grad_outputs):
return AveragePooling2D(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
False).apply(grad_outputs)
def average_pooling_2d(x, ksize, stride=None, pad=0):
"""Spatial average pooling function.
    This function acts similarly to :class:`~functions.Convolution2D`, but
    it computes the average of the input spatial patches for each channel,
    without any learned parameter, instead of computing inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
    .. note::
       This function currently does not support ``cover_all`` mode as
       :func:`max_pooling_2d` does. Average pooling runs in non-cover-all
       mode.
"""
return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]
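# A minimal usage sketch of average_pooling_2d via the standard
# chainer.functions alias; the input values are made up for illustration.
import numpy as np
import chainer.functions as F

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)  # (N, C, H, W)
y = F.average_pooling_2d(x, ksize=2)  # stride defaults to ksize, pad=0
print(y.shape)  # (1, 1, 2, 2): each output is the mean of a 2x2 patch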
| 37.20122
| 77
| 0.532208
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
class AveragePooling2D(pooling_2d.Pooling2D):
def forward_cpu(self, x):
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,
self.ph, self.pw)
y = col.mean(axis=(2, 3))
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto'):
self.retain_inputs((0,))
return super(AveragePooling2D, self).forward_gpu(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)
coeff = 1. / (self.kh * self.kw)
kern = cuda.elementwise(
'raw T in, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T out', '''
int c0 = i / (out_h * out_w);
int out_y = i / out_w % out_h;
int out_x = i % out_w;
int in_y_0 = max(0, out_y * sy - ph);
int in_y_1 = min(h, out_y * sy + kh - ph);
int in_x_0 = max(0, out_x * sx - pw);
int in_x_1 = min(w, out_x * sx + kw - pw);
T val = 0;
for (int y = in_y_0; y < in_y_1; ++y) {
int offset_y = w * (y + h * c0);
for (int x = in_x_0; x < in_x_1; ++x) {
val = val + in[x + offset_y];
}
}
out = val * coeff;
''', 'avg_pool_fwd')
kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff, y)
return y,
def backward(self, indexes, gy):
return AveragePooling2DGrad(self).apply(gy)
def create_pool_desc(self):
return cuda.cudnn.create_pooling_descriptor(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
class AveragePooling2DGrad(function_node.FunctionNode):
def __init__(self, apool2d):
self.kh = apool2d.kh
self.kw = apool2d.kw
self.sy = apool2d.sy
self.sx = apool2d.sx
self.ph = apool2d.ph
self.pw = apool2d.pw
self._used_cudnn = apool2d._used_cudnn
if not self._used_cudnn:
self._in_shape = apool2d._in_shape
self._in_dtype = apool2d._in_dtype
self.apool2d = apool2d
def forward_cpu(self, gy):
h, w = self._in_shape[2:]
gcol = numpy.tile(gy[0][:, :, None, None],
(1, 1, self.kh, self.kw, 1, 1))
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
gx /= self.kh * self.kw
return gx,
def forward_gpu(self, gy):
if self._used_cudnn:
x, = self.apool2d.get_retained_inputs()
return self.apool2d.backward_gpu((x.data,), gy)
n, c, h, w = self._in_shape
y_h, y_w = gy[0].shape[2:]
gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
coeff = 1. / (self.kh * self.kw)
cuda.elementwise(
'raw T gy, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T gx',
'''
int c0 = i / (h * w);
int y = i / w % h + ph;
int x = i % w + pw;
int out_y_0 = max(0, (y - kh + sy) / sy);
int out_y_1 = min(out_h, (y + sy) / sy);
int out_x_0 = max(0, (x - kw + sx) / sx);
int out_x_1 = min(out_w, (x + sx) / sx);
int hc0 = out_h * c0;
T val = 0;
for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {
for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {
val = val + gy[out_x + out_w * (out_y + hc0)];
}
}
gx = val * coeff;
''', 'avg_pool_bwd')(gy[0].reduced_view(),
h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff,
gx)
return gx,
def backward(self, indexes, grad_outputs):
return AveragePooling2D(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
False).apply(grad_outputs)
def average_pooling_2d(x, ksize, stride=None, pad=0):
return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]
| true
| true
|
f70bac85c3a38d428186f259f52a297e8e68a3f2
| 632
|
py
|
Python
|
app/forms.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
app/forms.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
app/forms.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/5 19:35
# @Author : cendeavor
# @File : forms.py
# @Software: PyCharm
from flask_wtf import Form, FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Required, DataRequired, EqualTo
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('注册')
class LoginForm(FlaskForm):
"""登录表单类"""
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
submit = SubmitField('登录')
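# A hypothetical Flask view sketch showing how LoginForm is typically
# consumed; the app, route, and template names are illustrative only.
from flask import Flask, render_template

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'  # Flask-WTF needs a key for CSRF tokens

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():  # POST request and all validators passed
        print(form.username.data)  # handle the credentials here
    return render_template('login.html', form=form)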
| 27.478261
| 69
| 0.693038
|
from flask_wtf import Form, FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Required, DataRequired, EqualTo
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('注册')
class LoginForm(FlaskForm):
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
submit = SubmitField('登录')
| true
| true
|
f70bacb5720192a7e432abae6a62ee6600ea78fe
| 13,872
|
py
|
Python
|
label_script/label script.py
|
rechardchen123/tensorflow_understand
|
aa271efc7253bd273ce8f7ac76eb50ebb68a4534
|
[
"Apache-2.0"
] | null | null | null |
label_script/label script.py
|
rechardchen123/tensorflow_understand
|
aa271efc7253bd273ce8f7ac76eb50ebb68a4534
|
[
"Apache-2.0"
] | null | null | null |
label_script/label script.py
|
rechardchen123/tensorflow_understand
|
aa271efc7253bd273ce8f7ac76eb50ebb68a4534
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from tkinter import *
from tkinter import messagebox as tkMessageBox  # Python 3 home of tkMessageBox
from PIL import Image, ImageTk
import os
import glob
import random
w0 = 1  # original image width
h0 = 1  # original image height
# colors for the bboxes
COLORS = ['red','blue','yellow','pink','cyan','green','black']
#image size
SIZE = 256,256
# target image size after scaling
DEST_SIZE = 500,500
class LabelTool():
def __init__(self,master):
#set up the main frame
self.parent = master
self.parent.title('LabelTool')
self.frame = Frame(self.parent)
self.frame.pack(fill=BOTH,expand=1)
self.parent.resizable(width=TRUE,height=TRUE)
#initialize global state
self.imageDir = ''
self.imageList = []
self.egDir = ''
self.egList = []
self.outDir =''
self.cur = 0
self.total = 0
self.category =0
self.imagename=''
self.labelfilename=''
self.tkimg = None
# initialize mouse state
self.STATE={}
self.STATE['click']=0
self.STATE['x'],self.STATE['y']=0,0
#reference to bbox
self.bboxIdList = []
self.bboxId = None
self.bboxList = []
self.hl=None
self.vl=None
# ----------------- GUI stuff ---------------------
# dir entry & load
self.label = Label(self.frame,text='Image Dir:')
self.label.grid(row=0,column=0,sticky=E)
self.entry=Entry(self.frame)
self.entry.grid(row=0, column=1, sticky=W + E)
self.ldBtn = Button(self.frame, text="Load", command=self.loadDir)
self.ldBtn.grid(row=0, column=2, sticky=W + E)
# main panel for labeling
self.mainPanel = Canvas(self.frame, cursor='tcross')
self.mainPanel.bind("<Button-1>", self.mouseClick)
self.mainPanel.bind("<Motion>", self.mouseMove)
self.parent.bind("<Escape>", self.cancelBBox) # press <Espace> to cancel current bbox
self.parent.bind("s", self.cancelBBox)
self.parent.bind("a", self.prevImage) # press 'a' to go backforward
self.parent.bind("d", self.nextImage) # press 'd' to go forward
self.mainPanel.grid(row=1, column=1, rowspan=4, sticky=W + N)
# showing bbox info & delete bbox
self.lb1 = Label(self.frame, text='Bounding boxes:')
self.lb1.grid(row=1, column=2, sticky=W + N)
self.listbox = Listbox(self.frame, width=28, height=12)
self.listbox.grid(row=2, column=2, sticky=N)
self.btnDel = Button(self.frame, text='Delete', command=self.delBBox)
self.btnDel.grid(row=3, column=2, sticky=W + E + N)
self.btnClear = Button(self.frame, text='ClearAll', command=self.clearBBox)
self.btnClear.grid(row=4, column=2, sticky=W + E + N)
# control panel for image navigation
self.ctrPanel = Frame(self.frame)
self.ctrPanel.grid(row=5, column=1, columnspan=2, sticky=W + E)
self.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10, command=self.prevImage)
self.prevBtn.pack(side=LEFT, padx=5, pady=3)
self.nextBtn = Button(self.ctrPanel, text='Next >>', width=10, command=self.nextImage)
self.nextBtn.pack(side=LEFT, padx=5, pady=3)
self.progLabel = Label(self.ctrPanel, text="Progress: / ")
self.progLabel.pack(side=LEFT, padx=5)
self.tmpLabel = Label(self.ctrPanel, text="Go to Image No.")
self.tmpLabel.pack(side=LEFT, padx=5)
self.idxEntry = Entry(self.ctrPanel, width=5)
self.idxEntry.pack(side=LEFT)
self.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)
self.goBtn.pack(side=LEFT)
        # example panel for illustration
self.egPanel = Frame(self.frame, border=10)
self.egPanel.grid(row=1, column=0, rowspan=5, sticky=N)
self.tmpLabel2 = Label(self.egPanel, text="Examples:")
self.tmpLabel2.pack(side=TOP, pady=5)
self.egLabels = []
for i in range(3):
self.egLabels.append(Label(self.egPanel))
self.egLabels[-1].pack(side=TOP)
# display mouse position
self.disp = Label(self.ctrPanel, text='')
self.disp.pack(side=RIGHT)
self.frame.columnconfigure(1, weight=1)
self.frame.rowconfigure(4, weight=1)
def loadDir(self,dbg=False):
if not dbg:
s = self.entry.get()
self.parent.focus()
self.category=int(s)
else:
s = r'D:\Data store file\labelGUI'
print('self.category =%d' % (self.category))
self.imageDir = os.path.join(r'./images', '%03d' % (self.category))
print(self.imageDir)
self.imageList = glob.glob(os.path.join(self.imageDir, '*.jpg'))
        if len(self.imageList) == 0:
            print('No .jpg images found in the specified dir!')
            return
        else:
            print('num=%d' % (len(self.imageList)))
# default to the 1st image in the collection
self.cur = 1
self.total = len(self.imageList)
# set up output dir
self.outDir = os.path.join(r'./labels', '%03d' % (self.category))
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
# load example bboxes
self.egDir = os.path.join(r'./Examples', '%03d' % (self.category))
# if not os.path.exists(self.egDir):
# return
filelist = glob.glob(os.path.join(self.egDir, '*.jpg'))
self.tmp = []
self.egList = []
random.shuffle(filelist)
for (i, f) in enumerate(filelist):
if i == 3:
break
im = Image.open(f)
r = min(SIZE[0] / im.size[0], SIZE[1] / im.size[1])
new_size = int(r * im.size[0]), int(r * im.size[1])
self.tmp.append(im.resize(new_size, Image.ANTIALIAS))
self.egList.append(ImageTk.PhotoImage(self.tmp[-1]))
self.egLabels[i].config(image=self.egList[-1], width=SIZE[0], height=SIZE[1])
self.loadImage()
        print('%d images loaded from %s' % (self.total, s))
def loadImage(self):
# load image
imagepath = self.imageList[self.cur - 1]
pil_image = Image.open(imagepath)
global w0,h0
w0,h0=pil_image.size
        # resize to the target display size
pil_image = pil_image.resize((DEST_SIZE[0], DEST_SIZE[1]), Image.ANTIALIAS)
# pil_image = imgresize(w, h, w_box, h_box, pil_image)
self.img = pil_image
self.tkimg = ImageTk.PhotoImage(pil_image)
self.mainPanel.config(width=max(self.tkimg.width(), 400), height=max(self.tkimg.height(), 400))
self.mainPanel.create_image(0, 0, image=self.tkimg, anchor=NW)
self.progLabel.config(text="%04d/%04d" % (self.cur, self.total))
# load labels
self.clearBBox()
self.imagename = os.path.split(imagepath)[-1].split('.')[0]
labelname = self.imagename + '.txt'
self.labelfilename = os.path.join(self.outDir, labelname)
bbox_cnt = 0
if os.path.exists(self.labelfilename):
with open(self.labelfilename) as f:
for (i, line) in enumerate(f):
if i == 0:
bbox_cnt = int(line.strip())
continue
                    print(line)
                    tmp = [(t.strip()) for t in line.split()]
                    print("********************")
                    print(DEST_SIZE)
                    # tmp = (0.1, 0.3, 0.5, 0.5)
                    print("tmp[0,1,2,3]===%.2f, %.2f, %.2f, %.2f" % (float(tmp[0]), float(tmp[1]), float(tmp[2]), float(tmp[3])))
                    # print("%.2f,%.2f,%.2f,%.2f" % (tmp[0], tmp[1], tmp[2], tmp[3]))
                    print("********************")
# tx = (10, 20, 30, 40)
# self.bboxList.append(tuple(tx))
self.bboxList.append(tuple(tmp))
tmp[0] = float(tmp[0])
tmp[1] = float(tmp[1])
tmp[2] = float(tmp[2])
tmp[3] = float(tmp[3])
tx0 = int(tmp[0] * DEST_SIZE[0])
ty0 = int(tmp[1] * DEST_SIZE[1])
tx1 = int(tmp[2] * DEST_SIZE[0])
ty1 = int(tmp[3] * DEST_SIZE[1])
                    print("tx0, ty0, tx1, ty1")
                    print(tx0, ty0, tx1, ty1)
tmpId = self.mainPanel.create_rectangle(tx0, ty0, tx1, ty1, \
width=2, \
outline=COLORS[(len(self.bboxList) - 1) % len(COLORS)])
self.bboxIdList.append(tmpId)
self.listbox.insert(END, '(%.2f,%.2f)-(%.2f,%.2f)' % (tmp[0], tmp[1], tmp[2], tmp[3]))
# self.listbox.insert(END, '(%d, %d) -> (%d, %d)' %(tmp[0], tmp[1], tmp[2], tmp[3]))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
def saveImage(self):
# print "-----1--self.bboxList---------"
print
self.bboxList
# print "-----2--self.bboxList---------"
with open(self.labelfilename, 'w') as f:
f.write('%d\n' % len(self.bboxList))
for bbox in self.bboxList:
f.write(' '.join(map(str, bbox)) + '\n')
print('Image No. %d saved' % (self.cur))
def mouseClick(self, event):
if self.STATE['click'] == 0:
self.STATE['x'], self.STATE['y'] = event.x, event.y
else:
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
x1, x2 = x1 / DEST_SIZE[0], x2 / DEST_SIZE[0];
y1, y2 = y1 / DEST_SIZE[1], y2 / DEST_SIZE[1];
self.bboxList.append((x1, y1, x2, y2))
self.bboxIdList.append(self.bboxId)
self.bboxId = None
self.listbox.insert(END, '(%.2f, %.2f)-(%.2f, %.2f)' % (x1, y1, x2, y2))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
self.STATE['click'] = 1 - self.STATE['click']
def mouseMove(self, event):
self.disp.config(text='x: %.2f, y: %.2f' % (event.x / DEST_SIZE[0], event.y / DEST_SIZE[1]))
if self.tkimg:
if self.hl:
self.mainPanel.delete(self.hl)
self.hl = self.mainPanel.create_line(0, event.y, self.tkimg.width(), event.y, width=2)
if self.vl:
self.mainPanel.delete(self.vl)
self.vl = self.mainPanel.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = self.mainPanel.create_rectangle(self.STATE['x'], self.STATE['y'], \
event.x, event.y, \
width=2, \
outline=COLORS[len(self.bboxList) % len(COLORS)])
def cancelBBox(self, event):
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
def delBBox(self):
sel = self.listbox.curselection()
if len(sel) != 1:
return
idx = int(sel[0])
self.mainPanel.delete(self.bboxIdList[idx])
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.listbox.delete(idx)
def clearBBox(self):
for idx in range(len(self.bboxIdList)):
self.mainPanel.delete(self.bboxIdList[idx])
self.listbox.delete(0, len(self.bboxList))
self.bboxIdList = []
self.bboxList = []
def prevImage(self, event=None):
self.saveImage()
if self.cur > 1:
self.cur -= 1
self.loadImage()
def nextImage(self, event=None):
self.saveImage()
if self.cur < self.total:
self.cur += 1
self.loadImage()
def gotoImage(self):
idx = int(self.idxEntry.get())
if 1 <= idx and idx <= self.total:
self.saveImage()
self.cur = idx
self.loadImage()
## def setImage(self, imagepath = r'test2.png'):
## self.img = Image.open(imagepath)
## self.tkimg = ImageTk.PhotoImage(self.img)
## self.mainPanel.config(width = self.tkimg.width())
## self.mainPanel.config(height = self.tkimg.height())
## self.mainPanel.create_image(0, 0, image = self.tkimg, anchor=NW)
def imgresize(w, h, w_box, h_box, pil_image):
'''
resize a pil_image object so it will fit into
a box of size w_box times h_box, but retain aspect ratio
'''
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width = int(w * factor)
height = int(h * factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
if __name__ == '__main__':
root = Tk()
tool = LabelTool(root)
root.mainloop()
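# A quick check of the scaling math in imgresize above (hypothetical sizes):
from PIL import Image

im = Image.new('RGB', (1000, 400))
out = imgresize(1000, 400, 500, 500, im)
print(out.size)  # (500, 200): factor = min(500/1000, 500/400) = 0.5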
| 35.118987
| 118
| 0.517445
|
from __future__ import division
from tkinter import *
from tkinter import messagebox as tkMessageBox
from PIL import Image, ImageTk
import os
import glob
import random
w0 = 1
h0 = 1
COLORS = ['red','blue','yellow','pink','cyan','green','black']
SIZE = 256,256
DEST_SIZE = 500,500
class LabelTool():
def __init__(self,master):
self.parent = master
self.parent.title('LabelTool')
self.frame = Frame(self.parent)
self.frame.pack(fill=BOTH,expand=1)
self.parent.resizable(width=TRUE,height=TRUE)
self.imageDir = ''
self.imageList = []
self.egDir = ''
self.egList = []
self.outDir =''
self.cur = 0
self.total = 0
self.category =0
self.imagename=''
self.labelfilename=''
self.tkimg = None
self.STATE={}
self.STATE['click']=0
self.STATE['x'],self.STATE['y']=0,0
self.bboxIdList = []
self.bboxId = None
self.bboxList = []
self.hl=None
self.vl=None
self.label = Label(self.frame,text='Image Dir:')
self.label.grid(row=0,column=0,sticky=E)
self.entry=Entry(self.frame)
self.entry.grid(row=0, column=1, sticky=W + E)
self.ldBtn = Button(self.frame, text="Load", command=self.loadDir)
self.ldBtn.grid(row=0, column=2, sticky=W + E)
self.mainPanel = Canvas(self.frame, cursor='tcross')
self.mainPanel.bind("<Button-1>", self.mouseClick)
self.mainPanel.bind("<Motion>", self.mouseMove)
self.parent.bind("<Escape>", self.cancelBBox)
self.parent.bind("s", self.cancelBBox)
self.parent.bind("a", self.prevImage)
self.parent.bind("d", self.nextImage)
self.mainPanel.grid(row=1, column=1, rowspan=4, sticky=W + N)
self.lb1 = Label(self.frame, text='Bounding boxes:')
self.lb1.grid(row=1, column=2, sticky=W + N)
self.listbox = Listbox(self.frame, width=28, height=12)
self.listbox.grid(row=2, column=2, sticky=N)
self.btnDel = Button(self.frame, text='Delete', command=self.delBBox)
self.btnDel.grid(row=3, column=2, sticky=W + E + N)
self.btnClear = Button(self.frame, text='ClearAll', command=self.clearBBox)
self.btnClear.grid(row=4, column=2, sticky=W + E + N)
self.ctrPanel = Frame(self.frame)
self.ctrPanel.grid(row=5, column=1, columnspan=2, sticky=W + E)
self.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10, command=self.prevImage)
self.prevBtn.pack(side=LEFT, padx=5, pady=3)
self.nextBtn = Button(self.ctrPanel, text='Next >>', width=10, command=self.nextImage)
self.nextBtn.pack(side=LEFT, padx=5, pady=3)
self.progLabel = Label(self.ctrPanel, text="Progress: / ")
self.progLabel.pack(side=LEFT, padx=5)
self.tmpLabel = Label(self.ctrPanel, text="Go to Image No.")
self.tmpLabel.pack(side=LEFT, padx=5)
self.idxEntry = Entry(self.ctrPanel, width=5)
self.idxEntry.pack(side=LEFT)
self.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)
self.goBtn.pack(side=LEFT)
self.egPanel = Frame(self.frame, border=10)
self.egPanel.grid(row=1, column=0, rowspan=5, sticky=N)
self.tmpLabel2 = Label(self.egPanel, text="Examples:")
self.tmpLabel2.pack(side=TOP, pady=5)
self.egLabels = []
for i in range(3):
self.egLabels.append(Label(self.egPanel))
self.egLabels[-1].pack(side=TOP)
self.disp = Label(self.ctrPanel, text='')
self.disp.pack(side=RIGHT)
self.frame.columnconfigure(1, weight=1)
self.frame.rowconfigure(4, weight=1)
def loadDir(self,dbg=False):
if not dbg:
s = self.entry.get()
self.parent.focus()
self.category=int(s)
else:
s = r'D:\Data store file\labelGUI'
print('self.category =%d' % (self.category))
self.imageDir = os.path.join(r'./images', '%03d' % (self.category))
print(self.imageDir)
self.imageList = glob.glob(os.path.join(self.imageDir, '*.jpg'))
if len(self.imageList) == 0:
            print('No .jpg images found in the specified dir!')
            return
        else:
            print('num=%d' % (len(self.imageList)))
self.cur = 1
self.total = len(self.imageList)
self.outDir = os.path.join(r'./labels', '%03d' % (self.category))
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
self.egDir = os.path.join(r'./Examples', '%03d' % (self.category))
filelist = glob.glob(os.path.join(self.egDir, '*.jpg'))
self.tmp = []
self.egList = []
random.shuffle(filelist)
for (i, f) in enumerate(filelist):
if i == 3:
break
im = Image.open(f)
r = min(SIZE[0] / im.size[0], SIZE[1] / im.size[1])
new_size = int(r * im.size[0]), int(r * im.size[1])
self.tmp.append(im.resize(new_size, Image.ANTIALIAS))
self.egList.append(ImageTk.PhotoImage(self.tmp[-1]))
self.egLabels[i].config(image=self.egList[-1], width=SIZE[0], height=SIZE[1])
self.loadImage()
        print('%d images loaded from %s' % (self.total, s))
def loadImage(self):
imagepath = self.imageList[self.cur - 1]
pil_image = Image.open(imagepath)
global w0,h0
w0,h0=pil_image.size
pil_image = pil_image.resize((DEST_SIZE[0], DEST_SIZE[1]), Image.ANTIALIAS)
self.img = pil_image
self.tkimg = ImageTk.PhotoImage(pil_image)
self.mainPanel.config(width=max(self.tkimg.width(), 400), height=max(self.tkimg.height(), 400))
self.mainPanel.create_image(0, 0, image=self.tkimg, anchor=NW)
self.progLabel.config(text="%04d/%04d" % (self.cur, self.total))
self.clearBBox()
self.imagename = os.path.split(imagepath)[-1].split('.')[0]
labelname = self.imagename + '.txt'
self.labelfilename = os.path.join(self.outDir, labelname)
bbox_cnt = 0
if os.path.exists(self.labelfilename):
with open(self.labelfilename) as f:
for (i, line) in enumerate(f):
if i == 0:
bbox_cnt = int(line.strip())
continue
                    print(line)
                    tmp = [(t.strip()) for t in line.split()]
                    print("********************")
                    print(DEST_SIZE)
                    print("tmp[0,1,2,3]===%.2f, %.2f, %.2f, %.2f" % (float(tmp[0]), float(tmp[1]), float(tmp[2]), float(tmp[3])))
                    print("********************")
self.bboxList.append(tuple(tmp))
tmp[0] = float(tmp[0])
tmp[1] = float(tmp[1])
tmp[2] = float(tmp[2])
tmp[3] = float(tmp[3])
tx0 = int(tmp[0] * DEST_SIZE[0])
ty0 = int(tmp[1] * DEST_SIZE[1])
tx1 = int(tmp[2] * DEST_SIZE[0])
ty1 = int(tmp[3] * DEST_SIZE[1])
                    print("tx0, ty0, tx1, ty1")
                    print(tx0, ty0, tx1, ty1)
tmpId = self.mainPanel.create_rectangle(tx0, ty0, tx1, ty1, \
width=2, \
outline=COLORS[(len(self.bboxList) - 1) % len(COLORS)])
self.bboxIdList.append(tmpId)
self.listbox.insert(END, '(%.2f,%.2f)-(%.2f,%.2f)' % (tmp[0], tmp[1], tmp[2], tmp[3]))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
def saveImage(self):
        print(self.bboxList)
with open(self.labelfilename, 'w') as f:
f.write('%d\n' % len(self.bboxList))
for bbox in self.bboxList:
f.write(' '.join(map(str, bbox)) + '\n')
print('Image No. %d saved' % (self.cur))
def mouseClick(self, event):
if self.STATE['click'] == 0:
self.STATE['x'], self.STATE['y'] = event.x, event.y
else:
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
x1, x2 = x1 / DEST_SIZE[0], x2 / DEST_SIZE[0];
y1, y2 = y1 / DEST_SIZE[1], y2 / DEST_SIZE[1];
self.bboxList.append((x1, y1, x2, y2))
self.bboxIdList.append(self.bboxId)
self.bboxId = None
self.listbox.insert(END, '(%.2f, %.2f)-(%.2f, %.2f)' % (x1, y1, x2, y2))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
self.STATE['click'] = 1 - self.STATE['click']
def mouseMove(self, event):
self.disp.config(text='x: %.2f, y: %.2f' % (event.x / DEST_SIZE[0], event.y / DEST_SIZE[1]))
if self.tkimg:
if self.hl:
self.mainPanel.delete(self.hl)
self.hl = self.mainPanel.create_line(0, event.y, self.tkimg.width(), event.y, width=2)
if self.vl:
self.mainPanel.delete(self.vl)
self.vl = self.mainPanel.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = self.mainPanel.create_rectangle(self.STATE['x'], self.STATE['y'], \
event.x, event.y, \
width=2, \
outline=COLORS[len(self.bboxList) % len(COLORS)])
def cancelBBox(self, event):
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
def delBBox(self):
sel = self.listbox.curselection()
if len(sel) != 1:
return
idx = int(sel[0])
self.mainPanel.delete(self.bboxIdList[idx])
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.listbox.delete(idx)
def clearBBox(self):
for idx in range(len(self.bboxIdList)):
self.mainPanel.delete(self.bboxIdList[idx])
self.listbox.delete(0, len(self.bboxList))
self.bboxIdList = []
self.bboxList = []
def prevImage(self, event=None):
self.saveImage()
if self.cur > 1:
self.cur -= 1
self.loadImage()
def nextImage(self, event=None):
self.saveImage()
if self.cur < self.total:
self.cur += 1
self.loadImage()
def gotoImage(self):
idx = int(self.idxEntry.get())
if 1 <= idx and idx <= self.total:
self.saveImage()
self.cur = idx
self.loadImage()
if __name__ == '__main__':
root = Tk()
tool = LabelTool(root)
root.mainloop()
| true
| true
|
f70bace2128f09dc26c0dcf8ff0f701cb867a582
| 8,016
|
py
|
Python
|
adafruit_pyportal/network.py
|
jposada202020/Adafruit_CircuitPython_PyPortal
|
2c09cc1ba6130de03f3f6d2643af5fcc6c82bb8e
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
adafruit_pyportal/network.py
|
jposada202020/Adafruit_CircuitPython_PyPortal
|
2c09cc1ba6130de03f3f6d2643af5fcc6c82bb8e
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
adafruit_pyportal/network.py
|
jposada202020/Adafruit_CircuitPython_PyPortal
|
2c09cc1ba6130de03f3f6d2643af5fcc6c82bb8e
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_pyportal.network`
================================================================================
CircuitPython driver for Adafruit PyPortal.
* Author(s): Limor Fried, Kevin J. Walters, Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit PyPortal <https://www.adafruit.com/product/4116>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
# pylint: disable=unused-import
from adafruit_portalbase.network import (
NetworkBase,
CONTENT_JSON,
CONTENT_TEXT,
)
# pylint: enable=unused-import
from adafruit_pyportal.wifi import WiFi
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyPortal.git"
# you'll need to pass in an io username, width, height, format (bit depth), io key, and then url!
IMAGE_CONVERTER_SERVICE = (
"https://io.adafruit.com/api/v2/%s/integrations/image-formatter?"
"x-aio-key=%s&width=%d&height=%d&output=BMP%d&url=%s"
)
class Network(NetworkBase):
"""Class representing the Adafruit PyPortal.
:param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the on-board
NeoPixel. Defaults to ``None``, not the status LED
    :param esp: A passed ESP32 object; can be used in cases where the ESP32 chip needs to be used
        before calling the PyPortal class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
:param bool extract_values: If true, single-length fetched values are automatically extracted
from lists and tuples. Defaults to ``True``.
:param debug: Turn on debug print outs. Defaults to False.
:param convert_image: Determine whether or not to use the AdafruitIO image converter service.
Set as False if your image is already resized. Defaults to True.
:param image_url_path: The HTTP traversal path for a background image to display.
Defaults to ``None``.
:param image_json_path: The JSON traversal path for a background image to display. Defaults to
``None``.
:param image_resize: What size to resize the image we got from the json_path, make this a tuple
of the width and height you want. Defaults to ``None``.
:param image_position: The position of the image on the display as an (x, y) tuple. Defaults to
``None``.
:param image_dim_json_path: The JSON traversal path for the original dimensions of image tuple.
Used with fetch(). Defaults to ``None``.
"""
def __init__(
self,
*,
status_neopixel=None,
esp=None,
external_spi=None,
extract_values=True,
debug=False,
convert_image=True,
image_url_path=None,
image_json_path=None,
image_resize=None,
image_position=None,
image_dim_json_path=None,
secrets_data=None,
):
wifi = WiFi(status_neopixel=status_neopixel, esp=esp, external_spi=external_spi)
super().__init__(
wifi,
extract_values=extract_values,
debug=debug,
secrets_data=secrets_data,
)
self._convert_image = convert_image
self._image_json_path = image_json_path
self._image_url_path = image_url_path
self._image_resize = image_resize
self._image_position = image_position
self._image_dim_json_path = image_dim_json_path
gc.collect()
@property
def ip_address(self):
"""Return the IP Address nicely formatted"""
return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
def image_converter_url(self, image_url, width, height, color_depth=16):
"""Generate a converted image url from the url passed in,
with the given width and height. aio_username and aio_key must be
set in secrets."""
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError as error:
raise KeyError(
"\n\nOur image converter service require a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'" # pylint: disable=line-too-long
) from error
return IMAGE_CONVERTER_SERVICE % (
aio_username,
aio_key,
width,
height,
color_depth,
image_url,
)
# pylint: disable=too-many-branches, too-many-statements
def process_image(self, json_data, sd_card=False):
"""
Process image content
:param json_data: The JSON data that we can pluck values from
:param bool sd_card: Whether or not we have an SD card inserted
"""
filename = None
position = None
image_url = None
if self._image_url_path:
image_url = self._image_url_path
if self._image_json_path:
image_url = self.json_traverse(json_data, self._image_json_path)
iwidth = 0
iheight = 0
if self._image_dim_json_path:
iwidth = int(self.json_traverse(json_data, self._image_dim_json_path[0]))
iheight = int(self.json_traverse(json_data, self._image_dim_json_path[1]))
print("image dim:", iwidth, iheight)
if image_url:
print("original URL:", image_url)
if self._convert_image:
if iwidth < iheight:
image_url = self.image_converter_url(
image_url,
int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
),
self._image_resize[1],
)
else:
image_url = self.image_converter_url(
image_url, self._image_resize[0], self._image_resize[1]
)
print("convert URL:", image_url)
# convert image to bitmap and cache
# print("**not actually wgetting**")
filename = "/cache.bmp"
chunk_size = 4096 # default chunk size is 12K (for QSPI)
if sd_card:
filename = "/sd" + filename
chunk_size = 512 # current bug in big SD writes -> stick to 1 block
try:
self.wget(image_url, filename, chunk_size=chunk_size)
except OSError as error:
raise OSError(
"""\n\nNo writable filesystem found for saving datastream. Insert an SD card or set internal filesystem to be unsafe by setting 'disable_concurrent_write_protection' in the mount options in boot.py""" # pylint: disable=line-too-long
) from error
except RuntimeError as error:
raise RuntimeError("wget didn't write a complete file") from error
if iwidth < iheight:
pwidth = int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
)
position = (
self._image_position[0] + int((self._image_resize[0] - pwidth) / 2),
self._image_position[1],
)
else:
position = self._image_position
image_url = None
gc.collect()
return filename, position
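# A sketch of the URL that image_converter_url assembles from the module
# constant above; the credentials and image URL are placeholders.
url = IMAGE_CONVERTER_SERVICE % (
    "example_user", "example_key", 320, 240, 16,
    "http://example.com/photo.jpg",
)
# -> https://io.adafruit.com/api/v2/example_user/integrations/image-formatter?
#    x-aio-key=example_key&width=320&height=240&output=BMP16
#    &url=http://example.com/photo.jpg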
| 38.171429
| 253
| 0.594561
|
import gc
from adafruit_portalbase.network import (
NetworkBase,
CONTENT_JSON,
CONTENT_TEXT,
)
from adafruit_pyportal.wifi import WiFi
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyPortal.git"
IMAGE_CONVERTER_SERVICE = (
"https://io.adafruit.com/api/v2/%s/integrations/image-formatter?"
"x-aio-key=%s&width=%d&height=%d&output=BMP%d&url=%s"
)
class Network(NetworkBase):
def __init__(
self,
*,
status_neopixel=None,
esp=None,
external_spi=None,
extract_values=True,
debug=False,
convert_image=True,
image_url_path=None,
image_json_path=None,
image_resize=None,
image_position=None,
image_dim_json_path=None,
secrets_data=None,
):
wifi = WiFi(status_neopixel=status_neopixel, esp=esp, external_spi=external_spi)
super().__init__(
wifi,
extract_values=extract_values,
debug=debug,
secrets_data=secrets_data,
)
self._convert_image = convert_image
self._image_json_path = image_json_path
self._image_url_path = image_url_path
self._image_resize = image_resize
self._image_position = image_position
self._image_dim_json_path = image_dim_json_path
gc.collect()
@property
def ip_address(self):
return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
def image_converter_url(self, image_url, width, height, color_depth=16):
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError as error:
raise KeyError(
"\n\nOur image converter service require a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'" # pylint: disable=line-too-long
) from error
return IMAGE_CONVERTER_SERVICE % (
aio_username,
aio_key,
width,
height,
color_depth,
image_url,
)
# pylint: disable=too-many-branches, too-many-statements
def process_image(self, json_data, sd_card=False):
filename = None
position = None
image_url = None
if self._image_url_path:
image_url = self._image_url_path
if self._image_json_path:
image_url = self.json_traverse(json_data, self._image_json_path)
iwidth = 0
iheight = 0
if self._image_dim_json_path:
iwidth = int(self.json_traverse(json_data, self._image_dim_json_path[0]))
iheight = int(self.json_traverse(json_data, self._image_dim_json_path[1]))
print("image dim:", iwidth, iheight)
if image_url:
print("original URL:", image_url)
if self._convert_image:
if iwidth < iheight:
image_url = self.image_converter_url(
image_url,
int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
),
self._image_resize[1],
)
else:
image_url = self.image_converter_url(
image_url, self._image_resize[0], self._image_resize[1]
)
print("convert URL:", image_url)
# convert image to bitmap and cache
# print("**not actually wgetting**")
filename = "/cache.bmp"
chunk_size = 4096 # default chunk size is 12K (for QSPI)
if sd_card:
filename = "/sd" + filename
chunk_size = 512 # current bug in big SD writes -> stick to 1 block
try:
self.wget(image_url, filename, chunk_size=chunk_size)
except OSError as error:
raise OSError(
"""\n\nNo writable filesystem found for saving datastream. Insert an SD card or set internal filesystem to be unsafe by setting 'disable_concurrent_write_protection' in the mount options in boot.py""" # pylint: disable=line-too-long
) from error
except RuntimeError as error:
raise RuntimeError("wget didn't write a complete file") from error
if iwidth < iheight:
pwidth = int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
)
position = (
self._image_position[0] + int((self._image_resize[0] - pwidth) / 2),
self._image_position[1],
)
else:
position = self._image_position
image_url = None
gc.collect()
return filename, position
| true
| true
|
f70bad7ea69e067caab69fe4854350f67d504f31
| 5,718
|
py
|
Python
|
src/sadie/renumbering/result.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
src/sadie/renumbering/result.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
src/sadie/renumbering/result.py
|
jwillis0720/pybody
|
2d7c68650ac1ef5f3003ccb67171898eac1f63eb
|
[
"MIT"
] | null | null | null |
import logging
import pandas as pd
from ast import literal_eval
from .constants import NUMBERING_RESULTS
from sadie.numbering.scheme_numbering import scheme_numbering
logger = logging.getLogger("NUMBERING")
class NumberingResults(pd.DataFrame):
def __init__(self, *args, scheme="", region_definition="", allowed_chains=[], allowed_species=[], **kwargs):
# use the __init__ method from DataFrame to ensure
# that we're inheriting the correct behavior
super(NumberingResults, self).__init__(*args, **kwargs)
# self["scheme"] = scheme
# self["region_definition"] = region_definition
# self["allowed_species"] = ",".join(allowed_species)
# self["allowed_chains"] = ",".join(allowed_chains)
# self._add_segment_regions()
@property
def _constructor(self):
return NumberingResults
def get_alignment_table(self) -> pd.DataFrame:
"""Get a numbered alignment table from the numbering and insertions
Returns
-------
pd.DataFrame
A dataframe with Id, chain_type, scheme and numbering. Values are the amino acid sequences
"""
all_dataframes = []
        # I'm not sure if there is a more efficient way to do this other than iterating through the df and pivoting each row
for index in range(len(self)):
all_dataframes.append(self._pivot_alignment(self.iloc[index]))
all_dataframes = pd.concat(all_dataframes)
all_dataframes = all_dataframes.sort_index(axis=1, level=[0, 1])
all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))
all_dataframes = all_dataframes.reset_index()
return self[["Id", "chain_type", "scheme"]].merge(all_dataframes, on="Id").copy()
def _get_region(self, row, start, end, segment_name):
with_segment = "".join(
list(
map(
lambda x: x[-1],
list(
filter(
lambda x: x[0] >= start and x[0] <= end,
list(
zip(
row["Numbering"],
row["Insertion"],
row["Numbered_Sequence"],
)
),
)
),
)
)
)
without_segment = with_segment.replace("-", "")
return pd.Series(
{
f"{segment_name}_gaps": with_segment,
f"{segment_name}_no_gaps": without_segment,
}
)
def _add_segment_regions(self) -> "NumberingResults":
"""Private method to delineate the framework and cdr boundaries from the numbering
Returns
-------
NumberingResults
Instance of NumberingResults
"""
return_frames = []
for group, sub_df in self.groupby(["scheme", "region_definition", "Chain"]):
numbering = group[0]
chain = {"H": "heavy", "KL": "light"}[group[-1]]
boundaries = group[1]
numbering_lookup = scheme_numbering[numbering][chain][boundaries]
for region in [
"fwr1_aa",
"cdr1_aa",
"fwr2_aa",
"cdr2_aa",
"fwr3_aa",
"cdr3_aa",
"fwr4_aa",
]:
_start = numbering_lookup[f"{region}_start"]
_end = numbering_lookup[f"{region}_end"]
sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))
return_frames.append(sub_df)
segmented_df = pd.concat(return_frames).reset_index(drop=True)
# everything preceding the antibody
segmented_df["leader"] = segmented_df[["sequence", "seqstart_index"]].apply(lambda x: x[0][: x[1]], axis=1)
# everything following the antibody. keyword tail will clash with pandas
segmented_df["follow"] = segmented_df[["sequence", "seqend_index"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)
return segmented_df
def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:
"""Private method to pivot a segmented row into an alignment series
Parameters
----------
row : pd.Series
            individual Numbering result row
Returns
-------
pivoted dataframe
"""
pivoted_df = (
pd.DataFrame(
zip(row["Numbering"], row["Insertion"], row["Numbered_Sequence"]),
columns=["numbering", "insertion", "sequence"],
)
.assign(Id=row["Id"])
.pivot("Id", ["numbering", "insertion"], "sequence")
)
return pivoted_df
def get_sanatized_antibodies(self):
        # drop sequences that don't start at the first amino acid and don't end at the last amino acid.
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
@staticmethod
def read_csv(*args, **kwargs):
return NumberingResults(
pd.read_csv(
*args,
index_col=0,
dtype=NUMBERING_RESULTS,
converters={"Numbering": literal_eval, "Insertion": literal_eval, "Numbered_Sequence": literal_eval},
**kwargs,
)
)
def drop_bad_numbering(self) -> "NumberingResults":
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
| 38.635135
| 120
| 0.549668
|
import logging
import pandas as pd
from ast import literal_eval
from .constants import NUMBERING_RESULTS
from sadie.numbering.scheme_numbering import scheme_numbering
logger = logging.getLogger("NUMBERING")
class NumberingResults(pd.DataFrame):
def __init__(self, *args, scheme="", region_definition="", allowed_chains=[], allowed_species=[], **kwargs):
super(NumberingResults, self).__init__(*args, **kwargs)
# self["scheme"] = scheme
# self["region_definition"] = region_definition
# self["allowed_species"] = ",".join(allowed_species)
# self["allowed_chains"] = ",".join(allowed_chains)
# self._add_segment_regions()
@property
def _constructor(self):
return NumberingResults
def get_alignment_table(self) -> pd.DataFrame:
all_dataframes = []
        # I'm not sure if there is a more efficient way to do this other than iterating through the df and pivoting each row
for index in range(len(self)):
all_dataframes.append(self._pivot_alignment(self.iloc[index]))
all_dataframes = pd.concat(all_dataframes)
all_dataframes = all_dataframes.sort_index(axis=1, level=[0, 1])
all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))
all_dataframes = all_dataframes.reset_index()
return self[["Id", "chain_type", "scheme"]].merge(all_dataframes, on="Id").copy()
def _get_region(self, row, start, end, segment_name):
with_segment = "".join(
list(
map(
lambda x: x[-1],
list(
filter(
lambda x: x[0] >= start and x[0] <= end,
list(
zip(
row["Numbering"],
row["Insertion"],
row["Numbered_Sequence"],
)
),
)
),
)
)
)
without_segment = with_segment.replace("-", "")
return pd.Series(
{
f"{segment_name}_gaps": with_segment,
f"{segment_name}_no_gaps": without_segment,
}
)
def _add_segment_regions(self) -> "NumberingResults":
return_frames = []
for group, sub_df in self.groupby(["scheme", "region_definition", "Chain"]):
numbering = group[0]
chain = {"H": "heavy", "KL": "light"}[group[-1]]
boundaries = group[1]
numbering_lookup = scheme_numbering[numbering][chain][boundaries]
for region in [
"fwr1_aa",
"cdr1_aa",
"fwr2_aa",
"cdr2_aa",
"fwr3_aa",
"cdr3_aa",
"fwr4_aa",
]:
_start = numbering_lookup[f"{region}_start"]
_end = numbering_lookup[f"{region}_end"]
sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))
return_frames.append(sub_df)
segmented_df = pd.concat(return_frames).reset_index(drop=True)
segmented_df["leader"] = segmented_df[["sequence", "seqstart_index"]].apply(lambda x: x[0][: x[1]], axis=1)
segmented_df["follow"] = segmented_df[["sequence", "seqend_index"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)
return segmented_df
def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:
pivoted_df = (
pd.DataFrame(
zip(row["Numbering"], row["Insertion"], row["Numbered_Sequence"]),
columns=["numbering", "insertion", "sequence"],
)
.assign(Id=row["Id"])
.pivot("Id", ["numbering", "insertion"], "sequence")
)
return pivoted_df
def get_sanatized_antibodies(self):
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
@staticmethod
def read_csv(*args, **kwargs):
return NumberingResults(
pd.read_csv(
*args,
index_col=0,
dtype=NUMBERING_RESULTS,
converters={"Numbering": literal_eval, "Insertion": literal_eval, "Numbered_Sequence": literal_eval},
**kwargs,
)
)
def drop_bad_numbering(self) -> "NumberingResults":
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
| true
| true
|
f70bade14a31fddad9b14b51709da0b4d1094b8f
| 1,071
|
py
|
Python
|
tests/test_split_settings.py
|
abdulniyaspm/django-split-settings
|
9a004ce261ffd16782da08577fb700300f3bd40b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-21T03:07:15.000Z
|
2021-04-21T03:07:15.000Z
|
tests/test_split_settings.py
|
abdulniyaspm/django-split-settings
|
9a004ce261ffd16782da08577fb700300f3bd40b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_split_settings.py
|
abdulniyaspm/django-split-settings
|
9a004ce261ffd16782da08577fb700300f3bd40b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=no-member
"""
This file contains tests with base functionality.
"""
def test_merge(merged):
"""
Test that all values from settings are present.
"""
assert hasattr(merged, 'SECRET_KEY')
assert hasattr(merged, 'STATIC_ROOT')
def test_override(merged, monkeypatch):
"""
    This setting must be overridden in testing.py.
"""
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tests.settings.merged')
from django.conf import settings
# noinspection PyUnresolvedReferences
assert merged.STATIC_ROOT == settings.STATIC_ROOT
def test_recursion_inclusion(recursion):
"""
    Tests that the `include` function includes each file only once.
    This protects against infinite recursion.
"""
assert hasattr(recursion, 'RECURSION_OK')
def test_stacked_settings(stacked):
"""
    Tests that settings stacked via `include` are all loaded.
"""
assert hasattr(stacked, 'STACKED_BASE_LOADED')
assert hasattr(stacked, 'STACKED_DB_PERSISTENT')
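# A hypothetical settings/__init__.py showing the include() pattern these
# tests exercise; the component file names are made up.
from split_settings.tools import include

include(
    'components/base.py',  # each file is included at most once,
    'components/db.py',    # even if it is listed again transitively
)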
| 23.8
| 73
| 0.704949
|
def test_merge(merged):
assert hasattr(merged, 'SECRET_KEY')
assert hasattr(merged, 'STATIC_ROOT')
def test_override(merged, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tests.settings.merged')
from django.conf import settings
assert merged.STATIC_ROOT == settings.STATIC_ROOT
def test_recursion_inclusion(recursion):
assert hasattr(recursion, 'RECURSION_OK')
def test_stacked_settings(stacked):
assert hasattr(stacked, 'STACKED_BASE_LOADED')
assert hasattr(stacked, 'STACKED_DB_PERSISTENT')
| true
| true
|
f70bae9c979732376e4cb729a58df45c13daa528
| 528
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/share-34244
|
b4acf167275d5bf120b1f0254aabc2e0e95111a9
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/share-34244
|
b4acf167275d5bf120b1f0254aabc2e0e95111a9
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/share-34244
|
b4acf167275d5bf120b1f0254aabc2e0e95111a9
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "share-34244.botics.co"
site_params = {
"name": "SHARE",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
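# Semantics sketch of the update_or_create call above (standard Django
# ORM): it looks up the Site with id=1, updates it with site_params when
# it exists, and otherwise creates it, e.g.
#   obj, created = Site.objects.update_or_create(defaults=site_params, id=1)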
| 20.307692
| 61
| 0.651515
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "share-34244.botics.co"
site_params = {
"name": "SHARE",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| true
| true
|
f70baf1f4016fb95e12e305e4c79a70b93b9b4a6
| 3,210
|
py
|
Python
|
do_it_django_prj/settings.py
|
Eliksny/do_it_django_a_to_z
|
728d08f11cbed05aa93004d116926df26f681ccf
|
[
"MIT"
] | null | null | null |
do_it_django_prj/settings.py
|
Eliksny/do_it_django_a_to_z
|
728d08f11cbed05aa93004d116926df26f681ccf
|
[
"MIT"
] | null | null | null |
do_it_django_prj/settings.py
|
Eliksny/do_it_django_a_to_z
|
728d08f11cbed05aa93004d116926df26f681ccf
|
[
"MIT"
] | null | null | null |
"""
Django settings for do_it_django_prj project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^il&w&37030%c0kbg@9(h+k(jsps53_)brjyw)mksmj=*c^5vf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'single_pages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'do_it_django_prj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'do_it_django_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, '_media')
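# A hypothetical urls.py addition for serving MEDIA_URL files during
# development (a standard Django pattern, not part of this settings file):
#
#   from django.conf import settings
#   from django.conf.urls.static import static
#
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)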
| 25.275591
| 91
| 0.697508
|
import os
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '^il&w&37030%c0kbg@9(h+k(jsps53_)brjyw)mksmj=*c^5vf'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'single_pages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'do_it_django_prj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'do_it_django_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, '_media')
| true
| true
|
f70baf4ada1e8690727609371f5fe6b0550ce622
| 1,073
|
py
|
Python
|
run_on_vacuum/my_logger.py
|
edwios/xiaomiWifiMapper
|
55a4bafcca9f7d000e5df4360e2c8e4584541942
|
[
"MIT"
] | 6
|
2019-04-13T07:22:38.000Z
|
2022-03-30T16:38:07.000Z
|
run_on_vacuum/my_logger.py
|
edwios/xiaomiWifiMapper
|
55a4bafcca9f7d000e5df4360e2c8e4584541942
|
[
"MIT"
] | null | null | null |
run_on_vacuum/my_logger.py
|
edwios/xiaomiWifiMapper
|
55a4bafcca9f7d000e5df4360e2c8e4584541942
|
[
"MIT"
] | 1
|
2022-02-08T16:08:21.000Z
|
2022-02-08T16:08:21.000Z
|
import logging
import datetime
import os
def config_logs():
# Logfile
logfolder = "logs/"
logdate = datetime.datetime.now().strftime("%y-%m-%d_%H:%M") + "_"
logfile = "aerodust.log"
logpath = logfolder + logfile
#logpath = logfolder + logdate + logfile
if not os.path.exists(logfolder):
os.makedirs(logfolder)
# Format
logformat = '%(asctime)s %(levelname)s: %(message)s'
datefmt='%m/%d/%Y %I:%M:%S %p'
    # Get the root logger
rootLogger = logging.getLogger()
# Create a formatter
logFormatter = logging.Formatter(logformat, datefmt)
# Create and add the file stream handler to the logger
fileHandler = logging.FileHandler(logpath)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
# Create and add the console stream handler to the logger
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
#rootLogger.setLevel(logging.DEBUG)
| 29
| 70
| 0.691519
|
import logging
import datetime
import os
def config_logs():
logfolder = "logs/"
logdate = datetime.datetime.now().strftime("%y-%m-%d_%H:%M") + "_"
logfile = "aerodust.log"
logpath = logfolder + logfile
if not os.path.exists(logfolder):
os.makedirs(logfolder)
logformat = '%(asctime)s %(levelname)s: %(message)s'
datefmt='%m/%d/%Y %I:%M:%S %p'
rootLogger = logging.getLogger()
logFormatter = logging.Formatter(logformat, datefmt)
fileHandler = logging.FileHandler(logpath)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
| true
| true
|
f70bafa9d216982e080b2ecad32e8a1c7f55bbe2
| 11,523
|
py
|
Python
|
httpBridge/http_server.py
|
neilbroadbent/captive-web-view
|
ff0b541727ab60df6d05cae1eb66dd9d7b572b89
|
[
"BSD-2-Clause"
] | null | null | null |
httpBridge/http_server.py
|
neilbroadbent/captive-web-view
|
ff0b541727ab60df6d05cae1eb66dd9d7b572b89
|
[
"BSD-2-Clause"
] | null | null | null |
httpBridge/http_server.py
|
neilbroadbent/captive-web-view
|
ff0b541727ab60df6d05cae1eb66dd9d7b572b89
|
[
"BSD-2-Clause"
] | null | null | null |
# Run with Python 3
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
"""\
HTTP server that can be used as a back end to Captive Web View applications.
The server is based around a Python3 Simple HTTP Server extended to pick files
from one of a number of directories.
The server will change directory to the common parent of all directories
specified.
"""
#
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# Tutorial: https://docs.python.org/3/howto/argparse.html
# Reference: https://docs.python.org/3/library/argparse.html
import argparse
#
# Module for HTTP server
# https://docs.python.org/3/library/http.server.html
from http.server import HTTPServer, SimpleHTTPRequestHandler
#
# JSON module.
# https://docs.python.org/3/library/json.html
import json
#
# Module for changing the current directory.
# https://docs.python.org/3/library/os.html#os.chdir
from os import chdir
#
# File path module.
# https://docs.python.org/3/library/os.path.html
import os.path
#
# Module for OO path handling.
# https://docs.python.org/3/library/pathlib.html
from pathlib import Path
#
# Module for recursive copy.
# https://docs.python.org/3/library/shutil.html
import shutil
#
# Module to create an HTTP server that spawns a thread for each request.
# https://docs.python.org/3/library/socketserver.html#module-socketserver
# The ThreadingMixIn is needed because of an apparent defect in Python, see:
# https://github.com/Microsoft/WSL/issues/1906
# https://bugs.python.org/issue31639
# The defect is fixed in 3.7 Python.
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L45
from socketserver import ThreadingMixIn
#
# Module for manipulation of the import path.
# https://docs.python.org/3/library/sys.html#sys.path
import sys
#
# Module for text dedentation.
# Only used for --help description.
# https://docs.python.org/3/library/textwrap.html
import textwrap
def project_path(*segments):
return Path(__file__).resolve().parents[1].joinpath(*segments)
class Server(ThreadingMixIn, HTTPServer):
@property
def directories(self):
return self._directories
@directories.setter
def directories(self, directories):
self._directories = tuple(directories)
@property
def relativePaths(self):
return self._relativePaths
def path_for_file(self, filename):
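# Map a requested file name to the first allowed directory that contains
# it, falling back to index.html for bare directory requests; the result
# is returned relative to the server's working directory.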
filename = os.path.basename(filename)
if filename == "":
filename = "index.html"
for index, directory in enumerate(self.directories):
if directory.joinpath(filename).is_file():
return self.relativePaths[index].joinpath(filename)
raise ValueError('File "{}" not found.'.format(filename))
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Server method `handle_command` must be set by Main subclass.")
@property
def start_message(self):
"""Message suitable for logging when the server is started."""
def directory_lines(width=80, indent=2):
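# Generator that word-wraps the configured directory paths so no
# emitted line exceeds `width` characters; each directory starts on
# a fresh line marked with ">".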
# This array accumulates diagnostic logs. It is yield'd after
# everything, unless the final yield is commented out.
transcript = ["\n"]
for directory in self.directories:
first = True
lineLen = 0
for index, leg in enumerate(directory.parts):
if leg == os.path.sep and index == 0:
continue
append = ''.join(("" if index == 0 else os.path.sep, leg))
appendLen = len(append)
while True:
lineStart = False
transcript.extend('{:2d} {:2d} "{}"\n'.format(
lineLen, appendLen, append))
if lineLen == 0:
line = "{:<{indent}}".format(
">" if first else "", indent=indent)
lineLen += len(line)
yield "\n"
yield line
lineStart = True
if lineLen + appendLen > width:
if lineStart:
yield append
first = False
lineLen = 0
if lineStart:
break
else:
lineLen += appendLen
yield append
break
# Uncomment the following line to get diagnostic logs.
# yield "".join(transcript)
#
# Get the actual port number and server address. The port number could
# be different, if zero was specified.
address = self.server_address
return 'Starting HTTP server at http://{}:{} for:{}\ncd {}'.format(
'localhost' if address[0] == '127.0.0.1' else address[0]
, int(address[1])
, "".join(tuple(directory_lines()))
, os.path.commonpath(self.directories))
def serve_forever(self):
chdir(os.path.commonpath(self.directories))
fromDir = Path.cwd()
self._relativePaths = tuple(
directory.relative_to(fromDir) for directory in self.directories)
return super().serve_forever()
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
responsePath = None
# Check for resources that are allowed to be requested from root. Chrome
# seems to request everything other than the favicon with a path though.
try:
parted = self.path.rpartition("/")
if parted[0] == "" and (parted[1] == "/" or parted[1] == ""):
self.log_message("%s", 'Root resource "{}".'.format(self.path))
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
# Check for other resources in allowed directories.
directoryIndex = None
if responsePath is None:
effectivePath = (
self.path[1:] if self.path.startswith("/") else self.path)
for index, prefix in enumerate(self.server.relativePaths):
if effectivePath.startswith(str(prefix)):
directoryIndex = index
break
if directoryIndex is None:
self.send_error(403)
return
# By now, it's determined that the path in the request is one that
# is allowed by the server. It might have been requested from a
# resource in one directory but be in another. The path_for_file()
# method takes care of that.
try:
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
self.log_message("%s", 'Response path "{}" "{}" {}.'.format(
self.path, responsePath, directoryIndex))
if responsePath is not None:
self.path = str(responsePath)
super().do_GET()
def _send_object(self, responseObject):
responseBytes = json.dumps(responseObject).encode()
self.log_message("%s", 'Response object {} {}.'.format(
responseObject, responseBytes))
self.send_response(200)
self.end_headers()
self.wfile.write(responseBytes)
def do_POST(self):
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L263
contentLengthHeader = self.headers.get('Content-Length')
contentLength = (
0 if contentLengthHeader is None else int(contentLengthHeader))
contentJSON = (
self.rfile.read(contentLength).decode('utf-8') if contentLength > 0
else None)
content = None if contentJSON is None else json.loads(contentJSON)
self.log_message("%s", "POST object {}.".format(
json.dumps(content, indent=2)))
if content is None:
self.send_error(400)
else:
try:
response = self.server.handle_command(content, self)
if response is not None:
self._send_object(response)
except:
self.send_error(501)
raise
# self.path is ignored.
class Main:
def __init__(self, argv):
argumentParser = argparse.ArgumentParser(
# formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(__doc__))
argumentParser.add_argument(
'-p', '--port', type=int, default=8001, help=
'Port number. Default: 8001.')
argumentParser.add_argument(
dest='directories', metavar='directory', type=str, nargs='+', help=
'Directory from which to serve web content.')
self.arguments = argumentParser.parse_args(argv[1:])
self.server = Server(('localhost', self.arguments.port), Handler)
self.server.handle_command = self.handle_command
def __call__(self):
self.server.directories = (
*(
Path(directory).resolve()
for directory in self.arguments.directories
), project_path(
'forAndroid', 'captivewebview', 'src', 'main', 'assets',
'library')
)
for directory in self.server.directories:
if not directory.is_dir():
raise ValueError(f'Not a directory "{directory}".')
print(self.server.start_message)
self.server.serve_forever()
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Method `handle_command` must be implemented by Main subclass.")
class CaptivityMain(Main):
def __init__(self, argv):
argv = (*argv, str(project_path(
'forAndroid', 'Captivity', 'src', 'main', 'assets', 'UserInterface'
)))
return super().__init__(argv)
# Override.
def handle_command(self, commandObject, httpHandler):
# Following code would send a redirect to the client. Unfortunately,
# that causes the client to redirect the POST, instead of it loading
# another page instead.
#
# if "load" in commandObject:
# responseBytes = json.dumps({}).encode()
# httpHandler.log_message("%s", 'Redirect {}.'.format(
# responseBytes))
# httpHandler.send_response(303, json.dumps(commandObject))
# httpHandler.send_header('Location', commandObject["load"])
# httpHandler.end_headers()
# httpHandler.wfile.write(responseBytes)
# return None
# TOTH for ** syntax: https://stackoverflow.com/a/26853961
return {
**commandObject,
"confirm": " ".join((self.__class__.__name__,
httpHandler.server_version,
httpHandler.sys_version))
}
if __name__ == '__main__':
sys.exit(CaptivityMain(sys.argv)())
| 38.667785
| 110
| 0.586392
|
import argparse
from http.server import HTTPServer, SimpleHTTPRequestHandler
import json
from os import chdir
import os.path
from pathlib import Path
import shutil
from socketserver import ThreadingMixIn
import sys
import textwrap
def project_path(*segments):
return Path(__file__).resolve().parents[1].joinpath(*segments)
class Server(ThreadingMixIn, HTTPServer):
@property
def directories(self):
return self._directories
@directories.setter
def directories(self, directories):
self._directories = tuple(directories)
@property
def relativePaths(self):
return self._relativePaths
def path_for_file(self, filename):
filename = os.path.basename(filename)
if filename == "":
filename = "index.html"
for index, directory in enumerate(self.directories):
if directory.joinpath(filename).is_file():
return self.relativePaths[index].joinpath(filename)
raise ValueError('File "{}" not found.'.format(filename))
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Server method `handle_command` must be set by Main subclass.")
@property
def start_message(self):
def directory_lines(width=80, indent=2):
transcript = ["\n"]
for directory in self.directories:
first = True
lineLen = 0
for index, leg in enumerate(directory.parts):
if leg == os.path.sep and index == 0:
continue
append = ''.join(("" if index == 0 else os.path.sep, leg))
appendLen = len(append)
while True:
lineStart = False
transcript.extend('{:2d} {:2d} "{}"\n'.format(
lineLen, appendLen, append))
if lineLen == 0:
line = "{:<{indent}}".format(
">" if first else "", indent=indent)
lineLen += len(line)
yield "\n"
yield line
lineStart = True
if lineLen + appendLen > width:
if lineStart:
yield append
first = False
lineLen = 0
if lineStart:
break
else:
lineLen += appendLen
yield append
break
# Uncomment the following line to get diagnostic logs.
# yield "".join(transcript)
#
# Get the actual port number and server address. The port number could
# be different, if zero was specified.
address = self.server_address
return 'Starting HTTP server at http://{}:{} for:{}\ncd {}'.format(
'localhost' if address[0] == '127.0.0.1' else address[0]
, int(address[1])
, "".join(tuple(directory_lines()))
, os.path.commonpath(self.directories))
def serve_forever(self):
chdir(os.path.commonpath(self.directories))
fromDir = Path.cwd()
self._relativePaths = tuple(
directory.relative_to(fromDir) for directory in self.directories)
return super().serve_forever()
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
responsePath = None
# Check for resources that are allowed to be requested from root. Chrome
# seems to request everything other than the favicon with a path though.
try:
parted = self.path.rpartition("/")
if parted[0] == "" and (parted[1] == "/" or parted[1] == ""):
self.log_message("%s", 'Root resource "{}".'.format(self.path))
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
# Check for other resources in allowed directories.
directoryIndex = None
if responsePath is None:
effectivePath = (
self.path[1:] if self.path.startswith("/") else self.path)
for index, prefix in enumerate(self.server.relativePaths):
if effectivePath.startswith(str(prefix)):
directoryIndex = index
break
if directoryIndex is None:
self.send_error(403)
return
# By now, it's determined that the path in the request is one that
try:
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
self.log_message("%s", 'Response path "{}" "{}" {}.'.format(
self.path, responsePath, directoryIndex))
if responsePath is not None:
self.path = str(responsePath)
super().do_GET()
def _send_object(self, responseObject):
responseBytes = json.dumps(responseObject).encode()
self.log_message("%s", 'Response object {} {}.'.format(
responseObject, responseBytes))
self.send_response(200)
self.end_headers()
self.wfile.write(responseBytes)
def do_POST(self):
contentLengthHeader = self.headers.get('Content-Length')
contentLength = (
0 if contentLengthHeader is None else int(contentLengthHeader))
contentJSON = (
self.rfile.read(contentLength).decode('utf-8') if contentLength > 0
else None)
content = None if contentJSON is None else json.loads(contentJSON)
self.log_message("%s", "POST object {}.".format(
json.dumps(content, indent=2)))
if content is None:
self.send_error(400)
else:
try:
response = self.server.handle_command(content, self)
if response is not None:
self._send_object(response)
except:
self.send_error(501)
raise
class Main:
def __init__(self, argv):
argumentParser = argparse.ArgumentParser(
description=textwrap.dedent(__doc__))
argumentParser.add_argument(
'-p', '--port', type=int, default=8001, help=
'Port number. Default: 8001.')
argumentParser.add_argument(
dest='directories', metavar='directory', type=str, nargs='+', help=
'Directory from which to serve web content.')
self.arguments = argumentParser.parse_args(argv[1:])
self.server = Server(('localhost', self.arguments.port), Handler)
self.server.handle_command = self.handle_command
def __call__(self):
self.server.directories = (
*(
Path(directory).resolve()
for directory in self.arguments.directories
), project_path(
'forAndroid', 'captivewebview', 'src', 'main', 'assets',
'library')
)
for directory in self.server.directories:
if not directory.is_dir():
raise ValueError(f'Not a directory "{directory}".')
print(self.server.start_message)
self.server.serve_forever()
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Method `handle_command` must be implemented by Main subclass.")
class CaptivityMain(Main):
def __init__(self, argv):
argv = (*argv, str(project_path(
'forAndroid', 'Captivity', 'src', 'main', 'assets', 'UserInterface'
)))
return super().__init__(argv)
def handle_command(self, commandObject, httpHandler):
return {
**commandObject,
"confirm": " ".join((self.__class__.__name__,
httpHandler.server_version,
httpHandler.sys_version))
}
if __name__ == '__main__':
sys.exit(CaptivityMain(sys.argv)())
| true
| true
|
f70bb050c191468d1bf7ec98d2a0e34ce404620f
| 1,940
|
py
|
Python
|
environment/controller/ppo_test.py
|
rafaelcostafrf/UAV_3d_virtual_env
|
bccaa52ec97fff5c0a17e1351a09f913d91c4c7b
|
[
"CC0-1.0"
] | 7
|
2020-07-16T08:23:58.000Z
|
2022-02-03T17:51:13.000Z
|
environment/controller/ppo_test.py
|
rafaelcostafrf/UAV_3D_Virtual_Env
|
bccaa52ec97fff5c0a17e1351a09f913d91c4c7b
|
[
"CC0-1.0"
] | null | null | null |
environment/controller/ppo_test.py
|
rafaelcostafrf/UAV_3D_Virtual_Env
|
bccaa52ec97fff5c0a17e1351a09f913d91c4c7b
|
[
"CC0-1.0"
] | 3
|
2020-09-16T14:24:48.000Z
|
2021-02-03T10:01:00.000Z
|
import sys
from quadrotor_env import quad, render, animation
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from model import ActorCritic
"""
MECHANICAL ENGINEERING POST-GRADUATE PROGRAM
UNIVERSIDADE FEDERAL DO ABC - SANTO ANDRÉ, BRAZIL
NAME: RAFAEL COSTA FERNANDES
RA: 21201920754
E-MAIL: COSTA.FERNANDES@UFABC.EDU.BR
DESCRIPTION:
PPO testing algorithm (no training, only forward passes)
"""
time_int_step = 0.01
max_timesteps = 1000
T = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
env = quad(time_int_step, max_timesteps, euler=0, direct_control=1, deep_learning=1, T=T, debug=0)
state_dim = env.deep_learning_in_size
policy = ActorCritic(state_dim, action_dim=4, action_std=0).to(device)
#LOAD TRAINED POLICY
try:
policy.load_state_dict(torch.load('PPO_continuous_solved_drone.pth',map_location=device))
print('Saved policy loaded')
except:
print('Could not load policy')
sys.exit(1)
#PLOTTER SETUP
print_states = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12]
plot_labels = ['x', 'y', 'z', 'phi', 'theta', 'psi', 'f1', 'f2', 'f3', 'f4']
line_styles = ['-', '-', '-', '--', '--', '--', ':', ':', ':', ':',]
plotter = render(print_states, plot_labels, line_styles, depth_plot_list=0, animate=0)
# DO ONE RANDOM EPISODE
plotter.clear()
state = env.reset()
first_state = np.concatenate((env.previous_state[0:6],env.ang,np.zeros(4)))
plotter.add(0,first_state)
done = False
t=0
while not done:
t+=time_int_step
action = policy.actor(torch.FloatTensor(state).to(device)).cpu().detach().numpy()
state, _, done = env.step(action)
plot_state = np.concatenate((env.state[0:6],env.ang,action))
plotter.add(t,plot_state)
print('Env Solved, printing...')
plotter.plot()
# plotter.depth_plot()
an = animation()
an.animate(plotter.states)
plotter.clear()
| 28.115942
| 98
| 0.716495
|
import sys
from quadrotor_env import quad, render, animation
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from model import ActorCritic
time_int_step = 0.01
max_timesteps = 1000
T = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
env = quad(time_int_step, max_timesteps, euler=0, direct_control=1, deep_learning=1, T=T, debug=0)
state_dim = env.deep_learning_in_size
policy = ActorCritic(state_dim, action_dim=4, action_std=0).to(device)
try:
policy.load_state_dict(torch.load('PPO_continuous_solved_drone.pth',map_location=device))
print('Saved policy loaded')
except:
print('Could not load policy')
sys.exit(1)
print_states = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12]
plot_labels = ['x', 'y', 'z', 'phi', 'theta', 'psi', 'f1', 'f2', 'f3', 'f4']
line_styles = ['-', '-', '-', '--', '--', '--', ':', ':', ':', ':',]
plotter = render(print_states, plot_labels, line_styles, depth_plot_list=0, animate=0)
plotter.clear()
state = env.reset()
first_state = np.concatenate((env.previous_state[0:6],env.ang,np.zeros(4)))
plotter.add(0,first_state)
done = False
t=0
while not done:
t+=time_int_step
action = policy.actor(torch.FloatTensor(state).to(device)).cpu().detach().numpy()
state, _, done = env.step(action)
plot_state = np.concatenate((env.state[0:6],env.ang,action))
plotter.add(t,plot_state)
print('Env Solved, printing...')
plotter.plot()
an = animation()
an.animate(plotter.states)
plotter.clear()
| true
| true
|
f70bb0dcace5468cfd76a8c6fc2bbcfa258e969a
| 926
|
py
|
Python
|
server.py
|
serchrod/PlotWebService
|
3d744641e7fa187d46903e71b3da6faa1ca80197
|
[
"MIT"
] | null | null | null |
server.py
|
serchrod/PlotWebService
|
3d744641e7fa187d46903e71b3da6faa1ca80197
|
[
"MIT"
] | null | null | null |
server.py
|
serchrod/PlotWebService
|
3d744641e7fa187d46903e71b3da6faa1ca80197
|
[
"MIT"
] | null | null | null |
from flask import Flask, escape, request
from flask import send_file
from Graph.plot import Plot
app = Flask(__name__)
@app.route('/', methods=["POST"])
def hello():
print(request.method)
req_data= request.get_json()
print(req_data)
name = request.args.get("name", "World")
return f'Hello, {escape(name)}!'
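# Sketch of the JSON payload /get_image expects, inferred from the field
# accesses below (these key names are the ones this handler reads):
# {"labels_x": [...], "label_y": "...", "title": "...", "legend": [...],
#  "valueGroup": [[...], [...]], "filename": "plot.png", "type": "1" or "2"}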
@app.route('/get_image',methods=["POST"])
def get_image():
req_data= request.get_json()
plot= Plot()
plot.labels_x=list(req_data["labels_x"])
plot.labels_y=req_data["label_y"]
plot.title=req_data["title"]
plot.legend=list(req_data["legend"])
plot.valueGroup1=list(req_data["valueGroup"][0])
plot.valueGroup2=list(req_data["valueGroup"][1])
plot.filename=req_data["filename"]
if req_data["type"]=="1":
plot.createGroupBarPlot()
elif req_data["type"]=="2":
plot.createPieChart()
return send_file(req_data["filename"], mimetype='image/png')
| 25.027027
| 63
| 0.679266
|
from flask import Flask, escape, request
from flask import send_file
from Graph.plot import Plot
app = Flask(__name__)
@app.route('/', methods=["POST"])
def hello():
print(request.method)
req_data= request.get_json()
print(req_data)
name = request.args.get("name", "World")
return f'Hello, {escape(name)}!'
@app.route('/get_image',methods=["POST"])
def get_image():
req_data= request.get_json()
plot= Plot()
plot.labels_x=list(req_data["labels_x"])
plot.labels_y=req_data["label_y"]
plot.title=req_data["title"]
plot.legend=list(req_data["legend"])
plot.valueGroup1=list(req_data["valueGroup"][0])
plot.valueGroup2=list(req_data["valueGroup"][1])
plot.filename=req_data["filename"]
if req_data["type"]=="1":
plot.createGroupBarPlot()
elif req_data["type"]=="2":
plot.createPieChart()
return send_file(req_data["filename"], mimetype='image/png')
| true
| true
|
f70bb2806abe8753fedf0fa824ed6ca2d2632ea8
| 56
|
py
|
Python
|
django_menus/__init__.py
|
jonesim/django-menus
|
11f46eead9dec3c99724d9d5df87ce7eb0bee730
|
[
"MIT"
] | 1
|
2021-11-20T06:24:41.000Z
|
2021-11-20T06:24:41.000Z
|
django_menus/__init__.py
|
jonesim/django-menus
|
11f46eead9dec3c99724d9d5df87ce7eb0bee730
|
[
"MIT"
] | null | null | null |
django_menus/__init__.py
|
jonesim/django-menus
|
11f46eead9dec3c99724d9d5df87ce7eb0bee730
|
[
"MIT"
] | null | null | null |
DUMMY_MENU_ID = 999999
DUMMY_MENU_SLUG = 'SLUGGOESHERE'
| 18.666667
| 32
| 0.821429
|
DUMMY_MENU_ID = 999999
DUMMY_MENU_SLUG = 'SLUGGOESHERE'
| true
| true
|
f70bb319fc1590c0ee070b2f24afe5ede7e22037
| 22,466
|
py
|
Python
|
tests/wav2vec2/test_modeling_flax_wav2vec2.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 2
|
2020-02-26T08:10:20.000Z
|
2020-02-28T19:10:01.000Z
|
tests/wav2vec2/test_modeling_flax_wav2vec2.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 1
|
2022-03-26T12:10:11.000Z
|
2022-03-26T12:10:11.000Z
|
tests/wav2vec2/test_modeling_flax_wav2vec2.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 1
|
2022-01-12T14:45:41.000Z
|
2022-01-12T14:45:41.000Z
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import unittest
import numpy as np
from datasets import load_dataset
from transformers import Wav2Vec2Config, is_flax_available
from transformers.testing_utils import (
is_librosa_available,
is_pyctcdecode_available,
require_flax,
require_librosa,
require_pyctcdecode,
require_soundfile,
slow,
)
from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
import optax
from flax.traverse_util import flatten_dict
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2GumbelVectorQuantizer,
FlaxWav2Vec2Model,
_compute_mask_indices,
_sample_negative_indices,
)
if is_pyctcdecode_available():
from transformers import Wav2Vec2ProcessorWithLM
if is_librosa_available():
import librosa
class FlaxWav2Vec2ModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=24,
feat_extract_norm="layer",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=4,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
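# Each conv layer shrinks the time axis: L_out = (L_in - (kernel - 1)) / stride,
# so the encoder sequence length is derived from the raw input length below.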
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = Wav2Vec2Config(
do_stable_layer_norm=self.do_stable_layer_norm,
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
return config, input_values, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, attention_mask = config_and_inputs
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else ()
)
def setUp(self):
self.model_tester = FlaxWav2Vec2ModelTester(self)
def test_train(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
features_shape = (
input_values.shape[0],
model._get_feat_extract_output_lengths(np.array(input_values.shape[1])),
)
batch_size, sequence_length = features_shape[:2]
mask_prob = 0.5
mask_length = 4
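# Mask roughly 50% of the feature frames in spans of 4 frames, the same
# style of time masking wav2vec 2.0 applies during pre-training.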
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0))
output = model(
input_values,
attention_mask=attention_mask,
mask_time_indices=mask_time_indices,
train=True,
dropout_rng=dropout_rng,
gumbel_rng=gumbel_rng,
)[0]
self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim))
# overwrite because of `input_values`
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
# overwrite because of `input_values`
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_values, attention_mask=None, **kwargs):
return model(input_values=input_values, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_freeze_feature_encoder(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
params = model.params
# dummy loss function
def compute_loss(
params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8
):
outputs = model(
input_values,
attention_mask=attention_mask,
freeze_feature_encoder=freeze_feature_encoder,
params=params,
)
# compute cosine similarity of projected and projected_quantized states
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon
)
loss = cosine_sim.sum()
return loss, outputs.to_tuple()
# transform the loss function to get the gradients
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
# compute loss, outputs and gradients for unfrozen model
(loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False)
# compare to loss, outputs and gradients for frozen model
(loss_frozen, outputs_frozen), grads_frozen = grad_fn(
params, input_values, attention_mask, freeze_feature_encoder=True
)
# ensure that the outputs and losses remain precisely equal
for output, output_frozen in zip(outputs, outputs_frozen):
self.assertTrue((output == output_frozen).all())
self.assertEqual(loss, loss_frozen)
grads = flatten_dict(grads)
grads_frozen = flatten_dict(grads_frozen)
# ensure that the dicts of gradients contain the same keys
self.assertEqual(grads.keys(), grads_frozen.keys())
# ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen
feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k)
feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k)
for feature_extractor_grad, feature_extractor_grad_frozen in zip(
feature_extractor_grads, feature_extractor_grads_frozen
):
self.assertTrue((feature_extractor_grad_frozen == 0.0).all())
self.assertTrue((feature_extractor_grad > 0.0).any())
# ensure that the gradients of all unfrozen layers remain equal, i.e. all layers excluding the frozen 'feature_extractor'
grads = tuple(grads[k] for k in grads if "feature_extractor" not in k)
grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k)
for grad, grad_frozen in zip(grads, grads_frozen):
self.assertTrue((grad == grad_frozen).all())
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
outputs = model(np.ones((1, 1024), dtype="f4"))
self.assertIsNotNone(outputs)
@require_flax
class FlaxWav2Vec2UtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
# because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
def test_compute_mask_indices_attn_mask_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
attention_mask[:2, sequence_length // 2 :] = 0
mask = _compute_mask_indices(
(batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
)
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)
def test_compute_perplexity(self):
probs = np.arange(100).reshape(2, 5, 10) / 100
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)
self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)
# mask half of the input
mask = np.ones((2,), dtype=bool)
mask[0] = 0
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)
self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)
def test_sample_negatives(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
) # each vector consists of the same repeated value
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives)
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not just slices of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
def test_sample_negatives_with_attn_mask(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
) # each vector consists of the same repeated value
# second half of last input tensor is padded
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8)
attention_mask[-1, sequence_length // 2 :] = 0
forbidden_indices = (
np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length
).tolist()
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask)
# make sure that no padding tokens are sampled
self.assertTrue(all([idx not in negative_indices for idx in forbidden_indices]))
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not just slices of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
@require_flax
@require_soundfile
@slow
class FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_ctc_robust_batched(self):
model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True)
input_speech = self._load_datasamples(4)
inputs = processor(input_speech, return_tensors="np", padding=True)
input_values = inputs.input_values
attention_mask = inputs.attention_mask
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = jnp.argmax(logits, axis=-1)
predicted_trans = processor.batch_decode(predicted_ids)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
"his instant panic was followed by a small sharp blow high on his chest",
]
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
def test_inference_pretrained(self):
model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60", from_pt=True)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-large-lv60", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs_dict = feature_extractor(input_speech, return_tensors="np", padding=True)
features_shape = (
inputs_dict["input_values"].shape[0],
model._get_feat_extract_output_lengths(np.array(inputs_dict["input_values"].shape[1])),
)
mask_time_indices = _compute_mask_indices(
features_shape,
model.config.mask_time_prob,
model.config.mask_time_length,
min_masks=2,
)
outputs = model(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
# compute cosine similarity
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8
)
# retrieve cosine sim of masked features
cosine_sim_masked = cosine_sim[mask_time_indices]
# ... now compare to randomly initialized model
config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-large-lv60")
model_rand = FlaxWav2Vec2ForPreTraining(config)
outputs_rand = model_rand(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
# compute cosine similarity
cosine_sim_rand = optax.cosine_similarity(
outputs_rand.projected_states, outputs_rand.projected_quantized_states
)
# retrieve cosine sim of masked features
cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]
# a pretrained wav2vec2 model has learned to predict the quantized latent states
# => the cosine similarity between quantized states and predicted states > 0.5
# a random wav2vec2 model has not learned to predict the quantized latent states
# => the cosine similarity between quantized states and predicted states is very likely < 0.1
self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
@require_pyctcdecode
@require_librosa
def test_wav2vec2_with_lm(self):
ds = load_dataset("common_voice", "es", split="test", streaming=True)
sample = next(iter(ds))
resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000)
model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
input_values = processor(resampled_audio, return_tensors="np").input_values
logits = model(input_values).logits
transcription = processor.batch_decode(np.array(logits)).text
self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero")
| 40.921676
| 200
| 0.685658
|
import inspect
import math
import unittest
import numpy as np
from datasets import load_dataset
from transformers import Wav2Vec2Config, is_flax_available
from transformers.testing_utils import (
is_librosa_available,
is_pyctcdecode_available,
require_flax,
require_librosa,
require_pyctcdecode,
require_soundfile,
slow,
)
from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
import optax
from flax.traverse_util import flatten_dict
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2GumbelVectorQuantizer,
FlaxWav2Vec2Model,
_compute_mask_indices,
_sample_negative_indices,
)
if is_pyctcdecode_available():
from transformers import Wav2Vec2ProcessorWithLM
if is_librosa_available():
import librosa
class FlaxWav2Vec2ModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024,
is_training=False,
hidden_size=24,
feat_extract_norm="layer",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=4,
num_attention_heads=2,
hidden_dropout_prob=0.1,
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = Wav2Vec2Config(
do_stable_layer_norm=self.do_stable_layer_norm,
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
return config, input_values, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, attention_mask = config_and_inputs
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else ()
)
def setUp(self):
self.model_tester = FlaxWav2Vec2ModelTester(self)
def test_train(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
features_shape = (
input_values.shape[0],
model._get_feat_extract_output_lengths(np.array(input_values.shape[1])),
)
batch_size, sequence_length = features_shape[:2]
mask_prob = 0.5
mask_length = 4
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0))
output = model(
input_values,
attention_mask=attention_mask,
mask_time_indices=mask_time_indices,
train=True,
dropout_rng=dropout_rng,
gumbel_rng=gumbel_rng,
)[0]
self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_values, attention_mask=None, **kwargs):
return model(input_values=input_values, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_freeze_feature_encoder(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
params = model.params
def compute_loss(
params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8
):
outputs = model(
input_values,
attention_mask=attention_mask,
freeze_feature_encoder=freeze_feature_encoder,
params=params,
)
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon
)
loss = cosine_sim.sum()
return loss, outputs.to_tuple()
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
(loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False)
(loss_frozen, outputs_frozen), grads_frozen = grad_fn(
params, input_values, attention_mask, freeze_feature_encoder=True
)
for output, output_frozen in zip(outputs, outputs_frozen):
self.assertTrue((output == output_frozen).all())
self.assertEqual(loss, loss_frozen)
grads = flatten_dict(grads)
grads_frozen = flatten_dict(grads_frozen)
self.assertEqual(grads.keys(), grads_frozen.keys())
feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k)
feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k)
for feature_extractor_grad, feature_extractor_grad_frozen in zip(
feature_extractor_grads, feature_extractor_grads_frozen
):
self.assertTrue((feature_extractor_grad_frozen == 0.0).all())
self.assertTrue((feature_extractor_grad > 0.0).any())
grads = tuple(grads[k] for k in grads if "feature_extractor" not in k)
grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k)
for grad, grad_frozen in zip(grads, grads_frozen):
self.assertTrue((grad == grad_frozen).all())
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
outputs = model(np.ones((1, 1024), dtype="f4"))
self.assertIsNotNone(outputs)
@require_flax
class FlaxWav2Vec2UtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
def test_compute_mask_indices_attn_mask_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
attention_mask[:2, sequence_length // 2 :] = 0
mask = _compute_mask_indices(
(batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
)
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)
def test_compute_perplexity(self):
probs = np.arange(100).reshape(2, 5, 10) / 100
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)
self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)
# mask half of the input
mask = np.ones((2,), dtype=bool)
mask[0] = 0
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)
self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)
def test_sample_negatives(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
) # each vector consists of the same repeated value
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives)
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not just slices of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
def test_sample_negatives_with_attn_mask(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
) # each vector consists of the same repeated value
# second half of last input tensor is padded
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8)
attention_mask[-1, sequence_length // 2 :] = 0
forbidden_indices = (
np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length
).tolist()
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask)
# make sure that no padding tokens are sampled
self.assertTrue(all([idx not in negative_indices for idx in forbidden_indices]))
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not just slices of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
@require_flax
@require_soundfile
@slow
class FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_ctc_robust_batched(self):
model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True)
input_speech = self._load_datasamples(4)
inputs = processor(input_speech, return_tensors="np", padding=True)
input_values = inputs.input_values
attention_mask = inputs.attention_mask
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = jnp.argmax(logits, axis=-1)
predicted_trans = processor.batch_decode(predicted_ids)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
"his instant panic was followed by a small sharp blow high on his chest",
]
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
def test_inference_pretrained(self):
model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60", from_pt=True)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-large-lv60", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs_dict = feature_extractor(input_speech, return_tensors="np", padding=True)
features_shape = (
inputs_dict["input_values"].shape[0],
model._get_feat_extract_output_lengths(np.array(inputs_dict["input_values"].shape[1])),
)
mask_time_indices = _compute_mask_indices(
features_shape,
model.config.mask_time_prob,
model.config.mask_time_length,
min_masks=2,
)
outputs = model(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8
)
cosine_sim_masked = cosine_sim[mask_time_indices]
config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-large-lv60")
model_rand = FlaxWav2Vec2ForPreTraining(config)
outputs_rand = model_rand(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
cosine_sim_rand = optax.cosine_similarity(
outputs_rand.projected_states, outputs_rand.projected_quantized_states
)
cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]
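        # a pretrained model should score a much higher masked cosine similarity
        # than a randomly initialized one; the factor of 5 below is the test's margin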
self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
@require_pyctcdecode
@require_librosa
def test_wav2vec2_with_lm(self):
ds = load_dataset("common_voice", "es", split="test", streaming=True)
sample = next(iter(ds))
resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000)
model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
input_values = processor(resampled_audio, return_tensors="np").input_values
logits = model(input_values).logits
transcription = processor.batch_decode(np.array(logits)).text
self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero")
| true
| true
|
f70bb3e18995f3440115fb6135f9b27b8831cd83
| 157
|
py
|
Python
|
src/silicium/utils/require.py
|
PH-KDX/silicium
|
813e8719a4ba381691d3d1b11ea5738bb2ee2d36
|
[
"MIT"
] | 2
|
2021-12-12T12:06:46.000Z
|
2021-12-12T12:21:18.000Z
|
src/silicium/utils/require.py
|
PH-KDX/silicium
|
813e8719a4ba381691d3d1b11ea5738bb2ee2d36
|
[
"MIT"
] | 1
|
2021-12-12T12:21:43.000Z
|
2021-12-12T22:49:46.000Z
|
src/silicium/utils/require.py
|
PH-KDX/silicium
|
813e8719a4ba381691d3d1b11ea5738bb2ee2d36
|
[
"MIT"
] | 2
|
2021-12-12T15:13:54.000Z
|
2021-12-21T09:08:42.000Z
|
import os
def require(file, *args):
with open(os.path.join(os.path.dirname(file), *args), "r") as fh:
source = fh.read()
return source
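# Illustrative usage (commentary added; the file names below are hypothetical
# examples, not files shipped with this module):
#   sql = require(__file__, "templates", "query.sql")
#   # reads <dir of caller>/templates/query.sql and returns it as a string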
| 19.625
| 69
| 0.598726
|
import os
def require(file, *args):
with open(os.path.join(os.path.dirname(file), *args), "r") as fh:
source = fh.read()
return source
| true
| true
|
f70bb3f0b9097225846ad1f4840fb47c67f84b16
| 25,331
|
py
|
Python
|
efficientdet/det_model_fn.py
|
templeblock/automl
|
0a73e836fd4a9d22919cb1ff5af9ca30082fa4b2
|
[
"Apache-2.0"
] | null | null | null |
efficientdet/det_model_fn.py
|
templeblock/automl
|
0a73e836fd4a9d22919cb1ff5af9ca30082fa4b2
|
[
"Apache-2.0"
] | null | null | null |
efficientdet/det_model_fn.py
|
templeblock/automl
|
0a73e836fd4a9d22919cb1ff5af9ca30082fa4b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model function definition, including both architecture and loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import coco_metric
import efficientdet_arch
import hparams_config
import iou_utils
import nms_np
import retinanet_arch
import utils
from keras import anchors
from keras import postprocess
_DEFAULT_BATCH_SIZE = 64
def update_learning_rate_schedule_parameters(params):
"""Updates params that are related to the learning rate schedule."""
# params['batch_size'] is per-shard within model_fn if strategy=tpu.
batch_size = (
params['batch_size'] * params['num_shards']
if params['strategy'] == 'tpu' else params['batch_size'])
# Learning rate is proportional to the batch size
params['adjusted_learning_rate'] = (
params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)
steps_per_epoch = params['num_examples_per_epoch'] / batch_size
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] *
steps_per_epoch)
params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] *
steps_per_epoch)
params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)
params['steps_per_epoch'] = steps_per_epoch
def stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,
first_lr_drop_step, second_lr_drop_step, global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay."""
# lr_warmup_init is the starting learning rate; the learning rate is linearly
# scaled up to the full learning rate after `lr_warmup_step` before decaying.
logging.info('LR schedule method: stepwise')
linear_warmup = (
lr_warmup_init +
(tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
(adjusted_learning_rate - lr_warmup_init)))
learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup,
adjusted_learning_rate)
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate
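# Worked example (commentary added): with lr_warmup_step=1000,
# first_lr_drop_step=8000 and second_lr_drop_step=10000, the schedule above
# returns the linear warmup before step 1000, adjusted_learning_rate * 1.0 on
# steps [1000, 8000), * 0.1 on [8000, 10000), and * 0.01 from step 10000 on.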
def cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,
step):
logging.info('LR schedule method: cosine')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
cosine_lr = 0.5 * adjusted_lr * (
1 + tf.cos(np.pi * tf.cast(step, tf.float32) / decay_steps))
return tf.where(step < lr_warmup_step, linear_warmup, cosine_lr)
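# Worked example (commentary added): with adjusted_lr=0.08, lr_warmup_init=0.008,
# lr_warmup_step=1000 and total_steps=11000, decay_steps is 10000; at step 6000
# the schedule gives 0.5 * 0.08 * (1 + cos(0.6 * pi)) ~= 0.5 * 0.08 * 0.691 ~= 0.0276.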
def polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,
total_steps, step):
logging.info('LR schedule method: polynomial')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
polynomial_lr = adjusted_lr * tf.pow(
1 - (tf.cast(step, tf.float32) / total_steps), power)
return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)
def learning_rate_schedule(params, global_step):
"""Learning rate schedule based on global step."""
lr_decay_method = params['lr_decay_method']
if lr_decay_method == 'stepwise':
return stepwise_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['first_lr_drop_step'],
params['second_lr_drop_step'], global_step)
if lr_decay_method == 'cosine':
return cosine_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'], params['total_steps'],
global_step)
if lr_decay_method == 'polynomial':
return polynomial_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['poly_lr_power'],
params['total_steps'], global_step)
if lr_decay_method == 'constant':
return params['adjusted_learning_rate']
raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))
def focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
y_pred: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
y_true: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: Divide loss by this value.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
with tf.name_scope('focal_loss'):
alpha = tf.convert_to_tensor(alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(gamma, dtype=y_pred.dtype)
# compute focal loss multipliers before label smoothing, such that it will
# not blow up the loss.
pred_prob = tf.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t) ** gamma
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
# compute the final loss and return
return alpha_factor * modulating_factor * ce / normalizer
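# --- Illustrative cross-check (commentary added; not part of the original file) ---
# A minimal scalar re-derivation of the focal term above, useful for sanity
# checks; numpy is already imported as `np` in this file. For y_true=1,
# alpha=0.25, gamma=2 and a logit of 2.0 (p ~= 0.881), the modulating factor
# (1 - p)**2 ~= 0.014 sharply down-weights this easy positive example.
def _sketch_focal_loss_scalar(logit, y_true, alpha=0.25, gamma=2.0):
  p = 1.0 / (1.0 + np.exp(-logit))  # sigmoid probability
  p_t = y_true * p + (1 - y_true) * (1 - p)
  alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
  ce = -(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))
  return alpha_factor * (1.0 - p_t) ** gamma * ce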
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
"""Computes box regression loss."""
# delta is typically around the mean value of regression target.
  # for instance, the regression targets of 512x512 input with 6 anchors on
# P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = tf.not_equal(box_targets, 0.0)
box_loss = tf.losses.huber_loss(
box_targets,
box_outputs,
weights=mask,
delta=delta,
reduction=tf.losses.Reduction.SUM)
box_loss /= normalizer
return box_loss
def _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):
"""Computes box iou loss."""
normalizer = num_positives * 4.0
box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
def detection_loss(cls_outputs, box_outputs, labels, params):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
params: the dictionary including training parameters specified in
      default_hparams function in this file.
Returns:
    total_loss: a float tensor representing total loss reduced from
      class and box losses from all levels.
    cls_loss: a float tensor representing total class loss.
    box_loss: a float tensor representing total box regression loss.
    box_iou_loss: a float tensor representing total box iou loss.
"""
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = cls_outputs.keys()
cls_losses = []
box_losses = []
for level in levels:
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],
params['num_classes'])
if params['data_format'] == 'channels_first':
bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, -1, width, height])
else:
bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels['box_targets_%d' % level]
cls_loss = focal_loss(
cls_outputs[level],
cls_targets_at_level,
params['alpha'],
params['gamma'],
normalizer=num_positives_sum,
label_smoothing=params['label_smoothing'])
if params['data_format'] == 'channels_first':
cls_loss = tf.reshape(cls_loss,
[bs, -1, width, height, params['num_classes']])
else:
cls_loss = tf.reshape(cls_loss,
[bs, width, height, -1, params['num_classes']])
cls_loss *= tf.cast(
tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
if params['box_loss_weight']:
box_losses.append(
_box_loss(
box_outputs[level],
box_targets_at_level,
num_positives_sum,
delta=params['delta']))
if params['iou_loss_type']:
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
box_output_list = [tf.reshape(box_outputs[i], [-1, 4]) for i in levels]
box_outputs = tf.concat(box_output_list, axis=0)
box_target_list = [
tf.reshape(labels['box_targets_%d' % level], [-1, 4])
for level in levels
]
box_targets = tf.concat(box_target_list, axis=0)
anchor_boxes = tf.tile(input_anchors.boxes, [params['batch_size'], 1])
box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes)
box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes)
box_iou_loss = _box_iou_loss(box_outputs, box_targets, num_positives_sum,
params['iou_loss_type'])
else:
box_iou_loss = 0
# Sum per level losses to total loss.
cls_loss = tf.add_n(cls_losses)
box_loss = tf.add_n(box_losses) if box_losses else 0
total_loss = (
cls_loss +
params['box_loss_weight'] * box_loss +
params['iou_loss_weight'] * box_iou_loss)
return total_loss, cls_loss, box_loss, box_iou_loss
def reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if var_match.match(v.name)
])
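# Example (commentary added): with the default regex, kernels such as
# 'efficientnet-b0/stem/conv2d/kernel:0' match and are regularized, while
# variables like '.../gamma:0' or '.../moving_mean:0' are not kernel/weight
# names and are skipped. The variable names above are illustrative.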
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
"""Model definition entry.
Args:
features: the input image tensor with shape [batch_size, height, width, 3].
The height and width are fixed and equal.
labels: the input labels in a dictionary. The labels include class targets
and box targets which are dense label maps. The labels are generated from
get_input_fn function in data/dataloader.py
mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
params: the dictionary defines hyperparameters of model. The default
settings are in default_hparams function in this file.
model: the model outputs class logits and box regression outputs.
variable_filter_fn: the filter function that takes trainable_variables and
returns the variable list after applying the filter rule.
Returns:
tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.
Raises:
RuntimeError: if both ckpt and backbone_ckpt are set.
"""
utils.image('input_image', features)
training_hooks = []
def _model_outputs(inputs):
# Convert params (dict) to Config for easier access.
return model(inputs, config=hparams_config.Config(params))
precision = utils.get_precision(params['strategy'], params['mixed_precision'])
cls_outputs, box_outputs = utils.build_model_with_precision(
precision, _model_outputs, features, params['is_training_bn'])
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
# First check if it is in PREDICT mode.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'image': features,
}
for level in levels:
predictions['cls_outputs_%d' % level] = cls_outputs[level]
predictions['box_outputs_%d' % level] = box_outputs[level]
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Set up training loss and learning rate.
update_learning_rate_schedule_parameters(params)
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_schedule(params, global_step)
# cls_loss and box_loss are for logging. only total_loss is optimized.
det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
cls_outputs, box_outputs, labels, params)
reg_l2loss = reg_l2_loss(params['weight_decay'])
total_loss = det_loss + reg_l2loss
if mode == tf.estimator.ModeKeys.TRAIN:
utils.scalar('lrn_rate', learning_rate)
utils.scalar('trainloss/cls_loss', cls_loss)
utils.scalar('trainloss/box_loss', box_loss)
utils.scalar('trainloss/det_loss', det_loss)
utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
utils.scalar('trainloss/loss', total_loss)
if params['iou_loss_type']:
utils.scalar('trainloss/box_iou_loss', box_iou_loss)
train_epochs = tf.cast(global_step, tf.float32) / params['steps_per_epoch']
utils.scalar('train_epochs', train_epochs)
moving_average_decay = params['moving_average_decay']
if moving_average_decay:
ema = tf.train.ExponentialMovingAverage(
decay=moving_average_decay, num_updates=global_step)
ema_vars = utils.get_ema_vars()
if params['strategy'] == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
learning_rate = learning_rate * hvd.size()
if mode == tf.estimator.ModeKeys.TRAIN:
if params['optimizer'].lower() == 'sgd':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'].lower() == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError('optimizers should be adam or sgd')
if params['strategy'] == 'tpu':
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
elif params['strategy'] == 'horovod':
optimizer = hvd.DistributedOptimizer(optimizer)
training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
if variable_filter_fn:
var_list = variable_filter_fn(var_list)
if params.get('clip_gradients_norm', 0) > 0:
logging.info('clip gradients norm by %f', params['clip_gradients_norm'])
grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
with tf.name_scope('clip'):
grads = [gv[0] for gv in grads_and_vars]
tvars = [gv[1] for gv in grads_and_vars]
clipped_grads, gnorm = tf.clip_by_global_norm(
grads, params['clip_gradients_norm'])
utils.scalar('gnorm', gnorm)
grads_and_vars = list(zip(clipped_grads, tvars))
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
else:
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
total_loss, global_step, var_list=var_list)
if moving_average_decay:
with tf.control_dependencies([train_op]):
train_op = ema.apply(ema_vars)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(**kwargs):
"""Returns a dictionary that has the evaluation metrics."""
if params['nms_configs'].get('pyfunc', True):
detections_bs = []
for index in range(kwargs['boxes'].shape[0]):
nms_configs = params['nms_configs']
detections = tf.numpy_function(
functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),
[
kwargs['boxes'][index],
kwargs['scores'][index],
kwargs['classes'][index],
tf.slice(kwargs['image_ids'], [index], [1]),
tf.slice(kwargs['image_scales'], [index], [1]),
params['num_classes'],
nms_configs['max_output_size'],
], tf.float32)
detections_bs.append(detections)
else:
# These two branches should be equivalent, but currently they are not.
# TODO(tanmingxing): enable the non_pyfun path after bug fix.
nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
kwargs['image_scales'])
img_ids = tf.cast(
tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)
detections_bs = [
img_ids * tf.ones_like(nms_scores),
nms_boxes[:, :, 1],
nms_boxes[:, :, 0],
nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
nms_scores,
nms_classes,
]
      detections_bs = tf.stack(detections_bs, axis=-1, name='detections')
if params.get('testdev_dir', None):
logging.info('Eval testdev_dir %s', params['testdev_dir'])
eval_metric = coco_metric.EvaluationMetric(
testdev_dir=params['testdev_dir'])
coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
tf.zeros([1]))
else:
        logging.info('Eval val with groundtruths %s.', params['val_json_file'])
eval_metric = coco_metric.EvaluationMetric(
filename=params['val_json_file'])
coco_metrics = eval_metric.estimator_metric_fn(
detections_bs, kwargs['groundtruth_data'])
# Add metrics to output.
cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
output_metrics = {
'cls_loss': cls_loss,
'box_loss': box_loss,
}
output_metrics.update(coco_metrics)
return output_metrics
cls_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(cls_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
box_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(box_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
cls_outputs = postprocess.to_list(cls_outputs)
box_outputs = postprocess.to_list(box_outputs)
params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
boxes, scores, classes = postprocess.pre_nms(params, cls_outputs,
box_outputs)
metric_fn_inputs = {
'cls_loss_repeat': cls_loss_repeat,
'box_loss_repeat': box_loss_repeat,
'image_ids': labels['source_ids'],
'groundtruth_data': labels['groundtruth_data'],
'image_scales': labels['image_scales'],
'boxes': boxes,
'scores': scores,
'classes': classes,
}
eval_metrics = (metric_fn, metric_fn_inputs)
checkpoint = params.get('ckpt') or params.get('backbone_ckpt')
if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
# Initialize the model from an EfficientDet or backbone checkpoint.
if params.get('ckpt') and params.get('backbone_ckpt'):
raise RuntimeError(
'--backbone_ckpt and --checkpoint are mutually exclusive')
if params.get('backbone_ckpt'):
var_scope = params['backbone_name'] + '/'
if params['ckpt_var_scope'] is None:
# Use backbone name as default checkpoint scope.
ckpt_scope = params['backbone_name'] + '/'
else:
ckpt_scope = params['ckpt_var_scope'] + '/'
else:
# Load every var in the given checkpoint
var_scope = ckpt_scope = '/'
def scaffold_fn():
"""Loads pretrained model through scaffold function."""
logging.info('restore variables from %s', checkpoint)
var_map = utils.get_ckpt_var_map(
ckpt_path=checkpoint,
ckpt_scope=ckpt_scope,
var_scope=var_scope,
skip_mismatch=params['skip_mismatch'])
tf.train.init_from_checkpoint(checkpoint, var_map)
return tf.train.Scaffold()
elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:
def scaffold_fn():
"""Load moving average variables for eval."""
logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)
restore_vars_dict = ema.variables_to_restore(ema_vars)
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
else:
scaffold_fn = None
if params['strategy'] != 'tpu':
# Profile every 1K steps.
profile_hook = tf.train.ProfilerHook(
save_steps=1000, output_dir=params['model_dir'])
training_hooks.append(profile_hook)
# Report memory allocation if OOM
class OomReportingHook(tf.estimator.SessionRunHook):
def before_run(self, run_context):
return tf.estimator.SessionRunArgs(
fetches=[],
options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
training_hooks.append(OomReportingHook())
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metrics,
host_call=utils.get_tpu_host_call(global_step, params),
scaffold_fn=scaffold_fn,
training_hooks=training_hooks)
def retinanet_model_fn(features, labels, mode, params):
"""RetinaNet model."""
variable_filter_fn = functools.partial(
retinanet_arch.remove_variables, resnet_depth=params['resnet_depth'])
return _model_fn(
features,
labels,
mode,
params,
model=retinanet_arch.retinanet,
variable_filter_fn=variable_filter_fn)
def efficientdet_model_fn(features, labels, mode, params):
"""EfficientDet model."""
variable_filter_fn = functools.partial(
efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])
return _model_fn(
features,
labels,
mode,
params,
model=efficientdet_arch.efficientdet,
variable_filter_fn=variable_filter_fn)
def get_model_arch(model_name='efficientdet-d0'):
"""Get model architecture for a given model name."""
if 'retinanet' in model_name:
return retinanet_arch.retinanet
if 'efficientdet' in model_name:
return efficientdet_arch.efficientdet
  raise ValueError('Invalid model name {}'.format(model_name))
def get_model_fn(model_name='efficientdet-d0'):
"""Get model fn for a given model name."""
if 'retinanet' in model_name:
return retinanet_model_fn
if 'efficientdet' in model_name:
return efficientdet_model_fn
  raise ValueError('Invalid model name {}'.format(model_name))
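# Illustrative dispatch (commentary added): get_model_fn('efficientdet-d0')
# returns efficientdet_model_fn, while get_model_arch('retinanet-50') returns
# retinanet_arch.retinanet; any other name raises the ValueError above.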
| 39.641628
| 80
| 0.67218
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import coco_metric
import efficientdet_arch
import hparams_config
import iou_utils
import nms_np
import retinanet_arch
import utils
from keras import anchors
from keras import postprocess
_DEFAULT_BATCH_SIZE = 64
def update_learning_rate_schedule_parameters(params):
batch_size = (
params['batch_size'] * params['num_shards']
if params['strategy'] == 'tpu' else params['batch_size'])
params['adjusted_learning_rate'] = (
params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)
steps_per_epoch = params['num_examples_per_epoch'] / batch_size
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] *
steps_per_epoch)
params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] *
steps_per_epoch)
params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)
params['steps_per_epoch'] = steps_per_epoch
def stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,
first_lr_drop_step, second_lr_drop_step, global_step):
logging.info('LR schedule method: stepwise')
linear_warmup = (
lr_warmup_init +
(tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
(adjusted_learning_rate - lr_warmup_init)))
learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup,
adjusted_learning_rate)
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate
def cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,
step):
logging.info('LR schedule method: cosine')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
cosine_lr = 0.5 * adjusted_lr * (
1 + tf.cos(np.pi * tf.cast(step, tf.float32) / decay_steps))
return tf.where(step < lr_warmup_step, linear_warmup, cosine_lr)
def polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,
total_steps, step):
logging.info('LR schedule method: polynomial')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
polynomial_lr = adjusted_lr * tf.pow(
1 - (tf.cast(step, tf.float32) / total_steps), power)
return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)
def learning_rate_schedule(params, global_step):
lr_decay_method = params['lr_decay_method']
if lr_decay_method == 'stepwise':
return stepwise_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['first_lr_drop_step'],
params['second_lr_drop_step'], global_step)
if lr_decay_method == 'cosine':
return cosine_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'], params['total_steps'],
global_step)
if lr_decay_method == 'polynomial':
return polynomial_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['poly_lr_power'],
params['total_steps'], global_step)
if lr_decay_method == 'constant':
return params['adjusted_learning_rate']
raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))
def focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):
with tf.name_scope('focal_loss'):
alpha = tf.convert_to_tensor(alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(gamma, dtype=y_pred.dtype)
pred_prob = tf.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t) ** gamma
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
return alpha_factor * modulating_factor * ce / normalizer
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
normalizer = num_positives * 4.0
mask = tf.not_equal(box_targets, 0.0)
box_loss = tf.losses.huber_loss(
box_targets,
box_outputs,
weights=mask,
delta=delta,
reduction=tf.losses.Reduction.SUM)
box_loss /= normalizer
return box_loss
def _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):
normalizer = num_positives * 4.0
box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
def detection_loss(cls_outputs, box_outputs, labels, params):
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = cls_outputs.keys()
cls_losses = []
box_losses = []
for level in levels:
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],
params['num_classes'])
if params['data_format'] == 'channels_first':
bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, -1, width, height])
else:
bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels['box_targets_%d' % level]
cls_loss = focal_loss(
cls_outputs[level],
cls_targets_at_level,
params['alpha'],
params['gamma'],
normalizer=num_positives_sum,
label_smoothing=params['label_smoothing'])
if params['data_format'] == 'channels_first':
cls_loss = tf.reshape(cls_loss,
[bs, -1, width, height, params['num_classes']])
else:
cls_loss = tf.reshape(cls_loss,
[bs, width, height, -1, params['num_classes']])
cls_loss *= tf.cast(
tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
if params['box_loss_weight']:
box_losses.append(
_box_loss(
box_outputs[level],
box_targets_at_level,
num_positives_sum,
delta=params['delta']))
if params['iou_loss_type']:
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
box_output_list = [tf.reshape(box_outputs[i], [-1, 4]) for i in levels]
box_outputs = tf.concat(box_output_list, axis=0)
box_target_list = [
tf.reshape(labels['box_targets_%d' % level], [-1, 4])
for level in levels
]
box_targets = tf.concat(box_target_list, axis=0)
anchor_boxes = tf.tile(input_anchors.boxes, [params['batch_size'], 1])
box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes)
box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes)
box_iou_loss = _box_iou_loss(box_outputs, box_targets, num_positives_sum,
params['iou_loss_type'])
else:
box_iou_loss = 0
cls_loss = tf.add_n(cls_losses)
box_loss = tf.add_n(box_losses) if box_losses else 0
total_loss = (
cls_loss +
params['box_loss_weight'] * box_loss +
params['iou_loss_weight'] * box_iou_loss)
return total_loss, cls_loss, box_loss, box_iou_loss
def reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if var_match.match(v.name)
])
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
utils.image('input_image', features)
training_hooks = []
def _model_outputs(inputs):
return model(inputs, config=hparams_config.Config(params))
precision = utils.get_precision(params['strategy'], params['mixed_precision'])
cls_outputs, box_outputs = utils.build_model_with_precision(
precision, _model_outputs, features, params['is_training_bn'])
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'image': features,
}
for level in levels:
predictions['cls_outputs_%d' % level] = cls_outputs[level]
predictions['box_outputs_%d' % level] = box_outputs[level]
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
update_learning_rate_schedule_parameters(params)
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_schedule(params, global_step)
det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
cls_outputs, box_outputs, labels, params)
reg_l2loss = reg_l2_loss(params['weight_decay'])
total_loss = det_loss + reg_l2loss
if mode == tf.estimator.ModeKeys.TRAIN:
utils.scalar('lrn_rate', learning_rate)
utils.scalar('trainloss/cls_loss', cls_loss)
utils.scalar('trainloss/box_loss', box_loss)
utils.scalar('trainloss/det_loss', det_loss)
utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
utils.scalar('trainloss/loss', total_loss)
if params['iou_loss_type']:
utils.scalar('trainloss/box_iou_loss', box_iou_loss)
train_epochs = tf.cast(global_step, tf.float32) / params['steps_per_epoch']
utils.scalar('train_epochs', train_epochs)
moving_average_decay = params['moving_average_decay']
if moving_average_decay:
ema = tf.train.ExponentialMovingAverage(
decay=moving_average_decay, num_updates=global_step)
ema_vars = utils.get_ema_vars()
if params['strategy'] == 'horovod':
import horovod.tensorflow as hvd
learning_rate = learning_rate * hvd.size()
if mode == tf.estimator.ModeKeys.TRAIN:
if params['optimizer'].lower() == 'sgd':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'].lower() == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError('optimizers should be adam or sgd')
if params['strategy'] == 'tpu':
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
elif params['strategy'] == 'horovod':
optimizer = hvd.DistributedOptimizer(optimizer)
training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
if variable_filter_fn:
var_list = variable_filter_fn(var_list)
if params.get('clip_gradients_norm', 0) > 0:
logging.info('clip gradients norm by %f', params['clip_gradients_norm'])
grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
with tf.name_scope('clip'):
grads = [gv[0] for gv in grads_and_vars]
tvars = [gv[1] for gv in grads_and_vars]
clipped_grads, gnorm = tf.clip_by_global_norm(
grads, params['clip_gradients_norm'])
utils.scalar('gnorm', gnorm)
grads_and_vars = list(zip(clipped_grads, tvars))
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
else:
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
total_loss, global_step, var_list=var_list)
if moving_average_decay:
with tf.control_dependencies([train_op]):
train_op = ema.apply(ema_vars)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(**kwargs):
if params['nms_configs'].get('pyfunc', True):
detections_bs = []
for index in range(kwargs['boxes'].shape[0]):
nms_configs = params['nms_configs']
detections = tf.numpy_function(
functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),
[
kwargs['boxes'][index],
kwargs['scores'][index],
kwargs['classes'][index],
tf.slice(kwargs['image_ids'], [index], [1]),
tf.slice(kwargs['image_scales'], [index], [1]),
params['num_classes'],
nms_configs['max_output_size'],
], tf.float32)
detections_bs.append(detections)
else:
nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
kwargs['image_scales'])
img_ids = tf.cast(
tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)
detections_bs = [
img_ids * tf.ones_like(nms_scores),
nms_boxes[:, :, 1],
nms_boxes[:, :, 0],
nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
nms_scores,
nms_classes,
]
      detections_bs = tf.stack(detections_bs, axis=-1, name='detections')
if params.get('testdev_dir', None):
logging.info('Eval testdev_dir %s', params['testdev_dir'])
eval_metric = coco_metric.EvaluationMetric(
testdev_dir=params['testdev_dir'])
coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
tf.zeros([1]))
else:
        logging.info('Eval val with groundtruths %s.', params['val_json_file'])
eval_metric = coco_metric.EvaluationMetric(
filename=params['val_json_file'])
coco_metrics = eval_metric.estimator_metric_fn(
detections_bs, kwargs['groundtruth_data'])
cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
output_metrics = {
'cls_loss': cls_loss,
'box_loss': box_loss,
}
output_metrics.update(coco_metrics)
return output_metrics
cls_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(cls_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
box_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(box_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
cls_outputs = postprocess.to_list(cls_outputs)
box_outputs = postprocess.to_list(box_outputs)
params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
boxes, scores, classes = postprocess.pre_nms(params, cls_outputs,
box_outputs)
metric_fn_inputs = {
'cls_loss_repeat': cls_loss_repeat,
'box_loss_repeat': box_loss_repeat,
'image_ids': labels['source_ids'],
'groundtruth_data': labels['groundtruth_data'],
'image_scales': labels['image_scales'],
'boxes': boxes,
'scores': scores,
'classes': classes,
}
eval_metrics = (metric_fn, metric_fn_inputs)
checkpoint = params.get('ckpt') or params.get('backbone_ckpt')
if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
if params.get('ckpt') and params.get('backbone_ckpt'):
raise RuntimeError(
'--backbone_ckpt and --checkpoint are mutually exclusive')
if params.get('backbone_ckpt'):
var_scope = params['backbone_name'] + '/'
if params['ckpt_var_scope'] is None:
ckpt_scope = params['backbone_name'] + '/'
else:
ckpt_scope = params['ckpt_var_scope'] + '/'
else:
var_scope = ckpt_scope = '/'
def scaffold_fn():
logging.info('restore variables from %s', checkpoint)
var_map = utils.get_ckpt_var_map(
ckpt_path=checkpoint,
ckpt_scope=ckpt_scope,
var_scope=var_scope,
skip_mismatch=params['skip_mismatch'])
tf.train.init_from_checkpoint(checkpoint, var_map)
return tf.train.Scaffold()
elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:
def scaffold_fn():
"""Load moving average variables for eval."""
logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)
restore_vars_dict = ema.variables_to_restore(ema_vars)
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
else:
scaffold_fn = None
if params['strategy'] != 'tpu':
profile_hook = tf.train.ProfilerHook(
save_steps=1000, output_dir=params['model_dir'])
training_hooks.append(profile_hook)
class OomReportingHook(tf.estimator.SessionRunHook):
def before_run(self, run_context):
return tf.estimator.SessionRunArgs(
fetches=[],
options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
training_hooks.append(OomReportingHook())
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metrics,
host_call=utils.get_tpu_host_call(global_step, params),
scaffold_fn=scaffold_fn,
training_hooks=training_hooks)
def retinanet_model_fn(features, labels, mode, params):
variable_filter_fn = functools.partial(
retinanet_arch.remove_variables, resnet_depth=params['resnet_depth'])
return _model_fn(
features,
labels,
mode,
params,
model=retinanet_arch.retinanet,
variable_filter_fn=variable_filter_fn)
def efficientdet_model_fn(features, labels, mode, params):
variable_filter_fn = functools.partial(
efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])
return _model_fn(
features,
labels,
mode,
params,
model=efficientdet_arch.efficientdet,
variable_filter_fn=variable_filter_fn)
def get_model_arch(model_name='efficientdet-d0'):
if 'retinanet' in model_name:
return retinanet_arch.retinanet
if 'efficientdet' in model_name:
return efficientdet_arch.efficientdet
  raise ValueError('Invalid model name {}'.format(model_name))
def get_model_fn(model_name='efficientdet-d0'):
if 'retinanet' in model_name:
return retinanet_model_fn
if 'efficientdet' in model_name:
return efficientdet_model_fn
  raise ValueError('Invalid model name {}'.format(model_name))
| true
| true
|
f70bb51fb5f77dd016a4f81eefdabf218aad321e
| 2,730
|
py
|
Python
|
app/tests/v1/test_meetup.py
|
KelynPNjeri/Questioner-API
|
5d71b169be0db2d18642b13075b2cc4e3904e9ee
|
[
"MIT"
] | 1
|
2019-01-15T06:12:37.000Z
|
2019-01-15T06:12:37.000Z
|
app/tests/v1/test_meetup.py
|
KelynPNjeri/Questioner-API
|
5d71b169be0db2d18642b13075b2cc4e3904e9ee
|
[
"MIT"
] | 17
|
2019-01-08T16:02:37.000Z
|
2019-10-21T17:38:01.000Z
|
app/tests/v1/test_meetup.py
|
KelynPNjeri/Questioner-API
|
5d71b169be0db2d18642b13075b2cc4e3904e9ee
|
[
"MIT"
] | null | null | null |
"""Module for Testing the Meetup Endpoint."""
import json
# Local Import
from .basecase import TestBaseCase as base
class TestMeetup(base):
"""Testing the Meetup Endpoints with valid input."""
def setUp(self):
base.setUp(self)
def test_create_meetup(self):
"""Testing Creation of a Meetup."""
response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type,
)
response_data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertEqual(response_data["message"], "Meetup was created successfully.")
def test_fetching_all_meetups(self):
"""Testing Fetching of all meetups."""
post_response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type
)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(
post_response_data["message"], "Meetup was created successfully."
)
response = self.client.get("/api/v1/meetups/upcoming", content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_fetch_single_meetup(self):
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
# Fetching Single Question.
response = self.client.get('api/v1/meetups/{}'.format(post_response_data["data"]["id"]), content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_rsvp_to_meetup(self):
"""Test RSVPing to a meetup."""
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
# Posting RSVP.
response = self.client.post('/api/v1/meetups/{}/rsvps'.format(post_response_data["data"]["id"]), data=json.dumps(self.rsvp_payload), content_type=self.content_type)
self.assertEqual(response.status_code, 201)
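# --- Illustrative sketch (commentary added; an assumption about the shared
# base case, not part of the original file) ---
# TestBaseCase is assumed to provide roughly the fixtures used above:
#   class TestBaseCase(unittest.TestCase):
#       def setUp(self):
#           self.client = create_app('testing').test_client()
#           self.content_type = 'application/json'
#           self.meetup_payload = {...}   # valid meetup fields
#           self.rsvp_payload = {...}     # valid RSVP fields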
| 42
| 172
| 0.675824
|
import json
from .basecase import TestBaseCase as base
class TestMeetup(base):
def setUp(self):
base.setUp(self)
def test_create_meetup(self):
response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type,
)
response_data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertEqual(response_data["message"], "Meetup was created successfully.")
def test_fetching_all_meetups(self):
post_response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type
)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(
post_response_data["message"], "Meetup was created successfully."
)
response = self.client.get("/api/v1/meetups/upcoming", content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_fetch_single_meetup(self):
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
response = self.client.get('api/v1/meetups/{}'.format(post_response_data["data"]["id"]), content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_rsvp_to_meetup(self):
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
response = self.client.post('/api/v1/meetups/{}/rsvps'.format(post_response_data["data"]["id"]), data=json.dumps(self.rsvp_payload), content_type=self.content_type)
self.assertEqual(response.status_code, 201)
| true
| true
|
f70bb60cd9a165991bea09f982f2310be199ff23
| 4,891
|
py
|
Python
|
yolo3/models/yolo3_resnet50.py
|
holajoa/keras-YOLOv3-model-set
|
c15b8a2f48371c063f6482b25593dc70d5956323
|
[
"MIT"
] | 601
|
2019-08-24T10:14:52.000Z
|
2022-03-29T15:05:33.000Z
|
yolo3/models/yolo3_resnet50.py
|
holajoa/keras-YOLOv3-model-set
|
c15b8a2f48371c063f6482b25593dc70d5956323
|
[
"MIT"
] | 220
|
2019-10-04T18:57:59.000Z
|
2022-03-31T15:30:37.000Z
|
yolo3/models/yolo3_resnet50.py
|
holajoa/keras-YOLOv3-model-set
|
c15b8a2f48371c063f6482b25593dc70d5956323
|
[
"MIT"
] | 218
|
2019-10-31T03:32:11.000Z
|
2022-03-25T14:44:19.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 ResNet50 Model Defined in Keras."""
from tensorflow.keras.layers import UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet import ResNet50
from yolo3.models.layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def yolo3_resnet50_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 ResNet50 model CNN body in Keras."""
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
'''Create YOLO_v3 Lite ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_spp_resnet50_body(inputs, num_anchors, num_classes):
'''Create YOLO_v3 Lite SPP ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def tiny_yolo3_resnet50_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
def tiny_yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 Lite ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
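# --- Illustrative usage sketch (commentary added; not part of the original file) ---
# Builds the full-size body with example values; note `Input` is not imported
# above, so it is pulled in here. Three anchors per scale and 20 classes are
# arbitrary example values.
#   from tensorflow.keras.layers import Input
#   inputs = Input(shape=(416, 416, 3))
#   model = yolo3_resnet50_body(inputs, num_anchors=3, num_classes=20)
#   model.summary()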
| 36.22963
| 143
| 0.686363
|
from tensorflow.keras.layers import UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet import ResNet50
from yolo3.models.layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def yolo3_resnet50_body(inputs, num_anchors, num_classes):
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
f1 = resnet50.get_layer('conv5_block3_out').output
f2 = resnet50.get_layer('conv4_block6_out').output
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
f1 = resnet50.get_layer('conv5_block3_out').output
f2 = resnet50.get_layer('conv4_block6_out').output
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_spp_resnet50_body(inputs, num_anchors, num_classes):
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
f1 = resnet50.get_layer('conv5_block3_out').output
f2 = resnet50.get_layer('conv4_block6_out').output
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def tiny_yolo3_resnet50_body(inputs, num_anchors, num_classes):
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
f1 = resnet50.get_layer('conv5_block3_out').output
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
def tiny_yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
f1 = resnet50.get_layer('conv5_block3_out').output
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
| true
| true
|
f70bb65474dfcd0bf14ad96f156718369f73d25c
| 4,069
|
py
|
Python
|
experiments/ashvin/icml2020/hand/sparse/rewards_relocate1.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/icml2020/hand/sparse/rewards_relocate1.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/icml2020/hand/sparse/rewards_relocate1.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
"""
AWR + SAC from demo experiment
"""
from railrl.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from railrl.demos.source.mdp_path_loader import MDPPathLoader
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.arglauncher import run_variants
from railrl.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=5000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["relocate-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(5),
'trainer_kwargs.beta': [0.1, 0.3, 1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", "shared"],
'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
}
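# Added note (hedged): DeterministicHyperparameterSweeper expands the
# Cartesian product of the value lists above, so this space yields
# 5 (seeds) x 3 (beta) x 2 (std_architecture) = 30 variants.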
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| 30.140741
| 75
| 0.574343
|
from railrl.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from railrl.demos.source.mdp_path_loader import MDPPathLoader
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.arglauncher import run_variants
from railrl.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=5000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None,
terminal_transform_kwargs=None, # t = 0
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["relocate-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(5),
'trainer_kwargs.beta': [0.1, 0.3, 1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", "shared"],
'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| true
| true
|
f70bb6d64ee156cd9362f37bcdaef380680dc2cd
| 26,277
|
py
|
Python
|
parlai/tasks/blended_skill_talk/agents.py
|
Misterion777/ParlAI
|
1a6849d643a30a9a981825d9f50470b6512817c5
|
[
"MIT"
] | null | null | null |
parlai/tasks/blended_skill_talk/agents.py
|
Misterion777/ParlAI
|
1a6849d643a30a9a981825d9f50470b6512817c5
|
[
"MIT"
] | null | null | null |
parlai/tasks/blended_skill_talk/agents.py
|
Misterion777/ParlAI
|
1a6849d643a30a9a981825d9f50470b6512817c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import random
import re
from collections import defaultdict
from typing import List, Optional, Dict, Tuple
from parlai.core.opt import Opt
from parlai.core.teachers import ParlAIDialogTeacher, create_task_agent_from_taskname
from parlai.tasks.convai2.agents import BothTeacher
from parlai.tasks.empathetic_dialogues.agents import EmpatheticDialoguesTeacher
from parlai.tasks.wizard_of_wikipedia.agents import WizardDialogKnowledgeTeacher
from parlai.utils.misc import warn_once
from parlai.utils.io import PathManager
from parlai.utils.concepts import split_concepts
from .build import build
##################################################
#### Teacher for the BlendedSkillTalk Dataset ####
##################################################
def raw_data_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'blended_skill_talk', dt + '.json')
def _processed_data_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'blended_skill_talk', dt + '.txt')
def _persona_list_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(opt['datapath'], 'blended_skill_talk', 'persona_list.txt')
def _topic_to_persona_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(
opt['datapath'], 'blended_skill_talk', 'topic_to_persona_list.txt'
)
def _cached_data_path(opt: Opt, experiencer_side_only: bool) -> str:
"""
Build the data if it doesn't exist.
See EDPersonaTopicifierTeacher in ParlAI v1.5.1 and earlier for the code to add
persona strings to the base EmpatheticDialogues dataset.
"""
build(opt)
dt = opt['datatype'].split(':')[0]
side_string = 'experiencer_only' if experiencer_side_only else 'both_sides'
return os.path.join(
opt['datapath'],
'blended_skill_talk',
f'ed_persona_topicifier__{dt}__{side_string}.json',
)
def safe_personas_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(opt['datapath'], 'blended_skill_talk', 'safe_personas.txt')
class BlendedSkillTalkTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['parlaidialogteacher_datafile'] = _processed_data_path(opt)
super().__init__(opt, shared)
class InteractiveTeacher(BlendedSkillTalkTeacher):
# Dummy class to add arguments for interactive world.
pass
class SelfchatTeacher(BlendedSkillTalkTeacher):
# Dummy class to add arguments for interactive world.
pass
class DefaultTeacher(BlendedSkillTalkTeacher):
pass
def create_agents(opt):
if not opt.get('interactive_task', False):
return create_task_agent_from_taskname(opt)
else:
# interactive task has no task agents (they are attached as user agents)
return []
################################################################################
## Teachers for adding ConvAI2 personas and WoW topics to existing datasets ##
################################################################################
class PersonaTopicifier:
def __init__(
self,
opt: Opt,
should_have_personas: bool = False,
should_have_topics: bool = False,
no_persona_is_error: bool = False,
):
self.utterance_to_persona_map = {}
self.should_have_personas = should_have_personas
self.should_have_topics = should_have_topics
self.no_persona_is_error = no_persona_is_error
# Throw an exception if a persona is not found for the input WoW topic
# this returns map of persona line str to WoW topic
self.personas_file_path = _persona_list_path(opt)
self.topic_to_persona_path = _topic_to_persona_path(opt)
(
self.wow_topics_to_persona_strings_map,
self.persona_strings_to_wow_topics_map,
) = self._setup_personas_to_wow_topics()
with PathManager.open(self.personas_file_path, 'r') as f:
self.personas = f.read().strip().split('||')
# There's an extra line at the end of the file which is ''
self.personas = [p for p in self.personas if p]
def _setup_personas_to_wow_topics(
self,
) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
persona_strings_to_topics = defaultdict(list)
topics_to_persona_strings = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
topics_to_persona_strings[topic] = persona_strings
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
warn_once(
f'FINISHED MAPPING personas to topics, got: {len(list(persona_strings_to_topics.keys()))} persona strings to map to topics.'
)
return topics_to_persona_strings, persona_strings_to_topics
def __calculate_word_overlap(self, a, b):
"""
Very rudimentary way to calculate word overlap.
"""
score = 0
tokens_a = a.split(' ')
tokens_a = [ta for ta in tokens_a if len(ta) >= 5]
for ta in tokens_a:
if ta in b:
score += 1
tokens_b = b.split(' ')
tokens_b = [tb for tb in tokens_b if len(tb) >= 5]
for tb in tokens_b:
if tb in a:
score += 1
return score
def __choose_persona_from_text(self, utt):
utt = utt.strip()
if utt not in self.utterance_to_persona_map:
best_word_overlap = 0
best_persona = None
for p in self.personas:
word_overlap = self.__calculate_word_overlap(utt, p)
if word_overlap >= best_word_overlap:
best_word_overlap = word_overlap
best_persona = p
if not best_persona:
raise Exception(
f'No persona found for utterance: \"{utt}\". This should not happen.'
)
self.utterance_to_persona_map[utt] = best_persona
# Should have a \n at the end of it already
return best_persona
return self.utterance_to_persona_map[utt]
def __choose_persona_from_topic(self, topic):
topic = topic.strip()
persona_strings = self.wow_topics_to_persona_strings_map[topic]
for p in persona_strings:
for persona in self.personas:
if p in persona:
return persona
if self.no_persona_is_error:
raise ValueError(f'ERROR: Found no persona for topic: {topic}.')
else:
warn_once(f'Found no persona for topic: {topic}. Returning first persona.')
return self.personas[0]
def __choose_topic(self, persona):
persona_lines = persona.strip().split('\n')
for p in persona_lines:
p_str = p.replace('your persona:', '')
p_str = p_str.strip()
if p_str in self.persona_strings_to_wow_topics_map:
topics = self.persona_strings_to_wow_topics_map[p_str]
topic = topics[0] + '\n'
return topic
for utt, topics in self.persona_strings_to_wow_topics_map.items():
utt_words = utt.split()
utt_words_long = [utt for utt in utt_words if len(utt) > 6]
for long_utt in utt_words_long:
if long_utt in persona:
return topics[0] + '\n'
return topics[0] + '\n'
def get_modified_text(self, text):
# Order should be <Persona> \n <Topic> \n <Utterance>
# Should be used for entry_idx == 0 only (for all first
# utterances only)
# has_neither = 'persona:' not in text and '\n' not in text
# has_wow_topic_only = 'persona:' not in text and '\n' in text
# has_persona_only = 'persona:' in text
has_neither = not self.should_have_personas and not self.should_have_topics
has_wow_topic_only = not self.should_have_personas and self.should_have_topics
has_persona_only = not self.should_have_topics and self.should_have_personas
if (self.should_have_personas and (has_neither or has_wow_topic_only)) or (
self.should_have_topics and (has_neither or has_persona_only)
):
raise Exception(
f'Malformed text: {text}, should_have_personas: {self.should_have_personas}, should_have_topics: {self.should_have_topics}, has_neither: {has_neither}, has_wow_topic_only: {has_wow_topic_only}, has_persona_only: {has_persona_only}'
)
if has_neither:
# Will occur with ED
persona = self.__choose_persona_from_text(text)
topic = self.__choose_topic(persona)
utt = text
elif has_wow_topic_only:
# Will occur with Wizard
parts = text.strip().split('\n')
if len(parts) > 1:
topic = parts[0] + '\n'
utt = parts[1]
persona = self.__choose_persona_from_topic(topic)
else:
# Only has a topic, no utterance
topic = parts[0] + '\n'
utt = ''
persona = self.__choose_persona_from_topic(topic)
elif has_persona_only:
# Will occur with Convai2
lines = text.strip().split('\n')
utt = lines[-1]
persona = ''.join(l + '\n' for l in lines[:-1])
topic = self.__choose_topic(persona)
else:
raise Exception(f'Unknown structure of utterance: {text}')
modified_utterance = persona + topic + utt
return modified_utterance
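# Added illustration (hedged; the persona and topic strings are invented):
# for a bare ED-style utterance, get_modified_text prepends a matched persona
# and topic in the order <Persona>\n<Topic>\n<Utterance>, returning e.g.
#   "your persona: i love hiking.\nHiking\nwhat did you do last weekend?"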
################################################################
## Generator of context for crowdsourcing BST conversations ##
################################################################
class ContextGenerator:
"""
Generates contexts shown to crowdsourced workers when collecting BST conversations.
This generator was used to generate the context information shown to workers at the
beginning of a conversation, when crowdsourcing the conversations that make up the
BST dataset.
"""
def __init__(self, opt, datatype: str = 'train', seed: Optional[int] = None):
"""
Initialize the context generator.
opt: only a 'datapath' key is required, to specify the ParlAI data folder
"""
if seed is not None:
self.rng = random.Random(seed)
else:
self.rng = random.Random()
convai2_opt = Opt({'datapath': opt['datapath'], 'datatype': datatype})
self.convai2_teacher = BothTeacher(convai2_opt)
ed_opt = Opt(
{
'datapath': opt['datapath'],
'datatype': datatype,
'train_experiencer_only': True,
}
)
# Specify train_experiencer_only = True because we want to ensure that the text
# will correspond to a Speaker utterance and the label to a Listener response
self.ed_teacher = EmpatheticDialoguesTeacher(ed_opt)
wow_opt = Opt({'datapath': opt['datapath'], 'datatype': datatype})
self.wow_teacher = WizardDialogKnowledgeTeacher(wow_opt)
self.topic_to_persona_path = _topic_to_persona_path(opt)
self.wow_topics_to_episode_idxes = self._setup_topics_to_episodes()
self.persona_strings_to_wow_topics = self._setup_personas_to_topics()
def get_context(self) -> dict:
"""
Get context information to be shown at the beginning of one conversation.
Values in return dict:
- context_dataset: the dataset (ConvAI2, EmpatheticDialogues, or Wizard of
Wikipedia) used to generate the context information.
- persona_1_strings, persona_2_strings: 2 persona strings each for the two
speakers, chosen randomly from the ConvAI2 dataset. If context_dataset ==
"wizard_of_wikipedia", these persona strings will be matched to the WoW
topic returned in the "additional_context" field.
- additional_context: provides additional bits of information to give context
for the speakers. If context_dataset == "empathetic_dialogues", this is a
situation from the start of an ED conversation. If context_dataset ==
"wizard_of_wikipedia", this is a topic from the WoW dataset that matches the
persona strings. If context_dataset == "convai2", this is None.
- person1_seed_utterance, person2_seed_utterance: two lines of a conversation
from the dataset specified by "context_dataset". They will be shown to the
speakers to "seed" the conversation, and the speakers continue from where
the lines left off.
"""
# Determine which dataset we will show context for
rand_value = self.rng.random()
if rand_value < 1 / 3:
context_dataset = 'convai2'
elif rand_value < 2 / 3:
context_dataset = 'empathetic_dialogues'
else:
context_dataset = 'wizard_of_wikipedia'
if context_dataset == 'convai2':
# Select episode
episode_idx = self.rng.randrange(self.convai2_teacher.num_episodes())
# Extract personas
persona_1_strings, persona_2_strings = self._extract_personas(episode_idx)
# Sample persona strings
selected_persona_1_strings = self.rng.sample(persona_1_strings, 2)
selected_persona_2_strings = self.rng.sample(persona_2_strings, 2)
# Select previous utterances
num_entries = len(self.convai2_teacher.data.data[episode_idx])
entry_idx = self.rng.randrange(1, num_entries)
# Don't select the first entry, which often doesn't include an apprentice
# utterance
chosen_entry = self.convai2_teacher.get(episode_idx, entry_idx=entry_idx)
person1_seed_utterance = chosen_entry['text']
assert len(chosen_entry['labels']) == 1
person2_seed_utterance = chosen_entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': None,
'person1_seed_utterance': person1_seed_utterance,
'person2_seed_utterance': person2_seed_utterance,
}
elif context_dataset == 'empathetic_dialogues':
# Select episode
persona_episode_idx = self.rng.randrange(
self.convai2_teacher.num_episodes()
)
# Extract personas
persona_1_strings, persona_2_strings = self._extract_personas(
persona_episode_idx
)
# Sample persona strings
selected_persona_1_strings = self.rng.sample(persona_1_strings, 2)
selected_persona_2_strings = self.rng.sample(persona_2_strings, 2)
# Select previous utterances
episode_idx = self.rng.randrange(self.ed_teacher.num_episodes())
entry_idx = 0 # We'll only use the first pair of utterances
entry = self.ed_teacher.get(episode_idx, entry_idx=entry_idx)
situation = entry['situation']
speaker_utterance = entry['text']
assert len(entry['labels']) == 1
listener_response = entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': situation,
'person1_seed_utterance': speaker_utterance,
'person2_seed_utterance': listener_response,
}
elif context_dataset == 'wizard_of_wikipedia':
# Pull different personas until you get a pair for which at least one
# sentence has a WoW topic bound to it
num_tries = 0
while True:
num_tries += 1
# Extract a random (matched) pair of personas
persona_episode_idx = self.rng.randrange(
self.convai2_teacher.num_episodes()
)
all_persona_strings = dict()
all_persona_strings[1], all_persona_strings[2] = self._extract_personas(
persona_episode_idx
)
# See if any of the persona strings have a matching WoW topic
matching_persona_string_idxes = []
for persona_idx, persona_strings in all_persona_strings.items():
for str_idx, str_ in enumerate(persona_strings):
wow_topics = self.persona_strings_to_wow_topics[str_]
if len(wow_topics) > 0:
matching_persona_string_idxes.append((persona_idx, str_idx))
if len(matching_persona_string_idxes) > 0:
break
print(
f'{num_tries:d} try/tries needed to find a pair of personas with an '
f'associated WoW topic.'
)
# Pick out the WoW topic and matching persona string
matching_persona_idx, matching_persona_string_idx = self.rng.sample(
matching_persona_string_idxes, k=1
)[0]
matching_persona_string = all_persona_strings[matching_persona_idx][
matching_persona_string_idx
]
wow_topic = self.rng.sample(
self.persona_strings_to_wow_topics[matching_persona_string], k=1
)[0]
# Sample persona strings, making sure that we keep the one connected to the
# WoW topic
if matching_persona_idx == 1:
remaining_persona_1_strings = [
str_
for str_ in all_persona_strings[1]
if str_ != matching_persona_string
]
selected_persona_1_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_1_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_1_strings)
selected_persona_2_strings = self.rng.sample(all_persona_strings[2], 2)
else:
selected_persona_1_strings = self.rng.sample(all_persona_strings[1], 2)
remaining_persona_2_strings = [
str_
for str_ in all_persona_strings[2]
if str_ != matching_persona_string
]
selected_persona_2_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_2_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_2_strings)
# Sample WoW previous utterances, given the topic
episode_idx = self.rng.sample(
self.wow_topics_to_episode_idxes[wow_topic], k=1
)[0]
entry_idx = 1
# Select the second entry, which (unlike the first entry) will always have
# two valid utterances and which will not usually be so far along in the
# conversation that the new Turkers will be confused
entry = self.wow_teacher.get(episode_idx, entry_idx=entry_idx)
apprentice_utterance = entry['text']
assert len(entry['labels']) == 1
wizard_utterance = entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': wow_topic,
'person1_seed_utterance': apprentice_utterance,
'person2_seed_utterance': wizard_utterance,
}
def _setup_personas_to_topics(self) -> Dict[str, List[str]]:
"""
Create a map from ConvAI2 personas to WoW topics that they correspond to.
"""
print('Starting to map personas to topics.')
persona_strings_to_topics = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
if topic not in self.wow_topics_to_episode_idxes:
continue
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
print('Finished mapping personas to topics.')
return persona_strings_to_topics
def _setup_topics_to_episodes(self) -> Dict[str, List[int]]:
"""
Create a map from WoW topics to the indices of the WoW episodes that use them.
"""
print('Starting to map topics to episodes.')
topics_to_episodes = defaultdict(list)
for episode_idx in range(self.wow_teacher.num_episodes()):
topic = self.wow_teacher.get(episode_idx, entry_idx=0)['chosen_topic']
topics_to_episodes[topic].append(episode_idx)
print('Finished mapping topics to episodes.')
return topics_to_episodes
def _extract_personas(self, episode_idx: int) -> Tuple[List[str], List[str]]:
"""
For the given ConvAI2 conversation, return strings of both speakers' personas.
"""
first_entry = self.convai2_teacher.get(episode_idx, entry_idx=0)
first_text_strings = first_entry['text'].split('\n')
persona_1_strings = []
persona_2_strings = []
for str_ in first_text_strings[:-1]: # The last string is the first utterance
if str_.startswith('your persona: '): # Here, "you" are Person 2
persona_2_strings.append(str_[len('your persona: ') :])
elif str_.startswith("partner's persona: "):
persona_1_strings.append(str_[len("partner's persona: ") :])
else:
raise ValueError('Persona string cannot be parsed!')
return persona_1_strings, persona_2_strings
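# --- Illustrative usage (added sketch; the datapath is a placeholder) ---
# Per the __init__ docstring, opt only needs a 'datapath' key:
# generator = ContextGenerator({'datapath': '/tmp/parlai_data'}, datatype='train', seed=0)
# ctx = generator.get_context()
# print(ctx['context_dataset'], ctx['person1_seed_utterance'])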
import parlai.utils.logging as logging
from parlai.utils.misc import str_to_msg
TOKEN_KNOWLEDGE = '__knowledge__'
TOKEN_END_KNOWLEDGE = '__endknowledge__'
class ConceptsTeacher(BlendedSkillTalkTeacher):
def _setup_data(self, path):
logging.info(f"Loading ParlAI text data: {path}")
self.episodes = []
self.num_exs = 0
eps = []
with PathManager.open(path, newline='\n', encoding='utf-8') as read:
for line_no, line in enumerate(read, 1):
msg = str_to_msg(line.rstrip('\n'))
if msg and 'eval_labels' in msg:
raise ValueError(
f"It looks like you've written eval_labels as a key in your "
f"data file. This is not appropriate; labels will be converted "
f"for you automatically. This is happening on Line {line_no} "
f"in {path}. The line is:\n\t{line}"
)
if msg and 'text' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "text" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'labels' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "labels" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'concepts' not in msg:
raise ValueError(
f'BlendedSkillTalkConceptsTeacher requires a "concepts" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg:
self.num_exs += 1
# concepts = .replace("|",". ")
concepts = msg["concepts"]
if self.opt.get("dict_tokenizer", "") == "re":
concepts = split_concepts(concepts)
text = msg['text'] + concepts
msg.force_set('text', text)
del msg['concepts']
eps.append(msg)
if msg.get('episode_done', False):
self.episodes.append(eps)
eps = []
if len(eps) > 0:
# add last episode
eps[-1].force_set('episode_done', True)
self.episodes.append(eps)
if len(self.episodes) == 1 and line_no > 100:
logging.error(
f'The data in {path} looks like one very long episode. If this '
f'is intentional, you may ignore this, but you MAY have a bug in '
f'your data.'
)
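# Added note (hedged): each line parsed by ConceptsTeacher._setup_data is
# assumed to follow ParlAI's tab-separated dialog text format, extended with
# a "concepts" field, e.g. (illustrative values only):
#   text:hi , how are you ?\tlabels:i am great !\tconcepts:greeting|small talk\tepisode_done:True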
| 41.381102
| 247
| 0.595464
|
import copy
import os
import random
import re
from collections import defaultdict
from typing import List, Optional, Dict, Tuple
from parlai.core.opt import Opt
from parlai.core.teachers import ParlAIDialogTeacher, create_task_agent_from_taskname
from parlai.tasks.convai2.agents import BothTeacher
from parlai.tasks.empathetic_dialogues.agents import EmpatheticDialoguesTeacher
from parlai.tasks.wizard_of_wikipedia.agents import WizardDialogKnowledgeTeacher
from parlai.utils.misc import warn_once
from parlai.utils.io import PathManager
from parlai.utils.concepts import split_concepts
from .build import build
self.should_have_topics = should_have_topics
self.no_persona_is_error = no_persona_is_error
# Throw an exception if a persona is not found for the input WoW topic
# this returns map of persona line str to WoW topic
self.personas_file_path = _persona_list_path(opt)
self.topic_to_persona_path = _topic_to_persona_path(opt)
(
self.wow_topics_to_persona_strings_map,
self.persona_strings_to_wow_topics_map,
) = self._setup_personas_to_wow_topics()
with PathManager.open(self.personas_file_path, 'r') as f:
self.personas = f.read().strip().split('||')
# There's an extra line at the end of the file which is ''
self.personas = [p for p in self.personas if p]
def _setup_personas_to_wow_topics(
self,
) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
persona_strings_to_topics = defaultdict(list)
topics_to_persona_strings = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
topics_to_persona_strings[topic] = persona_strings
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
warn_once(
f'FINISHED MAPPING personas to topics, got: {len(list(persona_strings_to_topics.keys()))} persona strings to map to topics.'
)
return topics_to_persona_strings, persona_strings_to_topics
def __calculate_word_overlap(self, a, b):
score = 0
tokens_a = a.split(' ')
tokens_a = [ta for ta in tokens_a if len(ta) >= 5]
for ta in tokens_a:
if ta in b:
score += 1
tokens_b = b.split(' ')
tokens_b = [tb for tb in tokens_b if len(tb) >= 5]
for tb in tokens_b:
if tb in a:
score += 1
return score
def __choose_persona_from_text(self, utt):
utt = utt.strip()
if utt not in self.utterance_to_persona_map:
best_word_overlap = 0
best_persona = None
for p in self.personas:
word_overlap = self.__calculate_word_overlap(utt, p)
if word_overlap >= best_word_overlap:
best_word_overlap = word_overlap
best_persona = p
if not best_persona:
raise Exception(
f'No persona found for utterance: \"{utt}\". This should not happen.'
)
self.utterance_to_persona_map[utt] = best_persona
return best_persona
return self.utterance_to_persona_map[utt]
def __choose_persona_from_topic(self, topic):
topic = topic.strip()
persona_strings = self.wow_topics_to_persona_strings_map[topic]
for p in persona_strings:
for persona in self.personas:
if p in persona:
return persona
if self.no_persona_is_error:
raise ValueError(f'ERROR: Found no persona for topic: {topic}.')
else:
warn_once(f'Found no persona for topic: {topic}. Returning first persona.')
return self.personas[0]
def __choose_topic(self, persona):
persona_lines = persona.strip().split('\n')
for p in persona_lines:
p_str = p.replace('your persona:', '')
p_str = p_str.strip()
if p_str in self.persona_strings_to_wow_topics_map:
topics = self.persona_strings_to_wow_topics_map[p_str]
topic = topics[0] + '\n'
return topic
for utt, topics in self.persona_strings_to_wow_topics_map.items():
utt_words = utt.split()
utt_words_long = [utt for utt in utt_words if len(utt) > 6]
for long_utt in utt_words_long:
if long_utt in persona:
return topics[0] + '\n'
return topics[0] + '\n'
def get_modified_text(self, text):
has_neither = not self.should_have_personas and not self.should_have_topics
has_wow_topic_only = not self.should_have_personas and self.should_have_topics
has_persona_only = not self.should_have_topics and self.should_have_personas
if (self.should_have_personas and (has_neither or has_wow_topic_only)) or (
self.should_have_topics and (has_neither or has_persona_only)
):
raise Exception(
f'Malformed text: {text}, should_have_personas: {self.should_have_personas}, should_have_topics: {self.should_have_topics}, has_neither: {has_neither}, has_wow_topic_only: {has_wow_topic_only}, has_persona_only: {has_persona_only}'
)
if has_neither:
persona = self.__choose_persona_from_text(text)
topic = self.__choose_topic(persona)
utt = text
elif has_wow_topic_only:
parts = text.strip().split('\n')
if len(parts) > 1:
topic = parts[0] + '\n'
utt = parts[1]
persona = self.__choose_persona_from_topic(topic)
else:
topic = parts[0] + '\n'
utt = ''
persona = self.__choose_persona_from_topic(topic)
elif has_persona_only:
lines = text.strip().split('\n')
utt = lines[-1]
persona = ''.join(l + '\n' for l in lines[:-1])
topic = self.__choose_topic(persona)
else:
raise Exception(f'Unknown structure of utterance: {text}')
modified_utterance = persona + topic + utt
return modified_utterance
# Extract a random (matched) pair of personas
persona_episode_idx = self.rng.randrange(
self.convai2_teacher.num_episodes()
)
all_persona_strings = dict()
all_persona_strings[1], all_persona_strings[2] = self._extract_personas(
persona_episode_idx
)
# See if any of the persona strings have a matching WoW topic
matching_persona_string_idxes = []
for persona_idx, persona_strings in all_persona_strings.items():
for str_idx, str_ in enumerate(persona_strings):
wow_topics = self.persona_strings_to_wow_topics[str_]
if len(wow_topics) > 0:
matching_persona_string_idxes.append((persona_idx, str_idx))
if len(matching_persona_string_idxes) > 0:
break
print(
f'{num_tries:d} try/tries needed to find a pair of personas with an '
f'associated WoW topic.'
)
# Pick out the WoW topic and matching persona string
matching_persona_idx, matching_persona_string_idx = self.rng.sample(
matching_persona_string_idxes, k=1
)[0]
matching_persona_string = all_persona_strings[matching_persona_idx][
matching_persona_string_idx
]
wow_topic = self.rng.sample(
self.persona_strings_to_wow_topics[matching_persona_string], k=1
)[0]
# Sample persona strings, making sure that we keep the one connected to the
# WoW topic
if matching_persona_idx == 1:
remaining_persona_1_strings = [
str_
for str_ in all_persona_strings[1]
if str_ != matching_persona_string
]
selected_persona_1_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_1_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_1_strings)
selected_persona_2_strings = self.rng.sample(all_persona_strings[2], 2)
else:
selected_persona_1_strings = self.rng.sample(all_persona_strings[1], 2)
remaining_persona_2_strings = [
str_
for str_ in all_persona_strings[2]
if str_ != matching_persona_string
]
selected_persona_2_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_2_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_2_strings)
# Sample WoW previous utterances, given the topic
episode_idx = self.rng.sample(
self.wow_topics_to_episode_idxes[wow_topic], k=1
)[0]
entry_idx = 1
# Select the second entry, which (unlike the first entry) will always have
# two valid utterances and which will not usually be so far along in the
# conversation that the new Turkers will be confused
entry = self.wow_teacher.get(episode_idx, entry_idx=entry_idx)
apprentice_utterance = entry['text']
assert len(entry['labels']) == 1
wizard_utterance = entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': wow_topic,
'person1_seed_utterance': apprentice_utterance,
'person2_seed_utterance': wizard_utterance,
}
def _setup_personas_to_topics(self) -> Dict[str, List[str]]:
print('Starting to map personas to topics.')
persona_strings_to_topics = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
if topic not in self.wow_topics_to_episode_idxes:
continue
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
print('Finished mapping personas to topics.')
return persona_strings_to_topics
def _setup_topics_to_episodes(self) -> Dict[str, List[int]]:
print('Starting to map topics to episodes.')
topics_to_episodes = defaultdict(list)
for episode_idx in range(self.wow_teacher.num_episodes()):
topic = self.wow_teacher.get(episode_idx, entry_idx=0)['chosen_topic']
topics_to_episodes[topic].append(episode_idx)
print('Finished mapping topics to episodes.')
return topics_to_episodes
def _extract_personas(self, episode_idx: int) -> Tuple[List[str], List[str]]:
first_entry = self.convai2_teacher.get(episode_idx, entry_idx=0)
first_text_strings = first_entry['text'].split('\n')
persona_1_strings = []
persona_2_strings = []
for str_ in first_text_strings[:-1]: # The last string is the first utterance
if str_.startswith('your persona: '): # Here, "you" are Person 2
persona_2_strings.append(str_[len('your persona: ') :])
elif str_.startswith("partner's persona: "):
persona_1_strings.append(str_[len("partner's persona: ") :])
else:
raise ValueError('Persona string cannot be parsed!')
return persona_1_strings, persona_2_strings
import parlai.utils.logging as logging
from parlai.utils.misc import str_to_msg
TOKEN_KNOWLEDGE = '__knowledge__'
TOKEN_END_KNOWLEDGE = '__endknowledge__'
class ConceptsTeacher(BlendedSkillTalkTeacher):
def _setup_data(self, path):
logging.info(f"Loading ParlAI text data: {path}")
self.episodes = []
self.num_exs = 0
eps = []
with PathManager.open(path, newline='\n', encoding='utf-8') as read:
for line_no, line in enumerate(read, 1):
msg = str_to_msg(line.rstrip('\n'))
if msg and 'eval_labels' in msg:
raise ValueError(
f"It looks like you've written eval_labels as a key in your "
f"data file. This is not appropriate; labels will be converted "
f"for you automatically. This is happening on Line {line_no} "
f"in {path}. The line is:\n\t{line}"
)
if msg and 'text' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "text" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'labels' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "labels" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'concepts' not in msg:
raise ValueError(
f'BlendedSkillTalkConceptsTeacher requires a "concepts" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg:
self.num_exs += 1
concepts = msg["concepts"]
if self.opt.get("dict_tokenizer", "") == "re":
concepts = split_concepts(concepts)
text = msg['text'] + concepts
msg.force_set('text', text)
del msg['concepts']
eps.append(msg)
if msg.get('episode_done', False):
self.episodes.append(eps)
eps = []
if len(eps) > 0:
eps[-1].force_set('episode_done', True)
self.episodes.append(eps)
if len(self.episodes) == 1 and line_no > 100:
logging.error(
f'The data in {path} looks like one very long episode. If this '
f'is intentional, you may ignore this, but you MAY have a bug in '
f'your data.'
)
| true
| true
|
f70bb732c7dba05e730ab3f4b6cafea04d163ce2
| 2,582
|
py
|
Python
|
meiduo_mall02/apps/users/migrations/0003_auto_20190519_1544.py
|
hongyinwang/meiduo_project02
|
3f21773d2d98204400ea2c3738969ac2a593b242
|
[
"MIT"
] | null | null | null |
meiduo_mall02/apps/users/migrations/0003_auto_20190519_1544.py
|
hongyinwang/meiduo_project02
|
3f21773d2d98204400ea2c3738969ac2a593b242
|
[
"MIT"
] | null | null | null |
meiduo_mall02/apps/users/migrations/0003_auto_20190519_1544.py
|
hongyinwang/meiduo_project02
|
3f21773d2d98204400ea2c3738969ac2a593b242
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-05-19 15:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('areas', '0001_initial'),
('users', '0002_user_email_active'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('title', models.CharField(max_length=20, verbose_name='地址名称')),
('receiver', models.CharField(max_length=20, verbose_name='收货人')),
('place', models.CharField(max_length=50, verbose_name='地址')),
('mobile', models.CharField(max_length=11, verbose_name='手机')),
('tel', models.CharField(blank=True, default='', max_length=20, null=True, verbose_name='固定电话')),
('email', models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='电子邮箱')),
('is_deleted', models.BooleanField(default=False, verbose_name='逻辑删除')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='city_addresses', to='areas.Area', verbose_name='市')),
('district', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='district_addresses', to='areas.Area', verbose_name='区')),
('province', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='province_addresses', to='areas.Area', verbose_name='省')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户地址',
'ordering': ['-update_time'],
'db_table': 'tb_address',
'verbose_name_plural': '用户地址',
},
),
migrations.AddField(
model_name='user',
name='default_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users', to='users.Address', verbose_name='默认地址'),
),
]
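# Added usage note (hedged; not part of the migration): once applied, the
# related_name values give reverse accessors such as user.addresses.all()
# and area.province_addresses.all(); user.default_address is nullable and
# is cleared (SET_NULL) if the referenced Address row is deleted.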
| 52.693878
| 168
| 0.62316
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('areas', '0001_initial'),
('users', '0002_user_email_active'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('title', models.CharField(max_length=20, verbose_name='地址名称')),
('receiver', models.CharField(max_length=20, verbose_name='收货人')),
('place', models.CharField(max_length=50, verbose_name='地址')),
('mobile', models.CharField(max_length=11, verbose_name='手机')),
('tel', models.CharField(blank=True, default='', max_length=20, null=True, verbose_name='固定电话')),
('email', models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='电子邮箱')),
('is_deleted', models.BooleanField(default=False, verbose_name='逻辑删除')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='city_addresses', to='areas.Area', verbose_name='市')),
('district', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='district_addresses', to='areas.Area', verbose_name='区')),
('province', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='province_addresses', to='areas.Area', verbose_name='省')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户地址',
'ordering': ['-update_time'],
'db_table': 'tb_address',
'verbose_name_plural': '用户地址',
},
),
migrations.AddField(
model_name='user',
name='default_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users', to='users.Address', verbose_name='默认地址'),
),
]
| true
| true
|
f70bb8099e8fc99c6b1915a64aa50ef8bc9e551b
| 466
|
py
|
Python
|
pylearn/assignment/triangle/04.py
|
wangding/demo-code
|
ecc225642ba3aa1463f7e15b0f7fd96ecd43f067
|
[
"MIT"
] | 6
|
2017-10-12T06:17:37.000Z
|
2022-03-09T13:57:32.000Z
|
pylearn/assignment/triangle/04.py
|
wangding/demo-code
|
ecc225642ba3aa1463f7e15b0f7fd96ecd43f067
|
[
"MIT"
] | 4
|
2017-06-09T01:31:13.000Z
|
2020-09-01T20:08:17.000Z
|
pylearn/assignment/triangle/04.py
|
wangding/demo-code
|
ecc225642ba3aa1463f7e15b0f7fd96ecd43f067
|
[
"MIT"
] | 4
|
2017-10-10T08:57:53.000Z
|
2018-07-05T09:03:47.000Z
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# __author__ = "王顶"
# Email: 408542507@qq.com
"""
A loop-and-slice implementation.
Requirements keep changing: one day it is a 4-level pyramid, the next a 5-level one.
To change the number of levels, just adjust the condition of the while loop.
"""
level = 0
line = ''
stars = '*******************************************'
spaces = '          '  # leading-space pool; slicing below needs at least 4 characters
while level < 4:
n = level * 2 + 1  # n is the number of stars on this line
m = 4 - level  # m is the number of leading spaces
line = spaces[:m] + stars[:n]
print(line)
level = level + 1
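# Added variant (hedged sketch): the same pyramid with the level count pulled
# into one variable, as the docstring suggests; uncomment to try.
# levels = 5
# for lvl in range(levels):
#     print(' ' * (levels - lvl) + '*' * (lvl * 2 + 1))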
| 19.416667
| 54
| 0.450644
|
level = 0
line = ''
stars = '*******************************************'
spaces = '          '
while level < 4:
n = level * 2 + 1
m = 4 - level
line = spaces[:m] + stars[:n]
print(line)
level = level + 1
| true
| true
|
f70bba66989a10c1afc4308f981a114b3aac0610
| 4,231
|
py
|
Python
|
tests/st/ops/cpu/test_broadcast_to_op.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/ops/cpu/test_broadcast_to_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/ops/cpu/test_broadcast_to_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast():
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
shape = (4, 5, 2, 3, 4, 5, 6)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 5, 7, 4, 5, 6)
x_np = np.arange(20).reshape((4, 5, 1)).astype(np.int32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (8, 5, 7, 4, 5, 6)
x_np = np.arange(24).reshape((1, 4, 1, 6)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 2, 3, 4, 5, 7)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (4, 5)
x1_np = np.ones((1,)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_init():
"""
Test running the op with -1's in the init shape to support varied inputs.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (-1, 4, 5, 6)
np_shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
ms_shape = (2, 3, -1, 5)
np_shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_invalid_init():
"""
Test running the op with -1's in the init shape in incorrect positions.
Expected to fail.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (2, -1, 4, 5)
x_np = np.random.rand(4, 5).astype(np.float32)
with pytest.raises(ValueError):
P.BroadcastTo(ms_shape)(Tensor(x_np))
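# Added note (hedged): in the dynamic form above, a -1 entry in the init shape
# keeps the matching input dimension, e.g. (-1, 4, 5, 6) with input (3, 1, 5, 1)
# resolves to (3, 4, 5, 6). The invalid case fails because input (4, 5) aligns
# with the last two axes only, so the -1 at axis 1 has no dimension to inherit.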
| 35.258333
| 78
| 0.670527
|
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast():
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
shape = (4, 5, 2, 3, 4, 5, 6)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 5, 7, 4, 5, 6)
x_np = np.arange(20).reshape((4, 5, 1)).astype(np.int32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (8, 5, 7, 4, 5, 6)
x_np = np.arange(24).reshape((1, 4, 1, 6)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 2, 3, 4, 5, 7)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (4, 5)
x1_np = np.ones((1,)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_init():
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (-1, 4, 5, 6)
np_shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
ms_shape = (2, 3, -1, 5)
np_shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_invalid_init():
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (2, -1, 4, 5)
x_np = np.random.rand(4, 5).astype(np.float32)
with pytest.raises(ValueError):
P.BroadcastTo(ms_shape)(Tensor(x_np))
| true
| true
|
f70bbae8184a4fbe5f16077443f13743bf9bd7ba
| 3,695
|
py
|
Python
|
huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/update_application_endpoint_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/update_application_endpoint_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/update_application_endpoint_request_body.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class UpdateApplicationEndpointRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'enabled': 'str',
'user_data': 'str'
}
attribute_map = {
'enabled': 'enabled',
'user_data': 'user_data'
}
def __init__(self, enabled=None, user_data=None):
"""UpdateApplicationEndpointRequestBody - a model defined in huaweicloud sdk"""
self._enabled = None
self._user_data = None
self.discriminator = None
if enabled is not None:
self.enabled = enabled
if user_data is not None:
self.user_data = user_data
@property
def enabled(self):
"""Gets the enabled of this UpdateApplicationEndpointRequestBody.
Whether the endpoint is enabled; the value is the string "true" or "false".
:return: The enabled of this UpdateApplicationEndpointRequestBody.
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this UpdateApplicationEndpointRequestBody.
Whether the endpoint is enabled; the value is the string "true" or "false".
:param enabled: The enabled of this UpdateApplicationEndpointRequestBody.
:type: str
"""
self._enabled = enabled
@property
def user_data(self):
"""Gets the user_data of this UpdateApplicationEndpointRequestBody.
User-defined data; at most 2048 bytes after UTF-8 encoding.
:return: The user_data of this UpdateApplicationEndpointRequestBody.
:rtype: str
"""
return self._user_data
@user_data.setter
def user_data(self, user_data):
"""Sets the user_data of this UpdateApplicationEndpointRequestBody.
User-defined data; at most 2048 bytes after UTF-8 encoding.
:param user_data: The user_data of this UpdateApplicationEndpointRequestBody.
:type: str
"""
self._user_data = user_data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateApplicationEndpointRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
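# --- Illustrative usage (added sketch) ---
# body = UpdateApplicationEndpointRequestBody(enabled='true', user_data='my-tag')
# body.to_dict()  # -> {'enabled': 'true', 'user_data': 'my-tag'}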
| 26.775362
| 87
| 0.573748
|
import pprint
import re
import six
class UpdateApplicationEndpointRequestBody:
sensitive_list = []
openapi_types = {
'enabled': 'str',
'user_data': 'str'
}
attribute_map = {
'enabled': 'enabled',
'user_data': 'user_data'
}
def __init__(self, enabled=None, user_data=None):
self._enabled = None
self._user_data = None
self.discriminator = None
if enabled is not None:
self.enabled = enabled
if user_data is not None:
self.user_data = user_data
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
self._enabled = enabled
@property
def user_data(self):
return self._user_data
@user_data.setter
def user_data(self, user_data):
self._user_data = user_data
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, UpdateApplicationEndpointRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70bbd6d1707e41946b26993726d5c4787519f5a
| 332
|
py
|
Python
|
tests/test_info_ordering.py
|
aventon1/text-summarizer
|
d7540bfd862b1222f6ebd7875948bbf20c52f603
|
[
"MIT"
] | null | null | null |
tests/test_info_ordering.py
|
aventon1/text-summarizer
|
d7540bfd862b1222f6ebd7875948bbf20c52f603
|
[
"MIT"
] | null | null | null |
tests/test_info_ordering.py
|
aventon1/text-summarizer
|
d7540bfd862b1222f6ebd7875948bbf20c52f603
|
[
"MIT"
] | 2
|
2019-10-09T17:17:40.000Z
|
2020-11-30T05:05:07.000Z
|
#!/opt/python-3.6/bin/python3
import unittest
import sys
sys.path.append("../src")
from info_ordering import order_info
class TestInfoOrdering(unittest.TestCase):
def test_order_info(self):
# TODO: fix to actually test
value = 5
self.assertEqual(value, 5)
if __name__ == '__main__':
unittest.main()
| 18.444444
| 42
| 0.692771
|
import unittest
import sys
sys.path.append("../src")
from info_ordering import order_info
class TestInfoOrdering(unittest.TestCase):
def test_order_info(self):
value = 5
self.assertEqual(value, 5)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70bbe427124f831ca320689a3b1138ac1f62dfa
| 15,815
|
py
|
Python
|
mypy/semanal_typeddict.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | 12,496
|
2016-02-19T13:38:26.000Z
|
2022-03-31T23:56:19.000Z
|
mypy/semanal_typeddict.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | 9,429
|
2016-02-19T13:41:32.000Z
|
2022-03-31T23:29:38.000Z
|
mypy/semanal_typeddict.py
|
SwagatSBhuyan/mypy
|
218b91c5576a69da51e0813abd0fc7c5fd2d627e
|
[
"PSF-2.0"
] | 2,770
|
2016-02-19T16:18:19.000Z
|
2022-03-31T08:12:49.000Z
|
"""Semantic analysis of TypedDict definitions."""
from mypy.backports import OrderedDict
from typing import Optional, List, Set, Tuple
from typing_extensions import Final
from mypy.types import Type, AnyType, TypeOfAny, TypedDictType, TPDICT_NAMES
from mypy.nodes import (
CallExpr, TypedDictExpr, Expression, NameExpr, Context, StrExpr, BytesExpr, UnicodeExpr,
ClassDef, RefExpr, TypeInfo, AssignmentStmt, PassStmt, ExpressionStmt, EllipsisExpr, TempNode,
DictExpr, ARG_POS, ARG_NAMED
)
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.messages import MessageBuilder
from mypy.errorcodes import ErrorCode
from mypy import errorcodes as codes
TPDICT_CLASS_ERROR: Final = (
"Invalid statement in TypedDict definition; " 'expected "field_name: field_type"'
)
class TypedDictAnalyzer:
def __init__(self,
options: Options,
api: SemanticAnalyzerInterface,
msg: MessageBuilder) -> None:
self.options = options
self.api = api
self.msg = msg
def analyze_typeddict_classdef(self, defn: ClassDef) -> Tuple[bool, Optional[TypeInfo]]:
"""Analyze a class that may define a TypedDict.
Assume that base classes have been analyzed already.
Note: Unlike normal classes, we won't create a TypeInfo until
        the whole definition of the TypedDict (including the body and all
key names and types) is complete. This is mostly because we
store the corresponding TypedDictType in the TypeInfo.
Return (is this a TypedDict, new TypeInfo). Specifics:
* If we couldn't finish due to incomplete reference anywhere in
the definition, return (True, None).
* If this is not a TypedDict, return (False, None).
"""
possible = False
for base_expr in defn.base_type_exprs:
if isinstance(base_expr, RefExpr):
self.api.accept(base_expr)
if base_expr.fullname in TPDICT_NAMES or self.is_typeddict(base_expr):
possible = True
if possible:
if (len(defn.base_type_exprs) == 1 and
isinstance(defn.base_type_exprs[0], RefExpr) and
defn.base_type_exprs[0].fullname in TPDICT_NAMES):
# Building a new TypedDict
fields, types, required_keys = self.analyze_typeddict_classdef_fields(defn)
if fields is None:
return True, None # Defer
info = self.build_typeddict_typeinfo(defn.name, fields, types, required_keys,
defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
# Extending/merging existing TypedDicts
if any(not isinstance(expr, RefExpr) or
expr.fullname not in TPDICT_NAMES and
not self.is_typeddict(expr) for expr in defn.base_type_exprs):
self.fail("All bases of a new TypedDict must be TypedDict types", defn)
typeddict_bases = list(filter(self.is_typeddict, defn.base_type_exprs))
keys: List[str] = []
types = []
required_keys = set()
# Iterate over bases in reverse order so that leftmost base class' keys take precedence
for base in reversed(typeddict_bases):
assert isinstance(base, RefExpr)
assert isinstance(base.node, TypeInfo)
assert isinstance(base.node.typeddict_type, TypedDictType)
base_typed_dict = base.node.typeddict_type
base_items = base_typed_dict.items
valid_items = base_items.copy()
for key in base_items:
if key in keys:
self.fail('Overwriting TypedDict field "{}" while merging'
.format(key), defn)
keys.extend(valid_items.keys())
types.extend(valid_items.values())
required_keys.update(base_typed_dict.required_keys)
new_keys, new_types, new_required_keys = self.analyze_typeddict_classdef_fields(defn,
keys)
if new_keys is None:
return True, None # Defer
keys.extend(new_keys)
types.extend(new_types)
required_keys.update(new_required_keys)
info = self.build_typeddict_typeinfo(defn.name, keys, types, required_keys, defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
return False, None
def analyze_typeddict_classdef_fields(
self,
defn: ClassDef,
oldfields: Optional[List[str]] = None) -> Tuple[Optional[List[str]],
List[Type],
Set[str]]:
"""Analyze fields defined in a TypedDict class definition.
        This doesn't consider inherited fields (if any). Totality is also
        taken into account, if given.
Return tuple with these items:
* List of keys (or None if found an incomplete reference --> deferral)
* List of types for each key
* Set of required keys
"""
fields: List[str] = []
types: List[Type] = []
for stmt in defn.defs.body:
if not isinstance(stmt, AssignmentStmt):
                # Still allow pass or ... (for empty TypedDicts).
if (not isinstance(stmt, PassStmt) and
not (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, (EllipsisExpr, StrExpr)))):
self.fail(TPDICT_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
# An assignment, but an invalid one.
self.fail(TPDICT_CLASS_ERROR, stmt)
else:
name = stmt.lvalues[0].name
if name in (oldfields or []):
self.fail('Overwriting TypedDict field "{}" while extending'
.format(name), stmt)
if name in fields:
self.fail('Duplicate TypedDict key "{}"'.format(name), stmt)
continue
# Append name and type in this case...
fields.append(name)
if stmt.type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
analyzed = self.api.anal_type(stmt.type)
if analyzed is None:
return None, [], set() # Need to defer
types.append(analyzed)
                # ...despite possible minor failures that allow further analysis.
if stmt.type is None or hasattr(stmt, 'new_syntax') and not stmt.new_syntax:
self.fail(TPDICT_CLASS_ERROR, stmt)
elif not isinstance(stmt.rvalue, TempNode):
# x: int assigns rvalue to TempNode(AnyType())
self.fail('Right hand side values are not supported in TypedDict', stmt)
total: Optional[bool] = True
if 'total' in defn.keywords:
total = self.api.parse_bool(defn.keywords['total'])
if total is None:
self.fail('Value of "total" must be True or False', defn)
total = True
required_keys = set(fields) if total else set()
return fields, types, required_keys
def check_typeddict(self,
node: Expression,
var_name: Optional[str],
is_func_scope: bool) -> Tuple[bool, Optional[TypeInfo]]:
"""Check if a call defines a TypedDict.
The optional var_name argument is the name of the variable to
which this is assigned, if any.
Return a pair (is it a typed dict, corresponding TypeInfo).
If the definition is invalid but looks like a TypedDict,
report errors but return (some) TypeInfo. If some type is not ready,
return (True, None).
"""
if not isinstance(node, CallExpr):
return False, None
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return False, None
fullname = callee.fullname
if fullname not in TPDICT_NAMES:
return False, None
res = self.parse_typeddict_args(call)
if res is None:
# This is a valid typed dict, but some type is not ready.
# The caller should defer this until next iteration.
return True, None
name, items, types, total, ok = res
if not ok:
# Error. Construct dummy return value.
info = self.build_typeddict_typeinfo('TypedDict', [], [], set(), call.line)
else:
if var_name is not None and name != var_name:
self.fail(
'First argument "{}" to TypedDict() does not match variable name "{}"'.format(
name, var_name), node, code=codes.NAME_MATCH)
if name != var_name or is_func_scope:
# Give it a unique name derived from the line number.
name += '@' + str(call.line)
required_keys = set(items) if total else set()
info = self.build_typeddict_typeinfo(name, items, types, required_keys, call.line)
info.line = node.line
# Store generated TypeInfo under both names, see semanal_namedtuple for more details.
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
if var_name:
self.api.add_symbol(var_name, info, node)
call.analyzed = TypedDictExpr(info)
call.analyzed.set_line(call.line, call.column)
return True, info
def parse_typeddict_args(
self, call: CallExpr) -> Optional[Tuple[str, List[str], List[Type], bool, bool]]:
"""Parse typed dict call expression.
        Return (name, field names, field types, totality, ok); ok is False
        if there was an error during parsing.
If some type is not ready, return None.
"""
# TODO: Share code with check_argument_count in checkexpr.py?
args = call.args
if len(args) < 2:
return self.fail_typeddict_arg("Too few arguments for TypedDict()", call)
if len(args) > 3:
return self.fail_typeddict_arg("Too many arguments for TypedDict()", call)
# TODO: Support keyword arguments
if call.arg_kinds not in ([ARG_POS, ARG_POS], [ARG_POS, ARG_POS, ARG_NAMED]):
return self.fail_typeddict_arg("Unexpected arguments to TypedDict()", call)
if len(args) == 3 and call.arg_names[2] != 'total':
return self.fail_typeddict_arg(
'Unexpected keyword argument "{}" for "TypedDict"'.format(call.arg_names[2]), call)
if not isinstance(args[0], (StrExpr, BytesExpr, UnicodeExpr)):
return self.fail_typeddict_arg(
"TypedDict() expects a string literal as the first argument", call)
if not isinstance(args[1], DictExpr):
return self.fail_typeddict_arg(
"TypedDict() expects a dictionary literal as the second argument", call)
total: Optional[bool] = True
if len(args) == 3:
total = self.api.parse_bool(call.args[2])
if total is None:
return self.fail_typeddict_arg(
'TypedDict() "total" argument must be True or False', call)
dictexpr = args[1]
res = self.parse_typeddict_fields_with_types(dictexpr.items, call)
if res is None:
# One of the types is not ready, defer.
return None
items, types, ok = res
for t in types:
check_for_explicit_any(t, self.options, self.api.is_typeshed_stub_file, self.msg,
context=call)
if self.options.disallow_any_unimported:
for t in types:
if has_any_from_unimported_type(t):
self.msg.unimported_type_becomes_any("Type of a TypedDict key", t, dictexpr)
assert total is not None
return args[0].value, items, types, total, ok
def parse_typeddict_fields_with_types(
self,
dict_items: List[Tuple[Optional[Expression], Expression]],
context: Context) -> Optional[Tuple[List[str], List[Type], bool]]:
"""Parse typed dict items passed as pairs (name expression, type expression).
Return names, types, was there an error. If some type is not ready, return None.
"""
seen_keys = set()
items: List[str] = []
types: List[Type] = []
for (field_name_expr, field_type_expr) in dict_items:
if isinstance(field_name_expr, (StrExpr, BytesExpr, UnicodeExpr)):
key = field_name_expr.value
items.append(key)
if key in seen_keys:
self.fail('Duplicate TypedDict key "{}"'.format(key), field_name_expr)
seen_keys.add(key)
else:
name_context = field_name_expr or field_type_expr
self.fail_typeddict_arg("Invalid TypedDict() field name", name_context)
return [], [], False
try:
type = expr_to_unanalyzed_type(field_type_expr, self.options,
self.api.is_stub_file)
except TypeTranslationError:
self.fail_typeddict_arg('Invalid field type', field_type_expr)
return [], [], False
analyzed = self.api.anal_type(type)
if analyzed is None:
return None
types.append(analyzed)
return items, types, True
def fail_typeddict_arg(self, message: str,
context: Context) -> Tuple[str, List[str], List[Type], bool, bool]:
self.fail(message, context)
return '', [], [], True, False
def build_typeddict_typeinfo(self, name: str, items: List[str],
types: List[Type],
required_keys: Set[str],
line: int) -> TypeInfo:
# Prefer typing then typing_extensions if available.
fallback = (self.api.named_type_or_none('typing._TypedDict', []) or
self.api.named_type_or_none('typing_extensions._TypedDict', []) or
self.api.named_type_or_none('mypy_extensions._TypedDict', []))
assert fallback is not None
info = self.api.basic_new_typeinfo(name, fallback, line)
info.typeddict_type = TypedDictType(OrderedDict(zip(items, types)), required_keys,
fallback)
return info
# Helpers
def is_typeddict(self, expr: Expression) -> bool:
return (isinstance(expr, RefExpr) and isinstance(expr.node, TypeInfo) and
expr.node.typeddict_type is not None)
def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
self.api.fail(msg, ctx, code=code)
| 47.779456
| 99
| 0.583244
|
from mypy.backports import OrderedDict
from typing import Optional, List, Set, Tuple
from typing_extensions import Final
from mypy.types import Type, AnyType, TypeOfAny, TypedDictType, TPDICT_NAMES
from mypy.nodes import (
CallExpr, TypedDictExpr, Expression, NameExpr, Context, StrExpr, BytesExpr, UnicodeExpr,
ClassDef, RefExpr, TypeInfo, AssignmentStmt, PassStmt, ExpressionStmt, EllipsisExpr, TempNode,
DictExpr, ARG_POS, ARG_NAMED
)
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.messages import MessageBuilder
from mypy.errorcodes import ErrorCode
from mypy import errorcodes as codes
TPDICT_CLASS_ERROR: Final = (
"Invalid statement in TypedDict definition; " 'expected "field_name: field_type"'
)
class TypedDictAnalyzer:
def __init__(self,
options: Options,
api: SemanticAnalyzerInterface,
msg: MessageBuilder) -> None:
self.options = options
self.api = api
self.msg = msg
def analyze_typeddict_classdef(self, defn: ClassDef) -> Tuple[bool, Optional[TypeInfo]]:
possible = False
for base_expr in defn.base_type_exprs:
if isinstance(base_expr, RefExpr):
self.api.accept(base_expr)
if base_expr.fullname in TPDICT_NAMES or self.is_typeddict(base_expr):
possible = True
if possible:
if (len(defn.base_type_exprs) == 1 and
isinstance(defn.base_type_exprs[0], RefExpr) and
defn.base_type_exprs[0].fullname in TPDICT_NAMES):
fields, types, required_keys = self.analyze_typeddict_classdef_fields(defn)
if fields is None:
return True, None
info = self.build_typeddict_typeinfo(defn.name, fields, types, required_keys,
defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
if any(not isinstance(expr, RefExpr) or
expr.fullname not in TPDICT_NAMES and
not self.is_typeddict(expr) for expr in defn.base_type_exprs):
self.fail("All bases of a new TypedDict must be TypedDict types", defn)
typeddict_bases = list(filter(self.is_typeddict, defn.base_type_exprs))
keys: List[str] = []
types = []
required_keys = set()
for base in reversed(typeddict_bases):
assert isinstance(base, RefExpr)
assert isinstance(base.node, TypeInfo)
assert isinstance(base.node.typeddict_type, TypedDictType)
base_typed_dict = base.node.typeddict_type
base_items = base_typed_dict.items
valid_items = base_items.copy()
for key in base_items:
if key in keys:
self.fail('Overwriting TypedDict field "{}" while merging'
.format(key), defn)
keys.extend(valid_items.keys())
types.extend(valid_items.values())
required_keys.update(base_typed_dict.required_keys)
new_keys, new_types, new_required_keys = self.analyze_typeddict_classdef_fields(defn,
keys)
if new_keys is None:
return True, None # Defer
keys.extend(new_keys)
types.extend(new_types)
required_keys.update(new_required_keys)
info = self.build_typeddict_typeinfo(defn.name, keys, types, required_keys, defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
return False, None
def analyze_typeddict_classdef_fields(
self,
defn: ClassDef,
oldfields: Optional[List[str]] = None) -> Tuple[Optional[List[str]],
List[Type],
Set[str]]:
fields: List[str] = []
types: List[Type] = []
for stmt in defn.defs.body:
if not isinstance(stmt, AssignmentStmt):
                # Still allow pass or ... (for empty TypedDicts).
if (not isinstance(stmt, PassStmt) and
not (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, (EllipsisExpr, StrExpr)))):
self.fail(TPDICT_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
self.fail(TPDICT_CLASS_ERROR, stmt)
else:
name = stmt.lvalues[0].name
if name in (oldfields or []):
self.fail('Overwriting TypedDict field "{}" while extending'
.format(name), stmt)
if name in fields:
self.fail('Duplicate TypedDict key "{}"'.format(name), stmt)
continue
fields.append(name)
if stmt.type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
analyzed = self.api.anal_type(stmt.type)
if analyzed is None:
return None, [], set()
types.append(analyzed)
if stmt.type is None or hasattr(stmt, 'new_syntax') and not stmt.new_syntax:
self.fail(TPDICT_CLASS_ERROR, stmt)
elif not isinstance(stmt.rvalue, TempNode):
self.fail('Right hand side values are not supported in TypedDict', stmt)
total: Optional[bool] = True
if 'total' in defn.keywords:
total = self.api.parse_bool(defn.keywords['total'])
if total is None:
self.fail('Value of "total" must be True or False', defn)
total = True
required_keys = set(fields) if total else set()
return fields, types, required_keys
def check_typeddict(self,
node: Expression,
var_name: Optional[str],
is_func_scope: bool) -> Tuple[bool, Optional[TypeInfo]]:
if not isinstance(node, CallExpr):
return False, None
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return False, None
fullname = callee.fullname
if fullname not in TPDICT_NAMES:
return False, None
res = self.parse_typeddict_args(call)
if res is None:
return True, None
name, items, types, total, ok = res
if not ok:
info = self.build_typeddict_typeinfo('TypedDict', [], [], set(), call.line)
else:
if var_name is not None and name != var_name:
self.fail(
'First argument "{}" to TypedDict() does not match variable name "{}"'.format(
name, var_name), node, code=codes.NAME_MATCH)
if name != var_name or is_func_scope:
name += '@' + str(call.line)
required_keys = set(items) if total else set()
info = self.build_typeddict_typeinfo(name, items, types, required_keys, call.line)
info.line = node.line
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
if var_name:
self.api.add_symbol(var_name, info, node)
call.analyzed = TypedDictExpr(info)
call.analyzed.set_line(call.line, call.column)
return True, info
def parse_typeddict_args(
self, call: CallExpr) -> Optional[Tuple[str, List[str], List[Type], bool, bool]]:
args = call.args
if len(args) < 2:
return self.fail_typeddict_arg("Too few arguments for TypedDict()", call)
if len(args) > 3:
return self.fail_typeddict_arg("Too many arguments for TypedDict()", call)
if call.arg_kinds not in ([ARG_POS, ARG_POS], [ARG_POS, ARG_POS, ARG_NAMED]):
return self.fail_typeddict_arg("Unexpected arguments to TypedDict()", call)
if len(args) == 3 and call.arg_names[2] != 'total':
return self.fail_typeddict_arg(
'Unexpected keyword argument "{}" for "TypedDict"'.format(call.arg_names[2]), call)
if not isinstance(args[0], (StrExpr, BytesExpr, UnicodeExpr)):
return self.fail_typeddict_arg(
"TypedDict() expects a string literal as the first argument", call)
if not isinstance(args[1], DictExpr):
return self.fail_typeddict_arg(
"TypedDict() expects a dictionary literal as the second argument", call)
total: Optional[bool] = True
if len(args) == 3:
total = self.api.parse_bool(call.args[2])
if total is None:
return self.fail_typeddict_arg(
'TypedDict() "total" argument must be True or False', call)
dictexpr = args[1]
res = self.parse_typeddict_fields_with_types(dictexpr.items, call)
if res is None:
return None
items, types, ok = res
for t in types:
check_for_explicit_any(t, self.options, self.api.is_typeshed_stub_file, self.msg,
context=call)
if self.options.disallow_any_unimported:
for t in types:
if has_any_from_unimported_type(t):
self.msg.unimported_type_becomes_any("Type of a TypedDict key", t, dictexpr)
assert total is not None
return args[0].value, items, types, total, ok
def parse_typeddict_fields_with_types(
self,
dict_items: List[Tuple[Optional[Expression], Expression]],
context: Context) -> Optional[Tuple[List[str], List[Type], bool]]:
seen_keys = set()
items: List[str] = []
types: List[Type] = []
for (field_name_expr, field_type_expr) in dict_items:
if isinstance(field_name_expr, (StrExpr, BytesExpr, UnicodeExpr)):
key = field_name_expr.value
items.append(key)
if key in seen_keys:
self.fail('Duplicate TypedDict key "{}"'.format(key), field_name_expr)
seen_keys.add(key)
else:
name_context = field_name_expr or field_type_expr
self.fail_typeddict_arg("Invalid TypedDict() field name", name_context)
return [], [], False
try:
type = expr_to_unanalyzed_type(field_type_expr, self.options,
self.api.is_stub_file)
except TypeTranslationError:
self.fail_typeddict_arg('Invalid field type', field_type_expr)
return [], [], False
analyzed = self.api.anal_type(type)
if analyzed is None:
return None
types.append(analyzed)
return items, types, True
def fail_typeddict_arg(self, message: str,
context: Context) -> Tuple[str, List[str], List[Type], bool, bool]:
self.fail(message, context)
return '', [], [], True, False
def build_typeddict_typeinfo(self, name: str, items: List[str],
types: List[Type],
required_keys: Set[str],
line: int) -> TypeInfo:
fallback = (self.api.named_type_or_none('typing._TypedDict', []) or
self.api.named_type_or_none('typing_extensions._TypedDict', []) or
self.api.named_type_or_none('mypy_extensions._TypedDict', []))
assert fallback is not None
info = self.api.basic_new_typeinfo(name, fallback, line)
info.typeddict_type = TypedDictType(OrderedDict(zip(items, types)), required_keys,
fallback)
return info
def is_typeddict(self, expr: Expression) -> bool:
return (isinstance(expr, RefExpr) and isinstance(expr.node, TypeInfo) and
expr.node.typeddict_type is not None)
def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
self.api.fail(msg, ctx, code=code)
| true
| true
|
f70bbe50147981c90239bd8aac6d747789d5ce75
| 3,673
|
py
|
Python
|
main.py
|
omergoc/ihbarBotu
|
a30028be26a65b67e0d5c94547e17ab7f00c2a81
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
omergoc/ihbarBotu
|
a30028be26a65b67e0d5c94547e17ab7f00c2a81
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
omergoc/ihbarBotu
|
a30028be26a65b67e0d5c94547e17ab7f00c2a81
|
[
"Apache-2.0"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import os
class App:
def __init__(self):
self.userlist = []
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}
self.page = 1
os.system("title "+"THT IHBAR OTOMASYONU")
os.system("color F")
self.hashUser = input("'xf_user' Bilgisini giriniz: ").strip()
self.hashTfaTrust = input("'xf_tfa_trust' Bilgisini giriniz: ").strip()
self.cookies = {
'xf_user':f'{self.hashUser}',
'xf_tfa_trust':f'{self.hashTfaTrust}'
}
self.Transactions()
def ControlAccount(self):
request = requests.get("https://www.turkhackteam.org/uye/kaptantr.744109/", cookies=self.cookies, headers = self.headers)
controltext = "Giriş yap"
html = request.text
if controltext in html:
return "Giris Yapılmadı"
else:
return"Giriş Yapıldı"
def Scarping(self):
request = requests.get("https://www.turkhackteam.org/reports/closed?page="+ str(self.page), cookies=self.cookies, headers=self.headers).text
parser = BeautifulSoup(request, 'html.parser')
urls = parser.findAll("a", {"class": "structItem-title"},href=True)
for url in urls:
file = open("rapor.txt","a",encoding='utf-8')
file.write("*"*40)
file.write("\n")
reportedLink = "https://www.turkhackteam.org"+url["href"]
request = requests.get(reportedLink, cookies=self.cookies, headers=self.headers).text
contentParser = BeautifulSoup(request, 'html.parser')
content = contentParser.find_all("header",{"class":"message-attribution message-attribution--plain"})
for item in content:
userLink = item.find('a')["href"]
userLink = "https://www.turkhackteam.org"+userLink
userSituation = item.find("span", {"class": "label label--accent"})
userSituation = userSituation is None
userName = item.find('h4',{"class":"attribution"}).text
                userSituation = {True: "İhbar Yapan", False: "İhbar Eden"}[userSituation]
text = f"{userLink} // {userName} // ({userSituation})"
file.write(reportedLink)
file.write("\n")
file.write(text)
file.write("\n")
file.write("-"*20)
file.write("\n")
file.close()
def Transactions(self):
print("""
///////////////////////////////////////////
// //
// THT Ihbar Otomasyonu //
// 1.0 //
// //
// Created By //
// Ar-Ge Team //
///////////////////////////////////////////
""")
if self.ControlAccount() == "Giris Yapılmadı":
print("Giriş Yapılamadı. Çıkış yapmak için lütfen bir tuşa basınız.")
input()
exit()
else:
print(f"Login Control: {self.ControlAccount()}")
print("İşlem Başladı, Lütfen Bekleyiniz")
self.Scarping()
print("İşlem Tamamlandı, Çıkış Yapmak İçin Bir tuşa Basınız.")
input()
if __name__ == '__main__':
main = App()
| 39.923913
| 157
| 0.494963
|
from bs4 import BeautifulSoup
import requests
import os
class App:
def __init__(self):
self.userlist = []
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}
self.page = 1
os.system("title "+"THT IHBAR OTOMASYONU")
os.system("color F")
self.hashUser = input("'xf_user' Bilgisini giriniz: ").strip()
self.hashTfaTrust = input("'xf_tfa_trust' Bilgisini giriniz: ").strip()
self.cookies = {
'xf_user':f'{self.hashUser}',
'xf_tfa_trust':f'{self.hashTfaTrust}'
}
self.Transactions()
def ControlAccount(self):
request = requests.get("https://www.turkhackteam.org/uye/kaptantr.744109/", cookies=self.cookies, headers = self.headers)
controltext = "Giriş yap"
html = request.text
if controltext in html:
return "Giris Yapılmadı"
else:
return"Giriş Yapıldı"
def Scarping(self):
request = requests.get("https://www.turkhackteam.org/reports/closed?page="+ str(self.page), cookies=self.cookies, headers=self.headers).text
parser = BeautifulSoup(request, 'html.parser')
urls = parser.findAll("a", {"class": "structItem-title"},href=True)
for url in urls:
file = open("rapor.txt","a",encoding='utf-8')
file.write("*"*40)
file.write("\n")
reportedLink = "https://www.turkhackteam.org"+url["href"]
request = requests.get(reportedLink, cookies=self.cookies, headers=self.headers).text
contentParser = BeautifulSoup(request, 'html.parser')
content = contentParser.find_all("header",{"class":"message-attribution message-attribution--plain"})
for item in content:
userLink = item.find('a')["href"]
userLink = "https://www.turkhackteam.org"+userLink
userSituation = item.find("span", {"class": "label label--accent"})
userSituation = userSituation is None
userName = item.find('h4',{"class":"attribution"}).text
                userSituation = {True: "İhbar Yapan", False: "İhbar Eden"}[userSituation]
text = f"{userLink} // {userName} // ({userSituation})"
file.write(reportedLink)
file.write("\n")
file.write(text)
file.write("\n")
file.write("-"*20)
file.write("\n")
file.close()
def Transactions(self):
print("""
///////////////////////////////////////////
// //
// THT Ihbar Otomasyonu //
// 1.0 //
// //
// Created By //
// Ar-Ge Team //
///////////////////////////////////////////
""")
if self.ControlAccount() == "Giris Yapılmadı":
print("Giriş Yapılamadı. Çıkış yapmak için lütfen bir tuşa basınız.")
input()
exit()
else:
print(f"Login Control: {self.ControlAccount()}")
print("İşlem Başladı, Lütfen Bekleyiniz")
self.Scarping()
print("İşlem Tamamlandı, Çıkış Yapmak İçin Bir tuşa Basınız.")
input()
if __name__ == '__main__':
main = App()
| true
| true
|
f70bbe7af3801fa430e6f0bec3f47ab6459ea35a
| 4,689
|
py
|
Python
|
esmvaltool/diag_scripts/ocean/diagnostic_profiles.py
|
ruthlorenz/ESMValTool
|
c3c61b5341037d01c776c3524c0dd4c767507a3d
|
[
"Apache-2.0"
] | null | null | null |
esmvaltool/diag_scripts/ocean/diagnostic_profiles.py
|
ruthlorenz/ESMValTool
|
c3c61b5341037d01c776c3524c0dd4c767507a3d
|
[
"Apache-2.0"
] | null | null | null |
esmvaltool/diag_scripts/ocean/diagnostic_profiles.py
|
ruthlorenz/ESMValTool
|
c3c61b5341037d01c776c3524c0dd4c767507a3d
|
[
"Apache-2.0"
] | null | null | null |
"""
Diagnostic:
Diagnostic to produce images of the profile over time from a cube.
These plots show the cube value (i.e. temperature) on the x-axis, and
depth/height on the y-axis. The colour scale is the annual mean of the cube
data.
Note that this diagnostic assumes that the preprocessors do the bulk of the
hard work, and that the cube received by this diagnostic (via the settings.yml
and metadata.yml files) has a time component, and depth component, but no
latitude or longitude coordinates.
An appropriate preprocessor for a 3D+time field would be:
preprocessors:
prep_profile:
extract_volume:
long1: 0.
long2: 20.
lat1: -30.
lat2: 30.
z_min: 0.
z_max: 3000.
average_region:
coord1: longitude
coord2: latitude
This tool is part of the ocean diagnostic tools package in the ESMValTool.
Author: Lee de Mora (PML)
ledm@pml.ac.uk
"""
import logging
import os
import sys
import matplotlib
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
import iris
import iris.quickplot as qplt
import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
# This part sends debug statements to stdout
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def determine_profiles_str(cube):
"""
Determine a string from the cube, to describe the profile.
Used in image titles, descriptions and filenames.
"""
options = ['latitude', 'longitude']
for option in options:
coord = cube.coord(option)
if len(coord.points) > 1:
continue
value = coord.points.mean()
if option == 'latitude':
return str(value) + ' N'
if option == 'longitude':
if value > 180.:
return str(value - 360.) + ' W'
return str(value) + ' E'
return ''
def make_profiles_plots(
cfg,
metadata,
filename,
):
"""
Make a simple profile plot for an individual model.
The cfg is the opened global config,
    metadata is the metadata dictionary,
filename is the preprocessing model file.
"""
# Load cube and set up units
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
# Make annual means from:
cube = cube.aggregated_by('year', iris.analysis.MEAN)
    # Is this data a multi-model dataset?
multi_model = metadata['dataset'].find('MultiModel') > -1
#
times = cube.coord('time')
times_float = diagtools.timecoord_to_float(times)
time_0 = times_float[0]
cmap = plt.cm.get_cmap('jet')
plot_details = {}
for time_index, time in enumerate(times_float):
color = cmap((time - time_0) / (times_float[-1] - time_0))
qplt.plot(cube[time_index, :], cube[time_index, :].coord('depth'),
c=color)
plot_details[time_index] = {'c': color, 'ls': '-', 'lw': 1,
'label': str(int(time))}
# Add title to plot
title = ' '.join([
metadata['dataset'],
metadata['long_name'],
])
plt.title(title)
# Add Legend outside right.
diagtools.add_legend_outside_right(plot_details, plt.gca())
    # Load image format extension
image_extention = diagtools.get_image_format(cfg)
# Determine image filename:
if multi_model:
path = diagtools.folder(
cfg['plot_dir']) + os.path.basename(filename).replace(
'.nc', '_profile' + image_extention)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix='profile' + image_extention,
)
# Saving files:
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def main(cfg):
"""
Load the config file, and send it to the plot maker.
The cfg is the opened global config.
"""
for index, metadata_filename in enumerate(cfg['input_files']):
logger.info(
'metadata filename:\t%s',
metadata_filename
)
metadatas = diagtools.get_input_files(cfg, index=index)
for filename in sorted(metadatas.keys()):
logger.info('-----------------')
logger.info(
'model filenames:\t%s',
filename,
)
######
# Time series of individual model
make_profiles_plots(cfg, metadatas[filename], filename)
logger.info('Success')
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
| 26.794286
| 78
| 0.622521
|
import logging
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import iris
import iris.quickplot as qplt
import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def determine_profiles_str(cube):
options = ['latitude', 'longitude']
for option in options:
coord = cube.coord(option)
if len(coord.points) > 1:
continue
value = coord.points.mean()
if option == 'latitude':
return str(value) + ' N'
if option == 'longitude':
if value > 180.:
return str(value - 360.) + ' W'
return str(value) + ' E'
return ''
def make_profiles_plots(
cfg,
metadata,
filename,
):
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
cube = cube.aggregated_by('year', iris.analysis.MEAN)
multi_model = metadata['dataset'].find('MultiModel') > -1
times = cube.coord('time')
times_float = diagtools.timecoord_to_float(times)
time_0 = times_float[0]
cmap = plt.cm.get_cmap('jet')
plot_details = {}
for time_index, time in enumerate(times_float):
color = cmap((time - time_0) / (times_float[-1] - time_0))
qplt.plot(cube[time_index, :], cube[time_index, :].coord('depth'),
c=color)
plot_details[time_index] = {'c': color, 'ls': '-', 'lw': 1,
'label': str(int(time))}
title = ' '.join([
metadata['dataset'],
metadata['long_name'],
])
plt.title(title)
diagtools.add_legend_outside_right(plot_details, plt.gca())
image_extention = diagtools.get_image_format(cfg)
if multi_model:
path = diagtools.folder(
cfg['plot_dir']) + os.path.basename(filename).replace(
'.nc', '_profile' + image_extention)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix='profile' + image_extention,
)
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def main(cfg):
for index, metadata_filename in enumerate(cfg['input_files']):
logger.info(
'metadata filename:\t%s',
metadata_filename
)
metadatas = diagtools.get_input_files(cfg, index=index)
for filename in sorted(metadatas.keys()):
logger.info('-----------------')
logger.info(
'model filenames:\t%s',
filename,
)
make_profiles_plots(cfg, metadatas[filename], filename)
logger.info('Success')
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
| true
| true
|
f70bbe7da9fe3c9d459fd663fd9800dbefafe584
| 2,122
|
py
|
Python
|
test/python/test_problem_options.py
|
vlad17/BlitzML
|
f13e089acf7435416bec17e87e5b3130426fc2cd
|
[
"BSD-3-Clause"
] | 6
|
2015-06-16T05:17:17.000Z
|
2018-08-02T05:50:01.000Z
|
test/python/test_problem_options.py
|
vlad17/BlitzML
|
f13e089acf7435416bec17e87e5b3130426fc2cd
|
[
"BSD-3-Clause"
] | 2
|
2018-05-13T13:53:58.000Z
|
2019-06-11T14:53:26.000Z
|
test/python/test_problem_options.py
|
vlad17/BlitzML
|
f13e089acf7435416bec17e87e5b3130426fc2cd
|
[
"BSD-3-Clause"
] | 3
|
2018-08-02T05:50:03.000Z
|
2021-02-21T04:44:15.000Z
|
import unittest
import blitzml
import numpy as np
from common import captured_output
class TestProblemOptions(unittest.TestCase):
def setUp(self):
A = np.arange(20).reshape(5, 4)
b = np.arange(5).astype(np.float64)
self.prob = blitzml.LassoProblem(A, b)
def tearDown(self):
del self.prob
def test_min_time(self):
self.assertLessEqual(self.prob._min_time, 0.)
self.prob._min_time = 2.0
self.assertEqual(self.prob._min_time, 2.0)
def test_max_time(self):
self.assertGreaterEqual(self.prob._max_time, 3600.)
self.prob._max_time = 5.0
self.assertEqual(self.prob._max_time, 5.0)
def test_max_iterations(self):
self.assertGreaterEqual(self.prob._max_iterations, 100)
self.prob._max_iterations = 10
self.assertEqual(self.prob._max_iterations, 10)
def test_tolerance(self):
self.assertGreater(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.
self.assertEqual(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.1
self.assertEqual(self.prob._stopping_tolerance, 0.1)
def test_verbose(self):
self.assertEqual(self.prob._verbose, False)
self.prob._verbose = True
self.assertEqual(self.prob._verbose, True)
def test_use_screening(self):
self.assertEqual(self.prob._use_screening, True)
self.prob._use_screening = False
self.assertEqual(self.prob._use_screening, False)
def test_use_working_sets(self):
self.assertEqual(self.prob._use_working_sets, True)
self.prob._use_working_sets = False
self.assertEqual(self.prob._use_working_sets, False)
def test_suppress_warnings(self):
bad_log_dir = "path/to/bad_log/dir/zxc8aj3n"
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertIn("Warning", out[0])
blitzml.suppress_warnings()
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertNotIn("Warning", out[0])
blitzml.unsuppress_warnings()
| 30.753623
| 59
| 0.723845
|
import unittest
import blitzml
import numpy as np
from common import captured_output
class TestProblemOptions(unittest.TestCase):
def setUp(self):
A = np.arange(20).reshape(5, 4)
b = np.arange(5).astype(np.float64)
self.prob = blitzml.LassoProblem(A, b)
def tearDown(self):
del self.prob
def test_min_time(self):
self.assertLessEqual(self.prob._min_time, 0.)
self.prob._min_time = 2.0
self.assertEqual(self.prob._min_time, 2.0)
def test_max_time(self):
self.assertGreaterEqual(self.prob._max_time, 3600.)
self.prob._max_time = 5.0
self.assertEqual(self.prob._max_time, 5.0)
def test_max_iterations(self):
self.assertGreaterEqual(self.prob._max_iterations, 100)
self.prob._max_iterations = 10
self.assertEqual(self.prob._max_iterations, 10)
def test_tolerance(self):
self.assertGreater(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.
self.assertEqual(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.1
self.assertEqual(self.prob._stopping_tolerance, 0.1)
def test_verbose(self):
self.assertEqual(self.prob._verbose, False)
self.prob._verbose = True
self.assertEqual(self.prob._verbose, True)
def test_use_screening(self):
self.assertEqual(self.prob._use_screening, True)
self.prob._use_screening = False
self.assertEqual(self.prob._use_screening, False)
def test_use_working_sets(self):
self.assertEqual(self.prob._use_working_sets, True)
self.prob._use_working_sets = False
self.assertEqual(self.prob._use_working_sets, False)
def test_suppress_warnings(self):
bad_log_dir = "path/to/bad_log/dir/zxc8aj3n"
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertIn("Warning", out[0])
blitzml.suppress_warnings()
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertNotIn("Warning", out[0])
blitzml.unsuppress_warnings()
| true
| true
|
f70bbfa4c46a7b61211c94260b55968ce1dd3e22
| 5,156
|
py
|
Python
|
webscraping/Tokenize.py
|
jfmendozam/ontotoutra
|
bea4ceafa62500b23495a6de120884ca40f785e9
|
[
"Apache-2.0"
] | null | null | null |
webscraping/Tokenize.py
|
jfmendozam/ontotoutra
|
bea4ceafa62500b23495a6de120884ca40f785e9
|
[
"Apache-2.0"
] | null | null | null |
webscraping/Tokenize.py
|
jfmendozam/ontotoutra
|
bea4ceafa62500b23495a6de120884ca40f785e9
|
[
"Apache-2.0"
] | null | null | null |
import nltk
from langdetect import detect
import csv
class Tokenize:
""" Text tokenizer """
def __init__(self):
""" Default constructor """
self.language = "en"
self.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_en/"
self.tagFilename = "tags_en.csv"
self.wfFilename = "words_freq_en.csv"
self.structFilename = "structure_en.csv"
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
self.tagCategories_en = {
'Adjective' : ['ADJ', 'JJ', 'JJR', 'JJS'],
'Adverb' : ['ADV', 'RB', 'RBR', 'RBS', 'WRB'],
'Conjunction' : ['CONJ', 'CC'],
'Determiner' : ['DET', 'DT', 'EX', 'PDT', 'WDT'],
'Noun' : ['NOUN', 'NN', 'NNP', 'NNPS', 'NNS'],
'Numeral' : ['NUM', 'CD'],
'Particle' : ['PRT', 'POS', 'RP', 'TO'],
'Preposition' : ['ADP', 'IN'],
'Pronoun' : ['PRON', 'PRP', 'PRP$', 'WP', 'WP$'],
'Punctuation' : ['.', '#', '$', "''", '”', '``', ',', '.', ':', "''", '(', ')', '-LRB-', '-RRB-'],
'Verb' : ['VERB', 'MD', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
'X' : ['X', 'FW', 'LS', 'SYM', 'UH'],
}
self.reviews = []
self.tokens = []
self.tags = []
self.entities = []
self.other = []
def getCategory(self, tag):
""" Get the tag's category """
for cat in self.tagCategories_en:
            if tag in self.tagCategories_en[cat]:
                return cat
        return ""
def tokenizing(self):
""" Text tokenizer """
self.tokens = []
self.tags = []
self.entities = []
self.other = []
for review in self.reviews:
try:
if (detect(review) == self.language):
token = nltk.word_tokenize(review)
tag = nltk.pos_tag(token)
entity = nltk.chunk.ne_chunk(tag)
self.tokens.append(token)
self.tags.append(tag)
self.entities.append(entity)
                else:
self.other.append(review)
            except Exception:
continue
with open(self.workDirectory + self.tagFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for tag in self.tags:
for value in tag:
writer.writerow(value)
def tagFrequencies(self):
""" Tag Frequencies """
fr = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(fr)):
if (fr[i][0] == value):
fr[i][1] += 1
found = True
break
if not found:
                    fr.append([value, 1])
        return fr  # return the computed frequencies instead of silently discarding them
def wordFrequencies(self):
""" Word Frequencies """
wd = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(wd)):
if (wd[i][0].lower() == key.lower()):
wd[i][1] += 1
found = True
break
if not found:
wd.append([key, 1])
with open(self.workDirectory + self.wfFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for w in wd:
writer.writerow(w)
def wordCategory(self):
""" Word - category """
cats = []
for tag in self.tags:
for key, value in tag:
cats.append([key, self.getCategory(value)])
for cat in self.tagCategories_en:
with open(self.workDirectory + "_" + cat + '.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for i in cats:
if (i[1] == cat):
writer.writerow(i)
def getRules(self):
""" Get rules """
rules = []
for tag in self.tags:
s = ""
for w, t in tag:
s += self.getCategory(t) + " "
if (t == '.' or t == ','):
rules.append(s)
s = ""
if (len(s) > 0):
rules.append(s)
with open(self.workDirectory + self.structFilename, 'w') as csvfile:
for rule in rules:
csvfile.write("%s\n" % rule)
#from Tokenize import Tokenize
#tk = Tokenize()
#tk.reviews = reviews
#tk.language = "es"
#tk.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_es/"
#tk.tagFilename = "location_tags_es.csv"
#tk.wfFilename = "location_words_freq_es.csv"
#tk.structFilename = "location_structure_es.csv"
#tk.tokenizing()
| 32.840764
| 114
| 0.442591
|
import nltk
from langdetect import detect
import csv
class Tokenize:
def __init__(self):
self.language = "en"
self.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_en/"
self.tagFilename = "tags_en.csv"
self.wfFilename = "words_freq_en.csv"
self.structFilename = "structure_en.csv"
self.tagCategories_en = {
'Adjective' : ['ADJ', 'JJ', 'JJR', 'JJS'],
'Adverb' : ['ADV', 'RB', 'RBR', 'RBS', 'WRB'],
'Conjunction' : ['CONJ', 'CC'],
'Determiner' : ['DET', 'DT', 'EX', 'PDT', 'WDT'],
'Noun' : ['NOUN', 'NN', 'NNP', 'NNPS', 'NNS'],
'Numeral' : ['NUM', 'CD'],
'Particle' : ['PRT', 'POS', 'RP', 'TO'],
'Preposition' : ['ADP', 'IN'],
'Pronoun' : ['PRON', 'PRP', 'PRP$', 'WP', 'WP$'],
'Punctuation' : ['.', '#', '$', "''", '”', '``', ',', '.', ':', "''", '(', ')', '-LRB-', '-RRB-'],
'Verb' : ['VERB', 'MD', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
'X' : ['X', 'FW', 'LS', 'SYM', 'UH'],
}
self.reviews = []
self.tokens = []
self.tags = []
self.entities = []
self.other = []
def getCategory(self, tag):
for cat in self.tagCategories_en:
            if tag in self.tagCategories_en[cat]:
                return cat
        return ""
def tokenizing(self):
self.tokens = []
self.tags = []
self.entities = []
self.other = []
for review in self.reviews:
try:
if (detect(review) == self.language):
token = nltk.word_tokenize(review)
tag = nltk.pos_tag(token)
entity = nltk.chunk.ne_chunk(tag)
self.tokens.append(token)
self.tags.append(tag)
self.entities.append(entity)
                else:
self.other.append(review)
            except Exception:
continue
with open(self.workDirectory + self.tagFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for tag in self.tags:
for value in tag:
writer.writerow(value)
def tagFrequencies(self):
fr = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(fr)):
if (fr[i][0] == value):
fr[i][1] += 1
found = True
break
if not found:
                    fr.append([value, 1])
        return fr
def wordFrequencies(self):
wd = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(wd)):
if (wd[i][0].lower() == key.lower()):
wd[i][1] += 1
found = True
break
if not found:
wd.append([key, 1])
with open(self.workDirectory + self.wfFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for w in wd:
writer.writerow(w)
def wordCategory(self):
cats = []
for tag in self.tags:
for key, value in tag:
cats.append([key, self.getCategory(value)])
for cat in self.tagCategories_en:
with open(self.workDirectory + "_" + cat + '.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for i in cats:
if (i[1] == cat):
writer.writerow(i)
def getRules(self):
rules = []
for tag in self.tags:
s = ""
for w, t in tag:
s += self.getCategory(t) + " "
if (t == '.' or t == ','):
rules.append(s)
s = ""
if (len(s) > 0):
rules.append(s)
with open(self.workDirectory + self.structFilename, 'w') as csvfile:
for rule in rules:
csvfile.write("%s\n" % rule)
#from Tokenize import Tokenize
#tk = Tokenize()
#tk.reviews = reviews
#tk.language = "es"
#tk.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_es/"
#tk.tagFilename = "location_tags_es.csv"
#tk.wfFilename = "location_words_freq_es.csv"
#tk.structFilename = "location_structure_es.csv"
#tk.tokenizing()
| true
| true
|
f70bbfe4549985072572f1ddf6fb4774ad2ef538
| 562
|
py
|
Python
|
tests/static/yaff_test_files/sp/yscript.py
|
t-brink/pyiron
|
c07552b54a39e3f036ba395325cd4b372af0f794
|
[
"BSD-3-Clause"
] | null | null | null |
tests/static/yaff_test_files/sp/yscript.py
|
t-brink/pyiron
|
c07552b54a39e3f036ba395325cd4b372af0f794
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T09:22:56.000Z
|
2021-11-02T09:22:56.000Z
|
tests/static/yaff_test_files/sp/yscript.py
|
t-brink/pyiron
|
c07552b54a39e3f036ba395325cd4b372af0f794
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T08:35:47.000Z
|
2021-11-02T08:35:47.000Z
|
#! /usr/bin/python
from molmod.units import *
from yaff import *
import h5py, numpy as np
# Setting up system and force field
system = System.from_file('system.chk')
ff = ForceField.generate(system, 'pars.txt', rcut=15.0*angstrom, alpha_scale=3.2, gcut_scale=1.5, smooth_ei=True)
# Setting up output
f = h5py.File('output.h5', mode='w')
hdf5 = HDF5Writer(f, step=1)
r = h5py.File('restart.h5', mode='w')
restart = RestartWriter(r, step=10000)
hooks = [hdf5, restart]
# Setting up simulation
energy = ff.compute()
system.to_hdf5(f)
f['system/energy'] = energy
| 23.416667
| 113
| 0.717082
|
from molmod.units import *
from yaff import *
import h5py, numpy as np
system = System.from_file('system.chk')
ff = ForceField.generate(system, 'pars.txt', rcut=15.0*angstrom, alpha_scale=3.2, gcut_scale=1.5, smooth_ei=True)
f = h5py.File('output.h5', mode='w')
hdf5 = HDF5Writer(f, step=1)
r = h5py.File('restart.h5', mode='w')
restart = RestartWriter(r, step=10000)
hooks = [hdf5, restart]
energy = ff.compute()
system.to_hdf5(f)
f['system/energy'] = energy
| true
| true
|
f70bc02a75fa586549688f9970179c4376a1feab
| 1,410
|
py
|
Python
|
multiprune_plusone/multiprune_plusone.py
|
5joono/Swin-Transformer
|
b5b7e85aa11ad72b2bec2d458fa78066e4c3d0f2
|
[
"MIT"
] | null | null | null |
multiprune_plusone/multiprune_plusone.py
|
5joono/Swin-Transformer
|
b5b7e85aa11ad72b2bec2d458fa78066e4c3d0f2
|
[
"MIT"
] | null | null | null |
multiprune_plusone/multiprune_plusone.py
|
5joono/Swin-Transformer
|
b5b7e85aa11ad72b2bec2d458fa78066e4c3d0f2
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
os.environ['MKL_THREADING_LAYER'] = 'GNU'
# df = pd.DataFrame(columns=['multiprune', 'headstr', 'pluslayer', 'plushead', 'acc1'])
# df.to_csv("multiprune_plusone.csv",index=False)
prevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]
plusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]
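# Editorial note (not original code): the loop below is a greedy search --
# starting from one kept head per layer, each round re-evaluates every
# candidate head per layer and permanently restores the one whose addition
# yields the highest acc1 recorded in multiprune_plusone.csv.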
for multiprune in range(1,12):
headstr = []
for oneset in prevheadlist:
setstr = [str(int(s)) for s in oneset]
setstr = '+'.join(setstr)
headstr.append(setstr)
headstr = '.'.join(headstr)
for pluslayer in range(6):
for plushead in plusheadlist[pluslayer]:
os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')
df = pd.read_csv("multiprune_plusone.csv")
df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]
df = df.apply(pd.to_numeric, errors = 'coerce')
max_acc1_idx = df.idxmax().acc1
plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)
prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)
| 45.483871
| 285
| 0.656028
|
import os
import numpy as np
import pandas as pd
os.environ['MKL_THREADING_LAYER'] = 'GNU'
prevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]
plusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]
for multiprune in range(1,12):
headstr = []
for oneset in prevheadlist:
setstr = [str(int(s)) for s in oneset]
setstr = '+'.join(setstr)
headstr.append(setstr)
headstr = '.'.join(headstr)
for pluslayer in range(6):
for plushead in plusheadlist[pluslayer]:
os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')
df = pd.read_csv("multiprune_plusone.csv")
df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]
df = df.apply(pd.to_numeric, errors = 'coerce')
max_acc1_idx = df.idxmax().acc1
plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)
prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)
| true
| true
|
f70bc203e41ad8b576f3082f590f0bd6ff805df1
| 1,969
|
py
|
Python
|
pythonturtle/my_turtle.py
|
Cleverect/PythonTurtle
|
961f8d13cd835e55efa8fd04ebbcb0120ec7dec4
|
[
"MIT"
] | 114
|
2019-08-27T11:47:21.000Z
|
2022-02-22T11:50:49.000Z
|
pythonturtle/my_turtle.py
|
Cleverect/PythonTurtle
|
961f8d13cd835e55efa8fd04ebbcb0120ec7dec4
|
[
"MIT"
] | 31
|
2019-08-26T22:54:26.000Z
|
2022-01-10T17:13:27.000Z
|
pythonturtle/my_turtle.py
|
Cleverect/PythonTurtle
|
961f8d13cd835e55efa8fd04ebbcb0120ec7dec4
|
[
"MIT"
] | 38
|
2019-10-05T07:41:33.000Z
|
2022-01-15T03:32:23.000Z
|
import wx
from .misc.helpers import deg_to_rad, rad_to_deg
from .misc.vector import Vector
# Size of the turtle canvas. We assume no user will have a screen
# so big that the canvas will be bigger than this.
BITMAP_SIZE = Vector((2000, 1200))
# Center of the canvas.
origin = BITMAP_SIZE / 2.0
def to_my_angle(angle):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return rad_to_deg(-angle) - 180
def from_my_angle(angle):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return deg_to_rad(-angle + 180)
def from_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
def to_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
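# A quick round-trip sketch (editorial example, not original code):
#
#     p = Vector((10, 20))
#     to_my_pos(from_my_pos(p))  # -> Vector((10, 20)); the map is its own inverse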
class Turtle:
"""
A Turtle object defines a turtle by its attributes, such as
position, orientation, color, etc. See source of __init__ for
a complete list.
"""
def __init__(self):
self.pos = Vector((0, 0))
self.orientation = 180
self.color = "red"
self.width = 3
self.visible = True
self.pen_down = True
# the `clear` attribute is only made True momentarily when
# the `clear()` function is called by the user to clear the screen.
self.clear = False
self.SPEED = 400.0 # Pixels per second
self.ANGULAR_SPEED = 360.0 # Degrees per second
def give_pen(self):
"""
Gives a wxPython pen that corresponds to the color, width,
        and pen-down state of the Turtle instance.
"""
return wx.Pen(self.color,
self.width,
wx.SOLID if self.pen_down else wx.TRANSPARENT)
| 25.907895
| 75
| 0.643982
|
import wx
from .misc.helpers import deg_to_rad, rad_to_deg
from .misc.vector import Vector
BITMAP_SIZE = Vector((2000, 1200))
origin = BITMAP_SIZE / 2.0
def to_my_angle(angle):
return rad_to_deg(-angle) - 180
def from_my_angle(angle):
return deg_to_rad(-angle + 180)
def from_my_pos(pos):
return -pos + origin
def to_my_pos(pos):
return -pos + origin
class Turtle:
def __init__(self):
self.pos = Vector((0, 0))
self.orientation = 180
self.color = "red"
self.width = 3
self.visible = True
self.pen_down = True
self.clear = False
self.SPEED = 400.0
self.ANGULAR_SPEED = 360.0
def give_pen(self):
return wx.Pen(self.color,
self.width,
wx.SOLID if self.pen_down else wx.TRANSPARENT)
| true
| true
|
f70bc44faef09945bde95e95977b11d93d4dcd31
| 1,698
|
py
|
Python
|
cmsis-svd-parsing/main.py
|
michael-christen/prototypes
|
676dbcfc750b7a0b4a88bcd6a9fc8b109d8cd88f
|
[
"MIT"
] | null | null | null |
cmsis-svd-parsing/main.py
|
michael-christen/prototypes
|
676dbcfc750b7a0b4a88bcd6a9fc8b109d8cd88f
|
[
"MIT"
] | 9
|
2021-03-10T14:00:17.000Z
|
2022-02-27T02:41:57.000Z
|
cmsis-svd-parsing/main.py
|
michael-christen/prototypes
|
676dbcfc750b7a0b4a88bcd6a9fc8b109d8cd88f
|
[
"MIT"
] | null | null | null |
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {",".join(fields)}')
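# Example output line (editorial illustration with a hypothetical register):
#
#     CR    0x0000: EN[0],MODE[2:1]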
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral Specified')
def main(mcu, mcu_peripheral=None):
"""Given a chip and peripheral, prints the registers.
"""
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
| 29.789474
| 112
| 0.668433
|
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {",".join(fields)}')
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral Specified')
def main(mcu, mcu_peripheral=None):
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
| true
| true
|
f70bc45cd14977ecb22dd06d2ffddc212674349a
| 14,910
|
py
|
Python
|
mycv/train.py
|
duanzhiihao/mycv
|
184b52f7a5c1b6f603122d4f4050952b65ba0ead
|
[
"MIT"
] | null | null | null |
mycv/train.py
|
duanzhiihao/mycv
|
184b52f7a5c1b6f603122d4f4050952b65ba0ead
|
[
"MIT"
] | null | null | null |
mycv/train.py
|
duanzhiihao/mycv
|
184b52f7a5c1b6f603122d4f4050952b65ba0ead
|
[
"MIT"
] | null | null | null |
from mycv.utils.general import disable_multithreads
disable_multithreads()
import os
from pathlib import Path
import argparse
from tqdm import tqdm
import math
import torch
import torch.cuda.amp as amp
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parallel import DistributedDataParallel as DDP
import wandb
from mycv.utils.general import increment_dir
from mycv.utils.torch_utils import set_random_seeds, ModelEMA
from mycv.datasets.imagenet import ImageNetCls, imagenet_val
def cal_acc(p: torch.Tensor, labels: torch.LongTensor):
assert not p.requires_grad and p.device == labels.device
assert p.dim() == 2 and p.shape[0] == labels.shape[0]
_, p_cls = torch.max(p, dim=1)
tp = (p_cls == labels)
acc = tp.sum() / len(tp)
return acc
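# Minimal sketch of cal_acc on dummy data (illustrative only):
#   p = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # scores for 2 samples
#   labels = torch.tensor([1, 0])
#   cal_acc(p, labels)  # -> tensor(1.) since both argmax predictions match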
def train():
# ====== set the run settings ======
parser = argparse.ArgumentParser()
parser.add_argument('--project', type=str, default='imagenet')
parser.add_argument('--group', type=str, default='mini200')
parser.add_argument('--model', type=str, default='csp_s')
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--amp', type=bool, default=True)
parser.add_argument('--ema', type=bool, default=True)
parser.add_argument('--optimizer', type=str, default='SGD', choices=['Adam', 'SGD'])
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--metric', type=str, default='top1', choices=['top1'])
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--local_rank', type=int, default=-1, help='DDP arg, do not modify')
parser.add_argument('--wbmode', action='store_true')
cfg = parser.parse_args()
# model
cfg.img_size = 224
cfg.input_norm = False
cfg.sync_bn = False
# optimizer
cfg.lr = 0.01
cfg.momentum = 0.9
cfg.weight_decay = 0.0001
cfg.nesterov = False
# lr scheduler
cfg.lrf = 0.2 # min lr factor
cfg.lr_warmup_epochs = 1
# EMA
# cfg.ema_decay = 0.999
cfg.ema_warmup_epochs = 4
# Main process
IS_MAIN = (cfg.local_rank in [-1, 0])
# check arguments
metric: str = cfg.metric.lower()
epochs: int = cfg.epochs
local_rank: int = cfg.local_rank
world_size: int = int(os.environ.get('WORLD_SIZE', 1))
assert local_rank == int(os.environ.get('RANK', -1)), 'Only support single node'
assert cfg.batch_size % world_size == 0, 'batch_size must be multiple of device count'
batch_size: int = cfg.batch_size // world_size
if IS_MAIN:
print(cfg, '\n')
print('Batch size on each single GPU =', batch_size, '\n')
# fix random seeds for reproducibility
set_random_seeds(1)
torch.backends.cudnn.benchmark = True
# device setting
assert torch.cuda.is_available()
if local_rank == -1: # Single GPU
device = torch.device(f'cuda:{cfg.device}')
else: # DDP mode
assert torch.cuda.device_count() > local_rank and torch.distributed.is_available()
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method='env://', world_size=world_size, rank=local_rank
)
print(f'Local rank: {local_rank}, using device {device}:', 'device property:',
torch.cuda.get_device_properties(device))
# Dataset
if IS_MAIN:
print('Initializing Datasets and Dataloaders...')
if cfg.group == 'default':
train_split = 'train'
val_split = 'val'
cfg.num_class = 1000
elif cfg.group == 'mini200':
train_split = 'train200_600'
val_split = 'val200_600'
cfg.num_class = 200
else:
raise ValueError()
# training set
trainset = ImageNetCls(train_split, img_size=cfg.img_size, input_norm=cfg.input_norm)
sampler = torch.utils.data.distributed.DistributedSampler(
trainset, num_replicas=world_size, rank=local_rank, shuffle=True
) if local_rank != -1 else None
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler,
num_workers=cfg.workers, pin_memory=True
)
# test set
testloader = torch.utils.data.DataLoader(
ImageNetCls(split=val_split, img_size=cfg.img_size, input_norm=cfg.input_norm),
batch_size=batch_size, shuffle=False, num_workers=cfg.workers//2,
pin_memory=True, drop_last=False
)
# Initialize model
if cfg.model == 'res50':
from mycv.models.cls.resnet import resnet50
model = resnet50(num_classes=cfg.num_class)
elif cfg.model == 'res101':
from mycv.models.cls.resnet import resnet101
model = resnet101(num_classes=cfg.num_class)
elif cfg.model.startswith('yolov5'):
from mycv.models.yolov5.cls import YOLOv5Cls
assert cfg.model[-1] in ['s', 'm', 'l']
model = YOLOv5Cls(model=cfg.model[-1], num_class=cfg.num_class)
elif cfg.model.startswith('csp'):
from mycv.models.yolov5.cls import CSP
assert cfg.model[-1] in ['s', 'm', 'l']
model = CSP(model=cfg.model[-1], num_class=cfg.num_class)
else:
raise NotImplementedError()
model = model.to(device)
# loss function
loss_func = torch.nn.CrossEntropyLoss(reduction='mean')
# different optimization setting for different layers
pgb, pgw = [], []
for k, v in model.named_parameters():
if ('.bn' in k) or ('.bias' in k): # batchnorm or bias
pgb.append(v)
else: # conv weights
assert '.weight' in k
pgw.append(v)
parameters = [
{'params': pgb, 'lr': cfg.lr, 'weight_decay': 0.0},
{'params': pgw, 'lr': cfg.lr, 'weight_decay': cfg.weight_decay}
]
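    # Rationale (standard practice, stated as an assumption rather than a
    # claim about this repo): weight decay is applied only to conv weights,
    # since decaying BatchNorm parameters and biases usually hurts accuracy.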
if IS_MAIN:
print('Parameter groups:', [len(pg['params']) for pg in parameters])
del pgb, pgw
# optimizer
if cfg.optimizer == 'SGD':
optimizer = torch.optim.SGD(parameters, lr=cfg.lr,
momentum=cfg.momentum, nesterov=cfg.nesterov)
elif cfg.optimizer == 'Adam':
optimizer = torch.optim.Adam(parameters, lr=cfg.lr)
else:
raise ValueError()
# AMP
scaler = amp.GradScaler(enabled=cfg.amp)
log_parent = Path(f'runs/{cfg.project}')
wb_id = None
results = {metric: 0}
if cfg.resume:
# resume
run_name = cfg.resume
log_dir = log_parent / run_name
assert log_dir.is_dir()
checkpoint = torch.load(log_dir / 'last.pt')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
cur_fitness = best_fitness = checkpoint.get(metric, 0)
if IS_MAIN:
wb_id = open(log_dir / 'wandb_id.txt', 'r').read()
else:
# new experiment
run_name = increment_dir(dir_root=log_parent, name=cfg.model)
log_dir = log_parent / run_name # wandb logging dir
if IS_MAIN:
os.makedirs(log_dir, exist_ok=False)
print(str(model), file=open(log_dir / 'model.txt', 'w'))
start_epoch = 0
cur_fitness = best_fitness = 0
# initialize wandb
if IS_MAIN:
wbrun = wandb.init(project=cfg.project, group=cfg.group, name=run_name, config=cfg,
dir='runs/', resume='allow', id=wb_id, mode=cfg.wbmode)
cfg = wbrun.config
cfg.log_dir = log_dir
cfg.wandb_id = wbrun.id
if not (log_dir / 'wandb_id.txt').exists():
with open(log_dir / 'wandb_id.txt', 'w') as f:
f.write(wbrun.id)
else:
wbrun = None
# lr scheduler
def warmup_cosine(x):
warmup_iter = cfg.lr_warmup_epochs * len(trainloader)
if x < warmup_iter:
factor = x / warmup_iter
else:
_cur = x - warmup_iter + 1
_total = epochs * len(trainloader)
factor = cfg.lrf + 0.5 * (1 - cfg.lrf) * (1 + math.cos(_cur * math.pi / _total))
return factor
scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine, last_epoch=start_epoch - 1)
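    # Illustrative shape of warmup_cosine: the factor ramps linearly from 0
    # to 1 over the first cfg.lr_warmup_epochs, then follows a half-cosine
    # from ~1 down to ~cfg.lrf (0.2 here) at the last training iteration.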
# SyncBatchNorm
if local_rank != -1 and cfg.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Exponential moving average
if IS_MAIN and cfg.ema:
emas = [
ModelEMA(model, decay=0.99),
ModelEMA(model, decay=0.999),
ModelEMA(model, decay=0.9999)
]
for ema in emas:
ema.updates = start_epoch * len(trainloader) # set EMA updates
ema.warmup = cfg.ema_warmup_epochs * len(trainloader) # set EMA warmup
else:
emas = None
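    # Three EMA copies with different decays are kept so that, at each
    # evaluation, the best-performing decay can be selected (see the
    # torch.argmax over res_emas further below).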
# DDP mode
if local_rank != -1:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
# ======================== start training ========================
niter = s = None
for epoch in range(start_epoch, epochs):
model.train()
if local_rank != -1:
trainloader.sampler.set_epoch(epoch)
optimizer.zero_grad()
pbar = enumerate(trainloader)
train_loss, train_acc = 0.0, 0.0
if IS_MAIN:
pbar_title = ('%-10s' * 6) % (
'Epoch', 'GPU_mem', 'lr', 'tr_loss', 'tr_acc', metric
)
print('\n' + pbar_title) # title
pbar = tqdm(pbar, total=len(trainloader))
for i, (imgs, labels) in pbar:
# debugging
# if True:
# import matplotlib.pyplot as plt
# from mycv.datasets.food101 import CLASS_NAMES
# for im, lbl in zip(imgs, labels):
# im = im * trainset._input_std + trainset._input_mean
# im = im.permute(1,2,0).numpy()
# print(CLASS_NAMES[lbl])
# plt.imshow(im); plt.show()
imgs = imgs.to(device=device)
labels = labels.to(device=device)
# forward
with amp.autocast(enabled=cfg.amp):
p = model(imgs)
loss = loss_func(p, labels) * imgs.shape[0]
if local_rank != -1:
loss = loss * world_size
                # loss is averaged within image, summed over batch, and summed over GPUs
# backward, update
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if emas:
for ema in emas:
ema.update(model)
# Scheduler
scheduler.step()
# logging
if IS_MAIN:
niter = epoch * len(trainloader) + i
cur_lr = optimizer.param_groups[0]['lr']
loss = loss.detach().cpu().item()
acc = cal_acc(p.detach(), labels)
train_loss = (train_loss*i + loss) / (i+1)
train_acc = (train_acc*i + acc) / (i+1)
mem = torch.cuda.max_memory_allocated(device) / 1e9
s = ('%-10s' * 2 + '%-10.4g' * 4) % (
f'{epoch}/{epochs-1}', f'{mem:.3g}G',
cur_lr, train_loss, 100*train_acc, 100*cur_fitness
)
pbar.set_description(s)
torch.cuda.reset_peak_memory_stats()
# Weights & Biases logging
if niter % 100 == 0:
wbrun.log({
'general/lr': cur_lr,
'metric/train_loss': train_loss,
'metric/train_acc': train_acc,
'ema/n_updates': emas[0].updates if emas is not None else 0,
'ema0/decay': emas[0].get_decay() if emas is not None else 0,
'ema1/decay': emas[1].get_decay() if emas is not None else 0,
'ema2/decay': emas[2].get_decay() if emas is not None else 0,
}, step=niter)
# logging end
# ----Mini batch end
# ----Epoch end
# If DDP mode, synchronize model parameters on all gpus
if local_rank != -1:
model._sync_params_and_buffers(authoritative_rank=0)
# Evaluation
if IS_MAIN:
# results is like {'top1': xxx, 'top5': xxx}
_log_dic = {'general/epoch': epoch}
results = imagenet_val(model, split=val_split, testloader=testloader)
_log_dic.update({'metric/plain_val_'+k: v for k,v in results.items()})
            if emas is not None:
                res_emas = torch.zeros(len(emas))
for ei, ema in enumerate(emas):
results = imagenet_val(ema.ema, split=val_split, testloader=testloader)
_log_dic.update({f'metric/ema{ei}_val_'+k: v for k,v in results.items()})
res_emas[ei] = results[metric]
# select best result among all emas
_idx = torch.argmax(res_emas)
cur_fitness = res_emas[_idx]
_save_model = emas[_idx].ema
best_decay = emas[_idx].final_decay
else:
cur_fitness = results[metric]
_save_model = model
best_decay = 0
# wandb log
wbrun.log(_log_dic, step=niter)
# Write evaluation results
            res = s + '||' + '%10.4g' % results[metric]
with open(log_dir / 'results.txt', 'a') as f:
f.write(res + '\n')
# save last checkpoint
checkpoint = {
'model' : _save_model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scaler' : scaler.state_dict(),
'epoch' : epoch,
metric : cur_fitness,
'best_decay': best_decay
}
torch.save(checkpoint, log_dir / 'last.pt')
# save best checkpoint
if cur_fitness > best_fitness:
best_fitness = cur_fitness
torch.save(checkpoint, log_dir / 'best.pt')
del checkpoint
# ----Epoch end
# ----Training end
if __name__ == '__main__':
train()
# from mycv.models.cls.resnet import resnet50
# model = resnet50(num_classes=1000)
# weights = torch.load('weights/resnet50-19c8e357.pth')
# model.load_state_dict(weights)
# model = model.cuda()
# model.eval()
# results = imagenet_val(model, img_size=224, batch_size=64, workers=4)
# print(results['top1'])
| 39.340369
| 93
| 0.576526
|
from mycv.utils.general import disable_multithreads
disable_multithreads()
import os
from pathlib import Path
import argparse
from tqdm import tqdm
import math
import torch
import torch.cuda.amp as amp
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parallel import DistributedDataParallel as DDP
import wandb
from mycv.utils.general import increment_dir
from mycv.utils.torch_utils import set_random_seeds, ModelEMA
from mycv.datasets.imagenet import ImageNetCls, imagenet_val
def cal_acc(p: torch.Tensor, labels: torch.LongTensor):
assert not p.requires_grad and p.device == labels.device
assert p.dim() == 2 and p.shape[0] == labels.shape[0]
_, p_cls = torch.max(p, dim=1)
tp = (p_cls == labels)
acc = tp.sum() / len(tp)
return acc
def train():
parser = argparse.ArgumentParser()
parser.add_argument('--project', type=str, default='imagenet')
parser.add_argument('--group', type=str, default='mini200')
parser.add_argument('--model', type=str, default='csp_s')
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--amp', type=bool, default=True)
parser.add_argument('--ema', type=bool, default=True)
parser.add_argument('--optimizer', type=str, default='SGD', choices=['Adam', 'SGD'])
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--metric', type=str, default='top1', choices=['top1'])
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--local_rank', type=int, default=-1, help='DDP arg, do not modify')
parser.add_argument('--wbmode', action='store_true')
cfg = parser.parse_args()
cfg.img_size = 224
cfg.input_norm = False
cfg.sync_bn = False
cfg.lr = 0.01
cfg.momentum = 0.9
cfg.weight_decay = 0.0001
cfg.nesterov = False
cfg.lrf = 0.2
cfg.lr_warmup_epochs = 1
cfg.ema_warmup_epochs = 4
IS_MAIN = (cfg.local_rank in [-1, 0])
metric: str = cfg.metric.lower()
epochs: int = cfg.epochs
local_rank: int = cfg.local_rank
world_size: int = int(os.environ.get('WORLD_SIZE', 1))
assert local_rank == int(os.environ.get('RANK', -1)), 'Only support single node'
assert cfg.batch_size % world_size == 0, 'batch_size must be multiple of device count'
batch_size: int = cfg.batch_size // world_size
if IS_MAIN:
print(cfg, '\n')
print('Batch size on each single GPU =', batch_size, '\n')
set_random_seeds(1)
torch.backends.cudnn.benchmark = True
assert torch.cuda.is_available()
if local_rank == -1:
device = torch.device(f'cuda:{cfg.device}')
else:
assert torch.cuda.device_count() > local_rank and torch.distributed.is_available()
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method='env://', world_size=world_size, rank=local_rank
)
print(f'Local rank: {local_rank}, using device {device}:', 'device property:',
torch.cuda.get_device_properties(device))
if IS_MAIN:
print('Initializing Datasets and Dataloaders...')
if cfg.group == 'default':
train_split = 'train'
val_split = 'val'
cfg.num_class = 1000
elif cfg.group == 'mini200':
train_split = 'train200_600'
val_split = 'val200_600'
cfg.num_class = 200
else:
raise ValueError()
trainset = ImageNetCls(train_split, img_size=cfg.img_size, input_norm=cfg.input_norm)
sampler = torch.utils.data.distributed.DistributedSampler(
trainset, num_replicas=world_size, rank=local_rank, shuffle=True
) if local_rank != -1 else None
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler,
num_workers=cfg.workers, pin_memory=True
)
testloader = torch.utils.data.DataLoader(
ImageNetCls(split=val_split, img_size=cfg.img_size, input_norm=cfg.input_norm),
batch_size=batch_size, shuffle=False, num_workers=cfg.workers//2,
pin_memory=True, drop_last=False
)
if cfg.model == 'res50':
from mycv.models.cls.resnet import resnet50
model = resnet50(num_classes=cfg.num_class)
elif cfg.model == 'res101':
from mycv.models.cls.resnet import resnet101
model = resnet101(num_classes=cfg.num_class)
elif cfg.model.startswith('yolov5'):
from mycv.models.yolov5.cls import YOLOv5Cls
assert cfg.model[-1] in ['s', 'm', 'l']
model = YOLOv5Cls(model=cfg.model[-1], num_class=cfg.num_class)
elif cfg.model.startswith('csp'):
from mycv.models.yolov5.cls import CSP
assert cfg.model[-1] in ['s', 'm', 'l']
model = CSP(model=cfg.model[-1], num_class=cfg.num_class)
else:
raise NotImplementedError()
model = model.to(device)
loss_func = torch.nn.CrossEntropyLoss(reduction='mean')
pgb, pgw = [], []
for k, v in model.named_parameters():
if ('.bn' in k) or ('.bias' in k):
pgb.append(v)
else:
assert '.weight' in k
pgw.append(v)
parameters = [
{'params': pgb, 'lr': cfg.lr, 'weight_decay': 0.0},
{'params': pgw, 'lr': cfg.lr, 'weight_decay': cfg.weight_decay}
]
if IS_MAIN:
print('Parameter groups:', [len(pg['params']) for pg in parameters])
del pgb, pgw
if cfg.optimizer == 'SGD':
optimizer = torch.optim.SGD(parameters, lr=cfg.lr,
momentum=cfg.momentum, nesterov=cfg.nesterov)
elif cfg.optimizer == 'Adam':
optimizer = torch.optim.Adam(parameters, lr=cfg.lr)
else:
raise ValueError()
scaler = amp.GradScaler(enabled=cfg.amp)
log_parent = Path(f'runs/{cfg.project}')
wb_id = None
results = {metric: 0}
if cfg.resume:
run_name = cfg.resume
log_dir = log_parent / run_name
assert log_dir.is_dir()
checkpoint = torch.load(log_dir / 'last.pt')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
cur_fitness = best_fitness = checkpoint.get(metric, 0)
if IS_MAIN:
wb_id = open(log_dir / 'wandb_id.txt', 'r').read()
else:
run_name = increment_dir(dir_root=log_parent, name=cfg.model)
log_dir = log_parent / run_name
if IS_MAIN:
os.makedirs(log_dir, exist_ok=False)
print(str(model), file=open(log_dir / 'model.txt', 'w'))
start_epoch = 0
cur_fitness = best_fitness = 0
if IS_MAIN:
wbrun = wandb.init(project=cfg.project, group=cfg.group, name=run_name, config=cfg,
dir='runs/', resume='allow', id=wb_id, mode=cfg.wbmode)
cfg = wbrun.config
cfg.log_dir = log_dir
cfg.wandb_id = wbrun.id
if not (log_dir / 'wandb_id.txt').exists():
with open(log_dir / 'wandb_id.txt', 'w') as f:
f.write(wbrun.id)
else:
wbrun = None
def warmup_cosine(x):
warmup_iter = cfg.lr_warmup_epochs * len(trainloader)
if x < warmup_iter:
factor = x / warmup_iter
else:
_cur = x - warmup_iter + 1
_total = epochs * len(trainloader)
factor = cfg.lrf + 0.5 * (1 - cfg.lrf) * (1 + math.cos(_cur * math.pi / _total))
return factor
scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine, last_epoch=start_epoch - 1)
if local_rank != -1 and cfg.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if IS_MAIN and cfg.ema:
emas = [
ModelEMA(model, decay=0.99),
ModelEMA(model, decay=0.999),
ModelEMA(model, decay=0.9999)
]
for ema in emas:
ema.updates = start_epoch * len(trainloader)
ema.warmup = cfg.ema_warmup_epochs * len(trainloader)
else:
emas = None
if local_rank != -1:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
niter = s = None
for epoch in range(start_epoch, epochs):
model.train()
if local_rank != -1:
trainloader.sampler.set_epoch(epoch)
optimizer.zero_grad()
pbar = enumerate(trainloader)
train_loss, train_acc = 0.0, 0.0
if IS_MAIN:
pbar_title = ('%-10s' * 6) % (
'Epoch', 'GPU_mem', 'lr', 'tr_loss', 'tr_acc', metric
)
print('\n' + pbar_title)
pbar = tqdm(pbar, total=len(trainloader))
for i, (imgs, labels) in pbar:
imgs = imgs.to(device=device)
labels = labels.to(device=device)
with amp.autocast(enabled=cfg.amp):
p = model(imgs)
loss = loss_func(p, labels) * imgs.shape[0]
if local_rank != -1:
loss = loss * world_size
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if emas:
for ema in emas:
ema.update(model)
scheduler.step()
if IS_MAIN:
niter = epoch * len(trainloader) + i
cur_lr = optimizer.param_groups[0]['lr']
loss = loss.detach().cpu().item()
acc = cal_acc(p.detach(), labels)
train_loss = (train_loss*i + loss) / (i+1)
train_acc = (train_acc*i + acc) / (i+1)
mem = torch.cuda.max_memory_allocated(device) / 1e9
s = ('%-10s' * 2 + '%-10.4g' * 4) % (
f'{epoch}/{epochs-1}', f'{mem:.3g}G',
cur_lr, train_loss, 100*train_acc, 100*cur_fitness
)
pbar.set_description(s)
torch.cuda.reset_peak_memory_stats()
if niter % 100 == 0:
wbrun.log({
'general/lr': cur_lr,
'metric/train_loss': train_loss,
'metric/train_acc': train_acc,
'ema/n_updates': emas[0].updates if emas is not None else 0,
'ema0/decay': emas[0].get_decay() if emas is not None else 0,
'ema1/decay': emas[1].get_decay() if emas is not None else 0,
'ema2/decay': emas[2].get_decay() if emas is not None else 0,
}, step=niter)
if local_rank != -1:
model._sync_params_and_buffers(authoritative_rank=0)
if IS_MAIN:
_log_dic = {'general/epoch': epoch}
results = imagenet_val(model, split=val_split, testloader=testloader)
_log_dic.update({'metric/plain_val_'+k: v for k,v in results.items()})
            if emas is not None:
                res_emas = torch.zeros(len(emas))
for ei, ema in enumerate(emas):
results = imagenet_val(ema.ema, split=val_split, testloader=testloader)
_log_dic.update({f'metric/ema{ei}_val_'+k: v for k,v in results.items()})
res_emas[ei] = results[metric]
_idx = torch.argmax(res_emas)
cur_fitness = res_emas[_idx]
_save_model = emas[_idx].ema
best_decay = emas[_idx].final_decay
else:
cur_fitness = results[metric]
_save_model = model
best_decay = 0
wbrun.log(_log_dic, step=niter)
            res = s + '||' + '%10.4g' % results[metric]
with open(log_dir / 'results.txt', 'a') as f:
f.write(res + '\n')
checkpoint = {
'model' : _save_model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scaler' : scaler.state_dict(),
'epoch' : epoch,
metric : cur_fitness,
'best_decay': best_decay
}
torch.save(checkpoint, log_dir / 'last.pt')
if cur_fitness > best_fitness:
best_fitness = cur_fitness
torch.save(checkpoint, log_dir / 'best.pt')
del checkpoint
if __name__ == '__main__':
train()
| true
| true
|
f70bc4b73a9f5b613ca9d0d69fb70d4dd3db32df
| 13,451
|
py
|
Python
|
config/settings/base.py
|
CrazyMath/smcrm
|
7027026d450279d63b81147e49cc2be2be622550
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
CrazyMath/smcrm
|
7027026d450279d63b81147e49cc2be2be622550
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
CrazyMath/smcrm
|
7027026d450279d63b81147e49cc2be2be622550
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# smcrm/
APPS_DIR = ROOT_DIR / "smcrm"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
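# Example .env contents (hypothetical values, for illustration only; the
# variable names are the ones read via env() in this file):
#   DJANGO_DEBUG=True
#   DATABASE_URL=postgres://user:password@127.0.0.1:5432/smcrm
#   CELERY_BROKER_URL=redis://localhost:6379/0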
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"smcrm.users.apps.UsersConfig",
"smcrm.projects.apps.ProjectsConfig",
"smcrm.developers.apps.DevelopersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "smcrm.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"smcrm.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = False
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Konstantin Moiseenko""", "moiseenko.k.s@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "smcrm.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "smcrm.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
| 42.701587
| 100
| 0.647833
|
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
APPS_DIR = ROOT_DIR / "smcrm"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
env.read_env(str(ROOT_DIR / ".env"))
= env.bool("DJANGO_DEBUG", False)
TIME_ZONE = "UTC"
= "en-us"
= 1
= True
= True
= True
= [str(ROOT_DIR / "locale")]
= {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
= "config.urls"
= "config.wsgi.application"
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"smcrm.users.apps.UsersConfig",
"smcrm.projects.apps.ProjectsConfig",
"smcrm.developers.apps.DevelopersConfig",
]
= DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
= {"sites": "smcrm.contrib.sites.migrations"}
= [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
= "users.User"
= "users:redirect"
= "account_login"
= [
.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
= [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
= [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
= str(ROOT_DIR / "staticfiles")
= "/static/"
_DIR / "static")]
= [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
= str(APPS_DIR / "media")
= "/media/"
= [
{
mplate.backends.django.DjangoTemplates",
": [str(APPS_DIR / "templates")],
"OPTIONS": {
ders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
sors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"smcrm.utils.context_processors.settings_context",
],
},
}
]
= "django.forms.renderers.TemplatesSetting"
E_PACK = "bootstrap4"
= (str(APPS_DIR / "fixtures"),)
= False
= False
= True
= "DENY"
= env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
= 5
ADMIN_URL = "admin/"
= [("""Konstantin Moiseenko""", "moiseenko.k.s@gmail.com")]
= ADMINS
NG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
if USE_TZ:
= TIME_ZONE
v("CELERY_BROKER_URL")
LERY_BROKER_URL
json"]
son"
son"
ME_LIMIT = 5 * 60
ME_LIMIT = 60
HEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_ADAPTER = "smcrm.users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "smcrm.users.adapters.SocialAccountAdapter"
PS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
URLS_REGEX = r"^/api/.*$"
| true
| true
|
f70bc4c80a5039b58b01dee499a76cc5f8cb418b
| 1,693
|
py
|
Python
|
utils/imgs_getter.py
|
albertschr/wechat_robot_supported_blockchain
|
27b257bb9cfc491f0c6b8178a0fe0de9e92dd5c5
|
[
"MIT"
] | 7
|
2019-04-01T01:04:52.000Z
|
2019-04-30T09:09:30.000Z
|
utils/imgs_getter.py
|
albertschr/wechat_robot_supported_blockchain
|
27b257bb9cfc491f0c6b8178a0fe0de9e92dd5c5
|
[
"MIT"
] | 6
|
2019-03-09T03:17:02.000Z
|
2019-04-03T11:51:13.000Z
|
utils/imgs_getter.py
|
leeduckgo/wechat_robot_supported_blockchain
|
27b257bb9cfc491f0c6b8178a0fe0de9e92dd5c5
|
[
"MIT"
] | 6
|
2019-03-08T01:50:40.000Z
|
2019-03-22T02:06:02.000Z
|
# -*- coding: utf-8 -*-
"""
Fetch YCY (Yang Chaoyue) images.
"""
import json
import os
import requests
from settings import PROJECT_PATH
class YCYImage(object):
def __init__(self):
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
# "Content-Type": "application/x-www-form-urlencoded",
}
def get_img(self):
"""获取100页的图片链接"""
url = "https://www.duitang.com/napi/blog/list/by_search/"
result = []
for page in range(0, 240, 24):
data = {
'kw': '杨超越',
'type': 'feed',
'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
'_type': '',
'start': str(page),
}
r = requests.get(url, headers=self.headers, params=data, verify=False)
d = json.loads(r.text)
            if d.get('data', {}).get('object_list'):
d = d['data']['object_list']
result.extend(d)
return result
def download_img_and_save(self, result):
"""下载图片并保存"""
if not result:
return
for index, d in enumerate(result):
r = requests.get(url=d['photo']['path'])
file_name = os.path.join(PROJECT_PATH, "pics", "ycy_{}.jpg".format(index))
with open(file_name, 'wb') as f:
f.write(r.content)
def run(self):
result = self.get_img()
self.download_img_and_save(result)
if __name__ == '__main__':
ycy = YCYImage()
ycy.run()
| 29.189655
| 158
| 0.546958
|
import json
import os
import requests
from settings import PROJECT_PATH
class YCYImage(object):
def __init__(self):
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
def get_img(self):
url = "https://www.duitang.com/napi/blog/list/by_search/"
result = []
for page in range(0, 240, 24):
data = {
'kw': '杨超越',
'type': 'feed',
'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
'_type': '',
'start': str(page),
}
r = requests.get(url, headers=self.headers, params=data, verify=False)
d = json.loads(r.text)
            if d.get('data', {}).get('object_list'):
d = d['data']['object_list']
result.extend(d)
return result
def download_img_and_save(self, result):
if not result:
return
for index, d in enumerate(result):
r = requests.get(url=d['photo']['path'])
file_name = os.path.join(PROJECT_PATH, "pics", "ycy_{}.jpg".format(index))
with open(file_name, 'wb') as f:
f.write(r.content)
def run(self):
result = self.get_img()
self.download_img_and_save(result)
if __name__ == '__main__':
ycy = YCYImage()
ycy.run()
| true
| true
|
f70bc60d5ef867aef041dfad4d51b7dc3f6c6c3a
| 4,772
|
py
|
Python
|
dace/transformation/testing.py
|
targetsm/dace
|
297b12804a334df8cc6fad5250d5fb0cce20dc6e
|
[
"BSD-3-Clause"
] | null | null | null |
dace/transformation/testing.py
|
targetsm/dace
|
297b12804a334df8cc6fad5250d5fb0cce20dc6e
|
[
"BSD-3-Clause"
] | null | null | null |
dace/transformation/testing.py
|
targetsm/dace
|
297b12804a334df8cc6fad5250d5fb0cce20dc6e
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import copy
from io import StringIO
import os
import sys
import traceback
from dace.sdfg import SDFG
from dace.transformation.optimizer import Optimizer
class TransformationTester(Optimizer):
""" An SDFG optimizer that consecutively applies available transformations
up to a fixed depth. """
def __init__(self,
sdfg: SDFG,
depth=1,
validate=True,
generate_code=True,
compile=False,
print_exception=True,
halt_on_exception=False):
""" Creates a new Transformation tester, which brute-forces applying the
available transformations up to a certain level.
:param sdfg: The SDFG to transform.
:param depth: The number of levels to run transformations. For
instance, depth=1 means to only run immediate
transformations, whereas depth=2 would run
transformations resulting from those transformations.
:param validate: If True, the SDFG is validated after applying.
:param generate_code: If True, the SDFG will generate code after
transformation.
:param compile: If True, the SDFG will be compiled after applying.
:param print_exception: If True, prints exception when it is raised.
:param halt_on_exception: If True, stops when a transformation
raises an exception.
"""
super().__init__(sdfg)
self.depth = depth
self.validate = validate
self.generate_code = generate_code
self.compile = compile
self.print_exception = print_exception
self.halt_on_exception = halt_on_exception
self.passed_tests = 0
self.failed_tests = 0
self.stdout = sys.stdout
self.stderr = sys.stderr
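    # Note: stdout/stderr of each attempted transformation are captured in a
    # StringIO buffer below and replayed only on failure, so the PASS/FAIL
    # progress log stays readable.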
def _optimize_recursive(self, sdfg: SDFG, depth: int):
if depth == self.depth:
return
matches = list(self.get_pattern_matches(sdfg=sdfg))
# Apply each transformation
for match in matches:
# Copy the SDFG
new_sdfg: SDFG = copy.deepcopy(sdfg)
# Try to apply, handle any exception
try:
# Redirect outputs
output = StringIO()
sys.stdout = output
sys.stderr = output
print(' ' * depth,
type(match).__name__,
'- ',
end='',
file=self.stdout)
tsdfg: SDFG = new_sdfg.sdfg_list[match.sdfg_id]
match.apply(tsdfg)
sdfg.save(os.path.join('_dacegraphs', 'program.sdfg'))
# Validate
if self.validate:
new_sdfg.validate()
# Expand library nodes
new_sdfg.expand_library_nodes()
# Generate code
if self.generate_code:
new_sdfg.generate_code()
if self.compile:
compiled = new_sdfg.compile()
del compiled
print('PASS', file=self.stdout)
self.passed_tests += 1
# Recursively optimize as necessary
self._optimize_recursive(sdfg, depth + 1)
except: # Literally anything can happen here
print('FAIL', file=self.stdout)
self.failed_tests += 1
if self.halt_on_exception:
print(output.getvalue(), file=self.stderr)
raise
if self.print_exception:
print(output.getvalue(), file=self.stderr)
traceback.print_exc(file=self.stderr)
continue
finally:
# Restore redirected outputs
sys.stdout = self.stdout
sys.stderr = self.stderr
def optimize(self):
self._optimize_recursive(self.sdfg, 0)
if self.failed_tests > 0:
raise RuntimeError(
'%d / %d transformations passed' %
(self.passed_tests, self.passed_tests + self.failed_tests))
return self.sdfg
if __name__ == '__main__':
import dace
@dace.program
def example(A: dace.float32[2]):
A *= 2
sdfg = example.to_sdfg()
tt = TransformationTester(sdfg, 2, halt_on_exception=True)
tt.optimize()
print('SUMMARY: %d / %d tests passed' %
(tt.passed_tests, tt.passed_tests + tt.failed_tests))
| 34.085714
| 80
| 0.548617
|
import copy
from io import StringIO
import os
import sys
import traceback
from dace.sdfg import SDFG
from dace.transformation.optimizer import Optimizer
class TransformationTester(Optimizer):
def __init__(self,
sdfg: SDFG,
depth=1,
validate=True,
generate_code=True,
compile=False,
print_exception=True,
halt_on_exception=False):
super().__init__(sdfg)
self.depth = depth
self.validate = validate
self.generate_code = generate_code
self.compile = compile
self.print_exception = print_exception
self.halt_on_exception = halt_on_exception
self.passed_tests = 0
self.failed_tests = 0
self.stdout = sys.stdout
self.stderr = sys.stderr
def _optimize_recursive(self, sdfg: SDFG, depth: int):
if depth == self.depth:
return
matches = list(self.get_pattern_matches(sdfg=sdfg))
for match in matches:
new_sdfg: SDFG = copy.deepcopy(sdfg)
try:
output = StringIO()
sys.stdout = output
sys.stderr = output
print(' ' * depth,
type(match).__name__,
'- ',
end='',
file=self.stdout)
tsdfg: SDFG = new_sdfg.sdfg_list[match.sdfg_id]
match.apply(tsdfg)
sdfg.save(os.path.join('_dacegraphs', 'program.sdfg'))
if self.validate:
new_sdfg.validate()
new_sdfg.expand_library_nodes()
if self.generate_code:
new_sdfg.generate_code()
if self.compile:
compiled = new_sdfg.compile()
del compiled
print('PASS', file=self.stdout)
self.passed_tests += 1
self._optimize_recursive(sdfg, depth + 1)
except:
print('FAIL', file=self.stdout)
self.failed_tests += 1
if self.halt_on_exception:
print(output.getvalue(), file=self.stderr)
raise
if self.print_exception:
print(output.getvalue(), file=self.stderr)
traceback.print_exc(file=self.stderr)
continue
finally:
sys.stdout = self.stdout
sys.stderr = self.stderr
def optimize(self):
self._optimize_recursive(self.sdfg, 0)
if self.failed_tests > 0:
raise RuntimeError(
'%d / %d transformations passed' %
(self.passed_tests, self.passed_tests + self.failed_tests))
return self.sdfg
if __name__ == '__main__':
import dace
@dace.program
def example(A: dace.float32[2]):
A *= 2
sdfg = example.to_sdfg()
tt = TransformationTester(sdfg, 2, halt_on_exception=True)
tt.optimize()
print('SUMMARY: %d / %d tests passed' %
(tt.passed_tests, tt.passed_tests + tt.failed_tests))
| true
| true
|
f70bc898982ac2eebdf07a06cfac61453b208b2a
| 1,639
|
py
|
Python
|
snake/main/point.py
|
megh-khaire/SnakeAIs
|
1dbc76a47a3bb4651c426f04671ae8ae12079c97
|
[
"Apache-2.0"
] | null | null | null |
snake/main/point.py
|
megh-khaire/SnakeAIs
|
1dbc76a47a3bb4651c426f04671ae8ae12079c97
|
[
"Apache-2.0"
] | null | null | null |
snake/main/point.py
|
megh-khaire/SnakeAIs
|
1dbc76a47a3bb4651c426f04671ae8ae12079c97
|
[
"Apache-2.0"
] | null | null | null |
import pygame
from snake.resources.constants import BLOCK_SIZE, WIDTH, HEIGHT
from snake.resources.directions import Direction
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.f = 0
self.g = 0
self.h = 0
self.neighbors = []
self.origin = None
def __eq__(self, point):
return self.__class__ == point.__class__ and self.x == point.x and self.y == point.y
def plot(self, display, color):
'''Plots the point with given color and fixed size'''
pygame.draw.rect(display, color, pygame.Rect(self.x, self.y, BLOCK_SIZE, BLOCK_SIZE))
def get_direction(self):
'''Determine direction in which the snake moves based on initial position'''
if self.x == self.origin.x and self.y < self.origin.y:
return Direction.UP
elif self.x == self.origin.x and self.y > self.origin.y:
return Direction.DOWN
elif self.x < self.origin.x and self.y == self.origin.y:
return Direction.LEFT
elif self.x > self.origin.x and self.y == self.origin.y:
return Direction.RIGHT
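        # Implicitly returns None when the point coincides with its origin or
        # differs in both coordinates; callers are assumed to avoid that case.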
def generate_neighbors(self):
'''Generates neighbors for point object'''
if self.x > 0:
self.neighbors.append(Point(self.x - BLOCK_SIZE, self.y))
if self.y > 0:
self.neighbors.append(Point(self.x, self.y - BLOCK_SIZE))
if self.x < WIDTH - BLOCK_SIZE:
self.neighbors.append(Point(self.x + BLOCK_SIZE, self.y))
if self.y < HEIGHT - BLOCK_SIZE:
self.neighbors.append(Point(self.x, self.y + BLOCK_SIZE))
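# Minimal usage sketch (hypothetical coordinates, assumed to be multiples of
# BLOCK_SIZE):
#   p = Point(40, 40)
#   p.origin = Point(40, 60)
#   p.get_direction()       # -> Direction.UP, since y decreased
#   p.generate_neighbors()  # fills p.neighbors with up to 4 grid points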
| 37.25
| 93
| 0.611959
|
import pygame
from snake.resources.constants import BLOCK_SIZE, WIDTH, HEIGHT
from snake.resources.directions import Direction
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.f = 0
self.g = 0
self.h = 0
self.neighbors = []
self.origin = None
def __eq__(self, point):
return self.__class__ == point.__class__ and self.x == point.x and self.y == point.y
def plot(self, display, color):
pygame.draw.rect(display, color, pygame.Rect(self.x, self.y, BLOCK_SIZE, BLOCK_SIZE))
def get_direction(self):
if self.x == self.origin.x and self.y < self.origin.y:
return Direction.UP
elif self.x == self.origin.x and self.y > self.origin.y:
return Direction.DOWN
elif self.x < self.origin.x and self.y == self.origin.y:
return Direction.LEFT
elif self.x > self.origin.x and self.y == self.origin.y:
return Direction.RIGHT
def generate_neighbors(self):
if self.x > 0:
self.neighbors.append(Point(self.x - BLOCK_SIZE, self.y))
if self.y > 0:
self.neighbors.append(Point(self.x, self.y - BLOCK_SIZE))
if self.x < WIDTH - BLOCK_SIZE:
self.neighbors.append(Point(self.x + BLOCK_SIZE, self.y))
if self.y < HEIGHT - BLOCK_SIZE:
self.neighbors.append(Point(self.x, self.y + BLOCK_SIZE))
| true
| true
|
f70bc89ae28e0a1442364b6237ac83deeb24e3ef
| 3,508
|
py
|
Python
|
examples/01_modelling/plot_06_synthetic_4d.py
|
geophysics-ubonn/crtomo_tools
|
a01b4d31d7250bc729605ae4dc035f108168128e
|
[
"MIT"
] | 2
|
2021-03-05T14:30:20.000Z
|
2021-04-16T05:31:07.000Z
|
examples/01_modelling/plot_06_synthetic_4d.py
|
geophysics-ubonn/crtomo_tools
|
a01b4d31d7250bc729605ae4dc035f108168128e
|
[
"MIT"
] | 1
|
2019-06-06T12:22:26.000Z
|
2019-06-06T12:22:26.000Z
|
examples/01_modelling/plot_06_synthetic_4d.py
|
geophysics-ubonn/crtomo_tools
|
a01b4d31d7250bc729605ae4dc035f108168128e
|
[
"MIT"
] | 9
|
2019-02-22T12:17:50.000Z
|
2021-09-01T01:47:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generating a 4D synthetic data set with noise.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A 2D space, time and frequency data set is generated for testing purposes in
reda.
"""
###############################################################################
# imports
import os
from glob import glob
import numpy as np
import crtomo
import reda
###############################################################################
# Generate the forward models
frequencies = np.logspace(-3, 3, 5)
grid = crtomo.crt_grid(
'data_synthetic_4d/elem.dat', 'data_synthetic_4d/elec.dat'
)
# this context manager makes sure that all output is relative to the given
# directory
with reda.CreateEnterDirectory('output_synthetic_4d'):
for nr, anomaly_z_pos in enumerate(range(0, -10, -3)):
outdir = 'modV_{:02}'.format(nr)
if os.path.isdir(outdir):
continue
sinv = crtomo.eitMan(grid=grid, frequencies=frequencies)
sinv.add_homogeneous_model(100, 0)
sinv.set_area_to_single_colecole(
            18, 22, anomaly_z_pos - 2.0, anomaly_z_pos,
[100, 0.1, 0.04, 0.6]
)
r = sinv.plot_forward_models()
r['rmag']['fig'].savefig('forward_rmag_{:02}.pdf'.format(nr))
r['rpha']['fig'].savefig('forward_rpha_{:02}.pdf'.format(nr))
for f, td in sinv.tds.items():
td.configs.gen_dipole_dipole(skipc=0, nr_voltage_dipoles=40)
td.configs.gen_reciprocals(append=True)
r = sinv.measurements()
sinv.save_measurements_to_directory(outdir)
# plot pseudosections
Vdirs = sorted(glob('modV*'))
for nr, Vdir in enumerate(Vdirs):
seit = reda.sEIT()
seit.import_crtomo(Vdir)
seit.compute_K_analytical(spacing=1)
seit.plot_pseudosections(
'r', return_fig=True
).savefig('ps_r_{:02}.jpg'.format(nr), dpi=300)
seit.plot_pseudosections(
'rho_a', return_fig=True
).savefig('ps_rho_a_{:02}.jpg'.format(nr), dpi=300)
seit.plot_pseudosections(
'rpha', return_fig=True
).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)
###############################################################################
# now generate noisy data
# this context manager makes sure that all output is relative to the given
# directory
with reda.CreateEnterDirectory('output_synthetic_4d'):
Vdirs = sorted(glob('modV*'))
for nr, Vdir in enumerate(Vdirs):
seit = reda.sEIT()
seit.import_crtomo(Vdir)
seit.compute_K_analytical(spacing=1)
# use different seeds for different time steps
np.random.seed(34 + nr)
noise = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])
r_save = seit.data['r'].values.copy()
seit.data['r'] = r_save + noise * r_save / 8000.0 * np.log(seit.data['k'])
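        # Noise model: Gaussian noise relative to r, scaled by 1/8000 and by
        # log(K), so configurations with larger geometric factors receive
        # more noise; the phase below gets 10 % relative Gaussian noise.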
seit.data['rho_a'] = seit.data['r'] * seit.data['k']
seit.plot_pseudosections(
'rho_a', return_fig=True
).savefig('noisy_ps_rho_a_{:02}.jpg'.format(nr), dpi=300)
rpha_save = seit.data['rpha'].values.copy()
noise_rpha = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])
seit.data['rpha'] = rpha_save + noise_rpha * rpha_save / 10.0
seit.plot_pseudosections(
'rpha', return_fig=True
).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)
seit.export_to_crtomo_multi_frequency(Vdir + '_noisy')
| 37.319149
| 82
| 0.586374
| true
| true
|
|
f70bc95dea3ed52a2d7c9b3d4c50969f525e91e1
| 619
|
py
|
Python
|
test/test_add_contact.py
|
Atush/py_learning
|
2c25151882eb0fc8864fd868cf20d04311d2bac7
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
Atush/py_learning
|
2c25151882eb0fc8864fd868cf20d04311d2bac7
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_contact.py
|
Atush/py_learning
|
2c25151882eb0fc8864fd868cf20d04311d2bac7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, json_contacts, db, check_ui):
    contact = json_contacts
app.open_home_page()
old_contacts = db.get_contact_list()
app.contact.create(contact)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.append(contact)
    assert sorted(old_contacts, key=Contact.id_con_max) == sorted(new_contacts, key=Contact.id_con_max)
if check_ui:
assert sorted(old_contacts, key=Contact.id_con_max) == sorted(app.group.get_contact_list(), key=Contact.id_con_max)
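# Toy illustration (invented data, not the real Contact model) of the
# sorted-comparison idiom used above: both sides are sorted with the same key,
# which makes the equality assertion order-insensitive.
old = [{'id': 3}, {'id': 1}]
new = [{'id': 1}, {'id': 3}]
assert sorted(old, key=lambda c: c['id']) == sorted(new, key=lambda c: c['id'])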
| 38.6875
| 123
| 0.73021
|
from model.contact import Contact
def test_add_contact(app, json_contacts, db, check_ui):
    contact = json_contacts
app.open_home_page()
old_contacts = db.get_contact_list()
app.contact.create(contact)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.append(contact)
    assert sorted(old_contacts, key=Contact.id_con_max) == sorted(new_contacts, key=Contact.id_con_max)
if check_ui:
assert sorted(old_contacts, key=Contact.id_con_max) == sorted(app.group.get_contact_list(), key=Contact.id_con_max)
| true
| true
|
f70bcacb9fbd7c0eef2a877d9d13e34b3353d545
| 6,654
|
py
|
Python
|
src/models/model_evaluate.py
|
singh-karanpal/Capstone
|
807ca3f70276a0dd17244a123a759a914d358424
|
[
"MIT"
] | null | null | null |
src/models/model_evaluate.py
|
singh-karanpal/Capstone
|
807ca3f70276a0dd17244a123a759a914d358424
|
[
"MIT"
] | null | null | null |
src/models/model_evaluate.py
|
singh-karanpal/Capstone
|
807ca3f70276a0dd17244a123a759a914d358424
|
[
"MIT"
] | null | null | null |
# author: Carlina Kim, Karanpal Singh, Sukriti Trehan, Victor Cuspinera
# date: 2020-06-21
'''This script reads the saved theme/subtheme model(s), the padded validation sets and the y validation sets,
evaluates the model(s), and saves the evaluation results in the specified directory.
There are 2 parameters: the model level to evaluate and the output path where the evaluation results are saved.
Usage: model_evaluate.py --level=<level> --output_dir=<destination_dir_path>
Example:
python src/models/model_evaluate.py --level='theme' --output_dir=reports/
python src/models/model_evaluate.py --level='subtheme' --output_dir=reports/
Options:
--level=<level>                        Model level to evaluate: 'theme' or 'subtheme'
--output_dir=<destination_dir_path>    Directory for saving evaluated results
'''
import pandas as pd
import numpy as np
from docopt import docopt
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, precision_recall_curve
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
opt = docopt(__doc__)
print("\n-----START: model_evaluate.py-----\n")
def main(level, output_dir):
"""
Takes the input level and calls model_evaluate class with
output_dir as argument
"""
me = model_evaluate()
me.get_evaluations(level=level, output_dir=output_dir)
print('Thanks for your patience, the evaluation process has finished!\n')
print('----END: model_evaluate.py----\n')
return
class model_evaluate:
# Loads data and evaluates saved theme model and subtheme models on validation set
def eval_metrics(self, model_name, x_valid, y_valid, level='theme'):
"""
        Evaluates model results at different threshold levels and produces a
        data table and precision-recall curves
Parameters
-----------
        model_name: (TensorFlow saved model)
x_valid: (pandas dataframe) dataframe with validation comments
y_valid: (numpy array) array with labels
level: (string) Takes value 'theme' or 'subtheme' to evaluate accordingly
Returns
-------
Pandas DataFrame or matplotlib plot
dataframe with evaluation metrics including precision, recall, f1 score at
different threshold values
"""
pred_values = model_name.predict(x_valid)
if level == 'theme':
precision_dict = dict()
recall_dict = dict()
thresh_dict = dict()
precision_dict["BiGRU + Fasttext"], recall_dict["BiGRU + Fasttext"], thresh_dict["BiGRU + Fasttext"] = precision_recall_curve(y_valid.ravel(), pred_values.ravel())
labels = []
labels = list(precision_dict.keys())
plt.figure()
plt.step(recall_dict['BiGRU + Fasttext'], precision_dict['BiGRU + Fasttext'], where='post', color='orange')
plt.xlabel('Recall', fontsize=18)
plt.ylabel('Precision', fontsize=18)
plt.axhline(y=0.743643, xmin=0, xmax=0.71, ls='--', color="cornflowerblue")
plt.axvline(x=0.705382, ymin=0, ymax=0.71, ls='--', color="cornflowerblue")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(labels, loc=(1.01, .79), prop=dict(size=14))
plt.title('Precision Recall Curves for best performing model', fontsize = 18)
plt.savefig('reports/figures/pr_curve_valid_theme.png')
# PRECISION & RECALL
predictions_results = []
        thresholds = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        for val in thresholds:
            pred = pred_values.copy()
            pred[pred >= val] = 1
            pred[pred < val] = 0
accuracy = accuracy_score(y_valid, pred, normalize=True, sample_weight=None)
precision = precision_score(y_valid, pred, average='micro')
recall = recall_score(y_valid, pred, average='micro')
f1 = f1_score(y_valid, pred, average='micro')
case= {'Threshold': val,
'Accuracy': accuracy,
'Precision': precision,
'Recall': recall,
'F1-measure': f1}
predictions_results.append(case)
return pd.DataFrame(predictions_results)
def get_evaluations(self, level, output_dir):
"""
Evaluates models by using eval_metrics function
"""
if level == 'theme':
print("**Loading data**")
x_valid = np.load('data/interim/question1_models/advance/X_valid_padded.npy')
y_valid = np.load('data/interim/question1_models/advance/y_valid.npy')
print("**Loading the saved theme model**")
model = tf.keras.models.load_model('models/Theme_Model/theme_model')
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid)
print("**Saving results**")
results.to_csv(output_dir + '/tables/theme_tables/theme_valid_eval.csv')
print("Evaluations saved to reports/")
else:
print("Loading data and evaluating the subthemes model on validation set")
themes = ['CPD', 'CB', 'EWC', 'Exec', 'FWE',
'SP', 'RE', 'Sup', 'SW', 'TEPE', 'VMG', 'OTH']
for label in themes:
print("****Label:", label, "****")
print("**Loading data**")
x_valid = np.load('data/interim/subthemes/' + str(label) + '/X_valid_padded.npy')
# self.x_valids.append(x_valid)
y_valid = np.load('data/interim/subthemes/' + str(label) + '/y_valid.npy')
# self.y_valids.append(y_valid)
print("**Loading the saved subtheme model**")
model = tf.keras.models.load_model('models/Subtheme_Models/' + str(label).lower() + '_model')
# self.models.append(model)
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid, level = 'subtheme')
print("**Saving results**")
            results.to_csv(output_dir + '/tables/subtheme_tables/' + str(label).lower() + '_valid_eval.csv')
print("Process of subtheme", label, "model completed\n")
print("Evaluations saved to reports/tables")
if __name__ == "__main__":
main(opt["--level"], opt["--output_dir"])
| 42.653846
| 175
| 0.625338
|
import pandas as pd
import numpy as np
from docopt import docopt
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, precision_recall_curve
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
opt = docopt(__doc__)
print("\n-----START: model_evaluate.py-----\n")
def main(level, output_dir):
me = model_evaluate()
me.get_evaluations(level=level, output_dir=output_dir)
print('Thanks for your patience, the evaluation process has finished!\n')
print('----END: model_evaluate.py----\n')
return
class model_evaluate:
def eval_metrics(self, model_name, x_valid, y_valid, level='theme'):
pred_values = model_name.predict(x_valid)
if level == 'theme':
precision_dict = dict()
recall_dict = dict()
thresh_dict = dict()
precision_dict["BiGRU + Fasttext"], recall_dict["BiGRU + Fasttext"], thresh_dict["BiGRU + Fasttext"] = precision_recall_curve(y_valid.ravel(), pred_values.ravel())
labels = []
labels = list(precision_dict.keys())
plt.figure()
plt.step(recall_dict['BiGRU + Fasttext'], precision_dict['BiGRU + Fasttext'], where='post', color='orange')
plt.xlabel('Recall', fontsize=18)
plt.ylabel('Precision', fontsize=18)
plt.axhline(y=0.743643, xmin=0, xmax=0.71, ls='--', color="cornflowerblue")
plt.axvline(x=0.705382, ymin=0, ymax=0.71, ls='--', color="cornflowerblue")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(labels, loc=(1.01, .79), prop=dict(size=14))
plt.title('Precision Recall Curves for best performing model', fontsize = 18)
plt.savefig('reports/figures/pr_curve_valid_theme.png')
predictions_results = []
        thresholds = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        for val in thresholds:
            pred = pred_values.copy()
            pred[pred >= val] = 1
            pred[pred < val] = 0
accuracy = accuracy_score(y_valid, pred, normalize=True, sample_weight=None)
precision = precision_score(y_valid, pred, average='micro')
recall = recall_score(y_valid, pred, average='micro')
f1 = f1_score(y_valid, pred, average='micro')
case= {'Threshold': val,
'Accuracy': accuracy,
'Precision': precision,
'Recall': recall,
'F1-measure': f1}
predictions_results.append(case)
return pd.DataFrame(predictions_results)
def get_evaluations(self, level, output_dir):
if level == 'theme':
print("**Loading data**")
x_valid = np.load('data/interim/question1_models/advance/X_valid_padded.npy')
y_valid = np.load('data/interim/question1_models/advance/y_valid.npy')
print("**Loading the saved theme model**")
model = tf.keras.models.load_model('models/Theme_Model/theme_model')
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid)
print("**Saving results**")
results.to_csv(output_dir + '/tables/theme_tables/theme_valid_eval.csv')
print("Evaluations saved to reports/")
else:
print("Loading data and evaluating the subthemes model on validation set")
themes = ['CPD', 'CB', 'EWC', 'Exec', 'FWE',
'SP', 'RE', 'Sup', 'SW', 'TEPE', 'VMG', 'OTH']
for label in themes:
print("****Label:", label, "****")
print("**Loading data**")
x_valid = np.load('data/interim/subthemes/' + str(label) + '/X_valid_padded.npy')
y_valid = np.load('data/interim/subthemes/' + str(label) + '/y_valid.npy')
print("**Loading the saved subtheme model**")
model = tf.keras.models.load_model('models/Subtheme_Models/' + str(label).lower() + '_model')
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid, level = 'subtheme')
print("**Saving results**")
            results.to_csv(output_dir + '/tables/subtheme_tables/' + str(label).lower() + '_valid_eval.csv')
print("Process of subtheme", label, "model completed\n")
print("Evaluations saved to reports/tables")
if __name__ == "__main__":
main(opt["--level"], opt["--output_dir"])
| true
| true
|
f70bccb51593edf285f53782a14711199b469634
| 6,083
|
py
|
Python
|
applications/graph/node2vec/randproj.py
|
aj-prime/lbann
|
a4cf81386b3f43586057b5312192e180b1259add
|
[
"Apache-2.0"
] | null | null | null |
applications/graph/node2vec/randproj.py
|
aj-prime/lbann
|
a4cf81386b3f43586057b5312192e180b1259add
|
[
"Apache-2.0"
] | 5
|
2021-07-15T20:51:21.000Z
|
2022-01-01T03:18:05.000Z
|
applications/graph/node2vec/randproj.py
|
aj-prime/lbann
|
a4cf81386b3f43586057b5312192e180b1259add
|
[
"Apache-2.0"
] | null | null | null |
"""Learn embedding weights with LBANN."""
import argparse
import os.path
import numpy as np
import lbann
import lbann.contrib.launcher
import lbann.contrib.args
import data.data_readers
import model.random_projection
import utils
import utils.graph
import utils.snap
root_dir = os.path.dirname(os.path.realpath(__file__))
# ----------------------------------
# Options
# ----------------------------------
# Command-line arguments
parser = argparse.ArgumentParser()
lbann.contrib.args.add_scheduler_arguments(parser)
parser.add_argument(
'--job-name', action='store', default='lbann_node2vec', type=str,
help='job name', metavar='NAME')
parser.add_argument(
'--graph', action='store', default='youtube', type=str,
help='graph name (see utils.snap.download_graph) or edgelist file',
metavar='NAME')
parser.add_argument(
'--mini-batch-size', action='store', default=256, type=int,
help='mini-batch size (default: 256)', metavar='NUM')
parser.add_argument(
'--num-iterations', action='store', default=1000, type=int,
    help='number of training iterations (default: 1000)', metavar='NUM')
parser.add_argument(
'--proj_dim', action='store', default=1024, type=int,
    help='projection space dimensions (default: 1024)', metavar='NUM')
parser.add_argument(
'--latent-dim', action='store', default=128, type=int,
help='latent space dimensions (default: 128)', metavar='NUM')
parser.add_argument(
'--learning-rate', action='store', default=-1, type=float,
help='learning rate (default: 0.25*mbsize)', metavar='VAL')
parser.add_argument(
'--work-dir', action='store', default=None, type=str,
help='working directory', metavar='DIR')
parser.add_argument(
'--batch-job', action='store_true',
help='submit as batch job')
parser.add_argument(
'--offline-walks', action='store_true',
help='perform random walks offline')
args = parser.parse_args()
# Default learning rate
# Note: Learning rate in original word2vec is 0.025
if args.learning_rate < 0:
args.learning_rate = 0.25 * args.mini_batch_size
# Random walk options
epoch_size = 100 * args.mini_batch_size
walk_length = 100
return_param = 0.25
inout_param = 0.25
num_negative_samples = 0
# ----------------------------------
# Create data reader
# ----------------------------------
# Download graph if needed
if os.path.exists(args.graph):
graph_file = args.graph
else:
graph_file = utils.snap.download_graph(args.graph)
# Construct data reader
if args.offline_walks:
# Note: Graph and walk parameters are fully specified in module
# for offline walks
import data.offline_walks
graph_file = data.offline_walks.graph_file
epoch_size = data.offline_walks.num_samples()
walk_length = data.offline_walks.walk_length
return_param = data.offline_walks.return_param
inout_param = data.offline_walks.inout_param
num_negative_samples = data.offline_walks.num_negative_samples
reader = data.data_readers.make_offline_data_reader()
else:
# Note: Preprocess graph with HavoqGT and store in shared memory
# before starting LBANN.
distributed_graph_file = '/dev/shm/graph'
reader = data.data_readers.make_online_data_reader(
graph_file=distributed_graph_file,
epoch_size=epoch_size,
walk_length=walk_length,
return_param=return_param,
inout_param=inout_param,
num_negative_samples=num_negative_samples,
)
sample_size = num_negative_samples + walk_length
# Parse graph file to get number of vertices
num_vertices = utils.graph.max_vertex_index(graph_file) + 1
# ----------------------------------
# Construct layer graph
# ----------------------------------
obj = []
metrics = []
# Autoencoder
# Note: Input is sequence of vertex IDs
input_ = lbann.Identity(lbann.Input())
proj = model.random_projection.random_projection(
input_,
sample_size,
args.proj_dim,
)
autoencoder = model.random_projection.ChannelwiseFullyConnectedAutoencoder(
args.proj_dim,
args.latent_dim,
[],
)
proj_recon = autoencoder(proj)
# Mean square error loss
scale_decay = 0.5
loss = model.random_projection.mean_squared_error(
data_dim=args.proj_dim,
sequence_length=walk_length,
source_sequence=proj_recon,
target_sequence=proj,
scale_decay=scale_decay,
)
obj.append(loss)
# ----------------------------------
# Run LBANN
# ----------------------------------
# Create optimizer
opt = lbann.SGD(learn_rate=args.learning_rate)
# Create LBANN objects
iterations_per_epoch = utils.ceildiv(epoch_size, args.mini_batch_size)
num_epochs = utils.ceildiv(args.num_iterations, iterations_per_epoch)
trainer = lbann.Trainer(
mini_batch_size=args.mini_batch_size,
num_parallel_readers=0,
)
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackDumpWeights(directory='weights',
epoch_interval=num_epochs),
]
model = lbann.Model(
num_epochs,
layers=lbann.traverse_layer_graph(input_),
objective_function=obj,
metrics=metrics,
callbacks=callbacks,
)
# Create batch script
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
script = lbann.contrib.launcher.make_batch_script(
job_name=args.job_name,
work_dir=args.work_dir,
**kwargs,
)
# Preprocess graph data with HavoqGT if needed
if not args.offline_walks:
ingest_graph_exe = os.path.join(
root_dir,
'build',
'havoqgt',
'src',
'ingest_edge_list',
)
script.add_parallel_command([
ingest_graph_exe,
f'-o {distributed_graph_file}',
f'-d {2**30}',
'-u 1',
graph_file,
])
# LBANN invocation
prototext_file = os.path.join(script.work_dir, 'experiment.prototext')
lbann.proto.save_prototext(
prototext_file,
trainer=trainer,
model=model,
data_reader=reader,
optimizer=opt,
)
script.add_parallel_command([
lbann.lbann_exe(),
f'--prototext={prototext_file}',
    '--num_io_threads=1',
])
# Run LBANN
if args.batch_job:
script.submit(True)
else:
script.run(True)
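# Hedged sketch of the iteration/epoch bookkeeping used above. The _ceildiv
# reimplementation is an assumption about utils.ceildiv, not the actual LBANN
# utility.
def _ceildiv(a, b):
    return -(-a // b)  # ceiling division with integer arithmetic

assert _ceildiv(100 * 256, 256) == 100  # iterations per epoch at the defaults
assert _ceildiv(1000, 100) == 10        # epochs needed for 1000 iterations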
| 28.293023
| 75
| 0.687161
|
import argparse
import os.path
import numpy as np
import lbann
import lbann.contrib.launcher
import lbann.contrib.args
import data.data_readers
import model.random_projection
import utils
import utils.graph
import utils.snap
root_dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
lbann.contrib.args.add_scheduler_arguments(parser)
parser.add_argument(
'--job-name', action='store', default='lbann_node2vec', type=str,
help='job name', metavar='NAME')
parser.add_argument(
'--graph', action='store', default='youtube', type=str,
help='graph name (see utils.snap.download_graph) or edgelist file',
metavar='NAME')
parser.add_argument(
'--mini-batch-size', action='store', default=256, type=int,
help='mini-batch size (default: 256)', metavar='NUM')
parser.add_argument(
'--num-iterations', action='store', default=1000, type=int,
    help='number of training iterations (default: 1000)', metavar='NUM')
parser.add_argument(
'--proj_dim', action='store', default=1024, type=int,
    help='projection space dimensions (default: 1024)', metavar='NUM')
parser.add_argument(
'--latent-dim', action='store', default=128, type=int,
help='latent space dimensions (default: 128)', metavar='NUM')
parser.add_argument(
'--learning-rate', action='store', default=-1, type=float,
help='learning rate (default: 0.25*mbsize)', metavar='VAL')
parser.add_argument(
'--work-dir', action='store', default=None, type=str,
help='working directory', metavar='DIR')
parser.add_argument(
'--batch-job', action='store_true',
help='submit as batch job')
parser.add_argument(
'--offline-walks', action='store_true',
help='perform random walks offline')
args = parser.parse_args()
if args.learning_rate < 0:
args.learning_rate = 0.25 * args.mini_batch_size
epoch_size = 100 * args.mini_batch_size
walk_length = 100
return_param = 0.25
inout_param = 0.25
num_negative_samples = 0
if os.path.exists(args.graph):
graph_file = args.graph
else:
graph_file = utils.snap.download_graph(args.graph)
if args.offline_walks:
import data.offline_walks
graph_file = data.offline_walks.graph_file
epoch_size = data.offline_walks.num_samples()
walk_length = data.offline_walks.walk_length
return_param = data.offline_walks.return_param
inout_param = data.offline_walks.inout_param
num_negative_samples = data.offline_walks.num_negative_samples
reader = data.data_readers.make_offline_data_reader()
else:
distributed_graph_file = '/dev/shm/graph'
reader = data.data_readers.make_online_data_reader(
graph_file=distributed_graph_file,
epoch_size=epoch_size,
walk_length=walk_length,
return_param=return_param,
inout_param=inout_param,
num_negative_samples=num_negative_samples,
)
sample_size = num_negative_samples + walk_length
num_vertices = utils.graph.max_vertex_index(graph_file) + 1
obj = []
metrics = []
input_ = lbann.Identity(lbann.Input())
proj = model.random_projection.random_projection(
input_,
sample_size,
args.proj_dim,
)
autoencoder = model.random_projection.ChannelwiseFullyConnectedAutoencoder(
args.proj_dim,
args.latent_dim,
[],
)
proj_recon = autoencoder(proj)
scale_decay = 0.5
loss = model.random_projection.mean_squared_error(
data_dim=args.proj_dim,
sequence_length=walk_length,
source_sequence=proj_recon,
target_sequence=proj,
scale_decay=scale_decay,
)
obj.append(loss)
opt = lbann.SGD(learn_rate=args.learning_rate)
iterations_per_epoch = utils.ceildiv(epoch_size, args.mini_batch_size)
num_epochs = utils.ceildiv(args.num_iterations, iterations_per_epoch)
trainer = lbann.Trainer(
mini_batch_size=args.mini_batch_size,
num_parallel_readers=0,
)
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackDumpWeights(directory='weights',
epoch_interval=num_epochs),
]
model = lbann.Model(
num_epochs,
layers=lbann.traverse_layer_graph(input_),
objective_function=obj,
metrics=metrics,
callbacks=callbacks,
)
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
script = lbann.contrib.launcher.make_batch_script(
job_name=args.job_name,
work_dir=args.work_dir,
**kwargs,
)
if not args.offline_walks:
ingest_graph_exe = os.path.join(
root_dir,
'build',
'havoqgt',
'src',
'ingest_edge_list',
)
script.add_parallel_command([
ingest_graph_exe,
f'-o {distributed_graph_file}',
f'-d {2**30}',
'-u 1',
graph_file,
])
prototext_file = os.path.join(script.work_dir, 'experiment.prototext')
lbann.proto.save_prototext(
prototext_file,
trainer=trainer,
model=model,
data_reader=reader,
optimizer=opt,
)
script.add_parallel_command([
lbann.lbann_exe(),
f'--prototext={prototext_file}',
    '--num_io_threads=1',
])
if args.batch_job:
script.submit(True)
else:
script.run(True)
| true
| true
|
f70bcdde6c2b7e2b286a382114dbe03ea56ebf57
| 3,005
|
py
|
Python
|
tests/lib/bes/testing/framework/test_unit_test_inspect.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/testing/framework/test_unit_test_inspect.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/testing/framework/test_unit_test_inspect.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os.path as path
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.testing.unit_test import unit_test
from bes.testing.framework import unit_test_inspect as UTI
from bes.testing.unit_test_skip import raise_skip
class test_unit_test_inspect(unit_test):
@classmethod
def setUpClass(clazz):
raise_skip('broken')
def test_inspect_file(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_not_unit_test(self):
content = '''
class test_apple_fixture(object):
def test_foo(self):
pass
def test_bar(self):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [], UTI.inspect_file(filename) )
file_util.remove(filename)
  def test_inspect_file_disabled(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def xtest_foo(self):
self.assertEqual( 6, 3 + 3 )
def xtest_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def doesnt_work_test_inspect_file_TestCase_subclass(self):
content = '''
import unittest
class unit_super(unittest.TestCase):
_x = 5
class test_apple_fixture(unit_super):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
class something(unittest.TestCase):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_unit_test(self):
content = '''
from bes.testing.unit_test import unit_test
class test_apple_fixture(unit_test):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
if __name__ == '__main__':
unit_test.main()
| 26.130435
| 90
| 0.671215
|
import os.path as path
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.testing.unit_test import unit_test
from bes.testing.framework import unit_test_inspect as UTI
from bes.testing.unit_test_skip import raise_skip
class test_unit_test_inspect(unit_test):
@classmethod
def setUpClass(clazz):
raise_skip('broken')
def test_inspect_file(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_not_unit_test(self):
content = '''
class test_apple_fixture(object):
def test_foo(self):
pass
def test_bar(self):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [], UTI.inspect_file(filename) )
file_util.remove(filename)
  def test_inspect_file_disabled(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def xtest_foo(self):
self.assertEqual( 6, 3 + 3 )
def xtest_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def doesnt_work_test_inspect_file_TestCase_subclass(self):
content = '''
import unittest
class unit_super(unittest.TestCase):
_x = 5
class test_apple_fixture(unit_super):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
class something(unittest.TestCase):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_unit_test(self):
content = '''
from bes.testing.unit_test import unit_test
class test_apple_fixture(unit_test):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
if __name__ == '__main__':
unit_test.main()
| true
| true
|
f70bcdea0c2a415ada92517635d009ce45a9da67
| 840
|
py
|
Python
|
vcx/wrappers/python3/aries-test-server/inviter.py
|
sklump/indy-sdk
|
ee05a89ddf60b42f7483bebf2d89a936e12730df
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
vcx/wrappers/python3/aries-test-server/inviter.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
vcx/wrappers/python3/aries-test-server/inviter.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
import json
from vcx.api.connection import Connection
from utils import init_vcx, run_coroutine_in_new_loop
from connection import BaseConnection
class Inviter(BaseConnection):
async def start(self):
await init_vcx()
print("Create a connection to alice and print out the invite details")
connection_ = await Connection.create('alice')
await connection_.connect('{"use_public_did": true}')
await connection_.update_state()
details = await connection_.invite_details(False)
print("**invite details**")
print(json.dumps(details))
print("******************")
self.connection_data = await connection_.serialize()
connection_.release()
return json.dumps(details)
def connect(self):
run_coroutine_in_new_loop(self.update_state)
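# Hedged usage sketch (not in the original file): driving Inviter.start from a
# plain asyncio loop. This assumes a configured VCX environment; nothing
# beyond init_vcx is set up here.
import asyncio

async def _demo():
    inviter = Inviter()
    invite_json = await inviter.start()
    print(invite_json)

# asyncio.get_event_loop().run_until_complete(_demo())  # uncomment to run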
| 30
| 78
| 0.679762
|
import json
from vcx.api.connection import Connection
from utils import init_vcx, run_coroutine_in_new_loop
from connection import BaseConnection
class Inviter(BaseConnection):
async def start(self):
await init_vcx()
print("Create a connection to alice and print out the invite details")
connection_ = await Connection.create('alice')
await connection_.connect('{"use_public_did": true}')
await connection_.update_state()
details = await connection_.invite_details(False)
print("**invite details**")
print(json.dumps(details))
print("******************")
self.connection_data = await connection_.serialize()
connection_.release()
return json.dumps(details)
def connect(self):
run_coroutine_in_new_loop(self.update_state)
| true
| true
|
f70bce271d5d7f26a676fc36c142470dd67601e0
| 5,593
|
py
|
Python
|
server.py
|
pgneditor/pgneditor
|
676334e9325a6d48ac6367d35a03fedf44ec2be9
|
[
"MIT"
] | 2
|
2019-07-15T00:52:13.000Z
|
2019-08-04T07:46:56.000Z
|
server.py
|
pgneditor/pgneditor
|
676334e9325a6d48ac6367d35a03fedf44ec2be9
|
[
"MIT"
] | 2
|
2021-02-08T20:48:35.000Z
|
2021-06-01T23:45:13.000Z
|
server.py
|
pgneditor/pgneditor
|
676334e9325a6d48ac6367d35a03fedf44ec2be9
|
[
"MIT"
] | 1
|
2019-08-04T07:47:04.000Z
|
2019-08-04T07:47:04.000Z
|
###################################################################
import logging
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import os.path
import uuid
from os import environ
import json
from tornado.options import define, options
import mimetypes
import random
from tornadose.handlers import EventSource
from tornadose.stores import DataStore
###################################################################
import serverlogic
from utils.file import read_string_from_file
###################################################################
teststore = DataStore()
###################################################################
define("port", default=environ.get("PORT", 5000), help="run on the given port", type=int)
###################################################################
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/gif.worker.js", GifWorker),
(r"/static/.*", MyStaticFileHandler),
(r"/jsonapi", JsonApi),
(r"/importstudy/.*", ImportStudy),
(r"/test", Test),
(r"/docs/.*", Docs),
(r"/chatsocket", ChatSocketHandler),
(r"/testevents", EventSource, {'store': teststore}),
(r"/enginelog", EventSource, {'store': serverlogic.mainenginelog.datastore})
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
#static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=False,
)
super(Application, self).__init__(handlers, **settings)
class GifWorker(tornado.web.RequestHandler):
def get(self):
with open("static/js/gif.worker.js", 'rb') as f:
data = f.read()
self.write(data)
class MyStaticFileHandler(tornado.web.RequestHandler):
def get(self):
path = self.request.path
filepath = path[1:]
if not os.path.isfile(filepath):
self.set_status(404)
return
mimetype = mimetypes.guess_type(path)
if mimetype[0]:
self.set_header("Content-Type", mimetype[0])
with open(filepath, 'rb') as f:
data = f.read()
self.write(data)
class MainHandler(tornado.web.RequestHandler):
def get(self):
#print(self.request.__dict__)
self.render("index.html", messages=ChatSocketHandler.cache)
class JsonApi(tornado.web.RequestHandler):
def post(self):
reqobj = json.loads(self.request.body.decode('utf-8'))
resobj = serverlogic.jsonapi(reqobj)
self.set_header("Content-Type", "application/json")
self.write(json.dumps(resobj))
class ImportStudy(tornado.web.RequestHandler):
def get(self):
path = self.request.path
parts = path.split("/")
paramindex = parts.index("importstudy") + 1
        if (len(parts) - paramindex) < 2:
self.write("too few parameters, usage: /importstudy/[usercode]/[studyid]")
return
usercode = parts[paramindex]
studyid = parts[paramindex + 1]
nodeid = "root"
        if (paramindex + 2) < len(parts):
nodeid = parts[paramindex + 2]
self.redirect(f"/?task=importstudy&usercode={usercode}&studyid={studyid}&nodeid={nodeid}&tab=board&boardtab=tree")
class Test(tornado.web.RequestHandler):
def get(self):
self.write(read_string_from_file("templates/test.html", "test"))
class Docs(tornado.web.RequestHandler):
def get(self):
path = self.request.path
parts = path.split("/")
self.write(read_string_from_file("docs/" + parts[2] + ".md", "Pgn Editor."))
class ChatSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
cache = []
cache_size = 200
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def open(self):
ChatSocketHandler.waiters.add(self)
def on_close(self):
ChatSocketHandler.waiters.remove(self)
@classmethod
def update_cache(cls, chat):
cls.cache.append(chat)
if len(cls.cache) > cls.cache_size:
cls.cache = cls.cache[-cls.cache_size :]
@classmethod
def send_updates(cls, chat):
logging.info("sending message to %d waiters", len(cls.waiters))
for waiter in cls.waiters:
try:
waiter.write_message(chat)
            except Exception:
logging.error("Error sending message", exc_info=True)
def on_message(self, message):
logging.info("got message %r", message)
parsed = tornado.escape.json_decode(message)
chat = {"id": str(uuid.uuid4()), "body": parsed["body"]}
chat["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=chat)
)
ChatSocketHandler.update_cache(chat)
ChatSocketHandler.send_updates(chat)
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.PeriodicCallback(lambda: teststore.submit(random.random()), 1000).start()
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
###################################################################
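# Hedged client-side sketch (not part of server.py): consuming the /testevents
# SSE stream fed by the PeriodicCallback above. The URL assumes the default
# port of 5000; plain `requests` is used to stay dependency-light.
import requests

with requests.get('http://localhost:5000/testevents', stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith('data:'):
            print(line[5:].strip())  # roughly one value per second
###################################################################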
| 33.491018
| 122
| 0.576971
| true
| true
|
|
f70bcec89189e9db37ce50fe1f17d8e2b524dad4
| 3,994
|
py
|
Python
|
nova/tests/functional/regressions/test_bug_1689692.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/regressions/test_bug_1689692.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | 2
|
2015-02-03T06:25:24.000Z
|
2015-02-04T10:10:36.000Z
|
nova/tests/functional/regressions/test_bug_1689692.py
|
bopopescu/nova-8
|
768d7cc0a632e1a880f00c5840c1ec8051e161be
|
[
"Apache-2.0"
] | 7
|
2015-01-20T10:30:08.000Z
|
2020-02-05T10:29:05.000Z
|
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
class ServerListLimitMarkerCell0Test(test.TestCase,
integrated_helpers.InstanceHelperMixin):
"""Regression test for bug 1689692 introduced in Ocata.
    The user specifies a limit which is greater than the number of instances
    left in the page, and the marker starts in the cell0 database. The marker
    is never nulled out, but because there is limit remaining we continue to
    page through the cell database(s); the marker is not found in any of
    them, since it was already consumed in cell0, so a MarkerNotFound error
    is eventually raised.
"""
def setUp(self):
super(ServerListLimitMarkerCell0Test, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
# We have to get the image before we use 2.latest otherwise we'll get
# a 404 on the /images proxy API because of 2.36.
self.image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.api.microversion = 'latest'
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
# We don't start the compute service because we want NoValidHost so
# all of the instances go into ERROR state and get put into cell0.
self.useFixture(cast_as_call.CastAsCall(self.stubs))
def test_list_servers_marker_in_cell0_more_limit(self):
"""Creates three servers, then lists them with a marker on the first
        and a limit of 3, which is more than what is left to page (2); this
        shouldn't fail, it should just return the other two.
"""
# create three test servers
for x in range(3):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test-list-server-limit%i' % x, self.image_id,
networks='none')))
self.addCleanup(self.api.delete_server, server['id'])
self._wait_for_state_change(self.api, server, 'ERROR')
servers = self.api.get_servers()
self.assertEqual(3, len(servers))
        # Take the first server and use that as our marker.
marker = servers[0]['id']
# Since we're paging after the first server as our marker, there are
# only two left so specifying three should just return two.
servers = self.api.get_servers(search_opts=dict(marker=marker,
limit=3))
self.assertEqual(2, len(servers))
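# Toy illustration (plain Python lists, not the Nova API) of the marker/limit
# paging semantics exercised above: with the first item as the marker and a
# limit of 3, only the two remaining items come back.
_servers = ['s1', 's2', 's3']
_marker = _servers[0]
_start = _servers.index(_marker) + 1
assert _servers[_start:_start + 3] == ['s2', 's3']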
| 46.988235
| 79
| 0.686279
|
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
class ServerListLimitMarkerCell0Test(test.TestCase,
integrated_helpers.InstanceHelperMixin):
def setUp(self):
super(ServerListLimitMarkerCell0Test, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
# a 404 on the /images proxy API because of 2.36.
self.image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.api.microversion = 'latest'
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
# We don't start the compute service because we want NoValidHost so
self.useFixture(cast_as_call.CastAsCall(self.stubs))
def test_list_servers_marker_in_cell0_more_limit(self):
for x in range(3):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test-list-server-limit%i' % x, self.image_id,
networks='none')))
self.addCleanup(self.api.delete_server, server['id'])
self._wait_for_state_change(self.api, server, 'ERROR')
servers = self.api.get_servers()
self.assertEqual(3, len(servers))
marker = servers[0]['id']
# only two left so specifying three should just return two.
servers = self.api.get_servers(search_opts=dict(marker=marker,
limit=3))
self.assertEqual(2, len(servers))
| true
| true
|
f70bd07a185c559699661056ded03831035519a5
| 12,204
|
py
|
Python
|
isic_archive/models/segmentation_helpers/scikit.py
|
ImageMarkup/isic-archive
|
7cd8097886d685ec629e2fcba079271fb77d028f
|
[
"Apache-2.0"
] | 42
|
2015-12-12T14:05:46.000Z
|
2022-03-26T15:20:39.000Z
|
isic_archive/models/segmentation_helpers/scikit.py
|
ImageMarkup/isic-archive
|
7cd8097886d685ec629e2fcba079271fb77d028f
|
[
"Apache-2.0"
] | 494
|
2015-07-09T16:14:12.000Z
|
2021-03-09T09:37:36.000Z
|
isic_archive/models/segmentation_helpers/scikit.py
|
ImageMarkup/uda
|
d221af3368baf3a06ecab67e69e9d0077426c8f9
|
[
"Apache-2.0"
] | 12
|
2015-08-20T14:20:48.000Z
|
2020-10-20T01:14:44.000Z
|
import collections
import io
from typing import BinaryIO, Tuple, Union
import warnings
import numpy
import skimage.io
import skimage.measure
import skimage.morphology
import skimage.segmentation
import skimage.transform
from .base import BaseSegmentationHelper
class ScikitSegmentationHelper(BaseSegmentationHelper):
@classmethod
def loadImage(cls, imageDataStream: Union[BinaryIO, str]) -> numpy.ndarray:
"""
Load an image into an RGB array.
:param imageDataStream: A file-like object containing the encoded
(JPEG, etc.) image data or a file path.
:return: A Numpy array with the RGB image data.
"""
imageData = skimage.io.imread(imageDataStream, plugin='pil')
if len(imageData.shape) == 1 and imageData.shape[0] > 1:
# Some images seem to have a 2nd (or 3rd+) layer, which should be ignored
# https://github.com/scikit-image/scikit-image/issues/2154
# The first element within the result should be the main image
imageData = imageData[0]
if len(imageData.shape) == 3 and imageData.shape[2] == 4:
# cv2.floodFill doesn't work correctly with array views, so copy
imageData = imageData[:, :, :3].copy()
return imageData
@classmethod
def writeImage(cls, image, encoding='png', width=None):
if width is not None:
factor = float(width) / image.shape[1]
image = skimage.transform.rescale(image, factor)
imageStream = io.BytesIO()
with warnings.catch_warnings():
# Ignore warnings about low contrast images, as masks are often empty
warnings.filterwarnings('ignore', r'^.* is a low contrast image$', UserWarning)
# The 'pil' plugin is about 40% faster than the default 'imageio' plugin
# The 'pil' plugin uses 'format_str' as an argument, not 'format'
skimage.io.imsave(imageStream, image, plugin='pil', format_str=encoding)
imageStream.seek(0)
return imageStream
@classmethod
def segment(cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int
) -> numpy.ndarray:
"""
Do a flood-fill segmentation of an image, yielding a single contiguous region with no holes.
:param image: A Numpy array with the image to be segmented.
:param seedCoord: (X, Y) coordinates of the segmentation seed point.
:param tolerance: The intensity tolerance value for the segmentation.
:return: The mask image of the segmented region, with values 0 or 255.
"""
maskImage = cls._floodFill(
image,
seedCoord,
tolerance)
# Now, fill in any holes in the maskImage
# First, add a padded border, allowing the next operation to reach
# around edge-touching components
maskImage = numpy.pad(maskImage, 1, 'constant', constant_values=1)
maskImageBackground = cls._floodFill(
maskImage,
# The seed point is a part of the padded border of maskImage
seedCoord=(0, 0),
# The seed point and border will have a value of 1, but we want to
# also include the actual mask background, which has a value of 0
tolerance=1)
# Remove the extra padding
maskImageBackground = maskImageBackground[1:-1, 1:-1]
# Flip the background, to get the mask with holes removed
maskImage = numpy.invert(maskImageBackground)
return maskImage
@classmethod
def _clippedAdd(cls, array, value):
typeInfo = numpy.iinfo(array.dtype)
newArray = array.astype(int)
newArray += value
return newArray.clip(typeInfo.min, typeInfo.max).astype(array.dtype)
@classmethod
def _floodFill(
cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int,
connectivity: int = 8) -> numpy.ndarray:
"""
        Segment an image into a region connected to a seed point, using Scikit-Image.
:param image: The image to be segmented.
:param seedCoord: The point inside the connected region where the
segmentation will start.
:param tolerance: The maximum color/intensity difference between the
seed point and a point in the connected region.
:param connectivity: (optional) The number of allowed connectivity
propagation directions. Allowed values are:
* 4 for edge pixels
* 8 for edge and corner pixels
        :returns: A binary label mask of numpy.uint8, with values 0 or 255.
"""
seedValue = image[seedCoord[1], seedCoord[0]]
seedValueMin = cls._clippedAdd(seedValue, -tolerance)
seedValueMax = cls._clippedAdd(seedValue, tolerance)
if connectivity == 4:
connectivityArg = 1
elif connectivity == 8:
connectivityArg = 2
else:
raise ValueError('Unknown connectivity value.')
binaryImage = numpy.logical_and(
image >= seedValueMin,
image <= seedValueMax
)
if len(image.shape) == 3:
# Reduce RGB components, requiring all to be within threshold
binaryImage = numpy.all(binaryImage, 2)
labelImage = skimage.measure.label(
binaryImage.astype(int),
return_num=False,
connectivity=connectivityArg
)
del binaryImage
maskImage = numpy.equal(
labelImage, labelImage[seedCoord[1], seedCoord[0]])
del labelImage
maskImage = maskImage.astype(numpy.uint8) * 255
return maskImage
@classmethod
def _structuringElement(cls, shape, radius, elementType=bool):
size = (radius * 2) + 1
if shape == 'circle':
element = skimage.morphology.disk(radius, elementType)
elif shape == 'cross':
element = numpy.zeros((size, size), elementType)
element[:, size // 2] = elementType(True)
element[size // 2, :] = elementType(True)
elif shape == 'square':
element = skimage.morphology.square(size, elementType)
else:
raise ValueError('Unknown element shape value.')
return element
@classmethod
def _binaryOpening(cls, image, elementShape='circle', elementRadius=5):
element = cls._structuringElement(elementShape, elementRadius, bool)
morphedImage = skimage.morphology.binary_opening(
image=image,
selem=element
)
return morphedImage
@classmethod
def _collapseCoords(cls, coords):
collapsedCoords = [coords[0]]
collapsedCoords.extend([
coord
for prevCoord, coord, nextCoord in zip(
coords[0:], coords[1:], coords[2:])
if numpy.cross(nextCoord - prevCoord, coord - prevCoord) != 0
])
collapsedCoords.append(coords[-1])
collapsedCoords = numpy.array(collapsedCoords)
return collapsedCoords
@classmethod
def maskToContour(cls, maskImage: numpy.ndarray) -> numpy.ndarray:
"""
Extract the contour line within a segmented label mask, using Scikit-Image.
:param maskImage: A binary label mask of numpy.uint8.
:return: An array of point pairs.
"""
if maskImage.dtype != numpy.uint8:
raise TypeError('maskImage must be an array of uint8.')
coords = skimage.measure.find_contours(
# TODO: threshold image more efficiently
array=maskImage.astype(bool).astype(numpy.double),
level=0.5,
fully_connected='low',
positive_orientation='low'
)
coords = numpy.fliplr(coords[0])
coords = cls._collapseCoords(coords)
return coords
@classmethod
def contourToMask(cls, imageShape: Tuple[int, int], coords: numpy.ndarray) -> numpy.ndarray:
"""
Convert a contour line to a label mask.
:param imageShape: The [Y, X] shape of the image.
:param coords: An array of point pairs.
:return: A binary label mask of numpy.uint8.
"""
maskImage = skimage.measure.grid_points_in_poly(
shape=imageShape,
verts=numpy.fliplr(coords)
).astype(numpy.uint8)
maskImage *= 255
return maskImage
@classmethod
def _slic(cls, image, numSegments=None, segmentSize=None):
compactness = 0.01 # make superpixels highly deformable
maxIter = 10
sigma = 2.0
if numSegments and segmentSize:
raise ValueError(
'Only one of numSegments or segmentSize may be set.')
elif numSegments:
pass
elif segmentSize:
numSegments = (image.shape[0] * image.shape[1]) / (segmentSize ** 2)
else:
raise ValueError('One of numSegments or segmentSize must be set.')
labelImage = skimage.segmentation.slic(
image,
n_segments=numSegments,
compactness=compactness,
max_iter=maxIter,
sigma=sigma,
enforce_connectivity=True,
min_size_factor=0.5,
slic_zero=True
)
return labelImage
class _PersistentCounter(object):
def __init__(self):
self.value = 0
def __call__(self):
ret = self.value
self.value += 1
return ret
@classmethod
def _uint64ToRGB(cls, val):
return numpy.dstack((
val.astype(numpy.uint8),
(val >> numpy.uint64(8)).astype(numpy.uint8),
(val >> numpy.uint64(16)).astype(numpy.uint8)
))
@classmethod
def _RGBTounit64(cls, val: numpy.ndarray) -> numpy.ndarray:
"""
Decode an RGB representation of a superpixel label into its native scalar value.
:param val: A single pixel, or a 3-channel image.
This is an numpy.ndarray of uint8, with a shape [3] or [n, m, 3].
"""
return \
(val[..., 0].astype(numpy.uint64)) + \
(val[..., 1].astype(numpy.uint64) << numpy.uint64(8)) + \
(val[..., 2].astype(numpy.uint64) << numpy.uint64(16))
@classmethod
def superpixels(cls, image):
superpixelLabels = cls._slic(image, numSegments=1000)
superpixels = cls._uint64ToRGB(superpixelLabels)
return superpixels
@classmethod
def superpixels_legacy(cls, image, coords):
maskImage = cls.contourToMask(image.shape[:2], coords)
from .opencv import OpenCVSegmentationHelper
# This operation is much faster in OpenCV
maskImage = OpenCVSegmentationHelper._binaryOpening(
maskImage.astype(numpy.uint8),
elementShape='circle',
elementRadius=5
).astype(bool)
insideImage = image.copy()
insideImage[numpy.logical_not(maskImage)] = 0
insideSuperpixelLabels = cls._slic(insideImage, segmentSize=20)
outsideImage = image.copy()
outsideImage[maskImage] = 0
outsideSuperpixelLabels = cls._slic(outsideImage, segmentSize=60)
# https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays
insideSuperpixelMask = numpy.in1d(
insideSuperpixelLabels.flat,
numpy.unique(insideSuperpixelLabels[maskImage])
).reshape(insideSuperpixelLabels.shape)
combinedSuperpixelLabels = outsideSuperpixelLabels.copy()
combinedSuperpixelLabels[insideSuperpixelMask] = \
insideSuperpixelLabels[insideSuperpixelMask] + \
outsideSuperpixelLabels.max() + 10000
labelValues = collections.defaultdict(cls._PersistentCounter())
for value in numpy.nditer(combinedSuperpixelLabels,
op_flags=['readwrite']):
value[...] = labelValues[value.item()]
combinedSuperpixels = cls._uint64ToRGB(combinedSuperpixelLabels)
return combinedSuperpixels
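# Standalone sketch (not part of the class above) of the 24-bit label packing
# implemented by _uint64ToRGB/_RGBTounit64: each superpixel label is split
# into three little-endian bytes stored in the R, G and B channels, which is
# lossless for labels below 2**24.
import numpy as np

_labels = np.array([[0, 1], [256, 70000]], dtype=np.uint64)
_rgb = np.dstack((
    _labels.astype(np.uint8),                     # low byte  -> R
    (_labels >> np.uint64(8)).astype(np.uint8),   # mid byte  -> G
    (_labels >> np.uint64(16)).astype(np.uint8),  # high byte -> B
))
_decoded = (_rgb[..., 0].astype(np.uint64)
            + (_rgb[..., 1].astype(np.uint64) << np.uint64(8))
            + (_rgb[..., 2].astype(np.uint64) << np.uint64(16)))
assert np.array_equal(_decoded, _labels)  # exact round trip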
| 37.207317
| 100
| 0.619879
|
import collections
import io
from typing import BinaryIO, Tuple, Union
import warnings
import numpy
import skimage.io
import skimage.measure
import skimage.morphology
import skimage.segmentation
import skimage.transform
from .base import BaseSegmentationHelper
class ScikitSegmentationHelper(BaseSegmentationHelper):
@classmethod
def loadImage(cls, imageDataStream: Union[BinaryIO, str]) -> numpy.ndarray:
imageData = skimage.io.imread(imageDataStream, plugin='pil')
if len(imageData.shape) == 1 and imageData.shape[0] > 1:
imageData = imageData[0]
if len(imageData.shape) == 3 and imageData.shape[2] == 4:
imageData = imageData[:, :, :3].copy()
return imageData
@classmethod
def writeImage(cls, image, encoding='png', width=None):
if width is not None:
factor = float(width) / image.shape[1]
image = skimage.transform.rescale(image, factor)
imageStream = io.BytesIO()
with warnings.catch_warnings():
# Ignore warnings about low contrast images, as masks are often empty
warnings.filterwarnings('ignore', r'^.* is a low contrast image$', UserWarning)
# The 'pil' plugin is about 40% faster than the default 'imageio' plugin
# The 'pil' plugin uses 'format_str' as an argument, not 'format'
skimage.io.imsave(imageStream, image, plugin='pil', format_str=encoding)
imageStream.seek(0)
return imageStream
@classmethod
def segment(cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int
) -> numpy.ndarray:
maskImage = cls._floodFill(
image,
seedCoord,
tolerance)
# Now, fill in any holes in the maskImage
# First, add a padded border, allowing the next operation to reach
# around edge-touching components
maskImage = numpy.pad(maskImage, 1, 'constant', constant_values=1)
maskImageBackground = cls._floodFill(
maskImage,
# The seed point is a part of the padded border of maskImage
seedCoord=(0, 0),
# The seed point and border will have a value of 1, but we want to
# also include the actual mask background, which has a value of 0
tolerance=1)
# Remove the extra padding
maskImageBackground = maskImageBackground[1:-1, 1:-1]
# Flip the background, to get the mask with holes removed
maskImage = numpy.invert(maskImageBackground)
return maskImage
@classmethod
def _clippedAdd(cls, array, value):
typeInfo = numpy.iinfo(array.dtype)
newArray = array.astype(int)
newArray += value
return newArray.clip(typeInfo.min, typeInfo.max).astype(array.dtype)
@classmethod
def _floodFill(
cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int,
connectivity: int = 8) -> numpy.ndarray:
seedValue = image[seedCoord[1], seedCoord[0]]
seedValueMin = cls._clippedAdd(seedValue, -tolerance)
seedValueMax = cls._clippedAdd(seedValue, tolerance)
if connectivity == 4:
connectivityArg = 1
elif connectivity == 8:
connectivityArg = 2
else:
raise ValueError('Unknown connectivity value.')
binaryImage = numpy.logical_and(
image >= seedValueMin,
image <= seedValueMax
)
if len(image.shape) == 3:
# Reduce RGB components, requiring all to be within threshold
binaryImage = numpy.all(binaryImage, 2)
labelImage = skimage.measure.label(
binaryImage.astype(int),
return_num=False,
connectivity=connectivityArg
)
del binaryImage
maskImage = numpy.equal(
labelImage, labelImage[seedCoord[1], seedCoord[0]])
del labelImage
maskImage = maskImage.astype(numpy.uint8) * 255
return maskImage
@classmethod
def _structuringElement(cls, shape, radius, elementType=bool):
size = (radius * 2) + 1
if shape == 'circle':
element = skimage.morphology.disk(radius, elementType)
elif shape == 'cross':
element = numpy.zeros((size, size), elementType)
element[:, size // 2] = elementType(True)
element[size // 2, :] = elementType(True)
elif shape == 'square':
element = skimage.morphology.square(size, elementType)
else:
raise ValueError('Unknown element shape value.')
return element
@classmethod
def _binaryOpening(cls, image, elementShape='circle', elementRadius=5):
element = cls._structuringElement(elementShape, elementRadius, bool)
morphedImage = skimage.morphology.binary_opening(
image=image,
selem=element
)
return morphedImage
@classmethod
def _collapseCoords(cls, coords):
collapsedCoords = [coords[0]]
collapsedCoords.extend([
coord
for prevCoord, coord, nextCoord in zip(
coords[0:], coords[1:], coords[2:])
if numpy.cross(nextCoord - prevCoord, coord - prevCoord) != 0
])
collapsedCoords.append(coords[-1])
collapsedCoords = numpy.array(collapsedCoords)
return collapsedCoords
@classmethod
def maskToContour(cls, maskImage: numpy.ndarray) -> numpy.ndarray:
if maskImage.dtype != numpy.uint8:
raise TypeError('maskImage must be an array of uint8.')
coords = skimage.measure.find_contours(
# TODO: threshold image more efficiently
array=maskImage.astype(bool).astype(numpy.double),
level=0.5,
fully_connected='low',
positive_orientation='low'
)
coords = numpy.fliplr(coords[0])
coords = cls._collapseCoords(coords)
return coords
@classmethod
def contourToMask(cls, imageShape: Tuple[int, int], coords: numpy.ndarray) -> numpy.ndarray:
maskImage = skimage.measure.grid_points_in_poly(
shape=imageShape,
verts=numpy.fliplr(coords)
).astype(numpy.uint8)
maskImage *= 255
return maskImage
@classmethod
def _slic(cls, image, numSegments=None, segmentSize=None):
compactness = 0.01 # make superpixels highly deformable
maxIter = 10
sigma = 2.0
if numSegments and segmentSize:
raise ValueError(
'Only one of numSegments or segmentSize may be set.')
elif numSegments:
pass
elif segmentSize:
numSegments = (image.shape[0] * image.shape[1]) / (segmentSize ** 2)
else:
raise ValueError('One of numSegments or segmentSize must be set.')
labelImage = skimage.segmentation.slic(
image,
n_segments=numSegments,
compactness=compactness,
max_iter=maxIter,
sigma=sigma,
enforce_connectivity=True,
min_size_factor=0.5,
slic_zero=True
)
return labelImage
class _PersistentCounter(object):
def __init__(self):
self.value = 0
def __call__(self):
ret = self.value
self.value += 1
return ret
@classmethod
def _uint64ToRGB(cls, val):
return numpy.dstack((
val.astype(numpy.uint8),
(val >> numpy.uint64(8)).astype(numpy.uint8),
(val >> numpy.uint64(16)).astype(numpy.uint8)
))
@classmethod
def _RGBTounit64(cls, val: numpy.ndarray) -> numpy.ndarray:
return \
(val[..., 0].astype(numpy.uint64)) + \
(val[..., 1].astype(numpy.uint64) << numpy.uint64(8)) + \
(val[..., 2].astype(numpy.uint64) << numpy.uint64(16))
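    # Illustrative note (added): _uint64ToRGB/_RGBTounit64 pack the low 24
    # bits of a label into three uint8 channels, little-endian: R holds bits
    # 0-7, G bits 8-15, B bits 16-23. The round trip is lossless for labels
    # below 2**24.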
@classmethod
def superpixels(cls, image):
superpixelLabels = cls._slic(image, numSegments=1000)
superpixels = cls._uint64ToRGB(superpixelLabels)
return superpixels
@classmethod
def superpixels_legacy(cls, image, coords):
maskImage = cls.contourToMask(image.shape[:2], coords)
from .opencv import OpenCVSegmentationHelper
# This operation is much faster in OpenCV
maskImage = OpenCVSegmentationHelper._binaryOpening(
maskImage.astype(numpy.uint8),
elementShape='circle',
elementRadius=5
).astype(bool)
insideImage = image.copy()
insideImage[numpy.logical_not(maskImage)] = 0
insideSuperpixelLabels = cls._slic(insideImage, segmentSize=20)
outsideImage = image.copy()
outsideImage[maskImage] = 0
outsideSuperpixelLabels = cls._slic(outsideImage, segmentSize=60)
# https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays
insideSuperpixelMask = numpy.in1d(
insideSuperpixelLabels.flat,
numpy.unique(insideSuperpixelLabels[maskImage])
).reshape(insideSuperpixelLabels.shape)
combinedSuperpixelLabels = outsideSuperpixelLabels.copy()
combinedSuperpixelLabels[insideSuperpixelMask] = \
insideSuperpixelLabels[insideSuperpixelMask] + \
outsideSuperpixelLabels.max() + 10000
labelValues = collections.defaultdict(cls._PersistentCounter())
for value in numpy.nditer(combinedSuperpixelLabels,
op_flags=['readwrite']):
value[...] = labelValues[value.item()]
combinedSuperpixels = cls._uint64ToRGB(combinedSuperpixelLabels)
return combinedSuperpixels
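
# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (added; not part of the original module). It
# verifies the little-endian 24-bit label packing used by _uint64ToRGB and
# _RGBTounit64 above, using numpy only so it does not depend on the enclosing
# helper class.
if __name__ == '__main__':
    labels = numpy.arange(5, dtype=numpy.uint64) * numpy.uint64(70000)
    # Pack the low 24 bits of each label into three uint8 channels.
    rgb = numpy.dstack((
        labels.astype(numpy.uint8),
        (labels >> numpy.uint64(8)).astype(numpy.uint8),
        (labels >> numpy.uint64(16)).astype(numpy.uint8),
    ))
    # Unpack and confirm the round trip is lossless for labels < 2**24.
    unpacked = (
        rgb[..., 0].astype(numpy.uint64)
        + (rgb[..., 1].astype(numpy.uint64) << numpy.uint64(8))
        + (rgb[..., 2].astype(numpy.uint64) << numpy.uint64(16))
    )
    assert numpy.array_equal(unpacked.ravel(), labels)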
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f70bd2041fc86759e5bde0e6cb049a727675d6f9
size: 13026
ext: py
lang: Python
max_stars_repo_path: mmdet/models/backbones/res2net.py
max_stars_repo_name: yypurpose/mmdetection
max_stars_repo_head_hexsha: ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: mmdet/models/backbones/res2net.py
max_issues_repo_name: yypurpose/mmdetection
max_issues_repo_head_hexsha: ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mmdet/models/backbones/res2net.py
max_forks_repo_name: yypurpose/mmdetection
max_forks_repo_head_hexsha: ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
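        # Added note: with the defaults (base_width=26, base_channels=64) and
        # planes=64, width is 26, so conv1 expands to width * scales = 104
        # channels that are then split across the `scales` branches.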
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
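        # Added note: the single conv2/norm2 inherited from Bottleneck is
        # replaced by the multi-branch self.convs/self.bns above, so the
        # unused attributes are deleted.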
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
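            # Added note: split the conv1 output into `scales` groups of
            # `width` channels; each group after the first is summed with the
            # previous branch output before its 3x3 conv, giving the
            # hierarchical receptive fields of Res2Net.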
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Res2Layer(nn.Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
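            # Added note: AvgPool2d followed by a 1x1 conv implements the
            # avg_down-style shortcut, avoiding the spatial aliasing of a
            # strided 1x1 convolution.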
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch', deep_stem=True, avg_down=True, **kwargs)
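        # Added note: style, deep_stem and avg_down are accepted for API
        # compatibility but intentionally pinned to the Res2Net defaults in
        # the super() call above.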
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottle2neck):
# dcn in Res2Net bottle2neck is in ModuleList
for n in m.convs:
if hasattr(n, 'conv_offset'):
constant_init(n.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottle2neck):
constant_init(m.norm3, 0)
else:
raise TypeError('pretrained must be a str or None')
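
# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original file): a minimal
# mmdetection-style config fragment selecting this backbone. The field names
# follow the Res2Net/ResNet arguments documented above; surrounding detector
# settings are omitted and this dict is illustrative only.
res2net_backbone = dict(
    type='Res2Net',
    depth=50,
    scales=4,
    base_width=26,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    norm_cfg=dict(type='BN', requires_grad=True),
    norm_eval=True)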
avg_line_length: 37.005682
max_line_length: 80
alphanum_fraction: 0.513511
content_no_comment: [same module as content above, with comments and docstrings removed]
is_comment_constant_removed: true
is_sharp_comment_removed: true