body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
6581742dcbbb916144c6dde05e69912a0330bda22917043ecd8e6a679bf5d75e | @property
def status(self):
'Return the process status as a constant\n\n - RUNNING\n - DEAD_OR_ZOMBIE\n - UNEXISTING\n - OTHER\n '
try:
if (self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD)):
return DEAD_OR_ZOMBIE
except NoSuchProcess:
return UNEXISTING
if self._worker.is_running():
return RUNNING
return OTHER | Return the process status as a constant
- RUNNING
- DEAD_OR_ZOMBIE
- UNEXISTING
- OTHER | circus/process.py | status | cdgz/circus | 0 | python | @property
def status(self):
'Return the process status as a constant\n\n - RUNNING\n - DEAD_OR_ZOMBIE\n - UNEXISTING\n - OTHER\n '
try:
if (self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD)):
return DEAD_OR_ZOMBIE
except NoSuchProcess:
return UNEXISTING
if self._worker.is_running():
return RUNNING
return OTHER | @property
def status(self):
'Return the process status as a constant\n\n - RUNNING\n - DEAD_OR_ZOMBIE\n - UNEXISTING\n - OTHER\n '
try:
if (self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD)):
return DEAD_OR_ZOMBIE
except NoSuchProcess:
return UNEXISTING
if self._worker.is_running():
return RUNNING
return OTHER<|docstring|>Return the process status as a constant
- RUNNING
- DEAD_OR_ZOMBIE
- UNEXISTING
- OTHER<|endoftext|> |
52fd1c878a7e0503a0dd8e161e72751ceef7ec008a4142094120508e11d5b7ca | @property
def pid(self):
'Return the *pid*'
return self._worker.pid | Return the *pid* | circus/process.py | pid | cdgz/circus | 0 | python | @property
def pid(self):
return self._worker.pid | @property
def pid(self):
return self._worker.pid<|docstring|>Return the *pid*<|endoftext|> |
cf0703f3e3072761c1564c36378a432980ce456c84f17ada62c64f6ed0f70332 | @property
def stdout(self):
'Return the *stdout* stream'
return self._worker.stdout | Return the *stdout* stream | circus/process.py | stdout | cdgz/circus | 0 | python | @property
def stdout(self):
return self._worker.stdout | @property
def stdout(self):
return self._worker.stdout<|docstring|>Return the *stdout* stream<|endoftext|> |
90f45cda9cce848493ef34bd329cf854a4ddc7b2e8bae5a6e8d7d516a0124b53 | @property
def stderr(self):
'Return the *stdout* stream'
return self._worker.stderr | Return the *stdout* stream | circus/process.py | stderr | cdgz/circus | 0 | python | @property
def stderr(self):
return self._worker.stderr | @property
def stderr(self):
return self._worker.stderr<|docstring|>Return the *stdout* stream<|endoftext|> |
b0a7a39fd453ae078c57291ddd1ee42cc3b173bd8b327f0ba94360340f839ce8 | def call_func_in_py(func):
" Call a function and capture it's stdout.\n "
loop.integrate(reset=True)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
fake_stdout = FakeStream()
sys.stdout = sys.stderr = fake_stdout
try:
func()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
loop.reset()
return fake_stdout.getvalue().rstrip() | Call a function and capture it's stdout. | flexx/event/both_tester.py | call_func_in_py | levinbgu/flexx | 1,662 | python | def call_func_in_py(func):
" \n "
loop.integrate(reset=True)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
fake_stdout = FakeStream()
sys.stdout = sys.stderr = fake_stdout
try:
func()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
loop.reset()
return fake_stdout.getvalue().rstrip() | def call_func_in_py(func):
" \n "
loop.integrate(reset=True)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
fake_stdout = FakeStream()
sys.stdout = sys.stderr = fake_stdout
try:
func()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
loop.reset()
return fake_stdout.getvalue().rstrip()<|docstring|>Call a function and capture it's stdout.<|endoftext|> |
c5016f1666d59f6745dcad94440a35097d328fb4c7fd9f8b4d7787d6f930add8 | def smart_compare(func, *comparations):
' Compare multiple text-pairs, raising an error that shows where\n the texts differ for each of the mismatching pairs.\n Each comparison should be (name, text, reference).\n '
err_msgs = []
has_errors = False
for comp in comparations:
err_msg = validate_text(*comp)
if err_msg:
has_errors = True
err_msgs.append(err_msg)
else:
err_msgs.append((((' ' * 8) + comp[0]) + ' matches the reference\n'))
if has_errors:
j = (('_' * 79) + '\n')
err_msgs = (([''] + err_msgs) + [''])
t = 'Text mismatch in\nFile "%s", line %i, in %s:\n%s'
raise StdoutMismatchError((t % (func.__code__.co_filename, func.__code__.co_firstlineno, func.__name__, j.join(err_msgs)))) | Compare multiple text-pairs, raising an error that shows where
the texts differ for each of the mismatching pairs.
Each comparison should be (name, text, reference). | flexx/event/both_tester.py | smart_compare | levinbgu/flexx | 1,662 | python | def smart_compare(func, *comparations):
' Compare multiple text-pairs, raising an error that shows where\n the texts differ for each of the mismatching pairs.\n Each comparison should be (name, text, reference).\n '
err_msgs = []
has_errors = False
for comp in comparations:
err_msg = validate_text(*comp)
if err_msg:
has_errors = True
err_msgs.append(err_msg)
else:
err_msgs.append((((' ' * 8) + comp[0]) + ' matches the reference\n'))
if has_errors:
j = (('_' * 79) + '\n')
err_msgs = (([] + err_msgs) + [])
t = 'Text mismatch in\nFile "%s", line %i, in %s:\n%s'
raise StdoutMismatchError((t % (func.__code__.co_filename, func.__code__.co_firstlineno, func.__name__, j.join(err_msgs)))) | def smart_compare(func, *comparations):
' Compare multiple text-pairs, raising an error that shows where\n the texts differ for each of the mismatching pairs.\n Each comparison should be (name, text, reference).\n '
err_msgs = []
has_errors = False
for comp in comparations:
err_msg = validate_text(*comp)
if err_msg:
has_errors = True
err_msgs.append(err_msg)
else:
err_msgs.append((((' ' * 8) + comp[0]) + ' matches the reference\n'))
if has_errors:
j = (('_' * 79) + '\n')
err_msgs = (([] + err_msgs) + [])
t = 'Text mismatch in\nFile "%s", line %i, in %s:\n%s'
raise StdoutMismatchError((t % (func.__code__.co_filename, func.__code__.co_firstlineno, func.__name__, j.join(err_msgs))))<|docstring|>Compare multiple text-pairs, raising an error that shows where
the texts differ for each of the mismatching pairs.
Each comparison should be (name, text, reference).<|endoftext|> |
55bd8498257bf4e14b77511e10dc3b324817a4557abf848868293c2c61bfb2f6 | def validate_text(name, text, reference):
' Compare text with a reference. Returns None if they match, and otherwise\n an error message that outlines where they differ.\n '
lines1 = text.split('\n')
lines2 = reference.split('\n')
n = max(len(lines1), len(lines2))
for i in range(len(lines1)):
if lines1[i].startswith(('[E ', '[W ', '[I ')):
lines1[i] = lines1[i].split(']', 1)[(- 1)].lstrip()
while (len(lines1) < n):
lines1.append('')
while (len(lines2) < n):
lines2.append('')
nchars = 35
for i in range(n):
(line1, line2) = (lines1[i], lines2[i])
line1 = line1.lower()
line2 = line2.lower()
if line2.startswith('?'):
equal_enough = (line2[1:].strip() in line1)
else:
equal_enough = (line1 == line2)
if (not equal_enough):
i1 = max(0, (i - 16))
i2 = min(n, (i + 16))
msg = (((((' ' * 8) + name.ljust(nchars)) + ' ') + 'Reference'.ljust(nchars)) + '\n')
for j in range(i1, i2):
linenr = str((j + 1)).rjust(3, '0')
prefix = (' >> ' if (j == i) else ' ')
msg += '{}{} '.format(prefix, linenr)
msg += _zip(_wrap(lines1[j], nchars, 3), _wrap(lines2[j], nchars, 3), 8)
return msg | Compare text with a reference. Returns None if they match, and otherwise
an error message that outlines where they differ. | flexx/event/both_tester.py | validate_text | levinbgu/flexx | 1,662 | python | def validate_text(name, text, reference):
' Compare text with a reference. Returns None if they match, and otherwise\n an error message that outlines where they differ.\n '
lines1 = text.split('\n')
lines2 = reference.split('\n')
n = max(len(lines1), len(lines2))
for i in range(len(lines1)):
if lines1[i].startswith(('[E ', '[W ', '[I ')):
lines1[i] = lines1[i].split(']', 1)[(- 1)].lstrip()
while (len(lines1) < n):
lines1.append()
while (len(lines2) < n):
lines2.append()
nchars = 35
for i in range(n):
(line1, line2) = (lines1[i], lines2[i])
line1 = line1.lower()
line2 = line2.lower()
if line2.startswith('?'):
equal_enough = (line2[1:].strip() in line1)
else:
equal_enough = (line1 == line2)
if (not equal_enough):
i1 = max(0, (i - 16))
i2 = min(n, (i + 16))
msg = (((((' ' * 8) + name.ljust(nchars)) + ' ') + 'Reference'.ljust(nchars)) + '\n')
for j in range(i1, i2):
linenr = str((j + 1)).rjust(3, '0')
prefix = (' >> ' if (j == i) else ' ')
msg += '{}{} '.format(prefix, linenr)
msg += _zip(_wrap(lines1[j], nchars, 3), _wrap(lines2[j], nchars, 3), 8)
return msg | def validate_text(name, text, reference):
' Compare text with a reference. Returns None if they match, and otherwise\n an error message that outlines where they differ.\n '
lines1 = text.split('\n')
lines2 = reference.split('\n')
n = max(len(lines1), len(lines2))
for i in range(len(lines1)):
if lines1[i].startswith(('[E ', '[W ', '[I ')):
lines1[i] = lines1[i].split(']', 1)[(- 1)].lstrip()
while (len(lines1) < n):
lines1.append()
while (len(lines2) < n):
lines2.append()
nchars = 35
for i in range(n):
(line1, line2) = (lines1[i], lines2[i])
line1 = line1.lower()
line2 = line2.lower()
if line2.startswith('?'):
equal_enough = (line2[1:].strip() in line1)
else:
equal_enough = (line1 == line2)
if (not equal_enough):
i1 = max(0, (i - 16))
i2 = min(n, (i + 16))
msg = (((((' ' * 8) + name.ljust(nchars)) + ' ') + 'Reference'.ljust(nchars)) + '\n')
for j in range(i1, i2):
linenr = str((j + 1)).rjust(3, '0')
prefix = (' >> ' if (j == i) else ' ')
msg += '{}{} '.format(prefix, linenr)
msg += _zip(_wrap(lines1[j], nchars, 3), _wrap(lines2[j], nchars, 3), 8)
return msg<|docstring|>Compare text with a reference. Returns None if they match, and otherwise
an error message that outlines where they differ.<|endoftext|> |
be5bca171bb60d0904c17697fe908e370607f482cfc3ce40a5b0254e3355316a | def run_in_both(*classes, js=True, py=True, extra_nodejs_args=None):
' Decorator to run a test in both Python and JS.\n\n The decorator should be provided with any Component classes that\n you want to use in the test.\n\n The function docstring should match the stdout + stderr of the test (case\n insensitive). To provide separate reference outputs for Python and\n JavaScript, use a delimiter of at least 10 \'-\' characters. Use "? xx"\n to test that "xx" is present on a line (useful for logged exceptions).\n '
def wrapper(func):
reference = '\n'.join((line[4:] for line in func.__doc__.splitlines()))
parts = reference.split(('-' * 10))
pyref = parts[0].strip(' \n')
jsref = parts[(- 1)].strip(' \n-')
def runner1():
err = None
try:
return runner2()
except Exception as e:
err = e
if isinstance(err, StdoutMismatchError):
raise StdoutMismatchError(err)
elif isinstance(err, RuntimeError):
raise RuntimeError(err)
else:
raise err
def runner2():
if py:
pyresult = call_func_in_py(func)
pyresult = pyresult.replace('"', "'").replace("\\'", "'")
pyresult = pyresult.split('!!!!')[(- 1)]
pyresult = pyresult.split('old pending sessions\n')[(- 1)]
if js:
jsresult = call_func_in_js(func, classes, extra_nodejs_args)
jsresult = jsresult.replace('\n]', ']').replace('[\n', '[')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('\n ', ' ')
jsresult = jsresult.replace(', ', ', ').replace(', ', ', ')
jsresult = jsresult.replace('\n}', '}')
jsresult = jsresult.replace('"', "'").split('!!!!')[(- 1)]
jsresult = jsresult.replace('null', 'None')
args = [func]
if py:
args.append(('Python', pyresult, pyref))
if js:
args.append(('JavaScript', jsresult, jsref))
smart_compare(*args)
print(func.__name__, 'ok')
return True
return runner1
return wrapper | Decorator to run a test in both Python and JS.
The decorator should be provided with any Component classes that
you want to use in the test.
The function docstring should match the stdout + stderr of the test (case
insensitive). To provide separate reference outputs for Python and
JavaScript, use a delimiter of at least 10 '-' characters. Use "? xx"
to test that "xx" is present on a line (useful for logged exceptions). | flexx/event/both_tester.py | run_in_both | levinbgu/flexx | 1,662 | python | def run_in_both(*classes, js=True, py=True, extra_nodejs_args=None):
' Decorator to run a test in both Python and JS.\n\n The decorator should be provided with any Component classes that\n you want to use in the test.\n\n The function docstring should match the stdout + stderr of the test (case\n insensitive). To provide separate reference outputs for Python and\n JavaScript, use a delimiter of at least 10 \'-\' characters. Use "? xx"\n to test that "xx" is present on a line (useful for logged exceptions).\n '
def wrapper(func):
reference = '\n'.join((line[4:] for line in func.__doc__.splitlines()))
parts = reference.split(('-' * 10))
pyref = parts[0].strip(' \n')
jsref = parts[(- 1)].strip(' \n-')
def runner1():
err = None
try:
return runner2()
except Exception as e:
err = e
if isinstance(err, StdoutMismatchError):
raise StdoutMismatchError(err)
elif isinstance(err, RuntimeError):
raise RuntimeError(err)
else:
raise err
def runner2():
if py:
pyresult = call_func_in_py(func)
pyresult = pyresult.replace('"', "'").replace("\\'", "'")
pyresult = pyresult.split('!!!!')[(- 1)]
pyresult = pyresult.split('old pending sessions\n')[(- 1)]
if js:
jsresult = call_func_in_js(func, classes, extra_nodejs_args)
jsresult = jsresult.replace('\n]', ']').replace('[\n', '[')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('\n ', ' ')
jsresult = jsresult.replace(', ', ', ').replace(', ', ', ')
jsresult = jsresult.replace('\n}', '}')
jsresult = jsresult.replace('"', "'").split('!!!!')[(- 1)]
jsresult = jsresult.replace('null', 'None')
args = [func]
if py:
args.append(('Python', pyresult, pyref))
if js:
args.append(('JavaScript', jsresult, jsref))
smart_compare(*args)
print(func.__name__, 'ok')
return True
return runner1
return wrapper | def run_in_both(*classes, js=True, py=True, extra_nodejs_args=None):
' Decorator to run a test in both Python and JS.\n\n The decorator should be provided with any Component classes that\n you want to use in the test.\n\n The function docstring should match the stdout + stderr of the test (case\n insensitive). To provide separate reference outputs for Python and\n JavaScript, use a delimiter of at least 10 \'-\' characters. Use "? xx"\n to test that "xx" is present on a line (useful for logged exceptions).\n '
def wrapper(func):
reference = '\n'.join((line[4:] for line in func.__doc__.splitlines()))
parts = reference.split(('-' * 10))
pyref = parts[0].strip(' \n')
jsref = parts[(- 1)].strip(' \n-')
def runner1():
err = None
try:
return runner2()
except Exception as e:
err = e
if isinstance(err, StdoutMismatchError):
raise StdoutMismatchError(err)
elif isinstance(err, RuntimeError):
raise RuntimeError(err)
else:
raise err
def runner2():
if py:
pyresult = call_func_in_py(func)
pyresult = pyresult.replace('"', "'").replace("\\'", "'")
pyresult = pyresult.split('!!!!')[(- 1)]
pyresult = pyresult.split('old pending sessions\n')[(- 1)]
if js:
jsresult = call_func_in_js(func, classes, extra_nodejs_args)
jsresult = jsresult.replace('\n]', ']').replace('[\n', '[')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('\n ', ' ')
jsresult = jsresult.replace(', ', ', ').replace(', ', ', ')
jsresult = jsresult.replace('\n}', '}')
jsresult = jsresult.replace('"', "'").split('!!!!')[(- 1)]
jsresult = jsresult.replace('null', 'None')
args = [func]
if py:
args.append(('Python', pyresult, pyref))
if js:
args.append(('JavaScript', jsresult, jsref))
smart_compare(*args)
print(func.__name__, 'ok')
return True
return runner1
return wrapper<|docstring|>Decorator to run a test in both Python and JS.
The decorator should be provided with any Component classes that
you want to use in the test.
The function docstring should match the stdout + stderr of the test (case
insensitive). To provide separate reference outputs for Python and
JavaScript, use a delimiter of at least 10 '-' characters. Use "? xx"
to test that "xx" is present on a line (useful for logged exceptions).<|endoftext|> |
a3667fed7683e587749b9600697111e77041e817545344b7ee0b6010d1f2fb3a | @staticmethod
def parse_feed(url, entries=0):
'\n Parses the given url, returns a list containing all available entries\n '
if (1 <= entries <= 10):
feed = feedparser.parse(url)
return feed.entries[:entries]
else:
feed = feedparser.parse(url)
if feed.entries:
return feed.entries[:BotConfig.rss_count]
return None | Parses the given url, returns a list containing all available entries | rss/feedhandler.py | parse_feed | balemessenger/rss_reader_bot | 0 | python | @staticmethod
def parse_feed(url, entries=0):
'\n \n '
if (1 <= entries <= 10):
feed = feedparser.parse(url)
return feed.entries[:entries]
else:
feed = feedparser.parse(url)
if feed.entries:
return feed.entries[:BotConfig.rss_count]
return None | @staticmethod
def parse_feed(url, entries=0):
'\n \n '
if (1 <= entries <= 10):
feed = feedparser.parse(url)
return feed.entries[:entries]
else:
feed = feedparser.parse(url)
if feed.entries:
return feed.entries[:BotConfig.rss_count]
return None<|docstring|>Parses the given url, returns a list containing all available entries<|endoftext|> |
5642481389c7e44f9cb72fb091f820ac8e50cc127fca37d68d69b22f73c4b7be | @staticmethod
def is_parsable(url):
'\n Checks wether the given url provides a news feed. Return True if news are available, else False\n '
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(url)):
return False
feed = feedparser.parse(url)
if (not feed.entries):
return False
for post in feed.entries:
if (not hasattr(post, 'updated')):
return False
return True | Checks wether the given url provides a news feed. Return True if news are available, else False | rss/feedhandler.py | is_parsable | balemessenger/rss_reader_bot | 0 | python | @staticmethod
def is_parsable(url):
'\n \n '
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(url)):
return False
feed = feedparser.parse(url)
if (not feed.entries):
return False
for post in feed.entries:
if (not hasattr(post, 'updated')):
return False
return True | @staticmethod
def is_parsable(url):
'\n \n '
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(url)):
return False
feed = feedparser.parse(url)
if (not feed.entries):
return False
for post in feed.entries:
if (not hasattr(post, 'updated')):
return False
return True<|docstring|>Checks wether the given url provides a news feed. Return True if news are available, else False<|endoftext|> |
1722feb46860091049120d4fff705d2ab6f4c552cf7a3a23f60180800e17fcef | @staticmethod
def format_url_string(string):
'\n Formats a given url as string so it matches http(s)://adress.domain.\n This should be called before parsing the url, to make sure it is parsable\n '
string = string.lower()
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(string)):
string = ('http://' + string)
return string | Formats a given url as string so it matches http(s)://adress.domain.
This should be called before parsing the url, to make sure it is parsable | rss/feedhandler.py | format_url_string | balemessenger/rss_reader_bot | 0 | python | @staticmethod
def format_url_string(string):
'\n Formats a given url as string so it matches http(s)://adress.domain.\n This should be called before parsing the url, to make sure it is parsable\n '
string = string.lower()
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(string)):
string = ('http://' + string)
return string | @staticmethod
def format_url_string(string):
'\n Formats a given url as string so it matches http(s)://adress.domain.\n This should be called before parsing the url, to make sure it is parsable\n '
string = string.lower()
url_pattern = re.compile('((http(s?))):\\/\\/.*')
if (not url_pattern.match(string)):
string = ('http://' + string)
return string<|docstring|>Formats a given url as string so it matches http(s)://adress.domain.
This should be called before parsing the url, to make sure it is parsable<|endoftext|> |
78628f54f62c2d150794e48834f0e7a53421a3009d541648e9b37c13a8dfa735 | def __init__(self, channel: int, logging: bool=True):
'\n :param channel: The ID of the channel.\n :param logging: If True, log messages.\n '
self.channel: int = int(channel)
self.mishnah = loads(Path(resource_filename(__name__, 'data/mishnah.json')).read_text())
self.logging: bool = logging
super().__init__() | :param channel: The ID of the channel.
:param logging: If True, log messages. | mishnabot/bot.py | __init__ | subalterngames/mishnahbot | 0 | python | def __init__(self, channel: int, logging: bool=True):
'\n :param channel: The ID of the channel.\n :param logging: If True, log messages.\n '
self.channel: int = int(channel)
self.mishnah = loads(Path(resource_filename(__name__, 'data/mishnah.json')).read_text())
self.logging: bool = logging
super().__init__() | def __init__(self, channel: int, logging: bool=True):
'\n :param channel: The ID of the channel.\n :param logging: If True, log messages.\n '
self.channel: int = int(channel)
self.mishnah = loads(Path(resource_filename(__name__, 'data/mishnah.json')).read_text())
self.logging: bool = logging
super().__init__()<|docstring|>:param channel: The ID of the channel.
:param logging: If True, log messages.<|endoftext|> |
1e6545b76ae6ce4877b231c16131cd651acf9034cd897fdfdce8e5e88add43c9 | def log(self, message: str) -> None:
'\n Log a message.\n\n :param message: The message.\n '
if self.logging:
with Path(getcwd()).joinpath('log.txt').open('at') as f:
f.write((message + '\n')) | Log a message.
:param message: The message. | mishnabot/bot.py | log | subalterngames/mishnahbot | 0 | python | def log(self, message: str) -> None:
'\n Log a message.\n\n :param message: The message.\n '
if self.logging:
with Path(getcwd()).joinpath('log.txt').open('at') as f:
f.write((message + '\n')) | def log(self, message: str) -> None:
'\n Log a message.\n\n :param message: The message.\n '
if self.logging:
with Path(getcwd()).joinpath('log.txt').open('at') as f:
f.write((message + '\n'))<|docstring|>Log a message.
:param message: The message.<|endoftext|> |
36df627d5c64eca549a73549bfa53e7650d0b14a72aa8310c24e6736557d2688 | def nlist(length):
'\n creates a list (length) long of empty lists.\n This is probably redundant with a built-in python/numpy/scipy function,\n so consider replacing in future.\n Input:\n :param length: number of empty lists in list\n Returns:\n a list of [] x length\n\n '
return [[] for l in range(length)] | creates a list (length) long of empty lists.
This is probably redundant with a built-in python/numpy/scipy function,
so consider replacing in future.
Input:
:param length: number of empty lists in list
Returns:
a list of [] x length | history/nmrmath_old.py | nlist | sametz/nmrtools | 0 | python | def nlist(length):
'\n creates a list (length) long of empty lists.\n This is probably redundant with a built-in python/numpy/scipy function,\n so consider replacing in future.\n Input:\n :param length: number of empty lists in list\n Returns:\n a list of [] x length\n\n '
return [[] for l in range(length)] | def nlist(length):
'\n creates a list (length) long of empty lists.\n This is probably redundant with a built-in python/numpy/scipy function,\n so consider replacing in future.\n Input:\n :param length: number of empty lists in list\n Returns:\n a list of [] x length\n\n '
return [[] for l in range(length)]<|docstring|>creates a list (length) long of empty lists.
This is probably redundant with a built-in python/numpy/scipy function,
so consider replacing in future.
Input:
:param length: number of empty lists in list
Returns:
a list of [] x length<|endoftext|> |
619168d69c44d18cd43e2d3d34181f2890ce7ef60877b59e257c7d7afd46e5d2 | def popcount(n=0):
'\n Computes the popcount (binary Hamming weight) of integer n\n input:\n :param n: an integer\n returns:\n popcount of integer (binary Hamming weight)\n\n '
return bin(n).count('1') | Computes the popcount (binary Hamming weight) of integer n
input:
:param n: an integer
returns:
popcount of integer (binary Hamming weight) | history/nmrmath_old.py | popcount | sametz/nmrtools | 0 | python | def popcount(n=0):
'\n Computes the popcount (binary Hamming weight) of integer n\n input:\n :param n: an integer\n returns:\n popcount of integer (binary Hamming weight)\n\n '
return bin(n).count('1') | def popcount(n=0):
'\n Computes the popcount (binary Hamming weight) of integer n\n input:\n :param n: an integer\n returns:\n popcount of integer (binary Hamming weight)\n\n '
return bin(n).count('1')<|docstring|>Computes the popcount (binary Hamming weight) of integer n
input:
:param n: an integer
returns:
popcount of integer (binary Hamming weight)<|endoftext|> |
9746249c0d8d93e0c33419c1e3bddda7e45a88585449d3fe6b2d994679eff099 | def is_allowed(m=0, n=0):
'\n determines if a transition between two spin states is allowed or forbidden.\n The transition is allowed if one and only one spin (i.e. bit) changes\n input: integers whose binary codes for a spin state\n :param n:\n :param m:\n output: 1 = allowed, 0 = forbidden\n\n '
return (popcount((m ^ n)) == 1) | determines if a transition between two spin states is allowed or forbidden.
The transition is allowed if one and only one spin (i.e. bit) changes
input: integers whose binary codes for a spin state
:param n:
:param m:
output: 1 = allowed, 0 = forbidden | history/nmrmath_old.py | is_allowed | sametz/nmrtools | 0 | python | def is_allowed(m=0, n=0):
'\n determines if a transition between two spin states is allowed or forbidden.\n The transition is allowed if one and only one spin (i.e. bit) changes\n input: integers whose binary codes for a spin state\n :param n:\n :param m:\n output: 1 = allowed, 0 = forbidden\n\n '
return (popcount((m ^ n)) == 1) | def is_allowed(m=0, n=0):
'\n determines if a transition between two spin states is allowed or forbidden.\n The transition is allowed if one and only one spin (i.e. bit) changes\n input: integers whose binary codes for a spin state\n :param n:\n :param m:\n output: 1 = allowed, 0 = forbidden\n\n '
return (popcount((m ^ n)) == 1)<|docstring|>determines if a transition between two spin states is allowed or forbidden.
The transition is allowed if one and only one spin (i.e. bit) changes
input: integers whose binary codes for a spin state
:param n:
:param m:
output: 1 = allowed, 0 = forbidden<|endoftext|> |
ce0bebd128ae58b4c31a40d2405ef5204474e63767db9b14560439b50c572798 | def transition_matrix(n):
'\n Creates a matrix of allowed transitions.\n The integers 0-n, in their binary form, code for a spin state (alpha/beta).\n The (i,j) cells in the matrix indicate whether a transition from spin state\n i to spin state j is allowed or forbidden.\n See the is_allowed function for more information.\n\n input:\n :param n: size of the n,n matrix (i.e. number of possible spin states)\n\n :returns: a transition matrix that can be used to compute the intensity of\n allowed transitions.\n\n '
T = csc_matrix((n, n))
for i in range(n):
for j in range(n):
if is_allowed(i, j):
T[(i, j)] = 1
return T | Creates a matrix of allowed transitions.
The integers 0-n, in their binary form, code for a spin state (alpha/beta).
The (i,j) cells in the matrix indicate whether a transition from spin state
i to spin state j is allowed or forbidden.
See the is_allowed function for more information.
input:
:param n: size of the n,n matrix (i.e. number of possible spin states)
:returns: a transition matrix that can be used to compute the intensity of
allowed transitions. | history/nmrmath_old.py | transition_matrix | sametz/nmrtools | 0 | python | def transition_matrix(n):
'\n Creates a matrix of allowed transitions.\n The integers 0-n, in their binary form, code for a spin state (alpha/beta).\n The (i,j) cells in the matrix indicate whether a transition from spin state\n i to spin state j is allowed or forbidden.\n See the is_allowed function for more information.\n\n input:\n :param n: size of the n,n matrix (i.e. number of possible spin states)\n\n :returns: a transition matrix that can be used to compute the intensity of\n allowed transitions.\n\n '
T = csc_matrix((n, n))
for i in range(n):
for j in range(n):
if is_allowed(i, j):
T[(i, j)] = 1
return T | def transition_matrix(n):
'\n Creates a matrix of allowed transitions.\n The integers 0-n, in their binary form, code for a spin state (alpha/beta).\n The (i,j) cells in the matrix indicate whether a transition from spin state\n i to spin state j is allowed or forbidden.\n See the is_allowed function for more information.\n\n input:\n :param n: size of the n,n matrix (i.e. number of possible spin states)\n\n :returns: a transition matrix that can be used to compute the intensity of\n allowed transitions.\n\n '
T = csc_matrix((n, n))
for i in range(n):
for j in range(n):
if is_allowed(i, j):
T[(i, j)] = 1
return T<|docstring|>Creates a matrix of allowed transitions.
The integers 0-n, in their binary form, code for a spin state (alpha/beta).
The (i,j) cells in the matrix indicate whether a transition from spin state
i to spin state j is allowed or forbidden.
See the is_allowed function for more information.
input:
:param n: size of the n,n matrix (i.e. number of possible spin states)
:returns: a transition matrix that can be used to compute the intensity of
allowed transitions.<|endoftext|> |
34e1a4c148cfc7679ff4d9f3b059fab6a323e140debde02cfcc64cadffb21def | def hamiltonian(freqlist, couplings):
'\n Computes the spin Hamiltonian for spin-1/2 nuclei.\n inputs for n nuclei:\n :param freqlist: a list of frequencies in Hz of length n\n :param couplings: a sparse n x n matrix of coupling constants in Hz\n Returns: a sparse Hamiltonian matrix\n '
nspins = len(freqlist)
print('Defining unit matrices')
sigma_x = csc_matrix(np.matrix([[0, (1 / 2)], [(1 / 2), 0]]))
sigma_y = csc_matrix(np.matrix([[0, ((- 1j) / 2)], [(1j / 2), 0]]))
sigma_z = csc_matrix(np.matrix([[(1 / 2), 0], [0, ((- 1) / 2)]]))
unit = csc_matrix(np.matrix([[1, 0], [0, 1]]))
print('Unit matrices defined')
print('Generating lists of Lx/y/z matrices')
Lx = nlist(nspins)
Ly = nlist(nspins)
Lz = nlist(nspins)
for n in range(nspins):
Lx_current = 1
Ly_current = 1
Lz_current = 1
for k in range(nspins):
if (k == n):
Lx_current = kron(Lx_current, sigma_x)
Ly_current = kron(Ly_current, sigma_y)
Lz_current = kron(Lz_current, sigma_z)
else:
Lx_current = kron(Lx_current, unit)
Ly_current = kron(Ly_current, unit)
Lz_current = kron(Lz_current, unit)
Lx[n] = Lx_current
Ly[n] = Ly_current
Lz[n] = Lz_current
print('Lx/y/z matrices compiled')
print('Calculating Hamiltonian')
H = csc_matrix(((2 ** nspins), (2 ** nspins)))
for n in range(nspins):
H = (H + (freqlist[n] * Lz[n]))
print('Diagonal elements computed')
for n in range(nspins):
for k in range(nspins):
if (n != k):
H += ((couplings[(n, k)] / 2) * (((Lx[n] * Lx[k]) + (Ly[n] * Ly[k])) + (Lz[n] * Lz[k])))
print('Hamiltonian computed')
return H | Computes the spin Hamiltonian for spin-1/2 nuclei.
inputs for n nuclei:
:param freqlist: a list of frequencies in Hz of length n
:param couplings: a sparse n x n matrix of coupling constants in Hz
Returns: a sparse Hamiltonian matrix | history/nmrmath_old.py | hamiltonian | sametz/nmrtools | 0 | python | def hamiltonian(freqlist, couplings):
'\n Computes the spin Hamiltonian for spin-1/2 nuclei.\n inputs for n nuclei:\n :param freqlist: a list of frequencies in Hz of length n\n :param couplings: a sparse n x n matrix of coupling constants in Hz\n Returns: a sparse Hamiltonian matrix\n '
nspins = len(freqlist)
print('Defining unit matrices')
sigma_x = csc_matrix(np.matrix([[0, (1 / 2)], [(1 / 2), 0]]))
sigma_y = csc_matrix(np.matrix([[0, ((- 1j) / 2)], [(1j / 2), 0]]))
sigma_z = csc_matrix(np.matrix([[(1 / 2), 0], [0, ((- 1) / 2)]]))
unit = csc_matrix(np.matrix([[1, 0], [0, 1]]))
print('Unit matrices defined')
print('Generating lists of Lx/y/z matrices')
Lx = nlist(nspins)
Ly = nlist(nspins)
Lz = nlist(nspins)
for n in range(nspins):
Lx_current = 1
Ly_current = 1
Lz_current = 1
for k in range(nspins):
if (k == n):
Lx_current = kron(Lx_current, sigma_x)
Ly_current = kron(Ly_current, sigma_y)
Lz_current = kron(Lz_current, sigma_z)
else:
Lx_current = kron(Lx_current, unit)
Ly_current = kron(Ly_current, unit)
Lz_current = kron(Lz_current, unit)
Lx[n] = Lx_current
Ly[n] = Ly_current
Lz[n] = Lz_current
print('Lx/y/z matrices compiled')
print('Calculating Hamiltonian')
H = csc_matrix(((2 ** nspins), (2 ** nspins)))
for n in range(nspins):
H = (H + (freqlist[n] * Lz[n]))
print('Diagonal elements computed')
for n in range(nspins):
for k in range(nspins):
if (n != k):
H += ((couplings[(n, k)] / 2) * (((Lx[n] * Lx[k]) + (Ly[n] * Ly[k])) + (Lz[n] * Lz[k])))
print('Hamiltonian computed')
return H | def hamiltonian(freqlist, couplings):
'\n Computes the spin Hamiltonian for spin-1/2 nuclei.\n inputs for n nuclei:\n :param freqlist: a list of frequencies in Hz of length n\n :param couplings: a sparse n x n matrix of coupling constants in Hz\n Returns: a sparse Hamiltonian matrix\n '
nspins = len(freqlist)
print('Defining unit matrices')
sigma_x = csc_matrix(np.matrix([[0, (1 / 2)], [(1 / 2), 0]]))
sigma_y = csc_matrix(np.matrix([[0, ((- 1j) / 2)], [(1j / 2), 0]]))
sigma_z = csc_matrix(np.matrix([[(1 / 2), 0], [0, ((- 1) / 2)]]))
unit = csc_matrix(np.matrix([[1, 0], [0, 1]]))
print('Unit matrices defined')
print('Generating lists of Lx/y/z matrices')
Lx = nlist(nspins)
Ly = nlist(nspins)
Lz = nlist(nspins)
for n in range(nspins):
Lx_current = 1
Ly_current = 1
Lz_current = 1
for k in range(nspins):
if (k == n):
Lx_current = kron(Lx_current, sigma_x)
Ly_current = kron(Ly_current, sigma_y)
Lz_current = kron(Lz_current, sigma_z)
else:
Lx_current = kron(Lx_current, unit)
Ly_current = kron(Ly_current, unit)
Lz_current = kron(Lz_current, unit)
Lx[n] = Lx_current
Ly[n] = Ly_current
Lz[n] = Lz_current
print('Lx/y/z matrices compiled')
print('Calculating Hamiltonian')
H = csc_matrix(((2 ** nspins), (2 ** nspins)))
for n in range(nspins):
H = (H + (freqlist[n] * Lz[n]))
print('Diagonal elements computed')
for n in range(nspins):
for k in range(nspins):
if (n != k):
H += ((couplings[(n, k)] / 2) * (((Lx[n] * Lx[k]) + (Ly[n] * Ly[k])) + (Lz[n] * Lz[k])))
print('Hamiltonian computed')
return H<|docstring|>Computes the spin Hamiltonian for spin-1/2 nuclei.
inputs for n nuclei:
:param freqlist: a list of frequencies in Hz of length n
:param couplings: a sparse n x n matrix of coupling constants in Hz
Returns: a sparse Hamiltonian matrix<|endoftext|> |
0ded2f552e4dcf2d4e5d8f7db5fd84f41b3b7f7e54a177646ebd5bfacd50bad8 | def simsignals(H, nspins):
'\n Solves the spin Hamiltonian H and returns a list of (frequency, intensity)\n tuples. Nuclei must be spin-1/2.\n Inputs:\n :param H: a sparse spin Hamiltonian\n :param nspins: number of nuclei\n Returns:\n peaklist: a list of (frequency, intensity) tuples.\n\n\n '
print('Calculating eigensystem')
(E, V) = eigh(H.todense())
print('Eigensystem solved; converting eigenvectors to sparse')
V = np.asmatrix(V.real)
V = csc_matrix(V)
print('V converted to csc matrix.')
print('Calculating the transition matrix')
T = transition_matrix((2 ** nspins))
print('Transition matrix calculated')
print('Collecting spectrum')
spectrum = []
for i in range((2 ** nspins)):
for j in range(i, (2 ** nspins)):
if (j != i):
intensity = (((V[(:, i)].T * T) * V[(:, j)])[(0, 0)] ** 2)
if (intensity > 0.01):
v = abs((E[i] - E[j]))
spectrum.append((v, intensity))
print('Spectrum obtained.')
return spectrum | Solves the spin Hamiltonian H and returns a list of (frequency, intensity)
tuples. Nuclei must be spin-1/2.
Inputs:
:param H: a sparse spin Hamiltonian
:param nspins: number of nuclei
Returns:
peaklist: a list of (frequency, intensity) tuples. | history/nmrmath_old.py | simsignals | sametz/nmrtools | 0 | python | def simsignals(H, nspins):
'\n Solves the spin Hamiltonian H and returns a list of (frequency, intensity)\n tuples. Nuclei must be spin-1/2.\n Inputs:\n :param H: a sparse spin Hamiltonian\n :param nspins: number of nuclei\n Returns:\n peaklist: a list of (frequency, intensity) tuples.\n\n\n '
print('Calculating eigensystem')
(E, V) = eigh(H.todense())
print('Eigensystem solved; converting eigenvectors to sparse')
V = np.asmatrix(V.real)
V = csc_matrix(V)
print('V converted to csc matrix.')
print('Calculating the transition matrix')
T = transition_matrix((2 ** nspins))
print('Transition matrix calculated')
print('Collecting spectrum')
spectrum = []
for i in range((2 ** nspins)):
for j in range(i, (2 ** nspins)):
if (j != i):
intensity = (((V[(:, i)].T * T) * V[(:, j)])[(0, 0)] ** 2)
if (intensity > 0.01):
v = abs((E[i] - E[j]))
spectrum.append((v, intensity))
print('Spectrum obtained.')
return spectrum | def simsignals(H, nspins):
'\n Solves the spin Hamiltonian H and returns a list of (frequency, intensity)\n tuples. Nuclei must be spin-1/2.\n Inputs:\n :param H: a sparse spin Hamiltonian\n :param nspins: number of nuclei\n Returns:\n peaklist: a list of (frequency, intensity) tuples.\n\n\n '
print('Calculating eigensystem')
(E, V) = eigh(H.todense())
print('Eigensystem solved; converting eigenvectors to sparse')
V = np.asmatrix(V.real)
V = csc_matrix(V)
print('V converted to csc matrix.')
print('Calculating the transition matrix')
T = transition_matrix((2 ** nspins))
print('Transition matrix calculated')
print('Collecting spectrum')
spectrum = []
for i in range((2 ** nspins)):
for j in range(i, (2 ** nspins)):
if (j != i):
intensity = (((V[(:, i)].T * T) * V[(:, j)])[(0, 0)] ** 2)
if (intensity > 0.01):
v = abs((E[i] - E[j]))
spectrum.append((v, intensity))
print('Spectrum obtained.')
return spectrum<|docstring|>Solves the spin Hamiltonian H and returns a list of (frequency, intensity)
tuples. Nuclei must be spin-1/2.
Inputs:
:param H: a sparse spin Hamiltonian
:param nspins: number of nuclei
Returns:
peaklist: a list of (frequency, intensity) tuples.<|endoftext|> |
0c1c1715538fb657cf1f23623f54de333a373dc281f0730307cef74977f0f32a | def nspinspec(freqs, couplings):
'\n Function that calculates a spectrum for n spin-half nuclei.\n Inputs:\n :param freqs: a list of n nuclei frequencies in Hz\n :param couplings: an n x n sparse matrix of couplings in Hz. The order\n of nuclei in the list corresponds to the column and row order in the\n matrix, e.g. couplings[0][1] and [1]0] are the J coupling between\n the nuclei of freqs[0] and freqs [1].\n '
nspins = len(freqs)
H = hamiltonian(freqs, couplings)
return simsignals(H, nspins) | Function that calculates a spectrum for n spin-half nuclei.
Inputs:
:param freqs: a list of n nuclei frequencies in Hz
:param couplings: an n x n sparse matrix of couplings in Hz. The order
of nuclei in the list corresponds to the column and row order in the
matrix, e.g. couplings[0][1] and [1]0] are the J coupling between
the nuclei of freqs[0] and freqs [1]. | history/nmrmath_old.py | nspinspec | sametz/nmrtools | 0 | python | def nspinspec(freqs, couplings):
'\n Function that calculates a spectrum for n spin-half nuclei.\n Inputs:\n :param freqs: a list of n nuclei frequencies in Hz\n :param couplings: an n x n sparse matrix of couplings in Hz. The order\n of nuclei in the list corresponds to the column and row order in the\n matrix, e.g. couplings[0][1] and [1]0] are the J coupling between\n the nuclei of freqs[0] and freqs [1].\n '
nspins = len(freqs)
H = hamiltonian(freqs, couplings)
return simsignals(H, nspins) | def nspinspec(freqs, couplings):
'\n Function that calculates a spectrum for n spin-half nuclei.\n Inputs:\n :param freqs: a list of n nuclei frequencies in Hz\n :param couplings: an n x n sparse matrix of couplings in Hz. The order\n of nuclei in the list corresponds to the column and row order in the\n matrix, e.g. couplings[0][1] and [1]0] are the J coupling between\n the nuclei of freqs[0] and freqs [1].\n '
nspins = len(freqs)
H = hamiltonian(freqs, couplings)
return simsignals(H, nspins)<|docstring|>Function that calculates a spectrum for n spin-half nuclei.
Inputs:
:param freqs: a list of n nuclei frequencies in Hz
:param couplings: an n x n sparse matrix of couplings in Hz. The order
of nuclei in the list corresponds to the column and row order in the
matrix, e.g. couplings[0][1] and [1]0] are the J coupling between
the nuclei of freqs[0] and freqs [1].<|endoftext|> |
04571937f750900666457f110b8dec504ecda0be752ce898f334ccf6a4258a76 | def __init__(self, x=None, y=None):
'\n Initializes a 2D point object with x and y coordinates.\n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
self.x = x
self.y = y | Initializes a 2D point object with x and y coordinates. | Helper/point_cloud.py | __init__ | Baumwollboebele/python_algorithms | 0 | python | def __init__(self, x=None, y=None):
'\n \n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
self.x = x
self.y = y | def __init__(self, x=None, y=None):
'\n \n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
self.x = x
self.y = y<|docstring|>Initializes a 2D point object with x and y coordinates.<|endoftext|> |
fdcc3a124c13c5a615c0a3d65f2765ddbd3d8d29054b6f4e1a63344336712ced | def __init__(self, x=None, y=None, z=None):
'\n Initializes a 3D point object wit x,y and z coordinates.\n\n Args:\n x (integer, optional): X coordinate. Defaults to randint(0, 50).\n y (integer, optional): Y coordinate. Defaults to randint(0, 50).\n z (integer, optional):Z. Defaults to randint(0, 50).\n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
if (z is None):
z = randint(0, 50)
self.x = x
self.y = y
self.z = z | Initializes a 3D point object wit x,y and z coordinates.
Args:
x (integer, optional): X coordinate. Defaults to randint(0, 50).
y (integer, optional): Y coordinate. Defaults to randint(0, 50).
z (integer, optional):Z. Defaults to randint(0, 50). | Helper/point_cloud.py | __init__ | Baumwollboebele/python_algorithms | 0 | python | def __init__(self, x=None, y=None, z=None):
'\n Initializes a 3D point object wit x,y and z coordinates.\n\n Args:\n x (integer, optional): X coordinate. Defaults to randint(0, 50).\n y (integer, optional): Y coordinate. Defaults to randint(0, 50).\n z (integer, optional):Z. Defaults to randint(0, 50).\n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
if (z is None):
z = randint(0, 50)
self.x = x
self.y = y
self.z = z | def __init__(self, x=None, y=None, z=None):
'\n Initializes a 3D point object wit x,y and z coordinates.\n\n Args:\n x (integer, optional): X coordinate. Defaults to randint(0, 50).\n y (integer, optional): Y coordinate. Defaults to randint(0, 50).\n z (integer, optional):Z. Defaults to randint(0, 50).\n '
if (x is None):
x = randint(0, 50)
if (y is None):
y = randint(0, 50)
if (z is None):
z = randint(0, 50)
self.x = x
self.y = y
self.z = z<|docstring|>Initializes a 3D point object wit x,y and z coordinates.
Args:
x (integer, optional): X coordinate. Defaults to randint(0, 50).
y (integer, optional): Y coordinate. Defaults to randint(0, 50).
z (integer, optional):Z. Defaults to randint(0, 50).<|endoftext|> |
3a315d72401d7644107f9d3057d48cfb8f7b70f1fe56c0bbd489519018a84086 | def get_x_values(self):
'\n Returns x values of all points.\n\n Returns:\n list: x-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.x)
return values | Returns x values of all points.
Returns:
list: x-axis values | Helper/point_cloud.py | get_x_values | Baumwollboebele/python_algorithms | 0 | python | def get_x_values(self):
'\n Returns x values of all points.\n\n Returns:\n list: x-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.x)
return values | def get_x_values(self):
'\n Returns x values of all points.\n\n Returns:\n list: x-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.x)
return values<|docstring|>Returns x values of all points.
Returns:
list: x-axis values<|endoftext|> |
31f5839ef34a20c3897f4019c9250c2177e8025baa7b782bf77ae12889344af7 | def get_y_values(self):
'\n Returns y values of all points.\n\n Returns:\n list: y-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.y)
return values | Returns y values of all points.
Returns:
list: y-axis values | Helper/point_cloud.py | get_y_values | Baumwollboebele/python_algorithms | 0 | python | def get_y_values(self):
'\n Returns y values of all points.\n\n Returns:\n list: y-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.y)
return values | def get_y_values(self):
'\n Returns y values of all points.\n\n Returns:\n list: y-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.y)
return values<|docstring|>Returns y values of all points.
Returns:
list: y-axis values<|endoftext|> |
4cc8c82310957aa5495490594596489332419d9bd96aa43393fe7e13084cbb31 | def __init__(self, size):
'\n Initializes a random Point cloud within a 2D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point2D()) | Initializes a random Point cloud within a 2D coordinate system.
Args:
size (integer): number of points | Helper/point_cloud.py | __init__ | Baumwollboebele/python_algorithms | 0 | python | def __init__(self, size):
'\n Initializes a random Point cloud within a 2D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point2D()) | def __init__(self, size):
'\n Initializes a random Point cloud within a 2D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point2D())<|docstring|>Initializes a random Point cloud within a 2D coordinate system.
Args:
size (integer): number of points<|endoftext|> |
485c6b1e492208359aef140e4c023f69e36ef39a9bd8f6ed8955f8f5c73221d3 | def rotate(self, rotation):
'\n Rotation of the point cloud around the z axis\n with the angle [rotation].\n\n Args:\n rotation (integer | float): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
y = point.y
point.x = round(((x * cos(rotation)) - (y * sin(rotation))), 2)
point.y = round(((y * cos(rotation)) + (x * sin(rotation))), 2)
return | Rotation of the point cloud around the z axis
with the angle [rotation].
Args:
rotation (integer | float): angle of rotation | Helper/point_cloud.py | rotate | Baumwollboebele/python_algorithms | 0 | python | def rotate(self, rotation):
'\n Rotation of the point cloud around the z axis\n with the angle [rotation].\n\n Args:\n rotation (integer | float): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
y = point.y
point.x = round(((x * cos(rotation)) - (y * sin(rotation))), 2)
point.y = round(((y * cos(rotation)) + (x * sin(rotation))), 2)
return | def rotate(self, rotation):
'\n Rotation of the point cloud around the z axis\n with the angle [rotation].\n\n Args:\n rotation (integer | float): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
y = point.y
point.x = round(((x * cos(rotation)) - (y * sin(rotation))), 2)
point.y = round(((y * cos(rotation)) + (x * sin(rotation))), 2)
return<|docstring|>Rotation of the point cloud around the z axis
with the angle [rotation].
Args:
rotation (integer | float): angle of rotation<|endoftext|> |
7bbba5ff1f86f83d8fe768b3f190d0fe62dc8d18db7fe868dfcdd8551f78f90e | def translate(self, x, y):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float ): translation by y\n '
for point in self.point_cloud:
point.x += x
point.y += y
return | Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float ): translation by y | Helper/point_cloud.py | translate | Baumwollboebele/python_algorithms | 0 | python | def translate(self, x, y):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float ): translation by y\n '
for point in self.point_cloud:
point.x += x
point.y += y
return | def translate(self, x, y):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float ): translation by y\n '
for point in self.point_cloud:
point.x += x
point.y += y
return<|docstring|>Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float ): translation by y<|endoftext|> |
d1e0f1b503c23e8e339dbfd7cff7ee1b81d6d574149964391219940797a94bc2 | def random_rotation(self):
'\n Applies a random rotation to the point cloud.\n '
self.rotate(randint(0, 360))
return | Applies a random rotation to the point cloud. | Helper/point_cloud.py | random_rotation | Baumwollboebele/python_algorithms | 0 | python | def random_rotation(self):
'\n \n '
self.rotate(randint(0, 360))
return | def random_rotation(self):
'\n \n '
self.rotate(randint(0, 360))
return<|docstring|>Applies a random rotation to the point cloud.<|endoftext|> |
01de7eaeaa30e6a0aba8257d2e1cde5042d027cb85661757cfeaefbe4d21ebcc | def random_translation(self):
'\n Applies a random translation to the point cloud.\n '
self.translate(randint(0, 5), randint(0, 5))
return | Applies a random translation to the point cloud. | Helper/point_cloud.py | random_translation | Baumwollboebele/python_algorithms | 0 | python | def random_translation(self):
'\n \n '
self.translate(randint(0, 5), randint(0, 5))
return | def random_translation(self):
'\n \n '
self.translate(randint(0, 5), randint(0, 5))
return<|docstring|>Applies a random translation to the point cloud.<|endoftext|> |
0ee30d48ed409bee71723a2bad321c8e25b37ae41e8be1ecd814aa801c82c439 | def randomize(self):
'\n Applize random translation and rotation to the point cloud.\n '
self.random_translation()
self.random_rotation()
return | Applize random translation and rotation to the point cloud. | Helper/point_cloud.py | randomize | Baumwollboebele/python_algorithms | 0 | python | def randomize(self):
'\n \n '
self.random_translation()
self.random_rotation()
return | def randomize(self):
'\n \n '
self.random_translation()
self.random_rotation()
return<|docstring|>Applize random translation and rotation to the point cloud.<|endoftext|> |
fdc283f1c1e3b0d6937e7f997fa46e3cdb8b1bf6445eeb2d1803a71a8bfa0503 | def __init__(self, size):
'\n Initializes a random Point cloud within a 3D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point3D()) | Initializes a random Point cloud within a 3D coordinate system.
Args:
size (integer): number of points | Helper/point_cloud.py | __init__ | Baumwollboebele/python_algorithms | 0 | python | def __init__(self, size):
'\n Initializes a random Point cloud within a 3D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point3D()) | def __init__(self, size):
'\n Initializes a random Point cloud within a 3D coordinate system.\n\n Args:\n size (integer): number of points\n '
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point3D())<|docstring|>Initializes a random Point cloud within a 3D coordinate system.
Args:
size (integer): number of points<|endoftext|> |
93793a92cf49d5467f9f5670d0c24a3093a1c9cda968a2eeeeaffe9a8e6ff39f | def get_z_values(self):
'\n Returns z values of all points.\n\n Returns:\n list: z-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.z)
return values | Returns z values of all points.
Returns:
list: z-axis values | Helper/point_cloud.py | get_z_values | Baumwollboebele/python_algorithms | 0 | python | def get_z_values(self):
'\n Returns z values of all points.\n\n Returns:\n list: z-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.z)
return values | def get_z_values(self):
'\n Returns z values of all points.\n\n Returns:\n list: z-axis values\n '
values = []
for point in self.point_cloud:
values.append(point.z)
return values<|docstring|>Returns z values of all points.
Returns:
list: z-axis values<|endoftext|> |
27dbce03a9e884a96fe1de6dc9c655154292f4ef659cc417f04cdc7129cfd210 | def rotate_x_axis(self, rotation):
'\n Rotation of the Point Cloud around the X-Axis.\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.y = round(((point.y * cos(rotation)) - (point.z * sin(rotation))), 2)
point.z = round(((point.y * sin(rotation)) + (point.z * cos(rotation))), 2)
return | Rotation of the Point Cloud around the X-Axis.
Args:
rotation (integer): angle of rotation | Helper/point_cloud.py | rotate_x_axis | Baumwollboebele/python_algorithms | 0 | python | def rotate_x_axis(self, rotation):
'\n Rotation of the Point Cloud around the X-Axis.\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.y = round(((point.y * cos(rotation)) - (point.z * sin(rotation))), 2)
point.z = round(((point.y * sin(rotation)) + (point.z * cos(rotation))), 2)
return | def rotate_x_axis(self, rotation):
'\n Rotation of the Point Cloud around the X-Axis.\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.y = round(((point.y * cos(rotation)) - (point.z * sin(rotation))), 2)
point.z = round(((point.y * sin(rotation)) + (point.z * cos(rotation))), 2)
return<|docstring|>Rotation of the Point Cloud around the X-Axis.
Args:
rotation (integer): angle of rotation<|endoftext|> |
dd55218fd5fa942a2d3d4ca2c0aae4ecd6dd2f6004c074ca4d537c839a4c09be | def rotate_y_axis(self, rotation):
'\n Rotation of the Point Cloud around the Y-Axis\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.x = round(((point.x * cos(rotation)) + (point.z * sin(rotation))), 2)
point.z = round((((- point.x) * sin(rotation)) + (point.z * cos(rotation))), 2)
return | Rotation of the Point Cloud around the Y-Axis
Args:
rotation (integer): angle of rotation | Helper/point_cloud.py | rotate_y_axis | Baumwollboebele/python_algorithms | 0 | python | def rotate_y_axis(self, rotation):
'\n Rotation of the Point Cloud around the Y-Axis\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.x = round(((point.x * cos(rotation)) + (point.z * sin(rotation))), 2)
point.z = round((((- point.x) * sin(rotation)) + (point.z * cos(rotation))), 2)
return | def rotate_y_axis(self, rotation):
'\n Rotation of the Point Cloud around the Y-Axis\n\n Args:\n rotation (integer): angle of rotation\n '
rotation = radians(rotation)
for point in self.point_cloud:
point.x = round(((point.x * cos(rotation)) + (point.z * sin(rotation))), 2)
point.z = round((((- point.x) * sin(rotation)) + (point.z * cos(rotation))), 2)
return<|docstring|>Rotation of the Point Cloud around the Y-Axis
Args:
rotation (integer): angle of rotation<|endoftext|> |
2ef7abccf405e92811747b92a6ecb5171290c7fe63ba2c444651b202e06a3b5a | def translate(self, x, y, z):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float): translation by y\n z (integer | float): translation by z\n '
for point in self.point_cloud:
point.x += x
point.y += y
point.z += z
return | Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float): translation by y
z (integer | float): translation by z | Helper/point_cloud.py | translate | Baumwollboebele/python_algorithms | 0 | python | def translate(self, x, y, z):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float): translation by y\n z (integer | float): translation by z\n '
for point in self.point_cloud:
point.x += x
point.y += y
point.z += z
return | def translate(self, x, y, z):
'\n Translate the coordinates of the point cloud by x and y.\n\n Args:\n x (integer | float): translation by x\n y (integer | float): translation by y\n z (integer | float): translation by z\n '
for point in self.point_cloud:
point.x += x
point.y += y
point.z += z
return<|docstring|>Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float): translation by y
z (integer | float): translation by z<|endoftext|> |
86cc72476ede10071746620b26e05a62c71327f7ab5c92899780e75a1e509ef7 | def random_rotation(self):
'\n Applies a random rotation to the point cloud.\n '
self.rotate_x_axis(randint(0, 360))
self.rotate_y_axis(randint(0, 360))
self.rotate_z_axis(randint(0, 360))
return | Applies a random rotation to the point cloud. | Helper/point_cloud.py | random_rotation | Baumwollboebele/python_algorithms | 0 | python | def random_rotation(self):
'\n \n '
self.rotate_x_axis(randint(0, 360))
self.rotate_y_axis(randint(0, 360))
self.rotate_z_axis(randint(0, 360))
return | def random_rotation(self):
'\n \n '
self.rotate_x_axis(randint(0, 360))
self.rotate_y_axis(randint(0, 360))
self.rotate_z_axis(randint(0, 360))
return<|docstring|>Applies a random rotation to the point cloud.<|endoftext|> |
43e1350dbe1d8d3ce9a1a61bb760d9ef64574194f81bb07b485c0262a944762e | def random_translation(self):
'\n Applies a random translation to the point cloud.\n '
self.translate(randint(0, 5), randint(0, 5), randint(0, 5))
return | Applies a random translation to the point cloud. | Helper/point_cloud.py | random_translation | Baumwollboebele/python_algorithms | 0 | python | def random_translation(self):
'\n \n '
self.translate(randint(0, 5), randint(0, 5), randint(0, 5))
return | def random_translation(self):
'\n \n '
self.translate(randint(0, 5), randint(0, 5), randint(0, 5))
return<|docstring|>Applies a random translation to the point cloud.<|endoftext|> |
0ee30d48ed409bee71723a2bad321c8e25b37ae41e8be1ecd814aa801c82c439 | def randomize(self):
'\n Applize random translation and rotation to the point cloud.\n '
self.random_translation()
self.random_rotation()
return | Applize random translation and rotation to the point cloud. | Helper/point_cloud.py | randomize | Baumwollboebele/python_algorithms | 0 | python | def randomize(self):
'\n \n '
self.random_translation()
self.random_rotation()
return | def randomize(self):
'\n \n '
self.random_translation()
self.random_rotation()
return<|docstring|>Applize random translation and rotation to the point cloud.<|endoftext|> |
4a141ebe38c4c6c2797af3540bb4a77fa46f686964a8a4c2607e38a90bad9296 | def update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, multi, lamu):
'\n users can provide constraints for the value range of elements in connectivity matrices, A and B. This\n can be easily done by modifying "update" functions. For example, if the negative diagonal value is required,\n we can add additional constraints on that. \n The main algorithm, updating parameter for a defined problem\n\n Parameters\n -----------\n file_name_dir: dir of problem folder\n precomp_dir: dir of precomputed data\n pickele_file: file name which we use to save estimations\n lamu: list = [lam, mu, lam_1], in our paper, mu is set to be zero. lam*mu is the coefficient \n for l2 norm penalty of A, B, C\n tol, max_iter:\n multi: bool variable, Default True\n '
configpara = Modelpara((precomp_dir + 'precomp.pkl'))
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
P1 = configpara.P1
P2 = configpara.P2
P3 = configpara.P3
P4 = configpara.P4
P5 = configpara.P5
P6 = configpara.P6
P7 = configpara.P7
P8 = configpara.P8
P9 = configpara.P9
P10 = configpara.P10
P11 = configpara.P11
P12 = configpara.P12
P13 = configpara.P13
P14 = configpara.P14
P15 = configpara.P15
Q1 = configpara.Q1
Q2 = configpara.Q2
Q3 = configpara.Q3
Q4 = configpara.Q4
Omega = configpara.Omega
y = config.y
n_area = config.n_area
p = configpara.p
t_i = configpara.t_i
l_t = configpara.l_t
J = configpara.J
t_T = configpara.t_T
dt = configpara.dt
row_n = configpara.row_n
fold = configpara.fold
def gr(gamma, A, B, C, D, lam, mu, lam_1):
g = np.zeros((n_area, p))
g = ((g + np.dot(gamma, P1)) - np.dot(np.dot(np.transpose(A), gamma), np.transpose(P2)))
g = ((g - np.dot(np.dot(A, gamma), P2)) + np.dot(np.dot(np.dot(np.transpose(A), A), gamma), P5))
tmp_1 = 0
tmp_2 = 0
for j in range(J):
tmp_1 = (tmp_1 + np.dot(np.dot(B[(:, :, j)], gamma), P3[(:, :, j)]))
tmp_2 = (tmp_2 + np.dot(np.dot(np.dot(np.transpose(A), B[(:, :, j)]), gamma), P6[(:, :, j)]))
g = (g - (tmp_1 - tmp_2))
g = ((g - np.dot(C, P4)) + np.dot(np.dot(np.transpose(A), C), P7))
g = ((g - np.dot(D, P8)) + np.dot(np.dot(np.transpose(A), D), P9))
tmp = 0
for l in range(J):
tmp_1 = 0
for j in range(J):
tmp_1 = np.dot(np.dot(B[(:, :, j)], gamma), P10[(:, :, j, l)])
tmp = (tmp - np.dot(np.transpose(B[(:, :, l)]), ((((np.dot(gamma, np.transpose(P3[(:, :, l)])) - np.dot(np.dot(A, gamma), np.transpose(P6[(:, :, l)]))) - tmp_1) - np.dot(C, P13[(:, :, l)])) - np.dot(D, P11[(l, :)].reshape((1, (- 1)))))))
g += tmp
g = ((g * 2) * lam)
tmp1 = np.zeros((n_area, 1))
tmp2 = np.zeros((n_area, J))
for m in range(n_area):
tmp1[(m, 0)] = (np.sum(abs(A[(:, m)])) / (np.dot(np.dot(gamma[(m, :)], P5), gamma[(m,)]) ** 0.5))
for j in range(J):
tmp2[(m, j)] = (np.sum(abs(B[(:, m, j)])) / (np.dot(np.dot(gamma[(m, :)], P10[(:, :, j, j)]), gamma[(m, :)]) ** 0.5))
g = (g + (((lam * mu) * np.dot(gamma, np.transpose(P5))) * tmp1))
for j in range(J):
g = (g + (((lam * mu) * np.dot(gamma, P10[(:, :, j, j)])) * tmp2[(:, j)].reshape(((- 1), 1))))
g = (g + (np.dot((np.dot(gamma, np.transpose(P12)) - y), P12) * 2))
g = (g + ((2 * lam_1) * np.dot(gamma, np.transpose(Omega))))
g[np.isnan(g)] = 0
return g
def cd_thre(tmp, tmp_1, mu):
mu = (mu / 2.0)
if (abs(tmp) > (mu * (tmp_1 ** 0.5))):
return ((np.sign(tmp) * (abs(tmp) - (mu * (tmp_1 ** 0.5)))) / tmp_1)
else:
return 0
def update_A(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, j)], gamma), P6[(:, :, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P5), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P2), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), P5), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P7), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P9[(0, :)]))) + (A[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_B(m, n, j, gamma, A, B, C, D, mu):
tmp_0 = 0
for l in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, l)], gamma), P10[(:, :, l, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P10[(:, :, j, j)]), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P3[(:, :, j)]), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), np.transpose(P6[(:, :, j)])), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P13[(:, :, j)]), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P11[(j, :)]))) + (B[(m, n, j)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_C(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(B[(m, :, j)], gamma), P13[(n, :, j)]))
tmp_1 = P14[(n, n)]
tmp = (((((np.dot(gamma[(m, :)], P4[(n, :)]) - np.dot(np.dot(A[(m, :)], gamma), P7[(n, :)])) - tmp_0) - np.dot(C[(m, :)], P14[(n, :)])) - (D[(m, 0)] * P15[(0, n)])) + (C[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_D(gamma, A, B, C):
    """Closed-form update of the intercept column D (shape (n_area, 1)).

    D is the time-average of the ODE residual given the current
    gamma, A, B, C; uses P8, P9, P11, P15 and total time t_T from the
    enclosing scope.

    Fix: restored valid NumPy slicing from the extraction artifact
    ``B[(:, :, j)]`` (a SyntaxError as written).
    """
    tmp = np.dot(gamma, np.transpose(P8)) - np.dot(np.dot(A, gamma), np.transpose(P9))
    for j in range(J):
        tmp = tmp - np.dot(np.dot(B[:, :, j], gamma), P11[j, :]).reshape((-1, 1))
    tmp = tmp - np.dot(C, np.transpose(P15))
    # average the integrated residual over the total observation time
    return (tmp * 1.0) / t_T
def likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=False):
    """Evaluate the penalized objective at the current parameters.

    Terms:
      e1    - squared observation error  sum((y - gamma @ P12')**2)
      e2    - integrated squared ODE residual on the grid t_i (Simpson rule)
      plt   - weighted l1 penalty on A, B, C
      plt_1 - smoothness (second-derivative) penalty on the latent activity

    Returns the scalar objective e1 + lam*e2 + lam*mu*plt + lam_1*plt_1,
    or the tuple (e1, e2, plt, plt_1) when ``p_t`` is True.

    Fix: restored valid NumPy slicing from the extraction artifact
    ``X[(:, :, j)]`` (a SyntaxError as written).
    """
    e1 = np.sum((y - np.dot(gamma, np.transpose(P12))) ** 2)
    e2 = 0
    tmp_0 = 0
    for j in range(J):
        tmp_0 = tmp_0 + np.dot(np.dot(B[:, :, j], gamma), Q3[:, :, j])
    # ODE residual x'(t) - A x - sum_j B_j x u_j - C u - D on the grid
    tmp = (np.dot(gamma, Q1) - np.dot(np.dot(A, gamma), Q2) - tmp_0
           - np.dot(C, Q4) - np.repeat(D, l_t, axis=1))
    for m in range(n_area):
        e2 = e2 + simps(tmp[m, :] ** 2, t_i)
    # weighted l1 penalty: column weights are gamma-dependent norms
    plt = 0
    for k in range(n_area):
        w_1k = np.dot(np.dot(gamma[k, :], P5), gamma[k, :]) ** 0.5
        plt = plt + np.sum(abs(A[:, k])) * w_1k
        for j in range(J):
            w_2kj = np.dot(np.dot(gamma[k, :], P10[:, :, j, j]), gamma[k, :]) ** 0.5
            plt = plt + np.sum(abs(B[:, k, j])) * w_2kj
    for k in range(J):
        w_3k = P14[k, k] ** 0.5
        plt = plt + np.sum(abs(C[:, k])) * w_3k
    # roughness penalty on the basis coefficients
    plt_1 = 0
    for i in range(n_area):
        plt_1 = plt_1 + np.dot(np.dot(gamma[i, :], Omega), gamma[i, :])
    sum_e = e1 + lam * e2 + lam * mu * plt + lam_1 * plt_1
    if p_t:
        return (e1, e2, plt, plt_1)
    return sum_e
def update_all_3(gamma, mu=0):
    """Second step: jointly refit A, B, C (and D) by least squares.

    Assembles the stacked normal equations at the current ``gamma`` and
    solves them with a pseudo-inverse.  The returned (n_area, n_all)
    matrix has column blocks [A | B_1 ... B_J | C | D]; the trailing D
    column is dropped when ``config.D_u`` is False.

    Parameters
    ----------
    gamma : numpy array of basis coefficients.
    mu : extra ridge tuning parameter on the l2 norm of A, B, C; not used
        in the paper (kept at 0) but provided for experimentation.

    Fixes: restored valid NumPy slicing from the extraction artifact
    ``X[(:, :, j)]`` (a SyntaxError as written) and collapsed the
    duplicated tail ``return`` into one.
    """
    n_all = n_area * (J + 1) + J + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # block-diagonal ridge weights: gamma-dependent for A and B, P14 for C
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    W_B = np.zeros((n_area, n_area, J))
    for j in range(J):
        for i in range(n_area):
            W_B[i, i, j] = np.dot(np.dot(gamma[i, :], P10[:, :, j, j]), np.transpose(gamma[i, :]))
        I_tmp[(j + 1) * n_area:(j + 2) * n_area, (j + 1) * n_area:(j + 2) * n_area] = W_B[:, :, j]
    W_C = np.zeros((J, J))
    for j in range(J):
        W_C[j, j] = P14[j, j]
    I_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (J + 1) * n_area:(J + 1) * n_area + J] = W_C
    # right-hand side: cross-products of the data terms with each block
    for j in range(J + 1):
        if j == 0:
            Y_tmp[:, j * n_area:(j + 1) * n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
        else:
            Y_tmp[:, j * n_area:(j + 1) * n_area] = np.dot(np.dot(gamma, np.transpose(P3[:, :, j - 1])), np.transpose(gamma))
    Y_tmp[:, (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P4))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix, first (A) column block
    for j in range(J + 1):
        if j == 0:
            X_tmp[j * n_area:(j + 1) * n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
        else:
            X_tmp[j * n_area:(j + 1) * n_area, 0:n_area] = np.dot(np.dot(gamma, P6[:, :, j - 1]), np.transpose(gamma))
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, 0:n_area] = np.dot(P7, np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    # B-by-B block, assembled separately then inserted
    tmp = np.zeros((n_area * J, n_area * J))
    for j in range(J):
        for l in range(J):
            tmp[j * n_area:(j + 1) * n_area, l * n_area:(l + 1) * n_area] = np.dot(np.dot(gamma, P10[:, :, j, l]), np.transpose(gamma))
    for j in range(J):
        X_tmp[0:n_area, (j + 1) * n_area:(j + 2) * n_area] = np.dot(np.dot(gamma, np.transpose(P6[:, :, j])), np.transpose(gamma))
        X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (j + 1) * n_area:(j + 2) * n_area] = np.dot(P13[:, :, j], np.transpose(gamma))
        X_tmp[-1, (j + 1) * n_area:(j + 2) * n_area] = np.dot(P11[j, :].reshape((1, -1)), np.transpose(gamma))
    X_tmp[n_area:(J + 1) * n_area, n_area:(J + 1) * n_area] = tmp
    # C column block
    X_tmp[0:n_area, (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P7))
    for j in range(J):
        X_tmp[n_area * (j + 1):n_area * (j + 2), (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P13[:, :, j]))
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (J + 1) * n_area:(J + 1) * n_area + J] = P14
    X_tmp[-1, (J + 1) * n_area:(J + 1) * n_area + J] = P15
    # D column block
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    for j in range(J):
        X_tmp[n_area * (j + 1):n_area * (j + 2), -1] = np.dot(gamma, np.transpose(P11[j, :])).reshape(-1)
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, -1] = np.transpose(P15).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # no intercept D: drop its row/column before solving
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def update_all_2(gamma, mu):
    """Joint least-squares refit of A, C (and D) for the case B = 0.

    Same normal-equation construction as ``update_all_3`` with the B
    blocks removed.  Returns the (n_area, n_all) matrix [A | C | D]; the
    D column is dropped when ``config.D_u`` is False.

    Fix: restored valid NumPy slicing from the extraction artifact
    ``X[(a, b)]`` slice tuples (a SyntaxError as written) and collapsed
    the duplicated tail ``return`` into one.
    """
    n_all = n_area + J + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # ridge weights for A (gamma-dependent) and C (P14 diagonal)
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    W_C = np.zeros((J, J))
    for j in range(J):
        W_C[j, j] = P14[j, j]
    I_tmp[1 * n_area:1 * n_area + J, 1 * n_area:1 * n_area + J] = W_C
    # right-hand side
    Y_tmp[:, 0:n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
    Y_tmp[:, 1 * n_area:1 * n_area + J] = np.dot(gamma, np.transpose(P4))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix blocks
    X_tmp[0:n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
    X_tmp[1 * n_area:1 * n_area + J, 0:n_area] = np.dot(P7, np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    X_tmp[0:n_area, n_area:n_area + J] = np.dot(gamma, np.transpose(P7))
    X_tmp[n_area:n_area + J, n_area:n_area + J] = P14
    X_tmp[-1, n_area:n_area + J] = P15
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    X_tmp[n_area:n_area + J, -1] = np.transpose(P15).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # no intercept D: drop its row/column before solving
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def update_all_1(gamma, mu):
    """Least-squares refit of A (and D) for the case B = 0 and C = 0.

    Returns the (n_area, n_all) matrix [A | D]; the D column is dropped
    when ``config.D_u`` is False.

    Fixes: restored valid NumPy slicing from the extraction artifact
    slice tuples (a SyntaxError as written); removed the dead statement
    ``s_eig = np.sort(abs(np.linalg.eig(X_tmp)[0]))`` whose result was
    never read (a wasted full eigendecomposition); collapsed the
    duplicated tail ``return`` into one.
    """
    n_all = n_area + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # gamma-dependent ridge weights for A
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    # right-hand side
    Y_tmp[:, 0:n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix blocks
    X_tmp[0:n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # no intercept D: drop its row/column before solving
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def ini_select(y, lam_1, P12=P12, Omega=Omega):
    """Pick a starting value of gamma that may help avoid a local minimum.

    Runs the smoothing solve ``error_ws`` from an all-zero coefficient
    matrix of shape (n_area, p).

    Parameters
    ----------
    lam_1 : scalar, penalty on the second derivative of the neuronal
        activities x.
    """
    zero_start = np.zeros((n_area, p))
    return error_ws(y, zero_start, lam_1, P12, Omega)
def str_1(num):
    """Render a tuning-parameter value as a filename-safe string.

    Values >= 1 are truncated to their integer part; smaller values keep
    their digits with the decimal point stripped (e.g. 0.05 -> '005').
    """
    if num >= 1:
        return str(int(num))
    return str(num).replace('.', '')
# --- unpack tuning parameters and initialize the estimates ----------------
lam = lamu[0]
mu = lamu[1]
lam_1 = lamu[(- 1)]
# connectivity matrices start at zero
A = np.zeros((n_area, n_area))
B = np.zeros((n_area, n_area, J))
C = np.zeros((n_area, J))
D = np.zeros((n_area, 1))
iter = 0
# large sentinel so the first relative-change test always passes
sum_e = (10 ** 6)
# warm start for gamma (basis coefficients of the latent activity)
gamma = ini_select(y, lam_1)
# convergence is tracked on e2 (the ODE-fidelity term), index [1] of the tuple
sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
# alternate gradient steps on gamma with closed-form refits of A/B/C/D
# until the relative change of e2 drops below tol or max_iter is reached
while ((iter < max_iter) and ((abs((sum_e - sum_e_1)) / sum_e_1) > tol)):
    stp = 1
    # up to 9 backtracking gradient steps on gamma; skipped during the
    # first two outer iterations (iter > 1 guard)
    while ((stp < 10) and (iter > 1)):
        results = gr(gamma, A, B, C, D, lam, mu, lam_1)
        n_results = np.sum((results ** 2))
        gamma_1 = gamma.copy()
        f_t = 1
        fixed = likelihood(gamma, A, B, C, D, lam, mu, lam_1)
        # Armijo-style backtracking line search on the step size f_t
        while (likelihood((gamma - (f_t * results)), A, B, C, D, lam, mu, lam_1) > (fixed - ((0.5 * f_t) * n_results))):
            f_t = (0.8 * f_t)
        gamma = (gamma - (results * f_t))
        stp = (stp + 1)
    # closed-form refit of the connectivity parameters; the solver used
    # depends on which effects (B modulatory, C direct stimulus, D
    # intercept) are enabled in config
    if (config.B_u == True):
        tmp = update_all_3(gamma, mu=0)
        # unpack the stacked solution [A | B_1..B_J | C | D]
        A = tmp[(:, 0:n_area)]
        for j in range(J):
            B[(:, :, j)] = tmp[(:, ((j + 1) * n_area):((j + 2) * n_area))]
        C = tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))]
        if (config.D_u == True):
            D = tmp[(:, (- 1))].reshape(((- 1), 1))
    elif (config.C_u == True):
        tmp = update_all_2(gamma, mu=0)
        A = tmp[(:, 0:n_area)]
        C = tmp[(:, n_area:(n_area + J))]
        if (config.D_u == True):
            D = tmp[(:, (- 1))].reshape(((- 1), 1))
    else:
        tmp = update_all_1(gamma, mu=0)
        A = tmp[(:, 0:n_area)]
        if (config.D_u == True):
            D = tmp[(:, (- 1))].reshape(((- 1), 1))
    sum_e = sum_e_1
    sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
    iter += 1
# final decomposition of the objective for reporting/saving
(e1, e2, plt, plt_1) = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)
if (multi == False):
    # single-run mode: stash the estimates on config and write one result
    # bundle (reconstructed x, fitted y, parameters) to results/result.pkl
    config.gamma = gamma
    config.A = A
    config.B = B
    config.C = C
    config.D = D
    config.lamu = lamu
    config.e1 = e1
    config.e2 = e2
    config.plt = plt
    config.plt_1 = plt_1
    config.t_i = configpara.t_i
    pickle_file_1 = (file_name_dir + 'results/result.pkl')
    f = open(pickle_file_1, 'wb')
    save = {'estimated_x': np.dot(config.gamma, configpara.Q2_all), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt ** 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
    pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
    f.close()
    return
else:
    # grid-search mode: save this fit under a name encoding the tuning
    # parameters lam, mu*lam and lam_1
    pickle_file_1 = ((((((pickle_file + str_1(lam)) + '_') + str_1((mu * lam))) + '_') + str_1(lam_1)) + '.pickle')
    f = open(pickle_file_1, 'wb')
    save = {'result': [lamu, gamma, A, B, C, D, e1, e2, plt, plt_1]}
    pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
    f.close()
return | users can provide constraints for the value range of elements in connectivity matrices, A and B. This
can be easily done by modifying "update" functions. For example, if the negative diagonal value is required,
we can add additional constraints on that.
The main algorithm, updating parameter for a defined problem
Parameters
-----------
file_name_dir: dir of problem folder
precomp_dir: dir of precomputed data
pickle_file: file name which we use to save estimations
lamu: list = [lam, mu, lam_1], in our paper, mu is set to be zero. lam*mu is the coefficient
for l2 norm penalty of A, B, C
tol, max_iter: convergence tolerance (relative change of the ODE-fidelity term) and maximum number of outer iterations
multi: bool variable, Default True | cdn/main_computation.py | update_p | xuefeicao/CDN | 11 | python | def update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, multi, lamu):
'\n users can provide constraints for the value range of elements in connectivity matrices, A and B. This\n can be easily done by modifying "update" functions. For example, if the negative diagonal value is required,\n we can add additional constraints on that. \n The main algorithm, updating parameter for a defined problem\n\n Parameters\n -----------\n file_name_dir: dir of problem folder\n precomp_dir: dir of precomputed data\n pickele_file: file name which we use to save estimations\n lamu: list = [lam, mu, lam_1], in our paper, mu is set to be zero. lam*mu is the coefficient \n for l2 norm penalty of A, B, C\n tol, max_iter:\n multi: bool variable, Default True\n '
configpara = Modelpara((precomp_dir + 'precomp.pkl'))
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
P1 = configpara.P1
P2 = configpara.P2
P3 = configpara.P3
P4 = configpara.P4
P5 = configpara.P5
P6 = configpara.P6
P7 = configpara.P7
P8 = configpara.P8
P9 = configpara.P9
P10 = configpara.P10
P11 = configpara.P11
P12 = configpara.P12
P13 = configpara.P13
P14 = configpara.P14
P15 = configpara.P15
Q1 = configpara.Q1
Q2 = configpara.Q2
Q3 = configpara.Q3
Q4 = configpara.Q4
Omega = configpara.Omega
y = config.y
n_area = config.n_area
p = configpara.p
t_i = configpara.t_i
l_t = configpara.l_t
J = configpara.J
t_T = configpara.t_T
dt = configpara.dt
row_n = configpara.row_n
fold = configpara.fold
def gr(gamma, A, B, C, D, lam, mu, lam_1):
g = np.zeros((n_area, p))
g = ((g + np.dot(gamma, P1)) - np.dot(np.dot(np.transpose(A), gamma), np.transpose(P2)))
g = ((g - np.dot(np.dot(A, gamma), P2)) + np.dot(np.dot(np.dot(np.transpose(A), A), gamma), P5))
tmp_1 = 0
tmp_2 = 0
for j in range(J):
tmp_1 = (tmp_1 + np.dot(np.dot(B[(:, :, j)], gamma), P3[(:, :, j)]))
tmp_2 = (tmp_2 + np.dot(np.dot(np.dot(np.transpose(A), B[(:, :, j)]), gamma), P6[(:, :, j)]))
g = (g - (tmp_1 - tmp_2))
g = ((g - np.dot(C, P4)) + np.dot(np.dot(np.transpose(A), C), P7))
g = ((g - np.dot(D, P8)) + np.dot(np.dot(np.transpose(A), D), P9))
tmp = 0
for l in range(J):
tmp_1 = 0
for j in range(J):
tmp_1 = np.dot(np.dot(B[(:, :, j)], gamma), P10[(:, :, j, l)])
tmp = (tmp - np.dot(np.transpose(B[(:, :, l)]), ((((np.dot(gamma, np.transpose(P3[(:, :, l)])) - np.dot(np.dot(A, gamma), np.transpose(P6[(:, :, l)]))) - tmp_1) - np.dot(C, P13[(:, :, l)])) - np.dot(D, P11[(l, :)].reshape((1, (- 1)))))))
g += tmp
g = ((g * 2) * lam)
tmp1 = np.zeros((n_area, 1))
tmp2 = np.zeros((n_area, J))
for m in range(n_area):
tmp1[(m, 0)] = (np.sum(abs(A[(:, m)])) / (np.dot(np.dot(gamma[(m, :)], P5), gamma[(m,)]) ** 0.5))
for j in range(J):
tmp2[(m, j)] = (np.sum(abs(B[(:, m, j)])) / (np.dot(np.dot(gamma[(m, :)], P10[(:, :, j, j)]), gamma[(m, :)]) ** 0.5))
g = (g + (((lam * mu) * np.dot(gamma, np.transpose(P5))) * tmp1))
for j in range(J):
g = (g + (((lam * mu) * np.dot(gamma, P10[(:, :, j, j)])) * tmp2[(:, j)].reshape(((- 1), 1))))
g = (g + (np.dot((np.dot(gamma, np.transpose(P12)) - y), P12) * 2))
g = (g + ((2 * lam_1) * np.dot(gamma, np.transpose(Omega))))
g[np.isnan(g)] = 0
return g
def cd_thre(tmp, tmp_1, mu):
mu = (mu / 2.0)
if (abs(tmp) > (mu * (tmp_1 ** 0.5))):
return ((np.sign(tmp) * (abs(tmp) - (mu * (tmp_1 ** 0.5)))) / tmp_1)
else:
return 0
def update_A(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, j)], gamma), P6[(:, :, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P5), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P2), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), P5), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P7), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P9[(0, :)]))) + (A[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_B(m, n, j, gamma, A, B, C, D, mu):
tmp_0 = 0
for l in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, l)], gamma), P10[(:, :, l, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P10[(:, :, j, j)]), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P3[(:, :, j)]), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), np.transpose(P6[(:, :, j)])), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P13[(:, :, j)]), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P11[(j, :)]))) + (B[(m, n, j)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_C(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(B[(m, :, j)], gamma), P13[(n, :, j)]))
tmp_1 = P14[(n, n)]
tmp = (((((np.dot(gamma[(m, :)], P4[(n, :)]) - np.dot(np.dot(A[(m, :)], gamma), P7[(n, :)])) - tmp_0) - np.dot(C[(m, :)], P14[(n, :)])) - (D[(m, 0)] * P15[(0, n)])) + (C[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_D(gamma, A, B, C):
tmp = (np.dot(gamma, np.transpose(P8)) - np.dot(np.dot(A, gamma), np.transpose(P9)))
for j in range(J):
tmp = (tmp - np.dot(np.dot(B[(:, :, j)], gamma), P11[(j, :)]).reshape(((- 1), 1)))
tmp = (tmp - np.dot(C, np.transpose(P15)))
return ((tmp * 1.0) / t_T)
def likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=False):
e1 = np.sum(((y - np.dot(gamma, np.transpose(P12))) ** 2))
e2 = 0
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(B[(:, :, j)], gamma), Q3[(:, :, j)]))
tmp = ((((np.dot(gamma, Q1) - np.dot(np.dot(A, gamma), Q2)) - tmp_0) - np.dot(C, Q4)) - np.repeat(D, l_t, axis=1))
for m in range(n_area):
e2 = (e2 + simps((tmp[(m, :)] ** 2), t_i))
plt = 0
for k in range(n_area):
w_1k = (np.dot(np.dot(gamma[(k, :)], P5), gamma[(k, :)]) ** 0.5)
plt = (plt + (np.sum(abs(A[(:, k)])) * w_1k))
for j in range(J):
w_2kj = (np.dot(np.dot(gamma[(k, :)], P10[(:, :, j, j)]), gamma[(k, :)]) ** 0.5)
plt = (plt + (np.sum(abs(B[(:, k, j)])) * w_2kj))
for k in range(J):
w_3k = (P14[(k, k)] ** 0.5)
plt = (plt + (np.sum(abs(C[(:, k)])) * w_3k))
plt_1 = 0
for i in range(n_area):
plt_1 = (plt_1 + np.dot(np.dot(gamma[(i, :)], Omega), gamma[(i, :)]))
sum_e = (((e1 + (lam * e2)) + ((lam * mu) * plt)) + (lam_1 * plt_1))
if (p_t == True):
return (e1, e2, plt, plt_1)
return sum_e
def update_all_3(gamma, mu=0):
'\n Second step for updating A, B, C\n\n Parameters\n -----------\n gamma: numpy array, \n mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty \n to the l2 norm of A, B, C\n '
n_all = (((n_area * (J + 1)) + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_B = np.zeros((n_area, n_area, J))
for j in range(J):
for i in range(n_area):
W_B[(i, i, j)] = np.dot(np.dot(gamma[(i, :)], P10[(:, :, j, j)]), np.transpose(gamma[(i, :)]))
I_tmp[(((j + 1) * n_area):((j + 2) * n_area), ((j + 1) * n_area):((j + 2) * n_area))] = W_B[(:, :, j)]
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = W_C
for j in range((J + 1)):
if (j == 0):
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
else:
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P3[(:, :, (j - 1))])), np.transpose(gamma))
Y_tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
for j in range((J + 1)):
if (j == 0):
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
else:
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P6[(:, :, (j - 1))]), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
tmp = np.zeros(((n_area * J), (n_area * J)))
for j in range(J):
for l in range(J):
tmp[((j * n_area):((j + 1) * n_area), (l * n_area):((l + 1) * n_area))] = np.dot(np.dot(gamma, P10[(:, :, j, l)]), np.transpose(gamma))
for j in range(J):
X_tmp[(0:n_area, ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(np.dot(gamma, np.transpose(P6[(:, :, j)])), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P13[(:, :, j)], np.transpose(gamma))
X_tmp[((- 1), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P11[(j, :)].reshape((1, (- 1))), np.transpose(gamma))
X_tmp[(n_area:((J + 1) * n_area), n_area:((J + 1) * n_area))] = tmp
X_tmp[(0:n_area, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P7))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P13[(:, :, j)]))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P14
X_tmp[((- 1), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), (- 1))] = np.dot(gamma, np.transpose(P11[(j, :)])).reshape((- 1))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
def update_all_2(gamma, mu):
'\n For the case when B = 0\n '
n_all = ((n_area + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[((1 * n_area):((1 * n_area) + J), (1 * n_area):((1 * n_area) + J))] = W_C
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (1 * n_area):((1 * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((1 * n_area):((1 * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, n_area:(n_area + J))] = np.dot(gamma, np.transpose(P7))
X_tmp[(n_area:(n_area + J), n_area:(n_area + J))] = P14
X_tmp[((- 1), n_area:(n_area + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[(n_area:(n_area + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
def update_all_1(gamma, mu):
'\n For the case B = 0 and C = 0\n '
n_all = (n_area + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
s_eig = np.sort(abs(np.linalg.eig(X_tmp)[0]))
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
def ini_select(y, lam_1, P12=P12, Omega=Omega):
'\n selecting an initial for gamma which may help to avoid local minimum\n\n Parameters\n ------------- \n lam_1: scalar, penalty for the second derivative of neuronal activities x. \n '
gamma_0 = np.zeros((n_area, p))
gamma_0 = error_ws(y, gamma_0, lam_1, P12, Omega)
return gamma_0
def str_1(num):
if (num >= 1):
return str(int(num))
num = str(num)
num_1 =
for i in range(len(num)):
if (num[i] != '.'):
num_1 = (num_1 + num[i])
return num_1
lam = lamu[0]
mu = lamu[1]
lam_1 = lamu[(- 1)]
A = np.zeros((n_area, n_area))
B = np.zeros((n_area, n_area, J))
C = np.zeros((n_area, J))
D = np.zeros((n_area, 1))
iter = 0
sum_e = (10 ** 6)
gamma = ini_select(y, lam_1)
sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
while ((iter < max_iter) and ((abs((sum_e - sum_e_1)) / sum_e_1) > tol)):
stp = 1
while ((stp < 10) and (iter > 1)):
results = gr(gamma, A, B, C, D, lam, mu, lam_1)
n_results = np.sum((results ** 2))
gamma_1 = gamma.copy()
f_t = 1
fixed = likelihood(gamma, A, B, C, D, lam, mu, lam_1)
while (likelihood((gamma - (f_t * results)), A, B, C, D, lam, mu, lam_1) > (fixed - ((0.5 * f_t) * n_results))):
f_t = (0.8 * f_t)
gamma = (gamma - (results * f_t))
stp = (stp + 1)
if (config.B_u == True):
tmp = update_all_3(gamma, mu=0)
A = tmp[(:, 0:n_area)]
for j in range(J):
B[(:, :, j)] = tmp[(:, ((j + 1) * n_area):((j + 2) * n_area))]
C = tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
elif (config.C_u == True):
tmp = update_all_2(gamma, mu=0)
A = tmp[(:, 0:n_area)]
C = tmp[(:, n_area:(n_area + J))]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
else:
tmp = update_all_1(gamma, mu=0)
A = tmp[(:, 0:n_area)]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
sum_e = sum_e_1
sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
iter += 1
(e1, e2, plt, plt_1) = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)
if (multi == False):
config.gamma = gamma
config.A = A
config.B = B
config.C = C
config.D = D
config.lamu = lamu
config.e1 = e1
config.e2 = e2
config.plt = plt
config.plt_1 = plt_1
config.t_i = configpara.t_i
pickle_file_1 = (file_name_dir + 'results/result.pkl')
f = open(pickle_file_1, 'wb')
save = {'estimated_x': np.dot(config.gamma, configpara.Q2_all), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt ** 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return
else:
pickle_file_1 = ((((((pickle_file + str_1(lam)) + '_') + str_1((mu * lam))) + '_') + str_1(lam_1)) + '.pickle')
f = open(pickle_file_1, 'wb')
save = {'result': [lamu, gamma, A, B, C, D, e1, e2, plt, plt_1]}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return | def update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, multi, lamu):
'\n users can provide constraints for the value range of elements in connectivity matrices, A and B. This\n can be easily done by modifying "update" functions. For example, if the negative diagonal value is required,\n we can add additional constraints on that. \n The main algorithm, updating parameter for a defined problem\n\n Parameters\n -----------\n file_name_dir: dir of problem folder\n precomp_dir: dir of precomputed data\n pickele_file: file name which we use to save estimations\n lamu: list = [lam, mu, lam_1], in our paper, mu is set to be zero. lam*mu is the coefficient \n for l2 norm penalty of A, B, C\n tol, max_iter:\n multi: bool variable, Default True\n '
configpara = Modelpara((precomp_dir + 'precomp.pkl'))
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
P1 = configpara.P1
P2 = configpara.P2
P3 = configpara.P3
P4 = configpara.P4
P5 = configpara.P5
P6 = configpara.P6
P7 = configpara.P7
P8 = configpara.P8
P9 = configpara.P9
P10 = configpara.P10
P11 = configpara.P11
P12 = configpara.P12
P13 = configpara.P13
P14 = configpara.P14
P15 = configpara.P15
Q1 = configpara.Q1
Q2 = configpara.Q2
Q3 = configpara.Q3
Q4 = configpara.Q4
Omega = configpara.Omega
y = config.y
n_area = config.n_area
p = configpara.p
t_i = configpara.t_i
l_t = configpara.l_t
J = configpara.J
t_T = configpara.t_T
dt = configpara.dt
row_n = configpara.row_n
fold = configpara.fold
def gr(gamma, A, B, C, D, lam, mu, lam_1):
g = np.zeros((n_area, p))
g = ((g + np.dot(gamma, P1)) - np.dot(np.dot(np.transpose(A), gamma), np.transpose(P2)))
g = ((g - np.dot(np.dot(A, gamma), P2)) + np.dot(np.dot(np.dot(np.transpose(A), A), gamma), P5))
tmp_1 = 0
tmp_2 = 0
for j in range(J):
tmp_1 = (tmp_1 + np.dot(np.dot(B[(:, :, j)], gamma), P3[(:, :, j)]))
tmp_2 = (tmp_2 + np.dot(np.dot(np.dot(np.transpose(A), B[(:, :, j)]), gamma), P6[(:, :, j)]))
g = (g - (tmp_1 - tmp_2))
g = ((g - np.dot(C, P4)) + np.dot(np.dot(np.transpose(A), C), P7))
g = ((g - np.dot(D, P8)) + np.dot(np.dot(np.transpose(A), D), P9))
tmp = 0
for l in range(J):
tmp_1 = 0
for j in range(J):
tmp_1 = np.dot(np.dot(B[(:, :, j)], gamma), P10[(:, :, j, l)])
tmp = (tmp - np.dot(np.transpose(B[(:, :, l)]), ((((np.dot(gamma, np.transpose(P3[(:, :, l)])) - np.dot(np.dot(A, gamma), np.transpose(P6[(:, :, l)]))) - tmp_1) - np.dot(C, P13[(:, :, l)])) - np.dot(D, P11[(l, :)].reshape((1, (- 1)))))))
g += tmp
g = ((g * 2) * lam)
tmp1 = np.zeros((n_area, 1))
tmp2 = np.zeros((n_area, J))
for m in range(n_area):
tmp1[(m, 0)] = (np.sum(abs(A[(:, m)])) / (np.dot(np.dot(gamma[(m, :)], P5), gamma[(m,)]) ** 0.5))
for j in range(J):
tmp2[(m, j)] = (np.sum(abs(B[(:, m, j)])) / (np.dot(np.dot(gamma[(m, :)], P10[(:, :, j, j)]), gamma[(m, :)]) ** 0.5))
g = (g + (((lam * mu) * np.dot(gamma, np.transpose(P5))) * tmp1))
for j in range(J):
g = (g + (((lam * mu) * np.dot(gamma, P10[(:, :, j, j)])) * tmp2[(:, j)].reshape(((- 1), 1))))
g = (g + (np.dot((np.dot(gamma, np.transpose(P12)) - y), P12) * 2))
g = (g + ((2 * lam_1) * np.dot(gamma, np.transpose(Omega))))
g[np.isnan(g)] = 0
return g
def cd_thre(tmp, tmp_1, mu):
mu = (mu / 2.0)
if (abs(tmp) > (mu * (tmp_1 ** 0.5))):
return ((np.sign(tmp) * (abs(tmp) - (mu * (tmp_1 ** 0.5)))) / tmp_1)
else:
return 0
def update_A(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, j)], gamma), P6[(:, :, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P5), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P2), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), P5), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P7), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P9[(0, :)]))) + (A[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_B(m, n, j, gamma, A, B, C, D, mu):
tmp_0 = 0
for l in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(np.dot(B[(m, :, l)], gamma), P10[(:, :, l, j)]), gamma[(n, :)]))
tmp_1 = np.dot(np.dot(gamma[(n, :)], P10[(:, :, j, j)]), gamma[(n, :)])
tmp = (((((np.dot(np.dot(gamma[(n, :)], P3[(:, :, j)]), gamma[(m, :)]) - np.dot(np.dot(np.dot(A[(m, :)], gamma), np.transpose(P6[(:, :, j)])), gamma[(n, :)])) - tmp_0) - np.dot(np.dot(C[(m, :)], P13[(:, :, j)]), gamma[(n, :)])) - (D[(m, 0)] * np.dot(gamma[(n, :)], P11[(j, :)]))) + (B[(m, n, j)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_C(m, n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = (tmp_0 + np.dot(np.dot(B[(m, :, j)], gamma), P13[(n, :, j)]))
tmp_1 = P14[(n, n)]
tmp = (((((np.dot(gamma[(m, :)], P4[(n, :)]) - np.dot(np.dot(A[(m, :)], gamma), P7[(n, :)])) - tmp_0) - np.dot(C[(m, :)], P14[(n, :)])) - (D[(m, 0)] * P15[(0, n)])) + (C[(m, n)] * tmp_1))
return cd_thre(tmp, tmp_1, mu)
def update_D(gamma, A, B, C):
tmp = (np.dot(gamma, np.transpose(P8)) - np.dot(np.dot(A, gamma), np.transpose(P9)))
for j in range(J):
tmp = (tmp - np.dot(np.dot(B[(:, :, j)], gamma), P11[(j, :)]).reshape(((- 1), 1)))
tmp = (tmp - np.dot(C, np.transpose(P15)))
return ((tmp * 1.0) / t_T)
def likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=False):
    """Evaluate the penalized objective for the current parameters.

    Parameters
    ----------
    gamma : basis coefficients of the neuronal activities.
    A, B, C, D : connectivity, modulation, stimulus-effect and intercept terms.
    lam, mu, lam_1 : tuning parameters (lam weights the ODE fit, lam*mu the
        weighted l1 penalty, lam_1 the roughness penalty).
    p_t : when True, return the tuple (e1, e2, plt, plt_1) of the individual
        terms instead of the scalar objective.

    NOTE: invalid serialized `X[(:, :, j)]` subscripts restored to slices.
    """
    # e1: squared data-fit error between observed y and its basis expansion.
    e1 = np.sum((y - np.dot(gamma, np.transpose(P12))) ** 2)
    # e2: integrated squared residual of the ODE system.
    e2 = 0
    tmp_0 = 0
    for j in range(J):
        tmp_0 = tmp_0 + np.dot(np.dot(B[:, :, j], gamma), Q3[:, :, j])
    tmp = (np.dot(gamma, Q1) - np.dot(np.dot(A, gamma), Q2) - tmp_0
           - np.dot(C, Q4) - np.repeat(D, l_t, axis=1))
    for m in range(n_area):
        e2 = e2 + simps(tmp[m, :] ** 2, t_i)
    # plt: weighted l1 penalty on A, B (weights depend on gamma) and C.
    plt = 0
    for k in range(n_area):
        w_1k = np.dot(np.dot(gamma[k, :], P5), gamma[k, :]) ** 0.5
        plt = plt + np.sum(abs(A[:, k])) * w_1k
        for j in range(J):
            w_2kj = np.dot(np.dot(gamma[k, :], P10[:, :, j, j]), gamma[k, :]) ** 0.5
            plt = plt + np.sum(abs(B[:, k, j])) * w_2kj
    for k in range(J):
        w_3k = P14[k, k] ** 0.5
        plt = plt + np.sum(abs(C[:, k])) * w_3k
    # plt_1: roughness (second-derivative) penalty on the activities.
    plt_1 = 0
    for i in range(n_area):
        plt_1 = plt_1 + np.dot(np.dot(gamma[i, :], Omega), gamma[i, :])
    sum_e = e1 + lam * e2 + lam * mu * plt + lam_1 * plt_1
    if p_t:
        return (e1, e2, plt, plt_1)
    return sum_e
def update_all_3(gamma, mu=0):
    """Second step: closed-form least-squares update of A, B, C (and D).

    Solves the normal equations [A | B_1..B_J | C | D] = Y_tmp * pinv(X_tmp
    + mu * I_tmp) for the full model (B and C included).

    Parameters
    ----------
    gamma : numpy array of basis coefficients.
    mu : extra ridge tuning parameter (not used in the paper; provided for
        users who want an l2 penalty on A, B, C).

    NOTE: invalid serialized `X[(:, :, j)]` subscripts restored to slices;
    the duplicated trailing return statements were merged into one.
    """
    n_all = n_area * (J + 1) + J + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # Block-diagonal ridge weights: gamma-dependent for A and B, fixed for C.
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    W_B = np.zeros((n_area, n_area, J))
    for j in range(J):
        for i in range(n_area):
            W_B[i, i, j] = np.dot(np.dot(gamma[i, :], P10[:, :, j, j]), np.transpose(gamma[i, :]))
        I_tmp[(j + 1) * n_area:(j + 2) * n_area, (j + 1) * n_area:(j + 2) * n_area] = W_B[:, :, j]
    W_C = np.zeros((J, J))
    for j in range(J):
        W_C[j, j] = P14[j, j]
    I_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (J + 1) * n_area:(J + 1) * n_area + J] = W_C
    # Right-hand side: [A-block | B-blocks | C-block | D-column].
    for j in range(J + 1):
        if j == 0:
            Y_tmp[:, j * n_area:(j + 1) * n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
        else:
            Y_tmp[:, j * n_area:(j + 1) * n_area] = np.dot(np.dot(gamma, np.transpose(P3[:, :, j - 1])), np.transpose(gamma))
    Y_tmp[:, (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P4))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix X_tmp: first block-column (against the A-block).
    for j in range(J + 1):
        if j == 0:
            X_tmp[j * n_area:(j + 1) * n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
        else:
            X_tmp[j * n_area:(j + 1) * n_area, 0:n_area] = np.dot(np.dot(gamma, P6[:, :, j - 1]), np.transpose(gamma))
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, 0:n_area] = np.dot(P7, np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    # B-block by B-block interactions.
    tmp = np.zeros((n_area * J, n_area * J))
    for j in range(J):
        for l in range(J):
            tmp[j * n_area:(j + 1) * n_area, l * n_area:(l + 1) * n_area] = np.dot(np.dot(gamma, P10[:, :, j, l]), np.transpose(gamma))
    # Columns against the B-blocks.
    for j in range(J):
        X_tmp[0:n_area, (j + 1) * n_area:(j + 2) * n_area] = np.dot(np.dot(gamma, np.transpose(P6[:, :, j])), np.transpose(gamma))
        X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (j + 1) * n_area:(j + 2) * n_area] = np.dot(P13[:, :, j], np.transpose(gamma))
        X_tmp[-1, (j + 1) * n_area:(j + 2) * n_area] = np.dot(P11[j, :].reshape((1, -1)), np.transpose(gamma))
    X_tmp[n_area:(J + 1) * n_area, n_area:(J + 1) * n_area] = tmp
    # Columns against the C-block.
    X_tmp[0:n_area, (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P7))
    for j in range(J):
        X_tmp[n_area * (j + 1):n_area * (j + 2), (J + 1) * n_area:(J + 1) * n_area + J] = np.dot(gamma, np.transpose(P13[:, :, j]))
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, (J + 1) * n_area:(J + 1) * n_area + J] = P14
    X_tmp[-1, (J + 1) * n_area:(J + 1) * n_area + J] = P15
    # Last column (against D).
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    for j in range(J):
        X_tmp[n_area * (j + 1):n_area * (j + 2), -1] = np.dot(gamma, np.transpose(P11[j, :])).reshape(-1)
    X_tmp[(J + 1) * n_area:(J + 1) * n_area + J, -1] = np.transpose(P15).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # Drop the intercept row/column when D is not estimated.
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def update_all_2(gamma, mu):
    """Closed-form least-squares update of A, C (and D) for the case B = 0.

    Same normal-equation structure as update_all_3 with the B-blocks removed.
    NOTE: invalid serialized `X[(:, :, j)]` subscripts restored to slices;
    the duplicated trailing return statements were merged into one.
    """
    n_all = n_area + J + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # Block-diagonal ridge weights for A (gamma-dependent) and C (fixed).
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    W_C = np.zeros((J, J))
    for j in range(J):
        W_C[j, j] = P14[j, j]
    I_tmp[n_area:n_area + J, n_area:n_area + J] = W_C
    # Right-hand side: [A-block | C-block | D-column].
    Y_tmp[:, 0:n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
    Y_tmp[:, n_area:n_area + J] = np.dot(gamma, np.transpose(P4))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix X_tmp.
    X_tmp[0:n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
    X_tmp[n_area:n_area + J, 0:n_area] = np.dot(P7, np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    X_tmp[0:n_area, n_area:n_area + J] = np.dot(gamma, np.transpose(P7))
    X_tmp[n_area:n_area + J, n_area:n_area + J] = P14
    X_tmp[-1, n_area:n_area + J] = P15
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    X_tmp[n_area:n_area + J, -1] = np.transpose(P15).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # Drop the intercept row/column when D is not estimated.
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def update_all_1(gamma, mu):
    """Closed-form least-squares update of A (and D) for the case B = 0, C = 0.

    NOTE: invalid serialized `X[(:, :, j)]` subscripts restored to slices.
    The original also computed `np.linalg.eig(X_tmp)` into an unused local
    (`s_eig`); that dead and expensive computation has been removed.  The
    duplicated trailing return statements were merged into one.
    """
    n_all = n_area + 1
    Y_tmp = np.zeros((n_area, n_all))
    X_tmp = np.zeros((n_all, n_all))
    I_tmp = np.zeros((n_all, n_all))
    # Gamma-dependent ridge weights for A.
    W_A = np.zeros((n_area, n_area))
    for i in range(n_area):
        W_A[i, i] = np.dot(np.dot(gamma[i, :], P5), np.transpose(gamma[i, :]))
    I_tmp[0:n_area, 0:n_area] = W_A
    # Right-hand side: [A-block | D-column].
    Y_tmp[:, 0:n_area] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
    Y_tmp[:, -1] = np.dot(gamma, np.transpose(P8)).reshape((-1,))
    # Gram matrix X_tmp.
    X_tmp[0:n_area, 0:n_area] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
    X_tmp[-1, 0:n_area] = np.dot(P9, np.transpose(gamma))
    X_tmp[0:n_area, -1] = np.dot(gamma, np.transpose(P9)).reshape(-1)
    X_tmp[-1, -1] = t_T
    if config.D_u == False:
        # Drop the intercept row/column when D is not estimated.
        Y_tmp = Y_tmp[:, 0:-1]
        X_tmp = X_tmp[0:-1, 0:-1]
        I_tmp = I_tmp[0:-1, 0:-1]
    return np.dot(Y_tmp, np.linalg.pinv(X_tmp + mu * I_tmp))
def ini_select(y, lam_1, P12=P12, Omega=Omega):
    """Select an initial value for gamma, which may help avoid local minima.

    Parameters
    ----------
    lam_1 : scalar, penalty for the second derivative of the neuronal
        activities x.
    """
    # Warm-start from the zero solution refined by a single weighted
    # least-squares pass.
    initial = np.zeros((n_area, p))
    return error_ws(y, initial, lam_1, P12, Omega)
def str_1(num):
    """Format a tuning-parameter value for use in a pickle file name.

    Values >= 1 are rendered as their integer part; values < 1 keep their
    digits with the decimal point removed (e.g. 0.05 -> '005').

    NOTE: the original initialized the accumulator with a line that had lost
    its empty-string literal (`num_1 =`), a syntax error; the char-by-char
    loop is replaced by the equivalent str.replace.
    """
    if num >= 1:
        return str(int(num))
    # Drop the decimal point so the value is file-name safe.
    return str(num).replace('.', '')
# --- Main body of update_p: block coordinate descent ---
# Unpack tuning parameters: lam weights the ODE-fit term, lam*mu is the
# coefficient of the l2 penalty on (A, B, C) (mu is 0 in the paper), and
# lam_1 weights the roughness penalty on the neuronal activities.
lam = lamu[0]
mu = lamu[1]
lam_1 = lamu[(- 1)]
# Start from zero connectivity (A), modulation (B), stimulus effect (C)
# and intercept (D).
A = np.zeros((n_area, n_area))
B = np.zeros((n_area, n_area, J))
C = np.zeros((n_area, J))
D = np.zeros((n_area, 1))
iter = 0
# Large sentinel so the first relative-change convergence test passes.
sum_e = (10 ** 6)
gamma = ini_select(y, lam_1)
# Track the ODE-fit term e2 (index 1 of the p_t=True tuple) for convergence.
sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
# Alternate (a) gradient descent on gamma with backtracking line search and
# (b) closed-form least-squares updates of (A, B, C, D), until the relative
# change in e2 falls below tol or max_iter is reached.
while ((iter < max_iter) and ((abs((sum_e - sum_e_1)) / sum_e_1) > tol)):
stp = 1
# Up to 9 gradient steps on gamma per outer iteration; skipped during the
# first outer iterations (iter <= 1) while (A, B, C, D) are still crude.
while ((stp < 10) and (iter > 1)):
results = gr(gamma, A, B, C, D, lam, mu, lam_1)
n_results = np.sum((results ** 2))
gamma_1 = gamma.copy()
f_t = 1
fixed = likelihood(gamma, A, B, C, D, lam, mu, lam_1)
# Backtracking (Armijo-style) line search: shrink the step by 0.8 until
# the objective decreases by at least 0.5 * f_t * ||grad||^2.
while (likelihood((gamma - (f_t * results)), A, B, C, D, lam, mu, lam_1) > (fixed - ((0.5 * f_t) * n_results))):
f_t = (0.8 * f_t)
gamma = (gamma - (results * f_t))
stp = (stp + 1)
# Closed-form update of the linear parameters; the solver depends on
# whether the modulation (B) and/or stimulus (C) terms are in the model.
if (config.B_u == True):
tmp = update_all_3(gamma, mu=0)
A = tmp[(:, 0:n_area)]
for j in range(J):
B[(:, :, j)] = tmp[(:, ((j + 1) * n_area):((j + 2) * n_area))]
C = tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
elif (config.C_u == True):
tmp = update_all_2(gamma, mu=0)
A = tmp[(:, 0:n_area)]
C = tmp[(:, n_area:(n_area + J))]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
else:
tmp = update_all_1(gamma, mu=0)
A = tmp[(:, 0:n_area)]
if (config.D_u == True):
D = tmp[(:, (- 1))].reshape(((- 1), 1))
sum_e = sum_e_1
sum_e_1 = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)[1]
iter += 1
# Final decomposition of the objective into its four terms for reporting.
(e1, e2, plt, plt_1) = likelihood(gamma, A, B, C, D, lam, mu, lam_1, p_t=True)
if (multi == False):
# Single-run mode: store everything on the config object and pickle the
# full result to <file_name_dir>/results/result.pkl.
config.gamma = gamma
config.A = A
config.B = B
config.C = C
config.D = D
config.lamu = lamu
config.e1 = e1
config.e2 = e2
config.plt = plt
config.plt_1 = plt_1
config.t_i = configpara.t_i
pickle_file_1 = (file_name_dir + 'results/result.pkl')
f = open(pickle_file_1, 'wb')
# NOTE(review): the 't' grid below uses (configpara.dt ** 0.5) as the
# arange endpoint slack, while select_lamu builds the same grid with
# (configpara.dt * 0.5) -- one of the two is likely a typo; confirm.
save = {'estimated_x': np.dot(config.gamma, configpara.Q2_all), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt ** 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return
else:
# Multi-processing mode: pickle this (lam, mu, lam_1) run under a name
# derived from the tuning parameters so the parent process can later
# compare runs via cross-validation.
pickle_file_1 = ((((((pickle_file + str_1(lam)) + '_') + str_1((mu * lam))) + '_') + str_1(lam_1)) + '.pickle')
f = open(pickle_file_1, 'wb')
save = {'result': [lamu, gamma, A, B, C, D, e1, e2, plt, plt_1]}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return<|docstring|>users can provide constraints for the value range of elements in connectivity matrices, A and B. This
can be easily done by modifying "update" functions. For example, if the negative diagonal value is required,
we can add additional constraints on that.
The main algorithm, updating parameter for a defined problem
Parameters
-----------
file_name_dir: dir of problem folder
precomp_dir: dir of precomputed data
pickele_file: file name which we use to save estimations
lamu: list = [lam, mu, lam_1], in our paper, mu is set to be zero. lam*mu is the coefficient
for l2 norm penalty of A, B, C
tol, max_iter:
multi: bool variable, Default True<|endoftext|> |
acc7755e5321bfa755570677c507ebf0827413a9d221fc65198bdae58ce5194d | def select_lamu(lam, mu, lam_1, file_name_dir, pickle_file, precomp_dir, val_data_dir=None, val_precomp_dir=None, num_cores=1, tol=0.01, max_iter=100):
'\n wrapper for selecting the tuning parameters of one subject\n See function update_p for details of variables meaning\n\n Parameters\n -----------\n num_cores : int, allow multi-processing, default None\n\n Returns\n -----------\n An instance of Modelconfig, including all summaries of estimation for one subject\n '
para = list()
for i in range(len(lam)):
for j in range(len(mu)):
for k in range(len(lam_1)):
para.append((lam[i], mu[j], lam_1[k]))
if (len(para) >= 1):
if (num_cores > 1):
pool = mp.Pool(processes=min(len(para), num_cores))
print('begin multiprocessing with {0} cores'.format(num_cores))
update_p_1 = partial(update_p, file_name_dir, precomp_dir, pickle_file, tol, max_iter, True)
pool.map(update_p_1, para)
pool.close()
pool.join()
else:
for i in range(len(para)):
update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, True, para[i])
results = list()
file_config = glob.glob((pickle_file + '*.pickle'))
for i in range(len(file_config)):
f = open(file_config[i], 'rb')
if six.PY2:
save = pkl.load(f)
else:
save = pkl.load(f, encoding='latin1')
results.append(save['result'])
pickle_file_1 = (file_name_dir + 'results/result.pkl')
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
if ((not val_data_dir) or (not val_precomp_dir)):
val_data_dir = precomp_dir
val_precomp_dir = precomp_dir
configpara = Modelpara((val_precomp_dir + 'precomp.pkl'))
with open((val_data_dir + 'observed.pkl'), 'rb') as f:
if six.PY2:
y = pkl.load(f)['y']
else:
y = pkl.load(f, encoding='latin1')['y']
if (len(results) > 1):
(ind, _) = cross_validation(y, configpara, results)
else:
ind = 0
config.t_i = configpara.t_i
config.lamu = results[ind][0]
config.A = results[ind][2]
config.B = results[ind][3]
config.C = results[ind][4]
config.D = results[ind][5]
config.gamma = results[ind][1]
config.e1 = results[ind][6]
config.e2 = results[ind][7]
config.plt = results[ind][8]
config.plt_1 = results[ind][9]
Q2 = configpara.Q2_all
fold = configpara.fold
f = open(pickle_file_1, 'wb')
save = {'estimated_x': np.dot(config.gamma, Q2[(:, 0:(Q2.shape[1] + 1):int((1 / fold)))]), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(configpara.P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt * 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return config | wrapper for selecting the tuning parameters of one subject
See function update_p for details of variables meaning
Parameters
-----------
num_cores : int, allow multi-processing, default None
Returns
-----------
An instance of Modelconfig, including all summaries of estimation for one subject | cdn/main_computation.py | select_lamu | xuefeicao/CDN | 11 | python | def select_lamu(lam, mu, lam_1, file_name_dir, pickle_file, precomp_dir, val_data_dir=None, val_precomp_dir=None, num_cores=1, tol=0.01, max_iter=100):
'\n wrapper for selecting the tuning parameters of one subject\n See function update_p for details of variables meaning\n\n Parameters\n -----------\n num_cores : int, allow multi-processing, default None\n\n Returns\n -----------\n An instance of Modelconfig, including all summaries of estimation for one subject\n '
para = list()
for i in range(len(lam)):
for j in range(len(mu)):
for k in range(len(lam_1)):
para.append((lam[i], mu[j], lam_1[k]))
if (len(para) >= 1):
if (num_cores > 1):
pool = mp.Pool(processes=min(len(para), num_cores))
print('begin multiprocessing with {0} cores'.format(num_cores))
update_p_1 = partial(update_p, file_name_dir, precomp_dir, pickle_file, tol, max_iter, True)
pool.map(update_p_1, para)
pool.close()
pool.join()
else:
for i in range(len(para)):
update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, True, para[i])
results = list()
file_config = glob.glob((pickle_file + '*.pickle'))
for i in range(len(file_config)):
f = open(file_config[i], 'rb')
if six.PY2:
save = pkl.load(f)
else:
save = pkl.load(f, encoding='latin1')
results.append(save['result'])
pickle_file_1 = (file_name_dir + 'results/result.pkl')
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
if ((not val_data_dir) or (not val_precomp_dir)):
val_data_dir = precomp_dir
val_precomp_dir = precomp_dir
configpara = Modelpara((val_precomp_dir + 'precomp.pkl'))
with open((val_data_dir + 'observed.pkl'), 'rb') as f:
if six.PY2:
y = pkl.load(f)['y']
else:
y = pkl.load(f, encoding='latin1')['y']
if (len(results) > 1):
(ind, _) = cross_validation(y, configpara, results)
else:
ind = 0
config.t_i = configpara.t_i
config.lamu = results[ind][0]
config.A = results[ind][2]
config.B = results[ind][3]
config.C = results[ind][4]
config.D = results[ind][5]
config.gamma = results[ind][1]
config.e1 = results[ind][6]
config.e2 = results[ind][7]
config.plt = results[ind][8]
config.plt_1 = results[ind][9]
Q2 = configpara.Q2_all
fold = configpara.fold
f = open(pickle_file_1, 'wb')
save = {'estimated_x': np.dot(config.gamma, Q2[(:, 0:(Q2.shape[1] + 1):int((1 / fold)))]), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(configpara.P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt * 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return config | def select_lamu(lam, mu, lam_1, file_name_dir, pickle_file, precomp_dir, val_data_dir=None, val_precomp_dir=None, num_cores=1, tol=0.01, max_iter=100):
'\n wrapper for selecting the tuning parameters of one subject\n See function update_p for details of variables meaning\n\n Parameters\n -----------\n num_cores : int, allow multi-processing, default None\n\n Returns\n -----------\n An instance of Modelconfig, including all summaries of estimation for one subject\n '
para = list()
for i in range(len(lam)):
for j in range(len(mu)):
for k in range(len(lam_1)):
para.append((lam[i], mu[j], lam_1[k]))
if (len(para) >= 1):
if (num_cores > 1):
pool = mp.Pool(processes=min(len(para), num_cores))
print('begin multiprocessing with {0} cores'.format(num_cores))
update_p_1 = partial(update_p, file_name_dir, precomp_dir, pickle_file, tol, max_iter, True)
pool.map(update_p_1, para)
pool.close()
pool.join()
else:
for i in range(len(para)):
update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, True, para[i])
results = list()
file_config = glob.glob((pickle_file + '*.pickle'))
for i in range(len(file_config)):
f = open(file_config[i], 'rb')
if six.PY2:
save = pkl.load(f)
else:
save = pkl.load(f, encoding='latin1')
results.append(save['result'])
pickle_file_1 = (file_name_dir + 'results/result.pkl')
config = Modelconfig((file_name_dir + 'data/observed.pkl'))
if ((not val_data_dir) or (not val_precomp_dir)):
val_data_dir = precomp_dir
val_precomp_dir = precomp_dir
configpara = Modelpara((val_precomp_dir + 'precomp.pkl'))
with open((val_data_dir + 'observed.pkl'), 'rb') as f:
if six.PY2:
y = pkl.load(f)['y']
else:
y = pkl.load(f, encoding='latin1')['y']
if (len(results) > 1):
(ind, _) = cross_validation(y, configpara, results)
else:
ind = 0
config.t_i = configpara.t_i
config.lamu = results[ind][0]
config.A = results[ind][2]
config.B = results[ind][3]
config.C = results[ind][4]
config.D = results[ind][5]
config.gamma = results[ind][1]
config.e1 = results[ind][6]
config.e2 = results[ind][7]
config.plt = results[ind][8]
config.plt_1 = results[ind][9]
Q2 = configpara.Q2_all
fold = configpara.fold
f = open(pickle_file_1, 'wb')
save = {'estimated_x': np.dot(config.gamma, Q2[(:, 0:(Q2.shape[1] + 1):int((1 / fold)))]), 'y': config.y, 'estimated_y': np.dot(config.gamma, np.transpose(configpara.P12)), 'gamma': config.gamma, 'A': config.A, 'B': config.B, 'C': config.C, 'D': config.D, 'lamu': config.lamu, 'e1': config.e1, 'e2': config.e2, 'plt_1': config.plt_1, 'plt': config.plt, 't': np.arange(0, ((configpara.dt * (configpara.row_n - 1)) + (configpara.dt * 0.5)), configpara.dt), 'n1': (int((configpara.t_i[0] / configpara.dt)) + 1)}
pkl.dump(save, f, pkl.HIGHEST_PROTOCOL)
f.close()
return config<|docstring|>wrapper for selecting the tuning parameters of one subject
See function update_p for details of variables meaning
Parameters
-----------
num_cores : int, allow multi-processing, default None
Returns
-----------
An instance of Modelconfig, including all summaries of estimation for one subject<|endoftext|> |
18b9fa412c3fb3824cd3f94cdd90f7284bf7c0b0d0b5e7acf141734bf3785230 | def update_all_3(gamma, mu=0):
'\n Second step for updating A, B, C\n\n Parameters\n -----------\n gamma: numpy array, \n mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty \n to the l2 norm of A, B, C\n '
n_all = (((n_area * (J + 1)) + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_B = np.zeros((n_area, n_area, J))
for j in range(J):
for i in range(n_area):
W_B[(i, i, j)] = np.dot(np.dot(gamma[(i, :)], P10[(:, :, j, j)]), np.transpose(gamma[(i, :)]))
I_tmp[(((j + 1) * n_area):((j + 2) * n_area), ((j + 1) * n_area):((j + 2) * n_area))] = W_B[(:, :, j)]
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = W_C
for j in range((J + 1)):
if (j == 0):
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
else:
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P3[(:, :, (j - 1))])), np.transpose(gamma))
Y_tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
for j in range((J + 1)):
if (j == 0):
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
else:
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P6[(:, :, (j - 1))]), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
tmp = np.zeros(((n_area * J), (n_area * J)))
for j in range(J):
for l in range(J):
tmp[((j * n_area):((j + 1) * n_area), (l * n_area):((l + 1) * n_area))] = np.dot(np.dot(gamma, P10[(:, :, j, l)]), np.transpose(gamma))
for j in range(J):
X_tmp[(0:n_area, ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(np.dot(gamma, np.transpose(P6[(:, :, j)])), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P13[(:, :, j)], np.transpose(gamma))
X_tmp[((- 1), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P11[(j, :)].reshape((1, (- 1))), np.transpose(gamma))
X_tmp[(n_area:((J + 1) * n_area), n_area:((J + 1) * n_area))] = tmp
X_tmp[(0:n_area, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P7))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P13[(:, :, j)]))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P14
X_tmp[((- 1), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), (- 1))] = np.dot(gamma, np.transpose(P11[(j, :)])).reshape((- 1))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | Second step for updating A, B, C
Parameters
-----------
gamma: numpy array,
mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty
to the l2 norm of A, B, C | cdn/main_computation.py | update_all_3 | xuefeicao/CDN | 11 | python | def update_all_3(gamma, mu=0):
'\n Second step for updating A, B, C\n\n Parameters\n -----------\n gamma: numpy array, \n mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty \n to the l2 norm of A, B, C\n '
n_all = (((n_area * (J + 1)) + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_B = np.zeros((n_area, n_area, J))
for j in range(J):
for i in range(n_area):
W_B[(i, i, j)] = np.dot(np.dot(gamma[(i, :)], P10[(:, :, j, j)]), np.transpose(gamma[(i, :)]))
I_tmp[(((j + 1) * n_area):((j + 2) * n_area), ((j + 1) * n_area):((j + 2) * n_area))] = W_B[(:, :, j)]
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = W_C
for j in range((J + 1)):
if (j == 0):
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
else:
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P3[(:, :, (j - 1))])), np.transpose(gamma))
Y_tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
for j in range((J + 1)):
if (j == 0):
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
else:
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P6[(:, :, (j - 1))]), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
tmp = np.zeros(((n_area * J), (n_area * J)))
for j in range(J):
for l in range(J):
tmp[((j * n_area):((j + 1) * n_area), (l * n_area):((l + 1) * n_area))] = np.dot(np.dot(gamma, P10[(:, :, j, l)]), np.transpose(gamma))
for j in range(J):
X_tmp[(0:n_area, ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(np.dot(gamma, np.transpose(P6[(:, :, j)])), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P13[(:, :, j)], np.transpose(gamma))
X_tmp[((- 1), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P11[(j, :)].reshape((1, (- 1))), np.transpose(gamma))
X_tmp[(n_area:((J + 1) * n_area), n_area:((J + 1) * n_area))] = tmp
X_tmp[(0:n_area, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P7))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P13[(:, :, j)]))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P14
X_tmp[((- 1), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), (- 1))] = np.dot(gamma, np.transpose(P11[(j, :)])).reshape((- 1))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | def update_all_3(gamma, mu=0):
'\n Second step for updating A, B, C\n\n Parameters\n -----------\n gamma: numpy array, \n mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty \n to the l2 norm of A, B, C\n '
n_all = (((n_area * (J + 1)) + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_B = np.zeros((n_area, n_area, J))
for j in range(J):
for i in range(n_area):
W_B[(i, i, j)] = np.dot(np.dot(gamma[(i, :)], P10[(:, :, j, j)]), np.transpose(gamma[(i, :)]))
I_tmp[(((j + 1) * n_area):((j + 2) * n_area), ((j + 1) * n_area):((j + 2) * n_area))] = W_B[(:, :, j)]
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = W_C
for j in range((J + 1)):
if (j == 0):
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
else:
Y_tmp[(:, (j * n_area):((j + 1) * n_area))] = np.dot(np.dot(gamma, np.transpose(P3[(:, :, (j - 1))])), np.transpose(gamma))
Y_tmp[(:, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
for j in range((J + 1)):
if (j == 0):
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
else:
X_tmp[((j * n_area):((j + 1) * n_area), 0:n_area)] = np.dot(np.dot(gamma, P6[(:, :, (j - 1))]), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
tmp = np.zeros(((n_area * J), (n_area * J)))
for j in range(J):
for l in range(J):
tmp[((j * n_area):((j + 1) * n_area), (l * n_area):((l + 1) * n_area))] = np.dot(np.dot(gamma, P10[(:, :, j, l)]), np.transpose(gamma))
for j in range(J):
X_tmp[(0:n_area, ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(np.dot(gamma, np.transpose(P6[(:, :, j)])), np.transpose(gamma))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P13[(:, :, j)], np.transpose(gamma))
X_tmp[((- 1), ((j + 1) * n_area):((j + 2) * n_area))] = np.dot(P11[(j, :)].reshape((1, (- 1))), np.transpose(gamma))
X_tmp[(n_area:((J + 1) * n_area), n_area:((J + 1) * n_area))] = tmp
X_tmp[(0:n_area, ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P7))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = np.dot(gamma, np.transpose(P13[(:, :, j)]))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P14
X_tmp[((- 1), ((J + 1) * n_area):(((J + 1) * n_area) + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
for j in range(J):
X_tmp[((n_area * (j + 1)):(n_area * (j + 2)), (- 1))] = np.dot(gamma, np.transpose(P11[(j, :)])).reshape((- 1))
X_tmp[(((J + 1) * n_area):(((J + 1) * n_area) + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))<|docstring|>Second step for updating A, B, C
Parameters
-----------
gamma: numpy array,
mu : this is an extra tuning parameter which is not used in paper, but provided for people who are interested to add penalty
to the l2 norm of A, B, C<|endoftext|> |
42fda253420237c97a80138661564e98544ab97181faab974b30908b792d490d | def update_all_2(gamma, mu):
'\n For the case when B = 0\n '
n_all = ((n_area + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[((1 * n_area):((1 * n_area) + J), (1 * n_area):((1 * n_area) + J))] = W_C
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (1 * n_area):((1 * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((1 * n_area):((1 * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, n_area:(n_area + J))] = np.dot(gamma, np.transpose(P7))
X_tmp[(n_area:(n_area + J), n_area:(n_area + J))] = P14
X_tmp[((- 1), n_area:(n_area + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[(n_area:(n_area + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | For the case when B = 0 | cdn/main_computation.py | update_all_2 | xuefeicao/CDN | 11 | python | def update_all_2(gamma, mu):
'\n \n '
n_all = ((n_area + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[((1 * n_area):((1 * n_area) + J), (1 * n_area):((1 * n_area) + J))] = W_C
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (1 * n_area):((1 * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((1 * n_area):((1 * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, n_area:(n_area + J))] = np.dot(gamma, np.transpose(P7))
X_tmp[(n_area:(n_area + J), n_area:(n_area + J))] = P14
X_tmp[((- 1), n_area:(n_area + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[(n_area:(n_area + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | def update_all_2(gamma, mu):
'\n \n '
n_all = ((n_area + J) + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
W_C = np.zeros((J, J))
for j in range(J):
W_C[(j, j)] = P14[(j, j)]
I_tmp[((1 * n_area):((1 * n_area) + J), (1 * n_area):((1 * n_area) + J))] = W_C
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (1 * n_area):((1 * n_area) + J))] = np.dot(gamma, np.transpose(P4))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((1 * n_area):((1 * n_area) + J), 0:n_area)] = np.dot(P7, np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, n_area:(n_area + J))] = np.dot(gamma, np.transpose(P7))
X_tmp[(n_area:(n_area + J), n_area:(n_area + J))] = P14
X_tmp[((- 1), n_area:(n_area + J))] = P15
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[(n_area:(n_area + J), (- 1))] = np.transpose(P15).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))<|docstring|>For the case when B = 0<|endoftext|> |
e314d65fbdcae8609f3980f3cffba5e978cfc94e3c642c312264128c757a7d5b | def update_all_1(gamma, mu):
'\n For the case B = 0 and C = 0\n '
n_all = (n_area + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
s_eig = np.sort(abs(np.linalg.eig(X_tmp)[0]))
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | For the case B = 0 and C = 0 | cdn/main_computation.py | update_all_1 | xuefeicao/CDN | 11 | python | def update_all_1(gamma, mu):
'\n \n '
n_all = (n_area + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
s_eig = np.sort(abs(np.linalg.eig(X_tmp)[0]))
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp)))) | def update_all_1(gamma, mu):
'\n \n '
n_all = (n_area + 1)
Y_tmp = np.zeros((n_area, n_all))
X_tmp = np.zeros((n_all, n_all))
I_tmp = np.zeros((n_all, n_all))
W_A = np.zeros((n_area, n_area))
for i in range(n_area):
W_A[(i, i)] = np.dot(np.dot(gamma[(i, :)], P5), np.transpose(gamma[(i, :)]))
I_tmp[(0:n_area, 0:n_area)] = W_A
Y_tmp[(:, 0:n_area)] = np.dot(np.dot(gamma, np.transpose(P2)), np.transpose(gamma))
Y_tmp[(:, (- 1))] = np.dot(gamma, np.transpose(P8)).reshape(((- 1),))
X_tmp[(0:n_area, 0:n_area)] = np.dot(np.dot(gamma, P5), np.transpose(gamma))
X_tmp[((- 1), 0:n_area)] = np.dot(P9, np.transpose(gamma))
X_tmp[(0:n_area, (- 1))] = np.dot(gamma, np.transpose(P9)).reshape((- 1))
X_tmp[((- 1), (- 1))] = t_T
s_eig = np.sort(abs(np.linalg.eig(X_tmp)[0]))
if (config.D_u == False):
Y_tmp = Y_tmp[(:, 0:(- 1))]
X_tmp = X_tmp[(0:(- 1), 0:(- 1))]
I_tmp = I_tmp[(0:(- 1), 0:(- 1))]
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))
return np.dot(Y_tmp, np.linalg.pinv((X_tmp + (mu * I_tmp))))<|docstring|>For the case B = 0 and C = 0<|endoftext|> |
bdbcce7caa17eebf93c4fbf94f65da5951ab0f615baeb88ea3be01231de64d82 | def ini_select(y, lam_1, P12=P12, Omega=Omega):
'\n selecting an initial for gamma which may help to avoid local minimum\n\n Parameters\n ------------- \n lam_1: scalar, penalty for the second derivative of neuronal activities x. \n '
gamma_0 = np.zeros((n_area, p))
gamma_0 = error_ws(y, gamma_0, lam_1, P12, Omega)
return gamma_0 | selecting an initial for gamma which may help to avoid local minimum
Parameters
-------------
lam_1: scalar, penalty for the second derivative of neuronal activities x. | cdn/main_computation.py | ini_select | xuefeicao/CDN | 11 | python | def ini_select(y, lam_1, P12=P12, Omega=Omega):
'\n selecting an initial for gamma which may help to avoid local minimum\n\n Parameters\n ------------- \n lam_1: scalar, penalty for the second derivative of neuronal activities x. \n '
gamma_0 = np.zeros((n_area, p))
gamma_0 = error_ws(y, gamma_0, lam_1, P12, Omega)
return gamma_0 | def ini_select(y, lam_1, P12=P12, Omega=Omega):
'\n selecting an initial for gamma which may help to avoid local minimum\n\n Parameters\n ------------- \n lam_1: scalar, penalty for the second derivative of neuronal activities x. \n '
gamma_0 = np.zeros((n_area, p))
gamma_0 = error_ws(y, gamma_0, lam_1, P12, Omega)
return gamma_0<|docstring|>selecting an initial for gamma which may help to avoid local minimum
Parameters
-------------
lam_1: scalar, penalty for the second derivative of neuronal activities x.<|endoftext|> |
c71f6e97a287a48a5358b31992c3b2887e23d5d0a9f90cd9c388565a8bda2c09 | def get_cast(device: Optional[str]=None) -> Tuple[(pychromecast.Chromecast, CCInfo)]:
'\n Attempt to connect with requested device (or any device if none has been specified).\n\n :param device: Can be an ip-address or a name.\n :type device: str\n :returns: Chromecast object for use in a CastController,\n and CCInfo object for use in setup_cast and StreamInfo\n :rtype: (pychromecast.Chromecast, CCInfo)\n '
cast = None
if (device and is_ipaddress(device)):
cast = get_chromecast_with_ip(device, DEFAULT_PORT)
if (not cast):
msg = 'No device found at {}'.format(device)
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, None, None, cast.cast_type)
else:
cache = Cache()
maybe_cc_info = cache.get_data(device)
if maybe_cc_info:
cast = get_chromecast_with_ip(maybe_cc_info.ip, maybe_cc_info.port)
cc_info = maybe_cc_info
if (not cast):
cast = get_chromecast(device)
if (not cast):
msg = ('Specified device "{}" not found'.format(device) if device else 'No devices found')
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, cast.device.manufacturer, cast.model_name, cast.cast_type)
cache.set_data(cast.name, cc_info)
cast.wait()
return (cast, cc_info) | Attempt to connect with requested device (or any device if none has been specified).
:param device: Can be an ip-address or a name.
:type device: str
:returns: Chromecast object for use in a CastController,
and CCInfo object for use in setup_cast and StreamInfo
:rtype: (pychromecast.Chromecast, CCInfo) | catt/controllers.py | get_cast | erdeiattila/catt | 1 | python | def get_cast(device: Optional[str]=None) -> Tuple[(pychromecast.Chromecast, CCInfo)]:
'\n Attempt to connect with requested device (or any device if none has been specified).\n\n :param device: Can be an ip-address or a name.\n :type device: str\n :returns: Chromecast object for use in a CastController,\n and CCInfo object for use in setup_cast and StreamInfo\n :rtype: (pychromecast.Chromecast, CCInfo)\n '
cast = None
if (device and is_ipaddress(device)):
cast = get_chromecast_with_ip(device, DEFAULT_PORT)
if (not cast):
msg = 'No device found at {}'.format(device)
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, None, None, cast.cast_type)
else:
cache = Cache()
maybe_cc_info = cache.get_data(device)
if maybe_cc_info:
cast = get_chromecast_with_ip(maybe_cc_info.ip, maybe_cc_info.port)
cc_info = maybe_cc_info
if (not cast):
cast = get_chromecast(device)
if (not cast):
msg = ('Specified device "{}" not found'.format(device) if device else 'No devices found')
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, cast.device.manufacturer, cast.model_name, cast.cast_type)
cache.set_data(cast.name, cc_info)
cast.wait()
return (cast, cc_info) | def get_cast(device: Optional[str]=None) -> Tuple[(pychromecast.Chromecast, CCInfo)]:
'\n Attempt to connect with requested device (or any device if none has been specified).\n\n :param device: Can be an ip-address or a name.\n :type device: str\n :returns: Chromecast object for use in a CastController,\n and CCInfo object for use in setup_cast and StreamInfo\n :rtype: (pychromecast.Chromecast, CCInfo)\n '
cast = None
if (device and is_ipaddress(device)):
cast = get_chromecast_with_ip(device, DEFAULT_PORT)
if (not cast):
msg = 'No device found at {}'.format(device)
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, None, None, cast.cast_type)
else:
cache = Cache()
maybe_cc_info = cache.get_data(device)
if maybe_cc_info:
cast = get_chromecast_with_ip(maybe_cc_info.ip, maybe_cc_info.port)
cc_info = maybe_cc_info
if (not cast):
cast = get_chromecast(device)
if (not cast):
msg = ('Specified device "{}" not found'.format(device) if device else 'No devices found')
raise CastError(msg)
cc_info = CCInfo(cast.host, cast.port, cast.device.manufacturer, cast.model_name, cast.cast_type)
cache.set_data(cast.name, cc_info)
cast.wait()
return (cast, cc_info)<|docstring|>Attempt to connect with requested device (or any device if none has been specified).
:param device: Can be an ip-address or a name.
:type device: str
:returns: Chromecast object for use in a CastController,
and CCInfo object for use in setup_cast and StreamInfo
:rtype: (pychromecast.Chromecast, CCInfo)<|endoftext|> |
2fae5ab6aac464acf27ce8841249fa1e60b6a5d300088295060d60269c4a4760 | def prep_app(self):
'Make sure desired chromecast app is running.'
if (not self._cast_listener.app_ready.is_set()):
self._cast.start_app(self._cast_listener.app_id)
self._cast_listener.app_ready.wait() | Make sure desired chromecast app is running. | catt/controllers.py | prep_app | erdeiattila/catt | 1 | python | def prep_app(self):
if (not self._cast_listener.app_ready.is_set()):
self._cast.start_app(self._cast_listener.app_id)
self._cast_listener.app_ready.wait() | def prep_app(self):
if (not self._cast_listener.app_ready.is_set()):
self._cast.start_app(self._cast_listener.app_id)
self._cast_listener.app_ready.wait()<|docstring|>Make sure desired chromecast app is running.<|endoftext|> |
ca1ad5f95a38ecf7a1ad6c1c47b9dec5d79916b2bd34c31628e096fd1820ceb9 | def prep_control(self):
'Make sure chromecast is not inactive or idle.'
self._check_inactive()
self._update_status()
if self._is_idle:
raise CastError('Nothing is currently playing') | Make sure chromecast is not inactive or idle. | catt/controllers.py | prep_control | erdeiattila/catt | 1 | python | def prep_control(self):
self._check_inactive()
self._update_status()
if self._is_idle:
raise CastError('Nothing is currently playing') | def prep_control(self):
self._check_inactive()
self._update_status()
if self._is_idle:
raise CastError('Nothing is currently playing')<|docstring|>Make sure chromecast is not inactive or idle.<|endoftext|> |
4b9cbfd79e6dae36b7c50a84465bc7bffeb8e8acec9052b12692a000fc707ff4 | def prep_info(self):
'Make sure chromecast is not inactive.'
self._check_inactive()
self._update_status() | Make sure chromecast is not inactive. | catt/controllers.py | prep_info | erdeiattila/catt | 1 | python | def prep_info(self):
self._check_inactive()
self._update_status() | def prep_info(self):
self._check_inactive()
self._update_status()<|docstring|>Make sure chromecast is not inactive.<|endoftext|> |
5e491c2378bbb853e6bd2fd55dfda1439e31ef1e59f39fc69f7e02c975b5f652 | def kill(self, idle_only=False, force=False):
'\n Kills current Chromecast session.\n\n :param idle_only: If set, session is only killed if the active Chromecast app\n is idle. Use to avoid killing an active streaming session\n when catt fails with certain invalid actions (such as trying\n to cast an empty playlist).\n :type idle_only: bool\n :param force: If set, a dummy chromecast app is launched before killing the session.\n This is a workaround for some devices that do not respond to this\n command under certain circumstances.\n :type force: bool\n '
if (idle_only and (not self._is_idle)):
return
if force:
listener = CastStatusListener(CLOUD_APP_ID)
self._cast.register_status_listener(listener)
self._cast.start_app(CLOUD_APP_ID)
listener.app_ready.wait()
self._cast.quit_app() | Kills current Chromecast session.
:param idle_only: If set, session is only killed if the active Chromecast app
is idle. Use to avoid killing an active streaming session
when catt fails with certain invalid actions (such as trying
to cast an empty playlist).
:type idle_only: bool
:param force: If set, a dummy chromecast app is launched before killing the session.
This is a workaround for some devices that do not respond to this
command under certain circumstances.
:type force: bool | catt/controllers.py | kill | erdeiattila/catt | 1 | python | def kill(self, idle_only=False, force=False):
'\n Kills current Chromecast session.\n\n :param idle_only: If set, session is only killed if the active Chromecast app\n is idle. Use to avoid killing an active streaming session\n when catt fails with certain invalid actions (such as trying\n to cast an empty playlist).\n :type idle_only: bool\n :param force: If set, a dummy chromecast app is launched before killing the session.\n This is a workaround for some devices that do not respond to this\n command under certain circumstances.\n :type force: bool\n '
if (idle_only and (not self._is_idle)):
return
if force:
listener = CastStatusListener(CLOUD_APP_ID)
self._cast.register_status_listener(listener)
self._cast.start_app(CLOUD_APP_ID)
listener.app_ready.wait()
self._cast.quit_app() | def kill(self, idle_only=False, force=False):
'\n Kills current Chromecast session.\n\n :param idle_only: If set, session is only killed if the active Chromecast app\n is idle. Use to avoid killing an active streaming session\n when catt fails with certain invalid actions (such as trying\n to cast an empty playlist).\n :type idle_only: bool\n :param force: If set, a dummy chromecast app is launched before killing the session.\n This is a workaround for some devices that do not respond to this\n command under certain circumstances.\n :type force: bool\n '
if (idle_only and (not self._is_idle)):
return
if force:
listener = CastStatusListener(CLOUD_APP_ID)
self._cast.register_status_listener(listener)
self._cast.start_app(CLOUD_APP_ID)
listener.app_ready.wait()
self._cast.quit_app()<|docstring|>Kills current Chromecast session.
:param idle_only: If set, session is only killed if the active Chromecast app
is idle. Use to avoid killing an active streaming session
when catt fails with certain invalid actions (such as trying
to cast an empty playlist).
:type idle_only: bool
:param force: If set, a dummy chromecast app is launched before killing the session.
This is a workaround for some devices that do not respond to this
command under certain circumstances.
:type force: bool<|endoftext|> |
f2f64d5577b9069eb011472c201ba07a4714dfefab6fa72976d2c7838b4389eb | def prep_app(self):
'Make sure desired chromecast app is running.'
self._cast.start_app(self._cast_listener.app_id, force_launch=True)
self._cast_listener.app_ready.wait() | Make sure desired chromecast app is running. | catt/controllers.py | prep_app | erdeiattila/catt | 1 | python | def prep_app(self):
self._cast.start_app(self._cast_listener.app_id, force_launch=True)
self._cast_listener.app_ready.wait() | def prep_app(self):
self._cast.start_app(self._cast_listener.app_id, force_launch=True)
self._cast_listener.app_ready.wait()<|docstring|>Make sure desired chromecast app is running.<|endoftext|> |
771c062b4885c666c1674b8cebeed1e8d3786d9d0cae18fa20b1dd50bba0e8fa | def set_ppr_template_data(report_data):
'Set up the PPR search data for the report, modifying the original for the template output.'
set_addresses(report_data)
set_date_times(report_data)
set_vehicle_collateral(report_data)
set_general_collateral(report_data) | Set up the PPR search data for the report, modifying the original for the template output. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_ppr_template_data | cameron-freshworks/ppr | 0 | python | def set_ppr_template_data(report_data):
set_addresses(report_data)
set_date_times(report_data)
set_vehicle_collateral(report_data)
set_general_collateral(report_data) | def set_ppr_template_data(report_data):
set_addresses(report_data)
set_date_times(report_data)
set_vehicle_collateral(report_data)
set_general_collateral(report_data)<|docstring|>Set up the PPR search data for the report, modifying the original for the template output.<|endoftext|> |
0076b520a54b599e372af38598787f8899e52df9763c5fa77cd3ab28275d37d7 | def format_address(address):
'Replace address country code with description.'
if (('country' in address) and address['country']):
country = address['country']
if (country == 'CA'):
address['country'] = 'Canada'
elif (country == 'US'):
address['country'] = 'United States of America'
else:
try:
country = pycountry.countries.search_fuzzy(country)[0].name
address['country'] = country
except (AttributeError, TypeError):
address['country'] = country
return address | Replace address country code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | format_address | cameron-freshworks/ppr | 0 | python | def format_address(address):
if (('country' in address) and address['country']):
country = address['country']
if (country == 'CA'):
address['country'] = 'Canada'
elif (country == 'US'):
address['country'] = 'United States of America'
else:
try:
country = pycountry.countries.search_fuzzy(country)[0].name
address['country'] = country
except (AttributeError, TypeError):
address['country'] = country
return address | def format_address(address):
if (('country' in address) and address['country']):
country = address['country']
if (country == 'CA'):
address['country'] = 'Canada'
elif (country == 'US'):
address['country'] = 'United States of America'
else:
try:
country = pycountry.countries.search_fuzzy(country)[0].name
address['country'] = country
except (AttributeError, TypeError):
address['country'] = country
return address<|docstring|>Replace address country code with description.<|endoftext|> |
ca9a53585f881d1ed37b2c37af918e31c767e1902c0b9e88f42199bb67778b31 | def set_financing_addresses(statement):
'Replace financing statement addresses country code with description.'
format_address(statement['registeringParty']['address'])
for secured_party in statement['securedParties']:
format_address(secured_party['address'])
for debtor in statement['debtors']:
format_address(debtor['address']) | Replace financing statement addresses country code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_financing_addresses | cameron-freshworks/ppr | 0 | python | def set_financing_addresses(statement):
format_address(statement['registeringParty']['address'])
for secured_party in statement['securedParties']:
format_address(secured_party['address'])
for debtor in statement['debtors']:
format_address(debtor['address']) | def set_financing_addresses(statement):
format_address(statement['registeringParty']['address'])
for secured_party in statement['securedParties']:
format_address(secured_party['address'])
for debtor in statement['debtors']:
format_address(debtor['address'])<|docstring|>Replace financing statement addresses country code with description.<|endoftext|> |
2218ff9e34735ea9fe8c6a42f27de5a35983bb0530cec100641b396dc6f0be1b | def set_amend_change_addresses(statement):
'Replace amendment/change statement address country code with description.'
format_address(statement['registeringParty']['address'])
if ('deleteSecuredParties' in statement):
for delete_secured in statement['deleteSecuredParties']:
format_address(delete_secured['address'])
if ('addSecuredParties' in statement):
for add_secured in statement['addSecuredParties']:
format_address(add_secured['address'])
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
format_address(delete_debtor['address'])
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
format_address(add_debtor['address']) | Replace amendment/change statement address country code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_amend_change_addresses | cameron-freshworks/ppr | 0 | python | def set_amend_change_addresses(statement):
format_address(statement['registeringParty']['address'])
if ('deleteSecuredParties' in statement):
for delete_secured in statement['deleteSecuredParties']:
format_address(delete_secured['address'])
if ('addSecuredParties' in statement):
for add_secured in statement['addSecuredParties']:
format_address(add_secured['address'])
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
format_address(delete_debtor['address'])
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
format_address(add_debtor['address']) | def set_amend_change_addresses(statement):
format_address(statement['registeringParty']['address'])
if ('deleteSecuredParties' in statement):
for delete_secured in statement['deleteSecuredParties']:
format_address(delete_secured['address'])
if ('addSecuredParties' in statement):
for add_secured in statement['addSecuredParties']:
format_address(add_secured['address'])
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
format_address(delete_debtor['address'])
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
format_address(add_debtor['address'])<|docstring|>Replace amendment/change statement address country code with description.<|endoftext|> |
1648c552109face352809c60e430fa745fdfc36a5f634bddb900c11dcb480991 | def set_modified_party(add_party, delete_parties):
'Set the update flags for a single party .'
for delete_party in delete_parties:
if (('reg_id' in add_party) and ('reg_id' in delete_party) and (add_party['reg_id'] == delete_party['reg_id']) and ('edit' not in delete_party)):
if (add_party['address'] == delete_party['address']):
if (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] != delete_party['businessName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] != delete_party['personName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] == delete_party['businessName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] == delete_party['personName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break | Set the update flags for a single party . | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_modified_party | cameron-freshworks/ppr | 0 | python | def set_modified_party(add_party, delete_parties):
for delete_party in delete_parties:
if (('reg_id' in add_party) and ('reg_id' in delete_party) and (add_party['reg_id'] == delete_party['reg_id']) and ('edit' not in delete_party)):
if (add_party['address'] == delete_party['address']):
if (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] != delete_party['businessName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] != delete_party['personName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] == delete_party['businessName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] == delete_party['personName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break | def set_modified_party(add_party, delete_parties):
for delete_party in delete_parties:
if (('reg_id' in add_party) and ('reg_id' in delete_party) and (add_party['reg_id'] == delete_party['reg_id']) and ('edit' not in delete_party)):
if (add_party['address'] == delete_party['address']):
if (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] != delete_party['businessName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] != delete_party['personName'])):
add_party['name_change'] = True
delete_party['edit'] = True
break
elif (('businessName' in add_party) and ('businessName' in delete_party) and (add_party['businessName'] == delete_party['businessName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break
elif (('personName' in add_party) and ('personName' in delete_party) and (add_party['personName'] == delete_party['personName'])):
add_party['address_change'] = True
delete_party['edit'] = True
break<|docstring|>Set the update flags for a single party .<|endoftext|> |
e79ad47f5b1d729ca742214b1ca76810814d421f0819953996cea14fb0e6242f | def set_modified_parties(statement):
'Replace amendment or change address country code with description. Set if party edited.'
set_amend_change_addresses(statement)
if (('deleteSecuredParties' in statement) and ('addSecuredParties' in statement)):
for add_secured in statement['addSecuredParties']:
if statement['deleteSecuredParties']:
set_modified_party(add_secured, statement['deleteSecuredParties'])
if (('deleteDebtors' in statement) and ('addDebtors' in statement)):
for add_debtor in statement['addDebtors']:
if statement['deleteDebtors']:
set_modified_party(add_debtor, statement['deleteDebtors']) | Replace amendment or change address country code with description. Set if party edited. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_modified_parties | cameron-freshworks/ppr | 0 | python | def set_modified_parties(statement):
set_amend_change_addresses(statement)
if (('deleteSecuredParties' in statement) and ('addSecuredParties' in statement)):
for add_secured in statement['addSecuredParties']:
if statement['deleteSecuredParties']:
set_modified_party(add_secured, statement['deleteSecuredParties'])
if (('deleteDebtors' in statement) and ('addDebtors' in statement)):
for add_debtor in statement['addDebtors']:
if statement['deleteDebtors']:
set_modified_party(add_debtor, statement['deleteDebtors']) | def set_modified_parties(statement):
set_amend_change_addresses(statement)
if (('deleteSecuredParties' in statement) and ('addSecuredParties' in statement)):
for add_secured in statement['addSecuredParties']:
if statement['deleteSecuredParties']:
set_modified_party(add_secured, statement['deleteSecuredParties'])
if (('deleteDebtors' in statement) and ('addDebtors' in statement)):
for add_debtor in statement['addDebtors']:
if statement['deleteDebtors']:
set_modified_party(add_debtor, statement['deleteDebtors'])<|docstring|>Replace amendment or change address country code with description. Set if party edited.<|endoftext|> |
aa564841bc93a007076902263a107d48c38af1695539502e30661fcd92ef3a05 | def set_addresses(report_data):
'Replace search results addresses country code with description.'
set_financing_addresses(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_modified_parties(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_modified_parties(change)
else:
format_address(change['registeringParty']['address']) | Replace search results addresses country code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_addresses | cameron-freshworks/ppr | 0 | python | def set_addresses(report_data):
set_financing_addresses(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_modified_parties(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_modified_parties(change)
else:
format_address(change['registeringParty']['address']) | def set_addresses(report_data):
set_financing_addresses(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_modified_parties(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_modified_parties(change)
else:
format_address(change['registeringParty']['address'])<|docstring|>Replace search results addresses country code with description.<|endoftext|> |
94374062c0cdbd754b1af2e1d1ded830bf6ac68b44fb28021704e8a4c12ff9ff | def to_report_datetime(date_time: str, include_time: bool=True, expiry: bool=False):
'Convert ISO formatted date time or date string to report format.'
local_datetime = model_utils.to_local_timestamp(model_utils.ts_from_iso_format(date_time))
if (expiry and (local_datetime.hour != 23)):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
if include_time:
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
if (timestamp.find(' AM ') > 0):
return timestamp.replace(' AM ', ' am ')
return timestamp.replace(' PM ', ' pm ')
return local_datetime.strftime('%B %-d, %Y') | Convert ISO formatted date time or date string to report format. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | to_report_datetime | cameron-freshworks/ppr | 0 | python | def to_report_datetime(date_time: str, include_time: bool=True, expiry: bool=False):
local_datetime = model_utils.to_local_timestamp(model_utils.ts_from_iso_format(date_time))
if (expiry and (local_datetime.hour != 23)):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
if include_time:
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
if (timestamp.find(' AM ') > 0):
return timestamp.replace(' AM ', ' am ')
return timestamp.replace(' PM ', ' pm ')
return local_datetime.strftime('%B %-d, %Y') | def to_report_datetime(date_time: str, include_time: bool=True, expiry: bool=False):
local_datetime = model_utils.to_local_timestamp(model_utils.ts_from_iso_format(date_time))
if (expiry and (local_datetime.hour != 23)):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
if include_time:
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
if (timestamp.find(' AM ') > 0):
return timestamp.replace(' AM ', ' am ')
return timestamp.replace(' PM ', ' pm ')
return local_datetime.strftime('%B %-d, %Y')<|docstring|>Convert ISO formatted date time or date string to report format.<|endoftext|> |
37a035b8fe8a8e9aeb8d1eaf5428116f3bfff3dc1e756e1a236506b5d9567756 | def to_report_datetime_expiry(date_time: str):
'Convert ISO formatted date time or date string to report expiry date format.'
local_datetime = model_utils.to_local_expiry_report(date_time)
if (local_datetime.hour != 23):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
return timestamp.replace(' PM ', ' pm ') | Convert ISO formatted date time or date string to report expiry date format. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | to_report_datetime_expiry | cameron-freshworks/ppr | 0 | python | def to_report_datetime_expiry(date_time: str):
local_datetime = model_utils.to_local_expiry_report(date_time)
if (local_datetime.hour != 23):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
return timestamp.replace(' PM ', ' pm ') | def to_report_datetime_expiry(date_time: str):
local_datetime = model_utils.to_local_expiry_report(date_time)
if (local_datetime.hour != 23):
offset = (23 - local_datetime.hour)
local_datetime = (local_datetime + timedelta(hours=offset))
timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')
return timestamp.replace(' PM ', ' pm ')<|docstring|>Convert ISO formatted date time or date string to report expiry date format.<|endoftext|> |
f4b37583055258a0da4408b3e489dba121e0a9b6b99d7f28af71f427603008ca | def set_financing_date_time(statement):
'Replace financing statement API ISO UTC strings with local report format strings.'
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('dischargedDateTime' in statement):
statement['dischargedDateTime'] = to_report_datetime(statement['dischargedDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
for debtor in statement['debtors']:
if ('birthDate' in debtor):
debtor['birthDate'] = to_report_datetime(debtor['birthDate'], False)
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('addedDateTime' in collateral):
collateral['addedDateTime'] = to_report_datetime(collateral['addedDateTime'], True)
if ((statement['type'] == 'RL') and ('lienAmount' in statement)):
lien_amount = str(statement['lienAmount'])
if lien_amount.isnumeric():
statement['lienAmount'] = ('$' + '{:0,.2f}'.format(float(lien_amount))) | Replace financing statement API ISO UTC strings with local report format strings. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_financing_date_time | cameron-freshworks/ppr | 0 | python | def set_financing_date_time(statement):
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('dischargedDateTime' in statement):
statement['dischargedDateTime'] = to_report_datetime(statement['dischargedDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
for debtor in statement['debtors']:
if ('birthDate' in debtor):
debtor['birthDate'] = to_report_datetime(debtor['birthDate'], False)
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('addedDateTime' in collateral):
collateral['addedDateTime'] = to_report_datetime(collateral['addedDateTime'], True)
if ((statement['type'] == 'RL') and ('lienAmount' in statement)):
lien_amount = str(statement['lienAmount'])
if lien_amount.isnumeric():
statement['lienAmount'] = ('$' + '{:0,.2f}'.format(float(lien_amount))) | def set_financing_date_time(statement):
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('dischargedDateTime' in statement):
statement['dischargedDateTime'] = to_report_datetime(statement['dischargedDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
for debtor in statement['debtors']:
if ('birthDate' in debtor):
debtor['birthDate'] = to_report_datetime(debtor['birthDate'], False)
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('addedDateTime' in collateral):
collateral['addedDateTime'] = to_report_datetime(collateral['addedDateTime'], True)
if ((statement['type'] == 'RL') and ('lienAmount' in statement)):
lien_amount = str(statement['lienAmount'])
if lien_amount.isnumeric():
statement['lienAmount'] = ('$' + '{:0,.2f}'.format(float(lien_amount)))<|docstring|>Replace financing statement API ISO UTC strings with local report format strings.<|endoftext|> |
f7e6f50bee4226a6e6ffca201c8b2899366a4f61a509462ebcc44097a35dc862 | def set_change_date_time(statement):
'Replace non-financing statement API ISO UTC strings with local report format strings.'
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
if ('changeType' in statement):
statement['changeType'] = TO_CHANGE_TYPE_DESCRIPTION[statement['changeType']].upper()
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
if ('birthDate' in delete_debtor):
delete_debtor['birthDate'] = to_report_datetime(delete_debtor['birthDate'], False)
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
if ('birthDate' in add_debtor):
add_debtor['birthDate'] = to_report_datetime(add_debtor['birthDate'], False)
if ('deleteGeneralCollateral' in statement):
for delete_gc in statement['deleteGeneralCollateral']:
if ('addedDateTime' in delete_gc):
delete_gc['addedDateTime'] = to_report_datetime(delete_gc['addedDateTime'], True)
if ('addGeneralCollateral' in statement):
for add_gc in statement['addGeneralCollateral']:
if ('addedDateTime' in add_gc):
add_gc['addedDateTime'] = to_report_datetime(add_gc['addedDateTime'], True) | Replace non-financing statement API ISO UTC strings with local report format strings. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_change_date_time | cameron-freshworks/ppr | 0 | python | def set_change_date_time(statement):
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
if ('changeType' in statement):
statement['changeType'] = TO_CHANGE_TYPE_DESCRIPTION[statement['changeType']].upper()
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
if ('birthDate' in delete_debtor):
delete_debtor['birthDate'] = to_report_datetime(delete_debtor['birthDate'], False)
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
if ('birthDate' in add_debtor):
add_debtor['birthDate'] = to_report_datetime(add_debtor['birthDate'], False)
if ('deleteGeneralCollateral' in statement):
for delete_gc in statement['deleteGeneralCollateral']:
if ('addedDateTime' in delete_gc):
delete_gc['addedDateTime'] = to_report_datetime(delete_gc['addedDateTime'], True)
if ('addGeneralCollateral' in statement):
for add_gc in statement['addGeneralCollateral']:
if ('addedDateTime' in add_gc):
add_gc['addedDateTime'] = to_report_datetime(add_gc['addedDateTime'], True) | def set_change_date_time(statement):
statement['createDateTime'] = to_report_datetime(statement['createDateTime'])
if (('courtOrderInformation' in statement) and ('orderDate' in statement['courtOrderInformation'])):
order_date = to_report_datetime(statement['courtOrderInformation']['orderDate'], False)
statement['courtOrderInformation']['orderDate'] = order_date
if ('changeType' in statement):
statement['changeType'] = TO_CHANGE_TYPE_DESCRIPTION[statement['changeType']].upper()
if (('expiryDate' in statement) and (len(statement['expiryDate']) > 10)):
statement['expiryDate'] = to_report_datetime_expiry(statement['expiryDate'])
if ('surrenderDate' in statement):
statement['surrenderDate'] = to_report_datetime(statement['surrenderDate'], False)
if ('deleteDebtors' in statement):
for delete_debtor in statement['deleteDebtors']:
if ('birthDate' in delete_debtor):
delete_debtor['birthDate'] = to_report_datetime(delete_debtor['birthDate'], False)
if ('addDebtors' in statement):
for add_debtor in statement['addDebtors']:
if ('birthDate' in add_debtor):
add_debtor['birthDate'] = to_report_datetime(add_debtor['birthDate'], False)
if ('deleteGeneralCollateral' in statement):
for delete_gc in statement['deleteGeneralCollateral']:
if ('addedDateTime' in delete_gc):
delete_gc['addedDateTime'] = to_report_datetime(delete_gc['addedDateTime'], True)
if ('addGeneralCollateral' in statement):
for add_gc in statement['addGeneralCollateral']:
if ('addedDateTime' in add_gc):
add_gc['addedDateTime'] = to_report_datetime(add_gc['addedDateTime'], True)<|docstring|>Replace non-financing statement API ISO UTC strings with local report format strings.<|endoftext|> |
8141298203fcd83149f23e22a1ad7e3f794ef2de42990d29fc125ba74d88607c | def set_date_times(report_data):
'Replace API ISO UTC strings with local report format strings.'
set_financing_date_time(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
set_change_date_time(change) | Replace API ISO UTC strings with local report format strings. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_date_times | cameron-freshworks/ppr | 0 | python | def set_date_times(report_data):
set_financing_date_time(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
set_change_date_time(change) | def set_date_times(report_data):
set_financing_date_time(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
set_change_date_time(change)<|docstring|>Replace API ISO UTC strings with local report format strings.<|endoftext|> |
d33d31722da0ac1fb59a0edc489a8a46c57c3c56de5011fdaf253bacb53632c2 | def set_financing_vehicle_collateral(statement):
'Replace financing statement vehicle collateral type code with description.'
if ('vehicleCollateral' in statement):
mh_count = 0
for collateral in statement['vehicleCollateral']:
if (collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[collateral['type']]
collateral['type'] = desc
statement['mhCollateralCount'] = mh_count | Replace financing statement vehicle collateral type code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_financing_vehicle_collateral | cameron-freshworks/ppr | 0 | python | def set_financing_vehicle_collateral(statement):
if ('vehicleCollateral' in statement):
mh_count = 0
for collateral in statement['vehicleCollateral']:
if (collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[collateral['type']]
collateral['type'] = desc
statement['mhCollateralCount'] = mh_count | def set_financing_vehicle_collateral(statement):
if ('vehicleCollateral' in statement):
mh_count = 0
for collateral in statement['vehicleCollateral']:
if (collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[collateral['type']]
collateral['type'] = desc
statement['mhCollateralCount'] = mh_count<|docstring|>Replace financing statement vehicle collateral type code with description.<|endoftext|> |
128383aefb89649ad0c8ff02e37dbc205ee752a14639daed88313dbab0af4450 | def set_amend_change_vehicle_collateral(statement):
'Replace amendment/change statement vehicle collateral type code with description.'
if (('deleteVehicleCollateral' in statement) or ('addVehicleCollateral' in statement)):
mh_count = 0
if ('deleteVehicleCollateral' in statement):
for delete_collateral in statement['deleteVehicleCollateral']:
if (delete_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[delete_collateral['type']]
delete_collateral['type'] = desc
if ('addVehicleCollateral' in statement):
for add_collateral in statement['addVehicleCollateral']:
if (add_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[add_collateral['type']]
add_collateral['type'] = desc
statement['mhCollateralCount'] = mh_count | Replace amendment/change statement vehicle collateral type code with description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_amend_change_vehicle_collateral | cameron-freshworks/ppr | 0 | python | def set_amend_change_vehicle_collateral(statement):
if (('deleteVehicleCollateral' in statement) or ('addVehicleCollateral' in statement)):
mh_count = 0
if ('deleteVehicleCollateral' in statement):
for delete_collateral in statement['deleteVehicleCollateral']:
if (delete_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[delete_collateral['type']]
delete_collateral['type'] = desc
if ('addVehicleCollateral' in statement):
for add_collateral in statement['addVehicleCollateral']:
if (add_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[add_collateral['type']]
add_collateral['type'] = desc
statement['mhCollateralCount'] = mh_count | def set_amend_change_vehicle_collateral(statement):
if (('deleteVehicleCollateral' in statement) or ('addVehicleCollateral' in statement)):
mh_count = 0
if ('deleteVehicleCollateral' in statement):
for delete_collateral in statement['deleteVehicleCollateral']:
if (delete_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[delete_collateral['type']]
delete_collateral['type'] = desc
if ('addVehicleCollateral' in statement):
for add_collateral in statement['addVehicleCollateral']:
if (add_collateral['type'] == 'MH'):
mh_count += 1
desc = TO_VEHICLE_TYPE_DESCRIPTION[add_collateral['type']]
add_collateral['type'] = desc
statement['mhCollateralCount'] = mh_count<|docstring|>Replace amendment/change statement vehicle collateral type code with description.<|endoftext|> |
d71b0b1dee6f4f6318e3c9d800a1d839f1b19d2b3b922eafd488e3a309af92ac | def set_amend_vehicle_collateral(statement):
'Replace amendment statement vehicle collateral type code with description. Set if change is an edit.'
set_amend_change_vehicle_collateral(statement)
if (('deleteVehicleCollateral' in statement) and ('addVehicleCollateral' in statement)):
for add in statement['addVehicleCollateral']:
for delete in statement['deleteVehicleCollateral']:
if (('serialNumber' in add) and ('serialNumber' in delete)):
if (('reg_id' in add) and ('reg_id' in delete) and (add['reg_id'] == delete['reg_id']) and (add['type'] == delete['type']) and (add['serialNumber'] == delete['serialNumber'])):
add['edit'] = True
delete['edit'] = True | Replace amendment statement vehicle collateral type code with description. Set if change is an edit. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_amend_vehicle_collateral | cameron-freshworks/ppr | 0 | python | def set_amend_vehicle_collateral(statement):
set_amend_change_vehicle_collateral(statement)
if (('deleteVehicleCollateral' in statement) and ('addVehicleCollateral' in statement)):
for add in statement['addVehicleCollateral']:
for delete in statement['deleteVehicleCollateral']:
if (('serialNumber' in add) and ('serialNumber' in delete)):
if (('reg_id' in add) and ('reg_id' in delete) and (add['reg_id'] == delete['reg_id']) and (add['type'] == delete['type']) and (add['serialNumber'] == delete['serialNumber'])):
add['edit'] = True
delete['edit'] = True | def set_amend_vehicle_collateral(statement):
set_amend_change_vehicle_collateral(statement)
if (('deleteVehicleCollateral' in statement) and ('addVehicleCollateral' in statement)):
for add in statement['addVehicleCollateral']:
for delete in statement['deleteVehicleCollateral']:
if (('serialNumber' in add) and ('serialNumber' in delete)):
if (('reg_id' in add) and ('reg_id' in delete) and (add['reg_id'] == delete['reg_id']) and (add['type'] == delete['type']) and (add['serialNumber'] == delete['serialNumber'])):
add['edit'] = True
delete['edit'] = True<|docstring|>Replace amendment statement vehicle collateral type code with description. Set if change is an edit.<|endoftext|> |
41d5324f0d8ef8ac1ceca04b3ed16297c60248dbf9d4e4e429843905cc8ae08b | def set_vehicle_collateral(report_data):
'Replace search results vehicle collateral type codes with descriptions.'
set_financing_vehicle_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_amend_change_vehicle_collateral(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_amend_vehicle_collateral(change) | Replace search results vehicle collateral type codes with descriptions. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_vehicle_collateral | cameron-freshworks/ppr | 0 | python | def set_vehicle_collateral(report_data):
set_financing_vehicle_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_amend_change_vehicle_collateral(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_amend_vehicle_collateral(change) | def set_vehicle_collateral(report_data):
set_financing_vehicle_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] == 'CHANGE_STATEMENT'):
set_amend_change_vehicle_collateral(change)
elif (change['statementType'] == 'AMENDMENT_STATEMENT'):
set_amend_vehicle_collateral(change)<|docstring|>Replace search results vehicle collateral type codes with descriptions.<|endoftext|> |
c3e35a8f3d2698f1a124b8299eff1d9b5b451024db1299169770f3a18152ca8a | def set_financing_general_collateral(statement):
'Replace report newline characters in financing statement general collateral descriptions.'
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('descriptionAdd' in collateral):
collateral['descriptionAdd'] = collateral['descriptionAdd'].replace('/r/n', '<br>')
collateral['descriptionAdd'] = markupsafe.Markup(collateral['descriptionAdd'])
if ('descriptionDelete' in collateral):
collateral['descriptionDelete'] = collateral['descriptionDelete'].replace('/r/n', '<br>')
collateral['descriptionDelete'] = markupsafe.Markup(collateral['descriptionDelete']) | Replace report newline characters in financing statement general collateral descriptions. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_financing_general_collateral | cameron-freshworks/ppr | 0 | python | def set_financing_general_collateral(statement):
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('descriptionAdd' in collateral):
collateral['descriptionAdd'] = collateral['descriptionAdd'].replace('/r/n', '<br>')
collateral['descriptionAdd'] = markupsafe.Markup(collateral['descriptionAdd'])
if ('descriptionDelete' in collateral):
collateral['descriptionDelete'] = collateral['descriptionDelete'].replace('/r/n', '<br>')
collateral['descriptionDelete'] = markupsafe.Markup(collateral['descriptionDelete']) | def set_financing_general_collateral(statement):
if ('generalCollateral' in statement):
for collateral in statement['generalCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('descriptionAdd' in collateral):
collateral['descriptionAdd'] = collateral['descriptionAdd'].replace('/r/n', '<br>')
collateral['descriptionAdd'] = markupsafe.Markup(collateral['descriptionAdd'])
if ('descriptionDelete' in collateral):
collateral['descriptionDelete'] = collateral['descriptionDelete'].replace('/r/n', '<br>')
collateral['descriptionDelete'] = markupsafe.Markup(collateral['descriptionDelete'])<|docstring|>Replace report newline characters in financing statement general collateral descriptions.<|endoftext|> |
47d062707e65e180545b45df07c2745623dd312dd7d00f73cef7c6ec7ffdd7b5 | def set_amend_change_general_collateral(statement):
'Replace report newline characters in amendment statement general collateral description.'
if ('deleteGeneralCollateral' in statement):
for collateral in statement['deleteGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('addGeneralCollateral' in statement):
for collateral in statement['addGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description']) | Replace report newline characters in amendment statement general collateral description. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_amend_change_general_collateral | cameron-freshworks/ppr | 0 | python | def set_amend_change_general_collateral(statement):
if ('deleteGeneralCollateral' in statement):
for collateral in statement['deleteGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('addGeneralCollateral' in statement):
for collateral in statement['addGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description']) | def set_amend_change_general_collateral(statement):
if ('deleteGeneralCollateral' in statement):
for collateral in statement['deleteGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])
if ('addGeneralCollateral' in statement):
for collateral in statement['addGeneralCollateral']:
if ('description' in collateral):
collateral['description'] = collateral['description'].replace('/r/n', '<br>')
collateral['description'] = markupsafe.Markup(collateral['description'])<|docstring|>Replace report newline characters in amendment statement general collateral description.<|endoftext|> |
b6064601539588c013139b680572888dd6a896c164bc0afbc3abb2c560a852fc | def set_general_collateral(report_data):
'Replace report newline characters in search general collateral descriptions.'
set_financing_general_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] in ('CHANGE_STATEMENT', 'AMENDMENT_STATEMENT')):
set_amend_change_general_collateral(change) | Replace report newline characters in search general collateral descriptions. | mhr_api/src/mhr_api/reports/ppr_report_utils.py | set_general_collateral | cameron-freshworks/ppr | 0 | python | def set_general_collateral(report_data):
set_financing_general_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] in ('CHANGE_STATEMENT', 'AMENDMENT_STATEMENT')):
set_amend_change_general_collateral(change) | def set_general_collateral(report_data):
set_financing_general_collateral(report_data)
if ('changes' in report_data):
for change in report_data['changes']:
if (change['statementType'] in ('CHANGE_STATEMENT', 'AMENDMENT_STATEMENT')):
set_amend_change_general_collateral(change)<|docstring|>Replace report newline characters in search general collateral descriptions.<|endoftext|> |
7eb170caaf1bca92cfb34d9a368f93195517ddd156db941cd44bb411024cea1f | def evaluate(model, X, y, p_ids, num_runs=10, valid_only=True, return_raw=False, scale_data=False):
'\n This function is used to evaluate the performance of your model\n Parameters\n ----------\n model\n X\n y\n p_ids\n num_runs\n valid_only\n return_raw\n\n Returns\n -------\n\n '
(raw_results, results, sen, spe, accs, f1s) = ([], [], [], [], [], [])
header = ['model', 'sensitivity', 'specificity', 'acc', 'f1']
for run in range(num_runs):
(X_train, y_train, X_test, y_test) = split_by_ids(X, y, p_ids, seed=run, cat=valid_only, valid_only=valid_only)
if scale_data:
(act_X_train, act_X_test) = train_test_scale(X_train[0].reshape((- 1), ((3 * 8) * 14)), X_test[0].reshape((- 1), ((3 * 8) * 14)))
(act_X_train, act_X_test) = (act_X_train.reshape((- 1), 3, 8, 14), act_X_test.reshape((- 1), 3, 8, 14))
(env_X_train, env_X_test) = train_test_scale(X_train[1], X_test[1])
(phy_X_train, phy_X_test) = train_test_scale(X_train[2], X_test[2])
(X_train, X_test) = ([act_X_train, env_X_train, phy_X_train], [act_X_test, env_X_test, phy_X_test])
model.reset()
model.fit(X_train, y_train)
(sensitivity, specificity, acc, f1) = get_scores(y_test, model.predict(X_test))
if ((sensitivity is not None) and (str(sensitivity) != 'nan')):
sen.append(sensitivity)
spe.append(specificity)
accs.append(acc)
f1s.append(f1)
if return_raw:
raw_results.append([model.model_type, sensitivity, specificity, acc, f1])
row = [model.model_type, format_mean_std(sen), format_mean_std(spe), format_mean_std(accs), format_mean_std(f1s)]
results.append(row)
if return_raw:
return pd.DataFrame(raw_results, columns=header)
df_results = pd.DataFrame(results, columns=header)
return df_results | This function is used to evaluate the performance of your model
Parameters
----------
model
X
y
p_ids
num_runs
valid_only
return_raw
Returns
------- | minder_utils/sleep_data_test.py | evaluate | alexcapstick/minder_utils | 0 | python | def evaluate(model, X, y, p_ids, num_runs=10, valid_only=True, return_raw=False, scale_data=False):
'\n This function is used to evaluate the performance of your model\n Parameters\n ----------\n model\n X\n y\n p_ids\n num_runs\n valid_only\n return_raw\n\n Returns\n -------\n\n '
(raw_results, results, sen, spe, accs, f1s) = ([], [], [], [], [], [])
header = ['model', 'sensitivity', 'specificity', 'acc', 'f1']
for run in range(num_runs):
(X_train, y_train, X_test, y_test) = split_by_ids(X, y, p_ids, seed=run, cat=valid_only, valid_only=valid_only)
if scale_data:
(act_X_train, act_X_test) = train_test_scale(X_train[0].reshape((- 1), ((3 * 8) * 14)), X_test[0].reshape((- 1), ((3 * 8) * 14)))
(act_X_train, act_X_test) = (act_X_train.reshape((- 1), 3, 8, 14), act_X_test.reshape((- 1), 3, 8, 14))
(env_X_train, env_X_test) = train_test_scale(X_train[1], X_test[1])
(phy_X_train, phy_X_test) = train_test_scale(X_train[2], X_test[2])
(X_train, X_test) = ([act_X_train, env_X_train, phy_X_train], [act_X_test, env_X_test, phy_X_test])
model.reset()
model.fit(X_train, y_train)
(sensitivity, specificity, acc, f1) = get_scores(y_test, model.predict(X_test))
if ((sensitivity is not None) and (str(sensitivity) != 'nan')):
sen.append(sensitivity)
spe.append(specificity)
accs.append(acc)
f1s.append(f1)
if return_raw:
raw_results.append([model.model_type, sensitivity, specificity, acc, f1])
row = [model.model_type, format_mean_std(sen), format_mean_std(spe), format_mean_std(accs), format_mean_std(f1s)]
results.append(row)
if return_raw:
return pd.DataFrame(raw_results, columns=header)
df_results = pd.DataFrame(results, columns=header)
return df_results | def evaluate(model, X, y, p_ids, num_runs=10, valid_only=True, return_raw=False, scale_data=False):
'\n This function is used to evaluate the performance of your model\n Parameters\n ----------\n model\n X\n y\n p_ids\n num_runs\n valid_only\n return_raw\n\n Returns\n -------\n\n '
(raw_results, results, sen, spe, accs, f1s) = ([], [], [], [], [], [])
header = ['model', 'sensitivity', 'specificity', 'acc', 'f1']
for run in range(num_runs):
(X_train, y_train, X_test, y_test) = split_by_ids(X, y, p_ids, seed=run, cat=valid_only, valid_only=valid_only)
if scale_data:
(act_X_train, act_X_test) = train_test_scale(X_train[0].reshape((- 1), ((3 * 8) * 14)), X_test[0].reshape((- 1), ((3 * 8) * 14)))
(act_X_train, act_X_test) = (act_X_train.reshape((- 1), 3, 8, 14), act_X_test.reshape((- 1), 3, 8, 14))
(env_X_train, env_X_test) = train_test_scale(X_train[1], X_test[1])
(phy_X_train, phy_X_test) = train_test_scale(X_train[2], X_test[2])
(X_train, X_test) = ([act_X_train, env_X_train, phy_X_train], [act_X_test, env_X_test, phy_X_test])
model.reset()
model.fit(X_train, y_train)
(sensitivity, specificity, acc, f1) = get_scores(y_test, model.predict(X_test))
if ((sensitivity is not None) and (str(sensitivity) != 'nan')):
sen.append(sensitivity)
spe.append(specificity)
accs.append(acc)
f1s.append(f1)
if return_raw:
raw_results.append([model.model_type, sensitivity, specificity, acc, f1])
row = [model.model_type, format_mean_std(sen), format_mean_std(spe), format_mean_std(accs), format_mean_std(f1s)]
results.append(row)
if return_raw:
return pd.DataFrame(raw_results, columns=header)
df_results = pd.DataFrame(results, columns=header)
return df_results<|docstring|>This function is used to evaluate the performance of your model
Parameters
----------
model
X
y
p_ids
num_runs
valid_only
return_raw
Returns
-------<|endoftext|> |
c0c90f6487b90307595f2f05d399e851172454046568f6f809c373f66c238379 | def get_description(self):
'\n A description of the Chebyshev (arcsine) distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :return:\n A string describing the Chebyshev (arcsine) distribution.\n '
text = (((('is a Chebyshev or arcsine distribution that is characterised by its lower bound, which is' + str(self.lower)) + ' and its upper bound, which is') + str(self.upper)) + '.')
return text | A description of the Chebyshev (arcsine) distribution.
:param Chebyshev self:
An instance of the Chebyshev (arcsine) class.
:return:
A string describing the Chebyshev (arcsine) distribution. | equadratures/distributions/chebyshev.py | get_description | psesh/Efficient-Quadratures | 59 | python | def get_description(self):
'\n A description of the Chebyshev (arcsine) distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :return:\n A string describing the Chebyshev (arcsine) distribution.\n '
text = (((('is a Chebyshev or arcsine distribution that is characterised by its lower bound, which is' + str(self.lower)) + ' and its upper bound, which is') + str(self.upper)) + '.')
return text | def get_description(self):
'\n A description of the Chebyshev (arcsine) distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :return:\n A string describing the Chebyshev (arcsine) distribution.\n '
text = (((('is a Chebyshev or arcsine distribution that is characterised by its lower bound, which is' + str(self.lower)) + ' and its upper bound, which is') + str(self.upper)) + '.')
return text<|docstring|>A description of the Chebyshev (arcsine) distribution.
:param Chebyshev self:
An instance of the Chebyshev (arcsine) class.
:return:
A string describing the Chebyshev (arcsine) distribution.<|endoftext|> |
89e38d66099019efe476751d3c91092559e1520820d6b484df035d8fbea4798e | def get_pdf(self, points=None):
'\n A Chebyshev probability density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :param points:\n Matrix of points for defining the probability density function.\n :return:\n An array of N the support of the Chebyshev (arcsine) distribution.\n :return:\n Probability density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.pdf(points)
else:
raise ValueError('Please digit an input for getPDF method') | A Chebyshev probability density function.
:param Chebyshev self:
An instance of the Chebyshev (arcsine) class.
:param points:
Matrix of points for defining the probability density function.
:return:
An array of N the support of the Chebyshev (arcsine) distribution.
:return:
Probability density values along the support of the Chebyshev (arcsine) distribution. | equadratures/distributions/chebyshev.py | get_pdf | psesh/Efficient-Quadratures | 59 | python | def get_pdf(self, points=None):
'\n A Chebyshev probability density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :param points:\n Matrix of points for defining the probability density function.\n :return:\n An array of N the support of the Chebyshev (arcsine) distribution.\n :return:\n Probability density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.pdf(points)
else:
raise ValueError('Please digit an input for getPDF method') | def get_pdf(self, points=None):
'\n A Chebyshev probability density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev (arcsine) class.\n :param points:\n Matrix of points for defining the probability density function.\n :return:\n An array of N the support of the Chebyshev (arcsine) distribution.\n :return:\n Probability density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.pdf(points)
else:
raise ValueError('Please digit an input for getPDF method')<|docstring|>A Chebyshev probability density function.
:param Chebyshev self:
An instance of the Chebyshev (arcsine) class.
:param points:
Matrix of points for defining the probability density function.
:return:
An array of N the support of the Chebyshev (arcsine) distribution.
:return:
Probability density values along the support of the Chebyshev (arcsine) distribution.<|endoftext|> |
a1b04b22b482ab6852b0a0bcc5f20a75fc03ab52eaa3cd31f5a29d90137f57d4 | def get_cdf(self, points=None):
'\n A Chebyshev cumulative density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param points:\n Matrix of points for defining the cumulative density function.\n :return:\n An array of N values over the support of the Chebyshev (arcsine) distribution.\n :return:\n Cumulative density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.cdf(points)
else:
raise ValueError('Please digit an input for getCDF method') | A Chebyshev cumulative density function.
:param Chebyshev self:
An instance of the Chebyshev class.
:param points:
Matrix of points for defining the cumulative density function.
:return:
An array of N values over the support of the Chebyshev (arcsine) distribution.
:return:
Cumulative density values along the support of the Chebyshev (arcsine) distribution. | equadratures/distributions/chebyshev.py | get_cdf | psesh/Efficient-Quadratures | 59 | python | def get_cdf(self, points=None):
'\n A Chebyshev cumulative density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param points:\n Matrix of points for defining the cumulative density function.\n :return:\n An array of N values over the support of the Chebyshev (arcsine) distribution.\n :return:\n Cumulative density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.cdf(points)
else:
raise ValueError('Please digit an input for getCDF method') | def get_cdf(self, points=None):
'\n A Chebyshev cumulative density function.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param points:\n Matrix of points for defining the cumulative density function.\n :return:\n An array of N values over the support of the Chebyshev (arcsine) distribution.\n :return:\n Cumulative density values along the support of the Chebyshev (arcsine) distribution.\n '
if (points is not None):
return self.parent.cdf(points)
else:
raise ValueError('Please digit an input for getCDF method')<|docstring|>A Chebyshev cumulative density function.
:param Chebyshev self:
An instance of the Chebyshev class.
:param points:
Matrix of points for defining the cumulative density function.
:return:
An array of N values over the support of the Chebyshev (arcsine) distribution.
:return:
Cumulative density values along the support of the Chebyshev (arcsine) distribution.<|endoftext|> |
d5fdb9fe223cc2bd83112e6edd7f78ad6cbe6c6d4320b41d75382eafd1739302 | def get_recurrence_coefficients(self, order):
'\n Recurrence coefficients for the Chebyshev distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param array order:\n The order of the recurrence coefficients desired.\n :return:\n Recurrence coefficients associated with the Chebyshev distribution.\n '
ab = jacobi_recurrence_coefficients(self.shape_parameter_A, self.shape_parameter_B, self.lower, self.upper, order)
return ab | Recurrence coefficients for the Chebyshev distribution.
:param Chebyshev self:
An instance of the Chebyshev class.
:param array order:
The order of the recurrence coefficients desired.
:return:
Recurrence coefficients associated with the Chebyshev distribution. | equadratures/distributions/chebyshev.py | get_recurrence_coefficients | psesh/Efficient-Quadratures | 59 | python | def get_recurrence_coefficients(self, order):
'\n Recurrence coefficients for the Chebyshev distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param array order:\n The order of the recurrence coefficients desired.\n :return:\n Recurrence coefficients associated with the Chebyshev distribution.\n '
ab = jacobi_recurrence_coefficients(self.shape_parameter_A, self.shape_parameter_B, self.lower, self.upper, order)
return ab | def get_recurrence_coefficients(self, order):
'\n Recurrence coefficients for the Chebyshev distribution.\n\n :param Chebyshev self:\n An instance of the Chebyshev class.\n :param array order:\n The order of the recurrence coefficients desired.\n :return:\n Recurrence coefficients associated with the Chebyshev distribution.\n '
ab = jacobi_recurrence_coefficients(self.shape_parameter_A, self.shape_parameter_B, self.lower, self.upper, order)
return ab<|docstring|>Recurrence coefficients for the Chebyshev distribution.
:param Chebyshev self:
An instance of the Chebyshev class.
:param array order:
The order of the recurrence coefficients desired.
:return:
Recurrence coefficients associated with the Chebyshev distribution.<|endoftext|> |
81711e29badbf7b511e214d2ef10bbd0e2d575703ba3ddb1b89f2ca7d2cb2a3d | def get_icdf(self, xx):
'\n A Arcisine inverse cumulative density function.\n\n :param Arcsine self:\n An instance of Arcisine class.\n :param xx:\n A matrix of points at which the inverse cumulative density function needs to be evaluated.\n :return:\n Inverse cumulative density function values of the Arcisine distribution.\n '
return self.parent.ppf(xx) | A Arcisine inverse cumulative density function.
:param Arcsine self:
An instance of Arcisine class.
:param xx:
A matrix of points at which the inverse cumulative density function needs to be evaluated.
:return:
Inverse cumulative density function values of the Arcisine distribution. | equadratures/distributions/chebyshev.py | get_icdf | psesh/Efficient-Quadratures | 59 | python | def get_icdf(self, xx):
'\n A Arcisine inverse cumulative density function.\n\n :param Arcsine self:\n An instance of Arcisine class.\n :param xx:\n A matrix of points at which the inverse cumulative density function needs to be evaluated.\n :return:\n Inverse cumulative density function values of the Arcisine distribution.\n '
return self.parent.ppf(xx) | def get_icdf(self, xx):
'\n A Arcisine inverse cumulative density function.\n\n :param Arcsine self:\n An instance of Arcisine class.\n :param xx:\n A matrix of points at which the inverse cumulative density function needs to be evaluated.\n :return:\n Inverse cumulative density function values of the Arcisine distribution.\n '
return self.parent.ppf(xx)<|docstring|>A Arcisine inverse cumulative density function.
:param Arcsine self:
An instance of Arcisine class.
:param xx:
A matrix of points at which the inverse cumulative density function needs to be evaluated.
:return:
Inverse cumulative density function values of the Arcisine distribution.<|endoftext|> |
98b3cb6fa033cb527cece9c6b64bd4f0331b33e5d49e059fd411374760405300 | def get_samples(self, m=None):
'\n Generates samples from the Arcsine distribution.\n\n :param arcsine self:\n An instance of Arcsine class.\n :param integer m:\n Number of random samples. If not provided, a default of 5e05 is assumed.\n\n '
if (m is not None):
number = m
else:
number = 500000
return self.parent.rvs(size=number) | Generates samples from the Arcsine distribution.
:param arcsine self:
An instance of Arcsine class.
:param integer m:
Number of random samples. If not provided, a default of 5e05 is assumed. | equadratures/distributions/chebyshev.py | get_samples | psesh/Efficient-Quadratures | 59 | python | def get_samples(self, m=None):
'\n Generates samples from the Arcsine distribution.\n\n :param arcsine self:\n An instance of Arcsine class.\n :param integer m:\n Number of random samples. If not provided, a default of 5e05 is assumed.\n\n '
if (m is not None):
number = m
else:
number = 500000
return self.parent.rvs(size=number) | def get_samples(self, m=None):
'\n Generates samples from the Arcsine distribution.\n\n :param arcsine self:\n An instance of Arcsine class.\n :param integer m:\n Number of random samples. If not provided, a default of 5e05 is assumed.\n\n '
if (m is not None):
number = m
else:
number = 500000
return self.parent.rvs(size=number)<|docstring|>Generates samples from the Arcsine distribution.
:param arcsine self:
An instance of Arcsine class.
:param integer m:
Number of random samples. If not provided, a default of 5e05 is assumed.<|endoftext|> |
f0499ecb1737b90ebac1e77da100215c681bfdc02a7fa056513748365b7fc093 | def main(tspoint, f, grad, hessian, dirname, SHSrank, SHSroot, SHScomm, const):
'\n main: main part of the calculation of minimum path\n Args:\n tspoint : Coordinate of ts point\n f : function to calculate potential as f(x)\n grad : function to calculate gradient as grad(x)\n hessian : function to calculate hessian as hessian(x)\n dirname : name of directory to calculate minimum path\n SHSrank : rank of MPI\n SHSroot : root rank of MPI\n SHScomm : communicate class of mpi4py\n const : class of constants\n\n '
if (SHSrank == SHSroot):
print(('start Minimum Path calculation of %s' % dirname))
os.chdir(dirname)
dim = len(tspoint)
TShess = hessian(tspoint)
(eigNlist, _eigV) = np.linalg.eigh(TShess)
eigVlist = []
for i in range(dim):
eigVlist.append(_eigV[(:, i)])
pathlist = []
if (SHSrank == SHSroot):
for (i, pm) in enumerate([1, (- 1)]):
writeline = ''
for p in tspoint:
writeline += ('% 3.10f, ' % p)
writeline += ': TS point\n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
x = (tspoint + ((pm * const.deltas0) * eigVlist[0]))
pathlist.append(x)
writeline = ''
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += ':deltas0 \n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
else:
pathlist = None
if const.calc_mpiQ:
pathlist = SHScomm.bcast(pathlist, root=0)
downQlist = [True, True]
beforeEgradlist = [None, None]
whileN = 0
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for (i, pm) in enumerate([(- 1), 1]):
if (downQlist[i] is False):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) < (const.threshold * 5.0)):
grad_x = (pm * eigVlist[0])
else:
Egrad = (grad_x / np.linalg.norm(grad_x))
beforeEgradlist[i] = Egrad
downQlist[i] = False
continue
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
x -= (const.deltas0 * Egrad)
beforeEgradlist[i] = Egrad
pathlist[i] = copy.copy(x)
if (SHSrank == SHSroot):
writeline = ''
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
downQlist = [True, True]
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for i in [0, 1]:
if (not downQlist[i]):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
if (np.dot(Egrad, beforeEgradlist[i]) < 0.0):
downQlist[i] = False
x -= (const.deltas * Egrad)
if (SHSrank == SHSroot):
writeline = ''
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
beforeEgradlist[i] = copy.copy(Egrad)
pathlist[i] = copy.copy(x)
os.chdir('../')
return pathlist | main: main part of the calculation of minimum path
Args:
tspoint : Coordinate of ts point
f : function to calculate potential as f(x)
grad : function to calculate gradient as grad(x)
hessian : function to calculate hessian as hessian(x)
dirname : name of directory to calculate minimum path
SHSrank : rank of MPI
SHSroot : root rank of MPI
SHScomm : communicate class of mpi4py
const : class of constants | SHS4py/MinimumPath.py | main | YukiMitsuta/shs4py | 2 | python | def main(tspoint, f, grad, hessian, dirname, SHSrank, SHSroot, SHScomm, const):
'\n main: main part of the calculation of minimum path\n Args:\n tspoint : Coordinate of ts point\n f : function to calculate potential as f(x)\n grad : function to calculate gradient as grad(x)\n hessian : function to calculate hessian as hessian(x)\n dirname : name of directory to calculate minimum path\n SHSrank : rank of MPI\n SHSroot : root rank of MPI\n SHScomm : communicate class of mpi4py\n const : class of constants\n\n '
if (SHSrank == SHSroot):
print(('start Minimum Path calculation of %s' % dirname))
os.chdir(dirname)
dim = len(tspoint)
TShess = hessian(tspoint)
(eigNlist, _eigV) = np.linalg.eigh(TShess)
eigVlist = []
for i in range(dim):
eigVlist.append(_eigV[(:, i)])
pathlist = []
if (SHSrank == SHSroot):
for (i, pm) in enumerate([1, (- 1)]):
writeline =
for p in tspoint:
writeline += ('% 3.10f, ' % p)
writeline += ': TS point\n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
x = (tspoint + ((pm * const.deltas0) * eigVlist[0]))
pathlist.append(x)
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += ':deltas0 \n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
else:
pathlist = None
if const.calc_mpiQ:
pathlist = SHScomm.bcast(pathlist, root=0)
downQlist = [True, True]
beforeEgradlist = [None, None]
whileN = 0
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for (i, pm) in enumerate([(- 1), 1]):
if (downQlist[i] is False):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) < (const.threshold * 5.0)):
grad_x = (pm * eigVlist[0])
else:
Egrad = (grad_x / np.linalg.norm(grad_x))
beforeEgradlist[i] = Egrad
downQlist[i] = False
continue
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
x -= (const.deltas0 * Egrad)
beforeEgradlist[i] = Egrad
pathlist[i] = copy.copy(x)
if (SHSrank == SHSroot):
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
downQlist = [True, True]
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for i in [0, 1]:
if (not downQlist[i]):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
if (np.dot(Egrad, beforeEgradlist[i]) < 0.0):
downQlist[i] = False
x -= (const.deltas * Egrad)
if (SHSrank == SHSroot):
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
beforeEgradlist[i] = copy.copy(Egrad)
pathlist[i] = copy.copy(x)
os.chdir('../')
return pathlist | def main(tspoint, f, grad, hessian, dirname, SHSrank, SHSroot, SHScomm, const):
'\n main: main part of the calculation of minimum path\n Args:\n tspoint : Coordinate of ts point\n f : function to calculate potential as f(x)\n grad : function to calculate gradient as grad(x)\n hessian : function to calculate hessian as hessian(x)\n dirname : name of directory to calculate minimum path\n SHSrank : rank of MPI\n SHSroot : root rank of MPI\n SHScomm : communicate class of mpi4py\n const : class of constants\n\n '
if (SHSrank == SHSroot):
print(('start Minimum Path calculation of %s' % dirname))
os.chdir(dirname)
dim = len(tspoint)
TShess = hessian(tspoint)
(eigNlist, _eigV) = np.linalg.eigh(TShess)
eigVlist = []
for i in range(dim):
eigVlist.append(_eigV[(:, i)])
pathlist = []
if (SHSrank == SHSroot):
for (i, pm) in enumerate([1, (- 1)]):
writeline =
for p in tspoint:
writeline += ('% 3.10f, ' % p)
writeline += ': TS point\n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
x = (tspoint + ((pm * const.deltas0) * eigVlist[0]))
pathlist.append(x)
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += ':deltas0 \n'
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
else:
pathlist = None
if const.calc_mpiQ:
pathlist = SHScomm.bcast(pathlist, root=0)
downQlist = [True, True]
beforeEgradlist = [None, None]
whileN = 0
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for (i, pm) in enumerate([(- 1), 1]):
if (downQlist[i] is False):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) < (const.threshold * 5.0)):
grad_x = (pm * eigVlist[0])
else:
Egrad = (grad_x / np.linalg.norm(grad_x))
beforeEgradlist[i] = Egrad
downQlist[i] = False
continue
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
x -= (const.deltas0 * Egrad)
beforeEgradlist[i] = Egrad
pathlist[i] = copy.copy(x)
if (SHSrank == SHSroot):
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
downQlist = [True, True]
while any(downQlist):
whileN += 1
if (1000 < whileN):
if (SHSrank == SHSroot):
print('in MinimumPath: whileN over 1000')
return []
for i in [0, 1]:
if (not downQlist[i]):
continue
x = pathlist[i]
grad_x = grad(x)
if (np.linalg.norm(grad_x) == 0.0):
if (SHSrank == SHSroot):
print(('ERROR: gradient become 0.0 in %s' % x))
return []
Egrad = (grad_x / np.linalg.norm(grad_x))
if (np.dot(Egrad, beforeEgradlist[i]) < 0.0):
downQlist[i] = False
x -= (const.deltas * Egrad)
if (SHSrank == SHSroot):
writeline =
for p in x:
writeline += ('% 3.10f, ' % p)
writeline += (':% 3.10f\n' % np.linalg.norm(grad_x))
with open(('./pathlist%s.csv' % i), 'a') as wf:
wf.write(writeline)
beforeEgradlist[i] = copy.copy(Egrad)
pathlist[i] = copy.copy(x)
os.chdir('../')
return pathlist<|docstring|>main: main part of the calculation of minimum path
Args:
tspoint : Coordinate of ts point
f : function to calculate potential as f(x)
grad : function to calculate gradient as grad(x)
hessian : function to calculate hessian as hessian(x)
dirname : name of directory to calculate minimum path
SHSrank : rank of MPI
SHSroot : root rank of MPI
SHScomm : communicate class of mpi4py
const : class of constants<|endoftext|> |
4191de6d125095ad8554b0b71377fe5366d763dc3b84c3060205c061b5f4ea1d | def get_training_and_validation_and_testing_generators25d(data_file, batch_size, n_labels, training_keys_file, validation_keys_file, testing_keys_file, data_split=0.8, overwrite=False, labels=None, patch_shape=None, validation_patch_overlap=0, training_patch_start_offset=None, validation_batch_size=None, patch_overlap=[0, 0, (- 1)], project='brats', augment_flipud=False, augment_fliplr=False, augment_elastic=False, augment_rotation=False, augment_shift=False, augment_shear=False, augment_zoom=False, n_augment=0, skip_blank=False, is_test='1'):
"\n Creates the training and validation generators that can be used when training the model.\n :param skip_blank: If True, any blank (all-zero) label images/patches will be skipped by the data generator.\n :param validation_batch_size: Batch size for the validation data.\n :param training_patch_start_offset: Tuple of length 3 containing integer values. Training data will randomly be\n offset by a number of pixels between (0, 0, 0) and the given tuple. (default is None)\n :param validation_patch_overlap: Number of pixels/voxels that will be overlapped in the validation data. (requires\n patch_shape to not be None)\n :param patch_shape: Shape of the data to return with the generator. If None, the whole image will be returned.\n (default is None)\n :param augment_flip: if True and augment is True, then the data will be randomly flipped along the x, y and z axis\n :param augment_distortion_factor: if augment is True, this determines the standard deviation from the original\n that the data will be distorted (in a stretching or shrinking fashion). Set to None, False, or 0 to prevent the\n augmentation from distorting the data in this way.\n :param augment: If True, training data will be distorted on the fly so as to avoid over-fitting.\n :param labels: List or tuple containing the ordered label values in the image files. 
The length of the list or tuple\n should be equal to the n_labels value.\n Example: (10, 25, 50)\n The data generator would then return binary truth arrays representing the labels 10, 25, and 30 in that order.\n :param data_file: hdf5 file to load the data from.\n :param batch_size: Size of the batches that the training generator will provide.\n :param n_labels: Number of binary labels.\n :param training_keys_file: Pickle file where the index locations of the training data will be stored.\n :param validation_keys_file: Pickle file where the index locations of the validation data will be stored.\n :param data_split: How the training and validation data will be split. 0 means all the data will be used for\n validation and none of it will be used for training. 1 means that all the data will be used for training and none\n will be used for validation. Default is 0.8 or 80%.\n :param overwrite: If set to True, previous files will be overwritten. The default mode is false, so that the\n training and validation splits won't be overwritten when rerunning model training.\n :param permute: will randomly permute the data (data must be 3D cube)\n :return: Training data generator, validation data generator, number of training steps, number of validation steps\n "
if (not validation_batch_size):
validation_batch_size = batch_size
if (project == 'brats'):
(training_list, validation_list, _) = get_train_valid_test_split(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, data_split=data_split, overwrite=False)
else:
(training_list, validation_list, _) = get_train_valid_test_split_isbr(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, overwrite=False)
print('training_list:', training_list)
train_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
valid_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
print('>> training data generator')
training_generator = data_generator25d(data_file, training_list, batch_size=batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=train_patch_overlap, patch_start_offset=training_patch_start_offset, augment_flipud=augment_flipud, augment_fliplr=augment_fliplr, augment_elastic=augment_elastic, augment_rotation=augment_rotation, augment_shift=augment_shift, augment_shear=augment_shear, augment_zoom=augment_zoom, n_augment=n_augment, skip_blank=skip_blank)
print('>> valid data generator')
validation_generator = data_generator25d(data_file, validation_list, batch_size=validation_batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=valid_patch_overlap, skip_blank=skip_blank)
print('>> compute number of training and validation steps')
from unet3d.generator import get_number_of_patches
num_training_steps = get_number_of_steps(get_number_of_patches(data_file, training_list, patch_shape, patch_start_offset=training_patch_start_offset, patch_overlap=train_patch_overlap), batch_size)
num_validation_steps = get_number_of_steps(get_number_of_patches(data_file, validation_list, patch_shape, patch_overlap=valid_patch_overlap), validation_batch_size)
print('Number of training steps: ', num_training_steps)
print('Number of validation steps: ', num_validation_steps)
return (training_generator, validation_generator, num_training_steps, num_validation_steps) | Creates the training and validation generators that can be used when training the model.
:param skip_blank: If True, any blank (all-zero) label images/patches will be skipped by the data generator.
:param validation_batch_size: Batch size for the validation data.
:param training_patch_start_offset: Tuple of length 3 containing integer values. Training data will randomly be
offset by a number of pixels between (0, 0, 0) and the given tuple. (default is None)
:param validation_patch_overlap: Number of pixels/voxels that will be overlapped in the validation data. (requires
patch_shape to not be None)
:param patch_shape: Shape of the data to return with the generator. If None, the whole image will be returned.
(default is None)
:param augment_flip: if True and augment is True, then the data will be randomly flipped along the x, y and z axis
:param augment_distortion_factor: if augment is True, this determines the standard deviation from the original
that the data will be distorted (in a stretching or shrinking fashion). Set to None, False, or 0 to prevent the
augmentation from distorting the data in this way.
:param augment: If True, training data will be distorted on the fly so as to avoid over-fitting.
:param labels: List or tuple containing the ordered label values in the image files. The length of the list or tuple
should be equal to the n_labels value.
Example: (10, 25, 50)
The data generator would then return binary truth arrays representing the labels 10, 25, and 30 in that order.
:param data_file: hdf5 file to load the data from.
:param batch_size: Size of the batches that the training generator will provide.
:param n_labels: Number of binary labels.
:param training_keys_file: Pickle file where the index locations of the training data will be stored.
:param validation_keys_file: Pickle file where the index locations of the validation data will be stored.
:param data_split: How the training and validation data will be split. 0 means all the data will be used for
validation and none of it will be used for training. 1 means that all the data will be used for training and none
will be used for validation. Default is 0.8 or 80%.
:param overwrite: If set to True, previous files will be overwritten. The default mode is false, so that the
training and validation splits won't be overwritten when rerunning model training.
:param permute: will randomly permute the data (data must be 3D cube)
:return: Training data generator, validation data generator, number of training steps, number of validation steps | unet25d/generator.py | get_training_and_validation_and_testing_generators25d | vuhoangminh/3DUnetCNN | 1 | python | def get_training_and_validation_and_testing_generators25d(data_file, batch_size, n_labels, training_keys_file, validation_keys_file, testing_keys_file, data_split=0.8, overwrite=False, labels=None, patch_shape=None, validation_patch_overlap=0, training_patch_start_offset=None, validation_batch_size=None, patch_overlap=[0, 0, (- 1)], project='brats', augment_flipud=False, augment_fliplr=False, augment_elastic=False, augment_rotation=False, augment_shift=False, augment_shear=False, augment_zoom=False, n_augment=0, skip_blank=False, is_test='1'):
"\n Creates the training and validation generators that can be used when training the model.\n :param skip_blank: If True, any blank (all-zero) label images/patches will be skipped by the data generator.\n :param validation_batch_size: Batch size for the validation data.\n :param training_patch_start_offset: Tuple of length 3 containing integer values. Training data will randomly be\n offset by a number of pixels between (0, 0, 0) and the given tuple. (default is None)\n :param validation_patch_overlap: Number of pixels/voxels that will be overlapped in the validation data. (requires\n patch_shape to not be None)\n :param patch_shape: Shape of the data to return with the generator. If None, the whole image will be returned.\n (default is None)\n :param augment_flip: if True and augment is True, then the data will be randomly flipped along the x, y and z axis\n :param augment_distortion_factor: if augment is True, this determines the standard deviation from the original\n that the data will be distorted (in a stretching or shrinking fashion). Set to None, False, or 0 to prevent the\n augmentation from distorting the data in this way.\n :param augment: If True, training data will be distorted on the fly so as to avoid over-fitting.\n :param labels: List or tuple containing the ordered label values in the image files. 
The length of the list or tuple\n should be equal to the n_labels value.\n Example: (10, 25, 50)\n The data generator would then return binary truth arrays representing the labels 10, 25, and 30 in that order.\n :param data_file: hdf5 file to load the data from.\n :param batch_size: Size of the batches that the training generator will provide.\n :param n_labels: Number of binary labels.\n :param training_keys_file: Pickle file where the index locations of the training data will be stored.\n :param validation_keys_file: Pickle file where the index locations of the validation data will be stored.\n :param data_split: How the training and validation data will be split. 0 means all the data will be used for\n validation and none of it will be used for training. 1 means that all the data will be used for training and none\n will be used for validation. Default is 0.8 or 80%.\n :param overwrite: If set to True, previous files will be overwritten. The default mode is false, so that the\n training and validation splits won't be overwritten when rerunning model training.\n :param permute: will randomly permute the data (data must be 3D cube)\n :return: Training data generator, validation data generator, number of training steps, number of validation steps\n "
if (not validation_batch_size):
validation_batch_size = batch_size
if (project == 'brats'):
(training_list, validation_list, _) = get_train_valid_test_split(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, data_split=data_split, overwrite=False)
else:
(training_list, validation_list, _) = get_train_valid_test_split_isbr(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, overwrite=False)
print('training_list:', training_list)
train_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
valid_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
print('>> training data generator')
training_generator = data_generator25d(data_file, training_list, batch_size=batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=train_patch_overlap, patch_start_offset=training_patch_start_offset, augment_flipud=augment_flipud, augment_fliplr=augment_fliplr, augment_elastic=augment_elastic, augment_rotation=augment_rotation, augment_shift=augment_shift, augment_shear=augment_shear, augment_zoom=augment_zoom, n_augment=n_augment, skip_blank=skip_blank)
print('>> valid data generator')
validation_generator = data_generator25d(data_file, validation_list, batch_size=validation_batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=valid_patch_overlap, skip_blank=skip_blank)
print('>> compute number of training and validation steps')
from unet3d.generator import get_number_of_patches
num_training_steps = get_number_of_steps(get_number_of_patches(data_file, training_list, patch_shape, patch_start_offset=training_patch_start_offset, patch_overlap=train_patch_overlap), batch_size)
num_validation_steps = get_number_of_steps(get_number_of_patches(data_file, validation_list, patch_shape, patch_overlap=valid_patch_overlap), validation_batch_size)
print('Number of training steps: ', num_training_steps)
print('Number of validation steps: ', num_validation_steps)
return (training_generator, validation_generator, num_training_steps, num_validation_steps) | def get_training_and_validation_and_testing_generators25d(data_file, batch_size, n_labels, training_keys_file, validation_keys_file, testing_keys_file, data_split=0.8, overwrite=False, labels=None, patch_shape=None, validation_patch_overlap=0, training_patch_start_offset=None, validation_batch_size=None, patch_overlap=[0, 0, (- 1)], project='brats', augment_flipud=False, augment_fliplr=False, augment_elastic=False, augment_rotation=False, augment_shift=False, augment_shear=False, augment_zoom=False, n_augment=0, skip_blank=False, is_test='1'):
"\n Creates the training and validation generators that can be used when training the model.\n :param skip_blank: If True, any blank (all-zero) label images/patches will be skipped by the data generator.\n :param validation_batch_size: Batch size for the validation data.\n :param training_patch_start_offset: Tuple of length 3 containing integer values. Training data will randomly be\n offset by a number of pixels between (0, 0, 0) and the given tuple. (default is None)\n :param validation_patch_overlap: Number of pixels/voxels that will be overlapped in the validation data. (requires\n patch_shape to not be None)\n :param patch_shape: Shape of the data to return with the generator. If None, the whole image will be returned.\n (default is None)\n :param augment_flip: if True and augment is True, then the data will be randomly flipped along the x, y and z axis\n :param augment_distortion_factor: if augment is True, this determines the standard deviation from the original\n that the data will be distorted (in a stretching or shrinking fashion). Set to None, False, or 0 to prevent the\n augmentation from distorting the data in this way.\n :param augment: If True, training data will be distorted on the fly so as to avoid over-fitting.\n :param labels: List or tuple containing the ordered label values in the image files. 
The length of the list or tuple\n should be equal to the n_labels value.\n Example: (10, 25, 50)\n The data generator would then return binary truth arrays representing the labels 10, 25, and 30 in that order.\n :param data_file: hdf5 file to load the data from.\n :param batch_size: Size of the batches that the training generator will provide.\n :param n_labels: Number of binary labels.\n :param training_keys_file: Pickle file where the index locations of the training data will be stored.\n :param validation_keys_file: Pickle file where the index locations of the validation data will be stored.\n :param data_split: How the training and validation data will be split. 0 means all the data will be used for\n validation and none of it will be used for training. 1 means that all the data will be used for training and none\n will be used for validation. Default is 0.8 or 80%.\n :param overwrite: If set to True, previous files will be overwritten. The default mode is false, so that the\n training and validation splits won't be overwritten when rerunning model training.\n :param permute: will randomly permute the data (data must be 3D cube)\n :return: Training data generator, validation data generator, number of training steps, number of validation steps\n "
if (not validation_batch_size):
validation_batch_size = batch_size
if (project == 'brats'):
(training_list, validation_list, _) = get_train_valid_test_split(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, data_split=data_split, overwrite=False)
else:
(training_list, validation_list, _) = get_train_valid_test_split_isbr(data_file, training_file=training_keys_file, validation_file=validation_keys_file, testing_file=testing_keys_file, overwrite=False)
print('training_list:', training_list)
train_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
valid_patch_overlap = np.asarray([0, 0, (patch_shape[(- 1)] - 1)])
print('>> training data generator')
training_generator = data_generator25d(data_file, training_list, batch_size=batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=train_patch_overlap, patch_start_offset=training_patch_start_offset, augment_flipud=augment_flipud, augment_fliplr=augment_fliplr, augment_elastic=augment_elastic, augment_rotation=augment_rotation, augment_shift=augment_shift, augment_shear=augment_shear, augment_zoom=augment_zoom, n_augment=n_augment, skip_blank=skip_blank)
print('>> valid data generator')
validation_generator = data_generator25d(data_file, validation_list, batch_size=validation_batch_size, n_labels=n_labels, labels=labels, patch_shape=patch_shape, patch_overlap=valid_patch_overlap, skip_blank=skip_blank)
print('>> compute number of training and validation steps')
from unet3d.generator import get_number_of_patches
num_training_steps = get_number_of_steps(get_number_of_patches(data_file, training_list, patch_shape, patch_start_offset=training_patch_start_offset, patch_overlap=train_patch_overlap), batch_size)
num_validation_steps = get_number_of_steps(get_number_of_patches(data_file, validation_list, patch_shape, patch_overlap=valid_patch_overlap), validation_batch_size)
print('Number of training steps: ', num_training_steps)
print('Number of validation steps: ', num_validation_steps)
return (training_generator, validation_generator, num_training_steps, num_validation_steps)<|docstring|>Creates the training and validation generators that can be used when training the model.
:param skip_blank: If True, any blank (all-zero) label images/patches will be skipped by the data generator.
:param validation_batch_size: Batch size for the validation data.
:param training_patch_start_offset: Tuple of length 3 containing integer values. Training data will randomly be
offset by a number of pixels between (0, 0, 0) and the given tuple. (default is None)
:param validation_patch_overlap: Number of pixels/voxels that will be overlapped in the validation data. (requires
patch_shape to not be None)
:param patch_shape: Shape of the data to return with the generator. If None, the whole image will be returned.
(default is None)
:param augment_flip: if True and augment is True, then the data will be randomly flipped along the x, y and z axis
:param augment_distortion_factor: if augment is True, this determines the standard deviation from the original
that the data will be distorted (in a stretching or shrinking fashion). Set to None, False, or 0 to prevent the
augmentation from distorting the data in this way.
:param augment: If True, training data will be distorted on the fly so as to avoid over-fitting.
:param labels: List or tuple containing the ordered label values in the image files. The length of the list or tuple
should be equal to the n_labels value.
Example: (10, 25, 50)
The data generator would then return binary truth arrays representing the labels 10, 25, and 30 in that order.
:param data_file: hdf5 file to load the data from.
:param batch_size: Size of the batches that the training generator will provide.
:param n_labels: Number of binary labels.
:param training_keys_file: Pickle file where the index locations of the training data will be stored.
:param validation_keys_file: Pickle file where the index locations of the validation data will be stored.
:param data_split: How the training and validation data will be split. 0 means all the data will be used for
validation and none of it will be used for training. 1 means that all the data will be used for training and none
will be used for validation. Default is 0.8 or 80%.
:param overwrite: If set to True, previous files will be overwritten. The default mode is false, so that the
training and validation splits won't be overwritten when rerunning model training.
:param permute: will randomly permute the data (data must be 3D cube)
:return: Training data generator, validation data generator, number of training steps, number of validation steps<|endoftext|> |
307ca7a1e471266d6977241e5ec66f3e0617d7bace91fd1cd1c0652e3038a140 | def move_hat(self, direction):
' DPad is interpreted as a hat. Values are 0-7 where N is 0, and the\n directions move clockwise. 8 is centered. '
self._hat_position = direction
self._send() | DPad is interpreted as a hat. Values are 0-7 where N is 0, and the
directions move clockwise. 8 is centered. | ofs.py | move_hat | SleepUnit/OpenStickFirmware | 17 | python | def move_hat(self, direction):
' DPad is interpreted as a hat. Values are 0-7 where N is 0, and the\n directions move clockwise. 8 is centered. '
self._hat_position = direction
self._send() | def move_hat(self, direction):
' DPad is interpreted as a hat. Values are 0-7 where N is 0, and the\n directions move clockwise. 8 is centered. '
self._hat_position = direction
self._send()<|docstring|>DPad is interpreted as a hat. Values are 0-7 where N is 0, and the
directions move clockwise. 8 is centered.<|endoftext|> |
8b00f0bef3a10e7d5071b6616d94f14cf64ee167f04d6b8bd6dadb0d08dac0cf | def reset_all(self):
'Return the fightstick to a neutral state'
self._buttons_state = 0
self._hat_position = 8
self._joy_x = 0
self._joy_y = 0
self._joy_z = 0
self._joy_r_z = 0
self._send(always=True) | Return the fightstick to a neutral state | ofs.py | reset_all | SleepUnit/OpenStickFirmware | 17 | python | def reset_all(self):
self._buttons_state = 0
self._hat_position = 8
self._joy_x = 0
self._joy_y = 0
self._joy_z = 0
self._joy_r_z = 0
self._send(always=True) | def reset_all(self):
self._buttons_state = 0
self._hat_position = 8
self._joy_x = 0
self._joy_y = 0
self._joy_z = 0
self._joy_r_z = 0
self._send(always=True)<|docstring|>Return the fightstick to a neutral state<|endoftext|> |
51ffc18cbc9a6701b56dd0f0c83cafc47d9114b3e19b183cffcd5d352f1a0d72 | def _send(self, always=False):
'Send a report with all the existing settings.\n If ``always`` is ``False`` (the default), send only if there have been changes.\n '
struct.pack_into('<HBbbbb', self._report, 0, self._buttons_state, self._hat_position, self._joy_x, self._joy_y, self._joy_z, self._joy_r_z)
if (always or (self._last_report != self._report)):
self._device.send_report(self._report)
self._last_report[:] = self._report | Send a report with all the existing settings.
If ``always`` is ``False`` (the default), send only if there have been changes. | ofs.py | _send | SleepUnit/OpenStickFirmware | 17 | python | def _send(self, always=False):
'Send a report with all the existing settings.\n If ``always`` is ``False`` (the default), send only if there have been changes.\n '
struct.pack_into('<HBbbbb', self._report, 0, self._buttons_state, self._hat_position, self._joy_x, self._joy_y, self._joy_z, self._joy_r_z)
if (always or (self._last_report != self._report)):
self._device.send_report(self._report)
self._last_report[:] = self._report | def _send(self, always=False):
'Send a report with all the existing settings.\n If ``always`` is ``False`` (the default), send only if there have been changes.\n '
struct.pack_into('<HBbbbb', self._report, 0, self._buttons_state, self._hat_position, self._joy_x, self._joy_y, self._joy_z, self._joy_r_z)
if (always or (self._last_report != self._report)):
self._device.send_report(self._report)
self._last_report[:] = self._report<|docstring|>Send a report with all the existing settings.
If ``always`` is ``False`` (the default), send only if there have been changes.<|endoftext|> |
6030a733b4af46114f9c4d3d82a42dc5b09d87a81f61fb1fb0ee93c2fdaf75b5 | def convert_names_to_model_inputs(names: Union[(list, np.ndarray)]) -> torch.Tensor:
'\n Return a torch tensor of names, where each name has been converted to a sequence of ids and the ids have been one-hot encoded.\n Also return the tensor where the names have been converted to a sequence of ids but before the ids have been one-hot encoded.\n :param names: list of names to encode\n :param char_to_idx_map: map characters to ids\n :param max_name_length: maximum name length\n :return: 2D tensor [names, char position]\n '
X_targets = convert_names_to_ids(names, char_to_idx_map, MAX_NAME_LENGTH)
return check_convert_tensor(X_targets) | Return a torch tensor of names, where each name has been converted to a sequence of ids and the ids have been one-hot encoded.
Also return the tensor where the names have been converted to a sequence of ids but before the ids have been one-hot encoded.
:param names: list of names to encode
:param char_to_idx_map: map characters to ids
:param max_name_length: maximum name length
:return: 2D tensor [names, char position] | src/models/swivel_encoder.py | convert_names_to_model_inputs | rootsdev/nama | 0 | python | def convert_names_to_model_inputs(names: Union[(list, np.ndarray)]) -> torch.Tensor:
'\n Return a torch tensor of names, where each name has been converted to a sequence of ids and the ids have been one-hot encoded.\n Also return the tensor where the names have been converted to a sequence of ids but before the ids have been one-hot encoded.\n :param names: list of names to encode\n :param char_to_idx_map: map characters to ids\n :param max_name_length: maximum name length\n :return: 2D tensor [names, char position]\n '
X_targets = convert_names_to_ids(names, char_to_idx_map, MAX_NAME_LENGTH)
return check_convert_tensor(X_targets) | def convert_names_to_model_inputs(names: Union[(list, np.ndarray)]) -> torch.Tensor:
'\n Return a torch tensor of names, where each name has been converted to a sequence of ids and the ids have been one-hot encoded.\n Also return the tensor where the names have been converted to a sequence of ids but before the ids have been one-hot encoded.\n :param names: list of names to encode\n :param char_to_idx_map: map characters to ids\n :param max_name_length: maximum name length\n :return: 2D tensor [names, char position]\n '
X_targets = convert_names_to_ids(names, char_to_idx_map, MAX_NAME_LENGTH)
return check_convert_tensor(X_targets)<|docstring|>Return a torch tensor of names, where each name has been converted to a sequence of ids and the ids have been one-hot encoded.
Also return the tensor where the names have been converted to a sequence of ids but before the ids have been one-hot encoded.
:param names: list of names to encode
:param char_to_idx_map: map characters to ids
:param max_name_length: maximum name length
:return: 2D tensor [names, char position]<|endoftext|> |
9a63606ec6ac39742249ac42590ae5d4f5b4fd8b68d5bd70760a8f11466c912e | def train_swivel_encoder(model, X_train, X_targets, num_epochs=100, batch_size=64, lr=0.01, use_adam_opt=False, use_mse_loss=False, verbose=True, optimizer=None, checkpoint_path=None):
'\n Train the SwivelEncoder\n :param model: SwivelEncoder model\n :param X_train: list of names\n :param X_targets: list of embeddings\n :param num_epochs: number of epochs\n :param batch_size: batch size\n :param lr: learning rate\n :param use_adam_opt: if True, use Adam optimizer; otherwise use Adagrad optimizer\n :param use_mse_loss: if True, use mean squared error (euclidean distance) loss; otherwise use cosine similarity loss\n :param verbose:print average loss every so often\n :param optimizer: passed-in optimizer to use\n :param checkpoint_path: if set, save models to this path after each epoch\n '
model = model.to(device=model.device)
if (optimizer is None):
optimizer = (torch.optim.Adam(model.parameters(), lr=lr) if use_adam_opt else torch.optim.Adagrad(model.parameters(), lr=lr))
loss_fn = (torch.nn.MSELoss() if use_mse_loss else torch.nn.CosineEmbeddingLoss())
dataset_train = torch.utils.data.TensorDataset(X_train, X_targets)
data_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
losses = list()
for epoch in range(1, (num_epochs + 1)):
for (batch_num, (train_batch, targets_batch)) in enumerate(data_loader):
model.zero_grad()
x_prime = model(train_batch.to(device=model.device))
if use_mse_loss:
loss = loss_fn(x_prime, targets_batch.to(device=model.device))
else:
loss = loss_fn(x_prime, targets_batch.to(device=model.device), torch.ones(len(x_prime)).to(device=model.device))
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
if ((batch_num % 1000) == 0):
print('Epoch: {}/{} \t Batch: {} \t Loss: {}'.format(epoch, num_epochs, batch_num, np.mean(losses[(- 100):])))
if checkpoint_path:
torch.save(model.state_dict(), fopen(f'{checkpoint_path}.{epoch}', 'wb'))
return losses | Train the SwivelEncoder
:param model: SwivelEncoder model
:param X_train: list of names
:param X_targets: list of embeddings
:param num_epochs: number of epochs
:param batch_size: batch size
:param lr: learning rate
:param use_adam_opt: if True, use Adam optimizer; otherwise use Adagrad optimizer
:param use_mse_loss: if True, use mean squared error (euclidean distance) loss; otherwise use cosine similarity loss
:param verbose:print average loss every so often
:param optimizer: passed-in optimizer to use
:param checkpoint_path: if set, save models to this path after each epoch | src/models/swivel_encoder.py | train_swivel_encoder | rootsdev/nama | 0 | python | def train_swivel_encoder(model, X_train, X_targets, num_epochs=100, batch_size=64, lr=0.01, use_adam_opt=False, use_mse_loss=False, verbose=True, optimizer=None, checkpoint_path=None):
'\n Train the SwivelEncoder\n :param model: SwivelEncoder model\n :param X_train: list of names\n :param X_targets: list of embeddings\n :param num_epochs: number of epochs\n :param batch_size: batch size\n :param lr: learning rate\n :param use_adam_opt: if True, use Adam optimizer; otherwise use Adagrad optimizer\n :param use_mse_loss: if True, use mean squared error (euclidean distance) loss; otherwise use cosine similarity loss\n :param verbose:print average loss every so often\n :param optimizer: passed-in optimizer to use\n :param checkpoint_path: if set, save models to this path after each epoch\n '
model = model.to(device=model.device)
if (optimizer is None):
optimizer = (torch.optim.Adam(model.parameters(), lr=lr) if use_adam_opt else torch.optim.Adagrad(model.parameters(), lr=lr))
loss_fn = (torch.nn.MSELoss() if use_mse_loss else torch.nn.CosineEmbeddingLoss())
dataset_train = torch.utils.data.TensorDataset(X_train, X_targets)
data_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
losses = list()
for epoch in range(1, (num_epochs + 1)):
for (batch_num, (train_batch, targets_batch)) in enumerate(data_loader):
model.zero_grad()
x_prime = model(train_batch.to(device=model.device))
if use_mse_loss:
loss = loss_fn(x_prime, targets_batch.to(device=model.device))
else:
loss = loss_fn(x_prime, targets_batch.to(device=model.device), torch.ones(len(x_prime)).to(device=model.device))
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
if ((batch_num % 1000) == 0):
print('Epoch: {}/{} \t Batch: {} \t Loss: {}'.format(epoch, num_epochs, batch_num, np.mean(losses[(- 100):])))
if checkpoint_path:
torch.save(model.state_dict(), fopen(f'{checkpoint_path}.{epoch}', 'wb'))
return losses | def train_swivel_encoder(model, X_train, X_targets, num_epochs=100, batch_size=64, lr=0.01, use_adam_opt=False, use_mse_loss=False, verbose=True, optimizer=None, checkpoint_path=None):
'\n Train the SwivelEncoder\n :param model: SwivelEncoder model\n :param X_train: list of names\n :param X_targets: list of embeddings\n :param num_epochs: number of epochs\n :param batch_size: batch size\n :param lr: learning rate\n :param use_adam_opt: if True, use Adam optimizer; otherwise use Adagrad optimizer\n :param use_mse_loss: if True, use mean squared error (euclidean distance) loss; otherwise use cosine similarity loss\n :param verbose:print average loss every so often\n :param optimizer: passed-in optimizer to use\n :param checkpoint_path: if set, save models to this path after each epoch\n '
model = model.to(device=model.device)
if (optimizer is None):
optimizer = (torch.optim.Adam(model.parameters(), lr=lr) if use_adam_opt else torch.optim.Adagrad(model.parameters(), lr=lr))
loss_fn = (torch.nn.MSELoss() if use_mse_loss else torch.nn.CosineEmbeddingLoss())
dataset_train = torch.utils.data.TensorDataset(X_train, X_targets)
data_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
losses = list()
for epoch in range(1, (num_epochs + 1)):
for (batch_num, (train_batch, targets_batch)) in enumerate(data_loader):
model.zero_grad()
x_prime = model(train_batch.to(device=model.device))
if use_mse_loss:
loss = loss_fn(x_prime, targets_batch.to(device=model.device))
else:
loss = loss_fn(x_prime, targets_batch.to(device=model.device), torch.ones(len(x_prime)).to(device=model.device))
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
if ((batch_num % 1000) == 0):
print('Epoch: {}/{} \t Batch: {} \t Loss: {}'.format(epoch, num_epochs, batch_num, np.mean(losses[(- 100):])))
if checkpoint_path:
torch.save(model.state_dict(), fopen(f'{checkpoint_path}.{epoch}', 'wb'))
return losses<|docstring|>Train the SwivelEncoder
:param model: SwivelEncoder model
:param X_train: list of names
:param X_targets: list of embeddings
:param num_epochs: number of epochs
:param batch_size: batch size
:param lr: learning rate
:param use_adam_opt: if True, use Adam optimizer; otherwise use Adagrad optimizer
:param use_mse_loss: if True, use mean squared error (euclidean distance) loss; otherwise use cosine similarity loss
:param verbose:print average loss every so often
:param optimizer: passed-in optimizer to use
:param checkpoint_path: if set, save models to this path after each epoch<|endoftext|> |
201df119c138e3ecd19fdd56e889bb79abb0da533de3cc69b9b2520eacde8811 | def forward(self, X):
'\n Generate embeddings for X\n :param X: [batch size, seq length]\n :return: [batch size, seq embedding]\n '
X = X.to(device=self.device)
(batch_size, seq_len) = X.size()
hidden = (torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device), torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device))
if self.pack:
X_lengths = torch.count_nonzero(X, dim=1).to(device='cpu').type(torch.int64)
ixs = torch.argsort(X_lengths, descending=True)
inverse_ixs = torch.argsort(ixs)
X = X[ixs]
X_lengths = X_lengths[ixs]
if (self.char_embedding is None):
eye = torch.eye((constants.VOCAB_SIZE + 1)).to(device=self.device)
X = eye[X]
else:
X = self.char_embedding(X)
if self.pack:
X = pack_padded_sequence(X, X_lengths, batch_first=True, enforce_sorted=True)
(_, (hidden, _)) = self.lstm(X, hidden)
if (self.n_directions == 1):
last_hidden = hidden[((- 1), :, :)]
else:
last_hidden = torch.cat((hidden[((- 2), :, :)], hidden[((- 1), :, :)]), dim=1)
if self.pack:
last_hidden = last_hidden[inverse_ixs]
output = self.linear(last_hidden)
return output | Generate embeddings for X
:param X: [batch size, seq length]
:return: [batch size, seq embedding] | src/models/swivel_encoder.py | forward | rootsdev/nama | 0 | python | def forward(self, X):
'\n Generate embeddings for X\n :param X: [batch size, seq length]\n :return: [batch size, seq embedding]\n '
X = X.to(device=self.device)
(batch_size, seq_len) = X.size()
hidden = (torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device), torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device))
if self.pack:
X_lengths = torch.count_nonzero(X, dim=1).to(device='cpu').type(torch.int64)
ixs = torch.argsort(X_lengths, descending=True)
inverse_ixs = torch.argsort(ixs)
X = X[ixs]
X_lengths = X_lengths[ixs]
if (self.char_embedding is None):
eye = torch.eye((constants.VOCAB_SIZE + 1)).to(device=self.device)
X = eye[X]
else:
X = self.char_embedding(X)
if self.pack:
X = pack_padded_sequence(X, X_lengths, batch_first=True, enforce_sorted=True)
(_, (hidden, _)) = self.lstm(X, hidden)
if (self.n_directions == 1):
last_hidden = hidden[((- 1), :, :)]
else:
last_hidden = torch.cat((hidden[((- 2), :, :)], hidden[((- 1), :, :)]), dim=1)
if self.pack:
last_hidden = last_hidden[inverse_ixs]
output = self.linear(last_hidden)
return output | def forward(self, X):
'\n Generate embeddings for X\n :param X: [batch size, seq length]\n :return: [batch size, seq embedding]\n '
X = X.to(device=self.device)
(batch_size, seq_len) = X.size()
hidden = (torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device), torch.randn((self.n_layers * self.n_directions), batch_size, self.n_hidden_units).to(device=self.device))
if self.pack:
X_lengths = torch.count_nonzero(X, dim=1).to(device='cpu').type(torch.int64)
ixs = torch.argsort(X_lengths, descending=True)
inverse_ixs = torch.argsort(ixs)
X = X[ixs]
X_lengths = X_lengths[ixs]
if (self.char_embedding is None):
eye = torch.eye((constants.VOCAB_SIZE + 1)).to(device=self.device)
X = eye[X]
else:
X = self.char_embedding(X)
if self.pack:
X = pack_padded_sequence(X, X_lengths, batch_first=True, enforce_sorted=True)
(_, (hidden, _)) = self.lstm(X, hidden)
if (self.n_directions == 1):
last_hidden = hidden[((- 1), :, :)]
else:
last_hidden = torch.cat((hidden[((- 2), :, :)], hidden[((- 1), :, :)]), dim=1)
if self.pack:
last_hidden = last_hidden[inverse_ixs]
output = self.linear(last_hidden)
return output<|docstring|>Generate embeddings for X
:param X: [batch size, seq length]
:return: [batch size, seq embedding]<|endoftext|> |
99a2215f6de40a753c6cf926f95d09447eb85cff1398ff00bd7d52037af25fbd | def __init__(self, **kwargs):
'\n Initializes a new UserAssessmentBaseLineDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param assessment_ids:\n The value to assign to the assessment_ids property of this UserAssessmentBaseLineDetails.\n :type assessment_ids: list[str]\n\n '
self.swagger_types = {'assessment_ids': 'list[str]'}
self.attribute_map = {'assessment_ids': 'assessmentIds'}
self._assessment_ids = None | Initializes a new UserAssessmentBaseLineDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param assessment_ids:
The value to assign to the assessment_ids property of this UserAssessmentBaseLineDetails.
:type assessment_ids: list[str] | src/oci/data_safe/models/user_assessment_base_line_details.py | __init__ | Manny27nyc/oci-python-sdk | 249 | python | def __init__(self, **kwargs):
'\n Initializes a new UserAssessmentBaseLineDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param assessment_ids:\n The value to assign to the assessment_ids property of this UserAssessmentBaseLineDetails.\n :type assessment_ids: list[str]\n\n '
self.swagger_types = {'assessment_ids': 'list[str]'}
self.attribute_map = {'assessment_ids': 'assessmentIds'}
self._assessment_ids = None | def __init__(self, **kwargs):
'\n Initializes a new UserAssessmentBaseLineDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param assessment_ids:\n The value to assign to the assessment_ids property of this UserAssessmentBaseLineDetails.\n :type assessment_ids: list[str]\n\n '
self.swagger_types = {'assessment_ids': 'list[str]'}
self.attribute_map = {'assessment_ids': 'assessmentIds'}
self._assessment_ids = None<|docstring|>Initializes a new UserAssessmentBaseLineDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param assessment_ids:
The value to assign to the assessment_ids property of this UserAssessmentBaseLineDetails.
:type assessment_ids: list[str]<|endoftext|> |
93c51beb9f6ef2087b085d1f4d4a8fc49b55c53fe9c7a673e4c73ad1e78f4700 | @property
def assessment_ids(self):
'\n Gets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :return: The assessment_ids of this UserAssessmentBaseLineDetails.\n :rtype: list[str]\n '
return self._assessment_ids | Gets the assessment_ids of this UserAssessmentBaseLineDetails.
The list of user assessment OCIDs that need to be updated while setting the baseline.
:return: The assessment_ids of this UserAssessmentBaseLineDetails.
:rtype: list[str] | src/oci/data_safe/models/user_assessment_base_line_details.py | assessment_ids | Manny27nyc/oci-python-sdk | 249 | python | @property
def assessment_ids(self):
'\n Gets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :return: The assessment_ids of this UserAssessmentBaseLineDetails.\n :rtype: list[str]\n '
return self._assessment_ids | @property
def assessment_ids(self):
'\n Gets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :return: The assessment_ids of this UserAssessmentBaseLineDetails.\n :rtype: list[str]\n '
return self._assessment_ids<|docstring|>Gets the assessment_ids of this UserAssessmentBaseLineDetails.
The list of user assessment OCIDs that need to be updated while setting the baseline.
:return: The assessment_ids of this UserAssessmentBaseLineDetails.
:rtype: list[str]<|endoftext|> |
92ff6dad637874be2064e7f91c23ec8b6bb8eff9fd860436f179168e5ab0cfa0 | @assessment_ids.setter
def assessment_ids(self, assessment_ids):
'\n Sets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :param assessment_ids: The assessment_ids of this UserAssessmentBaseLineDetails.\n :type: list[str]\n '
self._assessment_ids = assessment_ids | Sets the assessment_ids of this UserAssessmentBaseLineDetails.
The list of user assessment OCIDs that need to be updated while setting the baseline.
:param assessment_ids: The assessment_ids of this UserAssessmentBaseLineDetails.
:type: list[str] | src/oci/data_safe/models/user_assessment_base_line_details.py | assessment_ids | Manny27nyc/oci-python-sdk | 249 | python | @assessment_ids.setter
def assessment_ids(self, assessment_ids):
'\n Sets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :param assessment_ids: The assessment_ids of this UserAssessmentBaseLineDetails.\n :type: list[str]\n '
self._assessment_ids = assessment_ids | @assessment_ids.setter
def assessment_ids(self, assessment_ids):
'\n Sets the assessment_ids of this UserAssessmentBaseLineDetails.\n The list of user assessment OCIDs that need to be updated while setting the baseline.\n\n\n :param assessment_ids: The assessment_ids of this UserAssessmentBaseLineDetails.\n :type: list[str]\n '
self._assessment_ids = assessment_ids<|docstring|>Sets the assessment_ids of this UserAssessmentBaseLineDetails.
The list of user assessment OCIDs that need to be updated while setting the baseline.
:param assessment_ids: The assessment_ids of this UserAssessmentBaseLineDetails.
:type: list[str]<|endoftext|> |
ab8009a65305111cebe768c0af084e07beff90fd91c737d9f1876ce81910b9ce | def is_uuid(string):
"检查字符串是不是合法的 UUID。validate if string is a valid uuid.\n\n Examples::\n\n if is_uuid('wrong string'): ...\n\n :rtype: bool\n "
try:
return bool((string and (uuid.UUID(string).hex == string)))
except ValueError:
return False | 检查字符串是不是合法的 UUID。validate if string is a valid uuid.
Examples::
if is_uuid('wrong string'): ...
:rtype: bool | hutils/validators.py | is_uuid | zaihui/hutils | 30 | python | def is_uuid(string):
"检查字符串是不是合法的 UUID。validate if string is a valid uuid.\n\n Examples::\n\n if is_uuid('wrong string'): ...\n\n :rtype: bool\n "
try:
return bool((string and (uuid.UUID(string).hex == string)))
except ValueError:
return False | def is_uuid(string):
"检查字符串是不是合法的 UUID。validate if string is a valid uuid.\n\n Examples::\n\n if is_uuid('wrong string'): ...\n\n :rtype: bool\n "
try:
return bool((string and (uuid.UUID(string).hex == string)))
except ValueError:
return False<|docstring|>检查字符串是不是合法的 UUID。validate if string is a valid uuid.
Examples::
if is_uuid('wrong string'): ...
:rtype: bool<|endoftext|> |
173a71f74f749aa343fd0749db8e752a1bb895f734e490c065a0b9175cfb656f | def is_int(string):
"检查字符串是不是合法的 int. validate if string is a valid int.\n\n Examples::\n\n if is_int('wrong string'): ...\n\n :rtype: bool\n "
try:
int(string)
return True
except ValueError:
return False | 检查字符串是不是合法的 int. validate if string is a valid int.
Examples::
if is_int('wrong string'): ...
:rtype: bool | hutils/validators.py | is_int | zaihui/hutils | 30 | python | def is_int(string):
"检查字符串是不是合法的 int. validate if string is a valid int.\n\n Examples::\n\n if is_int('wrong string'): ...\n\n :rtype: bool\n "
try:
int(string)
return True
except ValueError:
return False | def is_int(string):
"检查字符串是不是合法的 int. validate if string is a valid int.\n\n Examples::\n\n if is_int('wrong string'): ...\n\n :rtype: bool\n "
try:
int(string)
return True
except ValueError:
return False<|docstring|>检查字符串是不是合法的 int. validate if string is a valid int.
Examples::
if is_int('wrong string'): ...
:rtype: bool<|endoftext|> |
8194982494e81da0c5d849dbc0561a053dffc73498a693e1316983fd8d33355e | def is_chinese_phone(string):
"检查字符串是不是合法的大陆手机号。validate if string is a valid chinese mainland phone number.\n\n Examples::\n\n if is_chinese_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(CHINESE_PHONE_REGEX.match(string)) | 检查字符串是不是合法的大陆手机号。validate if string is a valid chinese mainland phone number.
Examples::
if is_chinese_phone('12345678910'): ...
:rtype: bool | hutils/validators.py | is_chinese_phone | zaihui/hutils | 30 | python | def is_chinese_phone(string):
"检查字符串是不是合法的大陆手机号。validate if string is a valid chinese mainland phone number.\n\n Examples::\n\n if is_chinese_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(CHINESE_PHONE_REGEX.match(string)) | def is_chinese_phone(string):
"检查字符串是不是合法的大陆手机号。validate if string is a valid chinese mainland phone number.\n\n Examples::\n\n if is_chinese_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(CHINESE_PHONE_REGEX.match(string))<|docstring|>检查字符串是不是合法的大陆手机号。validate if string is a valid chinese mainland phone number.
Examples::
if is_chinese_phone('12345678910'): ...
:rtype: bool<|endoftext|> |
68daf6bec344bc35476ca10d38c28f82c5635b902552570c9ae0b71cb236a8d0 | def is_singapore_phone(string):
"检查字符串是不是合法的新加坡手机号。validate if string is a valid singapore phone number.\n\n Examples::\n\n if is_singapore_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(SINGAPORE_PHONE_REGEX.match(string)) | 检查字符串是不是合法的新加坡手机号。validate if string is a valid singapore phone number.
Examples::
if is_singapore_phone('12345678910'): ...
:rtype: bool | hutils/validators.py | is_singapore_phone | zaihui/hutils | 30 | python | def is_singapore_phone(string):
"检查字符串是不是合法的新加坡手机号。validate if string is a valid singapore phone number.\n\n Examples::\n\n if is_singapore_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(SINGAPORE_PHONE_REGEX.match(string)) | def is_singapore_phone(string):
"检查字符串是不是合法的新加坡手机号。validate if string is a valid singapore phone number.\n\n Examples::\n\n if is_singapore_phone('12345678910'): ...\n\n :rtype: bool\n "
return bool(SINGAPORE_PHONE_REGEX.match(string))<|docstring|>检查字符串是不是合法的新加坡手机号。validate if string is a valid singapore phone number.
Examples::
if is_singapore_phone('12345678910'): ...
:rtype: bool<|endoftext|> |
7af030753ae6c90fc07e4ddc4978d0ded4d9c933494026eaa677376c0f847255 | def is_phone(string):
"检查字符串是不是合法的手机号 validate if string is a valid phone number.\n\n Examples::\n\n if is_phone('12345678910'): ...\n\n :rtype: bool\n "
return any([is_chinese_phone(string), is_singapore_phone(string)]) | 检查字符串是不是合法的手机号 validate if string is a valid phone number.
Examples::
if is_phone('12345678910'): ...
:rtype: bool | hutils/validators.py | is_phone | zaihui/hutils | 30 | python | def is_phone(string):
"检查字符串是不是合法的手机号 validate if string is a valid phone number.\n\n Examples::\n\n if is_phone('12345678910'): ...\n\n :rtype: bool\n "
return any([is_chinese_phone(string), is_singapore_phone(string)]) | def is_phone(string):
"检查字符串是不是合法的手机号 validate if string is a valid phone number.\n\n Examples::\n\n if is_phone('12345678910'): ...\n\n :rtype: bool\n "
return any([is_chinese_phone(string), is_singapore_phone(string)])<|docstring|>检查字符串是不是合法的手机号 validate if string is a valid phone number.
Examples::
if is_phone('12345678910'): ...
:rtype: bool<|endoftext|> |
933d17d3ac0e4081dfeee8083f2394556e7c85657d4d5d70a2669b402d7f8840 | def weighted_hamming_distance(s1: List[int], s2: List[int], missing_state_indicator=(- 1), weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'Computes the weighted hamming distance between samples.\n\n Evaluates the dissimilarity of two phylogenetic samples on the basis of\n their shared indel states and the probability of these indel states\n occurring. Specifically, for a given character, if two states are identical\n we decrement the dissimilarity by the probability of these two occurring\n independently; if the two states disagree, we increment the dissimilarity by\n the probability of these states occurring. We normalize the dissimilarity\n by the number of non-missing characters shared by the two samples.\n\n If weights are not given, then we increment dissimilarity by +2 if the states\n are different, +1 if one state is uncut and the other is an indel, and +0 if\n the two states are identical.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A dictionary storing the state weights for each character, derived\n from the state priors. This should be a nested dictionary where each\n key corresponds to character that then indexes another dictionary\n storing the weight of each observed state.\n (Character -> State -> Weight)\n\n Returns:\n A dissimilarity score.\n\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] != s2[i]):
if ((s1[i] == 0) or (s2[i] == 0)):
if weights:
if (s1[i] != 0):
d += weights[i][s1[i]]
else:
d += weights[i][s2[i]]
else:
d += 1
elif weights:
d += (weights[i][s1[i]] + weights[i][s2[i]])
else:
d += 2
if (num_present == 0):
return 0
return (d / num_present) | Computes the weighted hamming distance between samples.
Evaluates the dissimilarity of two phylogenetic samples on the basis of
their shared indel states and the probability of these indel states
occurring. Specifically, for a given character, if two states are identical
we decrement the dissimilarity by the probability of these two occurring
independently; if the two states disagree, we increment the dissimilarity by
the probability of these states occurring. We normalize the dissimilarity
by the number of non-missing characters shared by the two samples.
If weights are not given, then we increment dissimilarity by +2 if the states
are different, +1 if one state is uncut and the other is an indel, and +0 if
the two states are identical.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A dictionary storing the state weights for each character, derived
from the state priors. This should be a nested dictionary where each
key corresponds to character that then indexes another dictionary
storing the weight of each observed state.
(Character -> State -> Weight)
Returns:
A dissimilarity score. | cassiopeia/solver/dissimilarity_functions.py | weighted_hamming_distance | YosefLab/Cassiopeia | 52 | python | def weighted_hamming_distance(s1: List[int], s2: List[int], missing_state_indicator=(- 1), weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'Computes the weighted hamming distance between samples.\n\n Evaluates the dissimilarity of two phylogenetic samples on the basis of\n their shared indel states and the probability of these indel states\n occurring. Specifically, for a given character, if two states are identical\n we decrement the dissimilarity by the probability of these two occurring\n independently; if the two states disagree, we increment the dissimilarity by\n the probability of these states occurring. We normalize the dissimilarity\n by the number of non-missing characters shared by the two samples.\n\n If weights are not given, then we increment dissimilarity by +2 if the states\n are different, +1 if one state is uncut and the other is an indel, and +0 if\n the two states are identical.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A dictionary storing the state weights for each character, derived\n from the state priors. This should be a nested dictionary where each\n key corresponds to character that then indexes another dictionary\n storing the weight of each observed state.\n (Character -> State -> Weight)\n\n Returns:\n A dissimilarity score.\n\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] != s2[i]):
if ((s1[i] == 0) or (s2[i] == 0)):
if weights:
if (s1[i] != 0):
d += weights[i][s1[i]]
else:
d += weights[i][s2[i]]
else:
d += 1
elif weights:
d += (weights[i][s1[i]] + weights[i][s2[i]])
else:
d += 2
if (num_present == 0):
return 0
return (d / num_present) | def weighted_hamming_distance(s1: List[int], s2: List[int], missing_state_indicator=(- 1), weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'Computes the weighted hamming distance between samples.\n\n Evaluates the dissimilarity of two phylogenetic samples on the basis of\n their shared indel states and the probability of these indel states\n occurring. Specifically, for a given character, if two states are identical\n we decrement the dissimilarity by the probability of these two occurring\n independently; if the two states disagree, we increment the dissimilarity by\n the probability of these states occurring. We normalize the dissimilarity\n by the number of non-missing characters shared by the two samples.\n\n If weights are not given, then we increment dissimilarity by +2 if the states\n are different, +1 if one state is uncut and the other is an indel, and +0 if\n the two states are identical.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A dictionary storing the state weights for each character, derived\n from the state priors. This should be a nested dictionary where each\n key corresponds to character that then indexes another dictionary\n storing the weight of each observed state.\n (Character -> State -> Weight)\n\n Returns:\n A dissimilarity score.\n\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] != s2[i]):
if ((s1[i] == 0) or (s2[i] == 0)):
if weights:
if (s1[i] != 0):
d += weights[i][s1[i]]
else:
d += weights[i][s2[i]]
else:
d += 1
elif weights:
d += (weights[i][s1[i]] + weights[i][s2[i]])
else:
d += 2
if (num_present == 0):
return 0
return (d / num_present)<|docstring|>Computes the weighted hamming distance between samples.
Evaluates the dissimilarity of two phylogenetic samples on the basis of
their shared indel states and the probability of these indel states
occurring. Specifically, for a given character, if two states are identical
we decrement the dissimilarity by the probability of these two occurring
independently; if the two states disagree, we increment the dissimilarity by
the probability of these states occurring. We normalize the dissimilarity
by the number of non-missing characters shared by the two samples.
If weights are not given, then we increment dissimilarity by +2 if the states
are different, +1 if one state is uncut and the other is an indel, and +0 if
the two states are identical.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A dictionary storing the state weights for each character, derived
from the state priors. This should be a nested dictionary where each
key corresponds to character that then indexes another dictionary
storing the weight of each observed state.
(Character -> State -> Weight)
Returns:
A dissimilarity score.<|endoftext|> |
7ddc65d954467861a231f066afbeb71623991d1850341c936e92eeaec68adbe5 | def hamming_similarity_without_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n Returns:\n The number of shared mutations between two samples, weighted or unweighted\n '
similarity = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator) or (s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
return similarity | A function to return the number of (non-missing) character/state
mutations shared by two samples.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The number of shared mutations between two samples, weighted or unweighted | cassiopeia/solver/dissimilarity_functions.py | hamming_similarity_without_missing | YosefLab/Cassiopeia | 52 | python | def hamming_similarity_without_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n Returns:\n The number of shared mutations between two samples, weighted or unweighted\n '
similarity = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator) or (s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
return similarity | def hamming_similarity_without_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n Returns:\n The number of shared mutations between two samples, weighted or unweighted\n '
similarity = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator) or (s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
return similarity<|docstring|>A function to return the number of (non-missing) character/state
mutations shared by two samples.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The number of shared mutations between two samples, weighted or unweighted<|endoftext|> |
f691888bf9ff56cf7f4ab0aa2187019669a4b634262807471b83835b7167d9cb | def hamming_similarity_normalized_over_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'\n A function to return the number of (non-missing) character/state mutations\n shared by two samples, normalized over the amount of missing data.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The number of shared mutations between two samples normalized over the\n number of missing data events, weighted or unweighted\n '
similarity = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if ((s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
if (num_present == 0):
return 0
return (similarity / num_present) | A function to return the number of (non-missing) character/state mutations
shared by two samples, normalized over the amount of missing data.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The number of shared mutations between two samples normalized over the
number of missing data events, weighted or unweighted | cassiopeia/solver/dissimilarity_functions.py | hamming_similarity_normalized_over_missing | YosefLab/Cassiopeia | 52 | python | def hamming_similarity_normalized_over_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'\n A function to return the number of (non-missing) character/state mutations\n shared by two samples, normalized over the amount of missing data.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The number of shared mutations between two samples normalized over the\n number of missing data events, weighted or unweighted\n '
similarity = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if ((s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
if (num_present == 0):
return 0
return (similarity / num_present) | def hamming_similarity_normalized_over_missing(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'\n A function to return the number of (non-missing) character/state mutations\n shared by two samples, normalized over the amount of missing data.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The number of shared mutations between two samples normalized over the\n number of missing data events, weighted or unweighted\n '
similarity = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if ((s1[i] == 0) or (s2[i] == 0)):
continue
if (s1[i] == s2[i]):
if weights:
similarity += weights[i][s1[i]]
else:
similarity += 1
if (num_present == 0):
return 0
return (similarity / num_present)<|docstring|>A function to return the number of (non-missing) character/state mutations
shared by two samples, normalized over the amount of missing data.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The number of shared mutations between two samples normalized over the
number of missing data events, weighted or unweighted<|endoftext|> |
6f25e4a8c45486c84717155e21f7ef74bde51489acaa2869767d15d7affeecce | @numba.jit(nopython=True)
def hamming_distance(s1: np.array(int), s2: np.array(int), ignore_missing_state: bool=False, missing_state_indicator: int=(- 1)) -> int:
'Computes the vanilla hamming distance between two samples.\n\n Counts the number of positions that two samples disagree at. A user can\n optionally specify to ignore missing data.\n\n Args:\n s1: The first sample\n s2: The second sample\n ignore_missing_state: Ignore comparisons where one is the missing state\n indicator\n missing_state_indicator: Indicator for missing data.\n\n Returns:\n The number of positions two nodes disagree at.\n '
dist = 0
for i in range(len(s1)):
if (s1[i] != s2[i]):
if (((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)) and ignore_missing_state):
dist += 0
else:
dist += 1
return dist | Computes the vanilla hamming distance between two samples.
Counts the number of positions that two samples disagree at. A user can
optionally specify to ignore missing data.
Args:
s1: The first sample
s2: The second sample
ignore_missing_state: Ignore comparisons where one is the missing state
indicator
missing_state_indicator: Indicator for missing data.
Returns:
The number of positions two nodes disagree at. | cassiopeia/solver/dissimilarity_functions.py | hamming_distance | YosefLab/Cassiopeia | 52 | python | @numba.jit(nopython=True)
def hamming_distance(s1: np.array(int), s2: np.array(int), ignore_missing_state: bool=False, missing_state_indicator: int=(- 1)) -> int:
'Computes the vanilla hamming distance between two samples.\n\n Counts the number of positions that two samples disagree at. A user can\n optionally specify to ignore missing data.\n\n Args:\n s1: The first sample\n s2: The second sample\n ignore_missing_state: Ignore comparisons where one is the missing state\n indicator\n missing_state_indicator: Indicator for missing data.\n\n Returns:\n The number of positions two nodes disagree at.\n '
dist = 0
for i in range(len(s1)):
if (s1[i] != s2[i]):
if (((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)) and ignore_missing_state):
dist += 0
else:
dist += 1
return dist | @numba.jit(nopython=True)
def hamming_distance(s1: np.array(int), s2: np.array(int), ignore_missing_state: bool=False, missing_state_indicator: int=(- 1)) -> int:
'Computes the vanilla hamming distance between two samples.\n\n Counts the number of positions that two samples disagree at. A user can\n optionally specify to ignore missing data.\n\n Args:\n s1: The first sample\n s2: The second sample\n ignore_missing_state: Ignore comparisons where one is the missing state\n indicator\n missing_state_indicator: Indicator for missing data.\n\n Returns:\n The number of positions two nodes disagree at.\n '
dist = 0
for i in range(len(s1)):
if (s1[i] != s2[i]):
if (((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)) and ignore_missing_state):
dist += 0
else:
dist += 1
return dist<|docstring|>Computes the vanilla hamming distance between two samples.
Counts the number of positions that two samples disagree at. A user can
optionally specify to ignore missing data.
Args:
s1: The first sample
s2: The second sample
ignore_missing_state: Ignore comparisons where one is the missing state
indicator
missing_state_indicator: Indicator for missing data.
Returns:
The number of positions two nodes disagree at.<|endoftext|> |
09cee579294092e972ef0a95730a3c6820fea23f216fc4ff7a32430c0064c326 | def weighted_hamming_similarity(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the weighted number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The weighted number of shared mutations between two samples\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] == s2[i]):
if (s1[i] != 0):
if weights:
d += (2 * weights[i][s1[i]])
else:
d += 2
elif (not weights):
d += 1
if (num_present == 0):
return 0
return (d / num_present) | A function to return the weighted number of (non-missing) character/state
mutations shared by two samples.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The weighted number of shared mutations between two samples | cassiopeia/solver/dissimilarity_functions.py | weighted_hamming_similarity | YosefLab/Cassiopeia | 52 | python | def weighted_hamming_similarity(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the weighted number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The weighted number of shared mutations between two samples\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] == s2[i]):
if (s1[i] != 0):
if weights:
d += (2 * weights[i][s1[i]])
else:
d += 2
elif (not weights):
d += 1
if (num_present == 0):
return 0
return (d / num_present) | def weighted_hamming_similarity(s1: List[int], s2: List[int], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None) -> float:
'A function to return the weighted number of (non-missing) character/state\n mutations shared by two samples.\n\n Args:\n s1: Character states of the first sample\n s2: Character states of the second sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n\n Returns:\n The weighted number of shared mutations between two samples\n '
d = 0
num_present = 0
for i in range(len(s1)):
if ((s1[i] == missing_state_indicator) or (s2[i] == missing_state_indicator)):
continue
num_present += 1
if (s1[i] == s2[i]):
if (s1[i] != 0):
if weights:
d += (2 * weights[i][s1[i]])
else:
d += 2
elif (not weights):
d += 1
if (num_present == 0):
return 0
return (d / num_present)<|docstring|>A function to return the weighted number of (non-missing) character/state
mutations shared by two samples.
Args:
s1: Character states of the first sample
s2: Character states of the second sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
Returns:
The weighted number of shared mutations between two samples<|endoftext|> |
9d8966dcfaf3efe4dbdc2761fc79ef7adc542a92771989ea6042f2d46d1eab07 | def cluster_dissimilarity(dissimilarity_function: Callable[([List[int], List[int], int, Dict[(int, Dict[(int, float)])]], float)], s1: Union[(List[int], List[Tuple[(int, ...)]])], s2: Union[(List[int], List[Tuple[(int, ...)]])], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None, linkage_function: Callable[([Union[(np.array, List[float])]], float)]=np.mean, normalize: bool=True) -> float:
'Compute the dissimilarity between (possibly) ambiguous character strings.\n\n An ambiguous character string is a character string in\n which each character contains an tuple of possible states, and such a\n character string is represented as a list of tuples of integers.\n\n A naive implementation is to first disambiguate each of the two ambiguous\n character strings by generating all possible strings, then computing the\n dissimilarity between all pairwise combinations, and finally applying the\n linkage function on the calculated dissimilarities. However, doing so has\n complexity O(\\prod_{i=1}^N |a_i| x |b_i|) where N is the number of target sites,\n |a_i| is the number of ambiguous characters at target site i of string a,\n and |b_i| is the number of amiguous characters at target site i of string b.\n As an example, if we have N=10 and all a_i=b_i=2, then we have to construct\n 1,038,576 * 2 strings and compute over 4 trillion dissimilarities.\n\n By assuming each target site is independent, simply calculating the sum of\n the linkages of each target site separately is equivalent to the naive\n implementation (can be proven by induction). This procedure is implemented\n in this function. One caveat is that we usually normalize the distance by\n the number of shared non-missing positions. We approximate this by dividing\n the absolute distance by the sum of the probability of each site not being\n a missing site for both strings.\n\n The idea of linkage is analogous to that in hierarchical clustering, where\n ``np.min`` can be used for single linkage, ``np.max`` for complete linkage,\n and ``np.mean`` for average linkage (the default).\n\n The reason the ``dissimilarity_function`` argument is defined as the first\n argument is so that this function may be used as input to\n :func:`cassiopeia.data.CassiopeiaTree.compute_dissimilarity_map`. 
This can\n be done by partial application of this function with the desired dissimilarity\n function.\n\n Note:\n If neither character string is ambiguous, then calling this function is\n equivalent to calling ``dissimilarity_function`` separately.\n\n Args:\n s1: The first (possibly) ambiguous sample\n s2: The second (possibly) ambiguous sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n dissimilarity_function: The dissimilarity function to use to calculate pairwise\n dissimilarities.\n linkage_function: The linkage function to use to aggregate dissimilarities\n into a single number. Defaults to ``np.mean`` for average linkage.\n normalize: Whether to normalize to the proportion of sites present in\n both strings.\n\n Returns:\n The dissimilarity between the two ambiguous samples\n '
s1 = [(s if isinstance(s, tuple) else (s,)) for s in s1]
s2 = [(s if isinstance(s, tuple) else (s,)) for s in s2]
result = 0
num_present = 0
for (i, (c1, c2)) in enumerate(zip(s1, s2)):
dissim = []
present = []
for (_c1, _c2) in itertools.product(c1, c2):
present.append(((_c1 != missing_state_indicator) and (_c2 != missing_state_indicator)))
dissim.append(dissimilarity_function([_c1], [_c2], missing_state_indicator, ({0: weights[i]} if weights else None)))
result += linkage_function(dissim)
num_present += np.mean(present)
if (num_present == 0):
return 0
return ((result / num_present) if normalize else result) | Compute the dissimilarity between (possibly) ambiguous character strings.
An ambiguous character string is a character string in
which each character contains an tuple of possible states, and such a
character string is represented as a list of tuples of integers.
A naive implementation is to first disambiguate each of the two ambiguous
character strings by generating all possible strings, then computing the
dissimilarity between all pairwise combinations, and finally applying the
linkage function on the calculated dissimilarities. However, doing so has
complexity O(\prod_{i=1}^N |a_i| x |b_i|) where N is the number of target sites,
|a_i| is the number of ambiguous characters at target site i of string a,
and |b_i| is the number of amiguous characters at target site i of string b.
As an example, if we have N=10 and all a_i=b_i=2, then we have to construct
1,038,576 * 2 strings and compute over 4 trillion dissimilarities.
By assuming each target site is independent, simply calculating the sum of
the linkages of each target site separately is equivalent to the naive
implementation (can be proven by induction). This procedure is implemented
in this function. One caveat is that we usually normalize the distance by
the number of shared non-missing positions. We approximate this by dividing
the absolute distance by the sum of the probability of each site not being
a missing site for both strings.
The idea of linkage is analogous to that in hierarchical clustering, where
``np.min`` can be used for single linkage, ``np.max`` for complete linkage,
and ``np.mean`` for average linkage (the default).
The reason the ``dissimilarity_function`` argument is defined as the first
argument is so that this function may be used as input to
:func:`cassiopeia.data.CassiopeiaTree.compute_dissimilarity_map`. This can
be done by partial application of this function with the desired dissimilarity
function.
Note:
If neither character string is ambiguous, then calling this function is
equivalent to calling ``dissimilarity_function`` separately.
Args:
s1: The first (possibly) ambiguous sample
s2: The second (possibly) ambiguous sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
dissimilarity_function: The dissimilarity function to use to calculate pairwise
dissimilarities.
linkage_function: The linkage function to use to aggregate dissimilarities
into a single number. Defaults to ``np.mean`` for average linkage.
normalize: Whether to normalize to the proportion of sites present in
both strings.
Returns:
The dissimilarity between the two ambiguous samples | cassiopeia/solver/dissimilarity_functions.py | cluster_dissimilarity | YosefLab/Cassiopeia | 52 | python | def cluster_dissimilarity(dissimilarity_function: Callable[([List[int], List[int], int, Dict[(int, Dict[(int, float)])]], float)], s1: Union[(List[int], List[Tuple[(int, ...)]])], s2: Union[(List[int], List[Tuple[(int, ...)]])], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None, linkage_function: Callable[([Union[(np.array, List[float])]], float)]=np.mean, normalize: bool=True) -> float:
'Compute the dissimilarity between (possibly) ambiguous character strings.\n\n An ambiguous character string is a character string in\n which each character contains an tuple of possible states, and such a\n character string is represented as a list of tuples of integers.\n\n A naive implementation is to first disambiguate each of the two ambiguous\n character strings by generating all possible strings, then computing the\n dissimilarity between all pairwise combinations, and finally applying the\n linkage function on the calculated dissimilarities. However, doing so has\n complexity O(\\prod_{i=1}^N |a_i| x |b_i|) where N is the number of target sites,\n |a_i| is the number of ambiguous characters at target site i of string a,\n and |b_i| is the number of amiguous characters at target site i of string b.\n As an example, if we have N=10 and all a_i=b_i=2, then we have to construct\n 1,038,576 * 2 strings and compute over 4 trillion dissimilarities.\n\n By assuming each target site is independent, simply calculating the sum of\n the linkages of each target site separately is equivalent to the naive\n implementation (can be proven by induction). This procedure is implemented\n in this function. One caveat is that we usually normalize the distance by\n the number of shared non-missing positions. We approximate this by dividing\n the absolute distance by the sum of the probability of each site not being\n a missing site for both strings.\n\n The idea of linkage is analogous to that in hierarchical clustering, where\n ``np.min`` can be used for single linkage, ``np.max`` for complete linkage,\n and ``np.mean`` for average linkage (the default).\n\n The reason the ``dissimilarity_function`` argument is defined as the first\n argument is so that this function may be used as input to\n :func:`cassiopeia.data.CassiopeiaTree.compute_dissimilarity_map`. 
This can\n be done by partial application of this function with the desired dissimilarity\n function.\n\n Note:\n If neither character string is ambiguous, then calling this function is\n equivalent to calling ``dissimilarity_function`` separately.\n\n Args:\n s1: The first (possibly) ambiguous sample\n s2: The second (possibly) ambiguous sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n dissimilarity_function: The dissimilarity function to use to calculate pairwise\n dissimilarities.\n linkage_function: The linkage function to use to aggregate dissimilarities\n into a single number. Defaults to ``np.mean`` for average linkage.\n normalize: Whether to normalize to the proportion of sites present in\n both strings.\n\n Returns:\n The dissimilarity between the two ambiguous samples\n '
s1 = [(s if isinstance(s, tuple) else (s,)) for s in s1]
s2 = [(s if isinstance(s, tuple) else (s,)) for s in s2]
result = 0
num_present = 0
for (i, (c1, c2)) in enumerate(zip(s1, s2)):
dissim = []
present = []
for (_c1, _c2) in itertools.product(c1, c2):
present.append(((_c1 != missing_state_indicator) and (_c2 != missing_state_indicator)))
dissim.append(dissimilarity_function([_c1], [_c2], missing_state_indicator, ({0: weights[i]} if weights else None)))
result += linkage_function(dissim)
num_present += np.mean(present)
if (num_present == 0):
return 0
return ((result / num_present) if normalize else result) | def cluster_dissimilarity(dissimilarity_function: Callable[([List[int], List[int], int, Dict[(int, Dict[(int, float)])]], float)], s1: Union[(List[int], List[Tuple[(int, ...)]])], s2: Union[(List[int], List[Tuple[(int, ...)]])], missing_state_indicator: int, weights: Optional[Dict[(int, Dict[(int, float)])]]=None, linkage_function: Callable[([Union[(np.array, List[float])]], float)]=np.mean, normalize: bool=True) -> float:
'Compute the dissimilarity between (possibly) ambiguous character strings.\n\n An ambiguous character string is a character string in\n which each character contains an tuple of possible states, and such a\n character string is represented as a list of tuples of integers.\n\n A naive implementation is to first disambiguate each of the two ambiguous\n character strings by generating all possible strings, then computing the\n dissimilarity between all pairwise combinations, and finally applying the\n linkage function on the calculated dissimilarities. However, doing so has\n complexity O(\\prod_{i=1}^N |a_i| x |b_i|) where N is the number of target sites,\n |a_i| is the number of ambiguous characters at target site i of string a,\n and |b_i| is the number of amiguous characters at target site i of string b.\n As an example, if we have N=10 and all a_i=b_i=2, then we have to construct\n 1,038,576 * 2 strings and compute over 4 trillion dissimilarities.\n\n By assuming each target site is independent, simply calculating the sum of\n the linkages of each target site separately is equivalent to the naive\n implementation (can be proven by induction). This procedure is implemented\n in this function. One caveat is that we usually normalize the distance by\n the number of shared non-missing positions. We approximate this by dividing\n the absolute distance by the sum of the probability of each site not being\n a missing site for both strings.\n\n The idea of linkage is analogous to that in hierarchical clustering, where\n ``np.min`` can be used for single linkage, ``np.max`` for complete linkage,\n and ``np.mean`` for average linkage (the default).\n\n The reason the ``dissimilarity_function`` argument is defined as the first\n argument is so that this function may be used as input to\n :func:`cassiopeia.data.CassiopeiaTree.compute_dissimilarity_map`. 
This can\n be done by partial application of this function with the desired dissimilarity\n function.\n\n Note:\n If neither character string is ambiguous, then calling this function is\n equivalent to calling ``dissimilarity_function`` separately.\n\n Args:\n s1: The first (possibly) ambiguous sample\n s2: The second (possibly) ambiguous sample\n missing_state_indicator: The character representing missing values\n weights: A set of optional weights to weight the similarity of a mutation\n dissimilarity_function: The dissimilarity function to use to calculate pairwise\n dissimilarities.\n linkage_function: The linkage function to use to aggregate dissimilarities\n into a single number. Defaults to ``np.mean`` for average linkage.\n normalize: Whether to normalize to the proportion of sites present in\n both strings.\n\n Returns:\n The dissimilarity between the two ambiguous samples\n '
s1 = [(s if isinstance(s, tuple) else (s,)) for s in s1]
s2 = [(s if isinstance(s, tuple) else (s,)) for s in s2]
result = 0
num_present = 0
for (i, (c1, c2)) in enumerate(zip(s1, s2)):
dissim = []
present = []
for (_c1, _c2) in itertools.product(c1, c2):
present.append(((_c1 != missing_state_indicator) and (_c2 != missing_state_indicator)))
dissim.append(dissimilarity_function([_c1], [_c2], missing_state_indicator, ({0: weights[i]} if weights else None)))
result += linkage_function(dissim)
num_present += np.mean(present)
if (num_present == 0):
return 0
return ((result / num_present) if normalize else result)<|docstring|>Compute the dissimilarity between (possibly) ambiguous character strings.
An ambiguous character string is a character string in
which each character contains an tuple of possible states, and such a
character string is represented as a list of tuples of integers.
A naive implementation is to first disambiguate each of the two ambiguous
character strings by generating all possible strings, then computing the
dissimilarity between all pairwise combinations, and finally applying the
linkage function on the calculated dissimilarities. However, doing so has
complexity O(\prod_{i=1}^N |a_i| x |b_i|) where N is the number of target sites,
|a_i| is the number of ambiguous characters at target site i of string a,
and |b_i| is the number of amiguous characters at target site i of string b.
As an example, if we have N=10 and all a_i=b_i=2, then we have to construct
1,038,576 * 2 strings and compute over 4 trillion dissimilarities.
By assuming each target site is independent, simply calculating the sum of
the linkages of each target site separately is equivalent to the naive
implementation (can be proven by induction). This procedure is implemented
in this function. One caveat is that we usually normalize the distance by
the number of shared non-missing positions. We approximate this by dividing
the absolute distance by the sum of the probability of each site not being
a missing site for both strings.
The idea of linkage is analogous to that in hierarchical clustering, where
``np.min`` can be used for single linkage, ``np.max`` for complete linkage,
and ``np.mean`` for average linkage (the default).
The reason the ``dissimilarity_function`` argument is defined as the first
argument is so that this function may be used as input to
:func:`cassiopeia.data.CassiopeiaTree.compute_dissimilarity_map`. This can
be done by partial application of this function with the desired dissimilarity
function.
Note:
If neither character string is ambiguous, then calling this function is
equivalent to calling ``dissimilarity_function`` separately.
Args:
s1: The first (possibly) ambiguous sample
s2: The second (possibly) ambiguous sample
missing_state_indicator: The character representing missing values
weights: A set of optional weights to weight the similarity of a mutation
dissimilarity_function: The dissimilarity function to use to calculate pairwise
dissimilarities.
linkage_function: The linkage function to use to aggregate dissimilarities
into a single number. Defaults to ``np.mean`` for average linkage.
normalize: Whether to normalize to the proportion of sites present in
both strings.
Returns:
The dissimilarity between the two ambiguous samples<|endoftext|> |
4bff8b8d065290e11cf96eac686697dfbdde9903dc2e26e4f48ca174a2a0caa1 | @pytest.fixture(scope='session')
def logger():
'Logger object with log file and colorful console output'
return LOGGER | Logger object with log file and colorful console output | tests/conftest.py | logger | DeFi-Coder-News-Letter/StormSurge-pydex | 28 | python | @pytest.fixture(scope='session')
def logger():
return LOGGER | @pytest.fixture(scope='session')
def logger():
return LOGGER<|docstring|>Logger object with log file and colorful console output<|endoftext|> |
84d1f6609924ebb42502f59b19fb07f6f93483346c219331ed2f7f399f9a82d6 | @pytest.fixture(scope='session')
def asset_infos():
'A convenience object for holding all asset info needed for testing.\n Ideally, need to enhance this later to dynamically pull this information\n from the configured network.\n '
class AssetInfos():
'Convenience class holding asset info'
VETH_TOKEN = '0xc4abc01578139e2105d9c9eba0b0aa6f6a60d082'
VETH_ASSET_DATA = '0xf47261b0000000000000000000000000c4abc01578139e2105d9c9eba0b0aa6f6a60d082'
LONG_TOKEN = '0x358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
LONG_ASSET_DATA = '0xf47261b0000000000000000000000000358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
SHORT_TOKEN = '0x4a1f5f67c1cf90f176496aa548382c78921ae9f1'
SHORT_ASSET_DATA = '0xf47261b00000000000000000000000004a1f5f67c1cf90f176496aa548382c78921ae9f1'
FULL_SET_ASSET_DATA = {'LONG': LONG_ASSET_DATA, 'SHORT': SHORT_ASSET_DATA}
return AssetInfos | A convenience object for holding all asset info needed for testing.
Ideally, need to enhance this later to dynamically pull this information
from the configured network. | tests/conftest.py | asset_infos | DeFi-Coder-News-Letter/StormSurge-pydex | 28 | python | @pytest.fixture(scope='session')
def asset_infos():
'A convenience object for holding all asset info needed for testing.\n Ideally, need to enhance this later to dynamically pull this information\n from the configured network.\n '
class AssetInfos():
'Convenience class holding asset info'
VETH_TOKEN = '0xc4abc01578139e2105d9c9eba0b0aa6f6a60d082'
VETH_ASSET_DATA = '0xf47261b0000000000000000000000000c4abc01578139e2105d9c9eba0b0aa6f6a60d082'
LONG_TOKEN = '0x358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
LONG_ASSET_DATA = '0xf47261b0000000000000000000000000358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
SHORT_TOKEN = '0x4a1f5f67c1cf90f176496aa548382c78921ae9f1'
SHORT_ASSET_DATA = '0xf47261b00000000000000000000000004a1f5f67c1cf90f176496aa548382c78921ae9f1'
FULL_SET_ASSET_DATA = {'LONG': LONG_ASSET_DATA, 'SHORT': SHORT_ASSET_DATA}
return AssetInfos | @pytest.fixture(scope='session')
def asset_infos():
'A convenience object for holding all asset info needed for testing.\n Ideally, need to enhance this later to dynamically pull this information\n from the configured network.\n '
class AssetInfos():
'Convenience class holding asset info'
VETH_TOKEN = '0xc4abc01578139e2105d9c9eba0b0aa6f6a60d082'
VETH_ASSET_DATA = '0xf47261b0000000000000000000000000c4abc01578139e2105d9c9eba0b0aa6f6a60d082'
LONG_TOKEN = '0x358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
LONG_ASSET_DATA = '0xf47261b0000000000000000000000000358b48569a4a4ef6310c1f1d8e50be9d068a50c6'
SHORT_TOKEN = '0x4a1f5f67c1cf90f176496aa548382c78921ae9f1'
SHORT_ASSET_DATA = '0xf47261b00000000000000000000000004a1f5f67c1cf90f176496aa548382c78921ae9f1'
FULL_SET_ASSET_DATA = {'LONG': LONG_ASSET_DATA, 'SHORT': SHORT_ASSET_DATA}
return AssetInfos<|docstring|>A convenience object for holding all asset info needed for testing.
Ideally, need to enhance this later to dynamically pull this information
from the configured network.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.