code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from collections import Collection
import regex as re
import numpy as np
from bn.values.array_val import ArrayVal
from bn.values.boolean_val import BooleanVal
from bn.values.double_val import DoubleVal
from bn.values.none_val import NoneVal
from bn.values.relational_val import RelationalVal
from bn.values.set_val import SetVal
from bn.values.string_val import StringVal
from bn.values.custom_val import CustomVal
from bn.values.value import Value
from datastructs.graph import Graph
from utils.py_utils import get_class, Singleton
import logging
from multipledispatch import dispatch
from settings import Settings
# Shared namespace so the overloaded static methods below resolve against each other.
dispatch_namespace = {}
class ValueFactory:
    """
    Factory for creating variable values.

    The static ``create`` method is overloaded (via multipledispatch) for
    strings, floats, booleans and collections; the string variant parses a
    textual representation into the most specific Value subtype.
    """
    # Shared singleton returned for the textual value "none".
    _none_value = NoneVal()
    # A single (optionally signed, optionally exponent-form) decimal number.
    _double_pattern = re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$')
    # A bracketed, comma-separated list of numbers, e.g. "[1.0, 2e3]".
    _array_pattern = re.compile(r'\[([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?,\s*)*([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\]')
    # One element of a bracketed set expression (identifier-like text, optional nesting).
    _set_pattern = re.compile(r'[/\w\-_\.\^\=\s]*([\[\(][/\w\-_,\.\^\=\s\(]+\)*[\]\)])?')
    # "@SomeClass" -> instantiate a custom (singleton) class.
    _custom_class_pattern = re.compile(r'^@[^\(\)]*$')
    # "@func(a,b)" -> invoke a function registered in Settings._functions.
    _custom_function_pattern = re.compile(r'^@[^\(\)]+\(.*\)$')
    # logger
    log = logging.getLogger('PyOpenDial')

    @staticmethod
    @dispatch(str, namespace=dispatch_namespace)
    def create(value):
        """
        Creates a new value based on the provided string representation. If the string
        contains a numeric value, "true", "false", "None", or opening and closing
        brackets, convert it to the appropriate values. Else, returns a string value.

        :param value: the string representation for the value
        :return: the resulting value
        """
        if value is None:  # fixed: identity comparison instead of "== None"
            return NoneVal()
        if ValueFactory._double_pattern.search(value):
            return DoubleVal(float(value))
        elif value.lower() == 'true':
            return BooleanVal(True)
        elif value.lower() == 'false':
            return BooleanVal(False)
        elif value.lower() == 'none':
            return ValueFactory._none_value
        elif ValueFactory._array_pattern.match(value):
            # "[1, 2.5]" -> numeric array value
            value_list = list()
            value_str_list = value[1:-1].split(',')
            for value_str_item in value_str_list:
                value_list.append(float(value_str_item))
            return ArrayVal(np.array(value_list))
        elif value.startswith('[') and value.endswith(']'):
            # Bracketed but non-numeric: try a relational structure first,
            # then fall back to a set of recursively parsed sub-values.
            if Graph.is_relational(value):
                relation_value = RelationalVal(value)
                if not relation_value.is_empty():
                    return relation_value
            sub_values = list()
            for match in ValueFactory._set_pattern.finditer(value[1:-1]):
                sub_value = match.group(0).strip()
                if len(sub_value) > 0:
                    sub_values.append(ValueFactory.create(sub_value))
            return SetVal(sub_values)
        elif ValueFactory._custom_class_pattern.match(value):
            class_name = value[1:]
            custom_value = get_class(class_name)()
            if isinstance(custom_value, Singleton):
                return CustomVal(custom_value)
            else:
                raise ValueError("Custom class should inherit utils.py_utils.Singleton")
        elif ValueFactory._custom_function_pattern.match(value):
            function_name = value.split("(")[0][1:]
            params = value.split("(")[1][:-1].split(",")
            # "@f()" splits into [''] -- drop that empty placeholder. Guarded,
            # because list.remove('') raised ValueError for any call with a
            # non-empty argument list such as "@f(a,b)" (bug fix).
            if '' in params:
                params.remove('')
            if function_name in Settings._functions:
                func = Settings._functions[function_name]
                func_result = func(*params)
                # Wrap the raw return value into the matching Value subtype.
                if isinstance(func_result, float):
                    return DoubleVal(func_result)
                elif isinstance(func_result, bool):
                    return BooleanVal(func_result)
                elif func_result is None:
                    return ValueFactory._none_value
                elif isinstance(func_result, np.ndarray):
                    return ArrayVal(func_result)
                elif isinstance(func_result, set):
                    return SetVal(func_result)
                elif isinstance(func_result, str):
                    return StringVal(func_result)
                else:
                    raise ValueError("Not supported return type %s" % type(func_result))
            else:
                raise ValueError("Function %s is not defined." % function_name)
        else:
            return StringVal(value)

    @staticmethod
    @dispatch(float, namespace=dispatch_namespace)
    def create(value):
        """
        Returns a double value given the double

        :param value: the float
        :return: the value
        """
        return DoubleVal(value)

    @staticmethod
    @dispatch(bool, namespace=dispatch_namespace)
    def create(value):
        """
        Returns the boolean value given the boolean

        :param value: the boolean
        :return: the boolean value
        """
        return BooleanVal(value)

    @staticmethod
    @dispatch((list, Collection), namespace=dispatch_namespace)
    def create(values):
        """
        Returns the set value given the values

        :param values: the values
        :return: the set value
        """
        if len(values) == 0 or isinstance(next(iter(values)), Value):
            return SetVal(values)
        # Bug fix: indexing (values[0]) fails for non-indexable Collections
        # such as sets; peek at an arbitrary element instead.
        first_element = next(iter(values))
        if isinstance(first_element, float):
            # list(...) keeps this working for sets as well as lists.
            return ArrayVal(np.array(list(values)))
        # NOTE(review): any other element type silently yields None here
        # (pre-existing behavior, kept for backward compatibility).

    @staticmethod
    @dispatch(namespace=dispatch_namespace)
    def none():
        """
        Returns the none value.

        :return: the none value
        """
        return ValueFactory._none_value

    @staticmethod
    @dispatch(Value, Value, namespace=dispatch_namespace)
    def concatenate(v1, v2):
        """
        Returns the concatenation of the two values.

        :param v1: the value
        :param v2: the value
        :return: the concatenation of the two values
        """
        if isinstance(v1, StringVal) and isinstance(v2, StringVal):
            return str(v1) + ' ' + str(v2)
        elif isinstance(v1, NoneVal):
            return v2
        elif isinstance(v2, NoneVal):
            return v1
        else:
            ValueFactory.log.warning("concatenation not implemented for %s + %s" % (v1, v2))
            return NoneVal()
| [
"logging.getLogger",
"bn.values.none_val.NoneVal",
"bn.values.array_val.ArrayVal",
"datastructs.graph.Graph.is_relational",
"bn.values.relational_val.RelationalVal",
"bn.values.set_val.SetVal",
"bn.values.custom_val.CustomVal",
"bn.values.boolean_val.BooleanVal",
"numpy.array",
"multipledispatch.d... | [((743, 752), 'bn.values.none_val.NoneVal', 'NoneVal', ([], {}), '()\n', (750, 752), False, 'from bn.values.none_val import NoneVal\n'), ((775, 830), 'regex.compile', 're.compile', (['"""^[-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?$"""'], {}), "('^[-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?$')\n", (785, 830), True, 'import regex as re\n'), ((852, 970), 'regex.compile', 're.compile', (['"""\\\\[([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?,\\\\s*)*([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)\\\\]"""'], {}), "(\n '\\\\[([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?,\\\\s*)*([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)\\\\]'\n )\n", (862, 970), True, 'import regex as re\n'), ((976, 1073), 'regex.compile', 're.compile', (['"""[/\\\\w\\\\-_\\\\.\\\\^\\\\=\\\\s]*([\\\\[\\\\(][/\\\\w\\\\-_,\\\\.\\\\^\\\\=\\\\s\\\\(]+\\\\)*[\\\\]\\\\)])?"""'], {}), "(\n '[/\\\\w\\\\-_\\\\.\\\\^\\\\=\\\\s]*([\\\\[\\\\(][/\\\\w\\\\-_,\\\\.\\\\^\\\\=\\\\s\\\\(]+\\\\)*[\\\\]\\\\)])?'\n )\n", (986, 1073), True, 'import regex as re\n'), ((1075, 1102), 'regex.compile', 're.compile', (['"""^@[^\\\\(\\\\)]*$"""'], {}), "('^@[^\\\\(\\\\)]*$')\n", (1085, 1102), True, 'import regex as re\n'), ((1133, 1168), 'regex.compile', 're.compile', (['"""^@[^\\\\(\\\\)]+\\\\(.*\\\\)$"""'], {}), "('^@[^\\\\(\\\\)]+\\\\(.*\\\\)$')\n", (1143, 1168), True, 'import regex as re\n'), ((1190, 1221), 'logging.getLogger', 'logging.getLogger', (['"""PyOpenDial"""'], {}), "('PyOpenDial')\n", (1207, 1221), False, 'import logging\n'), ((1246, 1289), 'multipledispatch.dispatch', 'dispatch', (['str'], {'namespace': 'dispatch_namespace'}), '(str, namespace=dispatch_namespace)\n', (1254, 1289), False, 'from multipledispatch import dispatch\n'), ((4531, 4576), 'multipledispatch.dispatch', 'dispatch', (['float'], {'namespace': 'dispatch_namespace'}), '(float, namespace=dispatch_namespace)\n', (4539, 4576), False, 'from multipledispatch import dispatch\n'), ((4788, 4832), 'multipledispatch.dispatch', 'dispatch', 
(['bool'], {'namespace': 'dispatch_namespace'}), '(bool, namespace=dispatch_namespace)\n', (4796, 4832), False, 'from multipledispatch import dispatch\n'), ((5059, 5117), 'multipledispatch.dispatch', 'dispatch', (['(list, Collection)'], {'namespace': 'dispatch_namespace'}), '((list, Collection), namespace=dispatch_namespace)\n', (5067, 5117), False, 'from multipledispatch import dispatch\n'), ((5494, 5532), 'multipledispatch.dispatch', 'dispatch', ([], {'namespace': 'dispatch_namespace'}), '(namespace=dispatch_namespace)\n', (5502, 5532), False, 'from multipledispatch import dispatch\n'), ((5702, 5754), 'multipledispatch.dispatch', 'dispatch', (['Value', 'Value'], {'namespace': 'dispatch_namespace'}), '(Value, Value, namespace=dispatch_namespace)\n', (5710, 5754), False, 'from multipledispatch import dispatch\n'), ((4747, 4763), 'bn.values.double_val.DoubleVal', 'DoubleVal', (['value'], {}), '(value)\n', (4756, 4763), False, 'from bn.values.double_val import DoubleVal\n'), ((5017, 5034), 'bn.values.boolean_val.BooleanVal', 'BooleanVal', (['value'], {}), '(value)\n', (5027, 5034), False, 'from bn.values.boolean_val import BooleanVal\n'), ((1737, 1746), 'bn.values.none_val.NoneVal', 'NoneVal', ([], {}), '()\n', (1744, 1746), False, 'from bn.values.none_val import NoneVal\n'), ((5368, 5382), 'bn.values.set_val.SetVal', 'SetVal', (['values'], {}), '(values)\n', (5374, 5382), False, 'from bn.values.set_val import SetVal\n'), ((1903, 1919), 'bn.values.boolean_val.BooleanVal', 'BooleanVal', (['(True)'], {}), '(True)\n', (1913, 1919), False, 'from bn.values.boolean_val import BooleanVal\n'), ((5452, 5468), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (5460, 5468), True, 'import numpy as np\n'), ((1978, 1995), 'bn.values.boolean_val.BooleanVal', 'BooleanVal', (['(False)'], {}), '(False)\n', (1988, 1995), False, 'from bn.values.boolean_val import BooleanVal\n'), ((6330, 6339), 'bn.values.none_val.NoneVal', 'NoneVal', ([], {}), '()\n', (6337, 6339), False, 'from 
bn.values.none_val import NoneVal\n'), ((2353, 2373), 'numpy.array', 'np.array', (['value_list'], {}), '(value_list)\n', (2361, 2373), True, 'import numpy as np\n'), ((2450, 2476), 'datastructs.graph.Graph.is_relational', 'Graph.is_relational', (['value'], {}), '(value)\n', (2469, 2476), False, 'from datastructs.graph import Graph\n'), ((2911, 2929), 'bn.values.set_val.SetVal', 'SetVal', (['sub_values'], {}), '(sub_values)\n', (2917, 2929), False, 'from bn.values.set_val import SetVal\n'), ((2511, 2531), 'bn.values.relational_val.RelationalVal', 'RelationalVal', (['value'], {}), '(value)\n', (2524, 2531), False, 'from bn.values.relational_val import RelationalVal\n'), ((3054, 3075), 'utils.py_utils.get_class', 'get_class', (['class_name'], {}), '(class_name)\n', (3063, 3075), False, 'from utils.py_utils import get_class, Singleton\n'), ((3153, 3176), 'bn.values.custom_val.CustomVal', 'CustomVal', (['custom_value'], {}), '(custom_value)\n', (3162, 3176), False, 'from bn.values.custom_val import CustomVal\n'), ((4490, 4506), 'bn.values.string_val.StringVal', 'StringVal', (['value'], {}), '(value)\n', (4499, 4506), False, 'from bn.values.string_val import StringVal\n'), ((3721, 3743), 'bn.values.double_val.DoubleVal', 'DoubleVal', (['func_result'], {}), '(func_result)\n', (3730, 3743), False, 'from bn.values.double_val import DoubleVal\n'), ((3823, 3846), 'bn.values.boolean_val.BooleanVal', 'BooleanVal', (['func_result'], {}), '(func_result)\n', (3833, 3846), False, 'from bn.values.boolean_val import BooleanVal\n'), ((4026, 4047), 'bn.values.array_val.ArrayVal', 'ArrayVal', (['func_result'], {}), '(func_result)\n', (4034, 4047), False, 'from bn.values.array_val import ArrayVal\n'), ((4126, 4145), 'bn.values.set_val.SetVal', 'SetVal', (['func_result'], {}), '(func_result)\n', (4132, 4145), False, 'from bn.values.set_val import SetVal\n'), ((4224, 4246), 'bn.values.string_val.StringVal', 'StringVal', (['func_result'], {}), '(func_result)\n', (4233, 4246), False, 'from 
bn.values.string_val import StringVal\n')] |
#!/usr/bin/env python
# create random images for learning Betti numbers
# there two types of images: binary images and their distance transform
from PIL import Image
import numpy as np
import random
import sys
import os
import skimage.morphology as sm
import skimage.io as io
from scipy.ndimage.morphology import distance_transform_edt
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='create sinograms for artificial images')
    parser.add_argument('--size', '-s', type=int, default=130,
                        help='size of the image')
    parser.add_argument('--min_width', '-mi', type=int, default=8,
                        help='minimum width of component to be created')
    parser.add_argument('--max_width', '-ma', type=int, default=20,
                        help='maximum width of component to be created')
    parser.add_argument('--n_components', '-nc', type=int, default=20,
                        help='Number of components to be created')
    parser.add_argument('--num', '-n', type=int, default=2000,
                        help='Number of images to be created')
    parser.add_argument('--noise', '-z', type=int, default=0,
                        help='Strength of noise')
    parser.add_argument('--outdir', '-o', default='betti',
                        help='output directory')
    args = parser.parse_args()

    # Prepare the output directory and a disk-shaped structuring element
    # used to smooth the rectangles via morphological closing.
    os.makedirs(args.outdir, exist_ok=True)
    closing_disk = sm.disk(5)
    for image_index in range(args.num):
        canvas = np.zeros((args.size, args.size), dtype=np.uint8)
        # Paint random axis-aligned rectangles, clipped away from the border.
        for _ in range(args.n_components):
            top = random.randint(args.min_width, args.size - 2 * args.min_width)
            left = random.randint(args.min_width, args.size - 2 * args.min_width)
            w = random.randint(args.min_width, args.max_width)
            h = random.randint(args.min_width, args.max_width)
            bottom = min(args.size - args.min_width, left + w)
            right = min(args.size - args.min_width, top + h)
            canvas[left:bottom, top:right] = 1
        canvas = sm.binary_closing(canvas, closing_disk)
        io.imsave(os.path.join(args.outdir, '{:0>5}.png'.format(image_index)), (canvas * 255).astype(np.uint8))
        # NOTE: a distance-transform variant of each image used to be written
        # here as well ('dt_*.png'); that output is currently disabled.
| [
"random.randint",
"skimage.morphology.binary_closing",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.zeros",
"skimage.morphology.disk"
] | [((396, 473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""create sinograms for artificial images"""'}), "(description='create sinograms for artificial images')\n", (419, 473), False, 'import argparse\n'), ((1396, 1435), 'os.makedirs', 'os.makedirs', (['args.outdir'], {'exist_ok': '(True)'}), '(args.outdir, exist_ok=True)\n', (1407, 1435), False, 'import os\n'), ((1447, 1457), 'skimage.morphology.disk', 'sm.disk', (['(5)'], {}), '(5)\n', (1454, 1457), True, 'import skimage.morphology as sm\n'), ((1503, 1551), 'numpy.zeros', 'np.zeros', (['(args.size, args.size)'], {'dtype': 'np.uint8'}), '((args.size, args.size), dtype=np.uint8)\n', (1511, 1551), True, 'import numpy as np\n'), ((1988, 2016), 'skimage.morphology.binary_closing', 'sm.binary_closing', (['img', 'tool'], {}), '(img, tool)\n', (2005, 2016), True, 'import skimage.morphology as sm\n'), ((1611, 1673), 'random.randint', 'random.randint', (['args.min_width', '(args.size - 2 * args.min_width)'], {}), '(args.min_width, args.size - 2 * args.min_width)\n', (1625, 1673), False, 'import random\n'), ((1688, 1750), 'random.randint', 'random.randint', (['args.min_width', '(args.size - 2 * args.min_width)'], {}), '(args.min_width, args.size - 2 * args.min_width)\n', (1702, 1750), False, 'import random\n'), ((1762, 1808), 'random.randint', 'random.randint', (['args.min_width', 'args.max_width'], {}), '(args.min_width, args.max_width)\n', (1776, 1808), False, 'import random\n'), ((1824, 1870), 'random.randint', 'random.randint', (['args.min_width', 'args.max_width'], {}), '(args.min_width, args.max_width)\n', (1838, 1870), False, 'import random\n')] |
#!/usr/bin/env python3
import argparse
import json
from os import listdir, mkdir, pardir
from os.path import join, exists
from sys import stderr, stdout
import os, sys, inspect
from random import shuffle
import dynet as dy
import re
import numpy as np
from time import time
from math import isinf
from random import random
# Resolve the directory containing this script, then put <project>/src on
# sys.path so the project-local packages imported below can be found no
# matter which working directory the script is launched from.
SCRIPT_FOLDER = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
MAIN_FOLDER = join(SCRIPT_FOLDER, pardir, pardir)
MODULES_FOLDER = join(MAIN_FOLDER, "src")
if MODULES_FOLDER not in sys.path:
    sys.path.insert(0, MODULES_FOLDER)
from data_formats.tree_loader import load_from_export_format
from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps
from parser.configuration import Configuration
from parser.action import ActionStorage
from parser.beam_search import BeamDecoder
from parser.model import *
from data_formats.head_finding import HeadFinder
def _rec_nonterm_mapping(nonterm_set, tree):
if not tree.is_terminal():
nonterm_set.add(tree.label)
for child in tree.children:
_rec_nonterm_mapping(nonterm_set, child)
def get_nonterm_mapping(trees):
    """Build a String2IntegerMapper over every non-terminal label in *trees*, added in sorted order."""
    labels = set()
    for t in trees:
        _rec_nonterm_mapping(labels, t)
    n2i = String2IntegerMapper()
    for label in sorted(labels):
        n2i.add_string(label)
    return n2i
def get_pos_mapping(pos_seqs):
    """Map POS tags occurring at least 3 times to integers; UNK is registered first."""
    p2i = String2IntegerMapper()
    p2i.add_string(String2IntegerMapper.UNK)
    tag_freq = dict()
    for seq in pos_seqs:
        for tag in seq:
            tag_freq[tag] = tag_freq.get(tag, 0) + 1
    # Same order as the original: frequent-enough tags, sorted alphabetically.
    for tag in sorted(t for t, count in tag_freq.items() if count >= 3):
        p2i.add_string(tag)
    return p2i
def get_char_mapping(trees):
    """Map characters seen at least 10 times across terminal labels to integers (UNK first)."""
    MIN_COUNT_KNOWN = 10
    c2i = String2IntegerMapper()
    c2i.add_string(String2IntegerMapper.UNK)
    char_freq = dict()
    for tree in trees:
        for node in tree.give_me_terminal_nodes():
            for ch in node.label:
                char_freq[ch] = char_freq.get(ch, 0) + 1
    for ch in sorted(c for c, count in char_freq.items() if count >= MIN_COUNT_KNOWN):
        c2i.add_string(ch)
    return c2i
def get_word_mapping(trees):
    """Map words seen at least 3 times across terminal labels to integers (UNK first)."""
    MIN_COUNT_KNOWN = 3
    w2i = String2IntegerMapper()
    w2i.add_string(String2IntegerMapper.UNK)
    word_freq = dict()
    for tree in trees:
        for node in tree.give_me_terminal_nodes():
            word_freq[node.label] = word_freq.get(node.label, 0) + 1
    for word in sorted(w for w, count in word_freq.items() if count >= MIN_COUNT_KNOWN):
        w2i.add_string(word)
    return w2i
def annotate_node_G_ordering(tree, next_free_index, ind_method):
    """
    Assign a post-order '<G' index to every node of *tree*.

    The traversal order of a node's children depends on *ind_method*
    ("Left", "Right", "RightD", "Dist2" or "Label"); any other value raises.
    Returns the next unused index after annotating this subtree.
    """
    if ind_method == "Left":
        child_iter = tree.children
    elif ind_method == "Right":
        child_iter = reversed(tree.children)
    elif ind_method == "RightD":
        # Projective nodes keep surface order; non-projective ones are reversed.
        child_iter = tree.children if tree.is_projective else reversed(tree.children)
    elif ind_method == "Dist2":
        child_iter = reversed(tree.children) if tree.is_gap_creator(2) else tree.children
    elif ind_method == "Label":
        if tree.label == "NP" or tree.label == "PP" or tree.is_projective:
            child_iter = tree.children
        else:
            child_iter = reversed(tree.children)
    else:
        raise Exception("unknown <G method %s" % ind_method)
    for subtree in child_iter:
        next_free_index = annotate_node_G_ordering(subtree, next_free_index, ind_method)
    tree.attributes["<G"] = next_free_index
    return next_free_index + 1
def find_me_a_mother(tree, madre="Whatever"):
    """Annotate each node with 'my_mother' (its parent node; the root gets the sentinel *madre*)."""
    tree.attributes['my_mother'] = madre
    for kid in tree.children:
        find_me_a_mother(kid, tree)
def annotate_projectivity(tree):
    """
    Mark each node with 'fully_projective': terminals always are; an internal
    node is iff all its children are and it covers a contiguous index span.
    """
    if tree.is_terminal():
        tree.attributes['fully_projective'] = True
        return
    # Children first -- a non-projective descendant poisons the whole subtree.
    for kid in tree.children:
        annotate_projectivity(kid)
    if any(not kid.attributes['fully_projective'] for kid in tree.children):
        tree.attributes['fully_projective'] = False
        return
    span = max(tree.covered_indices) - min(tree.covered_indices) + 1
    tree.attributes['fully_projective'] = len(tree.covered_indices) == span
def annotate_max_projective_constituent(tree, mpc=-1):
    """
    Annotate 'mpc': the '<G' index of the highest fully projective constituent
    dominating each node, or -1 when no such ancestor has been found yet.
    Requires annotate_projectivity() to have run first.
    """
    if mpc == -1 and tree.attributes['fully_projective']:
        mpc_here = tree.attributes['<G']
        tree.attributes['mpc'] = mpc_here
        for kid in tree.children:
            annotate_max_projective_constituent(kid, mpc_here)
    else:
        tree.attributes['mpc'] = mpc
        for kid in tree.children:
            annotate_max_projective_constituent(kid, mpc)
def lazy_satisfied(laziness, conf):
    """
    Under the 'lazy' strategy, veto a swap when stack-top and buffer-top live
    inside the same maximal projective constituent; any other strategy (or an
    empty stack/buffer) always allows it.
    """
    if laziness != "lazy":
        return True
    if conf.buffer.size == 0 or conf.stack.size == 0:
        return True
    stack_mpc = conf.stack.top().attributes['real_me'].attributes['mpc']
    buffer_mpc = conf.buffer.top().attributes['real_me'].attributes['mpc']
    return not (stack_mpc != -1 and buffer_mpc != -1 and stack_mpc == buffer_mpc)
def annotate_closest_projective_ancestor(tree, closest_proj_anc):
    """
    Annotate 'cpa' on every node: the '<G' index of its closest ancestor whose
    covered word span is contiguous. A node with a contiguous span passes its
    own '<G' down to its children; otherwise the incoming ancestor is inherited.
    """
    tree.attributes['cpa'] = closest_proj_anc
    span = max(tree.covered_indices) - min(tree.covered_indices) + 1
    inherited = tree.attributes['<G'] if len(tree.covered_indices) == span else closest_proj_anc
    for kid in tree.children:
        annotate_closest_projective_ancestor(kid, inherited)
def laziest_satisfied(laziness, conf):
    """
    Under the 'laziest' strategy, allow a swap only when the two topmost stack
    items fall under the same closest projective ancestor ('cpa'); any other
    strategy (or a stack of size <= 1) always allows it.
    """
    if laziness != "laziest":
        return True
    if conf.stack.size <= 1:
        return True

    def _effective_node(stack_item):
        # For incomplete items, judge by the head child of the gold node.
        real = stack_item.attributes['real_me']
        if not is_complete(stack_item):
            real = real.children[real.attributes['head_child']]
        return real

    first = _effective_node(conf.stack.top())
    second = _effective_node(conf.stack.second_top())
    return first.attributes['cpa'] == second.attributes['cpa']
def is_complete(node):
    """A stack item is complete once it has as many children as its gold counterpart ('real_me')."""
    gold = node.attributes['real_me']
    return len(node.children) == len(gold.children)
def construct_oracle_conf(tree, pos_seq, params, all_s2i, laziness, word_droppiness, tag_droppiness, terminal_dropout_rate, ind_method):
    """
    Run the static oracle on the gold *tree* and return the final configuration.

    The oracle repeatedly applies the first applicable transition
    (ADJ_LEFT / ADJ_RIGHT / PRO-X / SWAP / SHIFT) until the completed gold
    tree sits on the stack. The whole transition chain remains reachable
    through the returned configuration's prev_conf links.

    :param tree: gold constituency tree (annotated in place)
    :param pos_seq: POS tags of the sentence
    :param params: model parameters (used when embedding configurations)
    :param all_s2i: container of string-to-integer mappings
    :param laziness: "eager", "lazy" or "laziest" swap strategy
    :param word_droppiness: probability of replacing a word with DROPPED
    :param tag_droppiness: probability of replacing a tag with UNK
    :param terminal_dropout_rate: dropout rate for terminal embeddings
    :param ind_method: child-ordering method for the '<G' annotation
    :return: the final configuration
    """
    # Annotate the gold tree with everything the oracle conditions on.
    annotate_node_G_ordering(tree, 0, ind_method)
    find_me_a_mother(tree)
    if laziness == "lazy":
        annotate_projectivity(tree)
        annotate_max_projective_constituent(tree)
    if laziness == "laziest":
        annotate_closest_projective_ancestor(tree, tree.attributes['<G'])
    words = [node.label for node in tree.give_me_terminal_nodes()]
    # Word dropout: randomly replace words to simulate unknown tokens.
    if word_droppiness > 0:
        for i in range(len(words)):
            rand_num = random()
            if rand_num < word_droppiness:
                words[i] = String2IntegerMapper.DROPPED
    # Tag dropout, analogous to word dropout.
    new_pos_seq = pos_seq.copy()
    if tag_droppiness > 0:
        for i in range(len(new_pos_seq)):
            rand_num = random()
            if rand_num < tag_droppiness:
                new_pos_seq[i] = String2IntegerMapper.UNK
    action_storage = ActionStorage(all_s2i.n2i, params['E_a'])
    init_conf = Configuration.construct_init_configuration(words, new_pos_seq, params, action_storage, all_s2i, terminal_dropout_rate)
    # Link every buffer item to its gold terminal node ('real_me').
    leafs = tree.give_me_terminal_nodes()
    buffer_pointer = init_conf.buffer
    while buffer_pointer.size > 0:
        buffer_pointer.top().attributes['real_me'] = leafs[buffer_pointer.top().leftmost_word_position]
        buffer_pointer = buffer_pointer.pop()
    c = init_conf
    # Main oracle loop: stop once the stack holds the completed gold root.
    while not (c.is_final_configuration() and c.stack.top().label == tree.label):
        # ADJOINS
        # if second_top() is complete &&&&&& top() is its mother
        if c.stack.size >= 2:
            second_top = c.stack.second_top()
            top = c.stack.top()
            # ADJ_LEFT
            if is_complete(second_top) and \
               second_top.attributes['real_me'].attributes['my_mother'].is_equal_to(top.attributes['real_me']):
                c = c.transition(action_storage.ADJ_LEFT)
                continue
            elif is_complete(top) and \
                 top.attributes['real_me'].attributes['my_mother'].is_equal_to(second_top.attributes['real_me']):
                c = c.transition(action_storage.ADJ_RIGHT)
                continue
        # PRO-X
        # if top() is complete &&&&&& top() is the head of its mother
        if c.stack.size != 0:
            top = c.stack.top()
            real_me = top.attributes['real_me']
            real_mother = real_me.attributes['my_mother']
            # The root's mother is the string sentinel, hence the type check.
            mothers_head = real_mother.children[real_mother.attributes['head_child']] if type(real_mother) != str else None
            if mothers_head is not None and \
               is_complete(top) and \
               mothers_head.is_equal_to(real_me):
                c = c.transition(action_storage.get_pro_index_for_string_label(real_mother.label))
                # The projected node now stands for its gold parent.
                c.stack.top().attributes['real_me'] = real_mother
                continue
        # SWAP
        # if second_top()[<G] > top()[<G]
        if c.stack.size >= 2:
            # For incomplete items, compare via the head child of the gold node.
            real_me_top = c.stack.top().attributes['real_me']
            if not is_complete(c.stack.top()):
                real_me_top = real_me_top.children[real_me_top.attributes['head_child']]
            real_me_second_top = c.stack.second_top().attributes['real_me']
            if not is_complete(c.stack.second_top()):
                real_me_second_top = real_me_second_top.children[real_me_second_top.attributes['head_child']]
            if real_me_top.attributes['<G'] < real_me_second_top.attributes['<G'] and \
               lazy_satisfied(laziness, c) and laziest_satisfied(laziness, c):
                c = c.transition(action_storage.SWAP)
                continue
        # SHIFT
        # otherwise
        c = c.transition(action_storage.SHIFT)
        continue
    return c  # -c.log_prob
def count_transitions(final_conf, sent_id):
    """
    Tally swap statistics over the full transition chain ending in *final_conf*.

    Returns a dict with the number of single swaps ('swap'), the number of
    maximal runs of consecutive swaps ('comp_swap' -- i.e. compound swaps),
    average swapped-block sizes for both views, and the set of distinct
    compound-swap run lengths ('comp_transitions').

    :param final_conf: last configuration of an oracle run (prev_conf-linked)
    :param sent_id: sentence identifier (currently unused; kept for the caller's signature)
    """
    # Rebuild the chain in chronological order (it is linked backwards).
    all_confs = []
    all_actions = []
    conf = final_conf
    while conf.prev_conf is not None:
        all_confs.append(conf.prev_conf)
        all_actions.append(conf.last_action)
        conf = conf.prev_conf
    all_confs = reversed(all_confs)
    all_actions = reversed(all_actions)
    counts = {}
    swap_block_sizes = []
    swap_alt_block_sizes = []
    const_swap_transition_sizes = set()
    const_swap_block_sizes = []
    const_swap_block_sizes_cummul = 0
    const_swap_consecutive_count = 0
    const_swap_alt_block_sizes = []
    for (conf, action) in zip(all_confs, all_actions):
        if action == "swap":
            counts['swap'] = counts.get("swap", 0) + 1
            const_swap_consecutive_count += 1
            # Block = the terminal yield of the item being swapped down.
            const_swap_block_sizes_cummul += len(conf.stack.second_top().give_me_terminal_nodes())
            swap_block_sizes.append(len(conf.stack.second_top().give_me_terminal_nodes()))
            swap_alt_block_sizes.append(len(conf.stack.top().give_me_terminal_nodes()))
        else:
            # A non-swap action closes the current run of consecutive swaps.
            # NOTE(review): a run that ends exactly at the final transition is
            # never flushed here -- presumably the last action of a derivation
            # is never a swap; confirm against the oracle.
            if const_swap_consecutive_count > 0:
                counts['comp_swap'] = counts.get("comp_swap", 0) + 1
                const_swap_transition_sizes.add(const_swap_consecutive_count)
                const_swap_consecutive_count = 0
                const_swap_block_sizes.append(const_swap_block_sizes_cummul)
                const_swap_block_sizes_cummul = 0
                const_swap_alt_block_sizes.append(len(conf.stack.top().give_me_terminal_nodes()))
    if 'swap' in counts:
        counts['avg_block_size'] = np.mean(np.array(swap_block_sizes))
        counts['avg_alt_block_size'] = np.mean(np.array(swap_alt_block_sizes))
        counts['comp_avg_block_size'] = np.mean(np.array(const_swap_block_sizes))
        counts['comp_avg_alt_block_size'] = np.mean(np.array(const_swap_alt_block_sizes))
    else:
        # No swaps at all: report zeros for every statistic.
        counts['swap'] = 0
        counts['avg_block_size'] = 0.0
        counts['avg_alt_block_size'] = 0.0
        counts['comp_swap'] = 0
        counts['comp_avg_block_size'] = 0.0
        counts['comp_avg_alt_block_size'] = 0.0
    counts['comp_transitions'] = const_swap_transition_sizes
    return counts
def count_transitions2(final_conf, sent_id):
    """
    Walk the configuration chain backwards and tally swap transitions.

    Returns a dict with the swap count and the average terminal-yield sizes of
    the buffer-top / stack-top items at each swap (0.0 when no swap occurred).

    :param final_conf: last configuration of an oracle run (prev_conf-linked)
    :param sent_id: sentence identifier (unused; kept for the caller's signature)
    """
    block_sizes = []
    alt_block_sizes = []
    n_swaps = 0
    conf = final_conf
    while conf.prev_conf is not None:
        if conf.last_action == "swap":
            n_swaps += 1
            block_sizes.append(len(conf.buffer.top().give_me_terminal_nodes()))
            alt_block_sizes.append(len(conf.stack.top().give_me_terminal_nodes()))
        conf = conf.prev_conf
    counts = {'swap': n_swaps}
    if n_swaps:
        counts['avg_block_size'] = np.mean(np.array(block_sizes))
        counts['avg_alt_block_size'] = np.mean(np.array(alt_block_sizes))
    else:
        counts['avg_block_size'] = 0.0
        counts['avg_alt_block_size'] = 0.0
    return counts
def main(train_trees_file, train_pos_file, encoding, model_dir, hyper_params_desc_file, external_embeddings_file):
    """
    Compare oracle swap statistics for the eager / lazy / laziest strategies.

    Loads the training treebank and POS tags, builds the vocabulary mappings,
    instantiates the model parameters (needed to build oracle configurations),
    then for every sentence runs all three oracle variants and prints one CSV
    row of swap counts and block sizes to stdout.

    :param train_trees_file: export-format treebank file
    :param train_pos_file: POS-tag file aligned with the treebank
    :param encoding: encoding of the export file
    :param model_dir: model output directory (from the CLI contract)
    :param hyper_params_desc_file: json file with hyperparameters
    :param external_embeddings_file: optional csv file with embeddings
    """
    hyper_params = load_hyper_parameters_from_file(hyper_params_desc_file)
    # load the data in the memory
    train_trees = load_from_export_format(train_trees_file, encoding)
    train_pos_seqs = load_pos_tags(train_pos_file)
    assert(len(train_trees) == len(train_pos_seqs))
    # Drop sentences whose tree failed to load (None entries).
    train_data = list(filter(lambda x: x[0] is not None, zip(train_trees,train_pos_seqs)))
    train_trees = list(map(lambda x: x[0], train_data))
    train_pos_seqs = list(map(lambda x: x[1], train_data))
    # Vocabulary mappings for words, characters (optional), tags and non-terminals.
    all_s2i = ContainerStr2IntMaps()
    all_s2i.w2i = get_word_mapping(train_trees)
    if 'c_emb_size_for_char' in hyper_params:
        all_s2i.c2i = get_char_mapping(train_trees)
    else:
        all_s2i.c2i = None
    all_s2i.p2i = get_pos_mapping(train_pos_seqs)
    all_s2i.n2i = get_nonterm_mapping(train_trees)
    hyper_params['w_voc_size'] = all_s2i.w2i.size()
    hyper_params['p_voc_size'] = all_s2i.p2i.size()
    hyper_params['n_voc_size'] = all_s2i.n2i.size()
    hyper_params['a_voc_size'] = all_s2i.n2i.size()+4 # is this correct?
    if all_s2i.c2i is not None:
        hyper_params['c_voc_size'] = all_s2i.c2i.size()
    #laziness = hyper_params['laziness']
    model, params = define_model(hyper_params, all_s2i, external_embeddings_file)
    reporting_frequency = 1000
    train_data = list(zip(train_trees, train_pos_seqs))
    output_fh = stdout
    # CSV header: per-sentence swap statistics for each oracle variant.
    columns = ["sentID",
               "words",
               "swapsEager",
               "swapsLazy",
               "swapsLazier",
               "avgBlockSizeEager",
               "avgBlockSizeLazy",
               "avgBlockSizeLazier",
               "avgAltBlockSizeEager",
               "avgAltBlockSizeLazy",
               "avgAltBlockSizeLazier",
               "comp_swapsEager",
               "comp_swapsLazy",
               "comp_swapsLazier",
               "comp_avgBlockSizeEager",
               "comp_avgBlockSizeLazy",
               "comp_avgBlockSizeLazier",
               "comp_avgAltBlockSizeEager",
               "comp_avgAltBlockSizeLazy",
               "comp_avgAltBlockSizeLazier",
               "better"]
    print(",".join(columns), file=output_fh)
    # print("sentID,words,swapsEager,swapsLazy,swapsLazier,avgBlockSizeEager,avgBlockSizeLazy,avgBlockSizeLazier,avgAltBlockSizeEager,avgAltBlockSizeLazy,avgAltBlockSizeLazier,better", file=output_fh)
    # add compoundSwapsEager,compoundSwapsLazy,compoundSwapsLazier,compoundAvgBlockSizeEager,compoundAvgBlockSizeLazy,compoundAvgBlockSizeLazier,compoundAvgAltBlockSizeEager,compoundAvgAltBlockSizeLazy,compoundAvgAltBlockSizeLazier
    # Distinct compound-swap run lengths observed per strategy.
    const_transition_types_count = {}
    for laziness in ["eager", "lazy", "laziest"]:
        const_transition_types_count[laziness] = set()
    for i, (tree, pos_seq) in enumerate(train_data, 1):
        to_out = [str(i), str(len(pos_seq))]
        #out = "%d,%d"%(i, len(pos_seq))
        counts = {}
        for laziness in ["eager", "lazy", "laziest"]:
            # Fresh computation graph per oracle run (dynet bookkeeping).
            dy.renew_cg()
            oracle_conf = construct_oracle_conf(tree, pos_seq, params, all_s2i, laziness, hyper_params['word_droppiness'], hyper_params['tag_droppiness'], hyper_params['terminal_dropout'], hyper_params['<ind'])
            counts[laziness] = count_transitions(oracle_conf, tree.attributes['sent_id'])
            const_transition_types_count[laziness] |= counts[laziness]['comp_transitions']
        # out+=",%d,%d,%d,%f,%f,%f,%f,%f,%f"%(
        #     counts["eager"]['swap'], counts["lazy"]['swap'], counts["laziest"]['swap'],
        #     counts["eager"]['avg_block_size'], counts["lazy"]['avg_block_size'], counts["laziest"]['avg_block_size'],
        #     counts["eager"]['avg_alt_block_size'], counts["lazy"]['avg_alt_block_size'], counts["laziest"]['avg_alt_block_size'])
        to_out.extend(map(str, [
            counts["eager"]['swap'],
            counts["lazy"]['swap'],
            counts["laziest"]['swap'],
            counts["eager"]['avg_block_size'],
            counts["lazy"]['avg_block_size'],
            counts["laziest"]['avg_block_size'],
            counts["eager"]['avg_alt_block_size'],
            counts["lazy"]['avg_alt_block_size'],
            counts["laziest"]['avg_alt_block_size'],
            counts["eager"]['comp_swap'],
            counts["lazy"]['comp_swap'],
            counts["laziest"]['comp_swap'],
            counts["eager"]['comp_avg_block_size'],
            counts["lazy"]['comp_avg_block_size'],
            counts["laziest"]['comp_avg_block_size'],
            counts["eager"]['comp_avg_alt_block_size'],
            counts["lazy"]['comp_avg_alt_block_size'],
            counts["laziest"]['comp_avg_alt_block_size'],
        ]))
        # "better" column: did lazy need strictly fewer swaps than laziest?
        if counts['lazy']['swap']<counts['laziest']['swap']:
            #out+=",yes"
            to_out.append("yes")
        else:
            #out+=",no"
            to_out.append("no")
        print(",".join(to_out), file=output_fh)
        if i % reporting_frequency == 0:
            print("%d"%i, file=stderr)
            stderr.flush()
    output_fh.flush()
    for laziness in ["eager", "lazy", "laziest"]:
        print("const type transitions %s %d"%(laziness,len(const_transition_types_count[laziness])), file=stderr)
if __name__ == "__main__":
    # Command-line entry point: parse options, validate input paths, train.
    cli = argparse.ArgumentParser()
    cli.add_argument("--model_dir", required=True, type=str, help="Model output directory")
    cli.add_argument("--train_trees_file", required=True, type=str, help="export format training file")
    cli.add_argument("--train_pos_file", required=True, type=str, help="stanford tagger format(sep /) training file")
    cli.add_argument("--external_embeddings_file", default=None, type=str, help="csv file with embeddings")
    cli.add_argument("--dynet-mem", default=512, type=int, help="memory for the neural network")
    cli.add_argument("--dynet-weight-decay", default=0, type=float, help="weight decay (L2) for the neural network")
    cli.add_argument("--encoding", type=str, default="utf-8",
                     help="Export format encoding default=utf-8, alternative latin1")
    cli.add_argument("--hyper_params_file", required=True, type=str, help="file with hyperparameters in json format")
    args = cli.parse_args()
    # Every input file must already exist before training starts.
    for required_file in (args.train_trees_file, args.train_pos_file, args.hyper_params_file):
        if not exists(required_file):
            raise Exception(required_file + " not exists")
    if not exists(args.model_dir):
        mkdir(args.model_dir)
    main(args.train_trees_file, args.train_pos_file, args.encoding,
         args.model_dir, args.hyper_params_file, args.external_embeddings_file)
| [
"os.path.exists",
"sys.path.insert",
"parser.string2int_mapper.ContainerStr2IntMaps",
"argparse.ArgumentParser",
"inspect.currentframe",
"parser.string2int_mapper.String2IntegerMapper",
"sys.stderr.flush",
"os.path.join",
"data_formats.tree_loader.load_from_export_format",
"parser.configuration.Co... | [((449, 484), 'os.path.join', 'join', (['SCRIPT_FOLDER', 'pardir', 'pardir'], {}), '(SCRIPT_FOLDER, pardir, pardir)\n', (453, 484), False, 'from os.path import join, exists\n'), ((502, 526), 'os.path.join', 'join', (['MAIN_FOLDER', '"""src"""'], {}), "(MAIN_FOLDER, 'src')\n", (506, 526), False, 'from os.path import join, exists\n'), ((566, 600), 'sys.path.insert', 'sys.path.insert', (['(0)', 'MODULES_FOLDER'], {}), '(0, MODULES_FOLDER)\n', (581, 600), False, 'import os, sys, inspect\n'), ((1194, 1216), 'parser.string2int_mapper.String2IntegerMapper', 'String2IntegerMapper', ([], {}), '()\n', (1214, 1216), False, 'from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps\n'), ((1441, 1463), 'parser.string2int_mapper.String2IntegerMapper', 'String2IntegerMapper', ([], {}), '()\n', (1461, 1463), False, 'from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps\n'), ((1838, 1860), 'parser.string2int_mapper.String2IntegerMapper', 'String2IntegerMapper', ([], {}), '()\n', (1858, 1860), False, 'from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps\n'), ((2318, 2340), 'parser.string2int_mapper.String2IntegerMapper', 'String2IntegerMapper', ([], {}), '()\n', (2338, 2340), False, 'from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps\n'), ((7845, 7886), 'parser.action.ActionStorage', 'ActionStorage', (['all_s2i.n2i', "params['E_a']"], {}), "(all_s2i.n2i, params['E_a'])\n", (7858, 7886), False, 'from parser.action import ActionStorage\n'), ((7903, 8025), 'parser.configuration.Configuration.construct_init_configuration', 'Configuration.construct_init_configuration', (['words', 'new_pos_seq', 'params', 'action_storage', 'all_s2i', 'terminal_dropout_rate'], {}), '(words, new_pos_seq, params,\n action_storage, all_s2i, terminal_dropout_rate)\n', (7945, 8025), False, 'from parser.configuration import Configuration\n'), ((14033, 14084), 
'data_formats.tree_loader.load_from_export_format', 'load_from_export_format', (['train_trees_file', 'encoding'], {}), '(train_trees_file, encoding)\n', (14056, 14084), False, 'from data_formats.tree_loader import load_from_export_format\n'), ((14408, 14430), 'parser.string2int_mapper.ContainerStr2IntMaps', 'ContainerStr2IntMaps', ([], {}), '()\n', (14428, 14430), False, 'from parser.string2int_mapper import String2IntegerMapper, ContainerStr2IntMaps\n'), ((19141, 19166), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19164, 19166), False, 'import argparse\n'), ((18894, 18908), 'sys.stderr.flush', 'stderr.flush', ([], {}), '()\n', (18906, 18908), False, 'from sys import stderr, stdout\n'), ((20139, 20168), 'os.path.exists', 'exists', (['args.train_trees_file'], {}), '(args.train_trees_file)\n', (20145, 20168), False, 'from os.path import join, exists\n'), ((20242, 20269), 'os.path.exists', 'exists', (['args.train_pos_file'], {}), '(args.train_pos_file)\n', (20248, 20269), False, 'from os.path import join, exists\n'), ((20341, 20371), 'os.path.exists', 'exists', (['args.hyper_params_file'], {}), '(args.hyper_params_file)\n', (20347, 20371), False, 'from os.path import join, exists\n'), ((20447, 20469), 'os.path.exists', 'exists', (['args.model_dir'], {}), '(args.model_dir)\n', (20453, 20469), False, 'from os.path import join, exists\n'), ((20479, 20500), 'os.mkdir', 'mkdir', (['args.model_dir'], {}), '(args.model_dir)\n', (20484, 20500), False, 'from os import listdir, mkdir, pardir\n'), ((7482, 7490), 'random.random', 'random', ([], {}), '()\n', (7488, 7490), False, 'from random import random\n'), ((7715, 7723), 'random.random', 'random', ([], {}), '()\n', (7721, 7723), False, 'from random import random\n'), ((12371, 12397), 'numpy.array', 'np.array', (['swap_block_sizes'], {}), '(swap_block_sizes)\n', (12379, 12397), True, 'import numpy as np\n'), ((12446, 12476), 'numpy.array', 'np.array', (['swap_alt_block_sizes'], {}), 
'(swap_alt_block_sizes)\n', (12454, 12476), True, 'import numpy as np\n'), ((12526, 12558), 'numpy.array', 'np.array', (['const_swap_block_sizes'], {}), '(const_swap_block_sizes)\n', (12534, 12558), True, 'import numpy as np\n'), ((12612, 12648), 'numpy.array', 'np.array', (['const_swap_alt_block_sizes'], {}), '(const_swap_alt_block_sizes)\n', (12620, 12648), True, 'import numpy as np\n'), ((13544, 13570), 'numpy.array', 'np.array', (['swap_block_sizes'], {}), '(swap_block_sizes)\n', (13552, 13570), True, 'import numpy as np\n'), ((13619, 13649), 'numpy.array', 'np.array', (['swap_alt_block_sizes'], {}), '(swap_alt_block_sizes)\n', (13627, 13649), True, 'import numpy as np\n'), ((16865, 16878), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (16876, 16878), True, 'import dynet as dy\n'), ((405, 427), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (425, 427), False, 'import os, sys, inspect\n')] |
import Bio,gzip
from Bio import SeqIO
import pyteomics
from pyteomics import mass,fasta
import pyteomics.parser as pyt_parser
import pandas as pd
import numpy as np
import json,os
from tqdm import tqdm
from load_config import CONFIG
# Hard cap on the number of distinct peptides kept in the in-memory database.
MAX_DATABASE_SIZE=100000000
# Digestion parameters, all read from the shared CONFIG mapping.
DB_PEPTIDE_MINIMUM_LENGTH=CONFIG['DB_PEPTIDE_MINIMUM_LENGTH']#7
DB_PEPTIDE_MAXIMUM_LENGTH=CONFIG['DB_PEPTIDE_MAXIMUM_LENGTH']#42
MAX_MISSED_CLEAVAGES=CONFIG['MAX_MISSED_CLEAVAGES']#args.MAX_MISSED_CLEAVAGES
ENZYME=CONFIG['ENZYME']
SEMI_SPECIFIC_CLEAVAGE=CONFIG['SEMI_SPECIFIC_CLEAVAGE']
# Output toggles: SAVE writes peptides.npy, SAVE_DB_AS_JSON dumps db.json.
SAVE=True
SAVE_DB_AS_JSON=True
# The config may store the enzyme pattern as a Python raw-string literal,
# e.g. "r'[KR]'"; strip the r'...' wrapper so only the regex text remains.
if "r'" in ENZYME:
    ENZYME = ENZYME.replace("r'","")
    ENZYME = ENZYME.replace("'","")
    # NOTE(review): r'%s' % s evaluates to s — the raw prefix only affects
    # escapes inside the literal itself, so this line is a no-op at runtime.
    ENZYME = r'%s'%ENZYME
#FASTA_FILE = CONFIG['FASTA']
def add_check_keys_exising(key,dictionary,element):
    """Insert `element` into the set stored under `key`, creating it if needed.

    Returns the (mutated) dictionary for caller convenience.
    """
    dictionary.setdefault(key, set()).add(element)
    return dictionary
def cleave_peptide(protein_sequence):
    """In-silico digest of one protein sequence with the configured enzyme.

    Delegates to pyteomics' cleave() using the module-level digestion settings.
    """
    digestion_settings = dict(
        min_length=DB_PEPTIDE_MINIMUM_LENGTH,
        missed_cleavages=MAX_MISSED_CLEAVAGES,
        semi=SEMI_SPECIFIC_CLEAVAGE,
    )
    return pyt_parser.cleave(protein_sequence, ENZYME, **digestion_settings)
def digest_seq_record(seq_record, fasta_type='generic'):
    """Digest a single FASTA record into database-ready peptides.

    Args:
        seq_record: (header, sequence) pair as yielded by pyteomics.fasta.
        fasta_type: kept for backward compatibility.  The per-format header
            metadata the old branches parsed (species/protein name) was never
            used: the accession id was unconditionally recomputed from the
            first whitespace-separated header token, and nothing else was
            returned.  The broken if/if/elif chain and its dead results have
            been removed.

    Returns:
        (header, accession_id, peptides) where peptides are the cleaved
        sequences within the configured length bounds that contain no
        ambiguous residues.
    """
    header, sequence = seq_record[0], seq_record[1]
    # Residue codes that make a peptide unusable downstream
    # (ambiguous / non-standard amino acids).
    odd_aminoacids = {'X', 'U', 'J', 'Z', 'B', 'O'}
    peptides = [
        peptide
        for peptide in cleave_peptide(sequence)
        if DB_PEPTIDE_MINIMUM_LENGTH <= len(peptide) <= DB_PEPTIDE_MAXIMUM_LENGTH
        and not (set(peptide) & odd_aminoacids)
    ]
    # The accession id is the first token of the header line.
    accesion_id = header.split()[0]
    return header, accesion_id, peptides
from collections import defaultdict
#if __name__ == '__main__':
def digest_fasta(fasta_file,REVERSE_DECOY=False):
    """Digest every protein in a FASTA file into a peptide -> protein-ids map.

    Args:
        fasta_file: path to a FASTA file, optionally gzip-compressed.
        REVERSE_DECOY: when True, digest pyteomics reversed decoy entries and
            write results under rev/db instead of forward/db.

    Returns:
        dict mapping peptide sequence -> accession ids of the proteins that
        produced it (values are lists when SAVE_DB_AS_JSON is on, sets
        otherwise).

    Side effects: may write db.json and peptides.npy into the results dir.

    Fixes vs. the original: the unused ThreadPool (opened but never used),
    the unused locals (ncbi_peptide_meta, all_peptides, all_proteins) and the
    redundant inner `import json` are gone, and the side-effecting
    `list(map(lambda ...))` is an explicit loop.
    """
    if REVERSE_DECOY:
        DB_DIR = CONFIG['RESULTS_DIR']+'/rev/db'
    else:
        DB_DIR = CONFIG['RESULTS_DIR']+'/forward/db'
    if not os.path.exists(DB_DIR):
        os.makedirs(DB_DIR)

    ncbi_peptide_protein = defaultdict(set)
    print('Digesting peptides...')
    from multiprocessing.pool import Pool
    with Pool() as p:
        # Transparently support gzipped FASTA input.
        if '.gz' in fasta_file:
            handle = gzip.open(fasta_file, "rt")
        else:
            handle = open(fasta_file, "rt")
        with handle as fasta_handle:
            if REVERSE_DECOY:
                records = fasta.decoy_db(fasta_handle, decoy_only=True)
            else:
                records = fasta.read(fasta_handle)
            # Digest records in parallel; aggregate sequentially.
            for HEADER, accesion_id, cleaved_peptides in tqdm(p.map(digest_seq_record, records)):
                for peptide in cleaved_peptides:
                    ncbi_peptide_protein[peptide].add(accesion_id)
                if len(ncbi_peptide_protein) > MAX_DATABASE_SIZE:
                    print('exceeding maximum number of allowd peptides %s'%MAX_DATABASE_SIZE)
                    break
    print('Done.')
    print(len(ncbi_peptide_protein))

    if SAVE_DB_AS_JSON:
        print('saving db as db.json... ')
        # JSON cannot encode sets, so convert the values to lists first.
        ncbi_peptide_protein = dict(zip(ncbi_peptide_protein.keys(),
                                        list(map(list, ncbi_peptide_protein.values()))))
        with open(os.path.join(DB_DIR,'db.json'), 'w') as fp:
            json.dump(ncbi_peptide_protein, fp)

    if SAVE:
        print('Writing list of peptides... ')
        peptides = list(ncbi_peptide_protein.keys())
        np.save(os.path.join(DB_DIR,"peptides.npy"), np.array(peptides))
        print('Done.')
    return ncbi_peptide_protein
| [
"os.path.exists",
"pyteomics.fasta.decoy_db",
"pyteomics.fasta.read",
"os.makedirs",
"gzip.open",
"json.dump",
"os.path.join",
"multiprocessing.pool.ThreadPool",
"numpy.array",
"collections.defaultdict",
"multiprocessing.pool.Pool",
"pyteomics.parser.cleave"
] | [((1160, 1319), 'pyteomics.parser.cleave', 'pyt_parser.cleave', (['protein_sequence', 'ENZYME'], {'min_length': 'DB_PEPTIDE_MINIMUM_LENGTH', 'missed_cleavages': 'MAX_MISSED_CLEAVAGES', 'semi': 'SEMI_SPECIFIC_CLEAVAGE'}), '(protein_sequence, ENZYME, min_length=\n DB_PEPTIDE_MINIMUM_LENGTH, missed_cleavages=MAX_MISSED_CLEAVAGES, semi=\n SEMI_SPECIFIC_CLEAVAGE)\n', (1177, 1319), True, 'import pyteomics.parser as pyt_parser\n'), ((2865, 2881), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2876, 2881), False, 'from collections import defaultdict\n'), ((2757, 2779), 'os.path.exists', 'os.path.exists', (['DB_DIR'], {}), '(DB_DIR)\n', (2771, 2779), False, 'import json, os\n'), ((2789, 2808), 'os.makedirs', 'os.makedirs', (['DB_DIR'], {}), '(DB_DIR)\n', (2800, 2808), False, 'import json, os\n'), ((3055, 3061), 'multiprocessing.pool.Pool', 'Pool', ([], {}), '()\n', (3059, 3061), False, 'from multiprocessing.pool import Pool, ThreadPool\n'), ((3068, 3080), 'multiprocessing.pool.ThreadPool', 'ThreadPool', ([], {}), '()\n', (3078, 3080), False, 'from multiprocessing.pool import Pool, ThreadPool\n'), ((3150, 3177), 'gzip.open', 'gzip.open', (['FASTA_FILE', '"""rt"""'], {}), "(FASTA_FILE, 'rt')\n", (3159, 3177), False, 'import Bio, gzip\n'), ((4675, 4710), 'json.dump', 'json.dump', (['ncbi_peptide_protein', 'fp'], {}), '(ncbi_peptide_protein, fp)\n', (4684, 4710), False, 'import json\n'), ((4912, 4948), 'os.path.join', 'os.path.join', (['DB_DIR', '"""peptides.npy"""'], {}), "(DB_DIR, 'peptides.npy')\n", (4924, 4948), False, 'import json, os\n'), ((4948, 4966), 'numpy.array', 'np.array', (['peptides'], {}), '(peptides)\n', (4956, 4966), True, 'import numpy as np\n'), ((3331, 3374), 'pyteomics.fasta.decoy_db', 'fasta.decoy_db', (['FASTA_FILE'], {'decoy_only': '(True)'}), '(FASTA_FILE, decoy_only=True)\n', (3345, 3374), False, 'from pyteomics import mass, fasta\n'), ((3421, 3443), 'pyteomics.fasta.read', 'fasta.read', (['FASTA_FILE'], {}), 
'(FASTA_FILE)\n', (3431, 3443), False, 'from pyteomics import mass, fasta\n'), ((4619, 4650), 'os.path.join', 'os.path.join', (['DB_DIR', '"""db.json"""'], {}), "(DB_DIR, 'db.json')\n", (4631, 4650), False, 'import json, os\n')] |
#Some codes are adopted from https://github.com/DCASE-REPO/DESED_task
import torch
import numpy as np
import random
def frame_shift(features, label=None, net_pooling=None):
    """Randomly time-shift each batch item via torch.roll along the last axis.

    A shift (in feature frames) is drawn per batch item from N(0, 90).  When
    `label` is given it is rolled by the same shift scaled down by
    `net_pooling` (feature-frames per label-frame), keeping both aligned.

    Args:
        features: (batch, ..., time) tensor.
        label: optional (batch, ..., label_time) tensor.
        net_pooling: pooling factor; required when `label` is given.

    Returns:
        Shifted features, or (shifted features, shifted labels).

    Fix: the original duplicated the whole loop for the label/no-label cases;
    the two branches are merged, with identical RNG usage (one gauss() draw
    per batch item).
    """
    batch_size = features.shape[0]
    shifted_features = []
    shifted_labels = []
    for idx in range(batch_size):
        shift = int(random.gauss(0, 90))
        shifted_features.append(torch.roll(features[idx], shift, dims=-1))
        if label is not None:
            # NOTE(review): unary minus binds tighter than //, so the
            # negative branch equals plain floor division; preserved as-is
            # to keep behavior identical to the original.
            label_shift = -abs(shift) // net_pooling if shift < 0 else shift // net_pooling
            shifted_labels.append(torch.roll(label[idx], label_shift, dims=-1))
    if label is not None:
        return torch.stack(shifted_features), torch.stack(shifted_labels)
    return torch.stack(shifted_features)
def mixup(features, label=None, permutation=None, c=None, alpha=0.2, beta=0.2, mixup_label_type="soft", mix=False):
    pass
def time_mask(features, labels=None, net_pooling=None, mask_ratios=(10, 20)):
    """Zero one random contiguous time span per call (SpecAugment-style).

    The span width is drawn uniformly from
    [n_frame / mask_ratios[1], n_frame / mask_ratios[0]).  When labels are
    given, the span is chosen on the label time axis and scaled by
    `net_pooling` on the feature axis so both stay aligned.  Masking is done
    in place; the mutated tensors are returned.
    """
    def _random_span(n_frame):
        # Width first, then a start position keeping the span in range.
        width = torch.randint(low=int(n_frame / mask_ratios[1]),
                              high=int(n_frame / mask_ratios[0]), size=(1,))   # [low, high)
        start = torch.randint(low=0, high=n_frame - width[0], size=(1,))
        return start, width

    if labels is not None:
        t_low, t_width = _random_span(labels.shape[-1])
        features[:, :, t_low * net_pooling:(t_low + t_width) * net_pooling] = 0
        labels[:, :, t_low:t_low + t_width] = 0
        return features, labels

    t_low, t_width = _random_span(features.shape[-1])
    features[:, :, t_low:t_low + t_width] = 0
    return features
def feature_transformation(features, n_transform, choice, filter_db_range, filter_bands,
                           filter_minimum_bandwidth, filter_type, freq_mask_ratio, noise_snrs):
    """Apply the selected augmentation chain and return two feature views.

    `choice` is a 3-flag sequence enabling, in order: FilterAugment,
    frequency masking, and additive noise.

    n_transform == 2: two independently augmented passes are returned.
    n_transform == 1: one augmented version is returned twice.
    otherwise: the input is returned twice, unmodified.

    NOTE(review): freq_mask mutates its input in place, so with
    n_transform == 2 the second pass sees any masking done by the first;
    this matches the original behavior and is preserved as-is.

    Fix: the original duplicated the filter -> mask -> noise chain in both
    branches; it is now a single nested helper.
    """
    def _augment(feats):
        # One pass of the (optional) filter -> mask -> noise chain.
        if choice[0]:
            feats = filt_aug(feats, db_range=filter_db_range, n_band=filter_bands,
                             min_bw=filter_minimum_bandwidth, filter_type=filter_type)
        if choice[1]:
            feats = freq_mask(feats, mask_ratio=freq_mask_ratio)
        if choice[2]:
            feats = add_noise(feats, snrs=noise_snrs)
        return feats

    if n_transform == 2:
        return [_augment(features) for _ in range(n_transform)]
    elif n_transform == 1:
        transformed = _augment(features)
        return [transformed, transformed]
    return [features, features]
def filt_aug(features, db_range=[-6, 6], n_band=[3, 6], min_bw=6, filter_type="linear"):
    """FilterAugment: multiply random frequency bands by random gains.

    Args:
        features: (batch, n_freq_bins, time) tensor.
        db_range: [min, max] band gain in dB.
        n_band: [low, high) range for the random number of frequency bands.
        min_bw: minimum band width in frequency bins.
        filter_type: "step" (constant gain per band), "linear" (gain
            interpolated across each band), or a number in [0, 1] used as the
            probability of picking "step" (each mode then uses its own
            n_band/min_bw presets).

    Returns:
        The filtered features, or the input unchanged when fewer than two
        bands were drawn.

    Note: the list defaults are only read (reassigned, never mutated), so the
    shared-mutable-default pitfall does not bite here.
    """
    # this is updated FilterAugment algorithm used for ICASSP 2022
    if not isinstance(filter_type, str):
        # Numeric filter_type acts as P("step"); each mode has its own presets.
        if torch.rand(1).item() < filter_type:
            filter_type = "step"
            n_band = [2, 5]
            min_bw = 4
        else:
            filter_type = "linear"
            n_band = [3, 6]
            min_bw = 6

    batch_size, n_freq_bin, _ = features.shape
    n_freq_band = torch.randint(low=n_band[0], high=n_band[1], size=(1,)).item()   # [low, high)
    if n_freq_band > 1:
        # Shrink the minimum bandwidth until the bands can fit into the axis.
        while n_freq_bin - n_freq_band * min_bw + 1 < 0:
            min_bw -= 1
        # Random sorted band boundaries, spaced at least min_bw apart,
        # bracketed by 0 and n_freq_bin below.
        band_bndry_freqs = torch.sort(torch.randint(0, n_freq_bin - n_freq_band * min_bw + 1,
                                                   (n_freq_band - 1,)))[0] + \
                           torch.arange(1, n_freq_band) * min_bw
        band_bndry_freqs = torch.cat((torch.tensor([0]), band_bndry_freqs, torch.tensor([n_freq_bin])))

        if filter_type == "step":
            # One random gain (in dB) per band, converted to linear amplitude.
            band_factors = torch.rand((batch_size, n_freq_band)).to(features) * (db_range[1] - db_range[0]) + db_range[0]
            band_factors = 10 ** (band_factors / 20)

            freq_filt = torch.ones((batch_size, n_freq_bin, 1)).to(features)
            for i in range(n_freq_band):
                freq_filt[:, band_bndry_freqs[i]:band_bndry_freqs[i + 1], :] = band_factors[:, i].unsqueeze(-1).unsqueeze(-1)

        elif filter_type == "linear":
            # Random gain (in dB) at each band boundary; interpolate across
            # every band, then convert the whole filter to linear amplitude.
            band_factors = torch.rand((batch_size, n_freq_band + 1)).to(features) * (db_range[1] - db_range[0]) + db_range[0]
            freq_filt = torch.ones((batch_size, n_freq_bin, 1)).to(features)
            for i in range(n_freq_band):
                for j in range(batch_size):
                    freq_filt[j, band_bndry_freqs[i]:band_bndry_freqs[i+1], :] = \
                        torch.linspace(band_factors[j, i], band_factors[j, i+1],
                                       band_bndry_freqs[i+1] - band_bndry_freqs[i]).unsqueeze(-1)
            freq_filt = 10 ** (freq_filt / 20)

        return features * freq_filt
    else:
        return features
def freq_mask(features, mask_ratio=16):
    """Zero a random block of frequency bins per batch item (SpecAugment).

    The masked width per item is drawn from [1, n_freq_bin / mask_ratio);
    when n_freq_bin / mask_ratio == 1 the width is fixed to 1.  Masking is
    done in place; the (mutated) input tensor is returned.

    Fix: the fixed-width path used torch.ones(), whose float entries cannot
    serve as a torch.randint bound or slice index (TypeError); the widths are
    now an integer tensor.
    """
    batch_size, n_freq_bin, _ = features.shape
    max_mask = int(n_freq_bin / mask_ratio)
    if max_mask == 1:
        # Integer dtype: these widths are used as a randint bound and slice
        # indices below, both of which reject float tensors.
        f_widths = torch.ones(batch_size, dtype=torch.long)
    else:
        f_widths = torch.randint(low=1, high=max_mask, size=(batch_size,))   # [low, high)
    for i in range(batch_size):
        f_width = f_widths[i]
        f_low = torch.randint(low=0, high=n_freq_bin - f_width, size=(1,))
        features[i, f_low:f_low + f_width, :] = 0
    return features
def add_noise(features, snrs=(15, 30), dims=(1, 2)):
    """Add Gaussian noise at a (possibly per-item random) signal-to-noise ratio.

    `snrs` may be a pair, in which case each batch item gets an SNR (in dB)
    drawn uniformly between the two values, or a single number applied to the
    whole batch.  `dims` are the axes over which the signal std-dev is taken.
    """
    if isinstance(snrs, (list, tuple)):
        lo, hi = snrs[1], snrs[0]
        # One uniform draw per batch item, broadcastable over (batch, 1, 1).
        per_item = torch.rand((features.shape[0],), device=features.device).reshape(-1, 1, 1)
        snr_db = (hi - lo) * per_item + lo
    else:
        snr_db = snrs
    # dB -> linear amplitude ratio.
    snr_linear = 10 ** (snr_db / 20)
    noise_scale = torch.std(features, dim=dims, keepdim=True) / snr_linear
    noise = torch.randn(features.shape, device=features.device)
    return features + noise * noise_scale
| [
"torch.roll",
"numpy.random.beta",
"torch.randperm",
"torch.rand",
"torch.stack",
"torch.clamp",
"torch.randint",
"torch.tensor",
"torch.arange",
"torch.linspace",
"torch.no_grad",
"torch.std",
"torch.randn",
"random.gauss",
"torch.ones"
] | [((948, 976), 'torch.stack', 'torch.stack', (['shifted_feature'], {}), '(shifted_feature)\n', (959, 976), False, 'import torch\n'), ((1108, 1123), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1121, 1123), False, 'import torch\n'), ((2555, 2613), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(n_frame - t_width[0])', 'size': '(1,)'}), '(low=0, high=n_frame - t_width[0], size=(1,))\n', (2568, 2613), False, 'import torch\n'), ((2957, 3015), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(n_frame - t_width[0])', 'size': '(1,)'}), '(low=0, high=n_frame - t_width[0], size=(1,))\n', (2970, 3015), False, 'import torch\n'), ((6758, 6780), 'torch.ones', 'torch.ones', (['batch_size'], {}), '(batch_size)\n', (6768, 6780), False, 'import torch\n'), ((6810, 6865), 'torch.randint', 'torch.randint', ([], {'low': '(1)', 'high': 'max_mask', 'size': '(batch_size,)'}), '(low=1, high=max_mask, size=(batch_size,))\n', (6823, 6865), False, 'import torch\n'), ((6961, 7019), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(n_freq_bin - f_width)', 'size': '(1,)'}), '(low=0, high=n_freq_bin - f_width, size=(1,))\n', (6974, 7019), False, 'import torch\n'), ((7372, 7415), 'torch.std', 'torch.std', (['features'], {'dim': 'dims', 'keepdim': '(True)'}), '(features, dim=dims, keepdim=True)\n', (7381, 7415), False, 'import torch\n'), ((634, 662), 'torch.stack', 'torch.stack', (['shifted_feature'], {}), '(shifted_feature)\n', (645, 662), False, 'import torch\n'), ((664, 690), 'torch.stack', 'torch.stack', (['shifted_label'], {}), '(shifted_label)\n', (675, 690), False, 'import torch\n'), ((1222, 1248), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (1236, 1248), False, 'import torch\n'), ((4897, 4952), 'torch.randint', 'torch.randint', ([], {'low': 'n_band[0]', 'high': 'n_band[1]', 'size': '(1,)'}), '(low=n_band[0], high=n_band[1], size=(1,))\n', (4910, 4952), False, 'import torch\n'), ((7444, 7495), 
'torch.randn', 'torch.randn', (['features.shape'], {'device': 'features.device'}), '(features.shape, device=features.device)\n', (7455, 7495), False, 'import torch\n'), ((361, 380), 'random.gauss', 'random.gauss', (['(0)', '(90)'], {}), '(0, 90)\n', (373, 380), False, 'import random\n'), ((417, 458), 'torch.roll', 'torch.roll', (['features[idx]', 'shift'], {'dims': '(-1)'}), '(features[idx], shift, dims=-1)\n', (427, 458), False, 'import torch\n'), ((579, 617), 'torch.roll', 'torch.roll', (['label[idx]', 'shift'], {'dims': '(-1)'}), '(label[idx], shift, dims=-1)\n', (589, 617), False, 'import torch\n'), ((834, 853), 'random.gauss', 'random.gauss', (['(0)', '(90)'], {}), '(0, 90)\n', (846, 853), False, 'import random\n'), ((890, 931), 'torch.roll', 'torch.roll', (['features[idx]', 'shift'], {'dims': '(-1)'}), '(features[idx], shift, dims=-1)\n', (900, 931), False, 'import torch\n'), ((1335, 1362), 'numpy.random.beta', 'np.random.beta', (['alpha', 'beta'], {}), '(alpha, beta)\n', (1349, 1362), True, 'import numpy as np\n'), ((1667, 1737), 'torch.clamp', 'torch.clamp', (['(c * label + (1 - c) * label[permutation, :])'], {'min': '(0)', 'max': '(1)'}), '(c * label + (1 - c) * label[permutation, :], min=0, max=1)\n', (1678, 1737), False, 'import torch\n'), ((5282, 5310), 'torch.arange', 'torch.arange', (['(1)', 'n_freq_band'], {}), '(1, n_freq_band)\n', (5294, 5310), False, 'import torch\n'), ((5358, 5375), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (5370, 5375), False, 'import torch\n'), ((5395, 5421), 'torch.tensor', 'torch.tensor', (['[n_freq_bin]'], {}), '([n_freq_bin])\n', (5407, 5421), False, 'import torch\n'), ((1813, 1869), 'torch.clamp', 'torch.clamp', (['(label + label[permutation, :])'], {'min': '(0)', 'max': '(1)'}), '(label + label[permutation, :], min=0, max=1)\n', (1824, 1869), False, 'import torch\n'), ((4611, 4624), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4621, 4624), False, 'import torch\n'), ((5119, 5194), 'torch.randint', 
'torch.randint', (['(0)', '(n_freq_bin - n_freq_band * min_bw + 1)', '(n_freq_band - 1,)'], {}), '(0, n_freq_bin - n_freq_band * min_bw + 1, (n_freq_band - 1,))\n', (5132, 5194), False, 'import torch\n'), ((5659, 5698), 'torch.ones', 'torch.ones', (['(batch_size, n_freq_bin, 1)'], {}), '((batch_size, n_freq_bin, 1))\n', (5669, 5698), False, 'import torch\n'), ((6068, 6107), 'torch.ones', 'torch.ones', (['(batch_size, n_freq_bin, 1)'], {}), '((batch_size, n_freq_bin, 1))\n', (6078, 6107), False, 'import torch\n'), ((7218, 7274), 'torch.rand', 'torch.rand', (['(features.shape[0],)'], {'device': 'features.device'}), '((features.shape[0],), device=features.device)\n', (7228, 7274), False, 'import torch\n'), ((1428, 1455), 'numpy.random.beta', 'np.random.beta', (['alpha', 'beta'], {}), '(alpha, beta)\n', (1442, 1455), True, 'import numpy as np\n'), ((5486, 5523), 'torch.rand', 'torch.rand', (['(batch_size, n_freq_band)'], {}), '((batch_size, n_freq_band))\n', (5496, 5523), False, 'import torch\n'), ((5945, 5986), 'torch.rand', 'torch.rand', (['(batch_size, n_freq_band + 1)'], {}), '((batch_size, n_freq_band + 1))\n', (5955, 5986), False, 'import torch\n'), ((6313, 6423), 'torch.linspace', 'torch.linspace', (['band_factors[j, i]', 'band_factors[j, i + 1]', '(band_bndry_freqs[i + 1] - band_bndry_freqs[i])'], {}), '(band_factors[j, i], band_factors[j, i + 1], band_bndry_freqs\n [i + 1] - band_bndry_freqs[i])\n', (6327, 6423), False, 'import torch\n')] |
import numpy as np
import redis
import json
import logging
from docopt import docopt
from obnl.core.client import ClientNode
# This doc is used by docopt to make the wrapper callable by command line and gather easily all the given parameters
doc = """>>> IntegrCiTy wrapper command <<<
Usage:
wrapper.py (<host> <name> <init>) [--i=TO_SET... --o=TO_GET... --first --cmd=CMD]
wrapper.py -h | --help
wrapper.py --version
Options
-h --help show this
--version show version
--i parameters to set
--o parameters to get
--first node in sequence's first group
--cmd optional list of commands to run wrapper
"""
class Node(ClientNode):
    """
    Node class for the wrapper (model can be called by the container or can be self contained directly in the wrapper)

    Implements a trivial model (b = a + random(+/-1) * c) and mirrors every
    exchanged attribute value into a Redis database.
    """

    def __init__(self, host, input_attributes=None, output_attributes=None, is_first=False):
        """Connect to the OBNL broker and Redis, then load initial values.

        :param host: host name/address of both the AMQP broker and Redis
        :param input_attributes: attribute names this node receives
        :param output_attributes: attribute names this node publishes
        :param is_first: whether the node is in the sequence's first group
        """
        # Implement OBNL client node
        super(Node, self).__init__(host, 'obnl_vhost', 'obnl', 'obnl', 'config_file.json',
                                   input_attributes=input_attributes,
                                   output_attributes=output_attributes,
                                   is_first=is_first)
        self.redis = redis.StrictRedis(host=host, port=6379, db=0)

        # Declare model
        self.a = 0
        self.b = 0
        self.c = None

        # Set initial values / model parameters
        with open('init_values.json') as json_data:
            init_values = json.load(json_data)
        for key, val in init_values.items():
            setattr(self, key, val)

    def step(self, current_time, time_step):
        """
        Run a step for the wrapper/model

        :param current_time: current simulation time
        :param time_step: next time step to run
        :return: nothing :)
        """
        # Fix: the original passed extra positional args to logging.debug()
        # with no %-placeholders in the message, which makes the logging
        # module raise a formatting error when the record is emitted.
        # Lazy %-style formatting is used instead.
        logging.debug('----- %s -----', self.name)
        logging.debug('%s time_step %s s', self.name, time_step)
        logging.debug('%s current_time %s', self.name, current_time - time_step)
        logging.debug('%s inputs %s', self.name, self.input_values)

        # Update input attributes and save input attributes and corresponding
        # simulation time step to Redis DB
        for key, value in self.input_values.items():
            setattr(self, key, value)
            self.redis.rpush('IN||' + self.name + '||' + key, getattr(self, key))
            self.redis.rpush('IN||' + self.name + '||' + key + '||time', current_time)

        # Compute intern state
        logging.debug('%s compute new intern state', self.name)
        self.b = self.a + np.random.choice([-1, 1]) * self.c

        # Send updated output attributes
        logging.debug('%s outputs %s', self.name,
                      {key: getattr(self, key) for key in self.output_attributes})
        for key in self.output_attributes:
            self.update_attribute(key, getattr(self, key))

        # Save output attributes and corresponding simulation time step to Redis DB
        for key in self.output_attributes:
            self.redis.rpush('OUT||' + self.name + '||' + key, getattr(self, key))
            self.redis.rpush('OUT||' + self.name + '||' + key + '||time', current_time)
if __name__ == "__main__":
args = docopt(doc, version='0.0.1')
node = Node(
host=args['<host>'],
is_first=args['--first'],
input_attributes=args['--i'],
output_attributes=args['--o']
)
node.start()
| [
"logging.debug",
"numpy.random.choice",
"redis.StrictRedis",
"json.load",
"docopt.docopt"
] | [((3221, 3249), 'docopt.docopt', 'docopt', (['doc'], {'version': '"""0.0.1"""'}), "(doc, version='0.0.1')\n", (3227, 3249), False, 'from docopt import docopt\n'), ((1250, 1295), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'host', 'port': '(6379)', 'db': '(0)'}), '(host=host, port=6379, db=0)\n', (1267, 1295), False, 'import redis\n'), ((1861, 1907), 'logging.debug', 'logging.debug', (["('----- ' + self.name + ' -----')"], {}), "('----- ' + self.name + ' -----')\n", (1874, 1907), False, 'import logging\n'), ((1916, 1969), 'logging.debug', 'logging.debug', (['self.name', '"""time_step"""', 'time_step', '"""s"""'], {}), "(self.name, 'time_step', time_step, 's')\n", (1929, 1969), False, 'import logging\n'), ((1978, 2044), 'logging.debug', 'logging.debug', (['self.name', '"""current_time"""', '(current_time - time_step)'], {}), "(self.name, 'current_time', current_time - time_step)\n", (1991, 2044), False, 'import logging\n'), ((2053, 2106), 'logging.debug', 'logging.debug', (['self.name', '"""inputs"""', 'self.input_values'], {}), "(self.name, 'inputs', self.input_values)\n", (2066, 2106), False, 'import logging\n'), ((2519, 2571), 'logging.debug', 'logging.debug', (['self.name', '"""compute new intern state"""'], {}), "(self.name, 'compute new intern state')\n", (2532, 2571), False, 'import logging\n'), ((1509, 1529), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (1518, 1529), False, 'import json\n'), ((2598, 2623), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (2614, 2623), True, 'import numpy as np\n')] |
"""
Utility functions for parsing data files. Especially designed for the CSVs produced
by trial_util.py
"""
import csv
import datetime
import os
import numpy as np
from common import render_exception
def lookup_data_file(data_prefix, filename):
    """Return the full path of `filename` under `data_prefix`.

    Raises an Exception when no such file exists.
    """
    candidate = os.path.join(data_prefix, filename)
    if os.path.exists(candidate):
        return candidate
    raise Exception('Could not find "{}"'.format(filename))
def compute_summary_stats(data, trait_name, trait_values, average_key='time', is_numeric=False):
    """
    Expects the data to be a list of dicts.
    For each value v in trait_values and each entry r in the data,
    collects float(r[average_key]) whenever r[trait_name] matches v
    (compared as an int when is_numeric is set, as a string otherwise).

    Returns a dict with the "mean", "median", and "std" of all collected
    values.
    """
    def matches(record, value):
        if is_numeric:
            return int(record[trait_name]) == value
        return record[trait_name] == value

    vals = [float(record[average_key])
            for value in trait_values
            for record in data
            if matches(record, value)]
    return {
        'mean': np.mean(vals),
        'median': np.median(vals),
        'std': np.std(vals)
    }
def summarize_over_reps(data, num_reps):
    """Mean/median/std of the `time` field across reps 0..num_reps-1."""
    return compute_summary_stats(data, 'rep', range(num_reps), is_numeric=True)
def obtain_data_rows(data_dir, framework, task_name, parameter_names, params_to_match):
    """
    Returns all data rows from the given framework from the
    given task where the specified parameters match.
    params_to_match is a dictionary {param names => value to match}
    """
    filename = lookup_data_file(data_dir, '{}-{}.csv'.format(framework, task_name))
    with open(filename, newline='') as csvfile:
        # parameter_names must stay a list (not drawn from the dict) because
        # column order matters when naming the CSV fields
        fieldnames = parameter_names + ['rep', 'run', 'time']
        reader = csv.DictReader(csvfile, fieldnames)

        def row_matches(row):
            for (name, value) in params_to_match.items():
                expected = value if isinstance(value, str) else str(value)
                if row[name] != expected:
                    return False
            return True

        return [row for row in reader if row_matches(row)]
def trials_stat_summary(data_dir, framework, task_name,
                        num_reps, parameter_names, params_to_match):
    """Full statistics summary for one framework/task across all reps whose
    parameters match.

    Returns (summary, success, message); on failure summary is -1 and the
    message carries the rendered exception.
    """
    try:
        rows = obtain_data_rows(data_dir, framework, task_name,
                                parameter_names, params_to_match)
        return (summarize_over_reps(rows, num_reps), True, 'success')
    except Exception as e:
        msg = 'Encountered exception on {}, {} using params {}:\n{}'.format(
            framework, task_name, params_to_match, render_exception(e))
        return (-1, False, msg)
def add_detailed_summary(report, detailed_summary, *fields):
    """Store detailed_summary in report under the nested path
    ['detailed', *fields], creating intermediate dicts as needed.

    Provided for backward compatibility with old report layouts that only
    stored the mean; mutates report in place.
    """
    path = ['detailed', *fields]
    node = report
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = detailed_summary
| [
"os.path.exists",
"numpy.mean",
"numpy.median",
"csv.DictReader",
"common.render_exception",
"os.path.join",
"numpy.std"
] | [((265, 300), 'os.path.join', 'os.path.join', (['data_prefix', 'filename'], {}), '(data_prefix, filename)\n', (277, 300), False, 'import os\n'), ((312, 337), 'os.path.exists', 'os.path.exists', (['full_name'], {}), '(full_name)\n', (326, 337), False, 'import os\n'), ((1270, 1283), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (1277, 1283), True, 'import numpy as np\n'), ((1303, 1318), 'numpy.median', 'np.median', (['vals'], {}), '(vals)\n', (1312, 1318), True, 'import numpy as np\n'), ((1335, 1347), 'numpy.std', 'np.std', (['vals'], {}), '(vals)\n', (1341, 1347), True, 'import numpy as np\n'), ((2138, 2173), 'csv.DictReader', 'csv.DictReader', (['csvfile', 'fieldnames'], {}), '(csvfile, fieldnames)\n', (2152, 2173), False, 'import csv\n'), ((3255, 3274), 'common.render_exception', 'render_exception', (['e'], {}), '(e)\n', (3271, 3274), False, 'from common import render_exception\n')] |
import os
import json
import numpy as np
def load_img(image_path):
    """Read image_path from disk as a BGR color image (OpenCV convention)."""
    import cv2
    return cv2.imread(image_path, cv2.IMREAD_COLOR)
def load_json_lines(fpath):
    """Parse a JSON-lines file into a list of records, one per line."""
    assert os.path.exists(fpath)
    with open(fpath, 'r') as fid:
        return [json.loads(line.strip('\n')) for line in fid]
def save_json_lines(content, fpath):
    """Write each record in content as one JSON document per line."""
    with open(fpath, 'w') as fid:
        fid.writelines(json.dumps(record) + '\n' for record in content)
def device_parser(str_device):
    """Expand a device spec: 'a-b' -> [a, ..., b] inclusive, 'n' -> [n]."""
    if '-' not in str_device:
        return [int(str_device)]
    parts = str_device.split('-')
    return list(range(int(parts[0]), int(parts[1]) + 1))
def ensure_dir(dirpath):
    """Create dirpath (and any missing parents) if it does not exist.

    Fix: use exist_ok=True instead of a check-then-create pair, which
    could race (TOCTOU) when two processes create the directory at once.
    """
    os.makedirs(dirpath, exist_ok=True)
def xyxy_to_xywh(boxes):
    """Convert columns [x1, y1, x2, y2, ...] to [x, y, w, h, ...] in place.

    Mutates and returns the same array; extra columns are untouched.
    """
    assert boxes.shape[1] >= 4
    boxes[:, 2:4] = boxes[:, 2:4] - boxes[:, :2]
    return boxes
def xywh_to_xyxy(boxes):
    """Convert columns [x, y, w, h, ...] to [x1, y1, x2, y2, ...] in place.

    Mutates and returns the same array; extra columns are untouched.
    """
    assert boxes.shape[1] >= 4
    boxes[:, 2:4] = boxes[:, 2:4] + boxes[:, :2]
    return boxes
def load_bboxes(dict_input, key_name, key_box, key_score=None, key_tag=None):
    """Stack the records under dict_input[key_name] into one ndarray.

    Each output row is [*box] optionally followed by the score and/or tag,
    depending on which optional keys are supplied. Returns an empty (0, 5)
    array when there are no records.
    """
    assert key_name in dict_input
    records = dict_input[key_name]
    if not records:
        return np.empty([0, 5])
    assert key_box in records[0]
    if key_score:
        assert key_score in records[0]
    if key_tag:
        assert key_tag in records[0]
    rows = []
    for rec in records:
        parts = [rec[key_box]]
        if key_score:
            parts.append(rec[key_score])
        if key_tag:
            parts.append(rec[key_tag])
        rows.append(np.hstack(parts) if len(parts) > 1 else rec[key_box])
    return np.vstack(rows)
def load_masks(dict_input, key_name, key_box):
    """Collect the per-record masks under dict_input[key_name] into a
    single array; returns an empty (0, 28, 28) array when no records."""
    assert key_name in dict_input
    records = dict_input[key_name]
    if not records:
        return np.empty([0, 28, 28])
    assert key_box in records[0]
    return np.array([rec[key_box] for rec in records])
def load_gt(dict_input, key_name, key_box, class_names):
    """Build a float64 ground-truth array with one [*box, tag] row per
    record under dict_input[key_name].

    tag is the index of the record's 'tag' in class_names; unknown classes
    and records carrying a non-zero extra['ignore'] flag get tag -1.
    Returns an empty (0, 5) array when there are no records.
    """
    assert key_name in dict_input
    records = dict_input[key_name]
    if not records:
        return np.empty([0, 5])
    assert key_box in records[0]
    rows = []
    for rec in records:
        tag = class_names.index(rec['tag']) if rec['tag'] in class_names else -1
        # an explicit non-zero ignore flag overrides the class label
        if rec.get('extra', {}).get('ignore', 0) != 0:
            tag = -1
        rows.append(np.hstack((rec[key_box], tag)))
    return np.vstack(rows).astype(np.float64)
def boxes_dump(boxes, is_gt):
    """Convert an (N, >=5) xyxy box array into a list of dict records.

    Each record carries 'box' in [x, y, w, h] form. Ground-truth rows keep
    their tag from the last column; detection rows get the fixed tag 1 and
    their confidence score from the last column.

    Fix: the two original branches duplicated the box conversion and
    append logic; they are merged so the shared work appears once.
    """
    result = []
    for box in boxes.tolist():
        record = {'box': [box[0], box[1], box[2] - box[0], box[3] - box[1]]}
        if is_gt:
            record['tag'] = box[-1]
        else:
            record['tag'] = 1
            record['score'] = box[-1]
        result.append(record)
    return result
def clip_boundary(boxes, height, width):
    """Clamp box coordinates in place to the image extent.

    x1/y1 are clamped to [0, width-1] / [0, height-1]; x2/y2 are clamped
    to [0, width] / [0, height]. Mutates and returns boxes.
    """
    assert boxes.shape[-1] >= 4
    boxes[:, 0] = np.clip(boxes[:, 0], 0, width - 1)
    boxes[:, 1] = np.clip(boxes[:, 1], 0, height - 1)
    boxes[:, 2] = np.clip(boxes[:, 2], 0, width)
    boxes[:, 3] = np.clip(boxes[:, 3], 0, height)
    return boxes
| [
"os.path.exists",
"numpy.minimum",
"os.makedirs",
"numpy.hstack",
"json.dumps",
"numpy.array",
"numpy.empty",
"numpy.vstack",
"numpy.maximum",
"cv2.imread"
] | [((94, 134), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_COLOR'], {}), '(image_path, cv2.IMREAD_COLOR)\n', (104, 134), False, 'import cv2\n'), ((191, 212), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (205, 212), False, 'import os\n'), ((2234, 2288), 'numpy.array', 'np.array', (['[rb[key_box] for rb in dict_input[key_name]]'], {}), '([rb[key_box] for rb in dict_input[key_name]])\n', (2242, 2288), True, 'import numpy as np\n'), ((819, 842), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (833, 842), False, 'import os\n'), ((852, 872), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (863, 872), False, 'import os\n'), ((1258, 1274), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (1266, 1274), True, 'import numpy as np\n'), ((2140, 2161), 'numpy.empty', 'np.empty', (['[0, 28, 28]'], {}), '([0, 28, 28])\n', (2148, 2161), True, 'import numpy as np\n'), ((2452, 2468), 'numpy.empty', 'np.empty', (['[0, 5]'], {}), '([0, 5])\n', (2460, 2468), True, 'import numpy as np\n'), ((3604, 3630), 'numpy.maximum', 'np.maximum', (['boxes[:, 0]', '(0)'], {}), '(boxes[:, 0], 0)\n', (3614, 3630), True, 'import numpy as np\n'), ((3672, 3698), 'numpy.maximum', 'np.maximum', (['boxes[:, 1]', '(0)'], {}), '(boxes[:, 1], 0)\n', (3682, 3698), True, 'import numpy as np\n'), ((3741, 3771), 'numpy.minimum', 'np.minimum', (['boxes[:, 2]', 'width'], {}), '(boxes[:, 2], width)\n', (3751, 3771), True, 'import numpy as np\n'), ((3805, 3836), 'numpy.minimum', 'np.minimum', (['boxes[:, 3]', 'height'], {}), '(boxes[:, 3], height)\n', (3815, 3836), True, 'import numpy as np\n'), ((1930, 1985), 'numpy.vstack', 'np.vstack', (['[rb[key_box] for rb in dict_input[key_name]]'], {}), '([rb[key_box] for rb in dict_input[key_name]])\n', (1939, 1985), True, 'import numpy as np\n'), ((2860, 2889), 'numpy.hstack', 'np.hstack', (['(rb[key_box], tag)'], {}), '((rb[key_box], tag))\n', (2869, 2889), True, 'import numpy as np\n'), 
((2904, 2919), 'numpy.vstack', 'np.vstack', (['bbox'], {}), '(bbox)\n', (2913, 2919), True, 'import numpy as np\n'), ((480, 494), 'json.dumps', 'json.dumps', (['db'], {}), '(db)\n', (490, 494), False, 'import json\n'), ((1557, 1609), 'numpy.hstack', 'np.hstack', (['(rb[key_box], rb[key_score], rb[key_tag])'], {}), '((rb[key_box], rb[key_score], rb[key_tag]))\n', (1566, 1609), True, 'import numpy as np\n'), ((1689, 1728), 'numpy.hstack', 'np.hstack', (['(rb[key_box], rb[key_score])'], {}), '((rb[key_box], rb[key_score]))\n', (1698, 1728), True, 'import numpy as np\n'), ((1824, 1861), 'numpy.hstack', 'np.hstack', (['(rb[key_box], rb[key_tag])'], {}), '((rb[key_box], rb[key_tag]))\n', (1833, 1861), True, 'import numpy as np\n')] |
#tani
from utility.feature_extract import ModelExtractFaceFeature
from utility.data_loader import data_load
from utility.similarity_calculate import *
from PIL import Image
import numpy as np
import os
def main():
    """Extract a face embedding for one query image and rank it against the
    precomputed gallery features by cosine and Euclidean similarity.

    Fix: removed the unused local `dirname` (os.path.dirname result was
    never read); translated the Japanese comments.
    """
    feat_compare, img_list = data_load()
    img_path = "/Users/taniyan/git/facenet-pytorch/data/test_images/angelina_jolie/ayaueto.jpg"
    img_save_path = None
    img = Image.open(img_path)
    model_eff = ModelExtractFaceFeature()
    img_cropped = model_eff.trim_img(img.resize((160, 160)), model_eff.trim_face_model, img_path=img_save_path)
    feature = model_eff.inference(img_cropped, model_eff.extract_feature_model)
    feature_numpy = feature.to('cpu').detach().numpy().copy()
    # replicate the query feature so it can be compared row-wise against
    # every gallery feature at once
    feature_numpy = np.tile(feature_numpy, (feat_compare[0].shape[0], 1))
    # cosine similarity
    res = cos_sim(feature_numpy, feat_compare[0].T)
    ranking_cos_sim(img_path, img_list, res)
    # Euclidean distance (the metric adopted by the FaceNet paper)
    res_2 = euclid_sim(feature_numpy, feat_compare[0])
    ranking_euclid_sim(img_path, img_list, res_2)


if __name__ == "__main__":
    main()
| [
"numpy.tile",
"PIL.Image.open",
"utility.data_loader.data_load",
"os.path.dirname",
"utility.feature_extract.ModelExtractFaceFeature"
] | [((361, 372), 'utility.data_loader.data_load', 'data_load', ([], {}), '()\n', (370, 372), False, 'from utility.data_loader import data_load\n'), ((510, 530), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (520, 530), False, 'from PIL import Image\n'), ((545, 570), 'os.path.dirname', 'os.path.dirname', (['img_path'], {}), '(img_path)\n', (560, 570), False, 'import os\n'), ((587, 612), 'utility.feature_extract.ModelExtractFaceFeature', 'ModelExtractFaceFeature', ([], {}), '()\n', (610, 612), False, 'from utility.feature_extract import ModelExtractFaceFeature\n'), ((889, 942), 'numpy.tile', 'np.tile', (['feature_numpy', '(feat_compare[0].shape[0], 1)'], {}), '(feature_numpy, (feat_compare[0].shape[0], 1))\n', (896, 942), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import keras
import utils
import glob
import os
from keras.models import load_model
import time
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Cap TensorFlow's GPU memory allocation at 70% of the device so other
# processes can share the GPU.
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
# Install the configured session as the global Keras backend session.
set_session(tf.compat.v1.Session(config=config))
# The 80 COCO class names, in the model's output order.
labelsCoco = [
    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train",
    "truck", "boat", "traffic_light", "fire_hydrant", "stop_sign",
    "parking_meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
    "sports_ball", "kite", "baseball_bat", "baseball_glove", "skateboard",
    "surfboard", "tennis_racket", "bottle", "wine_glass", "cup", "fork",
    "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
    "broccoli", "carrot", "hot_dog", "pizza", "donut", "cake", "chair",
    "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
    "laptop", "mouse", "remote", "keyboard", "cell_phone", "microwave",
    "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
    "scissors", "teddy_bear", "hair_drier", "toothbrush",
]
# anchors = [[81,82, 135,169, 344,319], [23,27, 37,58, 81,82]] #tiny
# YOLOv3 anchor box (w, h) pairs per detection scale, largest scale first.
anchors = [[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119], [10, 13, 16, 30, 33, 23]]
# detection confidence threshold and non-max-suppression threshold
obj_threshold = 0.5
nms_threshold = 0.3
# network size
net_w, net_h = 416, 416
class yolo3_keras_model():
    """Wrapper around a Keras YOLOv3 model: loads the weights, runs
    inference on a BGR frame, decodes and NMS-filters the raw network
    output, and draws the surviving boxes onto the frame."""
    def __init__(self, model_path):
        """Load the Keras model from model_path and prepare it for
        prediction from other threads."""
        self.model = load_model(model_path)
        self.model._make_predict_function()
        self.model.summary()
    def draw_boxes(self, boxes, img):
        """Draw every positive-score box onto img (mutated in place) with a
        filled label background; returns img."""
        # draw boxes onto image
        for box in boxes:
            if box.get_score() > 0:
                # bounding box, then a filled label background, then the class name
                cv2.rectangle( img, (box.xmin, box.ymin), (box.xmax, box.ymax), (0,255,0),3)
                cv2.rectangle( img, (box.xmin, box.ymin), (box.xmin+130, box.ymin+40), (0,255,0),-1)
                cv2.putText( img, labelsCoco[box.get_label()], (box.get_box()[0]+10, box.get_box()[1]+30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 255), lineType=cv2.LINE_AA)
        return img
    def do_inference(self, img):
        """Run detection on one BGR frame; returns (annotated img, boxes)."""
        # image size
        ih, iw, _ = img.shape
        # preprocess input image (BGR -> RGB, then letterbox to net size)
        img_rgb = img[:,:,::-1]
        image_data = utils.preprocess_input(img_rgb, net_w, net_h)
        start_time = time.time()
        # prediction
        out = self.model.predict(image_data)
        print("---cnn inference time: {} seconds ---".format((time.time() - start_time)))
        # drop the batch dimension of each of the three output scales
        out[0] = np.squeeze(out[0])
        out[1] = np.squeeze(out[1])
        out[2] = np.squeeze(out[2])
        boxes = list()
        # for i in range(2): #tiny
        for i in range(3):
            # decode the output of the network, one scale at a time
            boxes += utils.decode_netout(out[i], anchors[i], obj_threshold, 416, 416)
        # rescale boxes back to the original frame, then suppress overlaps
        boxes = utils.correct_yolo_boxes(boxes, ih, iw, net_w, net_h)
        boxes = utils.do_nms(boxes, nms_threshold)
        # draw boxes onto image
        self.draw_boxes(boxes, img)
        return img, boxes
# def main():
# model = yolo3_keras_model('./yolov3.h5')
# img_paths = glob.glob(os.path.join('/media/p4f/My Passport/02.dataset/coco/val2017','*.jpg'))
# for img_path in img_paths:
# print('inference on image: {}'.format(img_path))
# img = cv2.imread(img_path)
# image, boxes = model.do_inference(img)
# cv2.imshow('result', image)
# key = cv2.waitKey(0)
# if key == 27:
# break
def main():
    """Run YOLOv3 detection on webcam frames until ESC (27) is pressed.

    Fix: the original never released the VideoCapture handle nor closed
    the preview window; the try/finally guarantees cleanup even when
    inference raises.
    """
    model = yolo3_keras_model('./test/yolov3.h5')
    cap = cv2.VideoCapture(0)
    try:
        ret, img = cap.read()
        while ret:
            start_time = time.time()
            image, boxes = model.do_inference(img)
            print("--- %s seconds ---" % (time.time() - start_time))
            cv2.imshow('result', image)
            key = cv2.waitKey(1)
            if key == 27:  # ESC
                break
            ret, img = cap.read()
    finally:
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| [
"tensorflow.compat.v1.ConfigProto",
"cv2.rectangle",
"keras.models.load_model",
"utils.decode_netout",
"numpy.squeeze",
"cv2.imshow",
"cv2.waitKey",
"cv2.VideoCapture",
"utils.do_nms",
"time.time",
"tensorflow.compat.v1.Session",
"utils.preprocess_input",
"utils.correct_yolo_boxes"
] | [((217, 243), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (241, 243), True, 'import tensorflow as tf\n'), ((313, 348), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (333, 348), True, 'import tensorflow as tf\n'), ((4439, 4458), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4455, 4458), False, 'import cv2\n'), ((2523, 2545), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (2533, 2545), False, 'from keras.models import load_model\n'), ((3240, 3285), 'utils.preprocess_input', 'utils.preprocess_input', (['img_rgb', 'net_w', 'net_h'], {}), '(img_rgb, net_w, net_h)\n', (3262, 3285), False, 'import utils\n'), ((3304, 3315), 'time.time', 'time.time', ([], {}), '()\n', (3313, 3315), False, 'import time\n'), ((3473, 3491), 'numpy.squeeze', 'np.squeeze', (['out[0]'], {}), '(out[0])\n', (3483, 3491), True, 'import numpy as np\n'), ((3505, 3523), 'numpy.squeeze', 'np.squeeze', (['out[1]'], {}), '(out[1])\n', (3515, 3523), True, 'import numpy as np\n'), ((3537, 3555), 'numpy.squeeze', 'np.squeeze', (['out[2]'], {}), '(out[2])\n', (3547, 3555), True, 'import numpy as np\n'), ((3764, 3817), 'utils.correct_yolo_boxes', 'utils.correct_yolo_boxes', (['boxes', 'ih', 'iw', 'net_w', 'net_h'], {}), '(boxes, ih, iw, net_w, net_h)\n', (3788, 3817), False, 'import utils\n'), ((3831, 3865), 'utils.do_nms', 'utils.do_nms', (['boxes', 'nms_threshold'], {}), '(boxes, nms_threshold)\n', (3843, 3865), False, 'import utils\n'), ((4513, 4524), 'time.time', 'time.time', ([], {}), '()\n', (4522, 4524), False, 'import time\n'), ((4633, 4660), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'image'], {}), "('result', image)\n", (4643, 4660), False, 'import cv2\n'), ((4671, 4685), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4682, 4685), False, 'import cv2\n'), ((3686, 3750), 'utils.decode_netout', 'utils.decode_netout', (['out[i]', 
'anchors[i]', 'obj_threshold', '(416)', '(416)'], {}), '(out[i], anchors[i], obj_threshold, 416, 416)\n', (3705, 3750), False, 'import utils\n'), ((2736, 2814), 'cv2.rectangle', 'cv2.rectangle', (['img', '(box.xmin, box.ymin)', '(box.xmax, box.ymax)', '(0, 255, 0)', '(3)'], {}), '(img, (box.xmin, box.ymin), (box.xmax, box.ymax), (0, 255, 0), 3)\n', (2749, 2814), False, 'import cv2\n'), ((2822, 2917), 'cv2.rectangle', 'cv2.rectangle', (['img', '(box.xmin, box.ymin)', '(box.xmin + 130, box.ymin + 40)', '(0, 255, 0)', '(-1)'], {}), '(img, (box.xmin, box.ymin), (box.xmin + 130, box.ymin + 40), (\n 0, 255, 0), -1)\n', (2835, 2917), False, 'import cv2\n'), ((3432, 3443), 'time.time', 'time.time', ([], {}), '()\n', (3441, 3443), False, 'import time\n'), ((4602, 4613), 'time.time', 'time.time', ([], {}), '()\n', (4611, 4613), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scikit-Learn Model-by-Cluster wrapper.
Original code by jnorthman: https://gist.github.com/jnothman/566ebde618ec18f2bea6
"""
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.utils import safe_mask
class ModelByCluster(BaseEstimator):
    """Cluster the data, then fit one regression model per cluster.

    Parameters:
    :clusterer: scikit-learn style clustering model
    :estimator: scikit-learn style regression model, cloned per cluster
    """
    def __init__(self, clusterer, estimator):
        self.clusterer = clusterer
        self.estimator = estimator

    def fit(self, X, y):
        """Fit the clusterer on X, then fit a cloned estimator on the
        members of each cluster."""
        self.clusterer_ = clone(self.clusterer)
        labels = self.clusterer_.fit_predict(X)
        unique_labels = np.unique(labels)
        assert len(unique_labels) == self.clusterer_.n_clusters, \
            "MBC: Some clusters have no data. Probably too little data available: " + \
            "Only {n} data points for {k} clusters.".format(
                n=X.shape[0], k=self.clusterer_.n_clusters)
        self.estimators_ = {}
        for label in unique_labels:
            member_mask = labels == label
            model = clone(self.estimator)
            model.fit(X[safe_mask(X, member_mask)], y[safe_mask(y, member_mask)])
            self.estimators_[label] = model
        return self

    def predict(self, X):
        """Predict each sample with its cluster's estimator, reassembling
        the per-cluster outputs in the original input order.

        Rows belonging to a cluster with no fitted estimator stay NaN;
        numerically unstable models may fail here.
        """
        labels = self.clusterer_.predict(X)
        chunks = []
        positions = []
        for label, model in self.estimators_.items():
            member_mask = labels == label
            if member_mask.any():
                positions.append(np.flatnonzero(member_mask))
                chunks.append(model.predict(X[safe_mask(X, member_mask)]))
        chunks = np.concatenate(chunks)
        positions = np.concatenate(positions)
        y = np.full([X.shape[0], chunks.shape[1]], np.nan)
        y[positions] = chunks
        return y
| [
"numpy.unique",
"sklearn.base.clone",
"numpy.flatnonzero",
"numpy.concatenate",
"numpy.full",
"sklearn.utils.safe_mask"
] | [((694, 715), 'sklearn.base.clone', 'clone', (['self.clusterer'], {}), '(self.clusterer)\n', (699, 715), False, 'from sklearn.base import BaseEstimator, clone\n'), ((788, 807), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (797, 807), True, 'import numpy as np\n'), ((1817, 1838), 'numpy.concatenate', 'np.concatenate', (['y_tmp'], {}), '(y_tmp)\n', (1831, 1838), True, 'import numpy as np\n'), ((1853, 1872), 'numpy.concatenate', 'np.concatenate', (['idx'], {}), '(idx)\n', (1867, 1872), True, 'import numpy as np\n'), ((1885, 1930), 'numpy.full', 'np.full', (['[X.shape[0], y_tmp.shape[1]]', 'np.nan'], {}), '([X.shape[0], y_tmp.shape[1]], np.nan)\n', (1892, 1930), True, 'import numpy as np\n'), ((1197, 1218), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (1202, 1218), False, 'from sklearn.base import BaseEstimator, clone\n'), ((1241, 1259), 'sklearn.utils.safe_mask', 'safe_mask', (['X', 'mask'], {}), '(X, mask)\n', (1250, 1259), False, 'from sklearn.utils import safe_mask\n'), ((1264, 1282), 'sklearn.utils.safe_mask', 'safe_mask', (['y', 'mask'], {}), '(y, mask)\n', (1273, 1282), False, 'from sklearn.utils import safe_mask\n'), ((1713, 1733), 'numpy.flatnonzero', 'np.flatnonzero', (['mask'], {}), '(mask)\n', (1727, 1733), True, 'import numpy as np\n'), ((1778, 1796), 'sklearn.utils.safe_mask', 'safe_mask', (['X', 'mask'], {}), '(X, mask)\n', (1787, 1796), False, 'from sklearn.utils import safe_mask\n')] |
#!/usr/bin/env python3
"""
benchmarks writing boolean array vs uint8 array of same values.
For high-speed in the loop writing where performance is critical.
"""
import tempfile
from numpy.random import random
from numpy import packbits
import h5py
from time import time
# Benchmark parameters: 3 channels x 200k samples of random Booleans.
SIZE = (3, 200000)  # arbitrary size to test
# %% create random Boolean array
xb = random(SIZE) > 0.5  # mean ~ 0.5
# nidaqmx-style representation: a nested Python list of bools
xbl = xb.tolist()
with tempfile.NamedTemporaryFile() as fn:
    with h5py.File(fn, "w") as h:
        # 1) write the native NumPy bool array directly
        tic = time()
        h["bool"] = xb
        print(f"{time()-tic:3e} sec. to write boolean from Numpy bool")
        # 2) pack 8 bools per byte, then write the resulting uint8 array
        tic = time()
        xi = packbits(xbl, axis=0)  # each column becomes uint8 BIG-ENDIAN
        h["uint8"] = xi
        print(f"{time()-tic:3e} sec. to write uint8")
        # %% here's what nidaqmx gives us
        # 3) write the plain Python bool list (h5py converts it on the fly)
        # NOTE(review): ':3e' is width-3 scientific notation; '.3e'
        # (3 decimal places) may have been intended -- confirm
        tic = time()
        h["listbool"] = xbl
        print(f"{time()-tic:3e} sec. to write boolean from bool list")
| [
"numpy.packbits",
"numpy.random.random",
"h5py.File",
"tempfile.NamedTemporaryFile",
"time.time"
] | [((356, 368), 'numpy.random.random', 'random', (['SIZE'], {}), '(SIZE)\n', (362, 368), False, 'from numpy.random import random\n'), ((413, 442), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (440, 442), False, 'import tempfile\n'), ((459, 477), 'h5py.File', 'h5py.File', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (468, 477), False, 'import h5py\n'), ((498, 504), 'time.time', 'time', ([], {}), '()\n', (502, 504), False, 'from time import time\n'), ((615, 621), 'time.time', 'time', ([], {}), '()\n', (619, 621), False, 'from time import time\n'), ((635, 656), 'numpy.packbits', 'packbits', (['xbl'], {'axis': '(0)'}), '(xbl, axis=0)\n', (643, 656), False, 'from numpy import packbits\n'), ((831, 837), 'time.time', 'time', ([], {}), '()\n', (835, 837), False, 'from time import time\n'), ((545, 551), 'time.time', 'time', ([], {}), '()\n', (549, 551), False, 'from time import time\n'), ((738, 744), 'time.time', 'time', ([], {}), '()\n', (742, 744), False, 'from time import time\n'), ((883, 889), 'time.time', 'time', ([], {}), '()\n', (887, 889), False, 'from time import time\n')] |
import json
import os
import random
import numpy as np
from math import ceil
import bottle
from bottle import HTTPResponse
# import time
from timeit import default_timer as timer
from grid_data_maker import *
from util_fns import *
# my_moves
delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1]] # go right
delta_name = ['up', 'left', 'down', 'right']
cost = 1
# vals for smaller heads, equal or big, all bodies and next heads
small_head_val = 1
my_head_val = 3
same_head_val = 2
big_head_val = 5
body_val = 4
my_body_val = 7
next_bighead_val = 9
next_samehead_val = 6
next_smhead_val = 8
next_heads = [next_smhead_val, next_samehead_val, next_bighead_val]
curr_bodies = [small_head_val, my_head_val, same_head_val, big_head_val, body_val, my_body_val]
next_ok_heads = [next_smhead_val, next_samehead_val]
def make_heuristic_map(goal, snakes_grid):
    """Build a Manhattan-distance heuristic map toward goal = [y, x].

    Each cell holds |row - goal_y| + |col - goal_x|; the result has the
    same shape as snakes_grid and an integer dtype.

    Fixes: dtype=np.int crashes on NumPy >= 1.24 (the alias was removed),
    the locals real_heads/next_heads were never used (the latter shadowed
    the module-level constant), and the per-cell Python loop is replaced
    by a broadcasted computation.
    """
    goal_y, goal_x = goal[0], goal[1]
    rows, cols = snakes_grid.shape
    dy = np.abs(np.arange(rows) - goal_y)
    dx = np.abs(np.arange(cols) - goal_x)
    # broadcast |dy| down the rows against |dx| across the columns
    return (dy[:, None] + dx[None, :]).astype(int)
def fill_food_arr(food, my_head_y, my_head_x):
    """Return [dist, y, x] entries for every food item, nearest first,
    measured from the given head position."""
    entries = [
        [heuristic([my_head_y, my_head_x], [item['y'], item['x']]),
         item['y'], item['x']]
        for item in food
    ]
    entries.sort(key=lambda e: e[0])
    return entries
def mark_next_heads(head_y, head_x, snakes_grid, next_head_val):
    """Return a copy of snakes_grid with next_head_val added to every
    in-bounds cell one step up/left/down/right of (head_y, head_x) that
    is currently free or already carries a next-head marker."""
    marked = np.copy(snakes_grid)
    for dy, dx in delta:
        ny = head_y + dy
        nx = head_x + dx
        if not check_in_bounds(ny, nx, snakes_grid):
            continue
        cell = marked[ny, nx]
        if cell == 0 or cell in next_heads:
            marked[ny, nx] += next_head_val
    return marked
def fill_snakes_grid(snakes, width, height, my_body_len, my_id):
    """Rasterize every snake onto a (width, height) grid of cell codes.

    Returns (snakes_grid, solo_grid, snake_heads, snake_tails):
    snakes_grid holds the head/body codes (see module constants) plus the
    "possible next head" markers added by mark_next_heads; solo_grid holds
    only my own snake (used for crash checks); snake_heads is a list of
    [head_val, y, x] for opponents; snake_tails is a list of [y, x].

    Fixes: dtype=np.int crashes on NumPy >= 1.24 (the alias was removed);
    the local `delta` copy of the module constant was never used.
    """
    snake_heads = []
    snake_tails = []
    # second grid for checking open path to tail
    snakes_grid = np.zeros((width, height), dtype=int)
    solo_grid = np.zeros(snakes_grid.shape, dtype=int)
    for curr_snake in snakes:
        my_snake = curr_snake['id'] == my_id
        body = curr_snake['body']
        for k in range(len(body)):
            seg_y = body[k]['y']
            seg_x = body[k]['x']
            if k == 0:
                # heads: classify opponents by length relative to mine and
                # pre-mark the cells they could move into next turn
                if len(body) < my_body_len and not my_snake:
                    snakes_grid[seg_y, seg_x] = small_head_val
                    snake_heads.append([small_head_val, seg_y, seg_x])
                    snakes_grid = mark_next_heads(seg_y, seg_x,
                                                  snakes_grid, next_smhead_val)
                elif len(body) > my_body_len and not my_snake:
                    snakes_grid[seg_y, seg_x] = big_head_val
                    snake_heads.append([big_head_val, seg_y, seg_x])
                    snakes_grid = mark_next_heads(seg_y, seg_x,
                                                  snakes_grid, next_bighead_val)
                elif len(body) == my_body_len and not my_snake:
                    snakes_grid[seg_y, seg_x] = same_head_val
                    snake_heads.append([same_head_val, seg_y, seg_x])
                    snakes_grid = mark_next_heads(seg_y, seg_x,
                                                  snakes_grid, next_samehead_val)
                elif len(body) == my_body_len and my_snake:
                    # my own head goes on both grids
                    solo_grid[seg_y, seg_x] = my_head_val
                    snakes_grid[seg_y, seg_x] = my_head_val
            elif 0 < k < (len(body) - 1):
                # body segments, tail excluded
                if not my_snake:
                    snakes_grid[seg_y, seg_x] = body_val
                else:
                    snakes_grid[seg_y, seg_x] = my_body_val
                    solo_grid[seg_y, seg_x] = body_val
            elif k == (len(body) - 1):
                # tails
                # NOTE(review): solo_grid is written for EVERY snake's
                # tail, not only mine -- looks suspicious; confirm intent
                solo_grid[seg_y, seg_x] = my_body_val
                snake_tails.append([seg_y, seg_x])
                if curr_snake['health'] == 100:
                    # full health presumably means the snake just ate and
                    # will grow, so its tail cell stays occupied -- confirm
                    snakes_grid[seg_y, seg_x] = body_val
    return snakes_grid, solo_grid, snake_heads, snake_tails
def check_dist_to_snakes(snake_heads, head_y, head_x):
    """Return [dist, head_val, y, x] for each opponent head, nearest first.

    snake_heads entries are [head_val, y, x] as produced by
    fill_snakes_grid.

    Fix: the original appended snakehead[0] (the head value) and
    snakehead[1] (y) as the coordinate pair, duplicating the head value
    and dropping x entirely; the actual (y, x) position is now recorded.
    """
    snake_dists = []
    for head_val, snake_y, snake_x in snake_heads:
        dist = heuristic([head_y, head_x], [snake_y, snake_x])
        snake_dists.append([dist, head_val, snake_y, snake_x])
    return sorted(snake_dists, key=lambda entry: entry[0])
def find_free_spaces(snakes_grid, head_y, head_x):
    """List every empty grid cell as [dist, y, x], sorted nearest-first
    from (head_y, head_x)."""
    free_cells = np.argwhere(snakes_grid == 0).tolist()
    entries = [[heuristic([head_y, head_x], cell), cell[0], cell[1]]
               for cell in free_cells]
    return sorted(entries, key=lambda e: e[0])
| [
"numpy.copy",
"numpy.zeros",
"numpy.argwhere",
"numpy.abs"
] | [((1279, 1320), 'numpy.zeros', 'np.zeros', (['snakes_grid.shape'], {'dtype': 'np.int'}), '(snakes_grid.shape, dtype=np.int)\n', (1287, 1320), True, 'import numpy as np\n'), ((2234, 2254), 'numpy.copy', 'np.copy', (['snakes_grid'], {}), '(snakes_grid)\n', (2241, 2254), True, 'import numpy as np\n'), ((3246, 3285), 'numpy.zeros', 'np.zeros', (['(width, height)'], {'dtype': 'np.int'}), '((width, height), dtype=np.int)\n', (3254, 3285), True, 'import numpy as np\n'), ((3302, 3343), 'numpy.zeros', 'np.zeros', (['snakes_grid.shape'], {'dtype': 'np.int'}), '(snakes_grid.shape, dtype=np.int)\n', (3310, 3343), True, 'import numpy as np\n'), ((7131, 7160), 'numpy.argwhere', 'np.argwhere', (['(snakes_grid == 0)'], {}), '(snakes_grid == 0)\n', (7142, 7160), True, 'import numpy as np\n'), ((1430, 1448), 'numpy.abs', 'np.abs', (['(i - goal_y)'], {}), '(i - goal_y)\n', (1436, 1448), True, 'import numpy as np\n'), ((1466, 1484), 'numpy.abs', 'np.abs', (['(j - goal_x)'], {}), '(j - goal_x)\n', (1472, 1484), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import logging
class TileUtils:
    @staticmethod
    def add_texts_with_bg(img, texts):
        '''
        Draws each string in texts on a filled white background box,
        stacked upward from the bottom-left corner of the image (the last
        entry of texts ends up lowest).
        :param img: BGR image (numpy array); drawn on in place
        :param texts: list of strings, one per line
        :return: None (img is mutated)
        '''
        font_scale = 2
        thickness = 2
        font = cv2.FONT_HERSHEY_DUPLEX
        # set the rectangle background to white
        rectangle_bgr = (255, 255, 255)
        rectangle_padding = 25
        # set the text start position (bottom-left, inset by the padding)
        text_offset_x = rectangle_padding - 5
        text_offset_y = img.shape[0] - rectangle_padding
        # iterate in reverse so texts[0] ends up drawn top-most
        for txt in texts[::-1]:
            (text_width, text_height) = cv2.getTextSize(txt, font, fontScale=font_scale, thickness=1)[0]
            # make the coords of the box with a small padding of two pixels
            box_coords = (
                (text_offset_x - rectangle_padding - 5, text_offset_y + rectangle_padding),
                (text_offset_x + text_width + rectangle_padding - 5, text_offset_y - text_height - rectangle_padding)
            )
            # filled white background first, then the black text on top
            cv2.rectangle(img, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
            cv2.putText(img, txt, (text_offset_x, text_offset_y), font, fontScale=font_scale, color=(0, 0, 0),
                        thickness=thickness)
            # move up a fixed line height for the next entry
            text_offset_y -= 80
    @staticmethod
    def create_image_vector_for_each_classes(final_classifications, tiles, classes, confs, max_tiles_per_row=3):
        '''
        For each final classification, picks the tile with the highest
        confidence for that class, overlays the class name and confidence
        on it, and assembles the chosen tiles into one image vector.
        :param final_classifications: unique classification names
        :param tiles: list of tile images, row-aligned with confs
        :param classes: class names, column-aligned with confs
        :param confs: per-tile, per-class confidence matrix (numpy);
            assumed shape (len(tiles), len(classes)) -- TODO confirm
        :param max_tiles_per_row: how many tiles at most side by side
        :return: (image vector, list of (class, confidence) per shown tile)
        '''
        if len(final_classifications) != len(set(final_classifications)):
            raise Exception('Final classifications should be unique')
        UNDEFINED_ANOMALY = 'Undefined Anomaly'
        final_classifications = list(final_classifications)
        # a lone "Undefined Anomaly" is kept; otherwise it is dropped in
        # favour of the concrete classes
        if len(final_classifications) > 1 and UNDEFINED_ANOMALY in final_classifications:
            logging.warning(
                f'Final classifications: contains undefined and regular classes ({final_classifications})..removing undefined for image vector')
            final_classifications.remove(UNDEFINED_ANOMALY)
        res = []
        # each displayed tiles' conf score
        chosen_confs = []
        for final_classification in final_classifications:
            texts = []
            if final_classification == UNDEFINED_ANOMALY:
                # find where the largest conf is. we will use the corresponding class
                (_, idx2) = np.unravel_index(np.argmax(confs), confs.shape)
                final_classification = classes[idx2]
                texts.append('(Highest conf)')
            # find the tile with this class as the highest conf
            idx2 = classes.index(final_classification)
            idx1 = np.argmax(confs[:, idx2])
            tile = tiles[idx1]
            chosen_confs.append((final_classification, confs[idx1][idx2]))
            texts.append(final_classification)
            texts.append(f'Conf: {round(confs[idx1][idx2] * 100, 2)}%')
            # add the class and conf score onto the tile
            TileUtils.add_texts_with_bg(tile, texts)
            res.append(tile)
        # create an image vector with these tiles
        res = TileUtils.make_image_vector_using_tiles(res, tiles_per_row=max_tiles_per_row, add_numbering=False)
        return res, chosen_confs
@staticmethod
def make_image_vector_using_tiles(tiles, tiles_per_row=3, add_numbering=False):
'''
Constructs tiles into a mega image vector.
Good for viewing tiles that are used in the slide or for whatever use case
:param tiles:
:return: numpy image vector matrix
'''
if len(tiles) == 0:
raise Exception("No tiles given. Cannot make image vector")
tile_size = tiles[0].shape[0]
num_rows = int(np.ceil(len(tiles) / tiles_per_row))
# TODO:
# # if we have at least same number of tiles as max_tiles_per_row, then that is number of columns
# num_cols = tiles_per_row if len(tiles) >= tiles_per_row else len(tiles)
# always have x tiles per row. blank pad if necessary
num_cols = tiles_per_row
# image holder
new = np.ones((tile_size * num_rows, tile_size * num_cols, 3), dtype=np.uint8) * 255
for idx, tile in enumerate(tiles):
tile = np.array(tile) # gives error if i dont
# what slice in the output vector we are in
curr_row_slice = int(np.floor(idx / tiles_per_row)) * tile_size
curr_col_slice = idx % (tiles_per_row) * tile_size
# bit of image editing
if add_numbering:
bottom_left_corner_of_text = (25, tile.shape[0] - 50)
TileUtils.add_text(tile, str(idx + 1), bottom_left_corner_of_text=bottom_left_corner_of_text,
font_scale=8, thickness=10, color=(0, 0, 0))
TileUtils.add_border(tile, thickness=0.005, color=(0, 0, 0))
# store image
new[
curr_row_slice:curr_row_slice + tile_size, curr_col_slice:curr_col_slice + tile_size, :] = tile
return new
@staticmethod
def add_border(a, thickness=0.05, color=(0, 0, 0)):
'''
:param a: the matrix image
:param thickness: border thickness
:return:
'''
h, w, c = a.shape
# some coordinates may be part of cropped part of heatmap (recall we do tile size * divide fac). ignore those ones
if h == 0 or c == 0:
return
if c != 3:
raise Exception('Only RGB images supported')
pixel_len = min(int(w * thickness), int(h * thickness))
# for each row in the image
for j in range(h):
# if we are in first 5% or last% of rows, we color the whole row
if j <= pixel_len or j >= w - pixel_len:
# color entire row
for i in range(3):
a[j, :, i] = color[i]
else:
# color the leftmost and rightmost 5% of the row
for i in range(3):
a[j, :pixel_len, i] = color[i]
a[j, (w - pixel_len):, i] = color[i]
@staticmethod
def add_text(a, text, bottom_left_corner_of_text, color=(0, 0, 0), font_scale=1, thickness=2):
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(a, text,
bottom_left_corner_of_text,
font,
font_scale,
color,
thickness)
| [
"cv2.rectangle",
"numpy.ones",
"logging.warning",
"numpy.argmax",
"numpy.floor",
"cv2.putText",
"numpy.array",
"cv2.getTextSize"
] | [((6774, 6862), 'cv2.putText', 'cv2.putText', (['a', 'text', 'bottom_left_corner_of_text', 'font', 'font_scale', 'color', 'thickness'], {}), '(a, text, bottom_left_corner_of_text, font, font_scale, color,\n thickness)\n', (6785, 6862), False, 'import cv2\n'), ((1103, 1178), 'cv2.rectangle', 'cv2.rectangle', (['img', 'box_coords[0]', 'box_coords[1]', 'rectangle_bgr', 'cv2.FILLED'], {}), '(img, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n', (1116, 1178), False, 'import cv2\n'), ((1191, 1315), 'cv2.putText', 'cv2.putText', (['img', 'txt', '(text_offset_x, text_offset_y)', 'font'], {'fontScale': 'font_scale', 'color': '(0, 0, 0)', 'thickness': 'thickness'}), '(img, txt, (text_offset_x, text_offset_y), font, fontScale=\n font_scale, color=(0, 0, 0), thickness=thickness)\n', (1202, 1315), False, 'import cv2\n'), ((2283, 2437), 'logging.warning', 'logging.warning', (['f"""Final classifications: contains undefined and regular classes ({final_classifications})..removing undefined for image vector"""'], {}), "(\n f'Final classifications: contains undefined and regular classes ({final_classifications})..removing undefined for image vector'\n )\n", (2298, 2437), False, 'import logging\n'), ((3135, 3160), 'numpy.argmax', 'np.argmax', (['confs[:, idx2]'], {}), '(confs[:, idx2])\n', (3144, 3160), True, 'import numpy as np\n'), ((4590, 4662), 'numpy.ones', 'np.ones', (['(tile_size * num_rows, tile_size * num_cols, 3)'], {'dtype': 'np.uint8'}), '((tile_size * num_rows, tile_size * num_cols, 3), dtype=np.uint8)\n', (4597, 4662), True, 'import numpy as np\n'), ((4732, 4746), 'numpy.array', 'np.array', (['tile'], {}), '(tile)\n', (4740, 4746), True, 'import numpy as np\n'), ((698, 759), 'cv2.getTextSize', 'cv2.getTextSize', (['txt', 'font'], {'fontScale': 'font_scale', 'thickness': '(1)'}), '(txt, font, fontScale=font_scale, thickness=1)\n', (713, 759), False, 'import cv2\n'), ((2865, 2881), 'numpy.argmax', 'np.argmax', (['confs'], {}), '(confs)\n', (2874, 2881), 
True, 'import numpy as np\n'), ((4862, 4891), 'numpy.floor', 'np.floor', (['(idx / tiles_per_row)'], {}), '(idx / tiles_per_row)\n', (4870, 4891), True, 'import numpy as np\n')] |
# ============================================================================
# 第七章 給湯設備
# 第一節 給湯設備
# Ver.18(エネルギー消費性能計算プログラム(住宅版)Ver.02.05~)
# ============================================================================
import numpy as np
from functools import lru_cache
import pyhees.section7_1_b as default
import pyhees.section7_1_c as gas
import pyhees.section7_1_d as oil
import pyhees.section7_1_e as eheatpump
import pyhees.section7_1_f as eheater
import pyhees.section7_1_g as hybrid_gas
import pyhees.section7_1_g_3 as hybrid_gas_3
import pyhees.section7_1_h as gas_hybrid
import pyhees.section7_1_i as whybrid
import pyhees.section7_1_j as watersaving
import pyhees.section7_1_m as schedule
import pyhees.section9_2 as lss
import pyhees.section9_3 as ass
from pyhees.section11_1 import load_outdoor, get_Theta_ex
from pyhees.section11_2 import load_solrad
from pyhees.section11_3 import load_schedule, get_schedule_hw
# ============================================================================
# 5. 給湯設備によるエネルギー消費量
# ============================================================================
# ============================================================================
# 5.1 消費電力量
# ============================================================================
@lru_cache()
def calc_hotwater_load(n_p, region, sol_region, has_bath, bath_function, pipe_diameter, kitchen_watersaving_A,
                      kitchen_watersaving_C, shower_watersaving_A, shower_watersaving_B, washbowl_watersaving_C,
                      bath_insulation,
                      type=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None, W_tnk_ss=None,
                      hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None, V_fan_P0=None,
                      d0=None, d1=None, m_fan_test=None, W_tnk_ass=None
                      ):
    """Compute the hourly hot-water load profiles of the dwelling.

    NOTE: the result is memoized with ``lru_cache``, so every argument must be
    hashable (callers pass ``heating_flag_d`` as a tuple).

    Args:
        n_p (float): assumed number of occupants
        region (int): energy-efficiency region
        sol_region (int): annual solar-radiation region (1-5)
        has_bath (bool): whether the dwelling has a bathroom
        bath_function (str): bathtub function type
        pipe_diameter (str): pipe diameter after the header branch
        kitchen_watersaving_A (bool): kitchen tap has a hand-side stop valve
        kitchen_watersaving_C (bool): kitchen tap has a cold-water-priority function
        shower_watersaving_A (bool): shower tap has a hand-side stop valve
        shower_watersaving_B (bool): shower tap has a low-flow function
        washbowl_watersaving_C (bool): washbasin tap has a cold-water-priority function
        bath_insulation (bool): whether the bathtub is insulated
        type (str, optional): solar water-heating system type (liquid collector, air collector, or None)
        ls_type (str, optional): liquid-collector system type (solar water heater / solar system)
        A_sp (float, optional): effective solar collector area (m2)
        P_alpha_sp (float, optional): solar collector azimuth (deg)
        P_beta_sp (float, optional): solar collector tilt (deg)
        W_tnk_ss (float, optional): solar-system tank volume (L)
        hotwater_use (bool, optional): True if the air-collector system has a hot-water section
        heating_flag_d (tuple, optional): heating-day flags
        A_col (tuple, optional): collector-array areas (m2)
        P_alpha (float, optional): azimuth (deg)
        P_beta (float, optional): tilt (deg)
        V_fan_P0 (float, optional): air-transport fan flow rate at zero external static pressure (m3/h)
        d0 (tuple, optional): intercept of the collector efficiency characteristic line (-)
        d1 (tuple, optional): slope of the collector efficiency characteristic line (W/(m2K))
        m_fan_test (tuple, optional): air mass-flow rate per unit collector area at the rating test (kg/(s.m2))
        W_tnk_ass (float, optional): tank volume (L)

    Returns:
        dict: hourly hot-water load profiles (heat loads L'/L'' and volumes W',
        plus the daily mean / nighttime mean outdoor temperatures)
    """
    # Occupant activity schedule.
    # NOTE(review): this local deliberately shadows the module alias ``schedule``
    # (pyhees.section7_1_m) imported at the top of the file; the alias is unused here.
    schedule = load_schedule()
    schedule_hw = get_schedule_hw(schedule)
    # Outdoor environment
    outdoor = load_outdoor()
    Theta_ex_d_t = get_Theta_ex(region, outdoor)
    # ----- 14. Nighttime mean outdoor temperature -----
    # Nighttime mean outdoor temperature (deg C) (eq. 15)
    Theta_ex_Nave_d = get_Theta_ex_Nave_d(Theta_ex_d_t)
    # ----- 13. Daily mean outdoor temperature -----
    # Daily mean outdoor temperature (deg C) (eq. 14)
    theta_ex_d_Ave_d = get_theta_ex_d_Ave_d(Theta_ex_d_t)
    # ----- 12. Daily mean feed-water temperature -----
    # Period-average outdoor temperature (deg C) (eq. 13)
    Theta_ex_prd_Ave_d = get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d)
    # Daily mean feed-water temperature (deg C) (eq. 12)
    Theta_wtr_d = get_Theta_wtr_d(region, Theta_ex_prd_Ave_d)
    # ----- 11. Heat load of reheating the bathtub -----
    # Hot-water heat load due to reheating the bathtub (MJ/h) (eq. 10)
    L_ba_d_t = calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p)
    # ----- 10. Reference hot-water supply volumes -----
    # Reference supply volumes (L/h) (eq. 7): kitchen, shower, washbasin, tub filling
    W_k_d_t = calc_W_k_d_t(n_p, schedule_hw)
    W_s_d_t = calc_W_s_d_t(n_p, schedule_hw, has_bath)
    W_w_d_t = calc_W_w_d_t(n_p, schedule_hw)
    W_b1_d_t = calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function)
    W_b2_d_t = calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function)
    # Reference supply volume when topping up via the bathtub tap (L/h) (eq. 9)
    W_ba1_d_t = calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d)
    # ----- 9. Water-saving-corrected supply volumes -----
    # Water-saving-corrected supply volumes (L/h) (eq. 6)
    W_dash_k_d_t = calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d)
    W_dash_s_d_t = calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter)
    W_dash_w_d_t = calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d)
    W_dash_b1_d_t = calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter)
    W_dash_b2_d_t = calc_W_dash_b2_d_t(W_b2_d_t)
    W_dash_ba1_d_t = calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter)
    # ----- 8. Water-saving-corrected heat loads -----
    # Reference supply temperatures (deg C)
    Theta_sw_k = get_Theta_sw_k()
    Theta_sw_s = get_Theta_sw_s()
    Theta_sw_w = get_Theta_sw_w()
    # Water-saving-corrected heat loads (MJ/h) (eq. 5)
    L_dash_k_d_t = get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d)
    L_dash_s_d_t = get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d)
    L_dash_w_d_t = get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d)
    L_dash_b1_d_t, L_dash_b2_d_t = get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bath_function)
    L_dash_ba1_d_t, L_dash_ba2_d_t = get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bath_function)
    # ----- 7. Solar-corrected heat loads -----
    # Corrected heat collection of the solar water-heating system
    L_sun_d_t = calc_L_sun_d_t(
        region=region,
        sol_region=sol_region,
        solar_device=type,
        ls_type=ls_type,
        A_sp=A_sp,
        P_alpha_sp=P_alpha_sp,
        P_beta_sp=P_beta_sp,
        W_tnk_ss=W_tnk_ss,
        hotwater_use=hotwater_use,
        heating_flag_d=heating_flag_d,
        A_col=A_col,
        P_alpha=P_alpha,
        P_beta=P_beta,
        V_fan_P0=V_fan_P0,
        d0=d0,
        d1=d1,
        m_fan_test=m_fan_test,
        W_tnk_ass=W_tnk_ass,
        Theta_wtr_d=Theta_wtr_d,
        L_dash_k_d_t=L_dash_k_d_t,
        L_dash_s_d_t=L_dash_s_d_t,
        L_dash_w_d_t=L_dash_w_d_t,
        L_dash_b1_d_t=L_dash_b1_d_t,
        L_dash_b2_d_t=L_dash_b2_d_t,
        L_dash_ba1_d_t=L_dash_ba1_d_t
    )
    # Solar-corrected hot-water heat loads
    L_dashdash_k_d_t = calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                             L_dash_ba1_d_t,
                                             L_sun_d_t)
    L_dashdash_s_d_t = calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                             L_dash_ba1_d_t,
                                             L_sun_d_t)
    L_dashdash_w_d_t = calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                             L_dash_ba1_d_t,
                                             L_sun_d_t)
    L_dashdash_b1_d_t = calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                               L_dash_ba1_d_t, L_sun_d_t)
    L_dashdash_b2_d_t = calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                               L_dash_ba1_d_t, L_sun_d_t)
    L_dashdash_ba1_d_t = calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
                                                 L_dash_ba1_d_t, L_sun_d_t)
    L_dashdash_ba2_d_t = get_L_dashdash_ba2_d_t(L_dash_ba2_d_t)
    # NOTE(review): annual-total debug prints below are kept as-is to preserve
    # observable behavior; consider switching them to logging.
    print('L_ba = {}'.format(np.sum(L_ba_d_t)))
    print('W_k = {}'.format(np.sum(W_k_d_t)))
    print('W_s = {}'.format(np.sum(W_s_d_t)))
    print('W_w = {}'.format(np.sum(W_w_d_t)))
    print('W_b1 = {}'.format(np.sum(W_b1_d_t)))
    print('W_b2 = {}'.format(np.sum(W_b2_d_t)))
    print('W_ba1 = {}'.format(np.sum(W_ba1_d_t)))
    print('W_dash_k = {}'.format(np.sum(W_dash_k_d_t)))
    print('W_dash_s = {}'.format(np.sum(W_dash_s_d_t)))
    print('W_dash_w = {}'.format(np.sum(W_dash_w_d_t)))
    print('W_dash_b1 = {}'.format(np.sum(W_dash_b1_d_t)))
    print('W_dash_b2 = {}'.format(np.sum(W_dash_b2_d_t)))
    print('W_dash_ba1 = {}'.format(np.sum(W_dash_ba1_d_t)))
    print('L_dash_k = {}'.format(np.sum(L_dash_k_d_t)))
    print('L_dash_s = {}'.format(np.sum(L_dash_s_d_t)))
    print('L_dash_w = {}'.format(np.sum(L_dash_w_d_t)))
    print('L_dash_b1 = {}'.format(np.sum(L_dash_b1_d_t)))
    print('L_dash_b2 = {}'.format(np.sum(L_dash_b2_d_t)))
    print('L_dash_ba1 = {}'.format(np.sum(L_dash_ba1_d_t)))
    print('L_dash_ba2 = {}'.format(np.sum(L_dash_ba2_d_t)))
    print('L_dashdash_k = {}'.format(np.sum(L_dashdash_k_d_t)))
    print('L_dashdash_s = {}'.format(np.sum(L_dashdash_s_d_t)))
    print('L_dashdash_w = {}'.format(np.sum(L_dashdash_w_d_t)))
    print('L_dashdash_b1 = {}'.format(np.sum(L_dashdash_b1_d_t)))
    print('L_dashdash_b2 = {}'.format(np.sum(L_dashdash_b2_d_t)))
    print('L_dashdash_ba1 = {}'.format(np.sum(L_dashdash_ba1_d_t)))
    print('L_dashdash_ba2 = {}'.format(np.sum(L_dashdash_ba2_d_t)))
    return {
        'L_dash_k_d_t': L_dash_k_d_t,
        'L_dash_s_d_t': L_dash_s_d_t,
        'L_dash_w_d_t': L_dash_w_d_t,
        'L_dash_b1_d_t': L_dash_b1_d_t,
        'L_dash_b2_d_t': L_dash_b2_d_t,
        'L_dash_ba1_d_t': L_dash_ba1_d_t,
        'L_dash_ba2_d_t': L_dash_ba2_d_t,
        'L_dashdash_k_d_t': L_dashdash_k_d_t,
        'L_dashdash_s_d_t': L_dashdash_s_d_t,
        'L_dashdash_w_d_t': L_dashdash_w_d_t,
        'L_dashdash_b1_d_t': L_dashdash_b1_d_t,
        'L_dashdash_b2_d_t': L_dashdash_b2_d_t,
        'L_dashdash_ba1_d_t': L_dashdash_ba1_d_t,
        'L_dashdash_ba2_d_t': L_dashdash_ba2_d_t,
        'W_dash_k_d_t': W_dash_k_d_t,
        'W_dash_s_d_t': W_dash_s_d_t,
        'W_dash_w_d_t': W_dash_w_d_t,
        'W_dash_b1_d_t': W_dash_b1_d_t,
        'W_dash_b2_d_t': W_dash_b2_d_t,
        'W_dash_ba1_d_t': W_dash_ba1_d_t,
        'theta_ex_d_Ave_d': theta_ex_d_Ave_d,
        'Theta_ex_Nave_d': Theta_ex_Nave_d
    }
def calc_E_E_W_d_t(n_p, L_HWH, heating_flag_d, region, sol_region, HW, SHC):
    """Hourly electricity consumption of the hot-water supply system (kWh/h) (eq. 1).

    Sum of the water heater's consumption and the solar system's auxiliary
    (pump/fan) consumption.

    Args:
        n_p (float): assumed number of occupants
        L_HWH (ndarray): heat load of the hot-water heating heat source
        heating_flag_d (ndarray): heating-day flags
        region (int): energy-efficiency region
        sol_region (int): annual solar-radiation region (1-5)
        HW (dict): water heater specification
        SHC (dict): solar heat collection system specification (or None)

    Returns:
        ndarray: hourly electricity consumption of the hot-water supply system (kWh/h)
    """
    if HW is None or HW['hw_type'] is None:
        # No kitchen, washroom or bathroom at all: consumption is zero.
        return np.zeros(24 * 365)
    if HW['hw_type'] == 'コージェネレーションを使用する':
        # Cogeneration ("uses cogeneration"): accounted for elsewhere, zero here.
        return np.zeros(24 * 365)
    # Normalize the bathtub function type
    bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
    # Build the hot-water load profiles
    args = {
        'n_p': n_p,
        'region': region,
        'sol_region': sol_region,
        'has_bath': HW['has_bath'],
        'bath_function': bath_function,
        'pipe_diameter': HW['pipe_diameter'],
        'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
        'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
        'shower_watersaving_A': HW['shower_watersaving_A'],
        'shower_watersaving_B': HW['shower_watersaving_B'],
        'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
        'bath_insulation': HW['bath_insulation']
    }
    if SHC is not None:
        # '液体集熱式' = liquid-collector solar system, '空気集熱式' = air-collector solar system
        if SHC['type'] == '液体集熱式':
            args.update({
                'type': SHC['type'],
                'ls_type': SHC['ls_type'],
                'A_sp': SHC['A_sp'],
                'P_alpha_sp': SHC['P_alpha_sp'],
                'P_beta_sp': SHC['P_beta_sp'],
                'W_tnk_ss': SHC['W_tnk_ss']
            })
        elif SHC['type'] == '空気集熱式':
            args.update({
                'type': SHC['type'],
                'hotwater_use': SHC['hotwater_use'],
                'heating_flag_d': tuple(heating_flag_d),
                'A_col': SHC['A_col'],
                'P_alpha': SHC['P_alpha'],
                'P_beta': SHC['P_beta'],
                'V_fan_P0': SHC['V_fan_P0'],
                'm_fan_test': SHC['m_fan_test'],
                'd0': SHC['d0'],
                'd1': SHC['d1'],
                'W_tnk_ass': SHC['W_tnk_ass']
            })
        else:
            raise ValueError(SHC['type'])
    hotwater_load = calc_hotwater_load(**args)
    # Hourly electricity consumption of the water heater (kWh/h)
    E_E_hs_d_t = calc_E_E_hs_d_t(
        hw_type=HW['hw_type'],
        bath_function=bath_function,
        hybrid_category=HW['hybrid_category'],
        package_id=HW.get('package_id'),
        hybrid_param=HW.get('hybrid_param'),
        e_rtd=HW['e_rtd'],
        e_dash_rtd=HW['e_dash_rtd'],
        L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
        L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
        L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
        L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
        L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
        L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
        L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
        W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
        W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
        W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
        W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
        W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
        W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
        theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d'],
        Theta_ex_Nave_d=hotwater_load['Theta_ex_Nave_d'],
        L_HWH=L_HWH,
        CO2HP=HW['CO2HP'] if 'CO2HP' in HW else None
    )
    # Auxiliary (pump/fan) electricity consumption of the solar system
    E_E_aux_ss_d_t = calc_E_E_aux_ss_d_t(
        SHC=SHC,
        region=region,
        sol_region=sol_region,
        heating_flag_d=heating_flag_d
    )
    # Hourly electricity consumption of the hot-water supply system (eq. 1)
    E_E_W_d_t = E_E_hs_d_t + E_E_aux_ss_d_t
    return E_E_W_d_t
def calc_E_E_aux_ss_d_t(SHC, region=None, sol_region=None, heating_flag_d=None):
    """Hourly auxiliary electricity consumption of the solar system (kWh/h).

    Delegates to Chapter 9 Section 2 (liquid-collector) or Section 3
    (air-collector) depending on the system type.

    Args:
        SHC (dict): solar water-heating system specification, or None if absent
        region (int, optional): energy-efficiency region
        sol_region (int, optional): annual solar-radiation region
        heating_flag_d (ndarray, optional): heating-day flags

    Returns:
        ndarray: hourly auxiliary electricity consumption (kWh/h)

    Raises:
        ValueError: if SHC['type'] is not a known system type
    """
    # No solar system installed: auxiliary consumption is zero all year.
    if SHC is None:
        return np.zeros(24 * 365)
    solar_type = SHC['type']
    if solar_type == '液体集熱式':
        # Liquid-collector system: hourly auxiliary consumption per
        # Chapter 9 "Natural energy equipment" Section 2.
        return lss.calc_E_E_lss_aux_d_t(
            ls_type=SHC['ls_type'],
            pmp_type='上記以外の機種',
            P_alpha_sp=SHC['P_alpha_sp'],
            P_beta_sp=SHC['P_beta_sp'],
            region=region,
            sol_region=sol_region
        )
    if solar_type == '空気集熱式':
        # Air-collector system: share of the auxiliary consumption attributed
        # to hot-water supply per Chapter 9 Section 3.
        return ass.calc_E_E_W_aux_ass_d_t(
            hotwater_use=SHC['hotwater_use'],
            heating_flag_d=heating_flag_d,
            region=region,
            sol_region=sol_region,
            P_alpha=SHC['P_alpha'],
            P_beta=SHC['P_beta'],
            A_col=SHC['A_col'],
            V_fan_P0=SHC['V_fan_P0'],
            m_fan_test=SHC['m_fan_test'],
            d0=SHC['d0'],
            d1=SHC['d1'],
            fan_sso=SHC['fan_sso'],
            fan_type=SHC['fan_type'],
            pump_sso=SHC['pump_sso']
        )
    raise ValueError(solar_type)
# ============================================================================
# 5.2 ガス消費量
# ============================================================================
def calc_E_G_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
    """Hourly gas consumption of the hot-water supply system (MJ/h) (eq. 2).

    Args:
        n_p (float): assumed number of occupants
        L_HWH (ndarray): daily heat load of hot-water heating (MJ/d)
        heating_flag_d (ndarray): heating-day flags
        A_A (float): total floor area (m2) — not used directly here
        region (int): energy-efficiency region
        sol_region (int): annual solar-radiation region
        HW (dict): water heater specification
        SHC (dict): solar heat collection system specification (or None)

    Returns:
        ndarray: hourly gas consumption of the hot-water supply system (MJ/h)
    """
    if HW is None or HW['hw_type'] is None:
        # No kitchen, washroom or bathroom at all: consumption is zero.
        return np.zeros(24 * 365)
    # Normalize the bathtub function type
    bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
    # Build the hot-water load profiles
    args = {
        'n_p': n_p,
        'region': region,
        'sol_region': sol_region,
        'has_bath': HW['has_bath'],
        'bath_function': bath_function,
        'pipe_diameter': HW['pipe_diameter'],
        'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
        'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
        'shower_watersaving_A': HW['shower_watersaving_A'],
        'shower_watersaving_B': HW['shower_watersaving_B'],
        'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
        'bath_insulation': HW['bath_insulation']
    }
    if SHC is not None:
        # '液体集熱式' = liquid-collector solar system, '空気集熱式' = air-collector solar system
        if SHC['type'] == '液体集熱式':
            args.update({
                'type': SHC['type'],
                'ls_type': SHC['ls_type'],
                'A_sp': SHC['A_sp'],
                'P_alpha_sp': SHC['P_alpha_sp'],
                'P_beta_sp': SHC['P_beta_sp'],
                'W_tnk_ss': SHC['W_tnk_ss']
            })
        elif SHC['type'] == '空気集熱式':
            args.update({
                'type': SHC['type'],
                'hotwater_use': SHC['hotwater_use'],
                'heating_flag_d': tuple(heating_flag_d),
                'A_col': SHC['A_col'],
                'P_alpha': SHC['P_alpha'],
                'P_beta': SHC['P_beta'],
                'V_fan_P0': SHC['V_fan_P0'],
                'm_fan_test': SHC['m_fan_test'],
                'd0': SHC['d0'],
                'd1': SHC['d1'],
                'W_tnk_ass': SHC['W_tnk_ass']
            })
        else:
            raise ValueError(SHC['type'])
    hotwater_load = calc_hotwater_load(**args)
    # Gas consumption of the water heater
    E_G_hs_d = calc_E_G_hs_d(
        hw_type=HW['hw_type'],
        hybrid_category=HW['hybrid_category'],
        e_rtd=HW['e_rtd'],
        e_dash_rtd=HW['e_dash_rtd'],
        bath_function=bath_function,
        package_id=HW.get('package_id'),
        L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
        L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
        L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
        L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
        L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
        L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
        L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
        W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
        W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
        W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
        W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
        W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
        W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
        Theta_ex_Ave=hotwater_load['theta_ex_d_Ave_d'],
        Theta_ex_Nave=hotwater_load['Theta_ex_Nave_d'],
        L_HWH=L_HWH,
        hybrid_param=HW.get('hybrid_param')
    )
    return E_G_hs_d
# ============================================================================
# 5.3 灯油消費量
# ============================================================================
def calc_E_K_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
    """Hourly kerosene consumption of the hot-water supply system (MJ/h) (eq. 3).

    Args:
        n_p (float): assumed number of occupants
        L_HWH (ndarray): daily heat load of hot-water heating (MJ/d)
        heating_flag_d (ndarray): heating-day flags
        A_A (float): total floor area (m2) — not used directly here
        region (int): energy-efficiency region
        sol_region (int): annual solar-radiation region
        HW (dict): water heater specification
        SHC (dict): solar heat collection system specification (or None)

    Returns:
        ndarray: hourly kerosene consumption of the hot-water supply system (MJ/h)
    """
    if HW is None or HW['hw_type'] is None:
        # No kitchen, washroom or bathroom at all: consumption is zero.
        return np.zeros(24 * 365)
    # Normalize the bathtub function type
    bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
    # Build the hot-water load profiles
    args = {
        'n_p': n_p,
        'region': region,
        'sol_region': sol_region,
        'has_bath': HW['has_bath'],
        'bath_function': bath_function,
        'pipe_diameter': HW['pipe_diameter'],
        'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
        'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
        'shower_watersaving_A': HW['shower_watersaving_A'],
        'shower_watersaving_B': HW['shower_watersaving_B'],
        'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
        'bath_insulation': HW['bath_insulation']
    }
    if SHC is not None:
        # '液体集熱式' = liquid-collector solar system, '空気集熱式' = air-collector solar system
        if SHC['type'] == '液体集熱式':
            args.update({
                'type': SHC['type'],
                'ls_type': SHC['ls_type'],
                'A_sp': SHC['A_sp'],
                'P_alpha_sp': SHC['P_alpha_sp'],
                'P_beta_sp': SHC['P_beta_sp'],
                'W_tnk_ss': SHC['W_tnk_ss']
            })
        elif SHC['type'] == '空気集熱式':
            args.update({
                'type': SHC['type'],
                'hotwater_use': SHC['hotwater_use'],
                'heating_flag_d': tuple(heating_flag_d),
                'A_col': SHC['A_col'],
                'P_alpha': SHC['P_alpha'],
                'P_beta': SHC['P_beta'],
                'V_fan_P0': SHC['V_fan_P0'],
                'm_fan_test': SHC['m_fan_test'],
                'd0': SHC['d0'],
                'd1': SHC['d1'],
                'W_tnk_ass': SHC['W_tnk_ass']
            })
        else:
            raise ValueError(SHC['type'])
    hotwater_load = calc_hotwater_load(**args)
    # Hourly kerosene consumption of the water heater (MJ/h)
    E_k_hs_d_t = calc_E_K_hs_d_t(
        hw_type=HW['hw_type'],
        e_rtd=HW['e_rtd'],
        e_dash_rtd=HW['e_dash_rtd'],
        bath_function=bath_function,
        L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
        L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
        L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
        L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
        L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
        L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
        L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
        theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d']
    )
    return E_k_hs_d_t
# ============================================================================
# 5.4 その他の燃料による一次エネルギー消費量
# ============================================================================
def get_E_M_W_d_t():
    """Hourly primary energy consumption of the hot-water supply system from
    other fuels (MJ/h).

    Returns:
        ndarray: an all-zero hourly profile — by definition no other fuel is
        attributed to the hot-water supply system.
    """
    hours_per_year = 24 * 365
    return np.zeros(hours_per_year)
# ============================================================================
# 6. 給湯機のエネルギー消費量
# ============================================================================
def calc_E_E_hs_d_t(hw_type, bath_function, package_id, hybrid_param, hybrid_category, e_rtd, e_dash_rtd, Theta_ex_Nave_d, W_dash_k_d_t, W_dash_s_d_t,
                    W_dash_w_d_t,
                    W_dash_b1_d_t,
                    W_dash_b2_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t,
                    L_dashdash_b1_d_t,
                    L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, CO2HP):
    """Hourly electricity consumption of the water heater (kWh/h).

    Dispatches to the sub-module for the specific heater type (``hw_type`` is a
    Japanese product-category string, e.g. gas, oil, electric heat pump,
    electric heater, or one of the hybrid heat-pump/gas variants).

    Args:
        hw_type (str): water heater / hot-water heating unit type
        bath_function (str): bathtub function type
        package_id (str): package id (hybrid units)
        hybrid_param (dict): hybrid-unit parameters
        hybrid_category (str): category of the hybrid heat-pump/gas unit
        e_rtd (float): rated efficiency of the water heater
        e_dash_rtd (float): "energy consumption efficiency" per the Energy
            Conservation Act standard for gas water heaters
        Theta_ex_Nave_d (ndarray): nighttime mean outdoor temperature (deg C)
        W_dash_k_d_t (ndarray): hourly corrected supply volume, kitchen tap (L/h)
        W_dash_s_d_t (ndarray): hourly corrected supply volume, shower tap (L/h)
        W_dash_w_d_t (ndarray): hourly corrected supply volume, washbasin tap (L/h)
        W_dash_b1_d_t (ndarray): hourly corrected supply volume, tub filling via tap (L/h)
        W_dash_b2_d_t (ndarray): hourly corrected supply volume, automatic tub filling (L/h)
        W_dash_ba1_d_t (ndarray): hourly corrected supply volume, tub top-up via tap (L/h)
        theta_ex_d_Ave_d (ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_k_d_t (ndarray): hourly solar-corrected heat load, kitchen tap (MJ/h)
        L_dashdash_s_d_t (ndarray): hourly solar-corrected heat load, shower tap (MJ/h)
        L_dashdash_w_d_t (ndarray): hourly solar-corrected heat load, washbasin tap (MJ/h)
        L_dashdash_b1_d_t (ndarray): hourly solar-corrected heat load, tub filling via tap (MJ/h)
        L_dashdash_b2_d_t (ndarray): hourly solar-corrected heat load, automatic tub filling (MJ/h)
        L_dashdash_ba1_d_t (ndarray): hourly solar-corrected heat load, tub top-up via tap (MJ/h)
        L_dashdash_ba2_d_t (ndarray): hourly solar-corrected heat load, tub reheating (MJ/h)
        L_HWH (ndarray): daily heat load of hot-water heating (MJ/d)
        CO2HP (dict): CO2 heat pump parameters

    Returns:
        ndarray: hourly electricity consumption of the water heater (kWh/h)

    Raises:
        ValueError: if hw_type is not a recognized heater type
    """
    if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
            or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
        return gas.calc_E_E_hs_d_t(
            W_dash_k_d_t=W_dash_k_d_t,
            W_dash_s_d_t=W_dash_s_d_t,
            W_dash_w_d_t=W_dash_w_d_t,
            W_dash_b1_d_t=W_dash_b1_d_t,
            W_dash_b2_d_t=W_dash_b2_d_t,
            W_dash_ba1_d_t=W_dash_ba1_d_t,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
            or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
        return oil.calc_E_E_hs_d_t(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t, W_dash_w_d_t=W_dash_w_d_t,
                                   W_dash_b1_d_t=W_dash_b1_d_t, W_dash_ba1_d_t=W_dash_ba1_d_t,
                                   W_dash_b2_d_t=W_dash_b2_d_t, theta_ex_d_Ave_d=theta_ex_d_Ave_d,
                                   L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)
    elif hw_type == '電気ヒートポンプ給湯機':
        return eheatpump.calc_E_E_hs_d_t(
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
            e_rtd=e_rtd,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            theta_ex_Nave_d=Theta_ex_Nave_d,
            CO2HP=CO2HP
        )
    elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
        return eheater.calc_E_E_hs_d_t(
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
        return hybrid_gas.calc_E_E_hs_d_t(
            hybrid_category=hybrid_category,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
        return hybrid_gas_3.calc_E_E_hs_d_t(
            bath_function=bath_function,
            package_id=package_id,
            hybrid_param=hybrid_param,
            W_dash_ba1_d_t=W_dash_ba1_d_t,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return gas_hybrid.get_E_E_hs(
            W_dash_k_d_t=W_dash_k_d_t,
            W_dash_s_d_t=W_dash_s_d_t,
            W_dash_w_d_t=W_dash_w_d_t,
            W_dash_b1_d_t=W_dash_b1_d_t,
            W_dash_b2_d_t=W_dash_b2_d_t,
            W_dash_ba1_d_t=W_dash_ba1_d_t,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return whybrid.calc_E_E_hs_d_t(
            L_HWH=L_HWH,
            hybrid_category=hybrid_category,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    else:
        raise ValueError(hw_type)
def calc_E_G_hs_d(hw_type, hybrid_category, e_rtd, e_dash_rtd, bath_function, package_id, Theta_ex_Nave, W_dash_k_d_t, W_dash_s_d_t,
                  W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t, Theta_ex_Ave, L_dashdash_k_d_t,
                  L_dashdash_s_d_t, L_dashdash_w_d_t,
                  L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, hybrid_param):
    """Hourly gas consumption of the water heater (MJ/h).

    Dispatches to the sub-module for the specific heater type (``hw_type`` is a
    Japanese product-category string). Non-gas heaters return all-zero profiles
    from their sub-modules; cogeneration is accounted for elsewhere and returns
    an all-zero hourly profile here.

    Args:
        hw_type (str): water heater / hot-water heating unit type
        hybrid_category (str): category of the hybrid heat-pump/gas unit
        e_rtd (float): rated efficiency of the water heater
        e_dash_rtd (float): "energy consumption efficiency" per the Energy
            Conservation Act standard for gas water heaters
        bath_function (str): bathtub function type
        package_id (str): package id (hybrid units)
        Theta_ex_Nave (ndarray): nighttime mean outdoor temperature (deg C)
        W_dash_k_d_t (ndarray): hourly corrected supply volume, kitchen tap (L/h)
        W_dash_s_d_t (ndarray): hourly corrected supply volume, shower tap (L/h)
        W_dash_w_d_t (ndarray): hourly corrected supply volume, washbasin tap (L/h)
        W_dash_b1_d_t (ndarray): hourly corrected supply volume, tub filling via tap (L/h)
        W_dash_b2_d_t (ndarray): hourly corrected supply volume, automatic tub filling (L/h)
        W_dash_ba1_d_t (ndarray): hourly corrected supply volume, tub top-up via tap (L/h)
        Theta_ex_Ave (ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_k_d_t (ndarray): hourly solar-corrected heat load, kitchen tap (MJ/h)
        L_dashdash_s_d_t (ndarray): hourly solar-corrected heat load, shower tap (MJ/h)
        L_dashdash_w_d_t (ndarray): hourly solar-corrected heat load, washbasin tap (MJ/h)
        L_dashdash_b1_d_t (ndarray): hourly solar-corrected heat load, tub filling via tap (MJ/h)
        L_dashdash_b2_d_t (ndarray): hourly solar-corrected heat load, automatic tub filling (MJ/h)
        L_dashdash_ba1_d_t (ndarray): hourly solar-corrected heat load, tub top-up via tap (MJ/h)
        L_dashdash_ba2_d_t (ndarray): hourly solar-corrected heat load, tub reheating (MJ/h)
        L_HWH (ndarray): daily heat load of hot-water heating (MJ/d)
        hybrid_param (dict): hybrid-unit parameters

    Returns:
        ndarray: hourly gas consumption of the water heater (MJ/h)

    Raises:
        ValueError: if hw_type is not a recognized heater type
    """
    if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
            or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
        return gas.calc_E_G_hs_d_t(
            hw_type=hw_type,
            e_rtd=e_rtd,
            e_dash_rtd=e_dash_rtd,
            theta_ex_d_Ave_d=Theta_ex_Ave,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
            bath_function=bath_function
        )
    elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
            or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
        return oil.get_E_G_hs_d_t()
    elif hw_type == '電気ヒートポンプ給湯機':
        return eheatpump.get_E_G_hs_d_t()
    elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
        return eheater.get_E_G_hs()
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
        return hybrid_gas.calc_E_G_hs_d_t(
            hybrid_category=hybrid_category,
            theta_ex_d_Ave_d=Theta_ex_Ave,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
        return hybrid_gas_3.get_E_G_hs_d_t(
            bath_function=bath_function,
            package_id=package_id,
            theta_ex_d_Ave_d=Theta_ex_Ave,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
            W_dash_ba1_d_t=W_dash_ba1_d_t,
            hybrid_param=hybrid_param
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return gas_hybrid.get_E_G_hs(
            Theta_ex_Ave=Theta_ex_Ave,
            L_dashdash_k=L_dashdash_k_d_t,
            L_dashdash_s=L_dashdash_s_d_t,
            L_dashdash_w=L_dashdash_w_d_t,
            L_dashdash_b1=L_dashdash_b1_d_t,
            L_dashdash_b2=L_dashdash_b2_d_t,
            L_dashdash_ba1=L_dashdash_ba1_d_t,
            L_dashdash_ba2=L_dashdash_ba2_d_t,
            bath_function=bath_function
        )
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return whybrid.calc_E_G_hs_d_t(
            L_HWH=L_HWH,
            hybrid_category=hybrid_category,
            Theta_ex_Ave=Theta_ex_Ave,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == 'コージェネレーションを使用する':
        # BUGFIX: was np.zeros(365) (daily length) while every other branch and
        # the documented return are hourly (24 * 365); the mismatched shape
        # would break element-wise combination with other hourly profiles.
        return np.zeros(24 * 365)
    else:
        raise ValueError(hw_type)
def calc_E_K_hs_d_t(hw_type, e_rtd, e_dash_rtd, bath_function, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t,
                    L_dashdash_w_d_t,
                    L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t):
    """Hourly kerosene consumption of the water heater (MJ/h).

    Dispatches on the exact unit-type string; only the oil-fired types
    actually burn kerosene — every other recognized type delegates to its
    sub-module, which returns that technology's (zero) kerosene series.

    Args:
        hw_type (str): type of water heater / hot-water heating unit
        e_rtd (float): rated efficiency of the unit
        e_dash_rtd (float): "energy consumption efficiency" per the
            manufacturer-judgment criteria of the Act on Rationalizing
            Energy Use (gas hot-water equipment)
        bath_function (str): type of bath function
        theta_ex_d_Ave_d (ndarray): daily average outdoor temperature (deg C)
        L_dashdash_k_d_t (ndarray): hourly solar-corrected load, kitchen faucet (MJ/h)
        L_dashdash_s_d_t (ndarray): hourly solar-corrected load, bathroom shower faucet (MJ/h)
        L_dashdash_w_d_t (ndarray): hourly solar-corrected load, washbasin faucet (MJ/h)
        L_dashdash_b1_d_t (ndarray): hourly solar-corrected load, bathtub faucet filling (MJ/h)
        L_dashdash_b2_d_t (ndarray): hourly solar-corrected load, bathtub reheating (MJ/h)
        L_dashdash_ba1_d_t (ndarray): hourly solar-corrected load, bathtub faucet topping-up (MJ/h)
        L_dashdash_ba2_d_t (ndarray): hourly solar-corrected load, bathtub reheating (MJ/h)

    Returns:
        ndarray: hourly kerosene consumption of the water heater (MJ/h)

    Raises:
        ValueError: if hw_type is not a recognized unit type
    """
    if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
            or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
        return gas.get_E_K_hs_d_t()
    elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
            or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
        # Oil-fired units are the only ones that actually consume kerosene
        return oil.calc_E_K_hs_d_t(
            hw_type=hw_type,
            bath_function=bath_function,
            e_rtd=e_rtd,
            e_dash_rtd=e_dash_rtd,
            theta_ex_d_Ave_d=theta_ex_d_Ave_d,
            L_dashdash_k_d_t=L_dashdash_k_d_t,
            L_dashdash_s_d_t=L_dashdash_s_d_t,
            L_dashdash_w_d_t=L_dashdash_w_d_t,
            L_dashdash_b1_d_t=L_dashdash_b1_d_t,
            L_dashdash_b2_d_t=L_dashdash_b2_d_t,
            L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
            L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
        )
    elif hw_type == '電気ヒートポンプ給湯機':
        return eheatpump.get_E_K_hs_d_t()
    elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
        return eheater.get_E_K_hs()
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
        return gas_hybrid.get_E_K_hs()
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
        return hybrid_gas.get_E_K_hs_d_t()
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return hybrid_gas.get_E_K_hs_d_t()
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return whybrid.get_E_K_hs_d_t()
    elif hw_type == 'コージェネレーションを使用する':
        # NOTE(review): this branch returns a 365-element (daily) array while
        # the other branches are hourly series — confirm intended length.
        return np.zeros(365)
    else:
        raise ValueError(hw_type)
def get_normalized_bath_function(hw_type, bath_function):
    """Table 4: normalize to an evaluable water heater / bath-function pair.

    For hybrid (electric heat pump + instantaneous gas) unit types the bath
    function is forced to "ふろ給湯機(追焚あり)" (bath unit with reheating);
    for all other recognized types the caller-supplied bath_function is
    passed through unchanged.

    Args:
        hw_type (str): type of water heater / hot-water heating unit
        bath_function (str): type of bath function

    Returns:
        str: bath function usable by the evaluation

    Raises:
        ValueError: if hw_type is not a recognized unit type
    """
    if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
            or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
        return bath_function
    elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
            or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
        return bath_function
    elif hw_type == '電気ヒートポンプ給湯機':
        return bath_function
    elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
        return bath_function
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
            or hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)' \
            or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
        # Hybrid units are always evaluated as a bath unit with reheating
        return "ふろ給湯機(追焚あり)"
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return "ふろ給湯機(追焚あり)"
    elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
        return "ふろ給湯機(追焚あり)"
    elif hw_type == 'コージェネレーションを使用する':
        return bath_function
    else:
        raise ValueError(hw_type)
# ============================================================================
# 7. 太陽熱補正給湯熱負荷
# ============================================================================
def calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                          L_sun_d_t):
    """Hourly solar-corrected hot water load at the kitchen faucet (MJ/h) (4a).

    The solar contribution L_sun_d_t is apportioned to the kitchen faucet
    in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected hot water load at the kitchen faucet (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    # Only hours with a positive total load receive a solar offset
    positive = total_load > 0
    share = L_dash_k_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_k_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                          L_sun_d_t):
    """Hourly solar-corrected hot water load at the bathroom shower faucet (MJ/h) (4b).

    The solar contribution L_sun_d_t is apportioned to the shower faucet
    in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected hot water load at the shower faucet (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    positive = total_load > 0
    share = L_dash_s_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_s_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                          L_sun_d_t):
    """Hourly solar-corrected hot water load at the washbasin faucet (MJ/h) (4c).

    The solar contribution L_sun_d_t is apportioned to the washbasin faucet
    in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected hot water load at the washbasin faucet (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    positive = total_load > 0
    share = L_dash_w_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_w_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                           L_sun_d_t):
    """Hourly solar-corrected hot water load for bathtub faucet filling (MJ/h) (4d).

    The solar contribution L_sun_d_t is apportioned to bathtub faucet filling
    in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected load for bathtub faucet filling (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    positive = total_load > 0
    share = L_dash_b1_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_b1_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                           L_sun_d_t):
    """Hourly solar-corrected hot water load for automatic bathtub filling (MJ/h) (4e).

    The solar contribution L_sun_d_t is apportioned to automatic bathtub
    filling in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected load for automatic bathtub filling (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    positive = total_load > 0
    share = L_dash_b2_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_b2_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
                            L_sun_d_t):
    """Hourly solar-corrected hot water load for bathtub faucet topping-up (MJ/h) (4f).

    The solar contribution L_sun_d_t is apportioned to bathtub topping-up
    in proportion to its share of the total water-saving-corrected load.

    Args:
        L_dash_k_d_t (ndarray): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray): hourly water-saving-corrected load, bathtub reheating (MJ/h)
        L_dash_ba1_d_t (ndarray): hourly water-saving-corrected load, bathtub faucet topping-up (MJ/h)
        L_sun_d_t (ndarray): hourly corrected solar heat collected (MJ/h)

    Returns:
        ndarray: hourly solar-corrected load for bathtub faucet topping-up (MJ/h)
    """
    total_load = (L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t
                  + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t)
    corrected = np.zeros(24 * 365)
    positive = total_load > 0
    share = L_dash_ba1_d_t[positive] / total_load[positive]
    corrected[positive] = L_dash_ba1_d_t[positive] - L_sun_d_t[positive] * share
    return corrected
def get_L_dashdash_ba2_d_t(L_dash_ba2_d_t):
    """Hourly solar-corrected hot water load for bathtub reheating (MJ/h) (4g).

    No solar correction is applied to the reheating load, so the
    water-saving-corrected series is returned unchanged.

    Args:
        L_dash_ba2_d_t (ndarray): hourly water-saving-corrected reheating load (MJ/h)

    Returns:
        ndarray: hourly solar-corrected reheating load (MJ/h)
    """
    return L_dash_ba2_d_t
def calc_L_sun_d_t(region, sol_region=None, solar_device=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None,
                   W_tnk_ss=None, hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None,
                   V_fan_P0=None, d0=None,
                   d1=None, m_fan_test=None, W_tnk_ass=None, Theta_wtr_d=None, L_dash_k_d_t=None, L_dash_s_d_t=None,
                   L_dash_w_d_t=None, L_dash_b1_d_t=None, L_dash_b2_d_t=None, L_dash_ba1_d_t=None):
    """Hourly corrected heat collected by the solar hot-water system (MJ/h).

    Dispatches on the solar device type: liquid-collection systems delegate
    to the `lss` module, air-collection systems run the `ass` collector /
    tank pipeline below, and no device yields an all-zero series.

    Args:
        region (int): energy-efficiency region
        sol_region (int, optional): annual solar-radiation region
        solar_device (str, optional): solar device type
            ('液体集熱式' = liquid collection, '空気集熱式' = air collection, or None)
        ls_type (str, optional): liquid-collection system type
            (solar water heater or solar system)
        A_sp (float, optional): effective collector area (m2)
        P_alpha_sp (float, optional): collector azimuth (deg)
        P_beta_sp (float, optional): collector tilt (deg)
        W_tnk_ss (float, optional): solar-system tank capacity (L)
        W_tnk_ass (float, optional): tank capacity for the air-collection system (L)
        Theta_wtr_d (ndarray, optional): daily average water supply temperature (deg C)
        L_dash_k_d_t (ndarray, optional): hourly water-saving-corrected load, kitchen faucet (MJ/h)
        L_dash_s_d_t (ndarray, optional): hourly water-saving-corrected load, bathroom shower faucet (MJ/h)
        L_dash_w_d_t (ndarray, optional): hourly water-saving-corrected load, washbasin faucet (MJ/h)
        L_dash_b1_d_t (ndarray, optional): hourly water-saving-corrected load, bathtub faucet filling (MJ/h)
        L_dash_b2_d_t (ndarray, optional): hourly water-saving-corrected load, automatic bathtub filling (MJ/h)
        L_dash_ba1_d_t (ndarray, optional): hourly water-saving-corrected load, bathtub topping-up (MJ/h)
        hotwater_use: whether the air-collection system serves hot water
        heating_flag_d: daily heating-operation flags for the air-collection system
        A_col, P_alpha, P_beta: air-collector area / azimuth / tilt
        V_fan_P0, d0, d1, m_fan_test: air-collector fan and test parameters

    Returns:
        ndarray: hourly corrected solar heat collected (MJ/h)

    Raises:
        ValueError: if solar_device is not a recognized type
    """
    if solar_device == '液体集熱式':
        # Liquid-collection system: fully handled by the lss module
        return lss.calc_L_sun_lss_d_t(
            region=region,
            sol_region=sol_region,
            ls_type=ls_type,
            A_sp=A_sp,
            P_alpha_sp=P_alpha_sp,
            P_beta_sp=P_beta_sp,
            W_tnk_ss=W_tnk_ss,
            Theta_wtr_d=Theta_wtr_d,
            L_dash_k_d_t=L_dash_k_d_t,
            L_dash_s_d_t=L_dash_s_d_t,
            L_dash_w_d_t=L_dash_w_d_t,
            L_dash_b1_d_t=L_dash_b1_d_t,
            L_dash_b2_d_t=L_dash_b2_d_t,
            L_dash_ba1_d_t=L_dash_ba1_d_t
        )
    elif solar_device == '空気集熱式':
        if hotwater_use == True:
            # Air-collection pipeline: collector temperatures -> fan / pump
            # operating hours -> collected heat -> tank load -> allocation
            outdoor = load_outdoor()
            Theta_ex_d_t = get_Theta_ex(region, outdoor)
            Theta_col_nonopg_d_t, Theta_col_opg_d_t = ass.calc_Theta_col(A_col, P_alpha, P_beta, V_fan_P0, d0, d1,
                                                                         m_fan_test, region, sol_region, Theta_ex_d_t)
            t_fan_d_t = ass.get_t_fan_d_t(Theta_col_nonopg_d_t, Theta_col_opg_d_t)
            t_cp_d_t = ass.get_t_cp_d_t(hotwater_use, t_fan_d_t, heating_flag_d)
            V_fan_d_t = ass.get_V_fan_d_t(t_fan_d_t, V_fan_P0)
            Q_col_d_t = ass.get_Q_col_d_t(V_fan_d_t, Theta_col_opg_d_t, Theta_ex_d_t)
            Q_d = ass.calc_Q_d(Q_col_d_t, t_cp_d_t)
            L_tnk_d = ass.calc_L_tnk_d(Q_d, W_tnk_ass, Theta_wtr_d)
            return ass.calc_L_sun_ass_d_t(L_tnk_d, L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t,
                                          L_dash_b2_d_t, L_dash_ba1_d_t)
        else:
            # Air collection not used for hot water: no contribution
            return np.zeros(24 * 365)
    elif solar_device is None:
        return np.zeros(24 * 365)
    else:
        raise ValueError(solar_device)
# ============================================================================
# 8. 節湯補正給湯熱負荷
# ============================================================================
def get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d):
    """Water-saving-corrected hot water load at the kitchen faucet (MJ/h) (5a).

    Args:
        W_dash_k_d_t (ndarray): hourly water-saving-corrected volume, kitchen faucet (L/h)
        Theta_sw_k (int): standard supply temperature, kitchen faucet (deg C)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: water-saving-corrected hot water load at the kitchen faucet (MJ/h)
    """
    # Temperature rise from supply water to the standard supply temperature,
    # expanded from daily to hourly resolution
    temperature_rise = Theta_sw_k - np.repeat(Theta_wtr_d, 24)
    # 4.186e-3 MJ/(L*K): volumetric heat capacity of water
    return W_dash_k_d_t * temperature_rise * 4.186 * 10 ** (-3)
def get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d):
    """Water-saving-corrected hot water load at the bathroom shower faucet (MJ/h) (5b).

    Args:
        W_dash_s_d_t (ndarray): hourly water-saving-corrected volume, bathroom shower (L/h)
        Theta_sw_s (int): standard supply temperature, bathroom shower (deg C)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: water-saving-corrected hot water load at the shower faucet (MJ/h)
    """
    # Daily supply-water temperatures expanded to hourly resolution
    temperature_rise = Theta_sw_s - np.repeat(Theta_wtr_d, 24)
    # 4.186e-3 MJ/(L*K): volumetric heat capacity of water
    return W_dash_s_d_t * temperature_rise * 4.186 * 10 ** (-3)
def get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d):
    """Water-saving-corrected hot water load at the washbasin faucet (MJ/h) (5c).

    Args:
        W_dash_w_d_t (ndarray): hourly water-saving-corrected volume, washbasin faucet (L/h)
        Theta_sw_w (int): standard supply temperature, washbasin faucet (deg C)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: water-saving-corrected hot water load at the washbasin faucet (MJ/h)
    """
    # Daily supply-water temperatures expanded to hourly resolution
    temperature_rise = Theta_sw_w - np.repeat(Theta_wtr_d, 24)
    # 4.186e-3 MJ/(L*K): volumetric heat capacity of water
    return W_dash_w_d_t * temperature_rise * 4.186 * 10 ** (-3)
def get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bash_function):
    """Water-saving-corrected hot water loads for bathtub filling, L_dash_b1 / L_dash_b2.

    Which of the two filling loads is non-zero depends on the bath function:
    supply-only units fill through the faucet (b1), bath units fill
    automatically (b2); without a bathroom both are zero.

    Args:
        W_dash_b1_d_t (ndarray): hourly water-saving-corrected volume, bathtub faucet filling (L/h)
        W_dash_b2_d_t (ndarray): hourly water-saving-corrected volume, automatic bathtub filling (L/h)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)
        has_bath (bool): whether a bathroom is present
        bash_function (str): type of bath function

    Returns:
        tuple: (L_dash_b1_d_t, L_dash_b2_d_t) water-saving-corrected loads (MJ/h)

    Raises:
        ValueError: if bash_function is not a recognized type
    """
    hours = 24 * 365
    if has_bath == False:
        # (5-1d), (5-1e): no bathroom, no filling load
        return np.zeros(hours), np.zeros(hours)
    elif bash_function == '給湯単機能':
        # (5-2d), (5-2e): supply-only unit fills through the bathtub faucet
        rise = get_Theta_sw_b1() - np.repeat(Theta_wtr_d, 24)
        return W_dash_b1_d_t * rise * 4.186 * 10 ** (-3), np.zeros(hours)
    elif bash_function == 'ふろ給湯機(追焚あり)' or bash_function == 'ふろ給湯機(追焚なし)':
        # (5-3d), (5-3e): bath unit fills automatically
        rise = get_Theta_sw_b2() - np.repeat(Theta_wtr_d, 24)
        return np.zeros(hours), W_dash_b2_d_t * rise * 4.186 * 10 ** (-3)
    else:
        raise ValueError(bash_function)
def get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bash_function):
    """Water-saving-corrected hot water loads for bathtub topping-up / reheating.

    Units without a reheating function top the bath up through the faucet
    (ba1); units with reheating carry the reheating load with a 25 % margin
    (ba2); without a bathroom both are zero.

    Args:
        W_dash_ba1_d_t (ndarray): hourly water-saving-corrected volume, bathtub topping-up (L/h)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)
        L_ba_d_t (ndarray): hourly heat load from reheating the bathtub (MJ/h)
        has_bath (bool): whether a bathroom is present
        bash_function (str): type of bath function

    Returns:
        tuple: (L_dash_ba1_d_t, L_dash_ba2_d_t) water-saving-corrected loads (MJ/h)

    Raises:
        ValueError: if bash_function is not a recognized type
    """
    hours = 24 * 365
    if has_bath == False:
        # (5-1f), (5-1g): no bathroom, no topping-up or reheating load
        return np.zeros(hours), np.zeros(hours)
    elif bash_function == '給湯単機能' or bash_function == 'ふろ給湯機(追焚なし)':
        # (5-2f), (5-2g): topping-up through the bathtub faucet
        rise = get_Theta_sw_ba1() - np.repeat(Theta_wtr_d, 24)
        return W_dash_ba1_d_t * rise * 4.186 * 10 ** (-3), np.zeros(hours)
    elif bash_function == 'ふろ給湯機(追焚あり)':
        # (5-3f), (5-3g): reheating load with a 25 % margin
        return np.zeros(hours), L_ba_d_t * 1.25
    else:
        raise ValueError(bash_function)
def get_Theta_sw_k():
    """Standard hot-water supply temperature for the kitchen faucet (deg C, Table 5).

    Returns:
        int: standard supply temperature for the kitchen faucet
    """
    table_5 = get_table_5()
    return table_5[0]
def get_Theta_sw_s():
    """Standard hot-water supply temperature for the bathroom shower faucet (deg C, Table 5).

    Returns:
        int: standard supply temperature for the bathroom shower faucet
    """
    table_5 = get_table_5()
    return table_5[1]
def get_Theta_sw_w():
    """Standard hot-water supply temperature for the washbasin faucet (deg C, Table 5).

    Returns:
        int: standard supply temperature for the washbasin faucet
    """
    table_5 = get_table_5()
    return table_5[2]
def get_Theta_sw_b1():
    """Standard hot-water supply temperature for bathtub faucet filling (deg C, Table 5).

    Returns:
        int: standard supply temperature for bathtub faucet filling
    """
    table_5 = get_table_5()
    return table_5[3]
def get_Theta_sw_b2():
    """Standard hot-water supply temperature for automatic bathtub filling (deg C, Table 5).

    Returns:
        int: standard supply temperature for automatic bathtub filling
    """
    table_5 = get_table_5()
    return table_5[4]
def get_Theta_sw_ba1():
    """Standard hot-water supply temperature for bathtub faucet topping-up (deg C, Table 5).

    Returns:
        int: standard supply temperature for bathtub faucet topping-up
    """
    table_5 = get_table_5()
    return table_5[5]
def get_table_5():
    """Table 5: standard hot-water supply temperature per use (deg C).

    Order: kitchen faucet, bathroom shower, washbasin, bathtub faucet
    filling, automatic bathtub filling (all 40), bathtub topping-up (60).

    Returns:
        list: standard supply temperatures per use
    """
    return [40] * 5 + [60]
# ============================================================================
# 9. 節湯補正給湯量
# ============================================================================
def calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d):
    """Hourly water-saving-corrected hot water volume at the kitchen faucet (L/h) (6a).

    Args:
        W_k_d_t (ndarray): hourly reference volume at the kitchen faucet (L/h)
        kitchen_watersaving_A (bool): kitchen faucet has a hand-stop function
        kitchen_watersaving_C (bool): kitchen faucet has a cold-water-priority function
        pipe_diameter (str): pipe diameter after the header branch
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: hourly water-saving-corrected volume at the kitchen faucet (L/h)
    """
    # Effect coefficient of the piping
    f_sp = watersaving.get_f_sp(pipe_diameter)
    # Effect coefficient of the kitchen faucet's water-saving features
    # (daily series, expanded to hourly below)
    f_sk = watersaving.get_f_sk(kitchen_watersaving_A, kitchen_watersaving_C, Theta_wtr_d)
    return W_k_d_t * np.repeat(f_sk, 24) * f_sp
def calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter):
    """Hourly water-saving-corrected hot water volume at the bathroom shower (L/h) (6a).

    Args:
        W_s_d_t (ndarray): hourly reference volume at the bathroom shower (L/h)
        shower_watersaving_A (bool): shower faucet has a hand-stop function
        shower_watersaving_B (bool): shower faucet has a low-flow function
        pipe_diameter (str): pipe diameter after the header branch

    Returns:
        ndarray: hourly water-saving-corrected volume at the bathroom shower (L/h)
    """
    # Effect coefficient of the piping
    f_sp = watersaving.get_f_sp(pipe_diameter)
    # Effect coefficient of the shower faucet's water-saving features
    f_ss = watersaving.get_f_ss(shower_watersaving_A, shower_watersaving_B)
    return W_s_d_t * f_ss * f_sp
def calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d):
    """Hourly water-saving-corrected hot water volume at the washbasin faucet (L/h) (6c).

    Args:
        W_w_d_t (ndarray): hourly reference volume at the washbasin faucet (L/h)
        washbowl_watersaving_C (bool): washbasin faucet has a cold-water-priority function
        pipe_diameter (str): pipe diameter after the header branch
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: hourly water-saving-corrected volume at the washbasin faucet (L/h)
    """
    # Effect coefficient of the washbasin faucet (daily, expanded to hourly below)
    f_sw = watersaving.get_f_sw(washbowl_watersaving_C, Theta_wtr_d)
    # Effect coefficient of the piping
    f_sp = watersaving.get_f_sp(pipe_diameter)
    return W_w_d_t * np.repeat(f_sw, 24) * f_sp
def calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter):
    """Hourly water-saving-corrected hot water volume for bathtub faucet filling (L/h) (6d).

    Args:
        W_b1_d_t (ndarray): hourly reference volume for bathtub faucet filling (L/h)
        pipe_diameter (str): pipe diameter after the header branch

    Returns:
        ndarray: hourly water-saving-corrected volume for bathtub faucet filling (L/h)
    """
    # Effect coefficient of the bathtub
    f_sb = watersaving.get_f_sb()
    # Effect coefficient of the piping
    f_sp = watersaving.get_f_sp(pipe_diameter)
    return W_b1_d_t * f_sp * f_sb
def calc_W_dash_b2_d_t(W_b2_d_t):
    """Hourly water-saving-corrected hot water volume for automatic bathtub filling (L/h) (6e).

    Args:
        W_b2_d_t (ndarray): hourly reference volume for automatic bathtub filling (L/h)

    Returns:
        ndarray: hourly water-saving-corrected volume for automatic bathtub filling (L/h)
    """
    # Only the bathtub effect coefficient applies to automatic filling
    return W_b2_d_t * watersaving.get_f_sb()
def calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter):
    """Hourly water-saving-corrected hot water volume for bathtub faucet topping-up (L/h) (6f).

    Args:
        W_ba1_d_t (ndarray): hourly reference volume for bathtub faucet topping-up (L/h)
        pipe_diameter (str): pipe diameter after the header branch

    Returns:
        ndarray: hourly water-saving-corrected volume for bathtub topping-up (L/h)
    """
    # Only the piping effect coefficient applies to topping-up
    return W_ba1_d_t * watersaving.get_f_sp(pipe_diameter)
# ============================================================================
# 10. 基準給湯量
# ============================================================================
def calc_W_k_d_t(n_p, schedule_hw):
    """Hourly reference hot water volume at the kitchen faucet (L/h) (7a).

    Integer occupant counts use the tabulated profile directly; fractional
    counts are linearly interpolated between the bracketing integer profiles.

    Args:
        n_p (float): assumed number of occupants (1 <= n_p <= 4)
        schedule_hw (ndarray): hot water schedule

    Returns:
        ndarray: hourly reference hot water volume at the kitchen faucet (L/h)

    Raises:
        ValueError: if n_p is outside [1, 4]
    """
    if n_p in [1, 2, 3, 4]:
        return calc_W_k_p_d_t(n_p, schedule_hw)
    elif 1 <= n_p and n_p <= 2:
        W_k_1_d_t = calc_W_k_p_d_t(1, schedule_hw)
        W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw)
        return W_k_1_d_t * (2 - n_p) / (2 - 1) + W_k_2_d_t * (n_p - 1) / (2 - 1)
    elif 2 <= n_p and n_p <= 3:
        W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw)
        W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw)
        return W_k_2_d_t * (3 - n_p) / (3 - 2) + W_k_3_d_t * (n_p - 2) / (3 - 2)
    elif 3 <= n_p and n_p <= 4:
        W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw)
        W_k_4_d_t = calc_W_k_p_d_t(4, schedule_hw)
        return W_k_3_d_t * (4 - n_p) / (4 - 3) + W_k_4_d_t * (n_p - 3) / (4 - 3)
    else:
        # Bug fix: the original fell through and implicitly returned None for
        # n_p outside [1, 4]; fail loudly, consistent with calc_W_w_d_t.
        raise ValueError(n_p)
def calc_W_s_d_t(n_p, schedule_hw, has_bath):
    """Hourly reference hot water volume at the bathroom shower faucet (7b).

    Integer occupant counts use the tabulated profile directly; fractional
    counts are linearly interpolated between the bracketing integer profiles.

    Args:
        n_p (float): assumed number of occupants (1 <= n_p <= 4)
        schedule_hw (ndarray): hot water schedule
        has_bath (bool): whether a bathroom is present

    Returns:
        ndarray: hourly reference hot water volume at the bathroom shower (L/h)

    Raises:
        ValueError: if n_p is outside [1, 4]
    """
    if n_p in [1, 2, 3, 4]:
        return calc_W_s_p_d_t(n_p, schedule_hw, has_bath)
    elif 1 <= n_p and n_p <= 2:
        W_s_1_d_t = calc_W_s_p_d_t(1, schedule_hw, has_bath)
        W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath)
        return W_s_1_d_t * (2 - n_p) / (2 - 1) + W_s_2_d_t * (n_p - 1) / (2 - 1)
    elif 2 <= n_p and n_p <= 3:
        W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath)
        W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath)
        return W_s_2_d_t * (3 - n_p) / (3 - 2) + W_s_3_d_t * (n_p - 2) / (3 - 2)
    elif 3 <= n_p and n_p <= 4:
        W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath)
        W_s_4_d_t = calc_W_s_p_d_t(4, schedule_hw, has_bath)
        return W_s_3_d_t * (4 - n_p) / (4 - 3) + W_s_4_d_t * (n_p - 3) / (4 - 3)
    else:
        # Bug fix: the original fell through and implicitly returned None for
        # n_p outside [1, 4]; fail loudly, consistent with calc_W_w_d_t.
        raise ValueError(n_p)
def calc_W_w_d_t(n_p, schedule_hw):
    """Hourly reference hot water volume at the washbasin faucet (7c).

    Integer occupant counts use the tabulated profile directly; fractional
    counts are linearly interpolated between the bracketing integer profiles.

    Args:
        n_p (float): assumed number of occupants (1 <= n_p <= 4)
        schedule_hw (ndarray): hot water schedule

    Returns:
        ndarray: hourly reference hot water volume at the washbasin faucet (L/h)

    Raises:
        ValueError: if n_p is outside [1, 4]
    """
    if n_p in [1, 2, 3, 4]:
        return calc_W_w_p_d_t(n_p, schedule_hw)
    # Linear interpolation between the bracketing integer occupant counts
    for lower in (1, 2, 3):
        upper = lower + 1
        if lower <= n_p <= upper:
            W_lower = calc_W_w_p_d_t(lower, schedule_hw)
            W_upper = calc_W_w_p_d_t(upper, schedule_hw)
            return (W_lower * (upper - n_p) / (upper - lower)
                    + W_upper * (n_p - lower) / (upper - lower))
    raise ValueError(n_p)
def get_schedule_pattern_list():
    """Daily living-schedule pattern names, in table-column order.

    Returns:
        list: living-schedule pattern names
    """
    return [
        '休日在宅(大)',
        '休日在宅(小)',
        '平日(大)',
        '平日(中)',
        '平日(小)',
        '休日外出',
    ]
def calc_W_k_p_d_t(p, schedule_hw):
    """Hourly reference hot water volume at the kitchen faucet for p occupants (L/h).

    Args:
        p (float): number of occupants
        schedule_hw (ndarray): daily hot water schedule patterns (365 entries)

    Returns:
        ndarray: hourly reference volume at the kitchen faucet (L/h)
    """
    # Daily profile table for p occupants
    profile = schedule.get_table_m_for_p(p)
    W_k_p_d_t = np.zeros(24 * 365)
    # Fill in pattern by pattern (column 0 = kitchen faucet)
    for idx, pattern in enumerate(get_schedule_pattern_list()):
        hours = np.repeat(schedule_hw == pattern, 24)
        W_k_p_d_t[hours] = np.tile(profile[idx][:, 0], 365)[hours]
    return W_k_p_d_t
def calc_W_s_p_d_t(p, schedule_hw, has_bath):
    """Hourly reference hot water volume at the bathroom shower for p occupants (L/h).

    Args:
        p (float): number of occupants
        schedule_hw (ndarray): daily hot water schedule patterns (365 entries)
        has_bath (bool): whether a bathroom is present (selects the table column)

    Returns:
        ndarray: hourly reference volume at the bathroom shower (L/h)
    """
    # Daily profile table for p occupants
    profile = schedule.get_table_m_for_p(p)
    # Column of the schedule table to read (with / without a bathroom)
    column = 1 if has_bath else 2
    W_s_p_d_t = np.zeros(24 * 365)
    # Fill in pattern by pattern
    for idx, pattern in enumerate(get_schedule_pattern_list()):
        hours = np.repeat(schedule_hw == pattern, 24)
        W_s_p_d_t[hours] = np.tile(profile[idx][:, column], 365)[hours]
    return W_s_p_d_t
def calc_W_w_p_d_t(p, schedule_hw):
    """Hourly reference hot water volume at the washbasin faucet for p occupants (L/h).

    Args:
        p (float): number of occupants
        schedule_hw (ndarray): daily hot water schedule patterns (365 entries)

    Returns:
        ndarray: hourly reference volume at the washbasin faucet (L/h)
    """
    # Daily profile table for p occupants
    profile = schedule.get_table_m_for_p(p)
    W_w_p_d_t = np.zeros(24 * 365)
    # Fill in pattern by pattern (column 3 = washbasin faucet)
    for idx, pattern in enumerate(get_schedule_pattern_list()):
        hours = np.repeat(schedule_hw == pattern, 24)
        W_w_p_d_t[hours] = np.tile(profile[idx][:, 3], 365)[hours]
    return W_w_p_d_t
def calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function):
    """Reference hot water volume for bathtub faucet filling (L/h).

    Args:
        n_p (float): assumed number of occupants
        schedule_hw (ndarray): hot water schedule
        has_bath (bool): whether a bathroom is present
        bath_function (str): type of bath function

    Returns:
        ndarray: reference hot water volume for bathtub faucet filling (L/h)

    Raises:
        ValueError: if bath_function is not a recognized type
    """
    if bath_function == '給湯単機能':
        # Supply-only units fill the bathtub through the faucet
        return calc_W_b_d_t(n_p, schedule_hw, has_bath)
    if bath_function in ('ふろ給湯機(追焚なし)', 'ふろ給湯機(追焚あり)'):
        # Bath units fill automatically, not through the faucet
        return np.zeros(24 * 365)
    raise ValueError(bath_function)
def calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function):
    """Reference hot water volume for automatic bathtub filling (L/h).

    Args:
        n_p (float): assumed number of occupants
        schedule_hw (ndarray): hot water schedule
        has_bath (bool): whether a bathroom is present
        bath_function (str): type of bath function

    Returns:
        ndarray: reference hot water volume for automatic bathtub filling (L/h)

    Raises:
        ValueError: if bath_function is not a recognized type
    """
    if bath_function in ('ふろ給湯機(追焚なし)', 'ふろ給湯機(追焚あり)'):
        # Bath units fill the bathtub automatically
        return calc_W_b_d_t(n_p, schedule_hw, has_bath)
    if bath_function == '給湯単機能':
        # Supply-only units fill through the faucet, not automatically
        return np.zeros(24 * 365)
    raise ValueError(bath_function)
def calc_W_b_d_t(n_p, schedule_hw, has_bath):
    """Hourly reference hot water volume for filling the bathtub (L/h) (8).

    Integer occupant counts use the tabulated profile directly; fractional
    counts are linearly interpolated between the bracketing integer profiles.

    Args:
        n_p (float): assumed number of occupants (1 <= n_p <= 4)
        schedule_hw (ndarray): hot water schedule
        has_bath (bool): whether a bathroom is present

    Returns:
        ndarray: hourly reference hot water volume for bathtub filling (L/h)

    Raises:
        ValueError: if n_p is outside [1, 4]
    """
    if n_p in [1, 2, 3, 4]:
        return calc_W_b_p_d_t(n_p, schedule_hw, has_bath)
    # Linear interpolation between the bracketing integer occupant counts
    for lower in (1, 2, 3):
        upper = lower + 1
        if lower <= n_p <= upper:
            W_lower = calc_W_b_p_d_t(lower, schedule_hw, has_bath)
            W_upper = calc_W_b_p_d_t(upper, schedule_hw, has_bath)
            return (W_lower * (upper - n_p) / (upper - lower)
                    + W_upper * (n_p - lower) / (upper - lower))
    raise ValueError(n_p)
def calc_W_b_p_d_t(p, schedule_hw, has_bath):
    """Hourly reference hot water volume for bathtub filling for p occupants (L/h).

    Args:
        p (float): number of occupants
        schedule_hw (ndarray): daily hot water schedule patterns (365 entries)
        has_bath (bool): whether a bathroom is present (selects the table column)

    Returns:
        ndarray: hourly reference volume for bathtub filling (L/h)
    """
    # Daily profile table for p occupants
    profile = schedule.get_table_m_for_p(p)
    # Column of the schedule table to read (with / without a bathroom)
    column = 4 if has_bath else 5
    W_b_p_d_t = np.zeros(24 * 365)
    # Fill in pattern by pattern
    for idx, pattern in enumerate(get_schedule_pattern_list()):
        hours = np.repeat(schedule_hw == pattern, 24)
        W_b_p_d_t[hours] = np.tile(profile[idx][:, column], 365)[hours]
    return W_b_p_d_t
def calc_n_b_p_d_t(p, schedule_hw, has_bath):
    """Hourly number of bathers for a household of p occupants (persons/h).

    Args:
        p (float): number of occupants
        schedule_hw (ndarray): daily hot water schedule patterns (365 entries)
        has_bath (bool): whether a bathroom is present (selects the table column)

    Returns:
        ndarray: hourly number of bathers (persons/h)
    """
    # Daily profile table for p occupants
    profile = schedule.get_table_m_for_p(p)
    # Column of the schedule table to read (with / without a bathroom)
    column = 6 if has_bath else 7
    n_b_p_d_t = np.zeros(24 * 365)
    # Fill in pattern by pattern
    for idx, pattern in enumerate(get_schedule_pattern_list()):
        hours = np.repeat(schedule_hw == pattern, 24)
        n_b_p_d_t[hours] = np.tile(profile[idx][:, column], 365)[hours]
    return n_b_p_d_t
def calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d):
    """Reference hot water volume for bathtub faucet topping-up (L/h) (9).

    Args:
        bath_function (str): type of bath function
        L_ba_d_t (ndarray): hourly heat load from reheating the bathtub (MJ/h)
        Theta_wtr_d (ndarray): daily average water supply temperature (deg C)

    Returns:
        ndarray: reference hot water volume for bathtub topping-up (L/h)

    Raises:
        ValueError: if bath_function is not a recognized type
    """
    if bath_function in ('給湯単機能', 'ふろ給湯機(追焚なし)'):
        # Standard supply temperature for topping-up (Table 5)
        Theta_sw_ba1 = get_Theta_sw_ba1()
        # Convert the heat load back into a hot water volume
        return L_ba_d_t * (1.0 / (Theta_sw_ba1 - np.repeat(Theta_wtr_d, 24))) * (1.0 / 4.186) * 10 ** 3
    if bath_function == 'ふろ給湯機(追焚あり)':
        # Units with a reheating function do not top up through the faucet
        return np.zeros(24 * 365)
    raise ValueError(bath_function)
# ============================================================================
# 11. 浴槽沸かし直しによる給湯熱負荷
# ============================================================================
def calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p):
    """Hot water load from reheating the bathtub (MJ/h) (10).

    The load is linearly interpolated between the two integer occupant
    counts bracketing n_p.

    Args:
        bath_insulation (bool): whether the bathtub is insulated
        schedule_hw (ndarray): hot water schedule
        has_bath (bool): whether a bathroom is present
        theta_ex_d_Ave_d (ndarray): daily average outdoor temperature (deg C)
        n_p (float): assumed number of occupants (1 <= n_p <= 4)

    Returns:
        ndarray: hourly hot water load from reheating the bathtub (MJ/h)

    Raises:
        ValueError: if n_p is outside [1, 4]
    """
    for lower in (1, 2, 3):
        upper = lower + 1
        if lower <= n_p <= upper:
            n_b_lower = calc_n_b_p_d_t(lower, schedule_hw, has_bath)
            n_b_upper = calc_n_b_p_d_t(upper, schedule_hw, has_bath)
            L_ba_lower = calc_L_ba_p_d_t(lower, bath_insulation, n_b_lower, theta_ex_d_Ave_d)
            L_ba_upper = calc_L_ba_p_d_t(upper, bath_insulation, n_b_upper, theta_ex_d_Ave_d)
            return (L_ba_lower * (upper - n_p) / (upper - lower)
                    + L_ba_upper * (n_p - lower) / (upper - lower))
    raise ValueError(n_p)
def calc_L_ba_p_d_t(p, bath_insulation, n_b_p_d_t, theta_ex_d_Ave_d):
    """Hot water load from reheating the bathtub for p occupants (11).

    The daily reheating load (a linear function of the daily average outdoor
    temperature) is distributed over the day's hours in proportion to the
    hourly number of bathers.

    Args:
        p (float): number of occupants
        bath_insulation (bool): whether the bathtub is insulated
        n_b_p_d_t (ndarray): hourly number of bathers for p occupants (persons/h)
        theta_ex_d_Ave_d (ndarray): daily average outdoor temperature (deg C)

    Returns:
        ndarray: hourly reheating hot water load for p occupants (MJ/h)
    """
    # Coefficients a_ba, b_ba of equation (11)
    a_ba_p_d, b_ba_p_d = get_coeff_eq11(bath_insulation, p, theta_ex_d_Ave_d)
    # Expand the daily series to hourly resolution
    a_hourly = np.repeat(a_ba_p_d, 24)
    b_hourly = np.repeat(b_ba_p_d, 24)
    theta_hourly = np.repeat(theta_ex_d_Ave_d, 24)
    # Total daily number of bathers, repeated for each hour of the day
    daily_baths = np.repeat(np.sum(n_b_p_d_t.reshape(365, 24), axis=1), 24)
    L_ba_p_d_t = np.zeros(24 * 365)
    # Days with at least one bather get the daily load split per hour;
    # days with none stay zero
    bathing = daily_baths > 0
    daily_load = a_hourly[bathing] * theta_hourly[bathing] + b_hourly[bathing]
    L_ba_p_d_t[bathing] = daily_load * (n_b_p_d_t[bathing] / daily_baths[bathing])
    return L_ba_p_d_t
def get_coeff_eq11(bath_insulation, p, theta_ex_d_Ave_d):
    """Coefficients a_ba, b_ba of equation (11).

    Selects per-day coefficients from Table 6 according to bathtub
    insulation, occupant count, and the daily average outdoor temperature
    band (<7, 7-16, 16-25, >=25 deg C).

    Args:
        bath_insulation (bool): whether the bathtub is insulated
        p (float): number of occupants
        theta_ex_d_Ave_d (ndarray): daily average outdoor temperature (deg C)

    Returns:
        tuple: daily coefficient arrays (a_ba, b_ba)

    Raises:
        ValueError: if bath_insulation is neither True nor False
    """
    if bath_insulation == False:
        # ordinary bathtub
        y_off = 0
    elif bath_insulation == True:
        # highly insulated bathtub
        y_off = 1
    else:
        raise ValueError(bath_insulation)
    x_off = (4 - p) * 2
    # Hoisted: the original rebuilt the table with 16 separate get_table_6()
    # calls; fetch it once.
    table_6 = get_table_6()
    # below 7 deg C
    tmp_a = ([table_6[y_off][x_off + 0]] * 365) * (theta_ex_d_Ave_d < 7.0)
    tmp_b = ([table_6[y_off][x_off + 1]] * 365) * (theta_ex_d_Ave_d < 7.0)
    # 7 deg C or more and below 16 deg C
    tmp_a = tmp_a + ([table_6[y_off + 2][x_off + 0]] * 365) * (7.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 16.0)
    tmp_b = tmp_b + ([table_6[y_off + 2][x_off + 1]] * 365) * (7.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 16.0)
    # 16 deg C or more and below 25 deg C
    tmp_a = tmp_a + ([table_6[y_off + 4][x_off + 0]] * 365) * (16.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 25.0)
    tmp_b = tmp_b + ([table_6[y_off + 4][x_off + 1]] * 365) * (16.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 25.0)
    # 25 deg C or more
    tmp_a = tmp_a + ([table_6[y_off + 6][x_off + 0]] * 365) * (25.0 <= theta_ex_d_Ave_d)
    tmp_b = tmp_b + ([table_6[y_off + 6][x_off + 1]] * 365) * (25.0 <= theta_ex_d_Ave_d)
    return tmp_a, tmp_b
def get_table_6():
    """Table 6: coefficients a_ba, b_ba of equation (11).

    Row pairs cover the four outdoor-temperature bands (ordinary bathtub
    first, insulated second); each row holds (a, b) pairs indexed by
    occupant count via x_off = (4 - p) * 2 in get_coeff_eq11.

    Returns:
        list: coefficient rows (a, b pairs)
    """
    return [
        (-0.12, 6.00, -0.10, 4.91, -0.06, 3.02, 0.00, 0.00),
        (-0.07, 3.98, -0.06, 3.22, -0.04, 2.01, 0.00, 0.00),
        (-0.13, 6.04, -0.10, 4.93, -0.06, 3.04, 0.00, 0.00),
        (-0.08, 4.02, -0.06, 3.25, -0.04, 2.03, 0.00, 0.00),
        (-0.14, 6.21, -0.11, 5.07, -0.07, 3.13, 0.00, 0.00),
        (-0.09, 4.19, -0.07, 3.39, -0.04, 2.12, 0.00, 0.00),
        (-0.12, 5.81, -0.10, 4.77, -0.06, 2.92, 0.00, 0.00),
        (-0.07, 3.80, -0.06, 3.09, -0.04, 1.92, 0.00, 0.00),
    ]
# ============================================================================
# 12. 日平均給水温度
# ============================================================================
def get_Theta_wtr_d(region, Theta_ex_prd_Ave_d):
    """Daily mean feed-water temperature (deg C), Eq. (12).

    Args:
        region (int): energy-efficiency climate region (1-8).
        Theta_ex_prd_Ave_d (ndarray): period-average outdoor temperature (deg C).

    Returns:
        ndarray: daily mean feed-water temperature (deg C), floored at 0.5.
    """
    # Regression coefficients for the selected region (Table 7 is 0-indexed).
    a_wtr, b_wtr = get_table_7()[region - 1]
    # Linear regression on the period-average outdoor temperature; the
    # result is never allowed below 0.5 deg C.
    return np.maximum(a_wtr * Theta_ex_prd_Ave_d + b_wtr, 0.5)
def get_table_7():
    """Table 7: regression coefficients for the daily mean feed-water temperature.

    Returns:
        list: one (a_wtr, b_wtr) pair per climate region (regions 1-8).
    """
    return [
        (0.6639, 3.466),
        (0.6639, 3.466),
        (0.6054, 4.515),
        (0.6054, 4.515),
        (0.8660, 1.665),
        (0.8516, 2.473),
        (0.9223, 2.097),
        (0.6921, 7.167),
    ]
def get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d):
    """Period-average outdoor temperature (deg C), Eq. (13).

    For each day d, averages the daily mean outdoor temperature over the
    10 preceding days, wrapping around the year boundary.

    Args:
        theta_ex_d_Ave_d (ndarray): daily mean outdoor temperature (deg C),
            length 365.

    Returns:
        ndarray: period-average outdoor temperature per day (length 365).
    """
    # Prepend the final 10 days of the year and drop the last day, so a
    # 10-wide window starting at index d covers days d-10 .. d-1.
    extended = np.concatenate((theta_ex_d_Ave_d[-10:], theta_ex_d_Ave_d[:364]))
    # 10-tap moving average: convolution with uniform 0.1 weights.
    return np.convolve(extended, np.full(10, 0.1), mode='valid')
# ============================================================================
# 13. 日平均外気温度
# ============================================================================
def get_theta_ex_d_Ave_d(Theta_ex_d_t):
    """Daily mean outdoor temperature (deg C), Eq. (14).

    Args:
        Theta_ex_d_t (ndarray): hourly outdoor temperature (deg C), length 8760.

    Returns:
        ndarray: daily mean outdoor temperature (deg C), length 365.
    """
    # Fold the 8760 hourly values into (365 days x 24 hours) and average
    # over the hour axis (equivalent to summing and dividing by 24).
    return Theta_ex_d_t.reshape(365, 24).mean(axis=1)
# ============================================================================
# 14. 夜間平均外気温度
# ============================================================================
def get_Theta_ex_Nave_d(Theta_ex_d_t):
    """Night-time average outdoor temperature (deg C), Eq. (15).

    Averages the outdoor temperature over the 8 hours from 23:00 of the
    previous day through 07:00 of the current day.

    Args:
        Theta_ex_d_t (ndarray): hourly outdoor temperature (deg C), length 8760.

    Returns:
        ndarray: night-time average outdoor temperature per day (length 365).
    """
    # Shift the series forward one hour so the "previous day 23:00 through
    # 07:00" window becomes hours 0..7 of each day; Dec 31 23:00 wraps to
    # Jan 1 00:00.  np.roll returns a copy, so the input is not mutated.
    shifted = np.roll(Theta_ex_d_t, 1).reshape(365, 24)
    # Average the first 8 hours of each day (identical to the original
    # "zero hours 8..23, sum all, divide by 8").
    return shifted[:, :8].sum(axis=1) / 8
# ============================================================================
# 15. 温水温度の熱負荷
# ============================================================================
def get_L_HWH_d(L_HWH_d_t):
    """Daily heat load of hot-water heating (MJ/d), Eq. (16).

    Args:
        L_HWH_d_t (ndarray): hourly heat load of hot-water heating,
            length 8760.  (The original docstring labels both series MJ/d;
            presumably the hourly input is MJ/h — TODO confirm upstream.)

    Returns:
        ndarray: daily heat load of hot-water heating (MJ/d), length 365.
    """
    # Fold 8760 hourly values into (365, 24) and total each day's hours.
    return L_HWH_d_t.reshape(365, 24).sum(axis=1)
| [
"numpy.clip",
"pyhees.section7_1_d.calc_E_E_hs_d_t",
"numpy.convolve",
"pyhees.section7_1_g_3.calc_E_E_hs_d_t",
"pyhees.section7_1_i.get_E_K_hs_d_t",
"pyhees.section7_1_d.get_E_G_hs_d_t",
"pyhees.section9_3.calc_E_E_W_aux_ass_d_t",
"pyhees.section7_1_j.get_f_sb",
"pyhees.section7_1_c.get_E_K_hs_d_t"... | [((1290, 1301), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (1299, 1301), False, 'from functools import lru_cache\n'), ((3745, 3760), 'pyhees.section11_3.load_schedule', 'load_schedule', ([], {}), '()\n', (3758, 3760), False, 'from pyhees.section11_3 import load_schedule, get_schedule_hw\n'), ((3779, 3804), 'pyhees.section11_3.get_schedule_hw', 'get_schedule_hw', (['schedule'], {}), '(schedule)\n', (3794, 3804), False, 'from pyhees.section11_3 import load_schedule, get_schedule_hw\n'), ((3831, 3845), 'pyhees.section11_1.load_outdoor', 'load_outdoor', ([], {}), '()\n', (3843, 3845), False, 'from pyhees.section11_1 import load_outdoor, get_Theta_ex\n'), ((3865, 3894), 'pyhees.section11_1.get_Theta_ex', 'get_Theta_ex', (['region', 'outdoor'], {}), '(region, outdoor)\n', (3877, 3894), False, 'from pyhees.section11_1 import load_outdoor, get_Theta_ex\n'), ((23528, 23546), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (23536, 23546), True, 'import numpy as np\n'), ((40673, 40691), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (40681, 40691), True, 'import numpy as np\n'), ((41699, 41717), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (41707, 41717), True, 'import numpy as np\n'), ((42717, 42735), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (42725, 42735), True, 'import numpy as np\n'), ((43746, 43764), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (43754, 43764), True, 'import numpy as np\n'), ((44778, 44796), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (44786, 44796), True, 'import numpy as np\n'), ((45813, 45831), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (45821, 45831), True, 'import numpy as np\n'), ((55890, 55969), 'pyhees.section7_1_j.get_f_sk', 'watersaving.get_f_sk', (['kitchen_watersaving_A', 'kitchen_watersaving_C', 'Theta_wtr_d'], {}), '(kitchen_watersaving_A, 
kitchen_watersaving_C, Theta_wtr_d)\n', (55910, 55969), True, 'import pyhees.section7_1_j as watersaving\n'), ((56002, 56037), 'pyhees.section7_1_j.get_f_sp', 'watersaving.get_f_sp', (['pipe_diameter'], {}), '(pipe_diameter)\n', (56022, 56037), True, 'import pyhees.section7_1_j as watersaving\n'), ((56528, 56592), 'pyhees.section7_1_j.get_f_ss', 'watersaving.get_f_ss', (['shower_watersaving_A', 'shower_watersaving_B'], {}), '(shower_watersaving_A, shower_watersaving_B)\n', (56548, 56592), True, 'import pyhees.section7_1_j as watersaving\n'), ((56625, 56660), 'pyhees.section7_1_j.get_f_sp', 'watersaving.get_f_sp', (['pipe_diameter'], {}), '(pipe_diameter)\n', (56645, 56660), True, 'import pyhees.section7_1_j as watersaving\n'), ((57103, 57138), 'pyhees.section7_1_j.get_f_sp', 'watersaving.get_f_sp', (['pipe_diameter'], {}), '(pipe_diameter)\n', (57123, 57138), True, 'import pyhees.section7_1_j as watersaving\n'), ((57173, 57230), 'pyhees.section7_1_j.get_f_sw', 'watersaving.get_f_sw', (['washbowl_watersaving_C', 'Theta_wtr_d'], {}), '(washbowl_watersaving_C, Theta_wtr_d)\n', (57193, 57230), True, 'import pyhees.section7_1_j as watersaving\n'), ((57574, 57609), 'pyhees.section7_1_j.get_f_sp', 'watersaving.get_f_sp', (['pipe_diameter'], {}), '(pipe_diameter)\n', (57594, 57609), True, 'import pyhees.section7_1_j as watersaving\n'), ((57642, 57664), 'pyhees.section7_1_j.get_f_sb', 'watersaving.get_f_sb', ([], {}), '()\n', (57662, 57664), True, 'import pyhees.section7_1_j as watersaving\n'), ((57943, 57965), 'pyhees.section7_1_j.get_f_sb', 'watersaving.get_f_sb', ([], {}), '()\n', (57963, 57965), True, 'import pyhees.section7_1_j as watersaving\n'), ((58289, 58324), 'pyhees.section7_1_j.get_f_sp', 'watersaving.get_f_sp', (['pipe_diameter'], {}), '(pipe_diameter)\n', (58309, 58324), True, 'import pyhees.section7_1_j as watersaving\n'), ((62006, 62035), 'pyhees.section7_1_m.get_table_m_for_p', 'schedule.get_table_m_for_p', (['p'], {}), '(p)\n', (62032, 62035), True, 
'import pyhees.section7_1_m as schedule\n'), ((62063, 62081), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (62071, 62081), True, 'import numpy as np\n'), ((62664, 62693), 'pyhees.section7_1_m.get_table_m_for_p', 'schedule.get_table_m_for_p', (['p'], {}), '(p)\n', (62690, 62693), True, 'import pyhees.section7_1_m as schedule\n'), ((62721, 62739), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (62729, 62739), True, 'import numpy as np\n'), ((63289, 63318), 'pyhees.section7_1_m.get_table_m_for_p', 'schedule.get_table_m_for_p', (['p'], {}), '(p)\n', (63315, 63318), True, 'import pyhees.section7_1_m as schedule\n'), ((63346, 63364), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (63354, 63364), True, 'import numpy as np\n'), ((66087, 66116), 'pyhees.section7_1_m.get_table_m_for_p', 'schedule.get_table_m_for_p', (['p'], {}), '(p)\n', (66113, 66116), True, 'import pyhees.section7_1_m as schedule\n'), ((66144, 66162), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (66152, 66162), True, 'import numpy as np\n'), ((66737, 66766), 'pyhees.section7_1_m.get_table_m_for_p', 'schedule.get_table_m_for_p', (['p'], {}), '(p)\n', (66763, 66766), True, 'import pyhees.section7_1_m as schedule\n'), ((66794, 66812), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (66802, 66812), True, 'import numpy as np\n'), ((70055, 70078), 'numpy.repeat', 'np.repeat', (['a_ba_p_d', '(24)'], {}), '(a_ba_p_d, 24)\n', (70064, 70078), True, 'import numpy as np\n'), ((70094, 70117), 'numpy.repeat', 'np.repeat', (['b_ba_p_d', '(24)'], {}), '(b_ba_p_d, 24)\n', (70103, 70117), True, 'import numpy as np\n'), ((70141, 70172), 'numpy.repeat', 'np.repeat', (['theta_ex_d_Ave_d', '(24)'], {}), '(theta_ex_d_Ave_d, 24)\n', (70150, 70172), True, 'import numpy as np\n'), ((70252, 70270), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (70260, 70270), True, 'import numpy as np\n'), ((73130, 73184), 'numpy.clip', 
'np.clip', (['(a_wtr * Theta_ex_prd_Ave_d + b_wtr)', '(0.5)', 'None'], {}), '(a_wtr * Theta_ex_prd_Ave_d + b_wtr, 0.5, None)\n', (73137, 73184), True, 'import numpy as np\n'), ((73790, 73812), 'numpy.zeros', 'np.zeros', (['(365 + 10 - 1)'], {}), '(365 + 10 - 1)\n', (73798, 73812), True, 'import numpy as np\n'), ((73968, 74010), 'numpy.convolve', 'np.convolve', (['tmp', '([0.1] * 10)'], {'mode': '"""valid"""'}), "(tmp, [0.1] * 10, mode='valid')\n", (73979, 74010), True, 'import numpy as np\n'), ((74497, 74516), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (74503, 74516), True, 'import numpy as np\n'), ((74996, 75020), 'numpy.roll', 'np.roll', (['Theta_ex_d_t', '(1)'], {}), '(Theta_ex_d_t, 1)\n', (75003, 75020), True, 'import numpy as np\n'), ((75229, 75248), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (75235, 75248), True, 'import numpy as np\n'), ((75803, 75822), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (75809, 75822), True, 'import numpy as np\n'), ((11321, 11339), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (11329, 11339), True, 'import numpy as np\n'), ((11399, 11417), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (11407, 11417), True, 'import numpy as np\n'), ((15132, 15150), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (15140, 15150), True, 'import numpy as np\n'), ((16995, 17013), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (17003, 17013), True, 'import numpy as np\n'), ((20697, 20715), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (20705, 20715), True, 'import numpy as np\n'), ((25792, 26069), 'pyhees.section7_1_c.calc_E_E_hs_d_t', 'gas.calc_E_E_hs_d_t', ([], {'W_dash_k_d_t': 'W_dash_k_d_t', 'W_dash_s_d_t': 'W_dash_s_d_t', 'W_dash_w_d_t': 'W_dash_w_d_t', 'W_dash_b1_d_t': 'W_dash_b1_d_t', 'W_dash_b2_d_t': 'W_dash_b2_d_t', 'W_dash_ba1_d_t': 'W_dash_ba1_d_t', 'theta_ex_d_Ave_d': 
'theta_ex_d_Ave_d', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t,\n W_dash_w_d_t=W_dash_w_d_t, W_dash_b1_d_t=W_dash_b1_d_t, W_dash_b2_d_t=\n W_dash_b2_d_t, W_dash_ba1_d_t=W_dash_ba1_d_t, theta_ex_d_Ave_d=\n theta_ex_d_Ave_d, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (25811, 26069), True, 'import pyhees.section7_1_c as gas\n'), ((32203, 32613), 'pyhees.section7_1_c.calc_E_G_hs_d_t', 'gas.calc_E_G_hs_d_t', ([], {'hw_type': 'hw_type', 'e_rtd': 'e_rtd', 'e_dash_rtd': 'e_dash_rtd', 'theta_ex_d_Ave_d': 'Theta_ex_Ave', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t', 'bath_function': 'bath_function'}), '(hw_type=hw_type, e_rtd=e_rtd, e_dash_rtd=e_dash_rtd,\n theta_ex_d_Ave_d=Theta_ex_Ave, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b1_d_t=L_dashdash_b1_d_t, L_dashdash_b2_d_t=\n L_dashdash_b2_d_t, L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,\n L_dashdash_ba2_d_t=L_dashdash_ba2_d_t, bath_function=bath_function)\n', (32222, 32613), True, 'import pyhees.section7_1_c as gas\n'), ((36727, 36747), 'pyhees.section7_1_c.get_E_K_hs_d_t', 'gas.get_E_K_hs_d_t', ([], {}), '()\n', (36745, 36747), True, 'import pyhees.section7_1_c as gas\n'), ((48611, 48975), 'pyhees.section9_2.calc_L_sun_lss_d_t', 'lss.calc_L_sun_lss_d_t', ([], {'region': 'region', 'sol_region': 'sol_region', 'ls_type': 'ls_type', 'A_sp': 'A_sp', 'P_alpha_sp': 'P_alpha_sp', 'P_beta_sp': 'P_beta_sp', 'W_tnk_ss': 'W_tnk_ss', 'Theta_wtr_d': 'Theta_wtr_d', 'L_dash_k_d_t': 'L_dash_k_d_t', 'L_dash_s_d_t': 'L_dash_s_d_t', 'L_dash_w_d_t': 'L_dash_w_d_t', 'L_dash_b1_d_t': 'L_dash_b1_d_t', 'L_dash_b2_d_t': 'L_dash_b2_d_t', 'L_dash_ba1_d_t': 'L_dash_ba1_d_t'}), 
'(region=region, sol_region=sol_region, ls_type=\n ls_type, A_sp=A_sp, P_alpha_sp=P_alpha_sp, P_beta_sp=P_beta_sp,\n W_tnk_ss=W_tnk_ss, Theta_wtr_d=Theta_wtr_d, L_dash_k_d_t=L_dash_k_d_t,\n L_dash_s_d_t=L_dash_s_d_t, L_dash_w_d_t=L_dash_w_d_t, L_dash_b1_d_t=\n L_dash_b1_d_t, L_dash_b2_d_t=L_dash_b2_d_t, L_dash_ba1_d_t=L_dash_ba1_d_t)\n', (48633, 48975), True, 'import pyhees.section9_2 as lss\n'), ((52194, 52212), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (52202, 52212), True, 'import numpy as np\n'), ((52247, 52265), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (52255, 52265), True, 'import numpy as np\n'), ((53518, 53536), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (53526, 53536), True, 'import numpy as np\n'), ((53572, 53590), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (53580, 53590), True, 'import numpy as np\n'), ((62213, 62246), 'numpy.repeat', 'np.repeat', (['(schedule_hw == ptn)', '(24)'], {}), '(schedule_hw == ptn, 24)\n', (62222, 62246), True, 'import numpy as np\n'), ((62924, 62957), 'numpy.repeat', 'np.repeat', (['(schedule_hw == ptn)', '(24)'], {}), '(schedule_hw == ptn, 24)\n', (62933, 62957), True, 'import numpy as np\n'), ((63496, 63529), 'numpy.repeat', 'np.repeat', (['(schedule_hw == ptn)', '(24)'], {}), '(schedule_hw == ptn, 24)\n', (63505, 63529), True, 'import numpy as np\n'), ((66346, 66379), 'numpy.repeat', 'np.repeat', (['(schedule_hw == ptn)', '(24)'], {}), '(schedule_hw == ptn, 24)\n', (66355, 66379), True, 'import numpy as np\n'), ((66996, 67029), 'numpy.repeat', 'np.repeat', (['(schedule_hw == ptn)', '(24)'], {}), '(schedule_hw == ptn, 24)\n', (67005, 67029), True, 'import numpy as np\n'), ((8350, 8366), 'numpy.sum', 'np.sum', (['L_ba_d_t'], {}), '(L_ba_d_t)\n', (8356, 8366), True, 'import numpy as np\n'), ((8398, 8413), 'numpy.sum', 'np.sum', (['W_k_d_t'], {}), '(W_k_d_t)\n', (8404, 8413), True, 'import numpy as np\n'), ((8444, 8459), 'numpy.sum', 
'np.sum', (['W_s_d_t'], {}), '(W_s_d_t)\n', (8450, 8459), True, 'import numpy as np\n'), ((8490, 8505), 'numpy.sum', 'np.sum', (['W_w_d_t'], {}), '(W_w_d_t)\n', (8496, 8505), True, 'import numpy as np\n'), ((8537, 8553), 'numpy.sum', 'np.sum', (['W_b1_d_t'], {}), '(W_b1_d_t)\n', (8543, 8553), True, 'import numpy as np\n'), ((8585, 8601), 'numpy.sum', 'np.sum', (['W_b2_d_t'], {}), '(W_b2_d_t)\n', (8591, 8601), True, 'import numpy as np\n'), ((8634, 8651), 'numpy.sum', 'np.sum', (['W_ba1_d_t'], {}), '(W_ba1_d_t)\n', (8640, 8651), True, 'import numpy as np\n'), ((8688, 8708), 'numpy.sum', 'np.sum', (['W_dash_k_d_t'], {}), '(W_dash_k_d_t)\n', (8694, 8708), True, 'import numpy as np\n'), ((8744, 8764), 'numpy.sum', 'np.sum', (['W_dash_s_d_t'], {}), '(W_dash_s_d_t)\n', (8750, 8764), True, 'import numpy as np\n'), ((8800, 8820), 'numpy.sum', 'np.sum', (['W_dash_w_d_t'], {}), '(W_dash_w_d_t)\n', (8806, 8820), True, 'import numpy as np\n'), ((8857, 8878), 'numpy.sum', 'np.sum', (['W_dash_b1_d_t'], {}), '(W_dash_b1_d_t)\n', (8863, 8878), True, 'import numpy as np\n'), ((8915, 8936), 'numpy.sum', 'np.sum', (['W_dash_b2_d_t'], {}), '(W_dash_b2_d_t)\n', (8921, 8936), True, 'import numpy as np\n'), ((8974, 8996), 'numpy.sum', 'np.sum', (['W_dash_ba1_d_t'], {}), '(W_dash_ba1_d_t)\n', (8980, 8996), True, 'import numpy as np\n'), ((9033, 9053), 'numpy.sum', 'np.sum', (['L_dash_k_d_t'], {}), '(L_dash_k_d_t)\n', (9039, 9053), True, 'import numpy as np\n'), ((9089, 9109), 'numpy.sum', 'np.sum', (['L_dash_s_d_t'], {}), '(L_dash_s_d_t)\n', (9095, 9109), True, 'import numpy as np\n'), ((9145, 9165), 'numpy.sum', 'np.sum', (['L_dash_w_d_t'], {}), '(L_dash_w_d_t)\n', (9151, 9165), True, 'import numpy as np\n'), ((9202, 9223), 'numpy.sum', 'np.sum', (['L_dash_b1_d_t'], {}), '(L_dash_b1_d_t)\n', (9208, 9223), True, 'import numpy as np\n'), ((9260, 9281), 'numpy.sum', 'np.sum', (['L_dash_b2_d_t'], {}), '(L_dash_b2_d_t)\n', (9266, 9281), True, 'import numpy as np\n'), ((9319, 9341), 
'numpy.sum', 'np.sum', (['L_dash_ba1_d_t'], {}), '(L_dash_ba1_d_t)\n', (9325, 9341), True, 'import numpy as np\n'), ((9379, 9401), 'numpy.sum', 'np.sum', (['L_dash_ba2_d_t'], {}), '(L_dash_ba2_d_t)\n', (9385, 9401), True, 'import numpy as np\n'), ((9442, 9466), 'numpy.sum', 'np.sum', (['L_dashdash_k_d_t'], {}), '(L_dashdash_k_d_t)\n', (9448, 9466), True, 'import numpy as np\n'), ((9506, 9530), 'numpy.sum', 'np.sum', (['L_dashdash_s_d_t'], {}), '(L_dashdash_s_d_t)\n', (9512, 9530), True, 'import numpy as np\n'), ((9570, 9594), 'numpy.sum', 'np.sum', (['L_dashdash_w_d_t'], {}), '(L_dashdash_w_d_t)\n', (9576, 9594), True, 'import numpy as np\n'), ((9635, 9660), 'numpy.sum', 'np.sum', (['L_dashdash_b1_d_t'], {}), '(L_dashdash_b1_d_t)\n', (9641, 9660), True, 'import numpy as np\n'), ((9701, 9726), 'numpy.sum', 'np.sum', (['L_dashdash_b2_d_t'], {}), '(L_dashdash_b2_d_t)\n', (9707, 9726), True, 'import numpy as np\n'), ((9768, 9794), 'numpy.sum', 'np.sum', (['L_dashdash_ba1_d_t'], {}), '(L_dashdash_ba1_d_t)\n', (9774, 9794), True, 'import numpy as np\n'), ((9836, 9862), 'numpy.sum', 'np.sum', (['L_dashdash_ba2_d_t'], {}), '(L_dashdash_ba2_d_t)\n', (9842, 9862), True, 'import numpy as np\n'), ((15288, 15460), 'pyhees.section9_2.calc_E_E_lss_aux_d_t', 'lss.calc_E_E_lss_aux_d_t', ([], {'ls_type': "SHC['ls_type']", 'pmp_type': '"""上記以外の機種"""', 'P_alpha_sp': "SHC['P_alpha_sp']", 'P_beta_sp': "SHC['P_beta_sp']", 'region': 'region', 'sol_region': 'sol_region'}), "(ls_type=SHC['ls_type'], pmp_type='上記以外の機種',\n P_alpha_sp=SHC['P_alpha_sp'], P_beta_sp=SHC['P_beta_sp'], region=region,\n sol_region=sol_region)\n", (15312, 15460), True, 'import pyhees.section9_2 as lss\n'), ((26310, 26587), 'pyhees.section7_1_d.calc_E_E_hs_d_t', 'oil.calc_E_E_hs_d_t', ([], {'W_dash_k_d_t': 'W_dash_k_d_t', 'W_dash_s_d_t': 'W_dash_s_d_t', 'W_dash_w_d_t': 'W_dash_w_d_t', 'W_dash_b1_d_t': 'W_dash_b1_d_t', 'W_dash_ba1_d_t': 'W_dash_ba1_d_t', 'W_dash_b2_d_t': 'W_dash_b2_d_t', 'theta_ex_d_Ave_d': 
'theta_ex_d_Ave_d', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t,\n W_dash_w_d_t=W_dash_w_d_t, W_dash_b1_d_t=W_dash_b1_d_t, W_dash_ba1_d_t=\n W_dash_ba1_d_t, W_dash_b2_d_t=W_dash_b2_d_t, theta_ex_d_Ave_d=\n theta_ex_d_Ave_d, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (26329, 26587), True, 'import pyhees.section7_1_d as oil\n'), ((32895, 32915), 'pyhees.section7_1_d.get_E_G_hs_d_t', 'oil.get_E_G_hs_d_t', ([], {}), '()\n', (32913, 32915), True, 'import pyhees.section7_1_d as oil\n'), ((36896, 37311), 'pyhees.section7_1_d.calc_E_K_hs_d_t', 'oil.calc_E_K_hs_d_t', ([], {'hw_type': 'hw_type', 'bath_function': 'bath_function', 'e_rtd': 'e_rtd', 'e_dash_rtd': 'e_dash_rtd', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(hw_type=hw_type, bath_function=bath_function, e_rtd=\n e_rtd, e_dash_rtd=e_dash_rtd, theta_ex_d_Ave_d=theta_ex_d_Ave_d,\n L_dashdash_k_d_t=L_dashdash_k_d_t, L_dashdash_s_d_t=L_dashdash_s_d_t,\n L_dashdash_w_d_t=L_dashdash_w_d_t, L_dashdash_b1_d_t=L_dashdash_b1_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba1_d_t=\n L_dashdash_ba1_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (36915, 37311), True, 'import pyhees.section7_1_d as oil\n'), ((52533, 52551), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (52541, 52551), True, 'import numpy as np\n'), ((53900, 53918), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (53908, 53918), True, 'import numpy as np\n'), ((56060, 56079), 'numpy.repeat', 'np.repeat', (['f_sk', '(24)'], {}), '(f_sk, 24)\n', (56069, 56079), True, 'import numpy as np\n'), ((57253, 57272), 'numpy.repeat', 'np.repeat', (['f_sw', '(24)'], {}), '(f_sw, 24)\n', 
(57262, 57272), True, 'import numpy as np\n'), ((62270, 62298), 'numpy.tile', 'np.tile', (['table[i][:, 0]', '(365)'], {}), '(table[i][:, 0], 365)\n', (62277, 62298), True, 'import numpy as np\n'), ((62981, 63009), 'numpy.tile', 'np.tile', (['table[i][:, j]', '(365)'], {}), '(table[i][:, j], 365)\n', (62988, 63009), True, 'import numpy as np\n'), ((63553, 63581), 'numpy.tile', 'np.tile', (['table[i][:, 3]', '(365)'], {}), '(table[i][:, 3], 365)\n', (63560, 63581), True, 'import numpy as np\n'), ((64084, 64102), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (64092, 64102), True, 'import numpy as np\n'), ((64630, 64648), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (64638, 64648), True, 'import numpy as np\n'), ((66403, 66431), 'numpy.tile', 'np.tile', (['table[i][:, j]', '(365)'], {}), '(table[i][:, j], 365)\n', (66410, 66431), True, 'import numpy as np\n'), ((67053, 67081), 'numpy.tile', 'np.tile', (['table[i][:, j]', '(365)'], {}), '(table[i][:, j], 365)\n', (67060, 67081), True, 'import numpy as np\n'), ((67702, 67720), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (67710, 67720), True, 'import numpy as np\n'), ((15685, 16059), 'pyhees.section9_3.calc_E_E_W_aux_ass_d_t', 'ass.calc_E_E_W_aux_ass_d_t', ([], {'hotwater_use': "SHC['hotwater_use']", 'heating_flag_d': 'heating_flag_d', 'region': 'region', 'sol_region': 'sol_region', 'P_alpha': "SHC['P_alpha']", 'P_beta': "SHC['P_beta']", 'A_col': "SHC['A_col']", 'V_fan_P0': "SHC['V_fan_P0']", 'm_fan_test': "SHC['m_fan_test']", 'd0': "SHC['d0']", 'd1': "SHC['d1']", 'fan_sso': "SHC['fan_sso']", 'fan_type': "SHC['fan_type']", 'pump_sso': "SHC['pump_sso']"}), "(hotwater_use=SHC['hotwater_use'], heating_flag_d\n =heating_flag_d, region=region, sol_region=sol_region, P_alpha=SHC[\n 'P_alpha'], P_beta=SHC['P_beta'], A_col=SHC['A_col'], V_fan_P0=SHC[\n 'V_fan_P0'], m_fan_test=SHC['m_fan_test'], d0=SHC['d0'], d1=SHC['d1'],\n fan_sso=SHC['fan_sso'], 
fan_type=SHC['fan_type'], pump_sso=SHC['pump_sso'])\n", (15711, 16059), True, 'import pyhees.section9_3 as ass\n'), ((26729, 27127), 'pyhees.section7_1_e.calc_E_E_hs_d_t', 'eheatpump.calc_E_E_hs_d_t', ([], {'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t', 'e_rtd': 'e_rtd', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d', 'theta_ex_Nave_d': 'Theta_ex_Nave_d', 'CO2HP': 'CO2HP'}), '(L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b1_d_t=L_dashdash_b1_d_t, L_dashdash_b2_d_t=\n L_dashdash_b2_d_t, L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,\n L_dashdash_ba2_d_t=L_dashdash_ba2_d_t, e_rtd=e_rtd, theta_ex_d_Ave_d=\n theta_ex_d_Ave_d, theta_ex_Nave_d=Theta_ex_Nave_d, CO2HP=CO2HP)\n', (26754, 27127), True, 'import pyhees.section7_1_e as eheatpump\n'), ((32966, 32992), 'pyhees.section7_1_e.get_E_G_hs_d_t', 'eheatpump.get_E_G_hs_d_t', ([], {}), '()\n', (32990, 32992), True, 'import pyhees.section7_1_e as eheatpump\n'), ((37494, 37520), 'pyhees.section7_1_e.get_E_K_hs_d_t', 'eheatpump.get_E_K_hs_d_t', ([], {}), '()\n', (37518, 37520), True, 'import pyhees.section7_1_e as eheatpump\n'), ((49225, 49239), 'pyhees.section11_1.load_outdoor', 'load_outdoor', ([], {}), '()\n', (49237, 49239), False, 'from pyhees.section11_1 import load_outdoor, get_Theta_ex\n'), ((49267, 49296), 'pyhees.section11_1.get_Theta_ex', 'get_Theta_ex', (['region', 'outdoor'], {}), '(region, outdoor)\n', (49279, 49296), False, 'from pyhees.section11_1 import load_outdoor, get_Theta_ex\n'), ((49351, 49461), 'pyhees.section9_3.calc_Theta_col', 'ass.calc_Theta_col', (['A_col', 'P_alpha', 'P_beta', 'V_fan_P0', 'd0', 'd1', 'm_fan_test', 'region', 'sol_region', 'Theta_ex_d_t'], {}), '(A_col, P_alpha, P_beta, V_fan_P0, 
d0, d1, m_fan_test,\n region, sol_region, Theta_ex_d_t)\n', (49369, 49461), True, 'import pyhees.section9_3 as ass\n'), ((49555, 49613), 'pyhees.section9_3.get_t_fan_d_t', 'ass.get_t_fan_d_t', (['Theta_col_nonopg_d_t', 'Theta_col_opg_d_t'], {}), '(Theta_col_nonopg_d_t, Theta_col_opg_d_t)\n', (49572, 49613), True, 'import pyhees.section9_3 as ass\n'), ((49637, 49694), 'pyhees.section9_3.get_t_cp_d_t', 'ass.get_t_cp_d_t', (['hotwater_use', 't_fan_d_t', 'heating_flag_d'], {}), '(hotwater_use, t_fan_d_t, heating_flag_d)\n', (49653, 49694), True, 'import pyhees.section9_3 as ass\n'), ((49719, 49757), 'pyhees.section9_3.get_V_fan_d_t', 'ass.get_V_fan_d_t', (['t_fan_d_t', 'V_fan_P0'], {}), '(t_fan_d_t, V_fan_P0)\n', (49736, 49757), True, 'import pyhees.section9_3 as ass\n'), ((49782, 49843), 'pyhees.section9_3.get_Q_col_d_t', 'ass.get_Q_col_d_t', (['V_fan_d_t', 'Theta_col_opg_d_t', 'Theta_ex_d_t'], {}), '(V_fan_d_t, Theta_col_opg_d_t, Theta_ex_d_t)\n', (49799, 49843), True, 'import pyhees.section9_3 as ass\n'), ((49862, 49895), 'pyhees.section9_3.calc_Q_d', 'ass.calc_Q_d', (['Q_col_d_t', 't_cp_d_t'], {}), '(Q_col_d_t, t_cp_d_t)\n', (49874, 49895), True, 'import pyhees.section9_3 as ass\n'), ((49918, 49963), 'pyhees.section9_3.calc_L_tnk_d', 'ass.calc_L_tnk_d', (['Q_d', 'W_tnk_ass', 'Theta_wtr_d'], {}), '(Q_d, W_tnk_ass, Theta_wtr_d)\n', (49934, 49963), True, 'import pyhees.section9_3 as ass\n'), ((49983, 50106), 'pyhees.section9_3.calc_L_sun_ass_d_t', 'ass.calc_L_sun_ass_d_t', (['L_tnk_d', 'L_dash_k_d_t', 'L_dash_s_d_t', 'L_dash_w_d_t', 'L_dash_b1_d_t', 'L_dash_b2_d_t', 'L_dash_ba1_d_t'], {}), '(L_tnk_d, L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t,\n L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t)\n', (50005, 50106), True, 'import pyhees.section9_3 as ass\n'), ((50178, 50196), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (50186, 50196), True, 'import numpy as np\n'), ((50243, 50261), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (50251, 
50261), True, 'import numpy as np\n'), ((52745, 52763), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (52753, 52763), True, 'import numpy as np\n'), ((54041, 54059), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (54049, 54059), True, 'import numpy as np\n'), ((27326, 27660), 'pyhees.section7_1_f.calc_E_E_hs_d_t', 'eheater.calc_E_E_hs_d_t', ([], {'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d'}), '(L_dashdash_k_d_t=L_dashdash_k_d_t, L_dashdash_s_d_t\n =L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t, L_dashdash_b1_d_t\n =L_dashdash_b1_d_t, L_dashdash_b2_d_t=L_dashdash_b2_d_t,\n L_dashdash_ba1_d_t=L_dashdash_ba1_d_t, L_dashdash_ba2_d_t=\n L_dashdash_ba2_d_t, theta_ex_d_Ave_d=theta_ex_d_Ave_d)\n', (27349, 27660), True, 'import pyhees.section7_1_f as eheater\n'), ((33071, 33091), 'pyhees.section7_1_f.get_E_G_hs', 'eheater.get_E_G_hs', ([], {}), '()\n', (33089, 33091), True, 'import pyhees.section7_1_f as eheater\n'), ((37599, 37619), 'pyhees.section7_1_f.get_E_K_hs', 'eheater.get_E_K_hs', ([], {}), '()\n', (37617, 37619), True, 'import pyhees.section7_1_f as eheater\n'), ((50836, 50862), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (50845, 50862), True, 'import numpy as np\n'), ((51239, 51265), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (51248, 51265), True, 'import numpy as np\n'), ((51632, 51658), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (51641, 51658), True, 'import numpy as np\n'), ((27907, 28194), 'pyhees.section7_1_g.calc_E_E_hs_d_t', 'hybrid_gas.calc_E_E_hs_d_t', ([], {'hybrid_category': 'hybrid_category', 'theta_ex_d_Ave_d': 
'theta_ex_d_Ave_d', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(hybrid_category=hybrid_category,\n theta_ex_d_Ave_d=theta_ex_d_Ave_d, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (27933, 28194), True, 'import pyhees.section7_1_g as hybrid_gas\n'), ((33251, 33534), 'pyhees.section7_1_g.calc_E_G_hs_d_t', 'hybrid_gas.calc_E_G_hs_d_t', ([], {'hybrid_category': 'hybrid_category', 'theta_ex_d_Ave_d': 'Theta_ex_Ave', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(hybrid_category=hybrid_category,\n theta_ex_d_Ave_d=Theta_ex_Ave, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (33277, 33534), True, 'import pyhees.section7_1_g as hybrid_gas\n'), ((37779, 37802), 'pyhees.section7_1_h.get_E_K_hs', 'gas_hybrid.get_E_K_hs', ([], {}), '()\n', (37800, 37802), True, 'import pyhees.section7_1_h as gas_hybrid\n'), ((28446, 28902), 'pyhees.section7_1_g_3.calc_E_E_hs_d_t', 'hybrid_gas_3.calc_E_E_hs_d_t', ([], {'bath_function': 'bath_function', 'package_id': 'package_id', 'hybrid_param': 'hybrid_param', 'W_dash_ba1_d_t': 'W_dash_ba1_d_t', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), 
'(bath_function=bath_function, package_id=\n package_id, hybrid_param=hybrid_param, W_dash_ba1_d_t=W_dash_ba1_d_t,\n theta_ex_d_Ave_d=theta_ex_d_Ave_d, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b1_d_t=L_dashdash_b1_d_t, L_dashdash_b2_d_t=\n L_dashdash_b2_d_t, L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,\n L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (28474, 28902), True, 'import pyhees.section7_1_g_3 as hybrid_gas_3\n'), ((33786, 34239), 'pyhees.section7_1_g_3.get_E_G_hs_d_t', 'hybrid_gas_3.get_E_G_hs_d_t', ([], {'bath_function': 'bath_function', 'package_id': 'package_id', 'theta_ex_d_Ave_d': 'Theta_ex_Ave', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b1_d_t': 'L_dashdash_b1_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba1_d_t': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t', 'W_dash_ba1_d_t': 'W_dash_ba1_d_t', 'hybrid_param': 'hybrid_param'}), '(bath_function=bath_function, package_id=\n package_id, theta_ex_d_Ave_d=Theta_ex_Ave, L_dashdash_k_d_t=\n L_dashdash_k_d_t, L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=\n L_dashdash_w_d_t, L_dashdash_b1_d_t=L_dashdash_b1_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba1_d_t=\n L_dashdash_ba1_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,\n W_dash_ba1_d_t=W_dash_ba1_d_t, hybrid_param=hybrid_param)\n', (33813, 34239), True, 'import pyhees.section7_1_g_3 as hybrid_gas_3\n'), ((37972, 37999), 'pyhees.section7_1_g.get_E_K_hs_d_t', 'hybrid_gas.get_E_K_hs_d_t', ([], {}), '()\n', (37997, 37999), True, 'import pyhees.section7_1_g as hybrid_gas\n'), ((52450, 52476), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (52459, 52476), True, 'import numpy as np\n'), ((53816, 53842), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (53825, 53842), True, 'import numpy as 
np\n'), ((67591, 67617), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (67600, 67617), True, 'import numpy as np\n'), ((29128, 29407), 'pyhees.section7_1_h.get_E_E_hs', 'gas_hybrid.get_E_E_hs', ([], {'W_dash_k_d_t': 'W_dash_k_d_t', 'W_dash_s_d_t': 'W_dash_s_d_t', 'W_dash_w_d_t': 'W_dash_w_d_t', 'W_dash_b1_d_t': 'W_dash_b1_d_t', 'W_dash_b2_d_t': 'W_dash_b2_d_t', 'W_dash_ba1_d_t': 'W_dash_ba1_d_t', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t,\n W_dash_w_d_t=W_dash_w_d_t, W_dash_b1_d_t=W_dash_b1_d_t, W_dash_b2_d_t=\n W_dash_b2_d_t, W_dash_ba1_d_t=W_dash_ba1_d_t, theta_ex_d_Ave_d=\n theta_ex_d_Ave_d, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (29149, 29407), True, 'import pyhees.section7_1_h as gas_hybrid\n'), ((34463, 34789), 'pyhees.section7_1_h.get_E_G_hs', 'gas_hybrid.get_E_G_hs', ([], {'Theta_ex_Ave': 'Theta_ex_Ave', 'L_dashdash_k': 'L_dashdash_k_d_t', 'L_dashdash_s': 'L_dashdash_s_d_t', 'L_dashdash_w': 'L_dashdash_w_d_t', 'L_dashdash_b1': 'L_dashdash_b1_d_t', 'L_dashdash_b2': 'L_dashdash_b2_d_t', 'L_dashdash_ba1': 'L_dashdash_ba1_d_t', 'L_dashdash_ba2': 'L_dashdash_ba2_d_t', 'bath_function': 'bath_function'}), '(Theta_ex_Ave=Theta_ex_Ave, L_dashdash_k=\n L_dashdash_k_d_t, L_dashdash_s=L_dashdash_s_d_t, L_dashdash_w=\n L_dashdash_w_d_t, L_dashdash_b1=L_dashdash_b1_d_t, L_dashdash_b2=\n L_dashdash_b2_d_t, L_dashdash_ba1=L_dashdash_ba1_d_t, L_dashdash_ba2=\n L_dashdash_ba2_d_t, bath_function=bath_function)\n', (34484, 34789), True, 'import pyhees.section7_1_h as gas_hybrid\n'), ((38097, 38124), 'pyhees.section7_1_g.get_E_K_hs_d_t', 'hybrid_gas.get_E_K_hs_d_t', ([], {}), '()\n', (38122, 38124), True, 'import pyhees.section7_1_g as hybrid_gas\n'), ((52829, 52855), 'numpy.repeat', 'np.repeat', (['Theta_wtr_d', '(24)'], {}), '(Theta_wtr_d, 24)\n', (52838, 52855), True, 'import numpy as np\n'), ((29608, 29905), 
'pyhees.section7_1_i.calc_E_E_hs_d_t', 'whybrid.calc_E_E_hs_d_t', ([], {'L_HWH': 'L_HWH', 'hybrid_category': 'hybrid_category', 'theta_ex_d_Ave_d': 'theta_ex_d_Ave_d', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(L_HWH=L_HWH, hybrid_category=hybrid_category,\n theta_ex_d_Ave_d=theta_ex_d_Ave_d, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (29631, 29905), True, 'import pyhees.section7_1_i as whybrid\n'), ((34996, 35285), 'pyhees.section7_1_i.calc_E_G_hs_d_t', 'whybrid.calc_E_G_hs_d_t', ([], {'L_HWH': 'L_HWH', 'hybrid_category': 'hybrid_category', 'Theta_ex_Ave': 'Theta_ex_Ave', 'L_dashdash_k_d_t': 'L_dashdash_k_d_t', 'L_dashdash_s_d_t': 'L_dashdash_s_d_t', 'L_dashdash_w_d_t': 'L_dashdash_w_d_t', 'L_dashdash_b2_d_t': 'L_dashdash_b2_d_t', 'L_dashdash_ba2_d_t': 'L_dashdash_ba2_d_t'}), '(L_HWH=L_HWH, hybrid_category=hybrid_category,\n Theta_ex_Ave=Theta_ex_Ave, L_dashdash_k_d_t=L_dashdash_k_d_t,\n L_dashdash_s_d_t=L_dashdash_s_d_t, L_dashdash_w_d_t=L_dashdash_w_d_t,\n L_dashdash_b2_d_t=L_dashdash_b2_d_t, L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)\n', (35019, 35285), True, 'import pyhees.section7_1_i as whybrid\n'), ((38233, 38257), 'pyhees.section7_1_i.get_E_K_hs_d_t', 'whybrid.get_E_K_hs_d_t', ([], {}), '()\n', (38255, 38257), True, 'import pyhees.section7_1_i as whybrid\n'), ((35434, 35447), 'numpy.zeros', 'np.zeros', (['(365)'], {}), '(365)\n', (35442, 35447), True, 'import numpy as np\n'), ((38312, 38325), 'numpy.zeros', 'np.zeros', (['(365)'], {}), '(365)\n', (38320, 38325), True, 'import numpy as np\n')] |
'''
Module containing the DataFiller class,
which is responsible for filling data
to plots and monitors
'''
from ast import literal_eval  # to convert a string to list
from copy import copy

import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore, QtGui, QtWidgets
class DataFiller():
#pylint: disable=too-many-instance-attributes
'''
This class fills the data for all the
displayed plots on the screen, and
updates the plots accordingly.
It also passes data to the monitors.
In "frozen" mode, we keep adding new data points to _data,
but don't update the displayed graph. When we unfreeze, we
then see the full recent data.
Attributes:
_qtgraphs (dict) All PlotItems
_plots (dict) All PlotDataItems
_data (dict) The data for all plots
_historic_data (dict) The historic data for all plots
_default_yrange (dict) The default y ranges per plot
_yrange (dict) The current y ranges per plot
_monitors (dict) The monitors to which to send data
_colors (dict) The plot color
_config (dict) The config dict
_n_samples (int) The number of samples to plot
_n_historic_samples (int) The number of samples to keep for historic data
_sampling (float) The time interval between samples
_time_window (float) The number of seconds shown
_xdata (array) The data along x
_frozen (bool) True we are in forzen state
_first_plot (PlotDataItem) Reference to the first drwan plot
_looping (bool) True displays looping plots
_looping_data_idx (int) The x index of the looping line
_looping_lines (dict) A dict of InfiniteLines
'''
def __init__(self, config):
'''
Constructor
arguments:
- config: the config dictionary
'''
self._qtgraphs = {}
self._plots = {}
self._data = {}
self._historic_data = {}
self._default_yrange = {}
self._yrange = {}
self._monitors = {}
self._colors = {}
self._config = config
self._n_samples = self._config['nsamples']
self._n_historic_samples = self._config.get('historic_nsamples',
200)
self._sampling = self._config['sampling_interval']
self._time_window = self._n_samples * self._sampling # seconds
self._xdata = np.linspace(-self._time_window, 0, self._n_samples)
self._frozen = False
self._first_plot = None
self._looping = self._config['use_looping_plots']
self._looping_data_idx = {}
self._looping_lines = {}
self._x_label = None
    def connect_plot(self, plotname, plot):
        '''
        Connects a plot to this class by
        storing it in a dictionary
        arguments:
        - plotname: the name of the plot
        - plot: the PlotItem from the ui file
        '''
        plot_config = self._config['plots'][plotname]
        name = plot_config['observable']
        # Link X axes if we've already seen a plot
        if self._first_plot:
            plot.setXLink(self._first_plot)
        else:
            self._first_plot = plot
        self._qtgraphs[name] = plot
        self._plots[name] = plot.plot()
        # np.linspace(0, 0, n) allocates an n-long zero-filled buffer
        self._data[name] = np.linspace(0, 0, self._n_samples)
        self._historic_data[name] = np.linspace(0, 0, self._n_historic_samples)
        self._yrange[name] = None
        # Pass copies so the plot does not alias our rolling buffers
        self._plots[name].setData(copy(self._xdata), copy(self._data[name]))
        self._colors[name] = plot_config['color']
        self._looping_data_idx[name] = 0
        # Set the Y axis
        y_axis_label = plot_config['name']
        y_axis_label += ' '
        y_axis_label += plot_config['units']
        plot.setLabel(axis='left', text=y_axis_label)
        # Set the X axis (only bottom-row, non-looping plots get the label)
        if self._config['show_x_axis_labels'] and 'bot' in plotname and not self._looping:
            self.add_x_axis_label(plot)
        # Remove x ticks, if selected
        if self._looping or not self._config['show_x_axis_ticks']:
            plot.getAxis('bottom').setTicks([])
            plot.getAxis('bottom').setStyle(tickTextOffset=0, tickTextHeight=0)
        # Customize the axis color
        color = self.parse_color(self._config['axis_line_color'])
        plot.getAxis('bottom').setPen(
            pg.mkPen(color, width=self._config['axis_line_width']))
        plot.getAxis('left').setPen(
            pg.mkPen(color, width=self._config['axis_line_width']))
        if self._looping:
            self.add_looping_lines(name, plot)
        # Fix the x axis range
        self.set_default_x_range(name)
        # Fix the y axis range, padded by 10% of the configured span
        value_min = plot_config['min']
        value_max = plot_config['max']
        ymin = value_min - (value_max - value_min) * 0.1
        ymax = value_max + (value_max - value_min) * 0.1
        self._default_yrange[name] = [ymin, ymax]
        self.set_default_y_range(name)
        # Remove mouse interaction with plots
        plot.setMouseEnabled(x=False, y=False)
        plot.setMenuEnabled(False)
        print('NORMAL: Connected plot',
              plot_config['name'], 'with variable', name)
    def set_default_y_range(self, name):
        '''
        Set the Y axis range of the plot to the defaults
        specified in the config file.
        arguments:
        - name: the plot name to set the y range
        '''
        if name not in self._qtgraphs:
            raise Exception('Cannot set y range for graph',
                            name, 'as it doesn\'t exist.')
        # Save the range for future use
        # (stored as a (ymin, ymax) tuple so restore_y_range can reuse it)
        self._yrange[name] = (self._default_yrange[name]
                              [0], self._default_yrange[name][1])
        # Set the range to the graph
        self._qtgraphs[name].setYRange(*self._default_yrange[name])
        # Also set the width (space) on the left of the Y axis (for the label
        # and ticks)
        self._qtgraphs[name].getAxis('left').setWidth(
            self._config['left_ax_label_space'])
    def set_y_range(self, name):
        '''
        Set the Y axis range of the plot to the max and min
        from the historic data set.
        arguments:
        - name: the plot name to set the y range
        '''
        if name not in self._historic_data or name not in self._qtgraphs:
            raise Exception('Cannot set y range for graph',
                            name, 'as it doesn\'t exist.')
        # Calculate the max and min using the larger historical data sample
        ymax = np.max(self._historic_data[name])
        ymin = np.min(self._historic_data[name])
        if ymax == ymin:
            # Flat signal: keep the current range rather than collapse it
            return
        # Pad the range by 10% of the span on each side
        span = ymax - ymin
        ymax += span * 0.1
        ymin -= span * 0.1
        # Save the range for future use
        self._yrange[name] = (ymin, ymax)
        # Set the range to the graph
        self._qtgraphs[name].setYRange(*self._yrange[name])
        self.updateTicks(name, ymax - ymin)
def restore_y_range(self, name):
'''
Restores a previously set y range.
If the y range was not previously set,
this method calls set_y_range()
arguments:
- name: the plot name to restore the y range
'''
if self._yrange[name] is None:
self.set_y_range(name)
return
self._qtgraphs[name].setYRange(*self._yrange[name])
    def updateTicks(self, name, yrange=None):
        #pylint: disable=invalid-name
        '''
        Updates the major and minor ticks
        in the graphs
        arguments:
        - name: the plot name to update tickes
        - yrange: (optinal) the yrange to use (otherwise Pyqtgraph default)
        '''
        if name not in self._qtgraphs:
            raise Exception('Cannot set ticks for graph',
                            name, 'as it doesn\'t exist.')
        ax = self._qtgraphs[name].getAxis('left')
        if yrange is None:
            # No range given: fall back to pyqtgraph automatic spacing
            ax.setTickSpacing()
        else:
            # Sligthly reduce the yrange so
            # the tick labels don't get
            # cropped on the top
            yrange -= yrange * 0.2
            # Derive tick spacing from the configured tick counts
            major_step = yrange / (self._config['n_major_ticks'] - 1)
            minor_step = major_step / (self._config['n_minor_ticks'] - 1)
            if major_step == 0 or minor_step == 0:
                # Degenerate range: let pyqtgraph decide
                ax.setTickSpacing()
            else:
                ax.setTickSpacing(major=major_step, minor=minor_step)
def set_default_x_range(self, name):
'''
Set the X axis range of the plot to the defaults
specified in the config file.
arguments:
- name: the plot name to set the x range
'''
self._qtgraphs[name].setXRange(-self._time_window, 0)
def add_x_axis_label(self, plot):
#pylint: disable=invalid-name
#pylint: disable=c-extension-no-member
'''
Adds the x axis label 'Time [s]' in the form
of a QGraphicsTextItem. This is done because it
is hard to customize the PyQtGraph label.
arguments:
- plot: the PlotDataItem to add the label
'''
self._x_label = QtGui.QGraphicsTextItem()
self._x_label.setVisible(True)
self._x_label.setHtml(
'<p style="color: %s">Time [s]:</p>' %
self._config["axis_line_color"])
# Find the position of the label
br = self._x_label.boundingRect()
p = QtCore.QPointF(0, 0)
# x = plot.size().width() / 2. - br.width() / 2.
y = plot.size().height() - br.height()
p.setX(0) # Leave it on the left, so it doesn't cover labels.
p.setY(y)
self._x_label.setPos(p)
plot.getAxis('bottom').scene().addItem(self._x_label)
    def add_looping_lines(self, name, plot):
        '''
        Add line corresponding to where the
        data is being updated when in "looping" mode.
        arguments:
        - name: the plot name to add the lines
        - plot: the PlotItem to add the lines
        '''
        # Vertical sweep line marking the current write position;
        # update_plot() moves it as new samples come in.
        self._looping_lines[name] = pg.InfiniteLine(
            pos=0,
            angle=90,
            movable=False,
            pen=pg.mkPen(
                cosmetic=False,
                width=self._time_window / 25,
                color='k',
                style=QtCore.Qt.SolidLine))
        plot.addItem(self._looping_lines[name])
def connect_monitor(self, monitor):
'''
Connect a monitor to this class by
storing it in a dictionary
arguments:
- monitor: the monitor to connect
'''
name = monitor.observable
self._monitors[name] = monitor
self._data[name] = np.linspace(0, 0, self._n_samples)
self._looping_data_idx[name] = 0
if name not in self._data:
self._data[name] = np.linspace(0, 0, self._n_samples)
print('NORMAL: Connected monitor',
monitor.configname, 'with variable', name)
def add_data_point(self, name, data_point):
'''
Adds a data point to the plot with
name 'name'
arguments:
- name: the name of the plots (and monitor if available)
- data_point: (float) the data point to add
'''
# print('NORMAL: Received data for monitor', name)
if name in self._historic_data:
# Save to the historic data dict
self._historic_data[name][:-1] = self._historic_data[name][1:]
self._historic_data[name][-1] = data_point
if name in self._data:
if self._looping:
# Looping plots - update next value
self._data[name][self._looping_data_idx[name]] = data_point
self._looping_data_idx[name] += 1
if self._looping_data_idx[name] == self._n_samples:
self._looping_data_idx[name] = 0
else:
# Scrolling plots - shift data 1 sample left
self._data[name][:-1] = self._data[name][1:]
# add the last data point
self._data[name][-1] = data_point
if name in self._plots:
self.update_plot(name)
if name in self._monitors:
self.update_monitor(name)
    def update_plot(self, name):
        '''
        Send new data from self._data to the actual pyqtgraph plot.
        arguments:
        - name: the name of the plot to update
        '''
        if not self._frozen:
            # Update the displayed plot with current data.
            # In frozen mode, we don't update the display.
            # 'rgb(X,Y,Z)' -> '(X,Y,Z)' -> numeric tuple
            color = self._colors[name]
            color = color.replace('rgb', '')
            color = literal_eval(color)
            self._plots[name].setData(
                copy(self._xdata),
                copy(self._data[name]),
                pen=pg.mkPen(color, width=self._config['line_width']))
            self.set_default_x_range(name)
            self.set_y_range(name)
            if self._looping:
                # Place the sweep line just before the next sample to write
                x_val = self._xdata[self._looping_data_idx[name]
                                    ] - self._sampling * 0.1
                self._looping_lines[name].setValue(x_val)
def freeze(self):
'''
Enter "frozen" mode, where plots are not updated, and mouse/zoom
interaction is enabled.
'''
self._frozen = True
for plot in self._qtgraphs.values():
plot.setMouseEnabled(x=True, y=True)
def unfreeze(self):
'''
Leave "frozen" mode, resetting the zoom and showing self-updating
plots.
'''
self._frozen = False
for name in self._plots:
self.update_plot(name)
for plot in self._qtgraphs.values():
plot.setMouseEnabled(x=False, y=False)
self.reset_zoom()
def reset_zoom(self):
'''
Revert to normal zoom range for each plot.
autoRange() used to set X range, then
custom values used for Y range.
'''
for name in self._qtgraphs:
self.set_default_x_range(name)
self.restore_y_range(name)
def update_monitor(self, name):
'''
Updates the values in a monitor,
if a monitor exists with this name
arguments:
- name: the name of the monitor to update
'''
if name in self._monitors:
last_data_idx = self._looping_data_idx[name] - \
1 if self._looping else -1
self._monitors[name].update_value(self._data[name][last_data_idx])
else:
return
def parse_color(self, rgb_string):
#pylint: disable=no-self-use
'''
Given a color string in format
'rgb(X,Y,Z)', it returns a list
(X,Y,Z)
arguments:
- rgb_string: (str) the rgb string 'rgb(X,Y,Z)'
'''
color = rgb_string.replace('rgb', '')
return literal_eval(color)
| [
"PyQt5.QtGui.QGraphicsTextItem",
"numpy.max",
"ast.literal_eval",
"numpy.linspace",
"pyqtgraph.mkPen",
"numpy.min",
"copy.copy",
"PyQt5.QtCore.QPointF"
] | [((2602, 2653), 'numpy.linspace', 'np.linspace', (['(-self._time_window)', '(0)', 'self._n_samples'], {}), '(-self._time_window, 0, self._n_samples)\n', (2613, 2653), True, 'import numpy as np\n'), ((3497, 3531), 'numpy.linspace', 'np.linspace', (['(0)', '(0)', 'self._n_samples'], {}), '(0, 0, self._n_samples)\n', (3508, 3531), True, 'import numpy as np\n'), ((3568, 3611), 'numpy.linspace', 'np.linspace', (['(0)', '(0)', 'self._n_historic_samples'], {}), '(0, 0, self._n_historic_samples)\n', (3579, 3611), True, 'import numpy as np\n'), ((6771, 6804), 'numpy.max', 'np.max', (['self._historic_data[name]'], {}), '(self._historic_data[name])\n', (6777, 6804), True, 'import numpy as np\n'), ((6820, 6853), 'numpy.min', 'np.min', (['self._historic_data[name]'], {}), '(self._historic_data[name])\n', (6826, 6853), True, 'import numpy as np\n'), ((9390, 9415), 'PyQt5.QtGui.QGraphicsTextItem', 'QtGui.QGraphicsTextItem', ([], {}), '()\n', (9413, 9415), False, 'from PyQt5 import QtGui, QtCore\n'), ((9678, 9698), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (9692, 9698), False, 'from PyQt5 import QtGui, QtCore\n'), ((10919, 10953), 'numpy.linspace', 'np.linspace', (['(0)', '(0)', 'self._n_samples'], {}), '(0, 0, self._n_samples)\n', (10930, 10953), True, 'import numpy as np\n'), ((15171, 15190), 'ast.literal_eval', 'literal_eval', (['color'], {}), '(color)\n', (15183, 15190), False, 'from ast import literal_eval\n'), ((3680, 3697), 'copy.copy', 'copy', (['self._xdata'], {}), '(self._xdata)\n', (3684, 3697), False, 'from copy import copy\n'), ((3699, 3721), 'copy.copy', 'copy', (['self._data[name]'], {}), '(self._data[name])\n', (3703, 3721), False, 'from copy import copy\n'), ((4554, 4608), 'pyqtgraph.mkPen', 'pg.mkPen', (['color'], {'width': "self._config['axis_line_width']"}), "(color, width=self._config['axis_line_width'])\n", (4562, 4608), True, 'import pyqtgraph as pg\n'), ((4659, 4713), 'pyqtgraph.mkPen', 'pg.mkPen', (['color'], {'width': 
"self._config['axis_line_width']"}), "(color, width=self._config['axis_line_width'])\n", (4667, 4713), True, 'import pyqtgraph as pg\n'), ((11063, 11097), 'numpy.linspace', 'np.linspace', (['(0)', '(0)', 'self._n_samples'], {}), '(0, 0, self._n_samples)\n', (11074, 11097), True, 'import numpy as np\n'), ((12931, 12950), 'ast.literal_eval', 'literal_eval', (['color'], {}), '(color)\n', (12943, 12950), False, 'from ast import literal_eval\n'), ((10405, 10502), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'cosmetic': '(False)', 'width': '(self._time_window / 25)', 'color': '"""k"""', 'style': 'QtCore.Qt.SolidLine'}), "(cosmetic=False, width=self._time_window / 25, color='k', style=\n QtCore.Qt.SolidLine)\n", (10413, 10502), True, 'import pyqtgraph as pg\n'), ((13006, 13023), 'copy.copy', 'copy', (['self._xdata'], {}), '(self._xdata)\n', (13010, 13023), False, 'from copy import copy\n'), ((13041, 13063), 'copy.copy', 'copy', (['self._data[name]'], {}), '(self._data[name])\n', (13045, 13063), False, 'from copy import copy\n'), ((13085, 13134), 'pyqtgraph.mkPen', 'pg.mkPen', (['color'], {'width': "self._config['line_width']"}), "(color, width=self._config['line_width'])\n", (13093, 13134), True, 'import pyqtgraph as pg\n')] |
import multiprocessing
import time
from multiprocessing import Queue
import numpy as np
import os
from keras import Input, Model
from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten
from keras_vggface.vggface import VGGFace
from skimage.feature import hog
from skimage.metrics import structural_similarity as ssim
from skimage.transform import resize
from sklearn.metrics import confusion_matrix, roc_curve
import pretrained_networks
import projector
from blink_detection import check_blink
from training import dataset, misc
##used to perform most of the testing
#uses iterative projection on image
def project_images_dataset(proj, dataset_name, data_dir, num_snapshots = 2):
    '''
    Projects images from a TFRecord dataset through the given projector,
    collecting the SSIM / MSE of each projection against the original.
    Returns (all_ssim, all_mse, labels, avg_time).
    NOTE(review): the unconditional `break` at the bottom of the loop stops
    processing after the first image (looks like a debugging leftover), and
    `avg_time` is unbound if the dataset yields no images — confirm intent.
    '''
    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=1, repeat=False, shuffle_mb=0)
    all_ssim = []
    all_mse = []
    all_times = []
    labels = []
    image_idx = 0
    while (True):
        #print('Projecting image %d ...' % (image_idx), flush=True)
        try:
            # get_minibatch_np raises when the (non-repeating) dataset
            # is exhausted, which terminates the loop
            images, label = dataset_obj.get_minibatch_np(1)
            labels.append(label)
        except:
            break
        # Rescale pixel values from [0, 255] to the network's [-1, 1]
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        start = time.time()
        img,temp_ssim, temp_mse = project_image(proj, targets=images,img_num=image_idx, num_snapshots=num_snapshots)
        end = time.time()
        all_ssim.append(temp_ssim)
        all_mse.append(temp_mse)
        all_times.append(end-start)
        # print("Time to process image: ", end-start , flush=True)
        avg_time = sum(all_times)/len(all_times)
        image_idx += 1
        break
    return all_ssim, all_mse, labels,avg_time
#used to project single image
def project_image(proj, targets, img_num, num_snapshots):
    '''
    Runs the projector for proj.num_steps steps on a single target image
    and compares the final projection with the original.
    Returns (projected_image, ssim, mse) from inside the loop on the last
    step; the trailing print is only reached if that branch never fires.
    '''
    # Steps at which snapshots would be saved (saving itself is commented out)
    snapshot_steps = set(proj.num_steps - np.linspace(0, proj.num_steps, num_snapshots, endpoint=False, dtype=int))
    #misc.save_image_grid(targets, png_prefix + 'target.png', drange=[-1,1])
    proj.start(targets)
    while proj.get_cur_step() < proj.num_steps:
        print('\rProjecting image %d: %d / %d ... ' % (img_num,proj.get_cur_step(), proj.num_steps), end='', flush=True)
        proj.step()
        if proj.get_cur_step() == proj.num_steps:
            #misc.save_image_grid(proj.get_images(), png_prefix + 'step%04d.png' % proj.get_cur_step(), drange=[-1,1])
            # Convert both images to uint8 PIL-style arrays, then
            # standardise them before computing the similarity metrics
            imreal = np.array(misc.convert_to_pil_image(targets[0]))
            improj = np.array(misc.convert_to_pil_image(proj.get_images()[0]))
            imreal = normalize_img(imreal)
            improj = normalize_img(improj)
            temp_mse = mse(imreal,improj)
            temp_ssim = ssim(imreal,improj,multichannel=True)
            # print(temp_ssim, temp_mse , flush=True)
            # print(improj)
            # print(imreal)
            # misc.convert_to_pil_image(targets[0]).save("test_img_real.png")
            # misc.convert_to_pil_image(proj.get_images()[0]).save("test_img.png")
            return improj,temp_ssim, temp_mse
    print('\r%-30s\r' % '', end='', flush=True)
#helper
def normalize_img(pixels):
    '''
    Standardise an image array (zero mean, unit variance), rescale it
    onto [0, 1], then floor to int16 — so every entry becomes 0 except
    the maxima, which map to 1.
    Returns None when the standard deviation is NaN (e.g. empty input).
    '''
    mu, sigma = pixels.mean(), pixels.std()
    if np.isnan(sigma):
        return None
    standardised = (pixels - mu) / sigma
    rescaled = (standardised - np.min(standardised)) / np.ptp(standardised)
    return np.floor(rescaled).astype('int16')
#old, only tests projecting atm
def test_network(weight,threshold_dataset_dir, test_dataset_dir,threshold_set,test_set, numsteps = 10 ,queue = None):
    '''
    Loads a GAN checkpoint and projects the test set, returning
    (hter_ssim, hter_mse, hter_combined, avg_time).
    NOTE(review): the thresholding/metric computation is commented out,
    so the three HTER values are currently hard-coded to 0 and only the
    average projection time is meaningful.
    '''
    print('Loading networks from "%s"...' % weight)
    _G, _D, Gs = pretrained_networks.load_networks(weight)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = numsteps
    #threshold_ssim, threshold_mse,threshold_labels,_ = project_images_dataset(proj,threshold_set,threshold_dataset_dir)
    # threshold_ssim_value = getThreshold(threshold_ssim,threshold_labels)
    # threshold_mse_value = getThreshold(threshold_mse,threshold_labels)
    test_ssim, test_mse, test_labels,avg_time = project_images_dataset(proj,test_set,test_dataset_dir)
    # thresholded_test_ssim = threshold_values(test_ssim,threshold_ssim_value,False)
    # thresholded_test_mse = threshold_values(test_mse,threshold_mse_value,True)
    # combined_measure_test = [x or y for x,y in zip(thresholded_test_mse, thresholded_test_ssim)]
    #
    # hter_ssim = calculate_metrics(thresholded_test_ssim,test_labels,"SSIM")
    # hter_mse = calculate_metrics(thresholded_test_mse,test_labels,"MSE")
    # hter_combined = calculate_metrics(combined_measure_test,test_labels,"Combined")
    hter_ssim = 0
    hter_mse = 0
    hter_combined = 0
    if queue is not None:
        # Report results keyed by the checkpoint number in the weight filename
        queue.put((weight.split("-")[-1].split(".")[0], hter_ssim, hter_mse,hter_combined))
    return hter_ssim,hter_mse,hter_combined,avg_time
#helper
def getThreshold(predictions,labels):
    '''
    Computes the Equal Error Rate threshold for a list of scores via the
    ROC curve, after mapping dataset-specific labels to binary classes.
    arguments:
    - predictions: per-sample scores
    - labels: raw dataset labels (scalars or [[v]]-shaped arrays)
    returns: the score threshold where FNR == FPR (approximately)
    '''
    # Two different label encodings are in use; >13 selects the one
    # where values above 20 are attacks, otherwise 1/2/9 are attacks
    if max(labels) > 13:
        labels = [0 if x > 20 else 1 for x in labels]
    else:
        labels = [0 if x in [1,2,9] else 1 for x in labels]
    fpr, tpr, threshold = roc_curve(labels, predictions, pos_label=1)
    if sum(tpr)/len(tpr) < 0.5:
        # Scores are anti-correlated with the positive class: negate them
        # so the ROC is computed in the right direction, then flip the
        # thresholds back to the original scale
        fpr, tpr, threshold = roc_curve(labels, -np.array(predictions), pos_label=1)
        threshold = -np.array(threshold)
    fnr = 1 - tpr
    # EER point: where the false negative and false positive rates cross
    eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
    eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
    print(eer_threshold)
    print(eer)
    print("fpr = ", list(fpr))
    print("tpr = ", list(tpr))
    print("train_threshold = ", list(threshold))
    print("train_hter = " , [(fpr[i] + (1-tpr[i]))/2 for i in range(0,len(fpr))])
    return eer_threshold
#asc is true if the higher values are positive
def threshold_values(predictions,threshold, asc = True):
if asc:
return [0 if x < threshold else 1 for x in predictions]
else:
return [0 if x > threshold else 1 for x in predictions]
#interprets labels and calcs HTER
def calculate_metrics(thresholded_values,labels,name):
print("Calculating metrics for method: " + name)
true_labels = [x[0][0] for x in labels ]
if max(true_labels) > 12:
labels = [0 if x > 20 else 1 for x in labels]
elif max(true_labels) < 2:
labels = [x[0][0] for x in labels]
elif max(true_labels) < 3:
labels = [int(x) for x in labels]
else:
labels = [0 if x in [1,2,9] else 1 for x in labels]
wrongly_classified = []
for i in range(0,len(true_labels)):
if thresholded_values[i] != labels[i]:
wrongly_classified.append(true_labels[i])
print("wrongly classified: ", wrongly_classified)
conf = confusion_matrix(labels,thresholded_values)
print(conf)
TN = conf[0][0]
FN = conf[1][0]
TP = conf[1][1]
FP = conf[0][1]
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
hter = (FPR + FNR)/2
print("FPR: ")
print(FPR)
print("FNR: ")
print(FNR)
print("HTER: ")
print(hter)
return hter
#helper
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
#uses old test,used for testing optimal it
def test_different_nums(weight,train_set,test_set):
threshold_dataset_dir = "/".join(train_set.split("/")[:-1])
threshold_sets = train_set.split("/")[-1]
test_dataset_dir = "/".join(test_set.split("/")[:-1])
test_sets = test_set.split("/")[-1]
nums = [1,2,5,10,20,50,100]
all_ssim = []
all_mse = []
all_combined = []
all_times = []
for num in nums:
temp_ssim,temp_mse,temp_combined,avg_time = test_network(weight, threshold_dataset_dir, test_dataset_dir, threshold_sets, test_sets,num)
all_ssim.append(temp_ssim)
all_mse.append(temp_mse)
all_combined.append(temp_combined)
all_times.append(avg_time)
print("Results")
print("nb It", "SSIM", "MSE", "Avg Time")
for i in range(len(nums)):
print(nums[i],all_ssim[i],all_mse[i],all_combined[i],all_times[i])
#function used to produce final results
def test_diff_weights(weights,training_tf,test_tf):
threshold_dataset_dir = "/".join(training_tf.split("/")[:-1])
threshold_sets = training_tf.split("/")[-1]
test_dataset_dir = "/".join(test_tf.split("/")[:-1])
test_sets = test_tf.split("/")[-1]
queue = Queue()
all_results = []
for weight in weights:
p = multiprocessing.Process(target=test_full_network, args=(weight, threshold_dataset_dir, test_dataset_dir, threshold_sets, test_sets, 2,queue))
p.start()
all_results.append(queue.get())
p.join()
print("weight","SSIM","MSE","Siamese","Combined_Scale_Sum","full network test","Discriminator,Blink")
for i in range(0,len(weights)):
for j in range(0,len(all_results[i])):
if j == 0:
print(all_results[i][j], end= " ")
elif j == len(all_results[i])-1:
print(all_results[i][j] * 100)
else:
print(all_results[i][j] * 100, end= " ")
#helper test funciton
def gen_disc_test(proj, D, dataset_name, data_dir, num_snapshots = 2, queue = None):
print('Loading images from "%s"...' % dataset_name)
dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=2, repeat=False, shuffle_mb=0)
all_ssim = []
all_mse = []
labels = []
discrim = []
image_idx = 0
all_real_img = []
all_proj_img = []
all_blink_detects = []
while (True):
#print('Projecting image %d ...' % (image_idx), flush=True)
# if image_idx == 10 :
# break
try:
images, label = dataset_obj.get_minibatch_np(1)
# print(label)
if not len(label[0]) == 1:
label,name = label[0]
label = np.array([[label]])
else:
name = None
labels.append(label)
except:
break
images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
img,temp_ssim, temp_mse = project_image(proj, targets=images,img_num=image_idx, num_snapshots=num_snapshots)
all_ssim.append(temp_ssim)
all_mse.append(temp_mse)
all_real_img.append(np.array(misc.convert_to_pil_image(images[0])))
all_proj_img.append(img)
if name is not None:
all_blink_detects.append(check_blink(parse_num(name,data_dir)))
test = images
# test = np.array(normalize_img(images))
discrim.append(D.run(test, None)[0][0])
image_idx += 1
if queue is not None:
queue.put((all_ssim, all_mse, labels,discrim,all_real_img,all_proj_img,all_blink_detects))
return [all_ssim, all_mse, labels,discrim,all_real_img,all_proj_img,all_blink_detects]
#wrapper for gen_disc_test
def gen_disc_process(weight,numsteps, threshold_set, threshold_dataset_dir, test_set, test_dataset_dir, queue_proj = None):
print('Loading networks from "%s"...' % weight)
_, D, Gs = pretrained_networks.load_networks(weight)
proj = projector.Projector()
proj.set_network(Gs)
proj.num_steps = numsteps
threshold_results = gen_disc_test(proj,D,threshold_set,threshold_dataset_dir)
test_results = gen_disc_test(proj,D,test_set,test_dataset_dir)
if queue_proj is not None:
queue_proj.put((threshold_results,test_results))
return threshold_results , test_results
#Helper function that evaluates single GAN
def test_full_network(weight,threshold_dataset_dir, test_dataset_dir,threshold_set,test_set, numsteps = 10 ,queue = None):
queue_proj = Queue()
p = multiprocessing.Process(target=gen_disc_process, args=(weight,numsteps, threshold_set, threshold_dataset_dir, test_set, test_dataset_dir, queue_proj))
p.start()
threshold_res, test_res = queue_proj.get()
p.join()
threshold_ssim, threshold_mse, threshold_labels, discriminator_values, threshold_real_img, threshold_proj_img, detected_blinks = threshold_res
test_ssim, test_mse, test_labels, test_discriminator_values, test_real_img, test_proj_img, test_detected_blinks = test_res
normalized_mse = 1 - (threshold_mse - min(threshold_mse)) / max((threshold_mse - min(threshold_mse)))
normalized_disc_values = (discriminator_values - min(discriminator_values)) / max((discriminator_values - min(discriminator_values)))
combined_sum = [x + y for x, y in zip(threshold_ssim, normalized_mse)]
normalized_combined_sum = (combined_sum - min(combined_sum)) / max((combined_sum - min(combined_sum)))
full_network_combined_sum = [x + y for x, y in zip(normalized_combined_sum, normalized_disc_values)]
print("threshold ssim: ")
threshold_ssim_value = getThreshold(threshold_ssim,threshold_labels)
print("threshold mse: ")
threshold_mse_value = getThreshold(threshold_mse,threshold_labels)
print("threshold discriminator: ")
threshold_discriminator = getThreshold(discriminator_values,threshold_labels)
print("threshold combined sum")
threshold_combined = getThreshold(combined_sum,threshold_labels)
print("threshold full network")
threshold_full_network_combined = getThreshold(full_network_combined_sum,threshold_labels)
if max(threshold_labels) > 12:
labels = [0 if x > 20 else 1 for x in threshold_labels]
else:
labels = [0 if x in [1,2,9] else 1 for x in threshold_labels]
if max(test_labels) > 12:
val_labels = [0 if x > 20 else 1 for x in test_labels]
else:
val_labels = [0 if x in [1,2,9] else 1 for x in test_labels]
rescaled_th_real = [resize(x, (224, 224, 3)) for x in threshold_real_img]
rescaled_th_proj = [resize(x, (224, 224, 3)) for x in threshold_proj_img]
rescaled_test_real = [resize(x, (224, 224, 3)) for x in test_real_img]
rescaled_test_proj = [resize(x, (224, 224, 3)) for x in test_proj_img]
model = train_model(rescaled_th_real,rescaled_th_proj,rescaled_test_real,rescaled_test_proj,labels,val_labels)
normalized_mse = 1 - (test_mse - min(test_mse)) / max((test_mse - min(test_mse)))
normalized_disc_values_test = (test_discriminator_values - min(test_discriminator_values)) / max((test_discriminator_values - min(test_discriminator_values)))
combined_sum = [x + y for x, y in zip(test_ssim, normalized_mse)]
normalized_combined_sum = (combined_sum - min(combined_sum)) / max((combined_sum - min(combined_sum)))
full_network_combined_sum_test = [x + y for x, y in zip(normalized_combined_sum, normalized_disc_values_test)]
thresholded_test_ssim = threshold_values(test_ssim,threshold_ssim_value,False)
thresholded_test_mse = threshold_values(test_mse,threshold_mse_value,True)
thresholded_test_discriminator = threshold_values(test_discriminator_values,threshold_discriminator,False)
thresholded_combined_sum = threshold_values(combined_sum,threshold_combined,False)
thresholded_full_network_combined = threshold_values(full_network_combined_sum_test,threshold_full_network_combined,False)
print("threshold siamese network")
siamese_threshold_values = [model.predict([[x],[y]])[0][0] for x,y in zip(rescaled_th_real,rescaled_th_proj) ]
siamese_threshold = getThreshold(siamese_threshold_values,threshold_labels)
siamese_test_values = [model.predict([[x],[y]])[0][0] for x,y in zip(rescaled_test_real,rescaled_test_proj) ]
siamese_test = threshold_values(siamese_test_values,siamese_threshold,False)
combined_network_test = [x or y for x,y in zip(thresholded_combined_sum, thresholded_test_discriminator)]
# print("ROC values combined sum")
# print_roc_values(combined_sum,val_labels)
# print("ROC values MSE")
# print_roc_values(test_mse,val_labels)
# print("ROC values SSIM")
# print_roc_values(test_ssim,val_labels)
# print("ROC values Full network combined")
# print_roc_values(full_network_combined_sum_test,val_labels)
# print("ROC values full network &&")
# print_roc_values(combined_network_test,val_labels)
# print("ROC values siamese network")
# print_roc_values(siamese_test_values,val_labels)
added_blink_test = [(not x) or y for x,y in zip(test_detected_blinks,combined_network_test)]
added_blink_test_with_scaled_sum = [(not x) or y for x,y in zip(test_detected_blinks,thresholded_full_network_combined)]
hter_ssim = calculate_metrics(thresholded_test_ssim,test_labels,"SSIM")
hter_mse = calculate_metrics(thresholded_test_mse,test_labels,"MSE")
hter_combined = calculate_metrics(siamese_test,test_labels,"Siamese_nn")
hter_combined_sum = calculate_metrics(thresholded_combined_sum,test_labels,"combined_sum")
hter_discriminator = calculate_metrics(thresholded_test_discriminator,test_labels,"discriminator")
hter_network = calculate_metrics(combined_network_test,test_labels,"network")
hter_combined_network= calculate_metrics(thresholded_full_network_combined,test_labels,"scaled_sum_network")
hter_just_blinks = calculate_metrics([not x for x in test_detected_blinks],test_labels,"Just blinky")
hter_blink_test = calculate_metrics(added_blink_test,test_labels,"Blinky Test")
hter_blink_test_scaled_sum = calculate_metrics(added_blink_test_with_scaled_sum,test_labels,"Blinky Test with scaled sum GAN")
if queue is not None:
queue.put((weight.split("-")[-1].split(".")[0], hter_ssim, hter_mse,hter_combined,hter_combined_sum,hter_network,hter_discriminator,hter_blink_test))
return hter_ssim,hter_mse,hter_combined,hter_network
#used in training a siamese network
def train_model(rescaled_th_real,rescaled_th_proj,rescaled_test_real,rescaled_test_proj,labels,val_labels,queue=None):
    """Train a two-branch VGGFace classifier on (real, projected) image pairs.

    Both VGGFace backbones are frozen; only the dense head is trained.
    When `queue` is given, the fitted model is also pushed onto it so the
    function can run inside a multiprocessing worker.
    """
    print("started model")
    real_img_input = Input(shape=(224, 224, 3))
    proj_img_input = Input(shape=(224, 224, 3))
    real_branch = VGGFace(include_top=False, input_tensor=real_img_input)
    proj_branch = VGGFace(include_top=False, input_tensor=proj_img_input)
    # Rename the projected branch's layers so the two backbones do not
    # collide, and freeze both backbones.
    for layer in proj_branch.layers:
        layer.name = layer.name + "proj"
        layer.trainable = False
    for layer in real_branch.layers:
        layer.trainable = False
    # Dense classification head on the concatenated backbone features.
    features = concatenate([real_branch.output, proj_branch.output], axis=-1)
    head = Flatten()(features)
    head = Dense(128, activation="relu")(head)
    head = Dense(64, activation="relu")(head)
    prediction = Dense(1, activation="sigmoid")(head)
    model = Model(inputs=[real_img_input, proj_img_input], outputs=prediction)
    model.summary()
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit([np.stack(rescaled_th_real), np.stack(rescaled_th_proj)], labels, epochs=10,
              validation_data=([np.stack(rescaled_test_real), np.stack(rescaled_test_proj)], np.stack(val_labels)))
    if queue is not None:
        queue.put(model)
    return model
#used in training siamese network
def create_base_network(input_shape):
    """Build a small convolutional encoder for one branch of a siamese network."""
    input_img = Input(shape=input_shape)
    # (filters, kernel size) per conv stage; every stage except the last
    # is followed by 2x2 max pooling.
    stages = [(64, 5), (64, 5), (32, 3), (32, 3), (32, 3), (32, 3)]
    x = input_img
    for idx, (filters, k) in enumerate(stages):
        x = Conv2D(filters, (k, k), activation='relu', padding='same')(x)
        if idx < len(stages) - 1:
            x = MaxPooling2D((2, 2), padding='same')(x)
    return Model(input_img, x)
#helper
def print_roc_values(predictions,labels):
    """Print ROC diagnostics (predictions, fpr, tpr, thresholds, HTER)."""
    scores = [abs(p) for p in predictions]
    print("pred = ", list(zip(scores, labels)))
    fpr, tpr, threshold = roc_curve(labels, scores, pos_label=1)
    # If the curve came out inverted (mean tpr below 0.5), flip the sign of
    # the scores and recompute so the thresholds stay interpretable.
    if sum(tpr) / len(tpr) < 0.5:
        fpr, tpr, threshold = roc_curve(labels, -np.array(scores), pos_label=1)
        threshold = -np.array(threshold)
    print("fpr = ", list(fpr))
    print("tpr = ", list(tpr))
    print("threshold = ", list(threshold))
    # Half total error rate at each operating point.
    print("hter = ", [(fp + (1 - tp)) / 2 for fp, tp in zip(fpr, tpr)])
#helper test
def apply_hog(image):
    """Return the HOG visualization of `image`, wrapped in a single-element list."""
    _, hog_image = hog(image, orientations=8, pixels_per_cell=(4, 4),
                       cells_per_block=(1, 1), visualize=True, multichannel=True)
    return [hog_image]
#interprets labeling of tfrecord dataset
def parse_num(num, dataset):
    """Decode a tfrecord numeric label into the path of the source video.

    The integer encodes the subject id in all but the last two digits and
    the per-subject video number in the last two digits. The layout of the
    returned path depends on which dataset the record belongs to.
    """
    digits = str(int(num))
    person = digits[:-2]        # subject id: everything but the last two digits
    vid_num = int(digits[-2:])  # per-subject video number: last two digits
    if "casia" in dataset:
        base = "../databases/casia-fasd/test/test_release"
        vid_name = str(vid_num) if vid_num < 9 else "HR_" + str(vid_num - 8)
        return base + "/" + person + "/" + vid_name + ".avi"
    if "replay" in dataset:
        base = "../databases/replay-attack/test"
        attack_or_real = "attack" if vid_num < 21 else "real"
        controlled_ids = (1, 2, 3, 4, 5, 11, 12, 13, 14, 15, 21, 22)
        adverse_controlled = "controlled" if vid_num in controlled_ids else "adverse"
        client = person.zfill(3)
        if attack_or_real == "attack":
            # Map the video number onto (device, medium): five attack types
            # cycled over four groups of five video numbers.
            if vid_num in (1, 6, 11, 16):
                full_name = "attack_highdef_client" + client + "_session01_" + "highdef_photo_" + adverse_controlled + ".mov"
            elif vid_num in (2, 7, 12, 17):
                full_name = "attack_highdef_client" + client + "_session01_" + "highdef_video_" + adverse_controlled + ".mov"
            elif vid_num in (3, 8, 13, 18):
                full_name = "attack_mobile_client" + client + "_session01_" + "mobile_photo_" + adverse_controlled + ".mov"
            elif vid_num in (4, 9, 14, 19):
                full_name = "attack_mobile_client" + client + "_session01_" + "mobile_video_" + adverse_controlled + ".mov"
            elif vid_num in (5, 10, 15, 20):
                full_name = "attack_print_client" + client + "_session01_" + "highdef_photo_" + adverse_controlled + ".mov"
            attack_mode = "fixed" if vid_num < 11 else "hand"
            return base + "/" + attack_or_real + "/" + attack_mode + "/" + full_name
        elif attack_or_real == "real":
            session = str(vid_num - 20 if adverse_controlled == "controlled" else vid_num - 22)
            full_name = "client" + client + "_session01_webcam_authenticate_" + adverse_controlled + "_" + session + ".mov"
            return base + "/" + attack_or_real + "/" + full_name
if __name__ == '__main__':
    # Run the weight sweep on the most recent .pkl checkpoint only.
    weights_dir = "../weights/stylegan2_straight_faces/"
    pkl_paths = [os.path.join(weights_dir, name)
                 for name in os.listdir(weights_dir) if name.endswith(".pkl")]
    weights = [sorted(pkl_paths)[-1]]
    test_diff_weights(weights,"../databases/replay-attack/faces/devel_faces_tf", "../databases/casia-fasd/faces/test_faces_tf" )
| [
"numpy.ptp",
"keras.layers.Conv2D",
"multiprocessing.Process",
"training.misc.adjust_dynamic_range",
"numpy.array",
"sklearn.metrics.roc_curve",
"keras.layers.Dense",
"training.misc.convert_to_pil_image",
"os.listdir",
"skimage.metrics.structural_similarity",
"keras.Model",
"pretrained_network... | [((781, 897), 'training.dataset.load_dataset', 'dataset.load_dataset', ([], {'data_dir': 'data_dir', 'tfrecord_dir': 'dataset_name', 'max_label_size': '(1)', 'repeat': '(False)', 'shuffle_mb': '(0)'}), '(data_dir=data_dir, tfrecord_dir=dataset_name,\n max_label_size=1, repeat=False, shuffle_mb=0)\n', (801, 897), False, 'from training import dataset, misc\n'), ((3210, 3223), 'numpy.isnan', 'np.isnan', (['std'], {}), '(std)\n', (3218, 3223), True, 'import numpy as np\n'), ((3621, 3662), 'pretrained_networks.load_networks', 'pretrained_networks.load_networks', (['weight'], {}), '(weight)\n', (3654, 3662), False, 'import pretrained_networks\n'), ((3674, 3695), 'projector.Projector', 'projector.Projector', ([], {}), '()\n', (3693, 3695), False, 'import projector\n'), ((5091, 5134), 'sklearn.metrics.roc_curve', 'roc_curve', (['labels', 'predictions'], {'pos_label': '(1)'}), '(labels, predictions, pos_label=1)\n', (5100, 5134), False, 'from sklearn.metrics import confusion_matrix, roc_curve\n'), ((6680, 6724), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 'thresholded_values'], {}), '(labels, thresholded_values)\n', (6696, 6724), False, 'from sklearn.metrics import confusion_matrix, roc_curve\n'), ((8744, 8751), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (8749, 8751), False, 'from multiprocessing import Queue\n'), ((9645, 9761), 'training.dataset.load_dataset', 'dataset.load_dataset', ([], {'data_dir': 'data_dir', 'tfrecord_dir': 'dataset_name', 'max_label_size': '(2)', 'repeat': '(False)', 'shuffle_mb': '(0)'}), '(data_dir=data_dir, tfrecord_dir=dataset_name,\n max_label_size=2, repeat=False, shuffle_mb=0)\n', (9665, 9761), False, 'from training import dataset, misc\n'), ((11443, 11484), 'pretrained_networks.load_networks', 'pretrained_networks.load_networks', (['weight'], {}), '(weight)\n', (11476, 11484), False, 'import pretrained_networks\n'), ((11496, 11517), 'projector.Projector', 'projector.Projector', ([], 
{}), '()\n', (11515, 11517), False, 'import projector\n'), ((12040, 12047), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (12045, 12047), False, 'from multiprocessing import Queue\n'), ((12056, 12215), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'gen_disc_process', 'args': '(weight, numsteps, threshold_set, threshold_dataset_dir, test_set,\n test_dataset_dir, queue_proj)'}), '(target=gen_disc_process, args=(weight, numsteps,\n threshold_set, threshold_dataset_dir, test_set, test_dataset_dir,\n queue_proj))\n', (12079, 12215), False, 'import multiprocessing\n'), ((18320, 18346), 'keras.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (18325, 18346), False, 'from keras import Input, Model\n'), ((18368, 18394), 'keras.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (18373, 18394), False, 'from keras import Input, Model\n'), ((18521, 18576), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'include_top': '(False)', 'input_tensor': 'real_img_input'}), '(include_top=False, input_tensor=real_img_input)\n', (18528, 18576), False, 'from keras_vggface.vggface import VGGFace\n'), ((18598, 18653), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'include_top': '(False)', 'input_tensor': 'proj_img_input'}), '(include_top=False, input_tensor=proj_img_input)\n', (18605, 18653), False, 'from keras_vggface.vggface import VGGFace\n'), ((18859, 18927), 'keras.layers.concatenate', 'concatenate', (['[processed_real.output, processed_proj.output]'], {'axis': '(-1)'}), '([processed_real.output, processed_proj.output], axis=-1)\n', (18870, 18927), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((19099, 19165), 'keras.Model', 'Model', ([], {'inputs': '[real_img_input, proj_img_input]', 'outputs': 'prediction'}), '(inputs=[real_img_input, proj_img_input], outputs=prediction)\n', (19104, 19165), False, 'from keras import Input, Model\n'), ((19821, 19845), 
'keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (19826, 19845), False, 'from keras import Input, Model\n'), ((20494, 20513), 'keras.Model', 'Model', (['input_img', 'x'], {}), '(input_img, x)\n', (20499, 20513), False, 'from keras import Input, Model\n'), ((20693, 20736), 'sklearn.metrics.roc_curve', 'roc_curve', (['labels', 'predictions'], {'pos_label': '(1)'}), '(labels, predictions, pos_label=1)\n', (20702, 20736), False, 'from sklearn.metrics import confusion_matrix, roc_curve\n'), ((21133, 21246), 'skimage.feature.hog', 'hog', (['image'], {'orientations': '(8)', 'pixels_per_cell': '(4, 4)', 'cells_per_block': '(1, 1)', 'visualize': '(True)', 'multichannel': '(True)'}), '(image, orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1),\n visualize=True, multichannel=True)\n', (21136, 21246), False, 'from skimage.feature import hog\n'), ((23632, 23648), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (23642, 23648), False, 'import os\n'), ((1226, 1278), 'training.misc.adjust_dynamic_range', 'misc.adjust_dynamic_range', (['images', '[0, 255]', '[-1, 1]'], {}), '(images, [0, 255], [-1, 1])\n', (1251, 1278), False, 'from training import dataset, misc\n'), ((1295, 1306), 'time.time', 'time.time', ([], {}), '()\n', (1304, 1306), False, 'import time\n'), ((1438, 1449), 'time.time', 'time.time', ([], {}), '()\n', (1447, 1449), False, 'import time\n'), ((3321, 3335), 'numpy.ptp', 'np.ptp', (['pixels'], {}), '(pixels)\n', (3327, 3335), True, 'import numpy as np\n'), ((8813, 8963), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'test_full_network', 'args': '(weight, threshold_dataset_dir, test_dataset_dir, threshold_sets, test_sets,\n 2, queue)'}), '(target=test_full_network, args=(weight,\n threshold_dataset_dir, test_dataset_dir, threshold_sets, test_sets, 2,\n queue))\n', (8836, 8963), False, 'import multiprocessing\n'), ((10406, 10458), 'training.misc.adjust_dynamic_range', 
'misc.adjust_dynamic_range', (['images', '[0, 255]', '[-1, 1]'], {}), '(images, [0, 255], [-1, 1])\n', (10431, 10458), False, 'from training import dataset, misc\n'), ((14031, 14055), 'skimage.transform.resize', 'resize', (['x', '(224, 224, 3)'], {}), '(x, (224, 224, 3))\n', (14037, 14055), False, 'from skimage.transform import resize\n'), ((14109, 14133), 'skimage.transform.resize', 'resize', (['x', '(224, 224, 3)'], {}), '(x, (224, 224, 3))\n', (14115, 14133), False, 'from skimage.transform import resize\n'), ((14189, 14213), 'skimage.transform.resize', 'resize', (['x', '(224, 224, 3)'], {}), '(x, (224, 224, 3))\n', (14195, 14213), False, 'from skimage.transform import resize\n'), ((14264, 14288), 'skimage.transform.resize', 'resize', (['x', '(224, 224, 3)'], {}), '(x, (224, 224, 3))\n', (14270, 14288), False, 'from skimage.transform import resize\n'), ((18937, 18946), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (18944, 18946), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((18963, 18992), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (18968, 18992), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((19004, 19032), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (19009, 19032), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((19053, 19083), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (19058, 19083), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((19854, 19907), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (5, 5), activation='relu', padding='same')\n", (19860, 19907), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), 
((19926, 19962), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (19938, 19962), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((19974, 20027), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (5, 5), activation='relu', padding='same')\n", (19980, 20027), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20039, 20075), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (20051, 20075), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20087, 20140), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (20093, 20140), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20152, 20188), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (20164, 20188), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20200, 20253), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (20206, 20253), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20265, 20301), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (20277, 20301), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20313, 20366), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (20319, 20366), 
False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20378, 20414), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (20390, 20414), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((20426, 20479), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (20432, 20479), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, concatenate, Flatten\n'), ((23664, 23685), 'os.path.join', 'os.path.join', (['path', 'x'], {}), '(path, x)\n', (23676, 23685), False, 'import os\n'), ((1886, 1958), 'numpy.linspace', 'np.linspace', (['(0)', 'proj.num_steps', 'num_snapshots'], {'endpoint': '(False)', 'dtype': 'int'}), '(0, proj.num_steps, num_snapshots, endpoint=False, dtype=int)\n', (1897, 1958), True, 'import numpy as np\n'), ((2720, 2759), 'skimage.metrics.structural_similarity', 'ssim', (['imreal', 'improj'], {'multichannel': '(True)'}), '(imreal, improj, multichannel=True)\n', (2724, 2759), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((3303, 3317), 'numpy.min', 'np.min', (['pixels'], {}), '(pixels)\n', (3309, 3317), True, 'import numpy as np\n'), ((3349, 3365), 'numpy.floor', 'np.floor', (['pixels'], {}), '(pixels)\n', (3357, 3365), True, 'import numpy as np\n'), ((5273, 5292), 'numpy.array', 'np.array', (['threshold'], {}), '(threshold)\n', (5281, 5292), True, 'import numpy as np\n'), ((5354, 5376), 'numpy.absolute', 'np.absolute', (['(fnr - fpr)'], {}), '(fnr - fpr)\n', (5365, 5376), True, 'import numpy as np\n'), ((5408, 5430), 'numpy.absolute', 'np.absolute', (['(fnr - fpr)'], {}), '(fnr - fpr)\n', (5419, 5430), True, 'import numpy as np\n'), ((19473, 19499), 'numpy.stack', 'np.stack', (['rescaled_th_real'], {}), '(rescaled_th_real)\n', (19481, 19499), True, 'import numpy as np\n'), ((19501, 
19527), 'numpy.stack', 'np.stack', (['rescaled_th_proj'], {}), '(rescaled_th_proj)\n', (19509, 19527), True, 'import numpy as np\n'), ((20875, 20894), 'numpy.array', 'np.array', (['threshold'], {}), '(threshold)\n', (20883, 20894), True, 'import numpy as np\n'), ((2450, 2487), 'training.misc.convert_to_pil_image', 'misc.convert_to_pil_image', (['targets[0]'], {}), '(targets[0])\n', (2475, 2487), False, 'from training import dataset, misc\n'), ((5216, 5237), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (5224, 5237), True, 'import numpy as np\n'), ((10256, 10275), 'numpy.array', 'np.array', (['[[label]]'], {}), '([[label]])\n', (10264, 10275), True, 'import numpy as np\n'), ((10681, 10717), 'training.misc.convert_to_pil_image', 'misc.convert_to_pil_image', (['images[0]'], {}), '(images[0])\n', (10706, 10717), False, 'from training import dataset, misc\n'), ((19642, 19662), 'numpy.stack', 'np.stack', (['val_labels'], {}), '(val_labels)\n', (19650, 19662), True, 'import numpy as np\n'), ((20818, 20839), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (20826, 20839), True, 'import numpy as np\n'), ((19581, 19609), 'numpy.stack', 'np.stack', (['rescaled_test_real'], {}), '(rescaled_test_real)\n', (19589, 19609), True, 'import numpy as np\n'), ((19611, 19639), 'numpy.stack', 'np.stack', (['rescaled_test_proj'], {}), '(rescaled_test_proj)\n', (19619, 19639), True, 'import numpy as np\n')] |
import os
import scipy.misc as im
import numpy as np
# Compute per-channel pixel statistics over every image in ./dataset.
files = os.listdir("dataset")
# Accumulate in float64: the decoded frames are uint8, and accumulating in
# the original dtype silently wraps around at 256 (integer overflow).
r = 0.0
g = 0.0
b = 0.0
for f in files:
    image = im.imread("dataset/" + f, mode='RGB').astype(np.float64)
    r += image[:, :, 0]
    g += image[:, :, 1]
    b += image[:, :, 2]
    print(image.shape)
# Collapse the per-pixel sums to one scalar per channel.
r = np.sum(r)
g = np.sum(g)
b = np.sum(b)
print(r, g, b)
# Normalise by the pixel count of a single 256x256 frame.
# NOTE(review): this yields len(files) * mean, not the dataset mean; dividing
# by (256 * 256 * len(files)) would give the true per-channel mean — confirm intent.
r = r / (256 * 256)
g = g / (256 * 256)
b = b / (256 * 256)
print(r, g, b)
"numpy.sum",
"scipy.misc.imread",
"os.listdir"
] | [((66, 87), 'os.listdir', 'os.listdir', (['"""dataset"""'], {}), "('dataset')\n", (76, 87), False, 'import os\n'), ((275, 284), 'numpy.sum', 'np.sum', (['r'], {}), '(r)\n', (281, 284), True, 'import numpy as np\n'), ((290, 299), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (296, 299), True, 'import numpy as np\n'), ((305, 314), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (311, 314), True, 'import numpy as np\n'), ((143, 180), 'scipy.misc.imread', 'im.imread', (["('dataset/' + f)"], {'mode': '"""RGB"""'}), "('dataset/' + f, mode='RGB')\n", (152, 180), True, 'import scipy.misc as im\n')] |
import gsw
import xarray as xr
import subprocess
import numpy as np
import os
import pylab as plt
# Import utils and decorators
from tcoasts.utils.utils import *
from tcoasts.utils.decorators import _file_exists
class TransportAlongCoast(object):
    '''
    Compute volume transport across transects placed perpendicular to a
    coastline contour.

    Velocity (and optionally tracer) fields are interpolated onto each
    transect, projected onto the transect-normal direction, and integrated
    over depth and across-transect distance.
    '''
    def __init__(self,path,initpos,contour_file,distance=None,length=100):
        '''
        path         - str, root path to the data files.
        initpos      - [lon, lat] reference point on the coast; the transect
                       positions in `distance` are measured from here.
        contour_file - filename (np.loadtxt readable) containing the
                       coastline as lon/lat pairs.
        distance     - along-coast transect positions in km; defaults to
                       np.arange(-400, 400, 100). Positive points east.
        length       - transect length in km.
        '''
        self.path = path        # Data path
        self.initpos = initpos  # Init location lat lon coordinates.
        # Use a None sentinel instead of a mutable ndarray default argument,
        # so instances never share (and can never mutate) one default array.
        self.dist = np.arange(-400,400,100) if distance is None else distance # Units kilometers
        self.length = length    # Units kilometers
        self.n = 4              # self.length/self.n corresponds to the segments
                                # on the perpendicular vector.
        self.contour_file = contour_file # Contour filename.
        self.tmpfile= 'tmp_interp_transects.nc' # Temporal file to store interpolated fields.
        # Load required data
        self.extract_contour()
        self.initindex=find2d(self.coastline,initpos)

    def extract_contour(self):
        '''
        Load the coastline contour (lon/lat pairs) from self.contour_file
        into self.coastline.
        '''
        # Resolve the filename relative to self.path unless it already
        # contains './' (i.e. looks like an explicit relative path).
        if './' not in self.contour_file:
            self.contour_file=os.path.join(self.path,self.contour_file)
        if os.path.isfile(self.contour_file):
            self.coastline=np.loadtxt(self.contour_file)
        else:
            raise ValueError('''
                             Make sure the file path is correct. 
                             The path should be relative to the location of 
                             the running script, or relative to self.path.
                             ''')

    def coords2dist(self,lonlat,p=0):
        '''
        Distance (m) between consecutive lon/lat points at pressure p.
        This function follows the GSW computation.
        '''
        distance=gsw.distance(lonlat[:,0],lonlat[:,1],p)
        return distance

    def distancefrominit(self):
        '''
        Cumulative along-coast distance (m) from the init location.
        The distance definition points positive to the east.
        '''
        if self.initindex != 0:
            # Compute cumulative distance to right of index [location,location]
            postinit=np.cumsum(self.coords2dist(self.coastline[self.initindex:]))
            # Compute cumulative distance to left of index [location,location]
            neginit=-1*np.cumsum(np.flipud(self.coords2dist(self.coastline[:self.initindex])))
            # Join cumulative distances.
            cumdistance=np.hstack((np.flipud(neginit),postinit))
        else:
            # Compute cumulative distance starting from the index [0,0]
            cumdistance=np.cumsum(self.coords2dist(self.coastline))
        return cumdistance

    def perploc(self):
        '''
        Coastline indices nearest to the user-defined transect positions.
        '''
        dist_coast=self.distancefrominit()
        # self.dist is in km, the cumulative distance in m.
        index_perp=[find(dist_coast,dis*1000) for dis in self.dist]
        return index_perp

    def perp2coast(self,method='smooth',x=10):
        '''
        Compute normal and perpendicular unit vectors at each transect location.

        Input:
            method: [ smooth | byseg | local | ext ]
                smooth - computes the mean over X number of slopes and
                         projects the perpendicular vector
                byseg  - computes the mean over each segment of the slope
                local  - computes the the perpendicular vector using the 2
                         adjacent locations
                ext    - computes the perpendicular vector using the slope at
                         x cells to the left and right of the desired
                         perpendicular location.
        '''
        index_perp=self.perploc()
        # Method to find the location perpendicular vector.
        # BUG FIX: the original guard used `and`, which can never be true,
        # so the 'local' and 'ext' branches were unreachable.
        if method =='local' or method =='ext':
            # Compute slope from adjacent locations [loc-x,loc+x]
            if method=='local':
                x=1
            slopes=np.array([slope(self.coastline[ii-x,0],self.coastline[ii+x,0],
                            self.coastline[ii-x,1],self.coastline[ii+x,1]) 
                            for ii in index_perp])
        elif method == 'smooth':
            # Compute average slope from all the indexes contained between locations [loc-x,loc+x]
            slopes=np.array([np.mean([slope(self.coastline[ii-xx,0],self.coastline[ii+xx,0],
                                    self.coastline[ii-xx,1],self.coastline[ii+xx,1]) 
                                    for xx in range(1,x)]) 
                             for ii in index_perp])
        else:
            # Compute average slope from all segments from [loc-x,loc-x+(2x-1)]
            slopes=np.array([np.mean([slope(self.coastline[ii-x,0],self.coastline[ii-x+xx,0],
                                    self.coastline[ii-x,1],self.coastline[ii-x+xx,1]) 
                                    for xx in range(1,(2*x-1))]) 
                             for ii in index_perp])
        #Compute angles from slopes
        angles=slope2angle(slopes)
        #Shift angles to be perpendicular
        perp_angle=angles+(np.pi/2)
        #Normal vector
        self.x_norm = np.squeeze(np.cos(angles))
        self.y_norm = np.squeeze(np.sin(angles))
        #Perpendicualar vector
        self.x_perp = np.squeeze(np.cos(perp_angle))
        self.y_perp = np.squeeze(np.sin(perp_angle))
        # Return dictionary containing vector information
        return {'Nvector':{'x':self.x_norm,'y':self.x_norm,'angle':angles,'slope':slopes},
                'Pvector':{'x':self.x_perp,'y':self.y_perp,'angles':perp_angle,'slope':-1/slopes}}

    def perpvecdist(self,index_perp,perp_angle):
        '''
        Distance (m) spanned by a unit step along each perpendicular angle,
        used to scale the perpendicular vectors.
        '''
        ### Note this will produce an error of 1e-4.
        x=np.array([[self.coastline[index_perp][ii,0],
                     np.cos(perp_angle[ii])+self.coastline[index_perp][ii,0]] 
                     for ii in range(len(index_perp))])
        y=np.array([[self.coastline[index_perp][ii,1],
                     np.sin(perp_angle[ii])+self.coastline[index_perp][ii,1]] 
                     for ii in range(len(index_perp))])
        distances = gsw.distance(x,y)
        return distances

    # _file_exists will test if the tmporal file containing the interpolated
    # data exits. If file exists it will load the contents, otherwise, it will
    # interpolate the data.
    @_file_exists
    def inter2vector(self,ufiles='U.*.nc',vfiles='V.*.nc',tracerfile=None,dataset=None,save=True,shift=360,**kwargs):
        '''
        Interpolate velocity (and optionally tracer) fields onto the transects.

        **kwargs inter2vector supports the all the kwargs of xr.open_mfdataset.
        '''
        # xr load parameters
        xr_openmf_defaults={}
        if '*' in ufiles and '*' in vfiles:
            xr_openmf_defaults = {'concat_dim':'time','parallel':True,'combine':'nested'}
            xr_openmf_defaults.update(kwargs)
        print('Opening velocity files')
        # Load data.
        u = self.loaddata(file=ufiles,var='U',dataset=dataset,**xr_openmf_defaults)
        v = self.loaddata(file=vfiles,var='V',dataset=dataset,**xr_openmf_defaults)
        # Make sure the shape of the velocity fields are the same.
        if u.shape != v.shape:
            raise ValueError('The velocity fields should have the same shape.')
        # Compute perpendicular vectors.
        x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all=self.vertor_perp()
        # Define locations to interpolate interpolation.
        # !Important:
        # x_perp,y_perp is defined in the center of the cells
        x = xr.DataArray(x_perp, dims=('transect','n'))
        y = xr.DataArray(y_perp, dims=('transect','n'))
        # Define limits to slice data.
        deltax = 2*max((abs(x_perp[:,0]-x_perp[:,1])))
        slicevalx = [shift+x_perp.min()-deltax,shift+x_perp.max()+deltax]
        deltay = 2*max((abs(y_perp[:,0]-y_perp[:,1])))
        slicevaly = [y_perp.min()-deltay,y_perp.max()+deltay]
        # Slice data to reduce memory issues.
        u = u.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
        v = v.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
        # Interpolate data using xarray,
        # Note that fields can not contain nans
        # TO DO: Add support for data containing nans.
        print('Interpolating velocity fields')
        interp_u = u.chunk({'time':10,'depth':25,'lat':len(u.lat),'lon':len(u.lon)}).interp(lon=shift+x,lat=y).compute()
        interp_u = interp_u.where(interp_u!=0,np.nan)
        interp_v = v.chunk({'time':10,'depth':25,'lat':len(v.lat),'lon':len(v.lon)}).interp(lon=shift+x,lat=y).compute()
        interp_v = interp_v.where(interp_v!=0,np.nan)
        # Merge datasets
        self.interp_data=xr.merge([interp_u.to_dataset(name='u'), interp_v.to_dataset(name='v')])
        # Interpolate tracer fields to constrain transport.
        if tracerfile != None:
            print('Loadng and interpolating tracer')
            tracer = self.loaddata(file=tracerfile,var='Tracer',dataset=dataset,**xr_openmf_defaults)
            tracer = tracer.sel({'lon':slice(slicevalx[0],slicevalx[1]),'lat':slice(slicevaly[0],slicevaly[1])})
            interp_tracer = tracer.interp(lon=shift+x,lat=y).compute()
            interp_tracer = interp_tracer.where(interp_tracer!=0,np.nan)
            self.interp_data = xr.merge([interp_u.to_dataset(name='u'), interp_v.to_dataset(name='v'), 
                                         interp_tracer.to_dataset(name='tracer')])
        # Save data.
        if save==True:
            self.interp_data.to_netcdf('./tmp_interp_transects.nc')
        return self.interp_data

    def depth_profiles(self,bottom_vel):
        '''
        Vertical cell thicknesses (delta depth) masked below the depth at
        which |u| drops under `bottom_vel` (taken as the sea floor).
        '''
        # Maximum depth from interpolated field.
        depth_index=self.interp_data.depth[np.isfinite(self.interp_data.u.where(abs(self.interp_data.u)>bottom_vel,np.nan).isel({'time':0})).argmin('depth')]
        # xr.DataArray 2 multiply with field.
        depth=(xr.zeros_like(self.interp_data.u.isel(time=0))+self.interp_data.depth)
        # Mask depth to only contain values larger than index.
        depth=depth.where(depth > depth_index,np.nan)
        # Delta depth to compute area
        delta_depth=depth.diff(dim='depth')
        return delta_depth

    def vel_magnitude(self):
        '''Magnitude of the interpolated velocity vectors.'''
        magnitude = np.sqrt(self.interp_data.u**2+self.interp_data.v**2)
        return magnitude

    def dot_product(self):
        '''
        Scalar projection of the interpolated velocity onto the normal
        vector of each perpendicular transect.
        '''
        return self.interp_data.u*self.x_norm[np.newaxis,np.newaxis,:,np.newaxis]+self.interp_data.v*self.y_norm[np.newaxis,np.newaxis,:,np.newaxis]

    def compute_transport(self,bottom_vel=1e-5):
        '''
        Flux integral of the normal velocity over each transect; returns
        transport summed over depth and transect segments.
        '''
        # Scalar projection of interpolated data
        dotproduct = self.dot_product()
        # Projected data over normal vectors to surface.
        u_normal = dotproduct*self.x_norm[np.newaxis,np.newaxis,:,np.newaxis]
        v_normal = dotproduct*self.y_norm[np.newaxis,np.newaxis,:,np.newaxis]
        # Area of each grid cell.
        dA = self.delta_area(bottom_vel)
        # Multiplication of vector sum and the dA. Flux integral.
        self.transport=(u_normal+v_normal)*dA
        return self.transport.sum(dim={'depth','n'})

    def delta_area(self,bottom_vel):
        '''Area (dz * dx) of each grid cell along the transects.'''
        # Compute perpendicular vectors.
        x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all=self.vertor_perp()
        # Depth at each section of the transect.
        delta_z=abs(self.depth_profiles(bottom_vel=bottom_vel))
        # Distance between lon,lat points of transect.
        delta_x=gsw.distance(x_perp_all,y_perp_all)
        return delta_z*delta_x

    def mask_transport(self,threshold,method='greater'):
        '''
        threshold [ float / list ]
                  Threshold to scale transport with tracers used for tracer.
        method    [ string ]
                  'greater' will compute the transport for all the values larger
                        than the threshold in the tracer field.
                  'smaller' will compute the transport for all the values smaller
                        than the threshold in the tracer field.
                  'both' will compute the transport for all the values within
                        the threshold interval in the tracer field.
        '''
        if type(threshold)==list:
            threshold=np.array(threshold)
        # TO DO: If u vertical grid != tracer vertical grid then interpolate tracer to velocity grid.
        # isinstance (instead of type(x)==float) also accepts int thresholds.
        if method=='smaller' and isinstance(threshold,(int,float)):
            scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))<threshold)
        elif method=='greater' and isinstance(threshold,(int,float)):
            scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))>threshold)
        elif method=='both' and isinstance(threshold,np.ndarray):
            scaled_transport=self.transport.where(self.interp_data.tracer.isel(depth=slice(0,-1))>threshold.min()).where(self.interp_data.tracer<threshold.max())
        else:
            raise ValueError('''Threshold must be an float or list/array in which the 
                                min and max value will define the threshold interval.''')
        return scaled_transport.sum(dim={'depth','n'})

    def loaddata(self,file=None,var='U',dataset=None,**kwargs):
        '''
        Load a variable either from netCDF files found under self.path
        (`file` glob) or from an already-open `dataset`, returning it as a
        xr.DataArray renamed to `var`.
        '''
        # Check if file or dataset is defined.
        # Identity checks against None: `dataset == None` on an xr.Dataset
        # triggers element-wise comparison and is ambiguous in a boolean
        # context (the original also compared type(dataset) to the *string*
        # 'NoneType', which is always False).
        if file is None and dataset is None:
            raise ValueError('''file should be the path to the netCDF files or 
                                dataset should contain a dataset with a variable 
                                containing the string defined as var.
                             ''')
        elif file is not None and dataset is None:
            results = subprocess.check_output(['find', self.path, '-name', file])
            results=[s for s in results.decode('utf-8').split()]
            results.sort()
            data=xr.open_mfdataset(results,**kwargs)
        elif dataset is not None:
            data=dataset
        else:
            raise ValueError('Only one of the arguments [file or dataset] can be defined.')
        # Extract variables from dataset
        varname= [key for key,items in data.data_vars.items()]
        # Rename variable for easier manipulation.
        if len(varname)==1:
            variable=data.rename({varname[0]:var})
        else:
            varname=[var for varn in varname if var in varn]
            variable=data.rename({varname[0]:var})
        # Extract only the variable of interest.
        data=variable[var]
        if type(data) != xr.core.dataarray.DataArray:
            raise ValueError('The provided data should be a xr.DataArray.')
        else:
            return data

    def vector_scale(self,index_perp,perp_angle):
        '''
        Scale vector to desired distance self.length
        '''
        # Scale perpendicular vector to distance self.length
        return np.squeeze((self.length*1000)/self.perpvecdist(index_perp,perp_angle))

    def vertor_perp(self,shift=0):
        '''
        Grid the normal and perpendicular vectors along each transect.
        (Name kept as-is — "vertor" — for backward compatibility.)
        '''
        # Nearest location of perpendicular vectors from coastline grid.
        index_perp=self.perploc()
        # Compute perpendicular vectors.
        perp_dict=self.perp2coast()
        # Scale perpendicular vector to desired distance self.length.
        scale=self.vector_scale(index_perp,perp_dict['Pvector']['angles'])
        # Gridded normal vector
        x_norm=(np.squeeze(np.linspace(0,scale,self.length//self.n)[:,np.newaxis]*self.x_norm)
                +self.coastline[index_perp][:,0]).T+shift
        y_norm=(np.squeeze(np.linspace(0,scale,self.length//self.n)[:,np.newaxis]*self.y_norm)
                +self.coastline[index_perp][:,1]).T
        # Gridded perpendicular vector at [x,y]
        x_perp_all=(np.squeeze(np.linspace(0,scale,self.length//self.n)[:,np.newaxis]*self.x_perp)
                +self.coastline[index_perp][:,0]).T+shift
        y_perp_all=(np.squeeze(np.linspace(0,scale,self.length//self.n )[:,np.newaxis]*self.y_perp)
                +self.coastline[index_perp][:,1]).T
        # Gridded perpendicular vector at [x+diff(x)/2,y+diff(y)/2]
        x_perp = x_perp_all[:,:-1]+np.diff(x_perp_all)/2
        y_perp = y_perp_all[:,:-1]+np.diff(y_perp_all)/2
        return x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all

    def plotperp_vect(self,shift=0,transect=None,**kwargs):
        '''
        Plot the coastline with the normal/perpendicular transect vectors.
        transect [int] zooms in to the transect
        '''
        fig,ax = plt.subplots(1,1,figsize=(5,5),**kwargs)
        # Plot coastline
        plt.plot(self.coastline[:,0]+shift,self.coastline[:,1])
        # Compute perpendicular vectors.
        x_norm,y_norm,x_perp,y_perp,x_perp_all,y_perp_all=self.vertor_perp(shift)
        # Plot perpendicular vectors.
        plt.plot(x_norm.T,y_norm.T,'-r')
        plt.plot(x_perp.T,y_perp.T,'--k')
        # Zoom in into transect, usefule when the angle is significantly
        # different to n*{0,np.pi/2}.
        if transect != None:
            xdelta=2*abs(x_perp[transect][0]-x_perp[transect][1])
            plt.xlim(x_perp[transect].min()-xdelta,x_perp[transect].max()+xdelta)
            ydelta=2*abs(y_perp[transect][0]-y_perp[transect][1])
            plt.ylim(y_perp[transect].min()-ydelta,y_perp[transect].max()+ydelta)
        plt.gca().set_aspect('equal', adjustable='box')
        return fig,ax
"subprocess.check_output",
"numpy.sqrt",
"numpy.flipud",
"pylab.plot",
"os.path.join",
"numpy.diff",
"os.path.isfile",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"xarray.DataArray",
"numpy.sin",
"pylab.subplots",
"numpy.loadtxt",
"pylab.gca",
"xarray.open_mfdataset",
"numpy.arange... | [((327, 352), 'numpy.arange', 'np.arange', (['(-400)', '(400)', '(100)'], {}), '(-400, 400, 100)\n', (336, 352), True, 'import numpy as np\n'), ((1168, 1201), 'os.path.isfile', 'os.path.isfile', (['self.contour_file'], {}), '(self.contour_file)\n', (1182, 1201), False, 'import os\n'), ((1702, 1745), 'gsw.distance', 'gsw.distance', (['lonlat[:, 0]', 'lonlat[:, 1]', 'p'], {}), '(lonlat[:, 0], lonlat[:, 1], p)\n', (1714, 1745), False, 'import gsw\n'), ((6040, 6058), 'gsw.distance', 'gsw.distance', (['x', 'y'], {}), '(x, y)\n', (6052, 6058), False, 'import gsw\n'), ((7434, 7478), 'xarray.DataArray', 'xr.DataArray', (['x_perp'], {'dims': "('transect', 'n')"}), "(x_perp, dims=('transect', 'n'))\n", (7446, 7478), True, 'import xarray as xr\n'), ((7490, 7534), 'xarray.DataArray', 'xr.DataArray', (['y_perp'], {'dims': "('transect', 'n')"}), "(y_perp, dims=('transect', 'n'))\n", (7502, 7534), True, 'import xarray as xr\n'), ((10306, 10364), 'numpy.sqrt', 'np.sqrt', (['(self.interp_data.u ** 2 + self.interp_data.v ** 2)'], {}), '(self.interp_data.u ** 2 + self.interp_data.v ** 2)\n', (10313, 10364), True, 'import numpy as np\n'), ((11631, 11667), 'gsw.distance', 'gsw.distance', (['x_perp_all', 'y_perp_all'], {}), '(x_perp_all, y_perp_all)\n', (11643, 11667), False, 'import gsw\n'), ((16556, 16600), 'pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5), **kwargs)\n', (16568, 16600), True, 'import pylab as plt\n'), ((16630, 16690), 'pylab.plot', 'plt.plot', (['(self.coastline[:, 0] + shift)', 'self.coastline[:, 1]'], {}), '(self.coastline[:, 0] + shift, self.coastline[:, 1])\n', (16638, 16690), True, 'import pylab as plt\n'), ((16855, 16889), 'pylab.plot', 'plt.plot', (['x_norm.T', 'y_norm.T', '"""-r"""'], {}), "(x_norm.T, y_norm.T, '-r')\n", (16863, 16889), True, 'import pylab as plt\n'), ((16896, 16931), 'pylab.plot', 'plt.plot', (['x_perp.T', 'y_perp.T', '"""--k"""'], {}), "(x_perp.T, y_perp.T, '--k')\n", (16904, 
16931), True, 'import pylab as plt\n'), ((1115, 1157), 'os.path.join', 'os.path.join', (['self.path', 'self.contour_file'], {}), '(self.path, self.contour_file)\n', (1127, 1157), False, 'import os\n'), ((1230, 1259), 'numpy.loadtxt', 'np.loadtxt', (['self.contour_file'], {}), '(self.contour_file)\n', (1240, 1259), True, 'import numpy as np\n'), ((5006, 5020), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5012, 5020), True, 'import numpy as np\n'), ((5055, 5069), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (5061, 5069), True, 'import numpy as np\n'), ((5144, 5162), 'numpy.cos', 'np.cos', (['perp_angle'], {}), '(perp_angle)\n', (5150, 5162), True, 'import numpy as np\n'), ((5197, 5215), 'numpy.sin', 'np.sin', (['perp_angle'], {}), '(perp_angle)\n', (5203, 5215), True, 'import numpy as np\n'), ((12393, 12412), 'numpy.array', 'np.array', (['threshold'], {}), '(threshold)\n', (12401, 12412), True, 'import numpy as np\n'), ((13840, 13899), 'subprocess.check_output', 'subprocess.check_output', (["['find', self.path, '-name', file]"], {}), "(['find', self.path, '-name', file])\n", (13863, 13899), False, 'import subprocess\n'), ((14009, 14045), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['results'], {}), '(results, **kwargs)\n', (14026, 14045), True, 'import xarray as xr\n'), ((16254, 16273), 'numpy.diff', 'np.diff', (['x_perp_all'], {}), '(x_perp_all)\n', (16261, 16273), True, 'import numpy as np\n'), ((16311, 16330), 'numpy.diff', 'np.diff', (['y_perp_all'], {}), '(y_perp_all)\n', (16318, 16330), True, 'import numpy as np\n'), ((17374, 17383), 'pylab.gca', 'plt.gca', ([], {}), '()\n', (17381, 17383), True, 'import pylab as plt\n'), ((2332, 2350), 'numpy.flipud', 'np.flipud', (['neginit'], {}), '(neginit)\n', (2341, 2350), True, 'import numpy as np\n'), ((5716, 5738), 'numpy.cos', 'np.cos', (['perp_angle[ii]'], {}), '(perp_angle[ii])\n', (5722, 5738), True, 'import numpy as np\n'), ((5906, 5928), 'numpy.sin', 'np.sin', (['perp_angle[ii]'], {}), 
'(perp_angle[ii])\n', (5912, 5928), True, 'import numpy as np\n'), ((15674, 15718), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', '(self.length // self.n)'], {}), '(0, scale, self.length // self.n)\n', (15685, 15718), True, 'import numpy as np\n'), ((16030, 16074), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', '(self.length // self.n)'], {}), '(0, scale, self.length // self.n)\n', (16041, 16074), True, 'import numpy as np\n'), ((15521, 15565), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', '(self.length // self.n)'], {}), '(0, scale, self.length // self.n)\n', (15532, 15565), True, 'import numpy as np\n'), ((15873, 15917), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', '(self.length // self.n)'], {}), '(0, scale, self.length // self.n)\n', (15884, 15917), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Provides a non-parametric two-stage least squares instrumental variable estimator."""
import numpy as np
from copy import deepcopy
from sklearn import clone
from sklearn.linear_model import LinearRegression
from ...utilities import (shape, transpose, reshape, cross_product, ndim, size,
_deprecate_positional, check_input_arrays)
from ..._cate_estimator import BaseCateEstimator, LinearCateEstimator
from numpy.polynomial.hermite_e import hermeval
from sklearn.base import TransformerMixin
from sklearn.preprocessing import PolynomialFeatures
from itertools import product
class HermiteFeatures(TransformerMixin):
    """
    Featurizer that returns (unscaled) Hermite function evaluations.
    The evaluated functions are of degrees 0..`degree`, differentiated `shift` times.
    If the input has shape (n, x) and `joint` is False, the output will have shape (n, (`degree` + 1)×x) if `shift` is 0.
    If the input has shape (n, x) and `joint` is True, the output will have shape (n, (`degree` + 1) ^ x) if `shift` is 0.
    In either case, if `shift` is nonzero there will be `shift` additional dimensions of size x
    between the first and last.
    """
    def __init__(self, degree, shift=0, joint=False):
        # degree: highest Hermite degree evaluated (inclusive, starting at 0)
        # shift: how many times the Hermite functions are differentiated
        # joint: whether to take the cross product of per-column features
        self._degree = degree
        self._shift = shift
        self._joint = joint
    def _column_feats(self, X, shift):
        """
        Apply Hermite function evaluations of degrees 0..`degree` differentiated `shift` times.
        When applied to the column `X` of shape(n,), the resulting array has shape(n, (degree + 1)).
        """
        assert ndim(X) == 1
        # identity rows select individual Hermite polynomials; dropping the
        # first `shift` columns accounts for the differentiation order.
        # this will have dimension (d,) + shape(X)
        coeffs = np.identity(self._degree + shift + 1)[:, shift:]
        feats = ((-1) ** shift) * hermeval(X, coeffs) * np.exp(-X * X / 2)
        # send the first dimension to the end
        return transpose(feats)
    def fit(self, X):
        """Fits the data (a NOP for this class) and returns self."""
        return self
    def transform(self, X):
        """
        Transform the data by applying the appropriate Hermite functions.
        Parameters
        ----------
        X: array_like
            2-dimensional array of input features
        Returns
        -------
        The transformed data
        """
        assert ndim(X) == 2
        n = shape(X)[0]
        ncols = shape(X)[1]
        columns = []
        # iterate over every assignment of the `shift` derivative slots to
        # input columns (empty product -> a single empty tuple when shift==0)
        for indices in product(*[range(ncols) for i in range(self._shift)]):
            if self._joint:
                # joint features: cross product over columns, differentiating
                # column i once per occurrence of i in `indices`
                columns.append(cross_product(*[self._column_feats(X[:, i], indices.count(i))
                                                for i in range(shape(X)[1])]))
            else:
                indices = set(indices)
                if self._shift == 0:  # return features for all columns:
                    columns.append(np.hstack([self._column_feats(X[:, i], self._shift) for i in range(shape(X)[1])]))
                # columns are featurized independently; partial derivatives are only non-zero
                # when taken with respect to the same column each time
                elif len(indices) == 1:
                    index = list(indices)[0]
                    feats = self._column_feats(X[:, index], self._shift)
                    columns.append(np.hstack([feats if i == index else np.zeros(shape(feats))
                                               for i in range(shape(X)[1])]))
                else:
                    # mixed partials across distinct columns vanish
                    columns.append(np.zeros((n, (self._degree + 1) * ncols)))
        # fold the flat column stack back into (n, [ncols]*shift, features)
        return reshape(np.hstack(columns), (n,) + (ncols,) * self._shift + (-1,))
class DPolynomialFeatures(TransformerMixin):
    """
    Featurizer that returns the derivatives of :class:`~sklearn.preprocessing.PolynomialFeatures` features in
    a way that's compatible with the expectations of :class:`.NonparametricTwoStageLeastSquares`'s
    `dt_featurizer` parameter.
    If the input has shape `(n, x)` and
    :meth:`PolynomialFeatures.transform<sklearn.preprocessing.PolynomialFeatures.transform>` returns an output
    of shape `(n, f)`, then :meth:`.transform` will return an array of shape `(n, x, f)`.
    Parameters
    ----------
    degree: integer, default = 2
        The degree of the polynomial features.
    interaction_only: boolean, default = False
        If true, only derivatives of interaction features are produced: features that are products of at most degree
        distinct input features (so not `x[1] ** 2`, `x[0] * x[2] ** 3`, etc.).
    include_bias: boolean, default = True
        If True (default), then include the derivative of a bias column, the feature in which all polynomial powers
        are zero.
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.F = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=include_bias)

    def fit(self, X, y=None):
        """
        Compute number of output features.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.
        y : array, optional
            Not used
        Returns
        -------
        self : instance
        """
        return self

    def transform(self, X):
        """
        Transform data to derivatives of polynomial features
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            The data to transform, row by row.
        Returns
        -------
        XP: array-like, shape (n_samples, n_features, n_output_features)
            The matrix of features, where `n_output_features` is the number of features that
            would be returned from :class:`~sklearn.preprocessing.PolynomialFeatures`.
        """
        self.F.fit(X)
        powers = self.F.powers_
        result = np.zeros(X.shape + (self.F.n_output_features_,))
        for i in range(X.shape[1]):
            # d/dx_i of prod_j x_j^{p_j} = p_i * x_i^{p_i - 1} * prod_{j != i} x_j^{p_j}
            c = powers[:, i]
            p = powers.copy()
            # Clamp the differentiated exponent at 0 when the original power
            # is 0: the coefficient c is 0 for those terms anyway, but the
            # previous unconditional `p[:, i] -= 1` produced exponent -1, so
            # np.float_power(0.0, -1) gave inf and 0 * inf == nan whenever X
            # contained zeros, poisoning the entire feature row.
            p[:, i] = np.where(c > 0, c - 1, 0)
            M = np.float_power(X[:, np.newaxis, :], p[np.newaxis, :, :])
            result[:, i, :] = c[np.newaxis, :] * np.prod(M, axis=-1)
        return result
def _add_ones(arr):
    """Prepend an intercept column of ones to the 2-D array `arr`."""
    n_rows = shape(arr)[0]
    ones_col = np.ones((n_rows, 1))
    return np.hstack([ones_col, arr])
def _add_zeros(arr):
    """Prepend a column of zeros to the 2-D array `arr`."""
    n_rows = shape(arr)[0]
    zeros_col = np.zeros((n_rows, 1))
    return np.hstack([zeros_col, arr])
class SieveTSLS(BaseCateEstimator):
    """
    Non-parametric instrumental variables estimator.
    Supports the use of arbitrary featurizers for the features, treatments, and instruments.
    Parameters
    ----------
    t_featurizer: transformer
        Featurizer used to transform the treatments
    x_featurizer: transformer
        Featurizer used to transform the raw features
    z_featurizer: transformer
        Featurizer used to transform the instruments
    dt_featurizer: transformer
        Featurizer used to transform the treatments for the computation of the marginal effect.
        This should produce a 3-dimensional array, containing the per-treatment derivative of
        each transformed treatment. That is, given a treatment array of shape(n, dₜ),
        the output should have shape(n, dₜ, fₜ), where fₜ is the number of columns produced by `t_featurizer`.
    """
    def __init__(self, *,
                 t_featurizer,
                 x_featurizer,
                 z_featurizer,
                 dt_featurizer):
        # clone(safe=False) copies sklearn-style featurizers while passing
        # arbitrary non-estimator objects through unchanged
        self._t_featurizer = clone(t_featurizer, safe=False)
        self._x_featurizer = clone(x_featurizer, safe=False)
        self._z_featurizer = clone(z_featurizer, safe=False)
        self._dt_featurizer = clone(dt_featurizer, safe=False)
        # don't fit intercept; manually add column of ones to the data instead;
        # this allows us to ignore the intercept when computing marginal effects
        self._model_T = LinearRegression(fit_intercept=False)
        self._model_Y = LinearRegression(fit_intercept=False)
        super().__init__()
    @_deprecate_positional("X, W, and Z should be passed by keyword only. In a future release "
                           "we will disallow passing X, W, and Z by position.", ['X', 'W', 'Z'])
    @BaseCateEstimator._wrap_fit
    def fit(self, Y, T, X, W, Z, *, inference=None):
        """
        Estimate the counterfactual model from data, i.e. estimates functions τ(·, ·, ·), ∂τ(·, ·).
        Parameters
        ----------
        Y: (n × d_y) matrix
            Outcomes for each sample
        T: (n × dₜ) matrix
            Treatments for each sample
        X: optional(n × dₓ) matrix
            Features for each sample
        W: optional(n × d_w) matrix
            Controls for each sample
        Z: optional(n × d_z) matrix
            Instruments for each sample
        inference: string, :class:`.Inference` instance, or None
            Method for performing inference.  This estimator supports 'bootstrap'
            (or an instance of :class:`.BootstrapInference`)
        Returns
        -------
        self
        """
        Y, T, X, W, Z = check_input_arrays(Y, T, X, W, Z)
        # treat absent X/W as zero-column matrices so the hstacks below work
        if X is None:
            X = np.empty((shape(Y)[0], 0))
        if W is None:
            W = np.empty((shape(Y)[0], 0))
        assert shape(Y)[0] == shape(T)[0] == shape(X)[0] == shape(W)[0] == shape(Z)[0]
        # make T 2D if it was a vector
        if ndim(T) == 1:
            T = reshape(T, (-1, 1))
        # store number of columns of W so that we can create correctly shaped zero array in effect and marginal effect
        self._d_w = shape(W)[1]
        # two stage approximation
        # first, get basis expansions of T, X, and Z
        ft_X = self._x_featurizer.fit_transform(X)
        ft_Z = self._z_featurizer.fit_transform(Z)
        ft_T = self._t_featurizer.fit_transform(T)
        # TODO: is it right that the effective number of instruments is the
        #       product of ft_X and ft_Z, not just ft_Z?
        assert shape(ft_T)[1] <= shape(ft_X)[1] * shape(ft_Z)[1], ("There can be no more T features than the product "
                                                                  "of the number of X and Z features; otherwise "
                                                                  "there is not enough information to identify their "
                                                                  "structure")
        # regress T expansion on X,Z expansions concatenated with W
        features = _add_ones(np.hstack([W, cross_product(ft_X, ft_Z)]))
        self._model_T.fit(features, ft_T)
        # predict ft_T from interacted ft_X, ft_Z
        ft_T_hat = self._model_T.predict(features)
        # second stage: regress Y on the predicted treatment features
        # interacted with ft_X (plus W and an intercept column)
        self._model_Y.fit(_add_ones(np.hstack([W, cross_product(ft_T_hat, ft_X)])), Y)
    def effect(self, X=None, T0=0, T1=1):
        """
        Calculate the heterogeneous treatment effect τ(·,·,·).
        The effect is calculated between the two treatment points
        conditional on a vector of features on a set of m test samples {T0ᵢ, T1ᵢ, Xᵢ}.
        Parameters
        ----------
        T0: (m × dₜ) matrix or vector of length m
            Base treatments for each sample
        T1: (m × dₜ) matrix or vector of length m
            Target treatments for each sample
        X: optional (m × dₓ) matrix
            Features for each sample
        Returns
        -------
        τ: (m × d_y) matrix
            Heterogeneous treatment effects on each outcome for each sample
            Note that when Y is a vector rather than a 2-dimensional array, the corresponding
            singleton dimension will be collapsed (so this method will return a vector)
        """
        # promote scalar treatments to full arrays with one row per sample
        if ndim(T0) == 0:
            T0 = np.full((1 if X is None else shape(X)[0],) + self._d_t, T0)
        if ndim(T1) == 0:
            T1 = np.full((1 if X is None else shape(X)[0],) + self._d_t, T1)
        if ndim(T0) == 1:
            T0 = reshape(T0, (-1, 1))
        if ndim(T1) == 1:
            T1 = reshape(T1, (-1, 1))
        if X is None:
            X = np.empty((shape(T0)[0], 0))
        assert shape(T0) == shape(T1)
        assert shape(T0)[0] == shape(X)[0]
        W = np.zeros((shape(T0)[0], self._d_w))  # can set arbitrarily since values will cancel
        ft_X = self._x_featurizer.transform(X)
        ft_T0 = self._t_featurizer.transform(T0)
        ft_T1 = self._t_featurizer.transform(T1)
        # difference of second-stage predictions at the two treatment points
        Y0 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T0, ft_X)])))
        Y1 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T1, ft_X)])))
        return Y1 - Y0
    def marginal_effect(self, T, X=None):
        """
        Calculate the heterogeneous marginal effect ∂τ(·, ·).
        The marginal effect is calculated around a base treatment
        point conditional on a vector of features on a set of m test samples {Tᵢ, Xᵢ}.
        Parameters
        ----------
        T: (m × dₜ) matrix
            Base treatments for each sample
        X: optional(m × dₓ) matrix
            Features for each sample
        Returns
        -------
        grad_tau: (m × d_y × dₜ) array
            Heterogeneous marginal effects on each outcome for each sample
            Note that when Y or T is a vector rather than a 2-dimensional array,
            the corresponding singleton dimensions in the output will be collapsed
            (e.g. if both are vectors, then the output of this method will also be a vector)
        """
        if X is None:
            X = np.empty((shape(T)[0], 0))
        assert shape(T)[0] == shape(X)[0]
        ft_X = self._x_featurizer.transform(X)
        n = shape(T)[0]
        dT = self._dt_featurizer.transform(T if ndim(T) == 2 else reshape(T, (-1, 1)))
        W = np.zeros((size(T), self._d_w))
        # dT should be an n×dₜ×fₜ array (but if T was a vector, or if there is only one feature,
        # dT may be only 2-dimensional)
        # promote dT to 3D if necessary (e.g. if T was a vector)
        if ndim(dT) < 3:
            dT = reshape(dT, (n, 1, shape(dT)[1]))
        # reshape ft_X and dT to allow cross product (result has shape n×dₜ×fₜ×f_x)
        features = reshape(ft_X, (n, 1, 1, -1)) * reshape(dT, shape(dT) + (1,))
        features = transpose(features, [0, 1, 3, 2])  # swap last two dims to match cross_product
        features = reshape(features, (size(T), -1))
        # chain rule: apply the fitted second-stage model to the derivative features
        output = self._model_Y.predict(_add_zeros(np.hstack([W, features])))
        output = reshape(output, shape(T) + shape(output)[1:])
        if ndim(output) == 3:
            return transpose(output, (0, 2, 1))  # transpose trailing T and Y dims
        else:
            return output
| [
"numpy.identity",
"numpy.prod",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.hstack",
"numpy.float_power",
"numpy.exp",
"numpy.zeros",
"sklearn.clone",
"numpy.polynomial.hermite_e.hermeval",
"sklearn.linear_model.LinearRegression"
] | [((4866, 4965), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'degree', 'interaction_only': 'interaction_only', 'include_bias': 'include_bias'}), '(degree=degree, interaction_only=interaction_only,\n include_bias=include_bias)\n', (4884, 4965), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((5911, 5959), 'numpy.zeros', 'np.zeros', (['(X.shape + (self.F.n_output_features_,))'], {}), '(X.shape + (self.F.n_output_features_,))\n', (5919, 5959), True, 'import numpy as np\n'), ((7604, 7635), 'sklearn.clone', 'clone', (['t_featurizer'], {'safe': '(False)'}), '(t_featurizer, safe=False)\n', (7609, 7635), False, 'from sklearn import clone\n'), ((7665, 7696), 'sklearn.clone', 'clone', (['x_featurizer'], {'safe': '(False)'}), '(x_featurizer, safe=False)\n', (7670, 7696), False, 'from sklearn import clone\n'), ((7726, 7757), 'sklearn.clone', 'clone', (['z_featurizer'], {'safe': '(False)'}), '(z_featurizer, safe=False)\n', (7731, 7757), False, 'from sklearn import clone\n'), ((7788, 7820), 'sklearn.clone', 'clone', (['dt_featurizer'], {'safe': '(False)'}), '(dt_featurizer, safe=False)\n', (7793, 7820), False, 'from sklearn import clone\n'), ((8006, 8043), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (8022, 8043), False, 'from sklearn.linear_model import LinearRegression\n'), ((8068, 8105), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (8084, 8105), False, 'from sklearn.linear_model import LinearRegression\n'), ((1779, 1816), 'numpy.identity', 'np.identity', (['(self._degree + shift + 1)'], {}), '(self._degree + shift + 1)\n', (1790, 1816), True, 'import numpy as np\n'), ((1884, 1902), 'numpy.exp', 'np.exp', (['(-X * X / 2)'], {}), '(-X * X / 2)\n', (1890, 1902), True, 'import numpy as np\n'), ((3634, 3652), 'numpy.hstack', 'np.hstack', (['columns'], {}), 
'(columns)\n', (3643, 3652), True, 'import numpy as np\n'), ((6096, 6152), 'numpy.float_power', 'np.float_power', (['X[:, np.newaxis, :]', 'p[np.newaxis, :, :]'], {}), '(X[:, np.newaxis, :], p[np.newaxis, :, :])\n', (6110, 6152), True, 'import numpy as np\n'), ((1862, 1881), 'numpy.polynomial.hermite_e.hermeval', 'hermeval', (['X', 'coeffs'], {}), '(X, coeffs)\n', (1870, 1881), False, 'from numpy.polynomial.hermite_e import hermeval\n'), ((6202, 6221), 'numpy.prod', 'np.prod', (['M'], {'axis': '(-1)'}), '(M, axis=-1)\n', (6209, 6221), True, 'import numpy as np\n'), ((14542, 14566), 'numpy.hstack', 'np.hstack', (['[W, features]'], {}), '([W, features])\n', (14551, 14566), True, 'import numpy as np\n'), ((3568, 3609), 'numpy.zeros', 'np.zeros', (['(n, (self._degree + 1) * ncols)'], {}), '((n, (self._degree + 1) * ncols))\n', (3576, 3609), True, 'import numpy as np\n')] |
# Matplotlib / seaborn plotting walkthrough: line, bar, scatter, histogram,
# box, violin, and pie/doughnut charts, finishing with a seaborn line plot.
# All imports are gathered here; they were previously scattered through the
# script (with `pyplot` even imported twice), which is equivalent but
# unconventional.
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

# Line Plot
x = np.arange(1, 11)
print(x)
print()
y = 2 * x
print(y)
print()
plt.plot(x, y)
plt.show()
print()

# Line Plot - 2 (adding titles and labels)
plt.plot(x, y)
plt.title("Line Plot")
plt.xlabel("x-label")
plt.ylabel("y-label")
plt.show()
print()

# Line Plot - changing line aesthetics
plt.plot(x, y, color='red', linestyle=':', linewidth=2)
plt.show()
print()

# Line Plot - two lines on the same axes
x = np.arange(1, 11)
y1 = 2 * x
y2 = 3 * x
plt.plot(x, y1, color='red', linestyle=':', linewidth=2)
plt.plot(x, y2, color='green', linestyle='-', linewidth=3)
plt.title("Line Plot")
plt.xlabel("x-label")
plt.ylabel("y-label")
plt.grid(True)
plt.show()

# Line Plot - adding subplots
x = np.arange(1, 11)
y1 = 2 * x
y2 = 3 * x
plt.subplot(1, 2, 1)
plt.plot(x, y1, color='red', linestyle=':', linewidth=2)
plt.subplot(1, 2, 2)
plt.plot(x, y2, color='green', linestyle=':', linewidth=3)
plt.show()

# Bar plot
student = {"Bob": 87, "Matt": 56, "Sam": 27}
names = list(student.keys())
values = list(student.values())
plt.bar(names, values)
plt.show()

# Bar plot - adding labels and grid
plt.bar(names, values)
plt.title("Marks of Students")
plt.xlabel("x-label")
plt.ylabel("y-label")
plt.grid(True)
plt.show()

# Horizontal bar plot
plt.barh(names, values, color='orange')
plt.title("Marks of Students")
plt.xlabel("Marks")
plt.ylabel("Names")
plt.grid(True)
plt.show()

# Scatter plot
x = [10, 20, 30, 40, 50, 60, 70, 80, 90]
a = [8, 1, 7, 2, 0, 3, 5, 3, 4]
plt.scatter(x, a)
plt.show()

# Scatter plot - 2 (custom marker, color, and size)
x = [10, 20, 30, 40, 50, 60, 70, 80, 90]
a = [8, 1, 7, 2, 0, 3, 5, 3, 4]
plt.scatter(x, a, marker="*", c="g", s=100)
plt.show()

# Scatter plot - 3 (two series on one axes)
x = [10, 20, 30, 40, 50, 60, 70, 80, 90]
a = [8, 1, 7, 2, 0, 3, 5, 3, 4]
b = [4, 3, 5, 3, 0, 2, 7, 1, 8]
plt.scatter(x, a, marker="*", c="red", s=100)
plt.scatter(x, b, marker=".", c="yellow", s=150)
plt.show()

# Scatter plot - 4 (side-by-side subplots)
x = [10, 20, 30, 40, 50, 60, 70, 80, 90]
a = [8, 1, 7, 2, 0, 3, 5, 3, 4]
b = [4, 3, 5, 3, 0, 2, 7, 1, 8]
plt.subplot(1, 2, 1)
plt.scatter(x, a, marker="*", c="red", s=100)
plt.subplot(1, 2, 2)
plt.scatter(x, b, marker=".", c="yellow", s=150)
plt.show()

# Histogram
data = [1, 3, 3, 3, 3, 3, 9, 9, 5, 4, 4, 8, 8, 8, 6, 7]
plt.hist(data)
plt.show()

# Histogram - 2 (custom color and bin count)
plt.hist(data, color="g", bins=4)
plt.show()

# Histogram - 3 (from a CSV dataset; expects Book1.csv with a 'Units' column)
iris = pd.read_csv('Book1.csv')
iris.head()
plt.hist(iris['Units'], bins=30, color="r")
plt.show()

# Box plot
one = [1, 2, 3, 4, 5, 6, 7, 8, 9]
two = [1, 2, 3, 4, 5, 4, 3, 2, 1]
three = [6, 7, 8, 9, 8, 7, 6, 5, 4]
data = list([one, two, three])
plt.boxplot(data)
plt.show()

# Violin plot
one = [1, 2, 3, 4, 5, 6, 7, 8, 9]
two = [1, 2, 3, 4, 5, 4, 3, 2, 1]
three = [6, 7, 8, 9, 8, 7, 6, 5, 4]
data = list([one, two, three])
plt.violinplot(data, showmedians=True)
plt.show()

# Pie chart
fruit = ['Apple', 'Orange', 'Mango', 'Guava']
quantity = [67, 34, 100, 29]
plt.pie(quantity, labels=fruit)
plt.show()

# Pie chart - 2 (percentage labels and custom colors)
fruit = ['Apple', 'Orange', 'Mango', 'Guava']
quantity = [53, 43, 12, 97]
plt.pie(quantity, labels=fruit, autopct='%0.1f%%', colors=['yellow', 'grey', 'blue', 'black'])
plt.show()

# Doughnut chart (a pie chart with a smaller white pie drawn on top)
fruit = ['Apple', 'Orange', 'Mango', 'Guava']
quantity = [53, 43, 12, 97]
plt.pie(quantity, labels=fruit, radius=2)
plt.pie([1], colors=['w'], radius=1)
plt.show()

# Seaborn line plot ("fmri" is a seaborn sample dataset; load_dataset
# downloads it on first use)
fmri = sns.load_dataset("fmri")
fmri.head()
sns.lineplot(x="time_point", y="signal", data=fmri)
plt.show()
| [
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"seaborn.load_dataset",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.violinplot",
"seab... | [((73, 89), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (82, 89), True, 'import numpy as np\n'), ((134, 148), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (142, 148), True, 'from matplotlib import pyplot as plt\n'), ((149, 159), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (157, 159), True, 'from matplotlib import pyplot as plt\n'), ((211, 225), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (219, 225), True, 'from matplotlib import pyplot as plt\n'), ((226, 248), 'matplotlib.pyplot.title', 'plt.title', (['"""Line Plot"""'], {}), "('Line Plot')\n", (235, 248), True, 'from matplotlib import pyplot as plt\n'), ((249, 270), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-label"""'], {}), "('x-label')\n", (259, 270), True, 'from matplotlib import pyplot as plt\n'), ((271, 292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-label"""'], {}), "('y-label')\n", (281, 292), True, 'from matplotlib import pyplot as plt\n'), ((293, 303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (301, 303), True, 'from matplotlib import pyplot as plt\n'), ((351, 406), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""red"""', 'linestyle': '""":"""', 'linewidth': '(2)'}), "(x, y, color='red', linestyle=':', linewidth=2)\n", (359, 406), True, 'from matplotlib import pyplot as plt\n'), ((407, 417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (415, 417), True, 'from matplotlib import pyplot as plt\n'), ((454, 470), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (463, 470), True, 'import numpy as np\n'), ((493, 549), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""red"""', 'linestyle': '""":"""', 'linewidth': '(2)'}), "(x, y1, color='red', linestyle=':', linewidth=2)\n", (501, 549), True, 'from matplotlib import pyplot as plt\n'), ((550, 608), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""green"""', 'linestyle': 
'"""-"""', 'linewidth': '(3)'}), "(x, y2, color='green', linestyle='-', linewidth=3)\n", (558, 608), True, 'from matplotlib import pyplot as plt\n'), ((609, 631), 'matplotlib.pyplot.title', 'plt.title', (['"""Line Plot"""'], {}), "('Line Plot')\n", (618, 631), True, 'from matplotlib import pyplot as plt\n'), ((632, 653), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-label"""'], {}), "('x-label')\n", (642, 653), True, 'from matplotlib import pyplot as plt\n'), ((654, 675), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-label"""'], {}), "('y-label')\n", (664, 675), True, 'from matplotlib import pyplot as plt\n'), ((676, 690), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (684, 690), True, 'from matplotlib import pyplot as plt\n'), ((691, 701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (699, 701), True, 'from matplotlib import pyplot as plt\n'), ((736, 752), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (745, 752), True, 'import numpy as np\n'), ((775, 795), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (786, 795), True, 'from matplotlib import pyplot as plt\n'), ((796, 852), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""red"""', 'linestyle': '""":"""', 'linewidth': '(2)'}), "(x, y1, color='red', linestyle=':', linewidth=2)\n", (804, 852), True, 'from matplotlib import pyplot as plt\n'), ((853, 873), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (864, 873), True, 'from matplotlib import pyplot as plt\n'), ((874, 932), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""green"""', 'linestyle': '""":"""', 'linewidth': '(3)'}), "(x, y2, color='green', linestyle=':', linewidth=3)\n", (882, 932), True, 'from matplotlib import pyplot as plt\n'), ((933, 943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (941, 943), True, 'from matplotlib import pyplot as plt\n'), ((1061, 1083), 
'matplotlib.pyplot.bar', 'plt.bar', (['names', 'values'], {}), '(names, values)\n', (1068, 1083), True, 'from matplotlib import pyplot as plt\n'), ((1084, 1094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1092, 1094), True, 'from matplotlib import pyplot as plt\n'), ((1129, 1151), 'matplotlib.pyplot.bar', 'plt.bar', (['names', 'values'], {}), '(names, values)\n', (1136, 1151), True, 'from matplotlib import pyplot as plt\n'), ((1152, 1183), 'matplotlib.pyplot.title', 'plt.title', (['"""Marks of Students"""'], {}), "('Marks of Students')\n", (1161, 1183), True, 'from matplotlib import pyplot as plt\n'), ((1184, 1205), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-label"""'], {}), "('x-label')\n", (1194, 1205), True, 'from matplotlib import pyplot as plt\n'), ((1206, 1227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-label"""'], {}), "('y-label')\n", (1216, 1227), True, 'from matplotlib import pyplot as plt\n'), ((1228, 1242), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1236, 1242), True, 'from matplotlib import pyplot as plt\n'), ((1243, 1253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1251, 1253), True, 'from matplotlib import pyplot as plt\n'), ((1276, 1315), 'matplotlib.pyplot.barh', 'plt.barh', (['names', 'values'], {'color': '"""orange"""'}), "(names, values, color='orange')\n", (1284, 1315), True, 'from matplotlib import pyplot as plt\n'), ((1316, 1347), 'matplotlib.pyplot.title', 'plt.title', (['"""Marks of Students"""'], {}), "('Marks of Students')\n", (1325, 1347), True, 'from matplotlib import pyplot as plt\n'), ((1348, 1367), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Marks"""'], {}), "('Marks')\n", (1358, 1367), True, 'from matplotlib import pyplot as plt\n'), ((1368, 1387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Names"""'], {}), "('Names')\n", (1378, 1387), True, 'from matplotlib import pyplot as plt\n'), ((1388, 1402), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), 
'(True)\n', (1396, 1402), True, 'from matplotlib import pyplot as plt\n'), ((1403, 1413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1411, 1413), True, 'from matplotlib import pyplot as plt\n'), ((1501, 1518), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'a'], {}), '(x, a)\n', (1512, 1518), True, 'from matplotlib import pyplot as plt\n'), ((1519, 1529), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1527, 1529), True, 'from matplotlib import pyplot as plt\n'), ((1621, 1664), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'a'], {'marker': '"""*"""', 'c': '"""g"""', 's': '(100)'}), "(x, a, marker='*', c='g', s=100)\n", (1632, 1664), True, 'from matplotlib import pyplot as plt\n'), ((1665, 1675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1673, 1675), True, 'from matplotlib import pyplot as plt\n'), ((1799, 1844), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'a'], {'marker': '"""*"""', 'c': '"""red"""', 's': '(100)'}), "(x, a, marker='*', c='red', s=100)\n", (1810, 1844), True, 'from matplotlib import pyplot as plt\n'), ((1845, 1893), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'b'], {'marker': '"""."""', 'c': '"""yellow"""', 's': '(150)'}), "(x, b, marker='.', c='yellow', s=150)\n", (1856, 1893), True, 'from matplotlib import pyplot as plt\n'), ((1894, 1904), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1902, 1904), True, 'from matplotlib import pyplot as plt\n'), ((2028, 2048), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2039, 2048), True, 'from matplotlib import pyplot as plt\n'), ((2049, 2094), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'a'], {'marker': '"""*"""', 'c': '"""red"""', 's': '(100)'}), "(x, a, marker='*', c='red', s=100)\n", (2060, 2094), True, 'from matplotlib import pyplot as plt\n'), ((2095, 2115), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2106, 2115), True, 'from matplotlib 
import pyplot as plt\n'), ((2116, 2164), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'b'], {'marker': '"""."""', 'c': '"""yellow"""', 's': '(150)'}), "(x, b, marker='.', c='yellow', s=150)\n", (2127, 2164), True, 'from matplotlib import pyplot as plt\n'), ((2165, 2175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2173, 2175), True, 'from matplotlib import pyplot as plt\n'), ((2244, 2258), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {}), '(data)\n', (2252, 2258), True, 'from matplotlib import pyplot as plt\n'), ((2259, 2269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2267, 2269), True, 'from matplotlib import pyplot as plt\n'), ((2286, 2319), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'color': '"""g"""', 'bins': '(4)'}), "(data, color='g', bins=4)\n", (2294, 2319), True, 'from matplotlib import pyplot as plt\n'), ((2320, 2330), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2328, 2330), True, 'from matplotlib import pyplot as plt\n'), ((2382, 2406), 'pandas.read_csv', 'pd.read_csv', (['"""Book1.csv"""'], {}), "('Book1.csv')\n", (2393, 2406), True, 'import pandas as pd\n'), ((2419, 2462), 'matplotlib.pyplot.hist', 'plt.hist', (["iris['Units']"], {'bins': '(30)', 'color': '"""r"""'}), "(iris['Units'], bins=30, color='r')\n", (2427, 2462), True, 'from matplotlib import pyplot as plt\n'), ((2463, 2473), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2471, 2473), True, 'from matplotlib import pyplot as plt\n'), ((2620, 2637), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['data'], {}), '(data)\n', (2631, 2637), True, 'from matplotlib import pyplot as plt\n'), ((2638, 2648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2646, 2648), True, 'from matplotlib import pyplot as plt\n'), ((2798, 2836), 'matplotlib.pyplot.violinplot', 'plt.violinplot', (['data'], {'showmedians': '(True)'}), '(data, showmedians=True)\n', (2812, 2836), True, 'from matplotlib import pyplot as plt\n'), ((2837, 2847), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2845, 2847), True, 'from matplotlib import pyplot as plt\n'), ((2935, 2966), 'matplotlib.pyplot.pie', 'plt.pie', (['quantity'], {'labels': 'fruit'}), '(quantity, labels=fruit)\n', (2942, 2966), True, 'from matplotlib import pyplot as plt\n'), ((2967, 2977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2975, 2977), True, 'from matplotlib import pyplot as plt\n'), ((3068, 3166), 'matplotlib.pyplot.pie', 'plt.pie', (['quantity'], {'labels': 'fruit', 'autopct': '"""%0.1f%%"""', 'colors': "['yellow', 'grey', 'blue', 'black']"}), "(quantity, labels=fruit, autopct='%0.1f%%', colors=['yellow', 'grey',\n 'blue', 'black'])\n", (3075, 3166), True, 'from matplotlib import pyplot as plt\n'), ((3163, 3173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3171, 3173), True, 'from matplotlib import pyplot as plt\n'), ((3265, 3306), 'matplotlib.pyplot.pie', 'plt.pie', (['quantity'], {'labels': 'fruit', 'radius': '(2)'}), '(quantity, labels=fruit, radius=2)\n', (3272, 3306), True, 'from matplotlib import pyplot as plt\n'), ((3307, 3343), 'matplotlib.pyplot.pie', 'plt.pie', (['[1]'], {'colors': "['w']", 'radius': '(1)'}), "([1], colors=['w'], radius=1)\n", (3314, 3343), True, 'from matplotlib import pyplot as plt\n'), ((3344, 3354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3352, 3354), True, 'from matplotlib import pyplot as plt\n'), ((3442, 3466), 'seaborn.load_dataset', 'sns.load_dataset', (['"""fmri"""'], {}), "('fmri')\n", (3458, 3466), True, 'import seaborn as sns\n'), ((3479, 3530), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""time_point"""', 'y': '"""signal"""', 'data': 'fmri'}), "(x='time_point', y='signal', data=fmri)\n", (3491, 3530), True, 'import seaborn as sns\n'), ((3531, 3541), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3539, 3541), True, 'from matplotlib import pyplot as plt\n')] |
import os
import numpy as np
from pyfftw.builders import rfft
from scipy.interpolate import interp1d
from scipy.special import gamma
from scipy.integrate import quad
import matplotlib.pyplot as plt
class FFTLog(object):
def __init__(self, **kwargs):
self.Nmax = kwargs['Nmax']
self.xmin = kwargs['xmin']
self.xmax = kwargs['xmax']
self.bias = kwargs['bias']
self.dx = np.log(self.xmax/self.xmin) / (self.Nmax-1.)
self.setx()
self.setPow()
def setx(self):
self.x = self.xmin * np.exp(np.arange(self.Nmax) * self.dx)
def setPow(self):
self.Pow = self.bias + 1j * 2. * np.pi / (self.Nmax * self.dx) * (np.arange(self.Nmax+1) - self.Nmax/2.)
def Coef(self, xin, f, window=1, co=common):
interpfunc = interp1d(xin, f, kind='cubic')
if xin[0] > self.x[0]:
print ('low extrapolation')
nslow = (log(f[1])-log(f[0])) / (log(xin[1])-log(xin[0]))
Aslow = f[0] / xin[0]**nslow
if xin[-1] < self.x[-1]:
print ('high extrapolation')
nshigh = (log(f[-1])-log(f[-2])) / (log(xin[-1])-log(xin[-2]))
Ashigh = f[-1] / xin[-1]**nshigh
fx = np.empty(self.Nmax)
tmp = np.empty(int(self.Nmax/2+1), dtype = complex)
Coef = np.empty(self.Nmax+1, dtype = complex)
for i in range(self.Nmax):
if xin[0] > self.x[i]: fx[i] = Aslow * self.x[i]**nslow * np.exp(-self.bias*i*self.dx)
elif xin[-1] < self.x[i]: fx[i] = Ashigh * self.x[i]**nshigh * np.exp(-self.bias*i*self.dx)
else: fx[i] = interpfunc(self.x[i]) * np.exp(-self.bias*i*self.dx)
#tmp = rfft(fx) ### numpy
tmp = rfft(fx)() ### pyfftw
for i in range(self.Nmax+1):
if (i < self.Nmax/2): Coef[i] = np.conj(tmp[int(self.Nmax/2-i)]) * self.xmin**(-self.Pow[i]) / float(self.Nmax)
else: Coef[i] = tmp[int(i-self.Nmax/2)] * self.xmin**(-self.Pow[i]) / float(self.Nmax)
if window is not None: Coef = Coef*CoefWindow(self.Nmax, window=window)
else:
Coef[0] /= 2.
Coef[self.Nmax] /= 2.
return Coef
#return self.x,
def sumCoefxPow(self, xin, f, x, window=1):
Coef = self.Coef(xin, f, window=window)
fFFT = np.empty_like(x)
for i, xi in enumerate(x):
fFFT[i] = np.real( np.sum(Coef * xi**self.Pow) )
return fFFT | [
"pyfftw.builders.rfft",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.sum",
"numpy.empty_like",
"numpy.empty",
"numpy.arange"
] | [((808, 838), 'scipy.interpolate.interp1d', 'interp1d', (['xin', 'f'], {'kind': '"""cubic"""'}), "(xin, f, kind='cubic')\n", (816, 838), False, 'from scipy.interpolate import interp1d\n'), ((1233, 1252), 'numpy.empty', 'np.empty', (['self.Nmax'], {}), '(self.Nmax)\n', (1241, 1252), True, 'import numpy as np\n'), ((1328, 1366), 'numpy.empty', 'np.empty', (['(self.Nmax + 1)'], {'dtype': 'complex'}), '(self.Nmax + 1, dtype=complex)\n', (1336, 1366), True, 'import numpy as np\n'), ((2379, 2395), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (2392, 2395), True, 'import numpy as np\n'), ((413, 442), 'numpy.log', 'np.log', (['(self.xmax / self.xmin)'], {}), '(self.xmax / self.xmin)\n', (419, 442), True, 'import numpy as np\n'), ((1751, 1759), 'pyfftw.builders.rfft', 'rfft', (['fx'], {}), '(fx)\n', (1755, 1759), False, 'from pyfftw.builders import rfft\n'), ((2462, 2491), 'numpy.sum', 'np.sum', (['(Coef * xi ** self.Pow)'], {}), '(Coef * xi ** self.Pow)\n', (2468, 2491), True, 'import numpy as np\n'), ((561, 581), 'numpy.arange', 'np.arange', (['self.Nmax'], {}), '(self.Nmax)\n', (570, 581), True, 'import numpy as np\n'), ((694, 718), 'numpy.arange', 'np.arange', (['(self.Nmax + 1)'], {}), '(self.Nmax + 1)\n', (703, 718), True, 'import numpy as np\n'), ((1482, 1514), 'numpy.exp', 'np.exp', (['(-self.bias * i * self.dx)'], {}), '(-self.bias * i * self.dx)\n', (1488, 1514), True, 'import numpy as np\n'), ((1586, 1618), 'numpy.exp', 'np.exp', (['(-self.bias * i * self.dx)'], {}), '(-self.bias * i * self.dx)\n', (1592, 1618), True, 'import numpy as np\n'), ((1665, 1697), 'numpy.exp', 'np.exp', (['(-self.bias * i * self.dx)'], {}), '(-self.bias * i * self.dx)\n', (1671, 1697), True, 'import numpy as np\n')] |
"""Morse code handling"""
from configparser import ConfigParser
import os
from pathlib import Path
import sys
import warnings
import numpy as np
import sklearn.cluster
import sklearn.exceptions
from .io import read_wave
from .processing import smoothed_power, squared_signal
class MorseCode:
    """Morse code
    Attributes:
        data (np.ndarray): 1D binary array, representing morse code in time
    """
    # Cached morse-string -> letter mapping, populated lazily from morse.ini.
    _morse_to_char: dict = None
    def __init__(self, data: np.ndarray, sample_rate: int = None):
        """Initialize code with binary data
        Args:
            data (np.ndarray): 1D binary array, representing morse code in time
            sample_rate (int): Audio sampling rate. Default: None.
        """
        self.data = data
        self.sample_rate = sample_rate
    @classmethod
    def from_wavfile(cls, file: os.PathLike) -> "MorseCode":
        """Construct from wave file
        - Read in wave file
        - Calculate signal envelope (smoothing window of 0.01 seconds)
        - Apply squaring (threshold: 50% of max smoothed data value)
        Args:
            file (os.PathLike): path to input WAV file
        Returns:
            MorseCode: class instance, with 1D binary input data
        """
        sample_rate, wave = read_wave(file)
        # 0.01 s worth of samples as the envelope smoothing window.
        window_size = int(0.01 * sample_rate)
        envelope = smoothed_power(wave, window_size)
        square_data = squared_signal(envelope)
        # NOTE(review): sample_rate is not forwarded to the constructor, so
        # instances built here cannot use the 20 wpm fallback in
        # _dash_dot_characters (it raises UserWarning instead) — verify intent.
        return cls(square_data)
    def decode(self) -> str:
        """Decode data
        Returns:
            str: Morse code content, in plain language
        Raises:
            UserWarning: dash/dot separation cannot be made unambiguously
        """
        on_samples, off_samples = self._on_off_samples()
        dash_dot_chars = self._dash_dot_characters(on_samples)
        char_break_idx, word_space_idx = self._break_spaces(off_samples)
        morse_words = self._morse_words(dash_dot_chars, char_break_idx, word_space_idx)
        return self._translate(morse_words)
    @classmethod
    @property
    def morse_to_char(cls) -> dict[str, str]:
        """Morse to character dictionary
        Read mappings from morse.ini and store them to class variable. Later,
        return directly from this class variable.
        Returns:
            dict[str, str]: Mapping of morse character string to letter
        """
        # NOTE(review): chaining @classmethod with @property is deprecated in
        # Python 3.11 and removed in 3.13; consider a plain classmethod or a
        # module-level cache if newer interpreters must be supported.
        if cls._morse_to_char is not None:
            return cls._morse_to_char
        config = ConfigParser()
        # morse.ini is expected to live next to this module.
        config.read(Path(__file__).parent / "morse.ini")
        chars = config["characters"]
        # The ini maps letter -> morse; invert to morse -> uppercase letter.
        cls._morse_to_char = {chars[key]: key.upper() for key in chars}
        return cls._morse_to_char
    def _on_off_samples(self) -> tuple[np.ndarray, np.ndarray]:
        """Calculate signal ON/OFF durations
        Locate rising and falling edges in square wave at self.data. Calculate
        number of samples in each ON / OFF period.
        Returns:
            tuple[np.ndarray, np.ndarray]: on_samples, off_samples. Note that
                in addition to character and word spaces, off_samples also
                includes inter-character spaces.
        """
        if len(self.data) == 0:
            return np.array([], dtype="int"), np.array([], dtype="int")
        # diff is +1 on a rising edge, -1 on a falling edge, 0 elsewhere.
        square_diff = np.diff(self.data)
        rising_idx = np.nonzero(square_diff == 1)[0]
        falling_idx = np.nonzero(square_diff == -1)[0]
        # Case: data starts with ON - it started one sample before index 0
        if falling_idx[0] < rising_idx[0]:
            rising_idx = np.insert(rising_idx, 0, -1)
        # Case: data ends with ON
        if rising_idx[-1] > falling_idx[-1]:
            falling_idx = np.insert(falling_idx, len(falling_idx), len(self.data) - 1)
        on_samples = falling_idx - rising_idx
        off_samples = rising_idx[1:] - falling_idx[: len(falling_idx) - 1]
        return on_samples, off_samples
    def _dash_dot_characters(self, on_samples: np.ndarray) -> np.ndarray:
        """Convert array of ON sample lengths to array of dashes and dots
        NOTE: It is expected, that the signal contains exactly two distinct
        lengths - those for a dash and for a dot. If the keying speed varies,
        or either character does not exist, then this method will fail.
        As a circumvention, 20 WPM is used as a guess
        Args:
            on_samples (np.ndarray): number of samples in each ON period in
                the signal. This comes from `MorseCode._on_off_samples`.
        Raises:
            UserWarning: if there are no distinct clusters (only dashes
                or dots in the input), and self.sample_rate is not set; thus
                no guess can be made on dash/dot.
        Returns:
            np.ndarray: array of dashes and dots, of object (string) type
        """
        if len(on_samples) == 0:
            return np.array([], dtype="str")
        n_clusters = min(2, len(on_samples))
        column_vec = on_samples.reshape(-1, 1)
        # Suppress ConvergenceWarning on too low distinct clusters; fix it later
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clustering = sklearn.cluster.KMeans(
                n_clusters=n_clusters, random_state=0
            ).fit(column_vec)
        distinct_clusters = len(set(clustering.labels_))
        # It is not clear whether dash or dot -- use (20 wpm dot length) * 1.5 as limit
        if distinct_clusters == 1:
            if self.sample_rate is None:
                raise UserWarning("Cannot determine whether dash or dot")
            sys.stderr.write("WARNING: too little data, guessing based on 20 wpm")
            sample_length = clustering.cluster_centers_[0]
            # At 20 wpm a dot lasts 60 ms -> sample_rate * 60 / 1000 samples.
            is_dot = sample_length / (self.sample_rate * 60 / 1000) < 1.5
            dot_label = 0 if is_dot else 1
            dash_label = 1 if is_dot else 0
        else:
            # Shorter cluster center corresponds to dots, longer to dashes.
            cluster_sort_idx = np.argsort(
                clustering.cluster_centers_.flatten()
            ).tolist()
            dot_label = cluster_sort_idx.index(0)
            dash_label = cluster_sort_idx.index(1)
        dash_dot_map = {dot_label: ".", dash_label: "-"}
        dash_dot_characters = np.vectorize(dash_dot_map.get)(clustering.labels_)
        return dash_dot_characters
    @staticmethod
    def _break_spaces(off_samples: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        """Convert array of OFF sample lengths to indices for char/word breaks
        NOTE: It is expected, that the signal contains exactly three distinct
        space lengths: inter-character space, character space and word space.
        If the keying speed varies, or word spaces do not exist, then this
        method will fail.
        Args:
            off_samples (np.ndarray): number of samples in each OFF period in
                the signal. This comes from `MorseCode._on_off_samples`.
        Returns:
            tuple[np.ndarray, np.ndarray]: indices for breaking dash/dot
                character array from `MorseCode._dash_dot_characters`. First
                array contains positions, where character breaks should be.
                Second array contains positions, where word spaces should be in
                the list of already resolved morse characters.
        """
        if len(off_samples) == 0:
            return np.array([], dtype="int"), np.array([], dtype="int")
        n_clusters = min(3, len(off_samples))
        column_vec = off_samples.reshape(-1, 1)
        # Suppress ConvergenceWarning on too low distinct clusters; fix it later
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clustering = sklearn.cluster.KMeans(
                n_clusters=n_clusters, random_state=0
            ).fit(column_vec)
        distinct_clusters = len(set(clustering.labels_))
        # Clusters sorted by center length: 0 = intra-char, 1 = char, 2 = word.
        cluster_sort_idx = np.argsort(clustering.cluster_centers_.flatten()).tolist()
        # This index breaks dashes/dots into characters
        intra_space_label = cluster_sort_idx.index(0)
        char_break_idx = np.nonzero(clustering.labels_ != intra_space_label)[0] + 1
        char_or_word_space_arr = clustering.labels_[
            clustering.labels_ != intra_space_label
        ]
        # This index breaks character list into word lists
        if distinct_clusters == 3:
            word_space_label = cluster_sort_idx.index(2)
            word_space_idx = (
                np.nonzero(char_or_word_space_arr == word_space_label)[0] + 1
            )
        else:
            word_space_idx = np.array([], dtype="int")
        return char_break_idx, word_space_idx
    @staticmethod
    def _morse_words(
        raw_dash_dot: np.ndarray,
        char_break_idx: np.ndarray,
        word_space_idx: np.ndarray,
    ) -> list[list[str]]:
        """Convert character and space arrays to list of morse words
        Args:
            raw_dash_dot (np.ndarray): Numpy array of strings, contains
                '.' and '-' characters, as processed from self.data
            char_break_idx (np.ndarray): Index of locations in raw_dash_dot,
                where a character space or word space would exist. The array
                raw_dash_dot is first broken into characters with this index.
            word_space_idx (np.ndarray): Index for breaking character array
                into words. Contains locations of word spaces between natural
                language characters.
        Returns:
            list[list[str]]: Words in morse code. A single word is a list of
                dash-dot character combinations.
        """
        # Slice raw dashes/dots into per-character strings ...
        char_start_idx = [0] + (char_break_idx).tolist()
        char_end_idx = (char_break_idx).tolist() + [len(raw_dash_dot)]
        morse_characters = [
            "".join(raw_dash_dot[i:j].tolist())
            for i, j in zip(char_start_idx, char_end_idx)
        ]
        # ... then slice the character list into words.
        word_start_idx = [0] + (word_space_idx).tolist()
        word_end_idx = (word_space_idx).tolist() + [len(morse_characters)]
        return [morse_characters[i:j] for i, j in zip(word_start_idx, word_end_idx)]
    def _translate(self, morse_words: list[list[str]]) -> str:
        """Translate list of morse-coded words to string
        Args:
            morse_words (list[list[str]]): List of words, having list of characters.
                The characters are in morse-coded dash/dot form, e.g. '.--' for 'w'
        Returns:
            str: Message contained in input
        """
        char_dict = self.morse_to_char
        # Unknown morse sequences map to "" and are silently dropped.
        char_lists = [[char_dict.get(j, "") for j in i] for i in morse_words]
        return " ".join(["".join(word) for word in char_lists])
| [
"numpy.insert",
"configparser.ConfigParser",
"pathlib.Path",
"warnings.catch_warnings",
"numpy.diff",
"numpy.array",
"sys.stderr.write",
"numpy.nonzero",
"warnings.simplefilter",
"numpy.vectorize"
] | [((2473, 2487), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2485, 2487), False, 'from configparser import ConfigParser\n'), ((3287, 3305), 'numpy.diff', 'np.diff', (['self.data'], {}), '(self.data)\n', (3294, 3305), True, 'import numpy as np\n'), ((3328, 3356), 'numpy.nonzero', 'np.nonzero', (['(square_diff == 1)'], {}), '(square_diff == 1)\n', (3338, 3356), True, 'import numpy as np\n'), ((3382, 3411), 'numpy.nonzero', 'np.nonzero', (['(square_diff == -1)'], {}), '(square_diff == -1)\n', (3392, 3411), True, 'import numpy as np\n'), ((3559, 3587), 'numpy.insert', 'np.insert', (['rising_idx', '(0)', '(-1)'], {}), '(rising_idx, 0, -1)\n', (3568, 3587), True, 'import numpy as np\n'), ((4884, 4909), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""str"""'}), "([], dtype='str')\n", (4892, 4909), True, 'import numpy as np\n'), ((5098, 5123), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5121, 5123), False, 'import warnings\n'), ((5137, 5168), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5158, 5168), False, 'import warnings\n'), ((5610, 5680), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: too little data, guessing based on 20 wpm"""'], {}), "('WARNING: too little data, guessing based on 20 wpm')\n", (5626, 5680), False, 'import sys\n'), ((6225, 6255), 'numpy.vectorize', 'np.vectorize', (['dash_dot_map.get'], {}), '(dash_dot_map.get)\n', (6237, 6255), True, 'import numpy as np\n'), ((7610, 7635), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7633, 7635), False, 'import warnings\n'), ((7649, 7680), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7670, 7680), False, 'import warnings\n'), ((8587, 8612), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (8595, 8612), True, 'import numpy as np\n'), ((3212, 3237), 'numpy.array', 'np.array', (['[]'], {'dtype': 
'"""int"""'}), "([], dtype='int')\n", (3220, 3237), True, 'import numpy as np\n'), ((3239, 3264), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (3247, 3264), True, 'import numpy as np\n'), ((7367, 7392), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (7375, 7392), True, 'import numpy as np\n'), ((7394, 7419), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (7402, 7419), True, 'import numpy as np\n'), ((8094, 8145), 'numpy.nonzero', 'np.nonzero', (['(clustering.labels_ != intra_space_label)'], {}), '(clustering.labels_ != intra_space_label)\n', (8104, 8145), True, 'import numpy as np\n'), ((2508, 2522), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2512, 2522), False, 'from pathlib import Path\n'), ((8468, 8522), 'numpy.nonzero', 'np.nonzero', (['(char_or_word_space_arr == word_space_label)'], {}), '(char_or_word_space_arr == word_space_label)\n', (8478, 8522), True, 'import numpy as np\n')] |
# http://www.apache.org/licenses/LICENSE-2.0
import unittest
import time
import numpy as np
import auspex.config as config
# Enable dummy mode before importing the rest of auspex — presumably this
# avoids touching real instruments during the tests; confirm against auspex docs.
config.auspex_dummy_mode = True
from auspex.experiment import Experiment
from auspex.stream import DataStream, DataAxis, DataStreamDescriptor, OutputConnector
from auspex.filters.debug import Print, Passthrough
from auspex.filters.correlator import Correlator
from auspex.filters.io import DataBuffer
from auspex.log import logger
class CorrelatorExperiment(Experiment):
    """Dummy experiment that feeds a known ramp into two output channels."""
    # DataStreams
    chan1 = OutputConnector()
    chan2 = OutputConnector()
    # Constants
    samples = 100
    # For correlator verification
    vals = 2.0 + np.linspace(0, 10*np.pi, samples)
    def init_streams(self):
        """Attach an equal-length 'samples' axis to both channels."""
        for chan in (self.chan1, self.chan2):
            chan.add_axis(DataAxis("samples", list(range(self.samples))))
    def run(self):
        """Push randomly-sized chunks of vals into both channels until full."""
        logger.debug("Data taker running (inner loop)")
        np.random.seed(12345)
        self.idx_1 = 0
        self.idx_2 = 0
        while self.idx_1 < self.samples or self.idx_2 < self.samples:
            # Draw both chunk sizes up front so the RNG sequence stays fixed.
            chunk_1 = np.random.randint(2, 5)
            chunk_2 = np.random.randint(2, 5)
            remaining_1 = self.chan1.num_points() - self.chan1.points_taken.value
            if remaining_1 > 0:
                chunk_1 = min(chunk_1, remaining_1)
                # logger.info(f"C1 push {self.vals[self.idx_1:self.idx_1+chunk_1]}")
                self.chan1.push(self.vals[self.idx_1:self.idx_1 + chunk_1])
                self.idx_1 += chunk_1
            remaining_2 = self.chan2.num_points() - self.chan2.points_taken.value
            if remaining_2 > 0:
                chunk_2 = min(chunk_2, remaining_2)
                self.chan2.push(self.vals[self.idx_2:self.idx_2 + chunk_2])
                self.idx_2 += chunk_2
            time.sleep(0.002)
            logger.debug("Idx_1: %d, Idx_2: %d", self.idx_1, self.idx_2)
class CorrelatorTestCase(unittest.TestCase):
    """Correlating a channel with itself should yield the squared ramp."""
    def test_correlator(self):
        experiment = CorrelatorExperiment()
        correlator = Correlator(name='corr')
        sink_buffer = DataBuffer()
        # Both experiment channels feed the correlator; its output is buffered.
        experiment.set_graph([
            (experiment.chan1, correlator.sink),
            (experiment.chan2, correlator.sink),
            (correlator.source, sink_buffer.sink),
        ])
        experiment.run_sweeps()
        time.sleep(0.01)
        expected_data = experiment.vals * experiment.vals
        self.assertAlmostEqual(np.sum(sink_buffer.output_data), np.sum(expected_data), places=0)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"time.sleep",
"numpy.sum",
"numpy.linspace",
"auspex.filters.io.DataBuffer",
"numpy.random.randint",
"numpy.random.seed",
"auspex.stream.OutputConnector",
"unittest.main",
"auspex.filters.correlator.Correlator",
"auspex.log.logger.debug"
] | [((533, 550), 'auspex.stream.OutputConnector', 'OutputConnector', ([], {}), '()\n', (548, 550), False, 'from auspex.stream import DataStream, DataAxis, DataStreamDescriptor, OutputConnector\n'), ((563, 580), 'auspex.stream.OutputConnector', 'OutputConnector', ([], {}), '()\n', (578, 580), False, 'from auspex.stream import DataStream, DataAxis, DataStreamDescriptor, OutputConnector\n'), ((2850, 2865), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2863, 2865), False, 'import unittest\n'), ((668, 703), 'numpy.linspace', 'np.linspace', (['(0)', '(10 * np.pi)', 'samples'], {}), '(0, 10 * np.pi, samples)\n', (679, 703), True, 'import numpy as np\n'), ((911, 958), 'auspex.log.logger.debug', 'logger.debug', (['"""Data taker running (inner loop)"""'], {}), "('Data taker running (inner loop)')\n", (923, 958), False, 'from auspex.log import logger\n'), ((967, 988), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (981, 988), True, 'import numpy as np\n'), ((2388, 2411), 'auspex.filters.correlator.Correlator', 'Correlator', ([], {'name': '"""corr"""'}), "(name='corr')\n", (2398, 2411), False, 'from auspex.filters.correlator import Correlator\n'), ((2428, 2440), 'auspex.filters.io.DataBuffer', 'DataBuffer', ([], {}), '()\n', (2438, 2440), False, 'from auspex.filters.io import DataBuffer\n'), ((2634, 2650), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2644, 2650), False, 'import time\n'), ((1222, 1245), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (1239, 1245), True, 'import numpy as np\n'), ((1265, 1288), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (1282, 1288), True, 'import numpy as np\n'), ((2164, 2181), 'time.sleep', 'time.sleep', (['(0.002)'], {}), '(0.002)\n', (2174, 2181), False, 'import time\n'), ((2194, 2254), 'auspex.log.logger.debug', 'logger.debug', (['"""Idx_1: %d, Idx_2: %d"""', 'self.idx_1', 'self.idx_2'], {}), "('Idx_1: %d, Idx_2: %d', self.idx_1, 
self.idx_2)\n", (2206, 2254), False, 'from auspex.log import logger\n'), ((2765, 2782), 'numpy.sum', 'np.sum', (['corr_data'], {}), '(corr_data)\n', (2771, 2782), True, 'import numpy as np\n'), ((2784, 2805), 'numpy.sum', 'np.sum', (['expected_data'], {}), '(expected_data)\n', (2790, 2805), True, 'import numpy as np\n')] |
"""Compare mfd.sca output against saved reference data and plot the relative error."""
import numpy as np
import mfd
from matplotlib import pyplot as pl

# Rectangular grid: 1.5x as many points along x as along y.
grid_n = 30
x_axis = np.linspace(0, 2*1.5, int(grid_n*1.5))
y_axis = np.linspace(0, 2, grid_n)
grid_x, grid_y = np.meshgrid(x_axis, y_axis)

# Gaussian test field and the (uniform) grid spacing.
field = np.exp(-grid_x*grid_x - grid_y*grid_y)
spacing = abs(x_axis[0] - x_axis[1])

computed = mfd.sca(field, spacing)
reference = np.load('data.npy')

# Relative difference between the computed and the reference result.
pl.pcolormesh(grid_x, grid_y, (computed - reference) / computed)
pl.colorbar()
pl.gca().set_aspect('equal')
pl.show()
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"numpy.exp",
"numpy.linspace",
"numpy.meshgrid",
"numpy.load",
"mfd.sca",
"matplotlib.pyplot.show"
] | [((118, 138), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (129, 138), True, 'import numpy as np\n'), ((146, 165), 'numpy.meshgrid', 'np.meshgrid', (['xr', 'yr'], {}), '(xr, yr)\n', (157, 165), True, 'import numpy as np\n'), ((170, 192), 'numpy.exp', 'np.exp', (['(-x * x - y * y)'], {}), '(-x * x - y * y)\n', (176, 192), True, 'import numpy as np\n'), ((212, 225), 'mfd.sca', 'mfd.sca', (['z', 'w'], {}), '(z, w)\n', (219, 225), False, 'import mfd\n'), ((230, 249), 'numpy.load', 'np.load', (['"""data.npy"""'], {}), "('data.npy')\n", (237, 249), True, 'import numpy as np\n'), ((250, 282), 'matplotlib.pyplot.pcolormesh', 'pl.pcolormesh', (['x', 'y', '((a - b) / a)'], {}), '(x, y, (a - b) / a)\n', (263, 282), True, 'from matplotlib import pyplot as pl\n'), ((279, 292), 'matplotlib.pyplot.colorbar', 'pl.colorbar', ([], {}), '()\n', (290, 292), True, 'from matplotlib import pyplot as pl\n'), ((298, 306), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (304, 306), True, 'from matplotlib import pyplot as pl\n'), ((330, 339), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (337, 339), True, 'from matplotlib import pyplot as pl\n')] |
import pandas
import matplotlib.pyplot as plt
from ETF import get_split_name_and_isin, get_combined_name_and_isin
from pypfopt import expected_returns, risk_models, plotting, discrete_allocation
from pypfopt.efficient_frontier import EfficientFrontier
import base64
import io
import numpy as np
import math
from timeit import default_timer
# Optimizer names accepted via optimizer_parameters["optimizer"].
OPTIMIZERS = ["MaxSharpe", "MinimumVolatility", "EfficientRisk", "EfficientReturn"]
# Default amount of money to allocate across the portfolio.
INITIAL_VALUE = 10000
# Default optimization objective.
OPTIMIZER = "MaxSharpe"
# Default annual risk-free rate for Sharpe-ratio based optimization.
RISK_FREE_RATE = 0.02
# Default minimum number of historical price observations an ETF must have.
MINIMUM_DAYS_WITH_DATA = 0
# Defaults for pypfopt clean_weights(): weights below the cutoff are zeroed,
# remaining weights are rounded.
ASSET_WEIGHT_CUTOFF = 0.01
ASSET_WEIGHT_ROUNDING = 4
# Default look-back window for prices (0 presumably means full history).
ROLLING_WINDOW_IN_DAYS = 0
# Runtime guard: cap on the number of ETFs fed to the optimizer.
MAX_ETF_LIST_SIZE = 400
# Number of efficient-frontier points to plot (0 disables plotting).
N_EF_PLOTTING_POINTS = 10
def optimize(etf_list, prices_df, optimizer_parameters, etf_filters):
    """Filter ETFs, build an efficient frontier and return the optimized portfolio.

    Args:
        etf_list: candidate ETF objects.
        prices_df: raw price history for the candidates.
        optimizer_parameters: optimizer settings, e.g. "optimizer",
            "rollingWindowInDays", "finalDate", "nEFPlottingPoints".
        etf_filters: filter settings consumed by filter_etfs_using_filters.

    Returns:
        dict: result from get_portfolio_and_performance, extended with the
        ETF counts (and, when plotting is enabled, whatever
        add_plot_to_portfolio attaches).
    """
    start = default_timer()
    etf_list = filter_etfs_using_filters(etf_list, etf_filters)
    etfs_matching_filters = len(etf_list)
    rolling_window_in_days = optimizer_parameters.get("rollingWindowInDays", ROLLING_WINDOW_IN_DAYS)
    final_date = optimizer_parameters.get("finalDate", None)
    prices = get_prices_data_frame_with_parameters(etf_list, prices_df, rolling_window_in_days, final_date)
    prices, etf_size_list = size_check_prices_df(prices, optimizer_parameters)
    returns = expected_returns.mean_historical_return(prices)
    remove_ter_from_returns(etf_list, returns)
    cov = risk_models.sample_cov(prices)
    # Per-asset volatility: square root of the covariance matrix diagonal.
    volatility = pandas.Series(np.sqrt(np.diag(cov)), index=cov.index)
    ef = EfficientFrontier(returns, cov, weight_bounds=(0, 1), solver_options={"solver": "ECOS"}, verbose=False)
    n_ef_plotting_points = optimizer_parameters.get("nEFPlottingPoints", N_EF_PLOTTING_POINTS)
    if n_ef_plotting_points > 0:
        # Plotting branch: draw the frontier first (must happen before the
        # optimizer mutates ef), then overlay the chosen assets.
        # NOTE(review): fig is never closed here; verify add_plot_to_portfolio
        # releases it, otherwise repeated calls leak matplotlib figures.
        fig, ax = plt.subplots()
        param_range = get_plotting_param_range(ef, n_ef_plotting_points)
        plotting.plot_efficient_frontier(ef, ax=ax, points=n_ef_plotting_points, ef_param_range=param_range, show_assets=True)
        call_optimizer(ef, optimizer_parameters)
        portfolio = get_portfolio_and_performance(ef, prices, optimizer_parameters, returns, volatility)
        plot_assets_in_portfolio_with_different_color(portfolio, ax, volatility, returns)
        add_plot_to_portfolio(portfolio, ax, fig)
    else:
        call_optimizer(ef, optimizer_parameters)
        portfolio = get_portfolio_and_performance(ef, prices, optimizer_parameters, returns, volatility)
    end = default_timer()
    # NOTE(review): the message says "max sharpe" even when another optimizer ran.
    print("Time to find max sharpe {}".format(end - start))
    # NOTE(review): both counts below are derived from the already-filtered
    # etf_list and are always equal; the second presumably should reflect the
    # post-truncation size from size_check_prices_df — verify.
    portfolio["ETFsMatchingFilters"] = etfs_matching_filters
    portfolio["ETFsUsedForOptimization"] = len(etf_list)
    return portfolio
def get_plotting_param_range(ef, points):
    """Return the efficient-frontier return range used for plotting.

    Falls back to a zero-based range when pypfopt's default range would
    start below zero.
    """
    default_range = plotting._ef_default_returns_range(ef, points=points)
    if default_range[0] >= 0:
        return default_range
    # Negative expected returns are not useful to plot; start from zero.
    return np.linspace(0, default_range[-1], points)
def size_check_prices_df(prices, optimizer_parameters):
    """Validate the number of ETFs in the prices frame and cap it if needed.

    Args:
        prices: price history DataFrame for the filtered ETFs.
        optimizer_parameters: may contain "maxETFListSize" to override the cap.

    Returns:
        tuple: (possibly truncated prices, ETF count after filtering).

    Raises:
        Exception: when zero or only one ETF remains after filtering.
    """
    # NOTE(review): the count uses len(prices.index) (rows) while the cap
    # below slices columns (prices.iloc[:, :max]); these agree only if ETFs
    # are on the index here — confirm the orientation of the frame built
    # upstream by get_prices_data_frame_with_parameters.
    etf_list_size_after_filtering = len(prices.index)
    if etf_list_size_after_filtering == 0:
        raise Exception("No ETFs are left after filtering. Can't perform portfolio optimization. " +
                        "Check if your filters are correct.")
    if etf_list_size_after_filtering == 1:
        raise Exception("Can't perform portfolio optimization on just one ETF.")
    max_etf_list_size = optimizer_parameters.get("maxETFListSize", MAX_ETF_LIST_SIZE)
    if etf_list_size_after_filtering > max_etf_list_size:
        print("Too many ETFs, calculation will take too long. Using only the first {} ETFs".format(max_etf_list_size))
        prices = prices.iloc[:, :max_etf_list_size]
    return prices, etf_list_size_after_filtering
def filter_etfs_using_filters(etf_list, etf_filters):
    """Return the ETFs from etf_list that pass every user-supplied filter.

    Recognized etf_filters keys: "isinList", "minimumDaysWithData",
    "domicileCountry", "replicationMethod", "distributionPolicy",
    "fundCurrency". A missing key means that criterion is not applied.
    """
    isin_list = etf_filters.get("isinList", None)
    minimum_days_with_data = etf_filters.get("minimumDaysWithData")
    if minimum_days_with_data is None:
        minimum_days_with_data = MINIMUM_DAYS_WITH_DATA
    domicile_country = etf_filters.get("domicileCountry", None)
    replication_method = etf_filters.get("replicationMethod", None)
    distribution_policy = etf_filters.get("distributionPolicy", None)
    fund_currency = etf_filters.get("fundCurrency", None)
    # First pass: drop ETFs without enough historical observations.
    etfs_with_data = [etf for etf in etf_list
                      if len(etf.get_historical_data()) >= minimum_days_with_data]
    print("Filtered ETFs that don't have enough data: {} ETFs left".format(len(etfs_with_data)))
    # Second pass: every provided criterion must match.
    etfs_with_filters = []
    for etf in etfs_with_data:
        if (
            (isin_list is None or etf.get_isin() in isin_list)
            and (domicile_country is None or domicile_country == etf.get_domicile_country())
            and (replication_method is None or replication_method == etf.get_replication_method())
            and (distribution_policy is None or distribution_policy == etf.get_distribution_policy())
            and (fund_currency is None or fund_currency == etf.get_fund_currency())
        ):
            etfs_with_filters.append(etf)
    print("Filtered ETFs by the parameters provided: {} ETFs left".format(len(etfs_with_filters)))
    return etfs_with_filters
def call_optimizer(ef, optimizer_parameters):
    """Run the selected optimization routine on the EfficientFrontier object.

    Supported optimizers: "MaxSharpe" (uses riskFreeRate), "MinimumVolatility",
    "EfficientRisk" (requires targetVolatility) and "EfficientReturn"
    (requires targetReturn).

    Raises:
        Exception: for an unknown optimizer name, or when a required target
            parameter is missing.
    """
    optimizer = optimizer_parameters.get("optimizer", OPTIMIZER)
    target_volatility = optimizer_parameters.get("targetVolatility", None)
    target_return = optimizer_parameters.get("targetReturn", None)
    risk_free_rate = optimizer_parameters.get("riskFreeRate", RISK_FREE_RATE)
    if optimizer == "MaxSharpe":
        ef.max_sharpe(risk_free_rate=risk_free_rate)
        return
    if optimizer == "MinimumVolatility":
        ef.min_volatility()
        return
    if optimizer == "EfficientRisk":
        if target_volatility is None:
            raise Exception("No target volatility provided for EfficientRisk optimizer.")
        ef.efficient_risk(target_volatility=target_volatility)
        return
    if optimizer == "EfficientReturn":
        if target_return is None:
            raise Exception("No target return provided for EfficientReturn optimizer.")
        ef.efficient_return(target_return=target_return)
        return
    raise Exception("The optimizer provided isn't valid. Provide one of: {}".format(OPTIMIZERS))
def get_portfolio_and_performance(ef, prices, optimizer_parameters, returns, variance):
    """Extract the optimized portfolio and its performance metrics.

    ef -- an already-optimized pypfopt EfficientFrontier
    prices -- price DataFrame used to look up the latest prices
    optimizer_parameters -- dict with optional "initialValue",
        "riskFreeRate", "assetCutoff" and "assetRounding" overrides
    returns -- expected returns keyed by the combined ETF identifier
    variance -- volatilities keyed by the combined ETF identifier
    Returns a dict with the holdings list and summary statistics.
    """
    initial_value = optimizer_parameters.get("initialValue", INITIAL_VALUE)
    risk_free_rate = optimizer_parameters.get("riskFreeRate", RISK_FREE_RATE)
    asset_cutoff = optimizer_parameters.get("assetCutoff", ASSET_WEIGHT_CUTOFF)
    asset_rounding = optimizer_parameters.get("assetRounding", ASSET_WEIGHT_ROUNDING)
    # Drop tiny weights / round the rest, then read the
    # (expected return, annual volatility, Sharpe ratio) tuple.
    sharpe_pwt = ef.clean_weights(cutoff=asset_cutoff, rounding=asset_rounding)
    performance = ef.portfolio_performance(risk_free_rate=risk_free_rate)
    # Convert the continuous weights into whole-share purchases.
    latest_prices = discrete_allocation.get_latest_prices(prices)
    allocation = discrete_allocation.DiscreteAllocation(sharpe_pwt, latest_prices, initial_value)
    alloc, leftover_funds = allocation.greedy_portfolio()
    portfolio = []
    total_weight = 0
    portfolio_as_dict = dict(sharpe_pwt.items())
    # Report holdings from the largest weight down, skipping zero weights.
    for etf in sorted(portfolio_as_dict, key=portfolio_as_dict.get, reverse=True):
        weight = portfolio_as_dict[etf]
        if weight != 0:
            name, isin = get_split_name_and_isin(etf)
            latest_price = latest_prices[etf]
            # ETFs with non-zero weight may still get 0 whole shares.
            shares = int(alloc.get(etf, 0))
            value = shares * latest_price
            expected_return = returns[etf]
            volatility = variance[etf]
            portfolio.append({"name": name, "isin": isin, "shares": shares, "price": latest_price, "value": value,
                              "expectedReturn": expected_return, "volatility": volatility, "weight": weight})
            total_weight += weight
    result = {
        "expectedReturn": performance[0],
        "annualVolatility": performance[1],
        "sharpeRatio": performance[2],
        "portfolioSize": len(portfolio),
        "portfolio": portfolio,
        "totalWeight": total_weight,
        "totalValue": initial_value - leftover_funds,
        "initialValue": initial_value,
        "leftoverFunds": leftover_funds
    }
    return result
def plot_assets_in_portfolio_with_different_color(portfolio, ax, volatility, returns):
    """Highlight the ETFs that made it into the portfolio on the frontier plot.

    Looks up each selected asset's volatility and expected return by its
    combined "name/isin" identifier and scatters them in green on *ax*.
    """
    xs = []
    ys = []
    for holding in portfolio["portfolio"]:
        key = get_combined_name_and_isin(holding["name"], holding["isin"])
        xs.append(volatility[key])
        ys.append(returns[key])
    ax.scatter(
        xs,
        ys,
        s=30,
        color="g",
        label="assets in portfolio",
    )
def add_plot_to_portfolio(portfolio, ax, fig):
    """Finish the efficient-frontier figure and embed it into *portfolio*.

    Marks the optimized portfolio with a red star, then stores the figure
    as a base64-encoded PNG under the "efficientFrontierImage" key.
    """
    ax.scatter(portfolio["annualVolatility"], portfolio["expectedReturn"], marker="*", s=100, c="r", label="Optimized Portfolio")
    ax.set_title("Efficient Frontier")
    ax.legend()
    fig.tight_layout()
    png_buffer = io.BytesIO()
    fig.savefig(png_buffer, format="png")
    png_buffer.seek(0)
    encoded = base64.b64encode(png_buffer.getbuffer()).decode("ascii")
    portfolio["efficientFrontierImage"] = encoded
    png_buffer.close()
def remove_ter_from_returns(etf_list, returns):
    """Subtract each ETF's total expense ratio (TER) from its expected return.

    etf_list -- ETF objects exposing get_name() and get_ter()
    returns -- pandas Series indexed by ETF name; modified in place

    Entries whose index has no matching ETF are left untouched.
    """
    # Build a name -> TER lookup once instead of rescanning etf_list for
    # every entry of the Series (was O(len(returns) * len(etf_list))).
    ter_by_name = {etf.get_name(): etf.get_ter() for etf in etf_list}
    for name, gross_return in returns.items():
        ter = ter_by_name.get(name)
        if ter is not None:
            returns.loc[name] = gross_return - ter
def get_complete_prices_data_frame(etf_list):
    """Build a date-indexed DataFrame of closing prices for every ETF.

    Columns are keyed by the combined "name/isin" identifier. Obviously
    corrupt prices (zeros, or values that jump by more than 5x / fall
    below 1/5 versus a neighboring day or the recent average) are
    replaced with NaN instead of being dropped, so the date index stays
    aligned across ETFs.
    """
    start = default_timer()
    prices_by_date = {}
    for etf in etf_list:
        # ETFs without any history contribute no column at all.
        if len(etf.get_historical_data()) == 0:
            continue
        identifier = get_combined_name_and_isin(etf.get_name(), etf.get_isin())
        dates = {}
        historical_data = etf.get_historical_data()
        # Rolling window of the last (up to) 50 accepted prices, used as
        # a sanity baseline for the next price.
        last_50 = []
        for i in range(len(historical_data)):
            date_price = historical_data[i]
            date = date_price["date"]
            price = date_price["close"]
            # Fixes ETFs that have 0 as their first value and then get an infinite return
            if price == 0:
                price = float("nan")
            # Fixes ETFs that have 1 day with a completely wrong value
            previous_price = historical_data[i-1]["close"] if i > 0 else float("nan")
            next_price = historical_data[i+1]["close"] if i < len(historical_data) - 1 else float("nan")
            # A price outside [1/5, 5x] of its previous OR next neighbor
            # (or of the recent average) is treated as corrupt. If price
            # is already NaN these comparisons are all False, so it
            # simply stays NaN.
            if not math.isnan(previous_price) and previous_price != 0 and (0.2 > (price / previous_price) or 5 < (price / previous_price)):
                price = float("nan")
            elif not math.isnan(next_price) and next_price != 0 and (0.2 > (price / next_price) or 5 < (price / next_price)):
                price = float("nan")
            elif len(last_50) > 0:
                last_50_avg = sum(last_50) / len(last_50)
                if 0.2 > (price / last_50_avg) or 5 < (price / last_50_avg):
                    price = float("nan")
            # Only accepted (non-NaN) prices feed the rolling baseline.
            if not math.isnan(price):
                last_50.append(price)
                if len(last_50) == 50:
                    last_50 = last_50[1:]
            dates[date] = price
        prices_by_date[identifier] = dates
    df = pandas.DataFrame(prices_by_date)
    df.sort_index(inplace=True)
    end = default_timer()
    print("Time to build prices dataframe {}".format(end - start))
    return df
def get_prices_data_frame_with_parameters(etf_list, prices_df, rolling_window_in_days, final_date):
    """Slice the full price DataFrame down to the requested ETFs and window.

    etf_list -- ETFs to keep (columns matched by the combined name/isin id)
    prices_df -- DataFrame of closing prices indexed by date
    rolling_window_in_days -- number of most recent rows to keep
    final_date -- optional cutoff; rows after this date are discarded

    Returns the filtered DataFrame. ETFs with fewer than three recorded
    prices in the window are dropped to avoid optimizer errors downstream.
    """
    identifiers = []
    for etf in etf_list:
        identifiers.append(get_combined_name_and_isin(etf.get_name(), etf.get_isin()))
    prices_df = prices_df[identifiers]
    if final_date is not None:
        prices_df = prices_df.loc[:str(final_date)]
    prices_df = prices_df[-rolling_window_in_days:]
    # Only include ETFs that have at least three recorded prices in the given timeframe, otherwise there are errors
    etfs_to_drop = []
    for etf in prices_df:
        at_least_three_prices = 0
        # Walk backwards so recent data is counted first and we can stop early.
        for price in prices_df[etf][::-1]:
            if not math.isnan(price):
                at_least_three_prices += 1
                if at_least_three_prices == 3:
                    break
        if at_least_three_prices < 3:
            etfs_to_drop.append(etf)
    # BUG FIX: drop(labels, 1) relied on the positional `axis` argument,
    # which was deprecated in pandas 1.0 and removed in pandas 2.0.
    prices_df = prices_df.drop(columns=etfs_to_drop)
    return prices_df
| [
"pypfopt.discrete_allocation.DiscreteAllocation",
"ETF.get_split_name_and_isin",
"pypfopt.risk_models.sample_cov",
"timeit.default_timer",
"pypfopt.discrete_allocation.get_latest_prices",
"ETF.get_combined_name_and_isin",
"io.BytesIO",
"numpy.diag",
"pypfopt.plotting._ef_default_returns_range",
"p... | [((736, 751), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (749, 751), False, 'from timeit import default_timer\n'), ((1225, 1272), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['prices'], {}), '(prices)\n', (1264, 1272), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((1331, 1361), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['prices'], {}), '(prices)\n', (1353, 1361), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((1443, 1551), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['returns', 'cov'], {'weight_bounds': '(0, 1)', 'solver_options': "{'solver': 'ECOS'}", 'verbose': '(False)'}), "(returns, cov, weight_bounds=(0, 1), solver_options={\n 'solver': 'ECOS'}, verbose=False)\n", (1460, 1551), False, 'from pypfopt.efficient_frontier import EfficientFrontier\n'), ((2383, 2398), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (2396, 2398), False, 'from timeit import default_timer\n'), ((2662, 2715), 'pypfopt.plotting._ef_default_returns_range', 'plotting._ef_default_returns_range', (['ef'], {'points': 'points'}), '(ef, points=points)\n', (2696, 2715), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((6842, 6887), 'pypfopt.discrete_allocation.get_latest_prices', 'discrete_allocation.get_latest_prices', (['prices'], {}), '(prices)\n', (6879, 6887), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((6905, 6990), 'pypfopt.discrete_allocation.DiscreteAllocation', 'discrete_allocation.DiscreteAllocation', (['sharpe_pwt', 'latest_prices', 'initial_value'], {}), '(sharpe_pwt, latest_prices, initial_value\n )\n', (6943, 6990), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((9002, 9014), 'io.BytesIO', 'io.BytesIO', ([], {}), 
'()\n', (9012, 9014), False, 'import io\n'), ((9451, 9466), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (9464, 9466), False, 'from timeit import default_timer\n'), ((11138, 11170), 'pandas.DataFrame', 'pandas.DataFrame', (['prices_by_date'], {}), '(prices_by_date)\n', (11154, 11170), False, 'import pandas\n'), ((11214, 11229), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (11227, 11229), False, 'from timeit import default_timer\n'), ((1695, 1709), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1707, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1791, 1913), 'pypfopt.plotting.plot_efficient_frontier', 'plotting.plot_efficient_frontier', (['ef'], {'ax': 'ax', 'points': 'n_ef_plotting_points', 'ef_param_range': 'param_range', 'show_assets': '(True)'}), '(ef, ax=ax, points=n_ef_plotting_points,\n ef_param_range=param_range, show_assets=True)\n', (1823, 1913), False, 'from pypfopt import expected_returns, risk_models, plotting, discrete_allocation\n'), ((2759, 2798), 'numpy.linspace', 'np.linspace', (['(0)', 'param_range[-1]', 'points'], {}), '(0, param_range[-1], points)\n', (2770, 2798), True, 'import numpy as np\n'), ((8424, 8476), 'ETF.get_combined_name_and_isin', 'get_combined_name_and_isin', (["etf['name']", "etf['isin']"], {}), "(etf['name'], etf['isin'])\n", (8450, 8476), False, 'from ETF import get_split_name_and_isin, get_combined_name_and_isin\n'), ((1401, 1413), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (1408, 1413), True, 'import numpy as np\n'), ((7306, 7334), 'ETF.get_split_name_and_isin', 'get_split_name_and_isin', (['etf'], {}), '(etf)\n', (7329, 7334), False, 'from ETF import get_split_name_and_isin, get_combined_name_and_isin\n'), ((10920, 10937), 'math.isnan', 'math.isnan', (['price'], {}), '(price)\n', (10930, 10937), False, 'import math\n'), ((11985, 12002), 'math.isnan', 'math.isnan', (['price'], {}), '(price)\n', (11995, 12002), False, 'import math\n'), ((10368, 10394), 'math.isnan', 
'math.isnan', (['previous_price'], {}), '(previous_price)\n', (10378, 10394), False, 'import math\n'), ((10547, 10569), 'math.isnan', 'math.isnan', (['next_price'], {}), '(next_price)\n', (10557, 10569), False, 'import math\n')] |
import numpy as np
def magnitude(x):
    """Return the Euclidean (L2) norm of vector *x*."""
    vec = np.asarray(x)
    return np.linalg.norm(vec)
def distance(p1, p2):
    """Return the Euclidean distance between two positions p1 and p2."""
    displacement = np.asarray(p2) - np.asarray(p1)
    return np.linalg.norm(displacement)
def unit_vector(x):
    """Return *x* scaled to unit length (x / |x|)."""
    vec = np.asarray(x)
    return vec / np.linalg.norm(vec)
def random_sphere(center, radius, size, seed=123):
    """Sample *size* points uniformly from the interior of a sphere.

    Keyword arguments:
    center -- (x, y, z) center of the sphere
    radius -- sphere radius
    size -- number of points to create
    seed -- random state (default 123)
    """
    np.random.seed(seed)
    cx, cy, cz = center
    # Uniform direction via azimuth + cos(polar angle); uniform radius via
    # the inverse-CDF transform r = R * u^(1/3).
    azimuth = np.random.uniform(0, 2 * np.pi, size)
    cos_polar = np.random.uniform(-1, 1, size)
    u = np.random.uniform(0, 1, size)
    polar = np.arccos(cos_polar)
    r = radius * (u ** (1 / 3))
    sin_polar = np.sin(polar)
    xs = r * sin_polar * np.cos(azimuth) + cx
    ys = r * sin_polar * np.sin(azimuth) + cy
    zs = r * np.cos(polar) + cz
    return list(zip(xs, ys, zs))
def random_ec_sphere(center, r1, r2, size, seed=123):
    """Create a list of random points between two concentric spheres.

    Keyword arguments:
    center -- center of the spheres
    r1 -- radius of the smaller sphere
    r2 -- radius of the bigger sphere
    size -- number of points to create
    seed -- random state (default 123)
    """
    np.random.seed(seed)
    # Oversample the outer sphere so that roughly 2x the requested number
    # of points survive rejection of the inner core; 1 - (r1/r2)^3 is the
    # shell's volume fraction of the full sphere.
    inc_size = int(2*size / (1 - (r1/r2)**3))
    x0, y0, z0 = center
    ls = random_sphere(center, r2, inc_size, seed)
    x = np.array([i[0] for i in ls])
    y = np.array([i[1] for i in ls])
    z = np.array([i[2] for i in ls])
    # BUG FIX: measure the distance from the sphere *center*, not from the
    # origin. The original compared x^2 + y^2 + z^2 against r1^2 (leaving
    # x0, y0, z0 unused), which is only correct when center == (0, 0, 0).
    cnd = (x - x0)**2 + (y - y0)**2 + (z - z0)**2 > r1**2
    xs = x[cnd]
    ys = y[cnd]
    zs = z[cnd]
    return list(zip(xs, ys, zs))[:size]
def somigliana(phi):
    """
    Somigliana equation
    phi: latitude in degrees
    Returns g, the gravitational acceleration (m/s2).
    """
    lat = phi * (np.pi/180)
    sin_sq = np.sin(lat) ** 2
    numerator = 1 + 0.00193185265245827352087 * sin_sq
    denominator = np.sqrt(1 - 0.006694379990141316996137 * sin_sq)
    return 9.780325335903891718546 * (numerator / denominator)
def welmec(phi, h):
    """
    WELMEC formula
    phi: latitude in degrees
    h: height in meters above sea level
    Returns g, the gravitational acceleration (m/s2).
    """
    lat = phi * (np.pi/180)
    latitude_term = 1 + 0.0053024 * (np.sin(lat)**2) - 0.0000058 * (np.sin(2*lat)**2)
    return latitude_term * 9.780318 - 0.000003085 * h
| [
"numpy.arccos",
"numpy.array",
"numpy.cos",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.sin"
] | [((631, 651), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (645, 651), True, 'import numpy as np\n'), ((686, 723), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'size'], {}), '(0, 2 * np.pi, size)\n', (703, 723), True, 'import numpy as np\n'), ((737, 767), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'size'], {}), '(-1, 1, size)\n', (754, 767), True, 'import numpy as np\n'), ((776, 805), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'size'], {}), '(0, 1, size)\n', (793, 805), True, 'import numpy as np\n'), ((818, 837), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (827, 837), True, 'import numpy as np\n'), ((1381, 1401), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1395, 1401), True, 'import numpy as np\n'), ((1531, 1559), 'numpy.array', 'np.array', (['[i[0] for i in ls]'], {}), '([i[0] for i in ls])\n', (1539, 1559), True, 'import numpy as np\n'), ((1568, 1596), 'numpy.array', 'np.array', (['[i[1] for i in ls]'], {}), '([i[1] for i in ls])\n', (1576, 1596), True, 'import numpy as np\n'), ((1605, 1633), 'numpy.array', 'np.array', (['[i[2] for i in ls]'], {}), '([i[2] for i in ls])\n', (1613, 1633), True, 'import numpy as np\n'), ((104, 115), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (112, 115), True, 'import numpy as np\n'), ((323, 334), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (331, 334), True, 'import numpy as np\n'), ((220, 232), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (228, 232), True, 'import numpy as np\n'), ((235, 247), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (243, 247), True, 'import numpy as np\n'), ((898, 909), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (904, 909), True, 'import numpy as np\n'), ((946, 957), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (952, 957), True, 'import numpy as np\n'), ((978, 991), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (984, 991), True, 'import 
numpy as np\n'), ((882, 895), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (888, 895), True, 'import numpy as np\n'), ((930, 943), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (936, 943), True, 'import numpy as np\n'), ((1963, 1974), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1969, 1974), True, 'import numpy as np\n'), ((2029, 2040), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2035, 2040), True, 'import numpy as np\n'), ((2351, 2366), 'numpy.sin', 'np.sin', (['(2 * phi)'], {}), '(2 * phi)\n', (2357, 2366), True, 'import numpy as np\n'), ((2322, 2333), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2328, 2333), True, 'import numpy as np\n')] |
class KelvinHelmholtzUniform:
    """
    Kelvin-Helmholtz instability with anisotropic viscosity and a constant
    magnetic field in the x-direction. The equilibrium is assumed to have
    constant density, temperature and pressure. The velocity profile varies
    smoothly and the setup is periodic.
    More details about this specific setup can be found in
    Berlok, T. & Pfrommer, C. (2019). *On the Kelvin-Helmholtz instability
    with smooth initial conditions – Linear theory and simulations*, MNRAS,
    485, 908
    Another reference for the KHI with anisotropic viscosity is
    Suzuki, K., Ogawa, T., Matsumoto, Y., & Matsumoto, R. (2013).
    Magnetohydrodynamic simulations of the formation of cold fronts in
    clusters of galaxies: Effects of anisotropic viscosity. Astrophysical
    Journal, 768(2). https://doi.org/10.1088/0004-637X/768/2/175
    """

    def __init__(self, grid, beta, nu, kx, u0=1, z1=0.5, z2=1.5, a=0.05):
        """Set up the equilibrium and the linearized equation system.

        grid -- grid object; the background is re-created when it changes
        beta -- plasma beta, sets the magnetic field strength
        nu -- viscosity coefficient
        kx -- horizontal wavenumber of the perturbation
        u0 -- shear flow amplitude (default 1)
        z1, z2 -- centers of the two shear layers (default 0.5 and 1.5)
        a -- shear layer thickness (default 0.05)
        """
        import numpy as np

        # Parameters that change (TODO: make nu, beta, and chi0 part of this)
        self._u0 = u0
        self.nu = nu
        self.beta = beta
        self.kx = kx

        self.gamma = 5.0 / 3
        self.p = 1.0
        self.rho = 1.0
        self.mu0 = 1.0
        # Field strength from plasma beta (beta = 2 mu0 p / B^2 with mu0=1)
        # and the corresponding Alfven speed.
        self.B = np.sqrt(2 * self.p / beta)
        self.va = self.B / np.sqrt(self.mu0 * self.rho)

        self.grid = grid
        # Re-create the background whenever the grid resolution changes.
        self.grid.bind_to(self.make_background)

        self.z1 = z1
        self.z2 = z2
        self.a = a

        # Create initial background
        self.make_background()

        # Variables to solve for
        self.variables = ["drho", "dA", "dvx", "dvz", "dT"]
        self.labels = [
            r"$\delta \rho/\rho$",
            r"$\delta A/B$",
            r"$\delta v_x/c_0$",
            r"$\delta v_z/c_0$",
            r"$\delta T/T$",
        ]

        # Boundary conditions
        self.boundaries = [False, False, False, False, False]

        # Number of equations in system
        self.dim = len(self.variables)

        # String used for eigenvalue (do not use lambda!)
        self.eigenvalue = "sigma"

        # Equations (Careful! No space behind minus
        eq1 = "sigma*drho = -1j*kx*v*drho -1j*kx*dvx -1.0*dz(dvz)"
        eq2 = "sigma*dA = -1j*kx*v*dA +1.0*dvz"
        eq3 = "sigma*dvx = -1j*kx*v*dvx -dvdz*dvz -1j*kx*p/rho*drho -1j*kx*p/rho*dT -nu*4/3*kx**2*dvx -nu*2*kx**2*dvdz*dA -nu*1j*kx*2/3*dz(dvz)"
        eq4 = "sigma*dvz = -1j*kx*v*dvz -1/rho*p*dz(drho) -1/rho*p*dz(dT) +va**2*dz(dz(dA)) -va**2*kx**2*dA -1j*kx*nu*2/3*dz(dvx) -1j*kx*nu*d2vdz*dA -1j*kx*nu*dvdz*dz(dA) +nu*1/3*dz(dz(dvz))"
        eq5 = "sigma*dT = -1j*kx*v*dT -1j*kx*2/3*dvx -2/3*dz(dvz)"

        self.equations = [eq1, eq2, eq3, eq4, eq5]

    @property
    def u0(self):
        # Shear amplitude; the setter rebuilds the background profile.
        return self._u0

    @u0.setter
    def u0(self, value):
        self._u0 = value
        self.make_background()

    def make_background(self):
        """Evaluate the shear profile v(z) and its first two derivatives
        symbolically and sample them on the grid points."""
        from sympy import tanh, diff, lambdify, symbols

        z = symbols("z")

        zg = self.grid.zg
        u0 = self._u0

        z1 = self.z1
        z2 = self.z2
        a = self.a

        # Define Background Functions: two smooth (tanh) shear layers,
        # shifted so the profile is periodic.
        v_sym = u0 * (tanh((z - z1) / a) - tanh((z - z2) / a) - 1.0)

        dvdz_sym = diff(v_sym, z)
        d2vdz_sym = diff(dvdz_sym, z)

        self.v = lambdify(z, v_sym)(zg)
        self.dvdz = lambdify(z, dvdz_sym)(zg)
        self.d2vdz = lambdify(z, d2vdz_sym)(zg)
| [
"numpy.sqrt",
"sympy.lambdify",
"sympy.symbols",
"sympy.diff",
"sympy.tanh"
] | [((1277, 1303), 'numpy.sqrt', 'np.sqrt', (['(2 * self.p / beta)'], {}), '(2 * self.p / beta)\n', (1284, 1303), True, 'import numpy as np\n'), ((2996, 3008), 'sympy.symbols', 'symbols', (['"""z"""'], {}), "('z')\n", (3003, 3008), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3248, 3262), 'sympy.diff', 'diff', (['v_sym', 'z'], {}), '(v_sym, z)\n', (3252, 3262), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3283, 3300), 'sympy.diff', 'diff', (['dvdz_sym', 'z'], {}), '(dvdz_sym, z)\n', (3287, 3300), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((1331, 1359), 'numpy.sqrt', 'np.sqrt', (['(self.mu0 * self.rho)'], {}), '(self.mu0 * self.rho)\n', (1338, 1359), True, 'import numpy as np\n'), ((3319, 3337), 'sympy.lambdify', 'lambdify', (['z', 'v_sym'], {}), '(z, v_sym)\n', (3327, 3337), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3362, 3383), 'sympy.lambdify', 'lambdify', (['z', 'dvdz_sym'], {}), '(z, dvdz_sym)\n', (3370, 3383), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3409, 3431), 'sympy.lambdify', 'lambdify', (['z', 'd2vdz_sym'], {}), '(z, d2vdz_sym)\n', (3417, 3431), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3181, 3199), 'sympy.tanh', 'tanh', (['((z - z1) / a)'], {}), '((z - z1) / a)\n', (3185, 3199), False, 'from sympy import tanh, diff, lambdify, symbols\n'), ((3202, 3220), 'sympy.tanh', 'tanh', (['((z - z2) / a)'], {}), '((z - z2) / a)\n', (3206, 3220), False, 'from sympy import tanh, diff, lambdify, symbols\n')] |
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
class GradCAM():
    """Grad-CAM: highlight which spatial regions of the input drive a
    CNN's class score, by weighting the target layer's feature maps with
    the spatially-averaged gradient of that score.
    """
    def __init__(self, model, target_layer, use_cuda):
        """
        model -- torch module put into eval mode
        target_layer -- dotted name of the layer whose activations/grads
            are captured (matched against model.named_modules())
        use_cuda -- move model and inputs to GPU when True
        """
        self.model = model.eval()
        self.target_layer = target_layer
        self.use_cuda = use_cuda
        # Filled in by the hooks on each forward/backward pass.
        self.feature_map = 0
        self.grad = 0
        if self.use_cuda:
            self.model = self.model.cuda()
        for module in self.model.named_modules():
            if module[0] == target_layer:
                module[1].register_forward_hook(self.save_feature_map)
                # NOTE(review): register_backward_hook is deprecated in
                # newer PyTorch in favor of register_full_backward_hook;
                # confirm the installed torch version still supports it.
                module[1].register_backward_hook(self.save_grad)
    def save_feature_map(self, module, input, output):
        # Forward hook: stash the target layer's activations.
        self.feature_map = output.detach()
    def save_grad(self, module, grad_in, grad_out):
        # Backward hook: stash the gradient w.r.t. the layer's output.
        self.grad = grad_out[0].detach()
    def __call__(self, x, index=None):
        """Compute the class activation map for input batch *x*.

        x -- input tensor (cloned, so the caller's tensor is untouched)
        index -- class index to explain; defaults to the argmax class
        Returns (cam, index): the ReLU'd, input-sized heatmap and the
        class index that was explained.
        """
        x = x.clone()
        if self.use_cuda:
            x = x.cuda()
        output = self.model(x)
        if index == None:
            index = np.argmax(output.cpu().data.numpy())
        # One-hot select the target class so backward() propagates only
        # that class's score.
        one_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot)
        one_hot.requires_grad_()
        if self.use_cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        self.model.zero_grad()
        one_hot.backward()
        # Drop the batch dim; weights = per-channel mean gradient over
        # the spatial axes (2, 3).
        self.feature_map = self.feature_map.cpu().numpy()[0]
        self.weights = np.mean(self.grad.cpu().numpy(), axis = (2, 3))[0, :]
        # Weighted sum of feature maps, ReLU, then upscale to input size.
        cam = np.sum(self.feature_map * self.weights[:, None, None], axis=0)
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (x.size()[-1], x.size()[-2]))
        return cam, index
def show_cam_on_image(img, mask):
    """Overlay a Grad-CAM heatmap on an image.

    img -- float image (assumed in [0, 1] — TODO confirm with callers)
    mask -- activation map in [0, 1] matching the image's spatial size
    Returns a uint8 image with the JET-colored heatmap blended in.
    """
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    overlay = np.float32(heatmap) / 255 + np.float32(img)
    overlay = overlay / np.max(overlay)
    return np.uint8(255 * overlay)
| [
"numpy.uint8",
"torch.from_numpy",
"numpy.max",
"numpy.sum",
"torch.sum",
"numpy.maximum",
"numpy.float32"
] | [((2103, 2122), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (2111, 2122), True, 'import numpy as np\n'), ((1236, 1261), 'torch.from_numpy', 'torch.from_numpy', (['one_hot'], {}), '(one_hot)\n', (1252, 1261), False, 'import torch\n'), ((1701, 1763), 'numpy.sum', 'np.sum', (['(self.feature_map * self.weights[:, None, None])'], {'axis': '(0)'}), '(self.feature_map * self.weights[:, None, None], axis=0)\n', (1707, 1763), True, 'import numpy as np\n'), ((1778, 1796), 'numpy.maximum', 'np.maximum', (['cam', '(0)'], {}), '(cam, 0)\n', (1788, 1796), True, 'import numpy as np\n'), ((1950, 1970), 'numpy.uint8', 'np.uint8', (['(255 * mask)'], {}), '(255 * mask)\n', (1958, 1970), True, 'import numpy as np\n'), ((2002, 2021), 'numpy.float32', 'np.float32', (['heatmap'], {}), '(heatmap)\n', (2012, 2021), True, 'import numpy as np\n'), ((2048, 2063), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2058, 2063), True, 'import numpy as np\n'), ((2080, 2091), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (2086, 2091), True, 'import numpy as np\n'), ((1414, 1441), 'torch.sum', 'torch.sum', (['(one_hot * output)'], {}), '(one_hot * output)\n', (1423, 1441), False, 'import torch\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment
import pandas as pd
from collections import OrderedDict
from feature_extraction.helpers import *
from feature_extraction.type_detection import detect_field_type, data_type_to_general_type, data_types, general_types
# BUG FIX: np.warnings was an accidental re-export of the stdlib module
# and was removed in recent NumPy (1.24+); use warnings directly.
import warnings

warnings.filterwarnings('ignore')

# Schema of the per-field features: each entry names one feature and its
# kind ('id' / 'boolean' / 'numeric'). List order defines column order in
# the OrderedDicts built by the extractors below, so do not reorder.
field_basic_features_list = [
    {'name': 'fid', 'type': 'id'},
    {'name': 'field_id', 'type': 'id'},
    {'name': 'exists', 'type': 'boolean'},
    {'name': 'length', 'type': 'numeric'}
]

# One boolean indicator per detectable data type and general type.
for data_type in data_types:
    field_basic_features_list.append({
        'name': 'data_type_is_{}'.format(data_type), 'type': 'boolean'
    })
for general_type in general_types:
    field_basic_features_list.append({
        'name': 'general_type_is_{}'.format(general_type), 'type': 'boolean'
    })

# Missing-value features.
field_existence_features_list = [
    {'name': 'has_none', 'type': 'boolean'},
    {'name': 'percentage_none', 'type': 'numeric'},
    {'name': 'num_none', 'type': 'numeric'},
]

field_uniqueness_features_list = [
    {'name': 'num_unique_elements', 'type': 'numeric'},
    {'name': 'unique_percent', 'type': 'numeric'},
    {'name': 'is_unique', 'type': 'boolean'}
]

# Statistics for categorical ('c') fields.
field_c_statistical_features_list = [
    {'name': 'list_entropy', 'type': 'numeric'},
    {'name': 'mean_value_length', 'type': 'numeric'},
    {'name': 'median_value_length', 'type': 'numeric'},
    {'name': 'min_value_length', 'type': 'numeric'},
    {'name': 'max_value_length', 'type': 'numeric'},
    {'name': 'std_value_length', 'type': 'numeric'},
    {'name': 'percentage_of_mode', 'type': 'numeric'},
]

# Statistics for quantitative ('q') fields.
field_q_statistical_features_list = [
    {'name': 'mean', 'type': 'numeric'},
    {'name': 'normalized_mean', 'type': 'numeric'},
    {'name': 'median', 'type': 'numeric'},
    {'name': 'normalized_median', 'type': 'numeric'},
    {'name': 'var', 'type': 'numeric'},
    {'name': 'std', 'type': 'numeric'},
    {'name': 'coeff_var', 'type': 'numeric'},
    {'name': 'min', 'type': 'numeric'},
    {'name': 'max', 'type': 'numeric'},
    {'name': 'range', 'type': 'numeric'},
    {'name': 'normalized_range', 'type': 'numeric'},
    {'name': 'entropy', 'type': 'numeric'},
    {'name': 'gini', 'type': 'numeric'},
    {'name': 'q25', 'type': 'numeric'},
    {'name': 'q75', 'type': 'numeric'},
    {'name': 'med_abs_dev', 'type': 'numeric'},
    {'name': 'avg_abs_dev', 'type': 'numeric'},
    {'name': 'quant_coeff_disp', 'type': 'numeric'},
    {'name': 'skewness', 'type': 'numeric'},
    {'name': 'kurtosis', 'type': 'numeric'},
    {'name': 'moment_5', 'type': 'numeric'},
    {'name': 'moment_6', 'type': 'numeric'},
    {'name': 'moment_7', 'type': 'numeric'},
    {'name': 'moment_8', 'type': 'numeric'},
    {'name': 'moment_9', 'type': 'numeric'},
    {'name': 'moment_10', 'type': 'numeric'},
    {'name': 'percent_outliers_15iqr', 'type': 'numeric'},
    {'name': 'percent_outliers_3iqr', 'type': 'numeric'},
    {'name': 'percent_outliers_1_99', 'type': 'numeric'},
    {'name': 'percent_outliers_3std', 'type': 'numeric'},
    {'name': 'has_outliers_15iqr', 'type': 'boolean'},
    {'name': 'has_outliers_3iqr', 'type': 'boolean'},
    {'name': 'has_outliers_1_99', 'type': 'boolean'},
    {'name': 'has_outliers_3std', 'type': 'boolean'},
    {'name': 'normality_statistic', 'type': 'numeric'},
    {'name': 'normality_p', 'type': 'numeric'},
    {'name': 'is_normal_5', 'type': 'boolean'},
    {'name': 'is_normal_1', 'type': 'boolean'},
]

# Lexical features of the field's name.
field_name_features_list = [
    {'name': 'field_name_length', 'type': 'numeric'},
    {'name': 'x_in_name', 'type': 'boolean'},
    {'name': 'y_in_name', 'type': 'boolean'},
    {'name': 'id_in_name', 'type': 'boolean'},
    {'name': 'time_in_name', 'type': 'boolean'},
    {'name': 'digit_in_name', 'type': 'boolean'},
    {'name': 'dollar_in_name', 'type': 'boolean'},
    {'name': 'pound_in_name', 'type': 'boolean'},
    {'name': 'euro_in_name', 'type': 'boolean'},
    {'name': 'yen_in_name', 'type': 'boolean'},
    {'name': 'first_char_uppercase_name', 'type': 'boolean'},
    {'name': 'num_uppercase_characters', 'type': 'numeric'},
    {'name': 'space_in_name', 'type': 'boolean'},
    {'name': 'number_of_words_in_name', 'type': 'numeric'},
]

# Order/spacing features of the value sequence.
field_sequence_features_list = [
    {'name': 'is_sorted', 'type': 'boolean'},
    {'name': 'is_monotonic', 'type': 'boolean'},
    {'name': 'sortedness', 'type': 'numeric'},
    {'name': 'lin_space_sequence_coeff', 'type': 'numeric'},
    {'name': 'log_space_sequence_coeff', 'type': 'numeric'},
    {'name': 'is_lin_space', 'type': 'boolean'},
    {'name': 'is_log_space', 'type': 'boolean'},
]

# Concatenation order defines the final feature-vector layout.
all_field_features_list = \
    field_basic_features_list + \
    field_existence_features_list + \
    field_uniqueness_features_list + \
    field_c_statistical_features_list + \
    field_q_statistical_features_list + \
    field_name_features_list + \
    field_sequence_features_list

all_field_features_list_names = [x['name'] for x in all_field_features_list]
def get_existence_features(v):
    """Compute missing-value features for a field's raw values.

    Missing entries are encoded upstream as the literal string 'None'.
    Returns an OrderedDict keyed by feature name; every value stays None
    when the field is empty.
    """
    r = OrderedDict([(f['name'], None) for f in field_existence_features_list])
    if not len(v):
        return r
    none_count = sum(1 for e in v if e == 'None')
    r['num_none'] = none_count
    r['percentage_none'] = none_count / len(v)
    r['has_none'] = none_count > 0
    return r
# Uniqueness properties
def get_uniqueness_features(v, field_type, field_general_type):
    """Compute uniqueness features for categorical or integer fields.

    For other field types the features are left as None, since uniqueness
    is not meaningful for continuous values.
    """
    r = OrderedDict([(f['name'], None)
                     for f in field_uniqueness_features_list])
    if not len(v):
        return r
    if field_general_type == 'c' or field_type == 'integer':
        n_unique = get_unique(v).size
        r = {
            'num_unique_elements': n_unique,
            'unique_percent': n_unique / len(v),
            'is_unique': n_unique == len(v),
        }
    return r
def get_statistical_features(v, field_type, field_general_type):
    """Compute descriptive statistics for a field.

    Categorical ('c') fields get entropy/value-length/mode features;
    quantitative ('q') fields get moments, quantiles, outlier counts and
    a normality test. Features that do not apply stay None.
    """
    r = OrderedDict([(f['name'], None)
                     for f in field_c_statistical_features_list + field_q_statistical_features_list])
    if not len(v):
        return r
    if field_general_type == 'c':
        r['list_entropy'] = list_entropy(v)

        value_lengths = [len(x) for x in v]
        r['mean_value_length'] = np.mean(value_lengths)
        r['median_value_length'] = np.median(value_lengths)
        r['min_value_length'] = np.min(value_lengths)
        r['max_value_length'] = np.max(value_lengths)
        r['std_value_length'] = np.std(value_lengths)
        # Share of values equal to the most common value.
        r['percentage_of_mode'] = (pd.Series(v).value_counts().max() / len(v))

    if field_general_type in 'q':
        sample_mean = np.mean(v)
        sample_median = np.median(v)
        sample_var = np.var(v)
        sample_min = np.min(v)
        sample_max = np.max(v)
        sample_std = np.std(v)
        # NOTE(review): np.percentile expects percentages in [0, 100], so
        # [0.01, 0.25, 0.75, 0.99] computes sub-1% quantiles, not the
        # 1st/25th/75th/99th percentiles — likely meant [1, 25, 75, 99].
        q1, q25, q75, q99 = np.percentile(v, [0.01, 0.25, 0.75, 0.99])
        iqr = q75 - q25

        r['mean'] = sample_mean
        r['normalized_mean'] = sample_mean / sample_max
        r['median'] = sample_median
        r['normalized_median'] = sample_median / sample_max

        r['var'] = sample_var
        r['std'] = sample_std
        # NOTE(review): coeff_var is set to mean/var here but overwritten
        # with var/mean further down — only the later value survives.
        r['coeff_var'] = (sample_mean / sample_var) if sample_var else None
        r['min'] = sample_min
        r['max'] = sample_max
        r['range'] = r['max'] - r['min']
        r['normalized_range'] = (r['max'] - r['min']) / \
            sample_mean if sample_mean else None

        r['entropy'] = entropy(v)
        r['gini'] = gini(v)
        r['q25'] = q25
        r['q75'] = q75
        r['med_abs_dev'] = np.median(np.absolute(v - sample_median))
        r['avg_abs_dev'] = np.mean(np.absolute(v - sample_mean))
        r['quant_coeff_disp'] = (q75 - q25) / (q75 + q25)
        r['coeff_var'] = sample_var / sample_mean
        r['skewness'] = skew(v)
        r['kurtosis'] = kurtosis(v)
        # Higher-order central moments.
        r['moment_5'] = moment(v, moment=5)
        r['moment_6'] = moment(v, moment=6)
        r['moment_7'] = moment(v, moment=7)
        r['moment_8'] = moment(v, moment=8)
        r['moment_9'] = moment(v, moment=9)
        r['moment_10'] = moment(v, moment=10)

        # Outliers
        outliers_15iqr = np.logical_or(
            v < (q25 - 1.5 * iqr), v > (q75 + 1.5 * iqr))
        outliers_3iqr = np.logical_or(v < (q25 - 3 * iqr), v > (q75 + 3 * iqr))
        outliers_1_99 = np.logical_or(v < q1, v > q99)
        outliers_3std = np.logical_or(
            v < (
                sample_mean -
                3 *
                sample_std),
            v > (
                sample_mean +
                3 *
                sample_std))
        r['percent_outliers_15iqr'] = np.sum(outliers_15iqr) / len(v)
        r['percent_outliers_3iqr'] = np.sum(outliers_3iqr) / len(v)
        r['percent_outliers_1_99'] = np.sum(outliers_1_99) / len(v)
        r['percent_outliers_3std'] = np.sum(outliers_3std) / len(v)

        r['has_outliers_15iqr'] = np.any(outliers_15iqr)
        r['has_outliers_3iqr'] = np.any(outliers_3iqr)
        r['has_outliers_1_99'] = np.any(outliers_1_99)
        r['has_outliers_3std'] = np.any(outliers_3std)

        # Statistical Distribution (normaltest requires >= 8 samples)
        if len(v) >= 8:
            normality_k2, normality_p = normaltest(v)
            r['normality_statistic'] = normality_k2
            r['normality_p'] = normality_p
            # NOTE(review): for normaltest, p < 0.05 REJECTS normality,
            # so "is_normal_5 = p < 0.05" appears inverted. Left as-is
            # because downstream models may depend on this encoding.
            r['is_normal_5'] = (normality_p < 0.05)
            r['is_normal_1'] = (normality_p < 0.01)
    return r
def get_name_features(n):
    """Extract lexical features from a field's name string *n*."""
    r = OrderedDict([(f['name'], None) for f in field_name_features_list])
    r['field_name_length'] = len(n)
    r['x_in_name'] = n.startswith('x') or n.endswith('x')
    r['y_in_name'] = n.startswith('y') or n.endswith('y')
    r['id_in_name'] = n.startswith('id') or n.endswith('id')
    r['time_in_name'] = 'time' in n
    # Lookahead regex: matches when the name contains at least one digit.
    r['digit_in_name'] = bool(re.match(r'^(?=.*\d).+$', n))
    r['dollar_in_name'] = '$' in n
    r['pound_in_name'] = '£' in n
    r['euro_in_name'] = '€' in n
    r['yen_in_name'] = '¥' in n
    r['first_char_uppercase_name'] = n[0] == n[0].upper()
    uppercase_total = sum(1 for c in n if c.isupper())
    r['num_uppercase_characters'] = uppercase_total
    r['space_in_name'] = ' ' in n
    r['number_of_words_in_name'] = len(n.split(' '))
    return r
def get_sequence_features(v, field_type, field_general_type):
    """Compute ordering/spacing features of the value sequence.

    Temporal ('t') values are cast to int before the monotonicity and
    sortedness checks; quantitative ('q') fields additionally get
    linear/log spacing coefficients. Features that do not apply stay None.
    """
    r = OrderedDict([(f['name'], None) for f in field_sequence_features_list])
    if not len(v):
        return r
    sorted_v = np.sort(v)
    if field_general_type == 'c':
        r['is_sorted'] = np.array_equal(sorted_v, v)
    if field_general_type == 't':
        v = v.astype('int')
        sorted_v = sorted_v.astype('int')
    if field_general_type in ['t', 'q']:
        # Differences of the sorted sequence; all <= 0 or all >= 0 means
        # the sorted values are monotonic (ties allowed).
        sequence_incremental_subtraction = np.subtract(
            sorted_v[:-1], sorted_v[1:]).astype(int)
        r['is_monotonic'] = np.all(
            sequence_incremental_subtraction <= 0) or np.all(
            sequence_incremental_subtraction >= 0)
        # Sortedness = |correlation| between the sequence and its sorted
        # version; 1 when already sorted (in either direction).
        r['sortedness'] = np.absolute(
            pearsonr(v, sorted_v)[0])  # or use inversions
        # np.allclose(v, sorted_v) # np.array_equal(sorted_v, v)
        r['is_sorted'] = np.array_equal(sorted_v, v)
    if field_general_type == 'q':
        # NOTE(review): zeros in sorted_v[1:] make this divide by zero,
        # yielding inf/nan in the log-spacing coefficient — confirm
        # upstream filtering guarantees non-zero values.
        sequence_incremental_division = np.divide(sorted_v[:-1], sorted_v[1:])
        sequence_incremental_subtraction = np.diff(sorted_v)
        # Coefficient of variation of the gaps: ~0 means evenly spaced
        # (linearly for differences, geometrically for ratios).
        r['lin_space_sequence_coeff'] = np.std(
            sequence_incremental_subtraction) / np.mean(sequence_incremental_subtraction)
        r['log_space_sequence_coeff'] = np.std(
            sequence_incremental_division) / np.mean(sequence_incremental_division)
        r['is_lin_space'] = r['lin_space_sequence_coeff'] <= 0.001
        r['is_log_space'] = r['log_space_sequence_coeff'] <= 0.001
    return r
def extract_single_field_features(fields, fid, timeout=15, MAX_FIELDS=20):
all_field_features = []
for i in range(0, MAX_FIELDS):
single_field_feature_set = OrderedDict()
for f in all_field_features_list:
if f['type'] == 'boolean':
single_field_feature_set[f['name']] = False
else:
single_field_feature_set[f['name']] = None
all_field_features.append(single_field_feature_set)
parsed_fields = [] # For pairwise feature extraction
for i, (field_name, d) in enumerate(fields):
field_id = d['uid']
field_order = d['order']
field_values = d['data']
field_length = len(field_values)
field_type, field_scores = detect_field_type(field_values)
field_general_type = data_type_to_general_type[field_type]
all_field_features[i]['fid'] = fid
all_field_features[i]['field_id'] = '{}:{}'.format(
fid.split(':')[0] if fid else None, field_id)
all_field_features[i]['exists'] = True
all_field_features[i]['length'] = field_length
# all_field_features[i]['data_type'] = field_type
# all_field_features[i]['general_type'] = field_general_type
all_field_features[i]['data_type_is_{}'.format(field_type)] = True
all_field_features[i]['general_type_is_{}'.format(
field_general_type)] = True
parsed_field = {
'name': field_name,
'order': field_order,
'data_type': field_type,
'general_type': field_general_type
}
existence_features = OrderedDict()
uniqueness_features = OrderedDict()
statistical_features = OrderedDict()
name_features = OrderedDict()
sequence_features = OrderedDict()
try:
v = parse(field_values, field_type, field_general_type)
v = np.ma.array(v).compressed()
parsed_field['data'] = v
parsed_field['unique_data'] = get_unique(v)
start_time = time()
while time() < (start_time + timeout):
existence_features = get_existence_features(field_values)
uniqueness_features = get_uniqueness_features(
v, field_type, field_general_type)
statistical_features = get_statistical_features(
v, field_type, field_general_type)
name_features = get_name_features(field_name)
sequence_features = get_sequence_features(
v, field_type, field_general_type)
break
except Exception as e:
print(e)
print('Error parsing {}: {}'.format(field_name, e))
pass
for feature_set in [uniqueness_features, existence_features,
statistical_features, name_features, sequence_features]:
for k, v in feature_set.items():
all_field_features[i][k] = v
parsed_fields.append(parsed_field)
return all_field_features, parsed_fields
def extract_single_feature(fields, fid=None):
single_field_features, parsed_fields = extract_single_field_features(fields, fid, MAX_FIELDS=len(fields))
column_features = {}
for f, c in zip(fields, single_field_features):
column_features[f[0]] = (sorted(c.items(), key=lambda x:x[0]))
return column_features
def extract_column_features(all_rows, fid, uid):
'''
input data : [
[header1, val1, val2, val3, val4..]
[header2, val1, val2, val3, val4..]
]
'''
order = 0
fields = []
for row in all_rows:
column_name = row[0]
data = row[1:]
data_dict = {"uid" : uid, "order" : order, "data" : data}
fields.append((column_name, data_dict))
order += 1
column_features = extract_single_feature(fields)
return column_features
| [
"feature_extraction.type_detection.detect_field_type",
"scipy.stats.normaltest",
"scipy.stats.pearsonr",
"numpy.divide",
"numpy.mean",
"numpy.sort",
"scipy.stats.kurtosis",
"numpy.diff",
"numpy.subtract",
"numpy.max",
"numpy.min",
"collections.OrderedDict",
"scipy.stats.entropy",
"numpy.ma... | [((364, 400), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (390, 400), True, 'import numpy as np\n'), ((5107, 5178), 'collections.OrderedDict', 'OrderedDict', (["[(f['name'], None) for f in field_existence_features_list]"], {}), "([(f['name'], None) for f in field_existence_features_list])\n", (5118, 5178), False, 'from collections import OrderedDict\n'), ((5483, 5555), 'collections.OrderedDict', 'OrderedDict', (["[(f['name'], None) for f in field_uniqueness_features_list]"], {}), "([(f['name'], None) for f in field_uniqueness_features_list])\n", (5494, 5555), False, 'from collections import OrderedDict\n'), ((6001, 6116), 'collections.OrderedDict', 'OrderedDict', (["[(f['name'], None) for f in field_c_statistical_features_list +\n field_q_statistical_features_list]"], {}), "([(f['name'], None) for f in field_c_statistical_features_list +\n field_q_statistical_features_list])\n", (6012, 6116), False, 'from collections import OrderedDict\n'), ((9536, 9602), 'collections.OrderedDict', 'OrderedDict', (["[(f['name'], None) for f in field_name_features_list]"], {}), "([(f['name'], None) for f in field_name_features_list])\n", (9547, 9602), False, 'from collections import OrderedDict\n'), ((10424, 10494), 'collections.OrderedDict', 'OrderedDict', (["[(f['name'], None) for f in field_sequence_features_list]"], {}), "([(f['name'], None) for f in field_sequence_features_list])\n", (10435, 10494), False, 'from collections import OrderedDict\n'), ((10546, 10556), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (10553, 10556), True, 'import numpy as np\n'), ((6327, 6349), 'numpy.mean', 'np.mean', (['value_lengths'], {}), '(value_lengths)\n', (6334, 6349), True, 'import numpy as np\n'), ((6385, 6409), 'numpy.median', 'np.median', (['value_lengths'], {}), '(value_lengths)\n', (6394, 6409), True, 'import numpy as np\n'), ((6442, 6463), 'numpy.min', 'np.min', (['value_lengths'], {}), '(value_lengths)\n', 
(6448, 6463), True, 'import numpy as np\n'), ((6496, 6517), 'numpy.max', 'np.max', (['value_lengths'], {}), '(value_lengths)\n', (6502, 6517), True, 'import numpy as np\n'), ((6550, 6571), 'numpy.std', 'np.std', (['value_lengths'], {}), '(value_lengths)\n', (6556, 6571), True, 'import numpy as np\n'), ((6708, 6718), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (6715, 6718), True, 'import numpy as np\n'), ((6743, 6755), 'numpy.median', 'np.median', (['v'], {}), '(v)\n', (6752, 6755), True, 'import numpy as np\n'), ((6777, 6786), 'numpy.var', 'np.var', (['v'], {}), '(v)\n', (6783, 6786), True, 'import numpy as np\n'), ((6808, 6817), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (6814, 6817), True, 'import numpy as np\n'), ((6839, 6848), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (6845, 6848), True, 'import numpy as np\n'), ((6870, 6879), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (6876, 6879), True, 'import numpy as np\n'), ((6908, 6950), 'numpy.percentile', 'np.percentile', (['v', '[0.01, 0.25, 0.75, 0.99]'], {}), '(v, [0.01, 0.25, 0.75, 0.99])\n', (6921, 6950), True, 'import numpy as np\n'), ((7529, 7539), 'scipy.stats.entropy', 'entropy', (['v'], {}), '(v)\n', (7536, 7539), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((7880, 7887), 'scipy.stats.skew', 'skew', (['v'], {}), '(v)\n', (7884, 7887), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((7912, 7923), 'scipy.stats.kurtosis', 'kurtosis', (['v'], {}), '(v)\n', (7920, 7923), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((7948, 7967), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(5)'}), '(v, moment=5)\n', (7954, 7967), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((7992, 8011), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(6)'}), '(v, moment=6)\n', (7998, 8011), False, 'from scipy.stats 
import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((8036, 8055), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(7)'}), '(v, moment=7)\n', (8042, 8055), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((8080, 8099), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(8)'}), '(v, moment=8)\n', (8086, 8099), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((8124, 8143), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(9)'}), '(v, moment=9)\n', (8130, 8143), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((8169, 8189), 'scipy.stats.moment', 'moment', (['v'], {'moment': '(10)'}), '(v, moment=10)\n', (8175, 8189), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((8235, 8290), 'numpy.logical_or', 'np.logical_or', (['(v < q25 - 1.5 * iqr)', '(v > q75 + 1.5 * iqr)'], {}), '(v < q25 - 1.5 * iqr, v > q75 + 1.5 * iqr)\n', (8248, 8290), True, 'import numpy as np\n'), ((8332, 8383), 'numpy.logical_or', 'np.logical_or', (['(v < q25 - 3 * iqr)', '(v > q75 + 3 * iqr)'], {}), '(v < q25 - 3 * iqr, v > q75 + 3 * iqr)\n', (8345, 8383), True, 'import numpy as np\n'), ((8412, 8442), 'numpy.logical_or', 'np.logical_or', (['(v < q1)', '(v > q99)'], {}), '(v < q1, v > q99)\n', (8425, 8442), True, 'import numpy as np\n'), ((8467, 8552), 'numpy.logical_or', 'np.logical_or', (['(v < sample_mean - 3 * sample_std)', '(v > sample_mean + 3 * sample_std)'], {}), '(v < sample_mean - 3 * sample_std, v > sample_mean + 3 *\n sample_std)\n', (8480, 8552), True, 'import numpy as np\n'), ((8985, 9007), 'numpy.any', 'np.any', (['outliers_15iqr'], {}), '(outliers_15iqr)\n', (8991, 9007), True, 'import numpy as np\n'), ((9041, 9062), 'numpy.any', 'np.any', (['outliers_3iqr'], {}), '(outliers_3iqr)\n', (9047, 9062), True, 'import numpy as np\n'), ((9096, 9117), 'numpy.any', 'np.any', 
(['outliers_1_99'], {}), '(outliers_1_99)\n', (9102, 9117), True, 'import numpy as np\n'), ((9151, 9172), 'numpy.any', 'np.any', (['outliers_3std'], {}), '(outliers_3std)\n', (9157, 9172), True, 'import numpy as np\n'), ((10617, 10644), 'numpy.array_equal', 'np.array_equal', (['sorted_v', 'v'], {}), '(sorted_v, v)\n', (10631, 10644), True, 'import numpy as np\n'), ((11238, 11265), 'numpy.array_equal', 'np.array_equal', (['sorted_v', 'v'], {}), '(sorted_v, v)\n', (11252, 11265), True, 'import numpy as np\n'), ((11340, 11378), 'numpy.divide', 'np.divide', (['sorted_v[:-1]', 'sorted_v[1:]'], {}), '(sorted_v[:-1], sorted_v[1:])\n', (11349, 11378), True, 'import numpy as np\n'), ((11422, 11439), 'numpy.diff', 'np.diff', (['sorted_v'], {}), '(sorted_v)\n', (11429, 11439), True, 'import numpy as np\n'), ((12032, 12045), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12043, 12045), False, 'from collections import OrderedDict\n'), ((12603, 12634), 'feature_extraction.type_detection.detect_field_type', 'detect_field_type', (['field_values'], {}), '(field_values)\n', (12620, 12634), False, 'from feature_extraction.type_detection import detect_field_type, data_type_to_general_type, data_types, general_types\n'), ((13482, 13495), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13493, 13495), False, 'from collections import OrderedDict\n'), ((13526, 13539), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13537, 13539), False, 'from collections import OrderedDict\n'), ((13571, 13584), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13582, 13584), False, 'from collections import OrderedDict\n'), ((13609, 13622), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13620, 13622), False, 'from collections import OrderedDict\n'), ((13651, 13664), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13662, 13664), False, 'from collections import OrderedDict\n'), ((7651, 7681), 'numpy.absolute', 'np.absolute', (['(v - 
sample_median)'], {}), '(v - sample_median)\n', (7662, 7681), True, 'import numpy as np\n'), ((7718, 7746), 'numpy.absolute', 'np.absolute', (['(v - sample_mean)'], {}), '(v - sample_mean)\n', (7729, 7746), True, 'import numpy as np\n'), ((8714, 8736), 'numpy.sum', 'np.sum', (['outliers_15iqr'], {}), '(outliers_15iqr)\n', (8720, 8736), True, 'import numpy as np\n'), ((8783, 8804), 'numpy.sum', 'np.sum', (['outliers_3iqr'], {}), '(outliers_3iqr)\n', (8789, 8804), True, 'import numpy as np\n'), ((8851, 8872), 'numpy.sum', 'np.sum', (['outliers_1_99'], {}), '(outliers_1_99)\n', (8857, 8872), True, 'import numpy as np\n'), ((8919, 8940), 'numpy.sum', 'np.sum', (['outliers_3std'], {}), '(outliers_3std)\n', (8925, 8940), True, 'import numpy as np\n'), ((9273, 9286), 'scipy.stats.normaltest', 'normaltest', (['v'], {}), '(v)\n', (9283, 9286), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((10928, 10973), 'numpy.all', 'np.all', (['(sequence_incremental_subtraction <= 0)'], {}), '(sequence_incremental_subtraction <= 0)\n', (10934, 10973), True, 'import numpy as np\n'), ((10990, 11035), 'numpy.all', 'np.all', (['(sequence_incremental_subtraction >= 0)'], {}), '(sequence_incremental_subtraction >= 0)\n', (10996, 11035), True, 'import numpy as np\n'), ((11480, 11520), 'numpy.std', 'np.std', (['sequence_incremental_subtraction'], {}), '(sequence_incremental_subtraction)\n', (11486, 11520), True, 'import numpy as np\n'), ((11536, 11577), 'numpy.mean', 'np.mean', (['sequence_incremental_subtraction'], {}), '(sequence_incremental_subtraction)\n', (11543, 11577), True, 'import numpy as np\n'), ((11618, 11655), 'numpy.std', 'np.std', (['sequence_incremental_division'], {}), '(sequence_incremental_division)\n', (11624, 11655), True, 'import numpy as np\n'), ((11671, 11709), 'numpy.mean', 'np.mean', (['sequence_incremental_division'], {}), '(sequence_incremental_division)\n', (11678, 11709), True, 'import numpy as np\n'), ((10834, 
10874), 'numpy.subtract', 'np.subtract', (['sorted_v[:-1]', 'sorted_v[1:]'], {}), '(sorted_v[:-1], sorted_v[1:])\n', (10845, 10874), True, 'import numpy as np\n'), ((11100, 11121), 'scipy.stats.pearsonr', 'pearsonr', (['v', 'sorted_v'], {}), '(v, sorted_v)\n', (11108, 11121), False, 'from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment\n'), ((13762, 13776), 'numpy.ma.array', 'np.ma.array', (['v'], {}), '(v)\n', (13773, 13776), True, 'import numpy as np\n'), ((6607, 6619), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (6616, 6619), True, 'import pandas as pd\n')] |
#/usr/bin/env python
from __future__ import print_function
import numpy
import copy
import saveVTK
class Upwind:
def __init__(self, velocity, lengths, numCells):
self.numCells = numCells
self.ndims = len(velocity)
self.deltas = numpy.zeros( (self.ndims,), numpy.float64 )
self.upDirection = numpy.zeros( (self.ndims,), numpy.int )
self.v = velocity
self.lengths = lengths
self.ntot = 1
self.offsetMat = numpy.identity(self.ndims, numpy.int)
self.numCellsExt = numpy.outer(self.numCells, numpy.ones((self.ndims,), numpy.int))
for j in range(self.ndims):
self.upDirection[j] = -1
if velocity[j] < 0.: self.upDirection[j] = +1
self.deltas[j] = lengths[j] / numCells[j]
self.offsetMat[j, j] = self.upDirection[j]
self.ntot *= numCells[j]
self.dimProd = numpy.ones( (self.ndims,), numpy.int )
for i in range(self.ndims - 2, -1, -1):
# last index varies fastest
self.dimProd[i] = self.dimProd[i + 1] * self.numCells[i + 1]
self.coeff = self.v * self.upDirection / self.deltas
# initializing the field
self.f = numpy.zeros( (self.ntot,), numpy.float64 )
# initialize lower corner to one
self.f[0] = 1
# array of index sets for each cell
self.inds = numpy.zeros( (self.ndims, self.ntot), numpy.int )
for j in range(self.ndims):
self.inds[j, :] = numpy.arange(self.ntot)
self.inds[j, :] //= self.dimProd[j]
self.inds[j, :] %= self.numCells[j]
def advect(self, deltaTime):
# copy
oldF = self.f.copy()
indsUp = self.inds.copy()
# update the field in each spatial direction
for j in range(self.ndims):
# apply offset
indsUp[j, :] += self.upDirection[j]
indsUp[j, :] %= self.numCells[j]
# compute flat indices corresponding to the offset index sets
flatIndsUp = numpy.dot(self.dimProd, indsUp)
# update
self.f -= (deltaTime * self.coeff[j]) * (oldF[flatIndsUp] - oldF)
# reset
indsUp[j, :] = self.inds[j, :]
def saveVTK(self, fname):
xAxis = [0.0]
yAxis = [0.0]
zAxis = [0.0]
if self.ndims > 2:
xAxis = [0.0 + self.deltas[2] * i for i in range(self.numCells[2] + 1)]
if self.ndims > 1:
yAxis = [0.0 + self.deltas[1] * i for i in range(self.numCells[1] + 1)]
if self.ndims > 0:
zAxis = [0.0 + self.deltas[0] * i for i in range(self.numCells[0] + 1)]
saveVTK.rectilinear(fname, xAxis, yAxis, zAxis, self.f.reshape(self.numCells))
def checksum(self):
return numpy.sum(self.f)
def printOut(self):
for i in range(len(self.f)):
print(i, ' ', self.f[i])
#################################################################################################
def main():
import sys
if len(sys.argv) <= 1:
print("must specify number of cells in each direction.")
return sys.exit(1)
ndims = 3
numCells = [int(sys.argv[1])] * 3
print("number of cells: ", numCells)
numTimeSteps = 100
if len(sys.argv) > 2:
numTimeSteps = int(sys.argv[2])
print("number of time steps: ", numTimeSteps)
doVtk = False
if len(sys.argv) > 3 and sys.argv[3] == 'vtk':
doVtk = True
velocity = numpy.ones( (ndims,), numpy.float64 )
lengths = numpy.ones( (ndims,), numpy.float64 )
# compute dt
courant = 0.1
dt = float('inf')
for j in range(ndims):
dx = lengths[j]/ float(numCells[j])
dt = min(courant * dx / velocity[j], dt)
up = Upwind(velocity, lengths, numCells)
for i in range(numTimeSteps):
up.advect(dt)
print("check sum: ", up.checksum())
if doVtk:
up.saveVTK("up.vtk")
if __name__ == '__main__': main()
| [
"numpy.identity",
"numpy.ones",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"sys.exit",
"numpy.arange"
] | [((3192, 3227), 'numpy.ones', 'numpy.ones', (['(ndims,)', 'numpy.float64'], {}), '((ndims,), numpy.float64)\n', (3202, 3227), False, 'import numpy\n'), ((3242, 3277), 'numpy.ones', 'numpy.ones', (['(ndims,)', 'numpy.float64'], {}), '((ndims,), numpy.float64)\n', (3252, 3277), False, 'import numpy\n'), ((246, 287), 'numpy.zeros', 'numpy.zeros', (['(self.ndims,)', 'numpy.float64'], {}), '((self.ndims,), numpy.float64)\n', (257, 287), False, 'import numpy\n'), ((313, 350), 'numpy.zeros', 'numpy.zeros', (['(self.ndims,)', 'numpy.int'], {}), '((self.ndims,), numpy.int)\n', (324, 350), False, 'import numpy\n'), ((441, 478), 'numpy.identity', 'numpy.identity', (['self.ndims', 'numpy.int'], {}), '(self.ndims, numpy.int)\n', (455, 478), False, 'import numpy\n'), ((830, 866), 'numpy.ones', 'numpy.ones', (['(self.ndims,)', 'numpy.int'], {}), '((self.ndims,), numpy.int)\n', (840, 866), False, 'import numpy\n'), ((1120, 1160), 'numpy.zeros', 'numpy.zeros', (['(self.ntot,)', 'numpy.float64'], {}), '((self.ntot,), numpy.float64)\n', (1131, 1160), False, 'import numpy\n'), ((1275, 1322), 'numpy.zeros', 'numpy.zeros', (['(self.ndims, self.ntot)', 'numpy.int'], {}), '((self.ndims, self.ntot), numpy.int)\n', (1286, 1322), False, 'import numpy\n'), ((2538, 2555), 'numpy.sum', 'numpy.sum', (['self.f'], {}), '(self.f)\n', (2547, 2555), False, 'import numpy\n'), ((2865, 2876), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2873, 2876), False, 'import sys\n'), ((529, 565), 'numpy.ones', 'numpy.ones', (['(self.ndims,)', 'numpy.int'], {}), '((self.ndims,), numpy.int)\n', (539, 565), False, 'import numpy\n'), ((1381, 1404), 'numpy.arange', 'numpy.arange', (['self.ntot'], {}), '(self.ntot)\n', (1393, 1404), False, 'import numpy\n'), ((1863, 1894), 'numpy.dot', 'numpy.dot', (['self.dimProd', 'indsUp'], {}), '(self.dimProd, indsUp)\n', (1872, 1894), False, 'import numpy\n')] |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.color import lab2rgb
from sklearn import model_selection
from sklearn.naive_bayes import GaussianNB
import sys
from sklearn.metrics import accuracy_score
from skimage import color
from sklearn import pipeline
from sklearn import preprocessing
# In[4]:
def my_rgb2lab(colors):
old_shape = colors.shape
reshaped = colors.reshape(old_shape[0],1,old_shape[1])
colors_lab = color.rgb2lab(reshaped)
return colors_lab.reshape(old_shape)
# In[6]:
# representative RGB colours for each label, for nice display
COLOUR_RGB = {
'red': (255, 0, 0),
'orange': (255, 114, 0),
'yellow': (255, 255, 0),
'green': (0, 230, 0),
'blue': (0, 0, 255),
'purple': (187, 0, 187),
'brown': (117, 60, 0),
'black': (0, 0, 0),
'grey': (150, 150, 150),
'white': (255, 255, 255),
}
name_to_rgb = np.vectorize(COLOUR_RGB.get, otypes=[np.uint8, np.uint8, np.uint8])
def plot_predictions(model, lum=71, resolution=256):
"""
Create a slice of LAB colour space with given luminance; predict with the model; plot the results.
"""
wid = resolution
hei = resolution
# create a hei*wid grid of LAB colour values, with L=lum
ag = np.linspace(-100, 100, wid)
bg = np.linspace(-100, 100, hei)
aa, bb = np.meshgrid(ag, bg)
ll = lum * np.ones((hei, wid))
lab_grid = np.stack([ll, aa, bb], axis=2)
# convert to RGB for consistency with original input
X_grid = lab2rgb(lab_grid)
# predict and convert predictions to colours so we can see what's happening
y_grid = model.predict(X_grid.reshape((wid*hei, 3)))
pixels = np.stack(name_to_rgb(y_grid), axis=1) / 255
pixels = pixels.reshape((hei, wid, 3))
# plot input and predictions
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.title('Inputs')
plt.imshow(X_grid.reshape((hei, wid, 3)))
plt.subplot(1, 2, 2)
plt.title('Predicted Labels')
plt.imshow(pixels)
def main():
# data = pd.read_csv("colour-data.csv")
data = pd.read_csv(sys.argv[1])
X = data # array with shape (n, 3). Divide by 255
y = data # array with shape (n,) of colour words
# TODO: build model_rgb to predict y from X.
# TODO: print model_rgb's accuracy_score
# TODO: build model_lab to predict y from X by converting to LAB colour first.
# TODO: print model_lab's accuracy_score
data = pd.read_csv("colour-data.csv")
rgb_columns = ["R","G","B"]
data[rgb_columns] = data[rgb_columns].values/255
X_train,X_test,Y_train,Y_test = model_selection.train_test_split(data[rgb_columns].values,data["Label"].values)
model_rgb = GaussianNB()
model_rgb = model_rgb.fit(X_train, Y_train)
Y_predicted = model_rgb.predict(X_test)
print(accuracy_score(Y_test, Y_predicted))
model_lab = pipeline.make_pipeline(preprocessing.FunctionTransformer(my_rgb2lab),GaussianNB())
model_lab = model_lab.fit(X_train, Y_train)
Y_predicted_lab = model_lab.predict(X_test)
print(accuracy_score(Y_test, Y_predicted_lab))
plot_predictions(model_rgb)
plt.savefig('predictions_rgb.png')
plot_predictions(model_lab)
plt.savefig('predictions_lab.png')
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.imshow",
"skimage.color.rgb2lab",
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"skimage.color.lab2rgb",
"sklearn.model_selection.train_test_split",
"sklearn.naive_bayes.GaussianNB",
"numpy.ones",
"matplotlib.pyplot.subplot",
"numpy.stack",
"nump... | [((939, 1006), 'numpy.vectorize', 'np.vectorize', (['COLOUR_RGB.get'], {'otypes': '[np.uint8, np.uint8, np.uint8]'}), '(COLOUR_RGB.get, otypes=[np.uint8, np.uint8, np.uint8])\n', (951, 1006), True, 'import numpy as np\n'), ((497, 520), 'skimage.color.rgb2lab', 'color.rgb2lab', (['reshaped'], {}), '(reshaped)\n', (510, 520), False, 'from skimage import color\n'), ((1294, 1321), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', 'wid'], {}), '(-100, 100, wid)\n', (1305, 1321), True, 'import numpy as np\n'), ((1331, 1358), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', 'hei'], {}), '(-100, 100, hei)\n', (1342, 1358), True, 'import numpy as np\n'), ((1372, 1391), 'numpy.meshgrid', 'np.meshgrid', (['ag', 'bg'], {}), '(ag, bg)\n', (1383, 1391), True, 'import numpy as np\n'), ((1442, 1472), 'numpy.stack', 'np.stack', (['[ll, aa, bb]'], {'axis': '(2)'}), '([ll, aa, bb], axis=2)\n', (1450, 1472), True, 'import numpy as np\n'), ((1544, 1561), 'skimage.color.lab2rgb', 'lab2rgb', (['lab_grid'], {}), '(lab_grid)\n', (1551, 1561), False, 'from skimage.color import lab2rgb\n'), ((1838, 1865), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1848, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1870, 1890), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1881, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1914), 'matplotlib.pyplot.title', 'plt.title', (['"""Inputs"""'], {}), "('Inputs')\n", (1904, 1914), True, 'import matplotlib.pyplot as plt\n'), ((1966, 1986), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1977, 1986), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2020), 'matplotlib.pyplot.title', 'plt.title', (['"""Predicted Labels"""'], {}), "('Predicted Labels')\n", (2000, 2020), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2043), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pixels'], {}), 
'(pixels)\n', (2035, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2118, 2142), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2129, 2142), True, 'import pandas as pd\n'), ((2490, 2520), 'pandas.read_csv', 'pd.read_csv', (['"""colour-data.csv"""'], {}), "('colour-data.csv')\n", (2501, 2520), True, 'import pandas as pd\n'), ((2642, 2727), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['data[rgb_columns].values', "data['Label'].values"], {}), "(data[rgb_columns].values, data['Label'].values\n )\n", (2674, 2727), False, 'from sklearn import model_selection\n'), ((2738, 2750), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2748, 2750), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3190, 3224), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""predictions_rgb.png"""'], {}), "('predictions_rgb.png')\n", (3201, 3224), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3295), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""predictions_lab.png"""'], {}), "('predictions_lab.png')\n", (3272, 3295), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1426), 'numpy.ones', 'np.ones', (['(hei, wid)'], {}), '((hei, wid))\n', (1414, 1426), True, 'import numpy as np\n'), ((2853, 2888), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'Y_predicted'], {}), '(Y_test, Y_predicted)\n', (2867, 2888), False, 'from sklearn.metrics import accuracy_score\n'), ((2940, 2985), 'sklearn.preprocessing.FunctionTransformer', 'preprocessing.FunctionTransformer', (['my_rgb2lab'], {}), '(my_rgb2lab)\n', (2973, 2985), False, 'from sklearn import preprocessing\n'), ((2986, 2998), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2996, 2998), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3106, 3145), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'Y_predicted_lab'], {}), '(Y_test, Y_predicted_lab)\n', (3120, 3145), False, 'from 
sklearn.metrics import accuracy_score\n')] |
import numpy as np
import pandas as pd
from scipy.stats import beta
import matplotlib.pyplot as plt
import json
def t2d_beta_data(min_val,max_val,alpha_val, beta_val,no_default):
# Calculation
tad = np.array([range(1,max_val+1)])
beta_pdf = beta.pdf(tad, alpha_val, beta_val, loc=min_val, scale=max_val-min_val)
beta_pdf_std = beta_pdf/beta_pdf.sum()
bad_loans = np.round(beta_pdf_std* no_default)
return bad_loans.tolist()[0]
if __name__ == '__main__':
# Define beta distribution variables
min_val = 2
max_val = 36
alpha_val = 1.58
beta_val = 4.25
no_default = 1200
fileLocation = 'D:/Epay/Epay/Dashboard/dashboard_prototype/data/'
t2d_beta = fileLocation + 't2d_beta_data' + '.json'
json_new = t2d_beta_data(min_val,max_val,alpha_val, beta_val,no_default)
with open(t2d_beta, 'w') as fp:
json.dump(json_new, fp)
# plt.bar(tad.tolist()[0], loans.tolist()[0])
# plt.show() | [
"json.dump",
"scipy.stats.beta.pdf",
"numpy.round"
] | [((255, 327), 'scipy.stats.beta.pdf', 'beta.pdf', (['tad', 'alpha_val', 'beta_val'], {'loc': 'min_val', 'scale': '(max_val - min_val)'}), '(tad, alpha_val, beta_val, loc=min_val, scale=max_val - min_val)\n', (263, 327), False, 'from scipy.stats import beta\n'), ((385, 420), 'numpy.round', 'np.round', (['(beta_pdf_std * no_default)'], {}), '(beta_pdf_std * no_default)\n', (393, 420), True, 'import numpy as np\n'), ((870, 893), 'json.dump', 'json.dump', (['json_new', 'fp'], {}), '(json_new, fp)\n', (879, 893), False, 'import json\n')] |
import numpy as np
def bound_linear(z,z_low,z_high,val_low,val_high):
z0 = val_low + (val_high - val_low) * (z - z_low) / (z_high - z_low)
z0 = np.maximum(z0,val_low)
z0 = np.minimum(z0,val_high)
return z0 | [
"numpy.maximum",
"numpy.minimum"
] | [((153, 176), 'numpy.maximum', 'np.maximum', (['z0', 'val_low'], {}), '(z0, val_low)\n', (163, 176), True, 'import numpy as np\n'), ((185, 209), 'numpy.minimum', 'np.minimum', (['z0', 'val_high'], {}), '(z0, val_high)\n', (195, 209), True, 'import numpy as np\n')] |
'''
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Buddha Bless, No Bug !
'''
import numpy as np
import struct
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
def loadDataSet(images_path,labels_path,nums,batch):
'''
:param images_path: 特征集地址
:param labels_path: 标签集地址
:param nums: 取得的数据条数
:param batch: 一个Batch_size大小
:return: 划分好的数据集
'''
#图像读取
f1 = open(images_path,'rb')
image_buf = f1.read()
image_index = 0
image_index += struct.calcsize('>IIII')
image_list = []
for index in range(nums):
temp = struct.unpack_from('>784B',image_buf,image_index)
im = np.reshape(temp,(28,28))
image_list.append(im)
image_index += struct.calcsize('>784B')
#label读取
f2 = open(labels_path,'rb')
label_buf = f2.read()
label_index = 0
label_index += struct.calcsize('>II')
label_list = []
for index in range(nums):
temp = struct.unpack_from('>1B',label_buf,label_index)
t = np.array([0,0,0,0,0,0,0,0,0,0])
t[temp] = 1
label_list.append(t)
label_index += struct.calcsize('>1B')
train_images = torch.tensor(np.array(image_list),dtype=torch.float32)
train_label = torch.tensor(np.array(label_list),dtype=torch.float32)
datas_td = TensorDataset(train_images,train_label)
datas_dl = DataLoader(datas_td,batch_size=batch,shuffle=True)
return datas_dl
| [
"struct.calcsize",
"numpy.reshape",
"torch.utils.data.TensorDataset",
"numpy.array",
"torch.utils.data.DataLoader",
"struct.unpack_from"
] | [((1321, 1345), 'struct.calcsize', 'struct.calcsize', (['""">IIII"""'], {}), "('>IIII')\n", (1336, 1345), False, 'import struct\n'), ((1688, 1710), 'struct.calcsize', 'struct.calcsize', (['""">II"""'], {}), "('>II')\n", (1703, 1710), False, 'import struct\n'), ((2125, 2165), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_images', 'train_label'], {}), '(train_images, train_label)\n', (2138, 2165), False, 'from torch.utils.data import TensorDataset\n'), ((2180, 2232), 'torch.utils.data.DataLoader', 'DataLoader', (['datas_td'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(datas_td, batch_size=batch, shuffle=True)\n', (2190, 2232), False, 'from torch.utils.data import DataLoader\n'), ((1411, 1462), 'struct.unpack_from', 'struct.unpack_from', (['""">784B"""', 'image_buf', 'image_index'], {}), "('>784B', image_buf, image_index)\n", (1429, 1462), False, 'import struct\n'), ((1474, 1500), 'numpy.reshape', 'np.reshape', (['temp', '(28, 28)'], {}), '(temp, (28, 28))\n', (1484, 1500), True, 'import numpy as np\n'), ((1552, 1576), 'struct.calcsize', 'struct.calcsize', (['""">784B"""'], {}), "('>784B')\n", (1567, 1576), False, 'import struct\n'), ((1776, 1825), 'struct.unpack_from', 'struct.unpack_from', (['""">1B"""', 'label_buf', 'label_index'], {}), "('>1B', label_buf, label_index)\n", (1794, 1825), False, 'import struct\n'), ((1836, 1876), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (1844, 1876), True, 'import numpy as np\n'), ((1940, 1962), 'struct.calcsize', 'struct.calcsize', (['""">1B"""'], {}), "('>1B')\n", (1955, 1962), False, 'import struct\n'), ((1995, 2015), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (2003, 2015), True, 'import numpy as np\n'), ((2068, 2088), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (2076, 2088), True, 'import numpy as np\n')] |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for randomizers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from absl.testing import parameterized
from dm_control.mujoco import engine
from dm_control.mujoco.wrapper.mjbindings import mjlib
from dm_control.suite.utils import randomizers
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
class RandomizeUnlimitedJointsTest(parameterized.TestCase):
def setUp(self):
self.rand = np.random.RandomState(100)
def test_single_joint_of_each_type(self):
physics = engine.Physics.from_xml_string("""<mujoco>
<default>
<joint range="0 90" />
</default>
<worldbody>
<body>
<geom type="box" size="1 1 1"/>
<joint name="free" type="free"/>
</body>
<body>
<geom type="box" size="1 1 1"/>
<joint name="limited_hinge" type="hinge" limited="true"/>
<joint name="slide" type="slide"/>
<joint name="limited_slide" type="slide" limited="true"/>
<joint name="hinge" type="hinge"/>
</body>
<body>
<geom type="box" size="1 1 1"/>
<joint name="ball" type="ball"/>
</body>
<body>
<geom type="box" size="1 1 1"/>
<joint name="limited_ball" type="ball" limited="true"/>
</body>
</worldbody>
</mujoco>""")
randomizers.randomize_limited_and_rotational_joints(physics, self.rand)
self.assertNotEqual(0., physics.named.data.qpos['hinge'])
self.assertNotEqual(0., physics.named.data.qpos['limited_hinge'])
self.assertNotEqual(0., physics.named.data.qpos['limited_slide'])
self.assertNotEqual(0., np.sum(physics.named.data.qpos['ball']))
self.assertNotEqual(0., np.sum(physics.named.data.qpos['limited_ball']))
self.assertNotEqual(0., np.sum(physics.named.data.qpos['free'][3:]))
# Unlimited slide and the positional part of the free joint remains
# uninitialized.
self.assertEqual(0., physics.named.data.qpos['slide'])
self.assertEqual(0., np.sum(physics.named.data.qpos['free'][:3]))
def test_multiple_joints_of_same_type(self):
physics = engine.Physics.from_xml_string("""<mujoco>
<worldbody>
<body>
<geom type="box" size="1 1 1"/>
<joint name="hinge_1" type="hinge"/>
<joint name="hinge_2" type="hinge"/>
<joint name="hinge_3" type="hinge"/>
</body>
</worldbody>
</mujoco>""")
randomizers.randomize_limited_and_rotational_joints(physics, self.rand)
self.assertNotEqual(0., physics.named.data.qpos['hinge_1'])
self.assertNotEqual(0., physics.named.data.qpos['hinge_2'])
self.assertNotEqual(0., physics.named.data.qpos['hinge_3'])
self.assertNotEqual(physics.named.data.qpos['hinge_1'],
physics.named.data.qpos['hinge_2'])
self.assertNotEqual(physics.named.data.qpos['hinge_2'],
physics.named.data.qpos['hinge_3'])
self.assertNotEqual(physics.named.data.qpos['hinge_1'],
physics.named.data.qpos['hinge_3'])
def test_unlimited_hinge_randomization_range(self):
physics = engine.Physics.from_xml_string("""<mujoco>
<worldbody>
<body>
<geom type="box" size="1 1 1"/>
<joint name="hinge" type="hinge"/>
</body>
</worldbody>
</mujoco>""")
for _ in xrange(10):
randomizers.randomize_limited_and_rotational_joints(physics, self.rand)
self.assertBetween(physics.named.data.qpos['hinge'], -np.pi, np.pi)
def test_limited_1d_joint_limits_are_respected(self):
physics = engine.Physics.from_xml_string("""<mujoco>
<default>
<joint limited="true"/>
</default>
<worldbody>
<body>
<geom type="box" size="1 1 1"/>
<joint name="hinge" type="hinge" range="0 10"/>
<joint name="slide" type="slide" range="30 50"/>
</body>
</worldbody>
</mujoco>""")
for _ in xrange(10):
randomizers.randomize_limited_and_rotational_joints(physics, self.rand)
self.assertBetween(physics.named.data.qpos['hinge'],
np.deg2rad(0), np.deg2rad(10))
self.assertBetween(physics.named.data.qpos['slide'], 30, 50)
def test_limited_ball_joint_are_respected(self):
physics = engine.Physics.from_xml_string("""<mujoco>
<worldbody>
<body name="body" zaxis="1 0 0">
<geom type="box" size="1 1 1"/>
<joint name="ball" type="ball" limited="true" range="0 60"/>
</body>
</worldbody>
</mujoco>""")
body_axis = np.array([1., 0., 0.])
joint_axis = np.zeros(3)
for _ in xrange(10):
randomizers.randomize_limited_and_rotational_joints(physics, self.rand)
quat = physics.named.data.qpos['ball']
mjlib.mju_rotVecQuat(joint_axis, body_axis, quat)
angle_cos = np.dot(body_axis, joint_axis)
self.assertGreater(angle_cos, 0.5) # cos(60) = 0.5
if __name__ == '__main__':
absltest.main()
| [
"dm_control.mujoco.wrapper.mjbindings.mjlib.mju_rotVecQuat",
"dm_control.mujoco.engine.Physics.from_xml_string",
"absl.testing.absltest.main",
"dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints",
"numpy.array",
"numpy.zeros",
"six.moves.xrange",
"numpy.sum",
"numpy.dot",
"... | [((6034, 6049), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6047, 6049), False, 'from absl.testing import absltest\n'), ((1233, 1259), 'numpy.random.RandomState', 'np.random.RandomState', (['(100)'], {}), '(100)\n', (1254, 1259), True, 'import numpy as np\n'), ((1319, 2260), 'dm_control.mujoco.engine.Physics.from_xml_string', 'engine.Physics.from_xml_string', (['"""<mujoco>\n <default>\n <joint range="0 90" />\n </default>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="free" type="free"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="limited_hinge" type="hinge" limited="true"/>\n <joint name="slide" type="slide"/>\n <joint name="limited_slide" type="slide" limited="true"/>\n <joint name="hinge" type="hinge"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="ball" type="ball"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="limited_ball" type="ball" limited="true"/>\n </body>\n </worldbody>\n </mujoco>"""'], {}), '(\n """<mujoco>\n <default>\n <joint range="0 90" />\n </default>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="free" type="free"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="limited_hinge" type="hinge" limited="true"/>\n <joint name="slide" type="slide"/>\n <joint name="limited_slide" type="slide" limited="true"/>\n <joint name="hinge" type="hinge"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="ball" type="ball"/>\n </body>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="limited_ball" type="ball" limited="true"/>\n </body>\n </worldbody>\n </mujoco>"""\n )\n', (1349, 2260), False, 'from dm_control.mujoco import engine\n'), ((2256, 2327), 'dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints', 'randomizers.randomize_limited_and_rotational_joints', (['physics', 'self.rand'], {}), '(physics, self.rand)\n', (2307, 2327), False, 'from 
dm_control.suite.utils import randomizers\n'), ((3036, 3393), 'dm_control.mujoco.engine.Physics.from_xml_string', 'engine.Physics.from_xml_string', (['"""<mujoco>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge_1" type="hinge"/>\n <joint name="hinge_2" type="hinge"/>\n <joint name="hinge_3" type="hinge"/>\n </body>\n </worldbody>\n </mujoco>"""'], {}), '(\n """<mujoco>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge_1" type="hinge"/>\n <joint name="hinge_2" type="hinge"/>\n <joint name="hinge_3" type="hinge"/>\n </body>\n </worldbody>\n </mujoco>"""\n )\n', (3066, 3393), False, 'from dm_control.mujoco import engine\n'), ((3389, 3460), 'dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints', 'randomizers.randomize_limited_and_rotational_joints', (['physics', 'self.rand'], {}), '(physics, self.rand)\n', (3440, 3460), False, 'from dm_control.suite.utils import randomizers\n'), ((4085, 4338), 'dm_control.mujoco.engine.Physics.from_xml_string', 'engine.Physics.from_xml_string', (['"""<mujoco>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge" type="hinge"/>\n </body>\n </worldbody>\n </mujoco>"""'], {}), '(\n """<mujoco>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge" type="hinge"/>\n </body>\n </worldbody>\n </mujoco>"""\n )\n', (4115, 4338), False, 'from dm_control.mujoco import engine\n'), ((4343, 4353), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (4349, 4353), False, 'from six.moves import xrange\n'), ((4578, 4984), 'dm_control.mujoco.engine.Physics.from_xml_string', 'engine.Physics.from_xml_string', (['"""<mujoco>\n <default>\n <joint limited="true"/>\n </default>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge" type="hinge" range="0 10"/>\n <joint name="slide" type="slide" range="30 50"/>\n </body>\n </worldbody>\n </mujoco>"""'], {}), '(\n """<mujoco>\n <default>\n <joint 
limited="true"/>\n </default>\n <worldbody>\n <body>\n <geom type="box" size="1 1 1"/>\n <joint name="hinge" type="hinge" range="0 10"/>\n <joint name="slide" type="slide" range="30 50"/>\n </body>\n </worldbody>\n </mujoco>"""\n )\n', (4608, 4984), False, 'from dm_control.mujoco import engine\n'), ((4989, 4999), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (4995, 4999), False, 'from six.moves import xrange\n'), ((5327, 5632), 'dm_control.mujoco.engine.Physics.from_xml_string', 'engine.Physics.from_xml_string', (['"""<mujoco>\n <worldbody>\n <body name="body" zaxis="1 0 0">\n <geom type="box" size="1 1 1"/>\n <joint name="ball" type="ball" limited="true" range="0 60"/>\n </body>\n </worldbody>\n </mujoco>"""'], {}), '(\n """<mujoco>\n <worldbody>\n <body name="body" zaxis="1 0 0">\n <geom type="box" size="1 1 1"/>\n <joint name="ball" type="ball" limited="true" range="0 60"/>\n </body>\n </worldbody>\n </mujoco>"""\n )\n', (5357, 5632), False, 'from dm_control.mujoco import engine\n'), ((5640, 5665), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (5648, 5665), True, 'import numpy as np\n'), ((5680, 5691), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5688, 5691), True, 'import numpy as np\n'), ((5705, 5715), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (5711, 5715), False, 'from six.moves import xrange\n'), ((2559, 2598), 'numpy.sum', 'np.sum', (["physics.named.data.qpos['ball']"], {}), "(physics.named.data.qpos['ball'])\n", (2565, 2598), True, 'import numpy as np\n'), ((2628, 2675), 'numpy.sum', 'np.sum', (["physics.named.data.qpos['limited_ball']"], {}), "(physics.named.data.qpos['limited_ball'])\n", (2634, 2675), True, 'import numpy as np\n'), ((2706, 2749), 'numpy.sum', 'np.sum', (["physics.named.data.qpos['free'][3:]"], {}), "(physics.named.data.qpos['free'][3:])\n", (2712, 2749), True, 'import numpy as np\n'), ((2929, 2972), 'numpy.sum', 'np.sum', (["physics.named.data.qpos['free'][:3]"], {}), 
"(physics.named.data.qpos['free'][:3])\n", (2935, 2972), True, 'import numpy as np\n'), ((4361, 4432), 'dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints', 'randomizers.randomize_limited_and_rotational_joints', (['physics', 'self.rand'], {}), '(physics, self.rand)\n', (4412, 4432), False, 'from dm_control.suite.utils import randomizers\n'), ((5007, 5078), 'dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints', 'randomizers.randomize_limited_and_rotational_joints', (['physics', 'self.rand'], {}), '(physics, self.rand)\n', (5058, 5078), False, 'from dm_control.suite.utils import randomizers\n'), ((5723, 5794), 'dm_control.suite.utils.randomizers.randomize_limited_and_rotational_joints', 'randomizers.randomize_limited_and_rotational_joints', (['physics', 'self.rand'], {}), '(physics, self.rand)\n', (5774, 5794), False, 'from dm_control.suite.utils import randomizers\n'), ((5847, 5896), 'dm_control.mujoco.wrapper.mjbindings.mjlib.mju_rotVecQuat', 'mjlib.mju_rotVecQuat', (['joint_axis', 'body_axis', 'quat'], {}), '(joint_axis, body_axis, quat)\n', (5867, 5896), False, 'from dm_control.mujoco.wrapper.mjbindings import mjlib\n'), ((5915, 5944), 'numpy.dot', 'np.dot', (['body_axis', 'joint_axis'], {}), '(body_axis, joint_axis)\n', (5921, 5944), True, 'import numpy as np\n'), ((5163, 5176), 'numpy.deg2rad', 'np.deg2rad', (['(0)'], {}), '(0)\n', (5173, 5176), True, 'import numpy as np\n'), ((5178, 5192), 'numpy.deg2rad', 'np.deg2rad', (['(10)'], {}), '(10)\n', (5188, 5192), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from paz.backend.image.draw import put_text, draw_rectangle
from paz.backend.image.draw import GREEN
def draw_box(image, coordinates, class_name, score,
color=GREEN, scale=0.7, weighted=False):
x_min, y_min, x_max, y_max = coordinates
if weighted:
color = [int(channel * score) for channel in color]
text = '{:0.2f}, {}'.format(score, class_name)
put_text(image, text, (x_min, y_min - 10), scale, color, 1)
draw_rectangle(image, (x_min, y_min), (x_max, y_max), color, 2)
return image
def draw_square(image, center_x, center_y, size, color):
x_min, y_min = center_x - size, center_y - size
x_max, y_max = center_x + size, center_y + size
cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color, -1)
return image
def draw_circle(image, center_x, center_y, size, color):
cv2.circle(image, (center_x, center_y), size, color, -1)
return image
def draw_triangle(image, center_x, center_y, size, color):
vertex_A = (center_x, center_y - size)
vertex_B = (center_x - size, center_y + size)
vertex_C = (center_x + size, center_y + size)
points = np.array([[vertex_A, vertex_B, vertex_C]], dtype=np.int32)
cv2.fillPoly(image, points, color)
return image
def resize_image_with_nearest_neighbors(image, size):
"""Resize image using nearest neighbors interpolation.
# Arguments
image: Numpy array.
size: List of two ints.
# Returns
Numpy array.
"""
if(type(image) != np.ndarray):
raise ValueError(
'Recieved Image is not of type numpy array', type(image))
else:
return cv2.resize(image, size, interpolation=cv2.INTER_NEAREST)
| [
"cv2.rectangle",
"cv2.fillPoly",
"paz.backend.image.draw.draw_rectangle",
"numpy.array",
"cv2.circle",
"paz.backend.image.draw.put_text",
"cv2.resize"
] | [((416, 475), 'paz.backend.image.draw.put_text', 'put_text', (['image', 'text', '(x_min, y_min - 10)', 'scale', 'color', '(1)'], {}), '(image, text, (x_min, y_min - 10), scale, color, 1)\n', (424, 475), False, 'from paz.backend.image.draw import put_text, draw_rectangle\n'), ((480, 543), 'paz.backend.image.draw.draw_rectangle', 'draw_rectangle', (['image', '(x_min, y_min)', '(x_max, y_max)', 'color', '(2)'], {}), '(image, (x_min, y_min), (x_max, y_max), color, 2)\n', (494, 543), False, 'from paz.backend.image.draw import put_text, draw_rectangle\n'), ((728, 791), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x_min, y_min)', '(x_max, y_max)', 'color', '(-1)'], {}), '(image, (x_min, y_min), (x_max, y_max), color, -1)\n', (741, 791), False, 'import cv2\n'), ((872, 928), 'cv2.circle', 'cv2.circle', (['image', '(center_x, center_y)', 'size', 'color', '(-1)'], {}), '(image, (center_x, center_y), size, color, -1)\n', (882, 928), False, 'import cv2\n'), ((1163, 1221), 'numpy.array', 'np.array', (['[[vertex_A, vertex_B, vertex_C]]'], {'dtype': 'np.int32'}), '([[vertex_A, vertex_B, vertex_C]], dtype=np.int32)\n', (1171, 1221), True, 'import numpy as np\n'), ((1226, 1260), 'cv2.fillPoly', 'cv2.fillPoly', (['image', 'points', 'color'], {}), '(image, points, color)\n', (1238, 1260), False, 'import cv2\n'), ((1670, 1726), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_NEAREST'}), '(image, size, interpolation=cv2.INTER_NEAREST)\n', (1680, 1726), False, 'import cv2\n')] |
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
class MahaDist:
def __init__(self, bandwidth_factor=1.0):
self.bandwidth = 1.
if type(bandwidth_factor) in [float, int]:
self.bandwidth_factor = bandwidth_factor
elif type(bandwidth_factor) in [list, tuple]:
self.bandwidth_factor = np.array(bandwidth_factor)
else:
self.bandwidth_factor = bandwidth_factor
self._preprocessor = None
def __call__(self, X, Y=None, eval_gradient=False):
if self._preprocessor:
X = self._preprocessor(X)
if Y is not None:
Y = self._preprocessor(Y)
return pairwise_distances(X, Y, metric='mahalanobis', VI=self.bandwidth)
@staticmethod
def diag(X):
return np.zeros((X.shape[0],))
@staticmethod
def is_stationary():
return True
def set_bandwidth(self, bandwidth):
if np.isscalar(bandwidth):
self.bandwidth = 1 / (self.bandwidth_factor * bandwidth)
else:
self.bandwidth = np.diag(1 / (self.bandwidth_factor * bandwidth))
def get_bandwidth(self):
if np.isscalar(self.bandwidth):
return 1 / self.bandwidth
return 1 / np.diag(self.bandwidth)
class MeanSwarmDist(MahaDist):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from ._preprocessors import compute_mean_position
self._preprocessor = compute_mean_position
class MeanCovSwarmDist(MeanSwarmDist):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from ._preprocessors import compute_mean_and_cov_position
self._preprocessor = compute_mean_and_cov_position
class PeriodicDist:
def __init__(self):
self.bandwidth = 1.
self.preprocessor = None
def __call__(self, X, Y=None, eval_gradient=False):
if self.preprocessor:
X = self.preprocessor(X)
if Y is not None:
Y = self.preprocessor(Y)
if Y is None:
Y = X
return np.sum((np.sin(.5 * np.einsum('nd,kd->nkd', X, -Y)) ** 2) / self.bandwidth, axis=2)
@staticmethod
def diag(X):
return np.zeros((X.shape[0],))
@staticmethod
def is_stationary():
return True
def set_bandwidth(self, bandwidth):
self.bandwidth = 1 / (bandwidth)
def get_bandwidth(self):
return self.bandwidth
| [
"numpy.isscalar",
"numpy.diag",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.array",
"numpy.zeros",
"numpy.einsum"
] | [((704, 769), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X', 'Y'], {'metric': '"""mahalanobis"""', 'VI': 'self.bandwidth'}), "(X, Y, metric='mahalanobis', VI=self.bandwidth)\n", (722, 769), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((821, 844), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (829, 844), True, 'import numpy as np\n'), ((961, 983), 'numpy.isscalar', 'np.isscalar', (['bandwidth'], {}), '(bandwidth)\n', (972, 983), True, 'import numpy as np\n'), ((1187, 1214), 'numpy.isscalar', 'np.isscalar', (['self.bandwidth'], {}), '(self.bandwidth)\n', (1198, 1214), True, 'import numpy as np\n'), ((2267, 2290), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (2275, 2290), True, 'import numpy as np\n'), ((1097, 1145), 'numpy.diag', 'np.diag', (['(1 / (self.bandwidth_factor * bandwidth))'], {}), '(1 / (self.bandwidth_factor * bandwidth))\n', (1104, 1145), True, 'import numpy as np\n'), ((1273, 1296), 'numpy.diag', 'np.diag', (['self.bandwidth'], {}), '(self.bandwidth)\n', (1280, 1296), True, 'import numpy as np\n'), ((362, 388), 'numpy.array', 'np.array', (['bandwidth_factor'], {}), '(bandwidth_factor)\n', (370, 388), True, 'import numpy as np\n'), ((2152, 2182), 'numpy.einsum', 'np.einsum', (['"""nd,kd->nkd"""', 'X', '(-Y)'], {}), "('nd,kd->nkd', X, -Y)\n", (2161, 2182), True, 'import numpy as np\n')] |
import os
import numpy as np
import SharedArray as SA
from torch.utils.data import Dataset
from util.data_util import sa_create
from util.data_util import data_prepare
class S3DIS(Dataset):
def __init__(self, split='train', data_root='trainval', test_area=5, voxel_size=0.04, voxel_max=None, transform=None, shuffle_index=False, loop=1):
super().__init__()
self.split, self.voxel_size, self.transform, self.voxel_max, self.shuffle_index, self.loop = split, voxel_size, transform, voxel_max, shuffle_index, loop
data_list = sorted(os.listdir(data_root))
data_list = [item[:-4] for item in data_list if 'Area_' in item]
if split == 'train':
self.data_list = [item for item in data_list if not 'Area_{}'.format(test_area) in item]
else:
self.data_list = [item for item in data_list if 'Area_{}'.format(test_area) in item]
for item in self.data_list:
if not os.path.exists("/dev/shm/{}".format(item)):
data_path = os.path.join(data_root, item + '.npy')
data = np.load(data_path) # xyzrgbl, N*7
sa_create("shm://{}".format(item), data)
self.data_idx = np.arange(len(self.data_list))
print("Totally {} samples in {} set.".format(len(self.data_idx), split))
def __getitem__(self, idx):
data_idx = self.data_idx[idx % len(self.data_idx)]
data = SA.attach("shm://{}".format(self.data_list[data_idx])).copy()
coord, feat, label = data[:, 0:3], data[:, 3:6], data[:, 6]
coord, feat, label = data_prepare(coord, feat, label, self.split, self.voxel_size, self.voxel_max, self.transform, self.shuffle_index)
return coord, feat, label
def __len__(self):
return len(self.data_idx) * self.loop
| [
"os.listdir",
"os.path.join",
"util.data_util.data_prepare",
"numpy.load"
] | [((1582, 1700), 'util.data_util.data_prepare', 'data_prepare', (['coord', 'feat', 'label', 'self.split', 'self.voxel_size', 'self.voxel_max', 'self.transform', 'self.shuffle_index'], {}), '(coord, feat, label, self.split, self.voxel_size, self.\n voxel_max, self.transform, self.shuffle_index)\n', (1594, 1700), False, 'from util.data_util import data_prepare\n'), ((562, 583), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (572, 583), False, 'import os\n'), ((1026, 1064), 'os.path.join', 'os.path.join', (['data_root', "(item + '.npy')"], {}), "(data_root, item + '.npy')\n", (1038, 1064), False, 'import os\n'), ((1088, 1106), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (1095, 1106), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.decomposition import PCA
def project_embeddings_and_centroids_together(embeddings, centroids, masks):
original_embeddings_shape = embeddings.shape
embeddings = np.reshape(embeddings, (-1, embeddings.shape[2]))
points = np.concatenate([centroids, embeddings], axis=0)
model = PCA(n_components=2)
projected_points = model.fit_transform(points)
projected_centroids = projected_points[:len(centroids)]
projected_embeddings = projected_points[len(centroids):]
projected_embeddings = np.reshape(
projected_embeddings, (original_embeddings_shape[0], original_embeddings_shape[1], 2)
)
return projected_embeddings, projected_centroids
def project_embeddings(embeddings, masks, dimensionality):
flat_masked_embeddings = mask_embeddings(embeddings, masks)
model = PCA(n_components=dimensionality)
model.fit(flat_masked_embeddings)
projected_flat_embeddings = model.transform(flatten(embeddings))
return unflatten(projected_flat_embeddings, embeddings.shape[1])
def mask_embeddings(embeddings, masks):
flat_embeddings = flatten(embeddings)
flat_masks = flatten(masks)
flat_masked_embeddings = flat_embeddings[flat_masks]
return flat_masked_embeddings
def flatten(seq):
return np.reshape(seq, (seq.shape[0] * seq.shape[1], *seq.shape[2:]))
def unflatten(flat_seq, seq_length):
return np.reshape(flat_seq, (-1, seq_length, *flat_seq.shape[1:]))
| [
"sklearn.decomposition.PCA",
"numpy.reshape",
"numpy.concatenate"
] | [((203, 252), 'numpy.reshape', 'np.reshape', (['embeddings', '(-1, embeddings.shape[2])'], {}), '(embeddings, (-1, embeddings.shape[2]))\n', (213, 252), True, 'import numpy as np\n'), ((266, 313), 'numpy.concatenate', 'np.concatenate', (['[centroids, embeddings]'], {'axis': '(0)'}), '([centroids, embeddings], axis=0)\n', (280, 313), True, 'import numpy as np\n'), ((327, 346), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (330, 346), False, 'from sklearn.decomposition import PCA\n'), ((547, 648), 'numpy.reshape', 'np.reshape', (['projected_embeddings', '(original_embeddings_shape[0], original_embeddings_shape[1], 2)'], {}), '(projected_embeddings, (original_embeddings_shape[0],\n original_embeddings_shape[1], 2))\n', (557, 648), True, 'import numpy as np\n'), ((852, 884), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'dimensionality'}), '(n_components=dimensionality)\n', (855, 884), False, 'from sklearn.decomposition import PCA\n'), ((1303, 1365), 'numpy.reshape', 'np.reshape', (['seq', '(seq.shape[0] * seq.shape[1], *seq.shape[2:])'], {}), '(seq, (seq.shape[0] * seq.shape[1], *seq.shape[2:]))\n', (1313, 1365), True, 'import numpy as np\n'), ((1416, 1475), 'numpy.reshape', 'np.reshape', (['flat_seq', '(-1, seq_length, *flat_seq.shape[1:])'], {}), '(flat_seq, (-1, seq_length, *flat_seq.shape[1:]))\n', (1426, 1475), True, 'import numpy as np\n')] |
import numpy as np
from scattertext.termscoring.CohensDCalculator import CohensDCalculator
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class CohensD(CorpusBasedTermScorer, CohensDCalculator):
'''
Cohen's d scores
term_scorer = (CohensD(corpus).set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'rotten_fresh_fre.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['cohens_d']
def get_score_df(self, correction_method=None):
'''
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
# From https://people.kth.se/~lang/Effect_size.pdf
# <NAME> and <NAME>. Effect size, confidence interval and statistical
# significance: a practical guide for biologists. 2007. In Biological Reviews 82.
#
# Modification: when calculating variance, an empty document is added to each set
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
X[np.isnan(X)] = 0
cat_X, ncat_X = self._get_cat_and_ncat(X)
score_df = (self
.get_cohens_d_df(cat_X, ncat_X, correction_method)
.set_index(np.array(self.corpus_.get_terms())))
return score_df
def get_name(self):
return "Cohen's d"
class HedgesR(CohensD):
def get_scores(self, *args):
return self.get_score_df()['hedges_r']
def get_name(self):
return "Hedge's r"
| [
"numpy.isnan"
] | [((1592, 1603), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (1600, 1603), True, 'import numpy as np\n')] |
import argparse
import json
from PIL import Image
import torch
import numpy as np
from math import ceil
from train import check_gpu
from torchvision import models, transforms
def arg_parser():
parser = argparse.ArgumentParser(description="Prediction settings")
parser.add_argument('-i', '--image', type=str, help = 'path of the image to predict', required=True)
parser.add_argument('-c', '--checkpoint', type=str, help = 'checkpoint path', required=True )
parser.add_argument('-k', '--topk', type=int, help = 'choose number of matches')
parser.add_argument('-cn', '--category_names', type=str, help = 'file pointing to category names', required=True)
parser.add_argument('-g', '--gpu', action='store_true',help = 'use gpu for prediction')
return parser.parse_args()
def load_checkpoint(checkpoint_path):
# load checlpoint for the path
checkpoint = torch.load(checkpoint_path)
model = models.vgg16(pretrained=True)
model.name = "vgg16"
ldic=locals()
if checkpoint['architecture'] != "vgg16":
exec("model = models.{}(pretrained=True)".format(checkpoint['architecture']), globals(), ldic)
model.name = checkpoint['architecture']
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
pil_image = Image.open(image)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
])
img = transform(pil_image)
return img
def predict(img_tensor, model, device, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
if topk is None:
topk = 5
model.to(device)
model.eval()
img_tensor.unsqueeze_(0)
img_tensor = img_tensor.float()
with torch.no_grad():
if device != "cpu":
logps = model.forward(img_tensor.cuda())
else:
logps = model.forward(img_tensor)
ps = torch.exp(logps)
return ps.topk(topk, dim = 1)
def print_prediction(probabilities, json_path):
with open(json_path, 'r') as json_file:
cat_to_name = json.load(json_file)
labels = [cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])]
probability = np.array(probabilities[0][0])
final_prediction = ""
max = 0
i=0
while i < len(probability):
if probability[i] > max:
max = probability[i]
final_prediction = labels[i]
print("{} with a probability of {}".format(labels[i], probability[i]))
i += 1
print("\n\nFinal prediction for the given image is {}".format(final_prediction))
def main():
args = arg_parser()
# load the model from the checkpoint
model = load_checkpoint(args.checkpoint)
# process the image used for prediction
img_tensor = process_image(args.image)
# check if the gpu is available
device = check_gpu(use_gpu=args.gpu)
# Get predicted probabilities for the input image
probabilities = predict(img_tensor, model, device ,args.topk)
# print the probabilities and predicted category.
print_prediction(probabilities, args.category_names)
if __name__ == '__main__': main() | [
"torchvision.transforms.CenterCrop",
"PIL.Image.open",
"argparse.ArgumentParser",
"torch.load",
"torchvision.transforms.Resize",
"torch.exp",
"train.check_gpu",
"numpy.array",
"torchvision.transforms.Normalize",
"json.load",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torchvision.mo... | [((213, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Prediction settings"""'}), "(description='Prediction settings')\n", (236, 271), False, 'import argparse\n'), ((903, 930), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (913, 930), False, 'import torch\n'), ((948, 977), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (960, 977), False, 'from torchvision import models, transforms\n'), ((1628, 1645), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1638, 1645), False, 'from PIL import Image\n'), ((2398, 2414), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (2407, 2414), False, 'import torch\n'), ((2693, 2722), 'numpy.array', 'np.array', (['probabilities[0][0]'], {}), '(probabilities[0][0])\n', (2701, 2722), True, 'import numpy as np\n'), ((3372, 3399), 'train.check_gpu', 'check_gpu', ([], {'use_gpu': 'args.gpu'}), '(use_gpu=args.gpu)\n', (3381, 3399), False, 'from train import check_gpu\n'), ((2231, 2246), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2244, 2246), False, 'import torch\n'), ((2565, 2585), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2574, 2585), False, 'import json\n'), ((1696, 1718), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1713, 1718), False, 'from torchvision import models, transforms\n'), ((1728, 1754), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1749, 1754), False, 'from torchvision import models, transforms\n'), ((1764, 1785), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1783, 1785), False, 'from torchvision import models, transforms\n'), ((1795, 1870), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225])\n', (1815, 1870), False, 'from torchvision import models, transforms\n'), ((2644, 2673), 'numpy.array', 'np.array', (['probabilities[1][0]'], {}), '(probabilities[1][0])\n', (2652, 2673), True, 'import numpy as np\n')] |
import open3d as o3d
import numpy as np
import sys, os
import matplotlib.pyplot as plt
import cv2
import torch
import glob
import copy
import mathutils
from PIL import Image
from pytorch3d.loss import chamfer_distance
from tk3dv.nocstools.aligning import estimateSimilarityUmeyama
import math
from tqdm import tqdm
def draw_registration_result(source, target, transformation):
    """Visualize *source* aligned onto *target* by *transformation*.

    Both clouds are deep-copied first so the caller's geometry is untouched;
    the source copy is painted yellow, the target copy blue.
    """
    aligned_src = copy.deepcopy(source)
    fixed_tgt = copy.deepcopy(target)
    aligned_src.paint_uniform_color([1, 0.706, 0])
    fixed_tgt.paint_uniform_color([0, 0.651, 0.929])
    aligned_src.transform(transformation)
    o3d.visualization.draw_geometries([aligned_src, fixed_tgt])
def display_image(image):
    """Show *image* in an OpenCV window and block until any key is pressed."""
    window_title = "image"
    cv2.imshow(window_title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def generate_mask_NOCS(nocs_map):
    """
    Extract a foreground mask from a NOCS map.

    A pixel counts as foreground when the mean absolute per-channel
    deviation of its first three channels from pure white exceeds 0.15
    (NOCS renders use a white background).

    :param nocs_map: [H x W x C] float array (C >= 3), values in [0, 1]
    :return: [H x W] boolean mask, True for foreground pixels
    """
    # NOTE: the original code first built a full-size np.ones(nocs_map.shape)
    # array that was immediately overwritten — removed as dead work.
    white = np.array([1, 1, 1])
    image_mask = np.abs(nocs_map[:, :, :3] - white).mean(axis=2) > 0.15
    return image_mask
def read_image(nocs_image_path):
    """Load a NOCS map from disk and return it as an RGB float image in [0, 1]."""
    bgr_pixels = cv2.imread(nocs_image_path)
    rgb_pixels = cv2.cvtColor(bgr_pixels, cv2.COLOR_BGR2RGB)
    return rgb_pixels / 255.0
def visualize_nocs_map(nocs_map, nm, image = None):
    '''
    Builds a 3D point cloud from a NOCS map.

    Arguments:
        nocs_map - [H x W x 3] - NOCS map providing the 3D point coordinates
        nm - [H x W x 3] - NOCS map used only to compute the foreground mask
        image - optional [H x W x 3] RGB image supplying point colours; when
                omitted the NOCS coordinates themselves are used as colours
    Returns:
        open3d.geometry.PointCloud of the masked foreground points

    NOTE: the original version also constructed an unused coordinate frame,
    a unit-cube LineSet and a red colour list that were never rendered or
    returned (the draw call was commented out) — that dead code is removed;
    the returned point cloud is unchanged.
    '''
    h, w = nocs_map.shape[:2]
    nocs_mask = generate_mask_NOCS(nm)
    # Flatten mask and map so foreground points can be selected in one shot.
    nocs_mask_cloud = np.reshape(nocs_mask, (h*w))
    nocs_cloud = np.reshape(nocs_map, (h*w, 3))
    nocs_cloud = nocs_cloud[nocs_mask_cloud == 1.0, :]
    colors = nocs_cloud
    if image is not None:
        image_cloud = np.reshape(image, (h*w, 3))
        colors = image_cloud[nocs_mask_cloud == 1.0, :]
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(nocs_cloud)
    pcd.colors = o3d.utility.Vector3dVector(colors)
    return pcd
if __name__ == "__main__":
    # For every scene folder given on the command line, aligns each NOCS map
    # against its C3DPO counterpart (Umeyama similarity + ICP refinement),
    # applies each recovered rotation to a fixed unit vector, and reports the
    # mean/variance of the angular spread of those rotated vectors.
    # vis_obj = o3d.visualization
    # nocs_image_path = sys.argv[1]
    # image = None
    # nocs_image_path_2 = sys.argv[2]
    # R = np.asarray(
    #     [
    #         [ 0.13113765, -0.96770999, -0.15196208],
    #         [-0.97693124, -0.11753038, -0.09476062],
    #         [ 0.07573157,  0.16221497, -0.97838511],
    #     ]
    # )
    # # Read the NOCS maps and construct point clouds
    # nocs_map = read_image(nocs_image_path)
    # pcd = visualize_nocs_map(nocs_map, nocs_map, image)
    # # T = R @ (np.asarray(pcd.points) - 0.5).T + 0.5
    # nocs_map_2 = read_image(nocs_image_path_2)
    # pcd_2 = visualize_nocs_map(nocs_map_2, nocs_map,image)
    # pcd.paint_uniform_color([0, 0.651, 0.929])
    # pcd_2.paint_uniform_color([1, 0.706, 0])
    # # Visualize the point clouds
    # vis_obj.draw_geometries([pcd, pcd_2])
    # # Center the two NOCS point clouds
    # pcd_points = np.asarray(pcd.points) - 0.5
    # pcd_points_2 = np.asarray(pcd_2.points) - 0.5
    # pcd.points = o3d.utility.Vector3dVector(pcd_points[:,:3])
    # pcd_2.points = o3d.utility.Vector3dVector(pcd_points_2[:,:3])
    # # Visualize the centered point clouds
    # vis_obj.draw_geometries([pcd, pcd_2])
    # # print(pcd_points.shape)
    # Scales, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(pcd_points.T, pcd_points_2.T)
    # vis_obj.draw_geometries([pcd, pcd_2])
    # reg_p2p = o3d.registration.registration_icp(
    #     pcd, pcd_2, 0.2, OutTransform,
    #     o3d.registration.TransformationEstimationPointToPoint())
    # print(reg_p2p.transformation)
    # draw_registration_result(pcd, pcd_2, reg_p2p.transformation)
    # sys.argv[1] is a directory of scene folders; the trailing "*" makes the
    # glob below list every entry inside it.
    path = os.path.join(os.path.abspath(sys.argv[1]), "") + "*"
    #gt_files = sorted(glob.glob("../../../../test/*"))
    gt_files = sorted(glob.glob(path))
    image = None
    arr = []
    x = []
    y = []
    z = []
    unit_vector = np.ones((3, 1))
    unit_vector /= np.linalg.norm(unit_vector)
    vector_after_rotation = []
    for i in tqdm(range(len(gt_files))):
        # NOTE(review): the variable names look swapped — gt_c3dpo globs the
        # *_NOXRayTL_* (NOCS) maps and gt_nocs globs the *_C3DPO_* maps — but
        # the assignments below (nocs_image_gt = gt_c3dpo[j], c3dpo_image_gt =
        # gt_nocs[j]) end up pairing each file with the intended role.
        gt_c3dpo = sorted(glob.glob( gt_files[i] + '/*_NOXRayTL_00.png'))
        gt_nocs = sorted(glob.glob( gt_files[i] + '/*_C3DPO_00.png'))
        for j in range(len(gt_c3dpo)):
            nocs_image_gt = gt_c3dpo[j]
            c3dpo_image_gt = gt_nocs[j]
            # Normalized (1,1,1)/sqrt(3) probe vector, rebuilt every pair.
            unit_vector = np.ones((3, 1))
            unit_vector /= np.linalg.norm(unit_vector)
            nocs_gt = read_image(nocs_image_gt)
            c3dpo_gt = read_image(c3dpo_image_gt)
            # Both clouds are masked with the NOCS foreground mask so the
            # point sets correspond.
            pcd_nocs = visualize_nocs_map(nocs_gt, nocs_gt, image)
            pcd_c3dpo = visualize_nocs_map(c3dpo_gt, nocs_gt, image)
            # Center both clouds around the NOCS-cube origin before alignment.
            nocs_points = np.asarray(pcd_nocs.points) - 0.5
            c3dpo_points = np.asarray(pcd_c3dpo.points) - 0.5
            pcd_nocs.points = o3d.utility.Vector3dVector(nocs_points[:,:3])
            pcd_c3dpo.points = o3d.utility.Vector3dVector(c3dpo_points[:,:3])
            # Closed-form similarity alignment used to seed ICP.
            Scales, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(nocs_points.T, c3dpo_points.T)
            #print(Rotation)
            reg_p2p = o3d.registration.registration_icp(
                pcd_nocs, pcd_c3dpo, 0.2, OutTransform,
                o3d.registration.TransformationEstimationPointToPoint())
            arr.append(reg_p2p.transformation)
            # arr2 is rebuilt each iteration but only feeds the commented-out
            # statistics below.
            arr2 = np.array(arr)
            # print(reg_p2p.transformation)
            mat_loc = mathutils.Matrix(reg_p2p.transformation)
            R_np = np.array(reg_p2p.transformation[:3, :3])
            # Apply the recovered rotation to the probe vector; the small
            # epsilon guards the renormalization against a zero norm.
            rotated_vector = R_np @ unit_vector
            rotated_vector /= (np.linalg.norm(rotated_vector) + 0.000001)
            vector_after_rotation.append(rotated_vector)
            # print(reg_p2p.transformation)
            # print("Variance", np.var(arr2))
            # print("Mean", np.mean(arr2, axis=0))
            # print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
            # print()
            eul = mat_loc.to_euler()
            #print(mat_loc)
            #x.append(math.degrees(eul.x))
            #y.append(math.degrees(eul.y))
            #z.append(math.degrees(eul.z))
            #print('x:',math.degrees(eul.x))
            #print('y:',math.degrees(eul.y))
            #print('z:',math.degrees(eul.z))
            # draw_registration_result(pcd_nocs, pcd_c3dpo, reg_p2p.transformation)
            #break
    #print(vector_after_rotation)
    # Stack all rotated probe vectors column-wise and measure each one's angle
    # against the mean direction; theta is in radians.
    rotated_vector_mat = np.concatenate(vector_after_rotation, axis = 1)
    mean_rotated_vector = np.mean(rotated_vector_mat, axis=1)
    mean_rotated_vector /= (np.linalg.norm(mean_rotated_vector) + 0.000001)
    #print(rotated_vector_mat.shape)
    theta = np.arccos(mean_rotated_vector[:, np.newaxis].T @ rotated_vector_mat)
    #print(theta)
    print("\n mean", np.mean(np.rad2deg(theta)), " ", np.var(np.rad2deg(theta)))
    #print(mean_rotated_vector, mean_rotated_vector.shape)
    # x = np.array(x)
    # np.save('x.npy', x)
    # y = np.array(y)
    # np.save('y.npy', y)
    # z = np.array(z)
    # np.save('z.npy', z)
    # print("Mean X (Degrees)",np.mean(x))
    # print("Mean Y (Degrees)",np.mean(y))
    # print("Mean Z (Degrees)",np.mean(z))
    # print()
    # print("Var X (Degrees)",np.var(x))
    # print("Var Y (Degrees)",np.var(y))
    # print("Var Z (Degrees)",np.var(z))
    # print()
    # print("X")
    # for i in x:
    #     print(i)
    # print()
    # print("Y")
    # for i in y:
    #     print(i)
    # print()
    # print("Z")
    # for i in z:
    #     print(i)
| [
"mathutils.Matrix",
"numpy.arccos",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.linalg.norm",
"copy.deepcopy",
"numpy.mean",
"numpy.reshape",
"open3d.utility.Vector2iVector",
"numpy.asarray",
"numpy.concatenate",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"num... | [((396, 417), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (409, 417), False, 'import copy\n'), ((436, 457), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (449, 457), False, 'import copy\n'), ((610, 671), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[source_temp, target_temp]'], {}), '([source_temp, target_temp])\n', (643, 671), True, 'import open3d as o3d\n'), ((705, 731), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (715, 731), False, 'import cv2\n'), ((736, 750), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (747, 750), False, 'import cv2\n'), ((755, 778), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (776, 778), False, 'import cv2\n'), ((927, 946), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (935, 946), True, 'import numpy as np\n'), ((1129, 1156), 'cv2.imread', 'cv2.imread', (['nocs_image_path'], {}), '(nocs_image_path)\n', (1139, 1156), False, 'import cv2\n'), ((1682, 1710), 'numpy.reshape', 'np.reshape', (['nocs_mask', '(h * w)'], {}), '(nocs_mask, h * w)\n', (1692, 1710), True, 'import numpy as np\n'), ((1763, 1795), 'numpy.reshape', 'np.reshape', (['nocs_map', '(h * w, 3)'], {}), '(nocs_map, (h * w, 3))\n', (1773, 1795), True, 'import numpy as np\n'), ((2102, 2127), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2125, 2127), True, 'import open3d as o3d\n'), ((2145, 2183), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['nocs_cloud'], {}), '(nocs_cloud)\n', (2171, 2183), True, 'import open3d as o3d\n'), ((2201, 2235), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (2227, 2235), True, 'import open3d as o3d\n'), ((2253, 2330), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(1.0)', 'origin': '[0, 0, 0]'}), '(size=1.0, 
origin=[0, 0, 0])\n', (2302, 2330), True, 'import open3d as o3d\n'), ((5163, 5178), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (5170, 5178), True, 'import numpy as np\n'), ((5198, 5225), 'numpy.linalg.norm', 'np.linalg.norm', (['unit_vector'], {}), '(unit_vector)\n', (5212, 5225), True, 'import numpy as np\n'), ((7644, 7689), 'numpy.concatenate', 'np.concatenate', (['vector_after_rotation'], {'axis': '(1)'}), '(vector_after_rotation, axis=1)\n', (7658, 7689), True, 'import numpy as np\n'), ((7719, 7754), 'numpy.mean', 'np.mean', (['rotated_vector_mat'], {'axis': '(1)'}), '(rotated_vector_mat, axis=1)\n', (7726, 7754), True, 'import numpy as np\n'), ((7881, 7949), 'numpy.arccos', 'np.arccos', (['(mean_rotated_vector[:, np.newaxis].T @ rotated_vector_mat)'], {}), '(mean_rotated_vector[:, np.newaxis].T @ rotated_vector_mat)\n', (7890, 7949), True, 'import numpy as np\n'), ((887, 910), 'numpy.ones', 'np.ones', (['nocs_map.shape'], {}), '(nocs_map.shape)\n', (894, 910), True, 'import numpy as np\n'), ((1172, 1213), 'cv2.cvtColor', 'cv2.cvtColor', (['nocs_map', 'cv2.COLOR_BGR2RGB'], {}), '(nocs_map, cv2.COLOR_BGR2RGB)\n', (1184, 1213), False, 'import cv2\n'), ((1973, 2002), 'numpy.reshape', 'np.reshape', (['image', '(h * w, 3)'], {}), '(image, (h * w, 3))\n', (1983, 2002), True, 'import numpy as np\n'), ((5064, 5079), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (5073, 5079), False, 'import glob\n'), ((7783, 7818), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_rotated_vector'], {}), '(mean_rotated_vector)\n', (7797, 7818), True, 'import numpy as np\n'), ((3072, 3106), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (3098, 3106), True, 'import open3d as o3d\n'), ((3122, 3155), 'open3d.utility.Vector2iVector', 'o3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (3148, 3155), True, 'import open3d as o3d\n'), ((4946, 4974), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), 
'(sys.argv[1])\n', (4961, 4974), False, 'import sys, os\n'), ((5327, 5372), 'glob.glob', 'glob.glob', (["(gt_files[i] + '/*_NOXRayTL_00.png')"], {}), "(gt_files[i] + '/*_NOXRayTL_00.png')\n", (5336, 5372), False, 'import glob\n'), ((5401, 5443), 'glob.glob', 'glob.glob', (["(gt_files[i] + '/*_C3DPO_00.png')"], {}), "(gt_files[i] + '/*_C3DPO_00.png')\n", (5410, 5443), False, 'import glob\n'), ((5592, 5607), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (5599, 5607), True, 'import numpy as np\n'), ((5635, 5662), 'numpy.linalg.norm', 'np.linalg.norm', (['unit_vector'], {}), '(unit_vector)\n', (5649, 5662), True, 'import numpy as np\n'), ((6052, 6098), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['nocs_points[:, :3]'], {}), '(nocs_points[:, :3])\n', (6078, 6098), True, 'import open3d as o3d\n'), ((6129, 6176), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['c3dpo_points[:, :3]'], {}), '(c3dpo_points[:, :3])\n', (6155, 6176), True, 'import open3d as o3d\n'), ((6235, 6291), 'tk3dv.nocstools.aligning.estimateSimilarityUmeyama', 'estimateSimilarityUmeyama', (['nocs_points.T', 'c3dpo_points.T'], {}), '(nocs_points.T, c3dpo_points.T)\n', (6260, 6291), False, 'from tk3dv.nocstools.aligning import estimateSimilarityUmeyama\n'), ((6575, 6588), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (6583, 6588), True, 'import numpy as np\n'), ((6656, 6696), 'mathutils.Matrix', 'mathutils.Matrix', (['reg_p2p.transformation'], {}), '(reg_p2p.transformation)\n', (6672, 6696), False, 'import mathutils\n'), ((6716, 6756), 'numpy.array', 'np.array', (['reg_p2p.transformation[:3, :3]'], {}), '(reg_p2p.transformation[:3, :3])\n', (6724, 6756), True, 'import numpy as np\n'), ((7998, 8015), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (8008, 8015), True, 'import numpy as np\n'), ((8030, 8047), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (8040, 8047), True, 'import numpy as np\n'), ((964, 998), 'numpy.abs', 
'np.abs', (['(nocs_map[:, :, :3] - white)'], {}), '(nocs_map[:, :, :3] - white)\n', (970, 998), True, 'import numpy as np\n'), ((5926, 5953), 'numpy.asarray', 'np.asarray', (['pcd_nocs.points'], {}), '(pcd_nocs.points)\n', (5936, 5953), True, 'import numpy as np\n'), ((5987, 6015), 'numpy.asarray', 'np.asarray', (['pcd_c3dpo.points'], {}), '(pcd_c3dpo.points)\n', (5997, 6015), True, 'import numpy as np\n'), ((6451, 6506), 'open3d.registration.TransformationEstimationPointToPoint', 'o3d.registration.TransformationEstimationPointToPoint', ([], {}), '()\n', (6504, 6506), True, 'import open3d as o3d\n'), ((6837, 6867), 'numpy.linalg.norm', 'np.linalg.norm', (['rotated_vector'], {}), '(rotated_vector)\n', (6851, 6867), True, 'import numpy as np\n')] |
"""
examine performance of image collection for 3 cameras. it is implied that data for all 3 cameras is available.
input: dataset name
'black level all', 'black level analog', 'black level digital', 'exposure time', 'frameIDs', 'gain', 'image height', 'image width', 'images', 'temperature'
, 'time', 'timestamps_camera', 'timestamps_lab'
"""
import sys
import os
from h5py import File
import numpy as np
from matplotlib import pyplot as plt
# One representative filename is passed on the command line; the other
# cameras' files are derived from it by swapping the camera prefix.
dataset_filename = sys.argv[1]
head, tail = os.path.split(dataset_filename)
lst = ['dm4', 'dm16', 'dm34']
filename = {}
fhdf5 = {}
timestamps = {}
for cam in lst:
    per_camera_tail = '_'.join([cam] + tail.split('_')[1:])
    filename[cam] = os.path.join(head, per_camera_tail)
    # Handles stay open on purpose: h5py datasets are read lazily below.
    fhdf5[cam] = File(filename[cam], 'r')
    timestamps[cam] = fhdf5[cam]['timestamps_camera']
plt.ion()
plt.figure()
for cam in lst:
    ts = timestamps[cam]
    # Elapsed time vs. inter-frame interval (timestamps are in microseconds).
    elapsed = (ts - ts[0])[:-2]
    frame_intervals = np.diff(ts)[:-1] * 10 ** -6
    plt.plot(elapsed, frame_intervals, label=cam)
| [
"matplotlib.pyplot.plot",
"numpy.diff",
"h5py.File",
"os.path.split",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion"
] | [((521, 552), 'os.path.split', 'os.path.split', (['dataset_filename'], {}), '(dataset_filename)\n', (534, 552), False, 'import os\n'), ((818, 827), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (825, 827), True, 'from matplotlib import pyplot as plt\n'), ((828, 840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (838, 840), True, 'from matplotlib import pyplot as plt\n'), ((736, 761), 'h5py.File', 'File', (['filename[item]', '"""r"""'], {}), "(filename[item], 'r')\n", (740, 761), False, 'from h5py import File\n'), ((960, 986), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'item'}), '(x, y, label=item)\n', (968, 986), True, 'from matplotlib import pyplot as plt\n'), ((918, 943), 'numpy.diff', 'np.diff', (['timestamps[item]'], {}), '(timestamps[item])\n', (925, 943), True, 'import numpy as np\n')] |
import unittest
import os.path
import numpy as np
import numpy.lib.recfunctions as rfn
from geodepy.convert import (hp2dec, dec2hp, rect2polar, polar2rect,
grid2geo, llh2xyz, DMSAngle)
from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu
class TestGeodesy(unittest.TestCase):
    """Regression tests for geodepy's Vincenty and local-vector routines,
    pinned against published Flinders Peak / Buninyong reference values."""
    def test_enu2xyz(self):
        """Round-trip a local ENU vector to cartesian XYZ and back."""
        MOBS_MGA2020 = (55, 321820.085, 5811181.510, 40.570)
        MOBS_MGA1994 = (55, 321819.594, 5811180.038, 40.659)
        # Convert UTM Projection Coordinates to Geographic Coordinates
        MOBS_GDA2020 = grid2geo(MOBS_MGA2020[0], MOBS_MGA2020[1], MOBS_MGA2020[2])
        MOBS_GDA1994 = grid2geo(MOBS_MGA1994[0], MOBS_MGA1994[1], MOBS_MGA1994[2])
        # Convert Geographic Coordinates to Cartesian XYZ Coordinates
        MOBS_GDA2020_XYZ = llh2xyz(MOBS_GDA2020[0], MOBS_GDA2020[1], MOBS_MGA2020[3])
        MOBS_GDA1994_XYZ = llh2xyz(MOBS_GDA1994[0], MOBS_GDA1994[1], MOBS_MGA1994[3])
        # Generate Vector Between UTM Projection Coordinates
        mga_vector = [MOBS_MGA2020[1] - MOBS_MGA1994[1],
                      MOBS_MGA2020[2] - MOBS_MGA1994[2],
                      MOBS_MGA2020[3] - MOBS_MGA1994[3]]
        # Generate Vector Between Cartesian XYZ Coordinates
        xyz_vector = (MOBS_GDA2020_XYZ[0] - MOBS_GDA1994_XYZ[0],
                      MOBS_GDA2020_XYZ[1] - MOBS_GDA1994_XYZ[1],
                      MOBS_GDA2020_XYZ[2] - MOBS_GDA1994_XYZ[2])
        # Rotate UTM Projection Vector by Grid Convergence
        grid_dist, grid_brg = rect2polar(mga_vector[0], mga_vector[1])
        local_east, local_north = polar2rect(grid_dist, grid_brg - MOBS_GDA2020[3])
        local_vector = (local_east, local_north, mga_vector[2])
        # Calculate XYZ Vector using Local Vector Components
        x, y, z = enu2xyz(MOBS_GDA2020[0], MOBS_GDA2020[1], *local_vector)
        self.assertAlmostEqual(x, xyz_vector[0], 4)
        self.assertAlmostEqual(y, xyz_vector[1], 4)
        self.assertAlmostEqual(z, xyz_vector[2], 4)
        # Calculate Local Vector using XYZ Vector Components
        e, n, u = xyz2enu(MOBS_GDA2020[0], MOBS_GDA2020[1], *xyz_vector)
        self.assertAlmostEqual(e, local_vector[0], 4)
        self.assertAlmostEqual(n, local_vector[1], 4)
        self.assertAlmostEqual(u, local_vector[2], 4)
    def test_vincinv(self):
        """Vincenty inverse: distance/azimuths for DD, DMS and DDM inputs,
        plus the coincident-point case which must return (0, 0, 0)."""
        # Flinders Peak
        lat1 = hp2dec(-37.57037203)
        lon1 = hp2dec(144.25295244)
        lat1_DMS = DMSAngle(-37, 57, 3.7203)
        lon1_DMS = DMSAngle(144, 25, 29.5244)
        # Buninyong
        lat2 = hp2dec(-37.39101561)
        lon2 = hp2dec(143.55353839)
        lat2_DMS = DMSAngle(-37, 39, 10.1561)
        lon2_DMS = DMSAngle(143, 55, 35.3839)
        # Test Decimal Degrees Input
        ell_dist, azimuth1to2, azimuth2to1 = vincinv(lat1, lon1, lat2, lon2)
        self.assertEqual(round(ell_dist, 3), 54972.271)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
        # additional test case:
        pl1 = (-29.85, 140.71666666666667)
        pl2 = (-29.85, 140.76666666666667)
        ell_dist, azimuth1to2, azimuth2to1 = vincinv(pl1[0], pl1[1], pl2[0], pl2[1])
        self.assertEqual(round(ell_dist, 3), 4831.553)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 90.004480)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 269.591520)
        test2 = vincinv(lat1, lon1, lat1, lon1)
        self.assertEqual(test2, (0, 0, 0))
        # Test DMSAngle Input
        ell_dist, azimuth1to2, azimuth2to1 = vincinv(lat1_DMS, lon1_DMS,
                                                     lat2_DMS, lon2_DMS)
        self.assertEqual(round(ell_dist, 3), 54972.271)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
        test2 = vincinv(lat1_DMS, lon1_DMS, lat1_DMS, lon1_DMS)
        self.assertEqual(test2, (0, 0, 0))
        # Test DDMAngle Input
        (ell_dist,
         azimuth1to2,
         azimuth2to1) = vincinv(lat1_DMS.ddm(), lon1_DMS.ddm(),
                                lat2_DMS.ddm(), lon2_DMS.ddm())
        self.assertEqual(round(ell_dist, 3), 54972.271)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
        test2 = vincinv(lat1_DMS.ddm(), lon1_DMS.ddm(),
                        lat1_DMS.ddm(), lon1_DMS.ddm())
        self.assertEqual(test2, (0, 0, 0))
    def test_vincdir(self):
        """Vincenty direct: destination point and reverse azimuth for DD,
        DMS and DDM azimuth inputs."""
        # Flinders Peak
        lat1 = hp2dec(-37.57037203)
        lon1 = hp2dec(144.25295244)
        lat1_DMS = DMSAngle(-37, 57, 3.7203)
        lon1_DMS = DMSAngle(144, 25, 29.5244)
        # To Buninyong
        azimuth1to2 = hp2dec(306.520537)
        azimuth1to2_DMS = DMSAngle(306, 52, 5.37)
        ell_dist = 54972.271
        # Test Decimal Degrees Input
        lat2, lon2, azimuth2to1 = vincdir(lat1, lon1, azimuth1to2, ell_dist)
        self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
        self.assertEqual(round(dec2hp(lon2), 8), 143.55353839)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
        # Test DMSAngle Input
        lat2, long2, azimuth2to1 = vincdir(lat1_DMS, lon1_DMS,
                                           azimuth1to2_DMS, ell_dist)
        self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
        self.assertEqual(round(dec2hp(long2), 8), 143.55353839)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
        # Test DDMAngle Input
        lat2, long2, azimuth2to1 = vincdir(lat1_DMS.ddm(), lon1_DMS.ddm(),
                                           azimuth1to2_DMS.ddm(), ell_dist)
        self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
        self.assertEqual(round(dec2hp(long2), 8), 143.55353839)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)
    def test_vincinv_utm(self):
        """Vincenty inverse on UTM grid coordinates, same-zone and
        cross-zone (the reverse grid bearing differs across zones)."""
        # Flinders Peak (UTM 55)
        zone1 = 55
        east1 = 273741.2966
        north1 = 5796489.7769
        # Buninyong (UTM 55)
        zone2 = 55
        east2 = 228854.0513
        north2 = 5828259.0384
        # Buninyong (UTM 54)
        zone3 = 54
        east3 = 758173.7973
        north3 = 5828674.3402
        # Test Coordinates in Zone 55 only
        grid_dist, grid1to2, grid2to1, lsf = vincinv_utm(zone1, east1, north1,
                                                         zone2, east2, north2)
        self.assertAlmostEqual(lsf, 1.00036397, 8)
        self.assertAlmostEqual(grid_dist, 54992.279, 3)
        self.assertAlmostEqual(dec2hp(grid1to2), 305.17017259, 7)
        self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
        # Test Coordinates in Different Zones (55 and 54)
        # (Point 2 Grid Bearing Different (Zone 54 Grid Bearing))
        grid_dist, grid1to2, grid2to1, lsf = vincinv_utm(zone1, east1, north1,
                                                         zone3, east3, north3)
        self.assertAlmostEqual(lsf, 1.00036397, 8)
        self.assertAlmostEqual(grid_dist, 54992.279, 3)
        self.assertAlmostEqual(dec2hp(grid1to2), 305.17017259, 7)
        self.assertAlmostEqual(dec2hp(grid2to1), 128.57444307, 7)
    def test_vincdir_utm(self):
        """Vincenty direct on UTM grid coordinates for DD and DMS grid
        bearings; checks zone, easting, northing, reverse bearing and LSF."""
        # Flinders Peak (UTM 55)
        zone1 = 55
        east1 = 273741.2966
        north1 = 5796489.7769
        # Grid Dimensions to Point 2 (Buninyong)
        grid_dist = 54992.279
        grid1to2 = hp2dec(305.17017259)
        grid1to2_DMS = DMSAngle(305, 17, 1.7259)
        # Test Decimal Degrees Input
        (zone2, east2, north2,
         grid2to1, lsf) = vincdir_utm(zone1, east1, north1,
                                      grid1to2, grid_dist)
        self.assertEqual(zone2, zone1)
        self.assertAlmostEqual(east2, 228854.0513, 3)
        self.assertAlmostEqual(north2, 5828259.0384, 3)
        self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
        self.assertAlmostEqual(lsf, 1.00036397, 8)
        # Test DMSAngle Input
        (zone2, east2, north2,
         grid2to1, lsf) = vincdir_utm(zone1, east1, north1,
                                      grid1to2_DMS, grid_dist)
        self.assertEqual(zone2, zone1)
        self.assertAlmostEqual(east2, 228854.0513, 3)
        self.assertAlmostEqual(north2, 5828259.0384, 3)
        self.assertAlmostEqual(dec2hp(grid2to1), 125.17418518, 7)
        self.assertAlmostEqual(lsf, 1.00036397, 8)
    def test_equality_vincentys(self):
        """Inverse/direct consistency over a CSV of points: vincdir applied
        to vincinv's output must reproduce the second point of each pair.
        np.roll pairs each point with the previous one (wrapping around)."""
        # Test multiple point-to-point vincinv calculations
        abs_path = os.path.abspath(os.path.dirname(__file__))
        test_geo_coords =\
            np.genfromtxt(os.path.join(abs_path,
                                       'resources/Test_Conversion_Geo.csv'),
                          delimiter=',',
                          dtype='S4,f8,f8',
                          names=['site', 'lat1', 'long1'],
                          usecols=('lat1', 'long1'))
        test_geo_coord2 = \
            np.genfromtxt(os.path.join(abs_path,
                                       'resources/Test_Conversion_Geo.csv'),
                          delimiter=',',
                          dtype='S4,f8,f8',
                          names=['site', 'lat2', 'long2'],
                          usecols=('lat2', 'long2'))
        # Form array with point pairs from test file
        test_pairs = rfn.merge_arrays([test_geo_coords, np.roll(test_geo_coord2, 1)], flatten=True)
        # Calculate Vincenty's Inverse Result using Lat, Long Pairs
        vincinv_result = np.array(list(vincinv(*x) for x in test_pairs[['lat1', 'long1', 'lat2', 'long2']]))
        # Calculate Vincenty's Direct Result using Results from Inverse Function
        vincdir_input = rfn.merge_arrays([test_geo_coords, vincinv_result[:, 1], vincinv_result[:, 0]], flatten=True)
        vincdir_input.dtype.names = ['lat1', 'long1', 'az1to2', 'ell_dist']
        vincdir_result = np.array(list(vincdir(*x) for x in vincdir_input[['lat1', 'long1', 'az1to2', 'ell_dist']]))
        np.testing.assert_almost_equal(test_pairs['lat2'],
                                       vincdir_result[:, 0], decimal=8)
        np.testing.assert_almost_equal(test_pairs['long2'],
                                       vincdir_result[:, 1], decimal=8)
        np.testing.assert_almost_equal(vincinv_result[:, 2],
                                       vincdir_result[:, 2])
    def test_vincinv_edgecases(self):
        """Vincenty inverse must be invariant under a pure longitude shift
        applied to both points."""
        lat1 = -32.153892
        lon1 = -15.394827
        lat2 = -31.587369
        lon2 = -13.487739
        gdist, az12, az21 = vincinv(lat1, lon1, lat2, lon2)
        lon1 = lon1 + 14
        lon2 = lon2 + 14
        gdist_2, az12_2, az21_2 = vincinv(lat1, lon1, lat2, lon2)
        self.assertEqual(gdist, gdist_2)
        self.assertEqual(az12, az12_2)
        self.assertEqual(az21, az21_2)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| [
"geodepy.convert.DMSAngle",
"geodepy.convert.polar2rect",
"geodepy.geodesy.vincdir_utm",
"geodepy.geodesy.xyz2enu",
"numpy.roll",
"geodepy.convert.llh2xyz",
"geodepy.convert.grid2geo",
"geodepy.geodesy.enu2xyz",
"geodepy.geodesy.vincinv_utm",
"geodepy.convert.rect2polar",
"numpy.lib.recfunctions... | [((11049, 11064), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11062, 11064), False, 'import unittest\n'), ((588, 647), 'geodepy.convert.grid2geo', 'grid2geo', (['MOBS_MGA2020[0]', 'MOBS_MGA2020[1]', 'MOBS_MGA2020[2]'], {}), '(MOBS_MGA2020[0], MOBS_MGA2020[1], MOBS_MGA2020[2])\n', (596, 647), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((671, 730), 'geodepy.convert.grid2geo', 'grid2geo', (['MOBS_MGA1994[0]', 'MOBS_MGA1994[1]', 'MOBS_MGA1994[2]'], {}), '(MOBS_MGA1994[0], MOBS_MGA1994[1], MOBS_MGA1994[2])\n', (679, 730), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((829, 887), 'geodepy.convert.llh2xyz', 'llh2xyz', (['MOBS_GDA2020[0]', 'MOBS_GDA2020[1]', 'MOBS_MGA2020[3]'], {}), '(MOBS_GDA2020[0], MOBS_GDA2020[1], MOBS_MGA2020[3])\n', (836, 887), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((915, 973), 'geodepy.convert.llh2xyz', 'llh2xyz', (['MOBS_GDA1994[0]', 'MOBS_GDA1994[1]', 'MOBS_MGA1994[3]'], {}), '(MOBS_GDA1994[0], MOBS_GDA1994[1], MOBS_MGA1994[3])\n', (922, 973), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((1553, 1593), 'geodepy.convert.rect2polar', 'rect2polar', (['mga_vector[0]', 'mga_vector[1]'], {}), '(mga_vector[0], mga_vector[1])\n', (1563, 1593), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((1628, 1677), 'geodepy.convert.polar2rect', 'polar2rect', (['grid_dist', '(grid_brg - MOBS_GDA2020[3])'], {}), '(grid_dist, grid_brg - MOBS_GDA2020[3])\n', (1638, 1677), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((1822, 1878), 'geodepy.geodesy.enu2xyz', 'enu2xyz', (['MOBS_GDA2020[0]', 'MOBS_GDA2020[1]', '*local_vector'], {}), '(MOBS_GDA2020[0], 
MOBS_GDA2020[1], *local_vector)\n', (1829, 1878), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((2115, 2169), 'geodepy.geodesy.xyz2enu', 'xyz2enu', (['MOBS_GDA2020[0]', 'MOBS_GDA2020[1]', '*xyz_vector'], {}), '(MOBS_GDA2020[0], MOBS_GDA2020[1], *xyz_vector)\n', (2122, 2169), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((2400, 2420), 'geodepy.convert.hp2dec', 'hp2dec', (['(-37.57037203)'], {}), '(-37.57037203)\n', (2406, 2420), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2436, 2456), 'geodepy.convert.hp2dec', 'hp2dec', (['(144.25295244)'], {}), '(144.25295244)\n', (2442, 2456), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2476, 2501), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(-37)', '(57)', '(3.7203)'], {}), '(-37, 57, 3.7203)\n', (2484, 2501), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2521, 2547), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(144)', '(25)', '(29.5244)'], {}), '(144, 25, 29.5244)\n', (2529, 2547), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2584, 2604), 'geodepy.convert.hp2dec', 'hp2dec', (['(-37.39101561)'], {}), '(-37.39101561)\n', (2590, 2604), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2620, 2640), 'geodepy.convert.hp2dec', 'hp2dec', (['(143.55353839)'], {}), '(143.55353839)\n', (2626, 2640), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2660, 2686), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(-37)', '(39)', '(10.1561)'], {}), '(-37, 39, 10.1561)\n', (2668, 2686), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, 
grid2geo, llh2xyz, DMSAngle\n'), ((2706, 2732), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(143)', '(55)', '(35.3839)'], {}), '(143, 55, 35.3839)\n', (2714, 2732), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2816, 2847), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1', 'lon1', 'lat2', 'lon2'], {}), '(lat1, lon1, lat2, lon2)\n', (2823, 2847), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((3204, 3243), 'geodepy.geodesy.vincinv', 'vincinv', (['pl1[0]', 'pl1[1]', 'pl2[0]', 'pl2[1]'], {}), '(pl1[0], pl1[1], pl2[0], pl2[1])\n', (3211, 3243), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((3451, 3482), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1', 'lon1', 'lat1', 'lon1'], {}), '(lat1, lon1, lat1, lon1)\n', (3458, 3482), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((3602, 3649), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1_DMS', 'lon1_DMS', 'lat2_DMS', 'lon2_DMS'], {}), '(lat1_DMS, lon1_DMS, lat2_DMS, lon2_DMS)\n', (3609, 3649), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((3912, 3959), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1_DMS', 'lon1_DMS', 'lat1_DMS', 'lon1_DMS'], {}), '(lat1_DMS, lon1_DMS, lat1_DMS, lon1_DMS)\n', (3919, 3959), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((4619, 4639), 'geodepy.convert.hp2dec', 'hp2dec', (['(-37.57037203)'], {}), '(-37.57037203)\n', (4625, 4639), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4655, 4675), 'geodepy.convert.hp2dec', 'hp2dec', (['(144.25295244)'], {}), '(144.25295244)\n', (4661, 4675), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4695, 
4720), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(-37)', '(57)', '(3.7203)'], {}), '(-37, 57, 3.7203)\n', (4703, 4720), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4740, 4766), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(144)', '(25)', '(29.5244)'], {}), '(144, 25, 29.5244)\n', (4748, 4766), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4813, 4831), 'geodepy.convert.hp2dec', 'hp2dec', (['(306.520537)'], {}), '(306.520537)\n', (4819, 4831), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4858, 4881), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(306)', '(52)', '(5.37)'], {}), '(306, 52, 5.37)\n', (4866, 4881), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4983, 5025), 'geodepy.geodesy.vincdir', 'vincdir', (['lat1', 'lon1', 'azimuth1to2', 'ell_dist'], {}), '(lat1, lon1, azimuth1to2, ell_dist)\n', (4990, 5025), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((5286, 5340), 'geodepy.geodesy.vincdir', 'vincdir', (['lat1_DMS', 'lon1_DMS', 'azimuth1to2_DMS', 'ell_dist'], {}), '(lat1_DMS, lon1_DMS, azimuth1to2_DMS, ell_dist)\n', (5293, 5340), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((6400, 6455), 'geodepy.geodesy.vincinv_utm', 'vincinv_utm', (['zone1', 'east1', 'north1', 'zone2', 'east2', 'north2'], {}), '(zone1, east1, north1, zone2, east2, north2)\n', (6411, 6455), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((6922, 6977), 'geodepy.geodesy.vincinv_utm', 'vincinv_utm', (['zone1', 'east1', 'north1', 'zone3', 'east3', 'north3'], {}), '(zone1, east1, north1, zone3, east3, north3)\n', (6933, 6977), False, 'from geodepy.geodesy import vincinv, vincdir, 
vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((7515, 7535), 'geodepy.convert.hp2dec', 'hp2dec', (['(305.17017259)'], {}), '(305.17017259)\n', (7521, 7535), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((7559, 7584), 'geodepy.convert.DMSAngle', 'DMSAngle', (['(305)', '(17)', '(1.7259)'], {}), '(305, 17, 1.7259)\n', (7567, 7584), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((7680, 7734), 'geodepy.geodesy.vincdir_utm', 'vincdir_utm', (['zone1', 'east1', 'north1', 'grid1to2', 'grid_dist'], {}), '(zone1, east1, north1, grid1to2, grid_dist)\n', (7691, 7734), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((8127, 8185), 'geodepy.geodesy.vincdir_utm', 'vincdir_utm', (['zone1', 'east1', 'north1', 'grid1to2_DMS', 'grid_dist'], {}), '(zone1, east1, north1, grid1to2_DMS, grid_dist)\n', (8138, 8185), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((9905, 10003), 'numpy.lib.recfunctions.merge_arrays', 'rfn.merge_arrays', (['[test_geo_coords, vincinv_result[:, 1], vincinv_result[:, 0]]'], {'flatten': '(True)'}), '([test_geo_coords, vincinv_result[:, 1], vincinv_result[:, \n 0]], flatten=True)\n', (9921, 10003), True, 'import numpy.lib.recfunctions as rfn\n'), ((10201, 10288), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["test_pairs['lat2']", 'vincdir_result[:, 0]'], {'decimal': '(8)'}), "(test_pairs['lat2'], vincdir_result[:, 0],\n decimal=8)\n", (10231, 10288), True, 'import numpy as np\n'), ((10332, 10420), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["test_pairs['long2']", 'vincdir_result[:, 1]'], {'decimal': '(8)'}), "(test_pairs['long2'], vincdir_result[:, 1],\n decimal=8)\n", (10362, 10420), True, 'import numpy as np\n'), ((10464, 10538), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['vincinv_result[:, 2]', 'vincdir_result[:, 2]'], {}), '(vincinv_result[:, 2], vincdir_result[:, 2])\n', (10494, 10538), True, 'import numpy as np\n'), ((10749, 10780), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1', 'lon1', 'lat2', 'lon2'], {}), '(lat1, lon1, lat2, lon2)\n', (10756, 10780), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((10865, 10896), 'geodepy.geodesy.vincinv', 'vincinv', (['lat1', 'lon1', 'lat2', 'lon2'], {}), '(lat1, lon1, lat2, lon2)\n', (10872, 10896), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((6651, 6667), 'geodepy.convert.dec2hp', 'dec2hp', (['grid1to2'], {}), '(grid1to2)\n', (6657, 6667), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((6717, 6733), 'geodepy.convert.dec2hp', 'dec2hp', (['grid2to1'], {}), '(grid2to1)\n', (6723, 6733), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((7173, 7189), 'geodepy.convert.dec2hp', 'dec2hp', (['grid1to2'], {}), '(grid1to2)\n', (7179, 7189), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((7239, 7255), 'geodepy.convert.dec2hp', 'dec2hp', (['grid2to1'], {}), '(grid2to1)\n', (7245, 7255), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((7953, 7969), 'geodepy.convert.dec2hp', 'dec2hp', (['grid2to1'], {}), '(grid2to1)\n', (7959, 7969), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((8404, 8420), 'geodepy.convert.dec2hp', 'dec2hp', (['grid2to1'], {}), '(grid2to1)\n', (8410, 8420), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((2935, 2954), 'geodepy.convert.dec2hp', 'dec2hp', 
(['azimuth1to2'], {}), '(azimuth1to2)\n', (2941, 2954), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((3003, 3022), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (3009, 3022), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((3330, 3349), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth1to2'], {}), '(azimuth1to2)\n', (3336, 3349), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((3397, 3416), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (3403, 3416), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((3790, 3809), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth1to2'], {}), '(azimuth1to2)\n', (3796, 3809), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((3858, 3877), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (3864, 3877), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4290, 4309), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth1to2'], {}), '(azimuth1to2)\n', (4296, 4309), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((4358, 4377), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (4364, 4377), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5057, 5069), 'geodepy.convert.dec2hp', 'dec2hp', (['lat2'], {}), '(lat2)\n', (5063, 5069), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5120, 5132), 'geodepy.convert.dec2hp', 'dec2hp', (['lon2'], {}), '(lon2)\n', (5126, 5132), False, 'from 
geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5183, 5202), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (5189, 5202), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5415, 5427), 'geodepy.convert.dec2hp', 'dec2hp', (['lat2'], {}), '(lat2)\n', (5421, 5427), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5478, 5491), 'geodepy.convert.dec2hp', 'dec2hp', (['long2'], {}), '(long2)\n', (5484, 5491), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5542, 5561), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (5548, 5561), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5792, 5804), 'geodepy.convert.dec2hp', 'dec2hp', (['lat2'], {}), '(lat2)\n', (5798, 5804), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5855, 5868), 'geodepy.convert.dec2hp', 'dec2hp', (['long2'], {}), '(long2)\n', (5861, 5868), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((5919, 5938), 'geodepy.convert.dec2hp', 'dec2hp', (['azimuth2to1'], {}), '(azimuth2to1)\n', (5925, 5938), False, 'from geodepy.convert import hp2dec, dec2hp, rect2polar, polar2rect, grid2geo, llh2xyz, DMSAngle\n'), ((9577, 9604), 'numpy.roll', 'np.roll', (['test_geo_coord2', '(1)'], {}), '(test_geo_coord2, 1)\n', (9584, 9604), True, 'import numpy as np\n'), ((9729, 9740), 'geodepy.geodesy.vincinv', 'vincinv', (['*x'], {}), '(*x)\n', (9736, 9740), False, 'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n'), ((10114, 10125), 'geodepy.geodesy.vincdir', 'vincdir', (['*x'], {}), '(*x)\n', (10121, 10125), False, 
'from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm, enu2xyz, xyz2enu\n')] |
import numpy
import math
from mta.dataset import Dataset
from copy import copy
class Profile:
    """Histogram profiles of a rating dataset.

    Every public method returns ``(dist, labels)`` where ``dist[i]`` counts
    the values falling into stack ``i`` (each stack is ``stack_range`` wide
    and the last stack also absorbs everything above the range) and
    ``labels[i]`` is the lower bound of stack ``i``.
    """

    @classmethod
    def rating_dist(cls, dataset, stacks=11, stack_range=1000):
        """Distribution of the raw rating values."""
        rating_list = numpy.array(dataset.ratings.to_list())[:, 2].flat
        dist = cls._make_dist(rating_list, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def rating_std_dist_on_items(cls, dataset, stacks=11, stack_range=100):
        """Distribution of the per-item standard deviation of ratings."""
        item_size = dataset.ratings.matrix_shape()[1]
        rating_stds = numpy.zeros(item_size, dtype=float)
        rating_matrix = dataset.ratings.to_matrix()
        for i_id in range(len(rating_matrix[0])):
            # Sample standard deviation (ddof=1) over the item's column.
            rating_stds[i_id] = rating_matrix[:, i_id].std(ddof=1)
        dist = cls._make_dist(rating_stds, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def factor_dist_on_users(cls, dataset, stacks=21, stack_range=1):
        """Distribution of the number of touched factors per user."""
        user_size = dataset.matrix_shape()[0]
        user_num_factors = numpy.zeros(user_size, dtype=int)
        user_factor_matrix = dataset.touchs.to_matrix()
        for u_id in range(user_size):
            user_num_factors[u_id] = user_factor_matrix[u_id].sum()
        dist = cls._make_dist(user_num_factors, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def factor_dist_on_items(cls, dataset, stacks=21, stack_range=1):
        """Distribution of the number of touched factors per item.

        An item touches a factor when at least one user who rated the item
        touches that factor.
        """
        item_size = dataset.matrix_shape()[2]
        factor_size = dataset.matrix_shape()[1]
        item_factor_table = numpy.zeros((item_size, factor_size), dtype=bool)
        item_num_factors = numpy.zeros(item_size, dtype=int)
        user_factor_matrix = dataset.touchs.to_matrix()
        for u_id, i_id, rating in dataset.ratings.to_list():
            for f_id in range(factor_size):
                if user_factor_matrix[u_id][f_id] == 1:
                    item_factor_table[i_id][f_id] = True
        for i_id in range(len(item_factor_table)):
            item_num_factors[i_id] = item_factor_table[i_id].sum()
        dist = cls._make_dist(item_num_factors, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def item_dist(cls, dataset, stacks=11, stack_range=10):
        """Distribution of the number of ratings per item."""
        item_size = dataset.matrix_shape()[2]
        items = numpy.zeros(item_size, dtype=int)
        for u_id, i_id, rating in dataset.ratings.to_list():
            items[i_id] += 1
        dist = cls._make_dist(items, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def item_dist_on_factors(cls, dataset, stacks=11, stack_range=10):
        """Distribution of the number of rated items per factor."""
        item_size = dataset.matrix_shape()[2]
        factor_size = dataset.matrix_shape()[1]
        factor_item_table = numpy.zeros((factor_size, item_size), dtype=bool)
        factor_num_items = numpy.zeros(factor_size, dtype=int)
        user_factor_matrix = dataset.touchs.to_matrix()
        for u_id, i_id, rating in dataset.ratings.to_list():
            for f_id in range(factor_size):
                if user_factor_matrix[u_id][f_id] == 1:
                    factor_item_table[f_id][i_id] = True
        for f_id in range(len(factor_item_table)):
            factor_num_items[f_id] = factor_item_table[f_id].sum()
        dist = cls._make_dist(factor_num_items, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @classmethod
    def item_dist_on_users(cls, dataset, stacks=11, stack_range=10):
        """Distribution of the number of rated items per user."""
        user_size = dataset.matrix_shape()[0]
        user_num_items = numpy.zeros(user_size, dtype=int)
        for u_id, i_id, rating in dataset.ratings.to_list():
            user_num_items[u_id] += 1
        dist = cls._make_dist(user_num_items, stacks, stack_range)
        labels = cls._make_labels(stacks, stack_range)
        return dist, labels

    @staticmethod
    def _make_dist(value_list, stacks, stack_range):
        """Histogram ``value_list`` into ``stacks`` bins of width ``stack_range``.

        Values at or above ``stacks * stack_range`` are counted in the last
        bin instead of being dropped.
        """
        dist = numpy.zeros(stacks, dtype=int)
        for value in value_list:
            if value >= stacks * stack_range:
                dist[stacks - 1] += 1
            else:
                dist[math.floor(value / stack_range)] += 1
        return dist

    @staticmethod
    def _make_labels(stacks, stack_range):
        """Lower bound of each stack: ``[0, stack_range, 2*stack_range, ...]``."""
        return numpy.arange(stacks) * stack_range
| [
"numpy.zeros",
"math.floor"
] | [((578, 613), 'numpy.zeros', 'numpy.zeros', (['item_size'], {'dtype': 'float'}), '(item_size, dtype=float)\n', (589, 613), False, 'import numpy\n'), ((1224, 1257), 'numpy.zeros', 'numpy.zeros', (['user_size'], {'dtype': 'int'}), '(user_size, dtype=int)\n', (1235, 1257), False, 'import numpy\n'), ((1861, 1910), 'numpy.zeros', 'numpy.zeros', (['(item_size, factor_size)'], {'dtype': 'bool'}), '((item_size, factor_size), dtype=bool)\n', (1872, 1910), False, 'import numpy\n'), ((1938, 1971), 'numpy.zeros', 'numpy.zeros', (['item_size'], {'dtype': 'int'}), '(item_size, dtype=int)\n', (1949, 1971), False, 'import numpy\n'), ((2659, 2692), 'numpy.zeros', 'numpy.zeros', (['item_size'], {'dtype': 'int'}), '(item_size, dtype=int)\n', (2670, 2692), False, 'import numpy\n'), ((3203, 3252), 'numpy.zeros', 'numpy.zeros', (['(factor_size, item_size)'], {'dtype': 'bool'}), '((factor_size, item_size), dtype=bool)\n', (3214, 3252), False, 'import numpy\n'), ((3280, 3315), 'numpy.zeros', 'numpy.zeros', (['factor_size'], {'dtype': 'int'}), '(factor_size, dtype=int)\n', (3291, 3315), False, 'import numpy\n'), ((4082, 4115), 'numpy.zeros', 'numpy.zeros', (['user_size'], {'dtype': 'int'}), '(user_size, dtype=int)\n', (4093, 4115), False, 'import numpy\n'), ((4443, 4473), 'numpy.zeros', 'numpy.zeros', (['stacks'], {'dtype': 'int'}), '(stacks, dtype=int)\n', (4454, 4473), False, 'import numpy\n'), ((4629, 4660), 'math.floor', 'math.floor', (['(value / stack_range)'], {}), '(value / stack_range)\n', (4639, 4660), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Plot the effective index and group index curves for a given waveguide CML.
@author: <NAME>
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
#mpl.style.use('ggplot') # set plotting style
fname = 'wg_strip_o_mm_2000' # data to be loaded
npts = 10 # number of points to plot
# File layout (single comma-separated line): wavl_min, wavl_max, then the
# quadratic-fit coefficients a0, a1, a2 for the effective index and
# b0, b1, b2 for the group index.
with open(fname + '.txt', 'r') as f:  # context manager: don't leak the handle
    data = [float(i) for i in f.read().rstrip().split(',')]
wavl_range = [data[0], data[1]]
# numpy.poly1d expects the highest-order coefficient first.
coefficients_neff = [data[4], data[3], data[2]]
poly_neff = np.poly1d(coefficients_neff)
coefficients_ng = [data[7], data[6], data[5]]
poly_ng = np.poly1d(coefficients_ng)
wavl = np.linspace(wavl_range[0], wavl_range[1], npts)
neff = poly_neff(wavl)
ng = poly_ng(wavl)
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(111)
ax1.set_xlabel('Wavelength (nm)')
ax1.set_ylabel('Effective index', color="red")
ax1.set_title("Waveguide: " + fname)
# Fix: grid('off') passes a truthy string, which does NOT disable the grid
# (string 'on'/'off' support was removed from matplotlib); use the boolean.
ax1.grid(False)
ax1.plot(wavl*1e9, neff, label='Effective index', color="red")
ax2 = ax1.twinx()
ax2.set_ylabel('Group index', color="blue")
ax2.plot(wavl*1e9, ng, label='Group index', color="blue")
fig.legend()
fig.savefig(fname+'.pdf')
fig.savefig(fname+'.png')
| [
"numpy.poly1d",
"numpy.linspace",
"matplotlib.pyplot.figure"
] | [((509, 537), 'numpy.poly1d', 'np.poly1d', (['coefficients_neff'], {}), '(coefficients_neff)\n', (518, 537), True, 'import numpy as np\n'), ((594, 620), 'numpy.poly1d', 'np.poly1d', (['coefficients_ng'], {}), '(coefficients_ng)\n', (603, 620), True, 'import numpy as np\n'), ((629, 676), 'numpy.linspace', 'np.linspace', (['wavl_range[0]', 'wavl_range[1]', 'npts'], {}), '(wavl_range[0], wavl_range[1], npts)\n', (640, 676), True, 'import numpy as np\n'), ((726, 752), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (736, 752), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import json
import math
import matplotlib.pyplot
import numpy
import os
import pandas
import re
import skimage.io
import skimage.transform
import torch
import torch.utils.data
import torch.utils.model_zoo
import tqdm
blurb = 'Fashion Brain: Library of trained deep learning models (D2.5)'
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding=1.

    Spatial size is preserved when ``stride`` is 1, halved (rounded) when
    ``stride`` is 2.
    """
    conv_options = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return torch.nn.Conv2d(in_planes, out_planes, **conv_options)
class BasicBlock(torch.nn.Module):
    """Two-convolution ResNet block with an identity (or projected) shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may stride-downsample; the second keeps resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = torch.nn.BatchNorm2d(planes)
        self.relu = torch.nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = torch.nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(torch.nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        expanded = planes * self.expansion
        # 1x1 channel reduction.
        self.conv1 = torch.nn.Conv2d(inplanes, planes, kernel_size=1,
                                     bias=False)
        self.bn1 = torch.nn.BatchNorm2d(planes)
        # 3x3 spatial convolution; may stride-downsample.
        self.conv2 = torch.nn.Conv2d(planes, planes, kernel_size=3,
                                     stride=stride, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(planes)
        # 1x1 channel expansion back to 4x planes.
        self.conv3 = torch.nn.Conv2d(planes, expanded, kernel_size=1,
                                     bias=False)
        self.bn3 = torch.nn.BatchNorm2d(expanded)
        self.relu = torch.nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class ResNet(torch.nn.Module):
    """ResNet backbone (He et al., 2015).

    ``forward`` returns class logits; ``features`` returns the pooled,
    flattened embedding (``512 * block.expansion`` dims) before the
    classifier.  The 7x7 average pool assumes 224x224 inputs.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool -> 1/4 input resolution.
        self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                     bias=False)
        self.bn1 = torch.nn.BatchNorm2d(64)
        self.relu = torch.nn.ReLU(inplace=True)
        self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = torch.nn.AvgPool2d(7, stride=1)
        self.fc = torch.nn.Linear(512 * block.expansion, num_classes)
        # He initialization for convs; identity-style init for batch norms.
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight,
                    mode='fan_out',
                    nonlinearity='relu'
                )
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may downsample
        and gets a 1x1 projection shortcut when shapes change."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = torch.nn.Sequential(
                torch.nn.Conv2d(self.inplanes, planes * block.expansion,
                                kernel_size=1, stride=stride, bias=False),
                torch.nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return torch.nn.Sequential(*layers)

    def features(self, x):
        """Return the pooled, flattened feature embedding of ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        return x.view(x.size(0), -1)

    def forward(self, x):
        # forward is exactly the classifier applied to the feature
        # embedding; previously this method duplicated all of features().
        return self.fc(self.features(x))
def resnet():
    """Build a ResNet-50 and load ImageNet-pretrained weights.

    Downloads (or reads from the torch cache) the official torchvision
    checkpoint.
    """
    url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    state_dict = torch.utils.model_zoo.load_url(url)
    model.load_state_dict(state_dict)
    return model
class Classify(torch.nn.Module):
    """Linear classification head on top of ResNet-50 features.

    When ``finetune`` is False (the default), the backbone features are
    detached so gradients only flow into the linear layer.
    """

    def __init__(self, n_classes, finetune=False):
        torch.nn.Module.__init__(self)
        self.cnn = resnet()
        self.linear = torch.nn.Linear(2048, n_classes)
        self.finetune = finetune

    def forward(self, x):
        feats = self.cnn.features(x)
        if not self.finetune:
            # Freeze the backbone: block gradient flow into the CNN.
            feats = feats.detach()
        return self.linear(feats)
class Data:
    """Load item annotations and the label dictionary for ``model_type``.

    Reads ``data/<model_type>.csv`` (item table) and
    ``data/<model_type>.json`` (label -> class-index mapping), then
    randomly splits rows into folds: 1000 validation, 1000 test, the
    remainder training.
    """

    def __init__(self, model_type):
        self.items = pandas.read_csv('data/{}.csv'.format(model_type))
        with open('data/{}.json'.format(model_type)) as f:
            self.dictionary = json.load(f)
        # Reverse mapping: class index -> label string.
        self.lookup = dict(zip(
            self.dictionary.values(), self.dictionary.keys()
        ))
        ix = numpy.random.permutation(len(self.items))
        # Fix: build the fold column in one shot instead of chained-indexing
        # assignment (df['fold'].iloc[...] = ...), which is unreliable under
        # pandas copy-on-write and raises SettingWithCopyWarning.
        fold = numpy.array(['train'] * len(self.items), dtype=object)
        fold[ix[-2000:-1000]] = 'valid'
        fold[ix[-1000:]] = 'test'
        self.items['fold'] = fold
class Iterator:
    """Torch-style dataset over one fold of a ``Data`` split.

    The training fold is stratified to 1000 samples per class, the
    evaluation folds to 25 per class.  ``__getitem__`` loads the image
    from disk and returns an ``(image, label)`` tensor pair, moved to
    GPU when CUDA is available.
    """

    def __init__(self, data, fold, model_type, local=False):
        per_class = 1000 if fold == 'train' else 25
        self.items = stratify(
            data.items[data.items.fold == fold],
            model_type,
            per_class,
        )
        self.dictionary = data.dictionary
        self.lookup = data.lookup
        self.model_type = model_type
        self.local = local

    def __getitem__(self, item):
        # Image root differs between a local checkout and the shared volume.
        if self.local:
            root = 'data/img/'
        else:
            root = '/data/fb-model-library/data/large/img/'
        path = root + str(self.items['ix'].iloc[item]).zfill(6) + '.jpg'
        # HWC uint8 image -> CHW float tensor.
        x = torch.FloatTensor(skimage.io.imread(path).transpose(2, 0, 1))
        y = torch.LongTensor([
            self.dictionary[self.items.iloc[item][self.model_type]]
        ])
        if torch.cuda.is_available():
            return x.cuda(), y.cuda()
        return x, y

    def __len__(self):
        return len(self.items)
class Trainer:
    """Train a ``Classify`` model on one attribute type.

    Saves the model to ``<save_>/model.pt`` whenever validation accuracy
    reaches a new best, and divides the learning rate by 4 after every
    epoch without improvement.
    """

    def __init__(self,
                 save_='checkpoints/classify',
                 model_type='color',
                 n_epochs=200,
                 lr=0.01,
                 batch_size=50,
                 local=False):
        self.save_ = save_
        self.model_type = model_type
        data = Data(model_type)
        train_it = Iterator(
            data, fold='train', local=local, model_type=model_type,
        )
        valid_it = Iterator(
            data, fold='valid', local=local, model_type=model_type,
        )
        self.train_loader = torch.utils.data.DataLoader(
            train_it,
            batch_size=batch_size,
            shuffle=True,
        )
        self.valid_loader = torch.utils.data.DataLoader(
            valid_it,
            batch_size=batch_size,
        )
        self.model = Classify(len(train_it.dictionary))
        self.optimizer = \
            torch.optim.SGD(self.model.parameters(), momentum=0.9, lr=lr)
        self.n_epochs = n_epochs
        self.it = 0  # global training-iteration counter, used for logging

    def train(self):
        """Run the full training loop, checkpointing the best model."""
        if torch.cuda.is_available():
            self.model.cuda()
        vl = []  # per-epoch validation losses
        va = []  # per-epoch validation accuracies
        for epoch in range(self.n_epochs):
            print('EPOCH {} '.format(epoch) + '*' * 20)
            loss, acc = self.do_epoch()
            vl.append(loss)
            va.append(acc)
            format_str = \
                'VALIDATION iteration: {}; validation-loss: {}; ' + \
                'validation-acc: {};'
            print(format_str.format(self.it, vl[-1], va[-1]))
            if va[-1] == max(va):
                print('saving...')
                torch.save(self.model, self.save_ + '/model.pt')
            else:
                # No improvement this epoch: anneal the learning rate.
                print('Annealing learning rate...')
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] /= 4

    def do_epoch(self):
        """One pass over the training set, then one over the validation set.

        Returns ``(mean validation loss, mean validation accuracy)`` as
        plain floats.
        """
        for x, y in self.train_loader:
            loss, acc = self.get_loss(x, y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Log the scalar value (not the tensor repr) so the numeric
            # regexes in plot() can parse the log lines.
            print('TRAIN iteration: {}; loss: {}; accuracy: {};'.format(
                self.it, loss.item(), acc,
            ))
            self.it += 1
        validation_loss = []
        validation_acc = []
        # Fix: evaluate without autograd so validation does not build (and
        # retain, via the stored tensors) computation graphs.
        with torch.no_grad():
            for x, y in self.valid_loader:
                loss, acc = self.get_loss(x, y)
                validation_loss.append(loss.item())
                validation_acc.append(acc)
        return sum(validation_loss) / len(validation_loss), \
            sum(validation_acc) / len(validation_acc)

    def get_loss(self, x, y):
        """Cross-entropy loss and top-1 accuracy for one batch."""
        yhat = self.model(x)
        loss = torch.nn.functional.cross_entropy(yhat, y.squeeze())
        acc = []
        for i in range(yhat.shape[0]):
            acc.append(y[i, 0] in yhat[i, :].topk(1)[1])
        return loss, numpy.mean(acc)
def plot(checkpoint):
    """Parse ``<checkpoint>/log`` and plot loss/accuracy learning curves.

    Expects the TRAIN/VALIDATION lines printed by ``Trainer`` and shows
    two side-by-side panels (cross-entropy and accuracy, training vs.
    validation).
    """
    with open(checkpoint + '/log') as f:  # close the handle when done
        lines = f.read().split('\n')
    # Fix: raw strings -- '\d' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python).  Compile once instead of
    # re-matching uncompiled patterns on every line.
    train_re = re.compile(
        r'TRAIN iteration: (\d+); loss: (\d+\.\d+); accuracy: (\d+\.\d+);'
    )
    valid_re = re.compile(
        r'VALIDATION iteration: (\d+); validation-loss: (\d+\.\d+); '
        r'validation-acc: (\d+\.\d+);'
    )
    v_err = []
    t_err = []
    v_acc = []
    t_acc = []
    v_it = []
    t_it = []
    for line in lines:
        match = train_re.search(line)
        if match:
            it, err, acc = match.groups()
            t_it.append(int(it))
            t_err.append(float(err))
            t_acc.append(float(acc))
        match = valid_re.search(line)
        if match:
            it, err, acc = match.groups()
            v_it.append(int(it))
            v_err.append(float(err))
            v_acc.append(float(acc))
    matplotlib.pyplot.figure()
    matplotlib.pyplot.subplot(121)
    matplotlib.pyplot.plot(t_it, t_err)
    matplotlib.pyplot.plot(v_it, v_err, '-rx', linewidth=2, markersize=10)
    matplotlib.pyplot.title('Learning curves: Classification')
    matplotlib.pyplot.ylabel('Cross-Entropy Loss')
    matplotlib.pyplot.xlabel('# Iteration')
    matplotlib.pyplot.grid(linestyle='--')
    matplotlib.pyplot.legend(['training', 'validation'])
    matplotlib.pyplot.subplot(122)
    matplotlib.pyplot.plot(t_it, t_acc)
    matplotlib.pyplot.plot(v_it, v_acc, '-rx', linewidth=2, markersize=10)
    matplotlib.pyplot.title('Learning curves: Classification')
    matplotlib.pyplot.ylabel('Accuracy')
    matplotlib.pyplot.xlabel('# Iteration')
    matplotlib.pyplot.grid(linestyle='--')
    matplotlib.pyplot.legend(['training', 'validation'])
    matplotlib.pyplot.show()
def stratify(df, col, n):
    """Resample ``df`` so every distinct value of ``col`` appears ``n`` times.

    Rows are drawn with replacement within each group, so the result has
    exactly ``n * df[col].nunique()`` rows (class-balanced).
    """
    sampled = []
    for val in df[col].unique():
        group = df[df[col] == val]
        pick = numpy.random.choice(len(group), size=n, replace=True)
        sampled.append(group.iloc[pick])
    return pandas.concat(sampled, axis=0)
if __name__ == '__main__':
    # Command-line entry point: parse options and dispatch on --mode
    # ('train', 'plot', or 'test').
    # NOTE(review): the default mode 'train_annotation' matches no branch
    # below, so running without --mode is a silent no-op -- confirm intent.
    parser = argparse.ArgumentParser(description=blurb)
    parser.add_argument(
        '--mode',
        type=str,
        default='train_annotation',
        help='script call',
    )
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='checkpoints/classifier',
        help='model checkpoint',
    )
    parser.add_argument(
        '--n_epochs',
        type=int,
        default=200,
        help='number of epochs trained'
    )
    parser.add_argument(
        '--lr',
        type=float,
        default=0.01,
        help='learning rate of SGD'
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=10,
        help='batch-size for training',
    )
    parser.add_argument(
        '--model_type',
        type=str,
        default='color',
        help='type of model attributes to train on',
    )
    parser.add_argument(
        '--local',
        action='store_true',
        help='run locally',
    )
    args = parser.parse_args()
    if args.mode == 'train':
        # NOTE(review): Trainer.train() returns None, so `trainer` is
        # always None after this line; the assignment is cosmetic.
        trainer = Trainer(
            args.checkpoint,
            args.model_type,
            args.n_epochs,
            args.lr,
            args.batch_size,
            args.local,
        ).train()
    elif args.mode == 'plot':
        plot(args.checkpoint)
    elif args.mode == 'test':
        # Load the checkpointed model onto CPU regardless of where it
        # was trained (map_location keeps storages on the host).
        model = torch.load(
            args.checkpoint + '/model.pt',
            map_location=lambda storage, loc: storage,
        )
        data = Data(args.model_type)
        # The last path component of the checkpoint dir doubles as the
        # attribute/column name being classified.
        model_type = str(args.checkpoint).split('/')[-1]
        iterator = Iterator(
            data,
            fold='test',
            model_type=model_type,
            local=args.local
        )
        # Qualitative check: pick one random test sample, print ground
        # truth and the model's top prediction, and open the image.
        choice = numpy.random.choice(len(iterator))
        image = iterator[choice][0]
        file_ = iterator.items.file.iloc[choice]
        label = iterator.items[model_type].iloc[choice]
        print(label)
        prediction = model(image[None, :, :, :]).squeeze().detach().numpy()
        best = iterator.lookup[numpy.argmax(prediction)]
        print(best)
        # NOTE(review): `open` is the macOS image viewer -- confirm the
        # target platform before relying on this.
        os.system('open ' + file_)
| [
"torch.nn.ReLU",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.LongTensor",
"torch.utils.model_zoo.load_url",
"torch.cuda.is_available",
"torch.nn.AvgPool2d",
"torch.nn.Module.__init__",
"re.search",
"torch.nn.BatchNorm2d",
"numpy.mean",
"argparse.ArgumentParser",
"torch.nn.init.k... | [((406, 501), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n', (421, 501), False, 'import torch\n'), ((12610, 12636), 'pandas.concat', 'pandas.concat', (['out'], {'axis': '(0)'}), '(out, axis=0)\n', (12623, 12636), False, 'import pandas\n'), ((12679, 12721), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'blurb'}), '(description=blurb)\n', (12702, 12721), False, 'import argparse\n'), ((794, 822), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (814, 822), False, 'import torch\n'), ((843, 870), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (856, 870), False, 'import torch\n'), ((935, 963), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (955, 963), False, 'import torch\n'), ((1583, 1643), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, bias=False)\n', (1598, 1643), False, 'import torch\n'), ((1663, 1691), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1683, 1691), False, 'import torch\n'), ((1713, 1801), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1728, 1801), False, 'import torch\n'), ((1899, 1927), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1919, 1927), False, 'import torch\n'), ((1949, 2024), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['planes', '(planes * self.expansion)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, planes * self.expansion, kernel_size=1, bias=False)\n', (1964, 2024), 
False, 'import torch\n'), ((2102, 2147), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (2122, 2147), False, 'import torch\n'), ((2168, 2195), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2181, 2195), False, 'import torch\n'), ((2878, 2948), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (2893, 2948), False, 'import torch\n'), ((2999, 3023), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3019, 3023), False, 'import torch\n'), ((3044, 3071), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3057, 3071), False, 'import torch\n'), ((3095, 3149), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (3113, 3149), False, 'import torch\n'), ((3450, 3481), 'torch.nn.AvgPool2d', 'torch.nn.AvgPool2d', (['(7)'], {'stride': '(1)'}), '(7, stride=1)\n', (3468, 3481), False, 'import torch\n'), ((3500, 3551), 'torch.nn.Linear', 'torch.nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (3515, 3551), False, 'import torch\n'), ((4633, 4661), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*layers'], {}), '(*layers)\n', (4652, 4661), False, 'import torch\n'), ((5413, 5509), 'torch.utils.model_zoo.load_url', 'torch.utils.model_zoo.load_url', (['"""https://download.pytorch.org/models/resnet50-19c8e357.pth"""'], {}), "(\n 'https://download.pytorch.org/models/resnet50-19c8e357.pth')\n", (5443, 5509), False, 'import torch\n'), ((5631, 5661), 'torch.nn.Module.__init__', 'torch.nn.Module.__init__', (['self'], {}), '(self)\n', (5655, 5661), False, 'import torch\n'), ((5713, 5745), 'torch.nn.Linear', 'torch.nn.Linear', (['(2048)', 
'n_classes'], {}), '(2048, n_classes)\n', (5728, 5745), False, 'import torch\n'), ((7478, 7553), 'torch.LongTensor', 'torch.LongTensor', (['[self.dictionary[self.items.iloc[item][self.model_type]]]'], {}), '([self.dictionary[self.items.iloc[item][self.model_type]]])\n', (7494, 7553), False, 'import torch\n'), ((7588, 7613), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7611, 7613), False, 'import torch\n'), ((8326, 8400), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_it'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_it, batch_size=batch_size, shuffle=True)\n', (8353, 8400), False, 'import torch\n'), ((8477, 8537), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_it'], {'batch_size': 'batch_size'}), '(valid_it, batch_size=batch_size)\n', (8504, 8537), False, 'import torch\n'), ((8820, 8845), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8843, 8845), False, 'import torch\n'), ((10934, 10958), 're.search', 're.search', (['matcher', 'line'], {}), '(matcher, line)\n', (10943, 10958), False, 'import re\n'), ((11270, 11294), 're.search', 're.search', (['matcher', 'line'], {}), '(matcher, line)\n', (11279, 11294), False, 'import re\n'), ((6172, 6184), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6181, 6184), False, 'import json\n'), ((10583, 10598), 'numpy.mean', 'numpy.mean', (['acc'], {}), '(acc)\n', (10593, 10598), False, 'import numpy\n'), ((3649, 3725), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (3678, 3725), False, 'import torch\n'), ((4180, 4282), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1,\n stride=stride, bias=False)\n', (4195, 4282), 
False, 'import torch\n'), ((4322, 4368), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (4342, 4368), False, 'import torch\n'), ((9390, 9438), 'torch.save', 'torch.save', (['self.model', "(self.save_ + '/model.pt')"], {}), "(self.model, self.save_ + '/model.pt')\n", (9400, 9438), False, 'import torch\n'), ((14026, 14114), 'torch.load', 'torch.load', (["(args.checkpoint + '/model.pt')"], {'map_location': '(lambda storage, loc: storage)'}), "(args.checkpoint + '/model.pt', map_location=lambda storage, loc:\n storage)\n", (14036, 14114), False, 'import torch\n'), ((14771, 14797), 'os.system', 'os.system', (["('open ' + file_)"], {}), "('open ' + file_)\n", (14780, 14797), False, 'import os\n'), ((3874, 3910), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3897, 3910), False, 'import torch\n'), ((3927, 3961), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3950, 3961), False, 'import torch\n'), ((14715, 14739), 'numpy.argmax', 'numpy.argmax', (['prediction'], {}), '(prediction)\n', (14727, 14739), False, 'import numpy\n')] |
import os
import sys
import pickle
from tqdm import tqdm
import numpy as np
import cv2
from fsgan.utils.bbox_utils import scale_bbox, crop_img
from fsgan.utils.video_utils import Sequence
def main(input_path, output_dir=None, cache_path=None, seq_postfix='_dsfd_seq.pkl', resolution=256, crop_scale=2.0,
         select='all', disable_tqdm=False, encoder_codec='avc1'):
    """Crop detected face sequences out of a video and write each as its own clip.

    For every `Sequence` stored in the cache file, the per-frame detection box is
    scaled by `crop_scale`, the frame is cropped and resized to
    `resolution` x `resolution`, and the result is appended to a per-sequence
    output video. Detections (and landmarks, when present) are remapped into the
    cropped coordinate frame and pickled next to each output clip.

    :param input_path: path to the input video file
    :param output_dir: output directory; defaults to the input path without extension
    :param cache_path: path to the pickled sequence list; defaults to input path + seq_postfix
    :param seq_postfix: postfix of the cache file and of the per-sequence output pickles
    :param resolution: side length in pixels of the square output frames
    :param crop_scale: scale factor applied to each detection bounding box before cropping
    :param select: 'all' keeps every sequence; 'longest' keeps only the longest one
    :param disable_tqdm: if True, suppress the progress bar
    :param encoder_codec: fourcc code for the output video encoder
    :raises RuntimeError: if the input video, cache file, or output directory is missing,
        or the video cannot be opened
    """
    # Derive default cache path / output directory from the input file name
    cache_path = os.path.splitext(input_path)[0] + seq_postfix if cache_path is None else cache_path
    if output_dir is None:
        output_dir = os.path.splitext(input_path)[0]
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

    # Verification
    if not os.path.isfile(input_path):
        raise RuntimeError('Input video does not exist: ' + input_path)
    if not os.path.isfile(cache_path):
        raise RuntimeError('Cache file does not exist: ' + cache_path)
    if not os.path.isdir(output_dir):
        raise RuntimeError('Output directory does not exist: ' + output_dir)

    print('=> Cropping video sequences from video: "%s"...' % os.path.basename(input_path))

    # Load sequences from file
    with open(cache_path, "rb") as fp:  # Unpickling
        seq_list = pickle.load(fp)

    # Select sequences ('longest' keeps a single sequence, re-indexed to id 0)
    if select == 'longest':
        selected_seq_index = np.argmax([len(s) for s in seq_list])
        seq = seq_list[selected_seq_index]
        seq.id = 0
        seq_list = [seq]

    # Open input video file
    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        raise RuntimeError('Failed to read video: ' + input_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    input_vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    input_vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # For each sequence initialize output video file
    out_vids = []
    fourcc = cv2.VideoWriter_fourcc(*encoder_codec)
    for seq in seq_list:
        curr_vid_name = os.path.splitext(os.path.basename(input_path))[0] + '_seq%02d.mp4' % seq.id
        curr_vid_path = os.path.join(output_dir, curr_vid_name)
        out_vids.append(cv2.VideoWriter(curr_vid_path, fourcc, fps, (resolution, resolution)))

    # For each frame in the target video
    cropped_detections = [[] for seq in seq_list]
    cropped_landmarks = [[] for seq in seq_list]
    pbar = range(total_frames) if disable_tqdm else tqdm(range(total_frames), file=sys.stdout)
    for i in pbar:
        ret, frame = cap.read()
        if frame is None:
            continue

        # For each sequence
        for s, seq in enumerate(seq_list):
            # Skip sequences that do not cover frame i
            if i < seq.start_index or (seq.start_index + len(seq) - 1) < i:
                continue
            det = seq[i - seq.start_index]

            # Crop frame: convert (x1, y1, x2, y2) to (x, y, w, h), scale, crop, resize
            bbox = np.concatenate((det[:2], det[2:] - det[:2]))
            bbox = scale_bbox(bbox, crop_scale)
            frame_cropped = crop_img(frame, bbox)
            frame_cropped = cv2.resize(frame_cropped, (resolution, resolution), interpolation=cv2.INTER_CUBIC)

            # Write cropped frame to output video
            out_vids[s].write(frame_cropped)

            # Add cropped detection to list (translate into crop frame, then
            # rescale to the output resolution; mutates det in place)
            orig_size = bbox[2:]
            axes_scale = np.array([resolution, resolution]) / orig_size
            det[:2] -= bbox[:2]
            det[2:] -= bbox[:2]
            det[:2] *= axes_scale
            det[2:] *= axes_scale
            cropped_detections[s].append(det)

            # Add cropped landmarks to list
            if hasattr(seq, 'landmarks'):
                curr_landmarks = seq.landmarks[i - seq.start_index]
                curr_landmarks[:, :2] -= bbox[:2]

                # 3D landmarks case: extend the scale vector so z is scaled too
                if curr_landmarks.shape[1] == 3:
                    axes_scale = np.append(axes_scale, axes_scale.mean())

                curr_landmarks *= axes_scale
                cropped_landmarks[s].append(curr_landmarks)

    # For each sequence write cropped sequence to file
    for s, seq in enumerate(seq_list):
        # seq.detections = np.array(cropped_detections[s])
        # if hasattr(seq, 'landmarks'):
        #     seq.landmarks = np.array(cropped_landmarks[s])
        # seq.start_index = 0

        # TODO: this is a hack to change class type (remove this later)
        out_seq = Sequence(0)
        out_seq.detections = np.array(cropped_detections[s])
        if hasattr(seq, 'landmarks'):
            out_seq.landmarks = np.array(cropped_landmarks[s])
        out_seq.id, out_seq.obj_id, out_seq.size_avg = seq.id, seq.obj_id, seq.size_avg

        # Write to file
        curr_out_name = os.path.splitext(os.path.basename(input_path))[0] + '_seq%02d%s' % (out_seq.id, seq_postfix)
        curr_out_path = os.path.join(output_dir, curr_out_name)
        with open(curr_out_path, "wb") as fp:  # Pickling
            pickle.dump([out_seq], fp)
if __name__ == "__main__":
    # Build the command-line interface and dispatch to main().
    import argparse
    parser = argparse.ArgumentParser('crop_video_sequences')
    # (flags, options) pairs, registered in order so the help output is unchanged
    cli_spec = [
        (('input',),
         dict(metavar='VIDEO', help='path to input video')),
        (('-o', '--output'),
         dict(metavar='DIR', help='output directory')),
        (('-c', '--cache'),
         dict(metavar='PATH', help='path to sequence cache file')),
        (('-sp', '--seq_postfix'),
         dict(default='_dsfd_seq.pkl', metavar='POSTFIX', help='input sequence file postfix')),
        (('-r', '--resolution'),
         dict(default=256, type=int, metavar='N', help='output video resolution (default: 256)')),
        (('-cs', '--crop_scale'),
         dict(default=2.0, type=float, metavar='F', help='crop scale relative to bounding box (default: 2.0)')),
        (('-s', '--select'),
         dict(default='all', metavar='STR', help='selection method [all|longest]')),
        (('-dt', '--disable_tqdm'),
         dict(dest='disable_tqdm', action='store_true', help='if specified disables tqdm progress bar')),
        (('-ec', '--encoder_codec'),
         dict(default='avc1', metavar='STR', help='encoder codec code')),
    ]
    for flags, options in cli_spec:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
    main(args.input, args.output, args.cache, args.seq_postfix, args.resolution, args.crop_scale, args.select,
         args.disable_tqdm, args.encoder_codec)
| [
"pickle.dump",
"cv2.resize",
"argparse.ArgumentParser",
"fsgan.utils.bbox_utils.crop_img",
"pickle.load",
"os.path.join",
"fsgan.utils.video_utils.Sequence",
"os.path.splitext",
"os.path.isfile",
"numpy.array",
"cv2.VideoWriter",
"os.path.isdir",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc... | [((1442, 1470), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_path'], {}), '(input_path)\n', (1458, 1470), False, 'import cv2\n'), ((1867, 1905), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*encoder_codec'], {}), '(*encoder_codec)\n', (1889, 1905), False, 'import cv2\n'), ((4967, 5014), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""crop_video_sequences"""'], {}), "('crop_video_sequences')\n", (4990, 5014), False, 'import argparse\n'), ((659, 685), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (673, 685), False, 'import os\n'), ((770, 796), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (784, 796), False, 'import os\n'), ((880, 905), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (893, 905), False, 'import os\n'), ((1181, 1196), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1192, 1196), False, 'import pickle\n'), ((2055, 2094), 'os.path.join', 'os.path.join', (['output_dir', 'curr_vid_name'], {}), '(output_dir, curr_vid_name)\n', (2067, 2094), False, 'import os\n'), ((4310, 4321), 'fsgan.utils.video_utils.Sequence', 'Sequence', (['(0)'], {}), '(0)\n', (4318, 4321), False, 'from fsgan.utils.video_utils import Sequence\n'), ((4351, 4382), 'numpy.array', 'np.array', (['cropped_detections[s]'], {}), '(cropped_detections[s])\n', (4359, 4382), True, 'import numpy as np\n'), ((4738, 4777), 'os.path.join', 'os.path.join', (['output_dir', 'curr_out_name'], {}), '(output_dir, curr_out_name)\n', (4750, 4777), False, 'import os\n'), ((521, 549), 'os.path.splitext', 'os.path.splitext', (['input_path'], {}), '(input_path)\n', (537, 549), False, 'import os\n'), ((568, 593), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (581, 593), False, 'import os\n'), ((607, 627), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (615, 627), False, 'import os\n'), ((1047, 1075), 'os.path.basename', 
'os.path.basename', (['input_path'], {}), '(input_path)\n', (1063, 1075), False, 'import os\n'), ((2119, 2188), 'cv2.VideoWriter', 'cv2.VideoWriter', (['curr_vid_path', 'fourcc', 'fps', '(resolution, resolution)'], {}), '(curr_vid_path, fourcc, fps, (resolution, resolution))\n', (2134, 2188), False, 'import cv2\n'), ((2785, 2829), 'numpy.concatenate', 'np.concatenate', (['(det[:2], det[2:] - det[:2])'], {}), '((det[:2], det[2:] - det[:2]))\n', (2799, 2829), True, 'import numpy as np\n'), ((2849, 2877), 'fsgan.utils.bbox_utils.scale_bbox', 'scale_bbox', (['bbox', 'crop_scale'], {}), '(bbox, crop_scale)\n', (2859, 2877), False, 'from fsgan.utils.bbox_utils import scale_bbox, crop_img\n'), ((2906, 2927), 'fsgan.utils.bbox_utils.crop_img', 'crop_img', (['frame', 'bbox'], {}), '(frame, bbox)\n', (2914, 2927), False, 'from fsgan.utils.bbox_utils import scale_bbox, crop_img\n'), ((2956, 3043), 'cv2.resize', 'cv2.resize', (['frame_cropped', '(resolution, resolution)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(frame_cropped, (resolution, resolution), interpolation=cv2.\n INTER_CUBIC)\n', (2966, 3043), False, 'import cv2\n'), ((4453, 4483), 'numpy.array', 'np.array', (['cropped_landmarks[s]'], {}), '(cropped_landmarks[s])\n', (4461, 4483), True, 'import numpy as np\n'), ((4848, 4874), 'pickle.dump', 'pickle.dump', (['[out_seq]', 'fp'], {}), '([out_seq], fp)\n', (4859, 4874), False, 'import pickle\n'), ((389, 417), 'os.path.splitext', 'os.path.splitext', (['input_path'], {}), '(input_path)\n', (405, 417), False, 'import os\n'), ((3238, 3272), 'numpy.array', 'np.array', (['[resolution, resolution]'], {}), '([resolution, resolution])\n', (3246, 3272), True, 'import numpy as np\n'), ((1972, 2000), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (1988, 2000), False, 'import os\n'), ((4638, 4666), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (4654, 4666), False, 'import os\n')] |
#!/usr/bin/env python
###############################################################
# #
# D I S P E R S I O N . P Y #
# #
###############################################################
'''
ALTERNATIVE CODE FOR PLOTTING BANDSTRUCTURES
FROM A CASTEP .BANDS FILE
'''
# Let us import all the stuff we need, shouldnt require any specialist packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from fractions import Fraction
import sys
import os
from itertools import cycle
import argparse
import ase.io as io
import ase.dft.bz as bz
import warnings
def blockPrint():
    """Silence subsequent ``print`` calls by pointing stdout at the null device."""
    null_stream = open(os.devnull, 'w')
    sys.stdout = null_stream
# Restore
def enablePrint():
    """Undo blockPrint() by restoring the interpreter's original stdout stream."""
    sys.stdout = sys.__stdout__
# Define some constants
# Hartree-to-eV conversion factor; .bands eigenvalues are in Hartree and are
# multiplied by this in calc_bands().
hartree = 27.211386245988
# Fractional coordinates treated as high-symmetry values by check_sym()
# (1/2, 0, 1/4, 3/4, 1/3, 2/3).
fracs=np.array([0.5,0.0,0.25,0.75,0.33333333,0.66666667])
# pdos reader
def pdos_read(seed,species):
    """Read a CASTEP ``<seed>.pdos_bin`` file and return summed pDOS weights.

    The file is a big-endian Fortran unformatted file; records are read in the
    exact order they were written, so the read sequence below must not change.

    :param seed: calculation seed name (reads ``seed + '.pdos_bin'``)
    :param species: if True, sum the per-orbital weights over atomic species;
        otherwise sum them over angular-momentum channels (s, p, d, f)
    :return: array of shape (num_categories, max_eigenvalues, num_kpoints,
        num_spins), normalised per band/k-point, clamped to [0, 1] and rounded
        to 7 decimal places
    """
    from scipy.io import FortranFile as FF
    f=FF(seed+'.pdos_bin', 'r','>u4')
    # Header records: file version, text header, then the array dimensions
    version=f.read_reals('>f8')
    header=f.read_record('a80')[0]
    num_kpoints=f.read_ints('>u4')[0]
    num_spins=f.read_ints('>u4')[0]
    num_popn_orb=f.read_ints('>u4')[0]
    max_eigenvalues=f.read_ints('>u4')[0]
    # Per-orbital metadata: species index, ion index, angular momentum l
    orbital_species=f.read_ints('>u4')
    orbital_ion=f.read_ints('>u4')
    orbital_l=f.read_ints('>u4')

    print(orbital_species,orbital_ion,orbital_l)
    kpoints=np.zeros((num_kpoints,3))
    pdos_weights=np.zeros((num_popn_orb,max_eigenvalues,num_kpoints,num_spins))

    # One block per k-point: the k-point record, then per-spin eigenvalue weights
    for nk in range(0,num_kpoints):
        record=f.read_record('>i4','>3f8')
        kpt_index,kpoints[nk,:]=record
        for ns in range(0,num_spins):
            spin_index=f.read_ints('>u4')[0]
            num_eigenvalues=f.read_ints('>u4')[0]

            for nb in range(0,num_eigenvalues):
                pdos_weights[0:num_popn_orb,nb,nk,ns]=f.read_reals('>f8')
                # Normalise so the orbital weights of each band sum to 1
                #norm=np.sqrt(np.sum((pdos_weights[0:num_popn_orb,nb,nk,ns])**2))
                norm=np.sum((pdos_weights[0:num_popn_orb,nb,nk,ns]))
                pdos_weights[0:num_popn_orb,nb,nk,ns]=pdos_weights[0:num_popn_orb,nb,nk,ns]/norm

    if species:
        # Collapse orbitals belonging to the same species (1-based species ids)
        num_species=len(np.unique(orbital_species))
        pdos_weights_sum=np.zeros((num_species,max_eigenvalues,num_kpoints,num_spins))

        for i in range(0,num_species):
            loc=np.where(orbital_species==i+1)[0]
            pdos_weights_sum[i,:,:,:]=np.sum(pdos_weights[loc,:,:,:],axis=0)
    else:
        # Collapse orbitals by angular momentum channel: l = 0..3 (s, p, d, f)
        num_orbitals=4
        pdos_weights_sum=np.zeros((num_orbitals,max_eigenvalues,num_kpoints,num_spins))
        pdos_colours=np.zeros((3,max_eigenvalues,num_kpoints,num_spins))

        r=np.array([1,0,0])
        g=np.array([0,1,0])
        b=np.array([0,0,1])
        k=np.array([0,0,0])

        for i in range(0,num_orbitals):
            loc=np.where(orbital_l==i)[0]
            if len(loc)>0:
                pdos_weights_sum[i,:,:,:]=np.sum(pdos_weights[loc,:,:,:],axis=0)

    #print(kpoints[1])
    #for nb in range(num_eigenvalues):
    #    print(pdos_weights_sum[:,nb,1,0])
    # Clamp to [0, 1] to guard against rounding drift from the normalisation
    pdos_weights_sum=np.where(pdos_weights_sum>1,1,pdos_weights_sum)
    pdos_weights_sum=np.where(pdos_weights_sum<0,0,pdos_weights_sum)
    return np.round(pdos_weights_sum,7)
def cart_to_abc(lattice):
    """Convert a 3x3 cartesian lattice matrix (rows = lattice vectors) to the
    conventional cell parameters.

    :param lattice: array-like of shape (3, 3)
    :return: a, b, c (vector lengths) and alpha, beta, gamma (angles in radians)
    """
    a = np.linalg.norm(lattice[0])
    b = np.linalg.norm(lattice[1])
    c = np.linalg.norm(lattice[2])
    # alpha: angle between b and c; beta: between c and a; gamma: between a and b
    alpha = np.arccos(np.dot(lattice[1], lattice[2]) / (b * c))
    beta = np.arccos(np.dot(lattice[2], lattice[0]) / (c * a))
    gamma = np.arccos(np.dot(lattice[0], lattice[1]) / (a * b))
    return a, b, c, alpha, beta, gamma
def calc_phonons(buff_seed):
    """Parse a CASTEP ``<seed>.phonon`` file.

    Relies on the fixed .phonon layout: header counts on lines 2-4, the lattice
    on lines 9-11, and repeating per-q-point blocks of
    (3 + no_branches + no_ions * no_branches) lines starting at line 16.

    :param buff_seed: calculation seed name (reads ``buff_seed + '.phonon'``)
    :return: tuple of (energy_array [no_kpoints, no_branches] in file units,
        sort_array (indices ordering q-points by file index), kpoint_list
        (sorted q-point vectors), kpoint_array (raw q-point indices),
        no_kpoints, no_ions, lattice)
    """
    no_ions = 0
    no_kpoints = 0
    no_branches = 0
    no_electrons = 0
    unit = 0

    # Open the phonon file
    phonon_file=buff_seed+".phonon"
    phonon=open(phonon_file,'r')
    lines=phonon.readlines()

    # Header counts: last token of lines 2-4 (0-indexed 1-3)
    no_ions=int(lines[1].split()[-1])
    no_branches=int(lines[2].split()[-1])
    no_kpoints=int(lines[3].split()[-1])

    # Real-space lattice vectors, one per row
    lattice=np.zeros((3,3))
    lattice[0]=[i for i in lines[8].split()]
    lattice[1]=[i for i in lines[9].split()]
    lattice[2]=[i for i in lines[10].split()]

    #make the arrays
    energy_array=np.empty(shape=(no_kpoints,no_branches))
    kpoint_array=np.empty(shape=(no_kpoints)) # the array holding the number of the kpoint
    kpoint_list=[] # array of the kpoint vectors

    # Every q-point header line, stepping over one full q-point block at a time
    kpoint_string=lines[15::no_branches+3+no_ions*no_branches]

    for i in range(len(kpoint_string)):
        # Token 1 is the q-point index; tokens 2-4 are its fractional coordinates
        kpoint_array[i]=int(kpoint_string[i].split()[1])

        #Empty list for vectors
        vec=[]
        vec.append(float(kpoint_string[i].split()[2]))
        vec.append(float(kpoint_string[i].split()[3]))
        vec.append(float(kpoint_string[i].split()[4]))
        kpoint_list.append(vec)
       # print(vec)

    #Lets get the eigen values into the big array
    for k in range(0,no_kpoints):
        # First frequency line of q-point block k
        ind=16 + (k) * (3+no_branches+no_ions*no_branches)
        energy_array[k,:]=np.array([float(i.split()[-1]) for i in lines[ind:ind+no_branches]])

    # Order q-point vectors by their file index so plots traverse the path in order
    sort_array=kpoint_array.argsort()
    kpoint_list=np.array(kpoint_list)[sort_array]

    return energy_array,sort_array,kpoint_list,kpoint_array,no_kpoints,no_ions,lattice
# Variables we need from the bands file
# Variables we need from the bands file
def calc_bands(buff_seed,zero,show):
    """Parse a CASTEP ``<seed>.bands`` file into per-spin eigenvalue arrays.

    Eigenvalues are converted from Hartree to eV (module-level ``hartree``)
    and, unless ``zero`` is True, shifted so the Fermi energy sits at 0 eV.

    :param buff_seed: calculation seed name (reads ``buff_seed + '.bands'``)
    :param zero: if True, do NOT subtract the Fermi energy from the eigenvalues
    :param show: accepted for interface symmetry with the caller
        # NOTE(review): `show` is not used anywhere in this function — confirm
        # whether spin-channel filtering was meant to happen here.
    :return: tuple of (energy_array [no_kpoints, no_eigen] in eV,
        energy_array_2 (spin-2 channel, empty second axis if no_spins == 1),
        sort_array, kpoint_list (sorted k-point vectors), kpoint_array,
        no_spins, no_kpoints, fermi_energy (still in Hartree), no_electrons,
        no_electrons_2, no_eigen, no_eigen_2, lattice (converted from Bohr))
    """
    no_spins = 0
    no_kpoints = 0
    fermi_energy = 0
    no_electrons = 0
    no_electrons_2 = 0
    no_eigen = 0
    no_eigen_2 = 0

    # Open the bands file
    bands_file=buff_seed+".bands"
    bands=open(bands_file,'r')
    lines=bands.readlines()

    # Header: k-point count (line 1), spin count (line 2), Fermi energy (line 5)
    no_spins=int(lines[1].split()[-1])
    no_kpoints=int(lines[0].split()[-1])
    fermi_energy=float(lines[4].split()[-1])
    if no_spins==1:
        # NOTE(review): fermi_energy is re-read here redundantly (already set above)
        fermi_energy=float(lines[4].split()[-1])
        no_electrons =float(lines[2].split()[-1])
        no_eigen = int(lines[3].split()[-1])
    if no_spins==2:
        # Spin-polarised: lines 3 and 4 carry one value per spin channel
        spin_polarised=True
        no_electrons=float(lines[2].split()[-2])
        no_electrons_2=float(lines[2].split()[-1])
        no_eigen = int(lines[3].split()[-2])
        no_eigen_2=int(lines[3].split()[-1])

    # Lattice vectors (stored in Bohr; 1.889 converts to Angstrom)
    lattice=np.zeros((3,3))
    lattice[0]=[i for i in lines[6].split()]
    lattice[1]=[i for i in lines[7].split()]
    lattice[2]=[i for i in lines[8].split()]
    lattice=lattice/1.889

    #make the arrays
    energy_array=np.empty(shape=(no_kpoints,no_eigen))
    energy_array_2=np.empty(shape=(no_kpoints,no_eigen_2))
    kpoint_array=np.empty(shape=(no_kpoints)) # the array holding the number of the kpoint
    kpoint_list=[] # array of the kpoint vectors

    # Every k-point header line: the block stride depends on the spin count
    if no_spins==1:
        kpoint_string=lines[9::no_eigen+2]
    else:
        kpoint_string=lines[9::no_eigen+3+no_eigen_2]

    #loop through the kpoints to split it
    for i in range(len(kpoint_string)):
        # Token 1 is the k-point index; tokens 2-4 its fractional coordinates
        kpoint_array[i]=int(kpoint_string[i].split()[1])

        #Empty list for vectors
        vec=[]
        vec.append(float(kpoint_string[i].split()[2]))
        vec.append(float(kpoint_string[i].split()[3]))
        vec.append(float(kpoint_string[i].split()[4]))
        kpoint_list.append(vec)
       # print(vec)

    #Lets get the eigen values into the big array
    for k in range(0,no_kpoints):
        if no_spins==1:
            # First eigenvalue line of k-point block k (skipping header lines)
            ind=9+k*no_eigen+2*(k+1)
            if not zero:
                energy_array[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind:ind+no_eigen]])
            else:
                energy_array[k,:]=hartree*np.array([float(i) for i in lines[ind:ind+no_eigen]])
        if no_spins==2:
            # Spin-2 eigenvalues follow the spin-1 block, separated by one line
            ind=9+k*(no_eigen+no_eigen_2+1)+2*(k+1)
            if not zero:
                energy_array[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind:ind+no_eigen]])
                energy_array_2[k,:]=hartree*np.array([float(i)-fermi_energy for i in lines[ind+no_eigen+1:ind+no_eigen+1+no_eigen_2]])
            else:
                energy_array[k,:]=hartree*np.array([float(i) for i in lines[ind:ind+no_eigen]])
                energy_array_2[k,:]=hartree*np.array([float(i) for i in lines[ind+no_eigen+1:ind+no_eigen+1+no_eigen_2]])

    # Order k-point vectors by their file index so the band path plots in order
    sort_array=kpoint_array.argsort()
    kpoint_list=np.array(kpoint_list)[sort_array]

    return energy_array,energy_array_2,sort_array,kpoint_list,kpoint_array,no_spins,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice
def check_sym(vec):
    """Return True when every component of *vec* numerically matches one of the
    high-symmetry fractions in the module-level ``fracs`` array.

    An empty vector vacuously returns True (all() over no components).
    """
    return all(
        any(np.isclose(component, candidate) for candidate in fracs)
        for component in vec
    )
def main_dispersion():
warnings.filterwarnings("ignore")
#matplotlib.rcParams['mathtext.fontset'] = 'stix'
#matplotlib.rcParams['font.family'] = 'STIXGeneral'
#matplotlib.pyplot.title(r'ABC123 vs $\mathrm{ABC123}^{123}$')
#matplotlib.use('macOsX')
matplotlib.rc('text', usetex = True)
plt.style.use("classic")
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
#Do the parser
parser = argparse.ArgumentParser(description= "Utillity for plotting bandstructurs from a CASTEP run.")
parser.add_argument("seed",help="The seed from the CASTEP calculation.")
parser.add_argument("--save",action="store_true",help="Save DOS as .pdf with name <seed>-dos.pdf.")
parser.add_argument("-m","--multi",action="store_true",help="Set lines multicoloured.")
parser.add_argument("-l","--line",help="Set linewidth.",default=0.75)
parser.add_argument("--lim",help="Provide plotting limits around the Fermi energy.",nargs=2,default=[None,None])
parser.add_argument("-s","--spin",help="Plot spin-up and spin-down channels.",action="store_true")
parser.add_argument("-d","--debug",action='store_true',help="Debug flag.")
#parser.add_argument("--sym",help="Provide crystal symmetry for plot labels.",default=None)
parser.add_argument("--overlay",help="Seedname of second bands file containing a different bandstructure.",default=None)
parser.add_argument("--n_up",help="Indices of up bands to be highlighted",nargs="+")
parser.add_argument("--n_down",help="Indices of down bands to be highlighted",nargs="+")
parser.add_argument("-f","--flip",action="store_true",help="Plot with a global spin flip")
parser.add_argument("--fontsize",help="Font size",default=20)
parser.add_argument("--title",help="Add a title for saving")
parser.add_argument("--fig",help="add figure caption")
parser.add_argument("-e","--exe",help="File extension for saving",default="png")
parser.add_argument("--dos",help="Prodide some data files for DOS plots adjoining bandstructure",nargs="+")
parser.add_argument("--path",help="Compute a suitable band path for the cell and exit.",nargs="*")
parser.add_argument("--pdos",help="Use .pdos_bin file to project orbital information",action='store_true')
parser.add_argument("--species",help="Project pdos onto species rather than orbitals",action='store_true')
parser.add_argument("--phonon",help="Plot phonon dispersion curve",action='store_true')
parser.add_argument("-b","--bandgap",help="Indicate bandgap on plots",action="store_true")
parser.add_argument("--no_plot",help="Supress plotting of dispersions",action="store_true")
parser.add_argument("--overlay_labels",help="Legend labels for overlay plots",nargs=2,default=[None,None])
parser.add_argument("-E","--optados",help="Use castep fermi energy if optados error persists",action='store_true')
parser.add_argument("-as",'--aspect_ratio',help="Specify the aspect ratio of the dispersion plot.",choices=['letter','square'],default='square')
parser.add_argument('-z','--zero',help='Do not shift the Fermi level to 0 eV.',action='store_true')
parser.add_argument('--show',help='Supress plotting of spin bands',choices=['up','down','both'],default='both')
args = parser.parse_args()
seed = args.seed
save = args.save
multi= args.multi
linewidth=np.float(args.line)
lim= args.lim
debug=args.debug
spin_split=args.spin
#sym=args.sym
SOC=args.overlay
spin_polarised=False
n_up=args.n_up
n_down=args.n_down
flip=args.flip
text=float(args.fontsize)
title=args.title
fig_cap=args.fig
exe=args.exe
dos_files=args.dos
path=args.path
pdos=args.pdos
species=args.species
do_phonons=args.phonon
bg=args.bandgap
no_plot=args.no_plot
overlay_labels=args.overlay_labels
opt_err=args.optados
aspect=args.aspect_ratio
zero=args.zero
show=args.show
blockPrint()
def path_finder():
# Open the cell
path_str=bv_latt.special_path
path_points=[]
path_labels=[]
for L in path_str:
if L==",":
break
path_labels.append(L)
path_points.append(special_points[L])
print("%BLOCK SPECTRAL_KPOINT_PATH")
for i in range(len(path_labels)):
print("%.5f %.5f %.5f" %(path_points[i][0],path_points[i][1],path_points[i][2]),"#",path_labels[i])
print("%ENDBLOCK SPECTRAL_KPOINT_PATH")
# Dothe path and labels
cell=io.read(seed+".cell")
bv_latt=cell.cell.get_bravais_lattice()
special_points=bv_latt.get_special_points()
atoms=np.unique(cell.get_chemical_symbols())[::-1]
enablePrint()
if path==[]:
path_finder()
sys.exit()
else:
if path!=None:
path_points=[]
path_labels=[]
for i in path:
try:
path_point=special_points[i]
path_points.append(path_point)
path_labels.append(i)
except:
print()
print("Error: %s has no symmetry point %s"%(bv_latt.name,i))
sys.exit()
path_points.append(path_point)
path_labels.append(i)
print("%BLOCK SPECTRAL_KPOINT_PATH")
for j in range(len(path_labels)):
print("%.5f %.5f %.5f" %(path_points[j][0],path_points[j][1],path_points[j][2]),"#",path_labels[j])
print("%ENDBLOCK SPECTRAL_KPOINT_PATH")
sys.exit()
if n_up!=None:
n_up=np.array(n_up,dtype=int)-1
spin_split=False
else:
n_up=[]
if n_down!=None:
n_down=np.array(n_down,dtype=int)-1
spin_split=False
else:
n_down=[]
if SOC != None:
doSOC=True
else :
doSOC=False
if dos_files!=None:
do_dos=True
else:
do_dos=False
bands_file=True
if multi and spin_split:
multi=False
#if doSOC:
# multi=False
# spin_split=False
#set the colours
if spin_split:
spin_up="r"
spin_do="b"
elif flip:
spin_up="b"
spin_do="r"
else :
spin_up="black"
spin_do="black"
#calculate the pdos if needed
if pdos:
pdos_weights=pdos_read(seed,species)
if doSOC:
energy_array_soc,energy_array_soc2,sort_array_soc,kpoint_list_soc,kpoint_array_soc,no_spins_soc,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice2=calc_bands(SOC,zero,show)
if not do_phonons:
energy_array,energy_array_2,sort_array,kpoint_list,kpoint_array,no_spins,no_kpoints,fermi_energy,no_electrons,no_electrons_2,no_eigen,no_eigen_2,lattice=calc_bands(seed,zero,show)
if energy_array_2.shape[1]!=0:
vb_max_up=np.max(energy_array[:,int(no_electrons)-1])
vb_max_down=np.max(energy_array_2[:,int(no_electrons_2)-1])
cb_min_up=np.min(energy_array[:,int(no_electrons)])
cb_min_down=np.min(energy_array_2[:,int(no_electrons_2)])
band_gap_up=cb_min_up-vb_max_up
band_gap_down=cb_min_down-vb_max_down
print("Band gap (up) : %6.3f eV"%band_gap_up)
print("Band gap (down) : %6.3f eV"%band_gap_down)
vb_max_ind_up=np.where(energy_array[sort_array][:,int(no_electrons)-1]==vb_max_up)[0][-1]
vb_max_ind_down=np.where(energy_array_2[sort_array][:,int(no_electrons_2)-1]==vb_max_down)[0][-1]
cb_min_ind_up=np.where(energy_array[sort_array][:,int(no_electrons)]==cb_min_up)[0][-1]
cb_min_ind_down=np.where(energy_array_2[sort_array][:,int(no_electrons_2)]==cb_min_down)[0][-1]
k_max_loc_up=kpoint_array[sort_array][vb_max_ind_up]
k_max_loc_down=kpoint_array[sort_array][vb_max_ind_down]
k_min_loc_up=kpoint_array[sort_array][cb_min_ind_up]
k_min_loc_down=kpoint_array[sort_array][cb_min_ind_down]
else:
vb_max=np.max(energy_array[:,int(no_electrons/2)-1])
cb_min=np.min(energy_array[:,int(no_electrons/2)])
band_gap=cb_min-vb_max
print("Band gap : %6.3f eV"%band_gap)
vb_max_ind=np.where(energy_array[sort_array][:,int(no_electrons/2)-1]==vb_max)[0][-1]
cb_min_ind=np.where(energy_array[sort_array][:,int(no_electrons/2)]==cb_min)[0][-1]
k_max_loc=kpoint_array[sort_array][vb_max_ind]
k_min_loc=kpoint_array[sort_array][cb_min_ind]
else:
energy_array,sort_array,kpoint_list,kpoint_array,no_kpoints,no_ions,lattice=calc_phonons(seed)
a,b,c,alpha,beta,gamma=cart_to_abc(lattice)
a1,a2,a3=lattice[0],lattice[1],lattice[2]
b1=2*np.pi*np.cross(a2,a3)/(np.dot(a1,np.cross(a2,a3)))
b2=2*np.pi*np.cross(a3,a1)/(np.dot(a1,np.cross(a2,a3)))
b3=2*np.pi*np.cross(a1,a2)/(np.dot(a1,np.cross(a2,a3)))
kalpha=np.arccos(np.dot(a2,a3)/(np.linalg.norm(a2)*np.linalg.norm(a3)))
kbeta=np.arccos(np.dot(a1,a3)/(np.linalg.norm(a1)*np.linalg.norm(a3)))
kgamma=np.arccos(np.dot(a2,a1)/(np.linalg.norm(a2)*np.linalg.norm(a1)))
#matplotlib.rc('text', usetex = True)
# Here we do the analysis of the kpoints and the symmetry.. It's going to be horific!
#define all the greek letters we will use for weird ones
if no_plot:
sys.exit()
k_ticks=[]
for i,vec in enumerate(kpoint_list):
if check_sym(vec):
k_ticks.append(kpoint_array[i])
tol=1e-5
tol=[tol,tol,tol]
kpoint_grad=[]
for i in range(1,len(kpoint_list)):
diff=kpoint_list[i]-kpoint_list[i-1]
kpoint_grad.append(diff)
kpoint_2grad=[]
high_sym=[0]
for i in range(1,len(kpoint_grad)):
diff=kpoint_grad[i]-kpoint_grad[i-1]
kpoint_2grad.append(diff)
#print(diff)
if any(np.abs(diff)>tol):
# print(diff)
high_sym.append(i)
high_sym.append(len(kpoint_list)-1)
high_sym=np.array(high_sym)+1
##################### SOC ###################
if doSOC:
k_ticks_soc=[]
for i,vec in enumerate(kpoint_list_soc):
if check_sym(vec):
k_ticks_soc.append(kpoint_array_soc[i])
tol=1e-5
tol=[tol,tol,tol]
kpoint_grad_soc=[]
for i in range(1,len(kpoint_list_soc)):
diff=kpoint_list_soc[i]-kpoint_list_soc[i-1]
kpoint_grad_soc.append(diff)
kpoint_2grad_soc=[]
high_sym_soc=[0]
for i in range(1,len(kpoint_grad_soc)):
diff=kpoint_grad_soc[i]-kpoint_grad_soc[i-1]
kpoint_2grad_soc.append(diff)
#print(diff)
if any(np.abs(diff)>tol):
# print(diff)
high_sym_soc.append(i)
high_sym_soc.append(len(kpoint_list_soc)-1)
high_sym_soc=np.array(high_sym_soc)+1
#############################################
if len(high_sym)!=len(high_sym_soc):
print("Second Bandsstructure Does not match")
sys.exit()
for i in range(1,len(high_sym)):
high_up=int(high_sym[i])
high_low=int(high_sym[i-1])
soc_up=int(high_sym_soc[i])
soc_low=int(high_sym_soc[i-1])
nsoc=len(kpoint_array_soc[soc_low:soc_up])+1
nhigh=len(kpoint_array[high_low:high_up])+1
kpoint_array_soc[soc_low-1:soc_up]=np.linspace(high_low,high_up,nsoc,endpoint=True)
# Set up the plotting environment
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif',weight='bold')
#Do the fonts
#matplotlib.rcParams['font.sans-serif'] = "Times New Roman"#Comic Sans MS"
# Then, "ALWAYS use sans-serif fonts"
#matplotlib.rcParams['font.family'] = "sans-serif"
if not do_dos:
if aspect=='square':
aspect_r=(7,7)
else:
aspect_r=(9,7)
fig, ax = plt.subplots(figsize=aspect_r)
else:
from matplotlib.ticker import MaxNLocator
fig, (ax, ax2) = plt.subplots(1, 2,sharey=True, gridspec_kw={'hspace': 0,'wspace': 0,'width_ratios': [2.4, 1]},figsize=(11,7))
for file in dos_files:
pdos_dat=np.loadtxt(file)
shape=pdos_dat.shape[1]
if opt_err:
energy = pdos_dat[:,0]-fermi_energy*hartree
else:
energy = pdos_dat[:,0]
if lim[0]!= None:
if not zero:
mask = (energy >= float(lim[0])) & (energy <= float(lim[1]))
else:
mask = (energy >= float(lim[0])+fermi_energy*hartree) & (energy <= float(lim[1])+fermi_energy*hartree)
else:
if not zero:
ax2.set_ylim(lim[0],lim[1])
else:
ax2.set_ylim(lim[0]+fermi_energy*hartree,lim[1]+fermi_energy*hartree)
mask=[True]*len(energy)
[mask]
if shape==3:
ax2.plot(pdos_dat[:,1][mask],energy[mask],linewidth=linewidth,color="black")
if shape==5:
ax2.plot(2*(pdos_dat[:,1][mask]-pdos_dat[:,2][mask]),energy[mask],linewidth=linewidth,color="black")
if not zero:
ax2.axhline(0,color="0.6",dashes=[8, 8],linewidth=1,)
else:
ax2.axhline(fermi_energy*hartree,color="0.6",dashes=[8, 8],linewidth=1,)
ax2.tick_params(axis='both', which='major', labelsize=text,length=7)
ax2.set_xlabel(r"$\mathit{g}(\mathit{E}$) (states/eV)",fontsize=text)
ax2.xaxis.set_major_locator(MaxNLocator(4))
dos_ticks=ax2.get_xticks()
dos_ticks=np.delete(dos_ticks,0)
ax2.set_xticks(dos_ticks)
for vline in high_sym:
ax.axvline(vline,color="black",linewidth=1)
ax.set_xticks(high_sym)
if not zero:
ax.axhline(0,color="0.6",dashes=[8, 8],linewidth=1,)
else:
ax.axhline(fermi_energy*hartree,color="0.6",dashes=[8, 8],linewidth=1,)
if not do_phonons:
if not zero:
ax.set_ylabel(r'$\mathit{E}$-$\mathit{E}_{\mathrm{F}}$ (eV)',fontsize=text)
else:
ax.set_ylabel(r'$\mathit{E}$ (eV)',fontsize=text)
else:
ax.set_ylabel(r'$\omega$ (cm$^{-1}$)',fontsize=text)
ax.set_xlim(1,no_kpoints)
ax.tick_params(axis='both', which='major', labelsize=text,length=7)
if lim[0]!= None:
if not zero:
ax.set_ylim(float(lim[0]),float(lim[1]))
else:
ax.set_ylim(float(lim[0])+fermi_energy*hartree,float(lim[1])+fermi_energy*hartree)
#set the x labels
ticks= []
tol=1e-4
'''
if sym==None:
for vec in kpoint_list[high_sym-1]:
ticks.append("("+str(Fraction(vec[0]).limit_denominator())+","+str(Fraction(vec[1]).limit_denominator())+","+str(Fraction(vec[2]).limit_denominator())+")")
ax.set_xticklabels(ticks)
for tick in ax.get_xticklabels():
tick.set_rotation(-30)'''
ticks=[""]*len(high_sym)
found=False
for k_count,k in enumerate(kpoint_list[high_sym-1]):
found=False
for i in special_points:#sym_dict[sym]:
#if abs(sym_dict[sym][i][0]-k[0])<tol and abs(sym_dict[sym][i][1]-k[1])<tol and abs(sym_dict[sym][i][2]-k[2])<tol:
if abs(special_points[i][0]-k[0])<tol and abs(special_points[i][1]-k[1])<tol and abs(special_points[i][2]-k[2])<tol:
if i=="G":
ticks[k_count]="$\Gamma$"
else:
ticks[k_count]=i
found=True
#if not found:
# ticks.append("")
ax.set_xticklabels(ticks)
#plt.gcf().subplots_adjust(bottom=0.2)
n_colors=cycle(['blue','red','green','black','purple','orange','yellow','cyan'])
if bg:
if energy_array_2.shape[1]!=0:
ax.plot([k_max_loc_up,k_max_loc_up],[vb_max_up,cb_min_up],color='r',linewidth=linewidth*2)
ax.plot([k_max_loc_down,k_max_loc_down],[vb_max_down,cb_min_down],color='b',linewidth=linewidth*2)
ax.plot([k_max_loc_up,k_min_loc_up],[cb_min_up,cb_min_up],color='r',linewidth=linewidth*2)
ax.plot([k_max_loc_down,k_min_loc_down],[cb_min_down,cb_min_down],color='b',linewidth=linewidth*2)
ax.text(k_max_loc_up*1.05,vb_max_up+(-vb_max_up+cb_min_up)*0.8/2,"%4.2f eV"%band_gap_up,fontsize=text)
ax.text(k_max_loc_down*1.05,vb_max_down+(-vb_max_down+cb_min_down)*0.8/2,"%4.2f eV"%band_gap_down,fontsize=text)
else:
#ax.scatter(k_min_loc,cb_min)
#ax.scatter(k_max_loc,vb_max)
ax.plot([k_max_loc,k_max_loc],[vb_max,cb_min],color='k',linewidth=linewidth*2)
ax.plot([k_max_loc,k_min_loc],[cb_min,cb_min],color='k',linewidth=linewidth*2)
ax.text(k_max_loc*1.05,vb_max+(-vb_max+cb_min)*0.8/2,"%4.2f eV"%band_gap,fontsize=text)
if multi:
if not do_phonons:
ax.plot(kpoint_array[sort_array],energy_array[sort_array],linewidth=linewidth)
if no_spins==2:
if show=='up' or show=='both':
ax.plot(kpoint_array[sort_array],energy_array_2[sort_array])
else:
if show=='down' or show=='both':
ax.plot(kpoint_array[sort_array],energy_array[sort_array],linewidth=linewidth)
elif not do_phonons:
if pdos:
from matplotlib import colors
from matplotlib.colors import ListedColormap
from matplotlib.lines import Line2D
import matplotlib.collections as mcoll
import matplotlib.path as mpath
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def colorline(
x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha)
ax.add_collection(lc)
return lc
if species:
n_cat=len(atoms)
else:
n_cat=4
basis=[]
for i in range(n_cat):
basis.append(np.array(colors.to_rgba(next(n_colors))))
for nb in range(no_eigen):
# calculate the colour
cmap_array=np.zeros((len(kpoint_array),4))
for i in range(n_cat):
cmap_array[:,0]+=pdos_weights[i,nb,:,0]*basis[i][0]#/n_cat
cmap_array[:,1]+=pdos_weights[i,nb,:,0]*basis[i][1]#/n_cat
cmap_array[:,2]+=pdos_weights[i,nb,:,0]*basis[i][2]#/n_cat
cmap_array[:,3]+=pdos_weights[i,nb,:,0]*basis[i][3]#/n_cat
#cmap_array[:,0:3]=cmap_array[:,0:3]/n_cat
cmap_array=np.where(cmap_array>1,1,cmap_array)
cmap = ListedColormap(cmap_array)
z = np.linspace(0, 1, len(kpoint_array))
colorline(kpoint_array[sort_array], energy_array[sort_array][:,nb], z, cmap=cmap, linewidth=3)
ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,nb],linewidth=linewidth,alpha=0)
if no_spins==2:
for nb in range(no_eigen):
# calculate the colour
cmap_array=np.zeros((len(kpoint_array),4))
for i in range(n_cat):
cmap_array[:,0]+=pdos_weights[i,nb,:,1]*basis[i][0]#/n_cat
cmap_array[:,1]+=pdos_weights[i,nb,:,1]*basis[i][1]#/n_cat
cmap_array[:,2]+=pdos_weights[i,nb,:,1]*basis[i][2]#/n_cat
cmap_array[:,3]+=pdos_weights[i,nb,:,1]*basis[i][3]#/n_cat
#cmap_array[:,0:3]=cmap_array[:,0:3]/n_cat
cmap_array=np.where(cmap_array>1,1,cmap_array)
cmap = ListedColormap(cmap_array)
z = np.linspace(0, 1, len(kpoint_array))
colorline(kpoint_array[sort_array], energy_array_2[sort_array][:,nb], z, cmap=cmap, linewidth=3)
ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,nb],linewidth=linewidth,alpha=0)
custom_lines = []
labels=[]
for i in range(n_cat):
custom_lines.append(Line2D([0], [0], color=basis[i], lw=3))
if species:
labels.append(atoms[i])
else:
labels=["s","p","d","f"]
#custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
# Line2D([0], [0], color=cmap(.5), lw=4),
# Line2D([0], [0], color=cmap(1.), lw=4)]
ax.legend(custom_lines,labels,fontsize=text)
else:
if show=='up' or show=='both':
ax.plot(kpoint_array[sort_array],energy_array[sort_array],color=spin_up,label=overlay_labels[0],linewidth=linewidth)
for i in n_up:
ax.plot(kpoint_array[sort_array],energy_array[sort_array][:,i],linewidth=linewidth,color=next(n_colors))
c=1
if no_spins==2:
if show=='down' or show=='both':
ax.plot(kpoint_array[sort_array],energy_array_2[sort_array],color=spin_do,label=overlay_labels[0],linewidth=linewidth)
for i in n_down:
ax.plot(kpoint_array[sort_array],energy_array_2[sort_array][:,i],linewidth=linewidth,color=next(n_colors))
if doSOC:
#kpoint_array_soc=1+(kpoint_array[-1]-1)*(kpoint_array_soc-1)/(kpoint_array_soc[-1]-1)
ax.plot(kpoint_array_soc,energy_array_soc[sort_array_soc],color=spin_up,label=overlay_labels[1],linewidth=linewidth,linestyle="--")
if no_spins_soc==2:
ax.plot(kpoint_array_soc,energy_array_soc2[sort_array_soc],color=spin_do,label=overlay_labels[1],linewidth=linewidth,linestyle="--")
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
if not do_dos and overlay_labels[0]!=None:
plt.legend(by_label.values(), by_label.keys(),loc="upper right",fontsize=text)
else: #This is the part where we plot the phonons
ax.plot(kpoint_array[sort_array],energy_array[sort_array],color=spin_up,label="without SOC",linewidth=linewidth)
if spin_polarised and debug:
split_en=np.mean(energy_array-energy_array_2,axis=0)
if not do_dos:
plt.figtext(0.95, 0.96, fig_cap, wrap=True, horizontalalignment='center', fontsize=text)
else:
x=ax2.get_xlim()[1]*0.9
y=ax2.get_ylim()[1]*0.85
ax2.text(x,y,fig_cap,wrap=True, horizontalalignment='center', fontsize=text)
title_seed=seed#.replace("_","\_")
if save:
if title!=None:
plt.suptitle(title,fontsize=text)
if do_phonons:
plt.tight_layout()
fig.savefig(seed+"-phonon."+exe)
elif doSOC:
plt.tight_layout()
fig.savefig(seed+"-SOC-bs."+exe)
elif do_dos:
plt.tight_layout()
fig.savefig(seed+"-SOC-bs-dos."+exe)
else:
plt.tight_layout()
fig.savefig(seed+"-bs."+exe)
else:
plt.title(title_seed,fontsize=20)
plt.tight_layout()
plt.show()
# Script entry point: run the full band-structure/dispersion plotting workflow.
# NOTE(review): main_dispersion is presumably defined earlier in the file — confirm.
if __name__=='__main__':
    main_dispersion()
| [
"numpy.sqrt",
"numpy.arccos",
"matplotlib.collections.LineCollection",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"matplotlib.rc",
"sys.exit",
"numpy.linalg.norm",
"matplotlib.lines.Line2D",
"numpy.mean",
"numpy.cross",
"argparse.ArgumentParser",
"numpy.where",
"numpy.delete",
"matp... | [((933, 989), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.25, 0.75, 0.33333333, 0.66666667]'], {}), '([0.5, 0.0, 0.25, 0.75, 0.33333333, 0.66666667])\n', (941, 989), True, 'import numpy as np\n'), ((1080, 1114), 'scipy.io.FortranFile', 'FF', (["(seed + '.pdos_bin')", '"""r"""', '""">u4"""'], {}), "(seed + '.pdos_bin', 'r', '>u4')\n", (1082, 1114), True, 'from scipy.io import FortranFile as FF\n'), ((1516, 1542), 'numpy.zeros', 'np.zeros', (['(num_kpoints, 3)'], {}), '((num_kpoints, 3))\n', (1524, 1542), True, 'import numpy as np\n'), ((1559, 1624), 'numpy.zeros', 'np.zeros', (['(num_popn_orb, max_eigenvalues, num_kpoints, num_spins)'], {}), '((num_popn_orb, max_eigenvalues, num_kpoints, num_spins))\n', (1567, 1624), True, 'import numpy as np\n'), ((3305, 3356), 'numpy.where', 'np.where', (['(pdos_weights_sum > 1)', '(1)', 'pdos_weights_sum'], {}), '(pdos_weights_sum > 1, 1, pdos_weights_sum)\n', (3313, 3356), True, 'import numpy as np\n'), ((3374, 3425), 'numpy.where', 'np.where', (['(pdos_weights_sum < 0)', '(0)', 'pdos_weights_sum'], {}), '(pdos_weights_sum < 0, 0, pdos_weights_sum)\n', (3382, 3425), True, 'import numpy as np\n'), ((3433, 3462), 'numpy.round', 'np.round', (['pdos_weights_sum', '(7)'], {}), '(pdos_weights_sum, 7)\n', (3441, 3462), True, 'import numpy as np\n'), ((3504, 3573), 'numpy.sqrt', 'np.sqrt', (['(lattice[0, 0] ** 2 + lattice[0, 1] ** 2 + lattice[0, 2] ** 2)'], {}), '(lattice[0, 0] ** 2 + lattice[0, 1] ** 2 + lattice[0, 2] ** 2)\n', (3511, 3573), True, 'import numpy as np\n'), ((3569, 3638), 'numpy.sqrt', 'np.sqrt', (['(lattice[1, 0] ** 2 + lattice[1, 1] ** 2 + lattice[1, 2] ** 2)'], {}), '(lattice[1, 0] ** 2 + lattice[1, 1] ** 2 + lattice[1, 2] ** 2)\n', (3576, 3638), True, 'import numpy as np\n'), ((3640, 3709), 'numpy.sqrt', 'np.sqrt', (['(lattice[2, 0] ** 2 + lattice[2, 1] ** 2 + lattice[2, 2] ** 2)'], {}), '(lattice[2, 0] ** 2 + lattice[2, 1] ** 2 + lattice[2, 2] ** 2)\n', (3647, 3709), True, 'import numpy as np\n'), ((3810, 
3826), 'numpy.arccos', 'np.arccos', (['alpha'], {}), '(alpha)\n', (3819, 3826), True, 'import numpy as np\n'), ((3939, 3954), 'numpy.arccos', 'np.arccos', (['beta'], {}), '(beta)\n', (3948, 3954), True, 'import numpy as np\n'), ((4067, 4083), 'numpy.arccos', 'np.arccos', (['gamma'], {}), '(gamma)\n', (4076, 4083), True, 'import numpy as np\n'), ((4507, 4523), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4515, 4523), True, 'import numpy as np\n'), ((4703, 4744), 'numpy.empty', 'np.empty', ([], {'shape': '(no_kpoints, no_branches)'}), '(shape=(no_kpoints, no_branches))\n', (4711, 4744), True, 'import numpy as np\n'), ((4762, 4788), 'numpy.empty', 'np.empty', ([], {'shape': 'no_kpoints'}), '(shape=no_kpoints)\n', (4770, 4788), True, 'import numpy as np\n'), ((6715, 6731), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6723, 6731), True, 'import numpy as np\n'), ((6935, 6973), 'numpy.empty', 'np.empty', ([], {'shape': '(no_kpoints, no_eigen)'}), '(shape=(no_kpoints, no_eigen))\n', (6943, 6973), True, 'import numpy as np\n'), ((6992, 7032), 'numpy.empty', 'np.empty', ([], {'shape': '(no_kpoints, no_eigen_2)'}), '(shape=(no_kpoints, no_eigen_2))\n', (7000, 7032), True, 'import numpy as np\n'), ((7050, 7076), 'numpy.empty', 'np.empty', ([], {'shape': 'no_kpoints'}), '(shape=no_kpoints)\n', (7058, 7076), True, 'import numpy as np\n'), ((9399, 9432), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (9422, 9432), False, 'import warnings\n'), ((9644, 9678), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (9657, 9678), False, 'import matplotlib\n'), ((9685, 9709), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""classic"""'], {}), "('classic')\n", (9698, 9709), True, 'import matplotlib.pyplot as plt\n'), ((9865, 9963), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Utillity for plotting bandstructurs from a CASTEP 
run."""'}), "(description=\n 'Utillity for plotting bandstructurs from a CASTEP run.')\n", (9888, 9963), False, 'import argparse\n'), ((12807, 12826), 'numpy.float', 'np.float', (['args.line'], {}), '(args.line)\n', (12815, 12826), True, 'import numpy as np\n'), ((14031, 14054), 'ase.io.read', 'io.read', (["(seed + '.cell')"], {}), "(seed + '.cell')\n", (14038, 14054), True, 'import ase.io as io\n'), ((25752, 25830), 'itertools.cycle', 'cycle', (["['blue', 'red', 'green', 'black', 'purple', 'orange', 'yellow', 'cyan']"], {}), "(['blue', 'red', 'green', 'black', 'purple', 'orange', 'yellow', 'cyan'])\n", (25757, 25830), False, 'from itertools import cycle\n'), ((2355, 2419), 'numpy.zeros', 'np.zeros', (['(num_species, max_eigenvalues, num_kpoints, num_spins)'], {}), '((num_species, max_eigenvalues, num_kpoints, num_spins))\n', (2363, 2419), True, 'import numpy as np\n'), ((2676, 2741), 'numpy.zeros', 'np.zeros', (['(num_orbitals, max_eigenvalues, num_kpoints, num_spins)'], {}), '((num_orbitals, max_eigenvalues, num_kpoints, num_spins))\n', (2684, 2741), True, 'import numpy as np\n'), ((2760, 2814), 'numpy.zeros', 'np.zeros', (['(3, max_eigenvalues, num_kpoints, num_spins)'], {}), '((3, max_eigenvalues, num_kpoints, num_spins))\n', (2768, 2814), True, 'import numpy as np\n'), ((2823, 2842), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2831, 2842), True, 'import numpy as np\n'), ((2851, 2870), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (2859, 2870), True, 'import numpy as np\n'), ((2879, 2898), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2887, 2898), True, 'import numpy as np\n'), ((2907, 2926), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2915, 2926), True, 'import numpy as np\n'), ((5696, 5717), 'numpy.array', 'np.array', (['kpoint_list'], {}), '(kpoint_list)\n', (5704, 5717), True, 'import numpy as np\n'), ((8803, 8824), 'numpy.array', 'np.array', (['kpoint_list'], {}), 
'(kpoint_list)\n', (8811, 8824), True, 'import numpy as np\n'), ((14277, 14287), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14285, 14287), False, 'import sys\n'), ((19160, 19170), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19168, 19170), False, 'import sys\n'), ((19816, 19834), 'numpy.array', 'np.array', (['high_sym'], {}), '(high_sym)\n', (19824, 19834), True, 'import numpy as np\n'), ((21847, 21877), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'aspect_r'}), '(figsize=aspect_r)\n', (21859, 21877), True, 'import matplotlib.pyplot as plt\n'), ((21963, 22081), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'gridspec_kw': "{'hspace': 0, 'wspace': 0, 'width_ratios': [2.4, 1]}", 'figsize': '(11, 7)'}), "(1, 2, sharey=True, gridspec_kw={'hspace': 0, 'wspace': 0,\n 'width_ratios': [2.4, 1]}, figsize=(11, 7))\n", (21975, 22081), True, 'import matplotlib.pyplot as plt\n'), ((23587, 23610), 'numpy.delete', 'np.delete', (['dos_ticks', '(0)'], {}), '(dos_ticks, 0)\n', (23596, 23610), True, 'import numpy as np\n'), ((34354, 34400), 'numpy.mean', 'np.mean', (['(energy_array - energy_array_2)'], {'axis': '(0)'}), '(energy_array - energy_array_2, axis=0)\n', (34361, 34400), True, 'import numpy as np\n'), ((34443, 34535), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.95)', '(0.96)', 'fig_cap'], {'wrap': '(True)', 'horizontalalignment': '"""center"""', 'fontsize': 'text'}), "(0.95, 0.96, fig_cap, wrap=True, horizontalalignment='center',\n fontsize=text)\n", (34454, 34535), True, 'import matplotlib.pyplot as plt\n'), ((35224, 35258), 'matplotlib.pyplot.title', 'plt.title', (['title_seed'], {'fontsize': '(20)'}), '(title_seed, fontsize=20)\n', (35233, 35258), True, 'import matplotlib.pyplot as plt\n'), ((35266, 35284), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (35282, 35284), True, 'import matplotlib.pyplot as plt\n'), ((35293, 35303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(35301, 35303), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2328), 'numpy.unique', 'np.unique', (['orbital_species'], {}), '(orbital_species)\n', (2311, 2328), True, 'import numpy as np\n'), ((2545, 2587), 'numpy.sum', 'np.sum', (['pdos_weights[loc, :, :, :]'], {'axis': '(0)'}), '(pdos_weights[loc, :, :, :], axis=0)\n', (2551, 2587), True, 'import numpy as np\n'), ((15129, 15139), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15137, 15139), False, 'import sys\n'), ((15194, 15219), 'numpy.array', 'np.array', (['n_up'], {'dtype': 'int'}), '(n_up, dtype=int)\n', (15202, 15219), True, 'import numpy as np\n'), ((15308, 15335), 'numpy.array', 'np.array', (['n_down'], {'dtype': 'int'}), '(n_down, dtype=int)\n', (15316, 15335), True, 'import numpy as np\n'), ((18525, 18541), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (18533, 18541), True, 'import numpy as np\n'), ((18552, 18568), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (18560, 18568), True, 'import numpy as np\n'), ((18585, 18601), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (18593, 18601), True, 'import numpy as np\n'), ((18612, 18628), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (18620, 18628), True, 'import numpy as np\n'), ((18645, 18661), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (18653, 18661), True, 'import numpy as np\n'), ((18672, 18688), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (18680, 18688), True, 'import numpy as np\n'), ((18711, 18725), 'numpy.dot', 'np.dot', (['a2', 'a3'], {}), '(a2, a3)\n', (18717, 18725), True, 'import numpy as np\n'), ((18786, 18800), 'numpy.dot', 'np.dot', (['a1', 'a3'], {}), '(a1, a3)\n', (18792, 18800), True, 'import numpy as np\n'), ((18862, 18876), 'numpy.dot', 'np.dot', (['a2', 'a1'], {}), '(a2, a1)\n', (18868, 18876), True, 'import numpy as np\n'), ((20707, 20729), 'numpy.array', 'np.array', (['high_sym_soc'], {}), '(high_sym_soc)\n', (20715, 20729), True, 'import 
numpy as np\n'), ((20911, 20921), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20919, 20921), False, 'import sys\n'), ((21293, 21344), 'numpy.linspace', 'np.linspace', (['high_low', 'high_up', 'nsoc'], {'endpoint': '(True)'}), '(high_low, high_up, nsoc, endpoint=True)\n', (21304, 21344), True, 'import numpy as np\n'), ((22125, 22141), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (22135, 22141), True, 'import numpy as np\n'), ((23517, 23531), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['(4)'], {}), '(4)\n', (23528, 23531), False, 'from matplotlib.ticker import MaxNLocator\n'), ((34785, 34819), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': 'text'}), '(title, fontsize=text)\n', (34797, 34819), True, 'import matplotlib.pyplot as plt\n'), ((34859, 34877), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34875, 34877), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2164), 'numpy.sum', 'np.sum', (['pdos_weights[0:num_popn_orb, nb, nk, ns]'], {}), '(pdos_weights[0:num_popn_orb, nb, nk, ns])\n', (2122, 2164), True, 'import numpy as np\n'), ((2473, 2507), 'numpy.where', 'np.where', (['(orbital_species == i + 1)'], {}), '(orbital_species == i + 1)\n', (2481, 2507), True, 'import numpy as np\n'), ((3032, 3056), 'numpy.where', 'np.where', (['(orbital_l == i)'], {}), '(orbital_l == i)\n', (3040, 3056), True, 'import numpy as np\n'), ((3128, 3170), 'numpy.sum', 'np.sum', (['pdos_weights[loc, :, :, :]'], {'axis': '(0)'}), '(pdos_weights[loc, :, :, :], axis=0)\n', (3134, 3170), True, 'import numpy as np\n'), ((9213, 9229), 'numpy.isclose', 'np.isclose', (['i', 'j'], {}), '(i, j)\n', (9223, 9229), True, 'import numpy as np\n'), ((18726, 18744), 'numpy.linalg.norm', 'np.linalg.norm', (['a2'], {}), '(a2)\n', (18740, 18744), True, 'import numpy as np\n'), ((18745, 18763), 'numpy.linalg.norm', 'np.linalg.norm', (['a3'], {}), '(a3)\n', (18759, 18763), True, 'import numpy as np\n'), ((18801, 18819), 
'numpy.linalg.norm', 'np.linalg.norm', (['a1'], {}), '(a1)\n', (18815, 18819), True, 'import numpy as np\n'), ((18820, 18838), 'numpy.linalg.norm', 'np.linalg.norm', (['a3'], {}), '(a3)\n', (18834, 18838), True, 'import numpy as np\n'), ((18877, 18895), 'numpy.linalg.norm', 'np.linalg.norm', (['a2'], {}), '(a2)\n', (18891, 18895), True, 'import numpy as np\n'), ((18896, 18914), 'numpy.linalg.norm', 'np.linalg.norm', (['a1'], {}), '(a1)\n', (18910, 18914), True, 'import numpy as np\n'), ((19682, 19694), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (19688, 19694), True, 'import numpy as np\n'), ((34955, 34973), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34971, 34973), True, 'import matplotlib.pyplot as plt\n'), ((20545, 20557), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (20551, 20557), True, 'import numpy as np\n'), ((28121, 28170), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (28135, 28170), True, 'import numpy as np\n'), ((28299, 28321), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""copper"""'], {}), "('copper')\n", (28311, 28321), True, 'import matplotlib.pyplot as plt\n'), ((28328, 28351), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (28341, 28351), True, 'import matplotlib.pyplot as plt\n'), ((29030, 29043), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (29040, 29043), True, 'import numpy as np\n'), ((29150, 29250), 'matplotlib.collections.LineCollection', 'mcoll.LineCollection', (['segments'], {'array': 'z', 'cmap': 'cmap', 'norm': 'norm', 'linewidth': 'linewidth', 'alpha': 'alpha'}), '(segments, array=z, cmap=cmap, norm=norm, linewidth=\n linewidth, alpha=alpha)\n', (29170, 29250), True, 'import matplotlib.collections as mcoll\n'), ((35052, 35070), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (35068, 35070), True, 'import matplotlib.pyplot as 
plt\n'), ((35146, 35164), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (35162, 35164), True, 'import matplotlib.pyplot as plt\n'), ((14749, 14759), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14757, 14759), False, 'import sys\n'), ((30314, 30353), 'numpy.where', 'np.where', (['(cmap_array > 1)', '(1)', 'cmap_array'], {}), '(cmap_array > 1, 1, cmap_array)\n', (30322, 30353), True, 'import numpy as np\n'), ((30377, 30403), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['cmap_array'], {}), '(cmap_array)\n', (30391, 30403), False, 'from matplotlib.colors import ListedColormap\n'), ((32008, 32046), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': 'basis[i]', 'lw': '(3)'}), '([0], [0], color=basis[i], lw=3)\n', (32014, 32046), False, 'from matplotlib.lines import Line2D\n'), ((31448, 31487), 'numpy.where', 'np.where', (['(cmap_array > 1)', '(1)', 'cmap_array'], {}), '(cmap_array > 1, 1, cmap_array)\n', (31456, 31487), True, 'import numpy as np\n'), ((31515, 31541), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['cmap_array'], {}), '(cmap_array)\n', (31529, 31541), False, 'from matplotlib.colors import ListedColormap\n'), ((33821, 33830), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (33828, 33830), True, 'import matplotlib.pyplot as plt\n'), ((28023, 28039), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (28031, 28039), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 10:49:26 2019
@author: fg010
"""
#%%
from __future__ import print_function,absolute_import, division, print_function, unicode_literals
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler,CSVLogger
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
import numpy as np
import os
import random
import tensorflow as tf
from create_tf_record import *
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
from keras import regularizers
from keras.models import load_model
#%%
# Load a previously trained Keras model checkpoint from disk.
# NOTE(review): the path is hard-coded and contains a literal "%s" that was
# apparently never substituted — verify this file actually exists on disk.
model = load_model("D:\\OneDrive\\工作\\富港万嘉\\TensorFlow\\saved_models\\TL_0_%s_model.036_0.8795.h5")
# x_test = np.load('x_test_64.npy')
# y_test = np.load('y_test.npy')
#%%
def load_labels_file(filename, labels_num=1, shuffle=False):
    '''
    Load an image-list text file. Each line holds one record with
    space-separated fields: "image_path label1 label2 ...",
    e.g. "test_image/1.jpg 0 2".
    :param filename: path of the list file
    :param labels_num: number of integer labels expected per line
    :param shuffle: whether to shuffle the line order before parsing
    :return: (images, labels) where images is a list of path strings and
             labels is a list of int lists (length labels_num each)
    '''
    images = []
    labels = []
    with open(filename) as f:
        lines_list = f.readlines()
        if shuffle:
            random.shuffle(lines_list)
        for lines in lines_list:
            line = lines.rstrip().split(' ')
            # Robustness fix: skip blank/short lines (e.g. a trailing newline)
            # instead of crashing with an IndexError on line[i + 1].
            if len(line) < labels_num + 1:
                continue
            label = [int(line[i + 1]) for i in range(labels_num)]
            images.append(line[0])
            labels.append(label)
    return images, labels
def read_image(filename, resize_height, resize_width, normalization=False):
    '''
    Read an image file and return it as an RGB array.

    By default the pixel data is returned as uint8 in [0, 255].
    :param filename: path of the image file (decoded via np.fromfile +
        cv2.imdecode so that non-ASCII paths work on Windows)
    :param resize_height: target height; resizing happens only when both
        resize_height and resize_width are > 0
    :param resize_width: target width
    :param normalization: when True, scale pixel values to floats in [0., 1.0]
    :return: the image as an RGB numpy array
    '''
    raw_bytes = np.fromfile(filename, dtype=np.uint8)
    bgr_image = cv2.imdecode(raw_bytes, 1)
    # Grayscale files decode as 2-D arrays; promote them to three channels.
    if bgr_image.ndim == 2:
        print("Warning:gray image", filename)
        bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
    # OpenCV decodes in BGR order; convert to the conventional RGB order.
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    should_resize = resize_height > 0 and resize_width > 0
    if should_resize:
        rgb_image = cv2.resize(rgb_image, (resize_width, resize_height))
    rgb_image = np.asanyarray(rgb_image)
    # Note: plain division (not in-place) so uint8 input becomes float output.
    return rgb_image / 255.0 if normalization else rgb_image
def create_records(image_dir, file, resize_height, resize_width, shuffle=False, log=100, normalization=True):
    '''
    Load every image listed in the text file together with its first label.

    Each line of `file` is "relative_path label1 [label2 ...]"; the image is
    read from image_dir/relative_path. Only the first label per line is kept.
    Missing files are skipped silently; unreadable images are counted and
    skipped.
    :param image_dir: root directory of the raw images
    :param file: txt list file mapping image paths to labels
    :param resize_height: target height (resize skipped when <= 0, see read_image)
    :param resize_width: target width (resize skipped when <= 0, see read_image)
    :param shuffle: whether to shuffle the line order of the list file
    :param log: progress-print interval (currently unused; kept for
        backward compatibility with existing callers)
    :param normalization: when True, scale pixel values to [0., 1.0]
    :return: (images, labels) as numpy arrays
    '''
    # Load the list file, keeping one label per image.
    images_list, labels_list = load_labels_file(file, 1, shuffle)
    error = 0  # number of images that failed to decode
    images = []
    images_labels = []
    for image_name, labels in zip(images_list, labels_list):
        image_path = os.path.join(image_dir, image_name)
        if not os.path.exists(image_path):
            continue
        try:
            image = read_image(image_path, resize_height, resize_width)
        except Exception:
            # Narrowed from a bare `except:` so that KeyboardInterrupt and
            # SystemExit still propagate; decode failures are counted/skipped.
            error += 1
            continue
        images.append(image)
        images_labels.append(labels[0])
    # Single conversion path replaces the previously duplicated branches.
    images = np.array(images)
    if normalization:
        # Scale uint8 pixel data to floats in [0, 1].
        images = images / 255.0
    images_labels = np.array(images_labels)
    return images, images_labels
#%%
# import os
# import os.path
# import pandas as pd
# def write_txt(content, filename, mode='w'):
# """保存txt数据
# :param content:需要保存的数据,type->list
# :param filename:文件名
# :param mode:读写模式:'w' or 'a'
# :return: void
# """
# with open(filename, mode) as f:
# for line in content:
# str_line = ""
# for col, data in enumerate(line):
# if not col == len(line) - 1:
# # 以空格作为分隔符
# str_line = str_line + str(data) + " "
# else:
# # 每行最后一个数据用换行符“\n”
# str_line = str_line + str(data) + "\n"
# f.write(str_line)
# def get_files_list(dir,lable_dir):
# '''
# 实现遍历dir目录下,所有文件(包含子文件夹的文件)
# :param dir:指定文件夹目录
# :return:包含所有文件的列表->list
# '''
# # parent:父目录, filenames:该目录下所有文件夹,filenames:该目录下的文件名
# files_list = []
# label_data = pd.read_csv(lable_dir,header=0)
# for parent, dirnames, filenames in os.walk(dir):
# for filename in filenames:
# # print("parent is: " + parent)
# # print("filename is: " + filename)
# # print(os.path.join(parent, filename)) # 输出rootdir路径下所有文件(包含子文件)信息
# curr_file=parent.split(os.sep)[-1]
# labels = label_data[label_data['物品'] == curr_file].index.tolist()[0]
# files_list.append([os.path.join(curr_file, filename),labels])#文件夹下路径
# # files_list.append([os.path.join(parent,filename),labels])#绝对路径
# print()
# return files_list
# lable_dir = 'D:\\data\\classification\\label.csv'
# train_dir = 'D:\\data\\classification\\test2'
# # train_txt='D:\\data\\classification\\train.txt'
# train_txt='D:\\data\\classification\\test2.txt'
# train_data = get_files_list(train_dir,lable_dir)
# write_txt(train_data,train_txt,mode='w')
#%%
# Class names of the produce categories.
# NOTE(review): presumably the index in this list equals the integer label id
# used in the list file — confirm against the label.csv mapping.
name = ['丝瓜', '中华猕猴桃', '冬瓜', '南瓜', '哈密瓜', '大白菜', '大蒜', '快圆茄', '木瓜', '杨桃', '杨梅', '枣', '柚', '柠檬', '柿', '桂圆', '桃', '桑葚', '梨', '椰子', '樱桃', '橙', '沙棘', '油麦菜', '洋葱', '甜椒', '番茄', '白萝卜', '百合', '秋葵', '紫皮大蒜', '细香葱', '胡萝卜', '节瓜', '芒果', '芥蓝', '芹菜', '苦瓜', '苹果', '茄子', '茼蒿', '草莓', '荔枝', '荷兰豆', '荸荠', '莴苣', '菠菜', '菠萝', '菠萝蜜', '葡萄', '藕', '西兰花', '西瓜', '豆角', '豌豆', '辣椒', '青萝卜', '韭菜', '韭黄', '香菜', '香蕉', '鳄梨', '黄瓜', '黄皮果', '黄豆芽']
# Root directory of the cleaned test images (one sub-folder per class) and
# the list file mapping image paths to labels.
# NOTE(review): `name` and `file` shadow Python builtins — consider renaming.
dirs = 'D:\\data\\classification\\data_clean\\test_clean\\'
file = 'D:\\data\\classification\\data_clean\\test_clean.txt'
import numpy as np
from sklearn.metrics import classification_report
# Load one class sub-folder ('丝瓜') as the evaluation set: images resized to
# 224x224 and (by default) scaled to [0, 1] by create_records.
x_test,y_test = create_records(dirs + '丝瓜',file, resize_height=224, resize_width =224,shuffle = False,log=100)
# Commented-out per-class evaluation loop (kept for reference):
# for i in name:
#     # print(dirs + i)
#     x_test,y_test = create_records(dirs + i,file, resize_height=224, resize_width =224,shuffle = False,log=100)
#     predict_test = model.predict(x_test)
#     predict = np.argmax(predict_test,axis=1)
#     # scores = model.evaluate(x_test, y_test, verbose=1)
#     # print(i)
#     # print('Test accuracy:', predict)
#     print(i+': '+(y_test == predict).sum()/len(predict))
#     # # open(os.path.join(dirs,i))
#     # print(os.path.join(dirs,i))
#%%
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1]) | [
"os.path.exists",
"numpy.fromfile",
"keras.models.load_model",
"random.shuffle",
"os.path.join",
"numpy.asanyarray",
"numpy.array"
] | [((933, 1034), 'keras.models.load_model', 'load_model', (['"""D:\\\\OneDrive\\\\工作\\\\富港万嘉\\\\TensorFlow\\\\saved_models\\\\TL_0_%s_model.036_0.8795.h5"""'], {}), "(\n 'D:\\\\OneDrive\\\\工作\\\\富港万嘉\\\\TensorFlow\\\\saved_models\\\\TL_0_%s_model.036_0.8795.h5'\n )\n", (943, 1034), False, 'from keras.models import load_model\n'), ((2608, 2632), 'numpy.asanyarray', 'np.asanyarray', (['rgb_image'], {}), '(rgb_image)\n', (2621, 2632), True, 'import numpy as np\n'), ((2132, 2169), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'np.uint8'}), '(filename, dtype=np.uint8)\n', (2143, 2169), True, 'import numpy as np\n'), ((3670, 3709), 'os.path.join', 'os.path.join', (['image_dir', 'images_list[i]'], {}), '(image_dir, images_list[i])\n', (3682, 3709), False, 'import os\n'), ((4551, 4574), 'numpy.array', 'np.array', (['images_labels'], {}), '(images_labels)\n', (4559, 4574), True, 'import numpy as np\n'), ((4725, 4741), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (4733, 4741), True, 'import numpy as np\n'), ((4767, 4790), 'numpy.array', 'np.array', (['images_labels'], {}), '(images_labels)\n', (4775, 4790), True, 'import numpy as np\n'), ((1522, 1548), 'random.shuffle', 'random.shuffle', (['lines_list'], {}), '(lines_list)\n', (1536, 1548), False, 'import random\n'), ((3753, 3779), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (3767, 3779), False, 'import os\n'), ((4503, 4519), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (4511, 4519), True, 'import numpy as np\n')] |
import sys
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
import keras
class LIMESimpleModel:
    """Cluster-wise linear surrogate model (LIME-style approximation).

    The input space is partitioned with a clustering algorithm, and a separate
    linear regression is fitted per cluster. Predictions route each sample to
    the linear model of its assigned cluster.
    """

    def __init__(self, cluster_num, cluster_method=KMeans, random_state=None):
        """Prepare the clusterer; per-cluster linear models are fitted later."""
        self.random_state = check_random_state(random_state)
        self.cluster_num = cluster_num
        self.cluster_method = cluster_method(
            n_clusters=cluster_num, random_state=self.random_state)
        self.models = []

    def fit(self, X, y, predict_fn, labels_num):
        """Cluster X, then fit one (coef, intercept) linear model per cluster.

        Note: predict_fn is accepted for interface compatibility but not used.
        """
        self.cluster_labels = self.cluster_method.fit_predict(X)
        self.labels_num = labels_num
        for cluster_id in range(self.cluster_num):
            member_idx = np.where(self.cluster_labels == cluster_id)
            regressor = LinearRegression()
            regressor.fit(X[member_idx], y[member_idx])
            self.models.append((regressor.coef_.T, regressor.intercept_))

    def predict(self, x):
        """Return the argmax class index predicted for each row of x."""
        assignments = self.cluster_method.predict(x)
        result = np.zeros(x.shape[0])
        for cluster_id, (coef, intercept) in enumerate(self.models):
            member_idx = np.where(assignments == cluster_id)
            if not len(member_idx[0]):
                # No samples landed in this cluster.
                continue
            class_scores = np.dot(x[member_idx], coef) + intercept
            result[member_idx] = np.argmax(class_scores, axis=1)
        return result

    def predict_reg(self, x):
        """Return the raw per-class linear scores for each row of x."""
        assignments = self.cluster_method.predict(x)
        scores = np.zeros((x.shape[0], self.labels_num))
        for cluster_id, (coef, intercept) in enumerate(self.models):
            member_idx = np.where(assignments == cluster_id)
            if not len(member_idx[0]):
                continue
            scores[member_idx] = np.dot(x[member_idx], coef) + intercept
        return scores
def _create_long_network():
    """Build the dense Keras model: 143 inputs -> 300 sigmoid units with
    batch normalization -> 36-way softmax output."""
    inputs = keras.Input(shape=(143, ))
    hidden = keras.layers.Dense(
        300, kernel_initializer="glorot_uniform",
        activation="sigmoid")(inputs)
    normalized = keras.layers.BatchNormalization()(hidden)
    # output layer
    outputs = keras.layers.Dense(
        36, kernel_initializer="glorot_uniform",
        activation="softmax")(normalized)
    # return an instance of the Model class
    return keras.Model(inputs=inputs, outputs=outputs)
if __name__ == "__main__":
    # Evaluate how faithfully cluster-wise linear surrogates (LIME-style)
    # mimic the trained long network, sweeping the number of clusters
    # 1..50 and repeating each sweep 20 times with fresh train/test splits.
    long_features = pd.read_csv(
        "./features/long_features.csv", header=None, nrows=160000)
    long_features = long_features.dropna().values
    with open(f"./lime_extended_performance_{sys.argv[1]}.csv", "w") as FILE:
        for j in range(20):
            for i in range(0, 50, 1):
                model = _create_long_network()
                model.load_weights(
                    "./weights/tmp_long_rla_weights")
                feature_train, feature_test = train_test_split(
                    long_features, test_size=0.2)
                # The network's outputs are the surrogate's regression targets.
                action_train = model.predict(feature_train)
                action_test = model.predict(feature_test)
                pca = PCA(n_components=48)
                feature_train = pca.fit_transform(feature_train)
                feature_test = pca.transform(feature_test)
                lime_model = LIMESimpleModel(cluster_num=i + 1)
                lime_model.fit(
                    X=feature_train,
                    y=action_train,
                    predict_fn=model.predict,
                    labels_num=36)
                lime_action_train = lime_model.predict(feature_train)
                lime_action_test = lime_model.predict(feature_test)
                # Reference actions are the network's argmax choices.
                ref_train = np.argmax(action_train, axis=1)
                ref_test = np.argmax(action_test, axis=1)
                # Bug fix: the original assigned the test accuracy to
                # `train_rmse` and the train RMSE to `test_accuracy`,
                # scrambling the CSV column semantics. Each metric now
                # matches its name, and the row is:
                # (cluster_num, train_acc, test_acc, train_rmse, test_rmse).
                train_accuracy = np.mean(
                    np.int32(lime_action_train == ref_train))
                test_accuracy = np.mean(
                    np.int32(lime_action_test == ref_test))
                train_rmse = np.sqrt(
                    np.mean(np.square(lime_action_train - ref_train)))
                test_rmse = np.sqrt(
                    np.mean(np.square(lime_action_test - ref_test)))
                FILE.write(f"{i + 1}, {train_accuracy}, {test_accuracy}"
                           f", {train_rmse}, {test_rmse}\n")
| [
"sklearn.utils.check_random_state",
"pandas.read_csv",
"keras.Model",
"numpy.where",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA",
"numpy.argmax",
"keras.Input",
"numpy.zeros",
"numpy.dot",
"keras.layers.Dense",
"keras.layers.BatchNormalization",
"sklearn.linear_mo... | [((2194, 2219), 'keras.Input', 'keras.Input', ([], {'shape': '(143,)'}), '(shape=(143,))\n', (2205, 2219), False, 'import keras\n'), ((2636, 2683), 'keras.Model', 'keras.Model', ([], {'inputs': 'in_layer', 'outputs': 'out_layer'}), '(inputs=in_layer, outputs=out_layer)\n', (2647, 2683), False, 'import keras\n'), ((2737, 2807), 'pandas.read_csv', 'pd.read_csv', (['"""./features/long_features.csv"""'], {'header': 'None', 'nrows': '(160000)'}), "('./features/long_features.csv', header=None, nrows=160000)\n", (2748, 2807), True, 'import pandas as pd\n'), ((431, 463), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (449, 463), False, 'from sklearn.utils import check_random_state\n'), ((1285, 1305), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (1293, 1305), True, 'import numpy as np\n'), ((1810, 1849), 'numpy.zeros', 'np.zeros', (['(x.shape[0], self.labels_num)'], {}), '((x.shape[0], self.labels_num))\n', (1818, 1849), True, 'import numpy as np\n'), ((2241, 2328), 'keras.layers.Dense', 'keras.layers.Dense', (['(300)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""sigmoid"""'}), "(300, kernel_initializer='glorot_uniform', activation=\n 'sigmoid')\n", (2259, 2328), False, 'import keras\n'), ((2376, 2409), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2407, 2409), False, 'import keras\n'), ((2461, 2547), 'keras.layers.Dense', 'keras.layers.Dense', (['(36)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""softmax"""'}), "(36, kernel_initializer='glorot_uniform', activation=\n 'softmax')\n", (2479, 2547), False, 'import keras\n'), ((867, 901), 'numpy.where', 'np.where', (['(self.cluster_labels == i)'], {}), '(self.cluster_labels == i)\n', (875, 901), True, 'import numpy as np\n'), ((937, 955), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (953, 955), False, 'from 
sklearn.linear_model import LinearRegression\n'), ((1371, 1400), 'numpy.where', 'np.where', (['(cluster_result == i)'], {}), '(cluster_result == i)\n', (1379, 1400), True, 'import numpy as np\n'), ((1622, 1655), 'numpy.argmax', 'np.argmax', (['predict_values'], {'axis': '(1)'}), '(predict_values, axis=1)\n', (1631, 1655), True, 'import numpy as np\n'), ((1915, 1944), 'numpy.where', 'np.where', (['(cluster_result == i)'], {}), '(cluster_result == i)\n', (1923, 1944), True, 'import numpy as np\n'), ((1491, 1525), 'numpy.dot', 'np.dot', (['x[inds]', 'self.models[i][0]'], {}), '(x[inds], self.models[i][0])\n', (1497, 1525), True, 'import numpy as np\n'), ((2041, 2075), 'numpy.dot', 'np.dot', (['x[inds]', 'self.models[i][0]'], {}), '(x[inds], self.models[i][0])\n', (2047, 2075), True, 'import numpy as np\n'), ((3206, 3252), 'sklearn.model_selection.train_test_split', 'train_test_split', (['long_features'], {'test_size': '(0.2)'}), '(long_features, test_size=0.2)\n', (3222, 3252), False, 'from sklearn.model_selection import train_test_split\n'), ((3422, 3442), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(48)'}), '(n_components=48)\n', (3425, 3442), False, 'from sklearn.decomposition import PCA\n'), ((4091, 4122), 'numpy.argmax', 'np.argmax', (['action_train'], {'axis': '(1)'}), '(action_train, axis=1)\n', (4100, 4122), True, 'import numpy as np\n'), ((4240, 4270), 'numpy.argmax', 'np.argmax', (['action_test'], {'axis': '(1)'}), '(action_test, axis=1)\n', (4249, 4270), True, 'import numpy as np\n'), ((4435, 4466), 'numpy.argmax', 'np.argmax', (['action_train'], {'axis': '(1)'}), '(action_train, axis=1)\n', (4444, 4466), True, 'import numpy as np\n'), ((4627, 4657), 'numpy.argmax', 'np.argmax', (['action_test'], {'axis': '(1)'}), '(action_test, axis=1)\n', (4636, 4657), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import rospy
import threading
import numpy as np
from geometry_msgs import msg as gmsg
from aist_depth_filter import DepthFilterClient
from tf import TransformBroadcaster, transformations as tfs
#########################################################################
# gloabal functions #
#########################################################################
def transform_from_plane(plane):
    """Convert a detected plane into a "tray_center" TransformStamped.

    The translation is the point of the plane closest to the frame origin
    and the rotation aligns the frame's z-axis with the plane normal.

    :param plane: plane message with ``header``, ``plane.normal``
        (presumably a unit vector -- TODO confirm) and ``plane.distance``
    :return: geometry_msgs TransformStamped with child frame "tray_center"
    """
    def normalize(v):
        return v / np.linalg.norm(v)

    t = gmsg.TransformStamped()
    t.header.frame_id = plane.header.frame_id
    t.child_frame_id = "tray_center"

    # Compute translation: foot of the perpendicular from the origin.
    k = np.array([plane.plane.normal.x, plane.plane.normal.y, plane.plane.normal.z])
    x = -plane.plane.distance * k
    t.transform.translation.x = x[0]
    t.transform.translation.y = x[1]
    t.transform.translation.z = x[2]

    # Compute rotation: build a right-handed orthonormal basis (i, j, k)
    # with k as the plane normal and pack it into a quaternion.
    # Robustness fix: fall back to the y-axis as reference when the normal
    # is (anti)parallel to the x-axis, where the original cross product
    # degenerates to the zero vector and normalize() divides by zero.
    ref = np.array([1, 0, 0])
    if np.linalg.norm(np.cross(k, ref)) < 1e-6:
        ref = np.array([0, 1, 0])
    j = normalize(np.cross(k, ref))
    i = np.cross(j, k)
    q = tfs.quaternion_from_matrix(
        np.array(
            [
                [i[0], j[0], k[0], 0.0],
                [i[1], j[1], k[1], 0.0],
                [i[2], j[2], k[2], 0.0],
                [0.0, 0.0, 0.0, 1.0],
            ]
        )
    )
    t.transform.rotation.x = q[0]
    t.transform.rotation.y = q[1]
    t.transform.rotation.z = q[2]
    t.transform.rotation.w = q[3]
    return t
#########################################################################
# class PlaneDetector #
#########################################################################
class PlaneDetector(object):
    """Detects the dominant plane via the depth filter and broadcasts it
    on TF as the "tray_center" frame (see transform_from_plane) from a
    background thread running at 10 Hz."""

    def __init__(self):
        super(PlaneDetector, self).__init__()
        self._dfilter = DepthFilterClient("depth_filter")
        self._broadcaster = TransformBroadcaster()
        self._transform = None  # cached transform; None triggers re-detection
        self._run = True        # loop flag for the broadcast thread
        # NOTE(review): the worker thread is neither daemonized nor joined
        # in quit(); a blocking wait inside the loop can delay shutdown.
        thread = threading.Thread(target=self._broadcast_plane)
        thread.start()

    def detect_plane(self):
        # Invalidate the cache so the broadcast loop re-runs detection.
        self._transform = None

    def quit(self):
        # Ask the broadcast thread to exit after its current iteration.
        self._run = False

    def _broadcast_plane(self):
        """Worker loop: (re)detect the plane whenever the cache is empty,
        then keep re-publishing the cached transform at 10 Hz."""
        rate = rospy.Rate(10)  # 10Hz
        while self._run:
            if self._transform is None:
                self._dfilter.detect_plane_send_goal()
                plane = self._dfilter.detect_plane_wait_for_result()
                if plane is not None:
                    self._transform = transform_from_plane(plane)
            if self._transform is not None:
                # Refresh the stamp so TF treats the transform as current.
                self._transform.header.stamp = rospy.Time.now()
                self._broadcaster.sendTransformMessage(self._transform)
            rate.sleep()
#########################################################################
# main #
#########################################################################
if __name__ == "__main__":
    # Interactive driver: hit return to re-detect the plane, 'q' to quit.
    rospy.init_node("~")
    detector = PlaneDetector()
    while not rospy.is_shutdown():
        # NOTE(review): raw_input is Python 2 only (consistent with the
        # plain `python` shebang); under Python 3 this would need input().
        if raw_input("Hit return key >> ") == "q":
            detector.quit()
            sys.exit()
        detector.detect_plane()
| [
"geometry_msgs.msg.TransformStamped",
"tf.TransformBroadcaster",
"aist_depth_filter.DepthFilterClient",
"sys.exit",
"numpy.cross",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Time.now",
"numpy.array",
"rospy.Rate",
"numpy.linalg.norm",
"threading.Thread"
] | [((565, 588), 'geometry_msgs.msg.TransformStamped', 'gmsg.TransformStamped', ([], {}), '()\n', (586, 588), True, 'from geometry_msgs import msg as gmsg\n'), ((707, 783), 'numpy.array', 'np.array', (['[plane.plane.normal.x, plane.plane.normal.y, plane.plane.normal.z]'], {}), '([plane.plane.normal.x, plane.plane.normal.y, plane.plane.normal.z])\n', (715, 783), True, 'import numpy as np\n'), ((1013, 1027), 'numpy.cross', 'np.cross', (['j', 'k'], {}), '(j, k)\n', (1021, 1027), True, 'import numpy as np\n'), ((2948, 2968), 'rospy.init_node', 'rospy.init_node', (['"""~"""'], {}), "('~')\n", (2963, 2968), False, 'import rospy\n'), ((1072, 1184), 'numpy.array', 'np.array', (['[[i[0], j[0], k[0], 0.0], [i[1], j[1], k[1], 0.0], [i[2], j[2], k[2], 0.0],\n [0.0, 0.0, 0.0, 1.0]]'], {}), '([[i[0], j[0], k[0], 0.0], [i[1], j[1], k[1], 0.0], [i[2], j[2], k[\n 2], 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (1080, 1184), True, 'import numpy as np\n'), ((1785, 1818), 'aist_depth_filter.DepthFilterClient', 'DepthFilterClient', (['"""depth_filter"""'], {}), "('depth_filter')\n", (1802, 1818), False, 'from aist_depth_filter import DepthFilterClient\n'), ((1847, 1869), 'tf.TransformBroadcaster', 'TransformBroadcaster', ([], {}), '()\n', (1867, 1869), False, 'from tf import TransformBroadcaster, transformations as tfs\n'), ((1944, 1990), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._broadcast_plane'}), '(target=self._broadcast_plane)\n', (1960, 1990), False, 'import threading\n'), ((2169, 2183), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (2179, 2183), False, 'import rospy\n'), ((3016, 3035), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3033, 3035), False, 'import rospy\n'), ((538, 555), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (552, 555), True, 'import numpy as np\n'), ((983, 1002), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (991, 1002), True, 'import numpy as np\n'), ((3128, 3138), 'sys.exit', 'sys.exit', 
([], {}), '()\n', (3136, 3138), False, 'import sys\n'), ((2577, 2593), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2591, 2593), False, 'import rospy\n')] |
import os
import sys
# Make the SUMO python tools (including traci) importable; SUMO_HOME must
# point at the SUMO installation directory, otherwise abort.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import numpy as np
from gym import spaces
class TrafficSignal:
    """
    This class represents a Traffic Signal of an intersection.
    It is responsible for retrieving information and changing the traffic
    phase using the TraCI API.
    """

    def __init__(self, env, ts_id, delta_time, yellow_time, min_green, begin_seconds):
        """
        :param env: owning environment; must expose ``sim_step``,
            ``reward_type`` and a ``vehicles`` dict
        :param ts_id: TraCI traffic-light id
        :param delta_time: seconds between agent actions
        :param yellow_time: fixed yellow-phase duration in seconds
        :param min_green: minimum green time before a phase switch
        :param begin_seconds: simulation time of the first action
        """
        self.id = ts_id
        self.env = env
        self.delta_time = delta_time
        self.yellow_time = yellow_time
        self.min_green = min_green
        self.green_phase = 0
        self.is_yellow = False
        self.time_since_last_phase_change = 0
        self.next_action_time = begin_seconds
        self.last_measure = 0.0
        self.last_reward = None
        self.build_phases()

        self.lanes = list(dict.fromkeys(traci.trafficlight.getControlledLanes(self.id)))  # Remove duplicates and keep order
        self.out_lanes = [link[0][1] for link in traci.trafficlight.getControlledLinks(self.id) if link]
        self.out_lanes = list(set(self.out_lanes))
        self.lanes_length = {lane: traci.lane.getLength(lane) for lane in self.lanes}

        # NOTE(review): this Box is sized num_green_phases + 1 + 2*len(lanes),
        # but compute_observation() returns 2 (time info) + num_green_phases
        # + 4*len(lanes) entries -- the declared space does not match the
        # actual observation length; confirm which is intended.
        self.observation_space = spaces.Box(low=np.zeros(self.num_green_phases+1+2*len(self.lanes), dtype=np.float32), high=np.ones(self.num_green_phases+1+2*len(self.lanes), dtype=np.float32))
        self.discrete_observation_space = spaces.Tuple((
            spaces.Discrete(self.num_green_phases),  # Green Phase
            spaces.Discrete(2),  # Binary variable active if min_green seconds already elapsed
            *(spaces.Discrete(10) for _ in range(2*len(self.lanes)))  # Density and stopped-density for each lane
        ))
        self.action_space = spaces.Discrete(self.num_green_phases)

    def build_phases(self):
        """Extract the green phases from the current TraCI program, generate
        an intermediate yellow phase for every ordered pair of green phases,
        and install the resulting program on the traffic light."""
        phases = traci.trafficlight.getAllProgramLogics(self.id)[0].phases
        self.green_phases = list()
        self.yellow_dict = dict()
        for phase in phases:
            state = phase.state
            # A green phase contains no 'y' and is not all-red/stopped.
            if 'y' not in state and (state.count('r') + state.count('s') != len(state)):
                self.green_phases.append(traci.trafficlight.Phase(60, state))
        self.num_green_phases = len(self.green_phases)
        self.all_phases = self.green_phases.copy()

        for i, p1 in enumerate(self.green_phases):
            for j, p2 in enumerate(self.green_phases):
                if i == j:
                    continue
                # Signals that go from green (G/g) to red/stopped (r/s)
                # must pass through yellow; all others keep their state.
                yellow_state = ''
                for s in range(len(p1.state)):
                    if (p1.state[s] == 'G' or p1.state[s] == 'g') and (p2.state[s] == 'r' or p2.state[s] == 's'):
                        yellow_state += 'y'
                    else:
                        yellow_state += p1.state[s]
                # yellow_dict maps (from_green, to_green) -> phase index.
                self.yellow_dict[(i,j)] = len(self.all_phases)
                self.all_phases.append(traci.trafficlight.Phase(self.yellow_time, yellow_state))

        programs = traci.trafficlight.getAllProgramLogics(self.id)
        logic = programs[0]
        logic.type = 0
        logic.phases = self.all_phases
        traci.trafficlight.setProgramLogic(self.id, logic)
        traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[0].state)

    @property
    def phase(self):
        """Current TraCI phase index of this traffic light."""
        return traci.trafficlight.getPhase(self.id)

    @property
    def time_to_act(self):
        """True when the simulation has reached the next scheduled action."""
        return self.next_action_time == self.env.sim_step

    def update(self):
        """Advance internal timers; end the yellow phase once it has lasted
        ``yellow_time`` steps by restoring the scheduled green state."""
        self.time_since_last_phase_change += 1
        if self.is_yellow and self.time_since_last_phase_change == self.yellow_time:
            #traci.trafficlight.setPhase(self.id, self.green_phase)
            traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)
            self.is_yellow = False

    def set_next_phase(self, new_phase):
        """
        Sets what will be the next green phase and sets yellow phase if the next phase is different than the current
        :param new_phase: (int) Number between [0..num_green_phases]
        """
        if new_phase is not None:
            new_phase = int(new_phase)
        # Keep the current green when no change is requested, the phase is
        # unchanged, or min_green (+ yellow) has not yet elapsed.
        if new_phase is None or self.green_phase == new_phase or self.time_since_last_phase_change < self.yellow_time + self.min_green:
            if self.time_since_last_phase_change < self.yellow_time + self.min_green:
                # Postpone the next action until min_green is satisfied
                # (but never earlier than the regular delta_time step).
                self.next_action_time = max(
                    [self.env.sim_step + self.min_green + self.yellow_time - self.time_since_last_phase_change,
                     self.env.sim_step + self.delta_time]
                )
            else:
                #traci.trafficlight.setPhase(self.id, self.green_phase)
                traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)
                self.next_action_time = self.env.sim_step + self.delta_time
        else:
            # Switch via the precomputed yellow phase for this transition.
            #traci.trafficlight.setPhase(self.id, self.yellow_dict[(self.green_phase, new_phase)])  # turns yellow
            traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.yellow_dict[(self.green_phase, new_phase)]].state)
            self.green_phase = new_phase
            self.next_action_time = self.env.sim_step + self.delta_time
            self.is_yellow = True
            self.time_since_last_phase_change = 0

    def compute_observation(self):
        """Build the observation vector:
        [time info (2), one-hot phase, lane densities, lane queues,
        mean headway distance per lane, mean speed per lane]."""
        time_info = self.compute_time_for_observation()
        # NOTE(review): phase//2 assumes green phases sit at even TraCI
        # indices (green/yellow alternating) -- confirm for this program.
        phase_id = [1 if self.phase//2 == i else 0 for i in range(self.num_green_phases)]  # one-hot encoding
        density = self.get_lanes_density()
        queue = self.get_lanes_queue()
        distance, speed = self.get_distance_and_speed()
        observation = np.array(time_info + phase_id + density + queue + distance + speed, dtype=np.float32)
        return observation

    def compute_reward(self):
        """Compute the reward selected by ``env.reward_type`` and cache it
        in ``last_reward``."""
        if self.env.reward_type == "waiting_time":
            self.last_reward = self._waiting_time_reward()
        elif self.env.reward_type == "vehicle_speed":
            self.last_reward = self._vehicle_speed_reward()
        elif self.env.reward_type == "vehicle_distance":
            self.last_reward = self._vehicle_distance_reward()
        return self.last_reward

    def _pressure_reward(self):
        # Negative pressure: fewer queued-in than flowing-out is better.
        return -self.get_pressure()

    def _queue_average_reward(self):
        # NOTE(review): get_stopped_vehicles_num() is not defined in this
        # class -- calling this method would raise AttributeError.
        new_average = np.mean(self.get_stopped_vehicles_num())
        reward = self.last_measure - new_average
        self.last_measure = new_average
        return reward

    def _queue_reward(self):
        # NOTE(review): relies on undefined get_stopped_vehicles_num().
        return - (sum(self.get_stopped_vehicles_num()))**2

    def _waiting_time_reward(self):
        """Reward = decrease in total accumulated waiting time (scaled)."""
        ts_wait = sum(self.get_waiting_time_per_lane()) / 100.0
        reward = self.last_measure - ts_wait
        self.last_measure = ts_wait
        return reward

    def _waiting_time_reward2(self):
        # NOTE(review): get_waiting_time() is not defined in this class
        # (only get_waiting_time_per_lane) -- would raise AttributeError.
        ts_wait = sum(self.get_waiting_time())
        self.last_measure = ts_wait
        if ts_wait == 0:
            reward = 1.0
        else:
            reward = 1.0/ts_wait
        return reward

    def _waiting_time_reward3(self):
        # NOTE(review): relies on undefined get_waiting_time().
        ts_wait = sum(self.get_waiting_time())
        reward = -ts_wait
        self.last_measure = ts_wait
        return reward

    def _vehicle_speed_reward(self):
        """Mean over all vehicles of speed scaled by a fixed 10 m/s
        reference; 0.0 when the controlled lanes are empty."""
        veh_speed = list()
        for lane in self.lanes:
            veh_list = traci.lane.getLastStepVehicleIDs(lane)
            for veh in veh_list:
                speed = traci.vehicle.getSpeed(veh)
                speed_norm = speed / 10.0
                veh_speed.append(speed_norm)
        if len(veh_speed) == 0:
            veh_speed_mean = 0.0
        else:
            veh_speed_mean = np.mean(veh_speed).tolist()
        return veh_speed_mean

    def _vehicle_distance_reward(self):
        """Mean over all vehicles (that have a leader) of the gap to that
        leader scaled by 10 m; 0.0 when no vehicle has a leader."""
        veh_dist = list()
        for lane in self.lanes:
            veh_list = traci.lane.getLastStepVehicleIDs(lane)
            for veh in veh_list:
                leader = traci.vehicle.getLeader(veh)
                if leader is None:
                    continue
                else:
                    # leader is (leader_id, gap_in_meters)
                    dist_norm = leader[1] / 10.0
                    veh_dist.append(dist_norm)
        if len(veh_dist) == 0:
            veh_dist_mean = 0.0
        else:
            veh_dist_mean = np.mean(veh_dist).tolist()
        return veh_dist_mean

    def get_waiting_time_per_lane(self):
        """Return per-lane accumulated waiting time, attributing each
        vehicle's waiting time to the lane it is currently on (tracked
        incrementally in ``env.vehicles``)."""
        wait_time_per_lane = []
        for lane in self.lanes:
            veh_list = traci.lane.getLastStepVehicleIDs(lane)
            wait_time = 0.0
            for veh in veh_list:
                veh_lane = traci.vehicle.getLaneID(veh)
                acc = traci.vehicle.getAccumulatedWaitingTime(veh)
                if veh not in self.env.vehicles:
                    self.env.vehicles[veh] = {veh_lane: acc}
                else:
                    # Waiting time on the current lane = total accumulated
                    # minus what was already attributed to other lanes.
                    self.env.vehicles[veh][veh_lane] = acc - sum([self.env.vehicles[veh][lane] for lane in self.env.vehicles[veh].keys() if lane != veh_lane])
                wait_time += self.env.vehicles[veh][veh_lane]
            wait_time_per_lane.append(wait_time)
        return wait_time_per_lane

    def get_pressure(self):
        """Absolute difference between vehicles on incoming and outgoing lanes."""
        return abs(sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.lanes) - sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.out_lanes))

    def get_out_lanes_density(self):
        """Per-outgoing-lane vehicle density, capped at 1."""
        vehicle_size_min_gap = 7.5  # 5(vehSize) + 2.5(minGap)
        return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.out_lanes]

    def get_lanes_density(self):
        """Per-incoming-lane vehicle density, capped at 1."""
        vehicle_size_min_gap = 7.5  # 5(vehSize) + 2.5(minGap)
        return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]

    def get_lanes_queue(self):
        """Per-incoming-lane halted-vehicle density, capped at 1."""
        vehicle_size_min_gap = 7.5  # 5(vehSize) + 2.5(minGap)
        return [min(1, traci.lane.getLastStepHaltingNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]

    def get_total_queued(self):
        """Total number of halted vehicles on all incoming lanes."""
        return sum([traci.lane.getLastStepHaltingNumber(lane) for lane in self.lanes])

    def _get_veh_list(self):
        """All vehicle ids currently on the incoming lanes."""
        veh_list = []
        for lane in self.lanes:
            veh_list += traci.lane.getLastStepVehicleIDs(lane)
        return veh_list

    def get_distance_and_speed(self):
        """Return per-lane (mean normalized leader gap, mean normalized
        speed); lanes with no data default to 1.0."""
        veh_dist_mean = list()
        veh_speed_mean = list()
        for lane in self.lanes:
            veh_dist = list()
            veh_speed = list()
            veh_list = traci.lane.getLastStepVehicleIDs(lane)
            for veh in veh_list:
                speed = traci.vehicle.getSpeed(veh)
                max_speed = traci.vehicle.getMaxSpeed(veh)
                speed_norm = speed / max_speed
                veh_speed.append(speed_norm)
                leader = traci.vehicle.getLeader(veh)
                if leader is None:
                    continue
                else:
                    # Normalize the gap by the lane length; clip to [-1, 1].
                    standard_len = traci.lane.getLength(lane)
                    dist_norm = leader[1] / standard_len
                    if abs(dist_norm) > 1.0:
                        dist_norm = 1.0
                    veh_dist.append(dist_norm)
            if len(veh_dist) == 0:
                veh_dist_mean.append(1.0)
            else:
                veh_dist_mean.append(np.mean(veh_dist).tolist())
            if len(veh_speed) == 0:
                veh_speed_mean.append(1.0)
            else:
                veh_speed_mean.append(np.mean(veh_speed).tolist())
        return veh_dist_mean, veh_speed_mean

    def compute_time_for_observation(self):
        """Return [time_to_act flag, normalized time since last phase change],
        the normalization capped at 1.0."""
        time_norm = self.time_since_last_phase_change/(self.yellow_time + self.min_green*3)
        if time_norm>1.0:
            time_norm = 1.0
        return [float(self.time_to_act), time_norm]
return [float(self.time_to_act), time_norm] | [
"traci.vehicle.getSpeed",
"numpy.array",
"traci.trafficlight.getAllProgramLogics",
"traci.vehicle.getLeader",
"sys.exit",
"traci.trafficlight.getControlledLinks",
"sys.path.append",
"numpy.mean",
"traci.trafficlight.getPhase",
"traci.lane.getLength",
"traci.vehicle.getLaneID",
"traci.trafficli... | [((63, 109), 'os.path.join', 'os.path.join', (["os.environ['SUMO_HOME']", '"""tools"""'], {}), "(os.environ['SUMO_HOME'], 'tools')\n", (75, 109), False, 'import os\n'), ((114, 136), 'sys.path.append', 'sys.path.append', (['tools'], {}), '(tools)\n', (129, 136), False, 'import sys\n'), ((147, 210), 'sys.exit', 'sys.exit', (['"""Please declare the environment variable \'SUMO_HOME\'"""'], {}), '("Please declare the environment variable \'SUMO_HOME\'")\n', (155, 210), False, 'import sys\n'), ((1955, 1993), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.num_green_phases'], {}), '(self.num_green_phases)\n', (1970, 1993), False, 'from gym import spaces\n'), ((3163, 3210), 'traci.trafficlight.getAllProgramLogics', 'traci.trafficlight.getAllProgramLogics', (['self.id'], {}), '(self.id)\n', (3201, 3210), False, 'import traci\n'), ((3309, 3359), 'traci.trafficlight.setProgramLogic', 'traci.trafficlight.setProgramLogic', (['self.id', 'logic'], {}), '(self.id, logic)\n', (3343, 3359), False, 'import traci\n'), ((3368, 3444), 'traci.trafficlight.setRedYellowGreenState', 'traci.trafficlight.setRedYellowGreenState', (['self.id', 'self.all_phases[0].state'], {}), '(self.id, self.all_phases[0].state)\n', (3409, 3444), False, 'import traci\n'), ((3500, 3536), 'traci.trafficlight.getPhase', 'traci.trafficlight.getPhase', (['self.id'], {}), '(self.id)\n', (3527, 3536), False, 'import traci\n'), ((5898, 5988), 'numpy.array', 'np.array', (['(time_info + phase_id + density + queue + distance + speed)'], {'dtype': 'np.float32'}), '(time_info + phase_id + density + queue + distance + speed, dtype=\n np.float32)\n', (5906, 5988), True, 'import numpy as np\n'), ((1271, 1297), 'traci.lane.getLength', 'traci.lane.getLength', (['lane'], {}), '(lane)\n', (1291, 1297), False, 'import traci\n'), ((3876, 3972), 'traci.trafficlight.setRedYellowGreenState', 'traci.trafficlight.setRedYellowGreenState', (['self.id', 'self.all_phases[self.green_phase].state'], {}), '(self.id, 
self.all_phases[self.\n green_phase].state)\n', (3917, 3972), False, 'import traci\n'), ((5212, 5337), 'traci.trafficlight.setRedYellowGreenState', 'traci.trafficlight.setRedYellowGreenState', (['self.id', 'self.all_phases[self.yellow_dict[self.green_phase, new_phase]].state'], {}), '(self.id, self.all_phases[self.\n yellow_dict[self.green_phase, new_phase]].state)\n', (5253, 5337), False, 'import traci\n'), ((7537, 7575), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['lane'], {}), '(lane)\n', (7569, 7575), False, 'import traci\n'), ((8037, 8075), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['lane'], {}), '(lane)\n', (8069, 8075), False, 'import traci\n'), ((8637, 8675), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['lane'], {}), '(lane)\n', (8669, 8675), False, 'import traci\n'), ((10452, 10490), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['lane'], {}), '(lane)\n', (10484, 10490), False, 'import traci\n'), ((10733, 10771), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['lane'], {}), '(lane)\n', (10765, 10771), False, 'import traci\n'), ((995, 1041), 'traci.trafficlight.getControlledLanes', 'traci.trafficlight.getControlledLanes', (['self.id'], {}), '(self.id)\n', (1032, 1041), False, 'import traci\n'), ((1129, 1175), 'traci.trafficlight.getControlledLinks', 'traci.trafficlight.getControlledLinks', (['self.id'], {}), '(self.id)\n', (1166, 1175), False, 'import traci\n'), ((1586, 1624), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.num_green_phases'], {}), '(self.num_green_phases)\n', (1601, 1624), False, 'from gym import spaces\n'), ((1674, 1692), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (1689, 1692), False, 'from gym import spaces\n'), ((2041, 2088), 'traci.trafficlight.getAllProgramLogics', 'traci.trafficlight.getAllProgramLogics', (['self.id'], {}), '(self.id)\n', (2079, 2088), False, 'import 
traci\n'), ((4903, 4999), 'traci.trafficlight.setRedYellowGreenState', 'traci.trafficlight.setRedYellowGreenState', (['self.id', 'self.all_phases[self.green_phase].state'], {}), '(self.id, self.all_phases[self.\n green_phase].state)\n', (4944, 4999), False, 'import traci\n'), ((7633, 7660), 'traci.vehicle.getSpeed', 'traci.vehicle.getSpeed', (['veh'], {}), '(veh)\n', (7655, 7660), False, 'import traci\n'), ((8134, 8162), 'traci.vehicle.getLeader', 'traci.vehicle.getLeader', (['veh'], {}), '(veh)\n', (8157, 8162), False, 'import traci\n'), ((8764, 8792), 'traci.vehicle.getLaneID', 'traci.vehicle.getLaneID', (['veh'], {}), '(veh)\n', (8787, 8792), False, 'import traci\n'), ((8815, 8859), 'traci.vehicle.getAccumulatedWaitingTime', 'traci.vehicle.getAccumulatedWaitingTime', (['veh'], {}), '(veh)\n', (8854, 8859), False, 'import traci\n'), ((10277, 10318), 'traci.lane.getLastStepHaltingNumber', 'traci.lane.getLastStepHaltingNumber', (['lane'], {}), '(lane)\n', (10312, 10318), False, 'import traci\n'), ((10829, 10856), 'traci.vehicle.getSpeed', 'traci.vehicle.getSpeed', (['veh'], {}), '(veh)\n', (10851, 10856), False, 'import traci\n'), ((10885, 10915), 'traci.vehicle.getMaxSpeed', 'traci.vehicle.getMaxSpeed', (['veh'], {}), '(veh)\n', (10910, 10915), False, 'import traci\n'), ((11034, 11062), 'traci.vehicle.getLeader', 'traci.vehicle.getLeader', (['veh'], {}), '(veh)\n', (11057, 11062), False, 'import traci\n'), ((2360, 2395), 'traci.trafficlight.Phase', 'traci.trafficlight.Phase', (['(60)', 'state'], {}), '(60, state)\n', (2384, 2395), False, 'import traci\n'), ((3085, 3141), 'traci.trafficlight.Phase', 'traci.trafficlight.Phase', (['self.yellow_time', 'yellow_state'], {}), '(self.yellow_time, yellow_state)\n', (3109, 3141), False, 'import traci\n'), ((7857, 7875), 'numpy.mean', 'np.mean', (['veh_speed'], {}), '(veh_speed)\n', (7864, 7875), True, 'import numpy as np\n'), ((8451, 8468), 'numpy.mean', 'np.mean', (['veh_dist'], {}), '(veh_dist)\n', (8458, 8468), True, 
'import numpy as np\n'), ((9615, 9656), 'traci.lane.getLastStepVehicleNumber', 'traci.lane.getLastStepVehicleNumber', (['lane'], {}), '(lane)\n', (9650, 9656), False, 'import traci\n'), ((9860, 9901), 'traci.lane.getLastStepVehicleNumber', 'traci.lane.getLastStepVehicleNumber', (['lane'], {}), '(lane)\n', (9895, 9901), False, 'import traci\n'), ((10099, 10140), 'traci.lane.getLastStepHaltingNumber', 'traci.lane.getLastStepHaltingNumber', (['lane'], {}), '(lane)\n', (10134, 10140), False, 'import traci\n'), ((11184, 11210), 'traci.lane.getLength', 'traci.lane.getLength', (['lane'], {}), '(lane)\n', (11204, 11210), False, 'import traci\n'), ((1812, 1831), 'gym.spaces.Discrete', 'spaces.Discrete', (['(10)'], {}), '(10)\n', (1827, 1831), False, 'from gym import spaces\n'), ((9348, 9389), 'traci.lane.getLastStepVehicleNumber', 'traci.lane.getLastStepVehicleNumber', (['lane'], {}), '(lane)\n', (9383, 9389), False, 'import traci\n'), ((9420, 9461), 'traci.lane.getLastStepVehicleNumber', 'traci.lane.getLastStepVehicleNumber', (['lane'], {}), '(lane)\n', (9455, 9461), False, 'import traci\n'), ((9660, 9686), 'traci.lane.getLength', 'traci.lane.getLength', (['lane'], {}), '(lane)\n', (9680, 9686), False, 'import traci\n'), ((9905, 9931), 'traci.lane.getLength', 'traci.lane.getLength', (['lane'], {}), '(lane)\n', (9925, 9931), False, 'import traci\n'), ((10144, 10170), 'traci.lane.getLength', 'traci.lane.getLength', (['lane'], {}), '(lane)\n', (10164, 10170), False, 'import traci\n'), ((11533, 11550), 'numpy.mean', 'np.mean', (['veh_dist'], {}), '(veh_dist)\n', (11540, 11550), True, 'import numpy as np\n'), ((11697, 11715), 'numpy.mean', 'np.mean', (['veh_speed'], {}), '(veh_speed)\n', (11704, 11715), True, 'import numpy as np\n')] |
import pickle
import argparse
import json
import gc
import math
from util import *
from sklearn.metrics import classification_report
from keras.callbacks import EarlyStopping
from sklearn.feature_extraction.text import CountVectorizer
from keras.callbacks import ModelCheckpoint
from collections import defaultdict
from gensim.models import word2vec
from keras_han.model import HAN
from nltk.corpus import stopwords
import os
import numpy as np
import pandas as pd
from keras.metrics import TopKCategoricalAccuracy
import matplotlib.pyplot as plt
import sys
def main(dataset_path, print_flag=True):
#dataset_path = './data/eutopiaverttest/'
def train_word2vec(df, dataset_path):
def get_embeddings(inp_data, vocabulary_inv, size_features=100,
mode='skipgram',
min_word_count=2,
context=5):
num_workers = 15 # Number of threads to run in parallel
downsampling = 1e-3 # Downsample setting for frequent words
print('Training Word2Vec model...')
sentences = [[vocabulary_inv[w] for w in s] for s in inp_data]
if mode == 'skipgram':
sg = 1
print('Model: skip-gram')
elif mode == 'cbow':
sg = 0
print('Model: CBOW')
embedding_model = word2vec.Word2Vec(sentences, workers=num_workers,
sg=sg,
size=size_features,
min_count=min_word_count,
window=context,
sample=downsampling)
embedding_model.init_sims(replace=True)
embedding_weights = np.zeros((len(vocabulary_inv) + 1, size_features))
embedding_weights[0] = 0
for i, word in vocabulary_inv.items():
if word in embedding_model:
embedding_weights[i] = embedding_model[word]
else:
embedding_weights[i] = np.random.uniform(-0.25, 0.25, embedding_model.vector_size)
return embedding_weights
tokenizer = fit_get_tokenizer(df.sentence, max_words=150000)
print("Total number of words: ", len(tokenizer.word_index))
tagged_data = tokenizer.texts_to_sequences(df.sentence)
vocabulary_inv = {}
for word in tokenizer.word_index:
vocabulary_inv[tokenizer.word_index[word]] = word
embedding_mat = get_embeddings(tagged_data, vocabulary_inv)
pickle.dump(tokenizer, open(dataset_path + "tokenizer.pkl", "wb"))
pickle.dump(embedding_mat, open(dataset_path + "embedding_matrix.pkl", "wb"))
def preprocess(df, word_cluster):
print("Preprocessing data..")
stop_words = set(stopwords.words('english'))
stop_words.add('would')
word_vec = {}
for index, row in df.iterrows():
if index % 100 == 0:
print("Finished rows: " + str(index) + " out of " + str(len(df)))
line = row["sentence"]
words = line.strip().split()
new_words = []
for word in words:
try:
vec = word_vec[word]
except:
vec = get_vec(word, word_cluster, stop_words)
if len(vec) == 0:
continue
word_vec[word] = vec
new_words.append(word)
df["sentence"][index] = " ".join(new_words)
return df, word_vec
def generate_pseudo_labels(df, labels, label_term_dict, tokenizer):
    """Assign multilabel one-hot pseudo-labels to documents from seed-word hits.

    A document gets a pseudo-label only if at least one seed word of some
    label occurs in it (flag set below); the label scores are the per-label
    counts of matched seed-word occurrences.

    :param df: DataFrame with "sentence" and "label" columns.
    :param labels: iterable of label names (sorted below to fix one-hot order).
    :param label_term_dict: label -> iterable of seed words.
    :param tokenizer: fitted Keras-style tokenizer (word_index, texts_to_sequences).
    :return: (X, y, y_true) — sentences, one-hot pseudo-labels, original labels.
    """
    #TODO this is bad code, i must be sure that labels follows the one-hot-index order
    labels = sorted(labels)
    # Multilabel decision rule: returns a one-hot-encoded numpy array.
    def argmax_perfectmatch(count_dict, percentage=0.2):
        # Tally matched-seed-word occurrences per label (0 if label absent).
        total = 0
        labcounts = []
        for l in labels:
            count = 0
            try:
                for t in count_dict[l]:
                    count += count_dict[l][t]
            except:
                pass
            labcounts.append((l, count))
            total += count
        current = np.zeros(len(labels))
        # Set 1 for every label over the threshold.
        for i in range(len(current)):
            # With matches in fewer than 3 classes, assign every non-zero one.
            # NOTE(review): len(labcounts) == len(labels) here (one entry per
            # label), so this branch compares the total label count, not the
            # number of matched classes — confirm intent.
            if len(labcounts) < 3:
                if labcounts[i][1] != 0:
                    current[i] = 1.0
            # Otherwise keep labels holding >= `percentage` of all matches.
            else:
                if (labcounts[i][1] / total) >= percentage:
                    current[i] = 1.0
        # If no label cleared the threshold, fall back to the single best one.
        if np.sum(current) == 0:
            labcounts = [x[1] for x in labcounts]
            index_max = max(range(len(labcounts)), key=labcounts.__getitem__)
            current[index_max] = 1.0
        return current
    #TODO DEBUG
    # x = {'a': {'pane':3, 'riso':2}, 'b': {'pesce':10, 'carne':22}, 'c': {'papate': 99, 'gamb': 101}}
    # argmax_multilabel(x, 0.2)
    y = []
    X = []
    y_true = []
    # Invert word -> id into id -> word to decode tokenized sentences.
    index_word = {}
    for w in tokenizer.word_index:
        index_word[tokenizer.word_index[w]] = w
    for index, row in df.iterrows():
        line = row["sentence"]
        label = row["label"]
        tokens = tokenizer.texts_to_sequences([line])[0]
        words = []
        for tok in tokens:
            words.append(index_word[tok])
        # count_dict[label][seed_word] = occurrences of that seed in this doc.
        count_dict = {}
        flag = 0
        for l in labels:
            seed_words = set()
            for w in label_term_dict[l]:
                seed_words.add(w)
            int_labels = list(set(words).intersection(seed_words))
            if len(int_labels) == 0:
                continue
            for word in words:
                if word in int_labels:
                    flag = 1
                    try:
                        temp = count_dict[l]
                    except:
                        count_dict[l] = {}
                    try:
                        count_dict[l][word] += 1
                    except:
                        count_dict[l][word] = 1
        if flag:
            lbl = argmax_perfectmatch(count_dict)
            #TODO currently is impossible that there is no label, in the future maybe this should be possible
            if np.sum(lbl) == 0:
                continue
            y.append(lbl)
            X.append(line)
            y_true.append(label)
    return X, y, y_true
def train_classifier(df, labels, label_term_dict, label_to_index, index_to_label, dataset_path):
    """Train the HAN classifier on seed-word pseudo-labels and predict on all docs.

    Generates pseudo-labels, fits a Hierarchical Attention Network, reports
    per-class precision/recall and top-k accuracies, and returns the hard
    (>0.5) multilabel predictions for the whole dataset.

    :param df: preprocessed DataFrame with "sentence" and "label" columns.
    :param labels: list of label names.
    :param label_term_dict: label -> seed words, used for pseudo-labeling.
    :param label_to_index: label name -> one-hot index.
    :param index_to_label: one-hot index -> label name.
    :param dataset_path: directory holding tokenizer.pkl / embedding_matrix.pkl.
    :return: predicted label strings (from thresholded one-hot predictions).
    """
    print("Going to train classifier..")
    basepath = dataset_path
    model_name = "conwea"
    dump_dir = basepath + "models/" + model_name + "/"
    tmp_dir = basepath + "checkpoints/" + model_name + "/"
    os.makedirs(dump_dir, exist_ok=True)
    os.makedirs(tmp_dir, exist_ok=True)
    max_sentence_length = 100
    #TODO what is max sentences???
    max_sentences = 15
    max_words = 20000
    tokenizer = pickle.load(open(dataset_path + "tokenizer.pkl", "rb"))
    X, y, y_true = generate_pseudo_labels(df, labels, label_term_dict, tokenizer)
    #y_one_hot = make_one_hot(y, label_to_index)
    y_one_hot = np.array(y)
    # Plot the pseudo-label class distribution.
    twodmatrix = np.stack(y, axis=0)
    labelcounts = np.sum(twodmatrix, axis=0)
    plt.bar(range(0, 13), labelcounts)
    plt.title('PSEUDOLABEL DISTRIBUTION')
    plt.show()
    print("Fitting tokenizer...")
    print("Splitting into train, dev...")
    X_train, y_train, X_val, y_val = create_train_dev(X, labels=y_one_hot, tokenizer=tokenizer,
                                                     max_sentences=max_sentences,
                                                     max_sentence_length=max_sentence_length,
                                                     max_words=max_words)
    print("Creating Embedding matrix...")
    embedding_matrix = pickle.load(open(dataset_path + "embedding_matrix.pkl", "rb"))
    print("Initializing model...")
    model = HAN(max_words=max_sentence_length, max_sentences=max_sentences, output_size=len(y_train[0]),
                embedding_matrix=embedding_matrix)
    print("Compiling model...")
    model.summary()
    model.compile(loss="binary_crossentropy", optimizer='adam', metrics=[TopKCategoricalAccuracy(k=3)])
    print("model fitting - Hierachical attention network...")
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
    mc = ModelCheckpoint(filepath=tmp_dir + 'model.{epoch:02d}-{val_loss:.2f}.hdf5', monitor=TopKCategoricalAccuracy(k=3), mode='max',
                         verbose=1, save_weights_only=True, save_best_only=True)
    model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=1, batch_size=256, callbacks=[es, mc])
    print("****************** CLASSIFICATION REPORT FOR All DOCUMENTS ********************")
    X_all = prep_data(texts=df["sentence"], max_sentences=max_sentences, max_sentence_length=max_sentence_length,
                      tokenizer=tokenizer)
    y_true_all = df["label"]
    # pred is one row per document, one probability per class.
    pred = model.predict(X_all)
    # NOTE(review): this second plot stacks `y` (pseudo-labels) again, not the
    # network predictions — presumably `(pred > 0.5)` was intended; confirm.
    twodmatrix = np.stack(y, axis=0)
    labelcounts = np.sum(twodmatrix, axis=0)
    plt.bar(range(0, 13), labelcounts)
    plt.title('NN PREDICTION DISTRIBUTION')
    plt.show()
    # One-hot encoding of predictions with a 0.5 threshold, used for
    # precision/recall and for the returned label strings.
    lsprecrec = (pred > 0.5).astype(int)
    #array of strings of predicted labels( with hard threshold for seeding words)
    #pred usualy. trying lsprecrec for lower threshold
    pred_labels = get_from_one_hot(lsprecrec, index_to_label)
    y_true_allnp = np.array(y_true_all)
    # Re-wrap each row so the ground truth is a proper 2-D numeric array.
    y_true_allnp = np.array([np.array(x) for x in y_true_allnp])
    from sklearn.metrics import confusion_matrix
    for i, l in enumerate(label_to_index.keys()):
        if sum(y_true_allnp.T[i]) == 0:
            # BUG FIX: was print('no {l} in dataset') — the braces were printed
            # literally because the f-prefix was missing.
            print("no {} in dataset".format(l))
        if sum(lsprecrec.T[i]) == 0:
            print("no {} ever predicted".format(l))
        tn, fp, fn, tp = confusion_matrix(y_true_allnp.T[i], lsprecrec.T[i]).ravel()
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        print('{} : precision {}, recall: {}'.format(l, precision, recall))
    # Top-k accuracy of the pseudo-labels against the ground truth.
    topk1_accuracypseudo = TopKCategoricalAccuracy(k=1, name="top_k1_categorical_accuracy", dtype=None)
    topk2_accuracypseudo = TopKCategoricalAccuracy(k=2, name="top_k2_categorical_accuracy", dtype=None)
    topk3_accuracypseudo = TopKCategoricalAccuracy(k=3, name="top_k3_categorical_accuracy", dtype=None)
    topk1_accuracypseudo.update_state(y_true=y_true, y_pred=y_one_hot)
    topk2_accuracypseudo.update_state(y_true=y_true, y_pred=y_one_hot)
    topk3_accuracypseudo.update_state(y_true=y_true, y_pred=y_one_hot)
    print("ACCURACY PSEUDO LABELS")
    print("K1: ", topk1_accuracypseudo.result().numpy())
    print("K2: ", topk2_accuracypseudo.result().numpy())
    print("K3: ", topk3_accuracypseudo.result().numpy())
    # Top-k accuracy of the network predictions against the ground truth.
    topk1_accuracy = TopKCategoricalAccuracy(k=1, name="top_k1_categorical_accuracy", dtype=None)
    topk2_accuracy = TopKCategoricalAccuracy(k=2, name="top_k2_categorical_accuracy", dtype=None)
    topk3_accuracy = TopKCategoricalAccuracy(k=3, name="top_k3_categorical_accuracy", dtype=None)
    topk1_accuracy.update_state(y_true=y_true_allnp.astype(np.float64), y_pred=pred)
    topk2_accuracy.update_state(y_true=y_true_allnp.astype(np.float64), y_pred=pred)
    topk3_accuracy.update_state(y_true=y_true_allnp.astype(np.float64), y_pred=pred)
    print("ACCURACY NN PREDICTION")
    print("K1: ", topk1_accuracy.result().numpy())
    print("K2: ", topk2_accuracy.result().numpy())
    print("K3: ", topk3_accuracy.result().numpy())
    #print(classification_report(y_true_all, pred_labels))
    print("Dumping the model...")
    # model.save_weights(dump_dir + "model_weights_" + model_name + ".h5")
    # model.save(dump_dir + "model_" + model_name + ".h5")
    return pred_labels
def expand_seeds(df, label_term_dict, pred_labels, label_to_index, index_to_label, word_to_index, index_to_word,
                 inv_docfreq, docfreq, it, n1, doc_freq_thresh=5):
    """Rank words per label and refresh the seed-word dictionary.

    On iteration 0 this only disambiguates multi-interpretation seeds
    ("word$k" forms); on later iterations it expands each label's seed set
    with the top-ranked words.

    :param df: preprocessed DataFrame.
    :param label_term_dict: current label -> seed words mapping.
    :param pred_labels: classifier predictions used to bucket docs per label.
    :param label_to_index / index_to_label: one-hot label maps.
    :param word_to_index / index_to_word: vocabulary maps.
    :param inv_docfreq: word -> inverse document frequency over the corpus.
    :param docfreq: word -> document frequency over the corpus.
    :param it: current outer iteration number.
    :param n1: base number of words to add per iteration.
    :param doc_freq_thresh: minimum per-label doc frequency for a candidate.
    :return: (new label_term_dict, components) where components holds the
        per-word ranking factors for inspection.
    """
    def get_rank_matrix(docfreq, inv_docfreq, label_count, label_docs_dict, label_to_index, term_count,
                        word_to_index, doc_freq_thresh):
        # E_LT[label, term] = relevance rank of term for label.
        E_LT = np.zeros((label_count, term_count))
        components = {}
        for l in label_docs_dict:
            components[l] = {}
            docs = label_docs_dict[l]
            docfreq_local = calculate_doc_freq(docs)
            vect = CountVectorizer(vocabulary=list(word_to_index.keys()), tokenizer=lambda x: x.split())
            X = vect.fit_transform(docs)
            # Mean term count per document of this label.
            rel_freq = X.sum(axis=0) / len(docs)
            rel_freq = np.asarray(rel_freq).reshape(-1)
            names = vect.get_feature_names()
            for i, name in enumerate(names):
                try:
                    if docfreq_local[name] < doc_freq_thresh:
                        continue
                except:
                    continue
                # docfreq_local = count of documents of this label containing the word
                # docfreq = count over all documents
                # inv_docfreq rewards unusual words; tanh squashes the relative frequency
                E_LT[label_to_index[l]][word_to_index[name]] = (docfreq_local[name] / docfreq[name]) * inv_docfreq[
                    name] * np.tanh(rel_freq[i])
                components[l][name] = {"reldocfreq": docfreq_local[name] / docfreq[name],
                                       "idf": inv_docfreq[name],
                                       "rel_freq": np.tanh(rel_freq[i]),
                                       "rank": E_LT[label_to_index[l]][word_to_index[name]]}
        print('ok i guess')
        return E_LT, components
    def disambiguate(label_term_dict, components):
        # For each seed written as "word$interp", keep only the interpretation
        # with the highest rank for that label.
        new_dic = {}
        for l in label_term_dict:
            all_interp_seeds = label_term_dict[l]
            seed_to_all_interp = {}
            disambiguated_seed_list = []
            for word in all_interp_seeds:
                temp = word.split("$")
                if len(temp) == 1:
                    # No "$": the seed has a single interpretation; keep it.
                    disambiguated_seed_list.append(word)
                else:
                    try:
                        seed_to_all_interp[temp[0]].add(word)
                    except:
                        seed_to_all_interp[temp[0]] = {word}
            for seed in seed_to_all_interp:
                interpretations = seed_to_all_interp[seed]
                max_interp = ""
                maxi = -1
                for interp in interpretations:
                    try:
                        if components[l][interp]["rank"] > maxi:
                            max_interp = interp
                            maxi = components[l][interp]["rank"]
                    except:
                        continue
                disambiguated_seed_list.append(max_interp)
            new_dic[l] = disambiguated_seed_list
        return new_dic
    def expand(E_LT, index_to_label, index_to_word, it, label_count, n1, old_label_term_dict, label_docs_dict):
        word_map = {}
        # Labels with no predicted documents keep their old seed words.
        zero_docs_labels = set()
        for l in range(label_count):
            if not np.any(E_LT):
                continue
            # No docs for a label == use old dictionary.
            elif len(label_docs_dict[index_to_label[l]]) == 0:
                zero_docs_labels.add(index_to_label[l])
            else:
                # n1 * it words per iteration, capped by a log of the label's doc count.
                n = min(n1 * it, int(math.log(len(label_docs_dict[index_to_label[l]]), 1.5)))
                # Sort descending by rank and take the first n words.
                inds_popular = E_LT[l].argsort()[::-1][:n]
                for word_ind in inds_popular:
                    word = index_to_word[word_ind]
                    try:
                        temp = word_map[word]
                        # Each word is assigned only to its highest-ranked label.
                        if E_LT[l][word_ind] > temp[1]:
                            word_map[word] = (index_to_label[l], E_LT[l][word_ind])
                    except:
                        word_map[word] = (index_to_label[l], E_LT[l][word_ind])
        new_label_term_dict = defaultdict(set)
        for word in word_map:
            label, val = word_map[word]
            new_label_term_dict[label].add(word)
        for l in zero_docs_labels:
            new_label_term_dict[l] = old_label_term_dict[l]
        return new_label_term_dict
    print('1')
    label_count = len(label_to_index)
    term_count = len(word_to_index)
    label_docs_dict = get_label_docs_dict(df, label_term_dict, pred_labels)
    print('2')
    E_LT, components = get_rank_matrix(docfreq, inv_docfreq, label_count, label_docs_dict, label_to_index,
                                       term_count, word_to_index, doc_freq_thresh)
    print('3')
    if it == 0:
        print("Disambiguating seeds..")
        label_term_dict = disambiguate(label_term_dict, components)
        print('4')
    # Seeds are not expanded on the very first iteration.
    else:
        print("Expanding seeds..")
        label_term_dict = expand(E_LT, index_to_label, index_to_word, it, label_count, n1, label_term_dict,
                                 label_docs_dict)
    return label_term_dict, components
pkl_dump_dir = dataset_path
df = pickle.load(open(pkl_dump_dir + "df_contextualized.pkl", "rb"))
word_cluster = pickle.load(open(pkl_dump_dir + "word_cluster_map.pkl", "rb"))
with open(pkl_dump_dir + "seedwordsencoded.json") as fp:
label_term_dict = json.load(fp)
label_term_dict = add_all_interpretations(label_term_dict, word_cluster)
print_label_term_dict(label_term_dict, None, print_components=False)
labels = list(set(label_term_dict.keys()))
#creates onehot mappings name- index, index-name, should be coherent with multilabel binarizer
label_to_index, index_to_label = create_label_index_maps(labels)
df, word_vec = preprocess(df, word_cluster)
del word_cluster
gc.collect()
word_to_index, index_to_word = create_word_index_maps(word_vec)
docfreq = calculate_df_doc_freq(df)
inv_docfreq = calculate_inv_doc_freq(df, docfreq)
train_word2vec(df, dataset_path)
from sklearn.metrics import confusion_matrix
for i in range(6):
print("ITERATION: ", i)
pred_labels = train_classifier(df, labels, label_term_dict, label_to_index, index_to_label, dataset_path)
label_term_dict, components = expand_seeds(df, label_term_dict, pred_labels, label_to_index, index_to_label,
word_to_index, index_to_word, inv_docfreq, docfreq, i, n1=5)
dicttojson = {k: list(v) for k, v in label_term_dict.items()}
dicttojson = json.dumps(dicttojson)
jso = json.dumps(dicttojson)
f = open(pkl_dump_dir + "dictIteration"+ str(i) + ".json", "w")
f.write(jso)
f.close()
if print_flag:
print_label_term_dict(label_term_dict, components)
print("#" * 80)
if __name__ == "__main__":
    # Command-line entry point: pick the dataset directory and, optionally,
    # pin the run to a specific GPU before handing off to main().
    cli = argparse.ArgumentParser()
    cli.add_argument('--dataset_path', type=str, default='./data/nyt/')
    cli.add_argument('--gpu_id', type=str, default="cpu")
    parsed = cli.parse_args()
    if parsed.gpu_id != "cpu":
        # Restrict visible devices only when a real GPU id was requested.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(parsed.gpu_id)
    main(dataset_path=parsed.dataset_path)
| [
"numpy.array",
"gensim.models.word2vec.Word2Vec",
"nltk.corpus.stopwords.words",
"argparse.ArgumentParser",
"json.dumps",
"numpy.asarray",
"numpy.tanh",
"numpy.stack",
"keras.callbacks.EarlyStopping",
"keras.metrics.TopKCategoricalAccuracy",
"sklearn.metrics.confusion_matrix",
"numpy.any",
"... | [((19798, 19810), 'gc.collect', 'gc.collect', ([], {}), '()\n', (19808, 19810), False, 'import gc\n'), ((20876, 20901), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20899, 20901), False, 'import argparse\n'), ((7301, 7337), 'os.makedirs', 'os.makedirs', (['dump_dir'], {'exist_ok': '(True)'}), '(dump_dir, exist_ok=True)\n', (7312, 7337), False, 'import os\n'), ((7346, 7381), 'os.makedirs', 'os.makedirs', (['tmp_dir'], {'exist_ok': '(True)'}), '(tmp_dir, exist_ok=True)\n', (7357, 7381), False, 'import os\n'), ((7744, 7755), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7752, 7755), True, 'import numpy as np\n'), ((7823, 7842), 'numpy.stack', 'np.stack', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (7831, 7842), True, 'import numpy as np\n'), ((7865, 7891), 'numpy.sum', 'np.sum', (['twodmatrix'], {'axis': '(0)'}), '(twodmatrix, axis=0)\n', (7871, 7891), True, 'import numpy as np\n'), ((7943, 7980), 'matplotlib.pyplot.title', 'plt.title', (['"""PSEUDOLABEL DISTRIBUTION"""'], {}), "('PSEUDOLABEL DISTRIBUTION')\n", (7952, 7980), True, 'import matplotlib.pyplot as plt\n'), ((7989, 7999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7997, 7999), True, 'import matplotlib.pyplot as plt\n'), ((9036, 9104), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(3)'}), "(monitor='val_loss', mode='min', verbose=1, patience=3)\n", (9049, 9104), False, 'from keras.callbacks import EarlyStopping\n'), ((9948, 9967), 'numpy.stack', 'np.stack', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (9956, 9967), True, 'import numpy as np\n'), ((9990, 10016), 'numpy.sum', 'np.sum', (['twodmatrix'], {'axis': '(0)'}), '(twodmatrix, axis=0)\n', (9996, 10016), True, 'import numpy as np\n'), ((10068, 10107), 'matplotlib.pyplot.title', 'plt.title', (['"""NN PREDICTION DISTRIBUTION"""'], {}), "('NN PREDICTION DISTRIBUTION')\n", (10077, 10107), True, 'import matplotlib.pyplot 
as plt\n'), ((10116, 10126), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10124, 10126), True, 'import matplotlib.pyplot as plt\n'), ((10500, 10520), 'numpy.array', 'np.array', (['y_true_all'], {}), '(y_true_all)\n', (10508, 10520), True, 'import numpy as np\n'), ((11204, 11280), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(1)', 'name': '"""top_k1_categorical_accuracy"""', 'dtype': 'None'}), "(k=1, name='top_k1_categorical_accuracy', dtype=None)\n", (11227, 11280), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((11312, 11388), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(2)', 'name': '"""top_k2_categorical_accuracy"""', 'dtype': 'None'}), "(k=2, name='top_k2_categorical_accuracy', dtype=None)\n", (11335, 11388), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((11420, 11496), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(3)', 'name': '"""top_k3_categorical_accuracy"""', 'dtype': 'None'}), "(k=3, name='top_k3_categorical_accuracy', dtype=None)\n", (11443, 11496), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((12019, 12095), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(1)', 'name': '"""top_k1_categorical_accuracy"""', 'dtype': 'None'}), "(k=1, name='top_k1_categorical_accuracy', dtype=None)\n", (12042, 12095), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((12121, 12197), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(2)', 'name': '"""top_k2_categorical_accuracy"""', 'dtype': 'None'}), "(k=2, name='top_k2_categorical_accuracy', dtype=None)\n", (12144, 12197), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((12223, 12299), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(3)', 'name': '"""top_k3_categorical_accuracy"""', 'dtype': 'None'}), "(k=3, 
name='top_k3_categorical_accuracy', dtype=None)\n", (12246, 12299), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((19344, 19357), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (19353, 19357), False, 'import json\n'), ((20551, 20573), 'json.dumps', 'json.dumps', (['dicttojson'], {}), '(dicttojson)\n', (20561, 20573), False, 'import json\n'), ((20588, 20610), 'json.dumps', 'json.dumps', (['dicttojson'], {}), '(dicttojson)\n', (20598, 20610), False, 'import json\n'), ((1376, 1519), 'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['sentences'], {'workers': 'num_workers', 'sg': 'sg', 'size': 'size_features', 'min_count': 'min_word_count', 'window': 'context', 'sample': 'downsampling'}), '(sentences, workers=num_workers, sg=sg, size=size_features,\n min_count=min_word_count, window=context, sample=downsampling)\n', (1393, 1519), False, 'from gensim.models import word2vec\n'), ((2916, 2942), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2931, 2942), False, 'from nltk.corpus import stopwords\n'), ((13422, 13457), 'numpy.zeros', 'np.zeros', (['(label_count, term_count)'], {}), '((label_count, term_count))\n', (13430, 13457), True, 'import numpy as np\n'), ((17891, 17907), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (17902, 17907), False, 'from collections import defaultdict\n'), ((4971, 4986), 'numpy.sum', 'np.sum', (['current'], {}), '(current)\n', (4977, 4986), True, 'import numpy as np\n'), ((9202, 9230), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(3)'}), '(k=3)\n', (9225, 9230), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((10612, 10623), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (10620, 10623), True, 'import numpy as np\n'), ((2153, 2212), 'numpy.random.uniform', 'np.random.uniform', (['(-0.25)', '(0.25)', 'embedding_model.vector_size'], {}), '(-0.25, 0.25, embedding_model.vector_size)\n', (2170, 2212), 
True, 'import numpy as np\n'), ((6789, 6800), 'numpy.sum', 'np.sum', (['lbl'], {}), '(lbl)\n', (6795, 6800), True, 'import numpy as np\n'), ((8926, 8954), 'keras.metrics.TopKCategoricalAccuracy', 'TopKCategoricalAccuracy', ([], {'k': '(3)'}), '(k=3)\n', (8949, 8954), False, 'from keras.metrics import TopKCategoricalAccuracy\n'), ((10966, 11017), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true_allnp.T[i]', 'lsprecrec.T[i]'], {}), '(y_true_allnp.T[i], lsprecrec.T[i])\n', (10982, 11017), False, 'from sklearn.metrics import confusion_matrix\n'), ((16831, 16843), 'numpy.any', 'np.any', (['E_LT'], {}), '(E_LT)\n', (16837, 16843), True, 'import numpy as np\n'), ((13892, 13912), 'numpy.asarray', 'np.asarray', (['rel_freq'], {}), '(rel_freq)\n', (13902, 13912), True, 'import numpy as np\n'), ((14735, 14755), 'numpy.tanh', 'np.tanh', (['rel_freq[i]'], {}), '(rel_freq[i])\n', (14742, 14755), True, 'import numpy as np\n'), ((14974, 14994), 'numpy.tanh', 'np.tanh', (['rel_freq[i]'], {}), '(rel_freq[i])\n', (14981, 14994), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dataclasses import dataclass, field
import numpy as np
from shared.BASparseMat import BASparseMat
from shared.output_utils import save_errors_to_file, objective_file_name,\
save_sparse_j_to_file, jacobian_file_name
@dataclass
class BAInput:
    """Input arrays for the bundle-adjustment (BA) objective.

    All fields default to empty arrays.  They use ``default_factory`` so each
    default-constructed instance gets its own fresh ndarray: the previous
    ``field(default=np.empty(...))`` evaluated once at class-definition time
    and shared a single mutable array across every instance.
    """
    cams: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
    x: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
    w: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
    # Integer-typed, unlike the other fields — presumably index data.
    obs: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.int32))
    feats: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
@dataclass
class BAOutput:
    """Output of the bundle-adjustment (BA) objective/Jacobian computation.

    ``default_factory`` replaces the previous ``field(default=...)`` forms,
    which evaluated once at class-definition time and shared one mutable
    ndarray / BASparseMat across every default-constructed instance.
    """
    reproj_err: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
    w_err: np.ndarray = field(default_factory=lambda: np.empty(0, dtype=np.float64))
    J: BASparseMat = field(default_factory=BASparseMat)

    def save_output_to_file(
        self,
        output_prefix,
        input_basename,
        module_basename
    ):
        """Write the error vectors and the sparse Jacobian to disk.

        File names are derived from the three name components by the
        shared output_utils helpers.
        """
        save_errors_to_file(
            objective_file_name(output_prefix, input_basename, module_basename),
            self.reproj_err,
            self.w_err
        )
        save_sparse_j_to_file(
            jacobian_file_name(output_prefix, input_basename, module_basename),
            self.J
        )
"shared.BASparseMat.BASparseMat",
"numpy.empty",
"shared.output_utils.jacobian_file_name",
"shared.output_utils.objective_file_name"
] | [((397, 426), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (405, 426), True, 'import numpy as np\n'), ((475, 504), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (483, 504), True, 'import numpy as np\n'), ((553, 582), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (561, 582), True, 'import numpy as np\n'), ((631, 658), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.int32'}), '(0, dtype=np.int32)\n', (639, 658), True, 'import numpy as np\n'), ((707, 736), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (715, 736), True, 'import numpy as np\n'), ((813, 842), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (821, 842), True, 'import numpy as np\n'), ((891, 920), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.float64'}), '(0, dtype=np.float64)\n', (899, 920), True, 'import numpy as np\n'), ((970, 983), 'shared.BASparseMat.BASparseMat', 'BASparseMat', ([], {}), '()\n', (981, 983), False, 'from shared.BASparseMat import BASparseMat\n'), ((1148, 1215), 'shared.output_utils.objective_file_name', 'objective_file_name', (['output_prefix', 'input_basename', 'module_basename'], {}), '(output_prefix, input_basename, module_basename)\n', (1167, 1215), False, 'from shared.output_utils import save_errors_to_file, objective_file_name, save_sparse_j_to_file, jacobian_file_name\n'), ((1323, 1389), 'shared.output_utils.jacobian_file_name', 'jacobian_file_name', (['output_prefix', 'input_basename', 'module_basename'], {}), '(output_prefix, input_basename, module_basename)\n', (1341, 1389), False, 'from shared.output_utils import save_errors_to_file, objective_file_name, save_sparse_j_to_file, jacobian_file_name\n')] |
#!/usr/bin/env python

import mirheo as mir

import argparse  # NOTE(review): imported but unused in this script
import numpy as np

# Regression test for Mirheo's particle-displacement plugin: integrate a
# small set of random particles with velocity Verlet and periodically dump
# their accumulated displacements to HDF5.

ranks  = (1, 1, 1)   # single rank per direction
domain = (8, 8, 8)   # simulation box size

dt = 0.01            # integration time step

u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)

n = 20               # number of particles
np.random.seed(42)   # fixed seed keeps the reference output reproducible
positions = np.random.rand(n, 3)
velocities = np.random.rand(n, 3) - 0.5

for i in range(3):
    # Scale unit-cube positions up to the domain extent.
    positions[:,i] *= domain[i]

pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.FromArray(positions.tolist(), velocities.tolist())
u.registerParticleVector(pv=pv, ic=ic)

vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)

dump_every = 20
update_every = dump_every  # update displacements at the same cadence as dumps

u.registerPlugins(mir.Plugins.createParticleDisplacement('disp', pv, update_every))
u.registerPlugins(mir.Plugins.createDumpParticles('partDump', pv, dump_every, ["displacements"], 'h5/solvent_particles-'))

u.run(100)

# TEST: plugins.displacements
# cd plugins
# rm -rf h5 displacements.out.txt
# mir.run --runargs "-n 2" ./displacements.py
# mir.post h5dump -d displacements h5/solvent_particles-00004.h5 | awk '{print $2, $3, $4}' | LC_ALL=en_US.utf8 sort > displacements.out.txt
| [
"numpy.random.rand",
"mirheo.Plugins.createParticleDisplacement",
"mirheo.Mirheo",
"mirheo.Plugins.createDumpParticles",
"mirheo.ParticleVectors.ParticleVector",
"mirheo.Integrators.VelocityVerlet",
"numpy.random.seed"
] | [((134, 219), 'mirheo.Mirheo', 'mir.Mirheo', (['ranks', 'domain', 'dt'], {'debug_level': '(3)', 'log_filename': '"""log"""', 'no_splash': '(True)'}), "(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True\n )\n", (144, 219), True, 'import mirheo as mir\n'), ((223, 241), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (237, 241), True, 'import numpy as np\n'), ((255, 275), 'numpy.random.rand', 'np.random.rand', (['n', '(3)'], {}), '(n, 3)\n', (269, 275), True, 'import numpy as np\n'), ((374, 422), 'mirheo.ParticleVectors.ParticleVector', 'mir.ParticleVectors.ParticleVector', (['"""pv"""'], {'mass': '(1)'}), "('pv', mass=1)\n", (408, 422), True, 'import mirheo as mir\n'), ((548, 584), 'mirheo.Integrators.VelocityVerlet', 'mir.Integrators.VelocityVerlet', (['"""vv"""'], {}), "('vv')\n", (578, 584), True, 'import mirheo as mir\n'), ((289, 309), 'numpy.random.rand', 'np.random.rand', (['n', '(3)'], {}), '(n, 3)\n', (303, 309), True, 'import numpy as np\n'), ((696, 760), 'mirheo.Plugins.createParticleDisplacement', 'mir.Plugins.createParticleDisplacement', (['"""disp"""', 'pv', 'update_every'], {}), "('disp', pv, update_every)\n", (734, 760), True, 'import mirheo as mir\n'), ((780, 888), 'mirheo.Plugins.createDumpParticles', 'mir.Plugins.createDumpParticles', (['"""partDump"""', 'pv', 'dump_every', "['displacements']", '"""h5/solvent_particles-"""'], {}), "('partDump', pv, dump_every, [\n 'displacements'], 'h5/solvent_particles-')\n", (811, 888), True, 'import mirheo as mir\n')] |
import cv2
import numpy as np
import random
import os
from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light
class backgroundOverlayer(object):
    """
    Overlays AprilTags onto a background image and returns the composited
    image together with mask/response buffers and the tag corner coordinates.
    """
    def __init__(self, apriltag_generator, mx_tags):
        # apriltag_generator: indexable source of rendered tags; each item is a
        # dict with "image", "mask", "response", "response_in_use", "corners_uv".
        # mx_tags: NOTE(review) — stored but never read; the per-image cap is
        # the hard-coded tags_to_overlay below. Presumably mx_tags was intended.
        self.generator = apriltag_generator
        self.mx_tags = mx_tags

    def __call__(self, background_img):
        corners_collection = []
        tags_to_overlay = 50  # upper bound on placement attempts, not guaranteed placements
        # Occupancy mask of already-placed tags (uint8, 0/255).
        out_response = np.zeros(background_img.shape[:2], dtype = np.uint8)
        # 5-channel response buffers; last channel initialized to 255 and
        # combined with np.minimum below (exact channel semantics come from
        # the generator's "response"/"response_in_use" — not visible here).
        real_out_response = np.full((background_img.shape[0],background_img.shape[1], 5),0, dtype = np.uint8)
        real_out_response[:,:,-1] = 255
        really_real_out_response = np.full((background_img.shape[0],background_img.shape[1], 5),0, dtype = np.uint8)
        really_real_out_response[:,:,-1] = 255
        # Per-pixel tag index map (channel 0 holds the generator index).
        id_real_out_response = np.full((background_img.shape[0],background_img.shape[1], 2),0, dtype = np.uint8)
        # It attempts to place up to tags_to_overlay tags; a candidate that
        # would overlap an already-placed tag is simply discarded.
        for tag in range(tags_to_overlay):
            index = random.randrange(len(self.generator))
            # index= random.choice([27,28, 29,30,31,32, 33, 34, 35,36, 37, 38, 38, 39, 40,41, 42, 43, 44])
            # index = random.randrange(500)
            # index = 27
            result = self.generator[index]
            response = result["response"]
            response_in_use = result["response_in_use"]
            mask = result["mask"]
            tag_img = result["image"]
            corners_coords = result["corners_uv"]
            # mask = np.maximum(mask, tag_img)
            # Binarize the tag mask: only fully-opaque (255) pixels count.
            _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
            mask_inv = cv2.bitwise_not(mask)
            width = tag_img.shape[1]
            height = tag_img.shape[0]
            if background_img.shape[1] < width or background_img.shape[0] < height:
                continue  # tag larger than the background: skip
            # Random top-left corner such that the tag fits entirely.
            x_offset = random.randrange(background_img.shape[1] - width + 1)
            y_offset = random.randrange(background_img.shape[0] - height + 1)
            out_response_view = out_response[y_offset:y_offset + height, x_offset:x_offset + width]
            # NOTE(review): the next two views are recomputed after blending and
            # never read before that; also this one reads real_out_response, not
            # really_real_out_response — looks like dead copy-paste code.
            real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width]
            really_real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width]
            if cv2.bitwise_and(out_response_view, mask).any():
                continue  # would overlap a previously placed tag
            # Merge with the image: background outside the mask, tag inside.
            background_img_view = background_img[y_offset:y_offset + height , x_offset:x_offset + width]
            img_masked = cv2.bitwise_and(background_img_view, background_img_view, mask=mask_inv)
            tag_img = cv2.cvtColor(tag_img, cv2.COLOR_GRAY2BGR)
            # Randomly lift the black level of the tag (0..100).
            tag_img = np.clip(tag_img, random.randint(0,10)*10, 255)
            tag_img_masked = cv2.bitwise_and(tag_img, tag_img, mask = mask)
            # Match the tag's lightness to the local background (LAB L channel).
            if np.random.uniform(0, 1, 1)[0] > 0.1:
                background_img_view_lab = cv2.cvtColor(background_img_view, cv2.COLOR_BGR2LAB)
                tag_img_view_lab = cv2.cvtColor(tag_img_masked, cv2.COLOR_BGR2LAB)
                light_background = background_img_view_lab[:, :,0].mean()
                light_tag = tag_img_view_lab[:,:,0].sum()/ np.count_nonzero(mask)
                # kernel = np.ones((5,5),np.float32)/25
                # light_tag = cv2.filter2D(light_tag,-1,kernel)
                w_light = (( light_background/(light_tag + 0.0001)))
                # w_light = np.ones((height, width), dtype = np.float32)*w_light
                # w_light = (w_light +np.random.normal(0, 0.1, w_light.shape))
                tag_img_view_lab[:, :, 0] = np.clip(np.multiply(tag_img_view_lab[:,:,0] ,w_light), 0, 255);
                # Threshold > 1.7 can never fire for uniform(0,1): lighting
                # augmentation is currently disabled — presumably on purpose.
                if np.random.uniform(0, 1, 1)[0] > 1.7:
                    tag_img_view_lab[:, :,0] = add_spot_light(tag_img_view_lab[:,:,0][..., np.newaxis])
                    tag_img_view_lab[:, :,0] = add_parallel_light(tag_img_view_lab[:,:,0][..., np.newaxis])
                tag_img_masked= cv2.cvtColor(tag_img_view_lab, cv2.COLOR_LAB2BGR)
                tag_img_masked = cv2.bitwise_and(tag_img_masked, tag_img, mask = mask)
            background_img_view = cv2.add(img_masked, tag_img_masked)
            # Make sure no overlaps, then commit tag and responses.
            out_response_view = out_response[y_offset:y_offset + height, x_offset:x_offset + width]
            real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width]
            id_real_out_response_view = id_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, 0]
            really_real_out_response_view = really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width]
            if not cv2.bitwise_and(out_response_view, mask).any():
                # Occasionally soften the tag border by blending a blurred copy
                # along the mask contour.
                if np.random.uniform(0, 1, 1)[0] > 0.8:
                    blurred_background_img_view = cv2.GaussianBlur(background_img_view, (5, 5), 0)
                    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    tmp_mask = np.zeros(background_img_view.shape, dtype = np.uint8)
                    cv2.drawContours(tmp_mask, contours, -1, (255,255, 255),5)
                    background_img_view = np.where(tmp_mask==np.array([255, 255, 255]), blurred_background_img_view, background_img_view)
                background_img[y_offset:y_offset + height , x_offset:x_offset + width] = background_img_view
                out_response[y_offset:y_offset + height , x_offset:x_offset + width] = cv2.bitwise_or(out_response_view, mask)
                real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, :-1] = np.maximum(response[:,:,:-1], real_out_response_view[:,:,:-1])
                real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, -1] = np.minimum(response[:,:,-1], real_out_response_view[:,:,-1])
                id_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, 0] = np.maximum(id_real_out_response_view, mask/255*index)
                really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, :-1] = np.maximum(response_in_use[:,:,:-1], really_real_out_response_view[:,:,:-1])
                really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, -1] = np.minimum(response_in_use[:,:,-1], really_real_out_response_view[:,:,-1])
                # Tag corners translated into background coordinates.
                corners_collection.append([np.array([x_offset, y_offset])+corners_coords ])
        # Whole-image augmentations. NOTE(review): every threshold below that
        # compares uniform(0,1) against a value > 1 can never fire — those
        # augmentations are effectively switched off.
        if np.random.uniform(0, 1, 1)[0] > 1.8:
            background_img[:,:,0] = cv2.equalizeHist(background_img[:,:,0]);
            background_img[:,:,1] = cv2.equalizeHist(background_img[:,:,1]);
            background_img[:,:,2] = cv2.equalizeHist(background_img[:,:,2]);
        if np.random.uniform(0, 1, 1)[0] > 1.5:
            background_img = add_shadow(background_img, random.randrange(2))
        if np.random.uniform(0, 1, 1)[0] > 1.5:
            background_img = add_spot_light(background_img)
        if np.random.uniform(0, 1, 1)[0] > 1.5:
            background_img = add_parallel_light(background_img)
        if np.random.uniform(0, 1, 1)[0] > 1.5:
            background_img = add_noise(background_img, "gauss")
        if np.random.uniform(0, 1, 1)[0] > 1.8:
            background_img = add_noise(background_img, "s&p")
        if np.random.uniform(0, 1, 1)[0] > 1.8:
            background_img = add_noise(background_img, "speckle")
        # Motion blur
        if np.random.uniform(0, 1, 1)[0] > 1.8 :
            size = np.random.randint(3, 7)
            deg = np.random.randint(-180, 180)
            background_img = apply_motion_blur(background_img, size, deg)
        return background_img, out_response, np.clip(real_out_response,0,255),np.clip(really_real_out_response,0,255), id_real_out_response, corners_collection
| [
"numpy.clip",
"numpy.count_nonzero",
"numpy.array",
"cv2.bitwise_or",
"numpy.multiply",
"cv2.threshold",
"numpy.maximum",
"random.randint",
"cv2.add",
"helper.apply_motion_blur",
"cv2.drawContours",
"numpy.full",
"random.randrange",
"helper.add_spot_light",
"cv2.equalizeHist",
"cv2.cvt... | [((508, 558), 'numpy.zeros', 'np.zeros', (['background_img.shape[:2]'], {'dtype': 'np.uint8'}), '(background_img.shape[:2], dtype=np.uint8)\n', (516, 558), True, 'import numpy as np\n'), ((589, 675), 'numpy.full', 'np.full', (['(background_img.shape[0], background_img.shape[1], 5)', '(0)'], {'dtype': 'np.uint8'}), '((background_img.shape[0], background_img.shape[1], 5), 0, dtype=np.\n uint8)\n', (596, 675), True, 'import numpy as np\n'), ((748, 834), 'numpy.full', 'np.full', (['(background_img.shape[0], background_img.shape[1], 5)', '(0)'], {'dtype': 'np.uint8'}), '((background_img.shape[0], background_img.shape[1], 5), 0, dtype=np.\n uint8)\n', (755, 834), True, 'import numpy as np\n'), ((909, 995), 'numpy.full', 'np.full', (['(background_img.shape[0], background_img.shape[1], 2)', '(0)'], {'dtype': 'np.uint8'}), '((background_img.shape[0], background_img.shape[1], 2), 0, dtype=np.\n uint8)\n', (916, 995), True, 'import numpy as np\n'), ((1774, 1822), 'cv2.threshold', 'cv2.threshold', (['mask', '(254)', '(255)', 'cv2.THRESH_BINARY'], {}), '(mask, 254, 255, cv2.THRESH_BINARY)\n', (1787, 1822), False, 'import cv2\n'), ((1846, 1867), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (1861, 1867), False, 'import cv2\n'), ((2078, 2131), 'random.randrange', 'random.randrange', (['(background_img.shape[1] - width + 1)'], {}), '(background_img.shape[1] - width + 1)\n', (2094, 2131), False, 'import random\n'), ((2155, 2209), 'random.randrange', 'random.randrange', (['(background_img.shape[0] - height + 1)'], {}), '(background_img.shape[0] - height + 1)\n', (2171, 2209), False, 'import random\n'), ((2810, 2882), 'cv2.bitwise_and', 'cv2.bitwise_and', (['background_img_view', 'background_img_view'], {'mask': 'mask_inv'}), '(background_img_view, background_img_view, mask=mask_inv)\n', (2825, 2882), False, 'import cv2\n'), ((2918, 2959), 'cv2.cvtColor', 'cv2.cvtColor', (['tag_img', 'cv2.COLOR_GRAY2BGR'], {}), '(tag_img, cv2.COLOR_GRAY2BGR)\n', 
(2930, 2959), False, 'import cv2\n'), ((3063, 3107), 'cv2.bitwise_and', 'cv2.bitwise_and', (['tag_img', 'tag_img'], {'mask': 'mask'}), '(tag_img, tag_img, mask=mask)\n', (3078, 3107), False, 'import cv2\n'), ((4374, 4425), 'cv2.bitwise_and', 'cv2.bitwise_and', (['tag_img_masked', 'tag_img'], {'mask': 'mask'}), '(tag_img_masked, tag_img, mask=mask)\n', (4389, 4425), False, 'import cv2\n'), ((4463, 4498), 'cv2.add', 'cv2.add', (['img_masked', 'tag_img_masked'], {}), '(img_masked, tag_img_masked)\n', (4470, 4498), False, 'import cv2\n'), ((6894, 6935), 'cv2.equalizeHist', 'cv2.equalizeHist', (['background_img[:, :, 0]'], {}), '(background_img[:, :, 0])\n', (6910, 6935), False, 'import cv2\n'), ((6971, 7012), 'cv2.equalizeHist', 'cv2.equalizeHist', (['background_img[:, :, 1]'], {}), '(background_img[:, :, 1])\n', (6987, 7012), False, 'import cv2\n'), ((7048, 7089), 'cv2.equalizeHist', 'cv2.equalizeHist', (['background_img[:, :, 2]'], {}), '(background_img[:, :, 2])\n', (7064, 7089), False, 'import cv2\n'), ((7293, 7323), 'helper.add_spot_light', 'add_spot_light', (['background_img'], {}), '(background_img)\n', (7307, 7323), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((7403, 7437), 'helper.add_parallel_light', 'add_parallel_light', (['background_img'], {}), '(background_img)\n', (7421, 7437), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((7517, 7551), 'helper.add_noise', 'add_noise', (['background_img', '"""gauss"""'], {}), "(background_img, 'gauss')\n", (7526, 7551), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((7630, 7662), 'helper.add_noise', 'add_noise', (['background_img', '"""s&p"""'], {}), "(background_img, 's&p')\n", (7639, 7662), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((7741, 7777), 
'helper.add_noise', 'add_noise', (['background_img', '"""speckle"""'], {}), "(background_img, 'speckle')\n", (7750, 7777), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((7869, 7892), 'numpy.random.randint', 'np.random.randint', (['(3)', '(7)'], {}), '(3, 7)\n', (7886, 7892), True, 'import numpy as np\n'), ((7911, 7939), 'numpy.random.randint', 'np.random.randint', (['(-180)', '(180)'], {}), '(-180, 180)\n', (7928, 7939), True, 'import numpy as np\n'), ((7969, 8013), 'helper.apply_motion_blur', 'apply_motion_blur', (['background_img', 'size', 'deg'], {}), '(background_img, size, deg)\n', (7986, 8013), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((8063, 8097), 'numpy.clip', 'np.clip', (['real_out_response', '(0)', '(255)'], {}), '(real_out_response, 0, 255)\n', (8070, 8097), True, 'import numpy as np\n'), ((8096, 8137), 'numpy.clip', 'np.clip', (['really_real_out_response', '(0)', '(255)'], {}), '(really_real_out_response, 0, 255)\n', (8103, 8137), True, 'import numpy as np\n'), ((3231, 3283), 'cv2.cvtColor', 'cv2.cvtColor', (['background_img_view', 'cv2.COLOR_BGR2LAB'], {}), '(background_img_view, cv2.COLOR_BGR2LAB)\n', (3243, 3283), False, 'import cv2\n'), ((3319, 3366), 'cv2.cvtColor', 'cv2.cvtColor', (['tag_img_masked', 'cv2.COLOR_BGR2LAB'], {}), '(tag_img_masked, cv2.COLOR_BGR2LAB)\n', (3331, 3366), False, 'import cv2\n'), ((4288, 4337), 'cv2.cvtColor', 'cv2.cvtColor', (['tag_img_view_lab', 'cv2.COLOR_LAB2BGR'], {}), '(tag_img_view_lab, cv2.COLOR_LAB2BGR)\n', (4300, 4337), False, 'import cv2\n'), ((5840, 5879), 'cv2.bitwise_or', 'cv2.bitwise_or', (['out_response_view', 'mask'], {}), '(out_response_view, mask)\n', (5854, 5879), False, 'import cv2\n'), ((5978, 6044), 'numpy.maximum', 'np.maximum', (['response[:, :, :-1]', 'real_out_response_view[:, :, :-1]'], {}), '(response[:, :, :-1], real_out_response_view[:, :, :-1])\n', (5988, 
6044), True, 'import numpy as np\n'), ((6138, 6202), 'numpy.minimum', 'np.minimum', (['response[:, :, -1]', 'real_out_response_view[:, :, -1]'], {}), '(response[:, :, -1], real_out_response_view[:, :, -1])\n', (6148, 6202), True, 'import numpy as np\n'), ((6299, 6356), 'numpy.maximum', 'np.maximum', (['id_real_out_response_view', '(mask / 255 * index)'], {}), '(id_real_out_response_view, mask / 255 * index)\n', (6309, 6356), True, 'import numpy as np\n'), ((6459, 6544), 'numpy.maximum', 'np.maximum', (['response_in_use[:, :, :-1]', 'really_real_out_response_view[:, :, :-1]'], {}), '(response_in_use[:, :, :-1], really_real_out_response_view[:, :, :-1]\n )\n', (6469, 6544), True, 'import numpy as np\n'), ((6640, 6718), 'numpy.minimum', 'np.minimum', (['response_in_use[:, :, -1]', 'really_real_out_response_view[:, :, -1]'], {}), '(response_in_use[:, :, -1], really_real_out_response_view[:, :, -1])\n', (6650, 6718), True, 'import numpy as np\n'), ((6821, 6847), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (6838, 6847), True, 'import numpy as np\n'), ((7101, 7127), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7118, 7127), True, 'import numpy as np\n'), ((7194, 7213), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (7210, 7213), False, 'import random\n'), ((7227, 7253), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7244, 7253), True, 'import numpy as np\n'), ((7337, 7363), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7354, 7363), True, 'import numpy as np\n'), ((7451, 7477), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7468, 7477), True, 'import numpy as np\n'), ((7564, 7590), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7581, 7590), True, 'import numpy as np\n'), ((7675, 7701), 
'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7692, 7701), True, 'import numpy as np\n'), ((7812, 7838), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (7829, 7838), True, 'import numpy as np\n'), ((2562, 2602), 'cv2.bitwise_and', 'cv2.bitwise_and', (['out_response_view', 'mask'], {}), '(out_response_view, mask)\n', (2577, 2602), False, 'import cv2\n'), ((2999, 3020), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3013, 3020), False, 'import random\n'), ((3151, 3177), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3168, 3177), True, 'import numpy as np\n'), ((3501, 3523), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (3517, 3523), True, 'import numpy as np\n'), ((3929, 3976), 'numpy.multiply', 'np.multiply', (['tag_img_view_lab[:, :, 0]', 'w_light'], {}), '(tag_img_view_lab[:, :, 0], w_light)\n', (3940, 3976), True, 'import numpy as np\n'), ((4090, 4148), 'helper.add_spot_light', 'add_spot_light', (['tag_img_view_lab[:, :, 0][..., np.newaxis]'], {}), '(tag_img_view_lab[:, :, 0][..., np.newaxis])\n', (4104, 4148), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((4194, 4256), 'helper.add_parallel_light', 'add_parallel_light', (['tag_img_view_lab[:, :, 0][..., np.newaxis]'], {}), '(tag_img_view_lab[:, :, 0][..., np.newaxis])\n', (4212, 4256), False, 'from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light\n'), ((5173, 5221), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['background_img_view', '(5, 5)', '(0)'], {}), '(background_img_view, (5, 5), 0)\n', (5189, 5221), False, 'import cv2\n'), ((5264, 5330), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (5280, 5330), False, 'import 
cv2\n'), ((5362, 5413), 'numpy.zeros', 'np.zeros', (['background_img_view.shape'], {'dtype': 'np.uint8'}), '(background_img_view.shape, dtype=np.uint8)\n', (5370, 5413), True, 'import numpy as np\n'), ((5436, 5496), 'cv2.drawContours', 'cv2.drawContours', (['tmp_mask', 'contours', '(-1)', '(255, 255, 255)', '(5)'], {}), '(tmp_mask, contours, -1, (255, 255, 255), 5)\n', (5452, 5496), False, 'import cv2\n'), ((4006, 4032), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (4023, 4032), True, 'import numpy as np\n'), ((5017, 5057), 'cv2.bitwise_and', 'cv2.bitwise_and', (['out_response_view', 'mask'], {}), '(out_response_view, mask)\n', (5032, 5057), False, 'import cv2\n'), ((5086, 5112), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (5103, 5112), True, 'import numpy as np\n'), ((5556, 5581), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (5564, 5581), True, 'import numpy as np\n'), ((6759, 6789), 'numpy.array', 'np.array', (['[x_offset, y_offset]'], {}), '([x_offset, y_offset])\n', (6767, 6789), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, torch
from tqdm import tqdm
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import tensorflow.compat.v1 as tf
from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd
import numpy as np
def compute_fvd(real_videos, fake_videos, device=0):
    """Compute the Frechet Video Distance (FVD) between real and fake videos.

    :param real_videos: iterable of torch tensors shaped (batch, time, C, H, W)
        with values in [-1, 1].
    :param fake_videos: iterable of torch tensors with the same layout.
        NOTE(review): both placeholders are built from real_videos[0].shape,
        so real and fake batches are assumed to share one common shape —
        confirm against callers.
    :param device: trailing digit of the GPU device name to pin TF to.
    :return: the FVD score (scalar).
    """
    # Restrict TensorFlow to the single requested GPU.
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # Construct the embedding graph once; it is reused for every batch.
            sess = tf.Session()
            input_real = tf.placeholder(dtype=tf.float32, shape=(*real_videos[0].shape[:2], real_videos[0].shape[3],
                                                     real_videos[0].shape[4], real_videos[0].shape[2]))
            input_fake = tf.placeholder(dtype=tf.float32, shape=(*real_videos[0].shape[:2], real_videos[0].shape[3],
                                                     real_videos[0].shape[4], real_videos[0].shape[2]))
            real_pre = preprocess(input_real, (224, 224))
            emb_real = Embedder(real_pre)
            embed_real = emb_real.create_id3_embedding(real_pre)
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            real, fake = [], []
            for rv, fv in tqdm(zip(real_videos, fake_videos)):
                # Map values from [-1, 1] to [0, 255] and layout from
                # (B, T, C, H, W) to (B, T, H, W, C) as the I3D embedder expects.
                real_batch = ((rv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                feed_dict = {input_real: real_batch, input_fake: fake_batch}
                # FIX: fetches were ordered [embed_fake, embed_real] while the
                # results were unpacked as (r, f), swapping the real/fake lists.
                # The Frechet distance is symmetric so the score was unaffected,
                # but the labeling was wrong; fetch in (real, fake) order.
                r, f = sess.run([embed_real, embed_fake], feed_dict)
                real.append(r)
                fake.append(f)
            print('Compute FVD score')
            real = np.concatenate(real, axis=0)
            fake = np.concatenate(fake, axis=0)
            # Feed the collected embeddings into the FVD graph (400-d I3D features).
            embed_real = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            embed_fake = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            result = calculate_fvd(embed_real, embed_fake)
            feed_dict = {embed_real: real, embed_fake: fake}
            result = sess.run(result, feed_dict)
            sess.close()
    tf.reset_default_graph()
    return result
def get_embeddings(fake_videos, device=0):
    """Compute I3D embeddings for a collection of video batches.

    :param fake_videos: iterable of torch tensors shaped (batch, time, C, H, W)
        with values in [-1, 1].
    :param device: trailing digit of the GPU device name to pin TF to.
    :return: np.ndarray of stacked embeddings, one 400-d row per video.
    """
    # Restrict TensorFlow to the single requested GPU.
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # Construct the embedding graph once; it is reused for every batch.
            sess = tf.Session()
            input_fake = tf.placeholder(dtype=tf.float32, shape=(*fake_videos[0].shape[:2], fake_videos[0].shape[3],
                                                     fake_videos[0].shape[4], fake_videos[0].shape[2]))
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            fake = []
            for fv in tqdm(fake_videos):
                # Map values from [-1, 1] to [0, 255] and layout from
                # (B, T, C, H, W) to (B, T, H, W, C).
                fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                feed_dict = {input_fake: fake_batch}
                # FIX: sess.run([embed_fake]) returns a one-element list, which
                # made np.concatenate produce a wrongly nested (N, B, 400) array.
                # Fetch the tensor directly so each element is (batch, 400),
                # matching how compute_fvd collects its embeddings.
                f = sess.run(embed_fake, feed_dict)
                fake.append(f)
            fake = np.concatenate(fake, axis=0)
            sess.close()
    tf.reset_default_graph()
    return fake
"tensorflow.compat.v1.placeholder",
"silence_tensorflow.silence_tensorflow",
"tensorflow.compat.v1.Graph",
"tqdm.tqdm",
"tensorflow.compat.v1.config.experimental.set_visible_devices",
"metrics.FVD.FVD.preprocess",
"tensorflow.compat.v1.device",
"numpy.concatenate",
"tensorflow.compat.v1.config.exper... | [((198, 218), 'silence_tensorflow.silence_tensorflow', 'silence_tensorflow', ([], {}), '()\n', (216, 218), False, 'from silence_tensorflow import silence_tensorflow\n'), ((402, 451), 'tensorflow.compat.v1.config.experimental.get_visible_devices', 'tf.config.experimental.get_visible_devices', (['"""GPU"""'], {}), "('GPU')\n", (444, 451), True, 'import tensorflow.compat.v1 as tf\n'), ((526, 587), 'tensorflow.compat.v1.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['target_dev', '"""GPU"""'], {}), "(target_dev, 'GPU')\n", (568, 587), True, 'import tensorflow.compat.v1 as tf\n'), ((2586, 2610), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2608, 2610), True, 'import tensorflow.compat.v1 as tf\n'), ((2685, 2734), 'tensorflow.compat.v1.config.experimental.get_visible_devices', 'tf.config.experimental.get_visible_devices', (['"""GPU"""'], {}), "('GPU')\n", (2727, 2734), True, 'import tensorflow.compat.v1 as tf\n'), ((2809, 2870), 'tensorflow.compat.v1.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['target_dev', '"""GPU"""'], {}), "(target_dev, 'GPU')\n", (2851, 2870), True, 'import tensorflow.compat.v1 as tf\n'), ((3880, 3904), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3902, 3904), True, 'import tensorflow.compat.v1 as tf\n'), ((598, 617), 'tensorflow.compat.v1.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (607, 617), True, 'import tensorflow.compat.v1 as tf\n'), ((2881, 2900), 'tensorflow.compat.v1.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (2890, 2900), True, 'import tensorflow.compat.v1 as tf\n'), ((706, 718), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (716, 718), True, 'import tensorflow.compat.v1 as tf\n'), ((744, 890), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 
'tf.float32', 'shape': '(*real_videos[0].shape[:2], real_videos[0].shape[3], real_videos[0].shape[4\n ], real_videos[0].shape[2])'}), '(dtype=tf.float32, shape=(*real_videos[0].shape[:2],\n real_videos[0].shape[3], real_videos[0].shape[4], real_videos[0].shape[2]))\n', (758, 890), True, 'import tensorflow.compat.v1 as tf\n'), ((977, 1123), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(*real_videos[0].shape[:2], real_videos[0].shape[3], real_videos[0].shape[4\n ], real_videos[0].shape[2])'}), '(dtype=tf.float32, shape=(*real_videos[0].shape[:2],\n real_videos[0].shape[3], real_videos[0].shape[4], real_videos[0].shape[2]))\n', (991, 1123), True, 'import tensorflow.compat.v1 as tf\n'), ((1209, 1243), 'metrics.FVD.FVD.preprocess', 'preprocess', (['input_real', '(224, 224)'], {}), '(input_real, (224, 224))\n', (1219, 1243), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((1267, 1285), 'metrics.FVD.FVD.Embedder', 'Embedder', (['real_pre'], {}), '(real_pre)\n', (1275, 1285), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((1374, 1408), 'metrics.FVD.FVD.preprocess', 'preprocess', (['input_fake', '(224, 224)'], {}), '(input_fake, (224, 224))\n', (1384, 1408), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((1432, 1450), 'metrics.FVD.FVD.Embedder', 'Embedder', (['fake_pre'], {}), '(fake_pre)\n', (1440, 1450), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((2139, 2167), 'numpy.concatenate', 'np.concatenate', (['real'], {'axis': '(0)'}), '(real, axis=0)\n', (2153, 2167), True, 'import numpy as np\n'), ((2187, 2215), 'numpy.concatenate', 'np.concatenate', (['fake'], {'axis': '(0)'}), '(fake, axis=0)\n', (2201, 2215), True, 'import numpy as np\n'), ((2241, 2301), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(real.shape[0], 400)'}), '(dtype=tf.float32, 
shape=(real.shape[0], 400))\n', (2255, 2301), True, 'import tensorflow.compat.v1 as tf\n'), ((2327, 2387), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(real.shape[0], 400)'}), '(dtype=tf.float32, shape=(real.shape[0], 400))\n', (2341, 2387), True, 'import tensorflow.compat.v1 as tf\n'), ((2409, 2446), 'metrics.FVD.FVD.calculate_fvd', 'calculate_fvd', (['embed_real', 'embed_fake'], {}), '(embed_real, embed_fake)\n', (2422, 2446), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((2989, 3001), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2999, 3001), True, 'import tensorflow.compat.v1 as tf\n'), ((3027, 3173), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(*fake_videos[0].shape[:2], fake_videos[0].shape[3], fake_videos[0].shape[4\n ], fake_videos[0].shape[2])'}), '(dtype=tf.float32, shape=(*fake_videos[0].shape[:2],\n fake_videos[0].shape[3], fake_videos[0].shape[4], fake_videos[0].shape[2]))\n', (3041, 3173), True, 'import tensorflow.compat.v1 as tf\n'), ((3259, 3293), 'metrics.FVD.FVD.preprocess', 'preprocess', (['input_fake', '(224, 224)'], {}), '(input_fake, (224, 224))\n', (3269, 3293), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((3317, 3335), 'metrics.FVD.FVD.Embedder', 'Embedder', (['fake_pre'], {}), '(fake_pre)\n', (3325, 3335), False, 'from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd\n'), ((3559, 3576), 'tqdm.tqdm', 'tqdm', (['fake_videos'], {}), '(fake_videos)\n', (3563, 3576), False, 'from tqdm import tqdm\n'), ((3822, 3850), 'numpy.concatenate', 'np.concatenate', (['fake'], {'axis': '(0)'}), '(fake, axis=0)\n', (3836, 3850), True, 'import numpy as np\n'), ((1538, 1571), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1569, 1571), True, 'import tensorflow.compat.v1 as tf\n'), ((1594, 1617), 
'tensorflow.compat.v1.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (1615, 1617), True, 'import tensorflow.compat.v1 as tf\n'), ((3423, 3456), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3454, 3456), True, 'import tensorflow.compat.v1 as tf\n'), ((3479, 3502), 'tensorflow.compat.v1.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (3500, 3502), True, 'import tensorflow.compat.v1 as tf\n'), ((632, 642), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (640, 642), True, 'import tensorflow.compat.v1 as tf\n'), ((2915, 2925), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2923, 2925), True, 'import tensorflow.compat.v1 as tf\n')] |
from qfengine.risk.risk_model import RiskModel
from abc import ABCMeta
import numpy as np
import pandas as pd
class CovarianceMatrixRiskModel(RiskModel):
    """Risk model that estimates asset risk as the covariance matrix of
    (optionally logarithmic) daily returns over an asset universe.

    The matrix is assembled as VOL' * CORR * VOL, where both the volatility
    vector and the correlation matrix may be supplied by custom callables
    (``ret_std_op`` / ``ret_corr_op``); otherwise plain pandas ``std``/``corr``
    are used. Returns may additionally be filtered via ``ret_filter_op``.
    """
    __metaclass__ = ABCMeta

    def __init__(self,
                 universe,
                 data_handler,
                 logarithmic_returns: bool = True,
                 ret_filter_op=None,
                 ret_std_op=None,
                 ret_corr_op=None,
                 **kwargs
                 ):
        self.universe = universe
        self.data_handler = data_handler
        self.logarithmic_returns = logarithmic_returns
        self.ret_filter_op = ret_filter_op
        self.ret_std_op = ret_std_op
        self.ret_corr_op = ret_corr_op

    # ---| Computing returns time-series data
    def _closes_to_returns_df(self, closes_df: pd.DataFrame, **kwargs) -> pd.DataFrame:
        """Convert a close-price frame into a (log or simple) returns frame."""
        if self.logarithmic_returns:
            shifted = closes_df.shift(1)
            return np.log(closes_df / shifted).dropna()
        return closes_df.pct_change().dropna()

    def _get_universe_historical_daily_close_df(self, dt, **kwargs) -> pd.DataFrame:
        """Fetch historical daily closes for all universe assets up to ``dt``."""
        assets = self.universe.get_assets(dt)
        return self.data_handler.get_assets_historical_closes(assets, end_dt=dt)

    def _filter_returns_df(self, returns_df: pd.DataFrame, **kwargs) -> pd.DataFrame:
        """Apply the optional user-supplied filter to the returns frame."""
        if self.ret_filter_op is None:
            return returns_df
        return self.ret_filter_op(returns_df)

    def get_returns_df(self, dt, **kwargs):
        """Build the filtered returns frame for the universe as of ``dt``."""
        closes = self._get_universe_historical_daily_close_df(dt, **kwargs)
        returns = self._closes_to_returns_df(closes_df=closes, **kwargs)
        return self._filter_returns_df(returns)

    # ---| Computing the covariance matrix
    def _returns_volatility(self, ret):
        """Per-asset volatility (std of returns), via ``ret_std_op`` when set."""
        if self.ret_std_op is None:
            return ret.std()
        assert callable(self.ret_std_op)
        std = self.ret_std_op(ret)
        # The custom operator must produce one entry per asset column.
        assert len(std) == ret.shape[1]
        assert set(std.index).issubset(set(ret.columns))
        return std

    def _returns_correlation(self, ret):
        """Asset-by-asset correlation matrix, via ``ret_corr_op`` when set."""
        if self.ret_corr_op is None:
            return ret.corr()
        assert callable(self.ret_corr_op)
        corr = self.ret_corr_op(ret)
        # The custom operator must produce a square matrix over the asset columns.
        assert corr.shape[0] == corr.shape[1] == ret.shape[1]
        assert set(corr.index).issubset(set(ret.columns))
        assert set(corr.columns).issubset(set(ret.columns))
        return corr

    def _is_symmetric(self, matrix: pd.DataFrame, rtol=1e-05, atol=1e-08):
        # NOTE(review): despite the name, only squareness is verified here;
        # rtol/atol are currently unused.
        n_rows, n_cols = matrix.shape[0], matrix.shape[1]
        return n_rows == n_cols

    # Covariance = VOL' * CORR * VOL
    def _compute_covariance_matrix(self, std: pd.Series, corr: pd.DataFrame):
        """Assemble the covariance matrix from a volatility vector and a
        correlation matrix, aligned on the correlation's columns."""
        assert self._is_symmetric(corr)
        assert set(std.index).issubset(set(corr.index))
        assert set(corr.columns).issubset(set(corr.index))
        # Align volatilities to the correlation ordering; dropping NaNs must
        # not lose any asset, otherwise std and corr cover different universes.
        vol = std.copy().reindex(corr.columns).dropna()
        assert len(vol) == len(std), str([i for i in corr.columns if i not in vol.index])
        diag_vol = np.diag(vol)
        cov_values = np.dot(diag_vol, np.dot(corr, diag_vol))
        return pd.DataFrame(
            data=cov_values,
            index=corr.index,
            columns=corr.columns
        )

    def calculate_returns_covariance_matrix(self, ret):
        """Covariance matrix of the given returns frame."""
        return self._compute_covariance_matrix(
            std=self._returns_volatility(ret),
            corr=self._returns_correlation(ret),
        )

    # ---| __call__()
    def __call__(self, dt, **kwargs):
        """Return the covariance matrix of universe returns as of ``dt``."""
        return self.calculate_returns_covariance_matrix(self.get_returns_df(dt, **kwargs))
| [
"numpy.dot",
"numpy.diag"
] | [((3346, 3358), 'numpy.diag', 'np.diag', (['vol'], {}), '(vol)\n', (3353, 3358), True, 'import numpy as np\n'), ((3431, 3448), 'numpy.dot', 'np.dot', (['corr', 'vol'], {}), '(corr, vol)\n', (3437, 3448), True, 'import numpy as np\n')] |
import logging
import os
import pprint
import sys
import tempfile as tmp
from copy import copy
from sklearn.utils import shuffle
from numpy import squeeze
if sys.platform == 'darwin':
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
os.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
from fedot.core.chains.node import PrimaryNode
from fedot.core.chains.chain import Chain
from fedot.core.composer.gp_composer.gp_composer import GPComposer, GPComposerRequirements
from fedot.core.repository.model_types_repository import ModelTypesRepository
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, RegressionMetricsEnum, \
MetricsRepository
from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, GPComposerRequirements
from fedot.core.composer.optimisers.gp_optimiser import GPChainOptimiserParameters, GeneticSchemeTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.data.data import InputData
from frameworks.shared.callee import call_run, result, output_subdir, utils
import numpy as np
import datetime
log = logging.getLogger(__name__)
def run(dataset, config):
    """Benchmark entry point: compose a FEDOT chain on the training split and
    score it on the test split.

    :param dataset: benchmark dataset with encoded train/test splits
        (``dataset.train.X_enc`` / ``y_enc`` and ``dataset.test.X_enc`` / ``y_enc``).
    :param config: benchmark configuration (task type, metric, time budget,
        core count, framework params).
    :return: benchmark ``result`` carrying predictions and training metadata.
    :raises ValueError: if ``config.metric`` is not a supported metric.
    """
    log.info("\n**** FEDOT ****\n")

    is_classification = config.type == 'classification'
    # Mapping of benchmark metrics to FEDOT metrics
    metrics_mapping = dict(
        acc='accuracy',
        auc='roc_auc',
        f1='f1',
        logloss='neg_log_loss',
        mae='neg_mean_absolute_error',
        mse='neg_mean_squared_error',
        msle='neg_mean_squared_log_error',
        r2='r2',
        rmse='neg_mean_squared_error'
    )
    scoring_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None

    if scoring_metric is None:
        raise ValueError("Performance metric {} not supported.".format(config.metric))

    if is_classification:
        metric = ClassificationMetricsEnum.ROCAUC
        task_type = TaskTypesEnum.classification
    else:
        metric = RegressionMetricsEnum.RMSE
        task_type = TaskTypesEnum.regression

    task = Task(task_type)

    x_train, y_train = shuffle(dataset.train.X_enc, dataset.train.y_enc, random_state=0)
    # Collapse a single-column 2D target into a 1D vector.
    if len(y_train.shape) > 1 and y_train.shape[1] == 1:
        y_train = squeeze(y_train, axis=1)
    x_test = dataset.test.X_enc

    # Non-underscore framework params would be forwarded to the framework
    # (currently unused by this runner).
    training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}

    dataset_to_compose = \
        InputData(idx=[_ for _ in range(len(y_train))],
                  features=x_train,
                  target=y_train,
                  task=task,
                  data_type=DataTypesEnum.table)

    # BUG FIX: the test InputData previously reused the *training* set length
    # (len(y_train)) for its index; the index must match the test row count.
    dataset_to_test = \
        InputData(idx=[_ for _ in range(len(x_test))],
                  features=x_test,
                  target=None,
                  task=task,
                  data_type=DataTypesEnum.table)

    n_jobs = config.framework_params.get('_n_jobs',
                                         config.cores)  # useful to disable multicore, regardless of the dataset config

    log.info('Running FEDOT with a maximum time of %ss on %s cores, optimizing %s.',
             config.max_runtime_seconds, n_jobs, scoring_metric)
    runtime_min = (config.max_runtime_seconds / 60)

    available_model_types, _ = ModelTypesRepository().suitable_model(task_type=task.task_type)

    metric_function = MetricsRepository().metric_by_id(metric)

    # Debug toggle: the unreachable `else` branch fits a single-model baseline
    # chain instead of running the (expensive) evolutionary composition.
    if True:
        with utils.Timer() as training:
            # the choice and initialisation of the GP search
            composer_requirements = GPComposerRequirements(
                primary=available_model_types,
                secondary=available_model_types, max_arity=3,
                # Reserve ~20% of the budget for everything outside composition.
                max_depth=2, max_lead_time=datetime.timedelta(minutes=runtime_min * 0.8))

            # GP optimiser parameters choice
            scheme_type = GeneticSchemeTypesEnum.parameter_free
            optimiser_parameters = GPChainOptimiserParameters(genetic_scheme_type=scheme_type)

            # Create builder for composer and set composer params
            builder = GPComposerBuilder(task=task).with_requirements(composer_requirements).with_metrics(
                metric_function).with_optimiser_parameters(optimiser_parameters)
            composer = builder.build()

            # the optimal chain generation by composition - the most time-consuming task
            chain_evo_composed = composer.compose_chain(data=dataset_to_compose,
                                                        is_visualise=False)
    else:
        with utils.Timer() as training:
            if is_classification:
                chain_evo_composed = Chain(PrimaryNode('logit'))
            else:
                chain_evo_composed = Chain(PrimaryNode('lasso'))
            chain_evo_composed.fit(input_data=dataset_to_compose, verbose=False)

    log.info('Predicting on the test set.')
    y_test = dataset.test.y_enc
    predictions = chain_evo_composed.predict(dataset_to_test, output_mode='labels').predict
    if not is_classification:
        # Regression has no class probabilities.
        probabilities = None
    else:
        probabilities = chain_evo_composed.predict(dataset_to_test, output_mode='full_probs').predict

    save_artifacts(chain_evo_composed, config)
    return result(output_file=config.output_predictions_file,
                  predictions=predictions,
                  truth=y_test,
                  probabilities=probabilities,
                  target_is_encoded=is_classification,
                  models_count=1,
                  training_duration=training.duration)
def save_artifacts(chain, config):
    """Persist the composed chain to disk when the benchmark config asks for it.

    Saving is best-effort: any failure is logged at debug level and swallowed
    so it never aborts the benchmark run.
    """
    try:
        requested = config.framework_params.get('_save_artifacts', False)
        if 'models' in requested:
            target_dir = output_subdir('models', config)
            chain.save_chain(os.path.join(target_dir, 'model.json'))
    except Exception:
        log.debug("Error when saving artifacts.", exc_info=True)
if __name__ == '__main__':
    # Entry point invoked by the AutoML benchmark harness: delegates to the
    # shared runner with this framework's `run` implementation.
    call_run(run)
| [
"logging.getLogger",
"fedot.core.composer.gp_composer.gp_composer.GPComposerBuilder",
"frameworks.shared.callee.utils.Timer",
"sklearn.utils.shuffle",
"frameworks.shared.callee.result",
"numpy.squeeze",
"fedot.core.repository.model_types_repository.ModelTypesRepository",
"fedot.core.repository.quality... | [((293, 309), 'tempfile.gettempdir', 'tmp.gettempdir', ([], {}), '()\n', (307, 309), True, 'import tempfile as tmp\n'), ((1323, 1350), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1340, 1350), False, 'import logging\n'), ((2312, 2327), 'fedot.core.repository.tasks.Task', 'Task', (['task_type'], {}), '(task_type)\n', (2316, 2327), False, 'from fedot.core.repository.tasks import Task, TaskTypesEnum\n'), ((2354, 2419), 'sklearn.utils.shuffle', 'shuffle', (['dataset.train.X_enc', 'dataset.train.y_enc'], {'random_state': '(0)'}), '(dataset.train.X_enc, dataset.train.y_enc, random_state=0)\n', (2361, 2419), False, 'from sklearn.utils import shuffle\n'), ((5552, 5769), 'frameworks.shared.callee.result', 'result', ([], {'output_file': 'config.output_predictions_file', 'predictions': 'predictions', 'truth': 'y_test', 'probabilities': 'probabilities', 'target_is_encoded': 'is_classification', 'models_count': '(1)', 'training_duration': 'training.duration'}), '(output_file=config.output_predictions_file, predictions=predictions,\n truth=y_test, probabilities=probabilities, target_is_encoded=\n is_classification, models_count=1, training_duration=training.duration)\n', (5558, 5769), False, 'from frameworks.shared.callee import call_run, result, output_subdir, utils\n'), ((6291, 6304), 'frameworks.shared.callee.call_run', 'call_run', (['run'], {}), '(run)\n', (6299, 6304), False, 'from frameworks.shared.callee import call_run, result, output_subdir, utils\n'), ((2499, 2523), 'numpy.squeeze', 'squeeze', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (2506, 2523), False, 'from numpy import squeeze\n'), ((3548, 3570), 'fedot.core.repository.model_types_repository.ModelTypesRepository', 'ModelTypesRepository', ([], {}), '()\n', (3568, 3570), False, 'from fedot.core.repository.model_types_repository import ModelTypesRepository\n'), ((3637, 3656), 
'fedot.core.repository.quality_metrics_repository.MetricsRepository', 'MetricsRepository', ([], {}), '()\n', (3654, 3656), False, 'from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, RegressionMetricsEnum, MetricsRepository\n'), ((3708, 3721), 'frameworks.shared.callee.utils.Timer', 'utils.Timer', ([], {}), '()\n', (3719, 3721), False, 'from frameworks.shared.callee import call_run, result, output_subdir, utils\n'), ((4209, 4268), 'fedot.core.composer.optimisers.gp_optimiser.GPChainOptimiserParameters', 'GPChainOptimiserParameters', ([], {'genetic_scheme_type': 'scheme_type'}), '(genetic_scheme_type=scheme_type)\n', (4235, 4268), False, 'from fedot.core.composer.optimisers.gp_optimiser import GPChainOptimiserParameters, GeneticSchemeTypesEnum\n'), ((4847, 4860), 'frameworks.shared.callee.utils.Timer', 'utils.Timer', ([], {}), '()\n', (4858, 4860), False, 'from frameworks.shared.callee import call_run, result, output_subdir, utils\n'), ((6075, 6106), 'frameworks.shared.callee.output_subdir', 'output_subdir', (['"""models"""', 'config'], {}), "('models', config)\n", (6088, 6106), False, 'from frameworks.shared.callee import call_run, result, output_subdir, utils\n'), ((4013, 4058), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(runtime_min * 0.8)'}), '(minutes=runtime_min * 0.8)\n', (4031, 4058), False, 'import datetime\n'), ((4953, 4973), 'fedot.core.chains.node.PrimaryNode', 'PrimaryNode', (['"""logit"""'], {}), "('logit')\n", (4964, 4973), False, 'from fedot.core.chains.node import PrimaryNode\n'), ((5038, 5058), 'fedot.core.chains.node.PrimaryNode', 'PrimaryNode', (['"""lasso"""'], {}), "('lasso')\n", (5049, 5058), False, 'from fedot.core.chains.node import PrimaryNode\n'), ((4361, 4389), 'fedot.core.composer.gp_composer.gp_composer.GPComposerBuilder', 'GPComposerBuilder', ([], {'task': 'task'}), '(task=task)\n', (4378, 4389), False, 'from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, 
GPComposerRequirements\n')] |
from sklearn.model_selection import StratifiedKFold, KFold
from skopt.utils import use_named_args
from .pipes_and_transformers import MidasEnsembleClassifiersWithPipeline, wrap_pipeline, get_metadata_fit, _MidasIdentity
from imblearn.pipeline import Pipeline
from sklearn.calibration import CalibratedClassifierCV
from copy import deepcopy
from .metrics import evaluate_metrics, average_metrics
from .reporting import (
write_train_report,
prop_minority_to_rest_class,
MetadataFit,
averaged_metadata_list, create_report_dfs,
)
import numpy as np
from collections import Counter
def has_resampler(pipeline):
    """Return True if any step of *pipeline* is a resampler.

    A step counts as a resampler when its estimator (the second element of
    the ``(name, estimator)`` step tuple) exposes a ``fit_resample`` method,
    as imblearn resamplers do.

    Parameters
    ----------
    pipeline: object with a ``steps`` attribute (sklearn/imblearn Pipeline).

    Returns
    -------
    bool
    """
    # Generator expression short-circuits on the first match; the original
    # built a full list before calling any().
    return any(hasattr(step[1], "fit_resample") for step in pipeline.steps)
def train_model_without_undersampling(model, X, y, exists_resampler):
    """Fit one deep copy of *model* on all of (X, y).

    Returns the fitted model together with its fit metadata: extracted from
    the fitted pipeline when it contains a resampler, otherwise computed
    directly from the class counts of *y*.
    """
    fitted = deepcopy(model)
    fitted.fit(X, y)
    if exists_resampler:
        # Model is a pipeline with a resampler: metadata comes from the fit.
        return fitted, get_metadata_fit(fitted)
    class_counts = Counter(y)
    return fitted, MetadataFit(len(y), prop_minority_to_rest_class(class_counts))
def train_ensemble_model_with_undersampling(model, X, y, exists_resampler, max_k_undersampling):
    """Train an ensemble of K models, each fit on the full minority class
    plus one K-th of the majority ("rest") class, following the UMCE scheme.

    Parameters
    ----------
    model: estimator or pipeline; deep-copied and fit once per fold.
    X, y: training features and labels.
    exists_resampler: True when *model* is a pipeline with a resampler, in
        which case fit metadata is extracted from the fitted pipeline.
    max_k_undersampling: upper bound on the number of majority-class folds.

    Returns
    -------
    (MidasEnsembleClassifiersWithPipeline over the fold models,
     metadata averaged over all folds)
    """
    # See https://github.com/w4k2/umce/blob/master/method.py
    # Firstly we analyze the training set to find majority class and to
    # establish the imbalance ratio
    counter_classes = Counter(y)
    # most_common() sorts descending, so the last entry is the minority class.
    minority_class_key = counter_classes.most_common()[-1][0]
    minority_class_idxs = np.where(y == minority_class_key)[0]
    rest_class_idxs = np.where(y != minority_class_key)[0]
    # K is the imbalanced ratio round to int (with a minimum of 2 and a max of max_k_undersamling)
    imbalance_ratio = (
        len(rest_class_idxs) / len(minority_class_idxs)
    )
    k_majority_class = int(np.around(imbalance_ratio))
    k_majority_class = k_majority_class if k_majority_class < max_k_undersampling else max_k_undersampling
    k_majority_class = k_majority_class if k_majority_class > 2 else 2
    fold_models = []
    list_metadata = []
    # Split only the majority-class indices: each fold trains on the whole
    # minority class plus one K-th of the rest, roughly balancing the fold.
    kf = KFold(n_splits=k_majority_class)
    for _, index in kf.split(rest_class_idxs):
        fold_model = deepcopy(model)
        fold_idx = np.concatenate([minority_class_idxs, rest_class_idxs[index]])
        X_train_f, y_train_f = X[fold_idx], y[fold_idx]
        fold_model.fit(X_train_f, y_train_f)
        fold_models.append(fold_model)
        if exists_resampler:  # In this case, model is a pipeline with a resampler.
            list_metadata.append(get_metadata_fit(fold_model))
        else:
            list_metadata.append(
                MetadataFit(len(y_train_f), prop_minority_to_rest_class(Counter(y_train_f)))
            )
    ensemble_model = MidasEnsembleClassifiersWithPipeline(None, fold_models)
    return ensemble_model, averaged_metadata_list(list_metadata)
def ensemble_model_with_resampling(X, y, pipeline_post_process,
                                   model, loss_metric, peeking_metrics,
                                   k_inner_fold, skip_inner_folds, undersampling_majority_class,
                                   max_k_undersampling, calibrated
                                   ):
    """Inner k-fold train/evaluate where resampling happens inside each fold.

    The post-process pipeline (which contains a resampler) is chained with
    *model* and re-fit per fold, so the resampler only ever sees the training
    portion of that fold.

    Returns
    -------
    (ensemble model over all fold models, metrics averaged over folds,
     comments dict used for reporting)
    """
    # wrap_pipeline adds the bookkeeping needed to extract fit metadata later.
    pipeline_post_process = wrap_pipeline(pipeline_post_process)
    complete_steps = pipeline_post_process.steps + [("model", model)]
    complete_pipeline = Pipeline(complete_steps)
    fold_models = []
    fold_metrics = []
    list_metadata = []
    comments = {}
    comments["option"] = "build model with resampling"
    inner_cv = StratifiedKFold(n_splits=k_inner_fold)
    for k, (train_index, test_index) in enumerate(inner_cv.split(X, y)):
        if k not in skip_inner_folds:
            X_train, y_train, X_test, y_test = (
                X[train_index],
                y[train_index],
                X[test_index],
                y[test_index],
            )
            if undersampling_majority_class:
                (
                    fold_base_model,
                    fold_metadata,
                ) = train_ensemble_model_with_undersampling(
                    complete_pipeline, X_train, y_train, True, max_k_undersampling
                )
            else:
                fold_base_model, fold_metadata = train_model_without_undersampling(
                    complete_pipeline, X_train, y_train, True
                )
            list_metadata.append(fold_metadata)
            fold_final_model = fold_base_model
            if calibrated:
                # cv="prefit": only the isotonic calibrator is fit on the
                # held-out fold; the base model itself is left untouched.
                fold_final_model = CalibratedClassifierCV(
                    fold_base_model, method="isotonic", cv="prefit"
                )
                fold_final_model.fit(X_test, y_test)
            fold_models.append(fold_final_model)
            y_proba = fold_final_model.predict_proba(X_test)[:, 1]
            fold_metrics.append(
                evaluate_metrics(y_test, y_proba, loss_metric, peeking_metrics)
            )
    averaged_metrics = average_metrics(fold_metrics)
    complete_model = MidasEnsembleClassifiersWithPipeline(None, fold_models)
    metadata = averaged_metadata_list(list_metadata)
    comments["number of folds"] = len(fold_models)
    comments[
        "average size of training set before resampling"
    ] = metadata.get_num_init_samples_bf()
    comments[
        "average prop of minority class before resampling"
    ] = metadata.get_prop_minority_class_bf()
    comments[
        "average size of training set after resampling"
    ] = metadata.get_num_init_samples_af()
    comments[
        "average prop of minority class after resampling"
    ] = metadata.get_prop_minority_class_af()
    return complete_model, averaged_metrics, comments
def ensemble_model_without_resampling(X, y, pipeline_post_process,
                                      model, loss_metric, peeking_metrics,
                                      k_inner_fold, skip_inner_folds, undersampling_majority_class,
                                      max_k_undersampling, calibrated
                                      ):
    """Inner k-fold train/evaluate for a post-process pipeline without a
    resampler.

    Because nothing is resampled, the pipeline can be fit once on the whole
    data and the transformed features reused across all inner folds.

    Returns
    -------
    (ensemble model bundling the fitted pipeline and fold models, metrics
     averaged over folds, comments dict used for reporting)
    """
    list_metadata = []
    fold_models = []  # List of all models builded in this k-fold
    fold_metrics = []  # List of all metrics
    comments = {}  # Dict of comments, used for reporting
    comments["option"] = "build model without resampling"
    inner_cv = StratifiedKFold(n_splits=k_inner_fold)
    pipeline_post_process = deepcopy(pipeline_post_process)
    # For efficiency we transform the data once, for all folds
    X_t = pipeline_post_process.fit_transform(X, y)
    y_t = y
    for k, (train_index, test_index) in enumerate(inner_cv.split(X_t, y_t)):
        if k not in skip_inner_folds:
            X_train, y_train, X_test, y_test = (
                X_t[train_index],
                y_t[train_index],
                X_t[test_index],
                y_t[test_index],
            )
            if undersampling_majority_class:
                fold_base_model, fold_metadata = train_ensemble_model_with_undersampling(
                    model, X_train, y_train, False, max_k_undersampling
                )
            else:
                fold_base_model, fold_metadata = train_model_without_undersampling(
                    model, X_train, y_train, False
                )
            list_metadata.append(fold_metadata)
            fold_final_model = fold_base_model
            if calibrated:
                # cv="prefit": only the isotonic calibrator is fit on the
                # held-out fold; the base model itself is left untouched.
                fold_final_model = CalibratedClassifierCV(
                    fold_base_model, method="isotonic", cv="prefit"
                )
                fold_final_model.fit(X_test, y_test)
            fold_models.append(fold_final_model)
            y_proba = fold_final_model.predict_proba(X_test)[:, 1]
            fold_metrics.append(
                evaluate_metrics(y_test, y_proba, loss_metric, peeking_metrics)
            )
    averaged_metrics = average_metrics(fold_metrics)
    metadata = averaged_metadata_list(list_metadata)
    complete_model = MidasEnsembleClassifiersWithPipeline(
        pipeline_post_process, fold_models
    )
    comments["number of folds"] = len(fold_models)
    comments["average size of training set"] = metadata.get_num_init_samples_bf()
    comments["average prop of minority class"] = metadata.get_prop_minority_class_bf()
    return complete_model, averaged_metrics, comments
def find_best_model(list_models, list_metrics):
    """Pick the model with the lowest ``loss_metric``.

    Ties are resolved in favour of the earliest model, matching the order in
    which configurations were evaluated.

    Returns
    -------
    (best model, its index in *list_models*)
    """
    best_idx = min(
        range(len(list_metrics)),
        key=lambda idx: list_metrics[idx]["loss_metric"],
    )
    return list_models[best_idx], best_idx
def train_inner_model(X, y, model_search_spaces,
                      X_hold_out, y_hold_out, k_inner_fold,
                      skip_inner_folds, n_initial_points, n_calls, ensemble,
                      calibrated, loss_metric, peeking_metrics,
                      skopt_func, verbose, report_doc):
    """Hyper-parameter search over every candidate model family, then return
    the best model found.

    For each key in *model_search_spaces*, a skopt optimisation via
    *skopt_func* minimises the inner-CV ``loss_metric`` of the corresponding
    pipeline+model over its search space. The overall best configuration is
    optionally re-fit on the full data (when *ensemble* is False) and a
    training report is produced.

    Returns
    -------
    (best_model, params of the best model, comments of the best model,
     report dataframes built from all evaluated configurations)
    """
    # Accumulators shared across all model families; the skopt objective
    # below appends one entry per evaluated configuration.
    list_params = []
    list_models = []
    list_metrics = []
    list_holdout_metrics = []
    list_comments = []
    for key in model_search_spaces.keys():
        pipeline_post_process = model_search_spaces[key]["pipeline_post_process"]
        if not pipeline_post_process:
            # No post-processing requested: fall back to a pass-through step.
            pipeline_post_process = Pipeline([("identity", _MidasIdentity())])
        model_name = key
        model = model_search_spaces[key]["model"]
        complete_steps = pipeline_post_process.steps + [("model", model)]
        complete_pipeline = Pipeline(complete_steps)
        search_space = model_search_spaces[key]["search_space"]
        exists_resampler = has_resampler(pipeline_post_process)
        # Objective minimised by skopt; closes over the accumulators above.
        @use_named_args(search_space)
        def func_to_minimize(**params):
            copy_params = params.copy()
            # These two pseudo-hyper-parameters steer the CV strategy and are
            # popped so they are not forwarded to the sklearn pipeline.
            undersampling_majority_class = copy_params.pop(
                "undersampling_majority_class", False
            )
            max_k_undersampling = copy_params.pop(
                "max_k_undersampling", 0
            )
            complete_pipeline.set_params(**copy_params)
            list_params.append({**params, **{"model": model_name}})
            if verbose:
                print(f"Optimizing model {model_name}\n")
                print(f"With parameters {params}\n")
            if exists_resampler:
                ensemble_model, metrics, comments = ensemble_model_with_resampling(
                    X=X, y=y, pipeline_post_process=pipeline_post_process,
                    model=model, loss_metric=loss_metric, peeking_metrics=peeking_metrics,
                    k_inner_fold=k_inner_fold, skip_inner_folds=skip_inner_folds,
                    undersampling_majority_class=undersampling_majority_class,
                    max_k_undersampling=max_k_undersampling, calibrated=calibrated
                )
            else:
                ensemble_model, metrics, comments = ensemble_model_without_resampling(
                    X=X, y=y, pipeline_post_process=pipeline_post_process,
                    model=model, loss_metric=loss_metric, peeking_metrics=peeking_metrics,
                    k_inner_fold=k_inner_fold, skip_inner_folds=skip_inner_folds,
                    undersampling_majority_class=undersampling_majority_class,
                    max_k_undersampling=max_k_undersampling, calibrated=calibrated
                )
            list_models.append(ensemble_model)
            list_metrics.append(metrics)
            list_comments.append(comments)
            if verbose:
                print(f"Metric is {metrics['loss_metric']}\n")
            if len(y_hold_out) > 0:
                # Hold-out metrics are tracked for reporting only; they do
                # not influence the optimisation objective returned below.
                y_hold_out_proba = ensemble_model.predict_proba(X_hold_out)[:, 1]
                list_holdout_metrics.append(
                    evaluate_metrics(
                        y_hold_out, y_hold_out_proba, loss_metric, peeking_metrics
                    )
                )
            return metrics["loss_metric"]
        # perform optimization
        skopt_func(
            func=func_to_minimize, dimensions=search_space,
            n_initial_points=n_initial_points,
            n_calls=n_calls,
        )
    best_model, index_best_model = find_best_model(list_models, list_metrics)
    undersampling = list_params[index_best_model].get('undersampling_majority_class', False)
    # When no ensemble is requested, re-fit the winning pipeline on all data,
    # replicating the undersampling strategy that won during the search.
    if not ensemble and undersampling:
        if verbose:
            print("Training final model with undersampling technique")
        exists_resampler = has_resampler(best_model.get_complete_pipeline_to_fit())
        max_k_undersampling = list_params[index_best_model].get('max_k_undersampling', 0)
        best_model, _ = train_ensemble_model_with_undersampling(best_model.get_complete_pipeline_to_fit(),
                                                                X, y, exists_resampler, max_k_undersampling)
    if not ensemble and not undersampling:
        if verbose:
            print("Training final model")
        best_model = best_model.get_complete_pipeline_to_fit()
        best_model.fit(X, y)
    if verbose:
        print("Best model found")
    report_dfs = create_report_dfs(list_params, list_metrics, loss_metric)
    if report_doc:
        write_train_report(
            report_doc=report_doc, list_params=list_params, list_metrics=list_metrics,
            list_holdout_metrics=list_holdout_metrics, peeking_metrics=peeking_metrics,
            list_comments=list_comments
        )
    return best_model, list_params[index_best_model], list_comments[index_best_model], report_dfs
| [
"numpy.where",
"skopt.utils.use_named_args",
"imblearn.pipeline.Pipeline",
"collections.Counter",
"sklearn.model_selection.StratifiedKFold",
"numpy.around",
"numpy.concatenate",
"copy.deepcopy",
"sklearn.calibration.CalibratedClassifierCV",
"sklearn.model_selection.KFold"
] | [((855, 870), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (863, 870), False, 'from copy import deepcopy\n'), ((1436, 1446), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (1443, 1446), False, 'from collections import Counter\n'), ((2105, 2137), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'k_majority_class'}), '(n_splits=k_majority_class)\n', (2110, 2137), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((3388, 3412), 'imblearn.pipeline.Pipeline', 'Pipeline', (['complete_steps'], {}), '(complete_steps)\n', (3396, 3412), False, 'from imblearn.pipeline import Pipeline\n'), ((3567, 3605), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_inner_fold'}), '(n_splits=k_inner_fold)\n', (3582, 3605), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((6322, 6360), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_inner_fold'}), '(n_splits=k_inner_fold)\n', (6337, 6360), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((6389, 6420), 'copy.deepcopy', 'deepcopy', (['pipeline_post_process'], {}), '(pipeline_post_process)\n', (6397, 6420), False, 'from copy import deepcopy\n'), ((1536, 1569), 'numpy.where', 'np.where', (['(y == minority_class_key)'], {}), '(y == minority_class_key)\n', (1544, 1569), True, 'import numpy as np\n'), ((1595, 1628), 'numpy.where', 'np.where', (['(y != minority_class_key)'], {}), '(y != minority_class_key)\n', (1603, 1628), True, 'import numpy as np\n'), ((1845, 1871), 'numpy.around', 'np.around', (['imbalance_ratio'], {}), '(imbalance_ratio)\n', (1854, 1871), True, 'import numpy as np\n'), ((2206, 2221), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (2214, 2221), False, 'from copy import deepcopy\n'), ((2241, 2302), 'numpy.concatenate', 'np.concatenate', (['[minority_class_idxs, rest_class_idxs[index]]'], {}), '([minority_class_idxs, rest_class_idxs[index]])\n', 
(2255, 2302), True, 'import numpy as np\n'), ((9436, 9460), 'imblearn.pipeline.Pipeline', 'Pipeline', (['complete_steps'], {}), '(complete_steps)\n', (9444, 9460), False, 'from imblearn.pipeline import Pipeline\n'), ((9599, 9627), 'skopt.utils.use_named_args', 'use_named_args', (['search_space'], {}), '(search_space)\n', (9613, 9627), False, 'from skopt.utils import use_named_args\n'), ((1101, 1111), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (1108, 1111), False, 'from collections import Counter\n'), ((4542, 4613), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['fold_base_model'], {'method': '"""isotonic"""', 'cv': '"""prefit"""'}), "(fold_base_model, method='isotonic', cv='prefit')\n", (4564, 4613), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((7413, 7484), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['fold_base_model'], {'method': '"""isotonic"""', 'cv': '"""prefit"""'}), "(fold_base_model, method='isotonic', cv='prefit')\n", (7435, 7484), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((2710, 2728), 'collections.Counter', 'Counter', (['y_train_f'], {}), '(y_train_f)\n', (2717, 2728), False, 'from collections import Counter\n')] |
# License: MIT
# Author: <NAME>
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import numpy as np
import visdom
import os
import model
import datasets
import config
vis = visdom.Visdom()
def numpify(tensor):
    """Detach *tensor* from the autograd graph and return it as a CPU numpy
    array."""
    detached = tensor.detach()
    return detached.cpu().numpy()
def visualize_masks(imgs, masks, recons):
    """Send a grid of (input, segmentation, reconstruction) rows to visdom.

    imgs: (N, C, H, W) input images in [0, 1].
    masks: (N, K, H, W) per-slot mask scores; argmax over the slot axis
        selects the colour shown at each pixel.
    recons: (N, C, H, W) reconstructions; clipped into [0, 1].
    """
    recons = np.clip(recons, 0., 1.)
    # 6 base colours, then the same colours halved, then all 12 quartered.
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255), (255, 0, 255), (255, 255, 0)]
    colors.extend([(c[0]//2, c[1]//2, c[2]//2) for c in colors])
    colors.extend([(c[0]//4, c[1]//4, c[2]//4) for c in colors])
    slot_idx = np.argmax(masks, 1)  # (N, H, W) winning slot per pixel
    # Vectorized palette lookup replaces the original per-pixel Python loop:
    # palette[slot_idx] -> (N, H, W, 3); move channels to axis 1 like imgs.
    palette = np.asarray(colors, dtype=imgs.dtype)
    seg_maps = palette[slot_idx].transpose(0, 3, 1, 2) / 255.0
    vis.images(np.concatenate((imgs, seg_maps, recons), 0), nrow=imgs.shape[0])
def run_training(monet, conf, trainloader):
    """Train (or resume training of) a MONet model on GPU.

    monet: the MONet module (possibly wrapped in DataParallel).
    conf: config namespace providing load_parameters, checkpoint_file,
        num_epochs and vis_every.
    trainloader: yields (images, counts) batches; counts are unused here.
    """
    if conf.load_parameters and os.path.isfile(conf.checkpoint_file):
        monet.load_state_dict(torch.load(conf.checkpoint_file))
        print('Restored parameters from', conf.checkpoint_file)
    else:
        # Fresh start: small gaussian init for every parameter tensor.
        for w in monet.parameters():
            std_init = 0.01
            nn.init.normal_(w, mean=0., std=std_init)
        print('Initialized parameters')
    optimizer = optim.RMSprop(monet.parameters(), lr=1e-4)
    for epoch in range(conf.num_epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            images, counts = data
            images = images.cuda()
            optimizer.zero_grad()
            output = monet(images)
            # The model returns a per-sample loss; average over the batch.
            loss = torch.mean(output['loss'])
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % conf.vis_every == conf.vis_every-1:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / conf.vis_every))
                running_loss = 0.0
                # Visualize the first 8 samples: inputs, masks, recons.
                visualize_masks(numpify(images[:8]),
                                numpify(output['masks'][:8]),
                                numpify(output['reconstructions'][:8]))
        # Checkpoint after every epoch.
        torch.save(monet.state_dict(), conf.checkpoint_file)
    print('training done')
def sprite_experiment():
    """Train MONet on the 64x64 Sprites dataset using config.sprite_config."""
    conf = config.sprite_config
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Lambda(lambda x: x.float()),
                                    ])
    trainset = datasets.Sprites(conf.data_dir, train=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=conf.batch_size,
                                              shuffle=True, num_workers=2)
    monet = model.Monet(conf, 64, 64).cuda()
    if conf.parallel:
        monet = nn.DataParallel(monet)
    run_training(monet, conf, trainloader)
def clevr_experiment():
    """Train MONet on 128x128 crops of the CLEVR dataset
    (config.clevr_config)."""
    conf = config.clevr_config
    # Crop as described in appendix C
    crop_tf = transforms.Lambda(lambda x: transforms.functional.crop(x, 29, 64, 192, 192))
    # Keep only the first three channels (drop the alpha channel).
    drop_alpha_tf = transforms.Lambda(lambda x: x[:3])
    transform = transforms.Compose([crop_tf,
                                    transforms.Resize((128, 128)),
                                    transforms.ToTensor(),
                                    drop_alpha_tf,
                                    transforms.Lambda(lambda x: x.float()),
                                    ])
    trainset = datasets.Clevr(conf.data_dir,
                              transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=conf.batch_size,
                                              shuffle=True, num_workers=8)
    monet = model.Monet(conf, 128, 128).cuda()
    if conf.parallel:
        monet = nn.DataParallel(monet)
    run_training(monet, conf, trainloader)
if __name__ == '__main__':
    # Entry point: trains on Sprites by default; swap the comment below to
    # run the CLEVR experiment instead.
    # clevr_experiment()
    sprite_experiment()
| [
"numpy.clip",
"model.Monet",
"torch.mean",
"datasets.Clevr",
"torch.load",
"numpy.argmax",
"datasets.Sprites",
"torchvision.transforms.Lambda",
"os.path.isfile",
"torch.nn.DataParallel",
"torchvision.transforms.functional.crop",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torchvis... | [((254, 269), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (267, 269), False, 'import visdom\n'), ((644, 669), 'numpy.clip', 'np.clip', (['recons', '(0.0)', '(1.0)'], {}), '(recons, 0.0, 1.0)\n', (651, 669), True, 'import numpy as np\n'), ((911, 930), 'numpy.zeros_like', 'np.zeros_like', (['imgs'], {}), '(imgs)\n', (924, 930), True, 'import numpy as np\n'), ((943, 962), 'numpy.argmax', 'np.argmax', (['masks', '(1)'], {}), '(masks, 1)\n', (952, 962), True, 'import numpy as np\n'), ((2853, 2917), 'datasets.Sprites', 'datasets.Sprites', (['conf.data_dir'], {'train': '(True)', 'transform': 'transform'}), '(conf.data_dir, train=True, transform=transform)\n', (2869, 2917), False, 'import datasets\n'), ((2936, 3035), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'conf.batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=conf.batch_size, shuffle=\n True, num_workers=2)\n', (2963, 3035), False, 'import torch\n'), ((3477, 3511), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['(lambda x: x[:3])'], {}), '(lambda x: x[:3])\n', (3494, 3511), True, 'import torchvision.transforms as transforms\n'), ((3863, 3913), 'datasets.Clevr', 'datasets.Clevr', (['conf.data_dir'], {'transform': 'transform'}), '(conf.data_dir, transform=transform)\n', (3877, 3913), False, 'import datasets\n'), ((3963, 4062), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'conf.batch_size', 'shuffle': '(True)', 'num_workers': '(8)'}), '(trainset, batch_size=conf.batch_size, shuffle=\n True, num_workers=8)\n', (3990, 4062), False, 'import torch\n'), ((1180, 1223), 'numpy.concatenate', 'np.concatenate', (['(imgs, seg_maps, recons)', '(0)'], {}), '((imgs, seg_maps, recons), 0)\n', (1194, 1223), True, 'import numpy as np\n'), ((1322, 1358), 'os.path.isfile', 'os.path.isfile', (['conf.checkpoint_file'], {}), '(conf.checkpoint_file)\n', (1336, 1358), False, 'import 
os\n'), ((3206, 3228), 'torch.nn.DataParallel', 'nn.DataParallel', (['monet'], {}), '(monet)\n', (3221, 3228), True, 'import torch.nn as nn\n'), ((4235, 4257), 'torch.nn.DataParallel', 'nn.DataParallel', (['monet'], {}), '(monet)\n', (4250, 4257), True, 'import torch.nn as nn\n'), ((1390, 1422), 'torch.load', 'torch.load', (['conf.checkpoint_file'], {}), '(conf.checkpoint_file)\n', (1400, 1422), False, 'import torch\n'), ((1575, 1617), 'torch.nn.init.normal_', 'nn.init.normal_', (['w'], {'mean': '(0.0)', 'std': 'std_init'}), '(w, mean=0.0, std=std_init)\n', (1590, 1617), True, 'import torch.nn as nn\n'), ((1993, 2019), 'torch.mean', 'torch.mean', (["output['loss']"], {}), "(output['loss'])\n", (2003, 2019), False, 'import torch\n'), ((2700, 2721), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2719, 2721), True, 'import torchvision.transforms as transforms\n'), ((3135, 3160), 'model.Monet', 'model.Monet', (['conf', '(64)', '(64)'], {}), '(conf, 64, 64)\n', (3146, 3160), False, 'import model\n'), ((3408, 3455), 'torchvision.transforms.functional.crop', 'transforms.functional.crop', (['x', '(29)', '(64)', '(192)', '(192)'], {}), '(x, 29, 64, 192, 192)\n', (3434, 3455), True, 'import torchvision.transforms as transforms\n'), ((3593, 3622), 'torchvision.transforms.Resize', 'transforms.Resize', (['(128, 128)'], {}), '((128, 128))\n', (3610, 3622), True, 'import torchvision.transforms as transforms\n'), ((3660, 3681), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3679, 3681), True, 'import torchvision.transforms as transforms\n'), ((4162, 4189), 'model.Monet', 'model.Monet', (['conf', '(128)', '(128)'], {}), '(conf, 128, 128)\n', (4173, 4189), False, 'import model\n')] |
import numpy as np
from scipy import signal
def random_spikes(size):
    """Generate a random 0/1 spike train of length *size*.

    Each position is independently a spike (1) with probability 0.01 and
    silent (0) with probability 0.99; adjacent spikes are then collapsed so
    that no two consecutive positions are both 1 (only the last spike of
    each run is kept).

    Parameters
    ----------
    size : int
        Number of samples in the returned array.

    Returns
    -------
    numpy.ndarray
        Array of 0s and 1s with no two consecutive 1s.
    """
    spikes = np.random.choice(2, size, p=[0.99, 0.01])
    # Scan left to right: a spike immediately followed by another spike is
    # cleared, which leaves only the last 1 of every consecutive run.
    for i in range(len(spikes) - 1):
        if spikes[i] == 1 and spikes[i + 1] == 1:
            spikes[i] = 0
    return spikes
| [
"numpy.random.choice"
] | [((229, 270), 'numpy.random.choice', 'np.random.choice', (['(2)', 'size'], {'p': '[0.99, 0.01]'}), '(2, size, p=[0.99, 0.01])\n', (245, 270), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Make a simple 1D gaussian profile.
"""
import numpy as np
import matplotlib.pyplot as plt
def gaussian_1D_profile(x_min, x_max, x_step, center, sigma, amplitude):
    """Function to create a 1D Gaussian distribution.

    Parameters
    ----------
    x_min, x_max, x_step: float, float, float
        Creates a sequence (1D ndarray) of points over which to compute the
        Gaussian, via ``np.arange(x_min, x_max, x_step)`` (x_max excluded).
    center: float
        The center point of the gaussian profile
    sigma: float
        1/e-squared width of beam; must be non-zero.
    amplitude: float
        Amplitude at peak value

    Returns
    -------
    x, y: ndarray, ndarray
        the sample points and the gaussian profile amplitude values
    """
    x = np.arange(x_min, x_max, x_step)  # create spatial array
    d = 2 * float(sigma)  # 1/e^2 diameter
    # np.exp is the idiomatic (and clearer) form of np.e ** (...).
    y = amplitude * np.exp(-2 * np.power((x - center) / d, 2))
    return x, y
# todo: learn how to do proper unit testing...heres some manual checks
# what if center > max(X)? still works, just get the tail end
# what if center, sigma negative? Since is getting squared, doesn't matter
# what if amplitude is neg or zero? Straight line at zero
# what if d = 0? Straight line
# what if the ndarray goes negative? Is ok.
# What if the array is empty or null? should catch an error.
def plot_1d_gaussian(x, y, hold=True):
    """Plot the gaussian profile.

    Parameters
    ----------
    x: ndarray
        X axis values
    y: ndarray
        Y axis values
    hold: bool, optional
        Kept for backward compatibility and ignored: ``pyplot.hold`` was
        removed from matplotlib, and the old ``plt.hold = hold`` assignment
        only overwrote a module attribute without affecting the plot.
    """
    plt.plot(x, y)
    plt.xlabel('X axis')
    plt.ylabel('Amplitude')
    plt.title('Gaussian 1D Profile')
    plt.show()
# todo: check if the hold true works or not
if __name__ == '__main__':
    # Demo: build and plot a unit-amplitude Gaussian centred at 0.
    x,y = gaussian_1D_profile(-50,50,.2, 0, 10, 1)
    plot_1d_gaussian(x,y,True)
| [
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((760, 791), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'x_step'], {}), '(x_min, x_max, x_step)\n', (769, 791), True, 'import numpy as np\n'), ((1604, 1618), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1612, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X axis"""'], {}), "('X axis')\n", (1633, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1649, 1672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (1659, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1710), 'matplotlib.pyplot.title', 'plt.title', (['"""Gaussian 1D Profile"""'], {}), "('Gaussian 1D Profile')\n", (1687, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1724, 1726), True, 'import matplotlib.pyplot as plt\n'), ((867, 896), 'numpy.power', 'np.power', (['((x - center) / d)', '(2)'], {}), '((x - center) / d, 2)\n', (875, 896), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the DSprites dataset.
beta-vae: Learning basic visual concepts with a constrained variational
framework, Higgins, International Conference on Learning Representations, 2017.
Code: https://github.com/YannDubs/disentangling-vae
"""
# Imports
import os
import logging
import subprocess
import numpy as np
from pynet.datasets.core import DataItem
from torch.utils.data import Dataset
# Global parameters
logger = logging.getLogger("pynet")
class DSprites(Dataset):
    """ Disentanglement test Sprites dataset.
    Procedurally generated 2D shapes, from 6 disentangled latent factors.
    This dataset uses 6 latents, controlling the color, shape, scale,
    rotation and position of a sprite.
    All possible variations of the latents are present. Ordering along
    dimension 1 is fixed and can be mapped back to the exact latent values
    that generated that image. Pixel outputs are different. No noise added.
    Notes
    -----
    - Link : https://github.com/deepmind/dsprites-dataset/
    - hard coded metadata because issue with python 3 loading of python 2
    """
    # Remote location of the npz archive (train split only).
    urls = {
        "train":
        "https://github.com/deepmind/dsprites-dataset/blob/master/"
        "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz?raw=true"}
    # Local filename the archive is stored under inside `datasetdir`.
    files = {"train": "dsprite_train.npz"}
    # NOTE(review): the class docstring mentions 6 latents but only 5 names
    # are listed here -- presumably color is constant in this archive;
    # verify against the upstream dataset description.
    lat_names = ("shape", "scale", "orientation", "posX", "posY")
    img_size = (64, 64)
    def __init__(self, datasetdir, size=None, **kwargs):
        """ Init class: download the archive if needed and load the images
        and their latent values into memory.
        Parameters
        ----------
        datasetdir: string
            the dataset destination folder.
        size: int, default None
            the number of images to keep; default uses all images available.
        """
        super(DSprites, self).__init__(**kwargs)
        self.datasetdir = datasetdir
        self.dsprites_file = os.path.join(
            self.datasetdir, DSprites.files["train"])
        self.download()
        dataset = np.load(self.dsprites_file)
        if size is None:
            size = len(dataset["imgs"])
        size = min(size, len(dataset["imgs"]))
        # Shuffle the first `size` positions so iteration order is random
        # (note: this permutes indices 0..size-1, it does not sample from
        # the whole archive).
        index = np.arange(size)
        np.random.shuffle(index)
        self.imgs = dataset["imgs"][index]
        self.lat_values = dataset["latents_values"][index]
        self.n_samples = len(self.imgs)
    def download(self):
        """ Download the dataset archive with curl unless it already exists
        locally.
        """
        if not os.path.isdir(self.datasetdir):
            os.makedirs(self.datasetdir)
        if not os.path.isfile(self.dsprites_file):
            subprocess.check_call(["curl", "-L", DSprites.urls["train"],
                                   "--output", self.dsprites_file])
    def __len__(self):
        # Number of images kept after the optional truncation to `size`.
        return self.n_samples
    def __getitem__(self, idx):
        """ Get the image at position 'idx'.
        Returns
        -------
        out: DataItem
            input/output tensor in [0, 1] of shape 'img_size'.
        """
        # Add a leading channel axis; input and output are the same image
        # (autoencoder-style item) and there are no labels.
        data = np.expand_dims(self.imgs[idx], axis=0)
        return DataItem(inputs=data, outputs=data, labels=None)
| [
"logging.getLogger",
"os.makedirs",
"pynet.datasets.core.DataItem",
"subprocess.check_call",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"numpy.expand_dims",
"numpy.load",
"numpy.arange",
"numpy.random.shuffle"
] | [((870, 896), 'logging.getLogger', 'logging.getLogger', (['"""pynet"""'], {}), "('pynet')\n", (887, 896), False, 'import logging\n'), ((2490, 2544), 'os.path.join', 'os.path.join', (['self.datasetdir', "DSprites.files['train']"], {}), "(self.datasetdir, DSprites.files['train'])\n", (2502, 2544), False, 'import os\n'), ((2600, 2627), 'numpy.load', 'np.load', (['self.dsprites_file'], {}), '(self.dsprites_file)\n', (2607, 2627), True, 'import numpy as np\n'), ((2756, 2771), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2765, 2771), True, 'import numpy as np\n'), ((2780, 2804), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (2797, 2804), True, 'import numpy as np\n'), ((3575, 3613), 'numpy.expand_dims', 'np.expand_dims', (['self.imgs[idx]'], {'axis': '(0)'}), '(self.imgs[idx], axis=0)\n', (3589, 3613), True, 'import numpy as np\n'), ((3629, 3677), 'pynet.datasets.core.DataItem', 'DataItem', ([], {'inputs': 'data', 'outputs': 'data', 'labels': 'None'}), '(inputs=data, outputs=data, labels=None)\n', (3637, 3677), False, 'from pynet.datasets.core import DataItem\n'), ((3033, 3063), 'os.path.isdir', 'os.path.isdir', (['self.datasetdir'], {}), '(self.datasetdir)\n', (3046, 3063), False, 'import os\n'), ((3077, 3105), 'os.makedirs', 'os.makedirs', (['self.datasetdir'], {}), '(self.datasetdir)\n', (3088, 3105), False, 'import os\n'), ((3121, 3155), 'os.path.isfile', 'os.path.isfile', (['self.dsprites_file'], {}), '(self.dsprites_file)\n', (3135, 3155), False, 'import os\n'), ((3169, 3266), 'subprocess.check_call', 'subprocess.check_call', (["['curl', '-L', DSprites.urls['train'], '--output', self.dsprites_file]"], {}), "(['curl', '-L', DSprites.urls['train'], '--output',\n self.dsprites_file])\n", (3190, 3266), False, 'import subprocess\n')] |
import numpy as np
import pytest
from snc.environments.job_generators.discrete_review_job_generator \
import DeterministicDiscreteReviewJobGenerator as drjg
from snc.environments.job_generators.discrete_review_job_generator \
import PoissonDiscreteReviewJobGenerator as prjg
from snc.environments.controlled_random_walk import ControlledRandomWalk
import snc.utils.snc_tools as snc
import snc.environments.state_initialiser as stinit
import snc.environments.examples as examples
import \
snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing
from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import \
ScaledBernoulliServicesPoissonArrivalsGenerator
from snc.environments.state_initialiser import DeterministicCRWStateInitialiser
def test_is_binary():
    """is_binary accepts 0/1 arrays of any shape and rejects anything else."""
    assert snc.is_binary(np.ones((1, 1)))
    assert snc.is_binary(np.zeros((1, 1)))
    assert snc.is_binary(np.ones((5, 4)))
    assert snc.is_binary(np.zeros((5, 4)))
    assert snc.is_binary(np.random.randint(0, 1, (3, 6)))
    # An empty (non-array) input is not binary.
    assert not snc.is_binary([])
    # A random float array with one row forced to 1 is still not binary.
    almost_binary = np.random.random_sample((3, 5))
    almost_binary[0] = 1
    assert not snc.is_binary(almost_binary)
def test_index_phys_resources_with_negative_values():
    """A negative physical-resource index must fail the environment
    constructor's assertion."""
    bad_indexes = (-1, 0)
    zeros_col = np.zeros((2, 1))
    identity = np.eye(2)
    job_generator = drjg(zeros_col, identity, sim_time_interval=1)
    initialiser = stinit.DeterministicCRWStateInitialiser(zeros_col)
    with pytest.raises(AssertionError):
        ControlledRandomWalk(zeros_col, zeros_col, identity, job_generator,
                             initialiser, index_phys_resources=bad_indexes)
def test_index_phys_resources_with_index_higher_than_num_resources():
index_phys_resources = (0, 2)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
with pytest.raises(AssertionError):
ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
def test_index_phys_resources_with_repeated_indexes():
index_phys_resources = (0, 0)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
with pytest.raises(AssertionError):
ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
def test_valid_index_phys_resources_1():
index_phys_resources = (0,)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
assert env.index_phys_resources == index_phys_resources
def test_valid_index_phys_resources_1_2():
index_phys_resources = (0, 1)
# Other needed parameters
cost_per_buffer = np.zeros((2, 1))
demand_rate = np.zeros((2, 1))
initial_state = np.zeros((2, 1))
capacity = np.zeros((2, 1))
constituency_mat = np.eye(2)
buffer_processing_mat = np.eye(2)
job_generator = drjg(demand_rate, buffer_processing_mat, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
index_phys_resources=index_phys_resources)
assert env.index_phys_resources == index_phys_resources
def test_state_initialiser_uniform():
num_buffers = 5
capacity = 10
s0 = stinit.UniformRandomCRWStateInitialiser(num_buffers, capacity)
init_state = np.zeros((num_buffers, 1))
num_samples = 100000
for i in range(num_samples):
init_state += s0.get_initial_state()
init_state /= num_samples
np.all(np.isclose(init_state, ((capacity - 1) / 2) * np.ones((num_buffers, 1))))
def test_state_initialiser_deterministic():
initial_state = np.array([2, 3, 4, 5])[:, None]
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
assert np.all(s0.get_initial_state() == initial_state)
def test_unfeasible_action():
"""Check that the step method doesn't allow actions that violate the one action per resource
constraint: C u <= 1."""
np.random.seed(42)
env = examples.double_reentrant_line_only_shared_resources_model(alpha=0)
env.reset_with_random_state(42)
action = np.array([[1], [0], [0], [1]])
with pytest.raises(AssertionError):
_, _, _, _ = env.step(action)
def test_scheduling_single_buffer_events_constant():
"""One resource (station) with 1 buffers. Thus, there are 2 possible actions namely process or
idle. At every iteration, we fill but also process the buffer, so the num of jobs remain
constant = initial_state."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[1]])
initial_state = np.array([[2]])
capacity = np.array([[20]])
job_generator = drjg(demand_rate, - np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
for i in range(100):
s, r, d, t = env.step(action)
assert s[0, 0] == initial_state
assert r == - cost_per_buffer[0] * s
def test_scheduling_single_buffer_events_grow_one():
"""One resource (station) with 1 buffers. Thus, there are 2 possible actions namely process or
idle. At every iteration, we process one job but fill with two, so jobs grow 1 at a time up to
achieving maximum capacity."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[2]])
initial_state = np.array([[2]])
capacity = np.array([[20]])
job_generator = drjg(demand_rate, - np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
for i in range(18):
s, r, d, t = env.step(action)
assert s[0, 0] == 1 + i + initial_state[0]
assert r == - cost_per_buffer[0] * s
with pytest.raises(AssertionError):
_ = env.step(action)
def test_scheduling_single_buffer_events_remove_two_until_empty():
"""One resource (station) with 1 buffers. Thus, there are 2 possible actions namely process or
idle. We don't fill the buffer, just remove jobs, two at a time."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[0]])
initial_state = np.array([[20]])
capacity = np.array([[20]])
job_generator = drjg(demand_rate, - 2 * np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
for i in range(10):
s, r, d, t = env.step(action)
assert s[0, 0] == np.max([0, initial_state[0] - (i + 1) * 2])
assert r == - cost_per_buffer[0] * s
with pytest.raises(AssertionError):
_ = env.step(action)
def test_scheduling_single_buffer_events_fill_one_but_remove_two_until_empty():
"""One resource (station) with 1 buffers. Thus, there are 2 possible actions namely process or
idle. We fill the buffer with 1 job but remove two jobs per iteration, so it will decrease to
zero."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[1]])
initial_state = np.array([[20]])
capacity = np.array([[20]])
job_generator = drjg(demand_rate, - 2 * np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
for i in range(20):
s, r, d, t = env.step(action)
assert s[0, 0] == np.max([0, initial_state[0] - (i + 1)])
assert r == - cost_per_buffer[0] * s
with pytest.raises(AssertionError):
_ = env.step(action)
def test_scheduling_multiple_buffers_events():
"""One resource (station) with 3 buffers. Thus, there are 4 possible actions namely schedule
each buffer or idle. At every iteration, if the resource chooses to schedule to work one buffer,
then the jobs of that buffer are removed deterministically at some processing rate. Then, new
jobs arrive also at some deterministic rate. The number of jobs in a buffer is always
nonnegative and less than or equal to capacity. """
cost_per_buffer = np.array([[1.1], [2.2], [3.3]])
d1 = 1 # Rate of job arrival at buffer 1
d2 = 2 # Rate of job arrival at buffer 2
d3 = 3 # Rate of job arrival at buffer 3
demand_rate = np.array([[d1], [d2], [d3]])
initial_state = np.array([[0], [0], [0]])
capacity = np.array([[40], [40], [40]])
mu1 = 1 # Rate of job processing at buffer 1
mu2 = 2 # Rate of job processing at buffer 2
mu3 = 3 # Rate of job processing at buffer 3
# Rows: buffer, columns: influence of activity.
# Actions mean scheduling processing in one buffer.
# There is no routing in this case, so this is a diagonal matrix.
buffer_processing_matrix = np.array([[-mu1, 0, 0],
[0, -mu2, 0],
[0, 0, -mu3]])
# Each row corresponds with a time-step. The resource can only work in one buffer at a time.
actions = np.array([[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# When the resource works on a buffer, it compensate the arrivals, so the number of jobs remain
# constant. Otherwise, the arrivals increases the number of jobs at the demand rate.
jobs = np.array([[1, 2, 0],
[1, 4, 3],
[1, 6, 6],
[2, 6, 9],
[3, 8, 9],
[4, 8, 12],
[4, 10, 15],
[5, 12, 15],
[6, 12, 18],
[6, 14, 21],
[7, 16, 21]])
# Expected cost Computed as dot product of cost_per_buffer and number of jobs at each iteration.
cost = [5.5, 19.8, 34.1, 45.1, 50.6, 61.6, 75.9, 81.4, 92.4, 106.7, 112.2]
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 3)), job_generator, s0)
for i in range(11):
s, r, d, t = env.step(actions[i].reshape((3, 1)))
assert np.all(s == jobs[i].reshape([3, 1]))
np.testing.assert_approx_equal(r, -cost[i])
def test_below_capacity_single_buffer():
"""One resource with one buffer. Check number of jobs is always equal or less than maximum
capacity."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[3]])
initial_state = np.array([[0]])
capacity = np.array([[20]])
job_generator = drjg(demand_rate, - np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
for i in range(10):
s, r, d, t = env.step(action)
assert 0 <= s <= capacity
with pytest.raises(AssertionError):
_ = env.step(action)
def test_below_zero_capacity_single_buffer():
"""One resource with one buffer. Check number of jobs is always equal or less than maximum
capacity, for the corner case of having zero maximum capacity."""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[3]])
initial_state = np.array([[0]])
capacity = np.array([[0]])
job_generator = drjg(demand_rate, - np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
with pytest.raises(AssertionError):
_ = env.step(action)
def test_exceed_capacity_single_buffer():
"""One resource with one buffer. Check number of jobs is always equal or less than maximum
capacity, for the corner case when initial_state > capacity"""
cost_per_buffer = np.array([[3.5]])
demand_rate = np.array([[3]])
initial_state = np.array([[10]])
capacity = np.array([[5]])
job_generator = drjg(demand_rate, - np.ones((1, 1)), sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 1)), job_generator, s0)
action = np.ones((1, 1))
with pytest.raises(AssertionError):
_ = env.step(action)
def test_exceed_capacity_multiple_buffer():
"""One resource with three buffers. Check number of jobs is always equal or less than maximum
capacity."""
cost_per_buffer = np.array([[1.1], [2.2], [3.3]])
d1 = 1 # Rate of job arrival at buffer 1
d2 = 2 # Rate of job arrival at buffer 2
d3 = 3 # Rate of job arrival at buffer 3
demand_rate = np.array([[d1], [d2], [d3]])
initial_state = np.array([[0], [0], [0]])
capacity = np.array([[4], [10], [13]])
mu1 = 1 # Rate of job processing at buffer 1
mu2 = 2 # Rate of job processing at buffer 2
mu3 = 3 # Rate of job processing at buffer 3
# Rows: buffer, columns: influence of activity.
# Actions mean scheduling processing in one buffer.
# There is no routing, so this is a diagonal matrix.
buffer_processing_matrix = np.array([[-mu1, 0, 0],
[0, -mu2, 0],
[0, 0, -mu3]])
# Each row corresponds with a time-step. The resource can only work in one buffer at a time.
actions = np.array([[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
# When we reach the maximum capacity, the amount of jobs remain constant, since processing rate
# is equal to demand rate.
jobs = np.array([[1, 2, 0],
[1, 4, 3],
[1, 6, 6],
[2, 6, 9],
[3, 8, 9],
[4, 8, 12],
[4, 10, 13]])
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.ones((1, 3)), job_generator, s0)
for i in range(6):
s, r, d, t = env.step(actions[i].reshape((3, 1)))
assert np.all(s == jobs[i].reshape([3, 1]))
with pytest.raises(AssertionError):
_ = env.step(actions[6].reshape((3, 1)))
def test_routing_two_serially_connected_resources_one_buffer_each():
"""Two resources with 1 buffer each. There are 2 possible actions. At every iteration, each
resource chooses whether to work on its buffer or idle. If it works, jobs are processed at some
rate. Jobs processed from resource 1 go to the buffer at resource 2. Events are generated
deterministically at the rate given by the mean rate."""
cost_per_buffer = np.array([[2], [4]])
d1 = 1 # Rate of job arrival at buffer 1
d2 = 0 # Rate of job arrival at buffer 2
demand_rate = np.array([[d1], [d2]])
initial_state = np.array([[0], [0]])
capacity = np.array([[10], [10]])
mu1 = 1 # Rate of job processing at buffer 1
mu2 = 1 # Rate of job processing at buffer 2
# Jobs processed at buffer 1 are routed to buffer 2.
# Rows: buffer, columns: influence of activity.
# Actions mean scheduling processing in one buffer.
buffer_processing_matrix = np.array([[-mu1, 0],
[mu1, -mu2]])
# activity 1 (column 0), processed job at buffer 1 (row 0) will be routed to buffer 2 (row 1).
# Each row corresponds with a time-step. The resource can only work in one buffer at a time.
actions = np.array([[0, 0],
[1, 0],
[1, 1],
[0, 1],
[1, 1]])
# Expected number of jobs
jobs = np.array([[1, 0], # None buffer work. A new job gets to buffer 1 (b1)
[1, 1], # Process b1 so job goes to buffer 2 (b2, then new job arrives at b1.
[1, 1], # Process b1 and b2, since a new job arrives at b1, and a job is
# routed to b2, both buffers remain the same
[2, 0], # Process b2 but not b1, so b1 accumulates one job, and b2 gets empty
[2, 0]]) # Process b1 and b2, so b1 has same number of jobs at the end, while
# b2 aims to process the empty buffer and then gets one job routed from b1.
# Expected cost
cost = [2, 6, 6, 4, 4]
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
assert job_generator.routes == {(1, 0): (0, 0)}
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, np.eye(2), job_generator, s0)
for i in range(5):
s, r, d, t = env.step(actions[i].reshape((2, 1)))
assert np.all(s == jobs[i].reshape([2, 1]))
np.testing.assert_approx_equal(r, -cost[i])
def env_job_conservation(job_conservation_flag):
demand_rate = np.zeros((2, 1))
cost_per_buffer = np.array([[2], [4]])
initial_state = np.array([[0], [0]])
capacity = np.array([[10], [10]])
buffer_processing_matrix = np.array([[-1, 0],
[1, -1]])
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
return initial_state, ControlledRandomWalk(cost_per_buffer, capacity, np.eye(2),
job_generator, s0, job_conservation_flag)
def test_job_conservation_flag_true():
initial_state, env = env_job_conservation(True)
s, r, d, t = env.step(np.array([1, 1])[:, None])
assert np.all(s == initial_state)
def test_job_conservation_flag_false():
initial_state, env = env_job_conservation(False)
with pytest.raises(AssertionError):
_ = env.step(np.array([1, 1])[:, None])
def test_job_conservation_multiple_routes_leave_same_buffer():
demand_rate = np.zeros((3, 1))
cost_per_buffer = np.ones((3, 1))
initial_state = np.array([[3], [3], [1]])
capacity = np.ones((3, 1)) * np.inf
buffer_processing_matrix = np.array([[-2, -5, 0, 0],
[2, 0, -10, 0],
[0, 5, 0, -10]])
constituency_mat = np.eye(4)
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
job_conservation_flag = True
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_mat, job_generator, s0,
job_conservation_flag)
s, r, d, t = env.step(np.array([1, 1, 1, 1])[:, None])
# Depending on the order of activities we can get different result, since they are given by a
# dictionary
# In a event driven simulator, the order would be FIFO
assert (s == np.array([[0], [2], [1]])).all() or (s == np.array([[0], [0], [3]])).all()
def test_routing_three_buffer_tree_topology():
"""
Three resources with 1 buffer each, one parent with two children. There are 4 possible actions:
children resources choose to idle or work on their respective buffer, while parent choose
whether work and route to one child or work and route to the other. Events are generated
deterministically at the rate given by the mean rate.
"""
cost_per_buffer = np.array([[2], [4], [4]])
d1 = 1 # Rate of job arrival at buffer 1
d2 = 0 # Rate of job arrival at buffer 2
d3 = 0 # Rate of job arrival at buffer 3
demand_rate = np.array([[d1], [d2], [d3]])
initial_state = np.array([[0], [0], [0]])
capacity = np.array([[10], [10], [10]])
mu12 = 1 # Rate of processing jobs at buffer 1 and routing to buffer 2
mu13 = 1 # Rate of processing jobs at buffer 1 and routing to buffer 3
mu2 = 1 # Rate of processing jobs at buffer 2
mu3 = 1 # Rate of processing jobs at buffer 3
# Jobs processed at buffer 1 are routed to either buffer 2 or 3.
buffer_processing_matrix = np.array([[-mu12, -mu13, 0, 0],
[mu12, 0, -mu2, 0],
[0, mu13, 0, -mu3]])
# Each row corresponds with a time-step. The resource can only work in one buffer at a time.
actions = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 0, 1]])
# Expected jobs
jobs = np.array([[0, 0, 1],
[0, 0, 2],
[0, 1, 2],
[0, 1, 3],
[0, 1, 4],
[1, 0, 4],
[2, 0, 3],
[2, 0, 3],
[2, 1, 3],
[2, 1, 3],
[2, 2, 2]])
job_generator = drjg(demand_rate, buffer_processing_matrix, sim_time_interval=1)
assert job_generator.routes == {(1, 0): (0, 0), (2, 1): (0, 1)}
s0 = stinit.DeterministicCRWStateInitialiser(initial_state)
constituency_matrix = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]])
env = ControlledRandomWalk(cost_per_buffer, capacity, constituency_matrix, job_generator, s0)
for i in range(actions.shape[0]):
s, r, d, t = env.step(actions[i].reshape((4, 1)))
assert np.all(s == jobs[i].reshape([3, 1]))
def test_assert_surplus_buffers_consistent_with_job_generator_one_surplus_buffer():
ind_surplus_buffers = [1]
mud = 1e2
buffer_processing_matrix = np.array([[-10, 100, 0],
[10, 0, -mud],
[0, 0, -mud]])
job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(np.array([[0], [0], [9]]),
buffer_processing_matrix)
assert ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator(
ind_surplus_buffers, job_generator.demand_nodes)
def test_assert_surplus_buffers_consistent_with_job_generator_multiple_surplus_buffer():
ind_surplus_buffers = [4, 1]
mud = 1e2
buffer_processing_matrix = np.array([[-10, 100, 0, -10, 0],
[10, 0, -mud, 0, 0],
[0, 0, -mud, 0, 0],
[0, 0, 0, 0, -mud],
[0, 0, 0, 10, -mud]])
job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(
np.array([[0], [0], [0], [0], [9]]), buffer_processing_matrix)
assert ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator(
ind_surplus_buffers, job_generator.demand_nodes)
def test_assert_surplus_buffers_consistent_with_job_generator_wrong_one_surplus_buffer():
ind_surplus_buffers = [2]
mud = 1e2
buffer_processing_matrix = np.array([[-10, 100, 0],
[10, 0, -mud],
[0, 0, -mud]])
job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(np.array([[0], [0], [9]]),
buffer_processing_matrix)
assert not ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator(
ind_surplus_buffers, job_generator.demand_nodes)
def test_assert_surplus_buffers_consistent_with_job_generator_single_station_demand_model():
examples.single_station_demand_model()
def create_pull_model(ind_surplus_buffers):
model_type = 'pull'
mud = 1e2
buffer_processing_matrix = np.array([[-10, 0, 100],
[10, -mud, 0],
[0, -mud, 0]])
demand_rate = np.array([0, 0, 9])[:, None]
job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(demand_rate,
buffer_processing_matrix)
return ControlledRandomWalk(np.array([1, 0.5, 10])[:, None],
np.ones((3, 1)) * np.inf,
np.eye(3),
job_generator,
DeterministicCRWStateInitialiser(demand_rate),
model_type=model_type,
ind_surplus_buffers=ind_surplus_buffers)
@pytest.mark.parametrize("ind_surplus_buffers", [1, None])
def test_assert_surplus_buffers_consistent_with_job_generator_invalid(ind_surplus_buffers):
with pytest.raises(AssertionError):
_ = create_pull_model(ind_surplus_buffers)
def test_ensure_jobs_conservation_with_enough_jobs():
num_buffers = 2
num_activities = 2
buffer_processing_matrix = np.array([[-2, 0],
[2, -3]])
s0 = stinit.DeterministicCRWStateInitialiser(np.zeros((num_buffers, 1)))
job_generator = drjg(np.ones((num_buffers, 1)), buffer_processing_matrix, sim_time_interval=1)
env = ControlledRandomWalk(np.ones((num_buffers, 1)), np.ones((num_buffers, 1)),
np.zeros((num_buffers, num_activities)), job_generator, s0)
state = 5 * np.ones((num_buffers, 1))
routing_jobs_matrix = env.ensure_jobs_conservation(buffer_processing_matrix, state)
assert np.all(routing_jobs_matrix == buffer_processing_matrix)
def test_ensure_jobs_conservation_with_not_enough_jobs():
num_buffers = 2
num_activities = 2
buffer_processing_matrix = np.array([[-2, 0],
[2, -3]])
s0 = stinit.DeterministicCRWStateInitialiser(np.zeros((num_buffers, 1)))
job_generator = drjg(np.ones((num_buffers, 1)), buffer_processing_matrix, sim_time_interval=1)
env = ControlledRandomWalk(np.ones((num_buffers, 1)), np.ones((num_buffers, 1)),
np.zeros((num_buffers, num_activities)), job_generator, s0)
state = np.ones((num_buffers, 1))
routing_jobs_matrix = env.ensure_jobs_conservation(buffer_processing_matrix, state)
assert np.all(routing_jobs_matrix == np.array([[-1, 0], [1, -1]]))
def test_ensure_jobs_conservation_with_zero_jobs():
num_buffers = 2
num_activities = 2
buffer_processing_matrix = np.array([[-2, 0],
[2, -3]])
s0 = stinit.DeterministicCRWStateInitialiser(np.zeros((num_buffers, 1)))
job_generator = drjg(np.ones((num_buffers, 1)), buffer_processing_matrix, sim_time_interval=1)
env = ControlledRandomWalk(np.ones((num_buffers, 1)), np.ones((num_buffers, 1)),
np.zeros((num_buffers, num_activities)), job_generator, s0)
state = np.zeros((num_buffers, 1))
routing_jobs_matrix = env.ensure_jobs_conservation(buffer_processing_matrix, state)
assert np.all(routing_jobs_matrix == np.zeros((num_buffers, num_activities)))
def test_ensure_jobs_conservation_with_multiple_demand_and_no_supply():
num_buffers = 5
num_activities = 5
mu1 = 1
mu2 = 2
mu3 = 3
mud1 = 4
mud2 = 5
buffer_processing_matrix = np.array([[-mu1, 0, 0, 0, 0],
[mu1, -mu2, mu3, 0, -mud2],
[0, 0, 0, 0, -mud2],
[0, mu2, -mu3, -mud1, 0],
[0, 0, 0, -mud1, 0]])
ind_surplus_buffers = [1, 3]
s0 = stinit.DeterministicCRWStateInitialiser(np.zeros((num_buffers, 1)))
job_generator = drjg(np.ones((num_buffers, 1)), buffer_processing_matrix, sim_time_interval=1)
env = ControlledRandomWalk(
np.ones((num_buffers, 1)),
np.ones((num_buffers, 1)),
np.zeros((num_buffers, num_activities)),
job_generator,
s0,
model_type='pull',
ind_surplus_buffers=ind_surplus_buffers
)
state = np.zeros((num_buffers, 1))
routing_jobs_matrix = env.ensure_jobs_conservation(buffer_processing_matrix, state)
assert np.all(routing_jobs_matrix == np.zeros((num_buffers, num_activities)))
def test_controlled_random_walk_reset():
"""Check that the CRW reset do reset its state and its job generator seed"""
cost_per_buffer = np.array([[3.5]])
capacity = np.ones((1, 1)) * np.inf
constituency_matrix = np.ones((1, 1))
demand_rate = np.array([[1000]])
buffer_processing_matrix = np.array([[5]])
initial_state = np.array([[20]])
seed = 42
job_generator = prjg(sim_time_interval=1, demand_rate=demand_rate,
buffer_processing_matrix=buffer_processing_matrix, job_gen_seed=seed)
initial_random_state = job_generator.np_random.get_state()[1]
s0 = stinit.DeterministicCRWStateInitialiser(initial_state=initial_state)
env = ControlledRandomWalk(cost_per_buffer=cost_per_buffer, capacity=capacity,
constituency_matrix=constituency_matrix, job_generator=job_generator,
state_initialiser=s0)
next_state, _, _, _ = env.step(action=np.ones((1, 1)))
next_random_state = job_generator.np_random.get_state()[1]
assert np.any(next_state != initial_state)
assert np.any(next_random_state != initial_random_state)
env.reset_with_random_state()
new_initial_state = env.state
new_initial_random_state = job_generator.np_random.get_state()[1]
assert np.all(new_initial_state == initial_state)
assert np.all(new_initial_random_state == initial_random_state)
def test_job_generator_reset():
"""Check that the Job Generator reset do reset its seed"""
demand_rate = np.array([[1000]])
buffer_processing_matrix = np.array([[5]])
seed = 42
job_generator = prjg(sim_time_interval=1, demand_rate=demand_rate,
buffer_processing_matrix=buffer_processing_matrix, job_gen_seed=seed)
initial_random_state = job_generator.np_random.get_state()[1]
_ = job_generator.get_arrival_jobs()
next_random_state = job_generator.np_random.get_state()[1]
assert np.any(next_random_state != initial_random_state)
job_generator.reset_seed()
new_initial_random_state = job_generator.np_random.get_state()[1]
assert np.all(new_initial_random_state == initial_random_state)
def test_job_generator_prjg_fixed_seed():
"""Check that the methods from two different instance of a PoissonDiscreteReviewJobGenerator
return the same results given the same seed."""
demand_rate = np.array([[1000]])
supply_rate = 1000.
buffer_processing_matrix = np.array([[5]])
seed = 42
np.random.seed(seed)
job_generator_1 = prjg(sim_time_interval=1, demand_rate=demand_rate,
buffer_processing_matrix=buffer_processing_matrix,
job_gen_seed=seed)
job_generator_2 = prjg(sim_time_interval=1, demand_rate=demand_rate,
buffer_processing_matrix=buffer_processing_matrix,
job_gen_seed=seed)
for _ in np.arange(1000):
arrival_jobs_1 = job_generator_1.get_arrival_jobs()
arrival_jobs_2 = job_generator_2.get_arrival_jobs()
assert np.all(arrival_jobs_1 == arrival_jobs_2)
drained_jobs_matrix_1 = job_generator_1.get_instantaneous_drained_jobs_matrix()
drained_jobs_matrix_2 = job_generator_2.get_instantaneous_drained_jobs_matrix()
assert np.all(drained_jobs_matrix_1 == drained_jobs_matrix_2)
supplied_jobs_1 = job_generator_1.get_supplied_jobs(rate=supply_rate)
supplied_jobs_2 = job_generator_2.get_supplied_jobs(rate=supply_rate)
assert np.all(supplied_jobs_1 == supplied_jobs_2)
def test_create_example_env():
envs = [examples.dai_wang_model,
examples.input_queued_switch_3x3_model,
examples.klimov_model,
examples.ksrs_network_model,
examples.processor_sharing_model,
examples.routing_with_negative_workload,
examples.simple_link_constrained_model,
examples.simple_link_constrained_with_route_scheduling_model,
examples.loop_2_queues,
examples.simple_reentrant_line_model,
examples.simple_routing_model,
examples.single_server_queue,
examples.three_station_network_model,
# Pull models
examples.double_reentrant_line_model,
examples.double_reentrant_line_only_shared_resources_model,
examples.double_reentrant_line_with_demand_model,
examples.double_reentrant_line_with_demand_only_shared_resources_model,
examples.complex_demand_driven_model,
examples.multiple_demand_model,
examples.simple_reentrant_line_with_demand_model,
examples.single_station_demand_model,
examples.willems_example_2]
for e in envs:
_ = e()
def test_create_distribution_with_rebalancing_example_env():
examples_distribution_with_rebalancing.one_warehouse()
examples_distribution_with_rebalancing.two_warehouses(r_to_w_rebalance=False,
w_to_w_rebalance=False)
examples_distribution_with_rebalancing.two_warehouses(r_to_w_rebalance=True,
w_to_w_rebalance=False)
examples_distribution_with_rebalancing.two_warehouses(r_to_w_rebalance=False,
w_to_w_rebalance=True)
examples_distribution_with_rebalancing.two_warehouses(r_to_w_rebalance=True,
w_to_w_rebalance=True)
examples_distribution_with_rebalancing.two_warehouses_simplified(r_to_w_rebalance=False,
w_to_w_rebalance=False)
examples_distribution_with_rebalancing.two_warehouses_simplified(r_to_w_rebalance=True,
w_to_w_rebalance=False)
examples_distribution_with_rebalancing.two_warehouses_simplified(r_to_w_rebalance=False,
w_to_w_rebalance=True)
examples_distribution_with_rebalancing.two_warehouses_simplified(r_to_w_rebalance=True,
w_to_w_rebalance=True)
examples_distribution_with_rebalancing.three_warehouses_simplified(r_to_w_rebalance=True)
examples_distribution_with_rebalancing.three_warehouses_simplified(r_to_w_rebalance=False)
examples_distribution_with_rebalancing.three_warehouses()
examples_distribution_with_rebalancing.three_warehouses_two_manufacturers_per_area()
| [
"snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator.ScaledBernoulliServicesPoissonArrivalsGenerator",
"numpy.array",
"snc.environments.examples.double_reentrant_line_only_shared_resources_model",
"numpy.arange",
"snc.environments.examples_distribution_with_rebalancing.one_w... | [((27004, 27061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ind_surplus_buffers"""', '[1, None]'], {}), "('ind_surplus_buffers', [1, None])\n", (27027, 27061), False, 'import pytest\n'), ((862, 877), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (869, 877), True, 'import numpy as np\n'), ((890, 906), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (903, 906), True, 'import snc.utils.snc_tools as snc\n'), ((917, 933), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (925, 933), True, 'import numpy as np\n'), ((946, 962), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (959, 962), True, 'import snc.utils.snc_tools as snc\n'), ((973, 988), 'numpy.ones', 'np.ones', (['(5, 4)'], {}), '((5, 4))\n', (980, 988), True, 'import numpy as np\n'), ((1001, 1017), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (1014, 1017), True, 'import snc.utils.snc_tools as snc\n'), ((1028, 1044), 'numpy.zeros', 'np.zeros', (['(5, 4)'], {}), '((5, 4))\n', (1036, 1044), True, 'import numpy as np\n'), ((1057, 1073), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (1070, 1073), True, 'import snc.utils.snc_tools as snc\n'), ((1084, 1115), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)', '(3, 6)'], {}), '(0, 1, (3, 6))\n', (1101, 1115), True, 'import numpy as np\n'), ((1128, 1144), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (1141, 1144), True, 'import snc.utils.snc_tools as snc\n'), ((1201, 1232), 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 5)'], {}), '((3, 5))\n', (1224, 1232), True, 'import numpy as np\n'), ((1424, 1440), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1432, 1440), True, 'import numpy as np\n'), ((1459, 1475), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1467, 1475), True, 'import numpy as 
np\n'), ((1496, 1512), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1504, 1512), True, 'import numpy as np\n'), ((1528, 1544), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1536, 1544), True, 'import numpy as np\n'), ((1568, 1577), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1574, 1577), True, 'import numpy as np\n'), ((1606, 1615), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1612, 1615), True, 'import numpy as np\n'), ((1636, 1697), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_mat'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_mat, sim_time_interval=1)\n', (1640, 1697), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((1707, 1761), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (1746, 1761), True, 'import snc.environments.state_initialiser as stinit\n'), ((2127, 2143), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2135, 2143), True, 'import numpy as np\n'), ((2162, 2178), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2170, 2178), True, 'import numpy as np\n'), ((2199, 2215), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2207, 2215), True, 'import numpy as np\n'), ((2231, 2247), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2239, 2247), True, 'import numpy as np\n'), ((2271, 2280), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2277, 2280), True, 'import numpy as np\n'), ((2309, 2318), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2315, 2318), True, 'import numpy as np\n'), ((2339, 2400), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_mat'], 
{'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_mat, sim_time_interval=1)\n', (2343, 2400), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((2410, 2464), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (2449, 2464), True, 'import snc.environments.state_initialiser as stinit\n'), ((2815, 2831), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2823, 2831), True, 'import numpy as np\n'), ((2850, 2866), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2858, 2866), True, 'import numpy as np\n'), ((2887, 2903), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2895, 2903), True, 'import numpy as np\n'), ((2919, 2935), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2927, 2935), True, 'import numpy as np\n'), ((2959, 2968), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2965, 2968), True, 'import numpy as np\n'), ((2997, 3006), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3003, 3006), True, 'import numpy as np\n'), ((3027, 3088), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_mat'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_mat, sim_time_interval=1)\n', (3031, 3088), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((3098, 3152), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (3137, 3152), True, 'import snc.environments.state_initialiser as stinit\n'), ((3487, 3503), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (3495, 3503), True, 'import numpy as np\n'), ((3522, 3538), 'numpy.zeros', 
'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (3530, 3538), True, 'import numpy as np\n'), ((3559, 3575), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (3567, 3575), True, 'import numpy as np\n'), ((3591, 3607), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (3599, 3607), True, 'import numpy as np\n'), ((3631, 3640), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3637, 3640), True, 'import numpy as np\n'), ((3669, 3678), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3675, 3678), True, 'import numpy as np\n'), ((3699, 3760), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_mat'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_mat, sim_time_interval=1)\n', (3703, 3760), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((3770, 3824), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (3809, 3824), True, 'import snc.environments.state_initialiser as stinit\n'), ((3836, 3967), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 'job_generator', 's0'], {'index_phys_resources': 'index_phys_resources'}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, index_phys_resources=index_phys_resources)\n', (3856, 3967), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((4187, 4203), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (4195, 4203), True, 'import numpy as np\n'), ((4222, 4238), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (4230, 4238), True, 'import numpy as np\n'), ((4259, 4275), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (4267, 4275), True, 'import numpy 
as np\n'), ((4291, 4307), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (4299, 4307), True, 'import numpy as np\n'), ((4331, 4340), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4337, 4340), True, 'import numpy as np\n'), ((4369, 4378), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4375, 4378), True, 'import numpy as np\n'), ((4399, 4460), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_mat'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_mat, sim_time_interval=1)\n', (4403, 4460), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((4470, 4524), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (4509, 4524), True, 'import snc.environments.state_initialiser as stinit\n'), ((4536, 4667), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 'job_generator', 's0'], {'index_phys_resources': 'index_phys_resources'}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, index_phys_resources=index_phys_resources)\n', (4556, 4667), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((4842, 4904), 'snc.environments.state_initialiser.UniformRandomCRWStateInitialiser', 'stinit.UniformRandomCRWStateInitialiser', (['num_buffers', 'capacity'], {}), '(num_buffers, capacity)\n', (4881, 4904), True, 'import snc.environments.state_initialiser as stinit\n'), ((4922, 4948), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (4930, 4948), True, 'import numpy as np\n'), ((5274, 5328), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', 
(['initial_state'], {}), '(initial_state)\n', (5313, 5328), True, 'import snc.environments.state_initialiser as stinit\n'), ((5550, 5568), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5564, 5568), True, 'import numpy as np\n'), ((5579, 5646), 'snc.environments.examples.double_reentrant_line_only_shared_resources_model', 'examples.double_reentrant_line_only_shared_resources_model', ([], {'alpha': '(0)'}), '(alpha=0)\n', (5637, 5646), True, 'import snc.environments.examples as examples\n'), ((5696, 5726), 'numpy.array', 'np.array', (['[[1], [0], [0], [1]]'], {}), '([[1], [0], [0], [1]])\n', (5704, 5726), True, 'import numpy as np\n'), ((6107, 6124), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (6115, 6124), True, 'import numpy as np\n'), ((6143, 6158), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (6151, 6158), True, 'import numpy as np\n'), ((6179, 6194), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (6187, 6194), True, 'import numpy as np\n'), ((6210, 6226), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (6218, 6226), True, 'import numpy as np\n'), ((6314, 6368), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (6353, 6368), True, 'import snc.environments.state_initialiser as stinit\n'), ((6477, 6492), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (6484, 6492), True, 'import numpy as np\n'), ((6952, 6969), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (6960, 6969), True, 'import numpy as np\n'), ((6988, 7003), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (6996, 7003), True, 'import numpy as np\n'), ((7024, 7039), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (7032, 7039), True, 'import numpy as np\n'), ((7055, 7071), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (7063, 7071), True, 'import numpy as np\n'), ((7160, 7214), 
'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (7199, 7214), True, 'import snc.environments.state_initialiser as stinit\n'), ((7323, 7338), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (7330, 7338), True, 'import numpy as np\n'), ((7829, 7846), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (7837, 7846), True, 'import numpy as np\n'), ((7865, 7880), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (7873, 7880), True, 'import numpy as np\n'), ((7901, 7917), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (7909, 7917), True, 'import numpy as np\n'), ((7933, 7949), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (7941, 7949), True, 'import numpy as np\n'), ((8041, 8095), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (8080, 8095), True, 'import snc.environments.state_initialiser as stinit\n'), ((8204, 8219), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8211, 8219), True, 'import numpy as np\n'), ((8781, 8798), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (8789, 8798), True, 'import numpy as np\n'), ((8817, 8832), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (8825, 8832), True, 'import numpy as np\n'), ((8853, 8869), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (8861, 8869), True, 'import numpy as np\n'), ((8885, 8901), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (8893, 8901), True, 'import numpy as np\n'), ((8994, 9048), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (9033, 9048), True, 'import snc.environments.state_initialiser as stinit\n'), ((9157, 9172), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 
1))\n', (9164, 9172), True, 'import numpy as np\n'), ((9929, 9960), 'numpy.array', 'np.array', (['[[1.1], [2.2], [3.3]]'], {}), '([[1.1], [2.2], [3.3]])\n', (9937, 9960), True, 'import numpy as np\n'), ((10117, 10145), 'numpy.array', 'np.array', (['[[d1], [d2], [d3]]'], {}), '([[d1], [d2], [d3]])\n', (10125, 10145), True, 'import numpy as np\n'), ((10166, 10191), 'numpy.array', 'np.array', (['[[0], [0], [0]]'], {}), '([[0], [0], [0]])\n', (10174, 10191), True, 'import numpy as np\n'), ((10207, 10235), 'numpy.array', 'np.array', (['[[40], [40], [40]]'], {}), '([[40], [40], [40]])\n', (10215, 10235), True, 'import numpy as np\n'), ((10595, 10647), 'numpy.array', 'np.array', (['[[-mu1, 0, 0], [0, -mu2, 0], [0, 0, -mu3]]'], {}), '([[-mu1, 0, 0], [0, -mu2, 0], [0, 0, -mu3]])\n', (10603, 10647), True, 'import numpy as np\n'), ((10841, 10976), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0\n ], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0],\n [1, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1]])\n', (10849, 10976), True, 'import numpy as np\n'), ((11413, 11559), 'numpy.array', 'np.array', (['[[1, 2, 0], [1, 4, 3], [1, 6, 6], [2, 6, 9], [3, 8, 9], [4, 8, 12], [4, 10,\n 15], [5, 12, 15], [6, 12, 18], [6, 14, 21], [7, 16, 21]]'], {}), '([[1, 2, 0], [1, 4, 3], [1, 6, 6], [2, 6, 9], [3, 8, 9], [4, 8, 12],\n [4, 10, 15], [5, 12, 15], [6, 12, 18], [6, 14, 21], [7, 16, 21]])\n', (11421, 11559), True, 'import numpy as np\n'), ((11967, 12031), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (11971, 12031), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), 
((12041, 12095), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (12080, 12095), True, 'import snc.environments.state_initialiser as stinit\n'), ((12555, 12572), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (12563, 12572), True, 'import numpy as np\n'), ((12591, 12606), 'numpy.array', 'np.array', (['[[3]]'], {}), '([[3]])\n', (12599, 12606), True, 'import numpy as np\n'), ((12627, 12642), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (12635, 12642), True, 'import numpy as np\n'), ((12658, 12674), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (12666, 12674), True, 'import numpy as np\n'), ((12763, 12817), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (12802, 12817), True, 'import snc.environments.state_initialiser as stinit\n'), ((12926, 12941), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (12933, 12941), True, 'import numpy as np\n'), ((13343, 13360), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (13351, 13360), True, 'import numpy as np\n'), ((13379, 13394), 'numpy.array', 'np.array', (['[[3]]'], {}), '([[3]])\n', (13387, 13394), True, 'import numpy as np\n'), ((13415, 13430), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (13423, 13430), True, 'import numpy as np\n'), ((13446, 13461), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (13454, 13461), True, 'import numpy as np\n'), ((13550, 13604), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (13589, 13604), True, 'import snc.environments.state_initialiser as stinit\n'), ((13713, 13728), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (13720, 13728), True, 'import numpy as np\n'), ((14027, 
14044), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (14035, 14044), True, 'import numpy as np\n'), ((14063, 14078), 'numpy.array', 'np.array', (['[[3]]'], {}), '([[3]])\n', (14071, 14078), True, 'import numpy as np\n'), ((14099, 14115), 'numpy.array', 'np.array', (['[[10]]'], {}), '([[10]])\n', (14107, 14115), True, 'import numpy as np\n'), ((14131, 14146), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (14139, 14146), True, 'import numpy as np\n'), ((14235, 14289), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (14274, 14289), True, 'import snc.environments.state_initialiser as stinit\n'), ((14398, 14413), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (14405, 14413), True, 'import numpy as np\n'), ((14667, 14698), 'numpy.array', 'np.array', (['[[1.1], [2.2], [3.3]]'], {}), '([[1.1], [2.2], [3.3]])\n', (14675, 14698), True, 'import numpy as np\n'), ((14855, 14883), 'numpy.array', 'np.array', (['[[d1], [d2], [d3]]'], {}), '([[d1], [d2], [d3]])\n', (14863, 14883), True, 'import numpy as np\n'), ((14904, 14929), 'numpy.array', 'np.array', (['[[0], [0], [0]]'], {}), '([[0], [0], [0]])\n', (14912, 14929), True, 'import numpy as np\n'), ((14945, 14972), 'numpy.array', 'np.array', (['[[4], [10], [13]]'], {}), '([[4], [10], [13]])\n', (14953, 14972), True, 'import numpy as np\n'), ((15319, 15371), 'numpy.array', 'np.array', (['[[-mu1, 0, 0], [0, -mu2, 0], [0, 0, -mu3]]'], {}), '([[-mu1, 0, 0], [0, -mu2, 0], [0, 0, -mu3]])\n', (15327, 15371), True, 'import numpy as np\n'), ((15565, 15656), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0],\n [1, 0, 0]])\n', (15573, 15656), True, 'import numpy as np\n'), ((15939, 16033), 'numpy.array', 'np.array', (['[[1, 2, 0], [1, 4, 3], [1, 6, 6], [2, 6, 9], 
[3, 8, 9], [4, 8, 12], [4, 10, 13]\n ]'], {}), '([[1, 2, 0], [1, 4, 3], [1, 6, 6], [2, 6, 9], [3, 8, 9], [4, 8, 12],\n [4, 10, 13]])\n', (15947, 16033), True, 'import numpy as np\n'), ((16176, 16240), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (16180, 16240), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((16250, 16304), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (16289, 16304), True, 'import snc.environments.state_initialiser as stinit\n'), ((17067, 17087), 'numpy.array', 'np.array', (['[[2], [4]]'], {}), '([[2], [4]])\n', (17075, 17087), True, 'import numpy as np\n'), ((17198, 17220), 'numpy.array', 'np.array', (['[[d1], [d2]]'], {}), '([[d1], [d2]])\n', (17206, 17220), True, 'import numpy as np\n'), ((17241, 17261), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (17249, 17261), True, 'import numpy as np\n'), ((17277, 17299), 'numpy.array', 'np.array', (['[[10], [10]]'], {}), '([[10], [10]])\n', (17285, 17299), True, 'import numpy as np\n'), ((17596, 17630), 'numpy.array', 'np.array', (['[[-mu1, 0], [mu1, -mu2]]'], {}), '([[-mu1, 0], [mu1, -mu2]])\n', (17604, 17630), True, 'import numpy as np\n'), ((17882, 17932), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 1], [0, 1], [1, 1]]'], {}), '([[0, 0], [1, 0], [1, 1], [0, 1], [1, 1]])\n', (17890, 17932), True, 'import numpy as np\n'), ((18070, 18120), 'numpy.array', 'np.array', (['[[1, 0], [1, 1], [1, 1], [2, 0], [2, 0]]'], {}), '([[1, 0], [1, 1], [1, 1], [2, 0], [2, 0]])\n', (18078, 18120), True, 'import numpy as np\n'), ((18750, 18814), 
'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (18754, 18814), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((18876, 18930), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (18915, 18930), True, 'import snc.environments.state_initialiser as stinit\n'), ((19275, 19291), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (19283, 19291), True, 'import numpy as np\n'), ((19314, 19334), 'numpy.array', 'np.array', (['[[2], [4]]'], {}), '([[2], [4]])\n', (19322, 19334), True, 'import numpy as np\n'), ((19355, 19375), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (19363, 19375), True, 'import numpy as np\n'), ((19391, 19413), 'numpy.array', 'np.array', (['[[10], [10]]'], {}), '([[10], [10]])\n', (19399, 19413), True, 'import numpy as np\n'), ((19445, 19473), 'numpy.array', 'np.array', (['[[-1, 0], [1, -1]]'], {}), '([[-1, 0], [1, -1]])\n', (19453, 19473), True, 'import numpy as np\n'), ((19535, 19599), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (19539, 19599), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((19609, 19663), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (19648, 19663), True, 'import snc.environments.state_initialiser as 
stinit\n'), ((19995, 20021), 'numpy.all', 'np.all', (['(s == initial_state)'], {}), '(s == initial_state)\n', (20001, 20021), True, 'import numpy as np\n'), ((20288, 20304), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (20296, 20304), True, 'import numpy as np\n'), ((20327, 20342), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (20334, 20342), True, 'import numpy as np\n'), ((20363, 20388), 'numpy.array', 'np.array', (['[[3], [3], [1]]'], {}), '([[3], [3], [1]])\n', (20371, 20388), True, 'import numpy as np\n'), ((20460, 20518), 'numpy.array', 'np.array', (['[[-2, -5, 0, 0], [2, 0, -10, 0], [0, 5, 0, -10]]'], {}), '([[-2, -5, 0, 0], [2, 0, -10, 0], [0, 5, 0, -10]])\n', (20468, 20518), True, 'import numpy as np\n'), ((20624, 20633), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (20630, 20633), True, 'import numpy as np\n'), ((20654, 20718), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (20658, 20718), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((20761, 20815), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (20800, 20815), True, 'import snc.environments.state_initialiser as stinit\n'), ((20826, 20937), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 'job_generator', 's0', 'job_conservation_flag'], {}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, job_conservation_flag)\n', (20846, 20937), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((21722, 21747), 'numpy.array', 'np.array', 
(['[[2], [4], [4]]'], {}), '([[2], [4], [4]])\n', (21730, 21747), True, 'import numpy as np\n'), ((21904, 21932), 'numpy.array', 'np.array', (['[[d1], [d2], [d3]]'], {}), '([[d1], [d2], [d3]])\n', (21912, 21932), True, 'import numpy as np\n'), ((21953, 21978), 'numpy.array', 'np.array', (['[[0], [0], [0]]'], {}), '([[0], [0], [0]])\n', (21961, 21978), True, 'import numpy as np\n'), ((21994, 22022), 'numpy.array', 'np.array', (['[[10], [10], [10]]'], {}), '([[10], [10], [10]])\n', (22002, 22022), True, 'import numpy as np\n'), ((22377, 22449), 'numpy.array', 'np.array', (['[[-mu12, -mu13, 0, 0], [mu12, 0, -mu2, 0], [0, mu13, 0, -mu3]]'], {}), '([[-mu12, -mu13, 0, 0], [mu12, 0, -mu2, 0], [0, mu13, 0, -mu3]])\n', (22385, 22449), True, 'import numpy as np\n'), ((22643, 22816), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, \n 0, 1, 0], [0, 0, 0, 1], [1, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 1], [1, 0,\n 0, 1]]'], {}), '([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0,\n 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 1\n ], [1, 0, 0, 1]])\n', (22651, 22816), True, 'import numpy as np\n'), ((23079, 23214), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 2], [0, 1, 2], [0, 1, 3], [0, 1, 4], [1, 0, 4], [2, 0, 3\n ], [2, 0, 3], [2, 1, 3], [2, 1, 3], [2, 2, 2]]'], {}), '([[0, 0, 1], [0, 0, 2], [0, 1, 2], [0, 1, 3], [0, 1, 4], [1, 0, 4],\n [2, 0, 3], [2, 0, 3], [2, 1, 3], [2, 1, 3], [2, 2, 2]])\n', (23087, 23214), True, 'import numpy as np\n'), ((23442, 23506), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg', (['demand_rate', 'buffer_processing_matrix'], {'sim_time_interval': '(1)'}), '(demand_rate, buffer_processing_matrix, sim_time_interval=1)\n', (23446, 23506), True, 'from snc.environments.job_generators.discrete_review_job_generator import DeterministicDiscreteReviewJobGenerator as drjg\n'), ((23584, 23638), 
'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'stinit.DeterministicCRWStateInitialiser', (['initial_state'], {}), '(initial_state)\n', (23623, 23638), True, 'import snc.environments.state_initialiser as stinit\n'), ((23665, 23731), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]])\n', (23673, 23731), True, 'import numpy as np\n'), ((23742, 23833), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_matrix', 'job_generator', 's0'], {}), '(cost_per_buffer, capacity, constituency_matrix,\n job_generator, s0)\n', (23762, 23833), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((24140, 24194), 'numpy.array', 'np.array', (['[[-10, 100, 0], [10, 0, -mud], [0, 0, -mud]]'], {}), '([[-10, 100, 0], [10, 0, -mud], [0, 0, -mud]])\n', (24148, 24194), True, 'import numpy as np\n'), ((24477, 24600), 'snc.environments.controlled_random_walk.ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', 'ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', (['ind_surplus_buffers', 'job_generator.demand_nodes'], {}), '(\n ind_surplus_buffers, job_generator.demand_nodes)\n', (24546, 24600), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((24774, 24894), 'numpy.array', 'np.array', (['[[-10, 100, 0, -10, 0], [10, 0, -mud, 0, 0], [0, 0, -mud, 0, 0], [0, 0, 0, \n 0, -mud], [0, 0, 0, 10, -mud]]'], {}), '([[-10, 100, 0, -10, 0], [10, 0, -mud, 0, 0], [0, 0, -mud, 0, 0], [\n 0, 0, 0, 0, -mud], [0, 0, 0, 10, -mud]])\n', (24782, 24894), True, 'import numpy as np\n'), ((25205, 25328), 'snc.environments.controlled_random_walk.ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', 'ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', 
(['ind_surplus_buffers', 'job_generator.demand_nodes'], {}), '(\n ind_surplus_buffers, job_generator.demand_nodes)\n', (25274, 25328), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((25500, 25554), 'numpy.array', 'np.array', (['[[-10, 100, 0], [10, 0, -mud], [0, 0, -mud]]'], {}), '([[-10, 100, 0], [10, 0, -mud], [0, 0, -mud]])\n', (25508, 25554), True, 'import numpy as np\n'), ((26068, 26106), 'snc.environments.examples.single_station_demand_model', 'examples.single_station_demand_model', ([], {}), '()\n', (26104, 26106), True, 'import snc.environments.examples as examples\n'), ((26222, 26276), 'numpy.array', 'np.array', (['[[-10, 0, 100], [10, -mud, 0], [0, -mud, 0]]'], {}), '([[-10, 0, 100], [10, -mud, 0], [0, -mud, 0]])\n', (26230, 26276), True, 'import numpy as np\n'), ((26426, 26516), 'snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator.ScaledBernoulliServicesPoissonArrivalsGenerator', 'ScaledBernoulliServicesPoissonArrivalsGenerator', (['demand_rate', 'buffer_processing_matrix'], {}), '(demand_rate,\n buffer_processing_matrix)\n', (26473, 26516), False, 'from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import ScaledBernoulliServicesPoissonArrivalsGenerator\n'), ((27375, 27403), 'numpy.array', 'np.array', (['[[-2, 0], [2, -3]]'], {}), '([[-2, 0], [2, -3]])\n', (27383, 27403), True, 'import numpy as np\n'), ((27939, 27994), 'numpy.all', 'np.all', (['(routing_jobs_matrix == buffer_processing_matrix)'], {}), '(routing_jobs_matrix == buffer_processing_matrix)\n', (27945, 27994), True, 'import numpy as np\n'), ((28129, 28157), 'numpy.array', 'np.array', (['[[-2, 0], [2, -3]]'], {}), '([[-2, 0], [2, -3]])\n', (28137, 28157), True, 'import numpy as np\n'), ((28564, 28589), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (28571, 28589), True, 'import numpy as np\n'), ((28877, 28905), 'numpy.array', 'np.array', (['[[-2, 0], [2, 
-3]]'], {}), '([[-2, 0], [2, -3]])\n', (28885, 28905), True, 'import numpy as np\n'), ((29312, 29338), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (29320, 29338), True, 'import numpy as np\n'), ((29719, 29850), 'numpy.array', 'np.array', (['[[-mu1, 0, 0, 0, 0], [mu1, -mu2, mu3, 0, -mud2], [0, 0, 0, 0, -mud2], [0,\n mu2, -mu3, -mud1, 0], [0, 0, 0, -mud1, 0]]'], {}), '([[-mu1, 0, 0, 0, 0], [mu1, -mu2, mu3, 0, -mud2], [0, 0, 0, 0, -\n mud2], [0, mu2, -mu3, -mud1, 0], [0, 0, 0, -mud1, 0]])\n', (29727, 29850), True, 'import numpy as np\n'), ((30498, 30524), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (30506, 30524), True, 'import numpy as np\n'), ((30841, 30858), 'numpy.array', 'np.array', (['[[3.5]]'], {}), '([[3.5]])\n', (30849, 30858), True, 'import numpy as np\n'), ((30925, 30940), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (30932, 30940), True, 'import numpy as np\n'), ((30959, 30977), 'numpy.array', 'np.array', (['[[1000]]'], {}), '([[1000]])\n', (30967, 30977), True, 'import numpy as np\n'), ((31009, 31024), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (31017, 31024), True, 'import numpy as np\n'), ((31045, 31061), 'numpy.array', 'np.array', (['[[20]]'], {}), '([[20]])\n', (31053, 31061), True, 'import numpy as np\n'), ((31097, 31222), 'snc.environments.job_generators.discrete_review_job_generator.PoissonDiscreteReviewJobGenerator', 'prjg', ([], {'sim_time_interval': '(1)', 'demand_rate': 'demand_rate', 'buffer_processing_matrix': 'buffer_processing_matrix', 'job_gen_seed': 'seed'}), '(sim_time_interval=1, demand_rate=demand_rate, buffer_processing_matrix\n =buffer_processing_matrix, job_gen_seed=seed)\n', (31101, 31222), True, 'from snc.environments.job_generators.discrete_review_job_generator import PoissonDiscreteReviewJobGenerator as prjg\n'), ((31318, 31386), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 
'stinit.DeterministicCRWStateInitialiser', ([], {'initial_state': 'initial_state'}), '(initial_state=initial_state)\n', (31357, 31386), True, 'import snc.environments.state_initialiser as stinit\n'), ((31398, 31570), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', ([], {'cost_per_buffer': 'cost_per_buffer', 'capacity': 'capacity', 'constituency_matrix': 'constituency_matrix', 'job_generator': 'job_generator', 'state_initialiser': 's0'}), '(cost_per_buffer=cost_per_buffer, capacity=capacity,\n constituency_matrix=constituency_matrix, job_generator=job_generator,\n state_initialiser=s0)\n', (31418, 31570), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((31759, 31794), 'numpy.any', 'np.any', (['(next_state != initial_state)'], {}), '(next_state != initial_state)\n', (31765, 31794), True, 'import numpy as np\n'), ((31806, 31855), 'numpy.any', 'np.any', (['(next_random_state != initial_random_state)'], {}), '(next_random_state != initial_random_state)\n', (31812, 31855), True, 'import numpy as np\n'), ((32006, 32048), 'numpy.all', 'np.all', (['(new_initial_state == initial_state)'], {}), '(new_initial_state == initial_state)\n', (32012, 32048), True, 'import numpy as np\n'), ((32060, 32116), 'numpy.all', 'np.all', (['(new_initial_random_state == initial_random_state)'], {}), '(new_initial_random_state == initial_random_state)\n', (32066, 32116), True, 'import numpy as np\n'), ((32232, 32250), 'numpy.array', 'np.array', (['[[1000]]'], {}), '([[1000]])\n', (32240, 32250), True, 'import numpy as np\n'), ((32282, 32297), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (32290, 32297), True, 'import numpy as np\n'), ((32333, 32458), 'snc.environments.job_generators.discrete_review_job_generator.PoissonDiscreteReviewJobGenerator', 'prjg', ([], {'sim_time_interval': '(1)', 'demand_rate': 'demand_rate', 'buffer_processing_matrix': 'buffer_processing_matrix', 'job_gen_seed': 'seed'}), 
'(sim_time_interval=1, demand_rate=demand_rate, buffer_processing_matrix\n =buffer_processing_matrix, job_gen_seed=seed)\n', (32337, 32458), True, 'from snc.environments.job_generators.discrete_review_job_generator import PoissonDiscreteReviewJobGenerator as prjg\n'), ((32661, 32710), 'numpy.any', 'np.any', (['(next_random_state != initial_random_state)'], {}), '(next_random_state != initial_random_state)\n', (32667, 32710), True, 'import numpy as np\n'), ((32824, 32880), 'numpy.all', 'np.all', (['(new_initial_random_state == initial_random_state)'], {}), '(new_initial_random_state == initial_random_state)\n', (32830, 32880), True, 'import numpy as np\n'), ((33092, 33110), 'numpy.array', 'np.array', (['[[1000]]'], {}), '([[1000]])\n', (33100, 33110), True, 'import numpy as np\n'), ((33166, 33181), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (33174, 33181), True, 'import numpy as np\n'), ((33200, 33220), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (33214, 33220), True, 'import numpy as np\n'), ((33244, 33369), 'snc.environments.job_generators.discrete_review_job_generator.PoissonDiscreteReviewJobGenerator', 'prjg', ([], {'sim_time_interval': '(1)', 'demand_rate': 'demand_rate', 'buffer_processing_matrix': 'buffer_processing_matrix', 'job_gen_seed': 'seed'}), '(sim_time_interval=1, demand_rate=demand_rate, buffer_processing_matrix\n =buffer_processing_matrix, job_gen_seed=seed)\n', (33248, 33369), True, 'from snc.environments.job_generators.discrete_review_job_generator import PoissonDiscreteReviewJobGenerator as prjg\n'), ((33442, 33567), 'snc.environments.job_generators.discrete_review_job_generator.PoissonDiscreteReviewJobGenerator', 'prjg', ([], {'sim_time_interval': '(1)', 'demand_rate': 'demand_rate', 'buffer_processing_matrix': 'buffer_processing_matrix', 'job_gen_seed': 'seed'}), '(sim_time_interval=1, demand_rate=demand_rate, buffer_processing_matrix\n =buffer_processing_matrix, job_gen_seed=seed)\n', (33446, 33567), True, 
'from snc.environments.job_generators.discrete_review_job_generator import PoissonDiscreteReviewJobGenerator as prjg\n'), ((33631, 33646), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (33640, 33646), True, 'import numpy as np\n'), ((35573, 35627), 'snc.environments.examples_distribution_with_rebalancing.one_warehouse', 'examples_distribution_with_rebalancing.one_warehouse', ([], {}), '()\n', (35625, 35627), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((35633, 35739), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses', 'examples_distribution_with_rebalancing.two_warehouses', ([], {'r_to_w_rebalance': '(False)', 'w_to_w_rebalance': '(False)'}), '(r_to_w_rebalance=\n False, w_to_w_rebalance=False)\n', (35686, 35739), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((35798, 35902), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses', 'examples_distribution_with_rebalancing.two_warehouses', ([], {'r_to_w_rebalance': '(True)', 'w_to_w_rebalance': '(False)'}), '(r_to_w_rebalance=True,\n w_to_w_rebalance=False)\n', (35851, 35902), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((35962, 36067), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses', 'examples_distribution_with_rebalancing.two_warehouses', ([], {'r_to_w_rebalance': '(False)', 'w_to_w_rebalance': '(True)'}), '(r_to_w_rebalance=\n False, w_to_w_rebalance=True)\n', (36015, 36067), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((36126, 36229), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses', 'examples_distribution_with_rebalancing.two_warehouses', ([], {'r_to_w_rebalance': '(True)', 'w_to_w_rebalance': '(True)'}), 
'(r_to_w_rebalance=True,\n w_to_w_rebalance=True)\n', (36179, 36229), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((36289, 36406), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses_simplified', 'examples_distribution_with_rebalancing.two_warehouses_simplified', ([], {'r_to_w_rebalance': '(False)', 'w_to_w_rebalance': '(False)'}), '(\n r_to_w_rebalance=False, w_to_w_rebalance=False)\n', (36353, 36406), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((36476, 36592), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses_simplified', 'examples_distribution_with_rebalancing.two_warehouses_simplified', ([], {'r_to_w_rebalance': '(True)', 'w_to_w_rebalance': '(False)'}), '(\n r_to_w_rebalance=True, w_to_w_rebalance=False)\n', (36540, 36592), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((36662, 36778), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses_simplified', 'examples_distribution_with_rebalancing.two_warehouses_simplified', ([], {'r_to_w_rebalance': '(False)', 'w_to_w_rebalance': '(True)'}), '(\n r_to_w_rebalance=False, w_to_w_rebalance=True)\n', (36726, 36778), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((36848, 36963), 'snc.environments.examples_distribution_with_rebalancing.two_warehouses_simplified', 'examples_distribution_with_rebalancing.two_warehouses_simplified', ([], {'r_to_w_rebalance': '(True)', 'w_to_w_rebalance': '(True)'}), '(\n r_to_w_rebalance=True, w_to_w_rebalance=True)\n', (36912, 36963), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((37033, 37127), 
'snc.environments.examples_distribution_with_rebalancing.three_warehouses_simplified', 'examples_distribution_with_rebalancing.three_warehouses_simplified', ([], {'r_to_w_rebalance': '(True)'}), '(\n r_to_w_rebalance=True)\n', (37099, 37127), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((37128, 37223), 'snc.environments.examples_distribution_with_rebalancing.three_warehouses_simplified', 'examples_distribution_with_rebalancing.three_warehouses_simplified', ([], {'r_to_w_rebalance': '(False)'}), '(\n r_to_w_rebalance=False)\n', (37194, 37223), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((37224, 37281), 'snc.environments.examples_distribution_with_rebalancing.three_warehouses', 'examples_distribution_with_rebalancing.three_warehouses', ([], {}), '()\n', (37279, 37281), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((37287, 37376), 'snc.environments.examples_distribution_with_rebalancing.three_warehouses_two_manufacturers_per_area', 'examples_distribution_with_rebalancing.three_warehouses_two_manufacturers_per_area', ([], {}), '(\n )\n', (37369, 37376), True, 'import snc.environments.examples_distribution_with_rebalancing as examples_distribution_with_rebalancing\n'), ((1174, 1190), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (1187, 1190), True, 'import snc.utils.snc_tools as snc\n'), ((1262, 1278), 'snc.utils.snc_tools.is_binary', 'snc.is_binary', (['c'], {}), '(c)\n', (1275, 1278), True, 'import snc.utils.snc_tools as snc\n'), ((1772, 1801), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1785, 1801), False, 'import pytest\n'), ((1811, 1942), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 
'job_generator', 's0'], {'index_phys_resources': 'index_phys_resources'}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, index_phys_resources=index_phys_resources)\n', (1831, 1942), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((2475, 2504), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2488, 2504), False, 'import pytest\n'), ((2514, 2645), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 'job_generator', 's0'], {'index_phys_resources': 'index_phys_resources'}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, index_phys_resources=index_phys_resources)\n', (2534, 2645), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((3163, 3192), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3176, 3192), False, 'import pytest\n'), ((3202, 3333), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'ControlledRandomWalk', (['cost_per_buffer', 'capacity', 'constituency_mat', 'job_generator', 's0'], {'index_phys_resources': 'index_phys_resources'}), '(cost_per_buffer, capacity, constituency_mat,\n job_generator, s0, index_phys_resources=index_phys_resources)\n', (3222, 3333), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((5233, 5255), 'numpy.array', 'np.array', (['[2, 3, 4, 5]'], {}), '([2, 3, 4, 5])\n', (5241, 5255), True, 'import numpy as np\n'), ((5736, 5765), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5749, 5765), False, 'import pytest\n'), ((6428, 6443), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (6435, 6443), True, 'import numpy as np\n'), ((7274, 7289), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (7281, 7289), True, 'import numpy as np\n'), ((7507, 7536), 'pytest.raises', 
'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (7520, 7536), False, 'import pytest\n'), ((8155, 8170), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8162, 8170), True, 'import numpy as np\n'), ((8407, 8436), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8420, 8436), False, 'import pytest\n'), ((9108, 9123), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9115, 9123), True, 'import numpy as np\n'), ((9356, 9385), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (9369, 9385), False, 'import pytest\n'), ((12155, 12170), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (12162, 12170), True, 'import numpy as np\n'), ((12334, 12377), 'numpy.testing.assert_approx_equal', 'np.testing.assert_approx_equal', (['r', '(-cost[i])'], {}), '(r, -cost[i])\n', (12364, 12377), True, 'import numpy as np\n'), ((12877, 12892), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (12884, 12892), True, 'import numpy as np\n'), ((13048, 13077), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (13061, 13077), False, 'import pytest\n'), ((13664, 13679), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (13671, 13679), True, 'import numpy as np\n'), ((13739, 13768), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (13752, 13768), False, 'import pytest\n'), ((14349, 14364), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (14356, 14364), True, 'import numpy as np\n'), ((14424, 14453), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (14437, 14453), False, 'import pytest\n'), ((16364, 16379), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (16371, 16379), True, 'import numpy as np\n'), ((16543, 16572), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (16556, 16572), False, 'import pytest\n'), ((18990, 18999), 'numpy.eye', 
'np.eye', (['(2)'], {}), '(2)\n', (18996, 18999), True, 'import numpy as np\n'), ((19162, 19205), 'numpy.testing.assert_approx_equal', 'np.testing.assert_approx_equal', (['r', '(-cost[i])'], {}), '(r, -cost[i])\n', (19192, 19205), True, 'import numpy as np\n'), ((20126, 20155), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (20139, 20155), False, 'import pytest\n'), ((20404, 20419), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (20411, 20419), True, 'import numpy as np\n'), ((24345, 24370), 'numpy.array', 'np.array', (['[[0], [0], [9]]'], {}), '([[0], [0], [9]])\n', (24353, 24370), True, 'import numpy as np\n'), ((25131, 25166), 'numpy.array', 'np.array', (['[[0], [0], [0], [0], [9]]'], {}), '([[0], [0], [0], [0], [9]])\n', (25139, 25166), True, 'import numpy as np\n'), ((25705, 25730), 'numpy.array', 'np.array', (['[[0], [0], [9]]'], {}), '([[0], [0], [9]])\n', (25713, 25730), True, 'import numpy as np\n'), ((25841, 25964), 'snc.environments.controlled_random_walk.ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', 'ControlledRandomWalk.is_surplus_buffers_consistent_with_job_generator', (['ind_surplus_buffers', 'job_generator.demand_nodes'], {}), '(\n ind_surplus_buffers, job_generator.demand_nodes)\n', (25910, 25964), False, 'from snc.environments.controlled_random_walk import ControlledRandomWalk\n'), ((26377, 26396), 'numpy.array', 'np.array', (['[0, 0, 9]'], {}), '([0, 0, 9])\n', (26385, 26396), True, 'import numpy as np\n'), ((26736, 26745), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (26742, 26745), True, 'import numpy as np\n'), ((26826, 26871), 'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'DeterministicCRWStateInitialiser', (['demand_rate'], {}), '(demand_rate)\n', (26858, 26871), False, 'from snc.environments.state_initialiser import DeterministicCRWStateInitialiser\n'), ((27163, 27192), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), 
'(AssertionError)\n', (27176, 27192), False, 'import pytest\n'), ((27494, 27520), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (27502, 27520), True, 'import numpy as np\n'), ((27547, 27572), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (27554, 27572), True, 'import numpy as np\n'), ((27652, 27677), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (27659, 27677), True, 'import numpy as np\n'), ((27679, 27704), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (27686, 27704), True, 'import numpy as np\n'), ((27737, 27776), 'numpy.zeros', 'np.zeros', (['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (27745, 27776), True, 'import numpy as np\n'), ((27814, 27839), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (27821, 27839), True, 'import numpy as np\n'), ((28248, 28274), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (28256, 28274), True, 'import numpy as np\n'), ((28301, 28326), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (28308, 28326), True, 'import numpy as np\n'), ((28406, 28431), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (28413, 28431), True, 'import numpy as np\n'), ((28433, 28458), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (28440, 28458), True, 'import numpy as np\n'), ((28491, 28530), 'numpy.zeros', 'np.zeros', (['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (28499, 28530), True, 'import numpy as np\n'), ((28996, 29022), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (29004, 29022), True, 'import numpy as np\n'), ((29049, 29074), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (29056, 29074), True, 'import numpy as np\n'), ((29154, 29179), 'numpy.ones', 
'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (29161, 29179), True, 'import numpy as np\n'), ((29181, 29206), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (29188, 29206), True, 'import numpy as np\n'), ((29239, 29278), 'numpy.zeros', 'np.zeros', (['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (29247, 29278), True, 'import numpy as np\n'), ((30092, 30118), 'numpy.zeros', 'np.zeros', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (30100, 30118), True, 'import numpy as np\n'), ((30145, 30170), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (30152, 30170), True, 'import numpy as np\n'), ((30259, 30284), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (30266, 30284), True, 'import numpy as np\n'), ((30294, 30319), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (30301, 30319), True, 'import numpy as np\n'), ((30329, 30368), 'numpy.zeros', 'np.zeros', (['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (30337, 30368), True, 'import numpy as np\n'), ((30874, 30889), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (30881, 30889), True, 'import numpy as np\n'), ((33783, 33823), 'numpy.all', 'np.all', (['(arrival_jobs_1 == arrival_jobs_2)'], {}), '(arrival_jobs_1 == arrival_jobs_2)\n', (33789, 33823), True, 'import numpy as np\n'), ((34016, 34070), 'numpy.all', 'np.all', (['(drained_jobs_matrix_1 == drained_jobs_matrix_2)'], {}), '(drained_jobs_matrix_1 == drained_jobs_matrix_2)\n', (34022, 34070), True, 'import numpy as np\n'), ((34243, 34285), 'numpy.all', 'np.all', (['(supplied_jobs_1 == supplied_jobs_2)'], {}), '(supplied_jobs_1 == supplied_jobs_2)\n', (34249, 34285), True, 'import numpy as np\n'), ((6267, 6282), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (6274, 6282), True, 'import numpy as np\n'), ((7113, 7128), 'numpy.ones', 'np.ones', (['(1, 1)'], 
{}), '((1, 1))\n', (7120, 7128), True, 'import numpy as np\n'), ((7994, 8009), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8001, 8009), True, 'import numpy as np\n'), ((8309, 8352), 'numpy.max', 'np.max', (['[0, initial_state[0] - (i + 1) * 2]'], {}), '([0, initial_state[0] - (i + 1) * 2])\n', (8315, 8352), True, 'import numpy as np\n'), ((8947, 8962), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8954, 8962), True, 'import numpy as np\n'), ((9262, 9301), 'numpy.max', 'np.max', (['[0, initial_state[0] - (i + 1)]'], {}), '([0, initial_state[0] - (i + 1)])\n', (9268, 9301), True, 'import numpy as np\n'), ((12716, 12731), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (12723, 12731), True, 'import numpy as np\n'), ((13503, 13518), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (13510, 13518), True, 'import numpy as np\n'), ((14188, 14203), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (14195, 14203), True, 'import numpy as np\n'), ((19738, 19747), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (19744, 19747), True, 'import numpy as np\n'), ((19957, 19973), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (19965, 19973), True, 'import numpy as np\n'), ((20991, 21013), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (20999, 21013), True, 'import numpy as np\n'), ((26613, 26635), 'numpy.array', 'np.array', (['[1, 0.5, 10]'], {}), '([1, 0.5, 10])\n', (26621, 26635), True, 'import numpy as np\n'), ((26678, 26693), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (26685, 26693), True, 'import numpy as np\n'), ((28719, 28747), 'numpy.array', 'np.array', (['[[-1, 0], [1, -1]]'], {}), '([[-1, 0], [1, -1]])\n', (28727, 28747), True, 'import numpy as np\n'), ((29468, 29507), 'numpy.zeros', 'np.zeros', (['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (29476, 29507), True, 'import numpy as np\n'), ((30654, 30693), 'numpy.zeros', 'np.zeros', 
(['(num_buffers, num_activities)'], {}), '((num_buffers, num_activities))\n', (30662, 30693), True, 'import numpy as np\n'), ((31668, 31683), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (31675, 31683), True, 'import numpy as np\n'), ((5139, 5164), 'numpy.ones', 'np.ones', (['(num_buffers, 1)'], {}), '((num_buffers, 1))\n', (5146, 5164), True, 'import numpy as np\n'), ((20178, 20194), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (20186, 20194), True, 'import numpy as np\n'), ((21215, 21240), 'numpy.array', 'np.array', (['[[0], [2], [1]]'], {}), '([[0], [2], [1]])\n', (21223, 21240), True, 'import numpy as np\n'), ((21257, 21282), 'numpy.array', 'np.array', (['[[0], [0], [3]]'], {}), '([[0], [0], [3]])\n', (21265, 21282), True, 'import numpy as np\n')] |
import numpy as np
import sys
import os
import pandas as pd
from scipy import signal
from sklearn.metrics import accuracy_score
########## Note: Using Professor's code as a starting template ########################
########################################
### Read images from train directory ###
# Training-set directory is the first CLI argument; it must contain a
# data.csv manifest (Name, Label columns) plus one text file per image.
traindir = sys.argv[1]
df = pd.read_csv(traindir+'/data.csv')#load images' names and labels
names = df['Name'].values
labels = df['Label'].values
# Each training image is a 3x3 matrix stored as whitespace-separated text.
traindata = np.empty((len(labels),3,3), dtype=np.float32)
for i in range(0, len(labels)):
    image_matrix = np.loadtxt(traindir+'/'+names[i])
    traindata[i] = image_matrix
print(traindata)
print(labels)
# Test-set directory is the second CLI argument, laid out like the train set.
testdir = sys.argv[2]
df_test = pd.read_csv(testdir+'/data.csv')
test_name = df_test['Name'].values
test_label = df_test['Label'].values
# NOTE: the deprecated alias np.float was removed in NumPy 1.24; np.float64
# is the same dtype the old alias resolved to, so behavior is unchanged.
test_data = np.empty((len(test_label),3,3),dtype=np.float64)
for i in range(0,len(test_label)):
    # Each test image is a 3x3 whitespace-separated matrix in its own file.
    test_image_matrix = np.loadtxt(testdir+'/'+test_name[i])
    test_data[i] = test_image_matrix
def sigmoid(x):
    """Logistic activation 1/(1 + e^-x); maps any real x into (0, 1).

    Defined with ``def`` rather than an assigned lambda (PEP 8 E731);
    call sites are unchanged.
    """
    return 1/(1+np.exp(-x))
##############################
### Initialize all weights ###
# 2x2 convolution filter, initialised uniformly at random in [0, 1).
c = np.random.rand(2,2)
#c = np.ones((2,2), dtype=np.int8)
epochs = 1000  # number of full gradient-descent passes over the data
eta = .1  # learning rate
prevobj = np.inf  # previous objective value (convergence tracker)
i=0
###########################
### Calculate objective ###
# Initial squared-error objective over the training set, before any
# gradient update: forward-pass each image through conv -> sigmoid ->
# mean pooling, and accumulate (prediction - label)^2.
objective = 0
for i in range(0, len(labels)):
    # Valid 2x2 convolution of the 3x3 image yields a 2x2 hidden map.
    hidden_layer = signal.convolve2d(traindata[i], c, mode="valid")
    for j in range(0, 2, 1):
        for k in range(0, 2, 1):
            hidden_layer[j][k] = sigmoid(hidden_layer[j][k])
    # Network output is the mean of the four hidden activations.
    output_layer = (hidden_layer[0][0] + hidden_layer[0][1] + hidden_layer[1][0] + hidden_layer[1][1])/4
    objective += (output_layer-labels[i]) ** 2
print('Output_layer = %s' % output_layer)
# Train the 2x2 filter with batch gradient descent.  Each epoch:
# accumulate the gradient of the squared-error objective w.r.t. the four
# filter weights over all training images, take one step of size eta,
# then recompute the objective for progress reporting.
# (The dead assignment `f = output_layer**.5`, which was never read, has
# been removed; all gradient arithmetic is unchanged.)
count = 0
while(count < epochs):
    prevobj = objective  # kept for potential convergence checks; not otherwise read
    # Gradient accumulators, one per filter weight c[r][s].
    dellc1 = 0
    dellc2 = 0
    dellc3 = 0
    dellc4 = 0
    for i in range(0, len(labels)):
        # Forward pass: valid 2x2 convolution -> element-wise sigmoid.
        hidden_layer = signal.convolve2d(traindata[i], c, mode="valid")
        for j in range(0, 2, 1):
            for k in range(0, 2, 1):
                hidden_layer[j][k] = sigmoid(hidden_layer[j][k])
        #gd c1
        # Residual of the mean-pooled output against the label.
        sqrtf = (hidden_layer[0][0] + hidden_layer[0][1] + hidden_layer[1][0] + hidden_layer[1][1])/4 - labels[i]
        # Chain rule: d z_jk / d c1 = z_jk (1 - z_jk) * input pixel under c1.
        dz1dc1 = hidden_layer[0][0] *(1 - hidden_layer[0][0]) *traindata[i][0][0]
        dz2dc1 = hidden_layer[0][1] *(1 - hidden_layer[0][1]) *traindata[i][0][1]
        dz3dc1 = hidden_layer[1][0] *(1 - hidden_layer[1][0]) *traindata[i][1][0]
        dz4dc1 = hidden_layer[1][1] *(1 - hidden_layer[1][1]) *traindata[i][1][1]
        dellc1 += (sqrtf * (dz1dc1 + dz2dc1 + dz3dc1 +dz4dc1))/2
        #gd c2
        dz1dc2 = hidden_layer[0][0] *(1 - hidden_layer[0][0]) *traindata[i][0][1]
        dz2dc2 = hidden_layer[0][1] *(1 - hidden_layer[0][1]) *traindata[i][0][2]
        dz3dc2 = hidden_layer[1][0] *(1 - hidden_layer[1][0]) *traindata[i][1][1]
        dz4dc2 = hidden_layer[1][1] *(1 - hidden_layer[1][1]) *traindata[i][1][2]
        dellc2 += (sqrtf * (dz1dc2 + dz2dc2 + dz3dc2 +dz4dc2))/2
        #gd c3
        dz1dc3 = hidden_layer[0][0] *(1 - hidden_layer[0][0]) *traindata[i][1][0]
        dz2dc3 = hidden_layer[0][1] *(1 - hidden_layer[0][1]) *traindata[i][1][1]
        dz3dc3 = hidden_layer[1][0] *(1 - hidden_layer[1][0]) *traindata[i][2][0]
        dz4dc3 = hidden_layer[1][1] *(1 - hidden_layer[1][1]) *traindata[i][2][1]
        dellc3 += (sqrtf * (dz1dc3 + dz2dc3 + dz3dc3 +dz4dc3))/2
        #gd c4
        dz1dc4 = hidden_layer[0][0] *(1 - hidden_layer[0][0]) *traindata[i][1][1]
        dz2dc4 = hidden_layer[0][1] *(1 - hidden_layer[0][1]) *traindata[i][1][2]
        dz3dc4 = hidden_layer[1][0] *(1 - hidden_layer[1][0]) *traindata[i][2][1]
        dz4dc4 = hidden_layer[1][1] *(1 - hidden_layer[1][1]) *traindata[i][2][2]
        dellc4 += (sqrtf * (dz1dc4 + dz2dc4 + dz3dc4 +dz4dc4))/2
    # Gradient step on each filter weight.
    c[0][0] -= eta * dellc1
    c[0][1] -= eta * dellc2
    c[1][0] -= eta * dellc3
    c[1][1] -= eta * dellc4
    # Recompute the objective after the update for progress reporting.
    objective = 0
    for i in range(0, len(labels)):
        hidden_layer = signal.convolve2d(traindata[i], c, mode="valid")
        for j in range(0, 2, 1):
            for k in range(0, 2, 1):
                hidden_layer[j][k] = sigmoid(hidden_layer[j][k])
        output_layer = (hidden_layer[0][0] + hidden_layer[0][1] + hidden_layer[1][0] + hidden_layer[1][1]) / 4
        objective +=(output_layer - labels[i])**2
    print("i = %s objective=%s" % (count, objective))
    count += 1
print("\n\nc=%s \n" %c)
# Final Pred Labels
# Run the trained filter over the test set: same forward pass as training
# (conv -> sigmoid -> mean pooling), then threshold at 0.5 to get a
# binary class prediction.
pred_labels = []
for i in range(0, len(test_label)):
    hidden_layer = signal.convolve2d(test_data[i], c, mode="valid")
    for j in range(0, 2, 1):
        for k in range(0, 2, 1):
            hidden_layer[j][k] = sigmoid(hidden_layer[j][k])
    output_layer = (hidden_layer[0][0] + hidden_layer[0][1] + hidden_layer[1][0] + hidden_layer[1][1]) / 4
    print('Output_layer= %s' % output_layer)
    print('Test_data[i]= %s' % test_data[i])
    # Decision threshold at 0.5: outputs above it are class 1.
    if (output_layer > 0.5):
        pred_labels.append(1)
    else:
        pred_labels.append(0)
print('\nTest Labels = %s' % test_label )
print('Predictions = %s' % pred_labels )
# Report classification accuracy as a percentage.
accuracy = accuracy_score(test_label, pred_labels) * 100
print('Accuracy = %s %%'% accuracy)
| [
"scipy.signal.convolve2d",
"numpy.random.rand",
"pandas.read_csv",
"numpy.exp",
"numpy.loadtxt",
"sklearn.metrics.accuracy_score"
] | [((334, 369), 'pandas.read_csv', 'pd.read_csv', (["(traindir + '/data.csv')"], {}), "(traindir + '/data.csv')\n", (345, 369), True, 'import pandas as pd\n'), ((841, 875), 'pandas.read_csv', 'pd.read_csv', (["(testdir + '/data.csv')"], {}), "(testdir + '/data.csv')\n", (852, 875), True, 'import pandas as pd\n'), ((1249, 1269), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (1263, 1269), True, 'import numpy as np\n'), ((559, 596), 'numpy.loadtxt', 'np.loadtxt', (["(traindir + '/' + names[i])"], {}), "(traindir + '/' + names[i])\n", (569, 596), True, 'import numpy as np\n'), ((1067, 1107), 'numpy.loadtxt', 'np.loadtxt', (["(testdir + '/' + test_name[i])"], {}), "(testdir + '/' + test_name[i])\n", (1077, 1107), True, 'import numpy as np\n'), ((1474, 1522), 'scipy.signal.convolve2d', 'signal.convolve2d', (['traindata[i]', 'c'], {'mode': '"""valid"""'}), "(traindata[i], c, mode='valid')\n", (1491, 1522), False, 'from scipy import signal\n'), ((4717, 4765), 'scipy.signal.convolve2d', 'signal.convolve2d', (['test_data[i]', 'c'], {'mode': '"""valid"""'}), "(test_data[i], c, mode='valid')\n", (4734, 4765), False, 'from scipy import signal\n'), ((5280, 5319), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_label', 'pred_labels'], {}), '(test_label, pred_labels)\n', (5294, 5319), False, 'from sklearn.metrics import accuracy_score\n'), ((2046, 2094), 'scipy.signal.convolve2d', 'signal.convolve2d', (['traindata[i]', 'c'], {'mode': '"""valid"""'}), "(traindata[i], c, mode='valid')\n", (2063, 2094), False, 'from scipy import signal\n'), ((4176, 4224), 'scipy.signal.convolve2d', 'signal.convolve2d', (['traindata[i]', 'c'], {'mode': '"""valid"""'}), "(traindata[i], c, mode='valid')\n", (4193, 4224), False, 'from scipy import signal\n'), ((1169, 1179), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1175, 1179), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.ipt` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.models import XYZ_to_IPT, IPT_to_XYZ, IPT_hue_angle
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestXYZ_to_IPT',
'TestIPT_to_XYZ',
'TestIPTHueAngle']
class TestXYZ_to_IPT(unittest.TestCase):
    """
    Defines :func:`colour.models.ipt.TestXYZ_to_IPT` definition unit tests
    methods.
    """

    def test_XYZ_to_IPT(self):
        """
        Tests :func:`colour.models.ipt.XYZ_to_IPT` definition.
        """

        # Known XYZ -> IPT correspondences, verified to 7 decimal places.
        known_pairs = (
            ([0.96907232, 1, 1.12179215],
             [1.00300825, 0.01906918, -0.01369292]),
            ([1.92001986, 1, -0.1241347],
             [0.73974548, 0.95333412, 1.71951212]),
            ([1.0131677, 1, 2.11217686],
             [1.06406598, -0.08075812, -0.39625384]))

        for XYZ, IPT in known_pairs:
            np.testing.assert_almost_equal(
                XYZ_to_IPT(np.array(XYZ)),
                np.array(IPT),
                decimal=7)
class TestIPT_to_XYZ(unittest.TestCase):
    """
    Defines :func:`colour.models.ipt.IPT_to_XYZ` definition unit tests
    methods.
    """

    def test_IPT_to_XYZ(self):
        """
        Tests :func:`colour.models.ipt.IPT_to_XYZ` definition.
        """

        # Known IPT -> XYZ correspondences, verified to 7 decimal places.
        known_pairs = (
            ([1.00300825, 0.01906918, -0.01369292],
             [0.9689994, 0.99995764, 1.1218432]),
            ([0.73974548, 0.95333412, 1.71951212],
             [1.91998253, 0.99988784, -0.12416715]),
            ([1.06406598, -0.08075812, -0.39625384],
             [1.0130757, 0.9999554, 2.11229678]))

        for IPT, XYZ in known_pairs:
            np.testing.assert_almost_equal(
                IPT_to_XYZ(np.array(IPT)),
                np.array(XYZ),
                decimal=7)
class TestIPTHueAngle(unittest.TestCase):
    """
    Defines :func:`colour.models.ipt.IPT_hue_angle` definition unit tests
    methods.
    """

    def test_IPT_hue_angle(self):
        """
        Tests :func:`colour.models.ipt.IPT_hue_angle` definition.
        """

        # Known IPT -> hue angle correspondences, verified to 7 decimals.
        known_pairs = (
            ([0.96907232, 1, 1.12179215], 0.84273584954373859),
            ([1.92001986, 1, -0.1241347], -0.12350291631562464),
            ([1.0131677, 1, 2.11217686], 1.1286173302440385))

        for IPT, expected_angle in known_pairs:
            np.testing.assert_almost_equal(
                IPT_hue_angle(np.array(IPT)),
                expected_angle,
                decimal=7)
| [
"numpy.array"
] | [((1085, 1132), 'numpy.array', 'np.array', (['[1.00300825, 0.01906918, -0.01369292]'], {}), '([1.00300825, 0.01906918, -0.01369292])\n', (1093, 1132), True, 'import numpy as np\n'), ((1272, 1318), 'numpy.array', 'np.array', (['[0.73974548, 0.95333412, 1.71951212]'], {}), '([0.73974548, 0.95333412, 1.71951212])\n', (1280, 1318), True, 'import numpy as np\n'), ((1457, 1505), 'numpy.array', 'np.array', (['[1.06406598, -0.08075812, -0.39625384]'], {}), '([1.06406598, -0.08075812, -0.39625384])\n', (1465, 1505), True, 'import numpy as np\n'), ((1918, 1962), 'numpy.array', 'np.array', (['[0.9689994, 0.99995764, 1.1218432]'], {}), '([0.9689994, 0.99995764, 1.1218432])\n', (1926, 1962), True, 'import numpy as np\n'), ((2111, 2158), 'numpy.array', 'np.array', (['[1.91998253, 0.99988784, -0.12416715]'], {}), '([1.91998253, 0.99988784, -0.12416715])\n', (2119, 2158), True, 'import numpy as np\n'), ((2309, 2353), 'numpy.array', 'np.array', (['[1.0130757, 0.9999554, 2.11229678]'], {}), '([1.0130757, 0.9999554, 2.11229678])\n', (2317, 2353), True, 'import numpy as np\n'), ((1033, 1070), 'numpy.array', 'np.array', (['[0.96907232, 1, 1.12179215]'], {}), '([0.96907232, 1, 1.12179215])\n', (1041, 1070), True, 'import numpy as np\n'), ((1220, 1257), 'numpy.array', 'np.array', (['[1.92001986, 1, -0.1241347]'], {}), '([1.92001986, 1, -0.1241347])\n', (1228, 1257), True, 'import numpy as np\n'), ((1406, 1442), 'numpy.array', 'np.array', (['[1.0131677, 1, 2.11217686]'], {}), '([1.0131677, 1, 2.11217686])\n', (1414, 1442), True, 'import numpy as np\n'), ((1856, 1903), 'numpy.array', 'np.array', (['[1.00300825, 0.01906918, -0.01369292]'], {}), '([1.00300825, 0.01906918, -0.01369292])\n', (1864, 1903), True, 'import numpy as np\n'), ((2050, 2096), 'numpy.array', 'np.array', (['[0.73974548, 0.95333412, 1.71951212]'], {}), '([0.73974548, 0.95333412, 1.71951212])\n', (2058, 2096), True, 'import numpy as np\n'), ((2246, 2294), 'numpy.array', 'np.array', (['[1.06406598, -0.08075812, 
-0.39625384]'], {}), '([1.06406598, -0.08075812, -0.39625384])\n', (2254, 2294), True, 'import numpy as np\n'), ((2717, 2754), 'numpy.array', 'np.array', (['[0.96907232, 1, 1.12179215]'], {}), '([0.96907232, 1, 1.12179215])\n', (2725, 2754), True, 'import numpy as np\n'), ((2879, 2916), 'numpy.array', 'np.array', (['[1.92001986, 1, -0.1241347]'], {}), '([1.92001986, 1, -0.1241347])\n', (2887, 2916), True, 'import numpy as np\n'), ((3042, 3078), 'numpy.array', 'np.array', (['[1.0131677, 1, 2.11217686]'], {}), '([1.0131677, 1, 2.11217686])\n', (3050, 3078), True, 'import numpy as np\n')] |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
    """Build the command-line parser for the mean-pooling script.

    Positional ``source`` and the required ``--split``/``--save-dir``
    options locate the input and output; ``--subsample-rate`` and
    ``--remove-extra`` tune how frames are pooled.
    """
    arg_parser = argparse.ArgumentParser(
        description="mean pools representations by compressing uniform splits of the data"
    )
    # fmt: off
    arg_parser.add_argument('source', help='directory with features')
    arg_parser.add_argument('--split', required=True, help='which split to read')
    arg_parser.add_argument('--save-dir', required=True, help='where to save the output')
    arg_parser.add_argument('--subsample-rate', default=0.5, type=float, help='size to subsample data to')
    arg_parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s')
    # fmt: on
    return arg_parser
def main():
    """Mean-pool pre-extracted feature frames to subsample a dataset split.

    Reads ``<source>/<split>.npy`` (memory-mapped) with its ``.lengths``
    sidecar, pools each utterance to roughly ``length * subsample_rate``
    frames by averaging uniform chunks, and writes the pooled features plus
    new lengths (and copied manifest/label files) into ``--save-dir``.
    """
    parser = get_parser()
    args = parser.parse_args()
    source_path = osp.join(args.source, args.split)
    print(f"data path: {source_path}")
    # mmap so the (potentially huge) feature matrix is never fully loaded into RAM
    features = np.load(source_path + ".npy", mmap_mode="r")
    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)
    # Carry over the manifest and any label files that accompany the split.
    copyfile(source_path + ".tsv", save_path + ".tsv")
    if os.path.exists(source_path + ".phn"):
        copyfile(source_path + ".phn", save_path + ".phn")
    if os.path.exists(source_path + ".wrd"):
        copyfile(source_path + ".wrd", save_path + ".wrd")
    if os.path.exists(osp.join(args.source, "dict.phn.txt")):
        copyfile(
            osp.join(args.source, "dict.phn.txt"),
            osp.join(args.save_dir, "dict.phn.txt"),
        )
    # NpyAppendArray appends to an existing file, so remove stale output first.
    if osp.exists(save_path + ".npy"):
        os.remove(save_path + ".npy")
    npaa = NpyAppendArray(save_path + ".npy")
    with open(source_path + ".lengths", "r") as lf:
        lengths = lf.readlines()
    fsz = features.shape[-1]  # feature dimensionality
    start = 0
    with torch.no_grad():
        with open(save_path + ".lengths", "w") as lengths_out:
            for length in tqdm.tqdm(lengths):
                length = int(length)
                end = start + length
                feats = features[start:end]
                start += length
                # NOTE(review): requires a CUDA device — pooling runs on GPU.
                x = torch.from_numpy(feats).cuda()
                target_num = math.ceil(length * args.subsample_rate)
                rem = length % target_num
                if rem > 0:
                    if args.remove_extra:
                        # drop trailing frames so the length divides evenly
                        to_rem = target_num - rem
                        target_num -= 1
                        x = x[:-to_rem]
                    else:
                        # pad by repeating the last frame instead of dropping
                        to_add = target_num - rem
                        x = F.pad(x, [0, 0, 0, to_add])
                        x[-to_add:] = x[-to_add - 1]
                # average each uniform chunk of frames down to one vector
                x = x.view(target_num, -1, fsz)
                x = x.mean(dim=-2)
                print(target_num, file=lengths_out)
                npaa.append(x.cpu().numpy())
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"math.ceil",
"os.makedirs",
"argparse.ArgumentParser",
"npy_append_array.NpyAppendArray",
"os.path.join",
"tqdm.tqdm",
"torch.from_numpy",
"shutil.copyfile",
"torch.nn.functional.pad",
"torch.no_grad",
"numpy.load",
"os.remove"
] | [((446, 558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""mean pools representations by compressing uniform splits of the data"""'}), "(description=\n 'mean pools representations by compressing uniform splits of the data')\n", (469, 558), False, 'import argparse\n'), ((1185, 1218), 'os.path.join', 'osp.join', (['args.source', 'args.split'], {}), '(args.source, args.split)\n', (1193, 1218), True, 'import os.path as osp\n'), ((1275, 1319), 'numpy.load', 'np.load', (["(source_path + '.npy')"], {'mmap_mode': '"""r"""'}), "(source_path + '.npy', mmap_mode='r')\n", (1282, 1319), True, 'import numpy as np\n'), ((1325, 1366), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {'exist_ok': '(True)'}), '(args.save_dir, exist_ok=True)\n', (1336, 1366), False, 'import os\n'), ((1383, 1418), 'os.path.join', 'osp.join', (['args.save_dir', 'args.split'], {}), '(args.save_dir, args.split)\n', (1391, 1418), True, 'import os.path as osp\n'), ((1424, 1474), 'shutil.copyfile', 'copyfile', (["(source_path + '.tsv')", "(save_path + '.tsv')"], {}), "(source_path + '.tsv', save_path + '.tsv')\n", (1432, 1474), False, 'from shutil import copyfile\n'), ((1483, 1519), 'os.path.exists', 'os.path.exists', (["(source_path + '.phn')"], {}), "(source_path + '.phn')\n", (1497, 1519), False, 'import os\n'), ((1587, 1623), 'os.path.exists', 'os.path.exists', (["(source_path + '.wrd')"], {}), "(source_path + '.wrd')\n", (1601, 1623), False, 'import os\n'), ((1887, 1917), 'os.path.exists', 'osp.exists', (["(save_path + '.npy')"], {}), "(save_path + '.npy')\n", (1897, 1917), True, 'import os.path as osp\n'), ((1968, 2002), 'npy_append_array.NpyAppendArray', 'NpyAppendArray', (["(save_path + '.npy')"], {}), "(save_path + '.npy')\n", (1982, 2002), False, 'from npy_append_array import NpyAppendArray\n'), ((1529, 1579), 'shutil.copyfile', 'copyfile', (["(source_path + '.phn')", "(save_path + '.phn')"], {}), "(source_path + '.phn', save_path + '.phn')\n", (1537, 1579), 
False, 'from shutil import copyfile\n'), ((1633, 1683), 'shutil.copyfile', 'copyfile', (["(source_path + '.wrd')", "(save_path + '.wrd')"], {}), "(source_path + '.wrd', save_path + '.wrd')\n", (1641, 1683), False, 'from shutil import copyfile\n'), ((1707, 1744), 'os.path.join', 'osp.join', (['args.source', '"""dict.phn.txt"""'], {}), "(args.source, 'dict.phn.txt')\n", (1715, 1744), True, 'import os.path as osp\n'), ((1927, 1956), 'os.remove', 'os.remove', (["(save_path + '.npy')"], {}), "(save_path + '.npy')\n", (1936, 1956), False, 'import os\n'), ((2142, 2157), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2155, 2157), False, 'import torch\n'), ((1777, 1814), 'os.path.join', 'osp.join', (['args.source', '"""dict.phn.txt"""'], {}), "(args.source, 'dict.phn.txt')\n", (1785, 1814), True, 'import os.path as osp\n'), ((1828, 1867), 'os.path.join', 'osp.join', (['args.save_dir', '"""dict.phn.txt"""'], {}), "(args.save_dir, 'dict.phn.txt')\n", (1836, 1867), True, 'import os.path as osp\n'), ((2248, 2266), 'tqdm.tqdm', 'tqdm.tqdm', (['lengths'], {}), '(lengths)\n', (2257, 2266), False, 'import tqdm\n'), ((2498, 2537), 'math.ceil', 'math.ceil', (['(length * args.subsample_rate)'], {}), '(length * args.subsample_rate)\n', (2507, 2537), False, 'import math\n'), ((2438, 2461), 'torch.from_numpy', 'torch.from_numpy', (['feats'], {}), '(feats)\n', (2454, 2461), False, 'import torch\n'), ((2885, 2912), 'torch.nn.functional.pad', 'F.pad', (['x', '[0, 0, 0, to_add]'], {}), '(x, [0, 0, 0, to_add])\n', (2890, 2912), True, 'import torch.nn.functional as F\n')] |
import copy
from collections import deque
import numpy as np
from domainbed.lib import swa_utils
class SWADBase:
    """Abstract interface for SWAD model-selection strategies."""
    def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
        """Consume one averaged segment together with its validation metrics."""
        raise NotImplementedError
    def get_final_model(self):
        """Return the model selected by the strategy."""
        raise NotImplementedError
class IIDMax(SWADBase):
    """SWAD start from iid max acc and select last by iid max swa acc"""
    def __init__(self, evaluator, **kwargs):
        self.iid_max_acc = 0.0
        self.swa_max_acc = 0.0
        self.avgmodel = None
        self.final_model = None
        self.evaluator = evaluator
    def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
        # Restart the running average whenever the IID validation accuracy
        # reaches a new maximum.
        if val_acc > self.iid_max_acc:
            self.iid_max_acc = val_acc
            self.avgmodel = swa_utils.AveragedModel(segment_swa.module, rm_optimizer=True)
            self.avgmodel.start_step = segment_swa.start_step
        self.avgmodel.update_parameters(segment_swa.module)
        self.avgmodel.end_step = segment_swa.end_step
        # Evaluate the current average and keep the best-scoring snapshot so far.
        accuracies, summaries = self.evaluator.evaluate(self.avgmodel)
        results = {**summaries, **accuracies}
        prt_fn(results, self.avgmodel)
        swa_val_acc = results["train_out"]
        if swa_val_acc > self.swa_max_acc:
            self.swa_max_acc = swa_val_acc
            self.final_model = copy.deepcopy(self.avgmodel)
    def get_final_model(self):
        return self.final_model
class LossValley(SWADBase):
    """IIDMax has a potential problem that bias to validation dataset.
    LossValley choose SWAD range by detecting loss valley.
    """
    def __init__(self, evaluator, n_converge, n_tolerance, tolerance_ratio, **kwargs):
        """
        Args:
            evaluator
            n_converge: converge detector window size.
            n_tolerance: loss min smoothing window size
            tolerance_ratio: decision ratio for dead loss valley
        """
        self.evaluator = evaluator
        self.n_converge = n_converge
        self.n_tolerance = n_tolerance
        self.tolerance_ratio = tolerance_ratio
        # Sliding windows of frozen segment-SWA snapshots (each carries end_loss).
        self.converge_Q = deque(maxlen=n_converge)
        self.smooth_Q = deque(maxlen=n_tolerance)
        self.final_model = None
        self.converge_step = None   # step at which the valley start was detected
        self.dead_valley = False    # once True, no further updates are accepted
        self.threshold = None       # loss level that marks the end of the valley
    def get_smooth_loss(self, idx):
        # Smoothed loss = minimum end_loss over the tail of the smoothing window.
        smooth_loss = min([model.end_loss for model in list(self.smooth_Q)[idx:]])
        return smooth_loss
    @property
    def is_converged(self):
        # Converged once a valley start step has been recorded.
        return self.converge_step is not None
    def update_and_evaluate(self, segment_swa, val_acc, val_loss, prt_fn):
        if self.dead_valley:
            return
        # Freeze this segment together with its validation loss.
        frozen = copy.deepcopy(segment_swa)
        frozen.end_loss = val_loss
        self.converge_Q.append(frozen)
        self.smooth_Q.append(frozen)
        if not self.is_converged:
            if len(self.converge_Q) < self.n_converge:
                return
            # Convergence criterion: the oldest snapshot in the window has the
            # minimum loss, i.e. the loss has not improved for n_converge steps.
            min_idx = np.argmin([model.end_loss for model in self.converge_Q])
            untilmin_segment_swa = self.converge_Q[min_idx]  # until-min segment swa.
            if min_idx == 0:
                self.converge_step = self.converge_Q[0].end_step
                self.final_model = swa_utils.AveragedModel(untilmin_segment_swa)
                # Valley-exit threshold: window-mean loss scaled by the tolerance.
                th_base = np.mean([model.end_loss for model in self.converge_Q])
                self.threshold = th_base * (1.0 + self.tolerance_ratio)
                # Seed the running average with the snapshots already inside the
                # detection window; which ones depends on the window-size relation.
                if self.n_tolerance < self.n_converge:
                    for i in range(self.n_converge - self.n_tolerance):
                        model = self.converge_Q[1 + i]
                        self.final_model.update_parameters(
                            model, start_step=model.start_step, end_step=model.end_step
                        )
                elif self.n_tolerance > self.n_converge:
                    # Walk backwards to the last snapshot above threshold and
                    # include everything after it.
                    converge_idx = self.n_tolerance - self.n_converge
                    Q = list(self.smooth_Q)[: converge_idx + 1]
                    start_idx = 0
                    for i in reversed(range(len(Q))):
                        model = Q[i]
                        if model.end_loss > self.threshold:
                            start_idx = i + 1
                            break
                    for model in Q[start_idx + 1 :]:
                        self.final_model.update_parameters(
                            model, start_step=model.start_step, end_step=model.end_step
                        )
                print(
                    f"Model converged at step {self.converge_step}, "
                    f"Start step = {self.final_model.start_step}; "
                    f"Threshold = {self.threshold:.6f}, "
                )
            return
        # Skip snapshots from before the detected convergence point.
        if self.smooth_Q[0].end_step < self.converge_step:
            return
        # converged -> loss valley
        min_vloss = self.get_smooth_loss(0)
        if min_vloss > self.threshold:
            # Smoothed loss climbed out of the valley: stop accumulating.
            self.dead_valley = True
            print(f"Valley is dead at step {self.final_model.end_step}")
            return
        model = self.smooth_Q[0]
        self.final_model.update_parameters(
            model, start_step=model.start_step, end_step=model.end_step
        )
    def get_final_model(self):
        if not self.is_converged:
            self.evaluator.logger.error(
                "Requested final model, but model is not yet converged; return last model instead"
            )
            return self.converge_Q[-1]
        if not self.dead_valley:
            # Valley never ended: flush the remaining in-valley snapshots from
            # the smoothing window into the final average.
            self.smooth_Q.popleft()
            while self.smooth_Q:
                smooth_loss = self.get_smooth_loss(0)
                if smooth_loss > self.threshold:
                    break
                segment_swa = self.smooth_Q.popleft()
                self.final_model.update_parameters(segment_swa, step=segment_swa.end_step)
        return self.final_model
| [
"numpy.mean",
"collections.deque",
"copy.deepcopy",
"numpy.argmin",
"domainbed.lib.swa_utils.AveragedModel"
] | [((2126, 2150), 'collections.deque', 'deque', ([], {'maxlen': 'n_converge'}), '(maxlen=n_converge)\n', (2131, 2150), False, 'from collections import deque\n'), ((2175, 2200), 'collections.deque', 'deque', ([], {'maxlen': 'n_tolerance'}), '(maxlen=n_tolerance)\n', (2180, 2200), False, 'from collections import deque\n'), ((2710, 2736), 'copy.deepcopy', 'copy.deepcopy', (['segment_swa'], {}), '(segment_swa)\n', (2723, 2736), False, 'import copy\n'), ((779, 841), 'domainbed.lib.swa_utils.AveragedModel', 'swa_utils.AveragedModel', (['segment_swa.module'], {'rm_optimizer': '(True)'}), '(segment_swa.module, rm_optimizer=True)\n', (802, 841), False, 'from domainbed.lib import swa_utils\n'), ((1356, 1384), 'copy.deepcopy', 'copy.deepcopy', (['self.avgmodel'], {}), '(self.avgmodel)\n', (1369, 1384), False, 'import copy\n'), ((2984, 3040), 'numpy.argmin', 'np.argmin', (['[model.end_loss for model in self.converge_Q]'], {}), '([model.end_loss for model in self.converge_Q])\n', (2993, 3040), True, 'import numpy as np\n'), ((3256, 3301), 'domainbed.lib.swa_utils.AveragedModel', 'swa_utils.AveragedModel', (['untilmin_segment_swa'], {}), '(untilmin_segment_swa)\n', (3279, 3301), False, 'from domainbed.lib import swa_utils\n'), ((3329, 3383), 'numpy.mean', 'np.mean', (['[model.end_loss for model in self.converge_Q]'], {}), '([model.end_loss for model in self.converge_Q])\n', (3336, 3383), True, 'import numpy as np\n')] |
import datajoint as dj
from . import lab, experiment, ccf
from . import get_schema_name
import numpy as np
from scipy.interpolate import CubicSpline
schema = dj.schema(get_schema_name('ephys'))
[lab, experiment, ccf] # NOQA flake8
@schema
class ProbeInsertion(dj.Manual):
    # One probe inserted during a session, identified by insertion number;
    # part tables hold the stereotactic location, recordable brain regions,
    # free-text notes and recording-system parameters.
    definition = """
    -> experiment.Session
    insertion_number: int
    ---
    -> lab.Probe
    -> lab.ElectrodeConfig
    """
    class InsertionLocation(dj.Part):
        definition = """
        -> master
        ---
        -> lab.SkullReference
        ap_location: decimal(6, 2) # (um) anterior-posterior; ref is 0; more anterior is more positive
        ml_location: decimal(6, 2) # (um) medial axis; ref is 0 ; more right is more positive
        depth: decimal(6, 2) # (um) manipulator depth relative to surface of the brain (0); more ventral is more negative
        theta: decimal(5, 2) # (deg) - elevation - rotation about the ml-axis [0, 180] - w.r.t the z+ axis
        phi: decimal(5, 2) # (deg) - azimuth - rotation about the dv-axis [0, 360] - w.r.t the x+ axis
        beta: decimal(5, 2) # (deg) rotation about the shank of the probe [-180, 180] - clockwise is increasing in degree - 0 is the probe-front facing anterior
        """
    class RecordableBrainRegion(dj.Part):
        definition = """
        -> master
        -> lab.BrainArea
        -> lab.Hemisphere
        """
    class InsertionNote(dj.Part):
        definition = """
        -> master
        ---
        insertion_note: varchar(1000)
        """
    class RecordingSystemSetup(dj.Part):
        definition = """
        -> master
        ---
        sampling_rate: int # (Hz)
        """
@schema
class LFP(dj.Imported):
    # Local field potential for one probe insertion; the cross-electrode mean
    # is stored here and per-electrode traces in the Channel part table.
    definition = """
    -> ProbeInsertion
    ---
    lfp_sample_rate: float # (Hz)
    lfp_time_stamps: longblob # timestamps with respect to the start of the recording (recording_timestamp)
    lfp_mean: longblob # mean of LFP across electrodes
    """
    class Channel(dj.Part):
        definition = """
        -> master
        -> lab.ElectrodeConfig.Electrode
        ---
        lfp: longblob # recorded lfp at this electrode
        """
@schema
class UnitQualityType(dj.Lookup):
    # Fixed vocabulary of spike-sorting quality labels.
    definition = """
    # Quality
    unit_quality  :  varchar(100)
    ---
    unit_quality_description :  varchar(4000)
    """
    contents = [
        ('good', 'single unit'),
        ('ok', 'probably a single unit, but could be contaminated'),
        ('multi', 'multi unit'),
        ('all', 'all units')
    ]
@schema
class CellType(dj.Lookup):
    # Fixed vocabulary of putative cell types assigned from waveform shape.
    definition = """
    #
    cell_type  :  varchar(100)
    ---
    cell_type_description :  varchar(4000)
    """
    contents = [
        ('Pyr', 'putative pyramidal'),
        ('FS', 'fast spiking'),
        ('not classified', 'intermediate spike-width that falls between spike-width thresholds for FS or Putative pyramidal cells'),
        ('all', 'all types')
    ]
@schema
class ClusteringMethod(dj.Lookup):
    # Supported spike-sorting / clustering pipelines.
    definition = """
    clustering_method: varchar(16)
    """
    # jrclust_v3 is the version Dave uses
    # jrclust_v4 is the version Susu uses
    # NOTE(review): zip(...) yields single-element tuples and is a one-shot
    # iterator — presumably consumed exactly once by DataJoint at declaration;
    # confirm, or use a list of tuples.
    contents = zip(['jrclust_v3', 'kilosort', 'jrclust_v4', 'kilosort2'])
@schema
class Unit(dj.Imported):
    """
    A certain portion of the recording is used for clustering (could very well be the entire recording)
    Thus, spike-times are relative to the 1st time point in this portion
    E.g. if clustering is performed from trial 8 to trial 200, then spike-times are relative to the start of trial 8
    """
    definition = """
    # Sorted unit
    -> ProbeInsertion
    -> ClusteringMethod
    unit: smallint
    ---
    unit_uid : int # unique across sessions/animals
    -> UnitQualityType
    -> lab.ElectrodeConfig.Electrode # site on the electrode for which the unit has the largest amplitude
    unit_posx : double # (um) estimated x position of the unit relative to probe's tip (0,0)
    unit_posy : double # (um) estimated y position of the unit relative to probe's tip (0,0)
    spike_times : longblob # (s) from the start of the first data point used in clustering
    unit_amp : double
    unit_snr : double
    waveform : blob # average spike waveform
    """
    class UnitTrial(dj.Part):
        # Trials during which this unit was recorded.
        definition = """
        # Entries for trials a unit is in
        -> master
        -> experiment.SessionTrial
        """
    class TrialSpikes(dj.Part):
        # Per-trial spike times, re-referenced to each trial's go-cue.
        definition = """
        #
        -> Unit
        -> experiment.SessionTrial
        ---
        spike_times : longblob # (s) per-trial spike times relative to go-cue
        """
@schema
class ClusteringLabel(dj.Imported):
    # Provenance of a unit's clustering result: generation time and whether
    # it underwent quality control and/or manual curation.
    definition = """
    -> Unit
    ---
    clustering_time: datetime # time of generation of this set of clustering results
    quality_control: bool # has this clustering results undergone quality control
    manual_curation: bool # is manual curation performed on this clustering result
    clustering_note=null: varchar(2000)
    """
@schema
class BrainAreaDepthCriteria(dj.Manual):
    # Manually-entered depth range (um, along the probe) assigned to a brain
    # area; consumed by UnitCoarseBrainLocation to map unit depth to an area.
    definition = """
    -> ProbeInsertion
    -> lab.BrainArea
    ---
    depth_upper: float  # (um)
    depth_lower: float  # (um)
    """
@schema
class UnitCoarseBrainLocation(dj.Computed):
    # Assign each unit a coarse brain area by matching its depth along the
    # probe (unit_posy) against the manually-entered BrainAreaDepthCriteria.
    definition = """
    # Estimated unit position in the brain
    -> Unit
    ---
    -> [nullable] lab.BrainArea
    -> [nullable] lab.Hemisphere
    """
    key_source = Unit & BrainAreaDepthCriteria
    def make(self, key):
        posy = (Unit & key).fetch1('unit_posy')
        # get brain location info from this ProbeInsertion
        brain_area, hemi, skull_ref = (experiment.BrainLocation & (ProbeInsertion.InsertionLocation & key)).fetch1(
            'brain_area', 'hemisphere', 'skull_reference')
        brain_area_rules = (BrainAreaDepthCriteria & key).fetch(as_dict=True, order_by='depth_upper')
        # validate rule - non-overlapping depth criteria
        if len(brain_area_rules) > 1:
            upper, lower = zip(*[(v['depth_upper'], v['depth_lower']) for v in brain_area_rules])
            # BUGFIX: the original test `((lower[:-1] - upper[1:]) >= 0).all()`
            # raised only when EVERY adjacent pair of depth ranges overlapped,
            # so a single overlapping pair slipped through; it also rejected
            # ranges that merely touch, which are valid for the half-open
            # intervals used below (depth_upper < posy <= depth_lower).
            if ((np.array(lower)[:-1] - np.array(upper)[1:]) > 0).any():
                raise Exception('Overlapping depth criteria')
        # First rule whose half-open interval contains the unit depth wins.
        coarse_brain_area = None
        for rule in brain_area_rules:
            if rule['depth_upper'] < posy <= rule['depth_lower']:
                coarse_brain_area = rule['brain_area']
                break
        if coarse_brain_area is None:
            # No rule matched: insert with null brain area / hemisphere.
            self.insert1(key)
        else:
            coarse_brain_location = (experiment.BrainLocation & {'brain_area': coarse_brain_area,
                                                                 'hemisphere': hemi,
                                                                 'skull_reference': skull_ref}).fetch1('KEY')
            self.insert1({**key, **coarse_brain_location})
@schema
class UnitComment(dj.Manual):
    # Free-text comment attached to a sorted unit.
    definition = """
    -> Unit
    unit_comment : varchar(767)
    """
@schema
class UnitCellType(dj.Computed):
    # Classify each unit as fast-spiking (FS) or putative pyramidal (Pyr)
    # from the trough-to-peak width of its mean spike waveform.
    definition = """
    -> Unit
    ---
    -> CellType
    """
    @property
    def key_source(self):
        # Exclude the aggregate 'all' pseudo-quality; classify real units only.
        return super().key_source & 'unit_quality != "all"'
    def make(self, key):
        upsample_factor = 100
        ave_waveform, fs = (ProbeInsertion.RecordingSystemSetup * Unit & key).fetch1('waveform', 'sampling_rate')
        # Upsample the waveform 100x with a cubic spline so the trough/peak
        # times can be located with sub-sample precision.
        cs = CubicSpline(range(len(ave_waveform)), ave_waveform)
        ave_waveform = cs(np.linspace(0, len(ave_waveform) - 1, (len(ave_waveform))*upsample_factor))
        fs = fs * upsample_factor
        x_min = np.argmin(ave_waveform) / fs  # time (s) of global trough
        x_max = np.argmax(ave_waveform) / fs  # time (s) of global peak
        waveform_width = abs(x_max-x_min) * 1000  # convert to ms
        # Narrow spikes (< 0.4 ms trough-to-peak) are labeled fast-spiking.
        self.insert1(dict(key,
                          cell_type='FS' if waveform_width < 0.4 else 'Pyr'))
@schema
class UnitStat(dj.Computed):
    # Per-unit spike-train statistics: ISI-violation (false-positive) rate
    # and average firing rate, computed across all trials of an insertion.
    definition = """
    -> Unit
    ---
    isi_violation=null: float #
    avg_firing_rate=null: float # (Hz)
    """
    isi_threshold = 0.002  # threshold for isi violation of 2 ms
    min_isi = 0  # threshold for duplicate spikes
    # NOTE - this key_source logic relies on ALL TrialSpikes ingest all at once in a transaction
    key_source = ProbeInsertion & Unit.TrialSpikes
    def make(self, key):
        # Following isi_violations() function
        # Ref: https://github.com/AllenInstitute/ecephys_spike_sorting/blob/master/ecephys_spike_sorting/modules/quality_metrics/metrics.py
        def make_insert():
            for unit in (Unit & key).fetch('KEY'):
                trial_spikes, tr_start, tr_stop = (Unit.TrialSpikes * experiment.SessionTrial & unit).fetch(
                    'spike_times', 'start_time', 'stop_time')
                isis = np.hstack(np.diff(spks) for spks in trial_spikes)
                if isis.size > 0:
                    # remove duplicated spikes
                    processed_trial_spikes = []
                    for spike_train in trial_spikes:
                        duplicate_spikes = np.where(np.diff(spike_train) <= self.min_isi)[0]
                        processed_trial_spikes.append(np.delete(spike_train, duplicate_spikes + 1))
                    num_spikes = len(np.hstack(processed_trial_spikes))
                    avg_firing_rate = num_spikes / float(sum(tr_stop - tr_start))
                    # BUGFIX: was `self.isi_violation_thresh`, which is never
                    # defined on this class (AttributeError at runtime); the
                    # class attribute defined above is `isi_threshold`.
                    num_violations = sum(isis < self.isi_threshold)
                    violation_time = 2 * num_spikes * (self.isi_threshold - self.min_isi)
                    violation_rate = num_violations / violation_time
                    fpRate = violation_rate / avg_firing_rate
                    yield {**unit, 'isi_violation': fpRate, 'avg_firing_rate': avg_firing_rate}
                else:
                    # No inter-spike intervals at all: store nulls.
                    yield {**unit, 'isi_violation': None, 'avg_firing_rate': None}
        self.insert(make_insert())
@schema
class ClusterMetric(dj.Imported):
    # Cluster-quality metrics per unit and epoch, following the Allen
    # Institute ecephys_spike_sorting quality-metrics definitions.
    definition = """
    # Quality metrics for sorted unit
    # Ref: https://github.com/AllenInstitute/ecephys_spike_sorting/blob/master/ecephys_spike_sorting/modules/quality_metrics/README.md
    -> Unit
    epoch_name_quality_metrics: varchar(64)
    ---
    presence_ratio: float # Fraction of epoch in which spikes are present
    amplitude_cutoff: float # Estimate of miss rate based on amplitude histogram
    isolation_distance=null: float # Distance to nearest cluster in Mahalanobis space
    l_ratio=null: float #
    d_prime=null: float # Classification accuracy based on LDA
    nn_hit_rate=null: float #
    nn_miss_rate=null: float
    silhouette_score=null: float # Standard metric for cluster overlap
    max_drift=null: float # Maximum change in spike depth throughout recording
    cumulative_drift=null: float # Cumulative change in spike depth throughout recording
    """
@schema
class WaveformMetric(dj.Imported):
    # Waveform-shape metrics per unit and epoch (all nullable).
    definition = """
    -> Unit
    epoch_name_waveform_metrics: varchar(64)
    ---
    duration=null: float
    halfwidth=null: float
    pt_ratio=null: float
    repolarization_slope=null: float
    recovery_slope=null: float
    spread=null: float
    velocity_above=null: float
    velocity_below=null: float
    """
# TODO: confirm the logic/need for this table
@schema
class UnitCCF(dj.Computed):
    # Maps each unit to a CCF (common coordinate framework) location;
    # no make() is visible in this chunk.
    definition = """
    -> Unit
    ---
    -> ccf.CCF
    """
| [
"numpy.hstack",
"numpy.delete",
"numpy.diff",
"numpy.argmax",
"numpy.array",
"numpy.argmin"
] | [((7603, 7626), 'numpy.argmin', 'np.argmin', (['ave_waveform'], {}), '(ave_waveform)\n', (7612, 7626), True, 'import numpy as np\n'), ((7648, 7671), 'numpy.argmax', 'np.argmax', (['ave_waveform'], {}), '(ave_waveform)\n', (7657, 7671), True, 'import numpy as np\n'), ((8777, 8790), 'numpy.diff', 'np.diff', (['spks'], {}), '(spks)\n', (8784, 8790), True, 'import numpy as np\n'), ((9231, 9264), 'numpy.hstack', 'np.hstack', (['processed_trial_spikes'], {}), '(processed_trial_spikes)\n', (9240, 9264), True, 'import numpy as np\n'), ((9147, 9191), 'numpy.delete', 'np.delete', (['spike_train', '(duplicate_spikes + 1)'], {}), '(spike_train, duplicate_spikes + 1)\n', (9156, 9191), True, 'import numpy as np\n'), ((6123, 6138), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (6131, 6138), True, 'import numpy as np\n'), ((6146, 6161), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (6154, 6161), True, 'import numpy as np\n'), ((9052, 9072), 'numpy.diff', 'np.diff', (['spike_train'], {}), '(spike_train)\n', (9059, 9072), True, 'import numpy as np\n')] |
"""
Plot modules.
"""
#=============================================================================
#Modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
def contour_plot(array3d, **kwargs):
    """Do the contour plot of a 3D array.

    Parameters
    ----------
    array3d : array,
        3 dimensional array vector: columns are x, y, z.

    Optional keyword arguments:

    xlabel : str,
        label for x-axis.
    ylabel : str,
        label for y-axis.
    title : str,
        Title for plot.
    save : str,
        Name for saved file (defaults to 'contour.pdf').
    """
    # The x/y columns are assumed to enumerate a regular grid; reshape all
    # three columns into (n_x, n_y) matrices for matplotlib.
    n_x = len(np.unique(array3d[:, 0]))
    n_y = len(np.unique(array3d[:, 1]))
    X = np.reshape(array3d[:, 0], (n_x, n_y))
    Y = np.reshape(array3d[:, 1], (n_x, n_y))
    Z = np.reshape(array3d[:, 2], (n_x, n_y))
    #Do contour plot
    plt.figure()
    CS = plt.contour(X, Y, Z)
    plt.clabel(CS, inline = 1, fontsize = 10)
    # BUGFIX: dict.has_key() was removed in Python 3 (AttributeError);
    # membership is tested with the `in` operator instead.
    if 'xlabel' in kwargs:
        plt.xlabel(kwargs['xlabel'])
    if 'ylabel' in kwargs:
        plt.ylabel(kwargs['ylabel'])
    if 'title' in kwargs:
        plt.title(kwargs['title'])
    if 'save' in kwargs:
        plt.savefig(kwargs['save'])
    else:
        plt.savefig('contour.pdf')
    plt.clf()
def color_map_plot(X_vector, Y_vector, Z_vector, num = 100, **kwargs):
    """
    Do a color map of a X, Y, Z vector.

    Parameters
    ----------
    X_vector : list,
        x-axis vector
    Y_vector : list,
        y-axis vector
    Z_vector :
        color-axis vector
    num : int, optional
        Number of samples to generate. Default is 100.

    Optional keyword arguments:

    contour : bool,
        If True, add contours.
    map_name : str,
        Color map name
    xlabel : str,
        label for x-axis.
    ylabel : str,
        label for y-axis.
    bar_label : str,
        label for color bar
    title : str,
        Title for plot.
    save_name : str,
        Name for saved file (defaults to 'color_map.pdf').

    Adapted from <NAME> code.
    """
    # Interpolate the scattered samples onto a regular num x num grid.
    xi = np.linspace(min(X_vector), max(X_vector), num)
    yi = np.linspace(min(Y_vector), max(Y_vector), num)
    # NOTE(review): matplotlib.mlab.griddata was removed in matplotlib 3.0;
    # scipy.interpolate.griddata is the modern replacement — confirm the
    # matplotlib version pinned by this project.
    zi = griddata(X_vector, Y_vector, Z_vector, xi, yi)
    #Do contour plot
    if 'contour' in kwargs:
        plt.contour(xi, yi, zi, linewidths = 0.5, colors = 'k')
    if 'map_name' in kwargs:
        map_id = plt.get_cmap(kwargs['map_name'])
    else:
        map_id = None
    plt.pcolormesh(xi, yi, zi, cmap = map_id)
    cbar = plt.colorbar()
    # BUGFIX: bar_label is documented as optional but was accessed
    # unconditionally, raising KeyError when omitted.
    if 'bar_label' in kwargs:
        cbar.set_label(kwargs['bar_label'])
    # BUGFIX: dict.has_key() was removed in Python 3 (AttributeError);
    # use the `in` operator, consistent with the checks above.
    if 'xlabel' in kwargs:
        plt.xlabel(kwargs['xlabel'])
    if 'ylabel' in kwargs:
        plt.ylabel(kwargs['ylabel'])
    if 'title' in kwargs:
        plt.title(kwargs['title'])
    if 'save_name' in kwargs:
        plt.savefig(kwargs['save_name'])
    else:
        plt.savefig('color_map.pdf')
    plt.clf()
'''
#=============================================================================
# Contruct a spectral line intensity contour plot
def flux_countour_plot(spec_line, **kwargs):
"""Contruct a contour plot of the spectral line flux."""
teff_range = range(15000, 30000, 500)
logg_range = np.arange(3.5, 4.6, 0.1)
for i, grav in enumerate(logg_range):
if int(10*grav) == 45:
logg_range[i] = 4.499
int_storage = np.zeros([len(teff_range)*len(logg_range), 3])
#Set some synplot defaults
kwargs['noplot'] = 1
kwargs['relative'] = 1
for count, pair in \
enumerate([[i,j] for i in teff_range for j in logg_range]):
#Prepare for synplot
kwargs['teff'] = pair[0]
kwargs['logg'] = pair[1]
kwargs['wstart'] = spec_line - 30
kwargs['wend'] = spec_line + 30
# Calculate the spectra
spec = synplot(**kwargs)
spec.run()
#Find the intensity
int_storage[count] = pair[0], pair[1], \
spectra.line_position(spec.spectra, \
spec_line)[1]
#print count,pair[0],pair[1], int_storage[count,2]
#Do the contour plot
contour_plot(int_storage, xlabel = 'Teff', ylabel = 'logg', \
title = str(spec_line)+r' $\AA$', \
save = 'contour_'+str(spec_line)+'.pdf')
if kwargs.has_key('return'):
return int_storage
#=============================================================================
''' | [
"numpy.reshape",
"numpy.unique",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.mlab.griddata",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.figure",
"mat... | [((833, 870), 'numpy.reshape', 'np.reshape', (['array3d[:, 0]', '(n_x, n_y)'], {}), '(array3d[:, 0], (n_x, n_y))\n', (843, 870), True, 'import numpy as np\n'), ((880, 917), 'numpy.reshape', 'np.reshape', (['array3d[:, 1]', '(n_x, n_y)'], {}), '(array3d[:, 1], (n_x, n_y))\n', (890, 917), True, 'import numpy as np\n'), ((926, 963), 'numpy.reshape', 'np.reshape', (['array3d[:, 2]', '(n_x, n_y)'], {}), '(array3d[:, 2], (n_x, n_y))\n', (936, 963), True, 'import numpy as np\n'), ((994, 1006), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1004, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1036), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (1027, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1078), 'matplotlib.pyplot.clabel', 'plt.clabel', (['CS'], {'inline': '(1)', 'fontsize': '(10)'}), '(CS, inline=1, fontsize=10)\n', (1051, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1433), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1431, 1433), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2441), 'matplotlib.mlab.griddata', 'griddata', (['X_vector', 'Y_vector', 'Z_vector', 'xi', 'yi'], {}), '(X_vector, Y_vector, Z_vector, xi, yi)\n', (2403, 2441), False, 'from matplotlib.mlab import griddata\n'), ((2696, 2735), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xi', 'yi', 'zi'], {'cmap': 'map_id'}), '(xi, yi, zi, cmap=map_id)\n', (2710, 2735), True, 'import matplotlib.pyplot as plt\n'), ((2749, 2763), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2761, 2763), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3172), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3170, 3172), True, 'import matplotlib.pyplot as plt\n'), ((754, 778), 'numpy.unique', 'np.unique', (['array3d[:, 0]'], {}), '(array3d[:, 0])\n', (763, 778), True, 'import numpy as np\n'), ((794, 818), 'numpy.unique', 'np.unique', (['array3d[:, 1]'], {}), '(array3d[:, 1])\n', 
(803, 818), True, 'import numpy as np\n'), ((1125, 1153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["kwargs['xlabel']"], {}), "(kwargs['xlabel'])\n", (1135, 1153), True, 'import matplotlib.pyplot as plt\n'), ((1195, 1223), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["kwargs['ylabel']"], {}), "(kwargs['ylabel'])\n", (1205, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1290), 'matplotlib.pyplot.title', 'plt.title', (["kwargs['title']"], {}), "(kwargs['title'])\n", (1273, 1290), True, 'import matplotlib.pyplot as plt\n'), ((1339, 1366), 'matplotlib.pyplot.savefig', 'plt.savefig', (["kwargs['save']"], {}), "(kwargs['save'])\n", (1350, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1419), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""contour.pdf"""'], {}), "('contour.pdf')\n", (1404, 1419), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2559), 'matplotlib.pyplot.contour', 'plt.contour', (['xi', 'yi', 'zi'], {'linewidths': '(0.5)', 'colors': '"""k"""'}), "(xi, yi, zi, linewidths=0.5, colors='k')\n", (2519, 2559), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2650), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (["kwargs['map_name']"], {}), "(kwargs['map_name'])\n", (2630, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2852, 2880), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["kwargs['xlabel']"], {}), "(kwargs['xlabel'])\n", (2862, 2880), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2950), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["kwargs['ylabel']"], {}), "(kwargs['ylabel'])\n", (2932, 2950), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3017), 'matplotlib.pyplot.title', 'plt.title', (["kwargs['title']"], {}), "(kwargs['title'])\n", (3000, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3103), 'matplotlib.pyplot.savefig', 'plt.savefig', (["kwargs['save_name']"], {}), "(kwargs['save_name'])\n", (3082, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3158), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""color_map.pdf"""'], {}), "('color_map.pdf')\n", (3141, 3158), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Geometric example
=================
A small example script showing the usage of the 'geographic' coordinates type
for ordinary kriging on a sphere.
"""
from pykrige.ok import OrdinaryKriging
import numpy as np
from matplotlib import pyplot as plt
# Make this example reproducible:
np.random.seed(89239413)
# Generate random data following a uniform spatial distribution
# of nodes and a uniform distribution of values in the interval
# [2.0, 5.5]:
N = 7
lon = 360.0 * np.random.random(N)
# arcsin of a uniform variable yields latitudes uniform in area on the sphere
lat = 180.0 / np.pi * np.arcsin(2 * np.random.random(N) - 1)
z = 3.5 * np.random.rand(N) + 2.0
# Generate a regular grid with 60° longitude and 30° latitude steps:
grid_lon = np.linspace(0.0, 360.0, 7)
grid_lat = np.linspace(-90.0, 90.0, 7)
# Create ordinary kriging object:
OK = OrdinaryKriging(
    lon,
    lat,
    z,
    variogram_model="linear",
    verbose=False,
    enable_plotting=False,
    coordinates_type="geographic",
)
# Execute on grid:
z1, ss1 = OK.execute("grid", grid_lon, grid_lat)
# Create ordinary kriging object ignoring curvature:
OK = OrdinaryKriging(
    lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False
)
# Execute on grid:
z2, ss2 = OK.execute("grid", grid_lon, grid_lat)
###############################################################################
# Print data at equator (last longitude index will show periodicity):
print("Original data:")
print("Longitude:", lon.astype(int))
print("Latitude: ", lat.astype(int))
print("z: ", np.array_str(z, precision=2))
# Latitude index 5 of grid_lat corresponds to 60° N.
print("\nKrige at 60° latitude:\n======================")
print("Longitude:", grid_lon)
print("Value: ", np.array_str(z1[5, :], precision=2))
print("Sigma²: ", np.array_str(ss1[5, :], precision=2))
print("\nIgnoring curvature:\n=====================")
print("Value: ", np.array_str(z2[5, :], precision=2))
print("Sigma²: ", np.array_str(ss2[5, :], precision=2))
###############################################################################
# We can see that the data point at longitude 122, latitude 50 correctly
# dominates the kriged results, since it is the closest node in spherical
# distance metric, as longitude differences scale with cos(latitude).
# When kriging using longitude / latitude linearly, the value for grid points
# with longitude values further away as longitude is now incorrectly
# weighted equally as latitude.
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(z1, extent=[0, 360, -90, 90], origin="lower")
ax1.set_title("geo-coordinates")
ax2.imshow(z2, extent=[0, 360, -90, 90], origin="lower")
ax2.set_title("non geo-coordinates")
plt.show()
| [
"pykrige.ok.OrdinaryKriging",
"numpy.random.rand",
"numpy.array_str",
"numpy.random.random",
"numpy.linspace",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((313, 337), 'numpy.random.seed', 'np.random.seed', (['(89239413)'], {}), '(89239413)\n', (327, 337), True, 'import numpy as np\n'), ((697, 723), 'numpy.linspace', 'np.linspace', (['(0.0)', '(360.0)', '(7)'], {}), '(0.0, 360.0, 7)\n', (708, 723), True, 'import numpy as np\n'), ((735, 762), 'numpy.linspace', 'np.linspace', (['(-90.0)', '(90.0)', '(7)'], {}), '(-90.0, 90.0, 7)\n', (746, 762), True, 'import numpy as np\n'), ((803, 930), 'pykrige.ok.OrdinaryKriging', 'OrdinaryKriging', (['lon', 'lat', 'z'], {'variogram_model': '"""linear"""', 'verbose': '(False)', 'enable_plotting': '(False)', 'coordinates_type': '"""geographic"""'}), "(lon, lat, z, variogram_model='linear', verbose=False,\n enable_plotting=False, coordinates_type='geographic')\n", (818, 930), False, 'from pykrige.ok import OrdinaryKriging\n'), ((1086, 1182), 'pykrige.ok.OrdinaryKriging', 'OrdinaryKriging', (['lon', 'lat', 'z'], {'variogram_model': '"""linear"""', 'verbose': '(False)', 'enable_plotting': '(False)'}), "(lon, lat, z, variogram_model='linear', verbose=False,\n enable_plotting=False)\n", (1101, 1182), False, 'from pykrige.ok import OrdinaryKriging\n'), ((2422, 2440), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2434, 2440), True, 'from matplotlib import pyplot as plt\n'), ((2625, 2635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2633, 2635), True, 'from matplotlib import pyplot as plt\n'), ((501, 520), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (517, 520), True, 'import numpy as np\n'), ((1524, 1552), 'numpy.array_str', 'np.array_str', (['z'], {'precision': '(2)'}), '(z, precision=2)\n', (1536, 1552), True, 'import numpy as np\n'), ((1662, 1697), 'numpy.array_str', 'np.array_str', (['z1[5, :]'], {'precision': '(2)'}), '(z1[5, :], precision=2)\n', (1674, 1697), True, 'import numpy as np\n'), ((1720, 1756), 'numpy.array_str', 'np.array_str', (['ss1[5, :]'], {'precision': '(2)'}), '(ss1[5, :], precision=2)\n', (1732, 
1756), True, 'import numpy as np\n'), ((1831, 1866), 'numpy.array_str', 'np.array_str', (['z2[5, :]'], {'precision': '(2)'}), '(z2[5, :], precision=2)\n', (1843, 1866), True, 'import numpy as np\n'), ((1889, 1925), 'numpy.array_str', 'np.array_str', (['ss2[5, :]'], {'precision': '(2)'}), '(ss2[5, :], precision=2)\n', (1901, 1925), True, 'import numpy as np\n'), ((592, 609), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (606, 609), True, 'import numpy as np\n'), ((557, 576), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (573, 576), True, 'import numpy as np\n')] |
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import logging
import numpy as np
import unittest
from panda3d.core import NodePath
from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable
from home_platform.suncg import loadModel, SunCgSceneLoader
# Root directory of the test fixtures (../data relative to this file).
TEST_DATA_DIR = os.path.join(os.path.dirname(
    os.path.realpath(__file__)), "..", "data")
# SUNCG-specific fixtures (object models, metadata CSVs) below the data dir.
TEST_SUNCG_DATA_DIR = os.path.join(os.path.dirname(
    os.path.realpath(__file__)), "..", "data", "suncg")
class TestMaterialColorTable(unittest.TestCase):
    """Tests for MaterialColorTable.getColorsFromObject over SUNCG models."""

    def _loadObject(self, modelFilename, modelId):
        # Load an .egg model and reparent it under an 'object-<id>' NodePath,
        # mirroring the node layout produced by the SUNCG scene loader.
        # Use a unittest assertion rather than a bare `assert`, which is
        # stripped when Python runs with -O.
        self.assertTrue(os.path.exists(modelFilename))
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        return obj

    def _loadSuncgObject(self, modelId):
        # Resolve a model id to its .egg file inside the SUNCG test data.
        modelFilename = os.path.join(
            TEST_SUNCG_DATA_DIR, "object", str(modelId), str(modelId) + ".egg")
        return self._loadObject(modelFilename, modelId)

    def testGetColorsFromObjectBasic(self):
        # 'basic' mode maps the dominant material color to a coarse name.
        obj = self._loadSuncgObject('317')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertEqual(len(colorDescriptions), 1)
        self.assertEqual(colorDescriptions[0], "silver")

        obj = self._loadSuncgObject('83')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertEqual(len(colorDescriptions), 1)
        self.assertEqual(colorDescriptions[0], "white")

    def testGetColorsFromObjectTransparent(self):
        # A (transparent) sphere fixture should still yield one color name.
        modelFilename = os.path.join(TEST_DATA_DIR, "models", "sphere.egg")
        obj = self._loadObject(modelFilename, 'sphere')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertEqual(len(colorDescriptions), 1)
        self.assertEqual(colorDescriptions[0], "maroon")

    def testGetColorsFromObjectAdvanced(self):
        # 'advanced' mode uses a finer color vocabulary; by default colors
        # whose relative surface area is small are filtered out.
        obj = self._loadSuncgObject('317')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced')
        self.assertEqual(len(colorDescriptions), 1)
        self.assertEqual(colorDescriptions[0], "navajo white")

        # Disabling the area threshold also reports the minor color.
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced', thresholdRelArea=0.0)
        self.assertEqual(len(colorDescriptions), 2)
        self.assertIn("navajo white", colorDescriptions)
        self.assertIn("dark slate gray", colorDescriptions)

        obj = self._loadSuncgObject('210')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced')
        self.assertEqual(len(colorDescriptions), 2)
        self.assertIn("dark gray", colorDescriptions)
        self.assertIn("cadet blue", colorDescriptions)

    def testGetColorsFromObjectXkcd(self):
        # 'xkcd' mode maps colors to the crowd-sourced xkcd color names.
        obj = self._loadSuncgObject('317')
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='xkcd')
        self.assertEqual(len(colorDescriptions), 1)
        self.assertEqual(colorDescriptions[0], "light peach")
class TestMaterialTable(unittest.TestCase):
    """Tests for MaterialTable material-name extraction from SUNCG models."""

    def testGetMaterialNameFromObject(self):
        modelId = '317'
        modelFilename = os.path.join(
            TEST_SUNCG_DATA_DIR, "object", str(modelId), str(modelId) + ".egg")
        # Use a unittest assertion rather than a bare `assert`, which is
        # stripped when Python runs with -O.
        self.assertTrue(os.path.exists(modelFilename))
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)

        materialDescriptions = MaterialTable.getMaterialNameFromObject(obj)
        self.assertEqual(len(materialDescriptions), 1)
        self.assertEqual(materialDescriptions[0], "wood")

        # The result should be unchanged without the relative-area filter.
        materialDescriptions = MaterialTable.getMaterialNameFromObject(
            obj, thresholdRelArea=0.0)
        self.assertEqual(len(materialDescriptions), 1)
        self.assertEqual(materialDescriptions[0], "wood")
class TestDimensionTable(unittest.TestCase):
    """Tests for DimensionTable size-category lookup by model id."""

    def testGetDimensionsFromObject(self):
        modelId = '274'
        modelFilename = os.path.join(
            TEST_SUNCG_DATA_DIR, "object", str(modelId), str(modelId) + ".egg")
        # Use a unittest assertion rather than a bare `assert`, which is
        # stripped when Python runs with -O.
        self.assertTrue(os.path.exists(modelFilename))
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        # XXX: should use the full metadata files if descriptors are not
        # precomputed
        modelInfoFilename = os.path.join(
            TEST_SUNCG_DATA_DIR, "metadata", "models.csv")
        modelCatFilename = os.path.join(
            TEST_SUNCG_DATA_DIR, "metadata", "ModelCategoryMapping.csv")
        dimensionDescription = DimensionTable().getDimensionsFromModelId(
            modelId, modelInfoFilename, modelCatFilename)
        self.assertEqual(dimensionDescription, 'normal')
class TestSuncgSemantics(unittest.TestCase):
    """Test natural-language description of objects in a SUNCG house."""

    def testDescribe(self):
        houseId = "0004d52d1aeeb8ae6de39d6bd993e992"
        scene = SunCgSceneLoader.loadHouseFromJson(houseId, TEST_SUNCG_DATA_DIR)
        semantics = SuncgSemantics(scene, TEST_SUNCG_DATA_DIR)
        # Pick a known object node and check its generated description.
        node = scene.scene.find('**/object-561*')
        description = semantics.describeObject(node)
        self.assertTrue(description == "small linen coffee table made of wood")
if __name__ == '__main__':
    # Show WARNING and above from library loggers during the test run.
    logging.basicConfig(level=logging.WARN)
    # Turn numpy floating-point warnings into exceptions so they fail loudly.
    np.seterr(all='raise')
    unittest.main()
| [
"logging.basicConfig",
"os.path.exists",
"os.path.join",
"home_platform.semantic.SuncgSemantics",
"home_platform.semantic.MaterialColorTable.getColorsFromObject",
"os.path.realpath",
"home_platform.suncg.SunCgSceneLoader.loadHouseFromJson",
"home_platform.suncg.loadModel",
"home_platform.semantic.Ma... | [((8243, 8282), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (8262, 8282), False, 'import logging\n'), ((8287, 8309), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (8296, 8309), True, 'import numpy as np\n'), ((8314, 8329), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8327, 8329), False, 'import unittest\n'), ((1855, 1881), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1871, 1881), False, 'import os\n'), ((1954, 1980), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1970, 1980), False, 'import os\n'), ((2258, 2287), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (2272, 2287), False, 'import os\n'), ((2304, 2328), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (2313, 2328), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((2483, 2540), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""basic"""'}), "(obj, mode='basic')\n", (2521, 2540), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((2822, 2851), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (2836, 2851), False, 'import os\n'), ((2868, 2892), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (2877, 2892), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((3047, 3104), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""basic"""'}), "(obj, mode='basic')\n", (3085, 3104), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((3330, 3381), 
'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""models"""', '"""sphere.egg"""'], {}), "(TEST_DATA_DIR, 'models', 'sphere.egg')\n", (3342, 3381), False, 'import os\n'), ((3397, 3426), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (3411, 3426), False, 'import os\n'), ((3443, 3467), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (3452, 3467), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((3622, 3679), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""basic"""'}), "(obj, mode='basic')\n", (3660, 3679), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((4009, 4038), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (4023, 4038), False, 'import os\n'), ((4055, 4079), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (4064, 4079), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((4234, 4294), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""advanced"""'}), "(obj, mode='advanced')\n", (4272, 4294), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((4454, 4540), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""advanced"""', 'thresholdRelArea': '(0.0)'}), "(obj, mode='advanced',\n thresholdRelArea=0.0)\n", (4492, 4540), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((4886, 4915), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (4900, 4915), False, 'import os\n'), ((4932, 4956), 
'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (4941, 4956), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((5111, 5171), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""advanced"""'}), "(obj, mode='advanced')\n", (5149, 5171), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((5556, 5585), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (5570, 5585), False, 'import os\n'), ((5602, 5626), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (5611, 5626), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((5781, 5837), 'home_platform.semantic.MaterialColorTable.getColorsFromObject', 'MaterialColorTable.getColorsFromObject', (['obj'], {'mode': '"""xkcd"""'}), "(obj, mode='xkcd')\n", (5819, 5837), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((6215, 6244), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (6229, 6244), False, 'import os\n'), ((6261, 6285), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (6270, 6285), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((6443, 6487), 'home_platform.semantic.MaterialTable.getMaterialNameFromObject', 'MaterialTable.getMaterialNameFromObject', (['obj'], {}), '(obj)\n', (6482, 6487), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((6635, 6701), 'home_platform.semantic.MaterialTable.getMaterialNameFromObject', 'MaterialTable.getMaterialNameFromObject', (['obj'], {'thresholdRelArea': '(0.0)'}), '(obj, thresholdRelArea=0.0)\n', (6674, 6701), False, 'from home_platform.semantic import 
MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((7077, 7106), 'os.path.exists', 'os.path.exists', (['modelFilename'], {}), '(modelFilename)\n', (7091, 7106), False, 'import os\n'), ((7123, 7147), 'home_platform.suncg.loadModel', 'loadModel', (['modelFilename'], {}), '(modelFilename)\n', (7132, 7147), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((7398, 7457), 'os.path.join', 'os.path.join', (['TEST_SUNCG_DATA_DIR', '"""metadata"""', '"""models.csv"""'], {}), "(TEST_SUNCG_DATA_DIR, 'metadata', 'models.csv')\n", (7410, 7457), False, 'import os\n'), ((7498, 7571), 'os.path.join', 'os.path.join', (['TEST_SUNCG_DATA_DIR', '"""metadata"""', '"""ModelCategoryMapping.csv"""'], {}), "(TEST_SUNCG_DATA_DIR, 'metadata', 'ModelCategoryMapping.csv')\n", (7510, 7571), False, 'import os\n'), ((7866, 7961), 'home_platform.suncg.SunCgSceneLoader.loadHouseFromJson', 'SunCgSceneLoader.loadHouseFromJson', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""', 'TEST_SUNCG_DATA_DIR'], {}), "('0004d52d1aeeb8ae6de39d6bd993e992',\n TEST_SUNCG_DATA_DIR)\n", (7900, 7961), False, 'from home_platform.suncg import loadModel, SunCgSceneLoader\n'), ((7991, 8033), 'home_platform.semantic.SuncgSemantics', 'SuncgSemantics', (['scene', 'TEST_SUNCG_DATA_DIR'], {}), '(scene, TEST_SUNCG_DATA_DIR)\n', (8005, 8033), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n'), ((7616, 7632), 'home_platform.semantic.DimensionTable', 'DimensionTable', ([], {}), '()\n', (7630, 7632), False, 'from home_platform.semantic import MaterialColorTable, SuncgSemantics, MaterialTable, DimensionTable\n')] |
import numpy as np
import scipy.ndimage as ndimage
from skimage import measure, morphology, segmentation
from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing
from skimage.measure import label,regionprops, perimeter
from skimage.morphology import binary_dilation, binary_opening
from skimage.filters import roberts, sobel, threshold_otsu
from skimage.segmentation import clear_border, mark_boundaries
import matplotlib.pyplot as plt
import cv2
def generate_markers(image):
    """Build internal / external / watershed markers for a lung CT slice.

    :param image: 2D CT slice in Hounsfield units.
    :return: tuple ``(marker_internal, marker_external, marker_watershed)``
        where the first two are boolean masks and the last is an integer
        label image (255 = internal marker, 128 = external marker).
    """
    # Internal marker: air-like voxels (< -400 HU), with blobs touching the
    # image border removed and only the two largest regions kept (the lungs).
    marker_internal = image < -400
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    # External marker: a ring well outside the lungs, taken as the XOR
    # between a small and a large dilation of the internal marker.
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    # Watershed marker matrix. `np.int` was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin `int` is the exact equivalent. Also use
    # the slice's own shape instead of assuming 512x512 input.
    marker_watershed = np.zeros(image.shape, dtype=int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
def seperate_lungs(image):
    """Segment the lungs from a 2D CT slice using marker-based watershed.

    :param image: 2D CT slice in Hounsfield units.
    :return: tuple ``(segmented, lungfilter, outline, watershed,
        sobel_gradient, marker_internal, marker_external, marker_watershed)``
        where ``segmented`` is the input with non-lung areas set to -2000 HU.
    """
    # Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(image)
    # Sobel gradient magnitude, rescaled to [0, 255].
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # Watershed algorithm on the gradient image, seeded by the markers.
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    # Reduce the watershed regions to their outline.
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Black-tophat morphology with an enlarged disk kernel to re-include
    # structures along the lung wall.
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    # Combine the internal marker and the outline into the lung filter.
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter.
    # fill_holes is not used here, since in some slices the heart would be
    # reincluded by accident.
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=np.ones((5, 5)), iterations=3)
    # Assign -2000 HU to filtered-out areas; follow the input's shape
    # instead of hard-coding 512x512 slices.
    segmented = np.where(lungfilter == 1, image, -2000 * np.ones(image.shape))
    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_filtered_lung(image):
    """Compute the boolean lung filter mask for a 2D CT slice.

    Near-duplicate of :func:`generate_markers` plus :func:`seperate_lungs`,
    except it uses a -500 HU internal-marker threshold (vs -400 there) and
    returns only the mask.

    :param image: 2D CT slice in Hounsfield units.
    :return: boolean lung filter mask with the same shape as ``image``.
    """
    # Internal marker: air-like voxels. NOTE the -500 threshold differs from
    # generate_markers' -400, so the marker logic cannot simply be reused.
    marker_internal = image < -500
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    # External marker: ring between a small and a large dilation.
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    # Watershed markers. `np.int` was removed in NumPy 1.24; the builtin
    # `int` is equivalent. Follow the input shape instead of hard-coding
    # 512x512.
    marker_watershed = np.zeros(image.shape, dtype=int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    # Sobel gradient magnitude rescaled to [0, 255].
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # Watershed algorithm on the gradient image.
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    # Reduce the watershed regions to their outline.
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Black-tophat morphology with an enlarged disk kernel to re-include
    # structures along the lung wall.
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    # Combine the internal marker and the outline into the lung filter.
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes; fill_holes is avoided since in some slices the heart
    # would be re-included by accident.
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=np.ones((5, 5)), iterations=3)
    return lungfilter
# key solution
def get_segmented_lungs(raw_im):
    """Segment the lung parenchyma from a 2D CT slice.

    :param raw_im: raw input image (2D CT slice in Hounsfield units);
        not modified -- the function works on a copy.
    :return: tuple ``(binary, im)`` where ``binary`` is the boolean lung
        mask and ``im`` is the input with everything outside the mask
        zeroed.
    """
    im = raw_im.copy()
    # Step 1: Convert into a binary image (air-like voxels below -567.5 HU).
    # An Otsu-threshold variant (`im < -500` + threshold_otsu) was tried
    # here previously.
    binary = im < -567.5
    # Step 2: Remove the blobs connected to the border of the image.
    cleared = clear_border(binary)
    # Step 3: Label the connected components.
    label_image = label(cleared)
    # Step 4: Keep only the 2 largest labelled areas (the two lungs).
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    # Step 5: Erosion with a disk of radius 2. This separates lung nodules
    # attached to the blood vessels.
    selem = disk(2)
    binary = binary_erosion(binary, selem)
    # Step 6: Closure with a disk of radius 7. This keeps nodules attached
    # to the lung wall.
    selem = disk(7)
    binary = binary_closing(binary, selem)
    # Step 7: Fill in the small holes inside the binary mask of lungs.
    edges = roberts(binary)
    binary = ndimage.binary_fill_holes(edges)
    # Step 8: Superimpose the binary mask on the input image.
    get_high_vals = binary == 0
    im[get_high_vals] = 0
    return binary, im
def get_segmented_lungs_with_opencv_api(raw_im, ground_truth, plot=False):
    '''
    Segment the lung parenchyma from a 2D CT slice using OpenCV primitives.

    :param raw_im: raw input image (2D CT slice in Hounsfield units)
    :param ground_truth: mask handed to cv2.floodFill -- presumably a uint8
        mask 2 pixels larger than the image in each dimension; TODO confirm
        against the caller
    :param plot: when True, draw each intermediate step on a matplotlib figure
    :return: None -- NOTE(review): unlike get_segmented_lungs no value is
        returned and only 5 of the 8 subplots are used; the function looks
        truncated/unfinished. Confirm before relying on it.
    '''
    im = raw_im.copy()
    if plot == True:
        f, plots = plt.subplots(8, 1, figsize=(5, 40))
    '''
    将2D Slice转为二值图
    Step 1: Convert into a binary image.
    '''
    binary = im < -567.5
    if plot == True:
        plots[0].axis('off')
        plots[0].imshow(binary, cmap=plt.cm.bone)
    # NOTE(review): cv2.threshold returns a (retval, dst) tuple and expects a
    # uint8 single-channel image -- passing a boolean array and then plotting
    # the tuple likely fails at runtime; verify.
    ostued = cv2.threshold(binary, thresh=0, maxval=255, type=cv2.THRESH_OTSU)
    if plot == True:
        plots[1].axis('off')
        plots[1].imshow(ostued, cmap=plt.cm.bone)
    # Pad with a 3-pixel constant (0) border before flood filling from (0, 0).
    expanded = cv2.copyMakeBorder(binary, 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=0)
    if plot == True:
        plots[2].axis('off')
        plots[2].imshow(expanded, cmap=plt.cm.bone)
    # NOTE(review): cv2.floodFill returns a (retval, image, mask, rect) tuple
    # and fills `expanded` in place -- plotting `filled` directly is
    # suspicious; verify.
    filled = cv2.floodFill(expanded, mask=ground_truth, seedPoint=(0, 0), newVal=255)
    if plot == True:
        plots[3].axis('off')
        # NOTE(review): axis 3 is turned off but the image is drawn on
        # plots[4] -- looks like an off-by-one; confirm intended axes.
        plots[4].imshow(filled, cmap=plt.cm.bone)
| [
"numpy.bitwise_or",
"skimage.morphology.binary_closing",
"scipy.ndimage.black_tophat",
"scipy.ndimage.binary_dilation",
"cv2.threshold",
"numpy.max",
"numpy.hypot",
"skimage.morphology.watershed",
"numpy.ones",
"skimage.measure.regionprops",
"scipy.ndimage.iterate_structure",
"scipy.ndimage.bi... | [((655, 697), 'skimage.segmentation.clear_border', 'segmentation.clear_border', (['marker_internal'], {}), '(marker_internal)\n', (680, 697), False, 'from skimage import measure, morphology, segmentation\n'), ((727, 757), 'skimage.measure.label', 'measure.label', (['marker_internal'], {}), '(marker_internal)\n', (740, 757), False, 'from skimage import measure, morphology, segmentation\n'), ((1212, 1267), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['marker_internal'], {'iterations': '(10)'}), '(marker_internal, iterations=10)\n', (1235, 1267), True, 'import scipy.ndimage as ndimage\n'), ((1285, 1340), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['marker_internal'], {'iterations': '(55)'}), '(marker_internal, iterations=55)\n', (1308, 1340), True, 'import scipy.ndimage as ndimage\n'), ((1456, 1490), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {'dtype': 'np.int'}), '((512, 512), dtype=np.int)\n', (1464, 1490), True, 'import numpy as np\n'), ((1863, 1886), 'scipy.ndimage.sobel', 'ndimage.sobel', (['image', '(1)'], {}), '(image, 1)\n', (1876, 1886), True, 'import scipy.ndimage as ndimage\n'), ((1911, 1934), 'scipy.ndimage.sobel', 'ndimage.sobel', (['image', '(0)'], {}), '(image, 0)\n', (1924, 1934), True, 'import scipy.ndimage as ndimage\n'), ((1956, 2002), 'numpy.hypot', 'np.hypot', (['sobel_filtered_dx', 'sobel_filtered_dy'], {}), '(sobel_filtered_dx, sobel_filtered_dy)\n', (1964, 2002), True, 'import numpy as np\n'), ((2099, 2153), 'skimage.morphology.watershed', 'morphology.watershed', (['sobel_gradient', 'marker_watershed'], {}), '(sobel_gradient, marker_watershed)\n', (2119, 2153), False, 'from skimage import measure, morphology, segmentation\n'), ((2244, 2298), 'scipy.ndimage.morphological_gradient', 'ndimage.morphological_gradient', (['watershed'], {'size': '(3, 3)'}), '(watershed, size=(3, 3))\n', (2274, 2298), True, 'import scipy.ndimage as ndimage\n'), ((2800, 2845), 'scipy.ndimage.iterate_structure', 
'ndimage.iterate_structure', (['blackhat_struct', '(8)'], {}), '(blackhat_struct, 8)\n', (2825, 2845), True, 'import scipy.ndimage as ndimage\n'), ((2889, 2945), 'scipy.ndimage.black_tophat', 'ndimage.black_tophat', (['outline'], {'structure': 'blackhat_struct'}), '(outline, structure=blackhat_struct)\n', (2909, 2945), True, 'import scipy.ndimage as ndimage\n'), ((3059, 3098), 'numpy.bitwise_or', 'np.bitwise_or', (['marker_internal', 'outline'], {}), '(marker_internal, outline)\n', (3072, 3098), True, 'import numpy as np\n'), ((3741, 3783), 'skimage.segmentation.clear_border', 'segmentation.clear_border', (['marker_internal'], {}), '(marker_internal)\n', (3766, 3783), False, 'from skimage import measure, morphology, segmentation\n'), ((3813, 3843), 'skimage.measure.label', 'measure.label', (['marker_internal'], {}), '(marker_internal)\n', (3826, 3843), False, 'from skimage import measure, morphology, segmentation\n'), ((4299, 4354), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['marker_internal'], {'iterations': '(10)'}), '(marker_internal, iterations=10)\n', (4322, 4354), True, 'import scipy.ndimage as ndimage\n'), ((4372, 4427), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['marker_internal'], {'iterations': '(55)'}), '(marker_internal, iterations=55)\n', (4395, 4427), True, 'import scipy.ndimage as ndimage\n'), ((4543, 4577), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {'dtype': 'np.int'}), '((512, 512), dtype=np.int)\n', (4551, 4577), True, 'import numpy as np\n'), ((4732, 4755), 'scipy.ndimage.sobel', 'ndimage.sobel', (['image', '(1)'], {}), '(image, 1)\n', (4745, 4755), True, 'import scipy.ndimage as ndimage\n'), ((4780, 4803), 'scipy.ndimage.sobel', 'ndimage.sobel', (['image', '(0)'], {}), '(image, 0)\n', (4793, 4803), True, 'import scipy.ndimage as ndimage\n'), ((4825, 4871), 'numpy.hypot', 'np.hypot', (['sobel_filtered_dx', 'sobel_filtered_dy'], {}), '(sobel_filtered_dx, sobel_filtered_dy)\n', (4833, 4871), True, 'import 
numpy as np\n'), ((4968, 5022), 'skimage.morphology.watershed', 'morphology.watershed', (['sobel_gradient', 'marker_watershed'], {}), '(sobel_gradient, marker_watershed)\n', (4988, 5022), False, 'from skimage import measure, morphology, segmentation\n'), ((5113, 5167), 'scipy.ndimage.morphological_gradient', 'ndimage.morphological_gradient', (['watershed'], {'size': '(3, 3)'}), '(watershed, size=(3, 3))\n', (5143, 5167), True, 'import scipy.ndimage as ndimage\n'), ((5669, 5714), 'scipy.ndimage.iterate_structure', 'ndimage.iterate_structure', (['blackhat_struct', '(8)'], {}), '(blackhat_struct, 8)\n', (5694, 5714), True, 'import scipy.ndimage as ndimage\n'), ((5758, 5814), 'scipy.ndimage.black_tophat', 'ndimage.black_tophat', (['outline'], {'structure': 'blackhat_struct'}), '(outline, structure=blackhat_struct)\n', (5778, 5814), True, 'import scipy.ndimage as ndimage\n'), ((5928, 5967), 'numpy.bitwise_or', 'np.bitwise_or', (['marker_internal', 'outline'], {}), '(marker_internal, outline)\n', (5941, 5967), True, 'import numpy as np\n'), ((6776, 6796), 'skimage.segmentation.clear_border', 'clear_border', (['binary'], {}), '(binary)\n', (6788, 6796), False, 'from skimage.segmentation import clear_border, mark_boundaries\n'), ((6869, 6883), 'skimage.measure.label', 'label', (['cleared'], {}), '(cleared)\n', (6874, 6883), False, 'from skimage.measure import label, regionprops, perimeter\n'), ((7482, 7489), 'skimage.morphology.disk', 'disk', (['(2)'], {}), '(2)\n', (7486, 7489), False, 'from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing\n'), ((7503, 7532), 'skimage.morphology.binary_erosion', 'binary_erosion', (['binary', 'selem'], {}), '(binary, selem)\n', (7517, 7532), False, 'from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing\n'), ((7706, 7713), 'skimage.morphology.disk', 'disk', (['(7)'], {}), 
'(7)\n', (7710, 7713), False, 'from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing\n'), ((7727, 7756), 'skimage.morphology.binary_closing', 'binary_closing', (['binary', 'selem'], {}), '(binary, selem)\n', (7741, 7756), False, 'from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing\n'), ((7872, 7887), 'skimage.filters.roberts', 'roberts', (['binary'], {}), '(binary)\n', (7879, 7887), False, 'from skimage.filters import roberts, sobel, threshold_otsu\n'), ((7901, 7933), 'scipy.ndimage.binary_fill_holes', 'ndimage.binary_fill_holes', (['edges'], {}), '(edges)\n', (7926, 7933), True, 'import scipy.ndimage as ndimage\n'), ((8664, 8729), 'cv2.threshold', 'cv2.threshold', (['binary'], {'thresh': '(0)', 'maxval': '(255)', 'type': 'cv2.THRESH_OTSU'}), '(binary, thresh=0, maxval=255, type=cv2.THRESH_OTSU)\n', (8677, 8729), False, 'import cv2\n'), ((8845, 8913), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['binary', '(3)', '(3)', '(3)', '(3)', 'cv2.BORDER_CONSTANT'], {'value': '(0)'}), '(binary, 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=0)\n', (8863, 8913), False, 'import cv2\n'), ((9029, 9101), 'cv2.floodFill', 'cv2.floodFill', (['expanded'], {'mask': 'ground_truth', 'seedPoint': '(0, 0)', 'newVal': '(255)'}), '(expanded, mask=ground_truth, seedPoint=(0, 0), newVal=255)\n', (9042, 9101), False, 'import cv2\n'), ((894, 937), 'skimage.measure.regionprops', 'measure.regionprops', (['marker_internal_labels'], {}), '(marker_internal_labels)\n', (913, 937), False, 'from skimage import measure, morphology, segmentation\n'), ((2033, 2055), 'numpy.max', 'np.max', (['sobel_gradient'], {}), '(sobel_gradient)\n', (2039, 2055), True, 'import numpy as np\n'), ((3980, 4023), 'skimage.measure.regionprops', 'measure.regionprops', (['marker_internal_labels'], {}), '(marker_internal_labels)\n', (3999, 4023), False, 'from 
skimage import measure, morphology, segmentation\n'), ((4902, 4924), 'numpy.max', 'np.max', (['sobel_gradient'], {}), '(sobel_gradient)\n', (4908, 4924), True, 'import numpy as np\n'), ((7084, 7108), 'skimage.measure.regionprops', 'regionprops', (['label_image'], {}), '(label_image)\n', (7095, 7108), False, 'from skimage.measure import label, regionprops, perimeter\n'), ((8413, 8448), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(8)', '(1)'], {'figsize': '(5, 40)'}), '(8, 1, figsize=(5, 40))\n', (8425, 8448), True, 'import matplotlib.pyplot as plt\n'), ((787, 830), 'skimage.measure.regionprops', 'measure.regionprops', (['marker_internal_labels'], {}), '(marker_internal_labels)\n', (806, 830), False, 'from skimage import measure, morphology, segmentation\n'), ((3306, 3321), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (3313, 3321), True, 'import numpy as np\n'), ((3472, 3491), 'numpy.ones', 'np.ones', (['(512, 512)'], {}), '((512, 512))\n', (3479, 3491), True, 'import numpy as np\n'), ((3873, 3916), 'skimage.measure.regionprops', 'measure.regionprops', (['marker_internal_labels'], {}), '(marker_internal_labels)\n', (3892, 3916), False, 'from skimage import measure, morphology, segmentation\n'), ((6175, 6190), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (6182, 6190), True, 'import numpy as np\n'), ((6996, 7020), 'skimage.measure.regionprops', 'regionprops', (['label_image'], {}), '(label_image)\n', (7007, 7020), False, 'from skimage.measure import label, regionprops, perimeter\n')] |
#!/usr/bin/env python
# coding: utf-8
# */Aaipnd-project-master/train_commonfns.py
#
# PROGRAMMER: <NAME>
# DATE CREATED: 01/01/2020
# REVISED DATE:
# PURPOSE: common support needed for train program
# AND
# Common functions. The functions are described later in this file
##
# Imports python modules to setup command line parameters
import argparse
import os.path
# for the plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
# This is to manage the actual lable names of the flowers
import json
import urllib
# Define get_train_input_args function to return with parser.parse_args() parsed argument
# collection that you created with this function
#
#
def get_train_input_args():
    """
    Parse the command line arguments for the training program.

    Uses argparse to define seven arguments; any argument the user omits
    falls back to its default value.

    Command Line Arguments:
      1. data_dir        - folder holding the flower images (default 'flowers')
      2. --arch          - CNN model architecture (default 'vgg16')
      3. --save_dir      - folder in which to save the checkpoint (default '.')
      4. --learning_rate - learning rate used for training (default 0.001)
      5. --epoch         - number of training epochs (default 5)
      6. --gpu           - 'cuda' to request the GPU; cpu is used when no GPU
                           is available (default 'cpu')
      7. --hidden_units  - comma separated hidden layer sizes (default '1000,250')

    Returns:
        argparse.Namespace holding the parsed command line arguments.

    Example:
        python train.py flowers --arch vgg16 --learning_rate 0.001 --gpu cuda
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir', type=str, default='flowers',
                        help='Path to the folder of flower images')
    parser.add_argument('--arch', type=str, default='vgg16',
                        help='CNN Model Architecture')
    parser.add_argument('--save_dir', type=str, default='.',
                        help='The Checkpoint file folder to save the model')
    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='The learning rate to be used for training the model')
    parser.add_argument('--epoch', type=int, default=5,
                        help='The number of epocs to use for training')
    parser.add_argument('--gpu', type=str, default='cpu',
                        help='If to use CUDA or not. If not provided then use cpu. '
                             'Even if GPU is given, if the system does not have a GPU, then cpu is used ')
    parser.add_argument('--hidden_units', type=str, default='1000,250',
                        help='Provide hidden units for each hidden layer')
    return parser.parse_args()
def get_predict_input_args():
    """
    Parse the command line arguments for the predict program.

    Predict flower name from an image along with the probability of that name.
    Basic usage: python predict.py /path/to/image checkpoint
    Options:
        --top_k 3                            return the top KK most likely classes
        --category_names cat_to_name.json    mapping of categories to real names
        --gpu                                use GPU for inference

    Returns:
        argparse.Namespace holding the parsed command line arguments.
    """
    chImagesParser = argparse.ArgumentParser()
    chImagesParser.add_argument('flowerfile', type = str, default = 'flowerssmall/valid/1/image_06764.jpg', help = '/path/to/image to predict its name')
    chImagesParser.add_argument('checkpoint', type = str, default = 'project2_gpud_vgg16.pth', help = 'The filename of the checkpoint - saved model')
    chImagesParser.add_argument('--top_k', type = int, default = 3, help = 'the number of top KK most likely classes for the image')
    # BUG FIX: the option was declared as '----category_names' (four dashes),
    # so the documented '--category_names' flag was rejected on the command line.
    chImagesParser.add_argument('--category_names', type = str, default = 'cat_to_name.json', help = 'the real name file for the classes of flowers')
    chImagesParser.add_argument('--gpu', type = str, default = 'cpu', help = 'If to use CUDA or not. If not provided then use cpu. Even if GPU is given, if the system does not have a GPU, then cpu is used ')
    return chImagesParser.parse_args()
def check_predict_command_line_arguments(in_arg) :
    """
    Validate the command line arguments for the predict program.

    Verifies that the flower image, the model checkpoint, and the category
    name mapping file all exist on disk, and echoes the parsed arguments
    when everything checks out.

    Parameters:
        in_arg: argparse.Namespace from get_predict_input_args, or None.
    Returns:
        True when all arguments are usable, False otherwise.
    """
    if in_arg is None:
        print("* Doesn't Check the Command Line Arguments because 'get_predict_input_args' hasn't been defined.")
        return False
    # BUG FIX: the original printed in_arg.flowername, an attribute that does
    # not exist on the namespace, so a missing image raised AttributeError.
    if not os.path.exists(in_arg.flowerfile):
        print("Command Line Arguments:\n Error!!! - The flower file to predict ", in_arg.flowerfile,
              "does not exsists")
        return False
    if not os.path.exists(in_arg.checkpoint):
        print("Command Line Arguments:\n Error!!! - The model checkpoint file does not exists ", in_arg.checkpoint,
              "does not exsists")
        return False
    if not os.path.exists(in_arg.category_names):
        print("Command Line Arguments:\n Error!!! - The category realname file does not exists ", in_arg.category_names,
              "does not exsists")
        return False
    # all checks passed - echo the arguments for the user
    print("\n Command Line Arguments: Flower to predict file =", in_arg.flowerfile,
          "\n Checkpoint file =", in_arg.checkpoint, "\n top KK =", in_arg.top_k,
          "\n category real name file = ", in_arg.category_names,
          "\n gpu = ", in_arg.gpu)
    return True
def check_command_line_arguments(in_arg, archsupported) :
    """
    Validate the command line arguments for the training program.

    Checks that the data folder exists, the requested architecture is one
    of the supported ones, and the hidden unit list parses as comma
    separated integers.

    Parameters:
        in_arg:        argparse.Namespace from get_train_input_args, or None.
        archsupported: collection of supported architecture names.
    Returns:
        True when all arguments are valid, False otherwise.
    """
    print(type(archsupported))
    if in_arg is None:
        print("* Doesn't Check the Command Line Arguments because 'get_input_args' hasn't been defined.")
        return False
    # echo the parsed arguments for the user
    print("\n Command Line Arguments: data dir =", in_arg.data_dir,
          "\n arch =", in_arg.arch, "\n save dir =", in_arg.save_dir,
          "\n learning rate = ", in_arg.learning_rate,
          "\n epoc = ", in_arg.epoch,
          "\n gpu = ", in_arg.gpu)
    if not os.path.exists(in_arg.data_dir):
        print("Command Line Arguments:\n Error!!! - the training, test, and validation folders ", in_arg.data_dir,
              "does not exsists")
        return False
    if in_arg.arch not in archsupported:
        print("Command Line Arguments:\n Error!!! : Network Architecture ", in_arg.arch,
              "Not supported")
        return False
    if in_arg.hidden_units is None:
        # BUG FIX: the original printed in_arg.hiddenlayers, which does not
        # exist on the namespace and raised AttributeError.
        print("Command Line Arguments:\n Error!!! : Network Hidden Layer ", in_arg.hidden_units,
              "Not supported. It should be like 1000,250")
        return False
    try:
        print ("Hidden Units :", in_arg.hidden_units)
        values = [int(i) for i in in_arg.hidden_units.split(',')]
        print ("Hidden Units Array:", values)
    except ValueError:
        # a piece of the comma separated list was not an integer
        print("Command Line Arguments:\n Error!!! : Network Hidden Layer Not supported. It should be like 1000,250")
        return False
    return True
def imshow(image, ax=None, title=None, normalize=True):
    """Display a channel-first image tensor on a matplotlib axes.

    Parameters:
        image     - torch tensor holding the image, shape (C, H, W)
        ax        - axes to draw on; a new figure/axes is created when None
        title     - optional title for the plot
        normalize - undo the ImageNet mean/std normalisation before display
    Returns:
        the matplotlib axes the image was drawn on
    """
    matplotlib.use('agg')
    if ax is None:
        _, ax = plt.subplots()
    # matplotlib wants the colour channel last, PyTorch stores it first
    image = image.numpy().transpose((1, 2, 0))
    if normalize:
        # reverse the ImageNet normalisation, then clip so the result
        # does not look like noise when displayed
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        image = np.clip(std * image + mean, 0, 1)
    ax.set_title(title)
    ax.imshow(image)
    for side in ('top', 'right', 'left', 'bottom'):
        ax.spines[side].set_visible(False)
    ax.tick_params(axis='both', length=0)
    ax.set_xticklabels('')
    ax.set_yticklabels('')
    return ax
def TestDataloaders(imgdataloader, title=None):
    """Show the first four images of a dataloader batch as a sanity check.

    Parameters:
        imgdataloader - a dataloader yielding (images, labels) batches
        title         - unused, kept for interface compatibility
    """
    matplotlib.use('agg')
    # grab a single batch and report its shape
    images, labels = next(iter(imgdataloader))
    print (images.shape, labels.shape)
    fig, axes = plt.subplots(figsize=(10, 4), ncols=4)
    for position in range(4):
        imshow(images[position], ax=axes[position], normalize=True)
def plotModelMetric(objNModule):
    """Print the model accuracy and plot the training/validation loss curves.

    Parameters:
        objNModule - trained model object exposing accuracy, train_losses
                     and test_losses attributes
    """
    print ("Accuracy : ", objNModule.accuracy)
    for losses, curve_label in ((objNModule.train_losses, 'Training loss'),
                                (objNModule.test_losses, 'Validation loss')):
        plt.plot(losses, label=curve_label)
    plt.legend(frameon=False)
| [
"numpy.clip",
"argparse.ArgumentParser",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((526, 547), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (540, 547), False, 'import matplotlib\n'), ((2743, 2768), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2766, 2768), False, 'import argparse\n'), ((4419, 4444), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4442, 4444), False, 'import argparse\n'), ((8791, 8812), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (8805, 8812), False, 'import matplotlib\n'), ((9785, 9806), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (9799, 9806), False, 'import matplotlib\n'), ((10003, 10041), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)', 'ncols': '(4)'}), '(figsize=(10, 4), ncols=4)\n', (10015, 10041), True, 'import matplotlib.pyplot as plt\n'), ((10301, 10357), 'matplotlib.pyplot.plot', 'plt.plot', (['objNModule.train_losses'], {'label': '"""Training loss"""'}), "(objNModule.train_losses, label='Training loss')\n", (10309, 10357), True, 'import matplotlib.pyplot as plt\n'), ((10362, 10419), 'matplotlib.pyplot.plot', 'plt.plot', (['objNModule.test_losses'], {'label': '"""Validation loss"""'}), "(objNModule.test_losses, label='Validation loss')\n", (10370, 10419), True, 'import matplotlib.pyplot as plt\n'), ((10424, 10449), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (10434, 10449), True, 'import matplotlib.pyplot as plt\n'), ((8932, 8946), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8944, 8946), True, 'import matplotlib.pyplot as plt\n'), ((9160, 9191), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (9168, 9191), True, 'import numpy as np\n'), ((9206, 9237), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (9214, 9237), True, 'import numpy as np\n'), ((9384, 9404), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), 
'(image, 0, 1)\n', (9391, 9404), True, 'import numpy as np\n')] |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
def plot_duration(df):
    '''
    Plots a histogram (with KDE) of movie duration, x axis clipped to 0-300.
    :param df: cleaned dataframe
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
    sns.histplot(data=df, x='duration', kde=True, bins=75, ax=ax)
    ax.set_xlim((0, 300))
    ax.set(title='Distribution of Movie Durrations')
    sns.despine()
    return fig, ax
def plot_budget(df):
    '''
    Plots a histogram of adjusted movie budget on a log scale.
    :param df: cleaned dataframe
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
    sns.histplot(data=df, x='budget_adjusted', kde=True, log_scale=True, ax=ax)
    ax.set(title='Distribution of Movie Budget', xlabel='Budget ($)')
    sns.despine()
    return fig, ax
def plot_usa_income(df):
    '''
    Plots a histogram of adjusted USA gross income on a log scale.
    :param df: cleaned dataframe
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
    sns.histplot(data=df, x='usa_gross_income_adjusted', kde=True, log_scale=True, ax=ax)
    ax.set(title='Distribution of Movie Income (USA)', xlabel='USA Income ($)')
    sns.despine()
    return fig, ax
def plot_worldwide_income(df):
    '''
    Plots a histogram of adjusted worldwide gross income on a log scale.
    :param df: cleaned dataframe
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
    sns.histplot(data=df, x='worldwide_gross_income_adjusted', kde=True, log_scale=True, ax=ax)
    ax.set(title='Distribution of Movie Income (Worldwide)', xlabel='Worldwide Income ($)')
    sns.despine()
    return fig, ax
def plot_votes(df):
    '''
    Plots a histogram of the target feature (weighted average vote).
    :param df: cleaned data frame
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
    sns.histplot(data=df, x='weighted_average_vote', bins=25, kde=True, ax=ax)
    ax.set(title='Distribution of Weighted Votes')
    sns.despine()
    return fig, ax
def plot_vote_by_budget(df):
    '''
    Regression/scatter plot of weighted vote against adjusted budget.
    :param df: cleaned data frame
    :return: figure and axes object
    '''
    grid = sns.lmplot(data=df, x='budget_adjusted', y='weighted_average_vote')
    ax = grid.figure.axes[0]
    ax.set_title('Weighted Vote By Budget')
    ax.set_ylabel('Weighted Average Vote')
    ax.set_xlabel('Budget ($)')
    return grid.figure, ax
def plot_worldwide_income_by_date(df):
    '''
    Scatter plot of worldwide gross income (log scale) by year published.
    :param df: cleaned data frame
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
    scatter = sns.scatterplot(data=df, x='date_published_year',
                              y='worldwide_gross_income_adjusted', alpha=0.2, ax=ax)
    scatter.set(yscale='log')
    ax.set_title('Worldwide Gross Income by Date')
    ax.set_ylabel('Worldwide Gross Income ($)')
    ax.set_xlabel('Year Published')
    sns.despine()
    return fig, ax
def plot_USA_income_by_date(df):
    '''
    Scatter plot of USA gross income (log scale) by year published.
    :param df: cleaned dataframe
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
    scatter = sns.scatterplot(data=df, x='date_published_year',
                              y='usa_gross_income_adjusted', alpha=0.2, ax=ax)
    scatter.set(yscale='log')
    ax.set_title('USA Gross Income by Date')
    ax.set_ylabel('USA Gross Income ($)')
    ax.set_xlabel('Year Published')
    sns.despine()
    return fig, ax
def plot_vote_by_decade(df):
    '''
    Bar plot of the mean weighted vote per publication decade.

    Note: adds a 'decade' column to df as a side effect (bins of
    date_published_year between 1910 and 2020).
    :param df: cleaned data frame
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
    bin_edges = np.arange(1910, 2030, 10)
    df['decade'] = pd.cut(df['date_published_year'], bin_edges)
    sns.barplot(data = df, x = 'decade', y = 'weighted_average_vote', ax = ax)
    # BUG FIX: the tick labels must match the 11 decade bars; the original
    # used np.arange(1910, 2300, 10), producing 39 labels for 11 ticks,
    # which mislabels the axis (and raises on recent matplotlib versions).
    labs = bin_edges[:-1]
    t = ax.set_xticklabels(labs)
    ax.set_title('Average Vote By Decade')
    ax.set_xlabel('Decade')
    ax.set_ylabel("Weighted Average Vote")
    sns.despine()
    return fig, ax
def plot_region_count(df):
    '''
    Bar plot of the number of movies per (one-hot encoded) region.
    :param df: cleaned data frame
    :return: figure and axes objects
    '''
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 5))
    region_cols = ['region_Africa', 'region_Americas',
                   'region_Asia', 'region_Europe', 'region_None', 'region_Oceania']
    regions = df[region_cols].sum()
    # strip the 'region_' prefix for the tick labels
    labs = [name[name.find('_') + 1:] for name in regions.index]
    sns.barplot(x=labs, y=regions.values, ax=ax)
    ax.set_title("Movie Count by Region")
    ax.set_xlabel('Region')
    ax.set_ylabel('Count')
    sns.despine()
    return fig, ax
def plot_corr(df):
    '''
    plots correlation matrix of all numerical transformed variables

    The short display labels in ``labs`` must stay aligned, position for
    position, with the column list passed to corr(); both lists are
    renamed onto the heatmap's rows and columns.
    :param df: cleaned data frame
    :return: correlation matrix and figure and axes objects of plot
    '''
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 8))
    # pairwise correlation over the selected numeric/transformed columns
    hmap = df[['weighted_average_vote', 'duration', 'budget_adjusted',
               'usa_gross_income_adjusted', 'worldwide_gross_income_adjusted',
               'date_published_year', 'date_published_month', 'date_published_day',
               'actors_weighted_frequency', 'director_weighted_frequency',
               'writer_weighted_frequency', 'production_company_frequency', 'title_n_words',
               'title_ratio_long_words', 'title_ratio_vowels',
               'title_ratio_capital_letters', 'description_n_words',
               'description_ratio_long_words', 'description_ratio_vowels',
               'description_ratio_capital_letters', ]].corr()
    # short labels, one per column above (ordering matters)
    labs = ['Vote', 'Duration', 'Budget', 'USA Income', 'World Income', 'Year', 'Month', 'Day', 'Actors', 'Director',
            'Writer', 'Production', 'Title Len', 'Title Long', 'Title Vowels', 'Title Caps', 'Desc Len', 'Desc Long',
            'Desc Vowels', 'Desc Caps']
    hmap.index = labs
    hmap.columns = labs
    sns.heatmap(hmap, vmin = -1, vmax = 1, ax = ax, cmap = 'coolwarm')
    ax.set_title('Correlation Matrix of Transformed Numeric Variables')
    return fig, ax, hmap
def statsdf(df):
    '''
    calculates stats for every variable and puts it in an organized dataframe

    Builds on df.describe() and adds, per column:
      - perc_null:     percentage of missing values
      - count_encoded: column sums (meaningful only for the one-hot encoded
                       genre/region columns; blanked to NaN for numeric ones)
      - corr:          correlation with weighted_average_vote (blanked to NaN
                       for the one-hot columns)
    :param df: cleaned dataframe
    :return: stats dataframe
    '''
    # NOTE: this local name shadows the scipy.stats module import inside
    # this function only.
    stats = df.describe().T
    percent_missing = pd.DataFrame(df.isnull().sum() * 100 / len(df)).reset_index().rename(columns = {'index': 'var', 0: 'perc'})
    stats['perc_null'] = percent_missing['perc'].to_numpy()
    # column sums; only meaningful for the 0/1 encoded columns
    stats['count_encoded'] = df.sum().to_numpy()
    # blank count_encoded for the numeric (non one-hot) columns
    stats.loc[
        ['duration', 'weighted_average_vote', 'budget_adjusted', 'usa_gross_income_adjusted', 'worldwide_gross_income_adjusted', 'date_published_year', 'date_published_month', 'date_published_day', 'actors_weighted_frequency', 'director_weighted_frequency', 'writer_weighted_frequency', 'production_company_frequency', 'title_n_words', 'title_ratio_long_words', 'title_ratio_vowels', 'title_ratio_capital_letters', 'description_n_words', 'description_ratio_long_words', 'description_ratio_vowels',
         'description_ratio_capital_letters'], ['count_encoded']] = np.nan
    stats['corr'] = df.corr()['weighted_average_vote'].to_numpy()
    # blank corr for the one-hot encoded genre/region columns
    stats.loc[['genre_1', 'genre_2', 'genre_3', 'genre_4', 'genre_5', 'genre_6', 'genre_7', 'genre_8', 'genre_9', 'genre_10', 'region_Africa', 'region_Americas', 'region_Asia', 'region_Europe', 'region_None', 'region_Oceania'], ['corr']] = np.nan
    return stats
def decade_anova(df):
    '''
    performs the one-way ANOVA of weighted vote across publication decades

    Note: adds/overwrites a 'decade' column on df as a side effect.
    :param df: cleaned dataframe
    :return: F statistic and P value for ANOVA
    '''
    df['decade'] = pd.cut(df['date_published_year'], np.arange(1910, 2030, 10))
    # replace each interval with its left edge (the decade start year)
    df['decade'] = df['decade'].apply(lambda interval: interval.left)
    # one column of votes per decade, NaN where a movie is from another decade
    melted = df[['weighted_average_vote', 'decade']].pivot(values = 'weighted_average_vote', columns = 'decade')
    groups = [melted[decade].dropna() for decade in range(1910, 2020, 10)]
    F, p = stats.f_oneway(*groups)
    return F, p
def moneytary_plots(df):
    '''
    makes the combined vote-vs-money scatter plots (budget, worldwide
    income, USA income) and saves the figure to 'votebymoney.jpeg'
    :param df: cleaned data frame
    :return: figure for plot
    '''
    money_vars = ['budget_adjusted', 'worldwide_gross_income_adjusted', 'usa_gross_income_adjusted']
    # long format: one row per (vote, money-kind, value) triple
    melty = df[money_vars + ['weighted_average_vote']].melt(
        id_vars = ['weighted_average_vote'], value_vars = money_vars, var_name = ['money'])
    a = sns.relplot(data = melty, x = 'value', y = 'weighted_average_vote', col = 'money',
                    color = '#F3880E', kind = 'scatter', alpha = 0.2)
    ax_budget, ax_world, ax_usa = a.figure.axes[0], a.figure.axes[1], a.figure.axes[2]
    ax_budget.set_ylabel('Weighted Average Vote')
    ax_budget.set_title('Budget')
    ax_budget.set_xlabel('Budget Adjusted ($)')
    ax_budget.set_xlim((0, 1e9))
    ax_world.set_title('Worldwide Income')
    ax_world.set_xlabel('Income Adjusted ($)')
    ax_usa.set_title('USA Income')
    ax_usa.set_xlabel('Income Adjusted ($)')
    a.figure.savefig('votebymoney.jpeg')
    return a
| [
"seaborn.lmplot",
"seaborn.relplot",
"seaborn.despine",
"seaborn.histplot",
"seaborn.heatmap",
"seaborn.scatterplot",
"seaborn.barplot",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((289, 337), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 10)'}), '(nrows=1, ncols=1, figsize=(10, 10))\n', (301, 337), True, 'import matplotlib.pyplot as plt\n'), ((348, 409), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""duration"""', 'ax': 'ax', 'kde': '(True)', 'bins': '(75)'}), "(data=df, x='duration', ax=ax, kde=True, bins=75)\n", (360, 409), True, 'import seaborn as sns\n'), ((503, 516), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (514, 516), True, 'import seaborn as sns\n'), ((701, 747), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 8)'}), '(nrows=1, ncols=1, figsize=(8, 8))\n', (713, 747), True, 'import matplotlib.pyplot as plt\n'), ((758, 833), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""budget_adjusted"""', 'ax': 'ax', 'kde': '(True)', 'log_scale': '(True)'}), "(data=df, x='budget_adjusted', ax=ax, kde=True, log_scale=True)\n", (770, 833), True, 'import seaborn as sns\n'), ((929, 942), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (940, 942), True, 'import seaborn as sns\n'), ((1127, 1173), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 8)'}), '(nrows=1, ncols=1, figsize=(8, 8))\n', (1139, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1273), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""usa_gross_income_adjusted"""', 'ax': 'ax', 'kde': '(True)', 'log_scale': '(True)'}), "(data=df, x='usa_gross_income_adjusted', ax=ax, kde=True,\n log_scale=True)\n", (1196, 1273), True, 'import seaborn as sns\n'), ((1375, 1388), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1386, 1388), True, 'import seaborn as sns\n'), ((1577, 1623), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 8)'}), '(nrows=1, ncols=1, figsize=(8, 8))\n', (1589, 1623), True, 'import 
matplotlib.pyplot as plt\n'), ((1634, 1729), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""worldwide_gross_income_adjusted"""', 'ax': 'ax', 'kde': '(True)', 'log_scale': '(True)'}), "(data=df, x='worldwide_gross_income_adjusted', ax=ax, kde=True,\n log_scale=True)\n", (1646, 1729), True, 'import seaborn as sns\n'), ((1843, 1856), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1854, 1856), True, 'import seaborn as sns\n'), ((2034, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 10)'}), '(nrows=1, ncols=1, figsize=(10, 10))\n', (2046, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2167), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""weighted_average_vote"""', 'ax': 'ax', 'bins': '(25)', 'kde': '(True)'}), "(data=df, x='weighted_average_vote', ax=ax, bins=25, kde=True)\n", (2105, 2167), True, 'import seaborn as sns\n'), ((2233, 2246), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (2244, 2246), True, 'import seaborn as sns\n'), ((2416, 2483), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df', 'x': '"""budget_adjusted"""', 'y': '"""weighted_average_vote"""'}), "(data=df, x='budget_adjusted', y='weighted_average_vote')\n", (2426, 2483), True, 'import seaborn as sns\n'), ((2865, 2911), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 6)'}), '(nrows=1, ncols=1, figsize=(8, 6))\n', (2877, 2911), True, 'import matplotlib.pyplot as plt\n'), ((2926, 3035), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""date_published_year"""', 'y': '"""worldwide_gross_income_adjusted"""', 'alpha': '(0.2)', 'ax': 'ax'}), "(data=df, x='date_published_year', y=\n 'worldwide_gross_income_adjusted', alpha=0.2, ax=ax)\n", (2941, 3035), True, 'import seaborn as sns\n'), ((3206, 3219), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (3217, 3219), True, 'import seaborn as sns\n'), ((3402, 3448), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 6)'}), '(nrows=1, ncols=1, figsize=(8, 6))\n', (3414, 3448), True, 'import matplotlib.pyplot as plt\n'), ((3463, 3566), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""date_published_year"""', 'y': '"""usa_gross_income_adjusted"""', 'alpha': '(0.2)', 'ax': 'ax'}), "(data=df, x='date_published_year', y=\n 'usa_gross_income_adjusted', alpha=0.2, ax=ax)\n", (3478, 3566), True, 'import seaborn as sns\n'), ((3725, 3738), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (3736, 3738), True, 'import seaborn as sns\n'), ((3915, 3962), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 5)'}), '(nrows=1, ncols=1, figsize=(10, 5))\n', (3927, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4119), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'df', 'x': '"""decade"""', 'y': '"""weighted_average_vote"""', 'ax': 'ax'}), "(data=df, x='decade', y='weighted_average_vote', ax=ax)\n", (4064, 4119), True, 'import seaborn as sns\n'), ((4139, 4164), 'numpy.arange', 'np.arange', (['(1910)', '(2300)', '(10)'], {}), '(1910, 2300, 10)\n', (4148, 4164), True, 'import numpy as np\n'), ((4316, 4329), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (4327, 4329), True, 'import seaborn as sns\n'), ((4515, 4561), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 5)'}), '(nrows=1, ncols=1, figsize=(8, 5))\n', (4527, 4561), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4816), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'labs', 'y': 'regions.values', 'ax': 'ax'}), '(x=labs, y=regions.values, ax=ax)\n', (4783, 4816), True, 'import seaborn as sns\n'), ((4924, 4937), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (4935, 4937), True, 'import seaborn as sns\n'), ((5177, 5224), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': 
'(1)', 'figsize': '(10, 8)'}), '(nrows=1, ncols=1, figsize=(10, 8))\n', (5189, 5224), True, 'import matplotlib.pyplot as plt\n'), ((6230, 6288), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'vmin': '(-1)', 'vmax': '(1)', 'ax': 'ax', 'cmap': '"""coolwarm"""'}), "(hmap, vmin=-1, vmax=1, ax=ax, cmap='coolwarm')\n", (6241, 6288), True, 'import seaborn as sns\n'), ((8932, 9054), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'melty', 'x': '"""value"""', 'y': '"""weighted_average_vote"""', 'col': '"""money"""', 'color': '"""#F3880E"""', 'kind': '"""scatter"""', 'alpha': '(0.2)'}), "(data=melty, x='value', y='weighted_average_vote', col='money',\n color='#F3880E', kind='scatter', alpha=0.2)\n", (8943, 9054), True, 'import seaborn as sns\n'), ((4022, 4047), 'numpy.arange', 'np.arange', (['(1910)', '(2030)', '(10)'], {}), '(1910, 2030, 10)\n', (4031, 4047), True, 'import numpy as np\n'), ((7969, 7994), 'numpy.arange', 'np.arange', (['(1910)', '(2030)', '(10)'], {}), '(1910, 2030, 10)\n', (7978, 7994), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.datasets import make_classification, make_moons
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
def generate_moons_df(n_samples=50, noise=.6):
    """Generate a noisy two-moons classification dataset.

    Parameters:
        n_samples - number of points to generate
        noise     - standard deviation of the Gaussian noise added to the moons
    Returns:
        DataFrame with feature columns 'A' and 'B' and a binary 'target' column.
    """
    features, labels = make_moons(n_samples=n_samples, noise=noise)
    frame = pd.DataFrame(features, columns=['A', 'B'])
    frame['target'] = labels
    return frame
def preprocess(df):
    """Split the data into train/test sets and standard-scale the features.

    The scaler is fit on the training split only, then applied to the test
    split. The returned feature frames also carry the matching 'target'
    column for convenience.

    Parameters:
        df - DataFrame with feature columns and a 'target' column
    Returns:
        (X_train_scaled, X_test_scaled, y_train, y_test)
    """
    X_train, X_test, y_train, y_test = train_test_split(df.drop('target', axis=1), df['target'])
    scaler = StandardScaler()
    train_scaled = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
    test_scaled = pd.DataFrame(scaler.transform(X_test), columns=X_train.columns)
    test_scaled['target'] = y_test.values
    train_scaled['target'] = y_train.values
    return train_scaled, test_scaled, y_train, y_test
def plot_boundaries(model, X_test, X_train, ax, padding = 1, grid_granularity = 0.01, show_test=False, plot_probas=True):
    """Draw the model's decision surface plus the train (and optionally test) points.

    The surface is evaluated on a grid spanning the training data extended by
    *padding* on every side.  With ``plot_probas=True`` the positive-class
    probability is contoured; otherwise the hard predictions are.
    """
    lo_a = X_train['A'].min() - padding
    hi_a = X_train['A'].max() + padding
    lo_b = X_train['B'].min() - padding
    hi_b = X_train['B'].max() + padding
    xx, yy = np.meshgrid(np.arange(lo_a, hi_a, grid_granularity),
                         np.arange(lo_b, hi_b, grid_granularity))
    grid = np.c_[xx.ravel(), yy.ravel()]
    surface = model.predict_proba(grid)[:, 1]
    if not plot_probas:
        surface = model.predict(grid)
    ax.contourf(xx, yy, surface.reshape(xx.shape), cmap='PRGn', levels=20, alpha=.5)
    # Training points: green positives, purple negatives, light edge colors.
    positives = X_train[X_train['target'] == 1]
    negatives = X_train[X_train['target'] == 0]
    ax.scatter(positives['A'], positives['B'], color='forestgreen', edgecolors='lightgreen')
    ax.scatter(negatives['A'], negatives['B'], color='purple', edgecolors='plum')
    if show_test:
        # Test points use black edges to distinguish them from training data.
        test_pos = X_test[X_test['target'] == 1]
        test_neg = X_test[X_test['target'] == 0]
        ax.scatter(test_pos['A'], test_pos['B'], color='forestgreen', edgecolors='black')
        ax.scatter(test_neg['A'], test_neg['B'], color='purple', edgecolors='black')
| [
"sklearn.datasets.make_moons",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.arange"
] | [((468, 512), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': 'n_samples', 'noise': 'noise'}), '(n_samples=n_samples, noise=noise)\n', (478, 512), False, 'from sklearn.datasets import make_classification, make_moons\n'), ((527, 562), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': "['A', 'B']"}), "(X, columns=['A', 'B'])\n", (539, 562), True, 'import pandas as pd\n'), ((736, 752), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (750, 752), False, 'from sklearn.preprocessing import StandardScaler\n'), ((863, 915), 'pandas.DataFrame', 'pd.DataFrame', (['X_test_scaled'], {'columns': 'X_train.columns'}), '(X_test_scaled, columns=X_train.columns)\n', (875, 915), True, 'import pandas as pd\n'), ((983, 1036), 'pandas.DataFrame', 'pd.DataFrame', (['X_train_scaled'], {'columns': 'X_train.columns'}), '(X_train_scaled, columns=X_train.columns)\n', (995, 1036), True, 'import pandas as pd\n'), ((1437, 1478), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'grid_granularity'], {}), '(x_min, x_max, grid_granularity)\n', (1446, 1478), True, 'import numpy as np\n'), ((1488, 1529), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'grid_granularity'], {}), '(y_min, y_max, grid_granularity)\n', (1497, 1529), True, 'import numpy as np\n'), ((1544, 1563), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (1555, 1563), True, 'import numpy as np\n')] |
from collections import deque
from pathlib import Path
import numpy as np
import pandas as pd
from pyoaz.bots.leftmost_bot import LeftmostBot
from pyoaz.bots.random_bot import RandomBot
from pyoaz.bots.nn_bot import NNBot
from pyoaz.bots.mcts_bot import MCTSBot
from pyoaz.tournament import Participant, Tournament
def compute_policy_entropy(policies, eps=1e-10):
    """Shannon entropy of each policy distribution along the last axis.

    *eps* guards against log(0) for zero-probability actions.
    """
    return -(policies * np.log(policies + eps)).sum(-1)
def load_benchmark(benchmark_path):
    """Load benchmark boards/values from *benchmark_path*, or (None, None) if absent.

    Boards are canonicalised and the stored static scores converted to
    current-player values before being returned.
    """
    root = Path(benchmark_path)
    boards_file = root / "benchmark_boards.npy"
    values_file = root / "benchmark_values.npy"
    if not boards_file.exists():
        return None, None
    boards = to_canonical(np.load(boards_file))
    values = static_score_to_value(boards, np.load(values_file)).squeeze()
    return boards, values
def static_score_to_value(boards, values):
    """Flip the score sign for boards whose two piece planes have unequal sums.

    Unequal plane sums identify boards where it is the second player's move,
    so the static score is negated for those entries.  The input *values*
    array is left untouched; a copy is returned.
    """
    signed = values.copy()
    plane0 = boards[..., 0].sum(axis=(1, 2))
    plane1 = boards[..., 1].sum(axis=(1, 2))
    signed[plane0 != plane1] *= -1.0
    return signed
def to_canonical(boards):
    """Swap the two piece planes wherever their sums differ.

    Boards with equal plane sums are left unchanged; the input array is not
    modified — a copy is returned.
    """
    canonical = boards.copy()
    swap = boards[..., 0].sum(axis=(1, 2)) != boards[..., 1].sum(axis=(1, 2))
    canonical[swap, ..., 0] = boards[swap, ..., 1]
    canonical[swap, ..., 1] = boards[swap, ..., 0]
    return canonical
def get_gt_values(benchmark_path, boards):
    """Look up ground-truth rewards for *boards* in the tic-tac-toe tables.

    Boards are encoded to their binary representation, joined against the
    representative and reward tables under *benchmark_path*, and the static
    scores converted to current-player values.

    Note: to_canonical is its own inverse, so no inverse transform of the
    canonical boards is needed before the lookup.
    """
    from pyoaz.games.tic_tac_toe import boards_to_bin

    reward_table = pd.read_csv(
        benchmark_path / "tic_tac_toe_table.csv", index_col=False
    )
    rep_table = pd.read_csv(
        benchmark_path / "tic_tac_toe_reps.csv", index_col=False
    )
    lookup = pd.DataFrame(boards_to_bin(boards), columns=["board_rep"])
    lookup = pd.merge(lookup, rep_table, on="board_rep", how="left")
    values = pd.merge(lookup, reward_table, on="board_num", how="left")[
        "reward"
    ].values
    return static_score_to_value(boards, values)
def play_tournament(game, model, n_games=100, mcts_bot_iterations=None):
    """Pit *model* against leftmost/random (and optional MCTS) bots.

    Returns (oaz_wins, oaz_losses, draws) accumulated over the whole
    tournament; *model* is the prioritised participant.
    """
    oazbot = Participant(NNBot(model), name="oaz")
    participants = [
        oazbot,
        Participant(LeftmostBot(), name="left"),
        Participant(RandomBot(), name="random"),
    ]
    if mcts_bot_iterations is not None:
        participants.extend(
            Participant(
                MCTSBot(n_iterations=n_iter, n_concurrent_workers=16),
                name=f"mcts {n_iter}",
            )
            for n_iter in mcts_bot_iterations
        )
    tournament = Tournament(game)
    win_loss = tournament.start_tournament(
        participants, n_games=n_games, prioritised_participant=oazbot,
    )
    oaz_wins = win_loss[0, :].sum()
    oaz_losses = win_loss[:, 0].sum()
    # Every pairing plays 2 * n_games; whatever is neither a win nor a loss is a draw.
    draws = 2 * n_games * (len(participants) - 1) - oaz_wins - oaz_losses
    return oaz_wins, oaz_losses, draws
def play_best_self(game, model, save_path, n_games=100):
    """Play the current model against the best model saved at *save_path*.

    If no saved model exists yet, (1, 0) is returned so the current model
    wins by default; otherwise (current_wins, current_losses).
    """
    if not Path(save_path).exists():
        return 1, 0
    current = Participant(
        NNBot(model, use_cpu=False, greedy=False), name="Current model"
    )
    best = Participant(
        NNBot.load_model(save_path, use_cpu=True, greedy=False), name="Best Model"
    )
    tournament = Tournament(game)
    win_loss = tournament.start_tournament([current, best], n_games=n_games)
    return win_loss[0, :].sum(), win_loss[:, 0].sum()
def running_mean(arr, window=10):
    """Trailing moving average of *arr* over at most *window* samples.

    Early entries average over however many samples have been seen so far.
    """
    recent = deque(maxlen=window)
    means = []
    for value in arr:
        recent.append(value)
        means.append(np.mean(list(recent)))
    return means
| [
"pyoaz.tournament.Tournament",
"pyoaz.tournament.Participant",
"collections.deque",
"pandas.read_csv",
"pathlib.Path",
"pyoaz.bots.nn_bot.NNBot.load_model",
"pyoaz.bots.random_bot.RandomBot",
"pyoaz.games.tic_tac_toe.boards_to_bin",
"pandas.merge",
"numpy.log",
"pyoaz.bots.leftmost_bot.LeftmostB... | [((525, 545), 'pathlib.Path', 'Path', (['benchmark_path'], {}), '(benchmark_path)\n', (529, 545), False, 'from pathlib import Path\n'), ((1737, 1807), 'pandas.read_csv', 'pd.read_csv', (["(benchmark_path / 'tic_tac_toe_table.csv')"], {'index_col': '(False)'}), "(benchmark_path / 'tic_tac_toe_table.csv', index_col=False)\n", (1748, 1807), True, 'import pandas as pd\n'), ((1835, 1904), 'pandas.read_csv', 'pd.read_csv', (["(benchmark_path / 'tic_tac_toe_reps.csv')"], {'index_col': '(False)'}), "(benchmark_path / 'tic_tac_toe_reps.csv', index_col=False)\n", (1846, 1904), True, 'import pandas as pd\n'), ((1937, 1958), 'pyoaz.games.tic_tac_toe.boards_to_bin', 'boards_to_bin', (['boards'], {}), '(boards)\n', (1950, 1958), False, 'from pyoaz.games.tic_tac_toe import boards_to_bin\n'), ((1974, 2022), 'pandas.DataFrame', 'pd.DataFrame', (['boards_list'], {'columns': "['board_rep']"}), "(boards_list, columns=['board_rep'])\n", (1986, 2022), True, 'import pandas as pd\n'), ((2038, 2092), 'pandas.merge', 'pd.merge', (['board_df', 'rep_df'], {'on': '"""board_rep"""', 'how': '"""left"""'}), "(board_df, rep_df, on='board_rep', how='left')\n", (2046, 2092), True, 'import pandas as pd\n'), ((2863, 2879), 'pyoaz.tournament.Tournament', 'Tournament', (['game'], {}), '(game)\n', (2873, 2879), False, 'from pyoaz.tournament import Participant, Tournament\n'), ((3322, 3363), 'pyoaz.bots.nn_bot.NNBot', 'NNBot', (['model'], {'use_cpu': '(False)', 'greedy': '(False)'}), '(model, use_cpu=False, greedy=False)\n', (3327, 3363), False, 'from pyoaz.bots.nn_bot import NNBot\n'), ((3379, 3434), 'pyoaz.bots.nn_bot.NNBot.load_model', 'NNBot.load_model', (['save_path'], {'use_cpu': '(True)', 'greedy': '(False)'}), '(save_path, use_cpu=True, greedy=False)\n', (3395, 3434), False, 'from pyoaz.bots.nn_bot import NNBot\n'), ((3450, 3496), 'pyoaz.tournament.Participant', 'Participant', (['current_bot'], {'name': '"""Current model"""'}), "(current_bot, name='Current 
model')\n", (3461, 3496), False, 'from pyoaz.tournament import Participant, Tournament\n'), ((3508, 3548), 'pyoaz.tournament.Participant', 'Participant', (['best_bot'], {'name': '"""Best Model"""'}), "(best_bot, name='Best Model')\n", (3519, 3548), False, 'from pyoaz.tournament import Participant, Tournament\n'), ((3567, 3583), 'pyoaz.tournament.Tournament', 'Tournament', (['game'], {}), '(game)\n', (3577, 3583), False, 'from pyoaz.tournament import Participant, Tournament\n'), ((3826, 3846), 'collections.deque', 'deque', ([], {'maxlen': 'window'}), '(maxlen=window)\n', (3831, 3846), False, 'from collections import deque\n'), ((393, 415), 'numpy.log', 'np.log', (['(policies + eps)'], {}), '(policies + eps)\n', (399, 415), True, 'import numpy as np\n'), ((708, 728), 'numpy.load', 'np.load', (['boards_path'], {}), '(boards_path)\n', (715, 728), True, 'import numpy as np\n'), ((746, 766), 'numpy.load', 'np.load', (['values_path'], {}), '(values_path)\n', (753, 766), True, 'import numpy as np\n'), ((2350, 2362), 'pyoaz.bots.nn_bot.NNBot', 'NNBot', (['model'], {}), '(model)\n', (2355, 2362), False, 'from pyoaz.bots.nn_bot import NNBot\n'), ((2403, 2416), 'pyoaz.bots.leftmost_bot.LeftmostBot', 'LeftmostBot', ([], {}), '()\n', (2414, 2416), False, 'from pyoaz.bots.leftmost_bot import LeftmostBot\n'), ((2460, 2471), 'pyoaz.bots.random_bot.RandomBot', 'RandomBot', ([], {}), '()\n', (2469, 2471), False, 'from pyoaz.bots.random_bot import RandomBot\n'), ((2106, 2168), 'pandas.merge', 'pd.merge', (['board_df', 'tic_tac_toe_df'], {'on': '"""board_num"""', 'how': '"""left"""'}), "(board_df, tic_tac_toe_df, on='board_num', how='left')\n", (2114, 2168), True, 'import pandas as pd\n'), ((3257, 3272), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (3261, 3272), False, 'from pathlib import Path\n'), ((2707, 2764), 'pyoaz.bots.mcts_bot.MCTSBot', 'MCTSBot', ([], {'n_iterations': 'iterations', 'n_concurrent_workers': '(16)'}), '(n_iterations=iterations, 
n_concurrent_workers=16)\n', (2714, 2764), False, 'from pyoaz.bots.mcts_bot import MCTSBot\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 28 12:48:10 2021
@author: huzongxiang
"""
import numpy as np
from typing import List
from enum import Enum, unique
from pymatgen.core import Structure
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
@unique
class Features(Enum):
    """Named feature kinds with stable, unique integer ids (1-9).

    ``__int__``/``__float__``/``__str__`` expose the numeric id (not the
    member name), so e.g. ``str(Features.bond)`` is ``'2'``.
    """
    atom = 1
    bond = 2
    image = 3
    state = 4
    pair_indices = 5
    lattice = 6
    cart_coords = 7
    masking_indices = 8
    masking_node_labels = 9
    def __int__(self):
        """Integer id of the feature kind."""
        return self.value
    def __float__(self):
        """Float form of the numeric id."""
        return float(self.value)
    def __str__(self):
        """String form of the numeric id (not the member name)."""
        return str(self.value)
def adjacent_matrix(num_atoms, pair_indices):
    """Build a symmetric adjacency matrix (with self-loops) from bond pairs.

    Parameters
    ----------
    num_atoms : int
        Number of atoms, i.e. the matrix dimension.
    pair_indices : iterable
        Index pairs; each entry's first two elements mark a bonded pair and
        set both (i, j) and (j, i).

    Returns
    -------
    np.ndarray
        ``(num_atoms, num_atoms)`` float32 array with ones on the diagonal
        and at every bonded pair, zeros elsewhere.
    """
    adjacency = np.eye(num_atoms, dtype=np.float32)
    for pair in pair_indices:
        i, j = pair[0], pair[1]
        adjacency[i, j] = 1
        adjacency[j, i] = 1
    return adjacency
def get_valences(structure: Structure) -> np.ndarray:
    """Estimate the oxidation state of every site in *structure*.

    Tries pymatgen's bond-valence analyser first; if it raises (it cannot
    decorate every structure), falls back to
    ``Structure.add_oxidation_state_by_guess``, which decorates *structure*
    in place.

    Parameters
    ----------
    structure : Structure
        Crystal structure to analyse.

    Returns
    -------
    np.ndarray
        One oxidation state per site, in site order.
    """
    BV = BVAnalyzer(symm_tol=0.1)
    try:
        sites_valences = BV.get_oxi_state_decorated_structure(structure)
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; only analysis failures should
        # trigger the fallback guess.
        structure.add_oxidation_state_by_guess()
        sites_valences = structure
    return np.array([specie.oxi_state for specie in sites_valences.species])
def get_space_group_number(structure: Structure) -> int:
    """Return the international space-group number of *structure*.

    Bug fix: the previous ``-> List`` return annotation was incorrect —
    ``SpacegroupAnalyzer.get_space_group_number`` returns a plain ``int``.

    Parameters
    ----------
    structure : Structure
        Crystal structure to analyse.

    Returns
    -------
    int
        International space-group number.
    """
    return SpacegroupAnalyzer(structure).get_space_group_number()
def get_space_group_info(structure: Structure) :
    """Build a SpacegroupAnalyzer for *structure*.

    Parameters
    ----------
    structure : Structure
        Crystal structure to analyse.

    Returns
    -------
    SpacegroupAnalyzer
        Analyzer object exposing the structure's symmetry information.
    """
    return SpacegroupAnalyzer(structure) | [
"numpy.array",
"numpy.eye",
"pymatgen.analysis.bond_valence.BVAnalyzer",
"pymatgen.symmetry.analyzer.SpacegroupAnalyzer"
] | [((950, 985), 'numpy.eye', 'np.eye', (['num_atoms'], {'dtype': 'np.float32'}), '(num_atoms, dtype=np.float32)\n', (956, 985), True, 'import numpy as np\n'), ((1408, 1432), 'pymatgen.analysis.bond_valence.BVAnalyzer', 'BVAnalyzer', ([], {'symm_tol': '(0.1)'}), '(symm_tol=0.1)\n', (1418, 1432), False, 'from pymatgen.analysis.bond_valence import BVAnalyzer\n'), ((1736, 1760), 'numpy.array', 'np.array', (['atoms_valences'], {}), '(atoms_valences)\n', (1744, 1760), True, 'import numpy as np\n'), ((2286, 2315), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {}), '(structure)\n', (2304, 2315), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((1981, 2010), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {}), '(structure)\n', (1999, 2010), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n')] |
"""
This Python module serves to analyze the performance metrics of kharon.
"""
#%%
import matplotlib.pyplot as plt
import sys
import os
import numpy as np
from scipy.stats import norm
import statistics
# retrieve data from
# option = -1
# if sys.argv[1] == "new":
#     option = 0
# else:
#     option = 1
# Output file name for the ApacheBench report; may be overridden by argv[1].
out_file = "performance.txt"
if len(sys.argv) == 2:
    out_file = sys.argv[1]
TEST_SIZE = 5  # number of benchmark repetitions (the loop runs TEST_SIZE + 1 times)
sample_averages = []  # mean request times parsed from each ab run
# Start from a clean slate for the (currently disabled) means dump below.
if os.path.exists("means.txt"):
    os.remove("means.txt")
for i in range(TEST_SIZE + 1):
    # if (i != 0 and i % 5 == 0) or i == TEST_SIZE - 2:
    #     mf = open("means.txt", "a")
    #     mf.write("{}\n".format(sample_averages))
    #     sample_averages = []
    #     mf.close()
    # if i == TEST_SIZE - 1:
    #     break
    # Benchmark the local server with ApacheBench: 10000 requests, 500 concurrent,
    # writing the report over performance/<out_file> each iteration.
    os.system("ab -n 10000 -c 500 http://localhost:4000/ > performance/{}".format(out_file))
    pf = open("performance/{}".format(out_file), "r")
    # The mean is on the 21st line
    # NOTE(review): this inner loop reuses the outer loop's name ``i``; harmless
    # here (the outer ``for`` resumes from its own iterator) but fragile.
    for i in range(20):
        pf.readline()
    mean_list = pf.readline().split()
    if len(mean_list) > 4:
        mean = float(mean_list[3])
        sample_averages.append(mean)
    pf.close()
# Fit a normal distribution to the collected means and plot its pdf.
x_axis = np.arange(4000, 10000, 0.5)
mu = statistics.mean(data=sample_averages)
sd = statistics.stdev(data=sample_averages)  # NOTE(review): raises if fewer than two means were parsed
plt.plot(x_axis, norm.pdf(x_axis, mu, sd))
# with open("means.txt") as openfileobject:
#     for line in openfileobject:
#         # plt.plot(line.strip('][').split(', '))
#         plt.plot([1,2,3,4])
#         plt.ylabel('some numbers')
plt.savefig("performance/{}".format(out_file.replace(".txt", ".png")))
# plt.show()
# %%
| [
"statistics.mean",
"os.path.exists",
"statistics.stdev",
"scipy.stats.norm.pdf",
"numpy.arange",
"os.remove"
] | [((426, 453), 'os.path.exists', 'os.path.exists', (['"""means.txt"""'], {}), "('means.txt')\n", (440, 453), False, 'import os\n'), ((1099, 1126), 'numpy.arange', 'np.arange', (['(4000)', '(10000)', '(0.5)'], {}), '(4000, 10000, 0.5)\n', (1108, 1126), True, 'import numpy as np\n'), ((1133, 1170), 'statistics.mean', 'statistics.mean', ([], {'data': 'sample_averages'}), '(data=sample_averages)\n', (1148, 1170), False, 'import statistics\n'), ((1176, 1214), 'statistics.stdev', 'statistics.stdev', ([], {'data': 'sample_averages'}), '(data=sample_averages)\n', (1192, 1214), False, 'import statistics\n'), ((457, 479), 'os.remove', 'os.remove', (['"""means.txt"""'], {}), "('means.txt')\n", (466, 479), False, 'import os\n'), ((1233, 1257), 'scipy.stats.norm.pdf', 'norm.pdf', (['x_axis', 'mu', 'sd'], {}), '(x_axis, mu, sd)\n', (1241, 1257), False, 'from scipy.stats import norm\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time
def completeRank1Matrix(observations, mask, PLOT=False):
    """Complete a partially observed rank-1 matrix in place.

    observations and mask are 2D numpy arrays of the same shape: observations
    holds the known numerical entries and mask is a boolean array marking
    where observations occur.  Viewing rows and columns as the two sides of
    a bipartite graph whose edges are the observed entries, any missing
    entry (r, c) connected by a length-3 path r - c' - r' - c can be solved
    from the rank-1 identity M[r, c] = M[r', c] * M[r, c'] / M[r', c'].
    The sweep repeats until no further entries are solvable.

    Both input arrays are modified in place and also returned.

    Parameters
    ----------
    observations : 2D numpy array of observed values.
    mask : 2D boolean numpy array, True where ``observations`` is known.
    PLOT : bool, if True draw the evolving matrices with matplotlib.

    Returns
    -------
    [observations, mask] with every recoverable entry filled in.
    """
    if PLOT:
        fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
        fig.show()
    done = False
    while not done:
        # Entries joined by a length-3 path in the observation graph but not
        # directly observed themselves.
        maskInt = mask.astype(int)
        solvable_mask = np.logical_and(
            np.logical_not(mask),
            np.greater(np.dot(np.dot(maskInt, np.transpose(maskInt)), maskInt), 0))
        if not np.any(solvable_mask):
            done = True
            continue
        for fillPt in np.argwhere(solvable_mask):
            # Find a concrete length-3 path: one hop from the target row,
            # one hop from the target column, then test for a shared edge.
            rowConnected = np.argwhere(mask[fillPt[0], :])
            columnConnected = np.argwhere(mask[:, fillPt[1]])
            pathFound = False
            for j in rowConnected:
                for i in columnConnected:
                    if mask[i, j]:
                        pathFound = True
                        break
                if pathFound:
                    break
            assert (mask[i, fillPt[1]] and mask[fillPt[0], j]
                    and mask[i, j] and not mask[fillPt[0], fillPt[1]])
            # (i, fillPt[1]) and (fillPt[0], j) are the "diagonal" corners of
            # the rank-1 square; solve for the missing corner.
            observations[fillPt[0], fillPt[1]] = (
                observations[i, fillPt[1]] * observations[fillPt[0], j]
                / observations[i, j])
            mask[fillPt[0], fillPt[1]] = True
            if PLOT:
                ax1.imshow(observations, interpolation="nearest")
                ax2.imshow(mask, interpolation="nearest")
                fig.canvas.draw()
                time.sleep(0.5)
    if PLOT:
        # Bug fix: plt.close() previously ran unconditionally and raised
        # NameError when PLOT=False (the figure only exists when plotting).
        plt.close(fig)
    return [observations, mask]
| [
"numpy.logical_not",
"numpy.any",
"time.sleep",
"matplotlib.pyplot.close",
"numpy.argwhere",
"numpy.transpose",
"matplotlib.pyplot.subplots"
] | [((1925, 1937), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (1934, 1937), True, 'import matplotlib.pyplot as plt\n'), ((378, 419), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, sharex=True, sharey=True)\n', (390, 419), True, 'import matplotlib.pyplot as plt\n'), ((851, 865), 'numpy.argwhere', 'np.argwhere', (['Q'], {}), '(Q)\n', (862, 865), True, 'import numpy as np\n'), ((620, 640), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (634, 640), True, 'import numpy as np\n'), ((746, 755), 'numpy.any', 'np.any', (['Q'], {}), '(Q)\n', (752, 755), True, 'import numpy as np\n'), ((1131, 1162), 'numpy.argwhere', 'np.argwhere', (['mask[fillPt[0], :]'], {}), '(mask[fillPt[0], :])\n', (1142, 1162), True, 'import numpy as np\n'), ((1186, 1217), 'numpy.argwhere', 'np.argwhere', (['mask[:, fillPt[1]]'], {}), '(mask[:, fillPt[1]])\n', (1197, 1217), True, 'import numpy as np\n'), ((1906, 1921), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1916, 1921), False, 'import time\n'), ((699, 720), 'numpy.transpose', 'np.transpose', (['maskInt'], {}), '(maskInt)\n', (711, 720), True, 'import numpy as np\n')] |
import unittest
import os
import numpy as np
from phonopy.interface.phonopy_yaml import read_cell_yaml
from phono3py.phonon3.triplets import (get_grid_point_from_address,
get_grid_point_from_address_py)
data_dir = os.path.dirname(os.path.abspath(__file__))
class TestTriplets(unittest.TestCase):
    """Check the spglib-backed grid-point lookup against the pure-python one."""

    def setUp(self):
        self._cell = read_cell_yaml(os.path.join(data_dir, "POSCAR.yaml"))

    def tearDown(self):
        pass

    def test_get_grid_point_from_address(self):
        self._mesh = (10, 10, 10)
        print("Compare get_grid_point_from_address from spglib and that "
              "written in python")
        print("with mesh numbers [%d %d %d]" % self._mesh)
        for address in np.ndindex(self._mesh):
            # Both implementations must agree for every grid address.
            self.assertEqual(
                get_grid_point_from_address(address, self._mesh),
                get_grid_point_from_address_py(address, self._mesh))
if __name__ == '__main__':
    # Run the triplet tests with verbose per-test output.
    test_suite = unittest.TestLoader().loadTestsFromTestCase(TestTriplets)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)
| [
"phono3py.phonon3.triplets.get_grid_point_from_address",
"phono3py.phonon3.triplets.get_grid_point_from_address_py",
"os.path.join",
"numpy.ndindex",
"os.path.abspath",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((271, 296), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'import os\n'), ((396, 433), 'os.path.join', 'os.path.join', (['data_dir', '"""POSCAR.yaml"""'], {}), "(data_dir, 'POSCAR.yaml')\n", (408, 433), False, 'import os\n'), ((753, 775), 'numpy.ndindex', 'np.ndindex', (['self._mesh'], {}), '(self._mesh)\n', (763, 775), True, 'import numpy as np\n'), ((802, 850), 'phono3py.phonon3.triplets.get_grid_point_from_address', 'get_grid_point_from_address', (['address', 'self._mesh'], {}), '(address, self._mesh)\n', (829, 850), False, 'from phono3py.phonon3.triplets import get_grid_point_from_address, get_grid_point_from_address_py\n'), ((871, 922), 'phono3py.phonon3.triplets.get_grid_point_from_address_py', 'get_grid_point_from_address_py', (['address', 'self._mesh'], {}), '(address, self._mesh)\n', (901, 922), False, 'from phono3py.phonon3.triplets import get_grid_point_from_address, get_grid_point_from_address_py\n'), ((1073, 1094), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1092, 1094), False, 'import unittest\n'), ((1135, 1171), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1158, 1171), False, 'import unittest\n')] |
import numpy
import simtk.unit
import simtk.unit as units
import simtk.openmm as mm
from openmmtools.integrators import ExternalPerturbationLangevinIntegrator
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
class NCMCGeodesicBAOAB(ExternalPerturbationLangevinIntegrator):
    """
    g-BAOAB integrator ("V R R O R R V" splitting) that tracks external
    protocol work.
    """
    def __init__(self, temperature=298.0 * simtk.unit.kelvin,
                 collision_rate=91.0 / simtk.unit.picoseconds,
                 timestep=1.0 * simtk.unit.femtoseconds,
                 constraint_tolerance=1e-7
                 ):
        """Configure the splitting and thermodynamic parameters.

        Shadow-work and heat measurement are both disabled; only external
        protocol work is accumulated by the base class.
        """
        super(NCMCGeodesicBAOAB, self).__init__(
            splitting="V R R O R R V",
            temperature=temperature,
            collision_rate=collision_rate,
            timestep=timestep,
            constraint_tolerance=constraint_tolerance,
            measure_shadow_work=False,
            measure_heat=False,
        )
class NCMCMetpropolizedGeodesicBAOAB(ExternalPerturbationLangevinIntegrator):
    """
    Metropolized g-BAOAB integrator ("{ V R R O R R V }" splitting) that
    tracks external protocol work.
    """
    def __init__(self, temperature=298.0 * simtk.unit.kelvin,
                 collision_rate=91.0 / simtk.unit.picoseconds,
                 timestep=1.0 * simtk.unit.femtoseconds,
                 constraint_tolerance=1e-7
                 ):
        """Configure the splitting and thermodynamic parameters.

        Shadow work is measured (required for the Metropolization braces in
        the splitting string); heat measurement stays disabled.
        """
        super(NCMCMetpropolizedGeodesicBAOAB, self).__init__(
            splitting="{ V R R O R R V }",
            temperature=temperature,
            collision_rate=collision_rate,
            timestep=timestep,
            constraint_tolerance=constraint_tolerance,
            measure_shadow_work=True,
            measure_heat=False,
        )
class GHMCIntegrator(mm.CustomIntegrator):
    """
    This generalized hybrid Monte Carlo (GHMC) integrator is a modification of the GHMC integrator found
    here https://github.com/choderalab/openmmtools. Multiple steps can be taken per integrator.step() in order to save
    the potential energy before the steps were made.

    Accumulated external protocol work lives in the global variable
    'protocol_work'; acceptance statistics in 'naccept' and 'ntrials'.
    """
    def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=1.0 / simtk.unit.picoseconds,
                 timestep=1.0 * simtk.unit.femtoseconds, nsteps=1):
        """
        Create a generalized hybrid Monte Carlo (GHMC) integrator.
        Parameters
        ----------
        temperature : simtk.unit.Quantity compatible with kelvin, default: 298*unit.kelvin
            The temperature.
        collision_rate : simtk.unit.Quantity compatible with 1/picoseconds, default: 1.0/unit.picoseconds
            The collision rate.
        timestep : simtk.unit.Quantity compatible with femtoseconds, default: 1.0*unit.femtoseconds
            The integration timestep.
        nsteps : int
            The number of steps to take per integrator.step()
        Notes
        -----
        It is equivalent to a Langevin integrator in the velocity Verlet discretization with a
        Metropolization step to ensure sampling from the appropriate distribution.
        An additional global variable 'potential_initial' records the potential energy before 'nsteps' have been taken.
        Example
        -------
        Create a GHMC integrator.
        >>> temperature = 298.0 * simtk.unit.kelvin
        >>> collision_rate = 91.0 / simtk.unit.picoseconds
        >>> timestep = 1.0 * simtk.unit.femtoseconds
        >>> integrator = GHMCIntegrator(temperature, collision_rate, timestep)
        References
        ----------
        Lelievre, Rousset, and Stoltz. Free Energy Computations: A Mathematical Perspective
        http://www.amazon.com/Free-Energy-Computations-Mathematical-Perspective/dp/1848162472
        """
        # Initialize constants.
        kT = kB * temperature
        gamma = collision_rate
        # Create a new custom integrator.
        super(GHMCIntegrator, self).__init__(timestep)
        #
        # Integrator initialization.
        #
        self.addGlobalVariable("kT", kT)  # thermal energy
        self.addGlobalVariable("b", numpy.exp(-gamma * timestep))  # velocity mixing parameter
        self.addPerDofVariable("sigma", 0)  # velocity standard deviation
        self.addGlobalVariable("ke", 0)  # kinetic energy
        self.addPerDofVariable("vold", 0)  # old velocities
        self.addPerDofVariable("xold", 0)  # old positions
        self.addGlobalVariable("Eold", 0)  # old energy
        self.addGlobalVariable("Enew", 0)  # new energy
        self.addGlobalVariable("potential_initial", 0)  # initial potential energy
        self.addGlobalVariable("potential_old", 0)  # old potential energy
        self.addGlobalVariable("potential_new", 0)  # new potential energy
        self.addGlobalVariable("protocol_work", 0)  # accumulated external protocol work
        self.addGlobalVariable("accept", 0)  # accept or reject
        self.addGlobalVariable("naccept", 0)  # number accepted
        self.addGlobalVariable("ntrials", 0)  # number of Metropolization trials
        self.addGlobalVariable("first_step", 0)  # flag: one-time initialization below has run
        self.addPerDofVariable("x1", 0)  # position before application of constraints
        self.addGlobalVariable("step", 0)  # variable to keep track of number of propagation steps
        self.addGlobalVariable("nsteps", nsteps)  # The number of iterations per integrator.step(1).
        #
        # Initialization (runs only once, on the very first step).
        #
        self.beginIfBlock("first_step < 1")
        self.addComputePerDof("sigma", "sqrt(kT/m)")
        self.addComputeGlobal("protocol_work", "0.0")
        self.addConstrainPositions()
        self.addConstrainVelocities()
        self.addComputeGlobal("potential_new", "energy")
        self.addComputeGlobal("first_step", "1")
        self.endBlock()
        # Record the energy before this round of propagation; the difference
        # with the last recorded 'potential_new' is the external (protocol) work.
        self.addComputeGlobal("potential_initial", "energy")
        self.addComputeGlobal("step", "0")
        self.addComputeGlobal("protocol_work", "protocol_work + (potential_initial - potential_new)")
        #
        # Allow context updating here.
        #
        self.addUpdateContextState()
        if True:  # NOTE(review): no-op guard, kept only for the indentation level.
            self.beginWhileBlock("step < nsteps")
            #
            # Velocity randomization
            #
            self.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
            self.addConstrainVelocities()
            # Compute initial total energy
            self.addComputeSum("ke", "0.5*m*v*v")
            self.addComputeGlobal("potential_old", "energy")
            self.addComputeGlobal("Eold", "ke + potential_old")
            self.addComputePerDof("xold", "x")
            self.addComputePerDof("vold", "v")
            # Velocity Verlet step
            self.addComputePerDof("v", "v + 0.5*dt*f/m")
            self.addComputePerDof("x", "x + v*dt")
            self.addComputePerDof("x1", "x")
            self.addConstrainPositions()
            self.addComputePerDof("v", "v + 0.5*dt*f/m + (x-x1)/dt")
            self.addConstrainVelocities()
            # Compute final total energy
            self.addComputeSum("ke", "0.5*m*v*v")
            self.addComputeGlobal("potential_new", "energy")
            self.addComputeGlobal("Enew", "ke + potential_new")
            # Accept/reject, ensuring rejection if energy is NaN
            self.addComputeGlobal("accept", "step(exp(-(Enew-Eold)/kT) - uniform)")
            self.beginIfBlock("accept != 1")
            # Rejected: restore positions and reverse velocities.
            self.addComputePerDof("x", "xold")
            self.addComputePerDof("v", "-vold")
            self.addComputeGlobal("potential_new", "potential_old")
            self.endBlock()
            #
            # Velocity randomization
            #
            self.addComputePerDof("v", "sqrt(b)*v + sqrt(1-b)*sigma*gaussian")
            self.addConstrainVelocities()
            #
            # Accumulate statistics.
            #
            self.addComputeGlobal("naccept", "naccept + accept")
            self.addComputeGlobal("ntrials", "ntrials + 1")
            self.addComputeGlobal("step", "step+1")
            self.endBlock()
    def reset_protocol_work(self):
        """
        Set protocol work to zero in order to restart an NCMC procedure.
        """
        self.setGlobalVariableByName("protocol_work", 0)
    def get_protocol_work(self, dimensionless=False):
        """
        Return the accumulated protocol work either in kJ/mol or in dimensionless units of thermal energy.
        Parameter
        ---------
        dimensionless: bool
            whether to return the work in units of thermal energy
        """
        work = self.getGlobalVariableByName("protocol_work")
        if dimensionless:
            return work / self.getGlobalVariableByName("kT")
        else:
            return work
| [
"numpy.exp"
] | [((4535, 4563), 'numpy.exp', 'numpy.exp', (['(-gamma * timestep)'], {}), '(-gamma * timestep)\n', (4544, 4563), False, 'import numpy\n')] |
from typing import List
from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum
from numpy.linalg import norm
import meshpy.triangle as triangle
class tri_mesh:
    """Triangular mesh of the region between an inner surface polygon and an
    outer circle.

    The polygon given by ``surf_points`` forms the inner boundary (a hole is
    declared at the origin, so the polygon interior is not meshed) and a
    circle of ``external_radius`` sampled at ``external_n`` points forms the
    outer boundary.  Target triangle areas are graded from the longest
    polygon edge near the surface to the circle edge length near the rim.
    """
    def __init__(self, surf_points: ndarray, external_n: float, external_radius: float) -> None:
        # (n, 2) array of x/y coordinates of the inner surface polygon.
        self.__surf_points = surf_points
        # Number of sample points on the outer circle.
        self.__external_n = external_n
        # Radius of the outer circular boundary.
        self.__external_radius = external_radius
        return
    def __round_trip_connect(self, start, end):
        """Facet index pairs forming a closed loop start..end..start."""
        return [(i, i + 1) for i in range(start, end)] + [(end, start)]
    def build(self) -> List[ndarray]:
        """Generate the mesh; returns (mesh_points, mesh_tris) arrays."""
        # Refinement: derive target triangle areas from boundary edge lengths
        # (equilateral-triangle area for the longest edge of each boundary).
        dif_x = self.__surf_points[1:, 0] - self.__surf_points[:len(self.__surf_points) - 1, 0]
        dif_y = self.__surf_points[1:, 1] - self.__surf_points[:len(self.__surf_points) - 1, 1]
        max_l_surf = max(power(power(dif_x, 2) + power(dif_y, 2), 0.5))
        max_l_ext = 2 * pi * self.__external_radius / (self.__external_n - 1)
        max_area_surf = max_l_surf * max_l_surf * (3 ** 0.5) / 4
        max_area_ext = max_l_ext * max_l_ext * (3 ** 0.5) / 4
        # Mesh: surface polygon points first, then the outer circle points.
        points = []
        for i in range(len(self.__surf_points)):
            points.append((self.__surf_points[i, 0], self.__surf_points[i, 1]))
        points.append((self.__surf_points[0, 0], self.__surf_points[0, 1]))
        circ_start = len(points)
        facets = self.__round_trip_connect(0, circ_start - 1)
        points.extend(
            (self.__external_radius * cos(angle), self.__external_radius * sin(angle))
            for angle in linspace(0, 2 * pi, self.__external_n, endpoint=False)
        )
        facets.extend(self.__round_trip_connect(circ_start, len(points) - 1))
        info = triangle.MeshInfo()
        info.set_points(points)
        info.set_holes([(0, 0)])  # keep the inner polygon interior unmeshed
        info.set_facets(facets)
        def needs_refinement(vertices, area):
            # Interpolate the allowed area cubically with distance from the
            # surface region toward the outer rim.
            # NOTE(review): the hard-coded 0.5 assumes the surface lies within
            # radius ~0.5 of the origin — TODO confirm against callers.
            v = asarray(vertices)
            center = (v[0, :] + v[1, :] + v[2, :]) / 3
            dist = norm(center)
            dist = 0 if dist < 0.5 else dist - 0.5
            max_area = max_area_surf + (max_area_ext - max_area_surf) * ((dist / (self.__external_radius - 0.5)) ** 3)
            return bool(area > max_area * 1.1)
        mesh = triangle.build(info, quality_meshing=0.9, min_angle=25, refinement_func=needs_refinement)
        mesh_points = array(mesh.points)
        mesh_tris = array(mesh.elements)
        return mesh_points, mesh_tris | [
"meshpy.triangle.MeshInfo",
"numpy.power",
"meshpy.triangle.build",
"numpy.asarray",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((1726, 1745), 'meshpy.triangle.MeshInfo', 'triangle.MeshInfo', ([], {}), '()\n', (1743, 1745), True, 'import meshpy.triangle as triangle\n'), ((2245, 2339), 'meshpy.triangle.build', 'triangle.build', (['info'], {'quality_meshing': '(0.9)', 'min_angle': '(25)', 'refinement_func': 'needs_refinement'}), '(info, quality_meshing=0.9, min_angle=25, refinement_func=\n needs_refinement)\n', (2259, 2339), True, 'import meshpy.triangle as triangle\n'), ((2358, 2376), 'numpy.array', 'array', (['mesh.points'], {}), '(mesh.points)\n', (2363, 2376), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((2397, 2417), 'numpy.array', 'array', (['mesh.elements'], {}), '(mesh.elements)\n', (2402, 2417), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((1906, 1923), 'numpy.asarray', 'asarray', (['vertices'], {}), '(vertices)\n', (1913, 1923), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((1998, 2010), 'numpy.linalg.norm', 'norm', (['center'], {}), '(center)\n', (2002, 2010), False, 'from numpy.linalg import norm\n'), ((836, 851), 'numpy.power', 'power', (['dif_x', '(2)'], {}), '(dif_x, 2)\n', (841, 851), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((854, 869), 'numpy.power', 'power', (['dif_y', '(2)'], {}), '(dif_y, 2)\n', (859, 869), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((1567, 1621), 'numpy.linspace', 'linspace', (['(0)', '(2 * pi)', 'self.__external_n'], {'endpoint': '(False)'}), '(0, 2 * pi, self.__external_n, endpoint=False)\n', (1575, 1621), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((1493, 1503), 'numpy.cos', 'cos', (['angle'], {}), '(angle)\n', (1496, 1503), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n'), ((1530, 1540), 'numpy.sin', 'sin', (['angle'], {}), 
'(angle)\n', (1533, 1540), False, 'from numpy import array, asarray, cos, linspace, ndarray, pi, power, sin, sum\n')] |
# -*- coding: utf-8 -*-
"""
Simulating diffraction by a 2D metamaterial
===========================================
Finite element simulation of the diffraction of a plane wave by a mono-periodic
grating and calculation of diffraction efficiencies.
"""
##############################################################################
# First we import the required modules and class
import numpy as np
import matplotlib.pyplot as plt
from pytheas import genmat
from pytheas import Periodic2D
##############################################################################
# Then we need to instantiate the class :py:class:`Periodic2D`:
fem = Periodic2D()
##############################################################################
# The model consists of a single unit cell with quasi-periodic boundary conditions
# in the :math:`x` direction enclosed with perfectly matched layers (PMLs)
# in the :math:`y` direction to truncate the semi infinite media. From top to bottom:
#
# - PML top
# - superstrate (incident medium)
# - layer 1
# - design layer: this is the layer containing the periodic pattern, can be continuous or discrete
# - layer 2
# - substrate
# - PML bottom
#
# We define here the opto-geometric parameters:
mum = 1e-6  #: flt: the scale of the problem (here micrometers)
fem.d = 0.4 * mum  #: flt: period
fem.h_sup = 1.0 * mum  #: flt: "thickness" superstrate
fem.h_sub = 1.0 * mum  #: flt: "thickness" substrate
fem.h_layer1 = 0.1 * mum  #: flt: thickness layer 1
fem.h_layer2 = 0.1 * mum  #: flt: thickness layer 2
fem.h_des = 0.4 * mum  #: flt: thickness layer design
fem.h_pmltop = 1.0 * mum  #: flt: thickness pml top
fem.h_pmlbot = 1.0 * mum  #: flt: thickness pml bot
fem.a_pml = 1  #: flt: PMLs parameter, real part
fem.b_pml = 1  #: flt: PMLs parameter, imaginary part
fem.eps_sup = 1  #: flt: permittivity superstrate
fem.eps_sub = 3  #: flt: permittivity substrate
fem.eps_layer1 = 1  #: flt: permittivity layer 1
fem.eps_layer2 = 1  #: flt: permittivity layer 2
fem.eps_des = 1  #: flt: permittivity layer design
fem.lambda0 = 0.6 * mum  #: flt: incident wavelength
fem.theta_deg = 0.0  #: flt: incident angle
fem.pola = "TE"  #: str: polarization (TE or TM)
fem.lambda_mesh = 0.6 * mum  #: flt: incident wavelength
#: mesh parameters, correspond to a mesh size of lambda_mesh/(n*parmesh),
#: where n is the refractive index of the medium
fem.parmesh_des = 15
fem.parmesh = 13
fem.parmesh_pml = fem.parmesh * 2 / 3
fem.type_des = "elements"
##############################################################################
# We then initialize the model (copying files, etc...) and mesh the unit
# cell using gmsh
fem.getdp_verbose = 0
fem.gmsh_verbose = 0
fem.initialize()
mesh = fem.make_mesh()
##############################################################################
# We use the :py:mod:`genmat` module to generate a material pattern
genmat.np.random.seed(100)
mat = genmat.MaterialDensity()  # instantiate the material density generator
mat.n_x, mat.n_y, mat.n_z = 2 ** 7, 2 ** 7, 1  # sizes
mat.xsym = True  # symmetric with respect to x?
mat.p_seed = mat.mat_rand  # fix the pattern random seed
mat.nb_threshold = 3  # number of materials
mat._threshold_val = np.random.permutation(mat.threshold_val)
mat.pattern = mat.discrete_pattern
fig, ax = plt.subplots()
mat.plot_pattern(fig, ax)
##############################################################################
# We now assign the permittivity
fem.register_pattern(mat.pattern, mat._threshold_val)
fem.matprop_pattern = [1.4, 4 - 0.02 * 1j, 2]  # refractive index values
##############################################################################
# Now we're ready to compute the solution:
fem.compute_solution()
##############################################################################
# Finally we compute the diffraction efficiencies, absorption and energy balance
effs_TE = fem.diffraction_efficiencies()
print("efficiencies TE", effs_TE)
##############################################################################
# It is fairly easy to switch to TM polarization:
fem.pola = "TM"
fem.compute_solution()
effs_TM = fem.diffraction_efficiencies()
print("efficiencies TM", effs_TM)
| [
"pytheas.Periodic2D",
"pytheas.genmat.MaterialDensity",
"pytheas.genmat.np.random.seed",
"matplotlib.pyplot.subplots",
"numpy.random.permutation"
] | [((645, 657), 'pytheas.Periodic2D', 'Periodic2D', ([], {}), '()\n', (655, 657), False, 'from pytheas import Periodic2D\n'), ((2884, 2910), 'pytheas.genmat.np.random.seed', 'genmat.np.random.seed', (['(100)'], {}), '(100)\n', (2905, 2910), False, 'from pytheas import genmat\n'), ((2917, 2941), 'pytheas.genmat.MaterialDensity', 'genmat.MaterialDensity', ([], {}), '()\n', (2939, 2941), False, 'from pytheas import genmat\n'), ((3182, 3222), 'numpy.random.permutation', 'np.random.permutation', (['mat.threshold_val'], {}), '(mat.threshold_val)\n', (3203, 3222), True, 'import numpy as np\n'), ((3268, 3282), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3280, 3282), True, 'import matplotlib.pyplot as plt\n')] |
# This script creates a callable class which runs a single Perceptron
# The perceptron is able to solve the logical OR, the logical AND, but not
# The logical XOR problem. The only library used is numpy.
#
# Code from <NAME>, Machine Learning An Algorithmic Perspective, 2nd edition
# https://seat.massey.ac.nz/personal/s.r.marsland/Code/Ch3/pcn.py
# Chapter 3, basic perceptron
import numpy as np
class Perceptron:
    """A basic single-layer perceptron.

    Follows the pcn.py code accompanying Marsland, "Machine Learning:
    An Algorithmic Perspective" (2nd edition), Chapter 3, except that the
    bias input is taken as +1 (appended as the last column of the data).

    Attributes:
        n_in: number of input features
        n_out: number of output classes
        n_data: number of data points
        weights: (n_in + 1, n_out) weight matrix (last row: bias weights)
        activations: outputs of the most recent forward pass
    Methods:
        train_step: batch training with a step (threshold) activation
        train_linear: batch training using the linear Delta rule
        train_serial_linear: Delta rule, one data point at a time
        fwd: forward pass over a whole (bias-augmented) dataset
        fwd_serial: forward pass for a single (bias-augmented) data point
        confusion_matrix: evaluate predictions against targets
    """

    def __init__(self, inputs, targets):
        """Record the problem dimensions and initialise random weights.

        :param inputs: (n_data, n_in) array of inputs (without bias column)
        :param targets: (n_data, n_out) array of targets
        """
        # Set up the network size
        if np.ndim(inputs) > 1:
            self.n_in = np.shape(inputs)[1]
        else:
            self.n_in = 1
        if np.ndim(targets) > 1:
            self.n_out = np.shape(targets)[1]
        else:
            self.n_out = 1
        self.n_data = np.shape(inputs)[0]
        self.activations = []
        # Now initialize the network with small random weights.
        # NOTE(review): Marsland's reference code uses *0.1 - 0.05 (zero
        # centred); the -0.5 offset here makes every initial weight
        # negative -- confirm this is intentional.
        self.weights = np.random.rand(self.n_in + 1, self.n_out) * 0.1 - 0.5

    def train_step(self, inputs, targets, learning_rate, num_iterations, verbose=False):
        """
        Train with a step function in the perceptron.

        :param inputs: input vector
        :param targets: target vector
        :param learning_rate: this is "eta" given as a fraction
        :param num_iterations: number of batch update sweeps
        :param verbose: print progress information if True
        :return: trained weights of the inputs based on the gradient
        """
        # Append the bias input (+1) as an extra column.
        inputs = np.concatenate((inputs, np.ones((self.n_data, 1))), axis=1)
        if verbose:
            print("Initial inputs: ", inputs)
        # Training: batch updates, recording the total error each sweep.
        self.errors_step = []
        for n in range(num_iterations):
            self.activations = self.fwd(inputs)
            self.weights -= learning_rate * np.dot(np.transpose(inputs), self.activations - targets)
            current_error = abs((self.activations - targets).sum())
            self.errors_step.append(current_error)
            if verbose:
                print("iteration number: ", n)
                print("Weights")
                print(self.weights)
                print("Total error (number of incorrect predictions)")
                print(self.errors_step)
        return self.weights

    def train_linear(self, inputs, targets, learning_rate, num_iterations, verbose=False):
        """
        Train using the Delta-rule, using a linear function in the perceptron.

        :param inputs: input vector
        :param targets: target vector
        :param learning_rate: this is "eta" given as a fraction
        :param num_iterations: number of batch update sweeps
        :param verbose: print progress information if True
        :return: trained weights of the inputs based on the gradient
        """
        # Append the bias input (+1) as an extra column.
        inputs = np.concatenate((inputs, np.ones((self.n_data, 1))), axis=1)
        if verbose:
            print("Initial inputs: ", inputs)
        # Training: batch gradient descent on the squared-error cost.
        self.cost_function = []
        for n in range(num_iterations):
            self.activations = self.fwd(inputs)
            errors = self.activations - targets
            self.weights -= (learning_rate * inputs.T.dot(errors))
            cost = (errors ** 2).sum() / 2.0
            self.cost_function.append(cost)
            if verbose:
                print("iteration number: ", n)
                print("Weights")
                print(self.weights)
                print("Cost function, appended after each iteration")
                print(self.cost_function)
                print()
        return self.weights

    def fwd_serial(self, single_input_data_row, verbose):
        """Run the network forward for a single bias-augmented data point.

        :param single_input_data_row: (n_in + 1, 1) column vector
        :param verbose: print debugging information if True
        :return: thresholded activation (0 or 1)
        """
        # Compute output activation for this single point.
        output = np.dot(single_input_data_row.T, self.weights)
        if verbose:
            print("double checking shape of fwd_serial calculation: (output) ")
            print(np.shape(np.dot(single_input_data_row.T, self.weights)))
            print("actual output: ", output)
        # Step activation: fire only for a strictly positive output.
        return 1 if output > 0 else 0

    def train_serial_linear(self, inputs, targets, learning_rate, num_iterations, verbose=False):
        """
        Train using the Delta-rule, calculating each input point one at a time
        using a linear function in the perceptron.

        :param inputs: input vector
        :param targets: target vector
        :param learning_rate: this is "eta" given as a fraction
        :param num_iterations: currently unused -- a single sweep over the
            data is performed (kept for interface compatibility)
        :param verbose: print progress information if True
        :return: trained weights of the inputs based on the gradient
        """
        # Append the bias input (+1) as an extra column.
        inputs = np.concatenate((inputs, np.ones((self.n_data, 1))), axis=1)
        if verbose:
            print("Initial inputs: ", inputs)
        # Training
        self.cost_function = []
        self.activations = np.zeros((np.shape(inputs)[0], np.shape(inputs)[1]))
        data_points = np.shape(inputs)[0]
        num_weights = np.shape(self.weights)[0]
        if verbose:
            print("shape of activations matrix: ", np.shape(self.activations))
            print("shape of weights matrix: ", np.shape(self.weights))
            print("shape of inputs matrix: ", np.shape(inputs))
            print("shape of targets matrix: ", np.shape(targets))
            print("data_points ", data_points)
            print("num_weights", num_weights)
            print("self.n_in + 1: ", self.n_in + 1)
        # Loop over the input vector here, one point at a time.
        for data in range(data_points):
            single_input_data_row = np.zeros((self.n_in + 1, 1))
            single_input_data_row[:, 0] = inputs[data].T
            if verbose:
                print("Iterating through the nth data point: ", data)
                print("single input data row: ", single_input_data_row)
                print("shape of input data row: ", np.shape(single_input_data_row))
                print("shape of targets[data] ", np.shape(targets[data]))
            self.activations[data] = self.fwd_serial(single_input_data_row, verbose)
            # BUG FIX: this was hard-coded as np.zeros((3, 1)), which only
            # works for two input features; generalised to n_in + 1.
            error = np.zeros((self.n_in + 1, 1))
            error[:, 0] = self.activations[data] - targets[data]
            if verbose:
                print("error: ", error)
                print("shape of activations error: ", np.shape(error))
            self.weights -= (learning_rate * np.multiply(inputs[data].T, error.T)).T
            cost = (error ** 2).sum() / 2.0
            self.cost_function.append(cost)
        if verbose:
            print("Weights")
            print(self.weights)
            print("Cost function, appended after each iteration")
            print(self.cost_function)
            print()
        return self.weights

    def fwd(self, inputs):
        """Run the network forward.

        :param inputs: (n_data, n_in + 1) array including the bias column
        :return: (n_data, n_out) array of thresholded (0/1) activations
        """
        # Compute activations
        self.activations = np.dot(inputs, self.weights)
        # Threshold the activations
        return np.where(self.activations > 0, 1, 0)

    def confusion_matrix(self, con_inputs, con_targets, verbose=False):
        """Compute and print the confusion matrix for the given data.

        :param con_inputs: (n_data, n_in) inputs without the bias column;
            assumed to contain self.n_data rows (same size as training set)
        :param con_targets: matching targets
        :param verbose: print sample outputs/targets if True
        :return: the thresholded (or argmax-decoded) network outputs
        """
        # Append the bias input. BUG FIX: this previously appended -1 for
        # the bias column, inconsistent with the +1 used by every training
        # method above, which corrupted the evaluation.
        con_inputs = np.concatenate((con_inputs, np.ones((self.n_data, 1))), axis=1)
        con_outputs = np.dot(con_inputs, self.weights)
        num_classes = np.shape(con_targets)[1]
        if num_classes == 1:
            # Single output column: threshold into the two classes 0/1.
            num_classes = 2
            con_outputs = np.where(con_outputs > 0, 1, 0)
        else:
            # 1 of N encoding: decode by taking the most active output.
            con_outputs = np.argmax(con_outputs, 1)
            con_targets = np.argmax(con_targets, 1)
        if verbose:
            print("Outputs at the end of the iterations are: ")
            print(np.transpose(con_outputs[0:5, :]))
            print("The targets for these were: ")
            print(np.transpose(con_targets[0:5, :]))
            print()
        conf_mat = np.zeros((num_classes, num_classes))
        for i in range(num_classes):
            for j in range(num_classes):
                conf_mat[i, j] = np.sum(np.where(con_outputs == i, 1, 0) * np.where(con_targets == j, 1, 0))
        print(conf_mat)
        print(np.trace(conf_mat) / np.sum(conf_mat))
        print("----------------------------------------------")
        return con_outputs
if __name__ == '__main__':
    # Exercise the perceptron on the three classic two-input logic
    # functions. Each dataset row is (x1, x2, y) for four data points;
    # OR and AND are linearly separable, XOR is not and cannot be
    # learned by a single perceptron.
    truth_tables = [
        ("OR", np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])),
        ("AND", np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])),
        ("XOR", np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])),
    ]
    for name, data in truth_tables:
        print("Running the {} function through the perceptron".format(name))
        net = Perceptron(data[:, 0:2], data[:, 2:])
        net.train_step(data[:, 0:2], data[:, 2:], 0.25, 20, verbose=True)
        net.confusion_matrix(data[:, 0:2], data[:, 2:], verbose=True)
| [
"numpy.trace",
"numpy.multiply",
"numpy.random.rand",
"numpy.ones",
"numpy.where",
"numpy.ndim",
"numpy.argmax",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"numpy.shape",
"numpy.transpose"
] | [((10313, 10367), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]'], {}), '([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\n', (10321, 10367), True, 'import numpy as np\n'), ((10388, 10442), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]'], {}), '([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])\n', (10396, 10442), True, 'import numpy as np\n'), ((10463, 10517), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (10471, 10517), True, 'import numpy as np\n'), ((5092, 5137), 'numpy.dot', 'np.dot', (['single_input_data_row.T', 'self.weights'], {}), '(single_input_data_row.T, self.weights)\n', (5098, 5137), True, 'import numpy as np\n'), ((8628, 8656), 'numpy.dot', 'np.dot', (['inputs', 'self.weights'], {}), '(inputs, self.weights)\n', (8634, 8656), True, 'import numpy as np\n'), ((8712, 8748), 'numpy.where', 'np.where', (['(self.activations > 0)', '(1)', '(0)'], {}), '(self.activations > 0, 1, 0)\n', (8720, 8748), True, 'import numpy as np\n'), ((8989, 9021), 'numpy.dot', 'np.dot', (['con_inputs', 'self.weights'], {}), '(con_inputs, self.weights)\n', (8995, 9021), True, 'import numpy as np\n'), ((9632, 9668), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)'], {}), '((num_classes, num_classes))\n', (9640, 9668), True, 'import numpy as np\n'), ((1145, 1160), 'numpy.ndim', 'np.ndim', (['inputs'], {}), '(inputs)\n', (1152, 1160), True, 'import numpy as np\n'), ((1267, 1283), 'numpy.ndim', 'np.ndim', (['targets'], {}), '(targets)\n', (1274, 1283), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (1412, 1420), True, 'import numpy as np\n'), ((6613, 6629), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (6621, 6629), True, 'import numpy as np\n'), ((6656, 6678), 'numpy.shape', 'np.shape', (['self.weights'], {}), '(self.weights)\n', (6664, 6678), True, 'import 
numpy as np\n'), ((7264, 7292), 'numpy.zeros', 'np.zeros', (['(self.n_in + 1, 1)'], {}), '((self.n_in + 1, 1))\n', (7272, 7292), True, 'import numpy as np\n'), ((7825, 7841), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (7833, 7841), True, 'import numpy as np\n'), ((9045, 9066), 'numpy.shape', 'np.shape', (['con_targets'], {}), '(con_targets)\n', (9053, 9066), True, 'import numpy as np\n'), ((9158, 9189), 'numpy.where', 'np.where', (['(con_outputs > 0)', '(1)', '(0)'], {}), '(con_outputs > 0, 1, 0)\n', (9166, 9189), True, 'import numpy as np\n'), ((9263, 9288), 'numpy.argmax', 'np.argmax', (['con_outputs', '(1)'], {}), '(con_outputs, 1)\n', (9272, 9288), True, 'import numpy as np\n'), ((9316, 9341), 'numpy.argmax', 'np.argmax', (['con_targets', '(1)'], {}), '(con_targets, 1)\n', (9325, 9341), True, 'import numpy as np\n'), ((1191, 1207), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (1199, 1207), True, 'import numpy as np\n'), ((1315, 1332), 'numpy.shape', 'np.shape', (['targets'], {}), '(targets)\n', (1323, 1332), True, 'import numpy as np\n'), ((1519, 1560), 'numpy.random.rand', 'np.random.rand', (['(self.n_in + 1)', 'self.n_out'], {}), '(self.n_in + 1, self.n_out)\n', (1533, 1560), True, 'import numpy as np\n'), ((2280, 2305), 'numpy.ones', 'np.ones', (['(self.n_data, 1)'], {}), '((self.n_data, 1))\n', (2287, 2305), True, 'import numpy as np\n'), ((3984, 4009), 'numpy.ones', 'np.ones', (['(self.n_data, 1)'], {}), '((self.n_data, 1))\n', (3991, 4009), True, 'import numpy as np\n'), ((6344, 6369), 'numpy.ones', 'np.ones', (['(self.n_data, 1)'], {}), '((self.n_data, 1))\n', (6351, 6369), True, 'import numpy as np\n'), ((6757, 6783), 'numpy.shape', 'np.shape', (['self.activations'], {}), '(self.activations)\n', (6765, 6783), True, 'import numpy as np\n'), ((6833, 6855), 'numpy.shape', 'np.shape', (['self.weights'], {}), '(self.weights)\n', (6841, 6855), True, 'import numpy as np\n'), ((6904, 6920), 'numpy.shape', 'np.shape', (['inputs'], 
{}), '(inputs)\n', (6912, 6920), True, 'import numpy as np\n'), ((6970, 6987), 'numpy.shape', 'np.shape', (['targets'], {}), '(targets)\n', (6978, 6987), True, 'import numpy as np\n'), ((9449, 9482), 'numpy.transpose', 'np.transpose', (['con_outputs[0:5, :]'], {}), '(con_outputs[0:5, :])\n', (9461, 9482), True, 'import numpy as np\n'), ((9554, 9587), 'numpy.transpose', 'np.transpose', (['con_targets[0:5, :]'], {}), '(con_targets[0:5, :])\n', (9566, 9587), True, 'import numpy as np\n'), ((9901, 9919), 'numpy.trace', 'np.trace', (['conf_mat'], {}), '(conf_mat)\n', (9909, 9919), True, 'import numpy as np\n'), ((9922, 9938), 'numpy.sum', 'np.sum', (['conf_mat'], {}), '(conf_mat)\n', (9928, 9938), True, 'import numpy as np\n'), ((2620, 2640), 'numpy.transpose', 'np.transpose', (['inputs'], {}), '(inputs)\n', (2632, 2640), True, 'import numpy as np\n'), ((5268, 5313), 'numpy.dot', 'np.dot', (['single_input_data_row.T', 'self.weights'], {}), '(single_input_data_row.T, self.weights)\n', (5274, 5313), True, 'import numpy as np\n'), ((6545, 6561), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (6553, 6561), True, 'import numpy as np\n'), ((6566, 6582), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (6574, 6582), True, 'import numpy as np\n'), ((7594, 7625), 'numpy.shape', 'np.shape', (['single_input_data_row'], {}), '(single_input_data_row)\n', (7602, 7625), True, 'import numpy as np\n'), ((7681, 7704), 'numpy.shape', 'np.shape', (['targets[data]'], {}), '(targets[data])\n', (7689, 7704), True, 'import numpy as np\n'), ((8047, 8062), 'numpy.shape', 'np.shape', (['error'], {}), '(error)\n', (8055, 8062), True, 'import numpy as np\n'), ((8116, 8152), 'numpy.multiply', 'np.multiply', (['inputs[data].T', 'error.T'], {}), '(inputs[data].T, error.T)\n', (8127, 8152), True, 'import numpy as np\n'), ((8928, 8953), 'numpy.ones', 'np.ones', (['(self.n_data, 1)'], {}), '((self.n_data, 1))\n', (8935, 8953), True, 'import numpy as np\n'), ((9790, 9822), 
'numpy.where', 'np.where', (['(con_outputs == i)', '(1)', '(0)'], {}), '(con_outputs == i, 1, 0)\n', (9798, 9822), True, 'import numpy as np\n'), ((9825, 9857), 'numpy.where', 'np.where', (['(con_targets == j)', '(1)', '(0)'], {}), '(con_targets == j, 1, 0)\n', (9833, 9857), True, 'import numpy as np\n')] |
"""
A helper class for solving the non-linear time dependent equations
of biofilm growth which includes models of the cell concentration
and also nutrient concentrations in both the substrate and biofilm.
All of these are asumed to be radially symmetric and depend on
r and t, and the article concentration additionally depends on z.
The specific equations solved by this class are described in the
publication:
A Thin-Film Lubrication Model for Biofilm Expansion Under Strong Adhesion,
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
To be submitted soon, 2020.
This work builds upon the model developed by <NAME> in his PhD thesis:
Mathematical Modelling of Pattern Formation in Yeast Biofilms,
<NAME>,
The University of Adelaide, 2019.
Two solvers are currently implemented within the class.
The first, a "decoupled" Crank-Nicolson implementation, denoted DCN,
solves the non-linear system of equations in a weakly coupled manner.
Each equation is solved one at a time (via Newton iterations where
applicable) using the last known/computed solution of any other variables.
The second, a fully coupled Crank-Nicolson implementation, denoted FCN,
solves the complete non-linear system of equations using Newton iterations.
Both use the scipy sparse LU solver to solve the discretised systems
of equations that result from a compact finite difference discretisation
(although iterative solvers for some variables can be toggled through
private class switches). Both methods can be expected to achieve 2nd order
convergence in both space and time.
Compatibility notes:
The code was written in Python3 (3.7.3 specifically) although it
should also work in 2.7.x releases that are not to old as well.
The scientific computing packages numpy and scipy are required.
Again, any version that is not unreasonably old should be fine.
You will probably also want matplotlib for plotting.
Maintainer: <NAME>
Initial development: June-July 2020
Last updated: August 2020
"""
import numpy as np
from scipy.sparse.linalg import spsolve,spilu,LinearOperator,gmres,bicgstab
from scipy.sparse import diags,bmat,coo_matrix
class gmres_counter(object):
    """
    Callback object for monitoring gmres iterations (from scipy.sparse.linalg):
    pass an instance as the ``callback`` argument and each call bumps ``niter``,
    optionally printing the residual. (Useful for debugging purposes.)
    """
    def __init__(self, disp=True):
        self.niter = 0
        self._disp = disp
    def __call__(self, rk=None):
        self.niter += 1
        if not self._disp:
            return
        print('gmres: iteration {:03d} residual = {:s}'.format(self.niter,str(rk)))
class BiofilmTwoDLubricationModel(object):
"""
Helper class for solving the PDEs describing the development of
a radially symmetric and thin yeast biofilm over time.
The model/system that is solved includes the biofilm height,
the cell concentration, and the nutrient concentrations in both
the biofilm and the substrate.
"""
def __init__(self,R=2.0,dr=0.5**7,nxi=33,dt=None,params=None,solver='DCN',verbose=False):
"""
Initialise the class
With no arguments a default problem set up is initialised.
Optionally you may pass the following:
R: The radius of the domain (or petri dish). If not specified
a default value of 2 is used.
dr: The grid spacing used for the discretisation of the domain.
If not specified a default value of 0.5**7 is used.
dt: The time step size, if not specified 0.25*dr is used.
params: Parameters for the system of equations. These should
be passed as a dictionary. Any which are not specified will
be set to a default value (specifically corresponding to
Table 6.1 in Alex's thesis).
solver: specify which solver to use.
verbose: Set to True to output convergence information when solving
"""
# Set up the radial coordinate array
self._r = np.arange(0.0,R+0.5*dr,dr)
self._r_half = self._r[:-1]+0.5*dr
self._nxi = nxi
self._xi = np.linspace(0,1,nxi)
self._R,self._XI = np.meshgrid(self._r,self._xi)
# Set up the parameters
if dt is None:
self._dt = 0.25*dr # this is quite conservative... (and assumes dr<h*dxi)
else:
self._dt = dt
if type(params)==dict:
# Set various parameters depending what is passed,
# those not specified will be set to those Alex used
# in his thesis (Table 6.1 specifically)
self._b = params.get("b",0.0001)
self._H0 = params.get("H0",0.1)
self._Psi_m = params.get("Psi_m",0.111)
self._Psi_d = params.get("Psi_d",0.0)
#self._R = params.get("R",10.0)
#self._T = params.get("T",50.0)
self._gamma_ast = params.get("gamma_ast",1.0)
self._D = params.get("D",1.05)
self._Pe = params.get("Pe",3.94)
self._Upsilon = params.get("Upsilon",3.15)
self._Q_b = params.get("Q_b",8.65)
self._Q_s = params.get("Q_s",2.09)
self._h_ast = params.get("h_ast",0.002)
self._lambda_ast = params.get("lambda_ast",np.inf)
else:
if params is not None:
print("Setting parameters is currently only supported through a dictionary, default values will be used")
# Set various parameters to those Alex used in
# his thesis (Table 6.1 specifically)
self._b = 0.0001
self._H0 = 0.1
self._Psi_m = 0.111
self._Psi_d = 0.0
#self._R = 10.0
#self._T = 50.0
self._gamma_ast = 1.0
self._D = 1.05
self._Pe = 3.94
self._Upsilon = 3.15
self._Q_b = 8.65
self._Q_s = 2.09
self._h_ast = 0.002
self._lambda_ast = np.inf
self.set_solver(solver)
self._verbose = verbose
# Set up the solution arrays with default initial conditions
self._set_default_initial_conditions()
# The following were used in initial debugging and testing and generally need not be changed
self._Phi_n_DCN_solver = 3 # Changes the numerical method used for solving Phi_n
self._FCN_solver_mode = -1 # Change the FCN solver
self._add_top_Phi_bc = False
self._use_artificial_dr_bc = True # untested with False...
# done
    def _set_default_initial_conditions(self):
        """
        Sets the initial conditions to be those described by
        equation 6.22 of <NAME>'s thesis.

        Height: a smooth bump of height H0 confined to r<1 sitting on the
        precursor film of thickness b. Phi_n: cells confined to the same
        region with a (xi^3 - xi^4/2) vertical profile. Nutrients: the
        substrate starts saturated (g_s=1), the biofilm empty (g_b=0).
        """
        self._t = 0  # reset the solution time
        r = self._r
        R = self._R
        XI = self._XI
        # h(r) = b + (H0-b)(1-r^2)^4 for r<1, precursor film b elsewhere
        self._h = self._b + (self._H0-self._b)*(r<1)*(1-r**2)**4
        # Phi_n(xi,r) = (xi^3 - xi^4/2) h(r) (1-3r^2+2r^3) for r<1
        self._Phi_n = (XI**3-0.5*XI**4)*self._h[np.newaxis,:]*(R<1)*(1-3*R**2+2*R**3)
        self._g_s = np.ones(len(self._r))   # substrate nutrient: saturated
        self._g_b = np.zeros(len(self._r))  # biofilm nutrient: depleted
        # done
# add getters and setters
def set_parameters(self,params):
"""
Set the current problem parameters.
Parameters should be passed using a dictionary.
"""
if type(params)==dict:
# Set various parameters depending what is passed,
# those not specified will be set to those Alex used
# in his thesis (Table 6.1 specifically)
self._b = params.get("b",self._b)
self._H0 = params.get("H0",self._H0)
self._Psi_m = params.get("Psi_m",self._Psi_m)
self._Psi_d = params.get("Psi_d",self._Psi_d)
#self._R = params.get("R",self._R)
#self._T = params.get("T",self._T)
self._gamma_ast = params.get("gamma_ast",self._gamma_ast)
self._D = params.get("D",self._D )
self._Pe = params.get("Pe",self._Pe)
self._Upsilon = params.get("Upsilon",self._Upsilon)
self._Q_b = params.get("Q_b",self._Q_b)
self._Q_s = params.get("Q_s",self._Q_s)
self._h_ast = params.get("h_ast",self._h_ast)
self._lambda_ast = params.get("lambda_ast",self._lambda_ast)
else:
print("Setting parameters is currently only supported through a dictionary, existing values will be used")
# done
def get_parameters(self,param=None):
"""
Get the current problem parameters.
If a specific parameter is not requested
then all are returned in a dictionary.
"""
params_dict = {"b":self._b,"H0":self._H0,"Psi_m":self._Psi_m,"Psi_d":self._Psi_d,\
"gamma_ast":self._gamma_ast,"D":self._D,"Pe":self._Pe,\
"Upsilon":self._Upsilon,"Q_b":self._Q_b,"Q_s":self._Q_s,\
"h_ast":self._h_ast,"lambda_ast":self._lambda_ast}
#params_dict["R"] = self._R
#params_dict["T"] = self._T
if param is None:
# return dictionary with all parameters
return params_dict
elif param in params_dict.keys():
return params_dict[param]
else:
print("Requested parameter does not exist")
# done
    def get_r(self):
        """
        Returns the (1D numpy) array of radial grid coordinates r.
        """
        return self._r
    def get_xi(self):
        """
        Returns the (1D numpy) array of scaled vertical coordinates xi
        (which run from 0 to 1).
        """
        return self._xi
    def set_verbosity(self,verbose):
        """
        Set the verbosity for the solvers (True or False).
        When True, convergence information is printed during solves.
        """
        self._verbose = verbose
        # done
def set_h(self,h):
"""
Update the biofilm height h.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class.)
Accepts a callable function h(r), or an array (with correct length).
Note: This will not alter Phi_n=int_0^{h xi} phi_n dz. If it is desired that this
too be changed it should be done separately via set_Phi_n or set_phi_n.
"""
if callable(h):
self._h[:] = h(self._r)
else:
assert len(h)==len(self._r)
self._h[:] = h
# done
    def get_h(self):
        """
        Returns the current biofilm height h (sampled on the radial grid r).
        """
        return self._h
    def set_Phi_n(self,Phi_n):
        """
        Update the cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz).
        For example, use this to set the initial condition.
        (Note this over-writes the current solution in the class.)
        It is expected that Phi_n be provided in the re-scaled coordinates r,xi.
        Accepts a callable, which is invoked as Phi_n(XI,R) -- i.e. with the
        xi mesh as the first argument and the r mesh as the second -- or an
        array with the same shape as the coordinate meshes.
        """
        if callable(Phi_n):
            self._Phi_n[:,:] = Phi_n(self._XI,self._R)
        else:
            assert Phi_n.shape==self._R.shape
            self._Phi_n[:,:] = Phi_n
        # done
    def get_Phi_n(self):
        """
        Returns the current cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz)
        as an (nxi, nr) array.
        (Note this is given with respect to the re-scaled coordinates r,xi.)
        """
        return self._Phi_n
    def get_phi_n_bar(self):
        """
        Returns the vertically averaged cell volume fraction bar{phi_n} =(1/h) int_0^{h} phi_n dz.
        (Note this is given with respect to the re-scaled coordinates r,xi.)
        """
        # Phi_n at xi=1 equals int_0^h phi_n dz, so dividing by h averages it.
        return self._Phi_n[-1,:]/self._h
    def set_phi_n(self,phi_n):
        """
        Update the cell volume fraction phi_n.
        For example, use this to set the initial condition.
        (Note this over-writes the current solution in the class.)
        It is expected that phi_n be provided in re-scaled coordinates r,xi.
        Accepts a callable, which is invoked as phi_n(XI,R) -- i.e. with the
        xi mesh first and the r mesh second -- or an array with the same
        shape as the coordinate meshes.
        Note: This internally updates Phi_n=\int_0^{h xi} phi_n dz using the existing h.
        If h is also to be updated, it should be done first!
        """
        XI,R = self._XI,self._R
        # Cumulative trapezoidal rule in xi (XI[1,0] is the xi spacing):
        # Phi_n(xi) = h * int_0^xi phi_n dxi'
        if callable(phi_n):
            phi_n_int_dxi = XI[1,0]*np.cumsum(0.5*(phi_n(XI,R)[1:,:]+phi_n(XI,R)[:-1,:]),axis=0)
        else:
            assert phi_n.shape==self._R.shape
            phi_n_int_dxi = XI[1,0]*np.cumsum(0.5*(phi_n[1:,:]+phi_n[:-1,:]),axis=0)
        self._Phi_n[0,:] = 0
        self._Phi_n[1:,:] = phi_n_int_dxi*self._h[np.newaxis,:]
        self._Phi_n[(self._h<self._h_ast)[np.newaxis,:]] = 0 # zero areas where h is small
        # done
    def get_phi_n(self):
        """
        Returns the current cell volume fraction phi_n.
        (Note this is given with respect to the re-scaled coordinates r,xi.)

        Recovered from Phi_n via phi_n = (1/h) dPhi_n/dxi, using second
        order central differences in the interior and second order
        one-sided differences at xi=0 and xi=1 (dxi = 1/(nxi-1)).
        """
        phi_n = np.empty_like(self._Phi_n)
        # interior: central difference
        phi_n[1:-1,:] = 0.5*(self._Phi_n[2:,:]-self._Phi_n[:-2,:])*(self._nxi-1)/self._h[np.newaxis,:]
        # boundaries: one-sided three point differences
        phi_n[ 0,:] = 0.5*(-3*self._Phi_n[ 0,:]+4*self._Phi_n[ 1,:]-self._Phi_n[ 2,:])*(self._nxi-1)/self._h[np.newaxis,:]
        phi_n[-1,:] = 0.5*( 3*self._Phi_n[-1,:]-4*self._Phi_n[-2,:]+self._Phi_n[-3,:])*(self._nxi-1)/self._h[np.newaxis,:]
        # zero the volume fraction where the film is thinner than h_ast
        phi_n[:,self._h<self._h_ast] = 0
        return phi_n
def set_g_s(self,g_s):
"""
Update the substrate nutrient concentration g_s.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class)
Accepts a callable function g_s(r), or an array (with correct length).
"""
if callable(g_s):
self._g_s[:] = g_s(self._r)
else:
assert len(g_s)==len(self._r)
self._g_s[:] = g_s
# done
    def get_g_s(self):
        """
        Returns the substrate nutrient concentration g_s (sampled on the radial grid r).
        """
        return self._g_s
def set_g_b(self,g_b):
"""
Update the biofilm nutrient concentration g_b.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class)
Accepts a callable function g_b(r), or an array (with correct length).
"""
if callable(g_b):
self._g_b[:] = g_b(self._r)
else:
assert len(g_b)==len(self._r)
self._g_b[:] = g_b
# done
    def get_g_b(self):
        """
        Returns the biofilm nutrient concentration g_b (sampled on the radial grid r).
        """
        return self._g_b
    def set_dt(self,dt):
        """
        Set/change the time step size (dt) which is used by default
        (i.e. if dt is not specified when solve is called then this value is used)

        :param dt: the new default time step size
        """
        self._dt = dt
        # done
    def get_dt(self):
        """
        Get the current time step size (dt) which is used by default
        (i.e. if dt is not specified when solve is called then this value is used)

        :return: the current default time step size
        """
        return self._dt
def set_t(self,t):
"""
Set/change the current solution time t.
"""
self._t = t
# done
def get_t(self):
"""
Get the current solution time T.
"""
return self._t
# done
# Add private methods relating to the discretisation of the fourth order 'advective' term
    def _advective_term(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
        """
        Finite difference discretisation of:
        prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]

        Parameters: r is the (uniform) radial grid, h the film height on
        that grid, p the exponent of the h mobility factor, f an optional
        extra factor on the grid (defaults to ones), near_boundary selects
        one-sided boundary-aware stencils at indices 1 and -2 (otherwise
        those entries stay zero), and prefix is an optional multiplicative
        prefactor applied to the interior entries.
        Returns an array on the r grid; entries 0 and -1 are always zero.
        """
        r_half = 0.5*(r[1:]+r[:-1])
        # assumes a uniform grid so dr = r[1]-r[0] everywhere -- TODO confirm
        dr = r[1]-r[0]
        h_half = 0.5*(h[1:]+h[:-1])
        # D_half discretises (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] at half nodes
        D_half = (r_half[2:  ]*(h[3:  ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
                -(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
        if f is None:
            f = np.ones(len(r))
            f_half = f[:-1]
        else:
            f_half = 0.5*(f[1:]+f[:-1])
        res = np.empty(len(r))
        res[[0,1,-2,-1]] = 0
        # Conservative flux difference on the interior nodes
        res[2:-2] = r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*D_half[1:] \
                   -r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*D_half[:-1]
        if near_boundary:
            # At index one we expoloit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] for r=0
            D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
            res[1] = 0.5*r[2]*h[2]**p*f[2]*D_p2
            # At index -2 we can exploit that 0 = (dh/dr)
            # The width of the stencil is widened to achieve this though...
            D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
                    -0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
            D_m3o2 = 0.25*(              -r[-3]*(h[-2]-h[-4]))/r[-2]\
                    -0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
            res[-2] = r_half[-1]*h_half[-1]**p*f_half[-1]*D_m3o2 \
                     -r_half[-2]*h_half[-2]**p*f_half[-2]*D_m5o2
        if prefix is not None:
            res[1:-1] *= prefix[1:-1]
        # Division by dr**4 is deferred to the end (one op instead of many)
        return res/dr**4
    def _advective_term_h_gradient(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
        """
        Finite difference discretisation of the gradient of
        prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
        with respect to h.
        Note: the caller is responsible for enforcing boundary conditions

        Returns (diagonals,offsets) describing a banded Jacobian with
        respect to h.  The interior bandwidth is [-2,2]; when near_boundary
        is True extra diagonals are allocated so the one-sided boundary
        stencils (offsets -4..3) can be represented.
        """
        r_half = 0.5*(r[1:]+r[:-1])
        dr = r[1]-r[0]
        h_half = 0.5*(h[1:]+h[:-1])
        # D_half discretises (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] at half nodes
        D_half = (r_half[2:  ]*(h[3:  ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
                -(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
        if f is None:
            f = np.ones(len(r))
            f_half = f[:-1]
        else:
            f_half = 0.5*(f[1:]+f[:-1])
        Dh_diag_p2 = np.empty((len(r)))
        Dh_diag_p1 = np.empty((len(r)))
        Dh_diag_p0 = np.empty((len(r)))
        Dh_diag_m1 = np.empty((len(r)))
        Dh_diag_m2 = np.empty((len(r)))
        Dh_diag_p2[[0,1,-2,-1]] = 0
        Dh_diag_p1[[0,1,-2,-1]] = 0
        Dh_diag_p0[[0,1,-2,-1]] = 0
        Dh_diag_m1[[0,1,-2,-1]] = 0
        Dh_diag_m2[[0,1,-2,-1]] = 0
        # Chain rule, part 1: derivative through the h^p mobility factor
        Dh_diag_p1[2:-2] = r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*f_half[2:-1]*D_half[1:]/dr**4
        Dh_diag_p0[2:-2] = r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*f_half[2:-1]*D_half[1:]/dr**4 \
                          -r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*f_half[1:-2]*D_half[:-1]/dr**4
        Dh_diag_m1[2:-2] = -r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*f_half[1:-2]*D_half[:-1]/dr**4
        # Chain rule, part 2: derivative through the D_half (third derivative) factor
        # I think the following 5 are okay...
        Dh_diag_p2[2:-2]  = r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[3:  ]/r[3:-1])/dr**4
        Dh_diag_p1[2:-2] += -r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[2:-1]/r[2:-2]+2)/dr**4 \
                            -r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[2:-1]/r[2:-2])/dr**4
        Dh_diag_p0[2:-2] += r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[2:-1]/r[3:-1]+2)/dr**4 \
                           +r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[1:-2]/r[1:-3]+2)/dr**4
        Dh_diag_m1[2:-2] += -r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[1:-2]/r[2:-2])/dr**4 \
                            -r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[1:-2]/r[2:-2]+2)/dr**4
        Dh_diag_m2[2:-2]  = r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[ :-3]/r[1:-3])/dr**4
        if near_boundary:
            # Pre-allocate additional diagonals for the boundary terms
            Dh_diag_p3 = np.zeros((len(r)))
            Dh_diag_m3 = np.zeros((len(r)))
            Dh_diag_m4 = np.zeros((len(r)))
            # At index one we expoloit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ]
            D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
            Dh_diag_p1[1]  = 0.5*r[2]*p*h[2]**(p-1)*f[2]*D_p2/dr**4
            Dh_diag_p3[1]  = 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[3]/r[3])/dr**4
            Dh_diag_p2[1]  = -0.5*r[2]*h[2]**p*f[2]/dr**4
            Dh_diag_p1[1] += 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[2]/r[3]-r_half[1]/r[1])/dr**4
            Dh_diag_p0[1]  = -0.5*r[2]*h[2]**p*f[2]/dr**4
            Dh_diag_m1[1]  = 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[0]/r[1])/dr**4
            # At index -2 we can exploit that 0 = (dh/dr)
            # The width of the stencil is widened to achieve this though...
            D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
                    -0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
            D_m3o2 = 0.25*(              -r[-3]*(h[-2]-h[-4]))/r[-2]\
                    -0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
            Dh_diag_p1[-2]  = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[-1]*D_m3o2
            Dh_diag_p0[-2]  = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[-1]*D_m3o2 \
                             -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[-2]*D_m5o2
            Dh_diag_m1[-2]  = -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[-2]*D_m5o2
            # I think the following are okay...
            Dh_diag_p1[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*( r[-2]/r[-3])*0.25/dr**4 \
                             -r_half[-2]*h_half[-2]**p*f_half[-2]*(-r[-2]/r[-3])*0.25/dr**4
            Dh_diag_p0[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*(-r[-3]/r[-4])*0.25/dr**4 \
                             -r_half[-2]*h_half[-2]**p*f_half[-2]*(-r[-3]/r[-2])*0.25/dr**4
            Dh_diag_m1[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*(-2)*0.25/dr**4 \
                             -r_half[-2]*h_half[-2]**p*f_half[-2]*( 2)*0.25/dr**4
            Dh_diag_m2[-2]  = r_half[-1]*h_half[-1]**p*f_half[-1]*( 2)*0.25/dr**4 \
                             -r_half[-2]*h_half[-2]**p*f_half[-2]*( r[-3]/r[-2])*0.25/dr**4
            Dh_diag_m3[-2]  = r_half[-1]*h_half[-1]**p*f_half[-1]*( r[-4]/r[-3])*0.25/dr**4 \
                             -r_half[-2]*h_half[-2]**p*f_half[-2]*( r[-4]/r[-3])*0.25/dr**4
            Dh_diag_m4[-2]  = r_half[-1]*h_half[-1]**p*f_half[-1]*(-r[-5]/r[-4])*0.25/dr**4
            #Dh = diags([Dh_diag_m4[4:],Dh_diag_m3[3:],Dh_diag_m2[2:],Dh_diag_m1[1:],\
            #            Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2],Dh_diag_p3[:-3]],\
            #           [-4,-3,-2,-1,0,1,2,3])
            diagonals = [Dh_diag_m4,Dh_diag_m3,Dh_diag_m2,Dh_diag_m1,\
                         Dh_diag_p0,Dh_diag_p1,Dh_diag_p2,Dh_diag_p3]
            offsets = [-4,-3,-2,-1,0,1,2,3]
        else:
            #Dh = diags([Dh_diag_m2[2:],Dh_diag_m1[1:],Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2]],\
            #           [-2,-1,0,1,2])
            diagonals = [Dh_diag_m2,Dh_diag_m1,Dh_diag_p0,Dh_diag_p1,Dh_diag_p2]
            offsets = [-2,-1,0,1,2]
        if prefix is not None:
            for diagonal in diagonals:
                diagonal[1:-1] *= prefix[1:-1]
        return diagonals,offsets
    def _advective_term_f_gradient(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
        """
        Finite difference discretisation of the gradient of
        prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
        with respect to f.

        Returns (diagonals,offsets) for a tridiagonal Jacobian with respect
        to f, or None when f is None (no f dependence to differentiate).
        """
        if f is None: # This is the only place f is actually used...
            return None
        r_half = 0.5*(r[1:]+r[:-1])
        dr = r[1]-r[0]
        h_half = 0.5*(h[1:]+h[:-1])
        # D_half discretises (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] at half nodes
        D_half = (r_half[2:  ]*(h[3:  ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
                -(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
        #f_half = 0.5*(f[1:]+f[:-1])
        Df_diag_p1 = np.empty((len(r)))
        Df_diag_p0 = np.empty((len(r)))
        Df_diag_m1 = np.empty((len(r)))
        Df_diag_p1[[0,1,-2,-1]] = 0
        Df_diag_p0[[0,1,-2,-1]] = 0
        Df_diag_m1[[0,1,-2,-1]] = 0
        # The term is linear in f so the gradient just drops the f factor
        # (the 0.5 comes from the half-node averaging of f)
        Df_diag_p1[2:-2] = r_half[2:-1]*h_half[2:-1]**p*0.5*D_half[1:]/dr**4
        Df_diag_p0[2:-2] = r_half[2:-1]*h_half[2:-1]**p*0.5*D_half[1:]/dr**4 \
                          -r_half[1:-2]*h_half[1:-2]**p*0.5*D_half[:-1]/dr**4
        Df_diag_m1[2:-2] = -r_half[1:-2]*h_half[1:-2]**p*0.5*D_half[:-1]/dr**4
        if near_boundary:
            # Boundary-aware stencils matching those in _advective_term
            D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
            Df_diag_p1[1] = 0.5*r[2]*h[2]**p*D_p2/dr**4
            D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
                    -0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
            D_m3o2 = 0.25*(              -r[-3]*(h[-2]-h[-4]))/r[-2]\
                    -0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
            Df_diag_p1[-2] = r_half[-1]*h_half[-1]**p*D_m3o2/dr**4
            Df_diag_p0[-2] = -r_half[-2]*h_half[-2]**p*D_m5o2/dr**4
        #Df = diags([Df_diag_m1[1:],Df_diag_p0,Df_diag_p1[:-1]],[-1,0,1])#,format="csr")
        diagonals = [Df_diag_m1,Df_diag_p0,Df_diag_p1]
        offsets = [-1,0,1]
        if prefix is not None:
            for diagonal in diagonals:
                diagonal[1:-1] *= prefix[1:-1]
        return diagonals,offsets
# Add 'private' methods related to the solvers
    def _h_equation_RHS(self,v_old,v_new,dt=None):
        """
        Calculate the RHS vector component corresponding to the height equation.
        The internal time step dt is used if one is not provided.

        v_old and v_new are (h,Phi_n,g_s,g_b) tuples at the old and new time
        levels; the old/new terms are averaged with equal 0.5 weights
        (a trapezoidal, Crank--Nicolson-style residual).
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        b = self._b
        h_ast = self._h_ast
        g_ast = self._gamma_ast
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Initialise rhs vector
        rhs = np.empty(nr)
        rhs[2:-2] = -(h_new[2:-2]-h_old[2:-2])
        # Calculate spatial stencil and add to the rhs
        adv_old = self._advective_term(r,h_old,near_boundary=False)
        adv_new = self._advective_term(r,h_new,near_boundary=False)
        rhs[2:-2] -= 0.5*dt*g_ast/3.0*(adv_old[2:-2]+adv_new[2:-2])/r[2:-2]
        if np.isfinite(lambda_ast): # add slip term if lambda_ast is finite
            adv_old = self._advective_term(r,h_old,p=2,near_boundary=False)
            adv_new = self._advective_term(r,h_new,p=2,near_boundary=False)
            rhs[2:-2] -= 0.5*dt*g_ast/lambda_ast*(adv_old[2:-2]+adv_new[2:-2])/r[2:-2]
        # Add the forcing term (growth where h exceeds the threshold h_ast)
        forcing_old = (h_old>h_ast)*(1.0+Psi_m)*Phi_n_old[-1,:]*g_b_old
        forcing_new = (h_new>h_ast)*(1.0+Psi_m)*Phi_n_new[-1,:]*g_b_new
        rhs[2:-2] += 0.5*dt*(forcing_old[2:-2]+forcing_new[2:-2])
        # Set RHS entries relating to boundary conditions
        # (these mirror the BC rows assembled in _h_equation_LHS0)
        rhs[ 0] = 3.0*h_new[ 0]- 4.0*h_new[ 1]+     h_new[ 2]
        rhs[ 1] = 5.0*h_new[ 0]-18.0*h_new[ 1]+24.0*h_new[ 2]-14.0*h_new[ 3]+3.0*h_new[ 4]
        rhs[-2] = -3.0*h_new[-1]+ 4.0*h_new[-2]-     h_new[-3]
        rhs[-1] = b-h_new[-1]
        # done
        return rhs
    def _h_equation_LHS0(self,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        h dependence in the height equation.
        The internal time step dt is used if one is not provided.

        Returns a sparse banded matrix (offsets -2..3) with boundary
        condition rows at indices 0, 1, -2 and -1.
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        r_half = self._r_half
        g_ast = self._gamma_ast
        h_ast = self._h_ast
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Construct/fetch the diagonal components from the gradient of the fourth order "advective" term
        diagonals,offsets = self._advective_term_h_gradient(r,h_new,near_boundary=False)
        for i in range(len(diagonals)):
            assert offsets[i]==i-2 # sanity check
            diagonals[i][2:-2] *= (0.5*dt*g_ast/3.0)*r[2:-2]**(-1)
        if np.isfinite(lambda_ast): # add slip term if lambda_ast is finite
            diagonals2,offsets2 = self._advective_term_h_gradient(r,h_new,p=2,near_boundary=False)
            for i in range(len(diagonals2)):
                assert offsets2[i]==offsets[i]
                diagonals[i][2:-2] += (0.5*dt*g_ast/lambda_ast)*r[2:-2]**(-1)*diagonals2[i][2:-2]
        # Add to the main diagonal (the identity from the time derivative)
        diagonals[2][2:-2] += 1.0
        # Note: there is no longer a 'forcing term' since h is absorbed into Phi_n
        # Enforce the boundary conditions
        diagonals.append(np.zeros(nr))
        offsets.append(3)
        diagonals[2][ 0] = -3  # first order BC at r=0
        diagonals[3][ 0] =  4
        diagonals[4][ 0] = -1
        diagonals[1][ 1] =  -5 # third order BC at r=0
        diagonals[2][ 1] =  18
        diagonals[3][ 1] = -24
        diagonals[4][ 1] =  14
        diagonals[5][ 1] =  -3
        diagonals[1][-2] =  1  # first order BC at r=R
        diagonals[2][-2] = -4
        diagonals[3][-2] =  3
        diagonals[2][-1] = 1   # Dirichlet BC at r=R
        # Final construction
        A_00 = diags([diagonals[0][2:],diagonals[1][1:],diagonals[2],diagonals[3][:-1],\
                      diagonals[4][:-2],diagonals[5][:-3]],\
                     offsets)#,format="csr")
        return A_00
def _h_equation_LHS1(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
Phi_n dependence in the height equation (Phi_n = int_0^{h xi} phi_n dz).
The internal time step dt is used if one is not provided.
"""
h_ast = self._h_ast
Psi_m = self._Psi_m
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Note: this block has a rectangular shape
nr,nxi = len(self._r),len(self._xi)
row = np.arange(2,nr-2)
col = nxi-1+nxi*row
dat = -0.5*dt*(1.0+Psi_m)*((h_new>h_ast)*g_b_new)[2:-2]
return coo_matrix((dat,(row,col)),shape=(nr,nr*nxi))#.tocsr()
def _h_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the height equation.
The internal time step dt is used if one is not provided.
"""
# Note: there is no g_s dependence
return None
def _h_equation_LHS3(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_b dependence in the height equation.
The internal time step dt is used if one is not provided.
"""
h_ast = self._h_ast
Psi_m = self._Psi_m
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
A_diag = -0.5*dt*(1.0+Psi_m)*(h_new>h_ast)*Phi_n_new[-1,:]
A_diag[[0,1,-2,-1]] = 0
return diags(A_diag)#,format="csr")
    # Private '_alt' variants of the fourth order 'advective' term discretisation which handle a two-dimensional factor f
    def _advective_term_alt(self,r,h,f,near_boundary=True):
        """
        Finite difference discretisation of:
        (d/dr)[ f r (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
        This version handles f which is two dimensional.
        Note the h**p factor and the prefix have been dropped in this alt version.

        f has shape (nxi,nr) and the result matches f's shape; columns 0 and
        -1 (and 1/-2 when near_boundary is False) are zero.
        """
        r_half = 0.5*(r[1:]+r[:-1])
        dr = r[1]-r[0]
        h_half = 0.5*(h[1:]+h[:-1])
        # D_half discretises (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] at half nodes
        D_half = (r_half[2:  ]*(h[3:  ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
                -(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
        f_half = 0.5*(f[:,1:]+f[:,:-1])
        res = np.empty(f.shape)
        res[:,[0,1,-2,-1]] = 0
        res[:,2:-2] = r_half[np.newaxis,2:-1]*D_half[np.newaxis,1:  ]*f_half[:,2:-1] \
                     -r_half[np.newaxis,1:-2]*D_half[np.newaxis, :-1]*f_half[:,1:-2]
        if near_boundary:
            # At index one we expoloit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] for r=0
            D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
            res[:,1] = 0.5*r[2]*D_p2*f[:,2]
            # At index -2 we can exploit that 0 = (dh/dr)
            # The width of the stencil is widened to achieve this though...
            D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
                    -0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
            D_m3o2 = 0.25*(              -r[-3]*(h[-2]-h[-4]))/r[-2]\
                    -0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
            res[:,-2] = r_half[-1]*D_m3o2*f_half[:,-1] \
                       -r_half[-2]*D_m5o2*f_half[:,-2]
        return res/dr**4
    def _advective_term_h_gradient_alt(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
        """
        Finite difference discretisation of the gradient of
        prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
        with respect to h.
        This version handles f which is two dimensional.
        Note: the caller is responsible for enforcing boundary conditions

        Returns (diagonals,offsets); each diagonal has f's (nxi,nr) shape.
        prefix may be one or two dimensional (anything else is ignored).
        """
        r_half = 0.5*(r[1:]+r[:-1])
        dr = r[1]-r[0]
        h_half = 0.5*(h[1:]+h[:-1])
        # D_half discretises (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] at half nodes
        D_half = (r_half[2:  ]*(h[3:  ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
                -(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
        if f is None:
            f = np.ones((1,len(r)))
            f_half = f[:,:-1]
        else:
            f_half = 0.5*(f[:,1:]+f[:,:-1])
        Dh_diag_p2 = np.empty(f.shape)
        Dh_diag_p1 = np.empty(f.shape)
        Dh_diag_p0 = np.empty(f.shape)
        Dh_diag_m1 = np.empty(f.shape)
        Dh_diag_m2 = np.empty(f.shape)
        Dh_diag_p2[:,[0,1,-2,-1]] = 0
        Dh_diag_p1[:,[0,1,-2,-1]] = 0
        Dh_diag_p0[:,[0,1,-2,-1]] = 0
        Dh_diag_m1[:,[0,1,-2,-1]] = 0
        Dh_diag_m2[:,[0,1,-2,-1]] = 0
        # Chain rule, part 1: derivative through the h^p mobility factor
        Dh_diag_p1[:,2:-2] = f_half[:,2:-1]*(r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*D_half[1:]/dr**4)[np.newaxis,:]
        Dh_diag_p0[:,2:-2] = f_half[:,2:-1]*(r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*D_half[1:]/dr**4)[np.newaxis,:] \
                            -f_half[:,1:-2]*(r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*D_half[:-1]/dr**4)[np.newaxis,:]
        Dh_diag_m1[:,2:-2] = -f_half[:,1:-2]*(r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*D_half[:-1]/dr**4)[np.newaxis,:]
        # Chain rule, part 2: derivative through the D_half (third derivative) factor
        # I think the following 5 are okay...
        Dh_diag_p2[:,2:-2]  = f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[3:  ]/r[3:-1])/dr**4)[np.newaxis,:]
        Dh_diag_p1[:,2:-2] += -f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[2:-1]/r[2:-2]+2)/dr**4)[np.newaxis,:] \
                              -f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[2:-1]/r[2:-2])/dr**4)[np.newaxis,:]
        Dh_diag_p0[:,2:-2] += f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[2:-1]/r[3:-1]+2)/dr**4)[np.newaxis,:] \
                             +f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[1:-2]/r[1:-3]+2)/dr**4)[np.newaxis,:]
        Dh_diag_m1[:,2:-2] += -f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[1:-2]/r[2:-2])/dr**4)[np.newaxis,:] \
                              -f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[1:-2]/r[2:-2]+2)/dr**4)[np.newaxis,:]
        Dh_diag_m2[:,2:-2]  = f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[ :-3]/r[1:-3])/dr**4)[np.newaxis,:]
        if near_boundary:
            # Pre-allocate additional diagonals for the boundary terms
            Dh_diag_p3 = np.zeros(f.shape)
            Dh_diag_m3 = np.zeros(f.shape)
            Dh_diag_m4 = np.zeros(f.shape)
            # At index one we expoloit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ]
            D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
            Dh_diag_p1[:,1]  = 0.5*r[2]*p*h[2]**(p-1)*f[:,2]*D_p2/dr**4
            Dh_diag_p3[:,1]  = 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[3]/r[3])/dr**4
            Dh_diag_p2[:,1]  = -0.5*r[2]*h[2]**p*f[:,2]/dr**4
            Dh_diag_p1[:,1] += 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[2]/r[3]-r_half[1]/r[1])/dr**4
            Dh_diag_p0[:,1]  = -0.5*r[2]*h[2]**p*f[:,2]/dr**4
            Dh_diag_m1[:,1]  = 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[0]/r[1])/dr**4
            # At index -2 we can exploit that 0 = (dh/dr)
            # The width of the stencil is widened to achieve this though...
            D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
                    -0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
            D_m3o2 = 0.25*(              -r[-3]*(h[-2]-h[-4]))/r[-2]\
                    -0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
            Dh_diag_p1[:,-2]  = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[:,-1]*D_m3o2
            Dh_diag_p0[:,-2]  = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[:,-1]*D_m3o2 \
                               -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[:,-2]*D_m5o2
            Dh_diag_m1[:,-2]  = -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[:,-2]*D_m5o2
            # I think the following are okay...
            Dh_diag_p1[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*( r[-2]/r[-3])*0.25/dr**4 \
                               -r_half[-2]*h_half[-2]**p*f_half[:,-2]*(-r[-2]/r[-3])*0.25/dr**4
            Dh_diag_p0[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-r[-3]/r[-4])*0.25/dr**4 \
                               -r_half[-2]*h_half[-2]**p*f_half[:,-2]*(-r[-3]/r[-2])*0.25/dr**4
            Dh_diag_m1[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-2)*0.25/dr**4 \
                               -r_half[-2]*h_half[-2]**p*f_half[:,-2]*( 2)*0.25/dr**4
            Dh_diag_m2[:,-2]  = r_half[-1]*h_half[-1]**p*f_half[:,-1]*( 2)*0.25/dr**4 \
                               -r_half[-2]*h_half[-2]**p*f_half[:,-2]*( r[-3]/r[-2])*0.25/dr**4
            Dh_diag_m3[:,-2]  = r_half[-1]*h_half[-1]**p*f_half[:,-1]*( r[-4]/r[-3])*0.25/dr**4 \
                               -r_half[-2]*h_half[-2]**p*f_half[:,-2]*( r[-4]/r[-3])*0.25/dr**4
            Dh_diag_m4[:,-2]  = r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-r[-5]/r[-4])*0.25/dr**4
            #Dh = diags([Dh_diag_m4[4:],Dh_diag_m3[3:],Dh_diag_m2[2:],Dh_diag_m1[1:],\
            #            Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2],Dh_diag_p3[:-3]],\
            #           [-4,-3,-2,-1,0,1,2,3])
            diagonals = [Dh_diag_m4,Dh_diag_m3,Dh_diag_m2,Dh_diag_m1,\
                         Dh_diag_p0,Dh_diag_p1,Dh_diag_p2,Dh_diag_p3]
            offsets = [-4,-3,-2,-1,0,1,2,3]
        else:
            #Dh = diags([Dh_diag_m2[2:],Dh_diag_m1[1:],Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2]],\
            #           [-2,-1,0,1,2])
            diagonals = [Dh_diag_m2,Dh_diag_m1,Dh_diag_p0,Dh_diag_p1,Dh_diag_p2]
            offsets = [-2,-1,0,1,2]
        if prefix is not None:
            # Broadcast a 1D prefix across the xi dimension, or apply a 2D one directly
            if len(prefix.shape)==1:
                for diagonal in diagonals:
                    diagonal[:,1:-1] *= prefix[np.newaxis,1:-1]
            elif len(prefix.shape)==2:
                for diagonal in diagonals:
                    diagonal[:,1:-1] *= prefix[:,1:-1]
            # else do nothing...
        return diagonals,offsets
    def _Phi_n_equation_explicit(self,v_old,dt=None):
        """
        Calculate a simple forward Euler step of the Phi_n equations.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
        The internal time step dt is used if one is not provided.
        Note: This is generally going to be unstable, however I have been able to 'get lucky' with some grid choices.

        v_old is a (h,Phi_n,g_s,g_b) tuple; returns the updated Phi_n array.
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        R,XI = self._R,self._XI
        dxi = XI[1,1]
        gamma_ast = self._gamma_ast
        h_ast = self._h_ast
        Psi_d = self._Psi_d
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        if dt is None:
            dt = self._dt
        # Setup the vertical velocity factor
        # Note: the second line of each v_z terms do not include r=0 or r=R parts, they are not needed regardless
        # The v_z terms also exclude the 1/h factor...
        fot_old = self._advective_term(r,h_old)
        v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
        v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*(fot_old)[np.newaxis,1:-1]
        # Setup the horizontal 'advection' stencil
        # (trapezoidal rule integral of Phi_n*(1-xi) in xi, accumulated from the bottom)
        Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
        integral_old = np.empty(Phi_n_old.shape)
        integral_old[0 ,:] = 0
        integral_old[1:,:] = Phi_int_dxi_old
        f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
        if np.isfinite(lambda_ast):
            # slip contribution when lambda_ast is finite
            f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
        adv_old = self._advective_term_alt(r,h_old,f_old)
        # Initialise the update with the forcing term
        delta_Phi_n = Phi_n_old*(g_b_old-Psi_d)[np.newaxis,:]
        # Add the vertical advection part (note no flux through the top or bottom)
        delta_Phi_n[1:-1,1:-1] -= v_z_old[1:-1,1:-1]/h_old[np.newaxis,1:-1]*(Phi_n_old[2:,1:-1]-Phi_n_old[:-2,1:-1])/(2.0*dxi)
        # Add the horizontal 'advection' part
        delta_Phi_n[:,1:-1] += gamma_ast/r[np.newaxis,1:-1]*adv_old[:,1:-1]
        # Perform the update
        Phi_n_new = Phi_n_old+dt*delta_Phi_n
        # Enforce the boundary conditions post update
        Phi_n_new[:,-1] = 0
        Phi_n_new[:, 0] = (4*Phi_n_new[:,1]-Phi_n_new[:,0])/3.0
        if self._use_artificial_dr_bc: # if artificial 'BC' is also enforced near r=0
            Phi_n_new[:,0] = 0.2*(9*Phi_n_new[:,2]-4*Phi_n_new[:,3])
            Phi_n_new[:,1] = 0.2*(8*Phi_n_new[:,2]-3*Phi_n_new[:,3])
        if False: # if high order BC enforcement at r=0
            Phi_n_new[:,0] = (18*Phi_n_new[:,1]-9*Phi_n_new[:,2]+2*Phi_n_new[:,3])/11.0
        if False: # if both high order BC enforcement at r=0 and additional artificial BC near r=0
            Phi_n_new[:,0] = 0.2*(9*Phi_n_new[:,2]-4*Phi_n_new[:,3]) # (note: it works out same as above...)
            Phi_n_new[:,1] = 0.2*(8*Phi_n_new[:,2]-3*Phi_n_new[:,3])
        Phi_n_new[ 0,:] = 0 # by definition
        #Phi_n_new[-1,:] = 2*Phi_n_new[-2,:]-Phi_n_new[-3,:] # need to do something here? maybe enforce d^2\Phi_n/d\xi^2=0
        Phi_n_new[-1,:] = 2.5*Phi_n_new[-2,:]-2*Phi_n_new[-3,:]+0.5*Phi_n_new[-4,:] # higher order...
        # Zero parts where h is still too small
        Phi_n_new[:,h_old<=h_ast] = 0
        # done
        return Phi_n_new
    def _Phi_n_equation_semi_implicit(self,v_old,dt=None,explicit_r_advection=False):
        """
        Calculate a simple backward Euler step of the Phi_n equations.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
        The internal time step dt is used if one is not provided.
        Note: This is semi-implicit in the sense we linearise the equations to make it somewhat easier to implement.
        This currently works reasonably well in the current form...

        v_old is a (h,Phi_n,g_s,g_b) tuple.  When explicit_r_advection is
        True the radial 'advection' term is moved to the RHS (explicit);
        otherwise it is linearised and assembled into the system matrix.
        Returns the updated Phi_n array (solved on the flattened grid).
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        R,XI = self._R,self._XI
        nxi = len(self._xi)
        dxi = XI[1,1]
        gamma_ast = self._gamma_ast
        h_ast = self._h_ast
        Psi_d = self._Psi_d
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        if dt is None:
            dt = self._dt
        # Initialise the lhs matrix with ones on the main diagonal
        # (naming: A_<row band>_<column band>, e.g. A_p1_m1 couples to r index +1, xi index -1)
        A_p0_p0 = np.ones(Phi_n_old.shape)
        # Initialise the rhs vector with the 'old' Phi_n
        rhs = Phi_n_old.copy()
        # Note: the xi=0 boundary condition should require no changes to the above (since Phi_n_old should be 0 on the bottom)
        rhs[0,:] = 0 # but we set it explicitly to be absolutely clear
        # Note: the same applies to the r=R boundary condition (where we enforce Phi_n=0 since h=b here)
        rhs[:,-1] = 0 # but we again set it explicitly to be absolutely clear
        # For the xi=1 boundary condition we implicitly make the 2nd derivative zero
        A_p0_m1 = np.zeros(Phi_n_old.shape)
        A_p0_m2 = np.zeros(Phi_n_old.shape)
        A_p0_m3 = np.zeros(Phi_n_old.shape) # required for higher order stencil
        A_p0_p0[-1,2:-1] =  2.0 #  1 low order,  2 higher order
        A_p0_m1[-1,2:-1] = -5.0 # -2 low order, -5 higher order
        A_p0_m2[-1,2:-1] =  4.0 #  1 low order,  4 higher order
        A_p0_m3[-1,2:-1] = -1.0 #               -1 higher order
        rhs[-1,2:-1] = 0
        # Now the BC at r=0 (and the artificial one I enforce next to it)
        A_p1_p0 = np.zeros(Phi_n_old.shape)
        A_p2_p0 = np.zeros(Phi_n_old.shape)
        A_m1_p0 = np.zeros(Phi_n_old.shape) # required for the artificial BC
        A_p3_p0 = np.zeros(Phi_n_old.shape) # required for higher order stencil at r=0
        A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
        rhs[:,0] = 0
        if self._use_artificial_dr_bc: # if artificial 'BC' is also enforced near r=0
            A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
            A_m1_p0[:,1] = 4.0;A_p0_p0[:,1] = -7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
            rhs[:,1] = 0
        if False: # if high order BC enforcement at r=0
            A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = - 2.0
        if False: # if both high order BC enforcement at r=0 and additional artificial BC near r=0
            A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = -2.0
            A_m1_p0[:,1] =  4.0;A_p0_p0[:,1] = - 7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
            rhs[:,1] = 0
        # Add the forcing terms on the 'interior' (this need not be implicit really)
        A_p0_p0[1:-1,2:-1] += -dt*(g_b_old-Psi_d)[np.newaxis,2:-1] # implicit forcing...
        #rhs[1:-1,2:-1] += dt*Phi_n_old[1:-1,2:-1]*(g_b_old-Psi_d)[np.newaxis,2:-1] # explicit forcing...
        # Setup the vertical velocity factor
        # Note: the second line of each v_z terms do not include r=0 or r=R parts, they are not needed regardless
        # The v_z terms also exclude the 1/h factor...
        fot_old = self._advective_term(r,h_old)
        v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
        v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*(fot_old)[np.newaxis,1:-1]
        # Now add this to the appropriate diagonals...
        A_p0_p1 = np.zeros(Phi_n_old.shape)
        A_p0_m1[1:-1,2:-1] = -dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # central...
        A_p0_p1[1:-1,2:-1] = +dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
        #A_p0_m1[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # upwinded...
        #A_p0_p0[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
        #A_p0_p0[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
        #A_p0_p1[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
        # Setup the horizontal 'advection' stencil
        if explicit_r_advection: # true - explicit, false - implicit
            Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
            integral_old = np.empty(Phi_n_old.shape)
            integral_old[0 ,:] = 0
            integral_old[1:,:] = Phi_int_dxi_old
            f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
            if np.isfinite(lambda_ast):
                f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
            adv_old = self._advective_term_alt(r,h_old,f_old)
            # Add the horizontal 'advection' part to the system
            # Note: currently this is treated explicitly, which seems to work okay for the most part...
            rhs[1:-1,2:-1] += dt*gamma_ast*adv_old[1:-1,2:-1]/r[np.newaxis,2:-1]
        else:
            # Note: we can re-use the _advective_term_f_gradient function here
            diagonals_h2,offsets_h2 = self._advective_term_f_gradient(r,h_old,2,Phi_n_old)
            assert offsets_h2[0]==-1
            A_m1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
            A_p0_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
            A_p1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
            if np.isfinite(lambda_ast):
                # slip contribution when lambda_ast is finite
                diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_old,1,Phi_n_old)
                assert offsets_h1[0]==-1
                A_m1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
                A_p0_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
                A_p1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
            # Now add the integral component (note this is somewhat denser than usual)
            # (Note: it might be easier to build the entire matrix directly in coo format?)
            r_i,xi_i = np.meshgrid(range(nr),range(nxi))
            indices = xi_i*nr+r_i
            H = (h_old>h_ast) # Use this to zero out bits where h is too small...
            # Trapezoidal-rule coupling of each row to all xi levels below it
            row,col,dat = [],[],[]
            for j in range(1,nxi-1): # exclude the first and last index... (the first is 0 regardless)
                for k in range(j+1):
                    c = 0.5*dxi if (k==0 or k==j) else dxi
                    row.append(indices[j,2:-1])
                    col.append(indices[k,1:-2])
                    dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[0][2:-1]/r[2:-1])
                    row.append(indices[j,2:-1])
                    col.append(indices[k,2:-1])
                    dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[1][2:-1]/r[2:-1])
                    row.append(indices[j,2:-1])
                    col.append(indices[k,3:  ])
                    dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[2][2:-1]/r[2:-1])
            M_trap = coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr*nxi))
        # Zero parts where h is still too small
        h_small = (h_old<=h_ast)
        A_p0_p0[:,h_small] = 1
        rhs[:,h_small] = 0
        A_m1_p0[:,h_small] = 0;A_p1_p0[:,h_small] = 0;A_p2_p0[:,h_small] = 0;#A_p3_p0[:,h_small] = 0;
        A_p0_m3[:,h_small] = 0;A_p0_m2[:,h_small] = 0;A_p0_m1[:,h_small] = 0;A_p0_p1[:,h_small] = 0;
        # Now setup the sparse linear system...
        if explicit_r_advection:
            A_11 = diags([A_p0_p0.ravel(),
                          A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
                          A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
                         [0,
                          -1,1,2,#3,
                          -3*nr,-2*nr,-nr,nr],
                         format="csr")
        else:
            A_11_partial = diags([A_p0_p0.ravel(),
                                  A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
                                  A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
                                 [0,
                                  -1,1,2,#3,
                                  -3*nr,-2*nr,-nr,nr],
                                 format="coo")
            A_11 = (A_11_partial+M_trap).tocsr()
        # Now solve the sparse linear system...
        Phi_n_new = spsolve(A_11,rhs.ravel()).reshape(Phi_n_old.shape)
        # done
        return Phi_n_new
    def _Phi_n_equation_RHS(self,v_old,v_new,dt=None):
        """
        Calculate the RHS vector component corresponding to the Phi_n equation.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
        The internal time step dt is used if one is not provided.

        v_old : tuple (h,Phi_n,g_s,g_b) of fields at the previous time level
        v_new : tuple (h,Phi_n,g_s,g_b) at the current Newton iterate
        dt    : float, optional time step (defaults to self._dt)

        Returns the residual field flattened with ravel, length nr*nxi
        (xi index varying slowest, consistent with the indices = xi_i*nr+r_i
        convention used by the LHS block builders).
        """
        r,xi = self._r,self._xi
        nr,nxi = len(r),len(xi)
        dr,dxi = r[1],xi[1]
        R,XI = self._R,self._XI
        r_half = self._r_half
        h_ast = self._h_ast
        gamma_ast = self._gamma_ast
        Psi_d = self._Psi_d
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Some extra fields for convenience
        H_old,H_new = (h_old>h_ast),(h_new>h_ast) # Use this to zero out bits where h is too small...
        Hor_old,Hor_new = H_old[2:-1]/r[2:-1],H_new[2:-1]/r[2:-1]
        # Setup the rhs field and initialise on the interior with the difference in the fields
        rhs = np.zeros(Phi_n_new.shape)
        rhs[1:-1,2:-1] = -(Phi_n_new[1:-1,2:-1]-Phi_n_old[1:-1,2:-1]*H_old[np.newaxis,2:-1])
        # Note: the H_old in the above line should ensure that delta_Phi will be 0 where-ever h remains small
        # (although it should be redundant since Phi_n_old should be zero here regardless)
        # Add the forcing term (Crank-Nicolson average of old/new states)
        rhs[1:-1,2:-1] += 0.5*dt*( Phi_n_old[1:-1,2:-1]*(H_old*(g_b_old-Psi_d))[np.newaxis,2:-1]\
                                  +Phi_n_new[1:-1,2:-1]*(H_new*(g_b_new-Psi_d))[np.newaxis,2:-1])
        # Setup the vertical velocity factor
        # Note: the second line of each v_z terms do not include r=0 or r=R parts, they are not needed regardless
        # The v_z terms also exclude the 1/h factor...
        fot_new = self._advective_term(r,h_new)
        fot_old = self._advective_term(r,h_old)
        #v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])\
        #         +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*(fot_old)[np.newaxis,:]
        v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
        v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_old[np.newaxis,1:-1]
        #v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
        #         +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
        v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
        v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
        # Add the vertical advection part (note no flux through the top or bottom...
        rhs[1:-1,2:-1] -= 0.25*dt/dxi*( v_z_old[1:-1,2:-1]*(Phi_n_old[2:,2:-1]-Phi_n_old[:-2,2:-1])*(H_old/h_old)[np.newaxis,2:-1]\
                                       +v_z_new[1:-1,2:-1]*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])*(H_new/h_new)[np.newaxis,2:-1])
        # Setup the horizontal 'advection' stencil
        # (trapezoidal cumulative integral of Phi_n*(1-xi) over xi)
        Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
        Phi_int_dxi_new = np.cumsum(0.5*((Phi_n_new*(1-XI))[1:,:]+(Phi_n_new*(1-XI))[:-1,:]),axis=0)*dxi
        integral_old = np.empty(Phi_n_old.shape)
        integral_old[0 ,:] = 0
        integral_old[1:,:] = Phi_int_dxi_old
        integral_new = np.empty(Phi_n_new.shape)
        integral_new[0 ,:] = 0
        integral_new[1:,:] = Phi_int_dxi_new
        f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
        f_new = (Phi_n_new*(0.5*XI**2-XI)+integral_new)*h_new[np.newaxis,:]**2
        if np.isfinite(lambda_ast):
            # finite lambda_ast adds the slip correction to the flux factor
            f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
            f_new -= Phi_n_new*h_new[np.newaxis,:]/lambda_ast
        adv_new = self._advective_term_alt(r,h_new,f_new)
        adv_old = self._advective_term_alt(r,h_old,f_old)
        # Add the horizontal 'advection' part
        rhs[1:-1,2:-1] += 0.5*dt*gamma_ast*( adv_new[1:-1,2:-1]*Hor_new[np.newaxis,:]
                                            +adv_old[1:-1,2:-1]*Hor_old[np.newaxis,:])
        # Set all of the entries relating to boundary conditions
        # Set the RHS corresponding to the \xi=0 boundary condition (delta_Phi+Phi_n_new)=0
        rhs[0,2:-1] = -Phi_n_new[0,2:-1]
        # Set the RHS corresponding to the r=R boundary condition (delta_Phi+Phi_n_new)=0 (since h=b~0 is enforced)
        rhs[:, -1] = -Phi_n_new[:, -1]
        if self._add_top_Phi_bc:
            # Set the RHS corresponding to the \xi=1 boundary condition d^2/dr^2(delta_Phi+Phi_n_new)=0
            rhs[-1,2:-1] = -2*Phi_n_new[-1,2:-1]+5*Phi_n_new[-2,2:-1]-4*Phi_n_new[-3,2:-1]+Phi_n_new[-4,2:-1]
        else:
            # Implement the discretisation of the horizontal advection
            rhs[-1,2:-1] = -(Phi_n_new[-1,2:-1]-Phi_n_old[-1,2:-1]*H_old[2:-1])\
                           +0.5*dt*gamma_ast*(adv_new[-1,2:-1]*Hor_new+adv_old[-1,2:-1]*Hor_old)\
                           +0.5*dt*( Phi_n_old[-1,2:-1]*(H_old*(g_b_old-Psi_d))[2:-1]\
                                    +Phi_n_new[-1,2:-1]*(H_new*(g_b_new-Psi_d))[2:-1])
        # Set the RHS corresponding to the r=0 boundary condition d/dr(delta_Phi+Phi_n_new)=0
        rhs[:, 0] = -3.0*Phi_n_new[:,0]+4.0*Phi_n_new[:,1]-Phi_n_new[:,2]
        if False: # optional, higher order stencil
            rhs[:, 0] = -11*Phi_n_new[:,0]+18*Phi_n_new[:,1]-9*Phi_n_new[:,2]+2*Phi_n_new[:,3]
        if self._use_artificial_dr_bc:
            # Set the RHS corresponding to the introduced r=dr condition Phi(dr)=Phi(0)+0.5*dr^2*Phi''(0)
            rhs[:, 1] = 4*Phi_n_new[:,0]-7*Phi_n_new[:,1]+4*Phi_n_new[:,2]-Phi_n_new[:,3]
        # done
        return rhs.ravel()
    def _Phi_n_equation_LHS0(self,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        h dependence in the Phi_n equation.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)

        Returns a sparse coo_matrix of shape (nr*nxi, nr): rows run over the
        flattened Phi_n unknowns (flat index = xi*nr+r) and columns over the
        nr h unknowns. Built by accumulating (row, col, dat) triplet arrays.
        """
        r,xi = self._r,self._xi
        nr,nxi = len(r),len(xi)
        dr,dxi = r[1],xi[1]
        R,XI = self._R,self._XI
        r_half = self._r_half
        h_ast = self._h_ast
        gamma_ast = self._gamma_ast
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Note this block has rectangular shape
        # Setup some index arrays for constructing the matrix in coo format
        r_i,xi_i = np.meshgrid(range(nr),range(nxi))
        indices = xi_i*nr+r_i
        row,col,dat = [],[],[]
        H = (h_new>h_ast) # Use this to zero out bits where h is too small...
        Hor = H[1:-1]/r[1:-1]
        # Setup the vertical advection components first
        # Do the easier part first
        fot_new = self._advective_term(r,h_new)
        #v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
        #         +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
        v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
        v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
        xi_adv_term1 = -0.25*dt/dxi*v_z_new[1:-1,:]*(Phi_n_new[2:,:]-Phi_n_new[:-2,:])*(H/h_new**2)[np.newaxis,:]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,1:-1].ravel())
        dat.append(xi_adv_term1[:,1:-1].ravel())
        if self._use_artificial_dr_bc:
            #xi_adv_term1[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
            row[-1] = indices[1:-1,2:-1].ravel()
            col[-1] = r_i[1:-1,2:-1].ravel()
            dat[-1] = xi_adv_term1[:,2:-1].ravel()
        # Now the more difficult/involved part...
        # First get diagonals relating to the fourth order h term
        diagonals_h3,offsets_h3 = self._advective_term_h_gradient(r,h_new,3)
        if self._use_artificial_dr_bc:
            # Need to modify diagonals in conjunction with the 'artificial' BC at r=dr
            for k in range(len(diagonals_h3)):
                diagonals_h3[k][1] = 0
        # now construct the 2D factor and then add the diagonals to the matrix
        twoD_factor = 0.25*dt/dxi*gamma_ast*(XI**3-3*XI**2+2*XI)[1:-1,1:-1]*(Phi_n_new[2:,1:-1]-Phi_n_new[:-2,1:-1])\
                      *H[np.newaxis,1:-1]/(6.0*r*h_new)[np.newaxis,1:-1]
        # Each of the 8 diagonals (offsets -4..+3 in r) is scattered into the
        # coo triplets with the appropriate row/col slices
        diag_h3_m4_dat = diagonals_h3[0][np.newaxis,4:-1]*twoD_factor[:,3:]
        row.append(indices[1:-1,4:-1].ravel())
        col.append(r_i[1:-1,0:-5].ravel())
        dat.append(diag_h3_m4_dat.ravel())
        diag_h3_m3_dat = diagonals_h3[1][np.newaxis,3:-1]*twoD_factor[:,2:]
        row.append(indices[1:-1,3:-1].ravel())
        col.append(r_i[1:-1,0:-4].ravel())
        dat.append(diag_h3_m3_dat.ravel())
        diag_h3_m2_dat = diagonals_h3[2][np.newaxis,2:-1]*twoD_factor[:,1:]
        row.append(indices[1:-1,2:-1].ravel())
        col.append(r_i[1:-1,0:-3].ravel())
        dat.append(diag_h3_m2_dat.ravel())
        diag_h3_m1_dat = diagonals_h3[3][np.newaxis,1:-1]*twoD_factor[:,:]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,0:-2].ravel())
        dat.append(diag_h3_m1_dat.ravel())
        diag_h3_p0_dat = diagonals_h3[4][np.newaxis,1:-1]*twoD_factor[:,:]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,1:-1].ravel())
        dat.append(diag_h3_p0_dat.ravel())
        diag_h3_p1_dat = diagonals_h3[5][np.newaxis,1:-1]*twoD_factor[:,:]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,2:].ravel())
        dat.append(diag_h3_p1_dat.ravel())
        diag_h3_p2_dat = diagonals_h3[6][np.newaxis,1:-2]*twoD_factor[:,:-1]
        row.append(indices[1:-1,1:-2].ravel())
        col.append(r_i[1:-1,3:].ravel())
        dat.append(diag_h3_p2_dat.ravel())
        diag_h3_p3_dat = diagonals_h3[7][np.newaxis,1:-3]*twoD_factor[:,:-2]
        row.append(indices[1:-1,1:-3].ravel())
        col.append(r_i[1:-1,4:].ravel())
        dat.append(diag_h3_p3_dat.ravel())
        # Now we need to do the radial 'advective' term
        # First get diagonals relating to the fourth order h term
        Phi_int_dxi_new = np.cumsum(0.5*((Phi_n_new*(1-XI))[1:,:]+(Phi_n_new*(1-XI))[:-1,:]),axis=0)*dxi
        h2_factor = Phi_n_new*(0.5*XI**2-XI)
        h2_factor[1:,:] += Phi_int_dxi_new
        h2_prefix = np.zeros(nr)
        h2_prefix[1:-1] = -0.5*dt*gamma_ast*Hor
        diagonals_h2,offsets_h2 = self._advective_term_h_gradient_alt(r,h_new,2,h2_factor,True,h2_prefix)
        if np.isfinite(lambda_ast):
            # finite lambda_ast adds the slip contribution to the same diagonals
            h1_prefix = np.zeros(nr)
            h1_prefix[1:-1] = 0.5*dt*gamma_ast/lambda_ast*Hor
            diagonals_h1,offsets_h1 = self._advective_term_h_gradient_alt(r,h_new,1,Phi_n_new,True,h1_prefix)
            for k in range(len(diagonals_h2)):
                diagonals_h2[k] += diagonals_h1[k]
        if self._use_artificial_dr_bc:
            # Need to modify diagonals in conjunction with the 'artificial' BC at r=dr
            for k in range(len(diagonals_h2)):
                diagonals_h2[k][:,1] = 0
        diag_h2_m4_dat = diagonals_h2[0][1:-1,4:-1]
        row.append(indices[1:-1,4:-1].ravel())
        col.append(r_i[1:-1,0:-5].ravel())
        dat.append(diag_h2_m4_dat.ravel())
        diag_h2_m3_dat = diagonals_h2[1][1:-1,3:-1]
        row.append(indices[1:-1,3:-1].ravel())
        col.append(r_i[1:-1,0:-4].ravel())
        dat.append(diag_h2_m3_dat.ravel())
        diag_h2_m2_dat = diagonals_h2[2][1:-1,2:-1]
        row.append(indices[1:-1,2:-1].ravel())
        col.append(r_i[1:-1,0:-3].ravel())
        dat.append(diag_h2_m2_dat.ravel())
        diag_h2_m1_dat = diagonals_h2[3][1:-1,1:-1]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,0:-2].ravel())
        dat.append(diag_h2_m1_dat.ravel())
        diag_h2_p0_dat = diagonals_h2[4][1:-1,1:-1]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,1:-1].ravel())
        dat.append(diag_h2_p0_dat.ravel())
        diag_h2_p1_dat = diagonals_h2[5][1:-1,1:-1]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,2:].ravel())
        dat.append(diag_h2_p1_dat.ravel())
        diag_h2_p2_dat = diagonals_h2[6][1:-1,1:-2]
        row.append(indices[1:-1,1:-2].ravel())
        col.append(r_i[1:-1,3:].ravel())
        dat.append(diag_h2_p2_dat.ravel())
        diag_h2_p3_dat = diagonals_h2[7][1:-1,1:-3]
        row.append(indices[1:-1,1:-3].ravel())
        col.append(r_i[1:-1,4:].ravel())
        dat.append(diag_h2_p3_dat.ravel())
        if not self._add_top_Phi_bc:
            # The top (xi=1) row also carries the horizontal advection terms
            row.append(indices[-1,4:-1].ravel())
            col.append(r_i[-1,0:-5].ravel())
            dat.append(diagonals_h2[0][-1,4:-1].ravel())
            row.append(indices[-1,3:-1].ravel())
            col.append(r_i[-1,0:-4].ravel())
            dat.append(diagonals_h2[1][-1,3:-1].ravel())
            row.append(indices[-1,2:-1].ravel())
            col.append(r_i[-1,0:-3].ravel())
            dat.append(diagonals_h2[2][-1,2:-1].ravel())
            row.append(indices[-1,1:-1].ravel())
            col.append(r_i[-1,0:-2].ravel())
            dat.append(diagonals_h2[3][-1,1:-1].ravel())
            row.append(indices[-1,1:-1].ravel())
            col.append(r_i[-1,1:-1].ravel())
            dat.append(diagonals_h2[4][-1,1:-1].ravel())
            row.append(indices[-1,1:-1].ravel())
            col.append(r_i[-1,2:].ravel())
            dat.append(diagonals_h2[5][-1,1:-1].ravel())
            row.append(indices[-1,1:-2].ravel())
            col.append(r_i[-1,3:].ravel())
            dat.append(diagonals_h2[6][-1,1:-2].ravel())
            row.append(indices[-1,1:-3].ravel())
            col.append(r_i[-1,4:].ravel())
            dat.append(diagonals_h2[7][-1,1:-3].ravel())
        # done, construct and return
        return coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr))#.tocsr()
    def _Phi_n_equation_LHS1(self,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        phi_n dependence in the Phi_n equation.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n in place of phi_n)

        Returns a square sparse coo_matrix of shape (nr*nxi, nr*nxi) using
        the flat index convention xi*nr+r. The first entries of the triplet
        lists alias A_p0_p0 via ravel(), so later in-place updates of
        A_p0_p0 are reflected in the assembled matrix.
        """
        r,xi = self._r,self._xi
        nr,nxi = len(r),len(xi)
        dr,dxi = r[1],xi[1]
        R,XI = self._R,self._XI
        r_half = self._r_half
        h_ast = self._h_ast
        gamma_ast = self._gamma_ast
        Psi_d = self._Psi_d
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Setup some index arrays for constructing the matrix in coo format
        r_i,xi_i = np.meshgrid(range(nr),range(nxi))
        indices = xi_i*nr+r_i
        H = (h_new>h_ast) # Use this to zero out bits where h is too small...
        Hor = H[2:-1]/r[2:-1]
        A_p0_p0 = np.ones(Phi_n_new.shape)
        row,col,dat = [indices.ravel()],[indices.ravel()],[A_p0_p0.ravel()] # initialise with a view of A_p0_p0
        # We start by filling out the interior stencils
        # Add the forcing term to the main diagonal
        A_p0_p0[1:-1,2:-1] += -0.5*dt*H[np.newaxis,2:-1]*(g_b_new-Psi_d)[np.newaxis,2:-1]
        # Add the simple non-linear component of the vertical advection term
        A_p0_p0[1:-1,2:-1] += +0.25*dt/dxi*(1+Psi_m)*(H*g_b_new/h_new)[np.newaxis,2:-1]*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])
        # Add the other non-linear component of the vertical advection term
        # (couples every interior row to the top-surface xi=1 unknowns)
        #for k in range(1,nxi-1): # exclude the two ends
        #    row.append(indices[k,2:-1])
        #    col.append(indices[-1,2:-1])
        #    dat.append(-0.25*dt/dxi*(1+Psi_m)*(XI[k]*H*g_b_new/h)[2:-1]*(Phi_n_new[k+1,2:-1]-Phi_n_new[k-1,2:-1]))
        row.append(indices[1:-1,2:-1].ravel())
        col.append(np.broadcast_to(indices[-1,2:-1],(nxi-2,nr-3)).ravel())
        dat.append((-0.25*dt/dxi*(1+Psi_m)*(H*g_b_new/h_new)[np.newaxis,2:-1]*XI[1:-1,2:-1]\
                    *(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])).ravel())
        # Add the remaining vertical advection term
        fot_new = self._advective_term(r,h_new)
        #v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
        #         +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
        v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
        v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
        A_p0_m1 = np.zeros(Phi_n_new.shape)
        A_p0_p1 = np.zeros(Phi_n_new.shape)
        A_p0_m1[1:-1,2:-1] = -0.5*dt/(2.0*dxi)*H[np.newaxis,2:-1]*v_z_new[1:-1,2:-1]/h_new[np.newaxis,2:-1]
        A_p0_p1[1:-1,2:-1] = +0.5*dt/(2.0*dxi)*H[np.newaxis,2:-1]*v_z_new[1:-1,2:-1]/h_new[np.newaxis,2:-1]
        row.append(indices[1:-1,2:-1].ravel());row.append(indices[1:-1,2:-1].ravel())
        col.append(indices[2: ,2:-1].ravel());col.append(indices[ :-2,2:-1].ravel())
        dat.append(A_p0_p1[1:-1,2:-1].ravel());dat.append(A_p0_m1[1:-1,2:-1].ravel())
        # Add the radial 'advective' terms
        # Note: we can re-use the self._advective_term_f_gradient function here
        A_m1_p0 = np.zeros(Phi_n_new.shape)
        A_p1_p0 = np.zeros(Phi_n_new.shape)
        diagonals_h2,offsets_h2 = self._advective_term_f_gradient(r,h_new,2,Phi_n_new)
        assert offsets_h2[0]==-1
        A_m1_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[0][np.newaxis,2:-1]
        A_p0_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[1][np.newaxis,2:-1]
        A_p1_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[2][np.newaxis,2:-1]
        if np.isfinite(lambda_ast):
            # finite lambda_ast adds the slip terms to the same three diagonals
            diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_new,1,Phi_n_new)
            assert offsets_h1[0]==-1
            A_m1_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[0][np.newaxis,2:-1]
            A_p0_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[1][np.newaxis,2:-1]
            A_p1_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[2][np.newaxis,2:-1]
        row.append(indices[1:-1,2:-1].ravel());row.append(indices[1:-1,2:-1].ravel())
        col.append(indices[1:-1,3: ].ravel());col.append(indices[1:-1,1:-2].ravel())
        dat.append(A_p1_p0[1:-1,2:-1].ravel());dat.append(A_m1_p0[1:-1,2:-1].ravel())
        # Now add the integral component (note this is somewhat denser than other components)
        # (trapezoidal weights: half weight at the end points k=0 and k=j)
        for j in range(1,nxi-1): # exclude the first and last index... (the first is 0 regardless)
            for k in range(j+1):
                c = 0.5*dxi if (k==0 or k==j) else dxi
                row.append(indices[j,2:-1])
                col.append(indices[k,1:-2])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[0][2:-1])
                row.append(indices[j,2:-1])
                col.append(indices[k,2:-1])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[1][2:-1])
                row.append(indices[j,2:-1])
                col.append(indices[k,3: ])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[2][2:-1])
        # Now we need to enforce the boundary conditions...
        # When \xi=0 then we want (delta_Phi+Phi_n_new) = 0 ==> delta_Phi = -Phi_n_new
        # ==> so ones on the main diagonal is fine, and rhs needs to be set accordingly.
        # When r=R then we want (delta_Phi+Phi_n_new) = 0 ==> delta_Phi = -Phi_n_new
        # ==> so ones on the main diagonal is fine, and rhs needs to be set accordingly.
        if self._add_top_Phi_bc:
            # When \xi=1 then we want d^2/d\xi^2(delta_Phi+Phi_n_new) = 0
            # ==> stencil 1,-2,_1_ for 1st order, -1,4,-5,_2_ for second order
            A_p0_xi1m1 = np.empty(nr)
            A_p0_xi1m2 = np.empty(nr)
            A_p0_xi1m3 = np.empty(nr)
            A_p0_p0[-1,2:-1] =  2.0 #  1 low order,  2 higher order
            A_p0_xi1m1[2:-1] = -5.0 # -2 low order, -5 higher order
            A_p0_xi1m2[2:-1] =  4.0 #  1 low order,  4 higher order
            A_p0_xi1m3[2:-1] = -1.0 # -1 higher order
            row.append(indices[-1,2:-1]);row.append(indices[-1,2:-1]);row.append(indices[-1,2:-1])
            col.append(indices[-2,2:-1]);col.append(indices[-3,2:-1]);col.append(indices[-4,2:-1])
            dat.append(A_p0_xi1m1[2:-1]);dat.append(A_p0_xi1m2[2:-1]);dat.append(A_p0_xi1m3[2:-1])
        else:
            # Implement the discretisation of the horizontal advection
            A_p0_p0[-1,2:-1] = 1.0-0.5*dt*H[2:-1]*(g_b_new-Psi_d)[2:-1]
            # the radial advective part... (could really just sub XI=1 here...)
            A_m1_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[0][2:-1]
            A_p0_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[1][2:-1]
            A_p1_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[2][2:-1]
            if np.isfinite(lambda_ast):
                #diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_new,1,Phi_n_new) # should already exist...
                #assert offsets_h1[0]==-1
                A_m1_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[0][2:-1]
                A_p0_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[1][2:-1]
                A_p1_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[2][2:-1]
            row.append(indices[-1,2:-1].ravel());row.append(indices[-1,2:-1].ravel())
            col.append(indices[-1,3: ].ravel());col.append(indices[-1,1:-2].ravel())
            dat.append(A_p1_p0[-1,2:-1].ravel());dat.append(A_m1_p0[-1,2:-1].ravel())
            # Now add the integral component (note this is somewhat denser than other components)
            j = nxi-1
            for k in range(j+1):
                c = 0.5*dxi if (k==0 or k==j) else dxi
                row.append(indices[j,2:-1])
                col.append(indices[k,1:-2])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[0][2:-1])
                row.append(indices[j,2:-1])
                col.append(indices[k,2:-1])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[1][2:-1])
                row.append(indices[j,2:-1])
                col.append(indices[k,3: ])
                dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[2][2:-1])
        # When r=0 we want to enforce d/dr(delta_Phi+Phi_n_new) = 0
        # ==> stencil _3_,-4,1 for second order
        A_r0p1_p0 = np.empty(nxi)
        A_r0p2_p0 = np.empty(nxi)
        A_p0_p0[:,0] = 3.0
        A_r0p1_p0[:] = -4.0
        A_r0p2_p0[:] = 1.0
        row.append(indices[:,0]);row.append(indices[:,0])
        col.append(indices[:,1]);col.append(indices[:,2])
        dat.append(A_r0p1_p0);   dat.append(A_r0p2_p0)
        if False:
            # Could implement optional higher order stencil for r=0 here, but not really needed...
            pass
        if self._use_artificial_dr_bc:
            # When r=dr we also enforce Phi(dr)=Phi(0)+0.5*dr^2*Phi''(0) (to smooth things out a bit here)
            # ==> stencil -4,_7_,-4,1 (derived from a forward 2nd order stencil for Phi''(0))
            A_r1m1_p0 = np.empty(nxi)
            A_r1p1_p0 = np.empty(nxi)
            A_r1p2_p0 = np.empty(nxi)
            A_r1m1_p0[:] = -4.0
            A_p0_p0[:,1] = 7.0
            A_r1p1_p0[:] = -4.0
            A_r1p2_p0[:] = 1.0
            row.append(indices[:,1]);row.append(indices[:,1]);row.append(indices[:,1])
            col.append(indices[:,0]);col.append(indices[:,2]);col.append(indices[:,3])
            dat.append(A_r1m1_p0);   dat.append(A_r1p1_p0);   dat.append(A_r1p2_p0)
        # Final constructions
        A_11 = coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr*nxi))
        # done
        return A_11
def _Phi_n_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_new must contain Phi_n rather than phi_n)
"""
# Note: there is no dependence on g_s
return None
    def _Phi_n_equation_LHS3(self,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        g_b dependence in the Phi_n equation.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_new must contain Phi_n rather than phi_n)

        Returns a rectangular sparse coo_matrix of shape (nr*nxi, nr):
        rows run over the flattened Phi_n unknowns (flat index = xi*nr+r)
        and columns over the nr g_b unknowns.
        """
        nr,nxi = len(self._r),len(self._xi)
        XI = self._XI
        dxi = XI[1,1]
        Psi_m = self._Psi_m
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        H = (h_new>self._h_ast) # Use this to zero out bits where h is too small...
        # Note: This block has a rectangular shape
        r_i,xi_i = np.meshgrid(np.arange(nr),np.arange(nxi))
        indices = r_i+nr*xi_i
        row,col,dat = [],[],[]
        # First add the component coming from the forcing term
        forcing_term = -0.5*dt*Phi_n_new*H
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,1:-1].ravel())
        dat.append(forcing_term[1:-1,1:-1].ravel())
        if self._use_artificial_dr_bc:
            #forcing_term[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
            row[-1] = indices[1:-1,2:-1].ravel()
            col[-1] = r_i[1:-1,2:-1].ravel()
            dat[-1] = forcing_term[1:-1,2:-1].ravel()
        # Now add the component coming from the vertical advection term
        # Note: the following term (from the vertical advection) excludes xi=0 and xi=1 entries
        xi_adv_term = 0.25*dt/dxi*(1+Psi_m)*(Phi_n_new[1:-1,:]-XI[1:-1,:]*(Phi_n_new[-1,:])[np.newaxis,:])\
                      *(Phi_n_new[2:,:]-Phi_n_new[:-2,:])*(H/h_new)[np.newaxis,:]
        row.append(indices[1:-1,1:-1].ravel())
        col.append(r_i[1:-1,1:-1].ravel())
        dat.append(xi_adv_term[:,1:-1].ravel())
        if self._use_artificial_dr_bc:
            #xi_adv_term[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
            row[-1] = indices[1:-1,2:-1].ravel()
            col[-1] = r_i[1:-1,2:-1].ravel()
            dat[-1] = xi_adv_term[:,2:-1].ravel()
        if not self._add_top_Phi_bc:
            # Add forcing on top row...
            row.append(indices[-1,1:-1].ravel())
            col.append(r_i[-1,1:-1].ravel())
            dat.append(forcing_term[-1,1:-1].ravel())
        # done, construct and return
        return coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr))#.tocsr()
def _g_s_equation_RHS(self,v_old,v_new,dt=None):
"""
Calculate the RHS vector component corresponding to the g_s equation.
"""
r = self._r
nr = len(r)
dr = r[1]
r_half = self._r_half
h_ast = self._h_ast
D = self._D
Q_s = self._Q_s
h_old,phi_n_old,g_s_old,g_b_old = v_old
h_new,phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Calculate spatial stencil and add to the interior of the rhs vector
rhs = np.empty(nr)
rhs[1:-1] = g_s_old[1:-1]-g_s_new[1:-1]
rhs[1:-1] += (0.5*dt*D/dr**2)*( r_half[1:]*(g_s_old[2:]-g_s_old[1:-1]) \
+r_half[1:]*(g_s_new[2:]-g_s_new[1:-1]) \
-r_half[:-1]*(g_s_old[1:-1]-g_s_old[:-2]) \
-r_half[:-1]*(g_s_new[1:-1]-g_s_new[:-2]))/r[1:-1]
# Now the forcing term
rhs[1:-1] -= 0.5*dt*D*Q_s*( (h_old>h_ast)*(g_s_old-g_b_old)\
+(h_new>h_ast)*(g_s_new-g_b_new))[1:-1]
# Now set the appropriate boundary values ( (d/dr)g_s=0 at both r=0 and r=r_max )
rhs[ 0] = -3.0*g_s_new[ 0]+4.0*g_s_new[ 1]-g_s_new[ 2]
rhs[-1] = 3.0*g_s_new[-1]-4.0*g_s_new[-2]+g_s_new[-3]
# done
return rhs
def _g_s_equation_LHS0(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
h dependence in the g_s equation.
"""
# There is no dependence (we don't discretise the gradient of the heavyside function)
return None
def _g_s_equation_LHS1(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
Phi_n dependence in the g_s equation.
"""
# There is no dependence
return None
def _g_s_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the g_s equation.
"""
r = self._r
nr = len(r)
dr = r[1]
r_half = self._r_half
h_ast = self._h_ast
D = self._D
Q_s = self._Q_s
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Pre-allocate diagonals for the sparse matrix construction
A_diag_m2 = np.zeros(nr) # for upwind component
A_diag_m1 = np.empty(nr)
A_diag_p0 = np.ones(nr) # note the ones
A_diag_p1 = np.empty(nr)
A_diag_p2 = np.zeros(nr) # for boundary components
# Set the entries which enforce the BC at r=0 and r=r_max (these don't change)
A_diag_p0[ 0] = 3.0
A_diag_p1[ 0] = -4.0
A_diag_p2[ 0] = 1.0
A_diag_m2[-1] = -1.0
A_diag_m1[-1] = 4.0
A_diag_p0[-1] = -3.0
# Set up the diagonals relating to the interior
A_diag_m1[1:-1] = -(0.5*dt*D/dr**2)*r_half[:-1]/r[1:-1]
A_diag_p0[1:-1] += (0.5*dt*D/dr**2)*2.0 # 2.0=(r_half[1:]+r_half[:-1])/r[1:-1]
A_diag_p1[1:-1] = -(0.5*dt*D/dr**2)*r_half[1:]/r[1:-1]
# Now add the forcing component
A_diag_p0[1:-1] += (0.5*dt*D*Q_s)*(h_new>h_ast)[1:-1]
# Final construction
A_22 = diags([A_diag_m2[2:],A_diag_m1[1:],A_diag_p0,A_diag_p1[:-1],A_diag_p2[:-2]],\
[-2,-1,0,1,2])#,format="csr")
return A_22
def _g_s_equation_LHS3(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_b dependence in the g_s equation.
"""
h_ast = self._h_ast
D = self._D
Q_s = self._Q_s
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
A_diag = -0.5*dt*D*Q_s*(h_new>h_ast)
A_diag[[0,-1]] = 0
return diags(A_diag)#,format="csr")
    def _g_b_equation_RHS(self,v_old,v_new,dt=None):
        """
        Calculate the RHS vector component corresponding to the g_b equation.

        v_old : tuple (h,Phi_n,g_s,g_b) of fields at the previous time level
        v_new : tuple (h,Phi_n,g_s,g_b) at the current Newton iterate
        dt    : float, optional time step (defaults to self._dt)

        Returns a length nr residual vector; the end (and optionally r=dr)
        entries encode the boundary conditions.
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        r_half = self._r_half
        g_ast = self._gamma_ast
        h_ast = self._h_ast
        Pe = self._Pe
        Q_b = self._Q_b
        Upsilon = self._Upsilon
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Additional terms for convenience
        H_old = (h_old>h_ast)
        H_new = (h_new>h_ast)
        h_old_half = 0.5*(h_old[:-1]+h_old[1:])
        h_new_half = 0.5*(h_new[:-1]+h_new[1:])
        # Initialise the rhs vector with the advective terms...
        prefix = np.empty(nr)
        prefix[[0,-1]] = 0
        prefix[1:-1] = (-0.5*dt*Pe*g_ast/3.0)*r[1:-1]**(-1)
        #adv_new = self._advective_term(r,h_new,3,g_b_new*(1-phi_n_new),True,prefix)
        #adv_old = self._advective_term(r,h_old,3,g_b_old*(1-phi_n_old),True,prefix)
        adv_new = self._advective_term(r,h_new,3,g_b_new,True,prefix*H_new)
        adv_old = self._advective_term(r,h_old,3,g_b_old,True,prefix*H_old)
        rhs = adv_new+adv_old
        if np.isfinite(lambda_ast): # add the slip terms
            prefix[1:-1] *= 3.0/lambda_ast
            adv_new = self._advective_term(r,h_new,2,g_b_new,True,prefix*H_new)
            adv_old = self._advective_term(r,h_old,2,g_b_old,True,prefix*H_old)
            rhs += adv_new+adv_old
        # Now add the easy terms
        # (time difference weighted by the averaged film height, Pe factor)
        rhs[1:-1] += -Pe*0.5*(h_old+h_new)[1:-1]*(g_b_new-g_b_old)[1:-1]
        rhs[1:-1] += 0.5*dt*Q_b*( H_old*(g_s_old-g_b_old) \
                                 +H_new*(g_s_new-g_b_new))[1:-1]
        # The following term no longer has h as it is absorbed into Phi_n
        rhs[1:-1] -= 0.5*dt*Upsilon*( H_old*Phi_n_old[-1,:]*g_b_old \
                                     +H_new*Phi_n_new[-1,:]*g_b_new)[1:-1]
        # Now the second order g_b term
        rhs[1:-1] += (0.5*dt/dr**2)*( H_old[1:-1]*r_half[1: ]*h_old_half[1: ]*(g_b_old[2: ]-g_b_old[1:-1]) \
                                     -H_old[1:-1]*r_half[ :-1]*h_old_half[ :-1]*(g_b_old[1:-1]-g_b_old[ :-2]) \
                                     +H_new[1:-1]*r_half[1: ]*h_new_half[1: ]*(g_b_new[2: ]-g_b_new[1:-1]) \
                                     -H_new[1:-1]*r_half[ :-1]*h_new_half[ :-1]*(g_b_new[1:-1]-g_b_new[ :-2]))/r[1:-1]
        """
        # Perhaps need to treat the whole thing similar to the h equation,
        # e.g. by introducing g_b_old_half,phi_n_new_half,etc.
        # but I would then also need biased stencils near the ends anyway...
        if False:
            inds = 1-H_new
            #inds = 1-H_old
            rhs[inds] = 0
        """
        # Lastly set the appropriate boundary values ( (d/dr)g_s=0 at both r=0 and r=r_max )
        rhs[ 0] = -3.0*g_b_new[ 0]+4.0*g_b_new[ 1]-g_b_new[ 2]
        rhs[-1] =  3.0*g_b_new[-1]-4.0*g_b_new[-2]+g_b_new[-3]
        if self._use_artificial_dr_bc:
            # Add an artificial BC, for the same reason as Phi_n, to avoid dealing with the wide h stencil there...
            rhs[ 1] = 4*g_b_new[0]-7*g_b_new[1]+4*g_b_new[2]-g_b_new[3]
        # done
        return rhs
    def _g_b_equation_LHS0(self,g_b_old,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        h dependence in the g_b equation.

        g_b_old is needed because the time-difference term couples the main
        diagonal to (g_b_new-g_b_old). Returns a banded sparse matrix built
        from the advective-term h-gradient diagonals (offsets -4..+3) plus
        the second order g_b term and time-difference contributions.
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        r_half = self._r_half
        g_ast = self._gamma_ast
        h_ast = self._h_ast
        Pe = self._Pe
        lambda_ast = self._lambda_ast
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Additional vector for convenience
        H_over_r = (h_new>h_ast)[1:-1]/r[1:-1]
        # Calculate/fetch the diagonals from the advective term
        prefix = np.empty(nr)
        prefix[[0,-1]] = 0
        prefix[1:-1] = (0.5*dt*Pe*g_ast/3.0)*H_over_r
        diagonals,offsets = self._advective_term_h_gradient(r,h_new,3,g_b_new,True,prefix)
        for i in range(len(diagonals)):
            assert offsets[i]==i-4 # sanity check
        if np.isfinite(lambda_ast): # add the slip terms
            prefix[1:-1] *= 3.0/lambda_ast
            diagonals2,offsets2 = self._advective_term_h_gradient(r,h_new,2,g_b_new,True,prefix)
            for i in range(len(diagonals2)):
                assert offsets2[i]==offsets[i]
                diagonals[i][1:-1] += diagonals2[i][1:-1]
        # Add the parts from the second order g_b term
        diagonals[3][1:-1] += -(0.25*dt/dr**2)*(-r_half[:-1]*(g_b_new[1:-1]-g_b_new[ :-2]))*H_over_r
        diagonals[4][1:-1] -=  (0.25*dt/dr**2)*(-r_half[:-1]*(g_b_new[1:-1]-g_b_new[ :-2]) \
                                                +r_half[1: ]*(g_b_new[2: ]-g_b_new[1:-1]))*H_over_r
        diagonals[5][1:-1] += -(0.25*dt/dr**2)*( r_half[1: ]*(g_b_new[2: ]-g_b_new[1:-1]))*H_over_r
        # Add the additional parts to the main diagonal
        diagonals[4][1:-1] += Pe*0.5*(g_b_new-g_b_old)[1:-1]
        """
        #
        if False:
            inds = 1-H_new
            #inds = (h_old<=h_ast) # not available currently
            for i in range(len(diagonals)):
                diagonals[i][inds] = 0
        """
        if self._use_artificial_dr_bc:
            # Related to an artificial BC
            for diag in diagonals:
                diag[1] = 0
        # Final construction
        A_30 = diags([diagonals[0][4:],diagonals[1][3:],diagonals[2][2:],diagonals[3][1:],diagonals[4],\
                      diagonals[5][:-1],diagonals[6][:-2],diagonals[7][:-3]],\
                     offsets)#,format='csr')
        return A_30
def _g_b_equation_LHS1(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
Phi_n dependence in the g_b equation.
(Note: this is much simplified with the
modification of the g_b equation.)
"""
h_ast = self._h_ast
Upsilon = self._Upsilon
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Note: this block has a rectangular shape
nr,nxi = len(self._r),len(self._xi)
row = np.arange(1,nr-1)
col = nxi-1+nxi*row
dat = 0.5*dt*Upsilon*((h_new>h_ast)*g_b_new)[1:-1]
if self._use_artificial_dr_bc:
# Related to the enforcement of an artificial BC at r=dr
#dat[1] = 0
row = row[1:]
col = col[1:]
dat = dat[1:]
return coo_matrix((dat,(row,col)),shape=(nr,nr*nxi))#.tocsr()
def _g_b_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the g_b equation.
"""
h_ast = self._h_ast
Q_b = self._Q_b
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
A_diag = -0.5*dt*Q_b*(h_new>h_ast)
A_diag[[0,-1]] = 0
if self._use_artificial_dr_bc:
# Related to an artificial BC
A_diag[1] = 0
return diags(A_diag)#,format='csr')
    def _g_b_equation_LHS3(self,h_old,v_new,dt=None):
        """
        Calculate the LHS matrix block corresponding to the
        g_b dependence in the g_b equation.

        h_old is needed because the time derivative is discretised with the
        average 0.5*(h_old+h_new) multiplying the g_b difference. Returns a
        pentadiagonal sparse matrix (offsets -2..+2) combining the advective
        f-gradient diagonals, the exchange/forcing terms, the second order
        g_b term and the boundary-condition rows.
        """
        r = self._r
        nr = len(r)
        dr = r[1]
        r_half = self._r_half
        g_ast = self._gamma_ast
        h_ast = self._h_ast
        Pe = self._Pe
        Q_b = self._Q_b
        Upsilon = self._Upsilon
        lambda_ast = self._lambda_ast
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Additional vector for convenience
        h_half = 0.5*(h_new[1:]+h_new[:-1])
        H_new = h_new>h_ast
        H_over_r = H_new[1:-1]/r[1:-1]
        # Calculate/fetch the diagonals from the advective term
        prefix = np.empty(nr)
        prefix[[0,-1]] = 0
        prefix[1:-1] = (0.5*dt*Pe*g_ast/3.0)*r[1:-1]**(-1)
        diagonals,offsets = self._advective_term_f_gradient(r,h_new,3,g_b_new,True,prefix*H_new)
        if np.isfinite(lambda_ast): # add the slip terms
            prefix[1:-1] *= 3.0/lambda_ast
            diagonals2,offsets2 = self._advective_term_f_gradient(r,h_new,2,g_b_new,True,prefix*H_new)
            for i in range(len(diagonals2)):
                assert offsets2[i]==offsets[i]
                diagonals[i][1:-1] += diagonals2[i][1:-1]
        # Pad to a pentadiagonal structure (add empty -2 and +2 diagonals)
        diagonals.insert(0,np.zeros(nr))
        offsets.insert(0,-2)
        diagonals.append(np.zeros(nr))
        offsets.append(2)
        # Add the appropriate terms to the main diagonal
        diagonals[2][1:-1] += 0.5*Pe*(h_old+h_new)[1:-1] # Note h_old is needed here with my current discretisation
        diagonals[2][1:-1] += 0.5*dt*Q_b*H_new[1:-1]
        diagonals[2][1:-1] += 0.5*dt*Upsilon*H_new[1:-1]*Phi_n_new[-1,1:-1]
        # Now the diagonal components for the second order g_b term
        diagonals[1][1:-1] -=  (0.5*dt/dr**2)*r_half[:-1]*h_half[:-1]*H_over_r
        diagonals[2][1:-1] -= -(0.5*dt/dr**2)*(r_half[1: ]*h_half[1: ]+r_half[:-1]*h_half[:-1])*H_over_r
        diagonals[3][1:-1] -=  (0.5*dt/dr**2)*r_half[1: ]*h_half[1: ]*H_over_r
        """
        #
        if False:
            inds = 1-H_new
            #inds = (h_old<=h_ast)
            for i in range(len(diagonals)):
                diagonals[i][inds] = (0 if offsets[i]!=2 else 1)
        """
        # Set the entries which enforce the BC at r=0 and r=r_max (these don't change)
        diagonals[2][ 0] = 3.0
        diagonals[3][ 0] = -4.0
        diagonals[4][ 0] = 1.0
        diagonals[0][-1] = -1.0
        diagonals[1][-1] = 4.0
        diagonals[2][-1] = -3.0
        if self._use_artificial_dr_bc:
            # Add an artificial BC, for the same reason as Phi_n
            diagonals[1][ 1] = -4
            diagonals[2][ 1] = 7
            diagonals[3][ 1] = -4
            diagonals[4][ 1] = 1
        # Final construction
        A_33 = diags([diagonals[0][2:],diagonals[1][1:],diagonals[2],diagonals[3][:-1],diagonals[4][:-2]],\
                     offsets)#,format='csr')
        return A_33
# public methods for solving
def set_solver(self,solver):
"""
Set/change the solver used by the class
"""
if solver in ['DCN','FCN']:
self._solver = solver
else:
print('Warning: Requested solver does not exist, falling back to DCN')
self._solver = 'DCN' # default
# done
def solve(self,T,dt=None):
"""
Solve the biofilm evolution for a duration T (from the current time)
Optional: dt can be provided to override that stored internally.
"""
# Run a solver based on self._solver
if self._solver=='DCN':
# A de-coupled non-linear Crank-Nicolson solver
# (Only h and phi_n are non-linear given the decoupling)
self._decoupled_Crank_Nicolson(T,dt)
if self._solver=='FCN':
# The fully coupled non-linear Crank-Nicolson solver
self._full_Crank_Nicolson(T,dt)
return self._h,self._Phi_n,self._g_s,self._g_b
    def _decoupled_Crank_Nicolson(self,T,dt=None):
        """
        This solves the non-linear system equations using Newton iterations
        on each field individually. Consequently the four fields of
        interest are only weakly coupled through the time stepping.
        This appears to be fine for all parameters ranges we have
        considered, and is much cheaper computationally.
        The private switch _Phi_n_DCN_solver may be used to toggle
        through a few different modifications
        of the Phi_n solver.

        Parameters:
        T  : duration to advance the solution from the current time
        dt : optional time step; when None the internally stored self._dt is used

        Side effects: updates self._h, self._Phi_n, self._g_s, self._g_b
        in place after each accepted time step.
        """
        # Setup...
        # v_old aliases the current (old time level) fields; the *_new copies
        # hold the next-time-level iterates and are updated in place below.
        v_old = [self._h,self._Phi_n,self._g_s,self._g_b]
        h_new     = self._h.copy()
        Phi_n_new = self._Phi_n.copy()
        g_s_new   = self._g_s.copy()
        g_b_new   = self._g_b.copy()
        v_new = [h_new,Phi_n_new,g_s_new,g_b_new]
        if dt is None:
            dt = self._dt
        # Define the newton iteration
        # NOTE(review): the mutable default order=[0,1,2,3] is never mutated
        # inside the function, so the shared-default pitfall does not bite here.
        def decoupled_newton_iterate(v_old,v_new,dt,order=[0,1,2,3],newt_its=20,newt_tol=1.0E-8,newt_verbose=self._verbose):
            """
            Construct the sparse blocks and RHS vectors for a Newton iteration
            (based on a de-coupled Crank-Nicolson discretisation)
            """
            h_old,Phi_n_old,g_s_old,g_b_old = v_old
            h_new,Phi_n_new,g_s_new,g_b_new = v_new
            # Solve each field's equation in turn (blocks 0..3 = h,Phi_n,g_s,g_b),
            # iterating each block to convergence before moving to the next.
            for block in order:
                for k in range(newt_its):
                    if block==0:
                        # h equation rows block matrices and rhs vector
                        b_0  = self._h_equation_RHS(v_old,v_new,dt)
                        A_00 = self._h_equation_LHS0(v_new,dt)
                        # Solve the linear system and update current guess
                        dh = spsolve(A_00.tocsr(),b_0)
                        h_new += dh
                        eps = np.linalg.norm(dh)/np.linalg.norm(h_new)
                    elif block==1:
                        # eps = -1 in the first three branches signals a single
                        # explicit/linear update: it is below any tolerance, so
                        # the convergence check terminates the k-loop immediately.
                        if self._Phi_n_DCN_solver==0:
                            # Explicit Phi_n iterate
                            Phi_n_new[:,:] = self._Phi_n_equation_explicit(v_old,dt)
                            eps = -1
                        elif self._Phi_n_DCN_solver==1:
                            # Semi-implicit Phi_n iterate (but explicit in the d/dr term)
                            #Phi_n_new[:,:] = self._Phi_n_equation_semi_implicit(v_old,dt,explicit_r_advection=True)
                            Phi_n_new[:,:] = self._Phi_n_equation_semi_implicit([h_new,Phi_n_old,g_s_old,g_b_old],
                                                                                dt,explicit_r_advection=True)
                            eps = -1
                        elif self._Phi_n_DCN_solver==2:
                            # Implicit Phi_n iterate (linearised in the non-linear d/d\xi term)
                            Phi_n_new[:,:] = self._Phi_n_equation_semi_implicit([h_new,Phi_n_old,g_s_old,g_b_old],dt)
                            eps = -1
                        elif self._Phi_n_DCN_solver==3:
                            # A decoupled Crank-Nicolson iteration
                            # Phi_n equation row block matrices and rhs vector
                            b_1  = self._Phi_n_equation_RHS(v_old,v_new,dt)
                            A_11 = self._Phi_n_equation_LHS1(v_new,dt)
                            # Solve the linear system and update current guess
                            dPhi_n = spsolve(A_11.tocsr(),b_1)
                            Phi_n_new += dPhi_n.reshape(Phi_n_new.shape)
                            eps = np.linalg.norm(dPhi_n)/np.linalg.norm(Phi_n_new.ravel())
                        else: #self._Phi_n_DCN_solver==4
                            # A decoupled Crank-Nicolson iteration
                            # Phi_n equation row block matrices and rhs vector
                            b_1  = self._Phi_n_equation_RHS(v_old,v_new,dt)
                            A_11 = self._Phi_n_equation_LHS1(v_new,dt).tocsc()
                            # Solve the linear system and update current guess
                            # (preconditioned GMRES with an incomplete-LU preconditioner)
                            A_11_ILU = spilu(A_11)
                            P_op = LinearOperator(A_11.shape,A_11_ILU.solve)
                            dPhi_n,info = gmres(A_11,b_1,M=P_op,tol=1.0E-8,atol=1.0E-15)
                            if info!=0:
                                print("gmres iteration failed with code ",info)
                            Phi_n_new += dPhi_n.reshape(Phi_n_new.shape)
                            eps = np.linalg.norm(dPhi_n)/np.linalg.norm(Phi_n_new.ravel())
                    elif block==2:
                        # g_s equation row block matrices and rhs vector
                        b_2  = self._g_s_equation_RHS(v_old,v_new,dt)
                        A_22 = self._g_s_equation_LHS2(v_new,dt)
                        # Solve the linear system and update current guess
                        dg_s = spsolve(A_22.tocsr(),b_2)
                        g_s_new += dg_s
                        eps = np.linalg.norm(dg_s)/np.linalg.norm(g_s_new)
                    elif block==3:
                        # g_b equation row block matrices and rhs vector
                        b_3  = self._g_b_equation_RHS(v_old,v_new,dt)
                        A_33 = self._g_b_equation_LHS3(h_old,v_new,dt)
                        # Solve the linear system and update current guess
                        dg_b = spsolve(A_33.tocsr(),b_3)
                        g_b_new += dg_b
                        eps = np.linalg.norm(dg_b)/np.linalg.norm(g_b_new)
                    # Check epsilon and the current iteration number for termination conditions...
                    if newt_verbose:
                        print("Newton Method: Completed iteration {:d} with eps={:g}".format(k,eps))
                    if eps<newt_tol:# or block in [1,2,3]: # Note: blocks 1,2,3 are linear
                        if newt_verbose:
                            print("Newton Method: Converged within {:g} in {:d} iterations".format(newt_tol,newt_its))
                        break
                    if k==newt_its-1:
                        print("Newton Method: Failed to converge in {:d} iterations (block {:d}, eps={:g})".format(newt_its,block,eps))
            # done
        # Now perform the Newton iterations until the final time is reached
        t = self._t
        S = t
        # Take full steps of size dt while a whole step still fits in [S, S+T].
        while t<=S+T-dt:
            decoupled_newton_iterate(v_old,v_new,dt)
            t += dt
            # Commit the accepted step back into the stored fields, which
            # v_old aliases, so the next step starts from the new state.
            self._h[:] = h_new[:]
            self._Phi_n[:,:] = Phi_n_new[:,:]
            self._g_s[:] = g_s_new[:]
            self._g_b[:] = g_b_new[:]
        # Take one final shorter step to land exactly on t = S+T
        # (the 1.0E-12 guard avoids a spurious step from round-off).
        if t<S+T and (S+T-t)>1.0E-12:
            decoupled_newton_iterate(v_old,v_new,S+T-t)
            t = S+T
            self._h[:] = h_new[:]
            self._Phi_n[:,:] = Phi_n_new[:,:]
            self._g_s[:] = g_s_new[:]
            self._g_b[:] = g_b_new[:]
        # done, no return
    def _full_Crank_Nicolson(self,T,dt=None):
        """
        This solves the non-linear system equations using Newton iterations
        on the entire system of equations simultaneously. Consequently the
        four fields of interest are strongly coupled through each time step.
        This is generally much costlier for negligible gain over the
        'decoupled' routine. It may however be useful if we find
        parameters in which non-linear effects are more important.
        The private switch _FCN_solver_mode can be used to toggle through
        a variety of different modifications to the solver.

        Parameters:
        T  : duration to advance the solution from the current time
        dt : optional time step; when None the internally stored self._dt is used

        Side effects: updates self._h, self._Phi_n, self._g_s, self._g_b
        in place after each accepted time step.
        """
        # Setup...
        # v_old aliases the stored (old time level) fields; the *_new copies
        # hold the next-time-level iterates and are updated in place below.
        v_old = [self._h,self._Phi_n,self._g_s,self._g_b]
        h_new     = self._h.copy()
        Phi_n_new = self._Phi_n.copy() # may want to flatten this?
        g_s_new   = self._g_s.copy()
        g_b_new   = self._g_b.copy()
        v_new = [h_new,Phi_n_new,g_s_new,g_b_new]
        if dt is None:
            dt = self._dt
        # Define the newton iteration
        def fully_coupled_newton_iterate(v_old,v_new,dt,newt_its=20,newt_tol=1.0E-8,newt_verbose=self._verbose):
            """
            Construct the sparse blocks and RHS vectors for a Newton iteration
            (based on a fully coupled Crank-Nicolson discretisation)
            """
            h_old,Phi_n_old,g_s_old,g_b_old = v_old
            h_new,Phi_n_new,g_s_new,g_b_new = v_new
            nr = len(self._r)
            nxi = len(self._xi)
            for k in range(newt_its):
                # Assemble the 4x4 block Jacobian A_ij (row i = equation,
                # col j = unknown; ordering h,Phi_n,g_s,g_b) and RHS b_i.
                # Off-diagonal blocks are built only when the chosen
                # _FCN_solver_mode couples those fields.
                # h equation rows block matrices and rhs vector
                b_0  = self._h_equation_RHS(v_old,v_new,dt)
                A_00 = self._h_equation_LHS0(v_new,dt)
                if self._FCN_solver_mode>=3:
                    A_01 = self._h_equation_LHS1(v_new,dt) # simple diag
                if self._FCN_solver_mode>=1:
                    A_02 = self._h_equation_LHS2(v_new,dt) # None
                    A_03 = self._h_equation_LHS3(v_new,dt) # simple diag
                # phi_n equation row block matrices and rhs vector
                b_1 = self._Phi_n_equation_RHS(v_old,v_new,dt)
                if self._FCN_solver_mode>=3:
                    A_10 = self._Phi_n_equation_LHS0(v_new,dt)
                A_11 = self._Phi_n_equation_LHS1(v_new,dt)
                if self._FCN_solver_mode>=2:
                    A_12 = self._Phi_n_equation_LHS2(v_new,dt) # None
                    A_13 = self._Phi_n_equation_LHS3(v_new,dt) # simple diag
                # g_s equation row block matrices and rhs vector
                b_2 = self._g_s_equation_RHS(v_old,v_new,dt)
                if self._FCN_solver_mode>=1:
                    A_20 = self._g_s_equation_LHS0(v_new,dt) # None
                if self._FCN_solver_mode>=2:
                    A_21 = self._g_s_equation_LHS1(v_new,dt) # None
                A_22 = self._g_s_equation_LHS2(v_new,dt)
                if self._FCN_solver_mode>=1:
                    A_23 = self._g_s_equation_LHS3(v_new,dt) # simple diag
                # g_b equation row block matrices and rhs vector
                b_3 = self._g_b_equation_RHS(v_old,v_new,dt)
                if self._FCN_solver_mode>=1:
                    A_30 = self._g_b_equation_LHS0(g_b_old,v_new,dt)
                if self._FCN_solver_mode>=2:
                    A_31 = self._g_b_equation_LHS1(v_new,dt)
                if self._FCN_solver_mode>=1:
                    A_32 = self._g_b_equation_LHS2(v_new,dt) # simple diag
                A_33 = self._g_b_equation_LHS3(h_old,v_new,dt)
                # Construct the sparse block matrix
                b_full = np.concatenate([b_0,b_1,b_2,b_3])
                if self._FCN_solver_mode<=0: # roughly equivalent to DCN
                    A_full = bmat([[A_00,None,None,None],
                                   [None,A_11,None,None],
                                   [None,None,A_22,None],
                                   [None,None,None,A_33]],format='csr')
                    # Initial debugging test --> success (up to t=10/32)
                elif self._FCN_solver_mode==1: # add coupling between h,g_s and g_b
                    A_full = bmat([[A_00,None,A_02,A_03],
                                   [None,A_11,None,None],
                                   [A_20,None,A_22,A_23],
                                   [A_30,None,A_32,A_33]],format='csr')
                    # Second debugging test --> success (up to t=5)
                elif self._FCN_solver_mode==2: # add coupling between g_b and Phi_n (noting A_12 and A_21 are None)
                    A_full = bmat([[A_00,None,A_02,A_03],
                                   [None,A_11,A_12,A_13],
                                   [A_20,A_21,A_22,A_23],
                                   [A_30,A_31,A_32,A_33]],format='csr')
                    # Third debugging test --> success (up to t=10/32)
                    # Going from the 2nd to 3rd debug test significantly increased the time to solve, but result looks okay.
                elif self._FCN_solver_mode==3: # Use the complete FCN matrix
                    A_full = bmat([[A_00,A_01,A_02,A_03],
                                   [A_10,A_11,A_12,A_13],
                                   [A_20,A_21,A_22,A_23],
                                   [A_30,A_31,A_32,A_33]],format='csr') # Full solve
                elif self._FCN_solver_mode>=4: # Use the complete FCN matrix
                    # csc format here because spilu (used by the iterative
                    # solvers below) prefers CSC input.
                    A_full = bmat([[A_00,A_01,A_02,A_03],
                                   [A_10,A_11,A_12,A_13],
                                   [A_20,A_21,A_22,A_23],
                                   [A_30,A_31,A_32,A_33]],format='csc') # Full solve
                # Note: another significant slow down going from the 3rd debug test to the full solver...
                # Solve the linear system
                if self._FCN_solver_mode<=3:
                    # Direct method
                    dv = spsolve(A_full,b_full)
                elif self._FCN_solver_mode==4:
                    # Iterative method
                    # NOTE(review): gmres_counter is presumably a callback
                    # helper defined elsewhere in this file -- confirm.
                    counter = gmres_counter(newt_verbose)
                    #A_iLU = spilu(A_full) # defaults: fill_factor=10,drop_tol=1.0E-4
                    #P_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_iLU.solve)
                    #dv,info = gmres(A_full,b_full,M=P_op,tol=1.0E-8,atol=1.0E-15) # defaults: tol=1.0E-5,restart=20
                    # This seems to work quite well really and is much quicker... (with only 1 thread!)
                    # But... the gmres solver seems to hang around t=4...
                    # It may be an error in the matrix as h starts to move,
                    # but a preliminary check suggests this is not the case...
                    # (although s couple of extra newton iteration do seem to be required...)
                    # Rather, I may need to play around with gmres and spilu parameters...
                    # Yes, some different parameters seems to get past that point...
                    # Potential improvements:
                    # a) use only diagonal blocks for the preconditioner
                    # b) replace A_full with a linear operator in the gmres call
                    #    (using a matrix free function to construct the linear operator...)
                    # The following seems a bit more robust, but slows things down a little
                    #A_iLU = spilu(A_full,fill_factor=20,drop_tol=1.0E-5)
                    #P_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_iLU.solve)
                    #dv,info = gmres(A_full,b_full,M=P_op,tol=1.0E-8,atol=1.0E-15,restart=50)
                    # Maybe this is sufficient? No, it just gets stuck a little later...
                    A_iLU = spilu(A_full,fill_factor=10,drop_tol=0.5**15)
                    P_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_iLU.solve)
                    # NOTE(review): the tol= keyword of gmres/bicgstab was
                    # renamed rtol in newer SciPy releases -- verify against
                    # the pinned SciPy version.
                    dv,info = gmres(A_full,b_full,M=P_op,tol=1.0E-8,atol=1.0E-15,restart=20,callback=counter)
                    if info!=0:
                        print("gmres iteration failed with code ",info)
                elif self._FCN_solver_mode==5:
                    # Iterative method bicgstab
                    A_iLU = spilu(A_full) # defaults: fill_factor=10,drop_tol=1.0E-4
                    P_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_iLU.solve)
                    dv,info = bicgstab(A_full,b_full,M=P_op,tol=1.0E-8,atol=1.0E-15) # defaults: tol=1.0E-5,atol=None
                    # Note: bicgstab fails at the same place as gmres, it is no faster/slower up to that point
                    if info!=0:
                        print("bicgstab iteration failed with code ",info)
                elif self._FCN_solver_mode==-1:
                    # Try a 'matrix free' gmres method (apart from the pre-conditioner construction)
                    A_iLU = spilu(A_full) # note this has only the diagonal blocks currently...
                    P_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_iLU.solve)
                    # Matrix-vector product via the residual functions: the
                    # full state vector x is split back into its four field
                    # segments [h | Phi_n | g_s | g_b] of sizes nr, nxi*nr, nr, nr.
                    def A_fun(x):
                        h_temp     = h_new     + x[           :         nr]
                        Phi_n_temp = Phi_n_new + x[         nr:(1+nxi)*nr].reshape((nxi,nr))
                        g_s_temp   = g_s_new   + x[(1+nxi)*nr:(2+nxi)*nr]
                        g_b_temp   = g_b_new   + x[(2+nxi)*nr:           ]
                        v_temp = [h_temp,Phi_n_temp,g_s_temp,g_b_temp]
                        Ax_0 = self._h_equation_RHS(v_old,v_temp,dt)
                        Ax_1 = self._Phi_n_equation_RHS(v_old,v_temp,dt)
                        Ax_2 = self._g_s_equation_RHS(v_old,v_temp,dt)
                        Ax_3 = self._g_b_equation_RHS(v_old,v_temp,dt)
                        Ax = np.concatenate([Ax_0,Ax_1,Ax_2,Ax_3])
                        Ax -= b_full
                        return Ax
                    A_op = LinearOperator((nr*(3+nxi),nr*(3+nxi)),A_fun)
                    dv,info = gmres(A_op,b_full,M=P_op,tol=1.0E-8,atol=1.0E-15)
                    # This works really well actually, and doesn't get hung up where the others did...
                    if info!=0:
                        print("gmres (matrix free) iteration failed with code ",info)
                else:
                    print("Unrecognized FCN_solver_mode parameter, exiting...")
                    return
                # Calculate epsilon
                # (relative size of the Newton update over the whole state vector)
                eps = np.linalg.norm(dv)/np.linalg.norm(np.concatenate([array.ravel() for array in v_new]))
                # Update current guess
                # (split dv into the field segments, mirroring A_fun above)
                h_new     += dv[           :         nr]
                Phi_n_new += dv[         nr:(1+nxi)*nr].reshape((nxi,nr))
                g_s_new   += dv[(1+nxi)*nr:(2+nxi)*nr]
                g_b_new   += dv[(2+nxi)*nr:           ]
                # Check epsilon and the current iteration number for termination conditions...
                if newt_verbose:
                    print("Newton Method: Completed iteration {:d} with eps={:g}".format(k+1,eps))
                if eps<newt_tol:
                    if newt_verbose:
                        print("Newton Method: Converged within {:g} in {:d} iterations".format(newt_tol,k+1))
                    break
                if k==newt_its-1:
                    print("Newton Method: Failed to converge in {:d} iterations (eps={:g})".format(newt_its,eps))
            # done
        # Now perform the Newton iterations until the final time is reached
        t = self._t
        S = t
        # Take full steps of size dt while a whole step still fits in [S, S+T].
        while t<=S+T-dt:
            fully_coupled_newton_iterate(v_old,v_new,dt)
            t += dt
            # Commit the accepted step back into the stored fields, which
            # v_old aliases, so the next step starts from the new state.
            self._h[:] = h_new[:]
            self._Phi_n[:,:] = Phi_n_new[:,:]
            self._g_s[:] = g_s_new[:]
            self._g_b[:] = g_b_new[:]
        # Take one final shorter step to land exactly on t = S+T
        # (the 1.0E-12 guard avoids a spurious step from round-off).
        if t<S+T and (S+T-t)>1.0E-12:
            fully_coupled_newton_iterate(v_old,v_new,S+T-t)
            t = S+T
            self._h[:] = h_new[:]
            self._Phi_n[:,:] = Phi_n_new[:,:]
            self._g_s[:] = g_s_new[:]
            self._g_b[:] = g_b_new[:]
        # done, no return
# End of class
| [
"scipy.sparse.linalg.LinearOperator",
"scipy.sparse.linalg.bicgstab",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.arange",
"scipy.sparse.linalg.spilu",
"scipy.sparse.linalg.gmres",
"numpy.linspace",
"numpy.empty",
"numpy.concatenate",
"scipy.sparse.diags",
"scipy.sparse.coo_matrix",
"numpy... | [((3903, 3935), 'numpy.arange', 'np.arange', (['(0.0)', '(R + 0.5 * dr)', 'dr'], {}), '(0.0, R + 0.5 * dr, dr)\n', (3912, 3935), True, 'import numpy as np\n'), ((4016, 4038), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nxi'], {}), '(0, 1, nxi)\n', (4027, 4038), True, 'import numpy as np\n'), ((4064, 4094), 'numpy.meshgrid', 'np.meshgrid', (['self._r', 'self._xi'], {}), '(self._r, self._xi)\n', (4075, 4094), True, 'import numpy as np\n'), ((12667, 12693), 'numpy.empty_like', 'np.empty_like', (['self._Phi_n'], {}), '(self._Phi_n)\n', (12680, 12693), True, 'import numpy as np\n'), ((25481, 25493), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (25489, 25493), True, 'import numpy as np\n'), ((25819, 25842), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (25830, 25842), True, 'import numpy as np\n'), ((27621, 27644), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (27632, 27644), True, 'import numpy as np\n'), ((28738, 28865), 'scipy.sparse.diags', 'diags', (['[diagonals[0][2:], diagonals[1][1:], diagonals[2], diagonals[3][:-1],\n diagonals[4][:-2], diagonals[5][:-3]]', 'offsets'], {}), '([diagonals[0][2:], diagonals[1][1:], diagonals[2], diagonals[3][:-1],\n diagonals[4][:-2], diagonals[5][:-3]], offsets)\n', (28743, 28865), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((29479, 29499), 'numpy.arange', 'np.arange', (['(2)', '(nr - 2)'], {}), '(2, nr - 2)\n', (29488, 29499), True, 'import numpy as np\n'), ((29604, 29655), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(dat, (row, col))'], {'shape': '(nr, nr * nxi)'}), '((dat, (row, col)), shape=(nr, nr * nxi))\n', (29614, 29655), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((30479, 30492), 'scipy.sparse.diags', 'diags', (['A_diag'], {}), '(A_diag)\n', (30484, 30492), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((31259, 31276), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (31267, 
31276), True, 'import numpy as np\n'), ((33123, 33140), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (33131, 33140), True, 'import numpy as np\n'), ((33162, 33179), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (33170, 33179), True, 'import numpy as np\n'), ((33201, 33218), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (33209, 33218), True, 'import numpy as np\n'), ((33240, 33257), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (33248, 33257), True, 'import numpy as np\n'), ((33279, 33296), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (33287, 33296), True, 'import numpy as np\n'), ((40202, 40227), 'numpy.empty', 'np.empty', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (40210, 40227), True, 'import numpy as np\n'), ((40394, 40417), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (40405, 40417), True, 'import numpy as np\n'), ((43266, 43290), 'numpy.ones', 'np.ones', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (43273, 43290), True, 'import numpy as np\n'), ((43864, 43889), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (43872, 43889), True, 'import numpy as np\n'), ((43908, 43933), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (43916, 43933), True, 'import numpy as np\n'), ((43952, 43977), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (43960, 43977), True, 'import numpy as np\n'), ((44388, 44413), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (44396, 44413), True, 'import numpy as np\n'), ((44432, 44457), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (44440, 44457), True, 'import numpy as np\n'), ((44476, 44501), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (44484, 44501), True, 'import numpy as np\n'), ((44553, 44578), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), 
'(Phi_n_old.shape)\n', (44561, 44578), True, 'import numpy as np\n'), ((46271, 46296), 'numpy.zeros', 'np.zeros', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (46279, 46296), True, 'import numpy as np\n'), ((52883, 52908), 'numpy.zeros', 'np.zeros', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (52891, 52908), True, 'import numpy as np\n'), ((55192, 55217), 'numpy.empty', 'np.empty', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (55200, 55217), True, 'import numpy as np\n'), ((55317, 55342), 'numpy.empty', 'np.empty', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (55325, 55342), True, 'import numpy as np\n'), ((55588, 55611), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (55599, 55611), True, 'import numpy as np\n'), ((62510, 62522), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (62518, 62522), True, 'import numpy as np\n'), ((62688, 62711), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (62699, 62711), True, 'import numpy as np\n'), ((67084, 67108), 'numpy.ones', 'np.ones', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (67091, 67108), True, 'import numpy as np\n'), ((68764, 68789), 'numpy.zeros', 'np.zeros', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (68772, 68789), True, 'import numpy as np\n'), ((68808, 68833), 'numpy.zeros', 'np.zeros', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (68816, 68833), True, 'import numpy as np\n'), ((69449, 69474), 'numpy.zeros', 'np.zeros', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (69457, 69474), True, 'import numpy as np\n'), ((69493, 69518), 'numpy.zeros', 'np.zeros', (['Phi_n_new.shape'], {}), '(Phi_n_new.shape)\n', (69501, 69518), True, 'import numpy as np\n'), ((70025, 70048), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (70036, 70048), True, 'import numpy as np\n'), ((75028, 75041), 'numpy.empty', 'np.empty', (['nxi'], {}), '(nxi)\n', (75036, 75041), True, 'import numpy as np\n'), ((75062, 75075), 
'numpy.empty', 'np.empty', (['nxi'], {}), '(nxi)\n', (75070, 75075), True, 'import numpy as np\n'), ((79757, 79769), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (79765, 79769), True, 'import numpy as np\n'), ((81623, 81635), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (81631, 81635), True, 'import numpy as np\n'), ((81679, 81691), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (81687, 81691), True, 'import numpy as np\n'), ((81712, 81723), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (81719, 81723), True, 'import numpy as np\n'), ((81761, 81773), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (81769, 81773), True, 'import numpy as np\n'), ((81795, 81807), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (81803, 81807), True, 'import numpy as np\n'), ((82519, 82623), 'scipy.sparse.diags', 'diags', (['[A_diag_m2[2:], A_diag_m1[1:], A_diag_p0, A_diag_p1[:-1], A_diag_p2[:-2]]', '[-2, -1, 0, 1, 2]'], {}), '([A_diag_m2[2:], A_diag_m1[1:], A_diag_p0, A_diag_p1[:-1], A_diag_p2[:\n -2]], [-2, -1, 0, 1, 2])\n', (82524, 82623), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((83101, 83114), 'scipy.sparse.diags', 'diags', (['A_diag'], {}), '(A_diag)\n', (83106, 83114), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((83974, 83986), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (83982, 83986), True, 'import numpy as np\n'), ((84437, 84460), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (84448, 84460), True, 'import numpy as np\n'), ((87145, 87157), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (87153, 87157), True, 'import numpy as np\n'), ((87431, 87454), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (87442, 87454), True, 'import numpy as np\n'), ((88747, 88916), 'scipy.sparse.diags', 'diags', (['[diagonals[0][4:], diagonals[1][3:], diagonals[2][2:], diagonals[3][1:],\n diagonals[4], diagonals[5][:-1], diagonals[6][:-2], diagonals[7][:-3]]', 'offsets'], 
{}), '([diagonals[0][4:], diagonals[1][3:], diagonals[2][2:], diagonals[3][1\n :], diagonals[4], diagonals[5][:-1], diagonals[6][:-2], diagonals[7][:-\n 3]], offsets)\n', (88752, 88916), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((89519, 89539), 'numpy.arange', 'np.arange', (['(1)', '(nr - 1)'], {}), '(1, nr - 1)\n', (89528, 89539), True, 'import numpy as np\n'), ((89849, 89900), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(dat, (row, col))'], {'shape': '(nr, nr * nxi)'}), '((dat, (row, col)), shape=(nr, nr * nxi))\n', (89859, 89900), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((90422, 90435), 'scipy.sparse.diags', 'diags', (['A_diag'], {}), '(A_diag)\n', (90427, 90435), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((91231, 91243), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (91239, 91243), True, 'import numpy as np\n'), ((91438, 91461), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (91449, 91461), True, 'import numpy as np\n'), ((93347, 93455), 'scipy.sparse.diags', 'diags', (['[diagonals[0][2:], diagonals[1][1:], diagonals[2], diagonals[3][:-1],\n diagonals[4][:-2]]', 'offsets'], {}), '([diagonals[0][2:], diagonals[1][1:], diagonals[2], diagonals[3][:-1],\n diagonals[4][:-2]], offsets)\n', (93352, 93455), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((28194, 28206), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (28202, 28206), True, 'import numpy as np\n'), ((35106, 35123), 'numpy.zeros', 'np.zeros', (['f.shape'], {}), '(f.shape)\n', (35114, 35123), True, 'import numpy as np\n'), ((35149, 35166), 'numpy.zeros', 'np.zeros', (['f.shape'], {}), '(f.shape)\n', (35157, 35166), True, 'import numpy as np\n'), ((35192, 35209), 'numpy.zeros', 'np.zeros', (['f.shape'], {}), '(f.shape)\n', (35200, 35209), True, 'import numpy as np\n'), ((40100, 40193), 'numpy.cumsum', 'np.cumsum', (['(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1, :]))'], 
{'axis': '(0)'}), '(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1,\n :]), axis=0)\n', (40109, 40193), True, 'import numpy as np\n'), ((47180, 47205), 'numpy.empty', 'np.empty', (['Phi_n_old.shape'], {}), '(Phi_n_old.shape)\n', (47188, 47205), True, 'import numpy as np\n'), ((47388, 47411), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (47399, 47411), True, 'import numpy as np\n'), ((48404, 48427), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (48415, 48427), True, 'import numpy as np\n'), ((54985, 55078), 'numpy.cumsum', 'np.cumsum', (['(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1, :]))'], {'axis': '(0)'}), '(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1,\n :]), axis=0)\n', (54994, 55078), True, 'import numpy as np\n'), ((55090, 55183), 'numpy.cumsum', 'np.cumsum', (['(0.5 * ((Phi_n_new * (1 - XI))[1:, :] + (Phi_n_new * (1 - XI))[:-1, :]))'], {'axis': '(0)'}), '(0.5 * ((Phi_n_new * (1 - XI))[1:, :] + (Phi_n_new * (1 - XI))[:-1,\n :]), axis=0)\n', (55099, 55183), True, 'import numpy as np\n'), ((62323, 62416), 'numpy.cumsum', 'np.cumsum', (['(0.5 * ((Phi_n_new * (1 - XI))[1:, :] + (Phi_n_new * (1 - XI))[:-1, :]))'], {'axis': '(0)'}), '(0.5 * ((Phi_n_new * (1 - XI))[1:, :] + (Phi_n_new * (1 - XI))[:-1,\n :]), axis=0)\n', (62332, 62416), True, 'import numpy as np\n'), ((62737, 62749), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (62745, 62749), True, 'import numpy as np\n'), ((72219, 72231), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (72227, 72231), True, 'import numpy as np\n'), ((72257, 72269), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (72265, 72269), True, 'import numpy as np\n'), ((72295, 72307), 'numpy.empty', 'np.empty', (['nr'], {}), '(nr)\n', (72303, 72307), True, 'import numpy as np\n'), ((73431, 73454), 'numpy.isfinite', 'np.isfinite', (['lambda_ast'], {}), '(lambda_ast)\n', (73442, 73454), True, 'import numpy as np\n'), 
((75730, 75743), 'numpy.empty', 'np.empty', (['nxi'], {}), '(nxi)\n', (75738, 75743), True, 'import numpy as np\n'), ((75768, 75781), 'numpy.empty', 'np.empty', (['nxi'], {}), '(nxi)\n', (75776, 75781), True, 'import numpy as np\n'), ((75806, 75819), 'numpy.empty', 'np.empty', (['nxi'], {}), '(nxi)\n', (75814, 75819), True, 'import numpy as np\n'), ((77390, 77403), 'numpy.arange', 'np.arange', (['nr'], {}), '(nr)\n', (77399, 77403), True, 'import numpy as np\n'), ((77404, 77418), 'numpy.arange', 'np.arange', (['nxi'], {}), '(nxi)\n', (77413, 77418), True, 'import numpy as np\n'), ((91807, 91819), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (91815, 91819), True, 'import numpy as np\n'), ((91875, 91887), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (91883, 91887), True, 'import numpy as np\n'), ((12220, 12275), 'numpy.cumsum', 'np.cumsum', (['(0.5 * (phi_n[1:, :] + phi_n[:-1, :]))'], {'axis': '(0)'}), '(0.5 * (phi_n[1:, :] + phi_n[:-1, :]), axis=0)\n', (12229, 12275), True, 'import numpy as np\n'), ((47074, 47167), 'numpy.cumsum', 'np.cumsum', (['(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1, :]))'], {'axis': '(0)'}), '(0.5 * ((Phi_n_old * (1 - XI))[1:, :] + (Phi_n_old * (1 - XI))[:-1,\n :]), axis=0)\n', (47083, 47167), True, 'import numpy as np\n'), ((66011, 66030), 'numpy.concatenate', 'np.concatenate', (['dat'], {}), '(dat)\n', (66025, 66030), True, 'import numpy as np\n'), ((76263, 76282), 'numpy.concatenate', 'np.concatenate', (['dat'], {}), '(dat)\n', (76277, 76282), True, 'import numpy as np\n'), ((79114, 79133), 'numpy.concatenate', 'np.concatenate', (['dat'], {}), '(dat)\n', (79128, 79133), True, 'import numpy as np\n'), ((105115, 105151), 'numpy.concatenate', 'np.concatenate', (['[b_0, b_1, b_2, b_3]'], {}), '([b_0, b_1, b_2, b_3])\n', (105129, 105151), True, 'import numpy as np\n'), ((50120, 50139), 'numpy.concatenate', 'np.concatenate', (['dat'], {}), '(dat)\n', (50134, 50139), True, 'import numpy as np\n'), ((66032, 
66051), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (66046, 66051), True, 'import numpy as np\n'), ((66052, 66071), 'numpy.concatenate', 'np.concatenate', (['col'], {}), '(col)\n', (66066, 66071), True, 'import numpy as np\n'), ((68024, 68077), 'numpy.broadcast_to', 'np.broadcast_to', (['indices[-1, 2:-1]', '(nxi - 2, nr - 3)'], {}), '(indices[-1, 2:-1], (nxi - 2, nr - 3))\n', (68039, 68077), True, 'import numpy as np\n'), ((76284, 76303), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (76298, 76303), True, 'import numpy as np\n'), ((76304, 76323), 'numpy.concatenate', 'np.concatenate', (['col'], {}), '(col)\n', (76318, 76323), True, 'import numpy as np\n'), ((79135, 79154), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (79149, 79154), True, 'import numpy as np\n'), ((79155, 79174), 'numpy.concatenate', 'np.concatenate', (['col'], {}), '(col)\n', (79169, 79174), True, 'import numpy as np\n'), ((105251, 105379), 'scipy.sparse.bmat', 'bmat', (['[[A_00, None, None, None], [None, A_11, None, None], [None, None, A_22,\n None], [None, None, None, A_33]]'], {'format': '"""csr"""'}), "([[A_00, None, None, None], [None, A_11, None, None], [None, None, A_22,\n None], [None, None, None, A_33]], format='csr')\n", (105255, 105379), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((107430, 107453), 'scipy.sparse.linalg.spsolve', 'spsolve', (['A_full', 'b_full'], {}), '(A_full, b_full)\n', (107437, 107453), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((111957, 111975), 'numpy.linalg.norm', 'np.linalg.norm', (['dv'], {}), '(dv)\n', (111971, 111975), True, 'import numpy as np\n'), ((50141, 50160), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (50155, 50160), True, 'import numpy as np\n'), ((50161, 50180), 'numpy.concatenate', 'np.concatenate', (['col'], {}), '(col)\n', (50175, 50180), True, 'import numpy as np\n'), ((105655, 105783), 
'scipy.sparse.bmat', 'bmat', (['[[A_00, None, A_02, A_03], [None, A_11, None, None], [A_20, None, A_22,\n A_23], [A_30, None, A_32, A_33]]'], {'format': '"""csr"""'}), "([[A_00, None, A_02, A_03], [None, A_11, None, None], [A_20, None, A_22,\n A_23], [A_30, None, A_32, A_33]], format='csr')\n", (105659, 105783), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((109269, 109318), 'scipy.sparse.linalg.spilu', 'spilu', (['A_full'], {'fill_factor': '(10)', 'drop_tol': '(0.5 ** 15)'}), '(A_full, fill_factor=10, drop_tol=0.5 ** 15)\n', (109274, 109318), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((109342, 109403), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(nr * (3 + nxi), nr * (3 + nxi))', 'A_iLU.solve'], {}), '((nr * (3 + nxi), nr * (3 + nxi)), A_iLU.solve)\n', (109356, 109403), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((109424, 109511), 'scipy.sparse.linalg.gmres', 'gmres', (['A_full', 'b_full'], {'M': 'P_op', 'tol': '(1e-08)', 'atol': '(1e-15)', 'restart': '(20)', 'callback': 'counter'}), '(A_full, b_full, M=P_op, tol=1e-08, atol=1e-15, restart=20, callback=\n counter)\n', (109429, 109511), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((96354, 96372), 'numpy.linalg.norm', 'np.linalg.norm', (['dh'], {}), '(dh)\n', (96368, 96372), True, 'import numpy as np\n'), ((96373, 96394), 'numpy.linalg.norm', 'np.linalg.norm', (['h_new'], {}), '(h_new)\n', (96387, 96394), True, 'import numpy as np\n'), ((106086, 106214), 'scipy.sparse.bmat', 'bmat', (['[[A_00, None, A_02, A_03], [None, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]]'], {'format': '"""csr"""'}), "([[A_00, None, A_02, A_03], [None, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]], format='csr')\n", (106090, 106214), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((109731, 
109744), 'scipy.sparse.linalg.spilu', 'spilu', (['A_full'], {}), '(A_full)\n', (109736, 109744), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((109815, 109876), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(nr * (3 + nxi), nr * (3 + nxi))', 'A_iLU.solve'], {}), '((nr * (3 + nxi), nr * (3 + nxi)), A_iLU.solve)\n', (109829, 109876), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((109897, 109952), 'scipy.sparse.linalg.bicgstab', 'bicgstab', (['A_full', 'b_full'], {'M': 'P_op', 'tol': '(1e-08)', 'atol': '(1e-15)'}), '(A_full, b_full, M=P_op, tol=1e-08, atol=1e-15)\n', (109905, 109952), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((106606, 106734), 'scipy.sparse.bmat', 'bmat', (['[[A_00, A_01, A_02, A_03], [A_10, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]]'], {'format': '"""csr"""'}), "([[A_00, A_01, A_02, A_03], [A_10, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]], format='csr')\n", (106610, 106734), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((110380, 110393), 'scipy.sparse.linalg.spilu', 'spilu', (['A_full'], {}), '(A_full)\n', (110385, 110393), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((110475, 110536), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(nr * (3 + nxi), nr * (3 + nxi))', 'A_iLU.solve'], {}), '((nr * (3 + nxi), nr * (3 + nxi)), A_iLU.solve)\n', (110489, 110536), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((111402, 111457), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(nr * (3 + nxi), nr * (3 + nxi))', 'A_fun'], {}), '((nr * (3 + nxi), nr * (3 + nxi)), A_fun)\n', (111416, 111457), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((111478, 111528), 
'scipy.sparse.linalg.gmres', 'gmres', (['A_op', 'b_full'], {'M': 'P_op', 'tol': '(1e-08)', 'atol': '(1e-15)'}), '(A_op, b_full, M=P_op, tol=1e-08, atol=1e-15)\n', (111483, 111528), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((99531, 99551), 'numpy.linalg.norm', 'np.linalg.norm', (['dg_s'], {}), '(dg_s)\n', (99545, 99551), True, 'import numpy as np\n'), ((99552, 99575), 'numpy.linalg.norm', 'np.linalg.norm', (['g_s_new'], {}), '(g_s_new)\n', (99566, 99575), True, 'import numpy as np\n'), ((106942, 107070), 'scipy.sparse.bmat', 'bmat', (['[[A_00, A_01, A_02, A_03], [A_10, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]]'], {'format': '"""csc"""'}), "([[A_00, A_01, A_02, A_03], [A_10, A_11, A_12, A_13], [A_20, A_21, A_22,\n A_23], [A_30, A_31, A_32, A_33]], format='csc')\n", (106946, 107070), False, 'from scipy.sparse import diags, bmat, coo_matrix\n'), ((111266, 111306), 'numpy.concatenate', 'np.concatenate', (['[Ax_0, Ax_1, Ax_2, Ax_3]'], {}), '([Ax_0, Ax_1, Ax_2, Ax_3])\n', (111280, 111306), True, 'import numpy as np\n'), ((100030, 100050), 'numpy.linalg.norm', 'np.linalg.norm', (['dg_b'], {}), '(dg_b)\n', (100044, 100050), True, 'import numpy as np\n'), ((100051, 100074), 'numpy.linalg.norm', 'np.linalg.norm', (['g_b_new'], {}), '(g_b_new)\n', (100065, 100074), True, 'import numpy as np\n'), ((98623, 98634), 'scipy.sparse.linalg.spilu', 'spilu', (['A_11'], {}), '(A_11)\n', (98628, 98634), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((98670, 98712), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['A_11.shape', 'A_11_ILU.solve'], {}), '(A_11.shape, A_11_ILU.solve)\n', (98684, 98712), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((98754, 98801), 'scipy.sparse.linalg.gmres', 'gmres', (['A_11', 'b_1'], {'M': 'P_op', 'tol': '(1e-08)', 'atol': '(1e-15)'}), '(A_11, b_1, M=P_op, tol=1e-08, 
atol=1e-15)\n', (98759, 98801), False, 'from scipy.sparse.linalg import spsolve, spilu, LinearOperator, gmres, bicgstab\n'), ((98090, 98112), 'numpy.linalg.norm', 'np.linalg.norm', (['dPhi_n'], {}), '(dPhi_n)\n', (98104, 98112), True, 'import numpy as np\n'), ((99028, 99050), 'numpy.linalg.norm', 'np.linalg.norm', (['dPhi_n'], {}), '(dPhi_n)\n', (99042, 99050), True, 'import numpy as np\n')] |
"""
Image processing utilities
"""
__all__ = ['background_mask', 'foreground_mask', 'overlay_edges', 'diff_image',
'equalize_image_histogram']
from scipy import ndimage
from visualqc import config as cfg
from visualqc.utils import scale_0to1
import numpy as np
from functools import partial
from scipy.ndimage import sobel, binary_closing
from scipy.ndimage.morphology import binary_fill_holes
from scipy.ndimage.filters import median_filter, minimum_filter, maximum_filter
from scipy.signal import medfilt2d
import matplotlib
matplotlib.interactive(True)
from matplotlib.cm import get_cmap
gray_cmap = get_cmap('gray')
hot_cmap = get_cmap('hot')
filter_params = dict(size=cfg.median_filter_size, mode='constant', cval=0)
min_filter = partial(minimum_filter, **filter_params)
max_filter = partial(maximum_filter, **filter_params)
med_filter = partial(median_filter , **filter_params)
def background_mask(mri, thresh_perc=1):
"""Creates the background mask from an MRI"""
grad_magnitude = gradient_magnitude(mri)
nonzero_grad_mag = grad_magnitude[grad_magnitude > 0]
thresh_val = np.percentile(nonzero_grad_mag.flatten(), thresh_perc)
background_mask = grad_magnitude < thresh_val
se36 = ndimage.generate_binary_structure(3, 6)
closed = ndimage.binary_closing(background_mask, se36, iterations=6)
final_mask = ndimage.binary_erosion(closed, se36, iterations=5)
return final_mask
def gradient_magnitude(mri):
"""Computes the gradient magnitude"""
grad = np.asarray(np.gradient(mri))
grad_magnitude = np.sqrt(np.sum(np.power(grad, 2.), axis=0))
return grad_magnitude
def mask_image(input_img,
update_factor=0.5,
init_percentile=2,
iterations_closing=5,
return_inverse=False,
out_dtype=None):
"""
Estimates the foreground mask for a given image.
Similar to 3dAutoMask from AFNI.
iterations_closing : int
Number of iterations of binary_closing to apply at the end.
"""
prev_clip_level = np.percentile(input_img, init_percentile)
while True:
mask_img = input_img >= prev_clip_level
cur_clip_level = update_factor * np.median(input_img[mask_img])
if np.isclose(cur_clip_level, prev_clip_level, rtol=0.05):
break
else:
prev_clip_level = cur_clip_level
if len(input_img.shape) == 3:
se = ndimage.generate_binary_structure(3, 6)
elif len(input_img.shape) == 2:
se = ndimage.generate_binary_structure(2, 4)
else:
raise ValueError('Image must be 2D or 3D')
mask_img = binary_closing(mask_img, se, iterations=iterations_closing)
mask_img = binary_fill_holes(mask_img, se)
if return_inverse:
mask_img = np.logical_not(mask_img)
if out_dtype is not None:
mask_img = mask_img.astype(out_dtype)
return mask_img
# alias
foreground_mask = mask_image
def equalize_image_histogram(image_in, num_bins=cfg.num_bins_histogram_contrast_enhancement,
max_value=255):
"""Modifies the image to achieve an equalized histogram."""
image_flat = image_in.flatten()
hist_image, bin_edges = np.histogram(image_flat, bins=num_bins, normed=True)
cdf = hist_image.cumsum()
cdf = max_value * cdf / cdf[-1] # last element is total sum
# linear interpolation
array_equalized = np.interp(image_flat, bin_edges[:-1], cdf)
return array_equalized.reshape(image_in.shape)
def overlay_edges(slice_one, slice_two, sharper=True):
"""
Makes a composite image with edges from second image overlaid on first.
It will be in colormapped (RGB format) already.
"""
if slice_one.shape != slice_two.shape:
raise ValueError("slices' dimensions do not match: "
" {} and {} ".format(slice_one.shape, slice_two.shape))
# simple filtering to remove noise, while supposedly keeping edges
slice_two = medfilt2d(slice_two, kernel_size=cfg.median_filter_size)
# extracting edges
edges = np.hypot(sobel(slice_two, axis=0, mode='constant'),
sobel(slice_two, axis=1, mode='constant'))
# trying to remove weak edges
if not sharper: # level of removal
edges = med_filter(max_filter(min_filter(edges)))
else:
edges = min_filter(min_filter(max_filter(min_filter(edges))))
edges_color_mapped = hot_cmap(edges, alpha=cfg.alpha_edge_overlay_alignment)
composite = gray_cmap(slice_one, alpha=cfg.alpha_background_slice_alignment)
composite[edges_color_mapped>0] = edges_color_mapped[edges_color_mapped>0]
# mask_rgba = np.dstack([edges>0] * 4)
# composite[mask_rgba] = edges_color_mapped[mask_rgba]
return composite
def dwi_overlay_edges(slice_one, slice_two):
"""
Makes a composite image with edges from second image overlaid on first.
It will be in colormapped (RGB format) already.
"""
if slice_one.shape != slice_two.shape:
raise ValueError("slices' dimensions do not match: "
" {} and {} ".format(slice_one.shape, slice_two.shape))
# simple filtering to remove noise, while supposedly keeping edges
slice_two = medfilt2d(slice_two, kernel_size=cfg.median_filter_size)
# extracting edges
edges = med_filter(np.hypot(sobel(slice_two, axis=0, mode='constant'),
sobel(slice_two, axis=1, mode='constant')))
edges_color_mapped = hot_cmap(edges, alpha=cfg.alpha_edge_overlay_alignment)
composite = gray_cmap(slice_one, alpha=cfg.alpha_background_slice_alignment)
composite[edges_color_mapped>0] = edges_color_mapped[edges_color_mapped>0]
# mask_rgba = np.dstack([edges>0] * 4)
# composite[mask_rgba] = edges_color_mapped[mask_rgba]
return composite
def _get_checkers(slice_shape, patch_size):
"""Creates checkerboard of a given tile size, filling a given slice."""
if patch_size is not None:
patch_size = check_patch_size(patch_size)
else:
# 7 patches in each axis, min voxels/patch = 3
patch_size = np.round(np.array(slice_shape) / 7).astype('int16')
patch_size = np.maximum(patch_size, np.array([3, 3]))
black = np.zeros(patch_size)
white = np.ones(patch_size)
tile = np.vstack((np.hstack([black, white]), np.hstack([white, black])))
# using ceil so we can clip the extra portions
num_tiles = np.ceil(np.divide(slice_shape, tile.shape)).astype(int)
checkers = np.tile(tile, num_tiles)
# clipping any extra columns or rows
if any(np.greater(checkers.shape, slice_shape)):
if checkers.shape[0] > slice_shape[0]:
checkers = np.delete(checkers, np.s_[slice_shape[0]:], axis=0)
if checkers.shape[1] > slice_shape[1]:
checkers = np.delete(checkers, np.s_[slice_shape[1]:], axis=1)
return checkers
def mix_color(slice1, slice2,
alpha_channels=cfg.default_color_mix_alphas,
color_space='rgb'):
"""Mixing them as red and green channels"""
if slice1.shape != slice2.shape:
raise ValueError('size mismatch between cropped slices and checkers!!!')
alpha_channels = np.array(alpha_channels)
if len(alpha_channels) != 2:
raise ValueError('Alphas must be two value tuples.')
slice1 = scale_0to1(slice1)
slice2 = scale_0to1(slice2)
# masking background
combined_distr = np.concatenate((slice1.flatten(), slice2.flatten()))
image_eps = np.percentile(combined_distr, 5)
background = np.logical_or(slice1 <= image_eps, slice2 <= image_eps)
if color_space.lower() in ['rgb']:
red = alpha_channels[0] * slice1
grn = alpha_channels[1] * slice2
blu = np.zeros_like(slice1)
# foreground = np.logical_not(background)
# blu[foreground] = 1.0
mixed = np.stack((red, grn, blu), axis=2)
elif color_space.lower() in ['hsv']:
raise NotImplementedError(
'This method (color_space="hsv") is yet to fully conceptualized and implemented.')
# ensuring all values are clipped to [0, 1]
mixed[mixed <= 0.0] = 0.0
mixed[mixed >= 1.0] = 1.0
return mixed
def mix_slices_in_checkers(slice1, slice2,
checker_size=cfg.default_checkerboard_size):
"""Mixes the two slices in alternating areas specified by checkers"""
checkers = _get_checkers(slice1.shape, checker_size)
if slice1.shape != slice2.shape or slice2.shape != checkers.shape:
raise ValueError('size mismatch between cropped slices and checkers!!!')
mixed = slice1.copy()
mixed[checkers > 0] = slice2[checkers > 0]
return mixed
def diff_image(slice1, slice2, abs_value=True):
"""Computes the difference image"""
diff = slice1 - slice2
if abs_value:
diff = np.abs(diff)
return diff
def check_patch_size(patch_size):
"""Validation and typcasting"""
patch_size = np.array(patch_size)
if patch_size.size == 1:
patch_size = np.repeat(patch_size, 2).astype('int16')
return patch_size
def rescale_without_outliers(img, trim_percentile=1):
"""This utility trims the outliers first, and then rescales it [0, 1]"""
return scale_0to1(img,
exclude_outliers_below=trim_percentile,
exclude_outliers_above=trim_percentile)
| [
"matplotlib.interactive",
"numpy.hstack",
"numpy.logical_not",
"numpy.array",
"visualqc.utils.scale_0to1",
"numpy.gradient",
"numpy.divide",
"numpy.histogram",
"numpy.greater",
"numpy.repeat",
"scipy.ndimage.binary_erosion",
"scipy.ndimage.generate_binary_structure",
"numpy.delete",
"numpy... | [((543, 571), 'matplotlib.interactive', 'matplotlib.interactive', (['(True)'], {}), '(True)\n', (565, 571), False, 'import matplotlib\n'), ((621, 637), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""gray"""'], {}), "('gray')\n", (629, 637), False, 'from matplotlib.cm import get_cmap\n'), ((649, 664), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""hot"""'], {}), "('hot')\n", (657, 664), False, 'from matplotlib.cm import get_cmap\n'), ((754, 794), 'functools.partial', 'partial', (['minimum_filter'], {}), '(minimum_filter, **filter_params)\n', (761, 794), False, 'from functools import partial\n'), ((808, 848), 'functools.partial', 'partial', (['maximum_filter'], {}), '(maximum_filter, **filter_params)\n', (815, 848), False, 'from functools import partial\n'), ((862, 901), 'functools.partial', 'partial', (['median_filter'], {}), '(median_filter, **filter_params)\n', (869, 901), False, 'from functools import partial\n'), ((1235, 1274), 'scipy.ndimage.generate_binary_structure', 'ndimage.generate_binary_structure', (['(3)', '(6)'], {}), '(3, 6)\n', (1268, 1274), False, 'from scipy import ndimage\n'), ((1288, 1347), 'scipy.ndimage.binary_closing', 'ndimage.binary_closing', (['background_mask', 'se36'], {'iterations': '(6)'}), '(background_mask, se36, iterations=6)\n', (1310, 1347), False, 'from scipy import ndimage\n'), ((1365, 1415), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['closed', 'se36'], {'iterations': '(5)'}), '(closed, se36, iterations=5)\n', (1387, 1415), False, 'from scipy import ndimage\n'), ((2076, 2117), 'numpy.percentile', 'np.percentile', (['input_img', 'init_percentile'], {}), '(input_img, init_percentile)\n', (2089, 2117), True, 'import numpy as np\n'), ((2652, 2711), 'scipy.ndimage.binary_closing', 'binary_closing', (['mask_img', 'se'], {'iterations': 'iterations_closing'}), '(mask_img, se, iterations=iterations_closing)\n', (2666, 2711), False, 'from scipy.ndimage import sobel, binary_closing\n'), ((2727, 2758), 
'scipy.ndimage.morphology.binary_fill_holes', 'binary_fill_holes', (['mask_img', 'se'], {}), '(mask_img, se)\n', (2744, 2758), False, 'from scipy.ndimage.morphology import binary_fill_holes\n'), ((3231, 3283), 'numpy.histogram', 'np.histogram', (['image_flat'], {'bins': 'num_bins', 'normed': '(True)'}), '(image_flat, bins=num_bins, normed=True)\n', (3243, 3283), True, 'import numpy as np\n'), ((3428, 3470), 'numpy.interp', 'np.interp', (['image_flat', 'bin_edges[:-1]', 'cdf'], {}), '(image_flat, bin_edges[:-1], cdf)\n', (3437, 3470), True, 'import numpy as np\n'), ((3999, 4055), 'scipy.signal.medfilt2d', 'medfilt2d', (['slice_two'], {'kernel_size': 'cfg.median_filter_size'}), '(slice_two, kernel_size=cfg.median_filter_size)\n', (4008, 4055), False, 'from scipy.signal import medfilt2d\n'), ((5252, 5308), 'scipy.signal.medfilt2d', 'medfilt2d', (['slice_two'], {'kernel_size': 'cfg.median_filter_size'}), '(slice_two, kernel_size=cfg.median_filter_size)\n', (5261, 5308), False, 'from scipy.signal import medfilt2d\n'), ((6268, 6288), 'numpy.zeros', 'np.zeros', (['patch_size'], {}), '(patch_size)\n', (6276, 6288), True, 'import numpy as np\n'), ((6301, 6320), 'numpy.ones', 'np.ones', (['patch_size'], {}), '(patch_size)\n', (6308, 6320), True, 'import numpy as np\n'), ((6537, 6561), 'numpy.tile', 'np.tile', (['tile', 'num_tiles'], {}), '(tile, num_tiles)\n', (6544, 6561), True, 'import numpy as np\n'), ((7236, 7260), 'numpy.array', 'np.array', (['alpha_channels'], {}), '(alpha_channels)\n', (7244, 7260), True, 'import numpy as np\n'), ((7369, 7387), 'visualqc.utils.scale_0to1', 'scale_0to1', (['slice1'], {}), '(slice1)\n', (7379, 7387), False, 'from visualqc.utils import scale_0to1\n'), ((7401, 7419), 'visualqc.utils.scale_0to1', 'scale_0to1', (['slice2'], {}), '(slice2)\n', (7411, 7419), False, 'from visualqc.utils import scale_0to1\n'), ((7536, 7568), 'numpy.percentile', 'np.percentile', (['combined_distr', '(5)'], {}), '(combined_distr, 5)\n', (7549, 7568), True, 
'import numpy as np\n'), ((7586, 7641), 'numpy.logical_or', 'np.logical_or', (['(slice1 <= image_eps)', '(slice2 <= image_eps)'], {}), '(slice1 <= image_eps, slice2 <= image_eps)\n', (7599, 7641), True, 'import numpy as np\n'), ((9000, 9020), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (9008, 9020), True, 'import numpy as np\n'), ((9280, 9379), 'visualqc.utils.scale_0to1', 'scale_0to1', (['img'], {'exclude_outliers_below': 'trim_percentile', 'exclude_outliers_above': 'trim_percentile'}), '(img, exclude_outliers_below=trim_percentile,\n exclude_outliers_above=trim_percentile)\n', (9290, 9379), False, 'from visualqc.utils import scale_0to1\n'), ((1535, 1551), 'numpy.gradient', 'np.gradient', (['mri'], {}), '(mri)\n', (1546, 1551), True, 'import numpy as np\n'), ((2265, 2319), 'numpy.isclose', 'np.isclose', (['cur_clip_level', 'prev_clip_level'], {'rtol': '(0.05)'}), '(cur_clip_level, prev_clip_level, rtol=0.05)\n', (2275, 2319), True, 'import numpy as np\n'), ((2446, 2485), 'scipy.ndimage.generate_binary_structure', 'ndimage.generate_binary_structure', (['(3)', '(6)'], {}), '(3, 6)\n', (2479, 2485), False, 'from scipy import ndimage\n'), ((2802, 2826), 'numpy.logical_not', 'np.logical_not', (['mask_img'], {}), '(mask_img)\n', (2816, 2826), True, 'import numpy as np\n'), ((4100, 4141), 'scipy.ndimage.sobel', 'sobel', (['slice_two'], {'axis': '(0)', 'mode': '"""constant"""'}), "(slice_two, axis=0, mode='constant')\n", (4105, 4141), False, 'from scipy.ndimage import sobel, binary_closing\n'), ((4164, 4205), 'scipy.ndimage.sobel', 'sobel', (['slice_two'], {'axis': '(1)', 'mode': '"""constant"""'}), "(slice_two, axis=1, mode='constant')\n", (4169, 4205), False, 'from scipy.ndimage import sobel, binary_closing\n'), ((6615, 6654), 'numpy.greater', 'np.greater', (['checkers.shape', 'slice_shape'], {}), '(checkers.shape, slice_shape)\n', (6625, 6654), True, 'import numpy as np\n'), ((7779, 7800), 'numpy.zeros_like', 'np.zeros_like', (['slice1'], {}), 
'(slice1)\n', (7792, 7800), True, 'import numpy as np\n'), ((7901, 7934), 'numpy.stack', 'np.stack', (['(red, grn, blu)'], {'axis': '(2)'}), '((red, grn, blu), axis=2)\n', (7909, 7934), True, 'import numpy as np\n'), ((8880, 8892), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (8886, 8892), True, 'import numpy as np\n'), ((1589, 1608), 'numpy.power', 'np.power', (['grad', '(2.0)'], {}), '(grad, 2.0)\n', (1597, 1608), True, 'import numpy as np\n'), ((2223, 2253), 'numpy.median', 'np.median', (['input_img[mask_img]'], {}), '(input_img[mask_img])\n', (2232, 2253), True, 'import numpy as np\n'), ((2535, 2574), 'scipy.ndimage.generate_binary_structure', 'ndimage.generate_binary_structure', (['(2)', '(4)'], {}), '(2, 4)\n', (2568, 2574), False, 'from scipy import ndimage\n'), ((5364, 5405), 'scipy.ndimage.sobel', 'sobel', (['slice_two'], {'axis': '(0)', 'mode': '"""constant"""'}), "(slice_two, axis=0, mode='constant')\n", (5369, 5405), False, 'from scipy.ndimage import sobel, binary_closing\n'), ((5439, 5480), 'scipy.ndimage.sobel', 'sobel', (['slice_two'], {'axis': '(1)', 'mode': '"""constant"""'}), "(slice_two, axis=1, mode='constant')\n", (5444, 5480), False, 'from scipy.ndimage import sobel, binary_closing\n'), ((6237, 6253), 'numpy.array', 'np.array', (['[3, 3]'], {}), '([3, 3])\n', (6245, 6253), True, 'import numpy as np\n'), ((6343, 6368), 'numpy.hstack', 'np.hstack', (['[black, white]'], {}), '([black, white])\n', (6352, 6368), True, 'import numpy as np\n'), ((6370, 6395), 'numpy.hstack', 'np.hstack', (['[white, black]'], {}), '([white, black])\n', (6379, 6395), True, 'import numpy as np\n'), ((6727, 6778), 'numpy.delete', 'np.delete', (['checkers', 'np.s_[slice_shape[0]:]'], {'axis': '(0)'}), '(checkers, np.s_[slice_shape[0]:], axis=0)\n', (6736, 6778), True, 'import numpy as np\n'), ((6849, 6900), 'numpy.delete', 'np.delete', (['checkers', 'np.s_[slice_shape[1]:]'], {'axis': '(1)'}), '(checkers, np.s_[slice_shape[1]:], axis=1)\n', (6858, 6900), True, 
'import numpy as np\n'), ((6474, 6508), 'numpy.divide', 'np.divide', (['slice_shape', 'tile.shape'], {}), '(slice_shape, tile.shape)\n', (6483, 6508), True, 'import numpy as np\n'), ((9071, 9095), 'numpy.repeat', 'np.repeat', (['patch_size', '(2)'], {}), '(patch_size, 2)\n', (9080, 9095), True, 'import numpy as np\n'), ((6150, 6171), 'numpy.array', 'np.array', (['slice_shape'], {}), '(slice_shape)\n', (6158, 6171), True, 'import numpy as np\n')] |
import numpy as np
import statistics
import time
def time_stat(func, size, ntrials):
total = 0
# the time to generate the random array should not be included
for i in range(ntrials):
data = np.random.rand(size)
# modify this function to time func with ntrials times using a new random array each time
start = time.perf_counter()
res = func(data)
total += time.perf_counter() - start
# return the average run time
return total/ntrials
if __name__ == '__main__':
print('{:.6f}s for statistics.mean'.format(time_stat(statistics.mean, 10**5, 10)))
print('{:.6f}s for np.mean'.format(time_stat(np.mean, 10**5, 1000)))
| [
"numpy.random.rand",
"time.perf_counter"
] | [((209, 229), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (223, 229), True, 'import numpy as np\n'), ((336, 355), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (353, 355), False, 'import time\n'), ((392, 411), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (409, 411), False, 'import time\n')] |
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output
import os
print(os.getcwd())
df_input_large=pd.read_csv('C:/Users/Asus/ads_covid-19/data/processed/COVID_large_flat_table.csv',sep=';',parse_dates=[0])
df_input_large=df_input_large.sort_values('date',ascending=True)
df_input_SIR=pd.read_csv('C:/Users/Asus/ads_covid-19/data/processed/COVID_large_fitted_table.csv',sep=';')
df_input_SIR=df_input_SIR.sort_values('date',ascending=True)
fig=go.Figure()
app=dash.Dash()
app.layout=html.Div([
dcc.Markdown('''
# Applied Data Science on COVID-19 data
Goal of the project is to teach data science by applying a cross industry standard process,
it covers the full walkthrough of: automated data gathering, data transformations,
filtering and machine learning to approximating the doubling time, and
(static) deployment of responsive dashboard.
'''),
dcc.Markdown('''
## Select a Country for Visualization of a Simulated SIR Curve
'''),
dcc.Dropdown( id='country_drop_down_simulated_list',
options=[{'label':each,'value':each} for each in df_input_large.columns[1:]],
value='Germany',
multi=False),
#Manipulating the values of beta ,gamma, t_initial, t_intro_measures,t_hold,t_relax to achieve the simulated curve
dcc.Markdown('''
## Vary the different values to reshape the SIR curve(Enter a number and press Enter)
'''),
html.Label(["Infection rate in days, when no measure introduced",
dcc.Input(
id='t_initial',
type='number',
value=28,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Infection rate in days, when measure introduced",
dcc.Input(
id='t_intro_measures',
type='number',
value=14,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Infection rate in days, when measure sustained/held",
dcc.Input(
id='t_hold',
type='number',
value=21,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Infection rate in days, when measure relaxed/removed",
dcc.Input(
id='t_relax',
type='number',
value=21,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Beta max",
dcc.Input(
id='beta_max',
type='number',
value=0.4,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Beta min",
dcc.Input(
id='beta_min',
type='number',
value=0.1,debounce=True)]),
html.Br(),
html.Br(),
html.Label(["Gamma",
dcc.Input(
id='gamma',
type='number',
value=0.1,debounce=True)]),
html.Br(),
html.Br(),
dcc.Graph(figure=fig, id='SIR_simulated', animate=False,),
dcc.Markdown('''
## Select a Country for Visualization of a Fitted SIR Curve
'''),
dcc.Dropdown( id='country_drop_down_fitted_list',
options=[{'label':each,'value':each} for each in df_input_SIR.columns[1:]],
value='Germany',
multi=False),
dcc.Graph(id='SIR_fitted', animate=False,)
])
@app.callback(
Output('SIR_simulated', 'figure'),
[Input('country_drop_down_simulated_list', 'value'),
Input('t_initial','value'),
Input('t_intro_measures','value'),
Input('t_hold','value'),
Input('t_relax','value'),
Input('beta_max','value'),
Input('beta_min','value'),
Input('gamma','value')])
def update_figure(country,t_initial, t_intro_measures, t_hold, t_relax, beta_max, beta_min, gamma):
ydata=df_input_large[country][df_input_large[country]>=30]
xdata=np.arange(len(ydata))
N0=10000000
I0=30
S0=N0-I0
R0=0
gamma
SIR=np.array([S0,I0,R0])
t_initial
t_intro_measures
t_hold
t_relax
beta_max
beta_min
propagation_rates=pd.DataFrame(columns={'susceptible':S0,'infected':I0,'recovered':R0})
pd_beta=np.concatenate((np.array(t_initial*[beta_max]),
np.linspace(beta_max,beta_min,t_intro_measures),
np.array(t_hold*[beta_min]),
np.linspace(beta_min,beta_max,t_relax),
))
def SIR_model(SIR,beta,gamma):
'SIR model for simulatin spread'
'S: Susceptible population'
'I: Infected popuation'
'R: Recovered population'
'S+I+R=N (remains constant)'
'dS+dI+dR=0 model has to satisfy this condition at all time'
S,I,R=SIR
dS_dt=-beta*S*I/N0
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return ([dS_dt,dI_dt,dR_dt])
for each_beta in pd_beta:
new_delta_vec=SIR_model(SIR,each_beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],'infected':SIR[1],'recovered':SIR[2]},ignore_index=True)
fig=go.Figure()
fig.add_trace(go.Bar(x=xdata,
y=ydata,
marker_color='red',
name='Confirmed Cases'
))
fig.add_trace(go.Scatter(x=xdata,
y=propagation_rates.infected,
mode='lines',
marker_color='blue',
name='Simulated curve'))
fig.update_layout(shapes=[
dict(type='rect',xref='x',yref='paper',x0=0,y0=0,x1=t_initial,y1=1,fillcolor="midnightblue",opacity=0.3,layer="below"),
dict(type='rect',xref='x',yref='paper',x0=t_initial,y0=0,x1=t_initial+t_intro_measures,y1=1,fillcolor="midnightblue",opacity=0.4,layer="below"),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures,y0=0,x1=t_initial+t_intro_measures+t_hold,y1=1,fillcolor="midnightblue",opacity=0.5,layer='below'),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures+t_hold,y0=0,x1=t_initial+t_intro_measures+t_hold+t_relax,y1=1,fillcolor="midnightblue",opacity=0.6,layer='below')
],
title='SIR Simulation Scenario',
title_x=0.5,
xaxis=dict(title='Timeline',
titlefont_size=16),
yaxis=dict(title='Confirmed infected people (source johns hopkins csse, log-scale)',
type='log',
titlefont_size=16,
),
width=1600,
height=900,
)
return fig
@app.callback(
Output('SIR_fitted', 'figure'),
[Input('country_drop_down_fitted_list', 'value')])
def SIR_figure(country_list):
df_SIR= df_large_flat
for n in df_SIR[1:]:
data = []
trace = go.Scatter(x=df_SIR.date,
y=df_SIR[country_list],
mode='lines+markers',
name = country_list)
data.append(trace)
trace_fit = go.Scatter(x=df_SIR.date,
y=df_SIR[country_list +'_fitted'],
mode='lines+markers',
name=country_list+'_fitted')
data.append(trace_fit)
return {'data': data,
'layout' : dict(
width=1600,
height=900,
title= 'SIR Fitted Curve',
xaxis={'tickangle':-45,
'nticks':20,
'tickfont':dict(size=14,color="#7f7f7f"),
},
yaxis={'type':"log"
}
)
}
if __name__ == '__main__':
app.run_server(debug=True,use_reloader=False) | [
"plotly.graph_objects.Bar",
"pandas.read_csv",
"dash_core_components.Input",
"dash.dependencies.Output",
"dash_html_components.Br",
"os.getcwd",
"dash.dependencies.Input",
"plotly.graph_objects.Figure",
"numpy.array",
"dash_core_components.Dropdown",
"plotly.graph_objects.Scatter",
"dash_core_... | [((254, 372), 'pandas.read_csv', 'pd.read_csv', (['"""C:/Users/Asus/ads_covid-19/data/processed/COVID_large_flat_table.csv"""'], {'sep': '""";"""', 'parse_dates': '[0]'}), "(\n 'C:/Users/Asus/ads_covid-19/data/processed/COVID_large_flat_table.csv',\n sep=';', parse_dates=[0])\n", (265, 372), True, 'import pandas as pd\n'), ((442, 545), 'pandas.read_csv', 'pd.read_csv', (['"""C:/Users/Asus/ads_covid-19/data/processed/COVID_large_fitted_table.csv"""'], {'sep': '""";"""'}), "(\n 'C:/Users/Asus/ads_covid-19/data/processed/COVID_large_fitted_table.csv',\n sep=';')\n", (453, 545), True, 'import pandas as pd\n'), ((605, 616), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (614, 616), True, 'import plotly.graph_objects as go\n'), ((622, 633), 'dash.Dash', 'dash.Dash', ([], {}), '()\n', (631, 633), False, 'import dash\n'), ((225, 236), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (234, 236), False, 'import os\n'), ((4129, 4151), 'numpy.array', 'np.array', (['[S0, I0, R0]'], {}), '([S0, I0, R0])\n', (4137, 4151), True, 'import numpy as np\n'), ((4269, 4343), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "{'susceptible': S0, 'infected': I0, 'recovered': R0}"}), "(columns={'susceptible': S0, 'infected': I0, 'recovered': R0})\n", (4281, 4343), True, 'import pandas as pd\n'), ((5326, 5337), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (5335, 5337), True, 'import plotly.graph_objects as go\n'), ((3528, 3561), 'dash.dependencies.Output', 'Output', (['"""SIR_simulated"""', '"""figure"""'], {}), "('SIR_simulated', 'figure')\n", (3534, 3561), False, 'from dash.dependencies import Input, Output\n'), ((7155, 7185), 'dash.dependencies.Output', 'Output', (['"""SIR_fitted"""', '"""figure"""'], {}), "('SIR_fitted', 'figure')\n", (7161, 7185), False, 'from dash.dependencies import Input, Output\n'), ((668, 1057), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n # Applied Data Science on COVID-19 data\n\n Goal of the project is 
to teach data science by applying a cross industry standard process,\n it covers the full walkthrough of: automated data gathering, data transformations,\n filtering and machine learning to approximating the doubling time, and\n (static) deployment of responsive dashboard.\n\n """'], {}), '(\n """\n # Applied Data Science on COVID-19 data\n\n Goal of the project is to teach data science by applying a cross industry standard process,\n it covers the full walkthrough of: automated data gathering, data transformations,\n filtering and machine learning to approximating the doubling time, and\n (static) deployment of responsive dashboard.\n\n """\n )\n', (680, 1057), True, 'import dash_core_components as dcc\n'), ((1064, 1166), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n ## Select a Country for Visualization of a Simulated SIR Curve\n """'], {}), '(\n """\n ## Select a Country for Visualization of a Simulated SIR Curve\n """\n )\n', (1076, 1166), True, 'import dash_core_components as dcc\n'), ((1181, 1351), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country_drop_down_simulated_list"""', 'options': "[{'label': each, 'value': each} for each in df_input_large.columns[1:]]", 'value': '"""Germany"""', 'multi': '(False)'}), "(id='country_drop_down_simulated_list', options=[{'label': each,\n 'value': each} for each in df_input_large.columns[1:]], value='Germany',\n multi=False)\n", (1193, 1351), True, 'import dash_core_components as dcc\n'), ((1542, 1675), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n ## Vary the different values to reshape the SIR curve(Enter a number and press Enter)\n """'], {}), '(\n """\n ## Vary the different values to reshape the SIR curve(Enter a number and press Enter)\n """\n )\n', (1554, 1675), True, 'import dash_core_components as dcc\n'), ((1855, 1864), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1862, 1864), True, 'import dash_html_components as html\n'), ((1871, 1880), 
'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1878, 1880), True, 'import dash_html_components as html\n'), ((2068, 2077), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2075, 2077), True, 'import dash_html_components as html\n'), ((2084, 2093), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2091, 2093), True, 'import dash_html_components as html\n'), ((2275, 2284), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2282, 2284), True, 'import dash_html_components as html\n'), ((2291, 2300), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2298, 2300), True, 'import dash_html_components as html\n'), ((2490, 2499), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2497, 2499), True, 'import dash_html_components as html\n'), ((2506, 2515), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2513, 2515), True, 'import dash_html_components as html\n'), ((2661, 2670), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2668, 2670), True, 'import dash_html_components as html\n'), ((2677, 2686), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2684, 2686), True, 'import dash_html_components as html\n'), ((2819, 2828), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2826, 2828), True, 'import dash_html_components as html\n'), ((2835, 2844), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2842, 2844), True, 'import dash_html_components as html\n'), ((2984, 2993), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2991, 2993), True, 'import dash_html_components as html\n'), ((3000, 3009), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3007, 3009), True, 'import dash_html_components as html\n'), ((3022, 3078), 'dash_core_components.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'id': '"""SIR_simulated"""', 'animate': '(False)'}), "(figure=fig, id='SIR_simulated', animate=False)\n", (3031, 3078), True, 'import dash_core_components as dcc\n'), ((3092, 3191), 
'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n ## Select a Country for Visualization of a Fitted SIR Curve\n """'], {}), '(\n """\n ## Select a Country for Visualization of a Fitted SIR Curve\n """\n )\n', (3104, 3191), True, 'import dash_core_components as dcc\n'), ((3196, 3361), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country_drop_down_fitted_list"""', 'options': "[{'label': each, 'value': each} for each in df_input_SIR.columns[1:]]", 'value': '"""Germany"""', 'multi': '(False)'}), "(id='country_drop_down_fitted_list', options=[{'label': each,\n 'value': each} for each in df_input_SIR.columns[1:]], value='Germany',\n multi=False)\n", (3208, 3361), True, 'import dash_core_components as dcc\n'), ((3430, 3471), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""SIR_fitted"""', 'animate': '(False)'}), "(id='SIR_fitted', animate=False)\n", (3439, 3471), True, 'import dash_core_components as dcc\n'), ((5357, 5425), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'xdata', 'y': 'ydata', 'marker_color': '"""red"""', 'name': '"""Confirmed Cases"""'}), "(x=xdata, y=ydata, marker_color='red', name='Confirmed Cases')\n", (5363, 5425), True, 'import plotly.graph_objects as go\n'), ((5571, 5683), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'xdata', 'y': 'propagation_rates.infected', 'mode': '"""lines"""', 'marker_color': '"""blue"""', 'name': '"""Simulated curve"""'}), "(x=xdata, y=propagation_rates.infected, mode='lines',\n marker_color='blue', name='Simulated curve')\n", (5581, 5683), True, 'import plotly.graph_objects as go\n'), ((3569, 3619), 'dash.dependencies.Input', 'Input', (['"""country_drop_down_simulated_list"""', '"""value"""'], {}), "('country_drop_down_simulated_list', 'value')\n", (3574, 3619), False, 'from dash.dependencies import Input, Output\n'), ((3626, 3653), 'dash.dependencies.Input', 'Input', (['"""t_initial"""', '"""value"""'], {}), "('t_initial', 'value')\n", (3631, 3653), False, 'from 
dash.dependencies import Input, Output\n'), ((3659, 3693), 'dash.dependencies.Input', 'Input', (['"""t_intro_measures"""', '"""value"""'], {}), "('t_intro_measures', 'value')\n", (3664, 3693), False, 'from dash.dependencies import Input, Output\n'), ((3699, 3723), 'dash.dependencies.Input', 'Input', (['"""t_hold"""', '"""value"""'], {}), "('t_hold', 'value')\n", (3704, 3723), False, 'from dash.dependencies import Input, Output\n'), ((3729, 3754), 'dash.dependencies.Input', 'Input', (['"""t_relax"""', '"""value"""'], {}), "('t_relax', 'value')\n", (3734, 3754), False, 'from dash.dependencies import Input, Output\n'), ((3760, 3786), 'dash.dependencies.Input', 'Input', (['"""beta_max"""', '"""value"""'], {}), "('beta_max', 'value')\n", (3765, 3786), False, 'from dash.dependencies import Input, Output\n'), ((3792, 3818), 'dash.dependencies.Input', 'Input', (['"""beta_min"""', '"""value"""'], {}), "('beta_min', 'value')\n", (3797, 3818), False, 'from dash.dependencies import Input, Output\n'), ((3824, 3847), 'dash.dependencies.Input', 'Input', (['"""gamma"""', '"""value"""'], {}), "('gamma', 'value')\n", (3829, 3847), False, 'from dash.dependencies import Input, Output\n'), ((7381, 7475), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_SIR.date', 'y': 'df_SIR[country_list]', 'mode': '"""lines+markers"""', 'name': 'country_list'}), "(x=df_SIR.date, y=df_SIR[country_list], mode='lines+markers',\n name=country_list)\n", (7391, 7475), True, 'import plotly.graph_objects as go\n'), ((7617, 7736), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_SIR.date', 'y': "df_SIR[country_list + '_fitted']", 'mode': '"""lines+markers"""', 'name': "(country_list + '_fitted')"}), "(x=df_SIR.date, y=df_SIR[country_list + '_fitted'], mode=\n 'lines+markers', name=country_list + '_fitted')\n", (7627, 7736), True, 'import plotly.graph_objects as go\n'), ((7193, 7240), 'dash.dependencies.Input', 'Input', (['"""country_drop_down_fitted_list"""', '"""value"""'], {}), 
"('country_drop_down_fitted_list', 'value')\n", (7198, 7240), False, 'from dash.dependencies import Input, Output\n'), ((4368, 4400), 'numpy.array', 'np.array', (['(t_initial * [beta_max])'], {}), '(t_initial * [beta_max])\n', (4376, 4400), True, 'import numpy as np\n'), ((4424, 4473), 'numpy.linspace', 'np.linspace', (['beta_max', 'beta_min', 't_intro_measures'], {}), '(beta_max, beta_min, t_intro_measures)\n', (4435, 4473), True, 'import numpy as np\n'), ((4497, 4526), 'numpy.array', 'np.array', (['(t_hold * [beta_min])'], {}), '(t_hold * [beta_min])\n', (4505, 4526), True, 'import numpy as np\n'), ((4550, 4590), 'numpy.linspace', 'np.linspace', (['beta_min', 'beta_max', 't_relax'], {}), '(beta_min, beta_max, t_relax)\n', (4561, 4590), True, 'import numpy as np\n'), ((1764, 1829), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""t_initial"""', 'type': '"""number"""', 'value': '(28)', 'debounce': '(True)'}), "(id='t_initial', type='number', value=28, debounce=True)\n", (1773, 1829), True, 'import dash_core_components as dcc\n'), ((1970, 2042), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""t_intro_measures"""', 'type': '"""number"""', 'value': '(14)', 'debounce': '(True)'}), "(id='t_intro_measures', type='number', value=14, debounce=True)\n", (1979, 2042), True, 'import dash_core_components as dcc\n'), ((2187, 2249), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""t_hold"""', 'type': '"""number"""', 'value': '(21)', 'debounce': '(True)'}), "(id='t_hold', type='number', value=21, debounce=True)\n", (2196, 2249), True, 'import dash_core_components as dcc\n'), ((2395, 2458), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""t_relax"""', 'type': '"""number"""', 'value': '(21)', 'debounce': '(True)'}), "(id='t_relax', type='number', value=21, debounce=True)\n", (2404, 2458), True, 'import dash_core_components as dcc\n'), ((2570, 2635), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""beta_max"""', 'type': 
'"""number"""', 'value': '(0.4)', 'debounce': '(True)'}), "(id='beta_max', type='number', value=0.4, debounce=True)\n", (2579, 2635), True, 'import dash_core_components as dcc\n'), ((2728, 2793), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""beta_min"""', 'type': '"""number"""', 'value': '(0.1)', 'debounce': '(True)'}), "(id='beta_min', type='number', value=0.1, debounce=True)\n", (2737, 2793), True, 'import dash_core_components as dcc\n'), ((2896, 2958), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""gamma"""', 'type': '"""number"""', 'value': '(0.1)', 'debounce': '(True)'}), "(id='gamma', type='number', value=0.1, debounce=True)\n", (2905, 2958), True, 'import dash_core_components as dcc\n')] |
import csv
import numpy as np
from collections import Counter
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import f1_score
from sklearn import preprocessing
from sklearn import svm
from support.helper import tokenize
from support.helper import process_tokens
from support.helper import find_index
from support.helper import Label
class BaselineTrainer:
def __init__(self, data):
self.data = data
self.cash_tags = []
self.bow = Counter()
self.bow_features = []
self.bow_X = []
self.bow_y = []
def _fill_bow(self): # fill the bow dictionary with self.data
for label in self.data:
for message in self.data[label]:
cash_tags, tokens = tokenize(message)
self.cash_tags.extend(x for x in cash_tags if x not in self.cash_tags) # add new cash_tags
for token in process_tokens(tokens):
self.bow[token] = self.bow[token] + 1 if token in self.bow else 1
def _generate_bow_feature_vector(self): # use DictVectorizer to feature vector for log reg
vec = DictVectorizer() # To be decided
vec.fit_transform(self.bow)
self.bow_features = vec.get_feature_names()
self.bow_features.sort() # prepare for binary search
def _generate_bow_dataset(self):
# key = (NEG_LABEL, POS_LABEL)
for key in self.data:
for message in self.data[key]:
tokens = process_tokens(tokenize(message)[1])
line_vector = np.zeros(len(self.bow_features) + 1)
for token in tokens:
idx = find_index(token, 0, len(self.bow_features) - 1, self.bow_features)
line_vector[idx] += 1
line_vector[len(line_vector) - 1] = len(tokens)
self.bow_X.append(line_vector)
self.bow_y = np.concatenate(([Label.NEG_LABEL.value] * len(self.data[Label.NEG_LABEL.value]),
[Label.POS_LABEL.value] * len(self.data[Label.POS_LABEL.value])))
# Format into numpy data structure just in case
self.bow_X = np.array(self.bow_X)
self.bow_y = np.array(self.bow_y)
def _bow_train(self, model):
# Make copies because of reuse
X = self.bow_X.copy()
y = self.bow_y.copy()
X = preprocessing.scale(X)
# Creates model and cross validation sets
kf = KFold(n_splits=5, shuffle=True)
kf.get_n_splits()
model_accuracy = []
model_f1 = []
for train_index, test_index in kf.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
model_accuracy.append(model.score(X_test, y_test))
model_f1.append(f1_score(y_test, model.predict(X_test), pos_label='Bullish'))
return model_accuracy, model_f1
def print_bow_score(self):
self._fill_bow()
self._generate_bow_feature_vector()
self._generate_bow_dataset()
# Print stats about BOW and dataset
print("Total messages count: ", len(self.bow_y))
print("Vocab size: ", len(self.bow_features) - 1)
print("Tokens size: ", sum(self.bow.values()))
print("Unique cash tags: ", len(self.cash_tags))
log_reg_model = LogisticRegression(solver='sag', random_state=0, max_iter=100,
n_jobs=-1, verbose=1, class_weight={Label.NEG_LABEL.value: 2, Label.POS_LABEL.value: 1})
log_reg_accuracy, log_reg_f1 = self._bow_train(log_reg_model)
print("Log Reg accuracy: ", sum(log_reg_accuracy) / len(log_reg_accuracy))
print(log_reg_accuracy)
print("Log Reg f1 score: ", sum(log_reg_f1) / len(log_reg_f1))
print(log_reg_f1)
naive_bayes_model = GaussianNB()
naive_bayes_accuracy, naive_bayes_f1 = self._bow_train(naive_bayes_model)
print("Naive Bayes accuracy: ", sum(naive_bayes_accuracy) / len(naive_bayes_accuracy))
print(naive_bayes_accuracy)
print("Naive Bayes f1 score: ", sum(naive_bayes_f1) / len(naive_bayes_f1))
print(naive_bayes_f1)
# svm_model = svm.SVC(kernel='rbf', cache_size=4000, max_iter=5000, verbose=True) # performs similar to LR, disabled for speed
# svm_accuracy, svm_precision = self._bow_train(svm_model)
# print("SVM accuracy: ", sum(svm_accuracy) / len(svm_accuracy))
# print(svm_accuracy)
# print("SVM precision: ", sum(svm_precision) / len(svm_precision))
# print(svm_precision)
def main():
path = "data/stocktwits_labelled_train.csv"
with open(path, 'r', encoding='utf-8') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
data = {
Label.NEG_LABEL.value: [],
Label.POS_LABEL.value: [],
}
for label, msg in reader:
data[label].append(msg)
print("The NEG class' messages count: ", len(data[Label.NEG_LABEL.value]))
print("The POS class' messages count: ", len(data[Label.POS_LABEL.value]))
trainer = BaselineTrainer(data)
trainer.print_bow_score()
if __name__ == "__main__":
# execute only if run as a script
main()
| [
"sklearn.feature_extraction.DictVectorizer",
"support.helper.process_tokens",
"sklearn.linear_model.LogisticRegression",
"collections.Counter",
"numpy.array",
"support.helper.tokenize",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.KFold",
"csv.reader",
"sklearn.preprocessing.scale"
] | [((654, 663), 'collections.Counter', 'Counter', ([], {}), '()\n', (661, 663), False, 'from collections import Counter\n'), ((1314, 1330), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (1328, 1330), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((2370, 2390), 'numpy.array', 'np.array', (['self.bow_X'], {}), '(self.bow_X)\n', (2378, 2390), True, 'import numpy as np\n'), ((2413, 2433), 'numpy.array', 'np.array', (['self.bow_y'], {}), '(self.bow_y)\n', (2421, 2433), True, 'import numpy as np\n'), ((2587, 2609), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (2606, 2609), False, 'from sklearn import preprocessing\n'), ((2677, 2708), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)'}), '(n_splits=5, shuffle=True)\n', (2682, 2708), False, 'from sklearn.model_selection import KFold\n'), ((3655, 3815), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""sag"""', 'random_state': '(0)', 'max_iter': '(100)', 'n_jobs': '(-1)', 'verbose': '(1)', 'class_weight': '{Label.NEG_LABEL.value: 2, Label.POS_LABEL.value: 1}'}), "(solver='sag', random_state=0, max_iter=100, n_jobs=-1,\n verbose=1, class_weight={Label.NEG_LABEL.value: 2, Label.POS_LABEL.\n value: 1})\n", (3673, 3815), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4169, 4181), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (4179, 4181), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5075, 5110), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (5085, 5110), False, 'import csv\n'), ((932, 949), 'support.helper.tokenize', 'tokenize', (['message'], {}), '(message)\n', (940, 949), False, 'from support.helper import tokenize\n'), ((1089, 1111), 'support.helper.process_tokens', 'process_tokens', (['tokens'], {}), '(tokens)\n', (1103, 1111), False, 'from support.helper 
import process_tokens\n'), ((1697, 1714), 'support.helper.tokenize', 'tokenize', (['message'], {}), '(message)\n', (1705, 1714), False, 'from support.helper import tokenize\n')] |
import numpy as np
import ast
def newtonInterpolation(x, y):
x = ast.literal_eval(x)
y = ast.literal_eval(y)
n = len(y)
table = np.zeros([n, n]) # Create a square matrix to hold table
table[::, 0] = y # first column is y
results = {"table": [], "coefficient": []}
results["table"].append(y)
""" Creates Newton table and extracts coefficients """
for j in range(1, n):
column = []
for i in range(j):
column.append(0)
for i in range(n - j):
# create table by updating other columns
table[i][j] = (table[i + 1][j - 1] - table[i][j - 1]) / (x[i + j] - x[i])
column.append( table[i][j])
results["table"].append(column)
coeff = table[0] # return first row
for c in coeff:
results["coefficient"].append(c)
polynom = ""
for i in range(n):
polynom += str(round(table[0][i],4))
for j in range(i):
polynom+= "*( x -"+ str(round(x[j],4))+ ")"
if (i != n - 1):
polynom += "+"
polynom = polynom.replace(" ", "").replace("--", "+").replace("++", "+").replace("+-", "-").replace("-+", "-")
results["polynom"] = polynom
return results
| [
"ast.literal_eval",
"numpy.zeros"
] | [((74, 93), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (90, 93), False, 'import ast\n'), ((103, 122), 'ast.literal_eval', 'ast.literal_eval', (['y'], {}), '(y)\n', (119, 122), False, 'import ast\n'), ((152, 168), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (160, 168), True, 'import numpy as np\n')] |
import torch as th
import time
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
sns.set()
sns.set_style("darkgrid", {"axes.facecolor": "#f0f0f7"})
linestyle = [':', '--', '-.', '-']
fontsize = 20
#EXP_PATH = os.path.join(os.environ['NFS_HOME'], 'code/pymarl')
attn_feats = []
# def hook(model, input, output):
# out = output[1].view(4, -1, output[1].size(1), output[1].size(2))
# out = out.permute(1, 0, 2, 3).contiguous()
# attn_feats.append(out.clone().detach().cpu().numpy())
def hook(model, input, output):
out = output[1].view(4, -1, output[1].size(1), output[1].size(2))
out = out.permute(1, 0, 2, 3).contiguous()
attn_feats.append(out.clone().detach().cpu().numpy())
def statistic_attn(args, runner, learner):
algo_name = args.name
map_name = args.env_args['map_name']
learner.mac.agent.transformer.transformer_blocks[0].attn.attention.register_forward_hook(hook)
run(runner, test_mode=True)
runner.close_env()
n_heads = attn_feats[0].shape[1]
plt.figure(figsize=(5, 20))
n_steps = 20
for i in range(n_steps):
for j in range(n_heads):
plt.subplot(n_steps, n_heads, i * n_heads + j + 1)
attn_tmp = np.zeros((runner.env.n_agents, runner.env.n_agents + runner.env.n_enemies))
attn_tmp[:runner.env.n_agents, :runner.env.n_agents] = attn_feats[i][0, j][:runner.env.n_agents, :runner.env.n_agents]
attn_tmp[:runner.env.n_agents, runner.env.n_agents:] = attn_feats[i][0, j][:runner.env.n_agents, runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies]
# attn_tmp = np.zeros((runner.env.n_agents + runner.env.n_enemies, runner.env.n_agents + runner.env.n_enemies))
# attn_tmp[:runner.env.n_agents, :runner.env.n_agents] = attn_feats[i][0, j][:runner.env.n_agents, :runner.env.n_agents]
# attn_tmp[runner.env.n_agents:, :runner.env.n_agents] = attn_feats[i][0, j][runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies, :runner.env.n_agents]
# attn_tmp[:runner.env.n_agents, runner.env.n_agents:] = attn_feats[i][0, j][:runner.env.n_agents, runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies]
# attn_tmp[runner.env.n_agents:, runner.env.n_agents:] = attn_feats[i][0, j][runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies, runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies]
sns.heatmap(attn_tmp, cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5, cbar=False)
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.tight_layout()
plt.show()
# learner.mac.agent.attn.attention.register_forward_hook(hook)
# while 1:
# run(runner, test_mode=True)
# runner.save_replay()
# runner.close_env()
# n_steps = len(attn_feats)
# n_agents = attn_feats[0].shape[0]
# n_heads = attn_feats[0].shape[1]
# n_tokens = attn_feats[0].shape[2]
# plt.figure(figsize=(5, 100))
# for i in range(n_steps):
# plt.subplot(n_steps, 1, i + 1)
# sns.heatmap(np.vstack([np.hstack([attn_feats[0][k, j] for j in range(n_heads)]) for k in range(n_agents)]), cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5, cbar=False)
# plt.xticks([])
# plt.yticks([])
# plt.axis('off')
# plt.tight_layout()
# plt.show()
# n_steps = len(attn_feats)
# n_agents = attn_feats[0].shape[0]
# n_heads = attn_feats[0].shape[1]
# n_tokens = attn_feats[0].shape[2]
# plt.figure(figsize=(5, 100))
# n_steps = 20
# for k in range(n_steps):
# for i in range(n_agents):
# for j in range(n_heads):
# plt.subplot(n_steps * n_agents, n_heads, k * n_agents * n_heads + i * n_heads + j + 1)
# # sns.heatmap(1 - attn_agent_data[0, 0, 0, i - 1], vmin=0.5, vmax=1, cmap='rocket', linewidths=.5)
# sns.heatmap(attn_feats[k][i, j], cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5, cbar=False)
# plt.xticks([])
# plt.yticks([])
# plt.axis('off')
# plt.tight_layout()
# plt.show()
#
# plt.savefig(os.path.join(EXP_PATH, 'results', 'fig', 'test.pdf'), format='pdf', bbox_inches='tight')
pass
def run(runner, test_mode=False):
runner.reset()
terminated = False
episode_return = 0
runner.mac.init_hidden(batch_size=runner.batch_size)
while not terminated:
if runner.args.evaluate and runner.args.render:
runner.env.render()
time.sleep(0.2)
pre_transition_data = runner._get_pre_transition_data()
runner.batch.update(pre_transition_data, ts=runner.t)
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch of size 1
actions = runner.mac.select_actions(runner.batch, t_ep=runner.t, t_env=runner.t_env, test_mode=test_mode)
reward, terminated, env_info = runner.env.step(actions[0])
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward,)],
"terminated": [(terminated != env_info.get("episode_limit", False),)],
}
runner.batch.update(post_transition_data, ts=runner.t)
runner.t += 1
# n_agents = attn_feats[-1].shape[0]
# n_heads = attn_feats[-1].shape[1]
# plt.figure(figsize=(5, 5))
# for i in range(n_agents):
# for j in range(n_heads):
# plt.subplot(n_agents, n_heads, i * n_heads + j + 1)
# sns.heatmap(attn_feats[-1][i, j], cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5, cbar=False)
# plt.xticks([])
# plt.yticks([])
# plt.axis('off')
# plt.tight_layout()
# plt.show()
# n_heads = attn_feats[-1].shape[1]
# plt.figure(figsize=(5, 2))
# for i in range(n_heads):
# plt.subplot(1, n_heads, i + 1)
# attn_tmp = np.zeros((runner.env.n_agents + runner.env.n_enemies, runner.env.n_agents + runner.env.n_enemies))
# attn_tmp[:runner.env.n_agents, :runner.env.n_agents] = attn_feats[-1][0, i][:runner.env.n_agents, :runner.env.n_agents]
# attn_tmp[runner.env.n_agents:, :runner.env.n_agents] = attn_feats[-1][0, i][runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies, :runner.env.n_agents]
# attn_tmp[:runner.env.n_agents, runner.env.n_agents:] = attn_feats[-1][0, i][:runner.env.n_agents, runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies]
# attn_tmp[runner.env.n_agents:, runner.env.n_agents:] = attn_feats[-1][0, i][runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies, runner.env.max_n_agents:runner.env.max_n_agents + runner.env.n_enemies]
# sns.heatmap(attn_tmp, cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5, cbar=False)
# plt.xticks([])
# plt.yticks([])
# plt.axis('off')
# plt.tight_layout()
# plt.show()
last_data = runner._get_pre_transition_data()
runner.batch.update(last_data, ts=runner.t)
# Select actions in the last stored state
actions = runner.mac.select_actions(runner.batch, t_ep=runner.t, t_env=runner.t_env, test_mode=test_mode)
runner.batch.update({"actions": actions}, ts=runner.t)
cur_stats = {}
cur_returns = []
log_prefix = "test/" if test_mode else "run/"
cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})
cur_stats["n_episodes"] = 1 + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = runner.t + cur_stats.get("ep_length", 0)
cur_returns.append(episode_return)
if runner.args.evaluate:
runner.logger.console_logger.info("episode_return: {:.4f}".format(episode_return))
if test_mode and (len(runner.test_returns) == runner.args.test_nepisode):
runner._log(cur_returns, cur_stats, log_prefix)
return runner.batch
def statistic_q(args, runner, learner):
method = args.checkpoint_path.split('/')[9]
original_map = args.checkpoint_path.split('/')[8]
current_map = args.env_args['map_name']
file_name = method + '_' + original_map + '_to_' + current_map + '.pdf'
with th.no_grad():
episode_batch = runner.run(test_mode=True)
q, sum_q, mix_q, returns = test(learner, episode_batch)
plot(file_name, q, sum_q, mix_q, returns)
def test(learner, batch):
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
mac_out = []
learner.mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs = learner.mac.forward(batch, t=t)
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1)
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)
if learner.args.mixer not in ['ext_qmix', 'latent_qmix']:
mix_chosen_action_qvals = learner.mixer(chosen_action_qvals, batch["state"][:, :-1])
else:
mix_chosen_action_qvals = learner.mixer(chosen_action_qvals, batch["state"][:, :-1], learner.args.enemy_num, learner.args.ally_num)
returns = rewards
for t in range(batch.max_seq_length - 1):
returns[:, t, :] = rewards[:, t:, :].sum(1, True)
q = chosen_action_qvals.squeeze().cpu().numpy()
sum_q = chosen_action_qvals.sum(2, True).squeeze().cpu().numpy()
mix_q = mix_chosen_action_qvals.squeeze().cpu().numpy()
returns = returns.squeeze().cpu().numpy()
return q, sum_q, mix_q, returns
def plot(file_name, q, sum_q, mix_q, returns):
label = ['sum', 'mix', 'return', 'agent']
plt.figure(figsize=(7, 2.5))
sns.tsplot(time=[i for i in range(len(sum_q))], data=sum_q, linestyle=linestyle[0], condition=label[0], color=sns.color_palette()[0])
sns.tsplot(time=[i for i in range(len(mix_q))], data=mix_q, linestyle=linestyle[1], condition=label[1], color=sns.color_palette()[1])
sns.tsplot(time=[i for i in range(len(returns))], data=returns, linestyle=linestyle[2], condition=label[2], color=sns.color_palette()[2])
for a_id in range(len(q[0, :])):
sns.tsplot(time=[i for i in range(len(q[:, a_id]))], data=q[:, a_id], linestyle=linestyle[3], condition=label[3] + str(a_id), color=sns.color_palette()[3])
plt.legend(loc='upper right', ncol=2, fontsize=14)
plt.xlim((-2, 71))
plt.ylim((-10, 35))
plt.yticks(fontsize=fontsize)
plt.title(file_name[:-4], fontsize=fontsize)
plt.ylabel('Q value', fontsize=fontsize, labelpad=10)
plt.xlabel(r'Total timesteps', fontsize=fontsize)
plt.savefig(os.path.join(EXP_PATH, 'results', 'fig', file_name), format='pdf', bbox_inches='tight')
plt.show() | [
"seaborn.cubehelix_palette",
"matplotlib.pyplot.ylabel",
"time.sleep",
"seaborn.set_style",
"seaborn.set",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.ylim",
"torch.gather",
"matplotlib.pyplot.xticks",
"matplot... | [((178, 187), 'seaborn.set', 'sns.set', ([], {}), '()\n', (185, 187), True, 'import seaborn as sns\n'), ((188, 244), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""', "{'axes.facecolor': '#f0f0f7'}"], {}), "('darkgrid', {'axes.facecolor': '#f0f0f7'})\n", (201, 244), True, 'import seaborn as sns\n'), ((1112, 1139), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 20)'}), '(figsize=(5, 20))\n', (1122, 1139), True, 'import matplotlib.pyplot as plt\n'), ((2782, 2792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((9306, 9330), 'torch.stack', 'th.stack', (['mac_out'], {'dim': '(1)'}), '(mac_out, dim=1)\n', (9314, 9330), True, 'import torch as th\n'), ((10215, 10243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 2.5)'}), '(figsize=(7, 2.5))\n', (10225, 10243), True, 'import matplotlib.pyplot as plt\n'), ((10870, 10920), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(14)'}), "(loc='upper right', ncol=2, fontsize=14)\n", (10880, 10920), True, 'import matplotlib.pyplot as plt\n'), ((10925, 10943), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2, 71)'], {}), '((-2, 71))\n', (10933, 10943), True, 'import matplotlib.pyplot as plt\n'), ((10948, 10967), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-10, 35)'], {}), '((-10, 35))\n', (10956, 10967), True, 'import matplotlib.pyplot as plt\n'), ((10972, 11001), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fontsize'}), '(fontsize=fontsize)\n', (10982, 11001), True, 'import matplotlib.pyplot as plt\n'), ((11007, 11051), 'matplotlib.pyplot.title', 'plt.title', (['file_name[:-4]'], {'fontsize': 'fontsize'}), '(file_name[:-4], fontsize=fontsize)\n', (11016, 11051), True, 'import matplotlib.pyplot as plt\n'), ((11057, 11110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Q value"""'], {'fontsize': 'fontsize', 'labelpad': '(10)'}), "('Q value', 
fontsize=fontsize, labelpad=10)\n", (11067, 11110), True, 'import matplotlib.pyplot as plt\n'), ((11115, 11163), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total timesteps"""'], {'fontsize': 'fontsize'}), "('Total timesteps', fontsize=fontsize)\n", (11125, 11163), True, 'import matplotlib.pyplot as plt\n'), ((11275, 11285), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11283, 11285), True, 'import matplotlib.pyplot as plt\n'), ((8661, 8673), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (8671, 8673), True, 'import torch as th\n'), ((11182, 11233), 'os.path.join', 'os.path.join', (['EXP_PATH', '"""results"""', '"""fig"""', 'file_name'], {}), "(EXP_PATH, 'results', 'fig', file_name)\n", (11194, 11233), False, 'import os\n'), ((1232, 1282), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_steps', 'n_heads', '(i * n_heads + j + 1)'], {}), '(n_steps, n_heads, i * n_heads + j + 1)\n', (1243, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1381), 'numpy.zeros', 'np.zeros', (['(runner.env.n_agents, runner.env.n_agents + runner.env.n_enemies)'], {}), '((runner.env.n_agents, runner.env.n_agents + runner.env.n_enemies))\n', (1314, 1381), True, 'import numpy as np\n'), ((2677, 2691), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2687, 2691), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2718), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2714, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2746), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2739, 2746), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2777), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2775, 2777), True, 'import matplotlib.pyplot as plt\n'), ((4776, 4791), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4786, 4791), False, 'import time\n'), ((9358, 9406), 'torch.gather', 'th.gather', (['mac_out[:, :-1]'], {'dim': '(3)', 'index': 'actions'}), '(mac_out[:, 
:-1], dim=3, index=actions)\n', (9367, 9406), True, 'import torch as th\n'), ((10359, 10378), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (10376, 10378), True, 'import seaborn as sns\n'), ((10497, 10516), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (10514, 10516), True, 'import seaborn as sns\n'), ((10639, 10658), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (10656, 10658), True, 'import seaborn as sns\n'), ((2590, 2636), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'as_cmap': '(True)', 'gamma': '(0.8)'}), '(as_cmap=True, gamma=0.8)\n', (2611, 2636), True, 'import seaborn as sns\n'), ((10841, 10860), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (10858, 10860), True, 'import seaborn as sns\n')] |
# electric.csv를 읽어서 w,b를 구하고
# 실측데이터 scatter, 예측데이터는 라인차트를 그리시요.
# 전기생산량이 5인경우 전기사용량을 예측하시오
# 전기생산량, 전기사용량
# Keras 버전으로
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
matplotlib.rcParams['font.family']='Malgun Gothic'
matplotlib.rcParams['axes.unicode_minus'] = False
e = np.loadtxt("../../../data/electric.csv", delimiter=",", skiprows=1, dtype=np.float32, encoding='UTF8')
x = e[:, 1]
y = e[:, 2]
print(x)
print(y)
# Document
# https://keras.io/models/model/
model = Sequential(Dense(units=1, input_shape=[1]))
model.compile(loss="min-squared-error", optimizer=Adam(learning_rate=0.01))
history = model.fit(x, y, epochs=500)
print(history.history["loss"])
plt.plot(history.history["loss"])
plt.show()
print(model.predict([5])) | [
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((449, 556), 'numpy.loadtxt', 'np.loadtxt', (['"""../../../data/electric.csv"""'], {'delimiter': '""","""', 'skiprows': '(1)', 'dtype': 'np.float32', 'encoding': '"""UTF8"""'}), "('../../../data/electric.csv', delimiter=',', skiprows=1, dtype=\n np.float32, encoding='UTF8')\n", (459, 556), True, 'import numpy as np\n'), ((838, 871), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (846, 871), True, 'import matplotlib.pyplot as plt\n'), ((872, 882), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (880, 882), True, 'import matplotlib.pyplot as plt\n'), ((659, 690), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), '(units=1, input_shape=[1])\n', (664, 690), False, 'from tensorflow.keras.layers import Dense\n'), ((742, 766), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (746, 766), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
#data
zip_path = tf.keras.utils.get_file(
origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
fname='jena_climate_2009_2016.csv.zip',
extract=True)
csv_path, _ = os.path.splitext(zip_path)
df = pd.read_csv(csv_path)
df.head()
def univariate_data(dataset, start_index, end_index, history_size, target_size):
    """Build sliding-window samples from a 1-D series.

    Each sample is the ``history_size`` values preceding index ``i`` (shaped
    ``(history_size, 1)``); its label is the value at ``i + target_size``.

    :param dataset: 1-D numpy array of observations
    :param start_index: first usable index (window start)
    :param end_index: one-past-last window anchor, or None for "to the end"
    :param history_size: length of each input window
    :param target_size: offset of the label relative to the window end
    :return: (windows, labels) as numpy arrays
    """
    first = start_index + history_size
    last = end_index if end_index is not None else len(dataset) - target_size
    windows = [np.reshape(dataset[i - history_size:i], (history_size, 1))
               for i in range(first, last)]
    targets = [dataset[i + target_size] for i in range(first, last)]
    return np.array(windows), np.array(targets)
TRAIN_SPLIT = 300000
tf.random.set_seed(13)
uni_data = df['T (degC)']
uni_data.index = df['Date Time']
uni_data.head()
uni_data.plot(subplots=True)
uni_data = uni_data.values
uni_train_mean = uni_data[:TRAIN_SPLIT].mean()
uni_train_std = uni_data[:TRAIN_SPLIT].std()
uni_data = (uni_data-uni_train_mean)/uni_train_std
univariate_past_history = 20
univariate_future_target = 0
x_train_uni, y_train_uni = univariate_data(uni_data, 0, TRAIN_SPLIT,
univariate_past_history,
univariate_future_target)
x_val_uni, y_val_uni = univariate_data(uni_data, TRAIN_SPLIT, None,
univariate_past_history,
univariate_future_target)
print ('Single window of past history')
print (x_train_uni[0])
print ('\n Target temperature to predict')
print (y_train_uni[0])
def create_time_steps(length):
    """Return the x-axis positions -length .. -1 for a history of ``length`` points."""
    return [step - length for step in range(length)]
def show_plot(plot_data, delta, title):
    """Plot a history series plus optional true-future / prediction points.

    plot_data[0] is the history (line); any further entries are single points
    plotted at x = delta (or 0 when delta is falsy).  Returns the plt module
    so callers can chain .show().
    """
    series_labels = ['History', 'True Future', 'Model Prediction']
    styles = ['.-', 'rx', 'go']
    time_steps = create_time_steps(plot_data[0].shape[0])
    future = delta if delta else 0
    plt.title(title)
    # history goes first as a flattened line ...
    plt.plot(time_steps, plot_data[0].flatten(), styles[0], label=series_labels[0])
    # ... remaining entries are markers at the prediction horizon
    for idx in range(1, len(plot_data)):
        plt.plot(future, plot_data[idx], styles[idx], markersize=10,
                 label=series_labels[idx])
    plt.legend()
    plt.xlim([time_steps[0], (future + 5) * 2])
    plt.xlabel('Time-Step')
    return plt
show_plot([x_train_uni[0], y_train_uni[0]], 0, 'Sample Example')
def baseline(history):
    """Naive forecast: predict the mean of the given history window."""
    mean_value = np.mean(history)
    return mean_value
show_plot([x_train_uni[0], y_train_uni[0], baseline(x_train_uni[0])], 0,
'Baseline Prediction Example')
BATCH_SIZE = 256
BUFFER_SIZE = 10000
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
simple_lstm_model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
tf.keras.layers.Dense(1)
])
simple_lstm_model.compile(optimizer='adam', loss='mae')
for x, y in val_univariate.take(1):
print(simple_lstm_model.predict(x).shape)
EVALUATION_INTERVAL = 200
EPOCHS = 5
simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
steps_per_epoch=EVALUATION_INTERVAL,
validation_data=val_univariate, validation_steps=50)
for x, y in val_univariate.take(3):
plot = show_plot([x[0].numpy(), y[0].numpy(),
simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
plot.show() | [
"numpy.mean",
"numpy.reshape",
"tensorflow.random.set_seed",
"pandas.read_csv",
"tensorflow.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.splitext",
"numpy.array",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"tensorflow.ke... | [((225, 413), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', ([], {'origin': '"""https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"""', 'fname': '"""jena_climate_2009_2016.csv.zip"""', 'extract': '(True)'}), "(origin=\n 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip'\n , fname='jena_climate_2009_2016.csv.zip', extract=True)\n", (248, 413), True, 'import tensorflow as tf\n'), ((431, 457), 'os.path.splitext', 'os.path.splitext', (['zip_path'], {}), '(zip_path)\n', (447, 457), False, 'import os\n'), ((463, 484), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (474, 484), True, 'import pandas as pd\n'), ((1032, 1054), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(13)'], {}), '(13)\n', (1050, 1054), True, 'import tensorflow as tf\n'), ((2899, 2961), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train_uni, y_train_uni)'], {}), '((x_train_uni, y_train_uni))\n', (2933, 2961), True, 'import tensorflow as tf\n'), ((3072, 3130), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_val_uni, y_val_uni)'], {}), '((x_val_uni, y_val_uni))\n', (3106, 3130), True, 'import tensorflow as tf\n'), ((2267, 2283), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2276, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2516, 2528), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2526, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2531, 2574), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[time_steps[0], (future + 5) * 2]'], {}), '([time_steps[0], (future + 5) * 2])\n', (2539, 2574), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time-Step"""'], {}), "('Time-Step')\n", (2583, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2707, 2723), 'numpy.mean', 
'np.mean', (['history'], {}), '(history)\n', (2714, 2723), True, 'import numpy as np\n'), ((973, 987), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (981, 987), True, 'import numpy as np\n'), ((989, 1005), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (997, 1005), True, 'import numpy as np\n'), ((3243, 3302), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(8)'], {'input_shape': 'x_train_uni.shape[-2:]'}), '(8, input_shape=x_train_uni.shape[-2:])\n', (3263, 3302), True, 'import tensorflow as tf\n'), ((3308, 3332), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (3329, 3332), True, 'import tensorflow as tf\n'), ((873, 920), 'numpy.reshape', 'np.reshape', (['dataset[indices]', '(history_size, 1)'], {}), '(dataset[indices], (history_size, 1))\n', (883, 920), True, 'import numpy as np\n'), ((2336, 2409), 'matplotlib.pyplot.plot', 'plt.plot', (['future', 'plot_data[i]', 'marker[i]'], {'markersize': '(10)', 'label': 'labels[i]'}), '(future, plot_data[i], marker[i], markersize=10, label=labels[i])\n', (2344, 2409), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import cv2
def rescale(im, target_size, max_size):
    """
    Resize ``im`` so its short side equals ``target_size``, preserving aspect
    ratio, while capping the long side at ``max_size``.
    Parameters:
    ----------
    im : numpy.array
        BGR image input by opencv
    target_size: int
        one dimensional size (the short side)
    max_size: int
        one dimensional max size (the long side)
    Returns:
    ----------
    numpy.array, rescaled image, and the scale factor actually applied
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    # BUG FIX: the long side must be np.max, not np.min; with np.min the
    # max_size cap below compared against the *short* side and could let the
    # long side exceed max_size.
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale
def resize(im, target_size, interp_method=cv2.INTER_LINEAR):
    """Resize ``im`` to ``target_size`` with no regard for aspect ratio.

    :param im: BGR image as a numpy array
    :param target_size: size tuple forwarded to cv2.resize as ``dsize``
        (NOTE(review): cv2 interprets dsize as (width, height), while the
        original docstring said (h, w) -- confirm against callers)
    :param interp_method: cv2 interpolation flag
    :return: the resized image as a numpy array
    """
    resized = cv2.resize(im, target_size, interpolation=interp_method)
    return resized
def transform(im, pixel_means):
    """Convert an OpenCV BGR image into a channel-first float tensor.

    Channels are reordered BGR -> RGB, ``pixel_means`` (shape [[[R, G, B]]])
    is subtracted, and the axes are moved to [channel, height, width].
    The input array is not modified.

    :param im: numpy array [height, width, channel] in BGR order
    :param pixel_means: per-channel means to subtract, [[[R, G, B]]]
    :return: numpy array shaped [channel, height, width]
    """
    # reversing the last axis swaps BGR -> RGB; astype(float) copies,
    # so the caller's array stays untouched
    rgb = im[:, :, ::-1].astype(float)
    rgb = rgb - pixel_means
    return rgb.transpose((2, 0, 1))
def transform_inverse(im_tensor, pixel_means):
    """Undo :func:`transform` for a single-image batch.

    Moves channels last, adds the per-channel means back, and casts to uint8.

    :param im_tensor: numpy array [batch=1, channel, height, width]
    :param pixel_means: per-channel means that were subtracted, [[[R, G, B]]]
    :return: numpy uint8 array [height, width, channel(RGB)]
    """
    assert im_tensor.shape[0] == 1
    # [1, C, H, W] -> [1, H, W, C], then drop the batch axis
    image = im_tensor.copy().transpose((0, 2, 3, 1))[0]
    assert image.shape[2] == 3
    image = image + pixel_means
    return image.astype(np.uint8)
| [
"cv2.resize",
"numpy.round",
"numpy.min"
] | [((471, 492), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (477, 492), True, 'import numpy as np\n'), ((511, 532), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (517, 532), True, 'import numpy as np\n'), ((762, 851), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (772, 851), False, 'import cv2\n'), ((1236, 1292), 'cv2.resize', 'cv2.resize', (['im', 'target_size'], {'interpolation': 'interp_method'}), '(im, target_size, interpolation=interp_method)\n', (1246, 1292), False, 'import cv2\n'), ((652, 684), 'numpy.round', 'np.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (660, 684), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
class perceptron:
    """Linear perceptron for 2-D, two-class data with labels in {+1, -1}.

    Decision rule: sign(w @ x + b).  The weight vector is renormalized to
    unit length (via normalize_v, defined elsewhere in this file) after
    every update.
    """
    def __init__(self):
        # Deterministic initialisation; random init kept commented for reference.
        #self.w = np.random.rand(2)
        self.w = np.array([0.5, 0.5])
        self.w = normalize_v(self.w)  # keep weights unit-length from the start
        #self.b = np.random.rand()
        self.b = 1
    def get_loss(self, y, y_hat, X):
        """
        Perceptron loss over the dataset.
        :param y: true labels (+1/-1)
        :param y_hat: predicted labels (+1/-1)
        :param X: parameters of training data (one observation per row)
        :return: :type float: loss
        """
        loss = 0
        for i in range(len(y)):
            # (y_hat - y) is 0 for correct predictions, so only errors contribute
            loss += (y_hat[i] - y[i]) * (self.w @ X[i] + self.b) / 2
        return loss
    def predict(self, x):
        """
        Classify one observation.
        :param x: observation
        :return: +1 if w @ x + b > 0, -1 otherwise
        """
        res = self.w @ x + self.b
        if res > 0: return 1
        return -1
    def minimize_loss(self, eta_w, eta_b, X, y, it = 100):
        """
        Batch (sub)gradient descent on the perceptron loss.
        :param eta_w: learning rate for the weights
        :param eta_b: learning rate for the bias
        :param X: training data
        :param y: training labels (+1/-1)
        :param it: # iterations (epochs) to optimize
        :returns: final (weights, bias)

        NOTE(review): normalize_v raises ValueError on a zero vector, so an
        epoch with zero misclassifications (delta_w == 0) would raise --
        confirm callers only train while errors remain.
        """
        for z in range(it):
            delta_w = np.array([0.0, 0.0])
            delta_b = 0.0
            y_hat = []
            for i in range(len(y)):
                # prediction error: 0 when correct, +/-2 when wrong
                prediction = (self.predict(X[i]) - y[i])
                y_hat.append(prediction)
                delta_w += prediction * X[i] / 2
                delta_b += prediction / 2
            # step along the normalized gradient, then re-normalize the weights
            self.w = normalize_v(self.w - eta_w*normalize_v(delta_w))
            self.b = self.b - eta_b*delta_b
            #print(z, self.w, self.b, delta_w, delta_b, self.get_loss(y, y_hat, X))
        return (self.w, self.b)
def normalize_v(v):
    """Scale vector ``v`` to unit Euclidean length.

    :param v: sequence of numbers
    :return: numpy array with the same direction and norm 1
    :raises ValueError: if ``v`` has zero length
    """
    norm = sum(component * component for component in v) ** 0.5
    if norm == 0:
        raise ValueError("cannot normalize zero length vector")
    return np.array([component / norm for component in v])
if __name__=="__main__":
d = np.loadtxt(open("./data/linear_datat_2class.csv", "r", encoding='utf-8-sig'), delimiter=",", skiprows=0)
X, y = d[:,:-1], d[:,-1:]
colors = ["red" if x==1 else "blue" for x in y]
plt.scatter(X[:,0], X[:,1], color=colors)
perc = perceptron()
w, b = perc.minimize_loss(0.3, 0.3, X, y, 100)
point1 = [0, -b/w[0]]
point2 = [-b/w[1], 0]
plt.axis('equal')
#plt.axis([30, 100, 30, 100])
plt.plot(point1, point2, '-r', color="green")
correct = 0
predictions = []
for i in range(len(y)):
predictions.append(perc.predict(X[i]))
if perc.predict(X[i]) == y[i]:
correct += 1
print("{}/100".format(correct))
plt.show() | [
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((2187, 2220), 'numpy.array', 'np.array', (['[(x / norm) for x in v]'], {}), '([(x / norm) for x in v])\n', (2195, 2220), True, 'import numpy as np\n'), ((2444, 2487), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'color': 'colors'}), '(X[:, 0], X[:, 1], color=colors)\n', (2455, 2487), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2638), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2629, 2638), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2722), 'matplotlib.pyplot.plot', 'plt.plot', (['point1', 'point2', '"""-r"""'], {'color': '"""green"""'}), "(point1, point2, '-r', color='green')\n", (2685, 2722), True, 'import matplotlib.pyplot as plt\n'), ((2941, 2951), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2949, 2951), True, 'import matplotlib.pyplot as plt\n'), ((190, 210), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (198, 210), True, 'import numpy as np\n'), ((1421, 1441), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1429, 1441), True, 'import numpy as np\n')] |
import abc, logging, os, anytree
from dbac_lib.dbac_primitives import IPrimitiveCollection, HardMiningSVM
from dbac_lib.dbac_feature_ext import IFeatureExtractor
from dbac_lib.dbac_util import CycleIterator, batch_iterator, TicToc
from dbac_lib import dbac_expression
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as tf_slim
from joblib import Parallel, delayed
from sklearn import svm as sklearn_svm
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import vgg
from dbac_lib import vgg_preprocessing
_logger = logging.getLogger(__name__)
_MODEL_NAMES = ['cha', 'sup', 'ind', 'nba_mlp']
def exp_members(exp_tup):
    """Unpack an (op, var_a, var_b) expression tuple of strings.

    The operands are converted to int; the literal string 'None' in the
    second operand slot maps to None (unary operators).
    """
    op = exp_tup[0]
    left = int(exp_tup[1])
    right = None if exp_tup[2] == 'None' else int(exp_tup[2])
    return op, left, right
class IModel(metaclass=abc.ABCMeta):
    """Abstract base class for composed-classifier models.

    Concrete models are obtained through :meth:`factory` using one of the
    names in ``_MODEL_NAMES``.
    """
    def __init__(self, name, feat_ext, prim_rpr):
        # name must be registered; feat_ext extracts image features;
        # prim_rpr holds the primitive classifiers/representations
        assert name in _MODEL_NAMES
        assert isinstance(feat_ext, IFeatureExtractor)
        assert isinstance(prim_rpr, IPrimitiveCollection)
        self.name = name
        self.feat_ext = feat_ext
        self.prim_rpr = prim_rpr
    @staticmethod
    def factory(name, feat_ext, prim_rpr, **kwargs):
        """Instantiate the model registered under ``name``.

        :raises ValueError: if ``name`` is not one of ``_MODEL_NAMES``
        """
        if name == _MODEL_NAMES[0]:
            return Chance(feat_ext, prim_rpr, **kwargs)
        elif name == _MODEL_NAMES[1]:
            return Supervised(feat_ext, prim_rpr, **kwargs)
        elif name == _MODEL_NAMES[2]:
            return Independent(feat_ext, prim_rpr, **kwargs)
        elif name == _MODEL_NAMES[3]:
            return NBA_MLP(feat_ext, prim_rpr, **kwargs)
        else:
            raise ValueError("Model {} is not defined.".format(name))
    @abc.abstractmethod
    def learning(self, images_path, labels, expressions, **kwargs):
        """Train the model from images, primitive labels and expressions."""
        raise NotImplementedError()
    @abc.abstractmethod
    def inference(self, expressions, **kwargs):
        """Produce classifier parameters for the given expressions."""
        raise NotImplementedError()
    @abc.abstractmethod
    def score(self, images_path, expressions, **kwargs):
        """Score every image against every expression; shape (n_expressions, n_images)."""
        raise NotImplementedError()
    @abc.abstractmethod
    def save(self, file_path):
        """Persist model parameters to ``file_path``."""
        raise NotImplementedError()
    @abc.abstractmethod
    def load(self, file_path):
        """Restore model parameters from ``file_path``."""
        raise NotImplementedError()
class Chance(IModel):
    """Random-scoring baseline: no learning, no inference, uniform random scores."""

    def __init__(self, feat_ext, prim_rpr, **kwargs):
        super().__init__(_MODEL_NAMES[0], feat_ext, prim_rpr)

    def learning(self, images_path, labels, expressions, **kwargs):
        _logger.info("Chance model does not require learning.")

    def inference(self, expressions, **kwargs):
        _logger.info("Chance model cannot infer classifiers.")

    def score(self, images_path, expressions, **kwargs):
        # one uniform random score per (expression, image) pair
        return np.random.rand(len(expressions), images_path.shape[0])

    def save(self, file_path):
        _logger.info("Chance model does not require save.")

    def load(self, file_path):
        _logger.info("Chance model does not require load.")
class Supervised(IModel):
    """Fully-supervised baseline: one linear SVM per expression, stored in a
    dict keyed by the expression's canonical list form."""
    def __init__(self, feat_ext, prim_rpr, **kwargs):
        super().__init__(_MODEL_NAMES[1], feat_ext, prim_rpr)
        # expression key -> flat parameter vector [bias, weights...]
        self.cls_dic = dict()
    def inference(self, expressions, **kwargs):
        """Look up trained parameters per expression; unseen expressions get
        random parameters of the same shape.

        NOTE(review): indexing keys()[0] raises IndexError when cls_dic is
        empty -- confirm callers always learn() or load() first.
        """
        dims = self.cls_dic.get(list(self.cls_dic.keys())[0]).shape
        ret = [self.cls_dic.get(self._exp2key(exp), np.random.rand(*dims)) for exp in expressions]
        return ret
    def learning(self, images_path, labels, expressions, **kwargs):
        """Fit one SVM per expression in parallel ('num_threads' kwarg, default 10)."""
        num_threads = int(kwargs.get('num_threads', 10))
        image_feats = self.feat_ext.compute(images_path)
        svm_parameters = Parallel(n_jobs=num_threads)(
            delayed(_compute_svm_params)(image_feats, labels, expressions, exp_idx) for exp_idx in
            range(len(expressions)))
        for idx, (exp_lst, param) in enumerate(zip(expressions, svm_parameters)):
            self.cls_dic[self._exp2key(exp_lst)] = param
    def score(self, images_path, expressions, **kwargs):
        """Score images: column 0 of each parameter vector is the bias, the
        rest are weights applied to image features."""
        images_feat = self.feat_ext.compute(images_path)
        expressions_w = np.vstack(self.inference(expressions))
        scs = np.reshape(expressions_w[:, 0], (-1, 1)) + np.dot(expressions_w[:, 1:], images_feat.T)
        return scs
    def save(self, file_path):
        # np.save wraps the dict in a 0-d object array; load() undoes it with .item()
        np.save(file_path, self.cls_dic)
    def load(self, file_path):
        self.cls_dic = np.load(file_path).item()
    def _exp2key(self, exp):
        """Canonicalize an expression (tree or list/tuple/array form) into a key."""
        if isinstance(exp, anytree.node.Node):
            return dbac_expression.exp2list_parse(exp)
        elif isinstance(exp, (list, tuple, np.ndarray)):
            return dbac_expression.exp2list_parse(dbac_expression.list2exp_parse(exp))
        else:
            raise ValueError("Not supported expression format")
def _compute_svm_params(img_feats, prim_labels, expressions, exp_idx):
    """Train a linear SVM for one expression and return its parameters.

    The expression at ``exp_idx`` is evaluated over the primitive label
    matrix to produce binary targets; a LinearSVC is fit on the image
    features and its parameters are returned as one flat vector
    [bias, weights...].
    """
    exp_lst = expressions[exp_idx]
    _logger.info("{}/{} - Training svm ...".format(exp_idx, len(expressions)))
    # evaluate the boolean expression on the per-primitive label columns
    tree = dbac_expression.list2exp_parse(exp_lst)
    variables = {var: prim_labels[:, int(var)] for var in dbac_expression.get_vars(tree)}
    targets = dbac_expression.eval_exp(tree, variables)
    classifier = sklearn_svm.LinearSVC(C=1e-5, class_weight={1: 2.0, 0: 1.0}, verbose=0, penalty='l2',
                                      loss='hinge', dual=True)
    classifier.fit(img_feats, targets)
    accuracy = classifier.score(img_feats, targets)
    _logger.info("{}/{} - Finalized svm. Positives {}, Negatives {}, Accuracy {}."
                 .format(exp_idx, len(expressions), np.sum(targets), np.sum(np.logical_not(targets)), accuracy))
    return np.hstack((classifier.intercept_.ravel(), classifier.coef_.ravel()))
class Independent(IModel):
    """Training-free baseline: composes the primitives' predicted
    probabilities with fuzzy-logic operators (NOT/AND/OR)."""
    def __init__(self, feat_ext, prim_rpr, **kwargs):
        super().__init__(_MODEL_NAMES[2], feat_ext, prim_rpr)
    def learning(self, images_path, labels, expressions, **kwargs):
        _logger.info("Independent model does not require learning.")
    def inference(self, expressions, **kwargs):
        _logger.info("Independent model cannot infer classifiers.")
    def score(self, images_path, expressions, **kwargs):
        """Evaluate each expression over per-primitive class probabilities.

        NOTE(review): np.float is deprecated (removed in NumPy >= 1.24);
        this should become plain `float` when the codebase upgrades.
        """
        scores = np.zeros((len(expressions), images_path.shape[0]), dtype=np.float)
        images_feat = self.feat_ext.compute(images_path)
        # probabilistic logic: NOT p = 1-p, p AND q = p*q, p OR q = p+q-p*q
        ops_dic = {dbac_expression.OPS[0]: lambda v: 1.0 - v, dbac_expression.OPS[1]: np.multiply,
                   dbac_expression.OPS[2]: lambda v1, v2: (v1 + v2) - np.multiply(v1, v2)}
        # positive-class probability of every primitive for every image
        var_dic = {str(p): self.prim_rpr.get_cls(int(p))[0].predict_proba(images_feat)[:, 1] for p in
                   self.prim_rpr.get_ids()}
        for idx, exp_lst in enumerate(expressions):
            exp_tree = dbac_expression.list2exp_parse(exp_lst)
            scores[idx] = dbac_expression.eval_exp(exp_tree, var_dic, ops_dic)
            if idx % 100 == 0:
                _logger.info("Tested for {}/{} expressions.".format(idx, len(expressions)))
        return scores
    def save(self, file_path):
        _logger.info("Indepedent model does not require save.")
    def load(self, file_path):
        _logger.info("Indepedent model does not require load.")
class NBA_MLP(IModel):
def __init__(self, feat_ext, prim_rpr, **kwargs):
super().__init__(_MODEL_NAMES[3], feat_ext, prim_rpr)
# tensorflow graph and session
self.graph = tf.Graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#config.log_device_placement = True
config.allow_soft_placement = True
self.tf_session = tf.Session(graph=self.graph, config=config)
self.dim = self.prim_rpr.get_rpr(self.prim_rpr.get_ids()[0]).shape[-1]
self.learn_feats = bool(int(kwargs.get('learn_feats', False)))
self.is_train = bool(int(kwargs.get('is_train', True)))
self.norm_in = bool(int(kwargs.get('norm_in', False)))
self.norm_out = bool(int(kwargs.get('norm_out', False)))
self.demorgan_reg = bool(int(kwargs.get('demorgan_reg', False)))
self.net_type = int(kwargs.get('net_type', 0)) # 0: NOT AND OR, 1: NOT AND, 2: NOT OR
_logger.info("Model parameters: learn_feats={}, is_train={}, norm_in={}, norm_out={}, demorgan_reg={}, "
"net_type={}".format(self.learn_feats, self.is_train, self.norm_in, self.norm_out,
self.demorgan_reg, self.net_type))
# Model definition
with self.graph.as_default() as graph:
# input tensors
self._prims_rpr_ph = tf.placeholder(tf.float32, (None, 2 * self.dim))
self._ground_truth_ph = tf.placeholder(tf.float32, (None, None))
self._switch_ph = tf.placeholder(tf.int32, (None,))
self.is_training_ph = tf.placeholder(tf.bool)
# normalize inputs
if self.norm_in:
prims_rpr_a, prims_rpr_b = tf.split(self._prims_rpr_ph, num_or_size_splits=2, axis=1)
prims_rpr_a, prims_rpr_b = tf.nn.l2_normalize(prims_rpr_a, dim=1), tf.nn.l2_normalize(prims_rpr_b, dim=1)
prims_rpr_tn = tf.concat([prims_rpr_a, prims_rpr_b], axis=1)
else:
prims_rpr_tn = 1.0 * self._prims_rpr_ph
# mlps and multiplexing
not_ex = lambda input_tn: -1.0 * tf.slice(input_tn, [0, 0],
[tf.shape(input_tn)[0], self.dim])
and_mlp = lambda input_tn, reuse=False: _mlp(input_tn, [int(np.ceil(1.5 * self.dim)), self.dim],
scope='nba_mlp/AND', reuse=reuse)
or_mlp = lambda input_tn, reuse=False: _mlp(input_tn, [int(np.ceil(1.5 * self.dim)), self.dim],
scope='nba_mlp/OR', reuse=reuse)
zero_tn = tf.zeros((tf.shape(prims_rpr_tn)[0], self.dim), tf.float32)
if self.net_type == 0:
# NOT AND OR
self.output_tn = tf.where(tf.equal(self._switch_ph, 0), not_ex(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 1), and_mlp(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 2), or_mlp(prims_rpr_tn), zero_tn)
elif self.net_type == 1:
# NOT AND
self.output_tn = tf.where(tf.equal(self._switch_ph, 0), not_ex(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 1), and_mlp(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 2), -1.0 * and_mlp(-1.0 * prims_rpr_tn, reuse=True), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 50), or_mlp(prims_rpr_tn), zero_tn)
elif self.net_type == 2:
# NOT OR
self.output_tn = tf.where(tf.equal(self._switch_ph, 0), not_ex(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 1), -1.0 * or_mlp(-1.0 * prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 2), or_mlp(prims_rpr_tn, reuse=True), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 50), and_mlp(prims_rpr_tn), zero_tn)
else:
raise ValueError("Network Type is not supported! net_type={}".format(self.net_type))
# Regularization based on De Morgan laws
if self.demorgan_reg:
self.dm_output_tn = -1.0 * prims_rpr_tn
self.dm_output_tn = tf.where(tf.equal(self._switch_ph, 0), not_ex(prims_rpr_tn), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 1), or_mlp(prims_rpr_tn, True), zero_tn) \
+ tf.where(tf.equal(self._switch_ph, 2), and_mlp(prims_rpr_tn, True), zero_tn)
self.dm_output_tn = -1.0 * self.dm_output_tn
# normalize output
if self.norm_out:
self.output_tn = tf.nn.l2_normalize(self.output_tn, dim=1)
if self.demorgan_reg:
self.dm_output_tn = tf.nn.l2_normalize(self.dm_output_tn, dim=1)
# visual branch
if self.learn_feats:
with tf.device('/gpu:0'):
self._images_ph = tf.placeholder(tf.string, (None,))
img_prep_func = lambda img_path: vgg_preprocessing.preprocess_image(
tf.image.decode_jpeg(tf.read_file(img_path), channels=3), 224, 224, is_training=self.is_train)
processed_images = tf.map_fn(img_prep_func, self._images_ph, dtype=tf.float32)
with slim.arg_scope(vgg.vgg_arg_scope()):
logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=self.is_training_ph)
self._images_feats = tf.squeeze(graph.get_tensor_by_name('vgg_16/fc6/BiasAdd:0'))
self.init_vgg = slim.assign_from_checkpoint_fn('/data/home/rfsc/dbac/models/vgg_16.ckpt',
slim.get_model_variables('vgg_16'))
self.tf_saver_feats = tf.train.Saver(tf_slim.get_model_variables(scope='vgg_16'))
else:
self._images_ph = tf.placeholder(tf.float32, (None, self.dim - 1))
self._images_feats = 1.0 * self._images_ph
self.init_vgg = None
# score images
self.scores_tn = tf.add(
tf.reshape(self.output_tn[:, 0], (-1, 1)),
tf.matmul(self.output_tn[:, 1:], self._images_feats, transpose_b=True))
# Checkpoint save and restore
self.tf_saver_and = tf.train.Saver(tf_slim.get_model_variables(scope='nba_mlp/AND'))
self.tf_saver_or = tf.train.Saver(tf_slim.get_model_variables(scope='nba_mlp/OR'))
def learning(self, images_path, labels, expressions, **kwargs):
# training parameters
batch_size, num_epochs = int(kwargs.get('batch_size', 32)), int(kwargs.get('num_epochs', 1e3))
snap_int, snap_dir, log_dir = int(kwargs.get('snap_int', 250)), kwargs.get('snap_dir', None), kwargs.get(
'log_dir', None)
init_weights = kwargs.get('init_weights', None)
snapshot = kwargs.get('snapshot', None)
learning_rate = float(kwargs.get('learning_rate', 1e-5))
alphas = [float(p) for p in kwargs.get('alphas', '10.0 1.0 0.1 1.0').split()]
_logger.info("Training parameters: batch_size={}, num_epochs={}, snap_int={}, snap_dir={}, log_dir={}, "
"learning_rate={}, alphas={}, learn_feats={}, init_weights={}, norm_in={}, norm_out={},"
" snapshot={}, demorgan_reg={}".format(batch_size, num_epochs, snap_int, snap_dir, log_dir, learning_rate, alphas,
self.learn_feats, init_weights, self.norm_in, self.norm_out,
snapshot, self.demorgan_reg))
# setup training network
with self.graph.as_default() as graph:
# Loss
reg_loss = tf.reduce_mean(tf.losses.get_regularization_loss())
norm_loss = tf.reduce_mean(0.5 * tf.pow(tf.norm(self.output_tn, axis=-1), 2.0))
cls_loss = tf.losses.hinge_loss(self._ground_truth_ph, self.scores_tn, reduction=tf.losses.Reduction.MEAN)
loss_tn = alphas[0] * norm_loss + alphas[1] * cls_loss + alphas[2] * reg_loss
if self.demorgan_reg:
dem_loss = tf.reduce_mean(0.5 * tf.pow(tf.norm(self.output_tn - self.dm_output_tn, axis=-1), 2.0))
loss_tn = loss_tn + (alphas[3] * dem_loss)
dem_loss_val, dem_loss_up, dem_loss_reset = _create_reset_metric(
tf.metrics.mean, 'epoch_dem_loss', values=dem_loss)
tf.summary.scalar('dem_loss', dem_loss_val)
pred = tf.greater(self.scores_tn, 0.0)
# Metrics
reg_loss_val, reg_loss_up, reg_loss_reset = _create_reset_metric(
tf.metrics.mean, 'epoch_reg_loss', values=reg_loss)
tf.summary.scalar('reg_loss', reg_loss_val)
cls_loss_val, cls_loss_up, cls_loss_reset = _create_reset_metric(
tf.metrics.mean, 'epoch_cls_loss', values=cls_loss)
tf.summary.scalar('cls_loss', cls_loss_val)
norm_loss_val, norm_loss_up, norm_loss_reset = _create_reset_metric(
tf.metrics.mean, 'epoch_norm_loss', values=norm_loss)
tf.summary.scalar('norm_loss', norm_loss_val)
loss_val, loss_up, loss_reset = _create_reset_metric(
tf.metrics.mean, 'epoch_loss', values=loss_tn)
tf.summary.scalar('total_loss', loss_val)
prec_val, prec_up, prec_reset = _create_reset_metric(
tf.metrics.precision, 'epoch_prec', predictions=pred, labels=self._ground_truth_ph)
tf.summary.scalar('Precision', prec_val)
rec_val, rec_up, rec_reset = _create_reset_metric(
tf.metrics.recall, 'epoch_rec', predictions=pred, labels=self._ground_truth_ph)
tf.summary.scalar('Recall', rec_val)
tf.summary.scalar('Fscore', (2 * prec_val * rec_val) / (prec_val + rec_val + 1e-6))
summ_ops = tf.summary.merge_all()
summ_writer = tf.summary.FileWriter(log_dir) if log_dir else None
metrics_ops_reset = [reg_loss_reset, cls_loss_reset, norm_loss_reset, loss_reset, prec_reset, rec_reset]
metrics_ops_update = [reg_loss_up, cls_loss_up, norm_loss_up, loss_up, prec_up, rec_up]
if self.demorgan_reg:
metrics_ops_reset += [dem_loss_reset]
metrics_ops_update += [dem_loss_up]
# Optimizer
global_step_tn = tf.train.get_or_create_global_step(graph)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss_tn, global_step=global_step_tn, colocate_gradients_with_ops=True)
init = tf.global_variables_initializer()
tf_snap_tr = tf.train.Saver(max_to_keep=1)
# Decompose expressions and compute labels
_logger.info("Decomposing expressions...")
valid_exps, pos_img_ites, neg_img_ites, exp_labels = [], [], [], []
for exp_lst in expressions:
for exp_term in dbac_expression.get_terms(dbac_expression.list2exp_parse(exp_lst)):
term_lst = dbac_expression.exp2list_parse(exp_term)
var_dic = {p: labels[:, int(p)] for p in dbac_expression.get_vars(exp_term)}
term_labels = dbac_expression.eval_exp(exp_term, var_dic)
if (term_lst not in valid_exps) and (exp_term.name != dbac_expression.OPS[0]) \
and (term_labels.sum() > 0) and (np.logical_not(term_labels).sum() > 0):
valid_exps.append(term_lst)
exp_labels.append(term_labels)
pos_img_ites.append(CycleIterator(list(np.where(term_labels)[0])))
neg_img_ites.append(CycleIterator(list(np.where(np.logical_not(term_labels))[0])))
expressions = valid_exps
exp_ite = CycleIterator(np.arange(len(expressions)).tolist())
exp_labels = np.vstack(exp_labels).astype(np.float32)
_logger.info("Total of expressions decomposed: {}".format(len(expressions)))
# Initialization
_logger.info("Initializing model...")
self.tf_session.run(init)
if self.init_vgg is not None:
_logger.info("Loading features pre-trained weights")
self.init_vgg(self.tf_session)
if init_weights:
_logger.info("Loading model pre-trained weights")
self.load(init_weights)
init_epoch = 0
if snapshot:
_logger.info("Loading from training snapshot")
tf_snap_tr.restore(self.tf_session, snapshot)
init_epoch = int((self.tf_session.run(global_step_tn) * batch_size) / len(expressions))
# training loop
_logger.info("Training...")
for epoch in range(init_epoch, num_epochs):
self.tf_session.run(metrics_ops_reset)
for b in range(int(np.ceil(len(expressions) / batch_size))):
# batch sampling
b_exp_ids = [next(exp_ite) for _ in range(batch_size)]
b_img_ids = [next(pos_img_ites[exp_id]) for _ in range(5) for exp_id in b_exp_ids]
b_img_ids += [next(neg_img_ites[exp_id]) for _ in range(5) for exp_id in b_exp_ids]
# compute image features
if self.learn_feats:
b_img_feats = images_path[b_img_ids]
else:
b_img_feats = self.feat_ext.compute(images_path[b_img_ids])
# compute operations
b_prims_rpr, b_op_switch = [], []
for exp_id in b_exp_ids:
exp_tree = dbac_expression.list2exp_parse(expressions[exp_id])
b_op_switch.append({'NOT': 0, 'AND': 1, 'OR': 2}[exp_tree.name])
operand_a, operand_b = exp_tree.children if np.random.rand() > 0.5 else exp_tree.children[::-1]
b_prims_rpr.append(dbac_expression.exp2list_parse(operand_a))
b_prims_rpr.append(dbac_expression.exp2list_parse(operand_b))
b_op_switch = np.array(b_op_switch)
b_prims_rpr = self.inference(b_prims_rpr).reshape((len(b_exp_ids), 2 * self.dim))
# compute labels
b_exp_labels = exp_labels[b_exp_ids, :]
b_exp_labels = b_exp_labels[:, b_img_ids]
# run model
self.tf_session.run(
[train_op, loss_tn] + metrics_ops_update,
feed_dict={self.is_training_ph: True,
self._images_ph: b_img_feats,
self._prims_rpr_ph: b_prims_rpr,
self._switch_ph: b_op_switch,
self._ground_truth_ph: b_exp_labels})
if (epoch + 1) % 2 == 0:
loss, prec, rec, summary = self.tf_session.run([loss_val, prec_val, rec_val, summ_ops])
_logger.info("Epoch {}: Loss={:.4f}, Prec={:.2f}, Rec={:.2f}, Fsc={:.2f}"
.format((epoch + 1), loss, prec, rec, (2 * prec * rec) / (prec + rec + 1e-6)))
if summ_writer:
summ_writer.add_summary(summary, global_step=epoch + 1)
if snap_dir and (epoch + 1) % snap_int == 0:
snap_file = os.path.join(snap_dir, 'nba_mlp_snap_E{}.npz'.format((epoch + 1)))
self.save(snap_file)
tf_snap_tr.save(self.tf_session, os.path.join(snap_dir, 'train.chk'), latest_filename='checkpoint.TRAIN')
_logger.info("Model epoch {} snapshoted to {}".format(epoch + 1, snap_file))
def score(self, images_path, expressions, **kwargs):
    """Score every expression classifier against every image.

    Image features are computed in mini-batches of 10 (either through the
    in-graph feature extractor when ``learn_feats`` is set, or through the
    external ``feat_ext``), then each expression is turned into a linear
    classifier (bias in column 0, weights in the remaining columns) and
    applied to all image features at once.

    :param images_path: sequence of image paths; ``None`` entries are skipped.
    :param expressions: expressions accepted by :meth:`inference`.
    :return: score matrix of shape (num_expressions, num_images).
    """
    feat_batches = []
    _logger.info("Computing image representation")
    for batch_paths in batch_iterator(10, images_path):
        batch_paths = [p for p in batch_paths if p is not None]
        if not batch_paths:
            continue
        if self.learn_feats:
            batch_feats = self.tf_session.run(
                self._images_feats,
                feed_dict={self.is_training_ph: False,
                           self._images_ph: batch_paths})
        else:
            batch_feats = self.feat_ext.compute(batch_paths)
        feat_batches.append(batch_feats)
    images_feat = np.vstack(feat_batches)
    _logger.info("Computing expression classifiers")
    exp_rpr = self.inference(expressions)
    # Column 0 holds the bias term; the rest are the linear weights.
    bias = np.reshape(exp_rpr[:, 0], (-1, 1))
    weights = exp_rpr[:, 1:]
    _logger.info("Computing scores")
    return np.dot(weights, images_feat.T) + bias
def inference(self, expressions, **kwargs):
    """Compute the classifier representation for each boolean expression.

    Each expression tree is evaluated bottom-up: primitive leaves are looked
    up in ``self.prim_rpr``, NOT negates a representation, and AND/OR feed
    the concatenated operand representations through the network, selecting
    the branch via the switch placeholder (1 = AND, 2 = OR).

    :param expressions: iterable of expressions in list form, parseable by
        ``dbac_expression.list2exp_parse``.
    :return: array of shape (num_expressions, self.dim) with one classifier
        representation per expression.
    """
    def _run_binary_op(switch, v1, v2):
        # One forward pass over a single concatenated operand pair.
        pair = np.expand_dims(np.hstack([v1, v2]), 0)
        return np.ravel(self.tf_session.run(
            self.output_tn,
            feed_dict={self._prims_rpr_ph: pair,
                       self._switch_ph: switch * np.ones(1, dtype=np.int64)}))

    ops_dic = {
        dbac_expression.OPS[0]: lambda v1: -1 * v1,                        # NOT
        dbac_expression.OPS[1]: lambda v1, v2: _run_binary_op(1, v1, v2),  # AND
        dbac_expression.OPS[2]: lambda v1, v2: _run_binary_op(2, v1, v2),  # OR
    }
    var_dic = {str(p): self.prim_rpr.get_rpr(int(p))[0] for p in self.prim_rpr.get_ids()}
    # np.float / np.int aliases were removed in NumPy 1.24; use explicit dtypes.
    exp_rpr = np.zeros((len(expressions), self.dim), dtype=np.float64)
    for idx, exp_lst in enumerate(expressions):
        exp_tree = dbac_expression.list2exp_parse(exp_lst)
        exp_rpr[idx] = dbac_expression.eval_exp(exp_tree, var_dic, ops_dic)
        if (idx + 1) % 250 == 0:
            # Log the 1-based count so the message matches the trigger condition.
            _logger.info("Inference expression classifiers {}/{}.".format(idx + 1, len(expressions)))
    return exp_rpr
def load(self, file_path):
    """Restore model variables from per-branch checkpoint files.

    The AND and OR branch checkpoints (``<file_path>.AND.chk`` /
    ``<file_path>.OR.chk``) are always restored; the feature-extractor
    checkpoint (``<file_path>.FEAT.chk``) is restored only when features
    are learned and the checkpoint actually exists on disk.

    :param file_path: checkpoint path prefix.
    """
    for saver, tag in ((self.tf_saver_and, 'AND'), (self.tf_saver_or, 'OR')):
        saver.restore(self.tf_session, "{}.{}.chk".format(file_path, tag))
    feat_ckpt = "{}.FEAT.chk".format(file_path)
    if self.learn_feats and tf.gfile.Glob("{}*".format(feat_ckpt)):
        self.tf_saver_feats.restore(self.tf_session, feat_ckpt)
def save(self, file_path):
    """Write model variables to per-branch checkpoint files.

    Saves the AND and OR branch variables to ``<file_path>.AND.chk`` and
    ``<file_path>.OR.chk``; when features are learned, the feature-extractor
    variables are additionally saved to ``<file_path>.FEAT.chk``.

    :param file_path: checkpoint path prefix.
    """
    branches = [(self.tf_saver_and, 'AND'), (self.tf_saver_or, 'OR')]
    if self.learn_feats:
        branches.append((self.tf_saver_feats, 'FEAT'))
    for saver, tag in branches:
        saver.save(self.tf_session, "{}.{}.chk".format(file_path, tag),
                   latest_filename='checkpoint.{}'.format(tag))
def _leaky_relu(x, alpha=0.1):
    """Leaky ReLU activation: elementwise ``max(x, alpha * x)``.

    :param x: input tensor.
    :param alpha: slope applied to negative inputs (default 0.1).
    :return: tensor of the same shape as ``x``.
    """
    leaked = alpha * x
    return tf.maximum(x, leaked)
def _create_reset_metric(metric, scope='reset_metrics', **metric_args):
    """Wrap a streaming tf.metrics-style metric with an explicit reset op.

    The metric is created inside its own variable scope so its internal
    accumulator variables can be located and re-initialized on demand
    (e.g. at the start of each evaluation epoch).

    :param metric: metric factory, e.g. ``tf.metrics.precision``.
    :param scope: variable-scope name isolating the metric's variables.
    :param metric_args: keyword arguments forwarded to ``metric``.
    :return: tuple ``(metric_op, update_op, reset_op)``.
    """
    # Bind the created scope under a new name: do not shadow the `scope`
    # parameter, and avoid the builtin name `vars` for the variable list.
    with tf.variable_scope(scope) as metric_scope:
        metric_op, update_op = metric(**metric_args)
        metric_vars = tf.contrib.framework.get_variables(
            metric_scope, collection=tf.GraphKeys.LOCAL_VARIABLES)
        reset_op = tf.variables_initializer(metric_vars)
    return metric_op, update_op, reset_op
def _mlp(input_tn, dims, l2reg=0.001, scope='mlp', reuse=False):
    """Two-layer fully-connected network built with tf.contrib.slim.

    The hidden layer uses the leaky-ReLU activation; the output layer is
    linear. Both layers carry an L2 weight regularizer.

    :param input_tn: input tensor.
    :param dims: two-element sequence with the hidden and output widths.
    :param l2reg: L2 regularization strength for both layers.
    :param scope: name prefix for the layer variable scopes.
    :param reuse: whether to reuse existing variables in those scopes.
    :return: output tensor of width ``dims[1]``.
    """
    hidden = tf_slim.fully_connected(
        input_tn, dims[0],
        scope='{}/fc1'.format(scope), reuse=reuse,
        activation_fn=_leaky_relu,
        weights_regularizer=tf_slim.l2_regularizer(l2reg))
    output = tf_slim.fully_connected(
        hidden, dims[1],
        scope='{}/fc2'.format(scope), reuse=reuse,
        activation_fn=None,
        weights_regularizer=tf_slim.l2_regularizer(l2reg))
    return output
| [
"logging.getLogger",
"tensorflow.equal",
"tensorflow.shape",
"dbac_lib.dbac_expression.list2exp_parse",
"numpy.random.rand",
"numpy.hstack",
"tensorflow.split",
"numpy.logical_not",
"numpy.array",
"dbac_lib.dbac_util.batch_iterator",
"tensorflow.variables_initializer",
"dbac_lib.dbac_expressio... | [((560, 587), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (577, 587), False, 'import abc, logging, os, anytree\n'), ((4839, 4878), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['exp_lst'], {}), '(exp_lst)\n', (4869, 4878), False, 'from dbac_lib import dbac_expression\n'), ((4982, 5025), 'dbac_lib.dbac_expression.eval_exp', 'dbac_expression.eval_exp', (['exp_tree', 'var_dic'], {}), '(exp_tree, var_dic)\n', (5006, 5025), False, 'from dbac_lib import dbac_expression\n'), ((5043, 5162), 'sklearn.svm.LinearSVC', 'sklearn_svm.LinearSVC', ([], {'C': '(1e-05)', 'class_weight': '{(1): 2.0, (0): 1.0}', 'verbose': '(0)', 'penalty': '"""l2"""', 'loss': '"""hinge"""', 'dual': '(True)'}), "(C=1e-05, class_weight={(1): 2.0, (0): 1.0}, verbose=0,\n penalty='l2', loss='hinge', dual=True)\n", (5064, 5162), True, 'from sklearn import svm as sklearn_svm\n'), ((26125, 26149), 'tensorflow.maximum', 'tf.maximum', (['x', '(alpha * x)'], {}), '(x, alpha * x)\n', (26135, 26149), True, 'import tensorflow as tf\n'), ((4152, 4184), 'numpy.save', 'np.save', (['file_path', 'self.cls_dic'], {}), '(file_path, self.cls_dic)\n', (4159, 4184), True, 'import numpy as np\n'), ((7269, 7279), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7277, 7279), True, 'import tensorflow as tf\n'), ((7297, 7313), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7311, 7313), True, 'import tensorflow as tf\n'), ((7474, 7517), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph', 'config': 'config'}), '(graph=self.graph, config=config)\n', (7484, 7517), True, 'import tensorflow as tf\n'), ((23226, 23257), 'dbac_lib.dbac_util.batch_iterator', 'batch_iterator', (['(10)', 'images_path'], {}), '(10, images_path)\n', (23240, 23257), False, 'from dbac_lib.dbac_util import CycleIterator, batch_iterator, TicToc\n'), ((23767, 23789), 'numpy.vstack', 'np.vstack', (['images_feat'], {}), '(images_feat)\n', 
(23776, 23789), True, 'import numpy as np\n'), ((26233, 26257), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (26250, 26257), True, 'import tensorflow as tf\n'), ((26336, 26423), 'tensorflow.contrib.framework.get_variables', 'tf.contrib.framework.get_variables', (['scope'], {'collection': 'tf.GraphKeys.LOCAL_VARIABLES'}), '(scope, collection=tf.GraphKeys.\n LOCAL_VARIABLES)\n', (26370, 26423), True, 'import tensorflow as tf\n'), ((26451, 26481), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['vars'], {}), '(vars)\n', (26475, 26481), True, 'import tensorflow as tf\n'), ((3509, 3537), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_threads'}), '(n_jobs=num_threads)\n', (3517, 3537), False, 'from joblib import Parallel, delayed\n'), ((4006, 4046), 'numpy.reshape', 'np.reshape', (['expressions_w[:, 0]', '(-1, 1)'], {}), '(expressions_w[:, 0], (-1, 1))\n', (4016, 4046), True, 'import numpy as np\n'), ((4049, 4092), 'numpy.dot', 'np.dot', (['expressions_w[:, 1:]', 'images_feat.T'], {}), '(expressions_w[:, 1:], images_feat.T)\n', (4055, 4092), True, 'import numpy as np\n'), ((4362, 4397), 'dbac_lib.dbac_expression.exp2list_parse', 'dbac_expression.exp2list_parse', (['exp'], {}), '(exp)\n', (4392, 4397), False, 'from dbac_lib import dbac_expression\n'), ((4929, 4963), 'dbac_lib.dbac_expression.get_vars', 'dbac_expression.get_vars', (['exp_tree'], {}), '(exp_tree)\n', (4953, 4963), False, 'from dbac_lib import dbac_expression\n'), ((5426, 5444), 'numpy.sum', 'np.sum', (['exp_labels'], {}), '(exp_labels)\n', (5432, 5444), True, 'import numpy as np\n'), ((6612, 6651), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['exp_lst'], {}), '(exp_lst)\n', (6642, 6651), False, 'from dbac_lib import dbac_expression\n'), ((6678, 6730), 'dbac_lib.dbac_expression.eval_exp', 'dbac_expression.eval_exp', (['exp_tree', 'var_dic', 'ops_dic'], {}), '(exp_tree, var_dic, ops_dic)\n', (6702, 6730), False, 'from 
dbac_lib import dbac_expression\n'), ((8458, 8506), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 2 * self.dim)'], {}), '(tf.float32, (None, 2 * self.dim))\n', (8472, 8506), True, 'import tensorflow as tf\n'), ((8543, 8583), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, None)'], {}), '(tf.float32, (None, None))\n', (8557, 8583), True, 'import tensorflow as tf\n'), ((8614, 8647), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None,)'], {}), '(tf.int32, (None,))\n', (8628, 8647), True, 'import tensorflow as tf\n'), ((8682, 8705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (8696, 8705), True, 'import tensorflow as tf\n'), ((15340, 15440), 'tensorflow.losses.hinge_loss', 'tf.losses.hinge_loss', (['self._ground_truth_ph', 'self.scores_tn'], {'reduction': 'tf.losses.Reduction.MEAN'}), '(self._ground_truth_ph, self.scores_tn, reduction=tf.\n losses.Reduction.MEAN)\n', (15360, 15440), True, 'import tensorflow as tf\n'), ((15968, 15999), 'tensorflow.greater', 'tf.greater', (['self.scores_tn', '(0.0)'], {}), '(self.scores_tn, 0.0)\n', (15978, 15999), True, 'import tensorflow as tf\n'), ((16180, 16223), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""reg_loss"""', 'reg_loss_val'], {}), "('reg_loss', reg_loss_val)\n", (16197, 16223), True, 'import tensorflow as tf\n'), ((16382, 16425), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cls_loss"""', 'cls_loss_val'], {}), "('cls_loss', cls_loss_val)\n", (16399, 16425), True, 'import tensorflow as tf\n'), ((16589, 16634), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""norm_loss"""', 'norm_loss_val'], {}), "('norm_loss', norm_loss_val)\n", (16606, 16634), True, 'import tensorflow as tf\n'), ((16776, 16817), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'loss_val'], {}), "('total_loss', loss_val)\n", (16793, 16817), True, 'import tensorflow as tf\n'), ((16996, 17036), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Precision"""', 'prec_val'], {}), "('Precision', prec_val)\n", (17013, 17036), True, 'import tensorflow as tf\n'), ((17208, 17244), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Recall"""', 'rec_val'], {}), "('Recall', rec_val)\n", (17225, 17244), True, 'import tensorflow as tf\n'), ((17257, 17344), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Fscore"""', '(2 * prec_val * rec_val / (prec_val + rec_val + 1e-06))'], {}), "('Fscore', 2 * prec_val * rec_val / (prec_val + rec_val + \n 1e-06))\n", (17274, 17344), True, 'import tensorflow as tf\n'), ((17364, 17386), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (17384, 17386), True, 'import tensorflow as tf\n'), ((17875, 17916), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', (['graph'], {}), '(graph)\n', (17909, 17916), True, 'import tensorflow as tf\n'), ((17941, 17978), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (17963, 17978), True, 'import tensorflow as tf\n'), ((18111, 18144), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (18142, 18144), True, 'import tensorflow as tf\n'), ((18170, 18199), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (18184, 18199), True, 'import tensorflow as tf\n'), ((23908, 23942), 'numpy.reshape', 'np.reshape', (['exp_rpr[:, 0]', '(-1, 1)'], {}), '(exp_rpr[:, 0], (-1, 1))\n', (23918, 23942), True, 'import numpy as np\n'), ((24017, 24041), 'numpy.dot', 'np.dot', (['A', 'images_feat.T'], {}), '(A, images_feat.T)\n', (24023, 24041), True, 'import numpy as np\n'), ((24993, 25032), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['exp_lst'], {}), '(exp_lst)\n', (25023, 25032), False, 'from dbac_lib import dbac_expression\n'), ((25060, 25112), 
'dbac_lib.dbac_expression.eval_exp', 'dbac_expression.eval_exp', (['exp_tree', 'var_dic', 'ops_dic'], {}), '(exp_tree, var_dic, ops_dic)\n', (25084, 25112), False, 'from dbac_lib import dbac_expression\n'), ((26764, 26793), 'tensorflow.contrib.slim.l2_regularizer', 'tf_slim.l2_regularizer', (['l2reg'], {}), '(l2reg)\n', (26786, 26793), True, 'import tensorflow.contrib.slim as tf_slim\n'), ((26954, 26983), 'tensorflow.contrib.slim.l2_regularizer', 'tf_slim.l2_regularizer', (['l2reg'], {}), '(l2reg)\n', (26976, 26983), True, 'import tensorflow.contrib.slim as tf_slim\n'), ((3235, 3256), 'numpy.random.rand', 'np.random.rand', (['*dims'], {}), '(*dims)\n', (3249, 3256), True, 'import numpy as np\n'), ((4240, 4258), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (4247, 4258), True, 'import numpy as np\n'), ((5453, 5479), 'numpy.logical_not', 'np.logical_not', (['exp_labels'], {}), '(exp_labels)\n', (5467, 5479), True, 'import numpy as np\n'), ((8810, 8868), 'tensorflow.split', 'tf.split', (['self._prims_rpr_ph'], {'num_or_size_splits': '(2)', 'axis': '(1)'}), '(self._prims_rpr_ph, num_or_size_splits=2, axis=1)\n', (8818, 8868), True, 'import tensorflow as tf\n'), ((9022, 9067), 'tensorflow.concat', 'tf.concat', (['[prims_rpr_a, prims_rpr_b]'], {'axis': '(1)'}), '([prims_rpr_a, prims_rpr_b], axis=1)\n', (9031, 9067), True, 'import tensorflow as tf\n'), ((11984, 12025), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.output_tn'], {'dim': '(1)'}), '(self.output_tn, dim=1)\n', (12002, 12025), True, 'import tensorflow as tf\n'), ((13288, 13336), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.dim - 1)'], {}), '(tf.float32, (None, self.dim - 1))\n', (13302, 13336), True, 'import tensorflow as tf\n'), ((13514, 13555), 'tensorflow.reshape', 'tf.reshape', (['self.output_tn[:, 0]', '(-1, 1)'], {}), '(self.output_tn[:, 0], (-1, 1))\n', (13524, 13555), True, 'import tensorflow as tf\n'), ((13573, 13643), 'tensorflow.matmul', 
'tf.matmul', (['self.output_tn[:, 1:]', 'self._images_feats'], {'transpose_b': '(True)'}), '(self.output_tn[:, 1:], self._images_feats, transpose_b=True)\n', (13582, 13643), True, 'import tensorflow as tf\n'), ((13734, 13782), 'tensorflow.contrib.slim.get_model_variables', 'tf_slim.get_model_variables', ([], {'scope': '"""nba_mlp/AND"""'}), "(scope='nba_mlp/AND')\n", (13761, 13782), True, 'import tensorflow.contrib.slim as tf_slim\n'), ((13830, 13877), 'tensorflow.contrib.slim.get_model_variables', 'tf_slim.get_model_variables', ([], {'scope': '"""nba_mlp/OR"""'}), "(scope='nba_mlp/OR')\n", (13857, 13877), True, 'import tensorflow.contrib.slim as tf_slim\n'), ((15188, 15223), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (15221, 15223), True, 'import tensorflow as tf\n'), ((15904, 15947), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""dem_loss"""', 'dem_loss_val'], {}), "('dem_loss', dem_loss_val)\n", (15921, 15947), True, 'import tensorflow as tf\n'), ((17413, 17443), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (17434, 17443), True, 'import tensorflow as tf\n'), ((18469, 18508), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['exp_lst'], {}), '(exp_lst)\n', (18499, 18508), False, 'from dbac_lib import dbac_expression\n'), ((18538, 18578), 'dbac_lib.dbac_expression.exp2list_parse', 'dbac_expression.exp2list_parse', (['exp_term'], {}), '(exp_term)\n', (18568, 18578), False, 'from dbac_lib import dbac_expression\n'), ((18702, 18745), 'dbac_lib.dbac_expression.eval_exp', 'dbac_expression.eval_exp', (['exp_term', 'var_dic'], {}), '(exp_term, var_dic)\n', (18726, 18745), False, 'from dbac_lib import dbac_expression\n'), ((19352, 19373), 'numpy.vstack', 'np.vstack', (['exp_labels'], {}), '(exp_labels)\n', (19361, 19373), True, 'import numpy as np\n'), ((21499, 21520), 'numpy.array', 'np.array', (['b_op_switch'], {}), 
'(b_op_switch)\n', (21507, 21520), True, 'import numpy as np\n'), ((3551, 3579), 'joblib.delayed', 'delayed', (['_compute_svm_params'], {}), '(_compute_svm_params)\n', (3558, 3579), False, 'from joblib import Parallel, delayed\n'), ((4505, 4540), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['exp'], {}), '(exp)\n', (4535, 4540), False, 'from dbac_lib import dbac_expression\n'), ((6370, 6389), 'numpy.multiply', 'np.multiply', (['v1', 'v2'], {}), '(v1, v2)\n', (6381, 6389), True, 'import numpy as np\n'), ((8912, 8950), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['prims_rpr_a'], {'dim': '(1)'}), '(prims_rpr_a, dim=1)\n', (8930, 8950), True, 'import tensorflow as tf\n'), ((8952, 8990), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['prims_rpr_b'], {'dim': '(1)'}), '(prims_rpr_b, dim=1)\n', (8970, 8990), True, 'import tensorflow as tf\n'), ((12104, 12148), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.dm_output_tn'], {'dim': '(1)'}), '(self.dm_output_tn, dim=1)\n', (12122, 12148), True, 'import tensorflow as tf\n'), ((12232, 12251), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (12241, 12251), True, 'import tensorflow as tf\n'), ((12291, 12325), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string', '(None,)'], {}), '(tf.string, (None,))\n', (12305, 12325), True, 'import tensorflow as tf\n'), ((12573, 12632), 'tensorflow.map_fn', 'tf.map_fn', (['img_prep_func', 'self._images_ph'], {'dtype': 'tf.float32'}), '(img_prep_func, self._images_ph, dtype=tf.float32)\n', (12582, 12632), True, 'import tensorflow as tf\n'), ((13191, 13234), 'tensorflow.contrib.slim.get_model_variables', 'tf_slim.get_model_variables', ([], {'scope': '"""vgg_16"""'}), "(scope='vgg_16')\n", (13218, 13234), True, 'import tensorflow.contrib.slim as tf_slim\n'), ((21052, 21103), 'dbac_lib.dbac_expression.list2exp_parse', 'dbac_expression.list2exp_parse', (['expressions[exp_id]'], {}), 
'(expressions[exp_id])\n', (21082, 21103), False, 'from dbac_lib import dbac_expression\n'), ((22895, 22930), 'os.path.join', 'os.path.join', (['snap_dir', '"""train.chk"""'], {}), "(snap_dir, 'train.chk')\n", (22907, 22930), False, 'import abc, logging, os, anytree\n'), ((9769, 9791), 'tensorflow.shape', 'tf.shape', (['prims_rpr_tn'], {}), '(prims_rpr_tn)\n', (9777, 9791), True, 'import tensorflow as tf\n'), ((10141, 10169), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(2)'], {}), '(self._switch_ph, 2)\n', (10149, 10169), True, 'import tensorflow as tf\n'), ((11760, 11788), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(2)'], {}), '(self._switch_ph, 2)\n', (11768, 11788), True, 'import tensorflow as tf\n'), ((12731, 12810), 'tensorflow.contrib.slim.nets.vgg.vgg_16', 'vgg.vgg_16', (['processed_images'], {'num_classes': '(1000)', 'is_training': 'self.is_training_ph'}), '(processed_images, num_classes=1000, is_training=self.is_training_ph)\n', (12741, 12810), False, 'from tensorflow.contrib.slim.nets import vgg\n'), ((15277, 15309), 'tensorflow.norm', 'tf.norm', (['self.output_tn'], {'axis': '(-1)'}), '(self.output_tn, axis=-1)\n', (15284, 15309), True, 'import tensorflow as tf\n'), ((18636, 18670), 'dbac_lib.dbac_expression.get_vars', 'dbac_expression.get_vars', (['exp_term'], {}), '(exp_term)\n', (18660, 18670), False, 'from dbac_lib import dbac_expression\n'), ((21344, 21385), 'dbac_lib.dbac_expression.exp2list_parse', 'dbac_expression.exp2list_parse', (['operand_a'], {}), '(operand_a)\n', (21374, 21385), False, 'from dbac_lib import dbac_expression\n'), ((21426, 21467), 'dbac_lib.dbac_expression.exp2list_parse', 'dbac_expression.exp2list_parse', (['operand_b'], {}), '(operand_b)\n', (21456, 21467), False, 'from dbac_lib import dbac_expression\n'), ((9412, 9435), 'numpy.ceil', 'np.ceil', (['(1.5 * self.dim)'], {}), '(1.5 * self.dim)\n', (9419, 9435), True, 'import numpy as np\n'), ((9611, 9634), 'numpy.ceil', 'np.ceil', (['(1.5 * self.dim)'], {}), 
'(1.5 * self.dim)\n', (9618, 9634), True, 'import numpy as np\n'), ((9926, 9954), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(0)'], {}), '(self._switch_ph, 0)\n', (9934, 9954), True, 'import tensorflow as tf\n'), ((10033, 10061), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(1)'], {}), '(self._switch_ph, 1)\n', (10041, 10061), True, 'import tensorflow as tf\n'), ((10656, 10685), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(50)'], {}), '(self._switch_ph, 50)\n', (10664, 10685), True, 'import tensorflow as tf\n'), ((11540, 11568), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(0)'], {}), '(self._switch_ph, 0)\n', (11548, 11568), True, 'import tensorflow as tf\n'), ((11647, 11675), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(1)'], {}), '(self._switch_ph, 1)\n', (11655, 11675), True, 'import tensorflow as tf\n'), ((12673, 12692), 'tensorflow.contrib.slim.nets.vgg.vgg_arg_scope', 'vgg.vgg_arg_scope', ([], {}), '()\n', (12690, 12692), False, 'from tensorflow.contrib.slim.nets import vgg\n'), ((13102, 13136), 'tensorflow.contrib.slim.get_model_variables', 'slim.get_model_variables', (['"""vgg_16"""'], {}), "('vgg_16')\n", (13126, 13136), False, 'from tensorflow.contrib import slim\n'), ((15615, 15667), 'tensorflow.norm', 'tf.norm', (['(self.output_tn - self.dm_output_tn)'], {'axis': '(-1)'}), '(self.output_tn - self.dm_output_tn, axis=-1)\n', (15622, 15667), True, 'import tensorflow as tf\n'), ((21253, 21269), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (21267, 21269), True, 'import numpy as np\n'), ((9306, 9324), 'tensorflow.shape', 'tf.shape', (['input_tn'], {}), '(input_tn)\n', (9314, 9324), True, 'import tensorflow as tf\n'), ((10522, 10550), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(2)'], {}), '(self._switch_ph, 2)\n', (10530, 10550), True, 'import tensorflow as tf\n'), ((11169, 11198), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(50)'], {}), '(self._switch_ph, 50)\n', (11177, 
11198), True, 'import tensorflow as tf\n'), ((12460, 12482), 'tensorflow.read_file', 'tf.read_file', (['img_path'], {}), '(img_path)\n', (12472, 12482), True, 'import tensorflow as tf\n'), ((18899, 18926), 'numpy.logical_not', 'np.logical_not', (['term_labels'], {}), '(term_labels)\n', (18913, 18926), True, 'import numpy as np\n'), ((24364, 24383), 'numpy.hstack', 'np.hstack', (['[v1, v2]'], {}), '([v1, v2])\n', (24373, 24383), True, 'import numpy as np\n'), ((24449, 24473), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'np.int'}), '(1, dtype=np.int)\n', (24456, 24473), True, 'import numpy as np\n'), ((24638, 24657), 'numpy.hstack', 'np.hstack', (['[v1, v2]'], {}), '([v1, v2])\n', (24647, 24657), True, 'import numpy as np\n'), ((24723, 24747), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'np.int'}), '(1, dtype=np.int)\n', (24730, 24747), True, 'import numpy as np\n'), ((10307, 10335), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(0)'], {}), '(self._switch_ph, 0)\n', (10315, 10335), True, 'import tensorflow as tf\n'), ((10414, 10442), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(1)'], {}), '(self._switch_ph, 1)\n', (10422, 10442), True, 'import tensorflow as tf\n'), ((11050, 11078), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(2)'], {}), '(self._switch_ph, 2)\n', (11058, 11078), True, 'import tensorflow as tf\n'), ((19097, 19118), 'numpy.where', 'np.where', (['term_labels'], {}), '(term_labels)\n', (19105, 19118), True, 'import numpy as np\n'), ((10822, 10850), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(0)'], {}), '(self._switch_ph, 0)\n', (10830, 10850), True, 'import tensorflow as tf\n'), ((10929, 10957), 'tensorflow.equal', 'tf.equal', (['self._switch_ph', '(1)'], {}), '(self._switch_ph, 1)\n', (10937, 10957), True, 'import tensorflow as tf\n'), ((19193, 19220), 'numpy.logical_not', 'np.logical_not', (['term_labels'], {}), '(term_labels)\n', (19207, 19220), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.