code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import absolute_import
from __future__ import print_function
import sys
import abc
import copy
import logging
import re
from abc import abstractmethod
from collections import OrderedDict
from .dna_reshapers import ReshapeDnaString, ReshapeDna
from .mutators import OneHotSequenceMutator, DNAStringSequenceMutator, rc_str
import numpy as np
import kipoi
# Module-level logger; the NullHandler keeps this library silent unless the
# consuming application configures logging itself.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def is_indel_wrapper(record):
    """Return True when a VCF record is (or looks like) an indel.

    Besides the record's own `is_indel` flag, a missing ALT or REF, or a
    REF of "." (str or bytes), is treated as an indel-like record.
    """
    return bool(
        record.is_indel
        or not record.ALT
        or not record.REF
        or record.REF in (".", b".")
    )
def ensure_tabixed_vcf(input_fn, is_sorted=False, force_tabix=True):
    """Make sure a VCF file is bgzipped and tabix-indexed.

    Args:
        input_fn: path to the input VCF.
        is_sorted: unused here; kept for interface compatibility.
        force_tabix: overwrite an existing compressed file / index.

    Returns:
        Path to the (possibly newly bgzipped) tabixed VCF.
    """
    import pybedtools
    import pysam
    bed_handle = pybedtools.BedTool(input_fn)
    out_fn = input_fn
    if not bed_handle._tabixed():
        # Work around a pybedtools bug: bgzip manually and index with pysam
        # instead of using BedTool.tabix().
        out_fn = bed_handle.bgzip(in_place=True, force=force_tabix)
        pysam.tabix_index(out_fn, force=force_tabix, preset="vcf")
    return out_fn
def prep_str(s):
    """Sanitize a string for use as an identifier.

    Runs of characters other than word chars, '.', ':', '/' and whitespace
    are collapsed into a single underscore; then any whitespace run is also
    collapsed into a single underscore.
    """
    # see https://stackoverflow.com/questions/1007481
    sanitized = re.sub(r"[^\w\.\:\s/]+", '_', s)
    return re.sub(r"\s+", '_', sanitized)
def select_from_dl_batch(obj, rows, nrows_expected=None):
    """Select `rows` along the first axis of every array in a batch object.

    `obj` may be an array, a list of arrays, or a (possibly ordered) dict of
    arrays. When `nrows_expected` is given, every array's first dimension is
    validated against it before subsetting.
    """
    def take_rows(arr):
        if nrows_expected is not None and arr.shape[0] != nrows_expected:
            raise Exception("Error selecting: Expected the first dimension to have %d rows!" % nrows_expected)
        return arr[rows, ...]
    if isinstance(obj, dict):
        # preserve ordering semantics of the input dict type
        result = OrderedDict() if isinstance(obj, OrderedDict) else {}
        for key in obj:
            result[key] = take_rows(obj[key])
        return result
    if isinstance(obj, list):
        return [take_rows(entry) for entry in obj]
    return take_rows(obj)
def write_hdf5(fname, data):
    """Generic hdf5 bulk writer.

    Lists are stored as dicts with synthetic "_list_<i>" keys so that
    `read_hdf5` can reconstruct the original list.
    """
    import deepdish
    if isinstance(data, list):
        data = {"_list_{i}".format(i=idx): item for idx, item in enumerate(data)}
    deepdish.io.save(fname, data)
# Alternative
# def recursive_h5_mutmap_writer(objs, handle, path=""):
# import six
# for key in objs.keys():
# if isinstance(objs[key], dict):
# g = handle.create_group(key)
# recursive_h5_mutmap_writer(objs[key], g, path=path + "/" + key)
# else:
# if isinstance(objs[key], list) or isinstance(objs[key], np.ndarray):
# el = np.array(objs[key])
# if "U" in el.dtype.str:
# el = el.astype("S")
# handle.create_dataset(name=path + "/" + key, data=el, chunks=True, compression='gzip')
# else:
# el = objs[key]
# if isinstance(el, six.string_types):
# el = str(el)
# handle.create_dataset(name=path + "/" + key, data=el)
def read_hdf5(fname):
    """Generic hdf5 bulk reader.

    Dicts whose first key starts with "_list_" were lists originally (see
    `write_hdf5`) and are converted back into lists.
    """
    import deepdish
    data = deepdish.io.load(fname)
    if isinstance(data, dict) and list(data.keys())[0].startswith("_list_"):
        return [data["_list_{i}".format(i=idx)] for idx in range(len(data))]
    return data
# def recursive_h5_mutmap_reader(handle):
# import h5py
# objs = {}
# for key in handle.keys():
# if isinstance(handle[key], h5py.Group):
# objs[key] = recursive_h5_mutmap_reader(handle[key])
# else:
# if isinstance(handle[key], h5py.Dataset):
# el = handle[key].value
# if isinstance(el, np.ndarray):
# if "S" in el.dtype.str:
# el = el.astype(str)
# objs[key] = el
# return objs
def _get_seq_len(input_data):
if isinstance(input_data, (list, tuple)):
return input_data[0].shape
elif isinstance(input_data, dict):
for k in input_data:
return input_data[k].shape
elif isinstance(input_data, np.ndarray):
return input_data.shape
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
def concat_columns(df, sep="|"):
    """Concatenate all columns of a dataframe into a pd.Series.

    Args:
        df: input pd.DataFrame (must have at least one column).
        sep: separator placed between column values.

    Returns:
        pd.Series of strings, one per row.

    Raises:
        ValueError: if `df` has no columns (previously this raised an
            opaque NameError because `column` was never assigned).
    """
    if df.shape[1] == 0:
        raise ValueError("concat_columns requires a dataframe with at least one column")
    column = df.iloc[:, 0].astype(str)
    for i in range(1, df.shape[1]):
        column = column.str.cat(df.iloc[:, i].astype(str), sep=sep)
    return column
# TODO: generalise so that also FORMAT, FILTER and sample identifiers are supported...
def convert_record(input_record, pyvcf_reader):
    """
    Convert a cyvcf2 record into a pyvcf record. The source files should at
    least be similar in terms of INFO tags. FILTER and FORMAT tags might not
    be handled correctly at the moment!

    Args:
        input_record: cyvcf2 record to convert.
        pyvcf_reader: a pyvcf Reader whose `_parse_info` / `_parse_alt`
            machinery is reused to build the output record.

    Returns:
        A `vcf.model._Record` with empty FORMAT sample data (last arg `{}`).
    """
    import vcf

    def revert_to_info(info_obj):
        # Re-serialize the cyvcf2 INFO mapping as "k=v;..." text and let the
        # pyvcf reader parse it back into its own representation.
        out_str_elms = []
        for el in list(info_obj):
            out_str_elms.append(u"{0}={1}".format(*el))
        if len(out_str_elms) > 0:
            if sys.version_info[0] < 3:
                # python2 pyvcf expects bytes here
                return pyvcf_reader._parse_info(u";".join(out_str_elms).encode("ascii", "ignore"))
            else:
                return pyvcf_reader._parse_info(u";".join(out_str_elms))
        else:
            return {}
    #
    info_tag = revert_to_info(input_record.INFO)
    alt = pyvcf_reader._map(pyvcf_reader._parse_alt, input_record.ALT)
    return vcf.model._Record(input_record.CHROM, input_record.POS, input_record.ID,
                             input_record.REF, alt, input_record.QUAL, input_record.FILTER,
                             info_tag, input_record.FORMAT, {})
def default_vcf_id_gen(vcf_record, id_delim=":"):
    """Build a variant id string: CHROM:POS:REF:[ALT,...].

    Every ALT entry is str()-ed first so python2 and python3 render the
    list representation identically.
    """
    alt_ids = str([str(alt) for alt in vcf_record.ALT])
    fields = [str(vcf_record.CHROM), str(vcf_record.POS), str(vcf_record.REF), alt_ids]
    return id_delim.join(fields)
class RegionGenerator(object):
    """Abstract base class for turning a variant record into genomic regions.

    Subclasses implement `__call__` and are expected to assign
    `self.seq_length` (and the centering offsets, if used) themselves.
    """
    # NOTE(review): py2-style metaclass declaration — has no effect on
    # python3, so abstractmethod does not actually prevent instantiation there.
    __metaclass__ = abc.ABCMeta

    def __init__(self, model_info_extractor, seq_length=None):
        # NOTE(review): the `seq_length` argument is ignored here; every
        # subclass sets self.seq_length itself after calling super().__init__.
        self.seq_length = None
        self.centered_l_offset = None  # bases left of the variant position
        self.centered_r_offset = None  # bases right of the variant position
        self.model_info_extractor = model_info_extractor

    @abstractmethod
    def __call__(self, variant):
        """single variant instance yielded by vcf_iter
        """
        pass
class SnvCenteredRg(RegionGenerator):
    """Region generator producing a model-length window centered on a SNV."""

    def __init__(self, model_info_extractor, seq_length=None):
        """
        Arguments:
            model_info_extractor: ModelInfoExtractor object.
            seq_length: Optional override of the model sequence length, for
                models with variable-length sequence input.
        """
        super(SnvCenteredRg, self).__init__(model_info_extractor)
        self.seq_length = seq_length if seq_length is not None else model_info_extractor.get_seq_len()
        if self.seq_length is None:
            raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`")
        half = int(self.seq_length / 2)
        # variant sits at (0-based) position half-1 .. odd lengths get the
        # extra base on the right.
        self.centered_l_offset = half - 1
        self.centered_r_offset = half + self.seq_length % 2

    def __call__(self, variant_record):
        """single variant instance yielded by vcf_iter
        """
        pos = variant_record.POS
        return {"chrom": [variant_record.CHROM],
                "start": [pos - self.centered_l_offset],
                "end": [pos + self.centered_r_offset],
                }
class BedOverlappingRg(RegionGenerator):
    """Region generator tiling bed entries with model-length windows."""

    def __init__(self, model_info_extractor, seq_length=None):
        super(BedOverlappingRg, self).__init__(model_info_extractor)
        self.seq_length = seq_length if seq_length is not None else model_info_extractor.get_seq_len()
        if self.seq_length is None:
            raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`")

    def __call__(self, bed_entry):
        """Generate regions based on a bed file entry. Outputs consecutive
        regions of model sequence length starting from bed_entry.start and
        reaching at least until bed_entry.end. Output regions are
        non-overlapping, hence the covered output regions may cover more
        genetic space than specified in bed_entry (overhanging tail).
        """
        region_len = bed_entry.end - bed_entry.start
        # ceil-divide: one extra window covers the overhanging tail
        num_intervals = region_len // self.seq_length + int((region_len % self.seq_length) != 0)
        chroms, starts, ends, ids = [], [], [], []
        for idx in range(num_intervals):
            chroms.append(bed_entry.chrom)
            starts.append(bed_entry.start + idx * self.seq_length)
            ends.append(bed_entry.start + (idx + 1) * self.seq_length)
            ids.append(bed_entry.name + ".%d" % idx)
        return {"chrom": chroms, "start": starts, "end": ends, "id": ids}
class SnvPosRestrictedRg(RegionGenerator):
    """SNV-centered region generator restricted to intervals of a bed file.

    Only variants overlapping the tabixed bed intervals produce regions;
    windows are shifted to stay inside the overlapped interval.
    """

    def __init__(self, model_info_extractor, pybed_def, seq_length=None):
        super(SnvPosRestrictedRg, self).__init__(model_info_extractor)
        # tabix the bed definition so overlaps can be queried per variant
        self.tabixed = pybed_def.tabix(in_place=False)
        if seq_length is not None:
            self.seq_length = seq_length
        else:
            self.seq_length = model_info_extractor.get_seq_len()
        if self.seq_length is None:
            raise Exception("Model input sequence length is not defined. Please set it manually using `seq_length`")
        seq_length_half = int(self.seq_length / 2)
        self.centered_l_offset = seq_length_half - 1
        self.centered_r_offset = seq_length_half + self.seq_length % 2

    def __call__(self, variant_record):
        """single variant instance yielded by vcf_iter
        """
        # all bed intervals overlapping the 1-base variant position
        overlap = self.tabixed.tabix_intervals(
            "%s:%d-%d" % (variant_record.CHROM, variant_record.POS, variant_record.POS + 1))
        chroms = []
        starts = []
        ends = []
        for interval in overlap:
            i_s = interval.start + 1  # convert 0-based bed start to 1-based
            i_e = interval.end
            if len(interval) < self.seq_length:
                # interval too small to hold a model-length window
                continue
            if len(interval) != self.seq_length:
                # center window on the variant, then shift it so it stays
                # fully inside the interval
                centered_se = np.array(
                    [(variant_record.POS - self.centered_l_offset), (variant_record.POS + self.centered_r_offset)])
                start_missing = centered_se[0] - i_s  # >=0 if ok
                end_missing = i_e - centered_se[1]  # >=0 if ok
                if start_missing < 0:
                    centered_se -= start_missing  # shift right
                elif end_missing < 0:
                    centered_se += end_missing  # shift left
                assert centered_se[1] - centered_se[0] + 1 == self.seq_length
                assert (i_s <= centered_se[0]) and (i_e >= centered_se[1])
                i_s, i_e = centered_se.tolist()
            chroms.append(variant_record.CHROM)
            starts.append(i_s)
            ends.append(i_e)
        return {"chrom": chroms, "start": starts, "end": ends}
class ModelInfoExtractor(object):
    """Collects everything needed for variant effect prediction from a kipoi
    model + dataloader pair: per-sequence-field mutators, string converters,
    array reshapers, the associated metadata (ranges) fields, the bed-file
    requirements of the dataloader, and the model output annotation.
    """

    def __init__(self, model_obj, dataloader_obj):
        self.model = model_obj
        self.dataloader = dataloader_obj
        self.seq_fields = _get_seq_fields(model_obj)
        # Collect the different input DNA sequence fields and prepare the
        # correct transformation to the standard representation, together
        # with the corresponding ranges (metadata) objects.
        self.seq_input_metadata = {}
        self.seq_input_mutator = {}
        self.seq_to_str_converter = {}
        self.seq_input_array_trafo = {}
        for seq_field in self.seq_fields:
            special_type = _get_specialtype(dataloader_obj, seq_field)
            if special_type is None:
                # Fix: Logger.warn() is a deprecated alias (removed in
                # python 3.13) — use warning() instead.
                logger.warning("special_type of sequence field '%s' is not set,"
                               "assuming 1-hot encoded DNA sequence." % str(seq_field))
            if (special_type is None) or (special_type == kipoi.components.ArraySpecialType.DNASeq):
                # one-hot encoded DNA sequence input
                dna_seq_trafo = ReshapeDna(_get_seq_shape_model(model_obj, seq_field))
                self.seq_input_mutator[seq_field] = OneHotSequenceMutator(dna_seq_trafo)
                self.seq_to_str_converter[seq_field] = OneHotSeqExtractor(dna_seq_trafo)
                self.seq_input_array_trafo[seq_field] = dna_seq_trafo
            if special_type == kipoi.components.ArraySpecialType.DNAStringSeq:
                # string DNA sequence input
                dna_seq_trafo = ReshapeDnaString(_get_seq_shape_model(model_obj, seq_field))
                self.seq_input_mutator[seq_field] = DNAStringSequenceMutator(dna_seq_trafo)
                self.seq_to_str_converter[seq_field] = StrSeqExtractor(dna_seq_trafo)
                self.seq_input_array_trafo[seq_field] = dna_seq_trafo
            self.seq_input_metadata[seq_field] = _get_metadata_name(dataloader_obj, seq_field)
        # Where (if anywhere) does the dataloader accept a bed file argument?
        self.exec_files_bed_keys = _get_dl_bed_fields(dataloader_obj)
        # NOTE(review): this boolean instance attribute shadows the
        # `requires_region_definition` method defined below — kept as-is
        # because callers rely on the attribute.
        self.requires_region_definition = False
        if (self.exec_files_bed_keys is not None) and (len(self.exec_files_bed_keys) != 0):
            self.requires_region_definition = True
        self.seq_length = None  # None: no region definition required, or undefined sequence length
        if self.requires_region_definition:
            # seems to require a bed file definition, so try to assign a sequence length:
            seq_lens = [self.seq_input_array_trafo[seq_field].get_seq_len() for seq_field in self.seq_input_array_trafo]
            seq_len = list(set([el for el in seq_lens]))
            seq_len_noNone = [el for el in seq_len if el is not None]
            if len(seq_len) == 0:
                raise Exception("dataloader.yaml defines postprocessing > args > bed_input, but in model.yaml none of "
                                "the postprocessing > args > seq_input entries defines a sequence length within their "
                                "shape.")
            elif len(seq_len_noNone) > 1:
                raise Exception("dataloader.yaml defines postprocessing > args > bed_input, but in model.yaml sequence"
                                "lengths differ in the postprocessing > args > seq_input entries which is inferred "
                                "from the shapes.")
            if seq_len_noNone != seq_len:
                self.seq_length = None
            else:
                self.seq_length = seq_len[0]
        self.model_out_annotation = None
        # Get model output annotation:
        if self.model_out_annotation is None:
            if isinstance(model_obj.schema.targets, dict):
                raise Exception("Variant effect prediction with dict(array) model output not implemented!")
            elif isinstance(model_obj.schema.targets, list):
                self.model_out_annotation = np.array([x.name for x in model_obj.schema.targets])
            else:
                if model_obj.schema.targets.column_labels is not None:
                    self.model_out_annotation = np.array(model_obj.schema.targets.column_labels)
        # If no model output annotation is defined, fall back to column indices.
        if self.model_out_annotation is None:
            self.model_out_annotation = np.array([str(i) for i in range(model_obj.schema.targets.shape[0])])
        # Check if model supports simple rc-testing of input sequences:
        self.use_seq_only_rc = _get_model_use_seq_only_rc(model_obj)

    def get_mutatable_inputs(self, only_one_hot=False):
        """Names of sequence inputs that can be mutated; optionally only the
        one-hot encoded ones."""
        if only_one_hot:
            return [k for k, v in self.seq_input_mutator.items() if isinstance(v, OneHotSequenceMutator)]
        return list(self.seq_input_mutator.keys())

    def get_seq_mutator(self, seq_field):
        """Return the sequence mutator for `seq_field`."""
        return self.seq_input_mutator[seq_field]

    def get_seq_metadata(self, seq_field):
        """Return the metadata (ranges) field name associated with `seq_field`."""
        return self.seq_input_metadata[seq_field]

    def get_all_metadata_fields(self):
        """Unique metadata field names across all sequence inputs."""
        return list(set(self.seq_input_metadata.values()))

    def get_seq_len(self):
        """Model input sequence length (None if undefined / variable)."""
        return self.seq_length

    def requires_region_definition(self):
        # NOTE(review): unreachable on instances — __init__ assigns a boolean
        # attribute of the same name which shadows this method.
        return self.requires_region_definition

    def get_exec_files_bed_keys(self):
        """Dataloader argument names that accept the postprocessing bed file,
        or None when no region definition is required."""
        if self.requires_region_definition:
            return self.exec_files_bed_keys

    def get_model_out_annotation(self):
        """Labels for the model output columns (numpy array of str)."""
        return self.model_out_annotation
def _get_metadata_name(dataloader, seq_key):
if isinstance(dataloader.output_schema.inputs, dict):
ranges_slots = dataloader.output_schema.inputs[seq_key].associated_metadata
elif isinstance(dataloader.output_schema.inputs, list):
ranges_slots = [x.associated_metadata for x in dataloader.output_schema.inputs if x.name == seq_key][0]
else:
ranges_slots = dataloader.output_schema.inputs.associated_metadata
# check the ranges slots
if len(ranges_slots) != 1:
raise ValueError(
"Exactly one metadata ranges field must defined for a sequence that has to be used for effect precition.")
return ranges_slots[0]
def _get_specialtype(dataloader, seq_field):
if isinstance(dataloader.output_schema.inputs, dict):
seq_obj = dataloader.output_schema.inputs[seq_field]
elif isinstance(dataloader.output_schema.inputs, list):
seq_obj = [x for x in dataloader.output_schema.inputs if x.name == seq_field][0]
else:
seq_obj = dataloader.output_schema.inputs
if hasattr(seq_obj, "special_type"):
return seq_obj.special_type
else:
return None
def _get_seq_fields(model):
if model.postprocessing.get("variant_effects", None) is None:
raise Exception("Model does not support var_effect_prediction")
else:
return model.postprocessing["variant_effects"].seq_input
def _get_model_use_seq_only_rc(model):
if model.postprocessing.get("variant_effects", None) is None:
return False
else:
return model.postprocessing["variant_effects"].use_rc
def _get_seq_shape(dataloader, seq_field):
if isinstance(dataloader.output_schema.inputs, dict):
orig_shape = dataloader.output_schema.inputs[seq_field].shape
elif isinstance(dataloader.output_schema.inputs, list):
orig_shape = [x.shape for x in dataloader.output_schema.inputs if x.name == seq_field][0]
else:
orig_shape = dataloader.output_schema.inputs.shape
return orig_shape
def _get_seq_shape_model(model, seq_field):
if isinstance(model.schema.inputs, dict):
orig_shape = model.schema.inputs[seq_field].shape
elif isinstance(model.schema.inputs, list):
orig_shape = [x.shape for x in model.schema.inputs if x.name == seq_field][0]
else:
orig_shape = model.schema.inputs.shape
return orig_shape
def _get_dl_bed_fields(dataloader):
if dataloader.postprocessing.get("variant_effects", None) is None:
return None
else:
return getattr(dataloader.postprocessing["variant_effects"], "bed_input", None)
# TODO - can we find a better name for this class?
class OneHotSeqExtractor(object):
    """Convert one-hot encoded DNA sequences back into strings."""

    alphabet = ['A', 'C', 'G', 'T']

    def __init__(self, array_trafo=None):
        # optional reshaper bringing model-layout arrays into the standard
        # [N_samples, seq_len, 4] layout
        self.array_trafo = array_trafo

    def to_str(self, input_set, is_rc):
        """Decode one-hot sequences into strings.

        Args:
            input_set: one-hot encoded sequences; after the optional
                array_trafo the layout is [N_samples, seq_len, 4].
            is_rc: per-sample flags; True means the sample was stored
                reverse-complemented and is flipped back to forward here.

        Returns:
            List of sequence strings (positions with no one-hot hit are 'N').
        """
        if self.array_trafo is not None:
            input_set = self.array_trafo.to_standard(input_set)
        str_sets = []
        # Bug fix: the previous loop used range(len(input_set[0])) — the
        # sequence length, not the sample count — silently truncating the
        # output whenever seq_len < N_samples. Iterate samples via is_rc.
        for sample_i, rcd in enumerate(is_rc):
            str_set = np.empty(input_set.shape[1], dtype=str)
            str_set[:] = "N"
            conv_seq = input_set[sample_i, ...]
            if rcd:
                # The sequence was reverse-complemented: flipping both the
                # position axis and the channel axis (A<->T, C<->G for the
                # ACGT alphabet) restores the forward sequence.
                conv_seq = conv_seq[::-1, ::-1]
            for i, letter in enumerate(self.alphabet):
                str_set[conv_seq[:, i] == 1] = letter
            str_sets.append("".join(str_set.tolist()))
        return str_sets
class StrSeqExtractor(object):
    """Return string DNA sequences in forward orientation."""

    def __init__(self, array_trafo=None):
        # optional reshaper bringing model-layout arrays into a flat list
        self.array_trafo = array_trafo

    def to_str(self, input_set, is_rc):
        """Return sequences as strings, flipping reverse-complemented ones.

        Args:
            input_set: sequences in string representation.
            is_rc: per-sample flags; True means the sample is stored
                reverse-complemented and is converted back to forward.
        """
        if self.array_trafo is not None:
            input_set = self.array_trafo.to_standard(input_set)
        if any(is_rc):
            input_set = [rc_str(seq) if flipped else seq
                         for flipped, seq in zip(is_rc, input_set)]
        return input_set
class VariantLocalisation(object):
    """Column-oriented table recording, per dataloader output line, where a
    variant falls within the generated sequence and how to mutate it.

    Rows are appended via `append_multi`; `data` maps column name -> list.
    """

    def __init__(self):
        # all column names of the internal table
        self.obj_keys = ["pp_line", "varpos_rel", "ref", "alt", "start", "end", "id", "do_mutate", "strand"]
        # columns initialised to NaN for every new row (filled in later)
        self.dummy_initialisable_keys = ["varpos_rel", "ref", "alt", "start", "end", "id", "strand"]
        self.data = {k: [] for k in self.obj_keys}

    def append_multi(self, seq_key, ranges_input_obj, vcf_records, process_lines, process_ids, process_seq_fields):
        """Append one row per VCF record, localising each variant within the
        region that produced the corresponding dataloader output line.

        Args:
            seq_key: sequence field these rows refer to.
            ranges_input_obj: metadata ranges (dict with "start"/"end" and
                optionally "strand"); starts appear to be 0-based bed-style.
            vcf_records: variant records (indels are rejected by assertion).
            process_lines: per-record index into `ranges_input_obj`.
            process_ids: per-record identifier (stored as str).
            process_seq_fields: per-record collection of sequence fields the
                variant should be applied to.
        """
        import six
        strand_avail = False
        strand_default = "."
        if ("strand" in ranges_input_obj) and (isinstance(ranges_input_obj["strand"], list) or
                                               isinstance(ranges_input_obj["strand"], np.ndarray)):
            strand_avail = True
        # If the strand is a single string value rather than a list or numpy
        # array then use that as a default for everything
        if ("strand" in ranges_input_obj) and isinstance(ranges_input_obj["strand"], six.string_types):
            strand_default = ranges_input_obj["strand"]
        # Iterate over all variants
        for i, record in enumerate(vcf_records):
            assert not is_indel_wrapper(record)  # Catch indels, those need a slightly modified processing
            ranges_input_i = process_lines[i]
            # Initialise the new values as missing values, and as skip for processing
            new_vals = {k: np.nan for k in self.dummy_initialisable_keys}
            new_vals["do_mutate"] = False
            new_vals["pp_line"] = i
            new_vals["id"] = str(process_ids[i])
            new_vals["strand"] = strand_default
            # If the corresponding sequence key should be modified, then calculate the relative variant position
            if seq_key in process_seq_fields[i]:
                pre_new_vals = {}
                # convert 0-based (bed-style) start to 1-based — TODO confirm
                pre_new_vals["start"] = ranges_input_obj["start"][ranges_input_i] + 1
                pre_new_vals["end"] = ranges_input_obj["end"][ranges_input_i]
                pre_new_vals["varpos_rel"] = int(record.POS) - pre_new_vals["start"]
                # Check if variant position is valid (within the region)
                if not ((pre_new_vals["varpos_rel"] < 0) or
                        (pre_new_vals["varpos_rel"] > (pre_new_vals["end"] - pre_new_vals["start"] + 1))):
                    # If variant lies in the region then actually mutate it with the first alternative allele
                    pre_new_vals["do_mutate"] = True
                    pre_new_vals["ref"] = str(record.REF)
                    pre_new_vals["alt"] = str(record.ALT[0])
                    if strand_avail:
                        pre_new_vals["strand"] = ranges_input_obj["strand"][ranges_input_i]
                # overwrite the NaN placeholders with the actual data
                for k in pre_new_vals:
                    new_vals[k] = pre_new_vals[k]
            for k in new_vals:
                self.data[k].append(new_vals[k])

    def subset_to_mutate(self):
        """Return a new instance containing only rows with do_mutate True."""
        sel_mutate = [i for i, dm in enumerate(self.data["do_mutate"]) if dm]
        data_subset = {k: [self.data[k][i] for i in sel_mutate] for k in self.data}
        new_obj = self.__class__()
        new_obj.data = data_subset
        return new_obj

    def get_seq_lens(self):
        """Per-row sequence length (inclusive 1-based coordinates)."""
        lens = np.array([end - start + 1 for start, end in zip(self.data["start"], self.data["end"])])
        return lens

    def strand_vals_valid(self):
        """True when every strand entry is one of '+', '-', '*', '.'."""
        return all([el in ["+", "-", "*", "."] for el in self.data["strand"]])

    def get(self, item, trafo=None):
        """Return column `item` as a numpy array, with `trafo` optionally
        applied element-wise first."""
        vals = self.data.__getitem__(item)
        if trafo is not None:
            vals = [trafo(el) for el in vals]
        return np.array(vals)

    def __getitem__(self, item):
        return self.get(item)

    def num_entries(self):
        """Number of rows recorded so far."""
        return len(self.data["pp_line"])

    def to_df(self):
        """Return the table as a pandas DataFrame."""
        import pandas as pd
        return pd.DataFrame(self.data)
| [
"logging.getLogger",
"logging.NullHandler",
"collections.OrderedDict",
"deepdish.io.save",
"pybedtools.BedTool",
"numpy.array",
"numpy.empty",
"vcf.model._Record",
"pandas.DataFrame",
"re.sub",
"pysam.tabix_index",
"deepdish.io.load"
] | [((382, 409), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (399, 409), False, 'import logging\n'), ((428, 449), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (447, 449), False, 'import logging\n'), ((805, 833), 'pybedtools.BedTool', 'pybedtools.BedTool', (['input_fn'], {}), '(input_fn)\n', (823, 833), False, 'import pybedtools\n'), ((1379, 1414), 're.sub', 're.sub', (['"""[^\\\\w\\\\.\\\\:\\\\s/]+"""', '"""_"""', 's'], {}), "('[^\\\\w\\\\.\\\\:\\\\s/]+', '_', s)\n", (1385, 1414), False, 'import re\n'), ((1488, 1510), 're.sub', 're.sub', (['"""\\\\s+"""', '"""_"""', 's'], {}), "('\\\\s+', '_', s)\n", (1494, 1510), False, 'import re\n'), ((2382, 2411), 'deepdish.io.save', 'deepdish.io.save', (['fname', 'data'], {}), '(fname, data)\n', (2398, 2411), False, 'import deepdish\n'), ((3336, 3359), 'deepdish.io.load', 'deepdish.io.load', (['fname'], {}), '(fname)\n', (3352, 3359), False, 'import deepdish\n'), ((5696, 5874), 'vcf.model._Record', 'vcf.model._Record', (['input_record.CHROM', 'input_record.POS', 'input_record.ID', 'input_record.REF', 'alt', 'input_record.QUAL', 'input_record.FILTER', 'info_tag', 'input_record.FORMAT', '{}'], {}), '(input_record.CHROM, input_record.POS, input_record.ID,\n input_record.REF, alt, input_record.QUAL, input_record.FILTER, info_tag,\n input_record.FORMAT, {})\n', (5713, 5874), False, 'import vcf\n'), ((970, 1024), 'pysam.tabix_index', 'pysam.tabix_index', (['fn'], {'force': 'force_tabix', 'preset': '"""vcf"""'}), "(fn, force=force_tabix, preset='vcf')\n", (987, 1024), False, 'import pysam\n'), ((25219, 25233), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (25227, 25233), True, 'import numpy as np\n'), ((25432, 25455), 'pandas.DataFrame', 'pd.DataFrame', (['self.data'], {}), '(self.data)\n', (25444, 25455), True, 'import pandas as pd\n'), ((1970, 1983), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1981, 1983), False, 'from collections import 
OrderedDict\n'), ((20408, 20447), 'numpy.empty', 'np.empty', (['input_set.shape[1]'], {'dtype': 'str'}), '(input_set.shape[1], dtype=str)\n', (20416, 20447), True, 'import numpy as np\n'), ((10701, 10805), 'numpy.array', 'np.array', (['[variant_record.POS - self.centered_l_offset, variant_record.POS + self.\n centered_r_offset]'], {}), '([variant_record.POS - self.centered_l_offset, variant_record.POS +\n self.centered_r_offset])\n', (10709, 10805), True, 'import numpy as np\n'), ((15525, 15577), 'numpy.array', 'np.array', (['[x.name for x in model_obj.schema.targets]'], {}), '([x.name for x in model_obj.schema.targets])\n', (15533, 15577), True, 'import numpy as np\n'), ((15715, 15763), 'numpy.array', 'np.array', (['model_obj.schema.targets.column_labels'], {}), '(model_obj.schema.targets.column_labels)\n', (15723, 15763), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
import torch
from sklearn.metrics import log_loss, roc_auc_score,f1_score,classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
import sys
import os
import torch.nn as nn
import numpy as np
import torch.utils.data as Data
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import log_loss, roc_auc_score
from collections import OrderedDict, namedtuple, defaultdict
import random
class Deepfm(nn.Module):
    """DeepFM binary classifier: order-1 linear + order-2 FM + DNN parts,
    combined into a single logit and passed through a sigmoid."""

    def __init__(self, feat_sizes, sparse_feature_columns, sparse_shared_embedding_map,
                 dense_feature_columns,
                 model_checkpoint_path, dnn_hidden_units=[400, 400, 400],
                 dnn_dropout=0.0, ebedding_size=4,
                 l2_reg_linear=0.00001, l2_reg_embedding=0.00001,
                 l2_reg_dnn=0, init_std=0.0001, seed=1024, label_thres=0.5,
                 device='cpu'):
        # NOTE(review): `ebedding_size` is a typo for "embedding_size" but
        # is part of the public signature; l2_reg_embedding / l2_reg_dnn /
        # seed are accepted but never used in the visible code.
        super(Deepfm, self).__init__()
        self.label_thres = label_thres  # decision threshold for f1 / report
        self.feat_sizes = feat_sizes
        self.model_checkpoint_path = model_checkpoint_path
        self.device = device
        self.sparse_feature_columns = sparse_feature_columns
        self.sparse_shared_embedding_map = sparse_shared_embedding_map
        self.dense_feature_columns = dense_feature_columns
        self.embedding_size = ebedding_size
        self.l2_reg_linear = l2_reg_linear
        # map each feature name to its (start, end) column in the input matrix
        self.feature_index = self.build_input_features(self.feat_sizes)
        self.bias = nn.Parameter(torch.zeros((1,)))
        # linear weights for the dense features (order-1 term)
        self.weight = nn.Parameter(torch.Tensor(len(self.dense_feature_columns), 1)).to(device)
        torch.nn.init.normal_(self.weight, mean=0, std=0.0001)
        # 1-d embeddings: per-category weights of the linear (order-1) term
        self.embedding_dict1 = self.create_embedding_matrix(self.sparse_feature_columns, self.sparse_shared_embedding_map,
                                                            feat_sizes, 1,
                                                            sparse=False, device=self.device)
        # k-d embeddings shared by the FM (order-2) and DNN parts
        self.embedding_dict2 = self.create_embedding_matrix(self.sparse_feature_columns, self.sparse_shared_embedding_map,
                                                            feat_sizes, self.embedding_size,
                                                            sparse=False, device=self.device)
        # dnn
        self.dropout = nn.Dropout(dnn_dropout)
        self.dnn_input_size = self.embedding_size * len(self.sparse_feature_columns) + len(self.dense_feature_columns)
        hidden_units = [self.dnn_input_size] + dnn_hidden_units
        self.linears = nn.ModuleList(
            [nn.Linear(hidden_units[i], hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])
        self.relus = nn.ModuleList(
            [nn.ReLU() for i in range(len(hidden_units) - 1)])
        for name, tensor in self.linears.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=init_std)
        # self.linears = self.linears.to(device)
        self.dnn_linear = nn.Linear(
            dnn_hidden_units[-1], 1, bias=False).to(device)
        self.to(device)
    def forward(self, X):
        '''
        Compute prediction probabilities.

        :param X: float tensor [batch_size, n_features]; columns are looked
            up via self.feature_index (one column per feature).
        :return: y_pred, sigmoid probabilities of shape [batch_size, 1]
        '''
        '''
        FM liner
        '''
        # order-1 term: 1-d embeddings for sparse features + linear weights
        # for dense features
        sparse_embedding_list1 = [self.embedding_dict1[feat](
            X[:, self.feature_index[feat][0]:self.feature_index[feat][1]].long())
            for feat in self.sparse_feature_columns]
        dense_value_list2 = [X[:, self.feature_index[feat][0]:self.feature_index[feat][1]]
                             for feat in self.dense_feature_columns]
        linear_sparse_logit = torch.sum(
            torch.cat(sparse_embedding_list1, dim=-1), dim=-1, keepdim=False)
        linear_dense_logit = torch.cat(
            dense_value_list2, dim=-1).matmul(self.weight)
        logit = linear_sparse_logit + linear_dense_logit
        sparse_embedding_list = [self.embedding_dict2[feat](
            X[:, self.feature_index[feat][0]:self.feature_index[feat][1]].long())
            for feat in self.sparse_feature_columns]
        '''
        FM second
        '''
        # order-2 term via the (sum)^2 - sum(squares) identity
        fm_input = torch.cat(sparse_embedding_list, dim=1)  # shape: (batch_size, field_size, embedding_size)
        square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)  # shape: (batch_size, 1, embedding_size)
        sum_of_square = torch.sum(torch.pow(fm_input, 2), dim=1, keepdim=True)  # shape: (batch_size, 1, embedding_size)
        cross_term = square_of_sum - sum_of_square
        cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)  # shape: (batch_size, 1)
        logit += cross_term
        '''
        DNN
        '''
        # MLP over [flattened k-d sparse embeddings, dense values]
        dnn_sparse_input = torch.cat(sparse_embedding_list, dim=1)
        batch_size = dnn_sparse_input.shape[0]
        dnn_sparse_input = dnn_sparse_input.reshape(batch_size, -1)
        # dnn_sparse_input shape: [batch_size, len(sparse_feat) * embedding_size]
        dnn_dense_input = torch.cat(dense_value_list2, dim=-1)
        # dnn_dense_input shape: [batch_size, len(dense_feat)]
        dnn_total_input = torch.cat([dnn_sparse_input, dnn_dense_input], dim=-1)
        deep_input = dnn_total_input
        for i in range(len(self.linears)):
            fc = self.linears[i](deep_input)
            fc = self.relus[i](fc)
            fc = self.dropout(fc)
            deep_input = fc
        dnn_output = self.dnn_linear(deep_input)
        logit += dnn_output
        '''
        output
        '''
        y_pred = torch.sigmoid(logit + self.bias)
        return y_pred
    def fit(self, train_input, y_label, val_input, y_val, batch_size=256, epochs=15, verbose=5):
        """Train with Adam + binary cross-entropy, L2-regularised.

        After each epoch the model is evaluated on the validation data and a
        checkpoint is saved whenever the weighted f1 score improves.

        :param train_input: mapping feature name -> numpy column
        :param y_label: numpy array of binary training labels
        :param val_input: validation features (same layout as train_input)
        :param y_val: validation labels
        :param batch_size: mini-batch size
        :param epochs: number of passes over the training data
        :param verbose: print train loss/AUC every `verbose` epochs
        """
        # assemble the [n_samples, n_features] training matrix in
        # feature_index order; promote 1-d columns to 2-d first
        x = [train_input[feature] for feature in self.feature_index]
        for i in range(len(x)):
            if len(x[i].shape) == 1:
                x[i] = np.expand_dims(x[i], axis=1)
        train_tensor_data = Data.TensorDataset(torch.from_numpy(np.concatenate(x, axis=-1)), torch.from_numpy(y_label))
        train_loader = DataLoader(dataset=train_tensor_data, shuffle=True, batch_size=batch_size)
        print(self.device, end="\n")
        model = self.train()
        loss_func = F.binary_cross_entropy
        # loss_func = F.binary_cross_entropy_with_logits
        optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0)
        # optimizer = optim.Adagrad(model.parameters(),lr=0.01)
        sample_num = len(train_tensor_data)
        steps_per_epoch = (sample_num - 1) // batch_size + 1
        print("Train on {0} samples, {1} steps per epoch".format(
            len(train_tensor_data), steps_per_epoch))
        f1_bst = 0  # best validation f1 seen so far
        for epoch in range(epochs):
            loss_epoch = 0
            total_loss_epoch = 0.0
            train_result = {}
            pred_ans = []
            true_ans = []
            # anomaly detection enabled for the whole epoch — slows training;
            # presumably left over from debugging
            with torch.autograd.set_detect_anomaly(True):
                for index, (x_train, y_train) in enumerate(train_loader):
                    x = x_train.to(self.device).float()
                    y = y_train.to(self.device).float()
                    y_pred = model(x).squeeze()
                    optimizer.zero_grad()
                    loss = loss_func(y_pred, y.squeeze(), reduction='mean')
                    # L2 norm
                    loss = loss + self.l2_reg_linear * self.get_L2_Norm()
                    loss.backward(retain_graph=True)
                    optimizer.step()
                    total_loss_epoch = total_loss_epoch + loss.item()
                    y_pred = y_pred.cpu().data.numpy()  # .squeeze()
                    pred_ans.append(y_pred)
                    true_ans.append(y.squeeze().cpu().data.numpy())
            if (epoch % verbose == 0):
                print('epoch %d train loss is %.4f train AUC is %.4f' %
                      (epoch, total_loss_epoch / steps_per_epoch, roc_auc_score(np.concatenate(true_ans), np.concatenate(pred_ans))))
                # self.val_auc_logloss(val_input, y_val, batch_size=batch_size)
            # checkpoint on validation f1 improvement
            f1, report = self.val_auc_logloss(val_input, y_val, batch_size=batch_size)
            if f1_bst <= f1:
                f1_bst = f1
                torch.save(model, self.model_checkpoint_path)
                print(report)
    def predict(self, test_input, batch_size=256, use_double=False):
        """
        :param x: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
        :param batch_size: Integer. If unspecified, it will default to 256.
        :return: Numpy array(s) of predictions. When `use_double` is True a
            (float64 array, list of per-batch arrays) tuple is returned.
        """
        model = self.eval()
        # assemble the input matrix in feature_index order
        x = [test_input[feature] for feature in self.feature_index]
        for i in range(len(x)):
            if len(x[i].shape) == 1:
                # promote 1-d feature columns to 2-d so they concatenate
                x[i] = np.expand_dims(x[i], axis=1)
        tensor_data = Data.TensorDataset(
            torch.from_numpy(np.concatenate(x, axis=-1)))
        test_loader = DataLoader(
            dataset=tensor_data, shuffle=False, batch_size=batch_size)
        pred_ans = []
        with torch.no_grad():
            for index, x_test in enumerate(test_loader):
                x = x_test[0].to(self.device).float()
                # y = y_test.to(self.device).float()
                y_pred = model(x).cpu().data.numpy()  # .squeeze()
                pred_ans.append(y_pred)
        if use_double:
            return np.concatenate(pred_ans).astype("float64"), pred_ans
        else:
            return np.concatenate(pred_ans)
def val_auc_logloss(self, val_input, y_val, batch_size=50000, use_double=False):
pred_ans = self.predict(val_input, batch_size)
pred_binary = pred_ans.reshape((1,-1)).tolist()[0]
pred_binary =[ 1 if i >self.label_thres else 0 for i in pred_binary]
f1 = f1_score(y_val,pred_binary, average='weighted')
report = classification_report(y_val,pred_binary)
print("test LogLoss is %.4f test AUC is %.4f"%(log_loss(y_val, pred_ans),roc_auc_score(y_val, pred_ans)) )
print(f"f1_score :{f1}")
return f1,report
def get_L2_Norm(self ):
loss = torch.zeros((1,), device=self.device)
loss = loss + torch.norm(self.weight)
for t in self.embedding_dict1.parameters():
loss = loss+ torch.norm(t)
for t in self.embedding_dict2.parameters():
loss = loss+ torch.norm(t)
return loss
def build_input_features(self, feat_sizes):
# Return OrderedDict: {feature_name:(start, start+dimension)}
features = OrderedDict()
start = 0
for feat in feat_sizes:
feat_name = feat
if feat_name in features:
continue
features[feat_name] = (start, start + 1)
start += 1
return features
def create_embedding_matrix(self ,sparse_feature_columns,
shared_embedding_map,
feat_sizes,embedding_size,init_std=0.0001, sparse=False, device='cpu'):
embedding_dict = nn.ModuleDict(
{feat: nn.Embedding(feat_sizes[feat], embedding_size, sparse=False)
for feat in sparse_feature_columns}
)
update_dict =nn.ModuleDict({feat:embedding_dict[shared_feat] for feat,shared_feat in shared_embedding_map.items()})
embedding_dict.update(update_dict)
for tensor in embedding_dict.values():
nn.init.normal_(tensor.weight, mean=0, std=init_std)
return embedding_dict.to(device) | [
"torch.nn.ReLU",
"torch.nn.Dropout",
"sklearn.metrics.classification_report",
"torch.pow",
"torch.from_numpy",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.log_loss",
"torch.sum",
"numpy.concatenate",
"torch.nn.Embedding",
"collections.OrderedDict",
"torch.autograd.set_detect_anomaly",
... | [((1850, 1904), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['self.weight'], {'mean': '(0)', 'std': '(0.0001)'}), '(self.weight, mean=0, std=0.0001)\n', (1871, 1904), False, 'import torch\n'), ((2561, 2584), 'torch.nn.Dropout', 'nn.Dropout', (['dnn_dropout'], {}), '(dnn_dropout)\n', (2571, 2584), True, 'import torch.nn as nn\n'), ((4448, 4487), 'torch.cat', 'torch.cat', (['sparse_embedding_list'], {'dim': '(1)'}), '(sparse_embedding_list, dim=1)\n', (4457, 4487), False, 'import torch\n'), ((5081, 5120), 'torch.cat', 'torch.cat', (['sparse_embedding_list'], {'dim': '(1)'}), '(sparse_embedding_list, dim=1)\n', (5090, 5120), False, 'import torch\n'), ((5386, 5422), 'torch.cat', 'torch.cat', (['dense_value_list2'], {'dim': '(-1)'}), '(dense_value_list2, dim=-1)\n', (5395, 5422), False, 'import torch\n'), ((5557, 5611), 'torch.cat', 'torch.cat', (['[dnn_sparse_input, dnn_dense_input]'], {'dim': '(-1)'}), '([dnn_sparse_input, dnn_dense_input], dim=-1)\n', (5566, 5611), False, 'import torch\n'), ((5987, 6019), 'torch.sigmoid', 'torch.sigmoid', (['(logit + self.bias)'], {}), '(logit + self.bias)\n', (6000, 6019), False, 'import torch\n'), ((6486, 6560), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_tensor_data', 'shuffle': '(True)', 'batch_size': 'batch_size'}), '(dataset=train_tensor_data, shuffle=True, batch_size=batch_size)\n', (6496, 6560), False, 'from torch.utils.data import DataLoader\n'), ((9471, 9540), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'tensor_data', 'shuffle': '(False)', 'batch_size': 'batch_size'}), '(dataset=tensor_data, shuffle=False, batch_size=batch_size)\n', (9481, 9540), False, 'from torch.utils.data import DataLoader\n'), ((10345, 10393), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'pred_binary'], {'average': '"""weighted"""'}), "(y_val, pred_binary, average='weighted')\n", (10353, 10393), False, 'from sklearn.metrics import log_loss, roc_auc_score, f1_score, classification_report\n'), 
((10411, 10452), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'pred_binary'], {}), '(y_val, pred_binary)\n', (10432, 10452), False, 'from sklearn.metrics import log_loss, roc_auc_score, f1_score, classification_report\n'), ((10685, 10722), 'torch.zeros', 'torch.zeros', (['(1,)'], {'device': 'self.device'}), '((1,), device=self.device)\n', (10696, 10722), False, 'import torch\n'), ((11120, 11133), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11131, 11133), False, 'from collections import OrderedDict, namedtuple, defaultdict\n'), ((1702, 1719), 'torch.zeros', 'torch.zeros', (['(1,)'], {}), '((1,))\n', (1713, 1719), False, 'import torch\n'), ((3953, 3994), 'torch.cat', 'torch.cat', (['sparse_embedding_list1'], {'dim': '(-1)'}), '(sparse_embedding_list1, dim=-1)\n', (3962, 3994), False, 'import torch\n'), ((4572, 4612), 'torch.sum', 'torch.sum', (['fm_input'], {'dim': '(1)', 'keepdim': '(True)'}), '(fm_input, dim=1, keepdim=True)\n', (4581, 4612), False, 'import torch\n'), ((4692, 4714), 'torch.pow', 'torch.pow', (['fm_input', '(2)'], {}), '(fm_input, 2)\n', (4701, 4714), False, 'import torch\n'), ((4857, 4900), 'torch.sum', 'torch.sum', (['cross_term'], {'dim': '(2)', 'keepdim': '(False)'}), '(cross_term, dim=2, keepdim=False)\n', (4866, 4900), False, 'import torch\n'), ((6435, 6460), 'torch.from_numpy', 'torch.from_numpy', (['y_label'], {}), '(y_label)\n', (6451, 6460), False, 'import torch\n'), ((9594, 9609), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9607, 9609), False, 'import torch\n'), ((10022, 10046), 'numpy.concatenate', 'np.concatenate', (['pred_ans'], {}), '(pred_ans)\n', (10036, 10046), True, 'import numpy as np\n'), ((10746, 10769), 'torch.norm', 'torch.norm', (['self.weight'], {}), '(self.weight)\n', (10756, 10769), False, 'import torch\n'), ((12052, 12104), 'torch.nn.init.normal_', 'nn.init.normal_', (['tensor.weight'], {'mean': '(0)', 'std': 'init_std'}), '(tensor.weight, mean=0, 
std=init_std)\n', (12067, 12104), True, 'import torch.nn as nn\n'), ((2823, 2870), 'torch.nn.Linear', 'nn.Linear', (['hidden_units[i]', 'hidden_units[i + 1]'], {}), '(hidden_units[i], hidden_units[i + 1])\n', (2832, 2870), True, 'import torch.nn as nn\n'), ((2962, 2971), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2969, 2971), True, 'import torch.nn as nn\n'), ((3125, 3170), 'torch.nn.init.normal_', 'nn.init.normal_', (['tensor'], {'mean': '(0)', 'std': 'init_std'}), '(tensor, mean=0, std=init_std)\n', (3140, 3170), True, 'import torch.nn as nn\n'), ((3247, 3293), 'torch.nn.Linear', 'nn.Linear', (['dnn_hidden_units[-1]', '(1)'], {'bias': '(False)'}), '(dnn_hidden_units[-1], 1, bias=False)\n', (3256, 3293), True, 'import torch.nn as nn\n'), ((4049, 4085), 'torch.cat', 'torch.cat', (['dense_value_list2'], {'dim': '(-1)'}), '(dense_value_list2, dim=-1)\n', (4058, 4085), False, 'import torch\n'), ((6308, 6336), 'numpy.expand_dims', 'np.expand_dims', (['x[i]'], {'axis': '(1)'}), '(x[i], axis=1)\n', (6322, 6336), True, 'import numpy as np\n'), ((6406, 6432), 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (6420, 6432), True, 'import numpy as np\n'), ((7336, 7375), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7369, 7375), False, 'import torch\n'), ((9313, 9341), 'numpy.expand_dims', 'np.expand_dims', (['x[i]'], {'axis': '(1)'}), '(x[i], axis=1)\n', (9327, 9341), True, 'import numpy as np\n'), ((9419, 9445), 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (9433, 9445), True, 'import numpy as np\n'), ((10849, 10862), 'torch.norm', 'torch.norm', (['t'], {}), '(t)\n', (10859, 10862), False, 'import torch\n'), ((10942, 10955), 'torch.norm', 'torch.norm', (['t'], {}), '(t)\n', (10952, 10955), False, 'import torch\n'), ((11696, 11756), 'torch.nn.Embedding', 'nn.Embedding', (['feat_sizes[feat]', 'embedding_size'], {'sparse': '(False)'}), 
'(feat_sizes[feat], embedding_size, sparse=False)\n', (11708, 11756), True, 'import torch.nn as nn\n'), ((8696, 8741), 'torch.save', 'torch.save', (['model', 'self.model_checkpoint_path'], {}), '(model, self.model_checkpoint_path)\n', (8706, 8741), False, 'import torch\n'), ((10508, 10533), 'sklearn.metrics.log_loss', 'log_loss', (['y_val', 'pred_ans'], {}), '(y_val, pred_ans)\n', (10516, 10533), False, 'from sklearn.metrics import log_loss, roc_auc_score\n'), ((10534, 10564), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_val', 'pred_ans'], {}), '(y_val, pred_ans)\n', (10547, 10564), False, 'from sklearn.metrics import log_loss, roc_auc_score\n'), ((9935, 9959), 'numpy.concatenate', 'np.concatenate', (['pred_ans'], {}), '(pred_ans)\n', (9949, 9959), True, 'import numpy as np\n'), ((8384, 8408), 'numpy.concatenate', 'np.concatenate', (['true_ans'], {}), '(true_ans)\n', (8398, 8408), True, 'import numpy as np\n'), ((8410, 8434), 'numpy.concatenate', 'np.concatenate', (['pred_ans'], {}), '(pred_ans)\n', (8424, 8434), True, 'import numpy as np\n')] |
import os
import re
import sys
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox
from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon
from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import scoreatpercentile
from fluff.color import create_colormap
from fluff.config import FONTSIZE
from fluff.fluffio import load_annotation
from fluff.track import Track
DEFAULT_COLORS = ["#e41a1c", "#4daf4a", "#377eb8"]
# Arrow style used for gene-strand direction markers: a head at the end only.
# Note: the former `GENE_ARROW = "->"` assignment was dead code (immediately
# overwritten by the ArrowStyle below) and has been removed.
GENE_ARROW = ArrowStyle._Curve(beginarrow=False, endarrow=True, head_length=.4, head_width=.4)
def colortext(x, y, texts, colors, **kwargs):
    """Draw a row of text fragments at one anchor point, each in its own color.

    x, y    -- anchor position (axes-fraction coordinates via boxcoords)
    texts   -- list of strings, packed left-to-right
    colors  -- list of matplotlib colors, one per string
    Keyword arguments: ax (target axes, required), horizontalalignment
    ("left"/"center"/"right"), verticalalignment ("top"/"center"/"bottom"),
    clip_on (default False), fontproperties.
    """
    # Map alignment keywords to the fractional box_alignment values that
    # AnnotationBbox expects.
    pos = {
        "right": 1,
        "center": 0.5,
        "left": 0,
        "top": 0,
        "bottom": 1
    }
    ax = kwargs.get("ax")
    verticalalignment = pos[kwargs.get("verticalalignment", "center")]
    horizontalalignment = pos[kwargs.get("horizontalalignment", "center")]
    annotation_clip = kwargs.get("clip_on", False)
    fontproperties = kwargs.get("fontproperties", None)
    textprops = {"fontproperties": fontproperties}
    # One TextArea per fragment so each fragment can carry its own color.
    areas = []
    for t, c in zip(texts, colors):
        textprops["color"] = c
        areas.append(TextArea(t, textprops=textprops))
    txt = HPacker(children=areas,
                  align="baseline",
                  pad=0, sep=0)
    bbox = AnnotationBbox(txt, xy=(x, y),
                          xycoords='data',
                          annotation_clip=annotation_clip,
                          frameon=False,
                          boxcoords=("axes fraction"),
                          box_alignment=(
                              horizontalalignment,
                              verticalalignment),
                          )
    ax.add_artist(bbox)
def hide_axes(ax):
    """Strip tick labels, tick locators and spine colors from *ax*."""
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_major_formatter(NullFormatter())
        axis.set_major_locator(NullLocator())
    for spine in ax.spines.values():
        spine.set_color('none')
def heatmap_plot(data, ind, outfile, tracks, titles, colors, bgcolors, scale, tscale, labels, fontsize, colorbar=True):
    """Save a multi-track heatmap figure to *outfile*.

    data      -- dict mapping each track to a 2-D matrix of signal values
    ind       -- row order (e.g. from clustering) applied to every matrix
    outfile   -- output file name; '.png' is appended for unknown extensions
    tracks    -- keys into *data*, one heatmap panel per track
    titles    -- per-track panel titles
    colors    -- foreground colors for each track's colormap (cycled)
    bgcolors  -- background colors for each track's colormap (cycled)
    scale     -- base colormap maximum (vmax = scale * tscale[i])
    tscale    -- per-track multiplier applied to *scale*
    labels    -- per-row cluster labels or None; when given and matching
                 len(ind), an extra cluster annotation column is drawn
    fontsize  -- base font size (tick labels use fontsize / 1.25)
    colorbar  -- draw a horizontal colorbar beneath every heatmap
    """
    font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
    label_ratio = 4.0
    # space between heatmaps
    btw_space = 0.02
    plot_width = 1.75 * len(tracks) + btw_space * len(tracks)
    plot_height = 6
    width_ratios = [label_ratio] * len(tracks)
    numplots = len(tracks)
    # Reserve a narrow extra column for the cluster-label annotation.
    if labels is not None and len(labels) == len(ind):
        plot_width += 1 / label_ratio
        numplots += 1
        width_ratios += [1]
    # Create figure
    fig = plt.figure(figsize=(plot_width, plot_height))
    # Create subplot layout
    gs = gridspec.GridSpec(1, numplots, width_ratios=width_ratios, )
    axes = []
    for i, track in enumerate(tracks):
        # Per-track colormap from its background/foreground color pair.
        c = create_colormap(bgcolors[i % len(bgcolors)], colors[i % len(colors)])
        ax = fig.add_subplot(gs[i])
        ax.set_title(titles[i], fontproperties=font, y=1)
        axes.append(ax)
        cax_mat = ax.pcolormesh(data[track][ind], cmap=c, vmin=0, vmax=scale * tscale[i], rasterized=True)
        hide_axes(ax)
        # ylim of the last track is reused below for the cluster column;
        # NOTE(review): this assumes tracks is non-empty.
        ylim = ax.get_ylim()
        #fig.colorbar(cax_mat, orientation="horizontal", pad=0.05)
        if colorbar:
            # Attach a slim horizontal colorbar beneath this heatmap.
            divider = make_axes_locatable(ax)
            ax_cb = divider.new_vertical(size="2%", pad=0.1, pack_start=True)
            fig.add_axes(ax_cb)
            tick_locator = MaxNLocator(nbins=3)
            cbar = fig.colorbar(cax_mat, cax=ax_cb, orientation="horizontal", ticks=tick_locator)
            cbar_labels = cbar.ax.get_xticklabels()
            for lab in cbar_labels:
                lab.set_fontsize(fontsize / 1.25)
            cbar_ticks = cbar.ax.get_xticks()
            if cbar_ticks[0] == 0:
                # if the label is at the start of the colorbar
                # move it a bit inside to avoid overlapping
                # with other labels
                cbar_labels[0].set_horizontalalignment('left')
            if cbar_ticks[-1] == 1:
                # if the label is at the end of the colorbar
                # move it a bit inside to avoid overlapping
                # with other labels
                cbar_labels[-1].set_horizontalalignment('right')
    if labels is not None and len(labels) == len(ind):
        # Extra column that marks cluster boundaries and numbers.
        axcluster = fig.add_subplot(gs[len(tracks)])
        axcluster.axis('off')
        if colorbar:
            # Keep this column vertically aligned with the heatmaps by
            # reserving the same colorbar strip (left blank).
            divider = make_axes_locatable(axcluster)
            ax_cb = divider.new_vertical(size="2%", pad=0.1, pack_start=True)
            axbl = fig.add_axes(ax_cb)
            axbl.axis('off')
        # NOTE(review): min_y and max_y are never used.
        min_y, max_y = ylim
        s = 0
        axcluster.hlines(y=0, xmin=0, xmax=1, color="grey",
                         linewidth=0.5, alpha=0.5, linestyle='solid')
        labels = np.array(labels)
        # Smaller cluster on the top ([::-1])
        for i in range(max(labels) + 1)[::-1]:
            prev = s
            s += sum(labels == i)
            axcluster.hlines(y=s, xmin=0, xmax=1, color="grey",
                             linewidth=0.5, alpha=0.5, linestyle='solid')
            axcluster.text(0.5, (prev + s) / 2,
                           str(i + 1),
                           verticalalignment="center",
                           horizontalalignment="center",
                           fontproperties=font)
        axcluster.set_ylim(ylim)
    fig.subplots_adjust(wspace=btw_space, hspace=0)
    # Fall back to PNG for unrecognised output extensions.
    ext = outfile.split(".")[-1]
    if ext not in ["png", "svg", "ps", "eps", "pdf"]:
        outfile += ".png"
    sys.stderr.write("Saving figure\n")
    # Object orientated pyplot
    fig.savefig(outfile, dpi=600, bbox_inches='tight')
def coverage_plot(ax, x, data, color="red", percs=None):
    """
    Draw a summary coverage profile: the median line plus shaded
    central-percentile bands.

    ax = matplotlib axes instance
    x = x-axis coordinates
    data = profile data (one row per region, one column per position)
    color = color in any way matplotlib accepts
    percs = central percentile bands to shade (default [50, 90]);
        NOTE(review): the alphas list below assumes exactly two bands —
        extra entries are silently dropped by zip.
    """
    # Might change this into an argument for the function
    if percs is None:
        percs = [50, 90]
    # Convert each central band (e.g. 90) into its lower-tail percentile
    # (e.g. 5); widest band first after the [::-1].
    percs = [(100 - float(p)) / 2 for p in percs[::-1]]
    alphas = [0.1, 0.4]
    # Convert to numpy array
    vals = np.array(data)
    # Get the median
    m = np.median(vals, axis=0)
    # Draw the minimum percentiles
    lines = [np.array([scoreatpercentile(vals[:, i], perc) for i in range(len(vals[0]))]) for perc in percs] + [m]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the maximum percentiles
    lines = [m] + [np.array([scoreatpercentile(vals[:, i], 100 - perc) for i in range(len(vals[0]))]) for perc in
                   percs[::-1]]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas[::-1]):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the median
    ax.plot(x, m, color="black", alpha=0.95, linewidth=0.8)
    # ax.plot(x, mean(vals, axis = 0), color = "purple", alpha = 0.95, linewidth = 0.8)
def create_grid_figure(nrows, ncolumns, plotwidth=2.0, plotheight=2.0, pad=0.1, padleft=0.1, padright=0.1, padtop=0.1,
                       padbottom=0.1, clean=True):
    """Create a figure holding an nrows x ncolumns grid of equal subplots.

    All sizes are in inches. When *clean* is True every subplot is stripped
    of tick labels and tick locators.

    Returns (figure, axes) where axes is a nested dict indexed as
    axes[row][col].
    """
    fig_w = padleft + (ncolumns * plotwidth) + (pad * (ncolumns - 1)) + padright
    fig_h = padtop + (nrows * plotheight) + (pad * (nrows - 1)) + padbottom
    fig = plt.figure(figsize=(fig_w, fig_h))
    # Fractions of the full figure size, used for manual placement below.
    pad_w = pad / fig_w
    pad_h = pad / fig_h
    cell_w = plotwidth / fig_w
    cell_h = plotheight / fig_h
    axes = {}
    # First pass: create every subplot (optionally without decorations).
    for row in range(nrows):
        axes[row] = {}
        for col in range(ncolumns):
            subplot = plt.subplot(nrows, ncolumns, row * ncolumns + col + 1)
            axes[row][col] = subplot
            if clean:
                for axis in (subplot.xaxis, subplot.yaxis):
                    axis.set_major_formatter(NullFormatter())
                    axis.set_major_locator(NullLocator())
    # Second pass: position each subplot explicitly so padding is exact.
    for row in range(nrows):
        for col in range(ncolumns):
            left = (padleft / fig_w) + (cell_w + pad_w) * col
            bottom = (padbottom / fig_h) + (nrows - row - 1) * (cell_h + pad_h)
            axes[row][col].set_position([left, bottom, cell_w, cell_h])
            for spine in list(axes[row][col].spines.values()):
                spine.set_linewidth(0.8)
    return fig, axes
def profile_screenshot(fname, interval, tracks, fontsize=None, colors=None, scalegroups=None, scale=None, show_scale=True, annotation=None, bgmode="color", fragmentsize=200, dpi=600, rmdup=False, rmrepeats=False, reverse=False, adjscale=False):
    """
    Plot a genome browser like profile
    Parameters
    ----------
    fname: string
        output file name
    interval: string
        interval to plot in "chrom:start-end" format
    tracks: list
        list of track groups; each group is a list of file names whose
        profiles are overlaid in one panel
    fontsize: int, optional
        base font size (defaults to fluff.config.FONTSIZE)
    colors: list, optional
        panel colors, cycled over track groups (defaults to DEFAULT_COLORS)
    scalegroups: list, optional
        groups of panel indices that share one y-scale
    scale: number or list, optional
        fixed y-maximum per track group (cycled)
    show_scale: bool
        draw the y-maximum label in each panel
    annotation: string, optional
        annotation (gene) file to draw beneath the profiles
    bgmode: string
        panel background style, "color" or "stripes"
    fragmentsize, rmdup, rmrepeats:
        read-processing options forwarded to the BAM panels
    reverse: bool
        draw the interval right-to-left
    adjscale: bool
        scale profiles per million mapped reads
    """
    if scalegroups is None:
        scalegroups = []
    if not fontsize:
        fontsize = FONTSIZE
    if not colors:
        colors = DEFAULT_COLORS
    # Plot size and padding definition
    plotwidth = 6
    plotheight = 0.3
    pad = {
        "left": 1.5,
        "right": 0.05,
        "top": 0.05,
        "bottom": 0.05,
        "row": 0,
        "column": 3,
    }
    # adjust width for track names if they are too long
    # kind of a quick hack
    max_len = 0
    for group in tracks:
        names = [os.path.splitext(os.path.basename(t))[0].strip() for t in group]
        l = sum([len(name) for name in names])
        if l > max_len:
            max_len = l
    if max_len > 27:
        pad["left"] = 3
    # Genomic scale
    scale_height = 0.1
    # Annotation track height
    annotation_height = 0.01
    chrom, start, end = re.split(r'[-:]', interval)
    start, end = int(start), int(end)
    if annotation:
        ann = load_annotation([chrom,start,end], annotation)
        if ann:
            annotation_height = 0.2 * len(list(ann.keys()))
        else:
            # Nothing to draw in this interval; drop the annotation panel.
            annotation = False
    nrows = len(tracks)
    wsize = pad["left"] + plotwidth + pad["right"]
    hsize = pad["top"] + (nrows * plotheight) + (pad["row"] * (nrows - 1)) + pad["bottom"]
    hsize += scale_height + pad["row"] + annotation_height + pad["row"]
    # initialize figure
    fig = plt.figure(figsize=(wsize, hsize))
    # initialize profile figure
    pfig = ProfileFigure(fig=fig, fontsize=fontsize, pad=pad)
    # add the genomic scale
    pfig.add_panel(ScalePanel())
    if type(scale) != type([]):
        scale = [scale]
    # add the signal tracks
    c = 0
    for group in tracks:
        for i,track in enumerate(group):
            # Tracks after the first in a group are overlaid on one panel.
            panel = pfig.add_panel(
                BamProfilePanel(track,
                    color = colors[c % len(colors)],
                    bgmode = bgmode,
                    name = os.path.splitext(os.path.split(track)[-1])[0],
                    fragmentsize = fragmentsize,
                    rmrepeats = rmrepeats,
                    rmdup = rmdup,
                    adjscale = adjscale,
                    show_scale = show_scale,
                    ),
                overlay= i != 0
                )
            panel.ymax = scale[c % len(scale)]
            c += 1
    # add the annotation panel
    if annotation:
        pfig.add_panel(AnnotationPanel(annotation))
    pfig.plot([chrom, start, end], scalegroups=scalegroups, reverse=reverse)
    plt.savefig(fname, dpi=dpi)
class ProfileFigure(object):
    """A vertical stack of profile panels drawn into one matplotlib figure.

    Panels (scale, BAM profiles, annotation, ...) are registered with
    add_panel() and rendered together by plot().
    """
    def __init__(self, fig=None, gs=None, fontsize=FONTSIZE, pad=None):
        """
        fig      -- matplotlib figure to draw into (a new one is created
                    when omitted)
        gs       -- existing SubplotSpec to draw into; when omitted a
                    full-figure GridSpec is created using *pad*
        fontsize -- base font size (labels use fontsize / 1.25)
        pad      -- dict with "left"/"right"/"top"/"bottom" padding in inches
        """
        self._panels = []
        if not fig:
            fig = plt.figure()
        self.fig = fig
        self.pad = {}
        if pad:
            self.pad.update(pad)
        # Convert the absolute (inch) padding into figure fractions.
        relpad = {}
        for k in ["left", "right"]:
            relpad[k] = float(self.pad.get(k,0)) / fig.get_figwidth()
        for k in ["top", "bottom"]:
            relpad[k] = float(self.pad.get(k,0)) / fig.get_figheight()
        if gs:
            self.gs = gs
        else:
            gs = gridspec.GridSpec(1, 1)
            gs.update(
                left=relpad["left"],
                right=1 - relpad["right"],
                top=1 - relpad["top"],
                bottom=relpad["bottom"],
                wspace=0,
                hspace=0
            )
            self.gs = gs[0]
        self.font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
    def _plot_panel_names(self, ax, panels):
        """Draw the panel name(s) left of *ax*; overlaid panels get a
        color-coded "name, = " legend built from each panel's color."""
        names = [p.name for p in panels]
        colors = ["black"]
        if len(names) > 1:
            # Interleave a colored "= " marker with each panel name.
            tmp_names = []
            colors = []
            for name,color in zip(names, [p.color for p in panels]):
                tmp_names.append("= ")
                tmp_names.append(name + ", ")
                colors += [color,"black"]
            names = tmp_names
            names[-1] = names[-1].strip(", ")
        colortext(-0.01, 0.5,
                  names,
                  colors,
                  ax=ax,
                  horizontalalignment='right',
                  verticalalignment="center",
                  #transform=ax.transAxes,
                  clip_on=False,
                  fontproperties=self.font)
    def plot(self, interval, scalegroups=None, reverse=False, **kwargs):
        """Render all registered panels for *interval* ([chrom, start, end]).

        scalegroups -- groups of panel indices that share one y-maximum
        reverse     -- draw the interval right-to-left
        Extra keyword arguments are forwarded to every panel's _plot().
        """
        if scalegroups is None:
            scalegroups = []
        for panels in self._panels:
            for panel in panels:
                panel._load_data(interval)
        # One grid row per panel group, sized by the group's tallest panel.
        gs0 = gridspec.GridSpecFromSubplotSpec(
            len(self._panels),
            1,
            subplot_spec=self.gs,
            height_ratios=[max([p.height for p in panels]) for panels in self._panels]
        )
        # Overlaid BAM panels in a group share the group's largest ymax.
        for panels in self._panels:
            if isinstance(panels[-1], BamProfilePanel):
                ymax = max([p.ymax for p in panels])
                for panel in panels:
                    panel.ymax = ymax
        # Panels in the same scalegroup also share one ymax.
        if scalegroups and len(scalegroups) > 0:
            for group in scalegroups:
                ymax = max([self._panels[g][-1].ymax for g in group])
                for g in group:
                    for panel in self._panels[g]:
                        panel.ymax = ymax
        # These are quick hacks to get the track groups to work
        for panels in self._panels:
            if len(panels) > 1:
                # Set the alpha for overlapping tracks
                for panel in panels:
                    panel.alpha = 0.5
        for i, panels in enumerate(self._panels):
            ax = plt.Subplot(self.fig, gs0[i])
            plt.subplots_adjust(bottom=0, top=1, left=0, right=1, hspace=0)
            # add track labels
            self._plot_panel_names(ax, panels)
            for panel in panels:
                panel._plot(ax, interval, fig=self.fig, reverse=reverse, odd=i % 2, font=self.font, **kwargs)
            self.fig.add_subplot(ax)
    def add_panel(self, panel, overlay=False):
        """Register *panel*; with overlay=True it is drawn on top of the
        most recently added panel group. Returns the panel."""
        if overlay and len(self._panels) > 0:
            self._panels[-1].append(panel)
        else:
            self._panels.append([panel])
        return panel
class ProfilePanel(object):
    """Common base class for the panels drawn by ProfileFigure."""
    name = ""

    def hide_axes(self, axes):
        """Remove all tick formatting/locating and hide the spines of *axes*."""
        for axis in (axes.xaxis, axes.yaxis):
            axis.set_major_formatter(NullFormatter())
            axis.set_minor_formatter(NullFormatter())
            axis.set_major_locator(NullLocator())
            axis.set_minor_locator(NullLocator())
        for spine in axes.spines.values():
            spine.set_color('none')
class BamProfilePanel(ProfilePanel):
    """Panel that draws the read-coverage profile of one alignment track."""
    def __init__(self, bamfile, height=1, color=None, bgmode=None, alpha=None, fragmentsize=200, rmdup=True,
                 rmrepeats=True, **kwargs):
        """
        bamfile      -- track file loaded via fluff.track.Track.load
        height       -- relative panel height
        color        -- profile fill color (default "#a7004b")
        bgmode       -- background style, "color" or "stripes"
        alpha        -- fill transparency (default 1)
        fragmentsize -- reads are extended to this fragment size
        rmdup        -- remove duplicate reads
        rmrepeats    -- remove repeat/low-quality reads
        Recognised keyword arguments: adjscale (scale per million reads),
        show_scale (draw the y-max label), name (panel label).
        """
        self.height = height
        self.track = Track.load(bamfile, fragmentsize=fragmentsize, rmdup=rmdup, rmrepeats=rmrepeats)
        self.ymax = None
        self.bgmode = bgmode
        self.scalepm = kwargs.get("adjscale", False)
        self.show_scale = kwargs.get("show_scale", True)
        if color:
            self.color = color
        else:
            self.color = "#a7004b"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1
        self.fragmentsize = fragmentsize
        self.rmdup = rmdup
        self.rmrepeats = rmrepeats
        self.name = kwargs.get('name')
    def _load_data(self, interval):
        """Compute the coverage profile for *interval*; when no ymax was set
        by the caller, use the profile maximum plus 10% headroom."""
        self.profile = self.track.get_profile(interval,
                                              scalepm=self.scalepm)
        if not self.ymax:
            self.ymax = np.nanmax(self.profile) * 1.10
    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw the loaded profile into *ax* (requires _load_data first)."""
        # Background of profile
        if self.bgmode == "stripes":
            # Alternate white / light grey with the panel's row parity.
            bgcol = {0: "white", 1: (0.95, 0.95, 0.95)}[int(odd)]
            ax.set_facecolor(bgcol)
        elif self.bgmode == "color":
            # Very light wash of the profile color.
            ax.set_facecolor(self.color)
            ax.patch.set_alpha(0.07)
        # get interval
        chrom, start, end = interval
        profile = self.profile
        if reverse:
            profile = profile[::-1]
        # plot data
        ax.fill_between(
            list(range(start, end)),
            np.zeros(len(profile)),
            profile,
            edgecolor='face',
            facecolor=self.color,
            linewidth=0.5,
            alpha=self.alpha)
        # set the y-limit
        ax.set_ylim(0, self.ymax)
        # add y-limit label
        if self.show_scale:
            ax.text(0.005, 0.90,
                    int(ax.get_ylim()[-1] + 0.5),
                    horizontalalignment='left',
                    verticalalignment="top",
                    transform=ax.transAxes,
                    clip_on=False,
                    fontproperties=font)
        ax.set_xlim(start, end)
        self.hide_axes(ax)
class AnnotationPanel(ProfilePanel):
    """Panel that draws gene/feature annotations (BED-style) as lines,
    exon boxes and strand arrows."""
    def __init__(self, annofile, height=0.3, vis="stack", color="black"):
        """
        annofile -- annotation file, read via fluff.fluffio.load_annotation
        height   -- height per annotation row (multiplied by the number of
                    rows in _load_data)
        vis      -- layout mode passed to load_annotation (e.g. "stack")
        color    -- drawing color for all gene elements
        """
        self.annofile = annofile
        self.height = height
        self.vis = vis
        self.color = color
    def _load_data(self, interval):
        """Load annotations overlapping *interval* and grow the panel
        height to fit the number of stacked rows."""
        self.gene_track = load_annotation(interval, self.annofile, vis=self.vis)
        self.max_tracks = len(list(self.gene_track.keys()))
        self.height *= self.max_tracks
    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw every loaded gene as a line with exon boxes; BED12 entries
        also get strand-direction arrows and a name label."""
        chrom, start, end = interval
        # One unit of negative y per annotation row; genes sit at -row-0.5.
        ax.set_ylim(- 1 * self.max_tracks, 0)
        for track_id, genes in list(self.gene_track.items()):
            for gene in genes:
                h_gene = -1 * track_id - 0.5
                genestart = gene[1]
                geneend = gene[2]
                genename = gene[3]
                if len(gene) >= 6:
                    genestrand = gene[5]
                else:
                    # No strand column; assume forward.
                    genestrand = "+"
                # BED12 format
                if len(gene) == 12:
                    exonstarts = [int(x) for x in gene[11].split(",") if x]
                    exonsizes = [int(x) for x in gene[10].split(",") if x]
                else:
                    # Treat the whole gene as a single exon.
                    exonstarts = [0]
                    exonsizes = [geneend - genestart]
                # Gene start/end as axes fractions (mirrored when reversed).
                x1 = (genestart - start)
                x2 = (geneend - start)
                if reverse:
                    x1 = end - genestart
                    x2 = end - geneend
                gstart = x1 / float(end - start)
                gend = x2 / float(end - start)
                # Horizontal line for complete gene
                ax.axhline(h_gene,
                           gstart,
                           gend,
                           color=self.color,
                           solid_capstyle="butt",
                           )
                # Exons
                for exonstart, exonsize in zip(exonstarts, exonsizes):
                    estart = (genestart + exonstart - start)
                    eend = (genestart + exonstart + exonsize - start)
                    if reverse:
                        estart = end - (genestart + exonstart)
                        eend = end - (genestart + exonstart + exonsize)
                    ax.axhspan(
                        h_gene - 0.35,
                        h_gene + 0.35,
                        estart / float(end - start),
                        eend / float(end - start),
                        linewidth=0.1,
                        color=self.color)
                # Only draw arrows for BED12 entries
                if len(gene) == 12:
                    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                    figwidth, figheight = bbox.width, bbox.height
                    # Scale with absolute width of figure
                    step = 0.04 / figwidth
                    if reverse:
                        step = -step
                    # Evenly spaced strand arrows along the gene body.
                    for i in np.arange(gstart + step, gend - step, step):
                        if genestrand == "-":
                            astart = (i + step, h_gene)
                            aend = (i, h_gene)
                        else:
                            astart = (i, h_gene)
                            aend = (i + step, h_gene)
                        arr = FancyArrowPatch(
                            astart,
                            aend,
                            arrowstyle=GENE_ARROW,
                            mutation_scale=(figheight * fig.dpi) / 8 / self.max_tracks * 1.5,
                            linewidth=0.5,
                            color=self.color,
                        )
                        ax.add_patch(arr)
                # Gene name to the left of the gene, only when there is
                # room inside the axes.
                if gstart > 0:
                    ax.text(gstart - 0.01, h_gene, genename,
                            horizontalalignment="right",
                            verticalalignment="center",
                            fontproperties=font)
        self.hide_axes(ax)
class ScalePanel(ProfilePanel):
    """Panel that draws the genomic coordinate scale: the chromosome name
    followed by tick positions along the interval."""
    def __init__(self, height=0.3, color=None, alpha=None):
        """
        height -- relative panel height
        color  -- text color (default "black")
        alpha  -- kept for interface parity with the other panels (default 1)
        """
        self.height = height
        if color:
            self.color = color
        else:
            self.color = "black"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1
    def _load_data(self, interval):
        # Nothing to load; the scale is derived from the interval itself.
        pass
    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw the chromosome name and tick coordinates for *interval*."""
        chrom, start, end = interval
        # Formatting
        for s in list(ax.spines.values()):
            s.set_color('none')
        ax.yaxis.set_major_formatter(NullFormatter())
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_locator(NullLocator())
        ax.set_xlim(start, end)
        # ax.set_ylim(0,1)
        # Set font
        # Plot the numbers
        # Tick positions strictly inside the interval (last locator tick
        # is dropped to leave room at the right edge).
        ticks = [s for s in ax.xaxis.get_ticklocs()[:-1] if s > start and s < end]
        xcoords = [(s - start) / (end - start) + 0.01 for s in ticks]
        if reverse:
            # NOTE(review): only the tick values are reversed here, not the
            # x positions, so labels are mirrored onto the flipped axis —
            # confirm this is the intended behavior.
            ticks = ticks[::-1]
        for s, x in zip(ticks[:-1], xcoords[:-1]):
            ax.text(
                x,
                0.5,
                str(int(s)),
                horizontalalignment='left',
                verticalalignment='center',
                transform=ax.transAxes,
                fontproperties=font,
                color=self.color)
        ax.text(
            0,
            0.5,
            chrom,
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes,
            fontproperties=font,
            color=self.color)
class ConservationPanel(ProfilePanel):
    """Panel that connects aligned regions of two intervals with shaded
    polygons (the plotted interval on top, self.target at the bottom)."""
    def __init__(self, track, target, height=1):
        """
        track  -- path to a tab-separated file; each line holds six columns:
                  chrom1 start1 end1 chrom2 start2 end2
        target -- [chrom, start, end] of the second (bottom) interval
        height -- relative panel height
        """
        self.track = track
        self.height = height
        self.data = []
        self.target = target
    def _load_data(self, ival1):
        """Parse all alignment pairs from the track file into self.data.

        Coordinate columns (1, 2, 4, 5) are converted to int. The interval
        argument is accepted for API parity with the other panels but is
        not used to filter the file.
        """
        # Bug fix: use a context manager so the file handle is closed
        # (the original `for line in open(...)` leaked it).
        with open(self.track) as fh:
            for line in fh:
                vals = line.strip().split("\t")
                for i in [1, 2, 4, 5]:
                    vals[i] = int(vals[i])
                self.data.append(vals)
    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw one polygon per alignment pair, mapping each region to
        fractional x-positions on the top (y=1) and bottom (y=0) edges.

        reverse flips the top interval; kwargs["reverse_self"] flips the
        bottom (target) interval.
        """
        reverse_other = reverse
        reverse_self = kwargs.get("reverse_self", False)
        chrom, start, end = interval
        c2, s2, e2 = self.target
        span1 = float(end - start)
        span2 = float(e2 - s2)
        for [chrom1, start1, end1, chrom2, start2, end2] in self.data:
            # Four corners: top-left, bottom-left, bottom-right, top-right,
            # each mirrored with 1 - frac when its interval is reversed.
            if reverse_self:
                if reverse_other:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
            else:
                if reverse_other:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
            poly = Polygon(coords,
                           facecolor="black",
                           edgecolor='none',
                           alpha=0.2,
                           )
            ax.add_patch(poly)
        self.hide_axes(ax)
| [
"matplotlib.ticker.NullFormatter",
"matplotlib.ticker.NullLocator",
"matplotlib.offsetbox.TextArea",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"numpy.arange",
"re.split",
"scipy.stats.scoreatpercentile",
"fluff.fluffio.load_annotation",
"matplotlib.patches.ArrowStyle._Curve",
"matplotlib.p... | [((706, 793), 'matplotlib.patches.ArrowStyle._Curve', 'ArrowStyle._Curve', ([], {'beginarrow': '(False)', 'endarrow': '(True)', 'head_length': '(0.4)', 'head_width': '(0.4)'}), '(beginarrow=False, endarrow=True, head_length=0.4,\n head_width=0.4)\n', (723, 793), False, 'from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon\n'), ((1525, 1580), 'matplotlib.offsetbox.HPacker', 'HPacker', ([], {'children': 'areas', 'align': '"""baseline"""', 'pad': '(0)', 'sep': '(0)'}), "(children=areas, align='baseline', pad=0, sep=0)\n", (1532, 1580), False, 'from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox\n'), ((1634, 1821), 'matplotlib.offsetbox.AnnotationBbox', 'AnnotationBbox', (['txt'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'annotation_clip': 'annotation_clip', 'frameon': '(False)', 'boxcoords': '"""axes fraction"""', 'box_alignment': '(horizontalalignment, verticalalignment)'}), "(txt, xy=(x, y), xycoords='data', annotation_clip=\n annotation_clip, frameon=False, boxcoords='axes fraction',\n box_alignment=(horizontalalignment, verticalalignment))\n", (1648, 1821), False, 'from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox\n'), ((2520, 2613), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '(fontsize / 1.25)', 'family': "['Nimbus Sans L', 'Helvetica', 'sans-serif']"}), "(size=fontsize / 1.25, family=['Nimbus Sans L', 'Helvetica',\n 'sans-serif'])\n", (2534, 2613), False, 'from matplotlib.font_manager import FontProperties\n'), ((3012, 3057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(plot_width, plot_height)'}), '(figsize=(plot_width, plot_height))\n', (3022, 3057), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3152), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', 'numplots'], {'width_ratios': 'width_ratios'}), '(1, numplots, width_ratios=width_ratios)\n', (3112, 3152), True, 'import matplotlib.gridspec as gridspec\n'), ((5933, 5968), 
'sys.stderr.write', 'sys.stderr.write', (['"""Saving figure\n"""'], {}), "('Saving figure\\n')\n", (5949, 5968), False, 'import sys\n'), ((6489, 6503), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6497, 6503), True, 'import numpy as np\n'), ((6534, 6557), 'numpy.median', 'np.median', (['vals'], {'axis': '(0)'}), '(vals, axis=0)\n', (6543, 6557), True, 'import numpy as np\n'), ((7827, 7861), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(wsize, hsize)'}), '(figsize=(wsize, hsize))\n', (7837, 7861), True, 'import matplotlib.pyplot as plt\n'), ((10350, 10376), 're.split', 're.split', (['"""[-:]"""', 'interval'], {}), "('[-:]', interval)\n", (10358, 10376), False, 'import re\n'), ((10893, 10927), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(wsize, hsize)'}), '(figsize=(wsize, hsize))\n', (10903, 10927), True, 'import matplotlib.pyplot as plt\n'), ((12071, 12098), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'dpi': 'dpi'}), '(fname, dpi=dpi)\n', (12082, 12098), True, 'import matplotlib.pyplot as plt\n'), ((1454, 1486), 'matplotlib.offsetbox.TextArea', 'TextArea', (['t'], {'textprops': 'textprops'}), '(t, textprops=textprops)\n', (1462, 1486), False, 'from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox\n'), ((5204, 5220), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5212, 5220), True, 'import numpy as np\n'), ((10450, 10498), 'fluff.fluffio.load_annotation', 'load_annotation', (['[chrom, start, end]', 'annotation'], {}), '([chrom, start, end], annotation)\n', (10465, 10498), False, 'from fluff.fluffio import load_annotation\n'), ((13032, 13125), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '(fontsize / 1.25)', 'family': "['Nimbus Sans L', 'Helvetica', 'sans-serif']"}), "(size=fontsize / 1.25, family=['Nimbus Sans L', 'Helvetica',\n 'sans-serif'])\n", (13046, 13125), False, 'from matplotlib.font_manager import FontProperties\n'), ((16437, 16522), 
'fluff.track.Track.load', 'Track.load', (['bamfile'], {'fragmentsize': 'fragmentsize', 'rmdup': 'rmdup', 'rmrepeats': 'rmrepeats'}), '(bamfile, fragmentsize=fragmentsize, rmdup=rmdup, rmrepeats=rmrepeats\n )\n', (16447, 16522), False, 'from fluff.track import Track\n'), ((18787, 18841), 'fluff.fluffio.load_annotation', 'load_annotation', (['interval', 'self.annofile'], {'vis': 'self.vis'}), '(interval, self.annofile, vis=self.vis)\n', (18802, 18841), False, 'from fluff.fluffio import load_annotation\n'), ((2257, 2272), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (2270, 2272), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((2302, 2315), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (2313, 2315), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((3677, 3700), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (3696, 3700), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3838, 3858), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'nbins': '(3)'}), '(nbins=3)\n', (3849, 3858), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((4838, 4868), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axcluster'], {}), '(axcluster)\n', (4857, 4868), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((8154, 8208), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', 'ncolumns', '(row * ncolumns + col + 1)'], {}), '(nrows, ncolumns, row * ncolumns + col + 1)\n', (8165, 8208), True, 'import matplotlib.pyplot as plt\n'), ((12265, 12277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12275, 12277), True, 'import matplotlib.pyplot as plt\n'), ((12679, 12702), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (12696, 12702), True, 'import matplotlib.gridspec as gridspec\n'), 
((15218, 15247), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['self.fig', 'gs0[i]'], {}), '(self.fig, gs0[i])\n', (15229, 15247), True, 'import matplotlib.pyplot as plt\n'), ((15260, 15323), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0)', 'top': '(1)', 'left': '(0)', 'right': '(1)', 'hspace': '(0)'}), '(bottom=0, top=1, left=0, right=1, hspace=0)\n', (15279, 15323), True, 'import matplotlib.pyplot as plt\n'), ((23241, 23256), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (23254, 23256), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((23295, 23310), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (23308, 23310), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((23347, 23360), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (23358, 23360), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((26398, 26461), 'matplotlib.patches.Polygon', 'Polygon', (['coords'], {'facecolor': '"""black"""', 'edgecolor': '"""none"""', 'alpha': '(0.2)'}), "(coords, facecolor='black', edgecolor='none', alpha=0.2)\n", (26405, 26461), False, 'from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon\n'), ((15953, 15968), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (15966, 15968), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((16005, 16020), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (16018, 16020), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((16055, 16068), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (16066, 16068), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((16103, 16116), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (16114, 16116), False, 'from matplotlib.ticker import 
NullFormatter, NullLocator, MaxNLocator\n'), ((17200, 17223), 'numpy.nanmax', 'np.nanmax', (['self.profile'], {}), '(self.profile)\n', (17209, 17223), True, 'import numpy as np\n'), ((6617, 6652), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['vals[:, i]', 'perc'], {}), '(vals[:, i], perc)\n', (6634, 6652), False, 'from scipy.stats import scoreatpercentile\n'), ((6980, 7021), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['vals[:, i]', '(100 - perc)'], {}), '(vals[:, i], 100 - perc)\n', (6997, 7021), False, 'from scipy.stats import scoreatpercentile\n'), ((21561, 21604), 'numpy.arange', 'np.arange', (['(gstart + step)', '(gend - step)', 'step'], {}), '(gstart + step, gend - step, step)\n', (21570, 21604), True, 'import numpy as np\n'), ((8384, 8399), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (8397, 8399), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((8442, 8455), 'matplotlib.ticker.NullLocator', 'NullLocator', ([], {}), '()\n', (8453, 8455), False, 'from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator\n'), ((21919, 22078), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['astart', 'aend'], {'arrowstyle': 'GENE_ARROW', 'mutation_scale': '(figheight * fig.dpi / 8 / self.max_tracks * 1.5)', 'linewidth': '(0.5)', 'color': 'self.color'}), '(astart, aend, arrowstyle=GENE_ARROW, mutation_scale=\n figheight * fig.dpi / 8 / self.max_tracks * 1.5, linewidth=0.5, color=\n self.color)\n', (21934, 22078), False, 'from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon\n'), ((10034, 10053), 'os.path.basename', 'os.path.basename', (['t'], {}), '(t)\n', (10050, 10053), False, 'import os\n'), ((11472, 11492), 'os.path.split', 'os.path.split', (['track'], {}), '(track)\n', (11485, 11492), False, 'import os\n')] |
import gym
import matplotlib
import torch
import numpy as np
from sac.model import GaussianPolicy, QNetwork, DeterministicPolicy
# from core.notebook_utils import animate
# from core.notebook_utils import gen_video
# --- Evaluation configuration ------------------------------------------
seed = 123456
hidden_size = 256  # hidden layer width; assumed to match the checkpoint's training config -- TODO confirm
device = 'cpu'
# env_name = 'Hopper-v2'
env_name = 'Walker2d-v2'
# env_name = 'HalfCheetah'
model_path = './model_last.pt'
# Build the environment and seed it for reproducible rollouts.
env = gym.make(env_name)
env.seed(seed)
# Rebuild the Gaussian policy with the env's observation/action sizes and
# restore its weights from the 'Policy' entry of the saved state dict.
policy = GaussianPolicy(env.observation_space.shape[0], env.action_space.shape[0], hidden_size, env.action_space).to(device)
policy.load_state_dict(torch.load(model_path, map_location=torch.device(device))['Policy'])
def select_action(state, policy, device):
    """Sample a stochastic action from the policy for the given state.

    The state is converted to a float tensor on `device` and given a
    leading batch dimension before sampling. The deterministic mean is
    also produced by policy.sample() but the stochastic sample is what
    gets returned, as a numpy array with the batch dimension stripped.
    """
    obs = torch.FloatTensor(state).to(device).unsqueeze(0)
    sampled, _log_prob, _mean = policy.sample(obs)
    # return mean.detach().cpu().numpy()[0]
    return sampled.detach().cpu().numpy()[0]
# Now we generate video to see the performance.
state = env.reset()
# NOTE(review): the batch dim added here is not re-added after env.step(),
# so subsequent select_action() calls see an un-batched state -- presumably
# the policy broadcasts over both shapes; confirm.
state = np.expand_dims(state, axis=0)
frames = []
rewards = []
# Roll out up to 1000 steps, logging the reward every 100 steps and
# stopping early when the episode terminates.
for i in range(1000):
    # frame = env.render(mode='rgb_array')
    state, reward, done, info = env.step(select_action(state, policy, device))
    # frames.append(frame.copy())
    rewards.append(reward)
    if i % 100 == 0:
        print("Step: {}, reward: {}".format(i, reward) )
    if done:
        break
print('total reward: {}'.format(np.sum(rewards))) | [
"sac.model.GaussianPolicy",
"torch.FloatTensor",
"numpy.sum",
"numpy.expand_dims",
"gym.make",
"torch.device"
] | [((383, 401), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (391, 401), False, 'import gym\n'), ((956, 985), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (970, 985), True, 'import numpy as np\n'), ((427, 535), 'sac.model.GaussianPolicy', 'GaussianPolicy', (['env.observation_space.shape[0]', 'env.action_space.shape[0]', 'hidden_size', 'env.action_space'], {}), '(env.observation_space.shape[0], env.action_space.shape[0],\n hidden_size, env.action_space)\n', (441, 535), False, 'from sac.model import GaussianPolicy, QNetwork, DeterministicPolicy\n'), ((1355, 1370), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (1361, 1370), True, 'import numpy as np\n'), ((602, 622), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (614, 622), False, 'import torch\n'), ((691, 715), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (708, 715), False, 'import torch\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import time
import argparse
from scipy.special import softmax
from openvino.runtime import Core
def image_preprocess(img_path, re_shape):
    """Load an image and convert it into a normalized NCHW float32 array.

    Pipeline: read with OpenCV (BGR), resize to (re_shape, re_shape)
    with Lanczos interpolation, convert to RGB, scale to [0, 1],
    transpose HWC -> CHW, add a batch axis, then apply the ImageNet
    per-channel mean/std normalization.
    """
    raw = cv2.imread(img_path)
    resized = cv2.resize(raw, (re_shape, re_shape),
                         interpolation=cv2.INTER_LANCZOS4)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    # HWC uint8 -> 1xCxHxW float in [0, 1]
    batched = np.expand_dims(np.transpose(rgb, [2, 0, 1]) / 255, 0)
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    batched -= mean
    batched /= std
    return batched.astype(np.float32)
def draw_box(img, results, class_label, scale_x, scale_y):
    """Draw detection boxes with class labels and scores onto `img`.

    Args:
        img: image array to draw on; modified in place.
        results: detection rows of the form
            [label_id, score, xmin, ymin, xmax, ymax] in network coords.
        class_label: path to a text file with one class name per line.
        scale_x, scale_y: factors mapping network coords to image coords.

    Returns:
        The annotated image (the same object as `img`).
    """
    # Fix: use a context manager so the label-file handle is closed
    # deterministically (the original left an open file to the GC).
    with open(class_label, 'r') as f:
        label_list = [line.strip() for line in f.readlines()]
    for i in range(len(results)):
        print(label_list[int(results[i][0])], ':', results[i][1])
        bbox = results[i, 2:]
        label_id = int(results[i, 0])
        score = results[i, 1]
        # Only detections above this confidence are drawn.
        if (score > 0.20):
            xmin, ymin, xmax, ymax = [
                int(bbox[0] * scale_x), int(bbox[1] * scale_y),
                int(bbox[2] * scale_x), int(bbox[3] * scale_y)
            ]
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
            font = cv2.FONT_HERSHEY_SIMPLEX
            label_text = label_list[label_id]
            # Filled banner above the box, then label and score text.
            cv2.rectangle(img, (xmin, ymin), (xmax, ymin - 60), (0, 255, 0), -1)
            cv2.putText(img, "#" + label_text, (xmin, ymin - 10), font, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(img,
                        str(round(score, 3)), (xmin, ymin - 40), font, 0.8,
                        (255, 255, 255), 2, cv2.LINE_AA)
    return img
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
    """Greedy (hard) non-maximum suppression.

    Args:
        box_scores (N, 5): corner-form boxes with a trailing score column.
        iou_threshold: IoU above which lower-scored boxes are suppressed.
        top_k: keep at most this many results; k <= 0 keeps all.
        candidate_size: only this many highest-scoring candidates enter NMS.

    Returns:
        The surviving rows of box_scores, best score first.
    """
    confidences = box_scores[:, -1]
    corners = box_scores[:, :-1]
    keep = []
    # Indices sorted worst -> best; the strongest candidate is at the end.
    order = np.argsort(confidences)[-candidate_size:]
    while len(order) > 0:
        best = order[-1]
        keep.append(best)
        # Stop once top_k boxes are kept or no rivals remain.
        if 0 < top_k == len(keep) or len(order) == 1:
            break
        best_box = corners[best, :]
        order = order[:-1]
        rivals = corners[order, :]
        overlap = iou_of(rivals, np.expand_dims(best_box, axis=0))
        # Drop every rival that overlaps the kept box too strongly.
        order = order[overlap <= iou_threshold]
    return box_scores[keep, :]
def iou_of(boxes0, boxes1, eps=1e-5):
    """Return intersection-over-union (Jaccard index) of boxes.

    Args:
        boxes0 (N, 4): ground truth boxes in corner form.
        boxes1 (N or 1, 4): predicted boxes, broadcast against boxes0.
        eps: small constant that keeps the denominator non-zero.

    Returns:
        iou (N): IoU values.
    """
    inter_lt = np.maximum(boxes0[..., :2], boxes1[..., :2])
    inter_rb = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
    intersection = area_of(inter_lt, inter_rb)
    area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
    area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
    union = area0 + area1 - intersection
    return intersection / (union + eps)
def area_of(left_top, right_bottom):
    """Compute the areas of rectangles given two corners.

    Args:
        left_top (N, 2): left-top corners.
        right_bottom (N, 2): right-bottom corners.

    Returns:
        area (N): rectangle areas; degenerate rectangles yield 0.
    """
    extent = right_bottom - left_top
    # Negative extents (inverted corners) are clamped to zero area.
    extent = np.clip(extent, 0.0, None)
    return extent[..., 0] * extent[..., 1]
class PicoDetPostProcess(object):
    """Post-process PicoDet head outputs into final detection boxes.

    Decodes per-stride box distributions into corner-form boxes, applies
    score thresholding and per-class hard NMS, then maps boxes back to
    the original image space.

    Args:
        input_shape: network input image size (h, w).
        ori_shape: original image shapes before padding, one per batch item.
        scale_factor: scale factors of the original images, one per batch item.
        strides: feature-map strides of the detection heads.
        score_threshold: minimum class score to keep a candidate.
        nms_threshold: IoU threshold used by hard NMS.
        nms_top_k: candidates kept per stride before NMS.
        keep_top_k: maximum detections kept per class after NMS.
    """
    def __init__(self,
                 input_shape,
                 ori_shape,
                 scale_factor,
                 strides=[8, 16, 32, 64],
                 score_threshold=0.4,
                 nms_threshold=0.5,
                 nms_top_k=1000,
                 keep_top_k=100):
        # NOTE(review): `strides` is a mutable default argument; it is only
        # read here, so the shared list is harmless in practice.
        self.ori_shape = ori_shape
        self.input_shape = input_shape
        self.scale_factor = scale_factor
        self.strides = strides
        self.score_threshold = score_threshold
        self.nms_threshold = nms_threshold
        self.nms_top_k = nms_top_k
        self.keep_top_k = keep_top_k

    def warp_boxes(self, boxes, ori_shape):
        """Clip boxes to the original image bounds.

        The affine warp itself is disabled (the `xy @ M.T` line is
        commented out and the homogeneous column stays all ones), so the
        4-corner round-trip below is effectively an identity followed by
        clipping to [0, width] x [0, height].
        """
        width, height = ori_shape[1], ori_shape[0]
        n = len(boxes)
        if n:
            # warp points: expand each box to its 4 corners in homogeneous coords
            xy = np.ones((n * 4, 3))
            xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
                n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            # xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
            # create new boxes from the min/max of the warped corners
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            xy = np.concatenate(
                (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
            # clip boxes
            xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
            xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
            return xy.astype(np.float32)
        else:
            return boxes

    def __call__(self, scores, raw_boxes):
        """Decode raw head outputs into final detections for each batch item.

        Args:
            scores: per-stride class score arrays (first axis is batch).
            raw_boxes: per-stride box distribution arrays whose last axis
                has size 4 * (reg_max + 1).

        Returns:
            out_boxes_list: (total_dets, 6) array of
                [label, score, x1, y1, x2, y2] rows across the batch.
            out_boxes_num: int32 array with the detection count per item.
        """
        batch_size = raw_boxes[0].shape[0]
        reg_max = int(raw_boxes[0].shape[-1] / 4 - 1)
        out_boxes_num = []
        out_boxes_list = []
        for batch_id in range(batch_size):
            # generate centers
            decode_boxes = []
            select_scores = []
            for stride, box_distribute, score in zip(self.strides, raw_boxes,
                                                     scores):
                box_distribute = box_distribute[batch_id]
                score = score[batch_id]
                # centers: one anchor point per feature-map cell
                fm_h = self.input_shape[0] / stride
                fm_w = self.input_shape[1] / stride
                h_range = np.arange(fm_h)
                w_range = np.arange(fm_w)
                ww, hh = np.meshgrid(w_range, h_range)
                ct_row = (hh.flatten() + 0.5) * stride
                ct_col = (ww.flatten() + 0.5) * stride
                center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)
                # box distribution to distance: expectation over the
                # softmaxed discrete distribution, scaled by the stride
                reg_range = np.arange(reg_max + 1)
                box_distance = box_distribute.reshape((-1, reg_max + 1))
                box_distance = softmax(box_distance, axis=1)
                box_distance = box_distance * np.expand_dims(reg_range, axis=0)
                box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))
                box_distance = box_distance * stride
                # top K candidate, ranked by best class score per anchor
                topk_idx = np.argsort(score.max(axis=1))[::-1]
                topk_idx = topk_idx[:self.nms_top_k]
                center = center[topk_idx]
                score = score[topk_idx]
                box_distance = box_distance[topk_idx]
                # decode box: distances are (left, top, right, bottom)
                decode_box = center + [-1, -1, 1, 1] * box_distance
                select_scores.append(score)
                decode_boxes.append(decode_box)
            # nms, applied independently per class
            bboxes = np.concatenate(decode_boxes, axis=0)
            confidences = np.concatenate(select_scores, axis=0)
            picked_box_probs = []
            picked_labels = []
            for class_index in range(0, confidences.shape[1]):
                probs = confidences[:, class_index]
                mask = probs > self.score_threshold
                probs = probs[mask]
                if probs.shape[0] == 0:
                    continue
                subset_boxes = bboxes[mask, :]
                box_probs = np.concatenate(
                    [subset_boxes, probs.reshape(-1, 1)], axis=1)
                box_probs = hard_nms(
                    box_probs,
                    iou_threshold=self.nms_threshold,
                    top_k=self.keep_top_k, )
                picked_box_probs.append(box_probs)
                picked_labels.extend([class_index] * box_probs.shape[0])
            if len(picked_box_probs) == 0:
                out_boxes_list.append(np.empty((0, 4)))
                out_boxes_num.append(0)
            else:
                picked_box_probs = np.concatenate(picked_box_probs)
                # resize output boxes back into original image coordinates
                picked_box_probs[:, :4] = self.warp_boxes(
                    picked_box_probs[:, :4], self.ori_shape[batch_id])
                im_scale = np.concatenate([
                    self.scale_factor[batch_id][::-1],
                    self.scale_factor[batch_id][::-1]
                ])
                picked_box_probs[:, :4] /= im_scale
                # clas score box: assemble [label, score, x1, y1, x2, y2]
                out_boxes_list.append(
                    np.concatenate(
                        [
                            np.expand_dims(
                                np.array(picked_labels),
                                axis=-1), np.expand_dims(
                                    picked_box_probs[:, 4], axis=-1),
                            picked_box_probs[:, :4]
                        ],
                        axis=1))
                out_boxes_num.append(len(picked_labels))
        out_boxes_list = np.concatenate(out_boxes_list, axis=0)
        out_boxes_num = np.asarray(out_boxes_num).astype(np.int32)
        return out_boxes_list, out_boxes_num
def detect(img_file, compiled_model, re_shape, class_label):
    """Run PicoDet inference on one image, then draw and display results.

    Args:
        img_file: path to the input image.
        compiled_model: OpenVINO compiled model used for inference.
        re_shape: square network input size (e.g. 320).
        class_label: path to the class-name file, one label per line.

    Side effects: writes 'res.jpg' and opens an OpenCV display window.
    """
    # Fix: preprocess locally from the function's own arguments instead of
    # relying on the module-level `test_image` global the caller happened
    # to have set up; the result is identical for the same img_file/re_shape.
    test_image = image_preprocess(img_file, re_shape)
    output = compiled_model.infer_new_request({0: test_image})
    result_ie = list(output.values())

    test_im_shape = np.array([[re_shape, re_shape]]).astype('float32')
    test_scale_factor = np.array([[1, 1]]).astype('float32')
    # The model emits score and box outputs in two equal halves.
    np_score_list = []
    np_boxes_list = []
    num_outs = int(len(result_ie) / 2)
    for out_idx in range(num_outs):
        np_score_list.append(result_ie[out_idx])
        np_boxes_list.append(result_ie[out_idx + num_outs])

    postprocess = PicoDetPostProcess(test_image.shape[2:], test_im_shape,
                                       test_scale_factor)
    np_boxes, np_boxes_num = postprocess(np_score_list, np_boxes_list)

    image = cv2.imread(img_file, 1)
    # Map network coordinates back to the original image size.
    scale_x = image.shape[1] / test_image.shape[3]
    scale_y = image.shape[0] / test_image.shape[2]
    res_image = draw_box(image, np_boxes, class_label, scale_x, scale_y)

    cv2.imwrite('res.jpg', res_image)
    cv2.imshow("res", res_image)
    cv2.waitKey()
def benchmark(test_image, compiled_model):
    """Time repeated inference requests and print min/max/avg latency.

    Runs a fixed number of warm-up iterations (excluded from the stats)
    followed by timed iterations, then prints a latency summary in ms.
    """
    loop_num = 100
    warm_up = 8
    total = 0
    fastest = float("inf")
    slowest = float('-inf')
    for step in range(loop_num + warm_up):
        start = time.time()
        # Perform one inference step.
        compiled_model.infer_new_request({0: test_image})
        elapsed = time.time() - start
        # Warm-up iterations are discarded from the statistics.
        if step >= warm_up:
            total = total + elapsed
            fastest = min(fastest, elapsed)
            slowest = max(slowest, elapsed)
    average = total / loop_num
    print('inference_time(ms): min={}, max={}, avg={}'.format(
        round(fastest * 1000, 2),
        round(slowest * 1000, 1), round(average * 1000, 1)))
if __name__ == '__main__':
    # CLI: choose between a single-image detection demo and a latency
    # benchmark of the compiled OpenVINO model.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--benchmark', type=int, default=1, help="0:detect; 1:benchmark")
    parser.add_argument(
        '--img_path',
        type=str,
        default='../../../../demo/000000014439.jpg',
        help="image path")
    parser.add_argument(
        '--onnx_path',
        type=str,
        default='out_onnxsim/picodet_s_320_processed.onnx',
        help="onnx filepath")
    parser.add_argument('--in_shape', type=int, default=320, help="input_size")
    parser.add_argument(
        '--class_label',
        type=str,
        default='coco_label.txt',
        help="class label file")
    args = parser.parse_args()
    # Load the ONNX model with OpenVINO and compile it for CPU execution.
    ie = Core()
    net = ie.read_model(args.onnx_path)
    test_image = image_preprocess(args.img_path, args.in_shape)
    compiled_model = ie.compile_model(net, 'CPU')
    if args.benchmark == 0:
        detect(args.img_path, compiled_model, args.in_shape, args.class_label)
    if args.benchmark == 1:
        benchmark(test_image, compiled_model)
| [
"numpy.clip",
"cv2.rectangle",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.stack",
"numpy.empty",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.maximum",
"cv2.waitKey",
"numpy.ones",
"cv2.putText",
"cv2.cvtColor",
... | [((791, 811), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (801, 811), False, 'import cv2\n'), ((822, 893), 'cv2.resize', 'cv2.resize', (['img', '(re_shape, re_shape)'], {'interpolation': 'cv2.INTER_LANCZOS4'}), '(img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)\n', (832, 893), False, 'import cv2\n'), ((913, 949), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (925, 949), False, 'import cv2\n'), ((1005, 1027), 'numpy.expand_dims', 'np.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (1019, 1027), True, 'import numpy as np\n'), ((2876, 2894), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (2886, 2894), True, 'import numpy as np\n'), ((3736, 3780), 'numpy.maximum', 'np.maximum', (['boxes0[..., :2]', 'boxes1[..., :2]'], {}), '(boxes0[..., :2], boxes1[..., :2])\n', (3746, 3780), True, 'import numpy as np\n'), ((3808, 3852), 'numpy.minimum', 'np.minimum', (['boxes0[..., 2:]', 'boxes1[..., 2:]'], {}), '(boxes0[..., 2:], boxes1[..., 2:])\n', (3818, 3852), True, 'import numpy as np\n'), ((4356, 4399), 'numpy.clip', 'np.clip', (['(right_bottom - left_top)', '(0.0)', 'None'], {}), '(right_bottom - left_top, 0.0, None)\n', (4363, 4399), True, 'import numpy as np\n'), ((11175, 11198), 'cv2.imread', 'cv2.imread', (['img_file', '(1)'], {}), '(img_file, 1)\n', (11185, 11198), False, 'import cv2\n'), ((11379, 11412), 'cv2.imwrite', 'cv2.imwrite', (['"""res.jpg"""', 'res_image'], {}), "('res.jpg', res_image)\n", (11390, 11412), False, 'import cv2\n'), ((11417, 11445), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res_image'], {}), "('res', res_image)\n", (11427, 11445), False, 'import cv2\n'), ((11450, 11463), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (11461, 11463), False, 'import cv2\n'), ((12264, 12289), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12287, 12289), False, 'import argparse\n'), ((12946, 12952), 'openvino.runtime.Core', 'Core', 
([], {}), '()\n', (12950, 12952), False, 'from openvino.runtime import Core\n'), ((960, 988), 'numpy.transpose', 'np.transpose', (['img', '[2, 0, 1]'], {}), '(img, [2, 0, 1])\n', (972, 988), True, 'import numpy as np\n'), ((10273, 10311), 'numpy.concatenate', 'np.concatenate', (['out_boxes_list'], {'axis': '(0)'}), '(out_boxes_list, axis=0)\n', (10287, 10311), True, 'import numpy as np\n'), ((11698, 11709), 'time.time', 'time.time', ([], {}), '()\n', (11707, 11709), False, 'import time\n'), ((11830, 11841), 'time.time', 'time.time', ([], {}), '()\n', (11839, 11841), False, 'import time\n'), ((1043, 1074), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1051, 1074), True, 'import numpy as np\n'), ((1108, 1139), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1116, 1139), True, 'import numpy as np\n'), ((1805, 1867), 'cv2.rectangle', 'cv2.rectangle', (['img', '(xmin, ymin)', '(xmax, ymax)', '(0, 255, 0)', '(3)'], {}), '(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)\n', (1818, 1867), False, 'import cv2\n'), ((1970, 2038), 'cv2.rectangle', 'cv2.rectangle', (['img', '(xmin, ymin)', '(xmax, ymin - 60)', '(0, 255, 0)', '(-1)'], {}), '(img, (xmin, ymin), (xmax, ymin - 60), (0, 255, 0), -1)\n', (1983, 2038), False, 'import cv2\n'), ((2051, 2151), 'cv2.putText', 'cv2.putText', (['img', "('#' + label_text)", '(xmin, ymin - 10)', 'font', '(1)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), "(img, '#' + label_text, (xmin, ymin - 10), font, 1, (255, 255, \n 255), 2, cv2.LINE_AA)\n", (2062, 2151), False, 'import cv2\n'), ((3268, 3303), 'numpy.expand_dims', 'np.expand_dims', (['current_box'], {'axis': '(0)'}), '(current_box, axis=0)\n', (3282, 3303), True, 'import numpy as np\n'), ((5545, 5564), 'numpy.ones', 'np.ones', (['(n * 4, 3)'], {}), '((n * 4, 3))\n', (5552, 5564), True, 'import numpy as np\n'), ((8208, 8244), 'numpy.concatenate', 'np.concatenate', (['decode_boxes'], {'axis': '(0)'}), 
'(decode_boxes, axis=0)\n', (8222, 8244), True, 'import numpy as np\n'), ((8271, 8308), 'numpy.concatenate', 'np.concatenate', (['select_scores'], {'axis': '(0)'}), '(select_scores, axis=0)\n', (8285, 8308), True, 'import numpy as np\n'), ((10615, 10647), 'numpy.array', 'np.array', (['[[re_shape, re_shape]]'], {}), '([[re_shape, re_shape]])\n', (10623, 10647), True, 'import numpy as np\n'), ((10690, 10708), 'numpy.array', 'np.array', (['[[1, 1]]'], {}), '([[1, 1]])\n', (10698, 10708), True, 'import numpy as np\n'), ((6948, 6963), 'numpy.arange', 'np.arange', (['fm_h'], {}), '(fm_h)\n', (6957, 6963), True, 'import numpy as np\n'), ((6990, 7005), 'numpy.arange', 'np.arange', (['fm_w'], {}), '(fm_w)\n', (6999, 7005), True, 'import numpy as np\n'), ((7031, 7060), 'numpy.meshgrid', 'np.meshgrid', (['w_range', 'h_range'], {}), '(w_range, h_range)\n', (7042, 7060), True, 'import numpy as np\n'), ((7196, 7246), 'numpy.stack', 'np.stack', (['(ct_col, ct_row, ct_col, ct_row)'], {'axis': '(1)'}), '((ct_col, ct_row, ct_col, ct_row), axis=1)\n', (7204, 7246), True, 'import numpy as np\n'), ((7323, 7345), 'numpy.arange', 'np.arange', (['(reg_max + 1)'], {}), '(reg_max + 1)\n', (7332, 7345), True, 'import numpy as np\n'), ((7450, 7479), 'scipy.special.softmax', 'softmax', (['box_distance'], {'axis': '(1)'}), '(box_distance, axis=1)\n', (7457, 7479), False, 'from scipy.special import softmax\n'), ((9289, 9321), 'numpy.concatenate', 'np.concatenate', (['picked_box_probs'], {}), '(picked_box_probs)\n', (9303, 9321), True, 'import numpy as np\n'), ((9518, 9609), 'numpy.concatenate', 'np.concatenate', (['[self.scale_factor[batch_id][::-1], self.scale_factor[batch_id][::-1]]'], {}), '([self.scale_factor[batch_id][::-1], self.scale_factor[\n batch_id][::-1]])\n', (9532, 9609), True, 'import numpy as np\n'), ((10336, 10361), 'numpy.asarray', 'np.asarray', (['out_boxes_num'], {}), '(out_boxes_num)\n', (10346, 10361), True, 'import numpy as np\n'), ((7526, 7559), 'numpy.expand_dims', 
'np.expand_dims', (['reg_range'], {'axis': '(0)'}), '(reg_range, axis=0)\n', (7540, 7559), True, 'import numpy as np\n'), ((9177, 9193), 'numpy.empty', 'np.empty', (['(0, 4)'], {}), '((0, 4))\n', (9185, 9193), True, 'import numpy as np\n'), ((7591, 7619), 'numpy.sum', 'np.sum', (['box_distance'], {'axis': '(1)'}), '(box_distance, axis=1)\n', (7597, 7619), True, 'import numpy as np\n'), ((9992, 10039), 'numpy.expand_dims', 'np.expand_dims', (['picked_box_probs[:, 4]'], {'axis': '(-1)'}), '(picked_box_probs[:, 4], axis=-1)\n', (10006, 10039), True, 'import numpy as np\n'), ((9925, 9948), 'numpy.array', 'np.array', (['picked_labels'], {}), '(picked_labels)\n', (9933, 9948), True, 'import numpy as np\n')] |
'''
------------------------------------------------------------------------
This program reads in the output from TPI.py and creates table of
percentage changes between the baseline and policy results.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
import os
from ogindia.utils import safe_read_pickle
def dump_diff_output(baseline_dir, policy_dir):
    '''
    Read the pickled steady-state (SS) and time-path (TPI) results from a
    baseline run and a reform (policy) run, then compute percentage
    differences between the two for each year in the 10-year budget
    window, for the whole window, and in the steady state.

    Args:
        baseline_dir (str): path for directory with baseline policy
            results
        policy_dir (str): path for directory with reform policy results

    Returns:
        pct_changes (Numpy array): percentage changes in macro variables
            from baseline to reform, one column per budget-window year
            plus the whole window (col 10) and the SS (col 11)
        baseline_macros (Numpy array): time path of macro variables from
            the baseline equilibrium; row order is Y, C, I, L, w, r,
            total_revenue
        policy_macros (Numpy array): time path of the same variables from
            the reform equilibrium
    '''
    # Read the time-path macro output for both runs
    tpi_baseline_dir = os.path.join(baseline_dir, "TPI")
    tpi_policy_dir = os.path.join(policy_dir, "TPI")
    if not os.path.exists(tpi_policy_dir):
        os.mkdir(tpi_policy_dir)
    tpi_macro_vars_policy = safe_read_pickle(
        os.path.join(tpi_policy_dir, "TPI_vars.pkl"))
    tpi_macro_vars_baseline = safe_read_pickle(
        os.path.join(tpi_baseline_dir, "TPI_vars.pkl"))
    T = len(tpi_macro_vars_baseline['C'])
    # Stack the macro series into (7, T) arrays, one row per variable
    macro_keys = ['Y', 'C', 'I', 'L', 'w', 'r', 'total_revenue']
    baseline_macros = np.zeros((7, T))
    policy_macros = np.zeros((7, T))
    for row, key in enumerate(macro_keys):
        baseline_macros[row, :] = tpi_macro_vars_baseline[key][:T]
        policy_macros[row, :] = tpi_macro_vars_policy[key][:T]
    pct_changes = np.zeros((7, 12))
    # pct changes for each year in the budget window
    # NOTE(review): the time-path changes below are expressed relative to
    # the *policy* values, while the SS changes further down are relative
    # to the *baseline* values -- kept as-is to preserve existing output.
    pct_changes[:, :10] = ((policy_macros - baseline_macros) /
                           policy_macros)[:, :10]
    # pct changes over the entire budget window
    pct_changes[:, 10] = ((policy_macros[:, :10].sum(axis=1) -
                           baseline_macros[:, :10].sum(axis=1)) /
                          policy_macros[:, :10].sum(axis=1))
    # Load SS results
    ss_policy = safe_read_pickle(os.path.join(policy_dir, "SS", "SS_vars.pkl"))
    ss_baseline = safe_read_pickle(
        os.path.join(baseline_dir, "SS", "SS_vars.pkl"))
    # pct changes in macro aggregates in the SS (relative to baseline)
    ss_keys = ['Yss', 'Css', 'Iss', 'Lss', 'wss', 'rss', 'total_revenue_ss']
    for row, key in enumerate(ss_keys):
        pct_changes[row, 11] = ((ss_policy[key] - ss_baseline[key]) /
                                ss_baseline[key])
    return pct_changes, baseline_macros, policy_macros
| [
"os.path.exists",
"os.path.join",
"numpy.zeros",
"os.mkdir",
"ogindia.utils.safe_read_pickle"
] | [((1335, 1368), 'os.path.join', 'os.path.join', (['baseline_dir', '"""TPI"""'], {}), "(baseline_dir, 'TPI')\n", (1347, 1368), False, 'import os\n'), ((1390, 1421), 'os.path.join', 'os.path.join', (['policy_dir', '"""TPI"""'], {}), "(policy_dir, 'TPI')\n", (1402, 1421), False, 'import os\n'), ((1531, 1575), 'os.path.join', 'os.path.join', (['tpi_policy_dir', '"""TPI_vars.pkl"""'], {}), "(tpi_policy_dir, 'TPI_vars.pkl')\n", (1543, 1575), False, 'import os\n'), ((1650, 1694), 'ogindia.utils.safe_read_pickle', 'safe_read_pickle', (['tpi_macro_vars_policy_path'], {}), '(tpi_macro_vars_policy_path)\n', (1666, 1694), False, 'from ogindia.utils import safe_read_pickle\n'), ((1730, 1776), 'os.path.join', 'os.path.join', (['tpi_baseline_dir', '"""TPI_vars.pkl"""'], {}), "(tpi_baseline_dir, 'TPI_vars.pkl')\n", (1742, 1776), False, 'import os\n'), ((1855, 1901), 'ogindia.utils.safe_read_pickle', 'safe_read_pickle', (['tpi_macro_vars_baseline_path'], {}), '(tpi_macro_vars_baseline_path)\n', (1871, 1901), False, 'from ogindia.utils import safe_read_pickle\n'), ((1967, 1983), 'numpy.zeros', 'np.zeros', (['(7, T)'], {}), '((7, T))\n', (1975, 1983), True, 'import numpy as np\n'), ((2444, 2460), 'numpy.zeros', 'np.zeros', (['(7, T)'], {}), '((7, T))\n', (2452, 2460), True, 'import numpy as np\n'), ((2891, 2908), 'numpy.zeros', 'np.zeros', (['(7, 12)'], {}), '((7, 12))\n', (2899, 2908), True, 'import numpy as np\n'), ((3347, 3392), 'os.path.join', 'os.path.join', (['policy_dir', '"""SS"""', '"""SS_vars.pkl"""'], {}), "(policy_dir, 'SS', 'SS_vars.pkl')\n", (3359, 3392), False, 'import os\n'), ((3409, 3441), 'ogindia.utils.safe_read_pickle', 'safe_read_pickle', (['ss_policy_path'], {}), '(ss_policy_path)\n', (3425, 3441), False, 'from ogindia.utils import safe_read_pickle\n'), ((3465, 3512), 'os.path.join', 'os.path.join', (['baseline_dir', '"""SS"""', '"""SS_vars.pkl"""'], {}), "(baseline_dir, 'SS', 'SS_vars.pkl')\n", (3477, 3512), False, 'import os\n'), ((3531, 3565), 
'ogindia.utils.safe_read_pickle', 'safe_read_pickle', (['ss_baseline_path'], {}), '(ss_baseline_path)\n', (3547, 3565), False, 'from ogindia.utils import safe_read_pickle\n'), ((1433, 1463), 'os.path.exists', 'os.path.exists', (['tpi_policy_dir'], {}), '(tpi_policy_dir)\n', (1447, 1463), False, 'import os\n'), ((1473, 1497), 'os.mkdir', 'os.mkdir', (['tpi_policy_dir'], {}), '(tpi_policy_dir)\n', (1481, 1497), False, 'import os\n')] |
import os
import argparse
import pickle
import numpy as np
import torch
import torch.nn.functional as F
import dnnlib
import legacy
from util.utilgan import basename, calc_init_res
try: # progress bar for notebooks
get_ipython().__class__.__name__
from util.progress_bar import ProgressIPy as ProgressBar
except: # normal console
from util.progress_bar import ProgressBar
# Command-line interface: resize / reconstruct / make-conditional a pickled model
parser = argparse.ArgumentParser()
parser.add_argument('--source', required=True, help='Source model path')
parser.add_argument('--out_dir', default='./', help='Output directory for reduced/reconstructed model')
parser.add_argument('-r', '--reconstruct', action='store_true', help='Reconstruct model (add internal arguments)')
parser.add_argument('-s', '--res', default=None, help='Target resolution in format X-Y')
parser.add_argument('-a', '--alpha', action='store_true', help='Add alpha channel for RGBA processing')
parser.add_argument('-l', '--labels', default=0, type=int, help='Make conditional model')
parser.add_argument('-v', '--verbose', action='store_true')
a = parser.parse_args()
# '--res X-Y' arrives as a string; convert to a [height, width] int list
if a.res is not None:
    a.res = [int(s) for s in a.res.split('-')][::-1]
    if len(a.res) == 1: a.res = a.res + a.res  # a single value means a square output
def load_pkl(filepath):
    """Load a pickled network bundle from a local path or URL.

    Returns the dict produced by legacy.load_network_pkl, typically
    keyed by 'G', 'D', 'G_ema', 'training_set_kwargs', 'augment_pipe'.
    """
    with dnnlib.util.open_url(filepath) as stream:
        return legacy.load_network_pkl(stream, custom=False)
def save_pkl(nets, filepath):
    """Serialize the network dict *nets* to *filepath* via pickle."""
    with open(filepath, 'wb') as fout:
        pickle.dump(nets, fout)  # default protocol, matching the original behavior
def create_model(net_in, data_shape, labels=0, full=False, custom=False):
    """Build fresh networks for *data_shape* = [channels, H, W].

    Mutates net_in['G_ema'] in place (resolution, channel count, init_res,
    and optionally label dimension) before delegating to
    legacy.create_networks.
    """
    init_res, resolution, _ = calc_init_res(data_shape[1:])
    gs = net_in['G_ema']
    gs.img_resolution = resolution
    gs.img_channels = data_shape[0]
    gs.init_res = init_res
    if labels > 0:
        gs.c_dim = labels
    return legacy.create_networks(net_in, full=full, custom=custom)
def add_channel(x, subnet):  # [BCHW]
    """Grow tensor *x* by one slot filled with the constant 1.

    For the discriminator ('D') the pad lands on the "second" dim
    (dim 1 of a 4D weight); for other subnets the trailing pad pair
    targets the remaining dim as in the original layout.
    """
    rank = len(x.shape)
    if subnet == 'D':  # pad second dim [1]
        pads = [0, 0] * (rank - 2) + [0, 1, 0, 0]
    else:  # pad last dim [-1]
        pads = [0, 0] * (rank - 1) + [0, 1]
    return F.pad(x, pads, 'constant', 1)
def pad_up_to(x, size, type='side'):
    """Center *x* inside a zero tensor of shape *size* via symmetric padding.

    Returns x unchanged when the shapes already match; for odd gaps the
    extra element goes to the side the original pad-order reversal put it on.
    (The *type* parameter is kept for interface compatibility; it is unused.)
    """
    if list(x.shape) == list(size):
        return x
    pads = []
    for dim, target in enumerate(size):
        gap = target - x.shape[dim]
        half = gap // 2
        pads += [half, gap - half]
    # F.pad expects pairs starting from the last dim, hence the reversal
    return F.pad(x, pads[::-1], 'constant', 0)
def copy_vars(src_net, tgt_net, add_alpha=False, tile=False) -> None:
    """Copy parameters from src_net into tgt_net for the G_ema/G/D subnets.

    For tensors whose shapes differ between source and target:
      * add_alpha=True : grow by one constant-1 slot via add_channel
        (used when adding an alpha channel to an RGB model).
      * tile=True      : repeat the source tensor to fill the target shape
        (used when making the model label-conditional).
      * otherwise      : center crop/zero-pad via pad_up_to (resolution change).
    Modifies tgt_net in place; reads the module-level `a` for verbosity.
    """
    for subnet in ['G_ema', 'G', 'D']:
        if subnet in src_net.keys() and subnet in tgt_net.keys():
            src_dict = src_net[subnet].state_dict()
            tgt_dict = tgt_net[subnet].state_dict()
            # only parameters present in both state dicts are transferred
            vars = [name for name in src_dict.keys() if name in tgt_dict.keys()]
            pbar = ProgressBar(len(vars))
            for name in vars:
                source_shape = src_dict[name].shape
                target_shape = tgt_dict[name].shape
                if source_shape == target_shape:
                    # identical shape: straight in-place copy, gradients frozen
                    tgt_dict[name].copy_(src_dict[name]).requires_grad_(False)
                else:
                    if add_alpha:
                        update = add_channel(src_dict[name], subnet)
                        assert target_shape == update.shape, 'Diff shapes yet: src %s tgt %s' % (str(update.shape), str(target_shape))
                        tgt_dict[name].copy_(update).requires_grad_(False)
                    elif tile:
                        assert len(source_shape) == len(target_shape), "Diff shape ranks: src %s tgt %s" % (str(source_shape), str(target_shape))
                        # integer repeat factor per axis (target assumed a multiple of source)
                        tile_count = [target_shape[i] // source_shape[i] for i in range(len(source_shape))]
                        update = np.tile(src_dict[name], tile_count) # [512,512] => [1024,512]
                        if a.verbose is True: print(name, tile_count, source_shape, '=>', target_shape, '\n\n') # G_mapping/Dense0, D/Output
                        tgt_dict[name].copy_(torch.from_numpy(update)).requires_grad_(False)
                    else: # crop/pad
                        update = pad_up_to(src_dict[name], target_shape)
                        if a.verbose is True: print(name, source_shape, '=>', update.shape, '\n\n')
                        tgt_dict[name].copy_(update).requires_grad_(False)
                pbar.upd(name)
def main():
    """Load the source model, apply the requested transformation(s), save result.

    Driven entirely by the parsed CLI args `a`: resolution change and/or
    alpha channel (-s/-a), label conditioning (-l), or a plain Gs-only
    re-save / reconstruction (-r).
    """
    net_in = load_pkl(a.source)
    Gs_in = net_in['G_ema']
    if hasattr(Gs_in, 'output_shape'):
        # model previously processed by this tool carries an explicit shape
        out_shape = Gs_in.output_shape
        print(' Loading model', a.source, out_shape)
        _, res_in, _ = calc_init_res(out_shape[1:])
    else: # original model
        res_in = Gs_in.img_resolution
        out_shape = [None, Gs_in.img_channels, res_in, res_in]
    save_full = False
    # netdict = net_in['G_ema'].state_dict()
    # for k in netdict.keys():
        # print(k, netdict[k].shape)
    if a.res is not None or a.alpha is True:
        if a.res is None: a.res = out_shape[2:]
        colors = 4 if a.alpha is True else out_shape[1]
        _, res_out, _ = calc_init_res([colors, *a.res])
        if res_in != res_out or a.alpha is True: # add or remove layers
            assert 'G' in net_in.keys() and 'D' in net_in.keys(), " !! G/D subnets not found in source model !!"
            data_shape = [colors, res_out, res_out]
            print(' Reconstructing full model with shape', data_shape)
            net_out = create_model(net_in, data_shape, full=True)
            copy_vars(net_in, net_out, add_alpha=True)
            save_full = True
        if a.res[0] != res_out or a.res[1] != res_out: # crop or pad layers
            # non-square / non-power-of-two target: second pass crops or pads
            data_shape = [colors, *a.res]
            net_out = create_model(net_in, data_shape, full=True)
            copy_vars(net_in, net_out)
    if a.labels > 0:
        assert 'G' in net_in.keys() and 'D' in net_in.keys(), " !! G/D subnets not found in source model !!"
        print(' Reconstructing full model with labels', a.labels)
        data_shape = out_shape[1:]
        net_out = create_model(net_in, data_shape, labels=a.labels, full=True)
        copy_vars(net_in, net_out, tile=True)
        save_full = True
    if a.labels == 0 and a.res is None and a.alpha is not True:
        if a.reconstruct is True:
            print(' Reconstructing Gs model with same size')
            data_shape = out_shape[1:]
            net_out = create_model(net_in, data_shape, full=False) # FULL=TRUE - to enable full customization of foreign models ??
        else:
            # no transformation requested: just re-save the Gs network
            net_out = dict(G_ema = Gs_in)
    # compose the output filename from the applied transformations
    out_name = basename(a.source)
    if a.res is not None: out_name += '-%dx%d' % (a.res[1], a.res[0])
    if a.alpha is True: out_name += 'a'
    if a.labels > 0: out_name += '-c%d' % a.labels
    if not save_full: out_name += '-Gs'
    save_pkl(net_out, os.path.join(a.out_dir, '%s.pkl' % out_name))
    print(' Done')
if __name__ == '__main__':
main()
| [
"torch.nn.functional.pad",
"numpy.tile",
"legacy.load_network_pkl",
"pickle.dump",
"argparse.ArgumentParser",
"dnnlib.util.open_url",
"os.path.join",
"torch.from_numpy",
"util.utilgan.calc_init_res",
"legacy.create_networks",
"util.utilgan.basename"
] | [((398, 423), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (421, 423), False, 'import argparse\n'), ((1665, 1694), 'util.utilgan.calc_init_res', 'calc_init_res', (['data_shape[1:]'], {}), '(data_shape[1:])\n', (1678, 1694), False, 'from util.utilgan import basename, calc_init_res\n'), ((1905, 1961), 'legacy.create_networks', 'legacy.create_networks', (['net_in'], {'full': 'full', 'custom': 'custom'}), '(net_in, full=full, custom=custom)\n', (1927, 1961), False, 'import legacy\n'), ((2240, 2272), 'torch.nn.functional.pad', 'F.pad', (['x', 'padding', '"""constant"""', '(1)'], {}), "(x, padding, 'constant', 1)\n", (2245, 2272), True, 'import torch.nn.functional as F\n'), ((2534, 2572), 'torch.nn.functional.pad', 'F.pad', (['x', 'padding[::-1]', '"""constant"""', '(0)'], {}), "(x, padding[::-1], 'constant', 0)\n", (2539, 2572), True, 'import torch.nn.functional as F\n'), ((6715, 6733), 'util.utilgan.basename', 'basename', (['a.source'], {}), '(a.source)\n', (6723, 6733), False, 'from util.utilgan import basename, calc_init_res\n'), ((1240, 1270), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['filepath'], {}), '(filepath)\n', (1260, 1270), False, 'import dnnlib\n'), ((1292, 1332), 'legacy.load_network_pkl', 'legacy.load_network_pkl', (['f'], {'custom': '(False)'}), '(f, custom=False)\n', (1315, 1332), False, 'import legacy\n'), ((1492, 1515), 'pickle.dump', 'pickle.dump', (['nets', 'file'], {}), '(nets, file)\n', (1503, 1515), False, 'import pickle\n'), ((4745, 4773), 'util.utilgan.calc_init_res', 'calc_init_res', (['out_shape[1:]'], {}), '(out_shape[1:])\n', (4758, 4773), False, 'from util.utilgan import basename, calc_init_res\n'), ((5216, 5247), 'util.utilgan.calc_init_res', 'calc_init_res', (['[colors, *a.res]'], {}), '([colors, *a.res])\n', (5229, 5247), False, 'from util.utilgan import basename, calc_init_res\n'), ((6969, 7013), 'os.path.join', 'os.path.join', (['a.out_dir', "('%s.pkl' % out_name)"], {}), "(a.out_dir, '%s.pkl' % 
out_name)\n", (6981, 7013), False, 'import os\n'), ((3904, 3939), 'numpy.tile', 'np.tile', (['src_dict[name]', 'tile_count'], {}), '(src_dict[name], tile_count)\n', (3911, 3939), True, 'import numpy as np\n'), ((4152, 4176), 'torch.from_numpy', 'torch.from_numpy', (['update'], {}), '(update)\n', (4168, 4176), False, 'import torch\n')] |
from __future__ import absolute_import
import unittest
import numpy as np
from bilby.core.utils import create_frequency_series, create_time_series
from bilby.core.series import CoupledTimeAndFrequencySeries
class TestCoupledTimeAndFrequencySeries(unittest.TestCase):
    """Checks for the coupled duration/sampling-frequency/start-time
    bookkeeping of CoupledTimeAndFrequencySeries, including the array
    setters and the error paths for missing attributes."""

    def setUp(self):
        """Build a fresh series with known parameters before each test."""
        self.duration = 2
        self.sampling_frequency = 4096
        self.start_time = -1
        self.series = CoupledTimeAndFrequencySeries(
            duration=self.duration,
            sampling_frequency=self.sampling_frequency,
            start_time=self.start_time)

    def tearDown(self):
        """Drop per-test state."""
        del self.duration
        del self.sampling_frequency
        del self.start_time
        del self.series

    def test_repr(self):
        wanted = (
            f'CoupledTimeAndFrequencySeries(duration={self.series.duration}, '
            f'sampling_frequency={self.series.sampling_frequency}, '
            f'start_time={self.series.start_time})')
        self.assertEqual(wanted, repr(self.series))

    def test_duration_from_init(self):
        self.assertEqual(self.duration, self.series.duration)

    def test_sampling_from_init(self):
        self.assertEqual(
            self.sampling_frequency, self.series.sampling_frequency)

    def test_start_time_from_init(self):
        self.assertEqual(self.start_time, self.series.start_time)

    def test_frequency_array_type(self):
        self.assertIsInstance(self.series.frequency_array, np.ndarray)

    def test_time_array_type(self):
        self.assertIsInstance(self.series.time_array, np.ndarray)

    def test_frequency_array_from_init(self):
        reference = create_frequency_series(
            sampling_frequency=self.sampling_frequency,
            duration=self.duration)
        self.assertTrue(
            np.array_equal(reference, self.series.frequency_array))

    def test_time_array_from_init(self):
        reference = create_time_series(
            sampling_frequency=self.sampling_frequency,
            duration=self.duration,
            starting_time=self.start_time)
        self.assertTrue(np.array_equal(reference, self.series.time_array))

    def test_frequency_array_setter(self):
        freq_hz = 100
        span = 3
        replacement = create_frequency_series(
            sampling_frequency=freq_hz, duration=span)
        self.series.frequency_array = replacement
        self.assertTrue(
            np.array_equal(replacement, self.series.frequency_array))
        # inferred sampling frequency is only approximate
        self.assertLessEqual(
            np.abs(freq_hz - self.series.sampling_frequency), 1)
        self.assertAlmostEqual(span, self.series.duration)
        self.assertAlmostEqual(self.start_time, self.series.start_time)

    def test_time_array_setter(self):
        freq_hz = 100
        span = 3
        t0 = 4
        replacement = create_time_series(
            sampling_frequency=freq_hz, duration=span, starting_time=t0)
        self.series.time_array = replacement
        self.assertTrue(np.array_equal(replacement, self.series.time_array))
        self.assertAlmostEqual(
            freq_hz, self.series.sampling_frequency, places=1)
        self.assertAlmostEqual(span, self.series.duration, places=1)
        self.assertAlmostEqual(t0, self.series.start_time, places=1)

    def test_time_array_without_sampling_frequency(self):
        self.series.sampling_frequency = None
        self.series.duration = 4
        with self.assertRaises(ValueError):
            _ = self.series.time_array

    def test_time_array_without_duration(self):
        self.series.sampling_frequency = 4096
        self.series.duration = None
        with self.assertRaises(ValueError):
            _ = self.series.time_array

    def test_frequency_array_without_sampling_frequency(self):
        self.series.sampling_frequency = None
        self.series.duration = 4
        with self.assertRaises(ValueError):
            _ = self.series.frequency_array

    def test_frequency_array_without_duration(self):
        self.series.sampling_frequency = 4096
        self.series.duration = None
        with self.assertRaises(ValueError):
            _ = self.series.frequency_array
if __name__ == '__main__':
unittest.main()
| [
"numpy.abs",
"bilby.core.series.CoupledTimeAndFrequencySeries",
"numpy.array_equal",
"unittest.main",
"bilby.core.utils.create_frequency_series",
"bilby.core.utils.create_time_series"
] | [((4377, 4392), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4390, 4392), False, 'import unittest\n'), ((408, 538), 'bilby.core.series.CoupledTimeAndFrequencySeries', 'CoupledTimeAndFrequencySeries', ([], {'duration': 'self.duration', 'sampling_frequency': 'self.sampling_frequency', 'start_time': 'self.start_time'}), '(duration=self.duration, sampling_frequency=\n self.sampling_frequency, start_time=self.start_time)\n', (437, 538), False, 'from bilby.core.series import CoupledTimeAndFrequencySeries\n'), ((1654, 1749), 'bilby.core.utils.create_frequency_series', 'create_frequency_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration'}), '(sampling_frequency=self.sampling_frequency,\n duration=self.duration)\n', (1677, 1749), False, 'from bilby.core.utils import create_frequency_series, create_time_series\n'), ((1899, 2021), 'bilby.core.utils.create_time_series', 'create_time_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration', 'starting_time': 'self.start_time'}), '(sampling_frequency=self.sampling_frequency, duration=\n self.duration, starting_time=self.start_time)\n', (1917, 2021), False, 'from bilby.core.utils import create_frequency_series, create_time_series\n'), ((2252, 2346), 'bilby.core.utils.create_frequency_series', 'create_frequency_series', ([], {'sampling_frequency': 'new_sampling_frequency', 'duration': 'new_duration'}), '(sampling_frequency=new_sampling_frequency, duration\n =new_duration)\n', (2275, 2346), False, 'from bilby.core.utils import create_frequency_series, create_time_series\n'), ((2918, 3037), 'bilby.core.utils.create_time_series', 'create_time_series', ([], {'sampling_frequency': 'new_sampling_frequency', 'duration': 'new_duration', 'starting_time': 'new_start_time'}), '(sampling_frequency=new_sampling_frequency, duration=\n new_duration, starting_time=new_start_time)\n', (2936, 3037), False, 'from bilby.core.utils import create_frequency_series, 
create_time_series\n'), ((1783, 1836), 'numpy.array_equal', 'np.array_equal', (['expected', 'self.series.frequency_array'], {}), '(expected, self.series.frequency_array)\n', (1797, 1836), True, 'import numpy as np\n'), ((2066, 2114), 'numpy.array_equal', 'np.array_equal', (['expected', 'self.series.time_array'], {}), '(expected, self.series.time_array)\n', (2080, 2114), True, 'import numpy as np\n'), ((2437, 2501), 'numpy.array_equal', 'np.array_equal', (['new_frequency_array', 'self.series.frequency_array'], {}), '(new_frequency_array, self.series.frequency_array)\n', (2451, 2501), True, 'import numpy as np\n'), ((2545, 2608), 'numpy.abs', 'np.abs', (['(new_sampling_frequency - self.series.sampling_frequency)'], {}), '(new_sampling_frequency - self.series.sampling_frequency)\n', (2551, 2608), True, 'import numpy as np\n'), ((3130, 3184), 'numpy.array_equal', 'np.array_equal', (['new_time_array', 'self.series.time_array'], {}), '(new_time_array, self.series.time_array)\n', (3144, 3184), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
# Pre-processing pipeline for a loan-default dataset (train/testA CSVs).
import sklearn.preprocessing as skp
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import time
FILE_PATH = './data/train.csv'  # training split
TEST_PATH = './data/testA.csv'  # evaluation split
label = 'isDefault'  # prediction target column
rng = np.random.RandomState(1)
# month abbreviations, used to parse 'Mon-YYYY' strings in earliesCreditLine
m = [
    'Jan',
    'Feb',
    'Mar',
    'Apr',
    'May',
    'Jun',
    'Jul',
    'Aug',
    'Sep',
    'Oct',
    'Nov',
    'Dec']
# columns removed before modeling
drop = [
    'id',  # random user identifier, carries no information
    'grade',  # loan grade, implied by subGrade, redundant
    'issueDate',  # loan issue date, weakly related
    'earliesCreditLine',  # earliest credit-report year, weakly related
    'policyCode',  # policy code, only one unique value
    'postCode',  # postal code; regionCode covers it and it is incomplete
    'applicationType',
    'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9', 'n10', 'n11', 'n12', 'n13',
    'installment',
    'ficoRangeHigh',
    'pubRec',
    'pubRecBankruptcies'
    ]
num_fea_std_dict = {
'loanAmnt': [13764.531844126117, 8229.833179234107],
'interestRate': [13.117565814391591, 4.485626513250098],
'installment': [416.05665872775666, 240.01393014758233],
'employmentTitle': [69828.22155017142, 104265.65461932094],
'employmentLength': [5.871908993627371, 3.5792858114921042],
'annualIncome': [69874.42387101255, 37141.55533626742],
'dti': [17.92615427617815, 8.3685205989137],
'delinquency_2years': [0.21005529120087593, 0.4926942628937087],
'ficoRangeLow': [694.9133418345829, 28.09035002383393],
'ficoRangeHigh': [698.9133418345829, 28.09035002383393],
'openAcc': [10.387779371072927, 4.100794672071401],
'pubRec': [0.17802509687951215, 0.4166958083554704],
'pubRecBankruptcies': [0.123799033106909, 0.32935214058618695],
'revolBal': [13738.190533842086, 11248.653462696366],
'revolUtil': [53.279701695453504, 23.885004346671042],
'totalAcc': [22.20439716382437, 9.568257649023632],
'title': [525.9512732854022, 2806.4384949171076],
'n0': [0.337752659948364, 0.776477646488613],
'n1': [3.2983773198570336, 1.7260141552580641],
'n2': [5.0793692535935975, 2.3933368509672306],
'n3': [5.0793692535935975, 2.3933368509672306],
'n4': [4.1818936616072655, 2.1211244657031467],
'n5': [7.108668793632994, 3.5937709313703174],
'n6': [7.530021484675423, 5.615899874308533],
'n7': [7.320349907462079, 3.253273580312179],
'n8': [12.772766892098634, 5.9698320597743475],
'n9': [5.058337757621767, 2.373023693113698],
'n10': [10.385319962885596, 3.989022339994767],
'n14': [1.9686892694052598, 1.5044985893218008]
}
num_fea_std_mm_dict = {
'loanAmnt': [3.187849326286761, -1.6117619343239902],
'interestRate': [3.2041085326996472, -1.7405742077118576],
'installment': [3.338070172758736, -1.6680975911755411],
'employmentTitle': [2.9590067753016127, -0.6697145076690663],
'employmentLength': [1.153328128510571, -1.6405253178648889],
'annualIncome': [5.630501314111197, -1.8813004258543433],
'dti': [3.9557584082566506, -2.2615890171358264],
'delinquency_2years': [3.6329725016207006, -0.42634003888572203],
'ficoRangeLow': [3.029035170199849, -2.310876930316126],
'ficoRangeHigh': [3.029035170199849, -2.310876930316126],
'openAcc': [4.050975958895274, -2.533113750322406],
'pubRec': [4.37243395922576, -0.4272303519973122],
'pubRecBankruptcies': [2.6603773254171434, -0.3758865294956615],
'revolBal': [5.212606972079556, -1.2213186742219155],
'revolUtil': [2.939932406348229, -2.230675821621918],
'totalAcc': [3.7410784856768884, -2.1116067214061776],
'title': [9.182117752940716, -0.18740880095465515],
'n0': [4.716487791519885, -0.4349805322480937],
'n1': [3.3033464197114255, -1.9109792986395748],
'n2': [3.309450879513847, -2.1222960117548215],
'n3': [3.309450879513847, -2.1222960117548215],
'n4': [3.2143829599045013, -1.971545625551483],
'n5': [3.308872889634287, -1.9780528390334644],
'n6': [3.645004179823448, -1.3408396967908138],
'n7': [3.2827396248406258, -2.250148881349115],
'n8': [3.220732663060564, -1.9720432290592158],
'n9': [2.9252393317952596, -2.131600191056082],
'n10': [3.1623488067833065, -2.6034750065850023],
'n14': [3.344177765472521, -1.30853513813709]
}
def time_cost(func):
    """Decorator that prints the wall-clock runtime of *func* on every call.

    Fixed: the wrapper now carries functools.wraps, so the decorated
    function keeps its __name__ / __doc__ (the original wrapper clobbered
    them, which breaks introspection and stacked decorators).
    """
    from functools import wraps  # local import: keeps the block self-contained

    @wraps(func)
    def wrapper(*args, **kw):
        start = time.time()
        re = func(*args, **kw)
        end = time.time()
        print(
            "Function: {}, Cost: {:.3f}sec".format(
                func.__name__,
                (end - start)))
        return re
    return wrapper
def employmentLength_to_value(data):
    """Map an employment-length string to an integer number of years.

    '10+ years' -> 10, '< 1 year' -> 0, 'N years' -> N; NaN passes through
    unchanged so downstream imputation can handle it.
    """
    if pd.isnull(data):
        return data
    if data == '10+ years':
        return 10
    if data == '< 1 year':
        return 0
    return int(data.split(' ')[0])
def issueDate_to_value(data):
    """Convert a 'YYYY-MM-DD' issue date into days elapsed since 2000-01-01.

    NaN passes through unchanged.
    """
    if pd.isnull(data):
        return data
    anchor = pd.to_datetime('2000-01-01', format='%Y-%m-%d')
    stamp = pd.to_datetime(str(data), format='%Y-%m-%d')
    return (stamp - anchor).days
def earliesCreditLine_to_value(data):
    """Convert a 'Mon-YYYY' string (e.g. 'Aug-2001') to days since 1900-01-01.

    Month abbreviations are resolved through the module-level list `m`;
    NaN passes through unchanged.
    """
    if pd.isnull(data):
        return data
    parts = str(data).split('-')
    month_num = m.index(parts[0]) + 1
    stamp = pd.to_datetime(parts[1] + '-' + str(month_num) + '-01',
                           format='%Y-%m-%d')
    anchor = pd.to_datetime('1900-01-01', format='%Y-%m-%d')
    return (stamp - anchor).days
def data_clean(data, fea, sigma=3):
    """Flag sigma-rule outliers of column *fea* in place.

    Adds a '<fea>_outlier' column holding 'T' for values farther than
    sigma sample standard deviations (ddof=1) from the mean, 'F' otherwise,
    and returns the (mutated) frame.
    """
    center = np.mean(data[fea])
    spread = np.std(data[fea], ddof=1)
    low = center - sigma * spread
    high = center + sigma * spread
    data[fea + '_outlier'] = data[fea].apply(
        lambda v: 'T' if v > high or v < low else 'F')
    return data
def standardization(data_frame, columns, max_min=False):
    """Z-score each listed column in place (population std, ddof=0).

    When max_min is True the z-scored column is additionally rescaled to
    [0, 1] via min-max normalization. Returns the mutated frame.
    """
    for col in columns:
        mu = np.mean(data_frame[col])
        sd = np.std(data_frame[col])
        data_frame[col] = data_frame[col].apply(
            lambda v: np.divide(v - mu, sd))
        if max_min:
            hi = np.max(data_frame[col])
            lo = np.min(data_frame[col])
            data_frame[col] = data_frame[col].apply(
                lambda v: np.divide(v - lo, hi - lo))
    return data_frame
def standardization_test(data_frame, columns, max_min=False):
    """Standardize test-set columns using the *training-set* statistics.

    Reads the module-level dicts num_fea_std_dict ([mean, std]) and
    num_fea_std_mm_dict ([max, min]) so the test split is transformed
    exactly like the training split. Returns the mutated frame.
    """
    for col in columns:
        mu, sd = num_fea_std_dict[col]
        data_frame[col] = data_frame[col].apply(
            lambda v: np.divide(v - mu, sd))
        if max_min:
            hi, lo = num_fea_std_mm_dict[col]
            data_frame[col] = data_frame[col].apply(
                lambda v: np.divide(v - lo, hi - lo))
    return data_frame
@time_cost
def load_data(file_path=FILE_PATH, save_flag=False):
    """Load the training CSV and return (dataset, labels) numpy arrays.

    Pipeline: drop uninformative columns, parse employmentLength, remove
    3-sigma outlier rows per numerical column, impute NaNs, standardize
    numerical columns to [0, 1], one-hot encode categorical columns.

    Args:
        file_path: path to the training CSV.
        save_flag: if True, also write the pre-processed frame to disk.
    """
    print('Loading Dataset ...')
    ori_data = pd.read_csv(file_path)
    print('Done.')
    print('*' * 20)
    ori_data = ori_data.drop(columns=drop)
    # categorical features get one-hot encoding; everything else is numeric
    cate_fea = [
        'term',
        'subGrade',
        'homeOwnership',
        'verificationStatus',
        'purpose',
        'regionCode',
        'initialListStatus']
    num_fea = list(filter(lambda x: x not in cate_fea, list(ori_data.columns)))
    num_fea.remove(label)
    print('Transforming Datetime Items ...')
    ori_data['employmentLength'] = ori_data['employmentLength'].apply(
        employmentLength_to_value)
    # ori_data['issueDate'] = ori_data['issueDate'].apply(issueDate_to_value)
    # ori_data['earliesCreditLine'] = ori_data['earliesCreditLine'].apply(earliesCreditLine_to_value)
    print('Done.')
    print('*' * 20)
    print('Pre-processing Dataset ...')
    print('-' * 20)
    data = ori_data.copy()
    print('Cleaning Dataset ...')
    # drop rows flagged as 3-sigma outliers, one numerical column at a time
    for fea in num_fea:
        data = data_clean(data, fea)
        data = data[data[fea + '_outlier'] == 'F']
        data = data.drop(columns=[fea + '_outlier'])
        data = data.reset_index(drop=True)
        # x1 = data[fea + '_outlier'].value_counts()
        # x2 = data.groupby(fea + '_outlier')['isDefault'].sum()
        # print(x1)
        # print(x2)
        # print('- Default Proportion -')
        # print('D/A: {:.2f}%'.format(x2[0] / x1[0] * 100))
        # print('- Outlier Proportion -')
        # if len(x1) < 2:
        #     print('Have No Outlier.')
        # else:
        #     a = x1[1] / x1[0] * 100
        #     b = x2[1] / x2[0] * 100
        #     print(fea + ': {:.2f}%'.format(a))
        #     print('isDefault: {:.2f}%'.format(b))
        #     print('Reject!') if np.abs(a - b) > 5 else print('Accept!')
        # print('-' * 20)
    print('Cleaning Done.')
    print('-' * 20)
    print('Filling NaN Items ...')
    data[num_fea] = data[num_fea].fillna(data[num_fea].median())
    # NOTE(review): mode() returns a DataFrame indexed 0..k and fillna aligns
    # on index, so this likely only fills a few leading rows — probably
    # intended data[cate_fea].mode().iloc[0]; verify against the data.
    data[cate_fea] = data[cate_fea].fillna(data[cate_fea].mode())
    print('Filling Done.')
    print('-' * 20)
    print('Standardizing Numerical Items ...')
    data = standardization(data, columns=num_fea, max_min=True)
    print('Coding Done.')
    print('-' * 20)
    # print("Printing Heatmap of Feature's Correlation ...")
    # fig = plt.figure()
    # plt.title("Heat Map of some Feature's Correlation")
    # names = num_fea + [label]
    # corr = abs(data[names].corr())
    # ax = sns.heatmap(corr, square=True, vmax=0.8, xticklabels=names, yticklabels=names)
    # plt.savefig('./data/heatmap_fig.png', dpi=400, bbox_inches='tight')
    # print('Printing Done.')
    # print('-' * 20)
    print('One-hot Coding categorized Items ...')
    data = pd.get_dummies(data, columns=cate_fea)
    print('Coding Done.')
    print('-' * 20)
    print('Done.')
    print('*' * 20)
    if save_flag:
        print('Saving Pre-processed Dataset ...')
        data.to_csv('./data/pp_dataset.csv', index=False, sep=',')
        print('Done.')
        print('*' * 20)
    labels = np.asarray(data[label])
    data = data.drop(columns=label)
    dataset = np.asarray(data)
    print('Shape of Dataset: {}'.format(dataset.shape))
    return dataset, labels
@time_cost
def load_test(file_path=TEST_PATH):
    """Load the evaluation CSV and return (testset, id_list) numpy arrays.

    Mirrors load_data but keeps every row (no outlier removal) and uses
    the frozen training-set statistics via standardization_test so the
    test split is scaled identically to the training split.
    """
    print('Load Test Dataset ...')
    test_data = pd.read_csv(file_path)
    # keep the ids for writing the submission, before 'id' is dropped
    id_list = np.asarray(test_data['id'])
    print('Done.')
    print('*' * 20)
    test_data = test_data.drop(columns=drop)
    cate_fea = [
        'term',
        'subGrade',
        'homeOwnership',
        'verificationStatus',
        'purpose',
        'regionCode',
        'initialListStatus']
    num_fea = list(
        filter(
            lambda x: x not in cate_fea,
            list(
                test_data.columns)))
    print('Transforming Datetime Items ...')
    test_data['employmentLength'] = test_data['employmentLength'].apply(
        employmentLength_to_value)
    print('Done.')
    print('*' * 20)
    print('Pre-processing Dataset ...')
    print('-' * 20)
    data = test_data.copy()
    print('Filling NaN Items ...')
    data[num_fea] = data[num_fea].fillna(data[num_fea].median())
    # NOTE(review): fillna with mode() (a DataFrame) aligns on index and
    # likely only fills a few leading rows — see the same pattern in load_data.
    data[cate_fea] = data[cate_fea].fillna(data[cate_fea].mode())
    print('Filling Done.')
    print('-' * 20)
    print('Standardizing Numerical Items ...')
    data = standardization_test(data, columns=num_fea, max_min=True)
    print('Coding Done.')
    print('-' * 20)
    print('One-hot Coding categorized Items ...')
    data = pd.get_dummies(data, columns=cate_fea)
    print('Coding Done.')
    print('-' * 20)
    print('Done.')
    print('*' * 20)
    testset = np.asarray(data)
    print('Shape of Dataset: {}'.format(testset.shape))
    return testset, id_list
| [
"pandas.isnull",
"numpy.mean",
"pandas.read_csv",
"numpy.divide",
"pandas.to_datetime",
"numpy.asarray",
"numpy.min",
"numpy.max",
"numpy.std",
"pandas.get_dummies",
"time.time",
"numpy.random.RandomState"
] | [((254, 278), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (275, 278), True, 'import numpy as np\n'), ((4516, 4531), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (4525, 4531), True, 'import pandas as pd\n'), ((4767, 4782), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (4776, 4782), True, 'import pandas as pd\n'), ((5020, 5035), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (5029, 5035), True, 'import pandas as pd\n'), ((5414, 5432), 'numpy.mean', 'np.mean', (['data[fea]'], {}), '(data[fea])\n', (5421, 5432), True, 'import numpy as np\n'), ((5448, 5473), 'numpy.std', 'np.std', (['data[fea]'], {'ddof': '(1)'}), '(data[fea], ddof=1)\n', (5454, 5473), True, 'import numpy as np\n'), ((7064, 7086), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (7075, 7086), True, 'import pandas as pd\n'), ((9726, 9764), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': 'cate_fea'}), '(data, columns=cate_fea)\n', (9740, 9764), True, 'import pandas as pd\n'), ((10047, 10070), 'numpy.asarray', 'np.asarray', (['data[label]'], {}), '(data[label])\n', (10057, 10070), True, 'import numpy as np\n'), ((10121, 10137), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (10131, 10137), True, 'import numpy as np\n'), ((10321, 10343), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (10332, 10343), True, 'import pandas as pd\n'), ((10358, 10385), 'numpy.asarray', 'np.asarray', (["test_data['id']"], {}), "(test_data['id'])\n", (10368, 10385), True, 'import numpy as np\n'), ((11499, 11537), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': 'cate_fea'}), '(data, columns=cate_fea)\n', (11513, 11537), True, 'import pandas as pd\n'), ((11637, 11653), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (11647, 11653), True, 'import numpy as np\n'), ((4234, 4245), 'time.time', 'time.time', ([], {}), '()\n', (4243, 4245), False, 'import time\n'), 
((4291, 4302), 'time.time', 'time.time', ([], {}), '()\n', (4300, 4302), False, 'import time\n'), ((4890, 4937), 'pandas.to_datetime', 'pd.to_datetime', (['"""2000-01-01"""'], {'format': '"""%Y-%m-%d"""'}), "('2000-01-01', format='%Y-%m-%d')\n", (4904, 4937), True, 'import pandas as pd\n'), ((5221, 5260), 'pandas.to_datetime', 'pd.to_datetime', (['data'], {'format': '"""%Y-%m-%d"""'}), "(data, format='%Y-%m-%d')\n", (5235, 5260), True, 'import pandas as pd\n'), ((5277, 5324), 'pandas.to_datetime', 'pd.to_datetime', (['"""1900-01-01"""'], {'format': '"""%Y-%m-%d"""'}), "('1900-01-01', format='%Y-%m-%d')\n", (5291, 5324), True, 'import pandas as pd\n'), ((5812, 5836), 'numpy.mean', 'np.mean', (['data_frame[fea]'], {}), '(data_frame[fea])\n', (5819, 5836), True, 'import numpy as np\n'), ((5856, 5879), 'numpy.std', 'np.std', (['data_frame[fea]'], {}), '(data_frame[fea])\n', (5862, 5879), True, 'import numpy as np\n'), ((6099, 6122), 'numpy.max', 'np.max', (['data_frame[fea]'], {}), '(data_frame[fea])\n', (6105, 6122), True, 'import numpy as np\n'), ((6146, 6169), 'numpy.min', 'np.min', (['data_frame[fea]'], {}), '(data_frame[fea])\n', (6152, 6169), True, 'import numpy as np\n'), ((6018, 6052), 'numpy.divide', 'np.divide', (['(x - data_mean)', 'data_std'], {}), '(x - data_mean, data_std)\n', (6027, 6052), True, 'import numpy as np\n'), ((6639, 6673), 'numpy.divide', 'np.divide', (['(x - data_mean)', 'data_std'], {}), '(x - data_mean, data_std)\n', (6648, 6673), True, 'import numpy as np\n'), ((6319, 6363), 'numpy.divide', 'np.divide', (['(x - data_min)', '(data_max - data_min)'], {}), '(x - data_min, data_max - data_min)\n', (6328, 6363), True, 'import numpy as np\n'), ((6878, 6922), 'numpy.divide', 'np.divide', (['(x - data_min)', '(data_max - data_min)'], {}), '(x - data_min, data_max - data_min)\n', (6887, 6922), True, 'import numpy as np\n')] |
import numpy as np
np.random.seed(1000)
import imp
import input_data_class
import keras
from keras.models import Model
from keras.backend.tensorflow_backend import set_session
from keras import backend as K
import tensorflow as tf
import os
import configparser
import argparse
from scipy.special import softmax
config = configparser.ConfigParser()
parser = argparse.ArgumentParser()
parser.add_argument('-qt',type=str,default='evaluation')
parser.add_argument('-dataset',default='location')
args = parser.parse_args()
dataset=args.dataset
input_data=input_data_class.InputData(dataset=dataset)
config = configparser.ConfigParser()
config.read('config.ini')
user_label_dim=int(config[dataset]["num_classes"])
num_classes=1
user_epochs=int(config[dataset]["user_epochs"])
defense_epochs=int(config[dataset]["defense_epochs"])
result_folder=config[dataset]["result_folder"]
network_architecture=str(config[dataset]["network_architecture"])
fccnet=imp.load_source(str(config[dataset]["network_name"]),network_architecture)
print("Config: ")
print("dataset: {}".format(dataset))
print("result folder: {}".format(result_folder))
print("network architecture: {}".format(network_architecture))
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.per_process_gpu_memory_fraction = 0.5
config_gpu.gpu_options.visible_device_list = "0"
#set_session(tf.Session(config=config))
sess = tf.InteractiveSession(config=config_gpu)
sess.run(tf.global_variables_initializer())
print("Loading Evaluation dataset...")
(x_evaluate,y_evaluate,l_evaluate) =input_data.input_data_attacker_evaluate()
print("Loading target model...")
npzdata=np.load(result_folder+"/models/"+"epoch_{}_weights_user.npz".format(user_epochs))
######load target model##############
weights=npzdata['x']
input_shape=x_evaluate.shape[1:]
model=fccnet.model_user(input_shape=input_shape,labels_dim=user_label_dim)
model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.SGD(lr=0.01),metrics=['accuracy'])
model.set_weights(weights)
output_logits=model.layers[-2].output
f_evaluate=model.predict(x_evaluate) #confidence score result of target model on evaluation dataset
# Collect the target model's logits over the evaluation set, in batches.
# NOTE(review): np.float was removed in NumPy >= 1.24 — use float instead.
f_evaluate_logits=np.zeros([1,user_label_dim],dtype=np.float) # seed row, stripped after the loop
batch_predict=100
batch_num=np.ceil(x_evaluate.shape[0]/float(batch_predict))
for i in np.arange(batch_num):
    f_evaluate_logits_temp=sess.run(output_logits,feed_dict={model.input:x_evaluate[int(i*batch_predict):int(min((i+1)*batch_predict,x_evaluate.shape[0])),:]})
    f_evaluate_logits=np.concatenate((f_evaluate_logits,f_evaluate_logits_temp),axis=0)
f_evaluate_logits=f_evaluate_logits[1:,:] #logits of target model on evaluation dataset (zero seed row dropped)
del model
f_evaluate_origin=np.copy(f_evaluate) #keep a copy of original one
f_evaluate_logits_origin=np.copy(f_evaluate_logits)
#############as we sort the prediction scores, back_index is used to get back original scores#############
sort_index=np.argsort(f_evaluate,axis=1)
back_index=np.copy(sort_index)
# back_index[i] holds the inverse permutation of sort_index[i]
for i in np.arange(back_index.shape[0]):
    back_index[i,sort_index[i,:]]=np.arange(back_index.shape[1])
f_evaluate=np.sort(f_evaluate,axis=1)
f_evaluate_logits=np.sort(f_evaluate_logits,axis=1)
print("f evaluate shape: {}".format(f_evaluate.shape))
print("f evaluate logits shape: {}".format(f_evaluate_logits.shape))
##########loading defense model
# ---- Load the defense's membership-inference attack model and build the
# TF graph used to search for adversarial noise on its input. ----
input_shape=f_evaluate.shape[1:]
print("Loading defense model...")
npzdata=np.load(result_folder+"/models/"+"epoch_{}_weights_defense.npz".format(defense_epochs))
model=fccnet.model_defense_optimize(input_shape=input_shape,labels_dim=num_classes)
model.compile(loss=keras.losses.binary_crossentropy,optimizer=keras.optimizers.SGD(lr=0.001),metrics=['accuracy'])
#model.summary()
weights=npzdata['x']
model.set_weights(weights)
model.trainable=False
########evaluate the performance of defense's attack model on undefended data########
scores_evaluate = model.evaluate(f_evaluate_logits, l_evaluate, verbose=0)
print('evaluate loss on model:', scores_evaluate[0])
print('evaluate accuracy on model:', scores_evaluate[1])
# Pre-sigmoid output of the attack model (scalar per sample).
output=model.layers[-2].output[:,0]
c1=1.0 #used to find adversarial examples
c2=10.0 #penalty such that the index of max score is keeped
c3=0.1
#alpha_value=0.0
origin_value_placeholder=tf.placeholder(tf.float32,shape=(1,user_label_dim)) #placeholder with original confidence score values (not logit)
label_mask=tf.placeholder(tf.float32,shape=(1,user_label_dim)) # one-hot encode that encodes the predicted label
c1_placeholder=tf.placeholder(tf.float32)
c2_placeholder=tf.placeholder(tf.float32)
c3_placeholder=tf.placeholder(tf.float32)
# Score of the originally-predicted label vs. the best competing label;
# the -1e8 mask pushes the true label out of the max.
correct_label = tf.reduce_sum(label_mask * model.input, axis=1)
wrong_label = tf.reduce_max((1-label_mask) * model.input - 1e8*label_mask, axis=1)
loss1=tf.abs(output)
loss2=tf.nn.relu(wrong_label-correct_label)
loss3=tf.reduce_sum(tf.abs(tf.nn.softmax(model.input)-origin_value_placeholder)) #L-1 norm
loss=c1_placeholder*loss1+c2_placeholder*loss2+c3_placeholder*loss3
gradient_targetlabel=K.gradients(loss,model.input)
# FIX: np.float was a deprecated alias of the builtin float and was removed
# in NumPy 1.24; the builtin float gives the same dtype (float64).
label_mask_array=np.zeros([1,user_label_dim],dtype=float)
##########################################################
result_array=np.zeros(f_evaluate.shape,dtype=float)
result_array_logits=np.zeros(f_evaluate.shape,dtype=float)
success_fraction=0.0
max_iteration=300 #max iteration if can't find adversarial example that satisfies requirements
np.random.seed(1000)
# Per-sample search for adversarial noise (MemGuard): perturb the sorted
# logits until the defense model's membership score crosses 0.5 while the
# argmax label is preserved; c3 (the utility-loss weight) is increased
# geometrically until the perturbation stops succeeding.
for test_sample_id in np.arange(0,f_evaluate.shape[0]):
    if test_sample_id%100==0:
        print("test sample id: {}".format(test_sample_id))
    max_label=np.argmax(f_evaluate[test_sample_id,:])
    origin_value=np.copy(f_evaluate[test_sample_id,:]).reshape(1,user_label_dim)
    origin_value_logits=np.copy(f_evaluate_logits[test_sample_id,:]).reshape(1,user_label_dim)
    # One-hot mask of the predicted label for this sample.
    label_mask_array[0,:]=0.0
    label_mask_array[0,max_label]=1.0
    sample_f=np.copy(origin_value_logits)
    result_predict_scores_initial=model.predict(sample_f)
    ########## if the output score is already very close to 0.5, we can just use it for numerical reason
    if np.abs(result_predict_scores_initial-0.5)<=1e-5:
        success_fraction+=1.0
        # back_index restores the original (unsorted) label ordering.
        result_array[test_sample_id,:]=origin_value[0,back_index[test_sample_id,:]]
        result_array_logits[test_sample_id,:]=origin_value_logits[0,back_index[test_sample_id,:]]
        continue
    # Fallback result if no stronger perturbation is accepted below.
    last_iteration_result=np.copy(origin_value)[0,back_index[test_sample_id,:]]
    last_iteration_result_logits=np.copy(origin_value_logits)[0,back_index[test_sample_id,:]]
    success=True
    c3=0.1
    iterate_time=1
    # Each pass restarts from the original logits with a 10x larger c3.
    while success==True:
        sample_f=np.copy(origin_value_logits)
        j=1
        result_max_label=-1
        result_predict_scores=result_predict_scores_initial
        # Gradient descent on the attack-graph loss until the defense score
        # flips to the other side of 0.5 with the label preserved, or until
        # max_iteration steps have been taken.
        while j<max_iteration and (max_label!=result_max_label or (result_predict_scores-0.5)*(result_predict_scores_initial-0.5)>0):
            gradient_values=sess.run(gradient_targetlabel,feed_dict={model.input:sample_f,origin_value_placeholder:origin_value,label_mask:label_mask_array,c3_placeholder:c3,c1_placeholder:c1,c2_placeholder:c2})[0][0]
            # Normalised-gradient step of fixed size 0.1.
            gradient_values=gradient_values/np.linalg.norm(gradient_values)
            sample_f=sample_f-0.1*gradient_values
            result_predict_scores=model.predict(sample_f)
            result_max_label=np.argmax(sample_f)
            j+=1
        # Reject this pass if the predicted label changed.
        if max_label!=result_max_label:
            if iterate_time==1:
                print("failed sample for label not same for id: {},c3:{} not add noise".format(test_sample_id,c3))
                # NOTE(review): failures on the first pass subtract from
                # success_fraction (net effect cancels the +1.0 added after
                # the loop) — appears intentional but worth confirming.
                success_fraction-=1.0
            break
        # Reject this pass if the defense score never crossed 0.5.
        if ((model.predict(sample_f)-0.5)*(result_predict_scores_initial-0.5))>0:
            if iterate_time==1:
                print("max iteration reached with id: {}, max score: {}, prediction_score: {}, c3: {}, not add noise".format(test_sample_id,np.amax(softmax(sample_f)),result_predict_scores,c3))
            break
        # Pass succeeded: record the perturbed scores/logits (restored to the
        # original label order) and retry with a stronger utility penalty.
        last_iteration_result[:]=softmax(sample_f)[0,back_index[test_sample_id,:]]
        last_iteration_result_logits[:]=sample_f[0,back_index[test_sample_id,:]]
        iterate_time+=1
        c3=c3*10
        if c3>100000:
            break
    success_fraction+=1.0
    result_array[test_sample_id,:]=last_iteration_result[:]
    result_array_logits[test_sample_id,:]=last_iteration_result_logits[:]
# ---- Report the success rate, re-load the (non-optimised) defense model
# and save the perturbed outputs plus attack predictions to disk. ----
print("Success fraction: {}".format(success_fraction/float(f_evaluate.shape[0])))
# FIX: replace the race-prone exists()/makedirs() pairs with a single
# makedirs(..., exist_ok=True); it creates the parent result_folder too.
os.makedirs(result_folder+"/attack", exist_ok=True)
del model
input_shape=f_evaluate.shape[1:]
print("Loading defense model...")
npzdata=np.load(result_folder+"/models/"+"epoch_{}_weights_defense.npz".format(defense_epochs))
model=fccnet.model_defense(input_shape=input_shape,labels_dim=num_classes)
weights=npzdata['x']
model.compile(loss=keras.losses.binary_crossentropy,optimizer=keras.optimizers.SGD(lr=0.001),metrics=['accuracy'])
model.set_weights(weights)
model.trainable=False
# Attack-model scores on the undefended vs. the MemGuard-perturbed outputs
# (both sorted per row, matching the defense model's training input).
predict_origin=model.predict(np.sort(f_evaluate_origin,axis=1))
predict_modified=model.predict(np.sort(result_array,axis=1))
np.savez(result_folder+"/attack/"+"MemGuard_noise_data_{}.npz".format(args.qt),defense_output=result_array,defense_output_logits=result_array_logits,tc_output=f_evaluate_origin,tc_output_logits=f_evaluate_logits_origin,predict_origin=predict_origin,predict_modified=predict_modified)
| [
"configparser.ConfigParser",
"tensorflow.reduce_sum",
"keras.backend.gradients",
"numpy.argsort",
"keras.optimizers.SGD",
"tensorflow.nn.softmax",
"numpy.linalg.norm",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.sort",
"tensorflow.placeholder",
"numpy.random.seed",
... | [((19, 39), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (33, 39), True, 'import numpy as np\n'), ((323, 350), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (348, 350), False, 'import configparser\n'), ((360, 385), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (383, 385), False, 'import argparse\n'), ((554, 597), 'input_data_class.InputData', 'input_data_class.InputData', ([], {'dataset': 'dataset'}), '(dataset=dataset)\n', (580, 597), False, 'import input_data_class\n'), ((607, 634), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (632, 634), False, 'import configparser\n'), ((1210, 1226), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1224, 1226), True, 'import tensorflow as tf\n'), ((1385, 1425), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config_gpu'}), '(config=config_gpu)\n', (1406, 1425), True, 'import tensorflow as tf\n'), ((2187, 2232), 'numpy.zeros', 'np.zeros', (['[1, user_label_dim]'], {'dtype': 'np.float'}), '([1, user_label_dim], dtype=np.float)\n', (2195, 2232), True, 'import numpy as np\n'), ((2318, 2338), 'numpy.arange', 'np.arange', (['batch_num'], {}), '(batch_num)\n', (2327, 2338), True, 'import numpy as np\n'), ((2706, 2725), 'numpy.copy', 'np.copy', (['f_evaluate'], {}), '(f_evaluate)\n', (2713, 2725), True, 'import numpy as np\n'), ((2781, 2807), 'numpy.copy', 'np.copy', (['f_evaluate_logits'], {}), '(f_evaluate_logits)\n', (2788, 2807), True, 'import numpy as np\n'), ((2927, 2957), 'numpy.argsort', 'np.argsort', (['f_evaluate'], {'axis': '(1)'}), '(f_evaluate, axis=1)\n', (2937, 2957), True, 'import numpy as np\n'), ((2968, 2987), 'numpy.copy', 'np.copy', (['sort_index'], {}), '(sort_index)\n', (2975, 2987), True, 'import numpy as np\n'), ((2997, 3027), 'numpy.arange', 'np.arange', (['back_index.shape[0]'], {}), '(back_index.shape[0])\n', (3006, 3027), True, 'import numpy as np\n'), 
((3105, 3132), 'numpy.sort', 'np.sort', (['f_evaluate'], {'axis': '(1)'}), '(f_evaluate, axis=1)\n', (3112, 3132), True, 'import numpy as np\n'), ((3150, 3184), 'numpy.sort', 'np.sort', (['f_evaluate_logits'], {'axis': '(1)'}), '(f_evaluate_logits, axis=1)\n', (3157, 3184), True, 'import numpy as np\n'), ((4264, 4317), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, user_label_dim)'}), '(tf.float32, shape=(1, user_label_dim))\n', (4278, 4317), True, 'import tensorflow as tf\n'), ((4390, 4443), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, user_label_dim)'}), '(tf.float32, shape=(1, user_label_dim))\n', (4404, 4443), True, 'import tensorflow as tf\n'), ((4509, 4535), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4523, 4535), True, 'import tensorflow as tf\n'), ((4551, 4577), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4565, 4577), True, 'import tensorflow as tf\n'), ((4593, 4619), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4607, 4619), True, 'import tensorflow as tf\n'), ((4637, 4684), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(label_mask * model.input)'], {'axis': '(1)'}), '(label_mask * model.input, axis=1)\n', (4650, 4684), True, 'import tensorflow as tf\n'), ((4699, 4784), 'tensorflow.reduce_max', 'tf.reduce_max', (['((1 - label_mask) * model.input - 100000000.0 * label_mask)'], {'axis': '(1)'}), '((1 - label_mask) * model.input - 100000000.0 * label_mask, axis=1\n )\n', (4712, 4784), True, 'import tensorflow as tf\n'), ((4776, 4790), 'tensorflow.abs', 'tf.abs', (['output'], {}), '(output)\n', (4782, 4790), True, 'import tensorflow as tf\n'), ((4797, 4836), 'tensorflow.nn.relu', 'tf.nn.relu', (['(wrong_label - correct_label)'], {}), '(wrong_label - correct_label)\n', (4807, 4836), True, 'import tensorflow as tf\n'), ((5016, 5046), 'keras.backend.gradients', 'K.gradients', (['loss', 
'model.input'], {}), '(loss, model.input)\n', (5027, 5046), True, 'from keras import backend as K\n'), ((5063, 5108), 'numpy.zeros', 'np.zeros', (['[1, user_label_dim]'], {'dtype': 'np.float'}), '([1, user_label_dim], dtype=np.float)\n', (5071, 5108), True, 'import numpy as np\n'), ((5179, 5221), 'numpy.zeros', 'np.zeros', (['f_evaluate.shape'], {'dtype': 'np.float'}), '(f_evaluate.shape, dtype=np.float)\n', (5187, 5221), True, 'import numpy as np\n'), ((5241, 5283), 'numpy.zeros', 'np.zeros', (['f_evaluate.shape'], {'dtype': 'np.float'}), '(f_evaluate.shape, dtype=np.float)\n', (5249, 5283), True, 'import numpy as np\n'), ((5401, 5421), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (5415, 5421), True, 'import numpy as np\n'), ((5444, 5477), 'numpy.arange', 'np.arange', (['(0)', 'f_evaluate.shape[0]'], {}), '(0, f_evaluate.shape[0])\n', (5453, 5477), True, 'import numpy as np\n'), ((1435, 1468), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1466, 1468), True, 'import tensorflow as tf\n'), ((2522, 2589), 'numpy.concatenate', 'np.concatenate', (['(f_evaluate_logits, f_evaluate_logits_temp)'], {'axis': '(0)'}), '((f_evaluate_logits, f_evaluate_logits_temp), axis=0)\n', (2536, 2589), True, 'import numpy as np\n'), ((3063, 3093), 'numpy.arange', 'np.arange', (['back_index.shape[1]'], {}), '(back_index.shape[1])\n', (3072, 3093), True, 'import numpy as np\n'), ((5581, 5621), 'numpy.argmax', 'np.argmax', (['f_evaluate[test_sample_id, :]'], {}), '(f_evaluate[test_sample_id, :])\n', (5590, 5621), True, 'import numpy as np\n'), ((5878, 5906), 'numpy.copy', 'np.copy', (['origin_value_logits'], {}), '(origin_value_logits)\n', (5885, 5906), True, 'import numpy as np\n'), ((8439, 8468), 'os.path.exists', 'os.path.exists', (['result_folder'], {}), '(result_folder)\n', (8453, 8468), False, 'import os\n'), ((8474, 8500), 'os.makedirs', 'os.makedirs', (['result_folder'], {}), '(result_folder)\n', (8485, 8500), 
False, 'import os\n'), ((8508, 8549), 'os.path.exists', 'os.path.exists', (["(result_folder + '/attack')"], {}), "(result_folder + '/attack')\n", (8522, 8549), False, 'import os\n'), ((8553, 8591), 'os.makedirs', 'os.makedirs', (["(result_folder + '/attack')"], {}), "(result_folder + '/attack')\n", (8564, 8591), False, 'import os\n'), ((9056, 9090), 'numpy.sort', 'np.sort', (['f_evaluate_origin'], {'axis': '(1)'}), '(f_evaluate_origin, axis=1)\n', (9063, 9090), True, 'import numpy as np\n'), ((9122, 9151), 'numpy.sort', 'np.sort', (['result_array'], {'axis': '(1)'}), '(result_array, axis=1)\n', (9129, 9151), True, 'import numpy as np\n'), ((1950, 1979), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (1970, 1979), False, 'import keras\n'), ((3655, 3685), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3675, 3685), False, 'import keras\n'), ((6077, 6120), 'numpy.abs', 'np.abs', (['(result_predict_scores_initial - 0.5)'], {}), '(result_predict_scores_initial - 0.5)\n', (6083, 6120), True, 'import numpy as np\n'), ((6381, 6402), 'numpy.copy', 'np.copy', (['origin_value'], {}), '(origin_value)\n', (6388, 6402), True, 'import numpy as np\n'), ((6468, 6496), 'numpy.copy', 'np.copy', (['origin_value_logits'], {}), '(origin_value_logits)\n', (6475, 6496), True, 'import numpy as np\n'), ((6619, 6647), 'numpy.copy', 'np.copy', (['origin_value_logits'], {}), '(origin_value_logits)\n', (6626, 6647), True, 'import numpy as np\n'), ((8925, 8955), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (8945, 8955), False, 'import keras\n'), ((4862, 4888), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['model.input'], {}), '(model.input)\n', (4875, 4888), True, 'import tensorflow as tf\n'), ((5638, 5676), 'numpy.copy', 'np.copy', (['f_evaluate[test_sample_id, :]'], {}), '(f_evaluate[test_sample_id, :])\n', (5645, 5676), True, 'import numpy as np\n'), ((5726, 5771), 
'numpy.copy', 'np.copy', (['f_evaluate_logits[test_sample_id, :]'], {}), '(f_evaluate_logits[test_sample_id, :])\n', (5733, 5771), True, 'import numpy as np\n'), ((7313, 7332), 'numpy.argmax', 'np.argmax', (['sample_f'], {}), '(sample_f)\n', (7322, 7332), True, 'import numpy as np\n'), ((7976, 7993), 'scipy.special.softmax', 'softmax', (['sample_f'], {}), '(sample_f)\n', (7983, 7993), False, 'from scipy.special import softmax\n'), ((7144, 7175), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient_values'], {}), '(gradient_values)\n', (7158, 7175), True, 'import numpy as np\n'), ((7879, 7896), 'scipy.special.softmax', 'softmax', (['sample_f'], {}), '(sample_f)\n', (7886, 7896), False, 'from scipy.special import softmax\n')] |
import numpy as np
import visgeom as vg
from camera import PerspectiveCamera
from measurements import PrecalibratedCameraMeasurementsFixedCamera
from optim import CompositeStateVariable, levenberg_marquardt
from visualise_ba import visualise_soba
"""Example 3 - Structure-only Bundle Adjustment"""
class PrecalibratedStructureOnlyBAObjective:
    """Linearisation of the structure-only bundle adjustment objective.

    The camera poses and intrinsics are held fixed; only the 3D world
    points are optimised.
    """

    def __init__(self, measurements):
        """Construct the objective.

        :param measurements: A list of PrecalibratedCameraMeasurementsFixedCamera objects, one for each camera.
        """
        self.measurements = measurements

    @staticmethod
    def extract_measurement_jacobian(point_index, point_state_w, measurement):
        """Compute the whitened measurement Jacobian for one point in one camera.

        :param point_index: Index of current point.
        :param point_state_w: Current state of a specific world point.
        :param measurement: The measurement
        :return: The measurement Jacobian
        """
        jac = measurement.camera.jac_project_world_to_normalised_wrt_x_w(
            measurement.pose_c_w, point_state_w)
        return measurement.sqrt_inv_covs[point_index] @ jac

    @staticmethod
    def extract_measurement_error(point_index, point_state_w, measurement):
        """Compute the whitened measurement error for one point in one camera.

        :param point_index: Index of current point.
        :param point_state_w: Current state of a specific world point.
        :param measurement: The measurement
        :return: The measurement error
        """
        residual = measurement.camera.reprojection_error_normalised(
            measurement.pose_c_w * point_state_w,
            measurement.xn[:, [point_index]])
        return measurement.sqrt_inv_covs[point_index] @ residual

    def linearise(self, point_states_w):
        """Linearise the objective over all states and measurements.

        :param point_states_w: The current state of the points in the world frame.
        :return:
            A - The full measurement Jacobian
            b - The full measurement error
            cost - The current cost
        """
        num_cameras = len(self.measurements)
        num_points = len(point_states_w)

        A = np.zeros((2 * num_cameras * num_points, 3 * num_points))
        b = np.zeros((2 * num_cameras * num_points, 1))

        # Stack one 2-row block per (camera, point) pair.
        for cam_idx, meas in enumerate(self.measurements):
            block_offset = cam_idx * 2 * num_points
            for pt_idx in range(num_points):
                state = point_states_w[pt_idx]
                rows = slice(block_offset + 2 * pt_idx, block_offset + 2 * (pt_idx + 1))
                cols = slice(3 * pt_idx, 3 * (pt_idx + 1))
                A[rows, cols] = self.extract_measurement_jacobian(pt_idx, state, meas)
                b[rows, :] = self.extract_measurement_error(pt_idx, state, meas)

        return A, b, b.T.dot(b)
def main():
    """Run the structure-only bundle adjustment example end to end."""
    # The true scene: a box of world points.
    true_points_w = vg.utils.generate_box()

    # Shared camera intrinsics.
    img_w = 640
    img_h = 480
    focal_lengths = 0.75 * img_h * np.ones((2, 1))
    principal_point = 0.5 * np.array([[img_w, img_h]]).T
    camera = PerspectiveCamera(focal_lengths, principal_point)

    # Two cameras looking at the origin from opposite sides.
    true_poses_w_c = [
        PerspectiveCamera.looks_at_pose(
            np.array([[3, -4, 0]]).T, np.zeros((3, 1)), np.array([[0, 0, 1]]).T),
        PerspectiveCamera.looks_at_pose(
            np.array([[3, 4, 0]]).T, np.zeros((3, 1)), np.array([[0, 0, 1]]).T)]

    # Simulate the camera measurements of the true points.
    measurements = [
        PrecalibratedCameraMeasurementsFixedCamera.generate(camera, pose, true_points_w)
        for pose in true_poses_w_c]

    # The objective to optimise.
    model = PrecalibratedStructureOnlyBAObjective(measurements)

    # Initial state: true points corrupted with Gaussian noise.
    init_points_w = []
    for col in range(true_points_w.shape[1]):
        init_points_w.append(true_points_w[:, [col]] + 0.3 * np.random.randn(3, 1))
    init_state = CompositeStateVariable(init_points_w)

    # Optimise and recover the state covariance from the final normal equations.
    x, cost, A, b = levenberg_marquardt(init_state, model)
    cov_x_final = np.linalg.inv(A.T @ A)

    with np.printoptions(precision=3, suppress=True):
        print('Covariance:')
        print(cov_x_final)

    visualise_soba(true_poses_w_c, true_points_w, measurements, x, cost)


if __name__ == "__main__":
    main()
| [
"numpy.ones",
"camera.PerspectiveCamera",
"optim.levenberg_marquardt",
"visualise_ba.visualise_soba",
"measurements.PrecalibratedCameraMeasurementsFixedCamera.generate",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"optim.CompositeStateVariable",
"numpy.random.randn",
"visgeom.utils.generat... | [((2998, 3021), 'visgeom.utils.generate_box', 'vg.utils.generate_box', ([], {}), '()\n', (3019, 3021), True, 'import visgeom as vg\n'), ((3195, 3244), 'camera.PerspectiveCamera', 'PerspectiveCamera', (['focal_lengths', 'principal_point'], {}), '(focal_lengths, principal_point)\n', (3212, 3244), False, 'from camera import PerspectiveCamera\n'), ((3994, 4031), 'optim.CompositeStateVariable', 'CompositeStateVariable', (['init_points_w'], {}), '(init_points_w)\n', (4016, 4031), False, 'from optim import CompositeStateVariable, levenberg_marquardt\n'), ((4120, 4158), 'optim.levenberg_marquardt', 'levenberg_marquardt', (['init_state', 'model'], {}), '(init_state, model)\n', (4139, 4158), False, 'from optim import CompositeStateVariable, levenberg_marquardt\n'), ((4177, 4199), 'numpy.linalg.inv', 'np.linalg.inv', (['(A.T @ A)'], {}), '(A.T @ A)\n', (4190, 4199), True, 'import numpy as np\n'), ((4356, 4424), 'visualise_ba.visualise_soba', 'visualise_soba', (['true_poses_w_c', 'true_points_w', 'measurements', 'x', 'cost'], {}), '(true_poses_w_c, true_points_w, measurements, x, cost)\n', (4370, 4424), False, 'from visualise_ba import visualise_soba\n'), ((2369, 2425), 'numpy.zeros', 'np.zeros', (['(2 * num_cameras * num_points, 3 * num_points)'], {}), '((2 * num_cameras * num_points, 3 * num_points))\n', (2377, 2425), True, 'import numpy as np\n'), ((2438, 2481), 'numpy.zeros', 'np.zeros', (['(2 * num_cameras * num_points, 1)'], {}), '((2 * num_cameras * num_points, 1))\n', (2446, 2481), True, 'import numpy as np\n'), ((3117, 3132), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (3124, 3132), True, 'import numpy as np\n'), ((3595, 3680), 'measurements.PrecalibratedCameraMeasurementsFixedCamera.generate', 'PrecalibratedCameraMeasurementsFixedCamera.generate', (['camera', 'pose', 'true_points_w'], {}), '(camera, pose, true_points_w\n )\n', (3646, 3680), False, 'from measurements import PrecalibratedCameraMeasurementsFixedCamera\n'), 
((4234, 4277), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (4249, 4277), True, 'import numpy as np\n'), ((3161, 3179), 'numpy.array', 'np.array', (['[[w, h]]'], {}), '([[w, h]])\n', (3169, 3179), True, 'import numpy as np\n'), ((3366, 3382), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3374, 3382), True, 'import numpy as np\n'), ((3475, 3491), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3483, 3491), True, 'import numpy as np\n'), ((3340, 3362), 'numpy.array', 'np.array', (['[[3, -4, 0]]'], {}), '([[3, -4, 0]])\n', (3348, 3362), True, 'import numpy as np\n'), ((3384, 3405), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (3392, 3405), True, 'import numpy as np\n'), ((3450, 3471), 'numpy.array', 'np.array', (['[[3, 4, 0]]'], {}), '([[3, 4, 0]])\n', (3458, 3471), True, 'import numpy as np\n'), ((3493, 3514), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (3501, 3514), True, 'import numpy as np\n'), ((3915, 3936), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (3930, 3936), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
def cut_off_img(pixels, size):
    """Crop an image so both spatial dimensions are exact multiples of *size*.

    :param pixels: image as a numpy array of shape (height, width, ...)
    :param size: tile edge length in pixels
    :return: a view of *pixels* with the bottom/right remainder rows/columns removed
    """
    # FIX: use .shape rather than len(pixels[1]) — indexing row 1 raised
    # IndexError for images with fewer than two rows.
    height, width = pixels.shape[0], pixels.shape[1]
    return pixels[:height - height % size, :width - width % size]
def get_colors(cutted_img, x, y):
    """Return the sum of the R, G and B channel values of the pixel at (x, y).

    Channels are converted to Python ints first so uint8 arithmetic
    cannot overflow.
    """
    pixel = cutted_img[y][x]
    return int(pixel[0]) + int(pixel[1]) + int(pixel[2])
def set_colors(cutted_img, x, y, color, step):
    """Write the quantised grey level of *color* into all three RGB channels at (x, y)."""
    quantised = int(color // step) * step
    for channel in range(3):
        cutted_img[y][x][channel] = quantised
# Load the source image and work on it as a numpy array; cut_off_img
# returns a view, so writes below mutate `pixels` in place.
image = Image.open("img2.jpg")
pixels = np.array(image)

size = int(input("Размер мозайки: "))
step = 256 / int(input("Градация: "))

cutted_img = cut_off_img(pixels, size)
height = len(cutted_img)
width = len(cutted_img[1])

# For every size x size tile: average the summed RGB values, then write
# the quantised grey level back into each pixel of the tile.
for y in range(0, height, size):
    for x in range(0, width, size):
        total = 0
        for y1 in range(y, y + size):
            for x1 in range(x, x + size):
                total += get_colors(cutted_img, x1, y1)
        total = int(total // (size ** 2))
        for y1 in range(y, y + size):
            for x1 in range(x, x + size):
                set_colors(cutted_img, x1, y1, total / 3, step)

res = Image.fromarray(pixels)
res.save('res.jpg')
| [
"numpy.array",
"PIL.Image.fromarray",
"PIL.Image.open"
] | [((630, 652), 'PIL.Image.open', 'Image.open', (['"""img2.jpg"""'], {}), "('img2.jpg')\n", (640, 652), False, 'from PIL import Image\n'), ((663, 678), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (671, 678), True, 'import numpy as np\n'), ((1307, 1330), 'PIL.Image.fromarray', 'Image.fromarray', (['pixels'], {}), '(pixels)\n', (1322, 1330), False, 'from PIL import Image\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import conv
from ipdb import set_trace as debug
def fanin_init(size, fanin=None):
fanin = fanin or size[0]
v = 1. / np.sqrt(fanin)
return torch.Tensor(size).uniform_(-v, v)
class Actor(nn.Module):
    """Actor network combining an MLP over observations with a 3-D conv
    stack over terrain; the flattened terrain features are concatenated
    into the last hidden layer before the tanh-bounded action output.

    NOTE(review): the list default arguments (hidden_layers, conv_layers,
    kernel_sizes) are shared across calls; they are only read here, but a
    caller mutating them would affect later instances.
    """
    def __init__(self, obs_dim, action_dim, terrain_dim, terrain_output, hidden_layers = [300,200,100], conv_layers = [], kernel_sizes = [150,100,50], init_w=3e-3):
        super(Actor, self).__init__()
        # State features
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.layers = nn.ModuleList()
        self.layers.append(nn.Linear(self.obs_dim, hidden_layers[0]))
        for i in range(0, len(hidden_layers)):
            if (i + 1) < len(hidden_layers):
                self.layers.append(nn.Linear(hidden_layers[i],hidden_layers[i+1]))
            else :
                # Final layer also consumes the flattened terrain features
                # (terrain_output**2 extra inputs).
                self.layers.append(nn.Linear(hidden_layers[i] + terrain_output**2, self.action_dim))
        self.init_weights(init_w)
        # self.layers = nn.ParameterList(self.layers)
        # Terrain features
        self.terrain_dim = terrain_dim
        self.terrain_output = terrain_output
        self.conv_layers = nn.ModuleList()
        if len(conv_layers) == 0 :
            self.conv_layers.append(nn.Conv3d(terrain_dim,terrain_output, kernel_sizes[0]))
        else :
            self.conv_layers.append(nn.Conv3d(terrain_dim,conv_layers[0],kernel_sizes[0]))
            for i in range(0, len(conv_layers)):
                if (i+1) < len(conv_layers):
                    self.conv_layers.append(nn.Conv3d(conv_layers[i], conv_layers[i+1], kernel_sizes[i+1]))
                else :
                    # NOTE(review): the last conv layer uses kernel_sizes[i]
                    # while the branch above uses kernel_sizes[i+1] — possibly
                    # an off-by-one; confirm the intended kernel schedule.
                    self.conv_layers.append(nn.Conv3d(conv_layers[i], self.terrain_output, kernel_sizes[i]))

    def init_weights(self, init_w):
        """Fan-in initialise all hidden linear layers; small uniform init
        on the output layer (standard DDPG-style initialisation)."""
        for layer in self.layers[:-1]:
            layer.weight.data = fanin_init(layer.weight.data.size())
        self.layers[-1].weight.data.uniform_(-init_w, init_w)

    def forward(self, observations, terrain):
        """Compute actions from observations and a terrain volume.

        NOTE(review): the terrain branch detaches gradients — out_t goes
        through .data.cpu().numpy() and is rebuilt as a fresh FloatTensor,
        so the conv layers receive no gradient from this path. Presumably
        intentional (frozen terrain features), but confirm.
        """
        # action forward
        out = self.layers[0](observations)
        for layer in self.layers[1:-1]:
            out = nn.ReLU()(out)
            out = layer(out)
        # out=out
        # terrain forward
        # Drop a leading singleton-like dim when terrain carries an extra
        # batch wrapper (rank > 5).
        if len(terrain.shape) > 5 :
            terrain = terrain[0]
        out_t = self.conv_layers[0](terrain)
        for layer in self.conv_layers[1:]:
            out_t = nn.ReLU()(out_t)
            out_t = layer(out_t)
        out_t = torch.flatten(out_t)
        # Replicate the terrain feature vector once per batch row of `out`
        # so the two branches can be concatenated along dim 1.
        tmp = []
        for _ in range(out.shape[0]):
            tmp.append(out_t.data.cpu().numpy())
        # out_t = torch.cat((out_t,out_t))
        if out.is_cuda :
            out_t = torch.FloatTensor(tmp).to(torch.device('cuda'))
        else :
            out_t = torch.FloatTensor(tmp)
        # add layer
        out = torch.cat((out,out_t), dim=1)
        # output layer
        out = self.layers[-1](out)
        out = nn.Tanh()(out)
        return out

    # def cvv_forward(self, data):
    #     out = self.conv_layers[0](data)
    #     for layer in self.conv_layers[1:]:
    #         out = nn.ReLU()(out)
    #         out = layer(out)

    #     out = nn.Tanh()(out)
    #     return out
# return out | [
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Tanh",
"torch.device",
"torch.nn.ModuleList",
"torch.Tensor",
"torch.flatten",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.cat",
"torch.nn.Conv3d"
] | [((234, 248), 'numpy.sqrt', 'np.sqrt', (['fanin'], {}), '(fanin)\n', (241, 248), True, 'import numpy as np\n'), ((648, 663), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (661, 663), True, 'import torch.nn as nn\n'), ((1257, 1272), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1270, 1272), True, 'import torch.nn as nn\n'), ((2553, 2573), 'torch.flatten', 'torch.flatten', (['out_t'], {}), '(out_t)\n', (2566, 2573), False, 'import torch\n'), ((2912, 2942), 'torch.cat', 'torch.cat', (['(out, out_t)'], {'dim': '(1)'}), '((out, out_t), dim=1)\n', (2921, 2942), False, 'import torch\n'), ((260, 278), 'torch.Tensor', 'torch.Tensor', (['size'], {}), '(size)\n', (272, 278), False, 'import torch\n'), ((691, 732), 'torch.nn.Linear', 'nn.Linear', (['self.obs_dim', 'hidden_layers[0]'], {}), '(self.obs_dim, hidden_layers[0])\n', (700, 732), True, 'import torch.nn as nn\n'), ((2854, 2876), 'torch.FloatTensor', 'torch.FloatTensor', (['tmp'], {}), '(tmp)\n', (2871, 2876), False, 'import torch\n'), ((3015, 3024), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3022, 3024), True, 'import torch.nn as nn\n'), ((1344, 1399), 'torch.nn.Conv3d', 'nn.Conv3d', (['terrain_dim', 'terrain_output', 'kernel_sizes[0]'], {}), '(terrain_dim, terrain_output, kernel_sizes[0])\n', (1353, 1399), True, 'import torch.nn as nn\n'), ((1451, 1506), 'torch.nn.Conv3d', 'nn.Conv3d', (['terrain_dim', 'conv_layers[0]', 'kernel_sizes[0]'], {}), '(terrain_dim, conv_layers[0], kernel_sizes[0])\n', (1460, 1506), True, 'import torch.nn as nn\n'), ((2221, 2230), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2228, 2230), True, 'import torch.nn as nn\n'), ((2487, 2496), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2494, 2496), True, 'import torch.nn as nn\n'), ((2797, 2817), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2809, 2817), False, 'import torch\n'), ((861, 910), 'torch.nn.Linear', 'nn.Linear', (['hidden_layers[i]', 'hidden_layers[i + 1]'], {}), 
'(hidden_layers[i], hidden_layers[i + 1])\n', (870, 910), True, 'import torch.nn as nn\n'), ((963, 1029), 'torch.nn.Linear', 'nn.Linear', (['(hidden_layers[i] + terrain_output ** 2)', 'self.action_dim'], {}), '(hidden_layers[i] + terrain_output ** 2, self.action_dim)\n', (972, 1029), True, 'import torch.nn as nn\n'), ((2771, 2793), 'torch.FloatTensor', 'torch.FloatTensor', (['tmp'], {}), '(tmp)\n', (2788, 2793), False, 'import torch\n'), ((1644, 1710), 'torch.nn.Conv3d', 'nn.Conv3d', (['conv_layers[i]', 'conv_layers[i + 1]', 'kernel_sizes[i + 1]'], {}), '(conv_layers[i], conv_layers[i + 1], kernel_sizes[i + 1])\n', (1653, 1710), True, 'import torch.nn as nn\n'), ((1775, 1838), 'torch.nn.Conv3d', 'nn.Conv3d', (['conv_layers[i]', 'self.terrain_output', 'kernel_sizes[i]'], {}), '(conv_layers[i], self.terrain_output, kernel_sizes[i])\n', (1784, 1838), True, 'import torch.nn as nn\n')] |
#!python3
#
# Copyright (C) 2014-2015 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Voltage Source Converter Model Class
Average model of a VSC in voltage-control mode (i.e. controlled voltage source behind an impedance).
"""
import numpy as np
class vsc_average:
    """Average-value model of a VSC in voltage-control mode: a controlled
    voltage source Edq behind the series impedance Rl + j*Xl, interfaced
    to the network as a Norton equivalent."""
    def __init__(self, ID, gen_no, Rl, Xl, dynopt):
        # Identification and solver option
        self.id = ID
        self.gen_no = gen_no
        self.opt = dynopt['iopt']

        # Containers used by the dynamic simulation framework
        self.signals = {}
        self.states = {}
        self.states0 = {}
        self.dsteps = {}

        # Model parameters: series resistance, reactance, nominal frequency
        self.params = {}
        self.params['Rl'] = Rl
        self.params['Xl'] = Xl
        self.params['fn'] = dynopt['fn']

        # Equivalent Norton impedance for Ybus modification
        self.Yg = 1 / (Rl + 1j * Xl)

    def initialise(self,vt0,S0):
        """
        Initialise converter emf based on load flow voltage and grid current injection
        :param vt0: complex terminal voltage from the load flow
        :param S0: complex apparent power injection from the load flow
        """
        # Calculate initial armature current
        Ia0 = np.conj(S0 / vt0)
        phi0 = np.angle(Ia0)

        # Calculate steady state machine emf (i.e. voltage behind synchronous reactance)
        Edq0 = vt0 + (self.params['Rl'] + 1j * self.params['Xl']) * Ia0
        delta0 = np.angle(Edq0)

        # Convert currents to rotor reference frame
        Id0 = np.abs(Ia0) * np.sin(delta0 - phi0)
        Iq0 = np.abs(Ia0) * np.cos(delta0 - phi0)

        # Initialise signals, states and parameters
        self.signals['Vt'] = np.abs(vt0)
        self.signals['Edq'] = Edq0
        self.signals['Ed'] = np.real(Edq0)
        self.signals['Eq'] = np.imag(Edq0)
        self.signals['Id'] = Id0
        self.signals['Iq'] = Iq0

        # Rotor angle and per-unit speed
        self.states['delta'] = delta0
        self.states['omega'] = 1

    def calc_currents(self, vt):
        """
        Solve grid current injections (in network reference frame)
        :param vt: complex terminal voltage at the current network solution
        :return: complex Norton current injection Im
        """
        # Internal emf phasor and its (new) angle
        Edq = self.signals['Ed'] + 1j * self.signals['Eq']
        delta = np.angle(Edq)

        # Calculate terminal voltage in dq reference frame
        # NOTE(review): Vd/Vq and In use the previous self.states['delta']
        # while Id/Iq use the freshly computed delta — confirm this mix of
        # old/new rotor angle is intended.
        Vd = np.abs(vt) * np.sin(self.states['delta'] - np.angle(vt))
        Vq = np.abs(vt) * np.cos(self.states['delta'] - np.angle(vt))

        # Calculate Id and Iq (Norton equivalent current injection in dq frame)
        Ia = (Edq - vt) / (self.params['Rl'] + 1j * self.params['Xl'])
        phi = np.angle(Ia)
        Id = np.abs(Ia) * np.sin(delta - phi)
        Iq = np.abs(Ia) * np.cos(delta - phi)

        # Calculate machine current injection (Norton equivalent current injection in network frame)
        In = (Iq - 1j * Id) * np.exp(1j * (self.states['delta']))
        Im = In + self.Yg * vt

        # Update signals
        self.signals['Edq'] = Edq
        self.signals['Vd'] = Vd
        self.signals['Vq'] = Vq
        self.signals['Id'] = Id
        self.signals['Iq'] = Iq
        self.signals['Vt'] = np.abs(vt)

        self.states['delta'] = delta

        return Im

    def solve_step(self,h,dstep):
        """
        Solve machine differential equations for the next stage in the integration step
        """
        # State variables do not change in this model
        pass
| [
"numpy.abs",
"numpy.conj",
"numpy.angle",
"numpy.exp",
"numpy.real",
"numpy.cos",
"numpy.sin",
"numpy.imag"
] | [((1082, 1099), 'numpy.conj', 'np.conj', (['(S0 / vt0)'], {}), '(S0 / vt0)\n', (1089, 1099), True, 'import numpy as np\n'), ((1115, 1128), 'numpy.angle', 'np.angle', (['Ia0'], {}), '(Ia0)\n', (1123, 1128), True, 'import numpy as np\n'), ((1316, 1330), 'numpy.angle', 'np.angle', (['Edq0'], {}), '(Edq0)\n', (1324, 1330), True, 'import numpy as np\n'), ((1589, 1600), 'numpy.abs', 'np.abs', (['vt0'], {}), '(vt0)\n', (1595, 1600), True, 'import numpy as np\n'), ((1674, 1687), 'numpy.real', 'np.real', (['Edq0'], {}), '(Edq0)\n', (1681, 1687), True, 'import numpy as np\n'), ((1717, 1730), 'numpy.imag', 'np.imag', (['Edq0'], {}), '(Edq0)\n', (1724, 1730), True, 'import numpy as np\n'), ((2073, 2086), 'numpy.angle', 'np.angle', (['Edq'], {}), '(Edq)\n', (2081, 2086), True, 'import numpy as np\n'), ((2477, 2489), 'numpy.angle', 'np.angle', (['Ia'], {}), '(Ia)\n', (2485, 2489), True, 'import numpy as np\n'), ((3022, 3032), 'numpy.abs', 'np.abs', (['vt'], {}), '(vt)\n', (3028, 3032), True, 'import numpy as np\n'), ((1406, 1417), 'numpy.abs', 'np.abs', (['Ia0'], {}), '(Ia0)\n', (1412, 1417), True, 'import numpy as np\n'), ((1420, 1441), 'numpy.sin', 'np.sin', (['(delta0 - phi0)'], {}), '(delta0 - phi0)\n', (1426, 1441), True, 'import numpy as np\n'), ((1456, 1467), 'numpy.abs', 'np.abs', (['Ia0'], {}), '(Ia0)\n', (1462, 1467), True, 'import numpy as np\n'), ((1470, 1491), 'numpy.cos', 'np.cos', (['(delta0 - phi0)'], {}), '(delta0 - phi0)\n', (1476, 1491), True, 'import numpy as np\n'), ((2176, 2186), 'numpy.abs', 'np.abs', (['vt'], {}), '(vt)\n', (2182, 2186), True, 'import numpy as np\n'), ((2246, 2256), 'numpy.abs', 'np.abs', (['vt'], {}), '(vt)\n', (2252, 2256), True, 'import numpy as np\n'), ((2503, 2513), 'numpy.abs', 'np.abs', (['Ia'], {}), '(Ia)\n', (2509, 2513), True, 'import numpy as np\n'), ((2516, 2535), 'numpy.sin', 'np.sin', (['(delta - phi)'], {}), '(delta - phi)\n', (2522, 2535), True, 'import numpy as np\n'), ((2549, 2559), 'numpy.abs', 'np.abs', (['Ia'], 
{}), '(Ia)\n', (2555, 2559), True, 'import numpy as np\n'), ((2562, 2581), 'numpy.cos', 'np.cos', (['(delta - phi)'], {}), '(delta - phi)\n', (2568, 2581), True, 'import numpy as np\n'), ((2722, 2757), 'numpy.exp', 'np.exp', (["(1.0j * self.states['delta'])"], {}), "(1.0j * self.states['delta'])\n", (2728, 2757), True, 'import numpy as np\n'), ((2219, 2231), 'numpy.angle', 'np.angle', (['vt'], {}), '(vt)\n', (2227, 2231), True, 'import numpy as np\n'), ((2289, 2301), 'numpy.angle', 'np.angle', (['vt'], {}), '(vt)\n', (2297, 2301), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def plot_img_array(img_array, ncol=3):
    """Display the images in `img_array` in a grid with `ncol` columns.

    Fixes vs. the original:
    - the row count is rounded up, so a number of images that is not a
      multiple of `ncol` no longer raises an IndexError;
    - `squeeze=False` keeps the axes array two-dimensional even when there is
      only a single row, so the 2D indexing below always works;
    - removed a dead statement that indexed the axes array without effect.
    """
    nrow = -(-len(img_array) // ncol)  # ceiling division without math.ceil
    f, plots = plt.subplots(nrow, ncol, sharex='all', sharey='all',
                            squeeze=False, figsize=(ncol * 4, nrow * 4))
    for i in range(len(img_array)):
        plots[i // ncol, i % ncol].imshow(img_array[i])
from functools import reduce
def plot_side_by_side(img_arrays):
    """Interleave several equally long image sequences and plot them in one
    grid, one column per input sequence."""
    # zip groups the i-th image of every sequence; summing the resulting
    # tuples flattens them into a single interleaved sequence
    interleaved = reduce(lambda left, right: left + right, zip(*img_arrays))
    plot_img_array(np.array(interleaved), ncol=len(img_arrays))
import itertools
def plot_errors(results_dict, title):
    """Plot one labelled dice-coefficient curve per entry of `results_dict`
    (iterated in sorted label order), cycling through a few point markers."""
    marker_cycle = itertools.cycle(('+', 'x', 'o'))
    plt.title('{}'.format(title))
    for label, series in sorted(results_dict.items()):
        plt.plot(series, marker=next(marker_cycle), label=label)
    plt.ylabel('dice_coef')
    plt.xlabel('epoch')
    plt.legend(loc=3, bbox_to_anchor=(1, 0))
    plt.show()
def masks_to_colorimg(masks):
    """Convert a (channels, height, width) stack of soft masks into an RGB
    image: each pixel covered by at least one mask (value > 0.5) receives the
    mean colour of the active channels; uncovered pixels stay white."""
    palette = np.asarray([(201, 58, 64)])
    _, height, width = masks.shape
    # start from a white canvas (float32, converted to uint8 on return)
    out = np.full((height, width, 3), 255, dtype=np.float32)
    for row in range(height):
        for col in range(width):
            active = palette[masks[:, row, col] > 0.5]
            if active.size:
                out[row, col, :] = np.mean(active, axis=0)
    return out.astype(np.uint8)
"numpy.mean",
"itertools.cycle",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((141, 228), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'sharex': '"""all"""', 'sharey': '"""all"""', 'figsize': '(ncol * 4, nrow * 4)'}), "(nrow, ncol, sharex='all', sharey='all', figsize=(ncol * 4, \n nrow * 4))\n", (153, 228), True, 'import matplotlib.pyplot as plt\n'), ((614, 646), 'itertools.cycle', 'itertools.cycle', (["('+', 'x', 'o')"], {}), "(('+', 'x', 'o'))\n", (629, 646), False, 'import itertools\n'), ((912, 922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (920, 922), True, 'import matplotlib.pyplot as plt\n'), ((967, 994), 'numpy.asarray', 'np.asarray', (['[(201, 58, 64)]'], {}), '([(201, 58, 64)])\n', (977, 994), True, 'import numpy as np\n'), ((498, 520), 'numpy.array', 'np.array', (['flatten_list'], {}), '(flatten_list)\n', (506, 520), True, 'import numpy as np\n'), ((806, 829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dice_coef"""'], {}), "('dice_coef')\n", (816, 829), True, 'import matplotlib.pyplot as plt\n'), ((838, 857), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (848, 857), True, 'import matplotlib.pyplot as plt\n'), ((866, 906), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)', 'bbox_to_anchor': '(1, 0)'}), '(loc=3, bbox_to_anchor=(1, 0))\n', (876, 906), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1073), 'numpy.ones', 'np.ones', (['(masks.shape[1], masks.shape[2], 3)'], {'dtype': 'np.float32'}), '((masks.shape[1], masks.shape[2], 3), dtype=np.float32)\n', (1018, 1073), True, 'import numpy as np\n'), ((1315, 1347), 'numpy.mean', 'np.mean', (['selected_colors'], {'axis': '(0)'}), '(selected_colors, axis=0)\n', (1322, 1347), True, 'import numpy as np\n')] |
import os.path
import random
import glob
import math
from .listdatasets import ListDataset_random_mid, AIM_Challenge_loader
import numpy
def make_dataset(root='', split=0.0, train_or_test='train'):
    """Collect 9-frame clips from a 60fps dataset.

    Each sequence folder holds the frames of one video (e.g. 181 frames).
    Clips take every second frame:
        clip_0: 0 2 4 6 8 10 12 14 16
        clip_1: 18 ... 34
        ...

    Args:
        root: dataset root containing ``train/train_60fps`` and
            ``val/val_60fps`` subdirectories.
        split: percentage (0-100) of clips placed into the first returned list.
        train_or_test: 'train' or 'test' -- selects which subdirectory to scan.

    Returns:
        Tuple ``(first_part, second_part)`` of shuffled clip lists; every clip
        is a list of 9 frame file paths.

    Raises:
        ValueError: if ``train_or_test`` is neither 'train' nor 'test'.
    """
    Ban_list = []
    if train_or_test == 'train':
        dataset_path = os.path.join(root, "train/train_60fps")
    elif train_or_test == 'test':
        dataset_path = os.path.join(root, "val/val_60fps")
    else:
        # Fail fast instead of printing "Error" and crashing later when
        # os.listdir is called with an empty path.
        raise ValueError("train_or_test must be 'train' or 'test', got %r"
                         % (train_or_test,))
    # load file list: find and loop over all sequence folders
    framesPath = []
    for folder in sorted(os.listdir(dataset_path)):  # folder 0 1 2 3 ...
        if folder in Ban_list:
            print("ban dir: ", folder)
            continue
        folder_path = os.path.join(dataset_path, folder)
        frames_list = sorted(os.listdir(folder_path))  # frames 0 1 2 3 ...
        frames_len = len(frames_list)
        first_clips_index = [0, 2, 4, 6, 8, 10, 12, 14, 16]
        step = 18
        clips_num = int(numpy.floor(frames_len / 9))
        for i in range(clips_num):
            if i == 0:
                clips_index = first_clips_index
            else:
                clips_index = [j + step for j in clips_index]
            temp_paths = [os.path.join(folder_path, str(ci).zfill(8) + '.png')
                          for ci in clips_index]
            framesPath.append(temp_paths)
    random.shuffle(framesPath)
    split_index = int(math.floor(len(framesPath) * split / 100.0))
    assert 0 <= split_index <= len(framesPath)
    if split_index < len(framesPath):
        return framesPath[:split_index], framesPath[split_index:]
    return framesPath, []
# use 1% of the samples to be a validation dataset
def AIM_Challenge(root, split=1.0, single='', task='interp', middle=False, high_fps=False):
    """Create train and validation datasets for the AIM challenge.

    `single` and `task` are accepted for interface compatibility but unused.
    `split` is the percentage of clips used for training; the remainder forms
    the validation set.
    """
    # keep the original `== True` comparison to preserve exact semantics
    message = ("just select the middle frame" if middle == True
               else "random select the middle frame")
    print(message)
    train_list, test_list = make_dataset(root=root, train_or_test='train', split=split)
    # test_list = make_dataset(root=root,train_or_test='test')
    train_dataset, test_dataset = (
        ListDataset_random_mid(root, clip_list, loader=AIM_Challenge_loader,
                               middle=middle, high_fps=high_fps)
        for clip_list in (train_list, test_list))
    return train_dataset, test_dataset
| [
"numpy.floor",
"random.shuffle"
] | [((1931, 1957), 'random.shuffle', 'random.shuffle', (['framesPath'], {}), '(framesPath)\n', (1945, 1957), False, 'import random\n'), ((1421, 1448), 'numpy.floor', 'numpy.floor', (['(frames_len / 9)'], {}), '(frames_len / 9)\n', (1432, 1448), False, 'import numpy\n')] |
#!/usr/bin/env python
"""
Created on Thu Jan 23 10:43:35 2014
Author: <NAME>
Email: <EMAIL>
"""
import numpy as np
#from scipy.linalg import expm
from scipy.sparse.linalg import expm # scipy.linalg.expm is just a wrapper around this one.
#from expm_hacked import expm
#from scipy.sparse.linalg import expm_multiply
from cpab.cpaNd import CpaCalcs as CpaCalcsNd
from of.utils import ipshell
#from pyvision.essentials import *
from pylab import plt
#from class_affine_flow import AffineFlow
#from cy.transform.transform import calc_flowline_arr1d
#from cy.transform32.transform import calc_flowline_arr1d as calc_flowline_arr1d32
class CpaCalcs(CpaCalcsNd):
    def __init__(self, XMINS, XMAXS, Ngrids, use_GPU_if_possible, my_dtype=np.float64):
        """
        Ngrids: number of pixels in each dim.
        Don't confuse Nx & Ny with the numbers of cells in each dim.
        """
        super(CpaCalcs, self).__init__(XMINS, XMAXS, Ngrids, use_GPU_if_possible, my_dtype)
        if np.asarray(XMINS).any():
            # only grids whose minima are at the origin are supported
            raise NotImplementedError
        nx, ny = Ngrids
        nx = int(nx)
        ny = int(ny)
        self.Nx = nx
        self.Ny = ny

        grid_y, grid_x = np.mgrid[0:ny + 1, 0:nx + 1]
        grid_x = grid_x.astype(self.my_dtype)
        grid_y = grid_y.astype(self.my_dtype)

        # shape (2, 1 + #pixels in y direction, 1 + #pixels in x direction)
        self.x_dense_grid = np.asarray([grid_x, grid_y]).copy()
        # shape (2, #pixels in y direction, #pixels in x direction)
        self.x_dense_grid_img = np.asarray([grid_x[:-1, :-1], grid_y[:-1, :-1]]).copy()

        # shape ((1 + #pixels_y) * (1 + #pixels_x), 2)
        self.x_dense = np.asarray([self.x_dense_grid[0].ravel(),
                                self.x_dense_grid[1].ravel()]).T.copy()
        # shape (#pixels_y * #pixels_x, 2)
        self.x_dense_img = np.asarray([self.x_dense_grid_img[0].ravel(),
                                    self.x_dense_grid_img[1].ravel()]).T.copy()

        if self.x_dense.shape[1] != 2:
            # NOTE(review): `self.x` is presumably set by the parent class --
            # verify against CpaCalcsNd; preserved from the original code.
            raise ValueError(self.x.shape)
        if self.x_dense_img.shape[1] != 2:
            raise ValueError(self.x_dense_img.shape)

        self.XMINS = np.asarray([grid_x.min(), grid_y.min()])
        # note: this exceeds the XMAXS argument by 1
        self.XMAXS = np.asarray([grid_x.max(), grid_y.max()])
| [
"numpy.asarray"
] | [((1023, 1040), 'numpy.asarray', 'np.asarray', (['XMINS'], {}), '(XMINS)\n', (1033, 1040), True, 'import numpy as np\n'), ((1468, 1488), 'numpy.asarray', 'np.asarray', (['[xx, yy]'], {}), '([xx, yy])\n', (1478, 1488), True, 'import numpy as np\n'), ((1607, 1647), 'numpy.asarray', 'np.asarray', (['[xx[:-1, :-1], yy[:-1, :-1]]'], {}), '([xx[:-1, :-1], yy[:-1, :-1]])\n', (1617, 1647), True, 'import numpy as np\n')] |
'''
Created on Dec 22, 2014
@author: <NAME> <<EMAIL>>
'''
from __future__ import division
import copy
import numpy as np
from scipy.spatial import distance
from shapely import geometry
import cv2
from utils.data_structures.cache import cached_property
from video.analysis import curves, regions, shapes
from video.analysis.active_contour import ActiveContour
from video import debug # @UnusedImport
class Tail(shapes.Polygon):
    """ class representing a single mouse tail in a single frame.
    Every tail is defined by its contour.
    """

    endpoint_mass_radius = 500 #< radius for identifying the end points
    contour_spacing = 20 #< spacing of the contour points
    centerline_spacing = 50 #< spacing of centerline points

    # parameters for the active snake algorithm for finding the centerline
    centerline_blur_radius = 5
    centerline_bending_stiffness = 1e6
    centerline_adaptation_rate = 1e-2
    centerline_max_iterations = 500

    line_names = ['Side A', 'Side B']

    def __init__(self, contour, extra_data=None):
        """ initialize a tail with its contour and optional extra data """
        super(Tail, self).__init__(contour)
        self.persistence_data = {}
        if extra_data is None:
            self.data = {}
        else:
            self.data = extra_data

    def __repr__(self):
        return '%s(pos=(%d, %d), %d contour points)' % \
                (self.__class__.__name__, self.centroid[0], self.centroid[1],
                 len(self.contour))

    def copy(self):
        """ return a copy of this tail, including its persistence data """
        # FIX: the extra data passed to __init__ is stored in `self.data`;
        # the previous implementation read the non-existent attribute
        # `self.extra_data`, so every call to copy() raised AttributeError.
        obj = self.__class__(self.contour.copy(), self.data.copy())
        obj.persistence_data = self.persistence_data.copy()
        return obj

    @shapes.Polygon.contour.setter
    def contour(self, points):
        """ sets the contour of the tail performing some sanity tests """
        # do a first regularization
        points = regions.regularize_contour_points(points)
        spacing = self.contour_spacing
        # make the contour line equidistant
        points = curves.make_curve_equidistant(points, spacing=spacing)
        # regularize again, just to be sure
        points = regions.regularize_contour_points(points)
        # call parent setter
        shapes.Polygon.contour.fset(self, points)

    @classmethod
    def create_similar(cls, contour, tail):
        """ creates a tail described by `contour` that has a similar orientation
        to the given `tail` """
        obj = cls(contour)
        obj.determine_endpoint_indices(tail)
        obj.match_side_order(tail)
        return obj

    def update_contour(self, points):
        """ updates the contour, keeping the identity of the end points,
        ventral line, and the measurement lines intact """
        tail_prev = copy.deepcopy(self)
        self.contour = points
        # update important features of the tail in reference to the previous
        self.determine_endpoint_indices(tail_prev)
        self.match_side_order(tail_prev)

    @cached_property
    def mask(self):
        """ return a binary mask large enough to hold the tails image and an
        offset the determines the position of the mask in global coordinates """
        return self.get_mask(margin=5, ret_offset=True)

    def determine_endpoint_indices(self, tail_prev=None):
        """ locate the end points as contour points with maximal distance
        The posterior point is returned first.
        If `tail_prev` is given, the end points are oriented in the same way as
        in the previous tail, otherwise they are assigned automatically.
        """
        # get the points which are farthest away from each other
        dist = distance.squareform(distance.pdist(self.contour))
        indices = np.unravel_index(np.argmax(dist), dist.shape)
        if tail_prev is None:
            # determine the mass of tissue to determine posterior end
            mass = []
            for k in indices:
                radius = self.endpoint_mass_radius
                endzone = geometry.Point(self.contour[k]).buffer(radius)
                poly = self.polygon.buffer(0) #< clean the polygon
                mass.append(poly.intersection(endzone).area)
            # determine posterior end point by measuring the surrounding;
            # the end with more surrounding tissue is the posterior one
            if mass[1] < mass[0]:
                indices = indices[::-1]
        else:
            # sort end points according to previous frame: pick the pairing
            # of (this, previous) end points with the smaller total distance
            prev_p, prev_a = tail_prev.endpoints
            this_1 = self.contour[indices[0]]
            this_2 = self.contour[indices[1]]
            dist1 = curves.point_distance(this_1, prev_p) + \
                    curves.point_distance(this_2, prev_a)
            dist2 = curves.point_distance(this_1, prev_a) + \
                    curves.point_distance(this_2, prev_p)
            if dist2 < dist1:
                indices = indices[::-1]
        # save indices in cache
        self.persistence_data['endpoint_indices'] = indices
        return indices

    @property
    def endpoint_indices(self):
        """ locate the end points as contour points with maximal distance
        The posterior point is returned first.
        """
        indices = self.persistence_data.get('endpoint_indices', None)
        if indices is None:
            indices = self.determine_endpoint_indices()
        return indices

    @cached_property
    def endpoints(self):
        """ returns the posterior and the anterior end point """
        j, k = self.endpoint_indices
        return self.contour[j], self.contour[k]

    def _sort_sides(self, sides, first_line):
        """ sorts sides such that the first line in `sides` is closest to
        `first_line` """
        # NOTE(review): this method has no implementation -- its body consists
        # only of the docstring, so it always returns None.
        # `match_side_order` currently performs this sorting inline; the stub
        # is kept as-is to preserve the class interface.

    def determine_side_order(self, sides):
        """ determine which of the sides is the ventral side based on geometric
        properties and return indices such that the ventral side comes first """
        # define a line connecting both end points
        k1, k2 = self.endpoint_indices
        line = geometry.LineString([self.contour[k1], self.contour[k2]])
        # cut the shape using this line and return the largest part
        parts = self.polygon.difference(line.buffer(0.1))
        if isinstance(parts, geometry.MultiPolygon):
            areas = [part.area for part in parts]
            polygon = parts[np.argmax(areas)].buffer(0.1)
        else:
            polygon = parts
        # measure the fraction of points that lie in the polygon
        fs = []
        for c in sides:
            mp = geometry.MultiPoint(c)
            intersection = mp.intersection(polygon)
            if isinstance(intersection, geometry.Point):
                frac = 1/len(mp)
            else:
                frac = len(intersection)/len(mp)
            fs.append(frac)
        # return the order in which the sides should be put
        if np.argmax(fs) == 0:
            return (0, 1)
        else:
            return (1, 0)

    def get_sides(self):
        """ determine the sides of the tail and return them in arbitrary order
        """
        # get the two sides
        k1, k2 = self.endpoint_indices
        if k2 > k1:
            sides = [self.contour[k1:k2 + 1],
                     np.r_[self.contour[k2:], self.contour[:k1 + 1]]]
        else:
            sides = [self.contour[k2:k1 + 1][::-1],
                     np.r_[self.contour[k1:], self.contour[:k2 + 1]][::-1, :]]
        return sides

    def match_side_order(self, tail_prev):
        """ determines the side of the tails and align them with an earlier
        shape """
        # get the two sides
        sides = self.get_sides()
        # get the reference line to determine the order of the sides
        line_ref = tail_prev.sides[0]
        # sort lines such that reference line comes first
        first_line = geometry.LineString(line_ref)
        dists = [np.mean([first_line.distance(geometry.Point(p))
                          for p in side])
                 for side in sides]
        if dists[0] < dists[1]:
            order = (0, 1)
        else:
            order = (1, 0)
        # save the order of the sides to be able to recover them after pickling
        self.persistence_data['side_order'] = order
        # get the sides and make sure they agree with the previous order
        self._cache['sides'] = sides[order[0]], sides[order[1]]

    @cached_property
    def sides(self):
        """ return the two sides of the tail """
        sides = self.get_sides()
        order = self.persistence_data.get('side_order', None)
        if order is None:
            order = self.determine_side_order(sides)
            self.persistence_data['side_order'] = order
        return sides[order[0]], sides[order[1]]

    @property
    def ventral_side(self):
        """ returns the ventral side """
        return self.sides[0]

    @property
    def dorsal_side(self):
        """ returns the dorsal side """
        # FIX: the docstring previously claimed this returns the ventral side
        return self.sides[1]

    @cached_property
    def centerline(self):
        """ determine the center line of the tail """
        mask, offset = self.mask
        dist_map = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
        # setup active contour algorithm
        ac = ActiveContour(blur_radius=self.centerline_blur_radius,
                           closed_loop=False,
                           alpha=0, #< line length is constraint by beta
                           beta=self.centerline_bending_stiffness,
                           gamma=self.centerline_adaptation_rate)
        ac.max_iterations = self.centerline_max_iterations
        ac.set_potential(dist_map)
        # find centerline starting from the ventral_side
        points = curves.translate_points(self.ventral_side,
                                         -offset[0], -offset[1])
        spacing = self.centerline_spacing
        points = curves.make_curve_equidistant(points, spacing=spacing)
        # use the active contour algorithm
        points = ac.find_contour(points)
        points = curves.make_curve_equidistant(points, spacing=spacing)
        # translate points back into global coordinate system
        points = curves.translate_points(points, offset[0], offset[1])
        # orient the centerline such that it starts at the posterior end
        dist1 = curves.point_distance(points[0], self.endpoints[0])
        dist2 = curves.point_distance(points[-1], self.endpoints[0])
        if dist1 > dist2:
            points = points[::-1]
        return points
| [
"video.analysis.active_contour.ActiveContour",
"scipy.spatial.distance.pdist",
"video.analysis.shapes.Polygon.contour.fset",
"numpy.argmax",
"video.analysis.curves.make_curve_equidistant",
"video.analysis.curves.translate_points",
"video.analysis.curves.point_distance",
"shapely.geometry.Point",
"sh... | [((1941, 1982), 'video.analysis.regions.regularize_contour_points', 'regions.regularize_contour_points', (['points'], {}), '(points)\n', (1974, 1982), False, 'from video.analysis import curves, regions, shapes\n'), ((2083, 2137), 'video.analysis.curves.make_curve_equidistant', 'curves.make_curve_equidistant', (['points'], {'spacing': 'spacing'}), '(points, spacing=spacing)\n', (2112, 2137), False, 'from video.analysis import curves, regions, shapes\n'), ((2199, 2240), 'video.analysis.regions.regularize_contour_points', 'regions.regularize_contour_points', (['points'], {}), '(points)\n', (2232, 2240), False, 'from video.analysis import curves, regions, shapes\n'), ((2278, 2319), 'video.analysis.shapes.Polygon.contour.fset', 'shapes.Polygon.contour.fset', (['self', 'points'], {}), '(self, points)\n', (2305, 2319), False, 'from video.analysis import curves, regions, shapes\n'), ((2831, 2850), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2844, 2850), False, 'import copy\n'), ((6152, 6209), 'shapely.geometry.LineString', 'geometry.LineString', (['[self.contour[k1], self.contour[k2]]'], {}), '([self.contour[k1], self.contour[k2]])\n', (6171, 6209), False, 'from shapely import geometry\n'), ((8039, 8068), 'shapely.geometry.LineString', 'geometry.LineString', (['line_ref'], {}), '(line_ref)\n', (8058, 8068), False, 'from shapely import geometry\n'), ((9400, 9443), 'cv2.distanceTransform', 'cv2.distanceTransform', (['mask', 'cv2.DIST_L2', '(5)'], {}), '(mask, cv2.DIST_L2, 5)\n', (9421, 9443), False, 'import cv2\n'), ((9507, 9677), 'video.analysis.active_contour.ActiveContour', 'ActiveContour', ([], {'blur_radius': 'self.centerline_blur_radius', 'closed_loop': '(False)', 'alpha': '(0)', 'beta': 'self.centerline_bending_stiffness', 'gamma': 'self.centerline_adaptation_rate'}), '(blur_radius=self.centerline_blur_radius, closed_loop=False,\n alpha=0, beta=self.centerline_bending_stiffness, gamma=self.\n centerline_adaptation_rate)\n', (9520, 9677), 
False, 'from video.analysis.active_contour import ActiveContour\n'), ((9991, 10057), 'video.analysis.curves.translate_points', 'curves.translate_points', (['self.ventral_side', '(-offset[0])', '(-offset[1])'], {}), '(self.ventral_side, -offset[0], -offset[1])\n', (10014, 10057), False, 'from video.analysis import curves, regions, shapes\n'), ((10158, 10212), 'video.analysis.curves.make_curve_equidistant', 'curves.make_curve_equidistant', (['points'], {'spacing': 'spacing'}), '(points, spacing=spacing)\n', (10187, 10212), False, 'from video.analysis import curves, regions, shapes\n'), ((10314, 10368), 'video.analysis.curves.make_curve_equidistant', 'curves.make_curve_equidistant', (['points'], {'spacing': 'spacing'}), '(points, spacing=spacing)\n', (10343, 10368), False, 'from video.analysis import curves, regions, shapes\n'), ((10448, 10501), 'video.analysis.curves.translate_points', 'curves.translate_points', (['points', 'offset[0]', 'offset[1]'], {}), '(points, offset[0], offset[1])\n', (10471, 10501), False, 'from video.analysis import curves, regions, shapes\n'), ((10600, 10651), 'video.analysis.curves.point_distance', 'curves.point_distance', (['points[0]', 'self.endpoints[0]'], {}), '(points[0], self.endpoints[0])\n', (10621, 10651), False, 'from video.analysis import curves, regions, shapes\n'), ((10668, 10720), 'video.analysis.curves.point_distance', 'curves.point_distance', (['points[-1]', 'self.endpoints[0]'], {}), '(points[-1], self.endpoints[0])\n', (10689, 10720), False, 'from video.analysis import curves, regions, shapes\n'), ((3770, 3798), 'scipy.spatial.distance.pdist', 'distance.pdist', (['self.contour'], {}), '(self.contour)\n', (3784, 3798), False, 'from scipy.spatial import distance\n'), ((3835, 3850), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (3844, 3850), True, 'import numpy as np\n'), ((6683, 6705), 'shapely.geometry.MultiPoint', 'geometry.MultiPoint', (['c'], {}), '(c)\n', (6702, 6705), False, 'from shapely import 
geometry\n'), ((7027, 7040), 'numpy.argmax', 'np.argmax', (['fs'], {}), '(fs)\n', (7036, 7040), True, 'import numpy as np\n'), ((4690, 4727), 'video.analysis.curves.point_distance', 'curves.point_distance', (['this_1', 'prev_p'], {}), '(this_1, prev_p)\n', (4711, 4727), False, 'from video.analysis import curves, regions, shapes\n'), ((4752, 4789), 'video.analysis.curves.point_distance', 'curves.point_distance', (['this_2', 'prev_a'], {}), '(this_2, prev_a)\n', (4773, 4789), False, 'from video.analysis import curves, regions, shapes\n'), ((4810, 4847), 'video.analysis.curves.point_distance', 'curves.point_distance', (['this_1', 'prev_a'], {}), '(this_1, prev_a)\n', (4831, 4847), False, 'from video.analysis import curves, regions, shapes\n'), ((4872, 4909), 'video.analysis.curves.point_distance', 'curves.point_distance', (['this_2', 'prev_p'], {}), '(this_2, prev_p)\n', (4893, 4909), False, 'from video.analysis import curves, regions, shapes\n'), ((4102, 4133), 'shapely.geometry.Point', 'geometry.Point', (['self.contour[k]'], {}), '(self.contour[k])\n', (4116, 4133), False, 'from shapely import geometry\n'), ((6476, 6492), 'numpy.argmax', 'np.argmax', (['areas'], {}), '(areas)\n', (6485, 6492), True, 'import numpy as np\n'), ((8115, 8132), 'shapely.geometry.Point', 'geometry.Point', (['p'], {}), '(p)\n', (8129, 8132), False, 'from shapely import geometry\n')] |
import torch.nn as nn
import torch
import numpy as np
def create_embedding_matrix(filepath, word_index, embedding_dim):
    """Load pretrained word vectors from a text file (word2vec text format:
    one header line, then "word v1 v2 ..." per line) into a matrix indexed by
    `word_index`. Rows of words missing from the file stay zero, and the
    number of such unknown tokens is printed.
    """
    embedding_matrix = np.zeros((len(word_index), embedding_dim))
    found = 0
    with open(filepath, encoding='utf-8') as fh:
        fh.readline()  # skip the header line
        for line in fh:
            if line == '\n':
                continue
            word, *values = line.split()
            if word not in word_index:
                continue
            found += 1
            embedding_matrix[word_index[word]] = np.array(
                values, dtype=np.float32)[:embedding_dim]
    print('unk tokens:', len(word_index) - found)
    return embedding_matrix
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout_em=0.5, dropout_rnn=1, dropout_out=1, tie_weights=False, bidirection=False):
        """Word-level recurrent language model: embedding -> RNN -> linear.

        rnn_type must be one of 'LSTM', 'GRU', 'RNN_TANH', 'RNN_RELU'.
        """
        super(RNNModel, self).__init__()
        self.drop_em = nn.Dropout(dropout_em)
        self.drop_out = nn.Dropout(dropout_out)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.bidirection = bidirection
        if rnn_type in ['LSTM', 'GRU']:
            rnn_cls = getattr(nn, rnn_type)
            self.rnn = rnn_cls(ninp, nhid, nlayers, dropout=dropout_rnn, bidirectional=bidirection)
        else:
            try:
                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            except KeyError:
                raise ValueError( """An invalid option for `--model` was supplied,
                                 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout_rnn)
        num_directions = 2 if bidirection else 1
        self.decoder = nn.Linear(nhid * num_directions, ntoken)

        # Optionally tie encoder/decoder weights, see:
        # "Using the Output Embedding to Improve Language Models"
        # (Press & Wolf 2016, https://arxiv.org/abs/1608.05859) and
        # "Tying Word Vectors and Word Classifiers" (Inan et al. 2016,
        # https://arxiv.org/abs/1611.01462)
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def load_embedding(self, filepath, Corpus_Dic, device):
        """Replace the encoder weights with pretrained vectors from filepath."""
        print('loading embedding......')
        weights = torch.Tensor(
            create_embedding_matrix(filepath, Corpus_Dic.word2idx, self.ninp))
        self.encoder = self.encoder.from_pretrained(weights).to(device)
        print('embedding loaded.')

    def init_weights(self):
        """Uniformly initialise embedding/decoder weights; zero decoder bias."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden):
        embedded = self.drop_em(self.encoder(input))
        rnn_out, hidden = self.rnn(embedded, hidden)
        rnn_out = self.drop_out(rnn_out)
        # project every time step onto the vocabulary
        flat = rnn_out.view(rnn_out.size(0) * rnn_out.size(1), rnn_out.size(2))
        decoded = self.decoder(flat)
        return decoded.view(rnn_out.size(0), rnn_out.size(1), decoded.size(1)), hidden

    def init_hidden(self, bsz):
        """Return a zero hidden state for batch size `bsz` (a tuple for LSTM)."""
        weight = next(self.parameters())
        num_directions = 2 if self.bidirection else 1
        shape = (self.nlayers * num_directions, bsz, self.nhid)
        if self.rnn_type == 'LSTM':
            return (weight.new_zeros(*shape), weight.new_zeros(*shape))
        return weight.new_zeros(*shape)
| [
"torch.nn.Dropout",
"torch.nn.RNN",
"numpy.array",
"numpy.zeros",
"torch.nn.Linear",
"torch.nn.Embedding"
] | [((224, 261), 'numpy.zeros', 'np.zeros', (['(vocab_size, embedding_dim)'], {}), '((vocab_size, embedding_dim))\n', (232, 261), True, 'import numpy as np\n'), ((1055, 1077), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_em'], {}), '(dropout_em)\n', (1065, 1077), True, 'import torch.nn as nn\n'), ((1102, 1125), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_out'], {}), '(dropout_out)\n', (1112, 1125), True, 'import torch.nn as nn\n'), ((1149, 1175), 'torch.nn.Embedding', 'nn.Embedding', (['ntoken', 'ninp'], {}), '(ntoken, ninp)\n', (1161, 1175), True, 'import torch.nn as nn\n'), ((1868, 1895), 'torch.nn.Linear', 'nn.Linear', (['(nhid * n)', 'ntoken'], {}), '(nhid * n, ntoken)\n', (1877, 1895), True, 'import torch.nn as nn\n'), ((1733, 1808), 'torch.nn.RNN', 'nn.RNN', (['ninp', 'nhid', 'nlayers'], {'nonlinearity': 'nonlinearity', 'dropout': 'dropout_rnn'}), '(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout_rnn)\n', (1739, 1808), True, 'import torch.nn as nn\n'), ((592, 626), 'numpy.array', 'np.array', (['vector'], {'dtype': 'np.float32'}), '(vector, dtype=np.float32)\n', (600, 626), True, 'import numpy as np\n')] |
# Module that split color channels and merge with empty channels to generate
# specific colored images
import cv2
import numpy as np

# load the source image and separate its B, G, R channels
src = cv2.imread("lena.jpg")
ch_blue, ch_green, ch_red = cv2.split(src)

# all-zero single-channel image used to blank out the remaining channels
blank = np.zeros(src.shape[:2], dtype=np.uint8)

cv2.imshow('Red', cv2.merge([blank, blank, ch_red]))
cv2.imshow('Green', cv2.merge([blank, ch_green, blank]))
cv2.imshow('Blue', cv2.merge([ch_blue, blank, blank]))
cv2.imshow('Original', src)
cv2.waitKey(0)
| [
"cv2.merge",
"cv2.imshow",
"numpy.zeros",
"cv2.split",
"cv2.waitKey",
"cv2.imread"
] | [((142, 164), 'cv2.imread', 'cv2.imread', (['"""lena.jpg"""'], {}), "('lena.jpg')\n", (152, 164), False, 'import cv2\n'), ((208, 224), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (217, 224), False, 'import cv2\n'), ((233, 274), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': 'np.uint8'}), '(image.shape[:2], dtype=np.uint8)\n', (241, 274), True, 'import numpy as np\n'), ((456, 485), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (466, 485), False, 'import cv2\n'), ((486, 500), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (497, 500), False, 'import cv2\n'), ((294, 332), 'cv2.merge', 'cv2.merge', (['[zeros, zeros, red_channel]'], {}), '([zeros, zeros, red_channel])\n', (303, 332), False, 'import cv2\n'), ((354, 394), 'cv2.merge', 'cv2.merge', (['[zeros, green_channel, zeros]'], {}), '([zeros, green_channel, zeros])\n', (363, 394), False, 'import cv2\n'), ((415, 454), 'cv2.merge', 'cv2.merge', (['[blue_channel, zeros, zeros]'], {}), '([blue_channel, zeros, zeros])\n', (424, 454), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
"""
import math
import numpy as np
from pdftabextract.common import (update_text_dict_pos, sorted_by_attr,
DIRECTION_HORIZONTAL, DIRECTION_VERTICAL, SKEW_X, SKEW_Y)
from pdftabextract.geom import pt, vecrotate
def border_positions_from_texts(texts, direction, only_attr=None):
    """
    From a list of textboxes in <texts>, get the border positions for the respective direction.
    For vertical direction, return the text boxes' top and bottom border positions.
    For horizontal direction, return the text boxes' left and right border positions.
    <direction> must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL from pdftabextract.common.
    optional <only_attr> must be either 'low' (only return 'top' or 'left' borders) or 'high' (only return 'bottom' or
    'right').
    Border positions are returned as sorted NumPy array.
    """
    if direction not in (DIRECTION_HORIZONTAL, DIRECTION_VERTICAL):
        raise ValueError("direction must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL (see pdftabextract.common)")
    if only_attr is not None and only_attr not in ('low', 'high'):
        raise ValueError("only_attr must be either 'low' or 'high' if not set to None (default)")

    # select which box attributes constitute the low/high borders
    if direction == DIRECTION_VERTICAL:
        attr_lo, attr_hi = 'top', 'bottom'
    else:
        attr_lo, attr_hi = 'left', 'right'

    positions = []
    for t in texts:
        # after validation only_attr is None, 'low' or 'high'
        if only_attr != 'high':   # i.e. None or 'low'
            positions.append(t[attr_lo])
        if only_attr != 'low':    # i.e. None or 'high'
            positions.append(t[attr_hi])

    return np.array(sorted(positions))
def split_texts_by_positions(texts, positions, direction, alignment='high',
                             discard_empty_sections=True,
                             enrich_with_positions=False):
    """
    Split textboxes in <texts> into sections according to <positions> either horizontally or vertically (depending on
    <direction>.)
    <alignment> must be one of ('low', 'middle', 'high') and is used to determine the text box border (or center
    for 'middle') to use for checking if this text box is inside of a section
    <positions> must be sorted from low to high!
    """
    if direction not in (DIRECTION_HORIZONTAL, DIRECTION_VERTICAL):
        raise ValueError("direction must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL (see pdftabextract.common)")
    if alignment not in ('low', 'middle', 'high'):
        raise ValueError("alignment must be 'low' or 'high'")
    if len(positions) == 0:
        raise ValueError("positions must be non-empty sequence")

    # build a function returning the reference coordinate of a text box
    if alignment == 'middle':
        if direction == DIRECTION_VERTICAL:
            ref_coord = lambda t: t['top'] + t['height'] / 2
        else:
            ref_coord = lambda t: t['left'] + t['width'] / 2
    else:
        if direction == DIRECTION_VERTICAL:
            attr = 'bottom' if alignment == 'high' else 'top'
        else:
            attr = 'right' if alignment == 'high' else 'left'
        ref_coord = lambda t: t[attr]

    split_texts = []
    n_added_texts = 0
    prev_pos = -1
    for pos in positions:
        # a box belongs to the section (prev_pos, pos]
        section = [t for t in texts if prev_pos < ref_coord(t) <= pos]
        if section or not discard_empty_sections:
            if enrich_with_positions:
                split_texts.append((section, (prev_pos, pos)))
            else:
                split_texts.append(section)
            n_added_texts += len(section)
        prev_pos = pos

    assert n_added_texts == len(texts)

    return split_texts
def put_texts_in_lines(texts):
    """
    Sort text boxes <texts> vertically first and split them into lines. Sort each line horizontally (left to right).
    Returns list of lists, each representing a line with text boxes. Empty lines contain empty lists.
    """
    if not texts:
        return []

    mean_text_height = np.mean([t['bottom'] - t['top'] for t in texts])

    sorted_ts = list(sorted(texts, key=lambda x: x['top']))   # sort texts vertically
    # create list of vertical spacings between sorted texts
    text_spacings = [t['top'] - sorted_ts[i - 1]['bottom'] for i, t in enumerate(sorted_ts) if i > 0]
    text_spacings.append(0.0)   # last line

    # minimum positive spacing is considered to be the general vertical line spacing
    pos_sp = [v for v in text_spacings if v > 0]
    line_vspace = min(pos_sp) if pos_sp else None

    # go through all vertically sorted texts
    lines = []
    cur_line = []
    min_vspace_for_break = -mean_text_height / 2   # texts might overlap vertically. if the overlap is more than
                                                   # half the mean text height, it is considered a line break

    for t, spacing in zip(sorted_ts, text_spacings):
        cur_line.append(t)

        if spacing >= min_vspace_for_break:   # this is a line break
            # add all texts to this line sorted by x-position
            lines.append(list(sorted(cur_line, key=lambda x: x['left'])))

            # add some empty line breaks if necessary
            # bug fix: the former "lines.extend([] * int(...))" was a no-op
            # because "[] * n" is always the empty list, so the empty lines
            # promised by the docstring were never added. Build distinct empty
            # lists instead (not "[[]] * n", which would alias one list).
            if line_vspace:
                lines.extend([] for _ in range(int(spacing / line_vspace)))

            # reset
            cur_line = []

    assert len(cur_line) == 0   # because last line gets a zero-spacing appended
    assert len(texts) == sum(map(len, lines))   # check if all texts were put into lines

    return lines
def join_texts(texts, sorted_by='left', glue=' ', strip=True):
    """Concatenate the 'value' strings of the text boxes <texts> with <glue>,
    optionally sorting them by attribute <sorted_by> first and stripping the result."""
    boxes = sorted_by_attr(texts, sorted_by) if sorted_by else texts
    joined = glue.join(t['value'] for t in boxes)
    return joined.strip() if strip else joined
def create_text_from_lines(lines, linebreak='\n', linejoin=' ', strip=True):
    """Build a multi-line text string from text box <lines> (as produced by put_texts_in_lines),
    joining boxes inside a line with <linejoin> and terminating each line with <linebreak>."""
    rendered = [join_texts(line, glue=linejoin, strip=strip) + linebreak
                for line in lines]
    text = ''.join(rendered)
    return text.strip() if strip else text
def rotate_textboxes(page, page_rot, about_pt):
    """
    Rotate all text boxes in <page> about a point <about_pt> by <page_rot> radians.
    """
    for textbox in page['texts']:
        # current top-left corner of this text box
        corner = pt(textbox['left'], textbox['top'])
        # position of that corner after rotating about <about_pt>
        rotated_corner = vecrotate(corner, page_rot, about_pt)
        # write the new position back (update_node=True presumably also syncs
        # an underlying node -- confirm in update_text_dict_pos)
        update_text_dict_pos(textbox, rotated_corner, update_node=True)
def deskew_textboxes(page, skew_radians, skew_direction, about_pt):
    """
    Deskew all text boxes in <page> about a point <about_pt> by <skew_radians> radians
    in direction <skew_direction> (SKEW_X or SKEW_Y).
    """
    if skew_direction not in (SKEW_X, SKEW_Y):
        raise ValueError("invalid parameter value '%s' for skew_direction" % skew_direction)

    deskew_x = skew_direction == SKEW_X
    for t in page['texts']:
        # The driving coordinate is the box center on the axis orthogonal to
        # the correction: vertical center for SKEW_X, horizontal for SKEW_Y.
        if deskew_x:
            drive = t['top'] + t['height'] / 2
            ref_component = about_pt[1]
            sign = -1
        else:
            drive = t['left'] + t['width'] / 2
            ref_component = about_pt[0]
            sign = 1

        # correction grows linearly with the distance from the reference point
        offset = sign * math.sin(skew_radians) * (drive - ref_component)

        if deskew_x:
            corrected = pt(t['left'] + offset, t['top'])
        else:
            corrected = pt(t['left'], t['top'] + offset)

        update_text_dict_pos(t, corrected, update_node=True)
| [
"numpy.mean",
"pdftabextract.geom.pt",
"pdftabextract.common.update_text_dict_pos",
"pdftabextract.common.sorted_by_attr",
"math.sin",
"pdftabextract.geom.vecrotate"
] | [((4177, 4227), 'numpy.mean', 'np.mean', (["[(t['bottom'] - t['top']) for t in texts]"], {}), "([(t['bottom'] - t['top']) for t in texts])\n", (4184, 4227), True, 'import numpy as np\n'), ((5965, 5997), 'pdftabextract.common.sorted_by_attr', 'sorted_by_attr', (['texts', 'sorted_by'], {}), '(texts, sorted_by)\n', (5979, 5997), False, 'from pdftabextract.common import update_text_dict_pos, sorted_by_attr, DIRECTION_HORIZONTAL, DIRECTION_VERTICAL, SKEW_X, SKEW_Y\n'), ((6647, 6670), 'pdftabextract.geom.pt', 'pt', (["t['left']", "t['top']"], {}), "(t['left'], t['top'])\n", (6649, 6670), False, 'from pdftabextract.geom import pt, vecrotate\n'), ((6721, 6756), 'pdftabextract.geom.vecrotate', 'vecrotate', (['t_pt', 'page_rot', 'about_pt'], {}), '(t_pt, page_rot, about_pt)\n', (6730, 6756), False, 'from pdftabextract.geom import pt, vecrotate\n'), ((6801, 6852), 'pdftabextract.common.update_text_dict_pos', 'update_text_dict_pos', (['t', 't_pt_rot'], {'update_node': '(True)'}), '(t, t_pt_rot, update_node=True)\n', (6821, 6852), False, 'from pdftabextract.common import update_text_dict_pos, sorted_by_attr, DIRECTION_HORIZONTAL, DIRECTION_VERTICAL, SKEW_X, SKEW_Y\n'), ((7882, 7936), 'pdftabextract.common.update_text_dict_pos', 'update_text_dict_pos', (['t', 'pt_deskewed'], {'update_node': '(True)'}), '(t, pt_deskewed, update_node=True)\n', (7902, 7936), False, 'from pdftabextract.common import update_text_dict_pos, sorted_by_attr, DIRECTION_HORIZONTAL, DIRECTION_VERTICAL, SKEW_X, SKEW_Y\n'), ((7759, 7791), 'pdftabextract.geom.pt', 'pt', (["(t['left'] + y_diff)", "t['top']"], {}), "(t['left'] + y_diff, t['top'])\n", (7761, 7791), False, 'from pdftabextract.geom import pt, vecrotate\n'), ((7832, 7864), 'pdftabextract.geom.pt', 'pt', (["t['left']", "(t['top'] + y_diff)"], {}), "(t['left'], t['top'] + y_diff)\n", (7834, 7864), False, 'from pdftabextract.geom import pt, vecrotate\n'), ((7651, 7673), 'math.sin', 'math.sin', (['skew_radians'], {}), '(skew_radians)\n', (7659, 
7673), False, 'import math\n')] |
"""
==============================
Generate simulated evoked data
==============================
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mne import (read_proj, read_forward_solution, read_cov, read_label,
pick_types_evoked, pick_types_forward, pick_types,
read_evokeds)
from mne.io import Raw
from mne.datasets import sample
from mne.time_frequency import iir_filter_raw, morlet
from mne.viz import plot_sparse_source_estimates
from mne.simulation import generate_sparse_stc, generate_evoked
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
condition = 'Left Auditory'
evoked_template = read_evokeds(ave_fname, condition=condition, baseline=None)
evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
exclude=raw.info['bads'])
label_names = ['Aud-lh', 'Aud-rh']
labels = [read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses and the correspond evoked data
snr = 6 # dB
tmin = -0.1
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series from 2 Morlet wavelets
stc_data = np.zeros((len(labels), len(times)))
Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
stc_data[0][:len(Ws[0])] = np.real(Ws[0])
stc_data[1][:len(Ws[1])] = np.real(Ws[1])
stc_data *= 100 * 1e-9 # use nAm as unit
# time translation
stc_data[1] = np.roll(stc_data[1], 80)
stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
random_state=0)
###############################################################################
# Generate noisy evoked data
picks = pick_types(raw.info, meg=True, exclude='bads')
iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180)
evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
tmin=0.0, tmax=0.2, iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot()
| [
"mne.read_proj",
"mne.simulation.generate_sparse_stc",
"mne.datasets.sample.data_path",
"mne.read_forward_solution",
"mne.simulation.generate_evoked",
"mne.io.Raw",
"mne.time_frequency.iir_filter_raw",
"numpy.real",
"numpy.linspace",
"mne.read_label",
"mne.read_cov",
"mne.pick_types_forward",
... | [((770, 788), 'mne.datasets.sample.data_path', 'sample.data_path', ([], {}), '()\n', (786, 788), False, 'from mne.datasets import sample\n'), ((796, 848), 'mne.io.Raw', 'Raw', (["(data_path + '/MEG/sample/sample_audvis_raw.fif')"], {}), "(data_path + '/MEG/sample/sample_audvis_raw.fif')\n", (799, 848), False, 'from mne.io import Raw\n'), ((856, 919), 'mne.read_proj', 'read_proj', (["(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')"], {}), "(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')\n", (865, 919), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((1222, 1287), 'mne.read_forward_solution', 'read_forward_solution', (['fwd_fname'], {'force_fixed': '(True)', 'surf_ori': '(True)'}), '(fwd_fname, force_fixed=True, surf_ori=True)\n', (1243, 1287), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((1294, 1363), 'mne.pick_types_forward', 'pick_types_forward', (['fwd'], {'meg': '(True)', 'eeg': '(True)', 'exclude': "raw.info['bads']"}), "(fwd, meg=True, eeg=True, exclude=raw.info['bads'])\n", (1312, 1363), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((1371, 1390), 'mne.read_cov', 'read_cov', (['cov_fname'], {}), '(cov_fname)\n', (1379, 1390), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((1438, 1497), 'mne.read_evokeds', 'read_evokeds', (['ave_fname'], {'condition': 'condition', 'baseline': 'None'}), '(ave_fname, condition=condition, baseline=None)\n', (1450, 1497), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((1516, 1601), 
'mne.pick_types_evoked', 'pick_types_evoked', (['evoked_template'], {'meg': '(True)', 'eeg': '(True)', 'exclude': "raw.info['bads']"}), "(evoked_template, meg=True, eeg=True, exclude=raw.info['bads']\n )\n", (1533, 1601), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((2003, 2057), 'numpy.linspace', 'np.linspace', (['tmin', '(tmin + n_samples * tstep)', 'n_samples'], {}), '(tmin, tmin + n_samples * tstep, n_samples)\n', (2014, 2057), True, 'import numpy as np\n'), ((2158, 2199), 'mne.time_frequency.morlet', 'morlet', (['sfreq', '[3, 10]'], {'n_cycles': '[1, 1.5]'}), '(sfreq, [3, 10], n_cycles=[1, 1.5])\n', (2164, 2199), False, 'from mne.time_frequency import iir_filter_raw, morlet\n'), ((2227, 2241), 'numpy.real', 'np.real', (['Ws[0]'], {}), '(Ws[0])\n', (2234, 2241), True, 'import numpy as np\n'), ((2269, 2283), 'numpy.real', 'np.real', (['Ws[1]'], {}), '(Ws[1])\n', (2276, 2283), True, 'import numpy as np\n'), ((2360, 2384), 'numpy.roll', 'np.roll', (['stc_data[1]', '(80)'], {}), '(stc_data[1], 80)\n', (2367, 2384), True, 'import numpy as np\n'), ((2391, 2469), 'mne.simulation.generate_sparse_stc', 'generate_sparse_stc', (["fwd['src']", 'labels', 'stc_data', 'tmin', 'tstep'], {'random_state': '(0)'}), "(fwd['src'], labels, stc_data, tmin, tstep, random_state=0)\n", (2410, 2469), False, 'from mne.simulation import generate_sparse_stc, generate_evoked\n'), ((2614, 2660), 'mne.pick_types', 'pick_types', (['raw.info'], {'meg': '(True)', 'exclude': '"""bads"""'}), "(raw.info, meg=True, exclude='bads')\n", (2624, 2660), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n'), ((2674, 2734), 'mne.time_frequency.iir_filter_raw', 'iir_filter_raw', (['raw'], {'order': '(5)', 'picks': 'picks', 'tmin': '(60)', 'tmax': '(180)'}), '(raw, order=5, picks=picks, tmin=60, tmax=180)\n', 
(2688, 2734), False, 'from mne.time_frequency import iir_filter_raw, morlet\n'), ((2744, 2843), 'mne.simulation.generate_evoked', 'generate_evoked', (['fwd', 'stc', 'evoked_template', 'cov', 'snr'], {'tmin': '(0.0)', 'tmax': '(0.2)', 'iir_filter': 'iir_filter'}), '(fwd, stc, evoked_template, cov, snr, tmin=0.0, tmax=0.2,\n iir_filter=iir_filter)\n', (2759, 2843), False, 'from mne.simulation import generate_sparse_stc, generate_evoked\n'), ((2953, 3057), 'mne.viz.plot_sparse_source_estimates', 'plot_sparse_source_estimates', (["fwd['src']", 'stc'], {'bgcolor': '(1, 1, 1)', 'opacity': '(0.5)', 'high_resolution': '(True)'}), "(fwd['src'], stc, bgcolor=(1, 1, 1), opacity=\n 0.5, high_resolution=True)\n", (2981, 3057), False, 'from mne.viz import plot_sparse_source_estimates\n'), ((3083, 3095), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3093, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3119), 'matplotlib.pyplot.psd', 'plt.psd', (['evoked.data[0]'], {}), '(evoked.data[0])\n', (3103, 3119), True, 'import matplotlib.pyplot as plt\n'), ((1679, 1737), 'mne.read_label', 'read_label', (["(data_path + '/MEG/sample/labels/%s.label' % ln)"], {}), "(data_path + '/MEG/sample/labels/%s.label' % ln)\n", (1689, 1737), False, 'from mne import read_proj, read_forward_solution, read_cov, read_label, pick_types_evoked, pick_types_forward, pick_types, read_evokeds\n')] |
#!/usr/bin/env python
"""Use numpy array.

Create a small integer ndarray, show its type, mutate one element in place
and exit.
"""
import sys
import numpy as np
A = np.array([4, 6, 8])  # 1-D array; dtype inferred as an integer type
print(type(A))  # -> <class 'numpy.ndarray'>
A[0] = 7  # ndarrays support in-place element assignment
print(A)  # -> [7 6 8]
sys.exit()  # terminate with exit status 0
| [
"numpy.array",
"sys.exit"
] | [((79, 98), 'numpy.array', 'np.array', (['[4, 6, 8]'], {}), '([4, 6, 8])\n', (87, 98), True, 'import numpy as np\n'), ((132, 142), 'sys.exit', 'sys.exit', ([], {}), '()\n', (140, 142), False, 'import sys\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
def show_modal_data(nat_freq, damping):
    """
    Print identified natural frequencies and damping ratios as a small
    two-column table, one numbered row per mode.
    """
    print(' Nat. f. Damping')
    print('-' * 23)
    for row, freq in enumerate(nat_freq):
        # frequency right-aligned to one decimal, damping with four decimals
        print(f'{row + 1}) {freq:6.1f}\t{damping[row]:5.4f}')
def plot_mode_shape(shape, axis, style='o-', frequency=None, **kwargs):
    """
    Plot a mode shape in a consistent fashion.

    The shape is scaled to unit maximum magnitude and its overall sign is
    flipped so that the first component is non-negative.

    :param shape: 1-D array-like mode-shape vector
    :param axis: matplotlib axes object to draw on
    :param style: matplotlib format string for the curve
    :param frequency: if given, used to set the axes title
    :param kwargs: forwarded to ``axis.plot``
    :return: the list of line objects created by ``axis.plot``
        (bug fix: the plot handle was previously assigned to an unused local
        and discarded; it is now returned so callers can restyle the lines)
    """
    lines = axis.plot(shape / np.max(np.abs(shape)) * np.sign(shape[0]),
                      style, **kwargs)
    if frequency is not None:
        axis.set_title(f'Mode shape - {frequency:.0f} Hz')
    axis.set_yticks([])
    plt.tight_layout()
    return lines
def show_reconstructed(freq, acc, FRF, frf_rec, select_loc=0):
    """
    Split code from visualization to fit the presentation view.

    Compare a measured FRF against its LSCF reconstruction for one location:
    two stacked subplots, magnitude (log scale) on top and phase (degrees)
    below, each overlaying the experimental and the reconstructed curve.

    :param freq: frequency vector of the experimental FRF
    :param acc: object whose ``freq`` attribute holds the frequency vector of
        the reconstruction (presumably the identification model -- TODO confirm)
    :param FRF: experimental FRF array, indexed by location
    :param frf_rec: reconstructed FRF array, indexed by location
    :param select_loc: index of the location/channel to display
    """
    freq_a = acc.freq
    # top subplot: magnitude on a logarithmic scale
    plt.subplot(211)
    plt.semilogy(freq, np.abs(FRF[select_loc]), label='Experiment')
    plt.semilogy(freq_a, np.abs(frf_rec[select_loc]),'--', label='LSCF')
    plt.xlim(0,freq[-1])
    plt.ylabel(r"abs($\alpha$)")
    plt.legend(loc = 'best')
    # bottom subplot: phase in degrees (deg=1)
    plt.subplot(212)
    plt.plot(freq, np.angle(FRF[select_loc],deg = 1), label='Experiment')
    plt.plot(freq_a, np.angle(frf_rec[select_loc],deg = 1),'--',label='LSCF')
    plt.xlim(0,freq[-1])
    plt.ylabel(r"angle($\alpha$)")
    plt.legend(loc = 'best')
def show_lighting(x0, video_light):
    """
    Interactively visualize an example of poorly illuminated video.

    Shows the first frame of ``video_light`` with a rectangular region of
    interest outlined at column ``x0`` (row 9, 20 px tall, 40 px wide) and,
    below it, the grayscale histogram of that region.

    :param x0: left edge (column index) of the region of interest
    :param video_light: video object whose ``mraw`` attribute is a frame
        array indexed as (frame, row, column) -- assumed; confirm with caller
    """
    y0 = 9
    d = 20
    # region of interest: d rows tall, 2*d columns wide, taken from frame 0
    roi = video_light.mraw[0, y0:y0+d, x0:x0+d*2]
    fig, ax = plt.subplots(2)
    ax[0].imshow(video_light.mraw[0], cmap='gray')
    ax[1].hist(roi.flatten(), bins=50);
    # Formatting
    ax[0].add_patch(patches.Rectangle((x0, y0), d*2, d, fill=False, color='r', linewidth=2))
    ax[0].grid(False)
    ax[1].set_xlabel('Grayscale value [/]')
    ax[1].set_ylabel('n pixels [/]')
    ax[1].set_xlim([0, 260])
plt.tight_layout() | [
"numpy.abs",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.ylabel",
"numpy.angle",
"numpy.sign",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((704, 722), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (720, 722), True, 'import matplotlib.pyplot as plt\n'), ((895, 911), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (906, 911), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1079), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'freq[-1]'], {}), '(0, freq[-1])\n', (1066, 1079), True, 'import matplotlib.pyplot as plt\n'), ((1083, 1111), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""abs($\\\\alpha$)"""'], {}), "('abs($\\\\alpha$)')\n", (1093, 1111), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1139), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1127, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1163), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1158, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1341), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'freq[-1]'], {}), '(0, freq[-1])\n', (1328, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1376), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""angle($\\\\alpha$)"""'], {}), "('angle($\\\\alpha$)')\n", (1356, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1381, 1403), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1391, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1630), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1627, 1630), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1985), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((936, 959), 'numpy.abs', 'np.abs', (['FRF[select_loc]'], {}), '(FRF[select_loc])\n', (942, 959), True, 'import numpy as np\n'), ((1006, 1033), 'numpy.abs', 'np.abs', (['frf_rec[select_loc]'], {}), '(frf_rec[select_loc])\n', (1012, 1033), True, 'import numpy as np\n'), ((1183, 1215), 
'numpy.angle', 'np.angle', (['FRF[select_loc]'], {'deg': '(1)'}), '(FRF[select_loc], deg=1)\n', (1191, 1215), True, 'import numpy as np\n'), ((1259, 1295), 'numpy.angle', 'np.angle', (['frf_rec[select_loc]'], {'deg': '(1)'}), '(frf_rec[select_loc], deg=1)\n', (1267, 1295), True, 'import numpy as np\n'), ((1758, 1831), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x0, y0)', '(d * 2)', 'd'], {'fill': '(False)', 'color': '"""r"""', 'linewidth': '(2)'}), "((x0, y0), d * 2, d, fill=False, color='r', linewidth=2)\n", (1775, 1831), False, 'from matplotlib import patches\n'), ((529, 546), 'numpy.sign', 'np.sign', (['shape[0]'], {}), '(shape[0])\n', (536, 546), True, 'import numpy as np\n'), ((512, 525), 'numpy.abs', 'np.abs', (['shape'], {}), '(shape)\n', (518, 525), True, 'import numpy as np\n')] |
from selection.tests.instance import gaussian_instance
import numpy as np
from selection.randomized.lasso import full_targets, selected_targets
from selection.randomized.marginal_slope_twostage import marginal_screening, slope
from selection.randomized.randomization import randomization
from selection.randomized.query import twostage_selective_MLE
from scipy.stats import norm as ndist
try:
from rpy2.robjects.packages import importr
from rpy2 import robjects
import rpy2.robjects.numpy2ri
rpy_loaded = True
except ImportError:
rpy_loaded = False
if rpy_loaded:
    def slope_R(X, Y, W=None, normalize=True, choice_weights="gaussian", sigma=None):
        """
        Fit the SLOPE estimator by calling the R 'SLOPE' package through rpy2.

        :param X: design matrix of shape (n, p)
        :param Y: response vector of length n
        :param W: optional user-supplied lambda sequence; if None the sequence
            is chosen inside R according to <choice_weights>
        :param normalize: whether R should normalize the design
        :param choice_weights: 'gaussian' or 'bhq' lambda sequence (only used
            when W is None)
        :param sigma: optional noise level handed to R (None lets R estimate it)
        :return: tuple (beta, E, lambda_seq, sigma) -- fitted coefficients,
            selected set, lambda sequence and noise level
        """
        rpy2.robjects.numpy2ri.activate()
        robjects.r('''
            library('SLOPE')
            slope = function(X, Y, W , normalize, choice_weights, sigma, fdr = NA){
              if(is.na(sigma)){
              sigma=NULL} else{
              sigma = as.matrix(sigma)[1,1]}
              if(is.na(fdr)){
              fdr = 0.1 }
              if(normalize=="TRUE"){
               normalize = TRUE} else{
               normalize = FALSE}
              if(is.na(W))
              {
                if(choice_weights == "gaussian"){
                lambda = "gaussian"} else{
                lambda = "bhq"}
                result = SLOPE(X, Y, fdr = fdr, lambda = lambda, normalize = normalize, sigma = sigma)
              } else{
                result = SLOPE(X, Y, fdr = fdr, lambda = W, normalize = normalize, sigma = sigma)
              }
              return(list(beta = result$beta, E = result$selected, lambda_seq = result$lambda, sigma = result$sigma))
              }''')

        r_slope = robjects.globalenv['slope']

        n, p = X.shape
        r_X = robjects.r.matrix(X, nrow=n, ncol=p)
        r_Y = robjects.r.matrix(Y, nrow=n, ncol=1)

        # translate the Python arguments into their R counterparts
        if normalize is True:
            r_normalize = robjects.StrVector('True')
        else:
            r_normalize = robjects.StrVector('False')

        if W is None:
            r_W = robjects.NA_Logical
            # bug fix: the comparisons used 'is' on string literals, which
            # tests object identity and is not guaranteed to work; compare
            # string values with '==' instead
            if choice_weights == "gaussian":
                r_choice_weights = robjects.StrVector('gaussian')
            elif choice_weights == "bhq":
                r_choice_weights = robjects.StrVector('bhq')
        else:
            r_W = robjects.r.matrix(W, nrow=p, ncol=1)

        if sigma is None:
            r_sigma = robjects.NA_Logical
        else:
            r_sigma = robjects.r.matrix(sigma, nrow=1, ncol=1)

        result = r_slope(r_X, r_Y, r_W, r_normalize, r_choice_weights, r_sigma)
        # bug fix: np.asscalar was removed in NumPy 1.23 -- use ndarray.item()
        result = np.asarray(result.rx2('beta')), np.asarray(result.rx2('E')), \
                 np.asarray(result.rx2('lambda_seq')), np.array(result.rx2('sigma')).item()

        rpy2.robjects.numpy2ri.deactivate()
        return result
def test_marginal_slope(n=3000, p=1000, signal_fac=1.5, s=30, sigma=2., rho=0.20,
                        randomizer_scale=np.sqrt(0.5),
                        split_proportion=0.67, target="selected"):
    """
    Run one replication of two-stage selective inference: randomized marginal
    screening followed by randomized SLOPE on the screened design, then
    compare three inference procedures on the finally selected coefficients:

    - adjusted (two-stage selective MLE) intervals/p-values,
    - naive (unadjusted) OLS intervals,
    - data-splitting intervals (selection on one half, OLS on the other).

    :param n, p, signal_fac, s, sigma, rho: parameters of the simulated
        Gaussian instance (sample size, dimension, signal strength factor,
        sparsity, noise sd, correlation)
    :param randomizer_scale: scale of the Gaussian randomization
    :param split_proportion: fraction of samples used for selection in the
        data-splitting comparison
    :param target: 'selected' or 'full' inferential target
    :return: tuple (coverage_adjusted, length_adjusted, power_adjusted,
        coverage_naive, length_naive, coverage_split, length_split,
        power_split, fdr) -- lengths are rescaled back to the original
        noise level via sigma_
    """
    inst = gaussian_instance
    signal = np.sqrt(signal_fac * 2. * np.log(p))
    X, y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]

    # residual-based estimate of the noise sd; the response is standardized
    # by it so downstream methods can work with sigma = 1
    sigma_ = np.sqrt(np.linalg.norm(y - X.dot(np.linalg.pinv(X).dot(y))) ** 2 / (n - p))
    Y = y / sigma_

    # stage 1: randomized marginal screening on the score X^T Y
    score = X.T.dot(Y)
    omega = randomization.isotropic_gaussian((p,), randomizer_scale * sigma_).sample()
    W = X.T.dot(X)

    marginal_select = marginal_screening.type1(score,
                                              W,
                                              0.1,
                                              randomizer_scale,
                                              useC=True,
                                              perturb=omega)

    boundary, cond_mean_1, cond_cov_1, affine_con_1, logdens_linear_1, initial_soln_1 = marginal_select.fit()
    nonzero = boundary != 0
    first_selected = np.asarray([t for t in range(p) if nonzero[t]])

    X_tilde = X[:, nonzero]

    # lambda sequence for SLOPE obtained from the R package
    r_beta, r_E, r_lambda_seq, r_sigma = slope_R(X_tilde,
                                                 Y,
                                                 W=None,
                                                 normalize=True,
                                                 choice_weights="gaussian",
                                                 sigma=1.)

    # stage 2: randomized SLOPE on the screened design
    conv = slope.gaussian(X_tilde,
                          Y,
                          r_sigma * r_lambda_seq,
                          sigma=1.,
                          randomizer_scale=randomizer_scale * 1.)

    signs, cond_mean_2, cond_cov_2, affine_con_2, logdens_linear_2, initial_soln_2 = conv.fit()
    nonzero_slope = signs != 0
    # bug fix: the comprehension variable used to shadow the sparsity
    # parameter 's'; renamed to 'j'
    second_selected = np.asarray([j for j in range(nonzero.sum()) if nonzero_slope[j]])

    # --- data-splitting comparison -----------------------------------------
    subsample_size = int(split_proportion * n)
    # bug fix: np.bool (removed in NumPy 1.24) replaced by the builtin bool
    sel_idx = np.zeros(n, dtype=bool)
    sel_idx[:subsample_size] = 1
    np.random.shuffle(sel_idx)
    inf_idx = ~sel_idx
    Y_inf = Y[inf_idx]
    X_inf = X[inf_idx, :]
    Y_sel = Y[sel_idx]
    X_sel = X[sel_idx, :]

    # non-randomized marginal screening on the selection half
    score_split = X_sel.T.dot(Y_sel)
    stdev_split = np.sqrt(np.diag(X_sel.T.dot(X_sel)))
    threshold_split = stdev_split * ndist.ppf(1. - 0.1 / 2.)
    boundary_split = np.fabs(score_split) >= threshold_split
    nonzero_split = boundary_split != 0
    first_selected_split = np.asarray([u for u in range(p) if nonzero_split[u]])

    X_tilde_sel = X_sel[:, nonzero_split]

    # non-randomized SLOPE on the selection half
    r_beta_split, r_E_split, r_lambda_seq_split, r_sigma_split = slope_R(X_tilde_sel,
                                                                         Y_sel,
                                                                         W=None,
                                                                         normalize=True,
                                                                         choice_weights="gaussian",
                                                                         sigma=1.)
    nonzero_slope_split = (r_beta_split != 0)
    second_selected_split = np.asarray([r for r in range(nonzero_split.sum()) if nonzero_slope_split[r]])

    print("compare dimensions- ms ", nonzero.sum(), nonzero_split.sum())
    print("compare dimensions- slope ", nonzero_slope.sum(), nonzero_slope_split.sum())

    # OLS on the held-out half for the split estimator
    beta_target_split = np.linalg.pinv(X_inf[:, first_selected_split[second_selected_split]]).dot(
        X_inf[:, first_selected_split].dot(beta[nonzero_split])) / sigma_
    post_split_OLS = np.linalg.pinv(X_inf[:, first_selected_split[second_selected_split]]).dot(Y_inf)
    naive_split_sd = np.sqrt(np.diag(np.linalg.inv(
        X_inf[:, first_selected_split[second_selected_split]].T.dot(
            X_inf[:, first_selected_split[second_selected_split]]))))
    # 90% (z = 1.65) two-sided intervals
    intervals_split = np.vstack([post_split_OLS - 1.65 * naive_split_sd,
                                post_split_OLS + 1.65 * naive_split_sd]).T

    coverage_split = (beta_target_split > intervals_split[:, 0]) * (beta_target_split < intervals_split[:, 1])
    length_split = intervals_split[:, 1] - intervals_split[:, 0]
    pval_split = 2 * (1. - ndist.cdf(np.abs(post_split_OLS) / naive_split_sd))
    pval_alt_split = (pval_split[beta[first_selected_split[second_selected_split]] != 0]) < 0.1
    if pval_alt_split.sum() > 0:
        power_split = np.mean(pval_alt_split)
    else:
        power_split = 0.

    # --- adjusted (selective MLE) inference --------------------------------
    if target == "selected":
        _, _, cov_target_score_1, _ = marginal_select.multivariate_targets(first_selected[second_selected])
        (observed_target,
         cov_target,
         cov_target_score_2,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero_slope,
                                          dispersion=1.)
        beta_target = np.linalg.pinv(X_tilde[:, nonzero_slope]).dot(X_tilde.dot(beta[nonzero])) / sigma_
    elif target == "full":
        _, _, cov_target_score_1, _ = marginal_select.marginal_targets(first_selected[second_selected])
        (observed_target,
         cov_target,
         cov_target_score_2,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero_slope,
                                      dispersion=1.)
        beta_target = beta[first_selected[second_selected]] / sigma_

    _, _, _, pval, intervals, _ = twostage_selective_MLE(observed_target,
                                                         cov_target,
                                                         cov_target_score_1,
                                                         cov_target_score_2,
                                                         initial_soln_1,
                                                         initial_soln_2,
                                                         cond_mean_1,
                                                         cond_mean_2,
                                                         cond_cov_1,
                                                         cond_cov_2,
                                                         logdens_linear_1,
                                                         logdens_linear_2,
                                                         affine_con_1.linear_part,
                                                         affine_con_2.linear_part,
                                                         affine_con_1.offset,
                                                         affine_con_2.offset,
                                                         solve_args={'tol': 1.e-12},
                                                         level=0.9)

    pval_alt = (pval[beta[first_selected[second_selected]] != 0]) < 0.1
    if pval_alt.sum() > 0:
        power_adjusted = np.mean(pval_alt)
    else:
        power_adjusted = 0.
    # NOTE(review): divides by the number of rejections -- yields nan when
    # nothing is rejected at level 0.1 (kept as in the original)
    fdr = ((pval[beta[first_selected[second_selected]] == 0]) < 0.1).sum() / float((pval < 0.1).sum())

    coverage_adjusted = (beta_target > intervals[:, 0]) * (beta_target < intervals[:, 1])
    length_adjusted = intervals[:, 1] - intervals[:, 0]

    # --- naive (unadjusted) OLS on the full data ---------------------------
    post_sel_OLS = np.linalg.pinv(X_tilde[:, nonzero_slope]).dot(Y)
    naive_sd = np.sqrt(np.diag(np.linalg.inv(X_tilde[:, nonzero_slope].T.dot(X_tilde[:, nonzero_slope]))))
    intervals_naive = np.vstack([post_sel_OLS - 1.65 * naive_sd,
                                post_sel_OLS + 1.65 * naive_sd]).T
    coverage_naive = (beta_target > intervals_naive[:, 0]) * (beta_target < intervals_naive[:, 1])
    length_naive = intervals_naive[:, 1] - intervals_naive[:, 0]

    return coverage_adjusted, sigma_ * length_adjusted, power_adjusted, coverage_naive, sigma_ * length_naive, \
           coverage_split, sigma_ * length_split, power_split, fdr
def main(nsim=100):
    """
    Run <nsim> replications of test_marginal_slope and, after every
    replication, print running averages of coverage, interval length,
    power and FDR for the adjusted, split and naive procedures.
    """
    cover_adj, cover_naive, cover_split = [], [], []
    len_adj, len_naive, len_split = [], [], []
    power_adj = power_split_total = fdr_total = 0.

    for rep in range(nsim):
        (cov_a, l_a, pow_a,
         cov_n, l_n,
         cov_s, l_s, pow_s, fdr_one) = test_marginal_slope()

        cover_adj.extend(cov_a)
        cover_naive.extend(cov_n)
        cover_split.extend(cov_s)
        len_adj.extend(l_a)
        len_naive.extend(l_n)
        len_split.extend(l_s)
        power_split_total += pow_s
        power_adj += pow_a
        fdr_total += fdr_one

        done = float(rep + 1)
        print('coverage and lengths', np.mean(cover_adj), np.mean(cover_split), np.mean(cover_naive), np.mean(len_adj),
              np.mean(len_split), np.mean(len_naive), power_adj / done, power_split_total / done, fdr_total / done)
# Run the simulation study when this module is executed/imported.
main()
| [
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.log",
"rpy2.robjects.StrVector",
"selection.randomized.marginal_slope_twostage.slope.gaussian",
"selection.randomized.lasso.selected_targets",
"rpy2.robjects.r",
"numpy.mean",
"selection.randomized.lasso.full_targets",
"selection.randomized.marginal_slope_... | [((2834, 2846), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2841, 2846), True, 'import numpy as np\n'), ((3582, 3669), 'selection.randomized.marginal_slope_twostage.marginal_screening.type1', 'marginal_screening.type1', (['score', 'W', '(0.1)', 'randomizer_scale'], {'useC': '(True)', 'perturb': 'omega'}), '(score, W, 0.1, randomizer_scale, useC=True,\n perturb=omega)\n', (3606, 3669), False, 'from selection.randomized.marginal_slope_twostage import marginal_screening, slope\n'), ((4534, 4640), 'selection.randomized.marginal_slope_twostage.slope.gaussian', 'slope.gaussian', (['X_tilde', 'Y', '(r_sigma * r_lambda_seq)'], {'sigma': '(1.0)', 'randomizer_scale': '(randomizer_scale * 1.0)'}), '(X_tilde, Y, r_sigma * r_lambda_seq, sigma=1.0,\n randomizer_scale=randomizer_scale * 1.0)\n', (4548, 4640), False, 'from selection.randomized.marginal_slope_twostage import marginal_screening, slope\n'), ((5017, 5037), 'numpy.zeros', 'np.zeros', (['n', 'np.bool'], {}), '(n, np.bool)\n', (5025, 5037), True, 'import numpy as np\n'), ((5075, 5101), 'numpy.random.shuffle', 'np.random.shuffle', (['sel_idx'], {}), '(sel_idx)\n', (5092, 5101), True, 'import numpy as np\n'), ((8682, 9039), 'selection.randomized.query.twostage_selective_MLE', 'twostage_selective_MLE', (['observed_target', 'cov_target', 'cov_target_score_1', 'cov_target_score_2', 'initial_soln_1', 'initial_soln_2', 'cond_mean_1', 'cond_mean_2', 'cond_cov_1', 'cond_cov_2', 'logdens_linear_1', 'logdens_linear_2', 'affine_con_1.linear_part', 'affine_con_2.linear_part', 'affine_con_1.offset', 'affine_con_2.offset'], {'solve_args': "{'tol': 1e-12}", 'level': '(0.9)'}), "(observed_target, cov_target, cov_target_score_1,\n cov_target_score_2, initial_soln_1, initial_soln_2, cond_mean_1,\n cond_mean_2, cond_cov_1, cond_cov_2, logdens_linear_1, logdens_linear_2,\n affine_con_1.linear_part, affine_con_2.linear_part, affine_con_1.offset,\n affine_con_2.offset, solve_args={'tol': 
1e-12}, level=0.9)\n", (8704, 9039), False, 'from selection.randomized.query import twostage_selective_MLE\n'), ((732, 1604), 'rpy2.robjects.r', 'robjects.r', (['"""\n library(\'SLOPE\')\n slope = function(X, Y, W , normalize, choice_weights, sigma, fdr = NA){\n if(is.na(sigma)){\n sigma=NULL} else{\n sigma = as.matrix(sigma)[1,1]}\n if(is.na(fdr)){\n fdr = 0.1 }\n if(normalize=="TRUE"){\n normalize = TRUE} else{\n normalize = FALSE}\n if(is.na(W))\n {\n if(choice_weights == "gaussian"){\n lambda = "gaussian"} else{\n lambda = "bhq"}\n result = SLOPE(X, Y, fdr = fdr, lambda = lambda, normalize = normalize, sigma = sigma)\n } else{\n result = SLOPE(X, Y, fdr = fdr, lambda = W, normalize = normalize, sigma = sigma)\n }\n return(list(beta = result$beta, E = result$selected, lambda_seq = result$lambda, sigma = result$sigma))\n }"""'], {}), '(\n """\n library(\'SLOPE\')\n slope = function(X, Y, W , normalize, choice_weights, sigma, fdr = NA){\n if(is.na(sigma)){\n sigma=NULL} else{\n sigma = as.matrix(sigma)[1,1]}\n if(is.na(fdr)){\n fdr = 0.1 }\n if(normalize=="TRUE"){\n normalize = TRUE} else{\n normalize = FALSE}\n if(is.na(W))\n {\n if(choice_weights == "gaussian"){\n lambda = "gaussian"} else{\n lambda = "bhq"}\n result = SLOPE(X, Y, fdr = fdr, lambda = lambda, normalize = normalize, sigma = sigma)\n } else{\n result = SLOPE(X, Y, fdr = fdr, lambda = W, normalize = normalize, sigma = sigma)\n }\n return(list(beta = result$beta, E = result$selected, lambda_seq = result$lambda, sigma = result$sigma))\n }"""\n )\n', (742, 1604), False, 'from rpy2 import robjects\n'), ((1680, 1716), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['X'], {'nrow': 'n', 'ncol': 'p'}), '(X, nrow=n, ncol=p)\n', (1697, 1716), False, 'from rpy2 import robjects\n'), ((1731, 1767), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['Y'], {'nrow': 'n', 'ncol': '(1)'}), '(Y, nrow=n, ncol=1)\n', (1748, 1767), False, 'from rpy2 import robjects\n'), ((5482, 5508), 'scipy.stats.norm.ppf', 'ndist.ppf', 
(['(1.0 - 0.1 / 2.0)'], {}), '(1.0 - 0.1 / 2.0)\n', (5491, 5508), True, 'from scipy.stats import norm as ndist\n'), ((5527, 5547), 'numpy.fabs', 'np.fabs', (['score_split'], {}), '(score_split)\n', (5534, 5547), True, 'import numpy as np\n'), ((7024, 7119), 'numpy.vstack', 'np.vstack', (['[post_split_OLS - 1.65 * naive_split_sd, post_split_OLS + 1.65 * naive_split_sd\n ]'], {}), '([post_split_OLS - 1.65 * naive_split_sd, post_split_OLS + 1.65 *\n naive_split_sd])\n', (7033, 7119), True, 'import numpy as np\n'), ((7555, 7578), 'numpy.mean', 'np.mean', (['pval_alt_split'], {}), '(pval_alt_split)\n', (7562, 7578), True, 'import numpy as np\n'), ((7854, 7924), 'selection.randomized.lasso.selected_targets', 'selected_targets', (['conv.loglike', 'conv._W', 'nonzero_slope'], {'dispersion': '(1.0)'}), '(conv.loglike, conv._W, nonzero_slope, dispersion=1.0)\n', (7870, 7924), False, 'from selection.randomized.lasso import full_targets, selected_targets\n'), ((10237, 10254), 'numpy.mean', 'np.mean', (['pval_alt'], {}), '(pval_alt)\n', (10244, 10254), True, 'import numpy as np\n'), ((10743, 10818), 'numpy.vstack', 'np.vstack', (['[post_sel_OLS - 1.65 * naive_sd, post_sel_OLS + 1.65 * naive_sd]'], {}), '([post_sel_OLS - 1.65 * naive_sd, post_sel_OLS + 1.65 * naive_sd])\n', (10752, 10818), True, 'import numpy as np\n'), ((1825, 1851), 'rpy2.robjects.StrVector', 'robjects.StrVector', (['"""True"""'], {}), "('True')\n", (1843, 1851), False, 'from rpy2 import robjects\n'), ((1892, 1919), 'rpy2.robjects.StrVector', 'robjects.StrVector', (['"""False"""'], {}), "('False')\n", (1910, 1919), False, 'from rpy2 import robjects\n'), ((2228, 2264), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['W'], {'nrow': 'p', 'ncol': '(1)'}), '(W, nrow=p, ncol=1)\n', (2245, 2264), False, 'from rpy2 import robjects\n'), ((2370, 2410), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['sigma'], {'nrow': '(1)', 'ncol': '(1)'}), '(sigma, nrow=1, ncol=1)\n', (2387, 2410), False, 'from rpy2 import 
robjects\n'), ((2987, 2996), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (2993, 2996), True, 'import numpy as np\n'), ((3466, 3531), 'selection.randomized.randomization.randomization.isotropic_gaussian', 'randomization.isotropic_gaussian', (['(p,)', '(randomizer_scale * sigma_)'], {}), '((p,), randomizer_scale * sigma_)\n', (3498, 3531), False, 'from selection.randomized.randomization import randomization\n'), ((6750, 6819), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X_inf[:, first_selected_split[second_selected_split]]'], {}), '(X_inf[:, first_selected_split[second_selected_split]])\n', (6764, 6819), True, 'import numpy as np\n'), ((8390, 8456), 'selection.randomized.lasso.full_targets', 'full_targets', (['conv.loglike', 'conv._W', 'nonzero_slope'], {'dispersion': '(1.0)'}), '(conv.loglike, conv._W, nonzero_slope, dispersion=1.0)\n', (8402, 8456), False, 'from selection.randomized.lasso import full_targets, selected_targets\n'), ((10563, 10604), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X_tilde[:, nonzero_slope]'], {}), '(X_tilde[:, nonzero_slope])\n', (10577, 10604), True, 'import numpy as np\n'), ((11845, 11868), 'numpy.mean', 'np.mean', (['cover_adjusted'], {}), '(cover_adjusted)\n', (11852, 11868), True, 'import numpy as np\n'), ((11870, 11890), 'numpy.mean', 'np.mean', (['cover_split'], {}), '(cover_split)\n', (11877, 11890), True, 'import numpy as np\n'), ((11892, 11912), 'numpy.mean', 'np.mean', (['cover_naive'], {}), '(cover_naive)\n', (11899, 11912), True, 'import numpy as np\n'), ((11914, 11938), 'numpy.mean', 'np.mean', (['length_adjusted'], {}), '(length_adjusted)\n', (11921, 11938), True, 'import numpy as np\n'), ((11948, 11969), 'numpy.mean', 'np.mean', (['length_split'], {}), '(length_split)\n', (11955, 11969), True, 'import numpy as np\n'), ((11971, 11992), 'numpy.mean', 'np.mean', (['length_naive'], {}), '(length_naive)\n', (11978, 11992), True, 'import numpy as np\n'), ((2062, 2092), 'rpy2.robjects.StrVector', 'robjects.StrVector', 
(['"""gaussian"""'], {}), "('gaussian')\n", (2080, 2092), False, 'from rpy2 import robjects\n'), ((6590, 6659), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X_inf[:, first_selected_split[second_selected_split]]'], {}), '(X_inf[:, first_selected_split[second_selected_split]])\n', (6604, 6659), True, 'import numpy as np\n'), ((2170, 2195), 'rpy2.robjects.StrVector', 'robjects.StrVector', (['"""bhq"""'], {}), "('bhq')\n", (2188, 2195), False, 'from rpy2 import robjects\n'), ((7361, 7383), 'numpy.abs', 'np.abs', (['post_split_OLS'], {}), '(post_split_OLS)\n', (7367, 7383), True, 'import numpy as np\n'), ((8073, 8114), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X_tilde[:, nonzero_slope]'], {}), '(X_tilde[:, nonzero_slope])\n', (8087, 8114), True, 'import numpy as np\n'), ((3318, 3335), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X'], {}), '(X)\n', (3332, 3335), True, 'import numpy as np\n')] |
# matching.py
import numpy as np
import cv2
import os
from feature_detection import *
from anms import *
from feature_descriptor import *
def show_matches(matches, img_1, features_1, img_2, features_2, mask=None):
img = np.hstack((img_1, img_2))
if mask is None:
mask = np.ones((len(matches), 1))
for idx, match in enumerate(matches):
if mask[idx] == 0:
color = (0,0,255)
else:
color = (0,255,0)
pt1 = features_1[match[0], :]
pt2 = features_2[match[1], :].copy()
pt2[0] += img_1.shape[1]
img = cv2.circle(img, tuple(pt1), 5, (255,0,0), 1)
img = cv2.circle(img, tuple(pt2), 5, (255,0,0), 1)
img = cv2.line(img, tuple(pt1), tuple(pt2), color, 2)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# converts img points from (row, col) to (x,y)
def reverse_pts(corners):
new_corners = np.zeros_like(corners)
for idx, pt in enumerate(corners):
x,y = pt.ravel()
new_corners[idx] = [y,x]
return new_corners
#sum of squared differences
def ssd(vec_1, vec_2):
return np.sum((vec_1 - vec_2)**2, axis=1)
def match(feature_1, feature_des_1, feature_2, feature_des_2):
matches = []
rejected = []
for idx, feature_des in enumerate(feature_des_1):
distances = ssd(feature_des, feature_des_2)
dist_small_idx = np.argpartition(distances, 2)
ratio = distances[dist_small_idx[0]]/distances[dist_small_idx[1]]
if ratio < 0.7:
matches.append([idx, dist_small_idx[0]])
else:
rejected.append([idx, dist_small_idx[0]])
return matches, rejected
if __name__ == '__main__':
data_dir = '../Data/Train/Set1'
img_name_list = os.listdir(data_dir)
img_name_list.sort()
img_list = []
feature_vec_list = []
feature_des_list = []
for img_name in img_name_list:
print(f'Processing {img_name}')
img = cv2.imread(os.path.join(data_dir,img_name), 1)
print(f'img_size: {img.shape}')
img_list.append(img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#feature detections
detections, corner_fn = detect_harris_coreners(gray)
#anms
anms_corners_vect, r = anms_vectorised(detections, 200, corner_fn)
anms_corners_vect = reverse_pts(anms_corners_vect)
feature_vec_list.append(anms_corners_vect)
#description
feature_des_vec = extract_descriptor(anms_corners_vect, gray, 8, 5)
feature_des_list.append(feature_des_vec)
#Match features between images
for i in range(len(feature_vec_list)):
matches, rejected = match( feature_vec_list[i],
feature_des_list[i],
feature_vec_list[int((i+1)%len(feature_vec_list))],
feature_des_list[int((i+1)%len(feature_vec_list))])
print(f'matches found: {len(matches)}')
print(f'rejections: {len(rejected)}')
show_matches(matches,
img_list[i],
feature_vec_list[i],
img_list[int((i+1)%len(feature_vec_list))],
feature_vec_list[int((i+1)%len(feature_vec_list))])
| [
"os.listdir",
"numpy.argpartition",
"numpy.hstack",
"numpy.zeros_like",
"os.path.join",
"cv2.imshow",
"numpy.sum",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.waitKey"
] | [((225, 250), 'numpy.hstack', 'np.hstack', (['(img_1, img_2)'], {}), '((img_1, img_2))\n', (234, 250), True, 'import numpy as np\n'), ((762, 784), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (772, 784), False, 'import cv2\n'), ((789, 803), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (800, 803), False, 'import cv2\n'), ((808, 831), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (829, 831), False, 'import cv2\n'), ((924, 946), 'numpy.zeros_like', 'np.zeros_like', (['corners'], {}), '(corners)\n', (937, 946), True, 'import numpy as np\n'), ((1130, 1166), 'numpy.sum', 'np.sum', (['((vec_1 - vec_2) ** 2)'], {'axis': '(1)'}), '((vec_1 - vec_2) ** 2, axis=1)\n', (1136, 1166), True, 'import numpy as np\n'), ((1759, 1779), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1769, 1779), False, 'import os\n'), ((1395, 1424), 'numpy.argpartition', 'np.argpartition', (['distances', '(2)'], {}), '(distances, 2)\n', (1410, 1424), True, 'import numpy as np\n'), ((2098, 2135), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2110, 2135), False, 'import cv2\n'), ((1978, 2010), 'os.path.join', 'os.path.join', (['data_dir', 'img_name'], {}), '(data_dir, img_name)\n', (1990, 2010), False, 'import os\n')] |
import numpy as np
from numpy.testing import assert_allclose
import unittest
from pb_bss_eval.distribution import GaussianTrainer
class TestGaussian(unittest.TestCase):
def test_gaussian(self):
samples = 10000
mean = np.ones((3,))
covariance = 2 * np.eye(3)
x = np.random.multivariate_normal(mean, covariance, size=(samples,))
model = GaussianTrainer().fit(x)
assert_allclose(model.mean, mean, atol=0.1)
assert_allclose(model.covariance, covariance, atol=0.1)
def test_diagonal_gaussian(self):
samples = 10000
mean = np.ones((3,))
covariance = 2 * np.eye(3)
x = np.random.multivariate_normal(mean, covariance, size=(samples,))
model = GaussianTrainer().fit(x, covariance_type="diagonal")
assert_allclose(model.mean, mean, atol=0.1)
assert_allclose(model.covariance, np.diag(covariance), atol=0.1)
def test_spherical_gaussian(self):
samples = 10000
mean = np.ones((3,))
covariance = 2 * np.eye(3)
x = np.random.multivariate_normal(mean, covariance, size=(samples,))
model = GaussianTrainer().fit(x, covariance_type="spherical")
assert_allclose(model.mean, mean, atol=0.1)
assert_allclose(
model.covariance, np.mean(np.diag(covariance)), atol=0.1
)
| [
"numpy.eye",
"numpy.ones",
"numpy.random.multivariate_normal",
"numpy.testing.assert_allclose",
"numpy.diag",
"pb_bss_eval.distribution.GaussianTrainer"
] | [((239, 252), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (246, 252), True, 'import numpy as np\n'), ((300, 364), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'covariance'], {'size': '(samples,)'}), '(mean, covariance, size=(samples,))\n', (329, 364), True, 'import numpy as np\n'), ((414, 457), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.mean', 'mean'], {'atol': '(0.1)'}), '(model.mean, mean, atol=0.1)\n', (429, 457), False, 'from numpy.testing import assert_allclose\n'), ((466, 521), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.covariance', 'covariance'], {'atol': '(0.1)'}), '(model.covariance, covariance, atol=0.1)\n', (481, 521), False, 'from numpy.testing import assert_allclose\n'), ((600, 613), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (607, 613), True, 'import numpy as np\n'), ((661, 725), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'covariance'], {'size': '(samples,)'}), '(mean, covariance, size=(samples,))\n', (690, 725), True, 'import numpy as np\n'), ((803, 846), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.mean', 'mean'], {'atol': '(0.1)'}), '(model.mean, mean, atol=0.1)\n', (818, 846), False, 'from numpy.testing import assert_allclose\n'), ((999, 1012), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (1006, 1012), True, 'import numpy as np\n'), ((1060, 1124), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'covariance'], {'size': '(samples,)'}), '(mean, covariance, size=(samples,))\n', (1089, 1124), True, 'import numpy as np\n'), ((1203, 1246), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.mean', 'mean'], {'atol': '(0.1)'}), '(model.mean, mean, atol=0.1)\n', (1218, 1246), False, 'from numpy.testing import assert_allclose\n'), ((278, 287), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (284, 287), True, 'import numpy as np\n'), ((639, 648), 'numpy.eye', 'np.eye', 
(['(3)'], {}), '(3)\n', (645, 648), True, 'import numpy as np\n'), ((889, 908), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (896, 908), True, 'import numpy as np\n'), ((1038, 1047), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1044, 1047), True, 'import numpy as np\n'), ((381, 398), 'pb_bss_eval.distribution.GaussianTrainer', 'GaussianTrainer', ([], {}), '()\n', (396, 398), False, 'from pb_bss_eval.distribution import GaussianTrainer\n'), ((742, 759), 'pb_bss_eval.distribution.GaussianTrainer', 'GaussianTrainer', ([], {}), '()\n', (757, 759), False, 'from pb_bss_eval.distribution import GaussianTrainer\n'), ((1141, 1158), 'pb_bss_eval.distribution.GaussianTrainer', 'GaussianTrainer', ([], {}), '()\n', (1156, 1158), False, 'from pb_bss_eval.distribution import GaussianTrainer\n'), ((1310, 1329), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (1317, 1329), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
"""
"""
import cudf
import cupy
import numpy as np
from sklearn.pipeline import _name_estimators
from hypernets.tabular.dataframe_mapper import DataFrameMapper, TransformerPipeline
from ._transformer import Localizable
class CumlTransformerPipeline(TransformerPipeline):
def as_local(self):
steps = [(name, tf.as_local()) for name, tf in self.steps]
target = TransformerPipeline(steps)
return target
def make_transformer_pipeline(*steps):
"""Construct a TransformerPipeline from the given estimators.
"""
return CumlTransformerPipeline(_name_estimators(steps))
class CumlDataFrameMapper(DataFrameMapper, Localizable):
@staticmethod
def _build_transformer(transformers):
if isinstance(transformers, list):
transformers = make_transformer_pipeline(*transformers)
return transformers
def _to_df(self, X, extracted, columns):
dfs = [cudf.DataFrame(arr, index=None) for arr in extracted]
for df, pos in zip(dfs, np.cumsum([d.shape[1] for d in dfs])):
df.reset_index(drop=True, inplace=True)
df.columns = [f'c{i}' for i in range(pos - df.shape[1], pos)]
df_out = cudf.concat(dfs, axis=1, ignore_index=True) if len(dfs) > 1 else dfs[0]
if len(X) == len(df_out):
df_out.index = X.index
df_out.columns = columns
return df_out
@staticmethod
def _hstack_array(extracted):
arrs = [arr.values if isinstance(arr, cudf.DataFrame) else arr for arr in extracted]
return cupy.hstack(arrs)
@staticmethod
def _fix_feature(fea):
if isinstance(fea, (np.ndarray, cupy.ndarray)) and len(fea.shape) == 1:
fea = fea.reshape(-1, 1)
return fea
def as_local(self):
target = DataFrameMapper([], default=None, df_out=self.df_out, input_df=self.input_df,
df_out_dtype_transforms=self.df_out_dtype_transforms)
target.fitted_features_ = [(cols, t.as_local(), opts) for cols, t, opts in self.fitted_features_]
return target
| [
"cupy.hstack",
"sklearn.pipeline._name_estimators",
"cudf.concat",
"cudf.DataFrame",
"numpy.cumsum",
"hypernets.tabular.dataframe_mapper.DataFrameMapper",
"hypernets.tabular.dataframe_mapper.TransformerPipeline"
] | [((406, 432), 'hypernets.tabular.dataframe_mapper.TransformerPipeline', 'TransformerPipeline', (['steps'], {}), '(steps)\n', (425, 432), False, 'from hypernets.tabular.dataframe_mapper import DataFrameMapper, TransformerPipeline\n'), ((605, 628), 'sklearn.pipeline._name_estimators', '_name_estimators', (['steps'], {}), '(steps)\n', (621, 628), False, 'from sklearn.pipeline import _name_estimators\n'), ((1575, 1592), 'cupy.hstack', 'cupy.hstack', (['arrs'], {}), '(arrs)\n', (1586, 1592), False, 'import cupy\n'), ((1817, 1953), 'hypernets.tabular.dataframe_mapper.DataFrameMapper', 'DataFrameMapper', (['[]'], {'default': 'None', 'df_out': 'self.df_out', 'input_df': 'self.input_df', 'df_out_dtype_transforms': 'self.df_out_dtype_transforms'}), '([], default=None, df_out=self.df_out, input_df=self.\n input_df, df_out_dtype_transforms=self.df_out_dtype_transforms)\n', (1832, 1953), False, 'from hypernets.tabular.dataframe_mapper import DataFrameMapper, TransformerPipeline\n'), ((949, 980), 'cudf.DataFrame', 'cudf.DataFrame', (['arr'], {'index': 'None'}), '(arr, index=None)\n', (963, 980), False, 'import cudf\n'), ((1035, 1071), 'numpy.cumsum', 'np.cumsum', (['[d.shape[1] for d in dfs]'], {}), '([d.shape[1] for d in dfs])\n', (1044, 1071), True, 'import numpy as np\n'), ((1217, 1260), 'cudf.concat', 'cudf.concat', (['dfs'], {'axis': '(1)', 'ignore_index': '(True)'}), '(dfs, axis=1, ignore_index=True)\n', (1228, 1260), False, 'import cudf\n')] |
"""Time series modelling example showing how to set initial state.
Supplementary code for:
<NAME> and <NAME>. "Signal Processing with Recurrent Neural Networks in TensorFlow"
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
def generate_plot(time_series, filename, plot_input=False):
"""
Plots time series
:param time_series: list of three arrays of time series
:param filename: name of the image
"""
if plot_input:
plt.plot(time_series[0][0,:,0], label='input', color='lightgray', linestyle=':', linewidth=3)
plt.rc('font', size=14)
plt.plot(time_series[1][0,:,0], label='target', linestyle='-', linewidth=3)
plt.plot(time_series[2][0,:,0], label='prediction', linestyle='-', linewidth=3)
plt.legend(loc=9)
plt.xlabel('time [t]')
plt.ylabel('signal')
plt.savefig(filename)
plt.close()
# Parameters
gap = 5 # Time steps to predict into the future
T = 500 # Length of training time series
N = 32 # Size of recurrent neural network
n_train = 1 # Number of training sequences
n_test = 1 # Number of test sequences
m = 1 # Output dimension
d = 1 # Input dimension
epochs = 200 # Number of training epochs
lr = 0.05 # Learning rate
# Load and arrange data
raw_data = np.genfromtxt('data/lorenz1000.dt')
train_X = raw_data[0:T]
train_Y = raw_data[0+gap:T+gap]
test_X = raw_data[T:-gap]
test_Y = raw_data[T+gap:]
train_X.resize(n_train, train_X.size, d)
train_Y.resize(n_train, train_Y.size, m)
test_X.resize(n_test, test_X.size, d)
test_Y.resize(n_test, test_Y.size, m)
# Baselines
# Predicting zero
prediction = np.zeros((n_train, train_Y.size, m))
mse = ((train_Y - prediction) ** 2).mean()
print("predicting zero, train: ", mse)
# Predicting mean
prediction = np.zeros((n_train, train_Y.size, m)) + train_Y.mean()
mse = ((train_Y - prediction) ** 2).mean()
print("predicting mean, train: ", mse)
# Predicting previous input
prediction = np.zeros((n_train, train_Y.size, m))
prediction[:,1:,:] = train_Y[:,:-1,:]
mse = ((train_Y - prediction) ** 2).mean()
print("predicting previous input, train: ", mse)
# Placeholders
inputs = tf.placeholder(tf.float32, [None, None, d])
targets = tf.placeholder(tf.float32, [None, None, m])
# Network architecture
cell = tf.nn.rnn_cell.GRUCell(N)
# A state with all variables set to zero
zero_state = cell.zero_state(tf.shape(inputs)[0], tf.float32)
external_state = tf.placeholder_with_default(zero_state, [None, N])
# RNN definition
rnn_output, new_state = tf.nn.dynamic_rnn(
cell, inputs, initial_state=external_state, dtype=tf.float32)
# Note the following reshaping:
# We want a prediction for every time step.
# Weights of fully connected layer should be the same (shared) for every time step.
# This is achieved by flattening the first two dimensions.
# Now all time steps look the same as individual inputs in a batch fed into a feed-forward network.
rnn_output_flat = tf.reshape(rnn_output, [-1, N])
targets_flat = tf.reshape(targets, [-1, m])
prediction_flat = tf.layers.dense(rnn_output_flat, m, activation=None)
prediction = tf.reshape(prediction_flat, [-1, tf.shape(inputs)[1], m])
# Error function and optimizer
loss = tf.losses.mean_squared_error(targets_flat, prediction_flat)
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# Create session and initialize variables
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
sess.graph.finalize() # Graph is read-only after this statement.
# Do the learning
for i in range(epochs):
sess.run(train_step, feed_dict={inputs: train_X, targets: train_Y})
if (i+1)%10==0:
temp_loss = sess.run(loss, feed_dict={inputs: train_X, targets: train_Y})
print(i+1, ' Loss =', temp_loss)
# Visualize modelling of training data
model, final_state = sess.run([prediction, new_state], feed_dict={
inputs: train_X})
generate_plot([train_X, train_Y, model], 'lorenzTrain.pdf')
# Visualize modelling of test data
model = sess.run(prediction, feed_dict={inputs: test_X})
generate_plot([test_X, test_Y, model], 'lorenzTestZero.pdf')
# Visualize modelling of test data starting from zero state
model, loss = sess.run([prediction, loss], feed_dict={
inputs: test_X, targets: test_Y, external_state: final_state})
print("RNN MSE on test set:", loss)
generate_plot([test_X, test_Y, model], 'lorenzTestFinal.pdf')
| [
"tensorflow.shape",
"matplotlib.pyplot.ylabel",
"numpy.genfromtxt",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.GRUCell",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"tensorflow.nn.dynamic_rnn",
"tensorflow.Session",
"matplotlib.pyplot.close",
"tensorflow.train.AdamOptimizer",
... | [((1311, 1346), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/lorenz1000.dt"""'], {}), "('data/lorenz1000.dt')\n", (1324, 1346), True, 'import numpy as np\n'), ((1657, 1693), 'numpy.zeros', 'np.zeros', (['(n_train, train_Y.size, m)'], {}), '((n_train, train_Y.size, m))\n', (1665, 1693), True, 'import numpy as np\n'), ((1984, 2020), 'numpy.zeros', 'np.zeros', (['(n_train, train_Y.size, m)'], {}), '((n_train, train_Y.size, m))\n', (1992, 2020), True, 'import numpy as np\n'), ((2177, 2220), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, d]'], {}), '(tf.float32, [None, None, d])\n', (2191, 2220), True, 'import tensorflow as tf\n'), ((2231, 2274), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, m]'], {}), '(tf.float32, [None, None, m])\n', (2245, 2274), True, 'import tensorflow as tf\n'), ((2306, 2331), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['N'], {}), '(N)\n', (2328, 2331), True, 'import tensorflow as tf\n'), ((2453, 2503), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['zero_state', '[None, N]'], {}), '(zero_state, [None, N])\n', (2480, 2503), True, 'import tensorflow as tf\n'), ((2546, 2625), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'inputs'], {'initial_state': 'external_state', 'dtype': 'tf.float32'}), '(cell, inputs, initial_state=external_state, dtype=tf.float32)\n', (2563, 2625), True, 'import tensorflow as tf\n'), ((2977, 3008), 'tensorflow.reshape', 'tf.reshape', (['rnn_output', '[-1, N]'], {}), '(rnn_output, [-1, N])\n', (2987, 3008), True, 'import tensorflow as tf\n'), ((3024, 3052), 'tensorflow.reshape', 'tf.reshape', (['targets', '[-1, m]'], {}), '(targets, [-1, m])\n', (3034, 3052), True, 'import tensorflow as tf\n'), ((3071, 3123), 'tensorflow.layers.dense', 'tf.layers.dense', (['rnn_output_flat', 'm'], {'activation': 'None'}), '(rnn_output_flat, m, activation=None)\n', (3086, 3123), True, 'import tensorflow as tf\n'), ((3235, 
3294), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['targets_flat', 'prediction_flat'], {}), '(targets_flat, prediction_flat)\n', (3263, 3294), True, 'import tensorflow as tf\n'), ((620, 643), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(14)'}), "('font', size=14)\n", (626, 643), True, 'import matplotlib.pyplot as plt\n'), ((648, 725), 'matplotlib.pyplot.plot', 'plt.plot', (['time_series[1][0, :, 0]'], {'label': '"""target"""', 'linestyle': '"""-"""', 'linewidth': '(3)'}), "(time_series[1][0, :, 0], label='target', linestyle='-', linewidth=3)\n", (656, 725), True, 'import matplotlib.pyplot as plt\n'), ((728, 813), 'matplotlib.pyplot.plot', 'plt.plot', (['time_series[2][0, :, 0]'], {'label': '"""prediction"""', 'linestyle': '"""-"""', 'linewidth': '(3)'}), "(time_series[2][0, :, 0], label='prediction', linestyle='-',\n linewidth=3)\n", (736, 813), True, 'import matplotlib.pyplot as plt\n'), ((812, 829), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(9)'}), '(loc=9)\n', (822, 829), True, 'import matplotlib.pyplot as plt\n'), ((834, 856), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [t]"""'], {}), "('time [t]')\n", (844, 856), True, 'import matplotlib.pyplot as plt\n'), ((861, 881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""signal"""'], {}), "('signal')\n", (871, 881), True, 'import matplotlib.pyplot as plt\n'), ((886, 907), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (897, 907), True, 'import matplotlib.pyplot as plt\n'), ((912, 923), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (921, 923), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1843), 'numpy.zeros', 'np.zeros', (['(n_train, train_Y.size, m)'], {}), '((n_train, train_Y.size, m))\n', (1815, 1843), True, 'import numpy as np\n'), ((3398, 3410), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3408, 3410), True, 'import tensorflow as tf\n'), ((3431, 3464), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3462, 3464), True, 'import tensorflow as tf\n'), ((522, 621), 'matplotlib.pyplot.plot', 'plt.plot', (['time_series[0][0, :, 0]'], {'label': '"""input"""', 'color': '"""lightgray"""', 'linestyle': '""":"""', 'linewidth': '(3)'}), "(time_series[0][0, :, 0], label='input', color='lightgray',\n linestyle=':', linewidth=3)\n", (530, 621), True, 'import matplotlib.pyplot as plt\n'), ((2403, 2419), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (2411, 2419), True, 'import tensorflow as tf\n'), ((3308, 3334), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (3330, 3334), True, 'import tensorflow as tf\n'), ((3171, 3187), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (3179, 3187), True, 'import tensorflow as tf\n')] |
## Num_Integ.py
# ------------------------- #
# Description:
# A collection of Numerical Methods functions for solving/simulating deterministic and stochastic differential equations.
# ------------------------- #
# Created by: <NAME>, RRSG, UCT.
# Date created: 20 June 2016
# Edits: 16 July 2016: added fnRK4_moment used to numerically integrate the moment differential equations for the CD-EKF. removed it.
# 27 July 2016: included the stochastic numerical integration schemes in this file. added references and comments.
# 28 July 2016: changed the arguments for fnRK4_vector. Q and L are included to integrate fnMoment_DE from DynFn.
# ------------------------- #
import numpy as np
# ------------------------------------------------------------------------------------------------------------------------------------------#
## References
""" 1. <NAME>: Numerical Solution of Stochastic Differential Equations.
@book{kloeden2011numerical,
title={Numerical Solution of Stochastic Differential Equations},
author={<NAME>. and <NAME>.},
series={Stochastic Modelling and Applied Probability},
year={1992},
publisher={Springer Berlin Heidelberg}
}
2.Crouse(2015) Basic tracking using nonlinear continuous-time dynamic models [Tutorial]
@article{crouse2015basic,
title={Basic tracking using nonlinear continuous-time dynamic models [Tutorial]},
author={<NAME>},
journal={Aerospace and Electronic Systems Magazine, IEEE},
volume={30},
number={2},
pages={4--41},
year={2015},
publisher={IEEE}
}
3. Burden, Faires (2011) Numerical Analysis
@book{Burden,
author = {<NAME>. and <NAME>},
title = {Numerical Analysis},
year = {2011},
edition = {9th},
publisher={Brooks/Cole}
}
"""
# ------------------------------------------------------------------------------------------------------------------------------------------#
## Deterministic numerical integration
def fnRK4_vector(f, dt, x,t,Q=None,L=None):
"""
fnRK4_vector implements the Runge-Kutta fourth order method for solving Initial Value Problems.
f : dynamics function
dt : fixed stepsize
x : state vector
t : current time instant.
Refer to <NAME> (2011) for the RK4 method.
Edit: 28/7/16: Added Q and L to integrate fnMoment_DE.
"""
if Q is None: # Assuming L is also None.
# Execute one RK4 integration step
k1 = dt* f(t ,x );
k2 = dt* f(t + 0.5*dt,x + 0.5*k1);
k3 = dt* f(t + 0.5*dt,x + 0.5*k2);
k4 = dt* f(t + dt,x + k3);
else:
# Execute one RK4 integration step
k1 = dt* f(t ,x ,Q,L);
k2 = dt* f(t + 0.5*dt,x + 0.5*k1,Q,L);
k3 = dt* f(t + 0.5*dt,x + 0.5*k2,Q,L);
k4 = dt* f(t + dt,x + k3,Q,L);
return x + (k1 + 2*k2 + 2*k3 + k4) / 6.0
# -------------------------------------------------------------------------------------------------------------------------------------------#
## Stochastic numerical integration
def fnEuler_Maruyama(x,fnD,timevec,L,Qd):
"""
fnEuler_Maruyama implements the 0.5 strong Euler-Maruyama scheme. See <NAME> (2014).
x : state vector of dimensions dx by 1.
fnD : nonlinear dynamics function.
timevec : time vector for simulation duration.
L : dispersion matrix of dimensions dx by dw.
Qd : discretized covariance matrix of dimensions dw by dw.
"""
dt = timevec[1]-timevec[0];
dx = np.shape(L)[0]; # dimension of state vector.
dw = np.shape(L)[1]; # dimension of process noise vector
x_state = np.zeros([dx,len(timevec)],dtype=np.float64);
x_state[:,0] = x;
for index in range (1,len(timevec)):
x_state[:,index] = x_state[:,index-1] + dt*fnD(timevec[index-1],x_state[:,index-1]) + np.dot(L,np.random.multivariate_normal(np.zeros(np.shape(L)[1],dtype=np.float64),Qd));
return x_state
def fnSRK_Crouse(x,fnD,timevec,L,Qd):
"""
fnSRK_Crouse implements the 1.5 strong Stochastic Runge-Kutta method in Crouse (2015).
Note that as opposed to Crouse (2015), we have assumed that the dispersion matrix is constant, i.e. not time-varying
and definitely not state-dependent.
x : state vector of dimensions dx by 1.
fnD : nonlinear dynamics function.
timevec : time vector for simulation duration.
L : dispersion matrix of dimensions dx by dw.
Qd : discretized covariance matrix of dimensions dw by dw.
Note: 27/07/16: explicit order 1.5 strong scheme in section 11.2 in Kloden, Platen (1992) and Crouse(2015).
"""
dw = np.shape(L)[1]; # dimension of process noise vector
dx = np.shape(L)[0]; # dimension of state vector.
x_state = np.zeros([dx,len(timevec)],dtype=np.float64);
x_state[:,0] = x;
dt = timevec[1]-timevec[0];
# Form the covariance matrix for delta_beta and delta_alpha.
beta_beta = dt*Qd;
beta_alpha = 0.5*(dt**2)*Qd;
alpha_alpha = ((dt**3)/3.0)*Qd;
Qd_aug = np.zeros([dw+dw,dw+dw],dtype=np.float64); # The covariance matrix in eqn 44.
Qd_aug[0:dw,0:dw] = beta_beta;
Qd_aug[0:dw,dw:] = beta_alpha;
Qd_aug[dw:,0:dw] = beta_alpha;
Qd_aug[dw:,dw:] = alpha_alpha;
# Generate process noise terms according to eqn 44.
noisevec = np.zeros([dw+dw],dtype=np.float64); # mean vector is zero.
y_plus = np.zeros([dx,dw],dtype=np.float64);
y_minus = np.zeros([dx,dw],dtype=np.float64);
fy_plus = np.zeros([dx,dw],dtype=np.float64);
fy_minus = np.zeros([dx,dw],dtype=np.float64);
f2 = np.zeros([dx,dw],dtype=np.float64); # equal to F2 (eqn 39)
# F3 == F2 because we assume L is a constant matrix.
for index in range(1,len(timevec)):
process_noise = np.random.multivariate_normal(noisevec,Qd_aug);
delta_beta = process_noise[0:dw];
delta_alpha = process_noise[dw:];
summ = np.zeros([dx],dtype=np.float64);
for j in range(0,dw):
# find yj+ and yj-. eqns 42 and 43
y_plus[:,j] = x_state[:,index-1] + (dt/float(dw))*fnD(timevec[index-1],x_state[:,index-1]) + np.sqrt(dt)*L[:,j];
y_minus[:,j] = x_state[:,index-1] + (dt/float(dw))*fnD(timevec[index-1],x_state[:,index-1]) - np.sqrt(dt)*L[:,j];
# expressions in eqns 40 and 38
fy_plus[:,j] = fnD(timevec[index-1],y_plus[:,j]);
fy_minus[:,j] = fnD(timevec[index-1],y_minus[:,j]);
f2[:,j] = (1/(2*np.sqrt(dt)))*(fy_plus[:,j] - fy_minus[:,j]); # eqn 40
# sum term in eqn 38
summ += fy_plus[:,j] - 2*fnD(timevec[index-1],x_state[:,index-1]) + fy_minus[:,j];
f1 = x_state[:,index-1] + dt*fnD(timevec[index-1],x_state[:,index-1]) + 0.25*dt*summ; # eqn 38
x_state[:,index] = f1 + np.dot(L,delta_beta) + np.dot(f2,delta_alpha); # eqn 37
return x_state
| [
"numpy.sqrt",
"numpy.random.multivariate_normal",
"numpy.zeros",
"numpy.dot",
"numpy.shape"
] | [((5184, 5230), 'numpy.zeros', 'np.zeros', (['[dw + dw, dw + dw]'], {'dtype': 'np.float64'}), '([dw + dw, dw + dw], dtype=np.float64)\n', (5192, 5230), True, 'import numpy as np\n'), ((5480, 5517), 'numpy.zeros', 'np.zeros', (['[dw + dw]'], {'dtype': 'np.float64'}), '([dw + dw], dtype=np.float64)\n', (5488, 5517), True, 'import numpy as np\n'), ((5555, 5591), 'numpy.zeros', 'np.zeros', (['[dx, dw]'], {'dtype': 'np.float64'}), '([dx, dw], dtype=np.float64)\n', (5563, 5591), True, 'import numpy as np\n'), ((5606, 5642), 'numpy.zeros', 'np.zeros', (['[dx, dw]'], {'dtype': 'np.float64'}), '([dx, dw], dtype=np.float64)\n', (5614, 5642), True, 'import numpy as np\n'), ((5657, 5693), 'numpy.zeros', 'np.zeros', (['[dx, dw]'], {'dtype': 'np.float64'}), '([dx, dw], dtype=np.float64)\n', (5665, 5693), True, 'import numpy as np\n'), ((5709, 5745), 'numpy.zeros', 'np.zeros', (['[dx, dw]'], {'dtype': 'np.float64'}), '([dx, dw], dtype=np.float64)\n', (5717, 5745), True, 'import numpy as np\n'), ((5755, 5791), 'numpy.zeros', 'np.zeros', (['[dx, dw]'], {'dtype': 'np.float64'}), '([dx, dw], dtype=np.float64)\n', (5763, 5791), True, 'import numpy as np\n'), ((3633, 3644), 'numpy.shape', 'np.shape', (['L'], {}), '(L)\n', (3641, 3644), True, 'import numpy as np\n'), ((3688, 3699), 'numpy.shape', 'np.shape', (['L'], {}), '(L)\n', (3696, 3699), True, 'import numpy as np\n'), ((4767, 4778), 'numpy.shape', 'np.shape', (['L'], {}), '(L)\n', (4775, 4778), True, 'import numpy as np\n'), ((4829, 4840), 'numpy.shape', 'np.shape', (['L'], {}), '(L)\n', (4837, 4840), True, 'import numpy as np\n'), ((5945, 5992), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['noisevec', 'Qd_aug'], {}), '(noisevec, Qd_aug)\n', (5974, 5992), True, 'import numpy as np\n'), ((6095, 6127), 'numpy.zeros', 'np.zeros', (['[dx]'], {'dtype': 'np.float64'}), '([dx], dtype=np.float64)\n', (6103, 6127), True, 'import numpy as np\n'), ((7021, 7044), 'numpy.dot', 'np.dot', (['f2', 'delta_alpha'], 
{}), '(f2, delta_alpha)\n', (7027, 7044), True, 'import numpy as np\n'), ((6998, 7019), 'numpy.dot', 'np.dot', (['L', 'delta_beta'], {}), '(L, delta_beta)\n', (7004, 7019), True, 'import numpy as np\n'), ((6324, 6335), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (6331, 6335), True, 'import numpy as np\n'), ((6451, 6462), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (6458, 6462), True, 'import numpy as np\n'), ((6674, 6685), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (6681, 6685), True, 'import numpy as np\n'), ((4019, 4030), 'numpy.shape', 'np.shape', (['L'], {}), '(L)\n', (4027, 4030), True, 'import numpy as np\n')] |
from pathlib import Path
from neurolight.gunpowder.swc_file_source import SwcFileSource
from neurolight.gunpowder.grow_labels import GrowLabels
from neurolight.gunpowder.rasterize_skeleton import RasterizeSkeleton
from neurolight.gunpowder.get_neuron_pair import GetNeuronPair
from neurolight.gunpowder.binarize_labels import BinarizeLabels
from gunpowder import (
ArrayKey,
ArraySpec,
PointsKey,
PointsSpec,
BatchRequest,
Roi,
build,
Coordinate,
RandomLocation,
MergeProvider,
)
from spimagine import volshow
import numpy as np
def test_get_neuron_pair():
    # NOTE(review): this is a module-level function, yet it references
    # `self` (`self.path_to`, `self._write_swc`, `self._toy_swc_points`).
    # It looks lifted out of a unittest.TestCase and will raise NameError
    # if called as-is -- confirm against the original test class.
    path = Path(self.path_to("test_swc_source.swc"))
    # write test swc
    self._write_swc(path, self._toy_swc_points())
    # read arrays
    # Keys naming the point/array datasets that flow through the pipeline.
    swc_source = PointsKey("SWC_SOURCE")
    labels_source = ArrayKey("LABELS_SOURCE")
    img_source = ArrayKey("IMG_SOURCE")
    img_swc = PointsKey("IMG_SWC")
    label_swc = PointsKey("LABEL_SWC")
    imgs = ArrayKey("IMGS")
    labels = ArrayKey("LABELS")
    points_a = PointsKey("SKELETON_A")
    points_b = PointsKey("SKELETON_B")
    img_a = ArrayKey("VOLUME_A")
    img_b = ArrayKey("VOLUME_B")
    labels_a = ArrayKey("LABELS_A")
    labels_b = ArrayKey("LABELS_B")
    # Get points from test swc
    swc_file_source = SwcFileSource(
        path, [swc_source], [PointsSpec(roi=Roi((-10, -10, -10), (31, 31, 31)))]
    )
    # Create an artificial image source by rasterizing the points
    image_source = (
        SwcFileSource(
            path, [img_swc], [PointsSpec(roi=Roi((-10, -10, -10), (31, 31, 31)))]
        )
        + RasterizeSkeleton(
            points=img_swc,
            array=img_source,
            array_spec=ArraySpec(
                interpolatable=True, dtype=np.uint32, voxel_size=Coordinate((1, 1, 1))
            ),
        )
        + BinarizeLabels(labels=img_source, labels_binary=imgs)
        + GrowLabels(array=imgs, radius=0)
    )
    # Create an artificial label source by rasterizing the points
    label_source = (
        SwcFileSource(
            path, [label_swc], [PointsSpec(roi=Roi((-10, -10, -10), (31, 31, 31)))]
        )
        + RasterizeSkeleton(
            points=label_swc,
            array=labels_source,
            array_spec=ArraySpec(
                interpolatable=True, dtype=np.uint32, voxel_size=Coordinate((1, 1, 1))
            ),
        )
        + BinarizeLabels(labels=labels_source, labels_binary=labels)
        + GrowLabels(array=labels, radius=1)
    )
    skeleton = tuple()
    # Merge the three providers and pick random (centered, non-empty) crops.
    skeleton += (
        (swc_file_source, image_source, label_source)
        + MergeProvider()
        + RandomLocation(ensure_nonempty=swc_source, ensure_centered=True)
    )
    pipeline = skeleton + GetNeuronPair(
        point_source=swc_source,
        array_source=imgs,
        label_source=labels,
        points=(points_a, points_b),
        arrays=(img_a, img_b),
        labels=(labels_a, labels_b),
        seperate_by=2,
        shift_attempts=50,
    )
    request = BatchRequest()
    data_shape = 5
    request.add(points_a, Coordinate((data_shape, data_shape, data_shape)))
    request.add(points_b, Coordinate((data_shape, data_shape, data_shape)))
    request.add(img_a, Coordinate((data_shape, data_shape, data_shape)))
    request.add(img_b, Coordinate((data_shape, data_shape, data_shape)))
    request.add(labels_a, Coordinate((data_shape, data_shape, data_shape)))
    request.add(labels_b, Coordinate((data_shape, data_shape, data_shape)))
    with build(pipeline):
        batch = pipeline.request_batch(request)
    data_a = batch[img_a].data
    # Pad by one voxel on every side so point locations can be checked
    # with a +1 offset without falling off the array edge.
    data_a = np.pad(data_a, (1,), "constant", constant_values=(0,))
    data_b = batch[img_b].data
    data_b = np.pad(data_b, (1,), "constant", constant_values=(0,))
    data_c = data_a + data_b
    data = np.array((data_a, data_b, data_c))
    # Every skeleton point of each pair member must land on a rasterized
    # (value 1) voxel of its own volume.
    for _, point in batch[points_a].data.items():
        assert (
            data[(0,) + tuple(int(x) + 1 for x in point.location)] == 1
        ), "data at {} is not 1, its {}".format(
            point.location, data[(0,) + tuple(int(x) for x in point.location)]
        )
    for _, point in batch[points_b].data.items():
        assert (
            data[(1,) + tuple(int(x) + 1 for x in point.location)] == 1
        ), "data at {} is not 1".format(point.location)
    # volshow(data)
| [
"gunpowder.BatchRequest",
"gunpowder.ArrayKey",
"gunpowder.RandomLocation",
"neurolight.gunpowder.binarize_labels.BinarizeLabels",
"gunpowder.Roi",
"gunpowder.build",
"neurolight.gunpowder.grow_labels.GrowLabels",
"numpy.array",
"gunpowder.Coordinate",
"gunpowder.PointsKey",
"gunpowder.MergeProv... | [((762, 785), 'gunpowder.PointsKey', 'PointsKey', (['"""SWC_SOURCE"""'], {}), "('SWC_SOURCE')\n", (771, 785), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((806, 831), 'gunpowder.ArrayKey', 'ArrayKey', (['"""LABELS_SOURCE"""'], {}), "('LABELS_SOURCE')\n", (814, 831), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((849, 871), 'gunpowder.ArrayKey', 'ArrayKey', (['"""IMG_SOURCE"""'], {}), "('IMG_SOURCE')\n", (857, 871), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((886, 906), 'gunpowder.PointsKey', 'PointsKey', (['"""IMG_SWC"""'], {}), "('IMG_SWC')\n", (895, 906), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((923, 945), 'gunpowder.PointsKey', 'PointsKey', (['"""LABEL_SWC"""'], {}), "('LABEL_SWC')\n", (932, 945), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((957, 973), 'gunpowder.ArrayKey', 'ArrayKey', (['"""IMGS"""'], {}), "('IMGS')\n", (965, 973), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((987, 1005), 'gunpowder.ArrayKey', 'ArrayKey', (['"""LABELS"""'], {}), "('LABELS')\n", (995, 1005), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1021, 1044), 'gunpowder.PointsKey', 'PointsKey', (['"""SKELETON_A"""'], {}), "('SKELETON_A')\n", (1030, 1044), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, 
build, Coordinate, RandomLocation, MergeProvider\n'), ((1060, 1083), 'gunpowder.PointsKey', 'PointsKey', (['"""SKELETON_B"""'], {}), "('SKELETON_B')\n", (1069, 1083), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1096, 1116), 'gunpowder.ArrayKey', 'ArrayKey', (['"""VOLUME_A"""'], {}), "('VOLUME_A')\n", (1104, 1116), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1129, 1149), 'gunpowder.ArrayKey', 'ArrayKey', (['"""VOLUME_B"""'], {}), "('VOLUME_B')\n", (1137, 1149), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1165, 1185), 'gunpowder.ArrayKey', 'ArrayKey', (['"""LABELS_A"""'], {}), "('LABELS_A')\n", (1173, 1185), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1201, 1221), 'gunpowder.ArrayKey', 'ArrayKey', (['"""LABELS_B"""'], {}), "('LABELS_B')\n", (1209, 1221), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((2998, 3012), 'gunpowder.BatchRequest', 'BatchRequest', ([], {}), '()\n', (3010, 3012), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3604, 3658), 'numpy.pad', 'np.pad', (['data_a', '(1,)', '"""constant"""'], {'constant_values': '(0,)'}), "(data_a, (1,), 'constant', constant_values=(0,))\n", (3610, 3658), True, 'import numpy as np\n'), ((3703, 3757), 'numpy.pad', 'np.pad', (['data_b', '(1,)', '"""constant"""'], {'constant_values': '(0,)'}), "(data_b, (1,), 'constant', constant_values=(0,))\n", (3709, 3757), True, 'import numpy as np\n'), ((3800, 3834), 
'numpy.array', 'np.array', (['(data_a, data_b, data_c)'], {}), '((data_a, data_b, data_c))\n', (3808, 3834), True, 'import numpy as np\n'), ((1887, 1919), 'neurolight.gunpowder.grow_labels.GrowLabels', 'GrowLabels', ([], {'array': 'imgs', 'radius': '(0)'}), '(array=imgs, radius=0)\n', (1897, 1919), False, 'from neurolight.gunpowder.grow_labels import GrowLabels\n'), ((2447, 2481), 'neurolight.gunpowder.grow_labels.GrowLabels', 'GrowLabels', ([], {'array': 'labels', 'radius': '(1)'}), '(array=labels, radius=1)\n', (2457, 2481), False, 'from neurolight.gunpowder.grow_labels import GrowLabels\n'), ((2620, 2684), 'gunpowder.RandomLocation', 'RandomLocation', ([], {'ensure_nonempty': 'swc_source', 'ensure_centered': '(True)'}), '(ensure_nonempty=swc_source, ensure_centered=True)\n', (2634, 2684), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((2718, 2921), 'neurolight.gunpowder.get_neuron_pair.GetNeuronPair', 'GetNeuronPair', ([], {'point_source': 'swc_source', 'array_source': 'imgs', 'label_source': 'labels', 'points': '(points_a, points_b)', 'arrays': '(img_a, img_b)', 'labels': '(labels_a, labels_b)', 'seperate_by': '(2)', 'shift_attempts': '(50)'}), '(point_source=swc_source, array_source=imgs, label_source=\n labels, points=(points_a, points_b), arrays=(img_a, img_b), labels=(\n labels_a, labels_b), seperate_by=2, shift_attempts=50)\n', (2731, 2921), False, 'from neurolight.gunpowder.get_neuron_pair import GetNeuronPair\n'), ((3060, 3108), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, data_shape))\n', (3070, 3108), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3136, 3184), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, 
data_shape))\n', (3146, 3184), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3209, 3257), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, data_shape))\n', (3219, 3257), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3282, 3330), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, data_shape))\n', (3292, 3330), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3358, 3406), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, data_shape))\n', (3368, 3406), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3434, 3482), 'gunpowder.Coordinate', 'Coordinate', (['(data_shape, data_shape, data_shape)'], {}), '((data_shape, data_shape, data_shape))\n', (3444, 3482), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((3494, 3509), 'gunpowder.build', 'build', (['pipeline'], {}), '(pipeline)\n', (3499, 3509), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1823, 1876), 'neurolight.gunpowder.binarize_labels.BinarizeLabels', 'BinarizeLabels', ([], {'labels': 'img_source', 'labels_binary': 'imgs'}), '(labels=img_source, labels_binary=imgs)\n', (1837, 1876), False, 'from neurolight.gunpowder.binarize_labels import BinarizeLabels\n'), ((2378, 2436), 'neurolight.gunpowder.binarize_labels.BinarizeLabels', 
'BinarizeLabels', ([], {'labels': 'labels_source', 'labels_binary': 'labels'}), '(labels=labels_source, labels_binary=labels)\n', (2392, 2436), False, 'from neurolight.gunpowder.binarize_labels import BinarizeLabels\n'), ((2594, 2609), 'gunpowder.MergeProvider', 'MergeProvider', ([], {}), '()\n', (2607, 2609), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1335, 1369), 'gunpowder.Roi', 'Roi', (['(-10, -10, -10)', '(31, 31, 31)'], {}), '((-10, -10, -10), (31, 31, 31))\n', (1338, 1369), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1533, 1567), 'gunpowder.Roi', 'Roi', (['(-10, -10, -10)', '(31, 31, 31)'], {}), '((-10, -10, -10), (31, 31, 31))\n', (1536, 1567), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((1766, 1787), 'gunpowder.Coordinate', 'Coordinate', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (1776, 1787), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((2083, 2117), 'gunpowder.Roi', 'Roi', (['(-10, -10, -10)', '(31, 31, 31)'], {}), '((-10, -10, -10), (31, 31, 31))\n', (2086, 2117), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n'), ((2321, 2342), 'gunpowder.Coordinate', 'Coordinate', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (2331, 2342), False, 'from gunpowder import ArrayKey, ArraySpec, PointsKey, PointsSpec, BatchRequest, Roi, build, Coordinate, RandomLocation, MergeProvider\n')] |
import pandas as pd
import numpy as np
def is_same_df(df1, df2):
    """Check whether two DataFrames are (near-)identical.

    Raises AssertionError when the columns or the index differ; otherwise
    returns whether the Frobenius norm of the element-wise difference of
    the values is below 1e-4.
    """
    assert np.array_equal(df1.columns.values, df2.columns.values), "not same columns"
    assert np.array_equal(df1.index.values, df2.index.values), "not same index"
    value_diff = df1.values - df2.values
    return np.linalg.norm(value_diff) < 0.0001
| [
"numpy.array_equal",
"numpy.linalg.norm"
] | [((76, 130), 'numpy.array_equal', 'np.array_equal', (['df1.columns.values', 'df2.columns.values'], {}), '(df1.columns.values, df2.columns.values)\n', (90, 130), True, 'import numpy as np\n'), ((161, 211), 'numpy.array_equal', 'np.array_equal', (['df1.index.values', 'df2.index.values'], {}), '(df1.index.values, df2.index.values)\n', (175, 211), True, 'import numpy as np\n'), ((244, 283), 'numpy.linalg.norm', 'np.linalg.norm', (['(df1.values - df2.values)'], {}), '(df1.values - df2.values)\n', (258, 283), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import umap
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from data.data_loader import get_data_ts_onh_mac
from losses import elbo_general_timeseries
from losses import mae_globalmean, mae_loss
from models.multiodal_latentodegru_pretrain import MultimodalLatentODE
from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample
# from ode.viz_latent import latent_viz, latent_viz_random
# --- Runtime / device setup --------------------------------------------------
# Choose the ODE solver: the adjoint variant is available but disabled here.
adjoint = False
if adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint
print("CCC GPU:", os.environ.get('CUDA_VISIBLE_DEVICES'))
GPU = 0
# Use the selected CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:' + str(GPU)
                     if torch.cuda.is_available() else 'cpu')
print('Device:', device.type)
print('HHYU torch.cuda.device:', torch.cuda.device(GPU))
if device.type != 'cpu':
    print('HHYU torch.cuda.get_device_name:', torch.cuda.get_device_name(GPU))
BATCH_SIZE = 32
image_dim = 64
# Flattened size of one single-channel image_dim x image_dim map.
number_pixel_out = image_dim * image_dim * 1
# Scalar constants (on `device`) used by create_vft_mask's torch.where.
one_ = torch.tensor(1.0).to(device)
zero_ = torch.tensor(0.0).to(device)
def create_vft_mask(vft_imgs):
    """Build a binary mask for a batch of VFT images.

    A pixel is set to 1 wherever the sum over the batch, time and channel
    axes is positive, and 0 elsewhere; the single-plane mask is then tiled
    back to the full (N, t, c, H, W) shape of the input.
    """
    n, t, c, _, _ = vft_imgs.shape
    # Collapse sample, visit and channel axes into one (1, 1, 1, H, W) plane.
    collapsed = torch.sum(vft_imgs, dim=[0, 1, 2], keepdim=True)
    mask = torch.where(collapsed > 0, one_, zero_)
    return mask.repeat((n, t, c, 1, 1))
def evaluate_reconstruction_error(config, data, mode='rec', nv_fc=-1):
    """
    Compute per-modality MAE errors on the validation split, once for every
    allowed input-modality combination.
    :param config: experiment config providing a trained `model` and the
        MODALITIES flags
    :param data: dataset providing get_val()
    :param mode: 'rec' for reconstruction, 'forecast' for forecasting
    :param nv_fc: number of visits used as forecasting input; must be > 0
        when mode == 'forecast'
    :return: (errors, input modality combinations, predictions, inputs),
        one entry per combination; -100 marks a modality the model did not
        predict
    """
    assert mode in ['rec', 'forecast'], 'expected one of [rec, forecast]'
    if (mode == 'forecast'):
        assert nv_fc > 0, 'number of visits to used for forecastin as input shoulld be provided'
    model = config.model.eval()
    comb = [[1, 1, 1], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
    comb = select_modalities(comb, config.MODALITIES)
    with torch.no_grad():
        ds_val_batches, ts_val, val_dx = data.get_val(dx_filter=None)
        rnfl_xi_val = ds_val_batches[0]
        gcl_xi_val = ds_val_batches[1]
        #vft_xi_val = ds_val_batches[2]
        def evaluate_rec_error(inputs, modalities):
            # Reconstruction error for one modality combination.
            inputs_ = subsample(inputs, modalities)
            outlist, mulogvar = model(ts=ts_val, x_list=inputs_)
            [pred_rnfl, pred_gcl, pred_vft] = map(lambda x: x if x is None else torch.sigmoid(x), outlist)
            # Sentinel error reported for modalities without a prediction.
            NoneError = torch.from_numpy(np.asanyarray([-100]))
            # Fold the time axis into the batch axis: (N,t,c,H,W) -> (N*t,c,H,W).
            reshape = lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2], x.shape[3], x.shape[4]))
            #mae_globalmean
            # NOTE(review): the outer-scope targets are shadowed here and
            # re-sliced with nv_fc; with the default nv_fc=-1 in 'rec' mode
            # this drops the LAST visit from the targets -- confirm intended.
            rnfl_xi_val, gcl_xi_val, vft_xi_val = [inp[:, :nv_fc, :, :, :] if inp is not None else None for inp in ds_val_batches]
            vft_mask = create_vft_mask(vft_xi_val)
            # Values are rescaled by 200 / 40 before the MAE -- presumably
            # back to measurement units; confirm against the data loader.
            error0 = torch.mean(
                mae_loss(reshape(rnfl_xi_val[:, :]) * 200,
                         reshape(pred_rnfl[:, :]) * 200)) if pred_rnfl is not None else NoneError
            error1 = torch.mean(mae_loss(gcl_xi_val * 200, pred_gcl * 200)) if pred_gcl is not None else NoneError
            error2 = torch.mean(mae_loss(reshape(vft_xi_val[:, :]) * 40, reshape(pred_vft[:, :]) * 40,
                                         mask=reshape(vft_mask[:, :]))) if pred_vft is not None else NoneError
            return [error0, error1, error2], [pred_rnfl, pred_gcl, pred_vft]
        masks = [None, None, None]
        errors = [];
        inputs_modalities = []
        preds_all = []
        inputs_all=[]
        # NOTE(review): `evaluate_forecast_error` is not defined in this
        # module view -- the 'forecast' branch will raise NameError unless
        # it is imported elsewhere; confirm.
        for c in comb:
            x_list_c = subsample(ds_val_batches, c)
            if(mode=='rec'):
                error, preds = evaluate_rec_error(x_list_c, c)
            else:
                error, preds = evaluate_forecast_error(model,ts_val,x_list_c,masks,c,nv_fc)
            error = [e.cpu().numpy() for e in error]
            error = [float(e) for e in error]
            error = subsample(error, config.MODALITIES)
            preds = subsample(preds, config.MODALITIES)
            preds_all.append(preds)
            inputs_modalities.append(c)
            errors.append(error)
            inputs_all.append(x_list_c)
    return errors, inputs_modalities, preds_all, inputs_all
def plot_z(mu, val_dx, save_path):
    """Scatter-plot a 2-D latent space colored by diagnosis and save it.

    mu[:, 0] / mu[:, 1] are plotted on fixed [-5, 5] axes; diagnosis codes
    0/1/2 map to Normal/GS/Glaucoma marker edge colors.
    """
    name_of = {2: 'Glaucoma', 1: 'GS', 0: 'Normal'}
    colors = {'Glaucoma': 'red', 'Normal': 'green', 'GS': 'yellow'}
    group_names = np.array([name_of[d] for d in val_dx])
    fig, axis = plt.subplots()
    for group in np.unique(group_names):
        members = np.where(group_names == group)
        axis.set_xlim([-5, 5])
        axis.set_ylim([-5, 5])
        axis.scatter(mu[members, 0], mu[members, 1], color="None",
                     edgecolors=colors[group], linewidth=2, label=group, s=100)
    axis.legend()
    plt.savefig(save_path)
    plt.close()
def plot_zembedded(mu, val_dx, save_path, type='tsne'):
    """Embed a latent space to 2-D (t-SNE or UMAP) and scatter-plot it by diagnosis."""
    assert type in ['tsne', 'umap'], 'type can be one of umap or tsne'
    name_of = {2: 'Glaucoma', 1: 'GS', 0: 'Normal'}
    # Reduce the latent codes to two dimensions with the requested algorithm.
    if (type == 'tsne'):
        mu = TSNE(n_components=2, perplexity=30).fit_transform(mu)
    else:
        mu = umap.UMAP(n_neighbors=10, min_dist=0.1, n_components=2,
                       metric='euclidean').fit_transform(mu)
    colors = {'Glaucoma': 'red', 'Normal': 'green', 'GS': 'yellow'}
    group_names = np.array([name_of[d] for d in val_dx])
    fig, axis = plt.subplots()
    for group in np.unique(group_names):
        members = np.where(group_names == group)
        axis.scatter(mu[members, 0], mu[members, 1], color="None",
                     edgecolors=colors[group], linewidth=2, label=group, s=100)
    axis.legend()
    plt.savefig(save_path)
    plt.close()
def subsampled_elbo_losses(model, t_ts, inputs, modality_flags, annealing_factor=1.,
                           batch_size=None):
    """
    Sum the ELBO over every allowed subset of the input modalities.

    Each combination is passed through the model separately and its ELBO
    (cross-entropy reconstruction + annealed KL) is accumulated.
    :param model: multimodal model, called as model(t_ts, inputs_subset)
    :param t_ts: visit times
    :param inputs: list of 3 input modalities; see config and data loader for the order
    :param modality_flags: array of binary numbers, e.g [1,1,0]; 0 entry denotes the modality not to be used
    :param annealing_factor: weight applied to the KL term
    :param batch_size: unused -- kept for interface compatibility
    :return: (total loss, total KL divergence), summed over combinations
    """
    assert len(inputs) == 3, 'size of input list should be 3'
    comb = [[1, 1, 1], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
    # comb = [[1, 1, 0], [0, 1, 0], [1, 0, 0] ]
    comb = select_modalities(comb, modality_flags)
    assert len(comb) > 0, 'no modality is selected'
    loss = 0
    KLD = 0
    for c in comb:
        assert len(c) == len(modality_flags), 'modality_flags array size should match combintation array'
        input_xi_sub = subsample(inputs, c)
        pred_list, [mu, logvar] = model(t_ts, input_xi_sub)
        # Drop predictions/targets of modalities not in this combination
        # before computing the loss below.
        pred_list_ = subsample(pred_list, c)
        inputs_ = subsample(inputs, c)
        # NOTE(review): only the FIRST visit is kept as reconstruction
        # target (inp[:, :1]) -- presumably intended for pretraining;
        # confirm before reuse.
        inputs_ = [inp[:, :1, :, :, :] if inp is not None else None for inp in inputs_]
        loss_, KLD_ = elbo_general_timeseries(preds=pred_list_, gts=inputs_, mu=mu, logvar=logvar,
                                              annealing_factor=annealing_factor, loss_type='ce')
        loss += loss_
        KLD += KLD_
    return loss, KLD
class VAEData:
    """Abstract interface for datasets consumed by the VAE training loop.

    Concrete subclasses must provide mini-batch sampling for training,
    access to the full validation split, and the sizes of both splits.
    """

    def get_train_minibatch(self, batch_size):
        """Return one randomly-sampled training mini-batch of `batch_size` samples."""
        # Fixed typo in the error message ('imlimentation' -> 'implementation').
        raise NotImplementedError('implementation required')

    def get_val(self):
        """Return the full validation split."""
        raise NotImplementedError('implementation required')

    def size_train(self):
        """Return the number of training samples."""
        raise NotImplementedError('implementation required')

    def size_val(self):
        """Return the number of validation samples."""
        raise NotImplementedError('implementation required')
from data.utils import resize_stack
def process_maps(train, val, test):
    """Fold test maps into train, move channels to axis 2, and push to device.

    Inputs are numpy stacks laid out (N, t, H, W, c); the returned float
    tensors are laid out (N, t, c, H, W) and live on the module `device`.
    """
    merged = np.vstack([train, test])
    # Unpacking doubles as a rank-5 sanity check on both stacks.
    n_m, t_m, h_m, w_m, c_m = merged.shape
    n_v, t_v, h_v, w_v, c_v = val.shape
    to_device_tensor = lambda arr: torch.from_numpy(
        arr.transpose([0, 1, 4, 2, 3])).float().to(device)  # (N, t, c, H, W)
    return to_device_tensor(merged), to_device_tensor(val)
def process_labels(train, val, test):
    """Fold test labels into train and return both splits as float tensors on `device`."""
    combined = np.vstack([train, test])
    as_device_tensor = lambda arr: torch.from_numpy(arr).float().to(device)
    return as_device_tensor(combined), as_device_tensor(val)
from cutils.common import normalize_range
#import kornia as K
class MultimodalTimeSeriesData(VAEData):
    """Longitudinal multimodal dataset (ONH RNFL, macular RNFL, VFT maps).

    Loads the train/val/test folds, folds the test split into training (via
    process_maps/process_labels), converts maps to (N, t, c, H, W) float
    tensors on `device`, and rescales visit ages to a [-1, 1] range.
    """
    def __init__(self, fold_seed=4):
        train, val, test = get_data_ts_onh_mac(mask_onhrnfl_disc=True, fold_seed=fold_seed)
        # ONH RNFL maps are downsampled to 32x32 before tensor conversion.
        rnfls_onh = train[0][0], val[0][0], test[0][0]
        rnfls_onh= [resize_stack(d, (32, 32)) for d in rnfls_onh]
        self.train_rnflonh, self.val_rnflonh = process_maps(rnfls_onh[0], rnfls_onh[1],rnfls_onh[2])
        # self.train_gclmac, self.val_gclmac = process_maps(train[0][3], val[0][3], test[0][3])
        self.train_rnflmac, self.val_rnflmac = process_maps(train[0][2], val[0][2], test[0][2])
        self.train_vft, self.val_vft = process_maps(train[0][4], val[0][4], test[0][4])
        assert self.train_rnflonh.shape[0] == self.train_rnflmac.shape[0], ' Number of maps should be same '
        self.train_dx, self.val_dx = process_labels(train[2][0], val[2][0], test[2][0])
        self.age_at_vd_train, self.age_at_vd_val = process_labels(train[1], val[1], test[1])
        # to years
        self.age_at_vd_train = self.age_at_vd_train / 12.0
        self.age_at_vd_val = self.age_at_vd_val / 12.0
        # transform in [-1, 1]: ages in years are mapped linearly from [20, 80].
        self.age_at_vd_train = normalize_range(self.age_at_vd_train, [20, 80], [-1, 1])
        self.age_at_vd_val = normalize_range(self.age_at_vd_val, [20, 80], [-1, 1])
        #self.rotate = K.augmentation.RandomRotation(4, same_on_batch=True)
    def augment_rot(self, amap):
        # Apply the same rotation to every time point of one sample.
        # NOTE(review): relies on self.rotate, whose initialization is
        # commented out in __init__ -- calling this raises AttributeError.
        temp = [ self.rotate (d) for d in amap]
        return torch.stack(temp)
    def get_train_minibatch(self, batch_size):
        """
        Sample a random training mini-batch.
        :param batch_size: number of samples to draw
        :return: maps, each of size (batch_size, t, c, H, W), and visit
            times ts of size (batch_size, t)
        """
        # idx = np.random.permutation(len(self.train))
        idx = torch.randperm(self.train_rnflonh.shape[0], device=device)
        aug_rnfl_batch = self.train_rnflonh[idx[:batch_size]] #self.augment_rot(self.train_rnflonh[idx[:batch_size]])
        maps = aug_rnfl_batch, self.train_rnflmac[idx[:batch_size]], self.train_vft[
            idx[:batch_size]]
        ts = self.age_at_vd_train[idx[:batch_size]]
        return maps, ts
    def get_val(self, dx_filter=None):
        """
        Return the full validation split.
        :param dx_filter: when truthy, keep only samples whose per-sample
            diagnosis (max over visits) equals this value
        :return: maps each of size (N, t, c, H, W), visit times ts, and
            per-sample diagnosis dx of size (N,)
        """
        maps = [self.val_rnflonh, self.val_rnflmac, self.val_vft]
        # reduce from N,t to N ie one diagnosis per sample (max over visits)
        ts = self.age_at_vd_val
        dx = torch.max(self.val_dx, dim=1)[0]  # (N,)
        if (dx_filter):
            maps = [m[dx == dx_filter] for m in maps]
            # NOTE(review): dx is overwritten before ts is masked, so ts is
            # filtered with the already-filtered dx -- likely a shape bug
            # whenever dx_filter is used; confirm.
            dx = dx[dx == dx_filter]
            ts = ts[dx == dx_filter]
        return maps, ts, dx
    def size_train(self):
        # Number of training samples.
        return self.train_rnflonh.shape[0]
    def size_val(self):
        # Number of validation samples.
        return self.val_rnflonh.shape[0]
def plot_losses_(train_loss, val_loss, save_path):
    """Plot train/val loss curves on a single axis and save the figure."""
    figure, axis = plt.subplots()
    for series, label in ((train_loss, 'train loss'), (val_loss, 'val loss')):
        axis.plot(series, label=label)
    axis.legend()
    plt.savefig(save_path)
    plt.close()
def save_losses(config):
    """Write the four recorded loss histories to <RESULT_DIR>/losses.csv."""
    histories = zip(
        config.loss_meter.loss_history,
        config.kl_loss_meter.loss_history,
        config.loss_meter_test.loss_history,
        config.kl_loss_meter_test.loss_history,
    )
    column_names = ['trainloss_elbo', 'trainloss_kl', 'val_loss_elbo', 'val_loss_kl']
    frame = pd.DataFrame(list(histories), columns=column_names)
    frame.to_csv(os.path.join(config.RESULT_DIR, 'losses.csv'))
class Config:
    """Base class for experiment configurations.

    Subclasses must implement `create_model`; `plot_losses` renders the
    ELBO and KL curves recorded in the running loss meters.
    """

    def create_model(self, load_weights=False):
        """Build and return the model; must be overridden by subclasses."""
        raise NotImplementedError('imlimentation required')

    def plot_losses(self):
        """Write lossplot.jpeg (ELBO) and lossplot_kld.jpeg (KL) into RESULT_DIR."""
        curve_specs = (
            (self.loss_meter.loss_history, self.loss_meter_test.loss_history, 'lossplot.jpeg'),
            (self.kl_loss_meter.loss_history, self.kl_loss_meter_test.loss_history, 'lossplot_kld.jpeg'),
        )
        for train_curve, val_curve, filename in curve_specs:
            plot_losses_(train_curve, val_curve, save_path=os.path.join(self.RESULT_DIR, filename))
from train_multimodalvae import getConfig as getConfigMVAE
def getConfig(modalities_, expert_, latent_dim_, fold_seed_=4):
    """Build a MyConfig class closed over the experiment hyper-parameters.

    :param modalities_: binary flags selecting the modalities used in training
    :param expert_: multimodal fusion expert name, e.g. 'moe' or 'poe'
    :param latent_dim_: dimensionality of the shared latent space
    :param fold_seed_: cross-validation fold seed (4 is the default fold)
    :return: the MyConfig class itself (not an instance)
    """
    class MyConfig(Config):
        EPOCHS = 150
        learning_rate = 0.001
        annealing_epochs = 10  # epochs over which the KL weight is annealed to 1
        latent_dim = latent_dim_
        image_dim = 64
        vfim_dim = 32
        # Non-default folds get their own log-directory suffix.
        _suffix = '_foldseed' + str(fold_seed_) if fold_seed_ != 4 else ''
        LOG_ROOT_DIR = 'pretrain_temp_mlode_fold2' + str(latent_dim)+_suffix
        MODALITIES = modalities_  # [0, 0, 1]
        expert = expert_  # 'moe'
        ode_method = 'euler'
        modalities_str = ''.join([str(xi) for xi in MODALITIES])
        prefix = 'multimoda_latentode' + str(latent_dim) + modalities_str + '_' + expert
        RESULT_DIR = os.path.join(LOG_ROOT_DIR, prefix)
        MODEL_DIR = os.path.join(RESULT_DIR, 'trained_models')
        # Running meters shared by the train/test loops (class-level state).
        loss_meter = RunningAverageMeter()
        loss_meter_test = RunningAverageMeter()
        kl_loss_meter = RunningAverageMeter()
        kl_loss_meter_test = RunningAverageMeter()
        # dx_loss_meter = RunningAverageMeter()
        def __str__(self):
            # NOTE(review): incomplete -- builds `fields` but returns None,
            # so str(config) raises TypeError; confirm whether it was meant
            # to format the listed fields.
            fields = ['EPOCHS', 'latent_dim', 'image_dim']
        def __init__(self, initialize_model=True):
            # NOTE(review): `initialize_model` is accepted but never used.
            # Create the log/result/model directories if missing.
            mkdir = lambda x: os.mkdir(x) if not os.path.exists(x) else 0
            mkdir(self.LOG_ROOT_DIR)
            mkdir(self.RESULT_DIR)
            mkdir(self.MODEL_DIR)
        def initialize_model_optimizer(self):
            """
            Build the model, warm-start its RNFL encoder/decoder from a
            pretrained multimodal VAE, and create the Adam optimizer.
            :return: None (sets self.model and self.optimizer)
            """
            self.model = self.create_model(load_weights=False)
            #params = (list(self.model.parameters()))
            #params = (list(self.model.get_params_finetune()))
            #self.optimizer = optim.Adam(params, lr=self.learning_rate)
            # Copy the RNFL encoder and decoder weights from the pretrained MVAE.
            print("HHYU", self.latent_dim)
            ConfigMVAE = getConfigMVAE(self.MODALITIES, self.expert, self.latent_dim)
            configmvae = ConfigMVAE()
            configmvae.model = configmvae.create_model(load_weights=True)
            source_model = configmvae.model.vae_rnfl.encoder
            dst_model = self.model.rnn_rnfl.rnn.encoder
            dst_model.load_state_dict(source_model.state_dict())
            #for param in dst_model.parameters():
            #    param.requires_grad = False
            source_model = configmvae.model.vae_rnfl.decoder
            dst_model = self.model.decoder_rnfl
            dst_model.load_state_dict(source_model.state_dict())
            #for param in dst_model.parameters():
            #    param.requires_grad = False
            # Only parameters with requires_grad=True are optimized.
            self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.learning_rate)
            #rnn_encoder_path ='temp_mvaeld32'
            #self.model.rnn_rnfl.rnn.load_inner_encoder(rnn_encoder_path)
        def create_model(self, load_weights=False, suffix_model_path=''):
            """Build the latent-ODE model, optionally loading saved weights
            from MODEL_DIR (always mapped to CPU before moving to `device`)."""
            config = self
            ode_solver = [odeint, config.ode_method]
            latentode = MultimodalLatentODE(latent_dim=config.latent_dim,
                                            device=device, expert=config.expert, ode_solver=ode_solver).to(device)
            if (load_weights):
                model_file = os.path.join(config.RESULT_DIR, 'trained_models',
                                          config.prefix + suffix_model_path + '.pt')
                print('loading weights ', model_file)
                # latentode.load_state_dict(torch.load(model_file, map_location='cpu'))
                latentode.load_state_dict(torch.load(model_file, map_location=torch.device('cpu')))
            return latentode
    return MyConfig
def train(epoch, config: Config, data: VAEData, niter_per_epoch=None):
    """Run one training epoch of subsampled multimodal ELBO optimization.

    :param epoch: 1-based epoch index, used for KL annealing
    :param config: config holding `model` and `optimizer`
        (call config.initialize_model_optimizer() first)
    :param data: dataset providing get_train_minibatch()
    :param niter_per_epoch: iterations per epoch; defaults to
        size_train / BATCH_SIZE
    """
    assert hasattr(config, 'model') and hasattr(config,
                                                'optimizer'), 'model not initialized for training. Call config.initialize_model_optimizer() before '
    model = config.model.train()
    if (niter_per_epoch is None): niter_per_epoch = int(data.size_train() / BATCH_SIZE)
    for itr in range(1, niter_per_epoch + 1):
        if epoch < config.annealing_epochs:
            # compute the KL annealing factor for the current mini-batch in the current epoch
            annealing_factor = (float(itr + (epoch - 1) * niter_per_epoch + 1) /
                                float(config.annealing_epochs * niter_per_epoch))
        else:
            # by default the KL annealing factor is unity
            annealing_factor = 1.0
        minibatches, ts = data.get_train_minibatch(BATCH_SIZE)
        # Drop the modalities disabled by config.MODALITIES before the loss.
        minibatches = subsample(minibatches, config.MODALITIES)
        train_elbo_loss, kld = subsampled_elbo_losses(model, ts, minibatches, config.MODALITIES,
                                                      annealing_factor=annealing_factor)
        config.optimizer.zero_grad()
        train_elbo_loss.backward()
        config.optimizer.step()
        # accumulate is True only on the epoch's last iteration -- presumably
        # that snapshots the running average; confirm in RunningAverageMeter.
        config.loss_meter.update(train_elbo_loss.item(), accumulate=itr == niter_per_epoch, smooth=True)
        config.kl_loss_meter.update(torch.mean(kld).item(), accumulate=itr == niter_per_epoch, smooth=True)
def visualize_embedding(config: Config, data: VAEData, epoch=None, type='umap'):
    """Plot the validation-set latent means, colored by diagnosis.

    With a 2-D latent space the means are plotted directly (zplots<epoch>.jpeg);
    with a larger latent space they are first reduced to 2-D by the algorithm
    named in `type` ('umap' or 'tsne') and saved as embed_zplots<epoch>.jpeg.
    """
    suffix = '' if epoch is None else epoch
    model = config.model.eval()
    with torch.no_grad():
        val_maps, ts_val, val_dx = data.get_val()
        val_maps_sub = subsample(val_maps, config.MODALITIES)
        dx_np = val_dx.cpu().numpy()
        mu, _ = model.infer(ts_val, val_maps_sub)
        mu_np = mu.cpu().numpy()
        if config.latent_dim == 2:
            target = os.path.join(config.RESULT_DIR, 'zplots' + str(suffix) + '.jpeg')
            plot_z(mu_np, dx_np, save_path=target)
        if config.latent_dim > 2:
            target = os.path.join(config.RESULT_DIR, 'embed_zplots' + str(suffix) + '.jpeg')
            plot_zembedded(mu_np, dx_np, save_path=target, type=type)
def test(epoch, config: Config, data: VAEData):
    """Evaluate the current model on the full validation split and record the losses."""
    model = config.model.eval()
    with torch.no_grad():
        val_maps, val_ts, _val_dx = data.get_val()
        elbo, kld = subsampled_elbo_losses(model, val_ts, val_maps, config.MODALITIES)
        elbo_rounded = np.round(elbo.item(), 2)
        config.loss_meter_test.update(elbo_rounded, accumulate=True, smooth=False)
        config.kl_loss_meter_test.update(torch.mean(kld).item(), accumulate=True, smooth=False)
import io
def print_to_string(*args, **kwargs):
    """Render the arguments exactly as `print` would, returning the text instead of writing it."""
    buffer = io.StringIO()
    print(*args, file=buffer, **kwargs)
    rendered = buffer.getvalue()
    buffer.close()
    return rendered
def viz_latent_space(model, modalities, save_path, image_size):
    """Save decoded visualizations of the latent space for the active modalities.

    Writes '<base>randsample.<ext>' (decodings of random latent samples) for
    latent_dim >= 2, plus '<base>latentspace.<ext>' (a 16x16 grid sweep)
    when latent_dim == 2.
    :param model: model exposing decoder_rnfl/decoder_gcl/decoder_vft
    :param modalities: binary flags selecting which decoders to visualize
    :param save_path: output path; split on '.' into base and extension
    :param image_size: size of the decoded images
    """
    from utils.viz_latent import generate_random_reconstructions, generate_latent_space
    import cv2
    generators = [model.decoder_rnfl, model.decoder_gcl, model.decoder_vft]
    assert len(generators) == len(modalities), 'check modality numbers '
    generators = [g for g, m in zip(generators, modalities) if m]
    # NOTE(review): split('.') misbehaves for paths containing extra dots
    # (e.g. dotted directories); confirm save_path is always '<name>.<ext>'.
    file_ext = save_path.split('.')
    if (model.latent_dim >= 2):
        viz_rand = generate_random_reconstructions(generators, device=device, latent_dim=model.latent_dim,
                                                   image_size=image_size)
        cv2.imwrite(file_ext[0] + 'randsample' + '.' + file_ext[1], viz_rand)
    if (model.latent_dim == 2):
        viz_rand = generate_latent_space(generators, device=device, latent_dim=model.latent_dim, image_size=image_size,
                                         grid_size=16)
        cv2.imwrite(file_ext[0] + 'latentspace' + '.' + file_ext[1], viz_rand)
def print_losses(config, start, end, extra_str=''):
    """Print a one-line training summary for the current epoch.

    NOTE: reads the module-level global ``epoch`` set by the training loop.
    """
    fields = [
        ('Epoch', epoch),
        ('trainloss', config.loss_meter.avg),
        ('kl_loss', config.kl_loss_meter.avg),
        ('valloss', config.loss_meter_test.avg),
        ('time/ep', end - start),
    ]
    summary = ', '.join('{}: {:.2f}'.format(name, value) for name, value in fields)
    print(summary, extra_str, flush=True)
def get_rec_errors(config, data, mode, nv_fc):
    """Format per-input reconstruction errors as a single string.

    Evaluates reconstruction errors via evaluate_reconstruction_error and
    concatenates, per input, its name and the list of errors formatted to
    two decimals (None entries are kept as None).
    """
    errors, inputs_c, preds, _ = evaluate_reconstruction_error(config, data, mode=mode, nv_fc=nv_fc)
    pieces = []
    for name, errs in zip(inputs_c, errors):
        formatted = ["{0:0.2f}".format(e) if e is not None else None for e in errs]
        pieces.append(str(name) + ' ' + str(formatted))
    return ''.join(pieces)
# Experiments parameters:
# error type in loss : ce or se
#freeze weights of encoder and decoder
# RNN : turn ode transistion on/off
#ode method of forecasting function : rk4/euler
if (__name__ == '__main__'):
    # Entry point: grid-search driver over expert type, modality combination
    # and latent dimensionality for the multimodal time-series VAE.
    # vftdata = VFTData()
    # modalities_exp = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
    # experts = ['moe', 'poe']
    # running on elboall
    experts = ['poe']#, 'moe']
    # running on mvae
    # modalities_exp = [[0,0,1] ]
    # experts = ['moe']
    fold_seed=5
    #latent_dims=[2, 4,8, 16, 32, 64]
    #latent_dim = 8
    latent_dims = [32]
    for latent_dim in latent_dims:
        for expert in experts:
            # The modality combinations to run depend on the expert type.
            if (expert == 'moe'): modalities_exp = [[1, 0, 0], [0, 0, 1]]#[1, 0, 1]
            if (expert == 'poe'): modalities_exp = [[1, 0, 1]]
            for mm in modalities_exp:
                data = MultimodalTimeSeriesData(fold_seed=fold_seed)
                # NOTE: rebinds the name `Config` (also used above as a type
                # hint) to the run-specific class returned by getConfig.
                Config = getConfig(mm, expert, latent_dim_=latent_dim, fold_seed_=fold_seed)
                config = Config()
                BATCH_SIZE = 32
                nepochs = config.EPOCHS
                # Iterations per epoch are derived from the training-set size.
                niter_per_epoch = int(data.train_rnflonh.shape[0] / BATCH_SIZE) #30000#
                print('Training data size', data.size_train())
                print('Val data size', data.size_val())
                print('#Total iterations ', nepochs * niter_per_epoch)
                print('Log directory', config.RESULT_DIR)
                print('Prefix', config.prefix)
                config.initialize_model_optimizer()
                for epoch in range(1, nepochs + 1):
                    start = time.time()
                    print("HHYU train")
                    train(epoch, config, data, niter_per_epoch=niter_per_epoch)
                    print("HHYU test")
                    test(epoch, config, data)
                    end = time.time()
                    print("HHYU get_rec_errors")
                    rec_eval = get_rec_errors(config, data, mode='rec', nv_fc=1)
                    print_losses(config, start, end, extra_str = rec_eval)
                    # Every 5 epochs: checkpoint, latent-space images, loss
                    # plots and an embedding visualization.
                    if (epoch % 5 == 0):
                        save_model(config.model, config.MODEL_DIR, config.prefix)
                        viz_latent_space(config.model.eval(), config.MODALITIES,
                                         os.path.join(config.RESULT_DIR, 'latent' + str(epoch) + '.png'), image_size=32)
                        config.plot_losses()
                        save_losses(config)
                        visualize_embedding(config, data, epoch=epoch, type='umap')
                    #recontructon error
                    #errors, inputs, preds = evaluate_reconstruction_error(config, data, mode='rec', nv_fc=3)
                    #for i, e in zip(inputs, errors):
                    #    print(i, ["{0:0.2f}".format(i) if i is not None else None for i in e])
                    # Extra epoch-tagged checkpoint at epoch 25.
                    if (epoch == 25):
                        save_model(config.model, config.MODEL_DIR, config.prefix, epoch=epoch)
#https://kornia.readthedocs.io/en/latest/augmentation.html
| [
"torch.randperm",
"torch.max",
"torch.from_numpy",
"numpy.asanyarray",
"torch.cuda.is_available",
"torch.sum",
"losses.elbo_general_timeseries",
"umap.UMAP",
"losses.mae_loss",
"train_multimodalvae.getConfig",
"os.path.exists",
"torch.cuda.device",
"numpy.where",
"data.data_loader.get_data... | [((683, 721), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (697, 721), False, 'import os\n'), ((900, 922), 'torch.cuda.device', 'torch.cuda.device', (['GPU'], {}), '(GPU)\n', (917, 922), False, 'import torch\n'), ((1262, 1310), 'torch.sum', 'torch.sum', (['vft_imgs'], {'dim': '[0, 1, 2]', 'keepdim': '(True)'}), '(vft_imgs, dim=[0, 1, 2], keepdim=True)\n', (1271, 1310), False, 'import torch\n'), ((1322, 1356), 'torch.where', 'torch.where', (['(temp > 0)', 'one_', 'zero_'], {}), '(temp > 0, one_, zero_)\n', (1333, 1356), False, 'import torch\n'), ((1949, 1991), 'utils.utils.select_modalities', 'select_modalities', (['comb', 'config.MODALITIES'], {}), '(comb, config.MODALITIES)\n', (1966, 1991), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((4646, 4660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4658, 4660), True, 'from matplotlib import pyplot as plt\n'), ((4674, 4691), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (4683, 4691), True, 'import numpy as np\n'), ((4911, 4933), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (4922, 4933), True, 'from matplotlib import pyplot as plt\n'), ((4938, 4949), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4947, 4949), True, 'from matplotlib import pyplot as plt\n'), ((5552, 5566), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5564, 5566), True, 'from matplotlib import pyplot as plt\n'), ((5580, 5597), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (5589, 5597), True, 'import numpy as np\n'), ((5759, 5781), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (5770, 5781), True, 'from matplotlib import pyplot as plt\n'), ((5786, 5797), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5795, 5797), True, 'from matplotlib import pyplot 
as plt\n'), ((6459, 6498), 'utils.utils.select_modalities', 'select_modalities', (['comb', 'modality_flags'], {}), '(comb, modality_flags)\n', (6476, 6498), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((8103, 8127), 'numpy.vstack', 'np.vstack', (['[train, test]'], {}), '([train, test])\n', (8112, 8127), True, 'import numpy as np\n'), ((8492, 8516), 'numpy.vstack', 'np.vstack', (['[train, test]'], {}), '([train, test])\n', (8501, 8516), True, 'import numpy as np\n'), ((11669, 11683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11681, 11683), True, 'from matplotlib import pyplot as plt\n'), ((11788, 11810), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (11799, 11810), True, 'from matplotlib import pyplot as plt\n'), ((11815, 11826), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11824, 11826), True, 'from matplotlib import pyplot as plt\n'), ((19774, 19787), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (19785, 19787), False, 'import io\n'), ((798, 823), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (821, 823), False, 'import torch\n'), ((996, 1027), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['GPU'], {}), '(GPU)\n', (1022, 1027), False, 'import torch\n'), ((1118, 1135), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (1130, 1135), False, 'import torch\n'), ((1155, 1172), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1167, 1172), False, 'import torch\n'), ((2002, 2017), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2015, 2017), False, 'import torch\n'), ((4706, 4727), 'numpy.where', 'np.where', (['(groups == g)'], {}), '(groups == g)\n', (4714, 4727), True, 'import numpy as np\n'), ((5612, 5633), 'numpy.where', 'np.where', (['(groups == g)'], {}), '(groups == g)\n', (5620, 5633), True, 'import numpy as np\n'), ((6873, 6893), 'utils.utils.subsample', 'subsample', 
(['inputs', 'c'], {}), '(inputs, c)\n', (6882, 6893), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((7111, 7134), 'utils.utils.subsample', 'subsample', (['pred_list', 'c'], {}), '(pred_list, c)\n', (7120, 7134), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((7153, 7173), 'utils.utils.subsample', 'subsample', (['inputs', 'c'], {}), '(inputs, c)\n', (7162, 7173), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((7406, 7537), 'losses.elbo_general_timeseries', 'elbo_general_timeseries', ([], {'preds': 'pred_list_', 'gts': 'inputs_', 'mu': 'mu', 'logvar': 'logvar', 'annealing_factor': 'annealing_factor', 'loss_type': '"""ce"""'}), "(preds=pred_list_, gts=inputs_, mu=mu, logvar=logvar,\n annealing_factor=annealing_factor, loss_type='ce')\n", (7429, 7537), False, 'from losses import elbo_general_timeseries\n'), ((8819, 8883), 'data.data_loader.get_data_ts_onh_mac', 'get_data_ts_onh_mac', ([], {'mask_onhrnfl_disc': '(True)', 'fold_seed': 'fold_seed'}), '(mask_onhrnfl_disc=True, fold_seed=fold_seed)\n', (8838, 8883), False, 'from data.data_loader import get_data_ts_onh_mac\n'), ((9878, 9934), 'cutils.common.normalize_range', 'normalize_range', (['self.age_at_vd_train', '[20, 80]', '[-1, 1]'], {}), '(self.age_at_vd_train, [20, 80], [-1, 1])\n', (9893, 9934), False, 'from cutils.common import normalize_range\n'), ((9964, 10018), 'cutils.common.normalize_range', 'normalize_range', (['self.age_at_vd_val', '[20, 80]', '[-1, 1]'], {}), '(self.age_at_vd_val, [20, 80], [-1, 1])\n', (9979, 10018), False, 'from cutils.common import normalize_range\n'), ((10259, 10276), 'torch.stack', 'torch.stack', (['temp'], {}), '(temp)\n', (10270, 10276), False, 'import torch\n'), ((10534, 10592), 'torch.randperm', 'torch.randperm', (['self.train_rnflonh.shape[0]'], {'device': 'device'}), '(self.train_rnflonh.shape[0], device=device)\n', 
(10548, 10592), False, 'import torch\n'), ((12205, 12250), 'os.path.join', 'os.path.join', (['config.RESULT_DIR', '"""losses.csv"""'], {}), "(config.RESULT_DIR, 'losses.csv')\n", (12217, 12250), False, 'import os\n'), ((13590, 13624), 'os.path.join', 'os.path.join', (['LOG_ROOT_DIR', 'prefix'], {}), '(LOG_ROOT_DIR, prefix)\n', (13602, 13624), False, 'import os\n'), ((13645, 13687), 'os.path.join', 'os.path.join', (['RESULT_DIR', '"""trained_models"""'], {}), "(RESULT_DIR, 'trained_models')\n", (13657, 13687), False, 'import os\n'), ((13710, 13731), 'utils.utils.RunningAverageMeter', 'RunningAverageMeter', ([], {}), '()\n', (13729, 13731), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((13758, 13779), 'utils.utils.RunningAverageMeter', 'RunningAverageMeter', ([], {}), '()\n', (13777, 13779), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((13805, 13826), 'utils.utils.RunningAverageMeter', 'RunningAverageMeter', ([], {}), '()\n', (13824, 13826), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((13856, 13877), 'utils.utils.RunningAverageMeter', 'RunningAverageMeter', ([], {}), '()\n', (13875, 13877), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((17525, 17566), 'utils.utils.subsample', 'subsample', (['minibatches', 'config.MODALITIES'], {}), '(minibatches, config.MODALITIES)\n', (17534, 17566), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((18333, 18348), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18346, 18348), False, 'import torch\n'), ((18435, 18479), 'utils.utils.subsample', 'subsample', (['ds_val_batches', 'config.MODALITIES'], {}), '(ds_val_batches, config.MODALITIES)\n', (18444, 18479), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((19239, 
19254), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19252, 19254), False, 'import torch\n'), ((20373, 20488), 'utils.viz_latent.generate_random_reconstructions', 'generate_random_reconstructions', (['generators'], {'device': 'device', 'latent_dim': 'model.latent_dim', 'image_size': 'image_size'}), '(generators, device=device, latent_dim=model\n .latent_dim, image_size=image_size)\n', (20404, 20488), False, 'from utils.viz_latent import generate_random_reconstructions, generate_latent_space\n'), ((20543, 20612), 'cv2.imwrite', 'cv2.imwrite', (["(file_ext[0] + 'randsample' + '.' + file_ext[1])", 'viz_rand'], {}), "(file_ext[0] + 'randsample' + '.' + file_ext[1], viz_rand)\n", (20554, 20612), False, 'import cv2\n'), ((20665, 20784), 'utils.viz_latent.generate_latent_space', 'generate_latent_space', (['generators'], {'device': 'device', 'latent_dim': 'model.latent_dim', 'image_size': 'image_size', 'grid_size': '(16)'}), '(generators, device=device, latent_dim=model.\n latent_dim, image_size=image_size, grid_size=16)\n', (20686, 20784), False, 'from utils.viz_latent import generate_random_reconstructions, generate_latent_space\n'), ((20829, 20899), 'cv2.imwrite', 'cv2.imwrite', (["(file_ext[0] + 'latentspace' + '.' + file_ext[1])", 'viz_rand'], {}), "(file_ext[0] + 'latentspace' + '.' 
+ file_ext[1], viz_rand)\n", (20840, 20899), False, 'import cv2\n'), ((2287, 2316), 'utils.utils.subsample', 'subsample', (['inputs', 'modalities'], {}), '(inputs, modalities)\n', (2296, 2316), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((3740, 3768), 'utils.utils.subsample', 'subsample', (['ds_val_batches', 'c'], {}), '(ds_val_batches, c)\n', (3749, 3768), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((4090, 4125), 'utils.utils.subsample', 'subsample', (['error', 'config.MODALITIES'], {}), '(error, config.MODALITIES)\n', (4099, 4125), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((4146, 4181), 'utils.utils.subsample', 'subsample', (['preds', 'config.MODALITIES'], {}), '(preds, config.MODALITIES)\n', (4155, 4181), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((8960, 8985), 'data.utils.resize_stack', 'resize_stack', (['d', '(32, 32)'], {}), '(d, (32, 32))\n', (8972, 8985), False, 'from data.utils import resize_stack\n'), ((11243, 11272), 'torch.max', 'torch.max', (['self.val_dx'], {'dim': '(1)'}), '(self.val_dx, dim=1)\n', (11252, 11272), False, 'import torch\n'), ((14778, 14838), 'train_multimodalvae.getConfig', 'getConfigMVAE', (['self.MODALITIES', 'self.expert', 'self.latent_dim'], {}), '(self.MODALITIES, self.expert, self.latent_dim)\n', (14791, 14838), True, 'from train_multimodalvae import getConfig as getConfigMVAE\n'), ((2539, 2560), 'numpy.asanyarray', 'np.asanyarray', (['[-100]'], {}), '([-100])\n', (2552, 2560), True, 'import numpy as np\n'), ((5169, 5204), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'perplexity': '(30)'}), '(n_components=2, perplexity=30)\n', (5173, 5204), False, 'from sklearn.manifold import TSNE\n'), ((5246, 5321), 'umap.UMAP', 'umap.UMAP', ([], {'n_neighbors': '(10)', 'min_dist': '(0.1)', 
'n_components': '(2)', 'metric': '"""euclidean"""'}), "(n_neighbors=10, min_dist=0.1, n_components=2, metric='euclidean')\n", (5255, 5321), False, 'import umap\n'), ((12561, 12607), 'os.path.join', 'os.path.join', (['self.RESULT_DIR', '"""lossplot.jpeg"""'], {}), "(self.RESULT_DIR, 'lossplot.jpeg')\n", (12573, 12607), False, 'import os\n'), ((12787, 12837), 'os.path.join', 'os.path.join', (['self.RESULT_DIR', '"""lossplot_kld.jpeg"""'], {}), "(self.RESULT_DIR, 'lossplot_kld.jpeg')\n", (12799, 12837), False, 'import os\n'), ((16155, 16251), 'os.path.join', 'os.path.join', (['config.RESULT_DIR', '"""trained_models"""', "(config.prefix + suffix_model_path + '.pt')"], {}), "(config.RESULT_DIR, 'trained_models', config.prefix +\n suffix_model_path + '.pt')\n", (16167, 16251), False, 'import os\n'), ((3111, 3153), 'losses.mae_loss', 'mae_loss', (['(gcl_xi_val * 200)', '(pred_gcl * 200)'], {}), '(gcl_xi_val * 200, pred_gcl * 200)\n', (3119, 3153), False, 'from losses import mae_globalmean, mae_loss\n'), ((8324, 8347), 'torch.from_numpy', 'torch.from_numpy', (['train'], {}), '(train)\n', (8340, 8347), False, 'import torch\n'), ((8377, 8398), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (8393, 8398), False, 'import torch\n'), ((8530, 8553), 'torch.from_numpy', 'torch.from_numpy', (['train'], {}), '(train)\n', (8546, 8553), False, 'import torch\n'), ((8583, 8604), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (8599, 8604), False, 'import torch\n'), ((14097, 14108), 'os.mkdir', 'os.mkdir', (['x'], {}), '(x)\n', (14105, 14108), False, 'import os\n'), ((15930, 16044), 'models.multiodal_latentodegru_pretrain.MultimodalLatentODE', 'MultimodalLatentODE', ([], {'latent_dim': 'config.latent_dim', 'device': 'device', 'expert': 'config.expert', 'ode_solver': 'ode_solver'}), '(latent_dim=config.latent_dim, device=device, expert=\n config.expert, ode_solver=ode_solver)\n', (15949, 16044), False, 'from models.multiodal_latentodegru_pretrain import 
MultimodalLatentODE\n'), ((18001, 18016), 'torch.mean', 'torch.mean', (['kld'], {}), '(kld)\n', (18011, 18016), False, 'import torch\n'), ((19651, 19666), 'torch.mean', 'torch.mean', (['kld'], {}), '(kld)\n', (19661, 19666), False, 'import torch\n'), ((23329, 23340), 'time.time', 'time.time', ([], {}), '()\n', (23338, 23340), False, 'import time\n'), ((23572, 23583), 'time.time', 'time.time', ([], {}), '()\n', (23581, 23583), False, 'import time\n'), ((2470, 2486), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (2483, 2486), False, 'import torch\n'), ((14116, 14133), 'os.path.exists', 'os.path.exists', (['x'], {}), '(x)\n', (14130, 14133), False, 'import os\n'), ((23856, 23913), 'utils.utils.save_model', 'save_model', (['config.model', 'config.MODEL_DIR', 'config.prefix'], {}), '(config.model, config.MODEL_DIR, config.prefix)\n', (23866, 23913), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((24673, 24743), 'utils.utils.save_model', 'save_model', (['config.model', 'config.MODEL_DIR', 'config.prefix'], {'epoch': 'epoch'}), '(config.model, config.MODEL_DIR, config.prefix, epoch=epoch)\n', (24683, 24743), False, 'from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample\n'), ((16510, 16529), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16522, 16529), False, 'import torch\n')] |
"""Generic protoplanetary disk model
The density is given by
.. math::
\\rho = \\frac{\\Sigma(r,\\phi)}{H_p\\sqrt{(2\\pi)}} \\exp{\\left(-\\frac{z^2}{2H_p^2}\\right)}
* :math:`\Sigma` - surface density
* :math:`H_{\\rm p}` - Pressure scale height
There are two options for the functional form of surface density as a function of radius. For a simple
power-law the surface density is given by
* :math:`\Sigma(r) = \\Sigma_0\\left(\\frac{r}{r_{\\rm out}}\\right)^p`
alternatively the surface density can also have an exponential outer tapering:
* :math:`\Sigma(r) = \\Sigma_0\\left(\\frac{r}{r_{\\rm out}}\\right)^p\\exp{\\left\\{-\\left(\\frac{r}{r_{\\rm out}}\\right)^{2+p}\\right\\}}`
The molecular abundance function takes into account dissociation and freeze-out of the molecules
For photodissociation only the continuum (dust) shielding is taken into account in a way that
whenever the continuum optical depth radially drops below a threshold value the molecular abundance
is dropped to zero. For freeze-out the molecular abundance below a threshold temperature is decreased
by a given factor.
"""
from __future__ import absolute_import
from __future__ import print_function
import warnings
import traceback
try:
import numpy as np
except ImportError:
np = None
print(' Numpy cannot be imported ')
print(' To use the python module of RADMC-3D you need to install Numpy')
print(traceback.format_exc())
from .. natconst import *
from .. import analyze
def getModelDesc():
    """Provide a one-line, human-readable description of this model.

    Returns
    -------
    str
        Short description of the model.
    """
    return "Generic protoplanetary disk model"
def getDefaultParams():
    """Provide the default parameter values of the model.

    Returns
    -------
    list
        Each element is a three-element list of strings:
        [parameter name, parameter value, parameter description].
        The value string is written verbatim to the parameter file if
        requested and is evaluated into radmc3dData.ppar; the description
        becomes the comment field of the written line.
    """
    return [
        # -- grid refinement and extent --------------------------------------
        ['xres_nlev', '3', 'Number of refinement levels'],
        ['xres_nspan', '3', 'Number of the original grid cells to refine'],
        ['xres_nstep', '3', 'Number of grid cells to create in a refinement level'],
        ['nx', '[30,50]', 'Number of grid points in the first dimension'],
        ['xbound', '[1.0*au,1.05*au, 100.0*au]', 'Number of radial grid points'],
        ['ny', '[10,30,30,10]', 'Number of grid points in the first dimension'],
        ['ybound', '[0., pi/3., pi/2., 2.*pi/3., pi]', 'Number of radial grid points'],
        ['nz', '30', 'Number of grid points in the first dimension'],
        ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],
        # -- gas species -------------------------------------------------------
        ['gasspec_mol_name', "['co']", ''],
        ['gasspec_mol_abun', '[1e-4]', ''],
        ['gasspec_mol_dbase_type', "['leiden']", ''],
        ['gasspec_mol_dissoc_taulim', '[1.0]', 'Continuum optical depth limit below which all molecules dissociate'],
        ['gasspec_mol_freezeout_temp', '[19.0]', 'Freeze-out temperature of the molecules in Kelvin'],
        ['gasspec_mol_freezeout_dfact', '[1e-3]',
         'Factor by which the molecular abundance should be decreased in the frezze-out zone'],
        ['gasspec_vturb', '0.2e5', 'Microturbulent line width'],
        # -- disk structure ----------------------------------------------------
        ['rin', '1.0*au', ' Inner radius of the disk'],
        ['rdisk', '100.0*au', ' Outer radius of the disk'],
        ['hrdisk', '0.1', ' Ratio of the pressure scale height over radius at hrpivot'],
        ['hrpivot', '100.0*au', ' Reference radius at which Hp/R is taken'],
        ['plh', '1./7.', ' Flaring index'],
        ['plsig1', '-1.0', ' Power exponent of the surface density distribution as a function of radius'],
        ['sig0', '0.0', ' Surface density at rdisk'],
        ['mdisk', '1e-3*ms', ' Mass of the disk (either sig0 or mdisk should be set to zero or commented out)'],
        ['bgdens', '1e-30', ' Background density (g/cm^3)'],
        # -- inner rim smoothing / puffing ------------------------------------
        ['srim_rout', '0.0', 'Outer boundary of the smoothing in the inner rim in terms of rin'],
        ['srim_plsig', '0.0', 'Power exponent of the density reduction inside of srim_rout*rin'],
        ['prim_rout', '0.0', 'Outer boundary of the puffed-up inner rim in terms of rin'],
        ['hpr_prim_rout', '0.0', 'Pressure scale height at rin'],
        # -- gaps and misc -----------------------------------------------------
        ['gap_rin', '[0e0*au]', ' Inner radius of the gap'],
        ['gap_rout', '[0e0*au]', ' Outer radius of the gap'],
        ['gap_drfact', '[0e0]', ' Density reduction factor in the gap'],
        ['sigma_type', '0',
         ' Surface density type (0 - polynomial, 1 - exponential outer edge (viscous self-similar solution)'],
        ['dusttogas', '0.01', ' Dust-to-gas mass ratio']]
def getDustDensity(grid=None, ppar=None):
    """Calculates the dust density distribution in a protoplanetary disk.

    Parameters
    ----------
    grid : radmc3dGrid
        An instance of the radmc3dGrid class containing the spatial and
        frequency/wavelength grid
    ppar : dictionary
        A dictionary containing all parameters of the model

    Returns
    -------
    ndarray
        Dust volume density in g/cm^3 with shape (nx, ny, nz, ngs), where
        ngs is the number of grain-size / dust populations.
    """
    # Dust density is the gas density scaled by the dust-to-gas mass ratio.
    rho_total = np.array(getGasDensity(grid=grid, ppar=ppar)) * ppar['dusttogas']

    # Determine how the total dust mass is split among grain populations.
    if 'ngs' in ppar:
        if ppar['ngs'] > 1:
            ngs = ppar['ngs']
            # NOTE: all grain populations are assumed to share the same bulk
            # density (1 g/cm^3), so the mass fractions depend only on grain
            # size; the gsdist_powex power law sets the size distribution.
            gdens = np.zeros(ngs, dtype=float) + 1.0
            # Logarithmic grain-size grid between gsmin and gsmax.
            gs = ppar['gsmin'] * (ppar['gsmax'] / ppar['gsmin'])**(
                np.arange(ppar['ngs'], dtype=np.float64) / (float(ppar['ngs']) - 1.))
            gmass = 4. / 3. * np.pi * gs**3. * gdens
            gsfact = gmass * gs**(ppar['gsdist_powex'] + 1)
            gsfact = gsfact / gsfact.sum()
        else:
            gsfact = [1.0]
            ngs = 1
    elif 'mfrac' in ppar:
        # Explicit mass fractions: normalize so they sum to one.
        ngs = len(ppar['mfrac'])
        gsfact = ppar['mfrac'] / ppar['mfrac'].sum()
    else:
        ngs = 1
        gsfact = [1.0]

    # Replicate the density for every grain-size bin, weighted by its
    # (normalized) mass fraction.
    rho = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64)
    for igs in range(ngs):
        rho[:, :, :, igs] = rho_total[:, :, :] * gsfact[igs]
    return rho
def getGasDensity(grid=None, ppar=None):
    """Calculates the gas density distribution in a protoplanetary disk.

    The surface density follows either a pure power law (sigma_type == 0)
    or a power law with an exponential outer taper; the vertical structure
    is a Gaussian set by the pressure scale height.

    Parameters
    ----------
    grid : radmc3dGrid
        An instance of the radmc3dGrid class containing the spatial and
        frequency/wavelength grid
    ppar : dictionary
        A dictionary containing all parameters of the model

    Returns
    -------
    ndarray
        Gas volume density in g/cm^3 with shape (nx, ny, nz).
    """
    # meshgrid(grid.x, grid.y) yields arrays of shape (ny, nx); zz and rcyl
    # are the vertical height and cylindrical radius of each (r, theta) cell.
    rr, th = np.meshgrid(grid.x, grid.y)
    zz = rr * np.cos(th)
    rcyl = rr * np.sin(th)
    # Calculate the pressure scale height as a function of r, phi
    hp = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64)
    dum = ppar['hrdisk'] * (rcyl/ppar['hrpivot'])**ppar['plh'] * rcyl
    if 'prim_rout' in ppar:
        if ppar['prim_rout'] >= 1.:
            # Puffed-up inner rim: blend the flared disk profile with a rim
            # profile whose Hp/R at rin is hpr_prim_rout (8-norm smoothing).
            dum_hrdisk = ppar['hrdisk'] * (rcyl/ppar['hrpivot'])**ppar['plh']
            hpr0 = ppar['hrdisk'] * (ppar['prim_rout'] * ppar['rin']/ppar['hrpivot'])**ppar['plh']
            dummy = np.log10(hpr0 / ppar['hpr_prim_rout']) / np.log10(ppar['prim_rout'])
            dum_prim = ppar['hpr_prim_rout'] * (rcyl/ppar['rin'])**dummy
            dum = (dum_hrdisk**8. + dum_prim**8.)**(1./8.) * rcyl
    # swapaxes turns the (ny, nx) meshgrid layout into the (nx, ny) layout
    # of the output arrays; the profile is axisymmetric (copied over phi).
    dum = dum.swapaxes(0, 1)
    for iz in range(grid.nz):
        hp[:, :, iz] = dum
    # Calculate the surface density
    sigma = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64)
    # Calculate sigma from sig0, rdisk and plsig1
    if 'sig0' in ppar:
        # NOTE(review): when 'sig0' is present but equals 0 (the default
        # parameter set), this branch leaves sigma all-zero and the density
        # below reduces to bgdens before the mdisk normalization — confirm
        # this is the intended behavior.
        if ppar['sig0'] != 0.:
            if 'sigma_type' in ppar:
                if ppar['sigma_type'] == 0:
                    dum1 = ppar['sig0'] * (rcyl/ppar['rdisk'])**ppar['plsig1']
                else:
                    # Exponentially tapered (viscous self-similar) profile;
                    # note the sign flip of plsig1 relative to the power law.
                    expterm = np.exp(-(rcyl/ppar['rdisk'])**(2.0 - ppar['plsig1']))
                    dum1 = ppar['sig0'] * (rcyl/ppar['rdisk'])**(-ppar['plsig1']) * expterm
            else:
                dum1 = ppar['sig0'] * (rcyl/ppar['rdisk'])**ppar['plsig1']
            if ('srim_rout' in ppar) & ('srim_plsig' in ppar):
                if ppar['srim_rout'] != 0.:
                    if 'sigma_type' in ppar:
                        if ppar['sigma_type'] == 0:
                            # Adding the smoothed inner rim
                            sig_srim = ppar['sig0'] * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**ppar['plsig1']
                            dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                        else:
                            # sig_srim = 1.0 * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**ppar['plsig1']
                            sig_srim = ppar['sig0'] * (ppar['srim_rout']*ppar['rin']
                                                       / ppar['rdisk'])**(-ppar['plsig1']) \
                                       * np.exp(-(rcyl/ppar['rdisk'])**(2.0 - ppar['plsig1']))
                            dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                    else:
                        # Adding the smoothed inner rim
                        sig_srim = ppar['sig0'] * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**ppar['plsig1']
                        dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                    # Smooth blend of disk profile and rim profile (p-norm).
                    p = -5.0
                    dum = (dum1**p + dum2**p)**(1./p)
                else:
                    dum = dum1
            else:
                dum = dum1
            dum = dum.swapaxes(0, 1)
            for iz in range(grid.nz):
                sigma[:, :, iz] = dum
    else:
        # No sig0 given: build a unit-amplitude profile; the absolute scale
        # is set later by the mdisk normalization.
        if 'sigma_type' in ppar:
            if ppar['sigma_type'] == 0:
                dum1 = 1.0 * (rcyl/ppar['rdisk'])**ppar['plsig1']
            else:
                dum1 = 1.0 * (rcyl/ppar['rdisk'])**(-ppar['plsig1']) \
                       * np.exp(-(rcyl/ppar['rdisk'])**(2.0 - ppar['plsig1']))
        else:
            dum1 = 1.0 * (rcyl/ppar['rdisk'])**ppar['plsig1']
        if ('srim_rout' in ppar) & ('srim_plsig' in ppar):
            if ppar['srim_rout'] != 0.:
                if 'sigma_type' in ppar:
                    if ppar['sigma_type'] == 0:
                        # Adding the smoothed inner rim
                        sig_srim = 1.0 * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**ppar['plsig1']
                        dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                    else:
                        sig_srim = 1.0 * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**(-ppar['plsig1']) \
                                   * np.exp(-(rcyl/ppar['rdisk'])**(2.0 - ppar['plsig1']))
                        dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                else:
                    # Adding the smoothed inner rim
                    sig_srim = 1.0 * (ppar['srim_rout']*ppar['rin'] / ppar['rdisk'])**ppar['plsig1']
                    dum2 = sig_srim * (rcyl / (ppar['srim_rout']*ppar['rin']))**ppar['srim_plsig']
                p = -5.0
                dum = (dum1**p + dum2**p)**(1./p)
            else:
                dum = dum1
        else:
            dum = dum1
        dum = dum.swapaxes(0, 1)
        for iz in range(grid.nz):
            sigma[:, :, iz] = dum
    # Zero the surface density inside rin and outside rdisk.
    # NOTE(review): both sigma_type branches below are identical — the split
    # looks redundant.
    if 'sigma_type' in ppar:
        if ppar['sigma_type'] == 0:
            for iy in range(grid.ny):
                ii = (rcyl[iy, :] < ppar['rin']) | (rcyl[iy, :] > ppar['rdisk'])
                sigma[ii, iy, :] = 0.0
        else:
            for iy in range(grid.ny):
                ii = (rcyl[iy, :] < ppar['rin']) | (rcyl[iy, :] > ppar['rdisk'])
                sigma[ii, iy, :] = 0.0
    # z0 is a (currently all-zero) midplane offset; note its axis order is
    # (nx, nz, ny), matching the z0[:, iz, iy] indexing below.
    z0 = np.zeros([grid.nx, grid.nz, grid.ny], dtype=np.float64)
    rho = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64)
    # Gaussian vertical structure: rho = sigma / (Hp sqrt(2 pi)) exp(-z^2/2Hp^2)
    # plus the background floor density.
    for iz in range(grid.nz):
        for iy in range(grid.ny):
            rho[:, iy, iz] = sigma[:, iy, iz] / (hp[:, iy, iz] * np.sqrt(2.0*np.pi)) * \
                             np.exp(-0.5 * ((zz[iy, :])-z0[:, iz, iy])*((zz[iy, :])-z0[:, iz, iy])
                                    / (hp[:, iy, iz]*hp[:, iy, iz])) + ppar['bgdens']
    # Normalize the disk to mdisk if it is given instead of sig0
    if 'mdisk' in ppar:
        if ppar['mdisk'] != 0.:
            # Calculate the volume of each grid cell
            vol = grid.getCellVolume()
            mass = (rho * vol).sum(0).sum(0).sum(0)
            rho = rho * (ppar['mdisk'] / mass)
            # Half-disk grids (theta ending at pi/2) hold half of mdisk.
            if np.abs(ppar['ybound'][-1]-(np.pi/2.)) < 1e-8:
                rho = rho*0.5
    # Apply density reduction inside each configured radial gap.
    for igap in range(len(ppar['gap_rout'])):
        for ix in range(grid.nx):
            if (grid.x[ix] >= ppar['gap_rin'][igap]) & (grid.x[ix] <= ppar['gap_rout'][igap]):
                rho[ix, :, :] = rho[ix, :, :] * ppar['gap_drfact'][igap]
    return rho
def getGasAbundance(grid=None, ppar=None, ispec=''):
    """Calculates the molecular abundance.

    The number density of a molecule is rhogas * abun. Photodissociation is
    modeled via the radial continuum optical depth (abundance dropped where
    tau < gasspec_mol_dissoc_taulim) and freeze-out via a dust-temperature
    threshold (abundance reduced by gasspec_mol_freezeout_dfact).

    Parameters
    ----------
    grid : radmc3dGrid
        An instance of the radmc3dGrid class containing the spatial and wavelength grid
    ppar : dictionary
        Dictionary containing all parameters of the model
    ispec : str
        The name of the gas species whose abundance should be calculated

    Returns
    -------
    Returns an ndarray containing the molecular abundance at each grid point

    Raises
    ------
    RuntimeError
        If the dust density/temperature cannot be read in either binary or
        formatted ascii form.
    """
    # Read the dust density and temperature: try binary first, fall back to
    # formatted ascii. BUGFIX: the bare `except:` clauses also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    try:
        data = analyze.readData(ddens=True, dtemp=True, binary=True)
    except Exception:
        try:
            data = analyze.readData(ddens=True, dtemp=True, binary=False)
        except Exception:
            msg = 'Gas abundance cannot be calculated as the required dust density and/or temperature '\
                  + 'could not be read in binary or in formatted ascii format.'
            raise RuntimeError(msg)
    # Calculate continuum optical depth (radial, at 0.55 micron).
    data.getTau(axis='xy', wav=0.55)
    if ispec in ppar['gasspec_mol_name']:
        sid = ppar['gasspec_mol_name'].index(ispec)
        gasabun = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64)
        # BUGFIX: the original looped `for spec in range(nspec)` while always
        # assigning the same sid-indexed value — the redundant loop is removed.
        gasabun[:, :, :] = ppar['gasspec_mol_abun'][sid]
        for iz in range(data.grid.nz):
            for iy in range(data.grid.ny):
                # Photodissociation: effectively zero abundance where the
                # radial continuum optical depth is below the threshold.
                ii = (data.taux[:, iy, iz] < ppar['gasspec_mol_dissoc_taulim'][sid])
                gasabun[ii, iy, iz] = 1e-90
                # Freeze-out: reduce the abundance below the freeze-out
                # temperature of this species.
                ii = (data.dusttemp[:, iy, iz, 0] < ppar['gasspec_mol_freezeout_temp'][sid])
                gasabun[ii, iy, iz] = ppar['gasspec_mol_abun'][sid] * ppar['gasspec_mol_freezeout_dfact'][sid]
    else:
        # Unknown species: warn and fall back to a uniform tiny abundance.
        gasabun = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64) + 1e-10
        txt = 'Molecule name "'+ispec+'" is not found in gasspec_mol_name \n A default 1e-10 abundance will be used'
        warnings.warn(txt, RuntimeWarning)
    return gasabun
def getVTurb(grid=None, ppar=None):
    """Calculates the turbulent velocity field

    Parameters
    ----------
    grid : radmc3dGrid
        An instance of the radmc3dGrid class containing the spatial and frequency/wavelength grid

    ppar : dictionary
        A dictionary containing all parameters of the model

    Returns
    -------
    Returns an ndarray with the turbulent velocity in cm/s
    """
    # The microturbulent velocity is spatially constant and taken
    # directly from the parameter dictionary.
    shape = (grid.nx, grid.ny, grid.nz)
    return np.full(shape, ppar['gasspec_vturb'], dtype=np.float64)
def getVelocity(grid=None, ppar=None):
    """Calculates the velocity field in a protoplanetary disk.

    Parameters
    ----------
    grid : radmc3dGrid
        An instance of the radmc3dGrid class containing the spatial and frequency/wavelength grid

    ppar : dictionary
        A dictionary containing all parameters of the model

    Returns
    -------
    Returns the gas velocity in cm/s
    """
    # Grid axes: x -> cylindrical radius, y -> vertical, z -> azimuth
    nr, nz, nphi = grid.nx, grid.ny, grid.nz
    rcyl = grid.x
    # Keplerian rotation speed around the central star at each radius
    vkep = np.sqrt(gg * ppar['mstar'][0] / rcyl)
    vel = np.zeros([nr, nz, nphi, 3], dtype=np.float64)
    # Only the azimuthal component (index 2) is non-zero; broadcast the
    # radius-dependent Keplerian speed over the vertical and azimuthal axes
    vel[:, :, :, 2] = vkep[:, None, None]
    return vel
| [
"traceback.format_exc",
"numpy.abs",
"numpy.log10",
"numpy.sqrt",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"warnings.warn",
"numpy.arange"
] | [((8135, 8148), 'numpy.array', 'np.array', (['rho'], {}), '(rho)\n', (8143, 8148), True, 'import numpy as np\n'), ((8159, 8219), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz, ngs]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64)\n', (8167, 8219), True, 'import numpy as np\n'), ((8783, 8810), 'numpy.meshgrid', 'np.meshgrid', (['grid.x', 'grid.y'], {}), '(grid.x, grid.y)\n', (8794, 8810), True, 'import numpy as np\n'), ((8939, 8994), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (8947, 8994), True, 'import numpy as np\n'), ((9673, 9728), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (9681, 9728), True, 'import numpy as np\n'), ((14183, 14238), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.nz, grid.ny]'], {'dtype': 'np.float64'}), '([grid.nx, grid.nz, grid.ny], dtype=np.float64)\n', (14191, 14238), True, 'import numpy as np\n'), ((14249, 14304), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (14257, 14304), True, 'import numpy as np\n'), ((18759, 18804), 'numpy.zeros', 'np.zeros', (['[nr, nz, nphi, 3]'], {'dtype': 'np.float64'}), '([nr, nz, nphi, 3], dtype=np.float64)\n', (18767, 18804), True, 'import numpy as np\n'), ((18816, 18853), 'numpy.sqrt', 'np.sqrt', (["(gg * ppar['mstar'][0] / rcyl)"], {}), "(gg * ppar['mstar'][0] / rcyl)\n", (18823, 18853), True, 'import numpy as np\n'), ((5536, 5552), 'numpy.array', 'np.array', (['rhogas'], {}), '(rhogas)\n', (5544, 5552), True, 'import numpy as np\n'), ((8825, 8835), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (8831, 8835), True, 'import numpy as np\n'), ((8852, 8862), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (8858, 8862), True, 'import numpy as np\n'), ((16711, 16766), 'numpy.zeros', 
'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (16719, 16766), True, 'import numpy as np\n'), ((17519, 17553), 'warnings.warn', 'warnings.warn', (['txt', 'RuntimeWarning'], {}), '(txt, RuntimeWarning)\n', (17532, 17553), False, 'import warnings\n'), ((18152, 18207), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (18160, 18207), True, 'import numpy as np\n'), ((1455, 1477), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1475, 1477), False, 'import traceback\n'), ((17330, 17385), 'numpy.zeros', 'np.zeros', (['[grid.nx, grid.ny, grid.nz]'], {'dtype': 'np.float64'}), '([grid.nx, grid.ny, grid.nz], dtype=np.float64)\n', (17338, 17385), True, 'import numpy as np\n'), ((6343, 6369), 'numpy.zeros', 'np.zeros', (['ngs'], {'dtype': 'float'}), '(ngs, dtype=float)\n', (6351, 6369), True, 'import numpy as np\n'), ((9328, 9366), 'numpy.log10', 'np.log10', (["(hpr0 / ppar['hpr_prim_rout'])"], {}), "(hpr0 / ppar['hpr_prim_rout'])\n", (9336, 9366), True, 'import numpy as np\n'), ((9369, 9396), 'numpy.log10', 'np.log10', (["ppar['prim_rout']"], {}), "(ppar['prim_rout'])\n", (9377, 9396), True, 'import numpy as np\n'), ((14977, 15017), 'numpy.abs', 'np.abs', (["(ppar['ybound'][-1] - np.pi / 2.0)"], {}), "(ppar['ybound'][-1] - np.pi / 2.0)\n", (14983, 15017), True, 'import numpy as np\n'), ((10045, 10102), 'numpy.exp', 'np.exp', (["(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))"], {}), "(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))\n", (10051, 10102), True, 'import numpy as np\n'), ((14488, 14599), 'numpy.exp', 'np.exp', (['(-0.5 * (zz[iy, :] - z0[:, iz, iy]) * (zz[iy, :] - z0[:, iz, iy]) / (hp[:,\n iy, iz] * hp[:, iy, iz]))'], {}), '(-0.5 * (zz[iy, :] - z0[:, iz, iy]) * (zz[iy, :] - z0[:, iz, iy]) / (\n hp[:, iy, iz] * hp[:, iy, iz]))\n', (14494, 14599), True, 'import numpy as np\n'), 
((6441, 6481), 'numpy.arange', 'np.arange', (["ppar['ngs']"], {'dtype': 'np.float64'}), "(ppar['ngs'], dtype=np.float64)\n", (6450, 6481), True, 'import numpy as np\n'), ((12204, 12261), 'numpy.exp', 'np.exp', (["(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))"], {}), "(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))\n", (12210, 12261), True, 'import numpy as np\n'), ((14434, 14454), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (14441, 14454), True, 'import numpy as np\n'), ((11156, 11213), 'numpy.exp', 'np.exp', (["(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))"], {}), "(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))\n", (11162, 11213), True, 'import numpy as np\n'), ((13008, 13065), 'numpy.exp', 'np.exp', (["(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))"], {}), "(-(rcyl / ppar['rdisk']) ** (2.0 - ppar['plsig1']))\n", (13014, 13065), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import misc.utils as utils
from collections import OrderedDict
import torch
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
CiderD_scorer = None
Bleu_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
    """Lazily instantiate the module-level CIDEr-D and BLEU scorers.

    Parameters
    ----------
    cached_tokens : str
        Name of the cached document-frequency file passed to CiderD.
    """
    global CiderD_scorer
    # Create the scorer only on the first call; later calls reuse it
    CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
    global Bleu_scorer
    # BLEU with up to 4-grams
    Bleu_scorer = Bleu_scorer or Bleu(4)
def array_to_str(arr):
    """Convert a token-id sequence to a space-separated string.

    Tokens are emitted up to and including the first 0 (end-of-sequence /
    padding token); everything after it is dropped.
    """
    tokens = []
    for token in arr:
        tokens.append(str(token))
        if token == 0:
            break
    return ' '.join(tokens)
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
    """Compute self-critical sequence training (SCST) rewards.

    The reward of each sampled caption is its weighted CIDEr-D/BLEU score
    minus the score of the greedy (baseline) caption for the same image.

    Parameters
    ----------
    greedy_res : torch.Tensor
        Greedily decoded captions (the baseline), one per sample.
    data_gts : list
        Ground-truth caption arrays, one entry per image.
    gen_result : torch.Tensor
        Sampled captions, shape (batch_size, seq_len).
    opt :
        Options object; reads cider_reward_weight and bleu_reward_weight.

    Returns
    -------
    np.ndarray of shape (batch_size, seq_len): the per-sequence reward
    repeated along the sequence axis.
    """
    batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
    seq_per_img = batch_size // len(data_gts)
    # Candidate captions: indices [0, batch_size) hold the sampled
    # captions, [batch_size, 2*batch_size) the greedy baselines
    res = OrderedDict()
    gen_result = gen_result.data.cpu().numpy()
    greedy_res = greedy_res.data.cpu().numpy()
    for i in range(batch_size):
        res[i] = [array_to_str(gen_result[i])]
    for i in range(batch_size):
        res[batch_size + i] = [array_to_str(greedy_res[i])]
    # Ground-truth references, one list per image
    gts = OrderedDict()
    for i in range(len(data_gts)):
        gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
    # CiderD expects a list of dicts, Bleu a dict keyed by candidate id
    res_ = [{'image_id':i, 'caption': res[i]} for i in range(2 * batch_size)]
    res__ = {i: res[i] for i in range(2 * batch_size)}
    # Map every candidate (sampled and greedy) to the references of its image
    gts = {i: gts[i % batch_size // seq_per_img] for i in range(2 * batch_size)}
    if opt.cider_reward_weight > 0:
        _, cider_scores = CiderD_scorer.compute_score(gts, res_)
        print('Cider scores:', _)
    else:
        cider_scores = 0
    if opt.bleu_reward_weight > 0:
        _, bleu_scores = Bleu_scorer.compute_score(gts, res__)
        # Keep only the BLEU-4 per-sentence scores
        bleu_scores = np.array(bleu_scores[3])
        print('Bleu scores:', _[3])
    else:
        bleu_scores = 0
    scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
    # Self-critical baseline: sampled score minus greedy score
    scores = scores[:batch_size] - scores[batch_size:]
    # Broadcast the per-sequence reward to every time step
    rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
    return rewards
| [
"collections.OrderedDict",
"numpy.repeat",
"pycocoevalcap.bleu.bleu.Bleu",
"pyciderevalcap.ciderD.ciderD.CiderD",
"numpy.array",
"sys.path.append"
] | [((229, 253), 'sys.path.append', 'sys.path.append', (['"""cider"""'], {}), "('cider')\n", (244, 253), False, 'import sys\n'), ((302, 333), 'sys.path.append', 'sys.path.append', (['"""coco-caption"""'], {}), "('coco-caption')\n", (317, 333), False, 'import sys\n'), ((1005, 1018), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1016, 1018), False, 'from collections import OrderedDict\n'), ((1300, 1313), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1311, 1313), False, 'from collections import OrderedDict\n'), ((2192, 2248), 'numpy.repeat', 'np.repeat', (['scores[:, np.newaxis]', 'gen_result.shape[1]', '(1)'], {}), '(scores[:, np.newaxis], gen_result.shape[1], 1)\n', (2201, 2248), True, 'import numpy as np\n'), ((548, 572), 'pyciderevalcap.ciderD.ciderD.CiderD', 'CiderD', ([], {'df': 'cached_tokens'}), '(df=cached_tokens)\n', (554, 572), False, 'from pyciderevalcap.ciderD.ciderD import CiderD\n'), ((629, 636), 'pycocoevalcap.bleu.bleu.Bleu', 'Bleu', (['(4)'], {}), '(4)\n', (633, 636), False, 'from pycocoevalcap.bleu.bleu import Bleu\n'), ((1935, 1959), 'numpy.array', 'np.array', (['bleu_scores[3]'], {}), '(bleu_scores[3])\n', (1943, 1959), True, 'import numpy as np\n')] |
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
from keras.models import load_model
from keras.optimizers import Adam
from model.iou_loss import IoU
from utils import image
from utils import metrics
INPUT_FILE = "./dataset/img-test/KS01_30.png"
OUTPUT_FILE = INPUT_FILE[:-4] + "_detection.png"
MODEL_FILE = "unet_model_whole_100epochs.h5"
def load_image(file):
    """Load an image, resize it to the network input size and normalise it.

    Parameters
    ----------
    file : str
        Path to the image file.

    Returns
    -------
    tuple (img, height, width): img is a float ndarray of shape
    (256, 256, 3) scaled to [0, 1]; height and width are the original
    image dimensions in pixels.
    """
    img = cv2.imread(file)
    height, width = img.shape[:2]
    img = cv2.resize(img, (256, 256))
    img = img / 255.0
    # BUGFIX: a no-op `img.reshape(1, 256, 256, 3)` was removed here —
    # ndarray.reshape returns a new array and the result was discarded;
    # the batch dimension is added in predict_image instead.
    return img, height, width
def predict_image(model, image):
    """Run the segmentation model on a single 256x256x3 image.

    NOTE(review): the parameter name shadows the `utils.image` module
    imported at the top of the file; inside this function `image` is the
    ndarray argument.
    """
    batch = image.reshape(1, 256, 256, 3)  # add the batch dimension
    predictions = model.predict(batch)
    return predictions[0]  # strip the batch dimension again
def main():
    """End-to-end text-detection demo.

    Loads the U-Net model, predicts a segmentation mask for INPUT_FILE,
    crops and resizes the detected document, binarises it, draws bounding
    boxes around the detected contours and saves the annotated image to
    OUTPUT_FILE. Shows intermediate results in OpenCV windows and blocks
    on a key press after each step.
    """
    # INPUT_FILE = "./dataset/img-test/" + input("\n\nEnter file name: ") + ".png"
    # print(INPUT_FILE)
    # OUTPUT_FILE = INPUT_FILE[:-4] + "_detection.png"
    if not os.path.isfile(INPUT_FILE):
        print('Input image not found ', INPUT_FILE)
    else:
        if not os.path.isfile(MODEL_FILE):
            print('Model not found ', MODEL_FILE)
            exit(1)
        else:
            print('Loading model... ', MODEL_FILE)
            # compile=False because the custom IoU loss is attached below
            model = load_model(MODEL_FILE, compile=False)
            model.compile(optimizer=Adam(1e-4), loss=IoU, metrics=['binary_accuracy'])
            print('Loading image... ', INPUT_FILE)
            img, height, width = load_image(INPUT_FILE)
            print('Predicting...')
            mask_image = predict_image(model, img)
            print('Cropping...')
            # Scale the 256x256 mask back to the original image size and
            # convert the [0, 1] float mask to 8-bit (0-255)
            mask_image = cv2.resize(mask_image, (width, height))
            mask_image = cv2.convertScaleAbs(mask_image, alpha = 255)
            cropped_img = image.convert_object(mask_image, cv2.imread(INPUT_FILE))
            cv2.imshow('Cropped', cropped_img)
            cv2.waitKey(0)
            # ------ resize -------
            NEW_SIZE = 1000
            print('Resizing image...')
            height, width = cropped_img.shape[:2]
            # Scale so that the longest side becomes NEW_SIZE pixels
            longest_side = width
            if (height > width): longest_side = height
            ratio = NEW_SIZE / longest_side
            resized_img = cv2.resize(cropped_img, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_CUBIC)
            cv2.imshow('Resized', resized_img)
            cv2.waitKey(0)
            # ----- create threshold -----
            print('Thresholding...')
            thresh = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)
            # _, thresh = cv2.threshold(thresh, 170, 255, cv2.THRESH_BINARY_INV)
            thresh = cv2.adaptiveThreshold(thresh, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 49, 5)
            cv2.imshow('Binarized', thresh)
            cv2.waitKey(0)
            # ----- morphs -----
            print('Morphing...')
            # Opening removes small noise, dilation merges characters into
            # word/line blobs for contour detection
            closing_kernel = np.ones((4, 4), np.uint8)
            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, closing_kernel)
            dilate_kernel = np.ones((5, 8), np.uint8)
            dilation = cv2.dilate(opening, dilate_kernel, iterations=1)
            cv2.imshow('Dilate', dilation)
            cv2.waitKey(0)
            # ----- find contours -----
            print('Finding contours...')
            contours, hierarchy = cv2.findContours(dilation,
                cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            # Draw a green bounding box around every detected text region
            for c in contours:
                rect = cv2.boundingRect(c)
                x, y, width, height = rect
                cv2.rectangle(resized_img, (x, y), (x + width, y + height), (0, 255, 0), 2)
            cv2.imshow('Text detection', resized_img)
            cv2.waitKey(0)
            # ----- save -----
            print('Saving output file...', OUTPUT_FILE)
            plt.imsave(OUTPUT_FILE, resized_img)
            print('Done.')
if __name__ == '__main__':
    main()
| [
"cv2.rectangle",
"keras.optimizers.Adam",
"utils.image.reshape",
"keras.models.load_model",
"cv2.convertScaleAbs",
"numpy.ones",
"cv2.dilate",
"matplotlib.pyplot.imsave",
"cv2.imshow",
"os.path.isfile",
"cv2.morphologyEx",
"cv2.waitKey",
"cv2.adaptiveThreshold",
"cv2.cvtColor",
"cv2.find... | [((401, 417), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (411, 417), False, 'import cv2\n'), ((461, 488), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 256))\n', (471, 488), False, 'import cv2\n'), ((632, 661), 'utils.image.reshape', 'image.reshape', (['(1)', '(256)', '(256)', '(3)'], {}), '(1, 256, 256, 3)\n', (645, 661), False, 'from utils import image\n'), ((872, 898), 'os.path.isfile', 'os.path.isfile', (['INPUT_FILE'], {}), '(INPUT_FILE)\n', (886, 898), False, 'import os\n'), ((978, 1004), 'os.path.isfile', 'os.path.isfile', (['MODEL_FILE'], {}), '(MODEL_FILE)\n', (992, 1004), False, 'import os\n'), ((1162, 1199), 'keras.models.load_model', 'load_model', (['MODEL_FILE'], {'compile': '(False)'}), '(MODEL_FILE, compile=False)\n', (1172, 1199), False, 'from keras.models import load_model\n'), ((1541, 1580), 'cv2.resize', 'cv2.resize', (['mask_image', '(width, height)'], {}), '(mask_image, (width, height))\n', (1551, 1580), False, 'import cv2\n'), ((1606, 1648), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['mask_image'], {'alpha': '(255)'}), '(mask_image, alpha=255)\n', (1625, 1648), False, 'import cv2\n'), ((1748, 1782), 'cv2.imshow', 'cv2.imshow', (['"""Cropped"""', 'cropped_img'], {}), "('Cropped', cropped_img)\n", (1758, 1782), False, 'import cv2\n'), ((1795, 1809), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1806, 1809), False, 'import cv2\n'), ((2234, 2268), 'cv2.imshow', 'cv2.imshow', (['"""Resized"""', 'resized_img'], {}), "('Resized', resized_img)\n", (2244, 2268), False, 'import cv2\n'), ((2281, 2295), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2292, 2295), False, 'import cv2\n'), ((2398, 2443), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_img', 'cv2.COLOR_BGR2GRAY'], {}), '(resized_img, cv2.COLOR_BGR2GRAY)\n', (2410, 2443), False, 'import cv2\n'), ((2546, 2647), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['thresh', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 
'cv2.THRESH_BINARY_INV', '(49)', '(5)'], {}), '(thresh, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 49, 5)\n', (2567, 2647), False, 'import cv2\n'), ((2656, 2687), 'cv2.imshow', 'cv2.imshow', (['"""Binarized"""', 'thresh'], {}), "('Binarized', thresh)\n", (2666, 2687), False, 'import cv2\n'), ((2700, 2714), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2711, 2714), False, 'import cv2\n'), ((2811, 2836), 'numpy.ones', 'np.ones', (['(4, 4)', 'np.uint8'], {}), '((4, 4), np.uint8)\n', (2818, 2836), True, 'import numpy as np\n'), ((2859, 2915), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'closing_kernel'], {}), '(thresh, cv2.MORPH_OPEN, closing_kernel)\n', (2875, 2915), False, 'import cv2\n'), ((2945, 2970), 'numpy.ones', 'np.ones', (['(5, 8)', 'np.uint8'], {}), '((5, 8), np.uint8)\n', (2952, 2970), True, 'import numpy as np\n'), ((2994, 3042), 'cv2.dilate', 'cv2.dilate', (['opening', 'dilate_kernel'], {'iterations': '(1)'}), '(opening, dilate_kernel, iterations=1)\n', (3004, 3042), False, 'import cv2\n'), ((3056, 3086), 'cv2.imshow', 'cv2.imshow', (['"""Dilate"""', 'dilation'], {}), "('Dilate', dilation)\n", (3066, 3086), False, 'import cv2\n'), ((3099, 3113), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3110, 3113), False, 'import cv2\n'), ((3231, 3299), 'cv2.findContours', 'cv2.findContours', (['dilation', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (3247, 3299), False, 'import cv2\n'), ((3574, 3615), 'cv2.imshow', 'cv2.imshow', (['"""Text detection"""', 'resized_img'], {}), "('Text detection', resized_img)\n", (3584, 3615), False, 'import cv2\n'), ((3628, 3642), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3639, 3642), False, 'import cv2\n'), ((3743, 3779), 'matplotlib.pyplot.imsave', 'plt.imsave', (['OUTPUT_FILE', 'resized_img'], {}), '(OUTPUT_FILE, resized_img)\n', (3753, 3779), True, 'import matplotlib.pyplot as plt\n'), 
((1710, 1732), 'cv2.imread', 'cv2.imread', (['INPUT_FILE'], {}), '(INPUT_FILE)\n', (1720, 1732), False, 'import cv2\n'), ((3406, 3425), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3422, 3425), False, 'import cv2\n'), ((3485, 3560), 'cv2.rectangle', 'cv2.rectangle', (['resized_img', '(x, y)', '(x + width, y + height)', '(0, 255, 0)', '(2)'], {}), '(resized_img, (x, y), (x + width, y + height), (0, 255, 0), 2)\n', (3498, 3560), False, 'import cv2\n'), ((1236, 1248), 'keras.optimizers.Adam', 'Adam', (['(0.0001)'], {}), '(0.0001)\n', (1240, 1248), False, 'from keras.optimizers import Adam\n')] |
#!/usr/bin/env python
"""
Machine Learning models compatible with the Genetic Algorithm implemented using Keras
"""
import keras.backend as K
import numpy as np
from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import Adam
from keras.models import Model
from sklearn.model_selection import StratifiedKFold
from .generic_models import GentunModel
K.set_image_data_format('channels_last')
class GeneticCnnModel(GentunModel):
    """Keras implementation of the Genetic CNN model.

    The genes encode, stage by stage, the binary connection matrix of a
    Directed Acyclic Graph (DAG) of convolutional nodes. The model is
    evaluated with k-fold cross-validation on the training data.
    """

    def __init__(self, x_train, y_train, genes, nodes, input_shape, kernels_per_layer, kernel_sizes, dense_units,
                 dropout_probability, classes, kfold=5, epochs=(3,), learning_rate=(1e-3,), batch_size=32):
        """Build the CNN encoded by *genes*.

        `epochs` and `learning_rate` may be given as scalars (a single
        training stage) or as equal-length tuples (successive training
        stages, e.g. with decreasing learning rates).
        """
        super(GeneticCnnModel, self).__init__(x_train, y_train)
        self.model = self.build_model(
            genes, nodes, input_shape, kernels_per_layer, kernel_sizes,
            dense_units, dropout_probability, classes
        )
        self.name = '-'.join(gene for gene in genes.values())
        self.kfold = kfold
        # BUGFIX: learning rates are floats (e.g. 1e-3), so the previous
        # check `type(learning_rate) is int` rejected every scalar learning
        # rate and made the scalar form unusable. Accept any real number.
        if isinstance(epochs, int) and isinstance(learning_rate, (int, float)):
            self.epochs = (epochs,)
            self.learning_rate = (learning_rate,)
        elif isinstance(epochs, tuple) and isinstance(learning_rate, tuple):
            self.epochs = epochs
            self.learning_rate = learning_rate
        else:
            print(epochs, learning_rate)
            raise ValueError("epochs must be an int and learning_rate a number, or both must be tuples.")
        self.batch_size = batch_size

    def plot(self):
        """Draw model to validate gene-to-DAG."""
        from keras.utils import plot_model
        plot_model(self.model, to_file='{}.png'.format(self.name))

    @staticmethod
    def build_dag(x, nodes, connections, kernels):
        """Assemble the convolutional DAG of one stage.

        Parameters
        ----------
        x : tensor produced by the stage's default input node
        nodes : int, number of internal nodes (K_s) of this stage
        connections : str of '0'/'1' bits encoding the lower-triangular
            node connection matrix (K_s*(K_s-1)/2 bits)
        kernels : int, number of kernels of every Conv2D in this stage

        Returns
        -------
        The tensor obtained by summing all sink (output) nodes.
        """
        # Get number of nodes (K_s) using the fact that K_s*(K_s-1)/2 == #bits
        # nodes = int((1 + (1 + 8 * len(connections)) ** 0.5) / 2)
        # Separate bits by whose input they represent (GeneticCNN paper uses a dash)
        ctr = 0
        idx = 0
        separated_connections = []
        while idx + ctr < len(connections):
            ctr += 1
            separated_connections.append(connections[idx:idx + ctr])
            idx += ctr
        # Get outputs by node (dummy output ignored)
        outputs = []
        for node in range(nodes - 1):
            node_outputs = []
            for i, node_connections in enumerate(separated_connections[node:]):
                if node_connections[node] == '1':
                    node_outputs.append(node + i + 1)
            outputs.append(node_outputs)
        outputs.append([])
        # Get inputs by node (dummy input, x, ignored)
        inputs = [[]]
        for node in range(1, nodes):
            node_inputs = []
            for i, connection in enumerate(separated_connections[node - 1]):
                if connection == '1':
                    node_inputs.append(i)
            inputs.append(node_inputs)
        # Build DAG: isolated nodes (no ins and no outs) are skipped entirely
        output_vars = []
        all_vars = [None] * nodes
        for i, (ins, outs) in enumerate(zip(inputs, outputs)):
            if ins or outs:
                if not ins:
                    # Node without predecessors is fed by the stage input
                    tmp = x
                else:
                    add_vars = [all_vars[i] for i in ins]
                    if len(add_vars) > 1:
                        tmp = Add()(add_vars)
                    else:
                        tmp = add_vars[0]
                tmp = Conv2D(kernels, kernel_size=(3, 3), strides=(1, 1), padding='same')(tmp)
                tmp = Activation('relu')(tmp)
                all_vars[i] = tmp
                if not outs:
                    # Node without successors contributes to the stage output
                    output_vars.append(tmp)
        if len(output_vars) > 1:
            return Add()(output_vars)
        return output_vars[0]

    def build_model(self, genes, nodes, input_shape, kernels_per_layer, kernel_sizes,
                    dense_units, dropout_probability, classes):
        """Translate the genes into a Keras Model (not yet compiled)."""
        x_input = Input(input_shape)
        x = x_input
        for layer, kernels in enumerate(kernels_per_layer):
            # Default input node
            x = Conv2D(kernels, kernel_size=kernel_sizes[layer], strides=(1, 1), padding='same')(x)
            x = Activation('relu')(x)
            # Decode internal connections
            connections = genes['S_{}'.format(layer + 1)]
            # If at least one bit is 1, then we need to construct the Directed Acyclic Graph
            if not all([not bool(int(connection)) for connection in connections]):
                x = self.build_dag(x, nodes[layer], connections, kernels)
            # Output node
            x = Conv2D(kernels, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
            x = Activation('relu')(x)
            x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        x = Flatten()(x)
        x = Dense(dense_units, activation='relu')(x)
        x = Dropout(dropout_probability)(x)
        x = Dense(classes, activation='softmax')(x)
        return Model(inputs=x_input, outputs=x, name='GeneticCNN')

    def reset_weights(self):
        """Initialize model weights."""
        # Re-run each layer's kernel initializer so that every CV fold
        # starts training from fresh weights.
        session = K.get_session()
        for layer in self.model.layers:
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel.initializer.run(session=session)

    def cross_validate(self):
        """Train model using k-fold cross validation and
        return mean value of the validation accuracy.
        """
        acc = .0
        kfold = StratifiedKFold(n_splits=self.kfold, shuffle=True)
        # NOTE(review): np.where(self.y_train == 1)[1] assumes one-hot
        # encoded labels — confirm against the data pipeline.
        for fold, (train, validation) in enumerate(kfold.split(self.x_train, np.where(self.y_train == 1)[1])):
            print("KFold {}/{}".format(fold + 1, self.kfold))
            self.reset_weights()
            for epochs, learning_rate in zip(self.epochs, self.learning_rate):
                print("Training {} epochs with learning rate {}".format(epochs, learning_rate))
                self.model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy', metrics=['accuracy'])
                self.model.fit(
                    self.x_train[train], self.y_train[train], epochs=epochs, batch_size=self.batch_size, verbose=1
                )
            acc += self.model.evaluate(self.x_train[validation], self.y_train[validation], verbose=0)[1] / self.kfold
        return acc
| [
"keras.backend.set_image_data_format",
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"numpy.where",
"sklearn.model_selection.StratifiedKFold",
"keras.layers.Input",
"keras.backend.get_session",
"keras.models.Model",
"keras.layers.Activation"... | [((417, 457), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (440, 457), True, 'import keras.backend as K\n'), ((3965, 3983), 'keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (3970, 3983), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((5003, 5054), 'keras.models.Model', 'Model', ([], {'inputs': 'x_input', 'outputs': 'x', 'name': '"""GeneticCNN"""'}), "(inputs=x_input, outputs=x, name='GeneticCNN')\n", (5008, 5054), False, 'from keras.models import Model\n'), ((5143, 5158), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (5156, 5158), True, 'import keras.backend as K\n'), ((5501, 5551), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.kfold', 'shuffle': '(True)'}), '(n_splits=self.kfold, shuffle=True)\n', (5516, 5551), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((4826, 4835), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4833, 4835), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4851, 4888), 'keras.layers.Dense', 'Dense', (['dense_units'], {'activation': '"""relu"""'}), "(dense_units, activation='relu')\n", (4856, 4888), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4904, 4932), 'keras.layers.Dropout', 'Dropout', (['dropout_probability'], {}), '(dropout_probability)\n', (4911, 4932), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4948, 4984), 'keras.layers.Dense', 'Dense', (['classes'], {'activation': '"""softmax"""'}), "(classes, activation='softmax')\n", (4953, 4984), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((3747, 3752), 'keras.layers.Add', 
'Add', ([], {}), '()\n', (3750, 3752), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4113, 4198), 'keras.layers.Conv2D', 'Conv2D', (['kernels'], {'kernel_size': 'kernel_sizes[layer]', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(kernels, kernel_size=kernel_sizes[layer], strides=(1, 1), padding='same'\n )\n", (4119, 4198), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4213, 4231), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4223, 4231), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4764, 4810), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (4776, 4810), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((3469, 3536), 'keras.layers.Conv2D', 'Conv2D', (['kernels'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(kernels, kernel_size=(3, 3), strides=(1, 1), padding='same')\n", (3475, 3536), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((3564, 3582), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3574, 3582), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4635, 4702), 'keras.layers.Conv2D', 'Conv2D', (['kernels'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(kernels, kernel_size=(3, 3), strides=(1, 1), padding='same')\n", (4641, 4702), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((4726, 4744), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4736, 4744), False, 'from keras.layers import Input, 
Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n'), ((5629, 5656), 'numpy.where', 'np.where', (['(self.y_train == 1)'], {}), '(self.y_train == 1)\n', (5637, 5656), True, 'import numpy as np\n'), ((5978, 6000), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5982, 6000), False, 'from keras.optimizers import Adam\n'), ((3363, 3368), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3366, 3368), False, 'from keras.layers import Input, Conv2D, Activation, Add, MaxPooling2D, Flatten, Dense, Dropout\n')] |
import numpy as np
from sklearn import linear_model
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
# Distances/labels exported by a CNN run, used to train classical ML baselines
# (one binary classifier per label column, one-vs-rest over 20 labels).
load_path = "E:\\Research\\Paper 03\\AA_CNN_github\\runs\\ML_One_ORIGIN_NEW\\170613_1497377078_labels.csv\\"
train_dist = np.loadtxt(load_path + "train_dist.txt")
train_label = np.loadtxt(load_path + "train_label.txt")
test_dist = np.loadtxt(load_path + "test_dist.txt")
test_label = np.loadtxt(load_path + "test_label.txt")
svm_results = []
lr_results = []
knc_results = []
for i in range(20):
    clf = svm.SVC()
    clf.fit(train_dist, train_label[:, i])
    r = clf.predict(test_dist)
    svm_results.append(r)
    lr = linear_model.LogisticRegression()
    lr.fit(train_dist, train_label[:, i])
    r = lr.predict(test_dist)
    lr_results.append(r)
    knc = KNeighborsClassifier()
    knc.fit(train_dist, train_label[:, i])
    t = knc.predict(test_dist)
    # BUGFIX: the KNN predictions (t) were discarded and the logistic-
    # regression predictions (r) were appended a second time.
    knc_results.append(t)
# BUGFIX: np.concatenate(..., axis=1) raises on 1-D prediction vectors;
# column_stack builds the presumably intended (n_samples, 20) matrix.
svm_predictions = np.column_stack(svm_results)
| [
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"numpy.concatenate",
"numpy.loadtxt",
"sklearn.svm.SVC"
] | [((251, 291), 'numpy.loadtxt', 'np.loadtxt', (["(load_path + 'train_dist.txt')"], {}), "(load_path + 'train_dist.txt')\n", (261, 291), True, 'import numpy as np\n'), ((306, 347), 'numpy.loadtxt', 'np.loadtxt', (["(load_path + 'train_label.txt')"], {}), "(load_path + 'train_label.txt')\n", (316, 347), True, 'import numpy as np\n'), ((360, 399), 'numpy.loadtxt', 'np.loadtxt', (["(load_path + 'test_dist.txt')"], {}), "(load_path + 'test_dist.txt')\n", (370, 399), True, 'import numpy as np\n'), ((413, 453), 'numpy.loadtxt', 'np.loadtxt', (["(load_path + 'test_label.txt')"], {}), "(load_path + 'test_label.txt')\n", (423, 453), True, 'import numpy as np\n'), ((921, 956), 'numpy.concatenate', 'np.concatenate', (['svm_results'], {'axis': '(1)'}), '(svm_results, axis=1)\n', (935, 956), True, 'import numpy as np\n'), ((535, 544), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (542, 544), False, 'from sklearn import svm\n'), ((655, 688), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (686, 688), False, 'from sklearn import linear_model\n'), ((797, 819), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (817, 819), False, 'from sklearn.neighbors import KNeighborsClassifier\n')] |
import csv
import math
import numpy as np
from visnav.algo import tools
# Rotation errors above this many degrees are classified as failed estimates
MAX_ROTATION_ERR = 7

# Error values substituted for failed frames (NaN targets) so that failed
# cases still carry a large, finite error for regression
FAIL_ERRS = {
    'rel shift error (m/km)': 100,
    'altitude error': 1000,
    'dist error (m/km)': 100,
    'lat error (m/km)': 5,
    'rot error': 15,
}
# read logfiles
def read_data(sm, logfile, predictors, targets):
    """Read a tab-separated navigation log file into ML-ready arrays.

    Parameters
    ----------
    sm : system model object providing calc_visibility(pos)
    logfile : str, path to a tab-separated log file with a header row
    predictors : list of predictor column names; 'distance' and 'visible'
        are computed from the spacecraft position instead of read directly
    targets : list of target (error metric) column names

    Returns
    -------
    X : (n, len(predictors)) float array of predictors
    Y : (n, len(targets)) float array of targets; failed frames (NaN)
        are replaced with the corresponding FAIL_ERRS values
    yc : (n,) bool array, True for failed frames (NaN target or rotation
        error above MAX_ROTATION_ERR)
    labels : list of iteration labels, one per row
    """
    X, Y, rot_err, labels = [], [], [], []
    with open(logfile, newline='') as csvfile:
        data = csv.reader(csvfile, delimiter='\t')
        first = True
        for row in data:
            if len(row) > 10:
                if first:
                    # Header row: resolve column indices by name
                    first = False
                    prd_i = [row.index(p) for p in predictors if p not in ('distance', 'visible')]
                    trg_i = [row.index(t) for t in targets]
                    rot_i = row.index('rot error')
                    pos_i = [row.index(p + ' sc pos') for p in ('x', 'y', 'z')]
                    lbl_i = row.index('iter')
                else:
                    row = np.array(row)
                    try:
                        # BUGFIX: np.float was deprecated in NumPy 1.20 and
                        # later removed; the builtin float is the documented
                        # replacement (same behaviour, float64).
                        pos = row[pos_i].astype(float)
                    except ValueError as e:
                        print('Can\'t convert cols %s to float on row %s' % (pos_i, row[0]))
                        raise e
                    distance = np.sqrt(np.sum(pos ** 2))
                    visib = sm.calc_visibility(pos)
                    j = 0
                    x = [None] * len(predictors)
                    for i, p in enumerate(predictors):
                        if p == 'distance':
                            x[i] = distance
                        elif p == 'visible':
                            x[i] = visib
                        elif p == 'total dev angle':
                            # Wrap to [-180, 180] and take the magnitude
                            x[i] = abs(tools.wrap_degs(row[prd_i[j]].astype(float)))
                            j += 1
                        elif p == 'sol elong':
                            # Convert phase angle to solar elongation
                            x[i] = 180 - row[prd_i[j]].astype(float)
                            j += 1
                        else:
                            x[i] = row[prd_i[j]].astype(float)
                            j += 1
                    X.append(x)
                    Y.append([row[t].astype(float) if len(row) > t else float('nan') for t in trg_i])
                    rot_err.append(row[rot_i].astype(float))
                    labels.append(row[lbl_i])
    X = np.array(X)
    Y = np.array(Y)
    rot_err = np.array(rot_err)

    # for classification of fails
    yc = np.any(np.isnan(Y), axis=1)
    if MAX_ROTATION_ERR > 0:
        # Also count frames with an excessive rotation error as failed
        I = np.logical_not(yc)
        yc[I] = np.abs(tools.wrap_degs(rot_err[I])) > MAX_ROTATION_ERR

    # for regression, set failed to max err
    for i, tn in enumerate(targets):
        Y[np.isnan(Y[:, i]), i] = FAIL_ERRS[tn]
        if tn == 'rot error':
            Y[:, i] = np.abs(tools.wrap_degs(Y[:, i]))
        elif tn == 'dist error (m/km)':
            Y[:, i] = np.abs(Y[:, i])
    return X, Y, yc, labels
| [
"numpy.abs",
"numpy.logical_not",
"numpy.array",
"numpy.sum",
"numpy.isnan",
"visnav.algo.tools.wrap_degs",
"csv.reader"
] | [((2445, 2456), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2453, 2456), True, 'import numpy as np\n'), ((2466, 2477), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (2474, 2477), True, 'import numpy as np\n'), ((2493, 2510), 'numpy.array', 'np.array', (['rot_err'], {}), '(rot_err)\n', (2501, 2510), True, 'import numpy as np\n'), ((450, 485), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (460, 485), False, 'import csv\n'), ((2565, 2576), 'numpy.isnan', 'np.isnan', (['Y'], {}), '(Y)\n', (2573, 2576), True, 'import numpy as np\n'), ((2629, 2647), 'numpy.logical_not', 'np.logical_not', (['yc'], {}), '(yc)\n', (2643, 2647), True, 'import numpy as np\n'), ((2672, 2699), 'visnav.algo.tools.wrap_degs', 'tools.wrap_degs', (['rot_err[I]'], {}), '(rot_err[I])\n', (2687, 2699), False, 'from visnav.algo import tools\n'), ((2816, 2833), 'numpy.isnan', 'np.isnan', (['Y[:, i]'], {}), '(Y[:, i])\n', (2824, 2833), True, 'import numpy as np\n'), ((2915, 2939), 'visnav.algo.tools.wrap_degs', 'tools.wrap_degs', (['Y[:, i]'], {}), '(Y[:, i])\n', (2930, 2939), False, 'from visnav.algo import tools\n'), ((3005, 3020), 'numpy.abs', 'np.abs', (['Y[:, i]'], {}), '(Y[:, i])\n', (3011, 3020), True, 'import numpy as np\n'), ((1018, 1031), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (1026, 1031), True, 'import numpy as np\n'), ((1329, 1345), 'numpy.sum', 'np.sum', (['(pos ** 2)'], {}), '(pos ** 2)\n', (1335, 1345), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import unittest
import numpy as np
from lights.tests.testing_data import CreateTestingData
from lights.model.e_step_functions import EstepFunctions
class Test(unittest.TestCase):
    """Unit tests for the E-step helper functions (EstepFunctions)."""
    def setUp(self):
        # Build a small synthetic dataset and the E-step helper under test.
        data = CreateTestingData()
        alpha, L = data.fixed_effect_time_order, data.n_long_features
        theta, asso_functions = data.theta, data.asso_functions
        self.n_samples = data.n_samples
        # S: Monte-Carlo samples; n_MC is the number of MC draws
        self.S, self.n_MC = data.S, data.S.shape[0]
        self.E_func = EstepFunctions(data.T_u, L, alpha, asso_functions, theta)
        self.E_func.compute_AssociationFunctions(self.S)
        self.ind_1, self.ind_2 = data.ind_1, data.ind_2
        self.data = data
        # Hand-computed expected association-function values reused by tests.
        self.g5_0_1 = np.array(
            [[1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3],
             [1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3],
             [1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3]])
        self.g5_1_3 = np.array(
            [[1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9],
             [1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9],
             [1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9]])
        self.g6_0_1 = np.exp(49) * self.g5_0_1
    def test_g4(self):
        """Tests the g4 function against hand-computed log-values."""
        # NOTE(review): unittest already invokes setUp() before each test, so
        # this explicit call rebuilds the fixture a second time — confirm
        # CreateTestingData() is deterministic before removing it.
        self.setUp()
        theta = self.data.theta
        gamma_0, gamma_1 = theta["gamma_0"], theta["gamma_1"]
        g4 = self.E_func.g4(gamma_0, gamma_1)
        g4_0 = np.exp(np.array([-1.899, 0.961, -9.492, 4.563]))
        g4_1 = np.exp(np.array([0.89 , 1.098, 11.164, -8.141]))
        # compare in log-space to 3 decimals (values span many magnitudes)
        np.testing.assert_almost_equal(np.log(g4[:, 0, 0]), np.log(g4_0), 3)
        np.testing.assert_almost_equal(np.log(g4[:, 1, 1]), np.log(g4_1), 3)
    def test_Lambda_g(self):
        """Tests the Lambda_g function on a small synthetic tensor."""
        # NOTE(review): redundant explicit setUp() — see test_g4.
        self.setUp()
        g4 = np.arange(1, 17).reshape((4, 2, 2))
        f = .02 * np.arange(1, 25).reshape(3, 2, 4)
        Lambda_g4 = self.E_func.Lambda_g(g4, f)
        Lambda_g4_ = np.array([[0.45, 0.5], [1.01, 1.14]])
        np.testing.assert_almost_equal(Lambda_g4[0, :, 0], Lambda_g4_)
    def test_Eg(self):
        """Tests the expectation of g functions."""
        # NOTE(review): redundant explicit setUp() — see test_g4.
        self.setUp()
        g4 = np.arange(1, 17).reshape((4, 2, 2))
        f = .02 * np.arange(1, 25).reshape(3, 2, 4)
        Lambda_1 = self.E_func.Lambda_g(np.ones(shape=(self.n_MC)), f)
        pi_xi = 1 / (1 + np.exp(np.array([-3, -4, -6])))
        Eg4 = self.E_func.Eg(g4, Lambda_1, pi_xi, f)
        Eg4_ = np.array([7.792, 8.792])
        np.testing.assert_almost_equal(Eg4[0, 0], Eg4_, 3)
# BUG FIX: the guard compared against "main" instead of "__main__", so
# running this file directly never executed the test suite.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.ones",
"numpy.log",
"lights.tests.testing_data.CreateTestingData",
"lights.model.e_step_functions.EstepFunctions",
"numpy.exp",
"numpy.array",
"numpy.testing.assert_almost_equal",
"unittest.main",
"numpy.arange"
] | [((2687, 2702), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2700, 2702), False, 'import unittest\n'), ((318, 337), 'lights.tests.testing_data.CreateTestingData', 'CreateTestingData', ([], {}), '()\n', (335, 337), False, 'from lights.tests.testing_data import CreateTestingData\n'), ((586, 643), 'lights.model.e_step_functions.EstepFunctions', 'EstepFunctions', (['data.T_u', 'L', 'alpha', 'asso_functions', 'theta'], {}), '(data.T_u, L, alpha, asso_functions, theta)\n', (600, 643), False, 'from lights.model.e_step_functions import EstepFunctions\n'), ((804, 975), 'numpy.array', 'np.array', (['[[1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3], [1, 2, 4, 0, 0, 0, 0, 0,\n 0, 0, 1, 4, 2, 2, 8 / 3], [1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3]\n ]'], {}), '([[1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3], [1, 2, 4, 0, 0,\n 0, 0, 0, 0, 0, 1, 4, 2, 2, 8 / 3], [1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 1, 4,\n 2, 2, 8 / 3]])\n', (812, 975), True, 'import numpy as np\n'), ((1029, 1200), 'numpy.array', 'np.array', (['[[1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9], [1, 3, 9, 0, 0, 0, 0, 0,\n 0, 0, 1, 6, 3, 9 / 2, 9], [1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9]\n ]'], {}), '([[1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9], [1, 3, 9, 0, 0,\n 0, 0, 0, 0, 0, 1, 6, 3, 9 / 2, 9], [1, 3, 9, 0, 0, 0, 0, 0, 0, 0, 1, 6,\n 3, 9 / 2, 9]])\n', (1037, 1200), True, 'import numpy as np\n'), ((2066, 2103), 'numpy.array', 'np.array', (['[[0.45, 0.5], [1.01, 1.14]]'], {}), '([[0.45, 0.5], [1.01, 1.14]])\n', (2074, 2103), True, 'import numpy as np\n'), ((2112, 2174), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Lambda_g4[0, :, 0]', 'Lambda_g4_'], {}), '(Lambda_g4[0, :, 0], Lambda_g4_)\n', (2142, 2174), True, 'import numpy as np\n'), ((2575, 2599), 'numpy.array', 'np.array', (['[7.792, 8.792]'], {}), '([7.792, 8.792])\n', (2583, 2599), True, 'import numpy as np\n'), ((2608, 2658), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['Eg4[0, 0]', 'Eg4_', '(3)'], {}), '(Eg4[0, 0], Eg4_, 3)\n', (2638, 2658), True, 'import numpy as np\n'), ((1254, 1264), 'numpy.exp', 'np.exp', (['(49)'], {}), '(49)\n', (1260, 1264), True, 'import numpy as np\n'), ((1531, 1571), 'numpy.array', 'np.array', (['[-1.899, 0.961, -9.492, 4.563]'], {}), '([-1.899, 0.961, -9.492, 4.563])\n', (1539, 1571), True, 'import numpy as np\n'), ((1597, 1636), 'numpy.array', 'np.array', (['[0.89, 1.098, 11.164, -8.141]'], {}), '([0.89, 1.098, 11.164, -8.141])\n', (1605, 1636), True, 'import numpy as np\n'), ((1679, 1698), 'numpy.log', 'np.log', (['g4[:, 0, 0]'], {}), '(g4[:, 0, 0])\n', (1685, 1698), True, 'import numpy as np\n'), ((1700, 1712), 'numpy.log', 'np.log', (['g4_0'], {}), '(g4_0)\n', (1706, 1712), True, 'import numpy as np\n'), ((1756, 1775), 'numpy.log', 'np.log', (['g4[:, 1, 1]'], {}), '(g4[:, 1, 1])\n', (1762, 1775), True, 'import numpy as np\n'), ((1777, 1789), 'numpy.log', 'np.log', (['g4_1'], {}), '(g4_1)\n', (1783, 1789), True, 'import numpy as np\n'), ((2419, 2443), 'numpy.ones', 'np.ones', ([], {'shape': 'self.n_MC'}), '(shape=self.n_MC)\n', (2426, 2443), True, 'import numpy as np\n'), ((1909, 1925), 'numpy.arange', 'np.arange', (['(1)', '(17)'], {}), '(1, 17)\n', (1918, 1925), True, 'import numpy as np\n'), ((2291, 2307), 'numpy.arange', 'np.arange', (['(1)', '(17)'], {}), '(1, 17)\n', (2300, 2307), True, 'import numpy as np\n'), ((1963, 1979), 'numpy.arange', 'np.arange', (['(1)', '(25)'], {}), '(1, 25)\n', (1972, 1979), True, 'import numpy as np\n'), ((2345, 2361), 'numpy.arange', 'np.arange', (['(1)', '(25)'], {}), '(1, 25)\n', (2354, 2361), True, 'import numpy as np\n'), ((2482, 2504), 'numpy.array', 'np.array', (['[-3, -4, -6]'], {}), '([-3, -4, -6])\n', (2490, 2504), True, 'import numpy as np\n')] |
# Others
import math
import time
import random
import pickle
import statistics
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from collections import Counter
from scipy.spatial import distance
# Pytorch
from torch.utils.data import DataLoader
# Sklearn
from sklearn import tree
from sklearn.svm import SVC
from sklearn.base import clone
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import euclidean_distances
def encode_categorical(column, gamma):
    """
    One-hot encode a categorical column and perturb it with uniform noise.

    Args:
        column (numpy.array):
            Array of shape (n_samples, 1) holding the raw category values
        gamma (float):
            Upper bound of the uniform noise added to the encoding

    Return:
        ohe_noisy (numpy.array):
            Noisy one-hot encoding, each row renormalised to sum to 1
        ohe (OneHotEncoder):
            Fitted encoder object, kept so the transform can be reused
        list_label (list):
            Sorted unique label values
    """
    list_label = np.unique(column)
    ohe = OneHotEncoder(sparse=False)
    encoded = ohe.fit_transform(column)
    # perturb with uniform noise in [0, gamma), then renormalise each row
    perturbed = encoded + np.random.uniform(0, gamma, encoded.shape)
    ohe_noisy = perturbed / np.sum(perturbed, keepdims=True, axis=1)
    return ohe_noisy, ohe, list_label
def data_transform(dataframe, cat_cols=None, scaling='2minmax', gamma=0.3):
    """
    Prepare a dataframe for training: scale continuous columns, one-hot
    encode (with uniform noise) the categorical columns and the label, and
    concatenate everything. The label is assumed to be the last column.

    Args:
        dataframe (pandas.DataFrame): features plus label (last column)
        cat_cols (list): positional indices of categorical features
        scaling (string): 'minmax', '2minmax' (rescaled to [-1, 1]) or 'standard'
        gamma (float): max value of the uniform noise for categorical encoding

    Return:
        X_encoded (numpy.ndarray): scaled continuous + noisy one-hot categorical
            + noisy one-hot label columns, in that order
        scaler: fitted scaler used on the continuous columns
        ohe_cat (OneHotEncoder or None): encoder for categorical features
        ohe_label (OneHotEncoder): encoder for the label column
        list_label (list): sorted label values
        num_cont (int): number of continuous columns
    """
    columns = dataframe.columns.values
    dataframe_copy = dataframe.copy()
    # BUG FIX: the original dropped categorical columns by *position* one at
    # a time; each drop shifts the remaining positions, so with more than one
    # categorical index the wrong columns were removed. Resolve every index
    # to a column name first, then drop them (plus the label) in one call.
    drop_names = [columns[i] for i in (cat_cols or [])]
    drop_names.append(columns[-1])  # the label column
    dataframe_copy.drop(columns=drop_names, inplace=True)
    num_cont = dataframe_copy.shape[1]
    # continuous columns scaling
    X = np.array(dataframe_copy)
    minus_one_one = False
    if scaling == 'minmax':
        scaler = MinMaxScaler()  # bounded in [0, 1]
    elif scaling == '2minmax':
        scaler = MinMaxScaler()  # bounded in [0, 1] and later to [-1, 1]
        minus_one_one = True
    elif scaling == 'standard':
        scaler = StandardScaler()  # zero mean unit variance
    else:
        print("Please type a valid supporterd scaling method: minmax or standard")
        exit()
    X_scaled = scaler.fit_transform(X)
    if minus_one_one:
        X_scaled = -1 + 2*X_scaled  # bounded in [-1, 1]
    # if there are categorical columns, do encoding
    if cat_cols:
        # NOTE(review): reshape(-1, 1) flattens multiple categorical columns
        # into a single column; this path only behaves sensibly for a single
        # categorical feature — confirm with callers.
        cat_columns = dataframe.iloc[:, cat_cols].to_numpy().reshape(-1, 1)
        cat_encoded, ohe_cat, _ = encode_categorical(cat_columns, gamma)
    # label encoding
    labels = dataframe.iloc[:, -1].to_numpy().reshape(-1, 1)
    labels_encoded, ohe_label, list_label = encode_categorical(labels, gamma)
    # concatenate continuous + categorical + label
    if cat_cols:
        X_encoded = np.concatenate((X_scaled, cat_encoded, labels_encoded), axis=1)
    else:
        X_encoded = np.concatenate((X_scaled, labels_encoded), axis=1)
        ohe_cat = None
    return X_encoded, scaler, ohe_cat, ohe_label, list_label, num_cont
DEFAULT_K = 10 # default number of cross-validation folds; choose_k() caps it by the rarest class count
def train_test_split_holistics(dataframe, list_complete_participants, train_test_split=0.7, user_split=False):
    """
    Split a dataframe into train/test sets for a holistic (non-personal) model.

    Args:
        dataframe (pandas.DataFrame): must contain a 'Participant_No' column
            and 'Discrete Thermal Comfort_TA' as the label
        list_complete_participants (list): participant ids eligible for the
            user-wise split; shuffled in place when user_split is True
        train_test_split (float): fraction of rows (or participants) kept for
            training. NOTE(review): this parameter name shadows sklearn's
            train_test_split function inside this scope — rename candidate.
        user_split (bool): when True, hold out whole participants for testing;
            otherwise do a row-wise random split

    Returns:
        (df_train, df_test, df_train_binary, df_test_binary): multi-class
        splits plus copies whose label is binarised (1 = not comfortable, 0 = comfortable)
    """
    df = dataframe.copy()
    if user_split:
        # fixed seeds keep the held-out participant set reproducible
        random.seed(75)
        random.shuffle(list_complete_participants)
        random.seed(75)
        test_participants = random.sample(set(list_complete_participants),
                                          int(round((1 - train_test_split) * len(list_complete_participants))))
        print("Num participants in test set: {}".format(len(test_participants)))
        # only pick the train_test_split% of the complete participants for testing
        df_test = df[df['Participant_No'].isin(test_participants)]
        print("Testing on participants:")
        print(df_test['Participant_No'].unique())
        # use the rest for training (the negate of above)
        df_train = df[~df['Participant_No'].isin(test_participants)]
    else:
        # shuffle rows with a fixed seed for reproducibility
        df = df.sample(frac=1, random_state=100).reset_index(drop=True)
        # determine split
        idx_split = int(df.shape[0] * train_test_split)
        # split the dataframe
        df_train = df.iloc[:idx_split, :]
        df_test = df.iloc[idx_split:, :]
    # removing the participant number since it's a holistic model
    del df_test['Participant_No']
    del df_train['Participant_No']
    # shuffle both splits again (fixed seed)
    df_train = df_train.sample(frac=1, random_state=100).reset_index(drop=True)
    df_test = df_test.sample(frac=1, random_state=100).reset_index(drop=True)
    # create binary label versions of the sets
    df_train_binary = df_train.copy()
    df_test_binary = df_test.copy()
    df_train_binary['Discrete Thermal Comfort_TA'] = df_train['Discrete Thermal Comfort_TA'].map(lambda x: 1 if x != 0 else 0)
    df_test_binary['Discrete Thermal Comfort_TA'] = df_test['Discrete Thermal Comfort_TA'].map(lambda x: 1 if x != 0 else 0)
    return df_train, df_test, df_train_binary, df_test_binary
def choose_k(train_labels):
    """
    Pick the number of CV folds: DEFAULT_K, unless the rarest class has
    fewer members than that (stratified CV needs every fold populated).
    """
    DEFAULT_K = 10
    rarest_count = min(Counter(train_labels).values())
    return min(rarest_count, DEFAULT_K)
def find_model_param(train_vectors, train_labels, trainclf, parameters, scorer, useSampleWeight=False, log=False):
    """
    Grid-search the best hyper-parameter combination for a model using
    stratified k-fold CV (k chosen by choose_k).

    Args:
        train_vectors, train_labels: training data
        trainclf: estimator to tune
        parameters (dict): GridSearchCV parameter grid
        scorer (str): scoring string, e.g. 'f1_micro'
        useSampleWeight (bool): weight samples by inverse class frequency
        log (bool): print fold count and best parameters

    Returns:
        The best estimator found by the grid search.
    """
    k = choose_k(train_labels)  # get number of folds
    stratifiedKFold = StratifiedKFold(n_splits = k)
    fit_kwargs = {}
    if useSampleWeight:
        # inverse-frequency weights: n_samples / (n_classes * count(class))
        n_samples = len(train_labels)
        n_classes = len(set(train_labels))
        classCounter = Counter(train_labels)
        fit_kwargs['sample_weight'] = [n_samples / (n_classes * classCounter[label]) for label in train_labels]
    # BUG FIX: the `fit_params` constructor argument was deprecated in
    # scikit-learn 0.19 and removed in 0.21; fit-time keyword arguments such
    # as sample_weight must be passed to .fit() instead.
    gridSearch = GridSearchCV(trainclf, parameters, cv = stratifiedKFold, scoring = scorer)
    gridSearch.fit(train_vectors, train_labels, **fit_kwargs)
    if log:
        print("Number of folds: " + str(k))
        print("Best parameters set found on development set:")
        print(gridSearch.best_params_)
    return gridSearch.best_estimator_
def train_nb(dataframe, test_size_percentage=0.2, log=False):
    """
    Train a Gaussian Naive-Bayes classifier. The last dataframe column is
    the label; features are standard-scaled, split into train/test, CV
    accuracy is estimated, and the held-out accuracy plus the fitted model
    are returned.

    Returns:
        nb_acc (float): accuracy (f1-micro) on the held-out split
        nb_classifier (GaussianNB): fitted model
    """
    DEFAULT_K = 10
    # create design matrix X and target vector y
    X = np.array(dataframe.iloc[:, 0:dataframe.shape[1] - 1]) # minus 1 for the comfort label
    y = np.array(dataframe.iloc[:, -1])
    scaler = StandardScaler()
    scaled_X = scaler.fit_transform(X)
    # split into train and test
    X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size = test_size_percentage, random_state = 100, stratify = y)
    # instantiate learning model
    nb_classifier = GaussianNB()
    # k-fold cross validation (scoring 'accuracy' equals f1 micro here)
    scores = cross_val_score(nb_classifier, X_train, y_train, cv = DEFAULT_K, scoring = 'accuracy')
    # fitting the model
    nb_classifier.fit(X_train, y_train)
    # predict the response
    y_pred = nb_classifier.predict(X_test)
    # BUG FIX: clf_metrics returns (accuracy, classification_report); the
    # original bound the whole tuple as the "accuracy". Unpack it so the
    # return shape matches train_rdf and downstream accuracy lists.
    nb_acc, _ = clf_metrics(y_test, y_pred, log)
    if log:
        print("Features: {}".format(dataframe.columns.values[:-1])) # minus 1 for the comfort label
        print("Expected accuracy (f1 micro) based on Cross-Validation: ", scores.mean())
        print(nb_classifier)
    return nb_acc, nb_classifier
def train_knn(dataframe, test_size_percentage=0.2, tuning=False, log=False):
    """
    Train a k-Nearest-Neighbours classifier. The last dataframe column is
    the label. When `tuning` is True the hyper-parameters are grid-searched
    (grid after Occutherm, BuildSys'19); otherwise a fixed k=3 brute-force
    model is used.

    Returns:
        knn_acc (float): accuracy (f1-micro) on the held-out split
        knn_classifier (KNeighborsClassifier): fitted model
    """
    # create design matrix X and target vector y
    X = np.array(dataframe.iloc[:, 0:dataframe.shape[1] - 1]) # minus 1 for the comfort label
    y = np.array(dataframe.iloc[:, -1])
    # split into train and test
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size_percentage, random_state = 100, stratify = y)
    # grid values taken from the Occutherm feature sets (K in {4,5,13,14,15})
    parameters = {'n_neighbors' : [4, 5, 13, 14, 15],
                  'weights' : ['uniform', 'distance'],
                  'metric' : ['seuclidean'],
                  'algorithm' : ['brute']}
    scorer = 'f1_micro'
    clf = KNeighborsClassifier(n_neighbors = 3, weights = 'uniform', metric = 'seuclidean', algorithm = 'brute')
    if tuning:
        knn_classifier = find_model_param(X_train, y_train, clf, parameters, scorer)
    else:
        knn_classifier = clone(clf)
    # fitting the model
    knn_classifier.fit(X_train, y_train)
    # predict the response
    y_pred = knn_classifier.predict(X_test)
    # BUG FIX: clf_metrics returns (accuracy, classification_report); unpack
    # it so the return shape matches train_rdf and downstream accuracy lists.
    knn_acc, _ = clf_metrics(y_test, y_pred, log)
    if log:
        print("Features: {}".format(dataframe.columns.values[:-1])) # minus 1 for the comfort label
        print(knn_classifier)
    return knn_acc, knn_classifier
def train_svm(dataframe, test_size_percentage=0.2, tuning=False, log=False):
    """
    Train a Support-Vector-Machine classifier. The last dataframe column is
    the label; features are standard-scaled. When `tuning` is True the
    hyper-parameters are grid-searched (grid after Occutherm, BuildSys'19);
    otherwise a fixed linear-kernel SVC is used.

    Returns:
        svm_acc (float): accuracy (f1-micro) on the held-out split
        svm_classifier (SVC): fitted model
    """
    # create design matrix X and target vector y
    X = np.array(dataframe.iloc[:, 0:dataframe.shape[1] - 1]) # minus 1 for the comfort label
    y = np.array(dataframe.iloc[:, -1])
    scaler = StandardScaler()
    scaled_X = scaler.fit_transform(X)
    # split into train and test
    X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size = test_size_percentage, random_state = 100, stratify = y)
    # grid values taken from the Occutherm feature sets (rbf kernel,
    # balanced class weight, C in {1, 1000}, gamma in {0.1, 0.01})
    parameters = {'C' : [1, 1000],
                  'kernel' : ['rbf'],
                  'gamma' : [0.1, 0.01],
                  'class_weight' : ['balanced']}
    clf = SVC(C = 1, kernel = 'linear', class_weight = None, random_state = 100)
    scorer = 'f1_micro'
    if tuning:
        svm_classifier = find_model_param(X_train, y_train, clf, parameters, scorer)
    else:
        svm_classifier = clone(clf)
    # fitting the model
    svm_classifier.fit(X_train, y_train)
    # predict the response
    y_pred = svm_classifier.predict(X_test)
    # BUG FIX: clf_metrics returns (accuracy, classification_report); unpack
    # it so the return shape matches train_rdf and downstream accuracy lists.
    svm_acc, _ = clf_metrics(y_test, y_pred, log)
    if log:
        print("Features: {}".format(dataframe.columns.values[:-1])) # minus 1 for the comfort label
        print(svm_classifier)
    return svm_acc, svm_classifier
def train_rdf(dataframe, rdf_depth=None, depth_file_name='default', test_size_percentage=0.2, tuning=False, log=False):
    """
    Train a Random-Forest classifier. The last dataframe column is the
    label. The tree depth is either given (`rdf_depth`) or found via a CV
    depth sweep (optimal_tree_depth, which also saves an elbow plot named
    after `depth_file_name`).

    Returns:
        rdf_acc (float): accuracy (f1-micro) on the held-out split
        rdf_optimal (RandomForestClassifier): fitted model
    """
    # create design matrix X and target vector y
    X = np.array(dataframe.iloc[:, 0:dataframe.shape[1] - 1]) # minus 1 for the comfort label
    y = np.array(dataframe.iloc[:, -1])
    # split into train and test CV
    # X_train = train + cv set (train_vectors)
    # X_test = test set (test_vectors)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size_percentage, random_state = 100, stratify = y)
    # from occutherm:
    # FS1: Balanced class weights, Gini Index criterion, 2 minimum sample split, 100 estimators
    # FS2: changed to 1000 estimators;
    # FS3: changed to entropy criterion, and 100 estimators;
    # FS4: changed to balanced subsamples, 100 estimators;
    # FS5: changed to 1000 estimators, Gini criterion
    parameters = {'n_estimators' : [100], #[10, 100, 1000],
                  'criterion' : ['gini'], # ['entropy', 'gini'],
                  'min_samples_split' : [2], # [2, 10, 20, 30],
                  'class_weight' : ['balanced']} # ['balanced', 'balanced_subsample']}
    clf = RandomForestClassifier(n_estimators=100, criterion='gini', min_samples_split=2, class_weight='balanced') #random_state = 100)
    scorer = 'f1_micro'
    if tuning:
        rdf_classifier = find_model_param(X_train, y_train, clf, parameters, scorer)
    else:
        # RDF is fixed for all models, uncomment if a tuned model is needed
        rdf_classifier = clone(clf)
    if rdf_depth is None:
        # find optimal depth and generate model
        optimal_depth = optimal_tree_depth(rdf_classifier, X_train, y_train, depth_file_name)
        # generate the model with the selected paramters plus the optimal depth and do the model fitting
        rdf_optimal = rdf_classifier.set_params(max_depth = optimal_depth)
    else:
        # this statement will be executed when the user inputs a number as the depth based on elbow method plot
        rdf_optimal = rdf_classifier.set_params(max_depth = rdf_depth)
    # fitting the model
    rdf_optimal.fit(X_train, y_train)
    # predict the response
    y_pred = rdf_optimal.predict(X_test)
    # evaluate accuracy (clf_metrics returns (accuracy, classification_report))
    rdf_acc, _ = clf_metrics(y_test, y_pred, log)
    if log:
        print("Features: {}".format(dataframe.columns.values[:-1])) # minus 1 for the comfort label
        print(rdf_optimal)
    return rdf_acc, rdf_optimal
def optimal_tree_depth(clf, train_vectors, train_labels, file_name):
    """
    Sweep tree depths 1..19, score each with stratified CV, and return the
    depth minimising misclassification error. Saves an elbow plot to
    "depth_tree-<file_name>.png".
    """
    DEFAULT_K = 10
    candidate_depths = list(range(1, 20))
    mean_scores = []
    print("Finding optimal tree depth")
    for depth in candidate_depths:
        # reuse the estimator's other parameters, vary only the depth
        candidate = clf.set_params(max_depth = depth)
        fold_scores = cross_val_score(candidate, train_vectors,
                                      train_labels, cv = choose_k(train_labels),
                                      scoring = 'accuracy') # accuracy here is f1 micro
        mean_scores.append(fold_scores.mean())
    # convert to misclassification error; the best depth minimises it
    errors = [1 - s for s in mean_scores]
    optimal_depth = candidate_depths[errors.index(min(errors))]
    print("The optimal depth is: {}".format(optimal_depth))
    print("Expected accuracy (f1 micro) based on Cross-Validation: {}".format(mean_scores[candidate_depths.index(optimal_depth)]))
    # elbow plot: misclassification error vs depth
    fig = plt.figure(figsize=(12, 10))
    plt.plot(candidate_depths, errors)
    plt.xlabel('Tree Depth', fontsize = 20)
    plt.ylabel('Misclassification Error', fontsize = 20)
    plt.legend(fontsize = 15)
    plt.savefig("depth_tree-" + file_name + ".png")
    # plt.show()
    return optimal_depth
def test_clf(df_test, clf_optimal, log=False):
    """
    Evaluate a fitted classifier on a held-out dataframe whose last column
    is the thermal-comfort label.

    Returns:
        acc (float), class_report (dict), y_pred (numpy.ndarray)
    """
    feature_matrix = np.array(df_test.iloc[:, 0:df_test.shape[1] - 1])
    ground_truth = np.array(df_test.iloc[:, -1])
    # predict the response on the test set
    predictions = clf_optimal.predict(feature_matrix)
    acc, class_report = clf_metrics(ground_truth, predictions, log)
    return acc, class_report, predictions
def clf_metrics(test_labels, pred_labels, log=False):
    """
    Compute validation metrics for a classification run.

    Returns:
        acc (float): accuracy score
        class_report (dict): sklearn classification report as a dict
    When `log` is set, also prints micro/macro F1, the confusion matrix and
    a human-readable classification report.
    """
    acc = accuracy_score(test_labels, pred_labels)
    class_report = classification_report(test_labels, pred_labels, output_dict=True, zero_division=0)
    if log:
        micro_f1 = f1_score(test_labels, pred_labels, average = 'micro')
        macro_f1 = f1_score(test_labels, pred_labels, average = 'macro')
        print("Accuracy (f1 micro) on test set: ", acc)
        print("F1 micro on test set: ", micro_f1)
        print("F1 macro on test set: ", macro_f1)
        print("Confusion Matrix: ")
        print(confusion_matrix(test_labels, pred_labels))
        print("Classification Metrics: ")
        print(classification_report(test_labels, pred_labels, zero_division=0))
    return acc, class_report
def evaluation_accuracy(df_synth, dataset_string="occutherm"):
    """
    BAGAN-style accuracy evaluation (Mariani et al., 2018,
    http://arxiv.org/abs/1803.09655): classify the synthetic samples with
    models trained on the original dataset and check the predicted classes
    against the synthetic ground truth. Models (NB, kNN, SVM, RDF) follow
    Occutherm (BuildSys'19).

    Returns:
        (test_accs, train_accs, models): lists ordered [nb, knn, svm, rdf].
    """
    def _load(path):
        # BUG FIX: pickle.load(open(...)) leaked the file handle; use a
        # context manager so each file is closed deterministically.
        with open(path, "rb") as f:
            return pickle.load(f)

    test_accs, train_accs, models = [], [], []
    for clf_name in ("nb", "knn", "svm", "rdf"):
        # load the model trained on real data and its recorded train accuracy
        clf = _load("models/" + dataset_string + "_" + clf_name + "_reall_full.pkl")
        train_acc = _load("metrics/" + dataset_string + "_" + clf_name + "_reall_full_acc.pkl")
        # using the loaded model, test on the synthetic data
        acc, _, _ = test_clf(df_synth, clf)
        test_accs.append(acc)
        train_accs.append(train_acc)
        models.append(clf)
    return test_accs, train_accs, models
def evaluation_variability(df, max_k=30):
    """
    BAGAN-style variability metric (Mariani et al., 2018,
    http://arxiv.org/abs/1803.09655): per class, repeatedly draw two random
    rows and measure their euclidean distance; return the average over all
    classes and draws. Higher — and closer to the real-data baseline — is
    better.
    """
    pair_distances = []
    # for each class sample 2 instances randomly, max_k times
    for label in df.iloc[:, -1].unique():
        per_class = df[df.iloc[:, -1] == label]
        for _ in range(max_k):
            pair = per_class.sample(2)
            # the label column is included in the distance, but it is
            # constant within a class so it contributes zero
            pair_distances.append(
                distance.euclidean(pair.iloc[0, :].values, pair.iloc[1, :].values))
    return statistics.mean(pair_distances)
def evaluation_diversity(df_source, df_target, baseline=False, max_k=30):
    """
    BAGAN-style diversity metric (Mariani et al., 2018,
    http://arxiv.org/abs/1803.09655): draw max_k random rows from df_source
    and average the euclidean distance from each to its nearest neighbour in
    df_target. With baseline=True the source row itself is in df_target, so
    the zero self-distance is skipped (second-smallest is used instead).
    The closer the metric is to the real-data reference, the less overfit
    the generator is.
    """
    nearest_distances = []
    for _ in range(max_k):
        sampled_row = df_source.sample()
        # row of distances from the sampled row to every target row
        dists = euclidean_distances(sampled_row, df_target)[0, :]
        if baseline:
            # the self-distance (exactly 0) must be ignored
            nearest = second_smallest(dists)
        else:
            nearest = np.amin(dists)
        nearest_distances.append(nearest)
    return statistics.mean(nearest_distances)
def second_smallest(numbers):
    """
    Return the second-smallest value in `numbers`. Duplicates count
    separately (for [1, 1, 2] the answer is 1). Returns +inf when fewer
    than two elements are present.
    """
    smallest = runner_up = float('inf')
    for value in numbers:
        if value <= smallest:
            smallest, runner_up = value, smallest
        elif value < runner_up:
            runner_up = value
    return runner_up
def evaluation_classification(df_train, df_test, rdf_depth=None, depth_file_name='default', test_size_percentage=0.2):
    """
    Train the four reference classifiers (NB, kNN, SVM, RDF — after
    Occutherm) on df_train and evaluate them on df_test. The baseline is a
    classifier trained on the imbalanced set.

    Returns:
        (test_accs, train_accs, models, class_report_rdf) with lists ordered
        [nb, knn, svm, rdf]; class_report_rdf is the RDF's report on df_test.
    """
    # train each model; every trainer returns (train accuracy, fitted model)
    trained = [
        train_nb(df_train, test_size_percentage),
        train_knn(df_train, test_size_percentage),
        train_svm(df_train, test_size_percentage),
        train_rdf(df_train, rdf_depth, depth_file_name, test_size_percentage),
    ]
    train_accs = [acc for acc, _ in trained]
    models = [clf for _, clf in trained]
    # evaluate every fitted model on the held-out dataframe
    test_results = [test_clf(df_test, clf) for clf in models]
    test_accs = [result[0] for result in test_results]
    class_report_rdf = test_results[-1][1]
    return test_accs, train_accs, models, class_report_rdf
def print_network(nn):
    """
    Print a torch module's structure followed by its total parameter count.
    """
    total_params = sum(param.numel() for param in nn.parameters())
    print(nn)
    print('Total number of parameters: %d' % total_params)
    return
def save_pickle(variable, filename):
    """Serialize *variable* to *filename* using pickle (binary mode)."""
    out_file = open(filename, 'wb')
    try:
        pickle.dump(variable, out_file)
    finally:
        out_file.close()
| [
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.prepr... | [((2018, 2035), 'numpy.unique', 'np.unique', (['column'], {}), '(column)\n', (2027, 2035), True, 'import numpy as np\n'), ((2047, 2074), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (2060, 2074), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2167, 2209), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'gamma', 'one_cat.shape'], {}), '(0, gamma, one_cat.shape)\n', (2184, 2209), True, 'import numpy as np\n'), ((4019, 4043), 'numpy.array', 'np.array', (['dataframe_copy'], {}), '(dataframe_copy)\n', (4027, 4043), True, 'import numpy as np\n'), ((7616, 7637), 'collections.Counter', 'Counter', (['train_labels'], {}), '(train_labels)\n', (7623, 7637), False, 'from collections import Counter\n'), ((8022, 8049), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k'}), '(n_splits=k)\n', (8037, 8049), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((9202, 9255), 'numpy.array', 'np.array', (['dataframe.iloc[:, 0:dataframe.shape[1] - 1]'], {}), '(dataframe.iloc[:, 0:dataframe.shape[1] - 1])\n', (9210, 9255), True, 'import numpy as np\n'), ((9296, 9327), 'numpy.array', 'np.array', (['dataframe.iloc[:, -1]'], {}), '(dataframe.iloc[:, -1])\n', (9304, 9327), True, 'import numpy as np\n'), ((9342, 9358), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9356, 9358), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9526, 9622), 'sklearn.model_selection.train_test_split', 'train_test_split', (['scaled_X', 'y'], {'test_size': 'test_size_percentage', 'random_state': '(100)', 'stratify': 'y'}), '(scaled_X, y, test_size=test_size_percentage, random_state=\n 100, stratify=y)\n', (9542, 9622), False, 'from sklearn.model_selection import train_test_split\n'), ((9678, 9690), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (9688, 9690), False, 'from sklearn.naive_bayes import 
GaussianNB\n'), ((9735, 9822), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['nb_classifier', 'X_train', 'y_train'], {'cv': 'DEFAULT_K', 'scoring': '"""accuracy"""'}), "(nb_classifier, X_train, y_train, cv=DEFAULT_K, scoring=\n 'accuracy')\n", (9750, 9822), False, 'from sklearn.model_selection import cross_val_score\n'), ((10626, 10679), 'numpy.array', 'np.array', (['dataframe.iloc[:, 0:dataframe.shape[1] - 1]'], {}), '(dataframe.iloc[:, 0:dataframe.shape[1] - 1])\n', (10634, 10679), True, 'import numpy as np\n'), ((10720, 10751), 'numpy.array', 'np.array', (['dataframe.iloc[:, -1]'], {}), '(dataframe.iloc[:, -1])\n', (10728, 10751), True, 'import numpy as np\n'), ((10914, 11002), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size_percentage', 'random_state': '(100)', 'stratify': 'y'}), '(X, y, test_size=test_size_percentage, random_state=100,\n stratify=y)\n', (10930, 11002), False, 'from sklearn.model_selection import train_test_split\n'), ((11548, 11646), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(3)', 'weights': '"""uniform"""', 'metric': '"""seuclidean"""', 'algorithm': '"""brute"""'}), "(n_neighbors=3, weights='uniform', metric='seuclidean',\n algorithm='brute')\n", (11568, 11646), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((12494, 12547), 'numpy.array', 'np.array', (['dataframe.iloc[:, 0:dataframe.shape[1] - 1]'], {}), '(dataframe.iloc[:, 0:dataframe.shape[1] - 1])\n', (12502, 12547), True, 'import numpy as np\n'), ((12588, 12619), 'numpy.array', 'np.array', (['dataframe.iloc[:, -1]'], {}), '(dataframe.iloc[:, -1])\n', (12596, 12619), True, 'import numpy as np\n'), ((12634, 12650), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12648, 12650), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12849, 12945), 'sklearn.model_selection.train_test_split', 'train_test_split', 
(['scaled_X', 'y'], {'test_size': 'test_size_percentage', 'random_state': '(100)', 'stratify': 'y'}), '(scaled_X, y, test_size=test_size_percentage, random_state=\n 100, stratify=y)\n', (12865, 12945), False, 'from sklearn.model_selection import train_test_split\n'), ((13723, 13785), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1)', 'kernel': '"""linear"""', 'class_weight': 'None', 'random_state': '(100)'}), "(C=1, kernel='linear', class_weight=None, random_state=100)\n", (13726, 13785), False, 'from sklearn.svm import SVC\n'), ((14752, 14805), 'numpy.array', 'np.array', (['dataframe.iloc[:, 0:dataframe.shape[1] - 1]'], {}), '(dataframe.iloc[:, 0:dataframe.shape[1] - 1])\n', (14760, 14805), True, 'import numpy as np\n'), ((14846, 14877), 'numpy.array', 'np.array', (['dataframe.iloc[:, -1]'], {}), '(dataframe.iloc[:, -1])\n', (14854, 14877), True, 'import numpy as np\n'), ((15048, 15136), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size_percentage', 'random_state': '(100)', 'stratify': 'y'}), '(X, y, test_size=test_size_percentage, random_state=100,\n stratify=y)\n', (15064, 15136), False, 'from sklearn.model_selection import train_test_split\n'), ((15774, 15882), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'criterion': '"""gini"""', 'min_samples_split': '(2)', 'class_weight': '"""balanced"""'}), "(n_estimators=100, criterion='gini',\n min_samples_split=2, class_weight='balanced')\n", (15796, 15882), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((18288, 18316), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (18298, 18316), True, 'import matplotlib.pyplot as plt\n'), ((18321, 18342), 'matplotlib.pyplot.plot', 'plt.plot', (['depths', 'MSE'], {}), '(depths, MSE)\n', (18329, 18342), True, 'import matplotlib.pyplot as plt\n'), ((18347, 18384), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tree 
Depth"""'], {'fontsize': '(20)'}), "('Tree Depth', fontsize=20)\n", (18357, 18384), True, 'import matplotlib.pyplot as plt\n'), ((18391, 18441), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Misclassification Error"""'], {'fontsize': '(20)'}), "('Misclassification Error', fontsize=20)\n", (18401, 18441), True, 'import matplotlib.pyplot as plt\n'), ((18448, 18471), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (18458, 18471), True, 'import matplotlib.pyplot as plt\n'), ((18478, 18525), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('depth_tree-' + file_name + '.png')"], {}), "('depth_tree-' + file_name + '.png')\n", (18489, 18525), True, 'import matplotlib.pyplot as plt\n'), ((18677, 18726), 'numpy.array', 'np.array', (['df_test.iloc[:, 0:df_test.shape[1] - 1]'], {}), '(df_test.iloc[:, 0:df_test.shape[1] - 1])\n', (18685, 18726), True, 'import numpy as np\n'), ((18740, 18769), 'numpy.array', 'np.array', (['df_test.iloc[:, -1]'], {}), '(df_test.iloc[:, -1])\n', (18748, 18769), True, 'import numpy as np\n'), ((19218, 19258), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'pred_labels'], {}), '(test_labels, pred_labels)\n', (19232, 19258), False, 'from sklearn.metrics import accuracy_score\n'), ((19278, 19364), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels'], {'output_dict': '(True)', 'zero_division': '(0)'}), '(test_labels, pred_labels, output_dict=True,\n zero_division=0)\n', (19299, 19364), False, 'from sklearn.metrics import classification_report\n'), ((23578, 23604), 'statistics.mean', 'statistics.mean', (['distances'], {}), '(distances)\n', (23593, 23604), False, 'import statistics\n'), ((25344, 25374), 'statistics.mean', 'statistics.mean', (['min_distances'], {}), '(min_distances)\n', (25359, 25374), False, 'import statistics\n'), ((2246, 2292), 'numpy.sum', 'np.sum', (['(one_cat + noise)'], {'keepdims': '(True)', 'axis': '(1)'}), '(one_cat + 
noise, keepdims=True, axis=1)\n', (2252, 2292), True, 'import numpy as np\n'), ((4115, 4129), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4127, 4129), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5088, 5151), 'numpy.concatenate', 'np.concatenate', (['(X_scaled, cat_encoded, labels_encoded)'], {'axis': '(1)'}), '((X_scaled, cat_encoded, labels_encoded), axis=1)\n', (5102, 5151), True, 'import numpy as np\n'), ((5182, 5232), 'numpy.concatenate', 'np.concatenate', (['(X_scaled, labels_encoded)'], {'axis': '(1)'}), '((X_scaled, labels_encoded), axis=1)\n', (5196, 5232), True, 'import numpy as np\n'), ((5715, 5730), 'random.seed', 'random.seed', (['(75)'], {}), '(75)\n', (5726, 5730), False, 'import random\n'), ((5739, 5781), 'random.shuffle', 'random.shuffle', (['list_complete_participants'], {}), '(list_complete_participants)\n', (5753, 5781), False, 'import random\n'), ((5790, 5805), 'random.seed', 'random.seed', (['(75)'], {}), '(75)\n', (5801, 5805), False, 'import random\n'), ((8180, 8201), 'collections.Counter', 'Counter', (['train_labels'], {}), '(train_labels)\n', (8187, 8201), False, 'from collections import Counter\n'), ((8375, 8489), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['trainclf', 'parameters'], {'cv': 'chosen_cv', 'scoring': 'scorer', 'fit_params': "{'sample_weight': sampleWeights}"}), "(trainclf, parameters, cv=chosen_cv, scoring=scorer, fit_params\n ={'sample_weight': sampleWeights})\n", (8387, 8489), False, 'from sklearn.model_selection import GridSearchCV\n'), ((8568, 8632), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['trainclf', 'parameters'], {'cv': 'chosen_cv', 'scoring': 'scorer'}), '(trainclf, parameters, cv=chosen_cv, scoring=scorer)\n', (8580, 8632), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11791, 11801), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (11796, 11801), False, 'from sklearn.base import clone\n'), ((13958, 13968), 
'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (13963, 13968), False, 'from sklearn.base import clone\n'), ((16140, 16150), 'sklearn.base.clone', 'clone', (['clf'], {}), '(clf)\n', (16145, 16150), False, 'from sklearn.base import clone\n'), ((24396, 24436), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['curr_row', 'df_target'], {}), '(curr_row, df_target)\n', (24415, 24436), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((27187, 27211), 'pickle.dump', 'pickle.dump', (['variable', 'f'], {}), '(variable, f)\n', (27198, 27211), False, 'import pickle\n'), ((4198, 4212), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4210, 4212), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((19469, 19520), 'sklearn.metrics.f1_score', 'f1_score', (['test_labels', 'pred_labels'], {'average': '"""micro"""'}), "(test_labels, pred_labels, average='micro')\n", (19477, 19520), False, 'from sklearn.metrics import f1_score\n'), ((19564, 19615), 'sklearn.metrics.f1_score', 'f1_score', (['test_labels', 'pred_labels'], {'average': '"""macro"""'}), "(test_labels, pred_labels, average='macro')\n", (19572, 19615), False, 'from sklearn.metrics import f1_score\n'), ((19669, 19711), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_labels', 'pred_labels'], {}), '(test_labels, pred_labels)\n', (19685, 19711), False, 'from sklearn.metrics import confusion_matrix\n'), ((19769, 19833), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels'], {'zero_division': '(0)'}), '(test_labels, pred_labels, zero_division=0)\n', (19790, 19833), False, 'from sklearn.metrics import classification_report\n'), ((23202, 23268), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['rows.iloc[0, :].values', 'rows.iloc[1, :].values'], {}), '(rows.iloc[0, :].values, rows.iloc[1, :].values)\n', (23220, 23268), False, 'from scipy.spatial import distance\n'), ((24871, 
24895), 'numpy.amin', 'np.amin', (['distances[0, :]'], {}), '(distances[0, :])\n', (24878, 24895), True, 'import numpy as np\n'), ((4332, 4348), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4346, 4348), False, 'from sklearn.preprocessing import StandardScaler\n')] |
"""
Diagnostic Maps.
Diagnostic to produce images of a map with coastlines from a cube.
These plots show latitude vs longitude and the cube value is used as the colour
scale.
Note that this diagnostic assumes that the preprocessors do the bulk of the
hard work, and that the cube received by this diagnostic (via the settings.yml
and metadata.yml files) has no time component, a small number of depth layers,
and a latitude and longitude coordinates.
An appropriate preprocessor for a 3D+time field would be:
preprocessors:
prep_map:
extract_levels:
levels: [100., ]
scheme: linear_extrap
time_average:
This tool is part of the ocean diagnostic tools package in the ESMValTool.
Author: <NAME> (PML)
<EMAIL>
"""
import logging
import os
import sys
from itertools import product
import cartopy
import iris
import iris.quickplot as qplt
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
# Module logger named after this file; a StreamHandler on the root logger
# sends debug statements to stdout so they are visible under ESMValTool.
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def create_ice_cmap(threshold):
    """Create a colour map that is ocean blue below the threshold, white above.

    The threshold is given as a percentage (e.g. 15 for 15% ice cover) and
    is rescaled to the 0-1 range expected by the segmented colormap.
    """
    cut = threshold / 100.
    # RGB components of the "below threshold" ocean-blue colour.
    ocean_blue = (0.0313, 0.237, 0.456)
    cmap_dict = {}
    for index, channel in enumerate(['red', 'green', 'blue']):
        low = ocean_blue[index]
        cmap_dict[channel] = ((0., low, low),
                              (cut, low, 1.),
                              (1., 1., 1.))
    return matplotlib.colors.LinearSegmentedColormap('ice_cmap', cmap_dict)
def calculate_area_time_series(cube, plot_type, threshold):
    """
    Calculate a time series of sea-ice extent or sea-ice area.

    Requires a cube with two spatial dimensions (no depth coordinate)
    plus a time coordinate.

    Parameters
    ----------
    cube: iris.cube.Cube
        Original data
    plot_type: str
        Either 'Ice Extent' (area where cover exceeds the threshold) or
        'Ice Area' (ice fraction multiplied by cell area).
    threshold: float
        Ice cover threshold used by the 'Ice Extent' calculation.

    Returns
    -------
    tuple
        (times as floats, numpy array of areas, in units of m^2)
    """
    kind = plot_type.lower()
    times = diagtools.cube_time_to_float(cube)
    totals = []
    for index, time in enumerate(times):
        time_slice = cube[index]
        icedata = time_slice.data
        area = iris.analysis.cartography.area_weights(time_slice)
        if kind == 'ice extent':
            # Ice extent is the area with more than <threshold>% ice cover.
            icedata = np.ma.masked_where(icedata < threshold, icedata)
            total_area = np.ma.masked_where(icedata.mask, area.data).sum()
        if kind == 'ice area':
            # Ice area is cover fraction * cell area.
            total_area = np.sum(icedata * area)
        logger.debug('Calculating time series area: %s, %s, %s,',
                     index, time, total_area)
        totals.append(total_area)
    return times, np.array(totals)
def make_ts_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make ice extent and ice area time series plots for an individual model.

    One figure is produced per plot type ('Ice Extent', 'Ice Area') and per
    depth layer found in the preprocessed cube.

    Parameters
    ----------
    cfg: dict
        The opened global configuration.
    metadata: dict
        The metadata dictionary for this dataset.
    filename: str
        Path to the preprocessed model file.
    """
    # Load cube, convert units and collapse into seasonal means.
    cube = iris.load_cube(filename)
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)
    # Is this a multi-model dataset? (affects the output path below)
    multi_model = metadata['dataset'].find('MultiModel') > -1
    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)
    # Load image format extension (e.g. '.png').
    image_extention = diagtools.get_image_format(cfg)
    # Load threshold (% ice cover), pole and season.
    threshold = float(cfg['threshold'])
    pole = get_pole(cube)
    season = get_season(cube)
    # Making plots for each layer
    for plot_type in ['Ice Extent', 'Ice Area']:
        for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
            layer = str(layer)
            times, data = calculate_area_time_series(cube_layer,
                                                       plot_type,
                                                       threshold)
            plt.plot(times, data)
            # Add title to plot
            title = ' '.join([metadata['dataset'], pole, 'hemisphere',
                              season, plot_type])
            if layer:
                title = ' '.join(
                    [title, '(', layer,
                     str(cube_layer.coords('depth')[0].units), ')'])
            plt.title(title)
            # y axis label:
            plt.ylabel(' '.join([plot_type, 'm^2']))
            # Determine image filename (spaces are stripped below):
            suffix = '_'.join(['ts', metadata['preprocessor'], season, pole,
                               plot_type, str(layer_index)])\
                + image_extention
            suffix = suffix.replace(' ', '')
            if multi_model:
                path = diagtools.folder(
                    cfg['plot_dir']) + os.path.basename(filename)
                path = path.replace('.nc', suffix)
            else:
                path = diagtools.get_image_path(
                    cfg,
                    metadata,
                    suffix=suffix,
                )
            # Saving files:
            if cfg['write_plots']:
                logger.info('Saving plots to %s', path)
                plt.savefig(path)
            # Always close the figure so layers do not accumulate on one axes.
            plt.close()
def make_polar_map(
        cube,
        pole='North',
        cmap='Blues_r',
):
    """
    Make a polar stereographic map plot of a two dimensional cube.

    Parameters
    ----------
    cube: iris.cube.Cube
        The (two dimensional) cube to plot.
    pole: str
        The polar region to show: 'North' or 'South'.
    cmap: str or matplotlib colormap
        The colour map used for the filled contours.

    Returns
    -------
    tuple
        (matplotlib figure, cartopy GeoAxes)
    """
    fig = plt.figure()
    fig.set_size_inches(7, 7)
    # ####
    # Set limits, based on https://nedbatchelder.com/blog/200806/pylint.html
    if pole not in ['North', 'South']:
        logger.fatal('make_polar_map: hemisphere not provided.')

    if pole == 'North':  # North Hemisphere
        ax1 = plt.subplot(111, projection=cartopy.crs.NorthPolarStereo())
        ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())

    if pole == 'South':  # South Hemisphere
        ax1 = plt.subplot(111, projection=cartopy.crs.SouthPolarStereo())
        ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())

    # np.linspace requires an integer sample count: the original float 21.
    # was deprecated in numpy 1.12 and raises a TypeError since numpy 1.18.
    linrange = np.linspace(0., 100., 21)
    qplt.contourf(cube,
                  linrange,
                  cmap=cmap,
                  linewidth=0,
                  rasterized=True)
    plt.tight_layout()

    # Draw land on top of the data (zorder=10) so coastlines stay visible.
    ax1.add_feature(cartopy.feature.LAND,
                    zorder=10,
                    facecolor=[0.8, 0.8, 0.8], )
    ax1.gridlines(linewidth=0.5,
                  color='black',
                  zorder=20,
                  alpha=0.5,
                  linestyle='--')
    try:
        plt.gca().coastlines()
    except AttributeError:
        logger.warning('make_polar_map: Not able to add coastlines')
    return fig, ax1
def get_pole(cube):
    """Return the hemisphere name as a string (either 'North' or 'South').

    A 5 degree margin around the equator is tolerated on either side.
    Returns False (after a fatal log message) if the latitudes span both
    hemispheres beyond the margin.
    """
    margin = 5.
    latitudes = cube.coord('latitude').points
    if np.max(latitudes) < margin:
        return 'South'
    if np.min(latitudes) > -margin:
        return 'North'
    logger.fatal('get_pole: Not able to determine hemisphere.')
    return False
def get_time_string(cube):
    """Return a climatological season string in the format: "year SEASON"."""
    first_season = cube.coord('clim_season').points[0]
    first_year = cube.coord('year').points[0]
    return '{} {}'.format(int(first_year), first_season.upper())
def get_year(cube):
    """Return the cube's (first) year as a string.

    Indexing ``points[0]`` keeps this consistent with get_time_string()
    and works whether ``points`` is a numpy array or a plain sequence;
    the original ``int(points)`` only worked on size-1 numpy arrays.
    """
    year = cube.coord('year').points
    return str(int(year[0]))
def get_season(cube):
    """Return the cube's climatological season as an upper-case string."""
    seasons = cube.coord('clim_season').points
    first = seasons[0]
    return first.upper()
def make_map_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make polar map plots of fractional ice cover and ice extent.

    Two plot types ('Fractional cover', 'Ice Extent') are drawn at the
    first and last time steps, for each depth layer of the cube.

    Parameters
    ----------
    cfg: dict
        The opened global configuration.
    metadata: dict
        The metadata dictionary for this dataset.
    filename: str
        Path to the preprocessed model file.
    """
    # Load cube, convert units and collapse into seasonal means.
    cube = iris.load_cube(filename)
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)
    # Is this a multi-model dataset? (affects the output path below)
    multi_model = metadata['dataset'].find('MultiModel') > -1
    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)
    # Load image format extension and threshold (% ice cover).
    image_extention = diagtools.get_image_format(cfg)
    threshold = float(cfg['threshold'])
    # Making plots for each layer
    plot_types = ['Fractional cover', 'Ice Extent']
    plot_times = [0, -1]  # first and last time step
    for plot_type, plot_time in product(plot_types, plot_times):
        for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
            layer = str(layer)
            if plot_type == 'Fractional cover':
                cmap = 'Blues_r'
            if plot_type == 'Ice Extent':
                cmap = create_ice_cmap(threshold)
            cube = cube_layer[plot_time]
            # use cube to determine which hemisphere, season and year.
            pole = get_pole(cube)
            time_str = get_time_string(cube)
            # Make the polar map.
            fig, ax1 = make_polar_map(cube,
                                       pole=pole,
                                       cmap=cmap)
            # Add title to plot
            title = ' '.join([metadata['dataset'], plot_type, time_str])
            if layer:
                title = ' '.join([title, '(', layer,
                                  str(cube_layer.coords('depth')[0].units),
                                  ')'])
            plt.title(title)
            # Determine image filename:
            suffix = '_'.join(['ortho_map', plot_type, time_str,
                               str(layer_index)])
            suffix = suffix.replace(' ', '') + image_extention
            if multi_model:
                path = diagtools.folder(cfg['plot_dir'])
                path = path + os.path.basename(filename)
                path = path.replace('.nc', suffix)
            else:
                path = diagtools.get_image_path(
                    cfg,
                    metadata,
                    suffix=suffix,
                )
            # Saving files:
            if cfg['write_plots']:
                logger.info('Saving plots to %s', path)
                plt.savefig(path)
            plt.close()
def agregate_by_season(cube):
    """
    Aggregate the cube into seasonal means.

    Note that it is not currently possible to do this in the preprocessor,
    as the seasonal mean changes the cube units.
    """
    # Add the categorisation coordinates only if they are not already present.
    categorisers = [
        ('clim_season', iris.coord_categorisation.add_season),
        ('season_year', iris.coord_categorisation.add_season_year),
    ]
    for coord_name, add_coord in categorisers:
        if not cube.coords(coord_name):
            add_coord(cube, 'time', name=coord_name)
    return cube.aggregated_by(['clim_season', 'season_year'],
                              iris.analysis.MEAN)
def make_map_extent_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make a polar map overlaying the ice-extent contour at several times.

    One figure is produced per depth layer; each time step's threshold
    contour is drawn in a different colour, with a matching legend entry
    labelled by year.

    Parameters
    ----------
    cfg: dict
        The opened global configuration.
    metadata: dict
        The metadata dictionary for this dataset.
    filename: str
        Path to the preprocessed model file.
    """
    # Load cube, convert units and collapse into seasonal means.
    cube = iris.load_cube(filename)
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)
    # Is this a multi-model dataset? (affects the output path below)
    multi_model = metadata['dataset'].find('MultiModel') > -1
    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)
    # Load image format extension
    image_extention = diagtools.get_image_format(cfg)
    # Load threshold (% ice cover), pole and season
    threshold = float(cfg['threshold'])
    pole = get_pole(cube)
    season = get_season(cube)
    # Start making figure
    for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
        fig = plt.figure()
        fig.set_size_inches(7, 7)
        if pole == 'North':  # North Hemisphere
            projection = cartopy.crs.NorthPolarStereo()
            ax1 = plt.subplot(111, projection=projection)
            ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())
        if pole == 'South':  # South Hemisphere
            projection = cartopy.crs.SouthPolarStereo()
            ax1 = plt.subplot(111, projection=projection)
            ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())
        # Land is drawn on top (zorder=10) so the contours stop at the coast.
        ax1.add_feature(cartopy.feature.LAND,
                        zorder=10,
                        facecolor=[0.8, 0.8, 0.8])
        ax1.gridlines(linewidth=0.5,
                      color='black',
                      zorder=20,
                      alpha=0.5,
                      linestyle='--')
        try:
            plt.gca().coastlines()
        except AttributeError:
            logger.warning('make_polar_map: Not able to add coastlines')
        times = np.array(cube.coord('time').points.astype(float))
        plot_desc = {}
        for time_itr, time in enumerate(times):
            cube = cube_layer[time_itr]
            line_width = 1
            # Colour each time step along the jet colour map.
            color = plt.cm.jet(float(time_itr) / float(len(times)))
            label = get_year(cube)
            plot_desc[time] = {'label': label,
                               'c': [color, ],
                               'lw': [line_width, ],
                               'ls': ['-', ]}
            layer = str(layer)
            qplt.contour(cube,
                         [threshold, ],
                         colors=plot_desc[time]['c'],
                         linewidths=plot_desc[time]['lw'],
                         linestyles=plot_desc[time]['ls'],
                         rasterized=True)
        # Add legend
        legend_size = len(plot_desc.keys()) + 1
        ncols = int(legend_size / 25) + 1
        # Shrink the axes horizontally to make room for the legend column(s).
        ax1.set_position([ax1.get_position().x0,
                          ax1.get_position().y0,
                          ax1.get_position().width * (1. - 0.1 * ncols),
                          ax1.get_position().height])
        fig.set_size_inches(7 + ncols * 1.2, 7)
        # Construct dummy plots so the legend has line handles to show.
        for i in sorted(plot_desc.keys()):
            plt.plot([], [],
                     c=plot_desc[i]['c'][0],
                     lw=plot_desc[i]['lw'][0],
                     ls=plot_desc[i]['ls'][0],
                     label=plot_desc[i]['label'],)
        legd = ax1.legend(loc='center left',
                          ncol=ncols,
                          prop={'size': 10},
                          bbox_to_anchor=(1., 0.5))
        legd.draw_frame(False)
        legd.get_frame().set_alpha(0.)
        # Add title to plot
        title = ' '.join([metadata['dataset'], ])
        if layer:
            title = ' '.join([title, '(', layer,
                              str(cube_layer.coords('depth')[0].units), ')'])
        plt.title(title)
        # Determine image filename:
        suffix = '_'.join(['ortho_map', pole, season, str(layer_index)])
        suffix = suffix.replace(' ', '') + image_extention
        if multi_model:
            path = diagtools.folder(cfg['plot_dir'])
            path = path + os.path.basename(filename)
            path = path.replace('.nc', suffix)
        else:
            path = diagtools.get_image_path(
                cfg,
                metadata,
                suffix=suffix,
            )
        # Saving files:
        if cfg['write_plots']:
            logger.info('Saving plots to %s', path)
            plt.savefig(path)
        plt.close()
def main(cfg):
    """
    Load the config file and send it to the plot makers.

    The cfg is the opened global config.
    """
    for index, metadata_filename in enumerate(cfg['input_files']):
        logger.info(
            'metadata filename:\t%s',
            metadata_filename,
        )
        metadatas = diagtools.get_input_files(cfg, index=index)
        for filename in sorted(metadatas.keys()):
            logger.info('-----------------')
            logger.info(
                'model filenames:\t%s',
                filename,
            )
            metadata = metadatas[filename]
            # Extent maps of an individual model.
            make_map_extent_plots(cfg, metadata, filename)
            # Fractional-cover / ice-extent maps of an individual model.
            make_map_plots(cfg, metadata, filename)
            # Ice extent/area time series of an individual model.
            make_ts_plots(cfg, metadata, filename)
    logger.info('Success')
if __name__ == '__main__':
    # Run the diagnostic with the settings supplied by the ESMValTool runner.
    with run_diagnostic() as config:
        main(config)
| [
"logging.getLogger",
"logging.StreamHandler",
"diagnostic_tools.folder",
"numpy.array",
"esmvaltool.diag_scripts.shared.run_diagnostic",
"diagnostic_tools.get_image_format",
"cartopy.crs.NorthPolarStereo",
"iris.quickplot.contour",
"diagnostic_tools.bgc_units",
"itertools.product",
"matplotlib.p... | [((1109, 1135), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1125, 1135), False, 'import os\n'), ((1168, 1201), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1189, 1201), False, 'import logging\n'), ((1810, 1878), 'matplotlib.colors.LinearSegmentedColormap', 'matplotlib.colors.LinearSegmentedColormap', (['"""ice_cmap"""', 'ice_cmap_dict'], {}), "('ice_cmap', ice_cmap_dict)\n", (1851, 1878), False, 'import matplotlib\n'), ((2281, 2315), 'diagnostic_tools.cube_time_to_float', 'diagtools.cube_time_to_float', (['cube'], {}), '(cube)\n', (2309, 2315), True, 'import diagnostic_tools as diagtools\n'), ((3075, 3089), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3083, 3089), True, 'import numpy as np\n'), ((3439, 3463), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (3453, 3463), False, 'import iris\n'), ((3475, 3524), 'diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (3494, 3524), True, 'import diagnostic_tools as diagtools\n'), ((3725, 3761), 'diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (3755, 3761), True, 'import diagnostic_tools as diagtools\n'), ((3819, 3850), 'diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (3845, 3850), True, 'import diagnostic_tools as diagtools\n'), ((5898, 5910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5908, 5910), True, 'import matplotlib.pyplot as plt\n'), ((6534, 6563), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(21.0)'], {}), '(0.0, 100.0, 21.0)\n', (6545, 6563), True, 'import numpy as np\n'), ((6565, 6635), 'iris.quickplot.contourf', 'qplt.contourf', (['cube', 'linrange'], {'cmap': 'cmap', 'linewidth': '(0)', 'rasterized': '(True)'}), '(cube, linrange, cmap=cmap, linewidth=0, rasterized=True)\n', 
(6578, 6635), True, 'import iris.quickplot as qplt\n'), ((6712, 6730), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6728, 6730), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8392), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (8382, 8392), False, 'import iris\n'), ((8404, 8453), 'diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (8423, 8453), True, 'import diagnostic_tools as diagtools\n'), ((8654, 8690), 'diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (8684, 8690), True, 'import diagnostic_tools as diagtools\n'), ((8763, 8794), 'diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (8789, 8794), True, 'import diagnostic_tools as diagtools\n'), ((8979, 9010), 'itertools.product', 'product', (['plot_types', 'plot_times'], {}), '(plot_types, plot_times)\n', (8986, 9010), False, 'from itertools import product\n'), ((11858, 11882), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (11872, 11882), False, 'import iris\n'), ((11894, 11943), 'diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (11913, 11943), True, 'import diagnostic_tools as diagtools\n'), ((12144, 12180), 'diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (12174, 12180), True, 'import diagnostic_tools as diagtools\n'), ((12238, 12269), 'diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (12264, 12269), True, 'import diagnostic_tools as diagtools\n'), ((1137, 1156), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1154, 1156), False, 'import logging\n'), ((2414, 2468), 'iris.analysis.cartography.area_weights', 'iris.analysis.cartography.area_weights', (['cube[time_itr]'], 
{}), '(cube[time_itr])\n', (2452, 2468), False, 'import iris\n'), ((11017, 11087), 'iris.coord_categorisation.add_season', 'iris.coord_categorisation.add_season', (['cube', '"""time"""'], {'name': '"""clim_season"""'}), "(cube, 'time', name='clim_season')\n", (11053, 11087), False, 'import iris\n'), ((11225, 11300), 'iris.coord_categorisation.add_season_year', 'iris.coord_categorisation.add_season_year', (['cube', '"""time"""'], {'name': '"""season_year"""'}), "(cube, 'time', name='season_year')\n", (11266, 11300), False, 'import iris\n'), ((12517, 12529), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12527, 12529), True, 'import matplotlib.pyplot as plt\n'), ((15490, 15506), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (15499, 15506), True, 'import matplotlib.pyplot as plt\n'), ((16150, 16161), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16159, 16161), True, 'import matplotlib.pyplot as plt\n'), ((16482, 16525), 'diagnostic_tools.get_input_files', 'diagtools.get_input_files', (['cfg'], {'index': 'index'}), '(cfg, index=index)\n', (16507, 16525), True, 'import diagnostic_tools as diagtools\n'), ((17180, 17196), 'esmvaltool.diag_scripts.shared.run_diagnostic', 'run_diagnostic', ([], {}), '()\n', (17194, 17196), False, 'from esmvaltool.diag_scripts.shared import run_diagnostic\n'), ((2604, 2652), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(icedata < threshold)', 'icedata'], {}), '(icedata < threshold, icedata)\n', (2622, 2652), True, 'import numpy as np\n'), ((2841, 2863), 'numpy.sum', 'np.sum', (['(icedata * area)'], {}), '(icedata * area)\n', (2847, 2863), True, 'import numpy as np\n'), ((4382, 4403), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'data'], {}), '(times, data)\n', (4390, 4403), True, 'import matplotlib.pyplot as plt\n'), ((4735, 4751), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4744, 4751), True, 'import matplotlib.pyplot as plt\n'), ((5626, 5637), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5635, 5637), True, 'import matplotlib.pyplot as plt\n'), ((6299, 6324), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (6322, 6324), False, 'import cartopy\n'), ((6491, 6516), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (6514, 6516), False, 'import cartopy\n'), ((9970, 9986), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (9979, 9986), True, 'import matplotlib.pyplot as plt\n'), ((10741, 10752), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10750, 10752), True, 'import matplotlib.pyplot as plt\n'), ((12638, 12668), 'cartopy.crs.NorthPolarStereo', 'cartopy.crs.NorthPolarStereo', ([], {}), '()\n', (12666, 12668), False, 'import cartopy\n'), ((12687, 12726), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'projection'}), '(111, projection=projection)\n', (12698, 12726), True, 'import matplotlib.pyplot as plt\n'), ((12876, 12906), 'cartopy.crs.SouthPolarStereo', 'cartopy.crs.SouthPolarStereo', ([], {}), '()\n', (12904, 12906), False, 'import cartopy\n'), ((12925, 12964), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'projection'}), '(111, projection=projection)\n', (12936, 12964), True, 'import matplotlib.pyplot as plt\n'), ((14052, 14202), 'iris.quickplot.contour', 'qplt.contour', (['cube', '[threshold]'], {'colors': "plot_desc[time]['c']", 'linewidths': "plot_desc[time]['lw']", 'linestyles': "plot_desc[time]['ls']", 'rasterized': '(True)'}), "(cube, [threshold], colors=plot_desc[time]['c'], linewidths=\n plot_desc[time]['lw'], linestyles=plot_desc[time]['ls'], rasterized=True)\n", (14064, 14202), True, 'import iris.quickplot as qplt\n'), ((14800, 14926), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'c': "plot_desc[i]['c'][0]", 'lw': "plot_desc[i]['lw'][0]", 'ls': "plot_desc[i]['ls'][0]", 'label': "plot_desc[i]['label']"}), "([], [], c=plot_desc[i]['c'][0], 
lw=plot_desc[i]['lw'][0], ls=\n plot_desc[i]['ls'][0], label=plot_desc[i]['label'])\n", (14808, 14926), True, 'import matplotlib.pyplot as plt\n'), ((15719, 15752), 'diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (15735, 15752), True, 'import diagnostic_tools as diagtools\n'), ((15886, 15940), 'diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (15910, 15940), True, 'import diagnostic_tools as diagtools\n'), ((16124, 16141), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (16135, 16141), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5379), 'diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (5349, 5379), True, 'import diagnostic_tools as diagtools\n'), ((5595, 5612), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (5606, 5612), True, 'import matplotlib.pyplot as plt\n'), ((6223, 6253), 'cartopy.crs.NorthPolarStereo', 'cartopy.crs.NorthPolarStereo', ([], {}), '()\n', (6251, 6253), False, 'import cartopy\n'), ((6413, 6443), 'cartopy.crs.SouthPolarStereo', 'cartopy.crs.SouthPolarStereo', ([], {}), '()\n', (6441, 6443), False, 'import cartopy\n'), ((7030, 7039), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7037, 7039), True, 'import matplotlib.pyplot as plt\n'), ((10257, 10290), 'diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (10273, 10290), True, 'import diagnostic_tools as diagtools\n'), ((10440, 10494), 'diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (10464, 10494), True, 'import diagnostic_tools as diagtools\n'), ((10710, 10727), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (10721, 10727), True, 'import 
matplotlib.pyplot as plt\n'), ((12775, 12800), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (12798, 12800), False, 'import cartopy\n'), ((13015, 13040), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (13038, 13040), False, 'import cartopy\n'), ((15779, 15805), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (15795, 15805), False, 'import os\n'), ((2678, 2721), 'numpy.ma.masked_where', 'np.ma.masked_where', (['icedata.mask', 'area.data'], {}), '(icedata.mask, area.data)\n', (2696, 2721), True, 'import numpy as np\n'), ((5149, 5182), 'diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (5165, 5182), True, 'import diagnostic_tools as diagtools\n'), ((5206, 5232), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (5222, 5232), False, 'import os\n'), ((10321, 10347), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (10337, 10347), False, 'import os\n'), ((13380, 13389), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13387, 13389), True, 'import matplotlib.pyplot as plt\n')] |
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.

    Args:
        bpayload: Raw bytes of an audio file in any container/codec ffmpeg understands.
        sampling_rate: Sampling rate (Hz) the audio is resampled to.

    Returns:
        A 1-D ``float32`` numpy array of mono audio samples.

    Raises:
        ValueError: If ffmpeg is not installed, or the payload decodes to zero samples.
    """
    # Note: the annotation is `np.ndarray` (the type); `np.array` is a factory function.
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"  # raw little-endian float32 samples on stdout
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data.

    Args:
        sampling_rate: Sampling rate (Hz) ffmpeg captures at.
        chunk_length_s: Duration (seconds) of each yielded chunk.
        format_for_conversion: Raw sample format, either ``"s16le"`` or ``"f32le"``.

    Yields:
        Raw audio byte chunks of ``sampling_rate * chunk_length_s * sample_size`` bytes.

    Raises:
        ValueError: If the sample format or the host platform is unsupported.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        # f-string so the offending format actually appears in the message
        # (the original string literal was missing the `f` prefix).
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    else:
        # Previously an unrecognized platform crashed later with NameError on
        # `format_`; fail with a clear error instead.
        raise ValueError(f"Unsupported platform `{system}` for microphone capture")
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone file through ffmpeg. This will output `partial` overlapping
    chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of
    striding to avoid errors on the "sides" of the various chunks.
    Arguments:
        sampling_rate (`int`):
            The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to
            avoid resampling later.
        chunk_length_s (`float` or `int`):
            The length of the maximum chunk of audio to be sent returned. This includes the eventual striding.
        stream_chunk_s (`float` or `int`)
            The length of the minimal temporary audio to be returned.
        stride_length_s (`float` or `int` or `(float, float)`, *optional*, defaults to `None`)
            The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of
            an audio sample but without using that part to actually make the prediction. Setting this does not change
            the length of the chunk.
        format_for_conversion (`str`, defaults to `f32le`)
            The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le`
            could also be used.
    Return:
        A generator yielding dictionaries of the following form
        `{"sampling_rate": int, "raw": np.array(), "partial" bool}` With optionally a `"stride" (int, int)` key if
        `stride_length_s` is defined.
        `stride` and `raw` are all expressed in `samples`, and `partial` is a boolean saying if the current yield item
        is a whole chunk, or a partial temporary result to be later replaced by another larger chunk.
    """
    # Validate the sample format first, so a bad value fails before any capture
    # machinery is created.
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        # f-string so the offending format actually appears in the message
        # (the original string literal was missing the `f` prefix).
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    # Strides are in bytes here; they are converted back to samples before yielding.
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Chunk the raw bytes produced by `iterator` into pieces of `chunk_len` bytes.

    Each yielded dict carries the chunk under "raw" and its (left, right) overlap
    under "stride". With `stream=True`, incomplete chunks are yielded early with
    `"partial": True` and complete ones carry `"partial": False`.
    """
    left_overlap, right_overlap = stride
    if left_overlap + right_overlap >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({left_overlap}, {right_overlap}) vs {chunk_len}"
        )
    buffer = b""
    current_left = 0  # the very first chunk has no left context
    for piece in iterator:
        buffer += piece
        if stream and len(buffer) < chunk_len:
            # Not enough data for a full chunk yet: emit a temporary result.
            yield {"raw": buffer[:chunk_len], "stride": (current_left, 0), "partial": True}
        else:
            while len(buffer) >= chunk_len:
                # Flush every complete chunk currently buffered.
                out = {"raw": buffer[:chunk_len], "stride": (current_left, right_overlap)}
                if stream:
                    out["partial"] = False
                yield out
                current_left = left_overlap
                buffer = buffer[chunk_len - left_overlap - right_overlap :]
    # Trailing bytes: only meaningful if they extend past the left overlap.
    if len(buffer) > left_overlap:
        out = {"raw": buffer, "stride": (current_left, 0)}
        if stream:
            out["partial"] = False
        yield out
def _ffmpeg_stream(ffmpeg_command, buflen: int):
"""
Internal function to create the generator of data through ffmpeg
"""
bufsize = 2 ** 24 # 16Mo
try:
with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
while True:
raw = ffmpeg_process.stdout.read(buflen)
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| [
"numpy.frombuffer",
"platform.system",
"subprocess.Popen"
] | [((954, 990), 'numpy.frombuffer', 'np.frombuffer', (['out_bytes', 'np.float32'], {}), '(out_bytes, np.float32)\n', (967, 990), True, 'import numpy as np\n'), ((1575, 1592), 'platform.system', 'platform.system', ([], {}), '()\n', (1590, 1592), False, 'import platform\n'), ((5518, 5557), 'numpy.frombuffer', 'np.frombuffer', (["item['raw']"], {'dtype': 'dtype'}), "(item['raw'], dtype=dtype)\n", (5531, 5557), True, 'import numpy as np\n'), ((596, 675), 'subprocess.Popen', 'subprocess.Popen', (['ffmpeg_command'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n', (612, 675), False, 'import subprocess\n'), ((7378, 7451), 'subprocess.Popen', 'subprocess.Popen', (['ffmpeg_command'], {'stdout': 'subprocess.PIPE', 'bufsize': 'bufsize'}), '(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize)\n', (7394, 7451), False, 'import subprocess\n')] |
import numpy
from amuse.units import units
from amuse.units.quantities import is_quantity, value_in, to_quantity
from amuse.datamodel import UnstructuredGrid, StructuredGrid,StructuredBaseGrid
# Optional-dependency probe: the triangulation-based remapper below needs
# matplotlib.tri.LinearTriInterpolator. Record availability in a flag instead
# of failing at import time.
try:
    import matplotlib
    from matplotlib import tri
    if not hasattr(tri, "LinearTriInterpolator"):
        raise Exception("LinearTriInterpolator not in matplotlib.tri")
    matplotlib_available=True
except:
    # bare except is deliberate: any import/attribute failure simply marks
    # matplotlib as unavailable
    matplotlib_available=False
class interpolating_2D_remapper(object):
    """Remap attribute values from a 2D structured source grid onto a target grid.

    A triangulation is built by adding each source cell's midpoint (its value
    averaged from the four corners), splitting every cell into four triangles;
    target values are then obtained by linear interpolation on that
    triangulation via matplotlib.tri.
    """
    def __init__(self, source, target,axes_names="xy"):
        """ this class maps a source grid to a target grid using linear
            interpolation on a triangulation generated by adding a
            midpoint to every cell (source should be a structured grid)
            and thus generating 4 triangles for each cell. Values of the
            midpoints are averaged from the corners.
        """
        if len(source.shape) !=2:
            raise Exception("source grid is not 2D")
        if not isinstance(source, StructuredBaseGrid):
            raise Exception("source grid is not instance of StructuredBaseGrid")
        self.source=source
        self.target=target
        self._axes_names=list(axes_names)
        # build the triangulation once up front; sample()/forward_mapping() reuse it
        self.generate_triangulation()
    def _generate_nodes(self,grid,attributes):
        # Nodes are the Nx*Ny grid corners followed by the (Nx-1)*(Ny-1) cell
        # midpoints, whose attribute values are the average of the 4 corners.
        Nx,Ny=grid.shape
        x,y=numpy.mgrid[0:Nx,0:Ny]
        x1,y1=numpy.mgrid[0:Nx-1,0:Ny-1]
        x_=x.flatten()
        y_=y.flatten()
        x1_=x1.flatten()
        y1_=y1.flatten()
        l1=Nx*Ny
        # NOTE(review): i and i1 are computed but not used in this method
        i=numpy.arange(Nx*Ny).reshape((Nx,Ny))
        i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))
        nodes=UnstructuredGrid(len(x_)+len(x1_))
        for name in attributes:
            values1=getattr(grid,name)[x_,y_]
            # sum of the 4 corner values surrounding each cell midpoint
            values2=getattr(grid,name)[x1_,y1_]+getattr(grid,name)[x1_+1,y1_]+\
                    getattr(grid,name)[x1_,y1_+1]+getattr(grid,name)[x1_+1,y1_+1]
            # seed the attribute on the grid before the bulk assignments --
            # presumably required to initialize storage; confirm against
            # UnstructuredGrid semantics
            setattr(nodes[0], name, 0.*values1[0])
            setattr(nodes[:l1], name, 1.*values1)
            setattr(nodes[l1:], name, values2/4)
        return nodes
    def _generate_elements_and_boundaries(self,grid):
        # Split each of the (Nx-1)*(Ny-1) cells into 4 triangles that share
        # the cell's midpoint node (index offset l1).
        Nx,Ny=grid.shape
        l1=Nx*Ny
        i=numpy.arange(Nx*Ny).reshape((Nx,Ny))
        i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))
        e1=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
        e2=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
        e3=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
        e4=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
        e1[:,0]=i[:-1,:-1].flatten()
        e1[:,1]=i[1:,:-1].flatten()
        e1[:,2]=i1[:,:].flatten()
        e2[:,0]=i[1:,:-1].flatten()
        e2[:,1]=i[1:,1:].flatten()
        e2[:,2]=i1[:,:].flatten()
        e3[:,0]=i[1:,1:].flatten()
        e3[:,1]=i[:-1,1:].flatten()
        e3[:,2]=i1[:,:].flatten()
        e4[:,0]=i[:-1,:-1].flatten()
        e4[:,1]=i1[:,:].flatten()
        e4[:,2]=i[:-1,1:].flatten()
        # interleave so the 4 triangles of each cell are stored consecutively
        elements=numpy.zeros((4*(Nx-1)*(Ny-1),3),dtype='i8')
        elements[0::4,:]=e1
        elements[1::4,:]=e2
        elements[2::4,:]=e3
        elements[3::4,:]=e4
        # boundary node indices: the four outer edges of the grid
        boundaries=[xx.flatten() for xx in [i[:,0],i[-1,:],i[::-1,-1],i[0,::-1]] ]
        elem=UnstructuredGrid(len(elements))
        elem.nodes=elements
        return elem,boundaries
    def convert_grid_to_nodes_and_elements(self, grid, attributes=None):
        # None means "copy every attribute the grid stores"
        if attributes is None:
            attributes=grid.get_attribute_names_defined_in_store()
        nodes=self._generate_nodes(grid, attributes)
        elements,boundaries=self._generate_elements_and_boundaries(grid)
        return nodes,elements,boundaries
    def generate_triangulation(self):
        nodes,elements,boundaries=self.convert_grid_to_nodes_and_elements(self.source, self._axes_names)
        xpos=to_quantity(getattr(nodes,self._axes_names[0]))
        ypos=to_quantity(getattr(nodes,self._axes_names[1]))
        # strip units (remembering them for forward_mapping); matplotlib's
        # triangulation works on bare numbers
        self._xpos_unit=xpos.unit
        xpos=xpos.number
        self._ypos_unit=ypos.unit
        ypos=ypos.number
        n1=elements.nodes[:,0]
        n2=elements.nodes[:,1]
        n3=elements.nodes[:,2]
        elem=numpy.column_stack((n1,n2,n3))
        self._triangulation=tri.Triangulation(xpos,ypos,elem)
    def sample(self, values, xpos, ypos):
        # linear interpolation of the per-node `values` at the given positions
        interpolator=tri.LinearTriInterpolator(self._triangulation,values)
        return interpolator(xpos,ypos)
    def forward_mapping(self, attributes):
        # Interpolate `attributes` from self.source onto self.target in place
        # (results are copied back through channel3 at the end).
        if attributes is None:
            attributes=self.source.get_attribute_names_defined_in_store()
        source=self.source.empty_copy()
        channel1=self.source.new_channel_to(source)
        target=self.target.empty_copy()
        channel2=self.target.new_channel_to(target)
        channel3=target.new_channel_to(self.target)
        channel1.copy_attributes(attributes)
        channel2.copy_attributes(self._axes_names)
        nodes=self._generate_nodes(source,attributes)
        # express target positions in the units the triangulation was built with
        xpos=value_in( getattr(target,self._axes_names[0]), self._xpos_unit)
        ypos=value_in( getattr(target,self._axes_names[1]), self._ypos_unit)
        for attribute in attributes:
            values=to_quantity( getattr(nodes,attribute) )
            unit=values.unit
            values=values.number
            samples=self.sample(values,xpos,ypos)
            setattr(target, attribute, (samples if unit is units.none else (samples | unit)))
        channel3.copy_attributes(attributes)
def conservative_spherical_remapper(*args, **kwargs):
    """Removed stub: this functionality now lives in ``omuse.ext``."""
    raise Exception("conservative_spherical_remapper has moved to omuse.ext")
| [
"matplotlib.tri.Triangulation",
"numpy.column_stack",
"numpy.zeros",
"matplotlib.tri.LinearTriInterpolator",
"numpy.arange"
] | [((2317, 2365), 'numpy.zeros', 'numpy.zeros', (['((Nx - 1) * (Ny - 1), 3)'], {'dtype': '"""i"""'}), "(((Nx - 1) * (Ny - 1), 3), dtype='i')\n", (2328, 2365), False, 'import numpy\n'), ((2369, 2417), 'numpy.zeros', 'numpy.zeros', (['((Nx - 1) * (Ny - 1), 3)'], {'dtype': '"""i"""'}), "(((Nx - 1) * (Ny - 1), 3), dtype='i')\n", (2380, 2417), False, 'import numpy\n'), ((2421, 2469), 'numpy.zeros', 'numpy.zeros', (['((Nx - 1) * (Ny - 1), 3)'], {'dtype': '"""i"""'}), "(((Nx - 1) * (Ny - 1), 3), dtype='i')\n", (2432, 2469), False, 'import numpy\n'), ((2473, 2521), 'numpy.zeros', 'numpy.zeros', (['((Nx - 1) * (Ny - 1), 3)'], {'dtype': '"""i"""'}), "(((Nx - 1) * (Ny - 1), 3), dtype='i')\n", (2484, 2521), False, 'import numpy\n'), ((3000, 3053), 'numpy.zeros', 'numpy.zeros', (['(4 * (Nx - 1) * (Ny - 1), 3)'], {'dtype': '"""i8"""'}), "((4 * (Nx - 1) * (Ny - 1), 3), dtype='i8')\n", (3011, 3053), False, 'import numpy\n'), ((4216, 4248), 'numpy.column_stack', 'numpy.column_stack', (['(n1, n2, n3)'], {}), '((n1, n2, n3))\n', (4234, 4248), False, 'import numpy\n'), ((4276, 4311), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['xpos', 'ypos', 'elem'], {}), '(xpos, ypos, elem)\n', (4293, 4311), False, 'from matplotlib import tri\n'), ((4382, 4436), 'matplotlib.tri.LinearTriInterpolator', 'tri.LinearTriInterpolator', (['self._triangulation', 'values'], {}), '(self._triangulation, values)\n', (4407, 4436), False, 'from matplotlib import tri\n'), ((1536, 1557), 'numpy.arange', 'numpy.arange', (['(Nx * Ny)'], {}), '(Nx * Ny)\n', (1548, 1557), False, 'import numpy\n'), ((2203, 2224), 'numpy.arange', 'numpy.arange', (['(Nx * Ny)'], {}), '(Nx * Ny)\n', (2215, 2224), False, 'import numpy\n'), ((1585, 1618), 'numpy.arange', 'numpy.arange', (['((Nx - 1) * (Ny - 1))'], {}), '((Nx - 1) * (Ny - 1))\n', (1597, 1618), False, 'import numpy\n'), ((2252, 2285), 'numpy.arange', 'numpy.arange', (['((Nx - 1) * (Ny - 1))'], {}), '((Nx - 1) * (Ny - 1))\n', (2264, 2285), False, 'import numpy\n')] 
|
import sys
import os
import math
import random
import numpy as np
import matplotlib.pyplot as plt
# our implementations
import run_ridge
import ridge
import theory
import covar
import output_pert
import naive_covar
#MIN_EPS = 0.00001
#MAX_EPS_CAP = 20.0
#MAX_NAIVE_EPS = 1000.0 # this one can be larger due to doubling
# Command-line help text; per-parameter details are appended from run_ridge.
usage_str = """
Usage: python3 run_many.py datafilename lambda alpha gamma max_norm max_steps num_trials outputdir
Runs 'num_trials' separate trials, writing the output to the files
outputdir/trial_1.txt, outputdir/trial_2.txt, ....
--------
For other parameters:
""" + run_ridge.usage_str
def main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir):
    """Run `num_trials` independent trials and save each one under `outputdir`.

    Writes outputdir/alpha.txt plus one trial_<k>.txt per trial, each containing
    the optimum (opt_res, opt_beta) followed by one result block per method
    (covar, output perturbation, naive covar).
    """
    n = len(X)
    dim = len(X[0])
    if max_norm <= 0.0:
        # non-positive max_norm means "derive it from lambda"
        max_norm = ridge.compute_max_norm(lamb)
    sv_sens = ridge.get_sv_sensitivity(max_norm, n)
    opt_beta_sens = ridge.compute_opt_sensitivity(n, dim, lamb, max_norm)
    compute_err_func = lambda X,Y,beta_hat: ridge.compute_err(X, Y, lamb, beta_hat)
    # Compute opt
    Sigma, R, opt_beta, opt_res = run_ridge.get_matrices_and_opt(X, Y, lamb)
    opt_err = opt_res[0]
    data = (X, Y, opt_err)
    # epsilon search range; the 4.0 factor pads the theoretical upper bounds
    min_eps = 1.0 / n
    max_covar_eps = 4.0 * theory.covar_get_epsilon(alpha, n, dim, max_norm)
    max_naive_eps = max_covar_eps
    max_output_eps = 4.0 * theory.output_pert_linreg_get_epsilon(alpha, n, dim, lamb, max_norm)
    # Create output folder and write value of alpha
    os.makedirs(outputdir)
    with open(outputdir + "/alpha.txt", "w") as f:
        f.write(str(alpha) + "\n")
    # Compute results of methods and save them
    for trial_ind in range(num_trials):
        covar_beta_hat, covar_res = covar.run_covar(Sigma, R, alpha, gamma, max_norm, max_steps, min_eps, max_covar_eps, sv_sens, data, compute_err_func)
        output_beta_hat, output_res = output_pert.run_output_pert(opt_beta, alpha, gamma, max_norm, max_steps, min_eps, max_output_eps, sv_sens, opt_beta_sens, data, compute_err_func)
        naive_beta_hat, naive_res = naive_covar.run_naive(Sigma, R, alpha, gamma, max_norm, min_eps, max_naive_eps, sv_sens, data, compute_err_func)
        with open(outputdir + "/trial_" + str(trial_ind+1) + ".txt", "w") as f:
            f.write(run_ridge.stringify(opt_res))
            f.write("\n")
            f.write(run_ridge.stringify(opt_beta))
            f.write("\n")
            for beta, res in [(covar_beta_hat, covar_res), (output_beta_hat, output_res), (naive_beta_hat, naive_res)]:
                success, excess_err, sv_eps, my_eps, index = res
                two_norm = np.linalg.norm(beta)
                # lambda = 0.0: plain (unregularized) error of the returned beta
                mse = ridge.compute_err(X, Y, 0.0, beta)
                f.write(run_ridge.stringify(("1" if success else "0", excess_err, sv_eps, my_eps, index, two_norm, mse)))
                f.write("\n")
                f.write(run_ridge.stringify(beta))
                f.write("\n")
# when run as script, read parameters from input
# (other python scripts can call main(), above, directly)
if __name__ == "__main__":
    X, Y, lamb, alpha, gamma, max_norm, max_steps = run_ridge.parse_inputs(sys.argv)
    try:
        num_trials = int(sys.argv[7])
        outputdir = sys.argv[8]
    except (IndexError, ValueError):
        # The original bare `except:` swallowed everything, including
        # KeyboardInterrupt; only missing arguments (IndexError) or a
        # non-integer trial count (ValueError) should fall back to usage.
        print(usage_str)
        exit(0)
    main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir)
| [
"os.makedirs",
"run_ridge.get_matrices_and_opt",
"ridge.compute_err",
"naive_covar.run_naive",
"numpy.linalg.norm",
"ridge.get_sv_sensitivity",
"ridge.compute_opt_sensitivity",
"output_pert.run_output_pert",
"ridge.compute_max_norm",
"theory.covar_get_epsilon",
"covar.run_covar",
"run_ridge.st... | [((811, 848), 'ridge.get_sv_sensitivity', 'ridge.get_sv_sensitivity', (['max_norm', 'n'], {}), '(max_norm, n)\n', (835, 848), False, 'import ridge\n'), ((867, 920), 'ridge.compute_opt_sensitivity', 'ridge.compute_opt_sensitivity', (['n', 'dim', 'lamb', 'max_norm'], {}), '(n, dim, lamb, max_norm)\n', (896, 920), False, 'import ridge\n'), ((1052, 1094), 'run_ridge.get_matrices_and_opt', 'run_ridge.get_matrices_and_opt', (['X', 'Y', 'lamb'], {}), '(X, Y, lamb)\n', (1082, 1094), False, 'import run_ridge\n'), ((1417, 1439), 'os.makedirs', 'os.makedirs', (['outputdir'], {}), '(outputdir)\n', (1428, 1439), False, 'import os\n'), ((2945, 2977), 'run_ridge.parse_inputs', 'run_ridge.parse_inputs', (['sys.argv'], {}), '(sys.argv)\n', (2967, 2977), False, 'import run_ridge\n'), ((770, 798), 'ridge.compute_max_norm', 'ridge.compute_max_norm', (['lamb'], {}), '(lamb)\n', (792, 798), False, 'import ridge\n'), ((963, 1002), 'ridge.compute_err', 'ridge.compute_err', (['X', 'Y', 'lamb', 'beta_hat'], {}), '(X, Y, lamb, beta_hat)\n', (980, 1002), False, 'import ridge\n'), ((1188, 1237), 'theory.covar_get_epsilon', 'theory.covar_get_epsilon', (['alpha', 'n', 'dim', 'max_norm'], {}), '(alpha, n, dim, max_norm)\n', (1212, 1237), False, 'import theory\n'), ((1295, 1363), 'theory.output_pert_linreg_get_epsilon', 'theory.output_pert_linreg_get_epsilon', (['alpha', 'n', 'dim', 'lamb', 'max_norm'], {}), '(alpha, n, dim, lamb, max_norm)\n', (1332, 1363), False, 'import theory\n'), ((1636, 1757), 'covar.run_covar', 'covar.run_covar', (['Sigma', 'R', 'alpha', 'gamma', 'max_norm', 'max_steps', 'min_eps', 'max_covar_eps', 'sv_sens', 'data', 'compute_err_func'], {}), '(Sigma, R, alpha, gamma, max_norm, max_steps, min_eps,\n max_covar_eps, sv_sens, data, compute_err_func)\n', (1651, 1757), False, 'import covar\n'), ((1793, 1942), 'output_pert.run_output_pert', 'output_pert.run_output_pert', (['opt_beta', 'alpha', 'gamma', 'max_norm', 'max_steps', 'min_eps', 'max_output_eps', 
'sv_sens', 'opt_beta_sens', 'data', 'compute_err_func'], {}), '(opt_beta, alpha, gamma, max_norm, max_steps,\n min_eps, max_output_eps, sv_sens, opt_beta_sens, data, compute_err_func)\n', (1820, 1942), False, 'import output_pert\n'), ((1976, 2092), 'naive_covar.run_naive', 'naive_covar.run_naive', (['Sigma', 'R', 'alpha', 'gamma', 'max_norm', 'min_eps', 'max_naive_eps', 'sv_sens', 'data', 'compute_err_func'], {}), '(Sigma, R, alpha, gamma, max_norm, min_eps,\n max_naive_eps, sv_sens, data, compute_err_func)\n', (1997, 2092), False, 'import naive_covar\n'), ((2180, 2208), 'run_ridge.stringify', 'run_ridge.stringify', (['opt_res'], {}), '(opt_res)\n', (2199, 2208), False, 'import run_ridge\n'), ((2244, 2273), 'run_ridge.stringify', 'run_ridge.stringify', (['opt_beta'], {}), '(opt_beta)\n', (2263, 2273), False, 'import run_ridge\n'), ((2485, 2505), 'numpy.linalg.norm', 'np.linalg.norm', (['beta'], {}), '(beta)\n', (2499, 2505), True, 'import numpy as np\n'), ((2520, 2554), 'ridge.compute_err', 'ridge.compute_err', (['X', 'Y', '(0.0)', 'beta'], {}), '(X, Y, 0.0, beta)\n', (2537, 2554), False, 'import ridge\n'), ((2571, 2671), 'run_ridge.stringify', 'run_ridge.stringify', (["('1' if success else '0', excess_err, sv_eps, my_eps, index, two_norm, mse)"], {}), "(('1' if success else '0', excess_err, sv_eps, my_eps,\n index, two_norm, mse))\n", (2590, 2671), False, 'import run_ridge\n'), ((2707, 2732), 'run_ridge.stringify', 'run_ridge.stringify', (['beta'], {}), '(beta)\n', (2726, 2732), False, 'import run_ridge\n')] |
import numpy as np
import config
def index2to1(y, x):
    """Flatten 2-D grid coordinates (y, x) into a single column-major index."""
    nx = config.nx
    ny = config.ny
    out_of_bounds = x < 0 or y < 0 or x >= nx or y >= ny
    if out_of_bounds:
        raise IndexError
    return x * ny + y
def index1to2y(k):
    """Recover the y (row) coordinate from a flat column-major index."""
    ny = config.ny
    nx = config.nx
    if not 0 <= k <= nx * ny - 1:
        raise IndexError
    return int(k % ny)
def index1to2x(k):
    """Recover the x (column) coordinate from a flat column-major index."""
    ny = config.ny
    nx = config.nx
    if not 0 <= k <= nx * ny - 1:
        raise IndexError
    return int(k / ny)
def fieldToVec(field):
    """Flatten a 2-D field into a (size, 1) column vector, column-major order."""
    flat = np.ravel(field, order='F')
    return flat.reshape(flat.size, 1)
def vecToField(vec):
    """Reshape a flat column-major vector back into an (ny, nx) field."""
    shape = (config.ny, config.nx)
    return np.reshape(vec, shape, order='F')
| [
"numpy.ravel",
"numpy.reshape"
] | [((522, 548), 'numpy.ravel', 'np.ravel', (['field'], {'order': '"""F"""'}), "(field, order='F')\n", (530, 548), True, 'import numpy as np\n'), ((667, 703), 'numpy.reshape', 'np.reshape', (['vec', '(ny, nx)'], {'order': '"""F"""'}), "(vec, (ny, nx), order='F')\n", (677, 703), True, 'import numpy as np\n')] |
"""
Test cases for module queens
Testing utilities
"""
from modules.queens.utilities.json_array_operations import (
convert_array_in_json,
convert_json_in_array,
)
from modules.queens.utilities.time import count_elapsed_time
import json
import numpy as np
from . import app
def test_converter_in_json(app):
    """The array-to-JSON converter must emit parseable JSON."""
    test_array = np.zeros((2, 2))
    testing_json = convert_array_in_json(test_array)
    result = True
    try:
        json.loads(testing_json)
    except ValueError:
        # `as err` was bound but never used; json.loads raising means the
        # converter produced invalid JSON.
        result = False
    assert result
def test_converter_in_array(app):
    """Round-trip an array through JSON and back, checking a known entry."""
    original = np.zeros((2, 2))
    as_json = convert_array_in_json(original)
    restored = convert_json_in_array(as_json)
    assert restored[0][0] == 0
# Trivial decorated target used by the sibling test to exercise the
# count_elapsed_time decorator.
@count_elapsed_time
def timer():
    """Print a fixed message; used only to exercise the timing decorator."""
    print("hello")
def test_elapsed_time_decorator(app):
    """The decorated function should still return None after timing."""
    outcome = timer()
    assert outcome is None
| [
"modules.queens.utilities.json_array_operations.convert_array_in_json",
"numpy.zeros",
"modules.queens.utilities.json_array_operations.convert_json_in_array",
"json.loads"
] | [((373, 389), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (381, 389), True, 'import numpy as np\n'), ((409, 442), 'modules.queens.utilities.json_array_operations.convert_array_in_json', 'convert_array_in_json', (['test_array'], {}), '(test_array)\n', (430, 442), False, 'from modules.queens.utilities.json_array_operations import convert_array_in_json, convert_json_in_array\n'), ((628, 644), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (636, 644), True, 'import numpy as np\n'), ((664, 697), 'modules.queens.utilities.json_array_operations.convert_array_in_json', 'convert_array_in_json', (['test_array'], {}), '(test_array)\n', (685, 697), False, 'from modules.queens.utilities.json_array_operations import convert_array_in_json, convert_json_in_array\n'), ((711, 746), 'modules.queens.utilities.json_array_operations.convert_json_in_array', 'convert_json_in_array', (['testing_json'], {}), '(testing_json)\n', (732, 746), False, 'from modules.queens.utilities.json_array_operations import convert_array_in_json, convert_json_in_array\n'), ((478, 502), 'json.loads', 'json.loads', (['testing_json'], {}), '(testing_json)\n', (488, 502), False, 'import json\n')] |
import pandas as pd
import numpy as np
from sklearn import preprocessing
# sklearn.cross_validation was removed in scikit-learn 0.20+;
# model_selection is the drop-in replacement.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor

# Forecast the closing price 30 days ahead with a random forest.
df = pd.read_csv('../equity.csv')
df_close = df[[3]]  # NOTE(review): selects the column *labeled* 3, not position 3 -- confirm the CSV header
forecast_out = int(30)  # predicting 30 days into future
df['Prediction'] = df_close.shift(-forecast_out)  # label column with data shifted 30 units up

X = np.array(df.drop(['Prediction'], axis=1))  # explicit axis: positional form was removed in pandas 2.0
X = preprocessing.scale(X)

X_forecast = X[-forecast_out:]  # set X_forecast equal to last 30
X = X[:-forecast_out]  # remove last 30 from X

y = np.array(df['Prediction'])
y = y[:-forecast_out]  # drop rows whose label is NaN after the shift

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# Training
clf = RandomForestRegressor()
clf.fit(X_train, y_train)

# Testing
confidence = clf.score(X_test, y_test)
print("confidence: ", confidence)

forecast_prediction = clf.predict(X_forecast)
print(forecast_prediction)
| [
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_csv",
"numpy.array",
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.scale"
] | [((149, 177), 'pandas.read_csv', 'pd.read_csv', (['"""../equity.csv"""'], {}), "('../equity.csv')\n", (160, 177), True, 'import pandas as pd\n'), ((400, 422), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (419, 422), False, 'from sklearn import preprocessing, cross_validation\n'), ((543, 569), 'numpy.array', 'np.array', (["df['Prediction']"], {}), "(df['Prediction'])\n", (551, 569), True, 'import numpy as np\n'), ((630, 684), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (663, 684), False, 'from sklearn import preprocessing, cross_validation\n'), ((705, 728), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (726, 728), False, 'from sklearn.ensemble import RandomForestRegressor\n')] |
'''
This is a self-contained orbit fitting routine.
This orbit fitter is unique compared to other common orbit fitters in that it
uses a galactocentric generalized plane coordinate system when fitting data
'''
#TODO: Allow fitting on b, ra, or dec instead of l
import numpy as np
import scipy as sc
import scipy.optimize as scopt
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
import galpy
from galpy.orbit import Orbit
from .flags import verbose
from .pot import mwahpy_default_pot
'''
================================================================================
FLAGS
================================================================================
'''
#-------------------------------------------------------------------------------
#DO NOT TOUCH
#Any routine meant to be used by an end user will configure this from input data
vx_flag = 0
vy_flag = 0
vz_flag = 0
vgsr_flag = 0
#-------------------------------------------------------------------------------
'''
================================================================================
PARAMETERS FOR OPTIMIZATION
================================================================================
'''
t_length = 0.5 #Gyr -- total integration time for each candidate orbit
resolution = 1000 # number of timesteps along that integration
ts = np.linspace(0, t_length, num=resolution)*u.Gyr # timestamps (astropy Quantity) at which orbits are evaluated
punishment = 1000 #multiplier for every degree that the lambda fitting function is off
#this can be tweaked based on how many degrees of freedom you have - fewer DOF
#means punishment can/should be smaller
'''
================================================================================
HELPER FUNCTIONS FOR OPTIMIZATION
================================================================================
'''
class OrbitData():
    """Container for observed/model points along an orbit.

    Holds galactic sky coordinates (l, b in degrees), heliocentric distance d,
    velocity components, vgsr, and the associated errors, and derives
    Cartesian positions (x, y, z) from them.
    """
    #all of these parameters can be np.arrays of floats
    def __init__(self, l, b, d, vx, vy, vz, vgsr, b_err, d_err, vx_err, vy_err, vz_err, vgsr_err):
        self.l = l
        self.b = b
        self.d = d #heliocentric distance
        self.vx = vx
        self.vy = vy
        self.vz = vz
        self.vgsr = vgsr
        self.b_err = b_err
        self.d_err = d_err
        self.vx_err = vx_err
        self.vy_err = vy_err
        self.vz_err = vz_err
        self.vgsr_err = vgsr_err
        # galactic (l, b, d) -> Cartesian; the -8 shifts x so the origin is
        # the galactic center with the Sun at x = -8 (kpc, presumably --
        # consistent with the obs=[8., ...] used elsewhere in this module)
        self.x = self.d*np.cos(np.pi/180*self.l)*np.cos(np.pi/180*self.b) - 8
        self.y = self.d*np.sin(np.pi/180*self.l)*np.cos(np.pi/180*self.b)
        self.z = self.d*np.sin(np.pi/180*self.b)
    #add the icrs converted coordinates to this data instance
    def icrs(self):
        # convert galactic (l, b) to equatorial (ra, dec) via astropy
        s = SkyCoord(self.l, self.b, frame='galactic', unit=(u.deg, u.deg))
        s = s.transform_to('icrs')
        self.ra = s.ra
        self.dec = s.dec
    #gets the orbit with the correct vgsr values
    #since you can't just multiply vgsr by -1 to get the correct value along an orbit
    #this needs to be run to compare the vgsr of reverse orbits
    def correctVgsr(self):
        # project the velocity onto the Sun-to-point line ((x+8, y, z)/d)
        self.vgsr = ((self.x + 8) * self.vx + self.y * self.vy + self.z * self.vz)/self.d
#this function just makes a few places in the code cleaner
def getOrbitDataFromOrbit(o, o_rev):
    """Package an integrated orbit and its time-reversed twin as OrbitData.

    o: the orbit
    o_rev: the reversed orbit
    Both must be integrated over ts before calling this function.
    Returns (data_orbit, data_orbit_rev).
    """
    sun = [8., 0., 0., 0., 0., 0.] #observer at the Sun, at rest
    empty = np.array([]) #model orbits carry no measurement errors

    #forward orbit: vx is negated because galpy is left-handed while the
    #data are given in a right-handed coordinate system
    data_orbit = OrbitData(np.array(o.ll(ts)), np.array(o.bb(ts)), np.array(o.dist(ts)),
                           np.array(o.vx(ts, obs=sun))*-1,
                           np.array(o.vy(ts, obs=sun)),
                           np.array(o.vz(ts, obs=sun)),
                           np.array(o.vlos(ts, obs=sun)),
                           empty, empty, empty, empty, empty, empty)
    #reversed orbit: time reversal flips vy and vz; the left-handed vx flip
    #cancels against the reversal, so vx keeps its sign here. vgsr is left
    #empty and recomputed below, since it cannot simply be sign-swapped.
    data_orbit_rev = OrbitData(np.array(o_rev.ll(ts)), np.array(o_rev.bb(ts)), np.array(o_rev.dist(ts)),
                               np.array(o_rev.vx(ts, obs=sun)),
                               np.array(o_rev.vy(ts, obs=sun))*-1,
                               np.array(o_rev.vz(ts, obs=sun))*-1,
                               empty, empty, empty, empty, empty, empty, empty)
    data_orbit_rev.correctVgsr()
    return data_orbit, data_orbit_rev
def getClosestIndex(val, Lam):
    """Locate the entries of Lam nearest to val.

    Returns (array of indices achieving the minimum distance, cost), where
    the cost is that minimum distance scaled by the module-level
    `punishment` constant.
    """
    deviations = np.abs(Lam - val)
    nearest = np.min(deviations)
    hits = np.where(deviations == nearest)
    return hits[0], nearest*punishment
#getPointList: np.array([]), np.array([]), int, int -> np.array([])
#given the full list of Lambdas, outputs the indices of the points within that list closest to our data's Lambdas
#(while keeping in mind that it may wrap from 0 to 360 degrees and vice versa)
def getPointList(vals, Lam):
    """Find, for each value in vals, the index of the closest entry in Lam.

    vals: the Lambda values that you want to find the closest indices to
    Lam: the array of Lambda values that you are searching through
    Returns (list of index arrays, accumulated cost): the cost grows with
    how far each matched Lambda lies from the requested value.
    """
    point_list = []
    costs = 0
    for val in vals:
        #within that segment, find the index which produces the value closest to val
        point, c = getClosestIndex(val, Lam)
        costs += c
        #toss it in the list
        point_list.append(point)
    #NOTE: an unused Lam_list (collecting Lam[point]) was removed as dead code
    return point_list, costs
#getModelFromOrbit: data, orbit, vector, vector -> list(int) x3
#take in data, orbit, and plane info: Output model data corresponding to each data point
def getModelFromOrbit(data, o):
    """Evaluate a test orbit at the data's Lambda positions.

    data: the data that the orbit is being fit to
    o: the test orbit that we are calculating the goodness-of-fit of
    Returns (B, D, vx, vy, vz, vgsr) model arrays plus the Lambda-matching
    cost; a velocity array is zero-filled when its module flag is off.
    """
    #initialize the orbit we are fitting -- we flip it around so that we
    #are fitting both the forwards and the backwards orbit
    ts = np.linspace(0, t_length, num=resolution)*u.Gyr
    o_rev = o.flip()
    o_rev.integrate(ts, mwahpy_default_pot)

    #sign swap on vx because galpy is left-handed, and we are inputting
    #data in a right-handed coordinate system
    data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)

    #full Lambda track: time-flipped reverse orbit followed by forward orbit
    Lam = np.append(np.flip(data_orbit_rev.l), data_orbit.l)

    #indices along the track closest to each data point in Lambda
    point_list, costs = getPointList(data.l, Lam)

    def sample(rev_vals, fwd_vals):
        #stitch the reversed+forward tracks and pick the matched points
        track = np.append(np.flip(rev_vals), fwd_vals)
        return np.array([track[p] for p in point_list]).flatten()

    B_model = sample(data_orbit_rev.b, data_orbit.b)
    D_model = sample(data_orbit_rev.d, data_orbit.d)

    #velocity components are only sampled when that data was provided
    vx_model = sample(data_orbit_rev.vx, data_orbit.vx) if vx_flag else np.zeros(len(B_model))
    vy_model = sample(data_orbit_rev.vy, data_orbit.vy) if vy_flag else np.zeros(len(B_model))
    vz_model = sample(data_orbit_rev.vz, data_orbit.vz) if vz_flag else np.zeros(len(B_model))
    vgsr_model = sample(data_orbit_rev.vgsr, data_orbit.vgsr) if vgsr_flag else np.zeros(len(B_model))

    return B_model, D_model, vx_model, vy_model, vz_model, vgsr_model, costs
#chi_squared: data, galpy.Orbit() --> float
#takes in the observed data and a test orbit and calculates the goodness-of-fit using a chi-squared method
def chiSquared(params, data=[]):
    """Goodness-of-fit of one candidate orbit against the stream data.

    params: (l, b, d, vx, vy, vz) phase-space start of the test orbit;
        params[4] is shifted by -220 km/s for galpy's velocity convention
    data: OrbitData instance holding the observations
    Returns the normalized chi-squared plus the Lambda-matching cost, or
    1e10 when the model sampling failed to line up with the data.
    """
    def term(model, obs, err):
        #chi-squared contribution of a single observable
        return sum(((model - obs)/err)**2)

    #generate and integrate the candidate orbit
    o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4]-220, params[5]], uvw=True, lb=True, ro=8., vo=220., zo=0.)
    o.integrate(ts, mwahpy_default_pot)

    B_model, d_model, vx_model, vy_model, vz_model, vgsr_model, costs = getModelFromOrbit(data, o)

    #B_model sometimes has different length than data.b, no idea why
    #(possibly a race condition); report a huge chi-squared so the
    #optimizer discards these parameters instead of crashing
    if len(B_model) != len(data.b):
        return 1e10

    x2_B = term(B_model, data.b, data.b_err)
    x2_d = term(d_model, data.d, data.d_err)
    #velocity terms only contribute when the matching data was provided
    x2_vx = term(vx_model, data.vx, data.vx_err) if vx_flag else 0
    x2_vy = term(vy_model, data.vy, data.vy_err) if vy_flag else 0
    x2_vz = term(vz_model, data.vz, data.vz_err) if vz_flag else 0
    x2_vgsr = term(vgsr_model, data.vgsr, data.vgsr_err) if vgsr_flag else 0

    #get normalization factor
    N = len(data.l) #number of data points
    n = 5 #number of parameters
    eta = N - n - 1 #normalizing parameter
    if eta <= 0:
        eta = 1 #if you use fewer data points than needed to constrain the problem, then this will still work but it won't be normalized correctly
    x2 = (1/eta) * (x2_B + x2_d + x2_vx + x2_vy + x2_vz + x2_vgsr) + costs #Willett et al. 2009, give or take

    #there's a weird edge case where occasionally x2 is a short array of
    #floats; unwrap it so scipy does not throw an error
    if type(x2) == type(np.array([])):
        x2 = x2[0]
    if flags.verbose:
        print('X^2: ' + str(x2))
    return x2
#optimize: data -> [float, float, float, float, float], (float, float, float), (float, float, float)
#takes in data, then fits a Great Circle to that data and minimizes the chi_squared to fit an orbit to the data
def optimize(data_opt, max_it, bounds, **kwargs):
    """Run differential evolution over chiSquared and return the winner.

    data_opt: OrbitData holding the observations
    max_it: maximum number of generations for the optimizer
    bounds: parameter bounds for (l, b, d, vx, vy, vz)
    Returns (best-fit parameter array, its chi-squared value).
    """
    #differential evolution hyperparameters -- DO NOT TOUCH
    pop_size = 50 #10 times number of parameters
    diff_scaling_factor = 0.8
    crossover_rate = 0.9

    fit = scopt.differential_evolution(
        chiSquared, bounds, args=(data_opt,), strategy='rand1bin',
        maxiter=max_it, popsize=pop_size, mutation=diff_scaling_factor,
        recombination=crossover_rate, workers=-1,
        disp=not(flags.verbose), **kwargs)
    params = fit.x
    x2 = chiSquared(params, data_opt)
    return params, x2
'''
================================================================================
FUNCTIONS
================================================================================
'''
def fit_orbit(l, b, b_err, d, d_err, vx=None, vy=None, vz=None, vgsr=None,
              vx_err=None, vy_err=None, vz_err=None, vgsr_err=None, max_it=20,
              bounds=None, t_len=None, **kwargs):
    """Fit an orbit to stream data by minimizing chiSquared.

    l, b: Galactic coordinates of the data points (degrees)
    b_err: uncertainty in b
    d, d_err: heliocentric distance and its uncertainty
    vx, vy, vz, vgsr: optional velocity data; supplying any of them (as
        np.ndarray) switches on that component's chi-squared term globally
    max_it: maximum number of differential-evolution iterations
    bounds: search bounds for (l, b, d, vx, vy, vz); defaults to the whole
        sky, 0-100 distance, and +/-500 in each velocity component
    t_len: optional override of the orbit integration time (Gyr)
    Returns (best-fit parameters, chi-squared value).
    """
    global vx_flag, vy_flag, vz_flag, vgsr_flag

    #avoid a mutable default argument: build the default bounds per call
    if bounds is None:
        bounds = [(0, 360), (-90, 90), (0, 100), (-500, 500), (-500, 500), (-500, 500)]

    #set proper flags based on input data (isinstance instead of the old
    #type(...) == type(np.array([])) comparison, which rejected subclasses)
    if isinstance(vx, np.ndarray):
        vx_flag = 1
    if isinstance(vy, np.ndarray):
        vy_flag = 1
    if isinstance(vz, np.ndarray):
        vz_flag = 1
    if isinstance(vgsr, np.ndarray):
        vgsr_flag = 1

    #update t_length (and the shared time grid) if necessary
    if t_len is not None:
        global t_length
        t_length = t_len
        global ts
        ts = np.linspace(0, t_length, num=resolution)*u.Gyr

    if flags.verbose:
        print('===================================')
        print('Optimizing:')
        print('===================================')

    #construct data container and run the optimization
    data_opt = OrbitData(l, b, d, vx, vy, vz, vgsr, b_err, d_err, vx_err, vy_err, vz_err, vgsr_err)
    params, x2 = optimize(data_opt, max_it, bounds, **kwargs)

    print('===================================')
    print('Params: l, b, d, vx, vy, vz')
    print(params)
    print()
    print('Chi Squared:')
    print(x2)
    print('===================================')
    return params, x2
'''
================================================================================
PLOTTING
================================================================================
'''
#TODO: Implement unwrap in a way that actually makes sense
#splits every array in the list of arrays, a, every time that the position wraps
#from 0 to 360 or vice versa in a[0]
#therefore, a[0] should be the parameter you are trying to unwrap (longitude)
#returns a list of lists of the unwrapped arrays
def unwrap(a, threshold=10):
    """Split each array in `a` wherever a[0] jumps by more than `threshold`.

    threshold: difference in position needed to trigger a split
    Returns one entry per input array: a list of sub-arrays when at least
    one wrap was found, otherwise the whole array wrapped in a single-row
    np.array.
    """
    jumps = np.abs(a[0][1:] - a[0][:-1]) > threshold
    cut_points = np.nonzero(jumps)[0] + 1
    if len(cut_points) > 0:
        return [np.split(arr, cut_points) for arr in a]
    #no place to split on: keep each array whole
    return [np.array([arr]) for arr in a]
#TODO: Expand this and plotOrbiticrs to allow other velocities
#possibly make them the same function with a switch
#TODO: Split values so wrapping lines don't happen
def plotOrbitgal(l, b, d, params, vgsr=None):
    """Plot the fitted orbit over the data in Galactic coordinates.

    l, b, d: data positions (deg) and heliocentric distances
    params: best-fit (l, b, d, vx, vy, vz) parameters from fit_orbit
    vgsr: optional line-of-sight velocities; adds a third panel when given
    """
    #generate and integrate the best-fit orbit and its time reverse
    o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220, params[5]], uvw=True, lb=True, ro=8., vo=220.)
    o.integrate(ts, mwahpy_default_pot)
    o_rev = o.flip()
    o_rev.integrate(ts, mwahpy_default_pot)

    #sign swap on vx because galpy is left-handed, and we are inputting
    #data in a right-handed coordinate system
    data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)

    show_vgsr = type(vgsr) == type(np.array([]))
    npanels = 3 if show_vgsr else 2

    fig = plt.figure(figsize=(24, 6))
    ax1 = fig.add_subplot(1, npanels, 1)
    ax2 = fig.add_subplot(1, npanels, 2)

    #panel 1: sky positions
    ax1.plot(data_orbit.l, data_orbit.b, c='b')
    ax1.plot(data_orbit_rev.l, data_orbit_rev.b, c='r')
    ax1.scatter(l, b, c='k')
    ax1.set_xlim(0, 360)
    ax1.set_ylim(-90, 90)
    ax1.set_xlabel('l')
    ax1.set_ylabel('b')

    #panel 2: heliocentric distances
    ax2.plot(data_orbit.l, data_orbit.d, c='b')
    ax2.plot(data_orbit_rev.l, data_orbit_rev.d, c='r')
    ax2.scatter(l, d, c='k')
    ax2.set_xlim(0, 360)
    ax2.set_xlabel('l')
    ax2.set_ylabel('d (helio)')

    #panel 3 (optional): line-of-sight velocities
    if show_vgsr:
        ax3 = fig.add_subplot(1, npanels, 3)
        ax3.plot(data_orbit.l, data_orbit.vgsr, c='b')
        ax3.plot(data_orbit_rev.l, data_orbit_rev.vgsr, c='r')
        ax3.scatter(l, vgsr, c='k')
        ax3.set_xlim(0, 360)
        ax3.set_xlabel('l')
        ax3.set_ylabel('vgsr (km/s)')

    plt.show()
def plotOrbiticrs(l, b, d, params, vgsr=None):
    """Plot the fitted orbit over the data in ICRS (equatorial) coordinates.

    l, b, d: data positions in Galactic coordinates (deg) and distances
    params: best-fit (l, b, d, vx, vy, vz) parameters from fit_orbit
    vgsr: optional line-of-sight velocities; adds a third panel when given
    """
    #convert the data to ICRS
    s = SkyCoord(l, b, frame='galactic', unit=(u.deg, u.deg))
    s = s.transform_to('icrs')
    ra = s.ra
    dec = s.dec

    o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220, params[5]], uvw=True, lb=True, ro=8., vo=220.) #generate the orbit
    o.integrate(ts, mwahpy_default_pot) #integrate the orbit
    o_rev = o.flip()
    o_rev.integrate(ts, mwahpy_default_pot)

    #sign swap on vx because galpy is left-handed, and we are inputting data in a right-handed coordinate system
    data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)

    fig = plt.figure(figsize=(24, 6))
    nplots = 2
    if type(vgsr) == type(np.array([])):
        nplots += 1
    ax1 = fig.add_subplot(1, nplots, 1)
    ax2 = fig.add_subplot(1, nplots, 2)
    if type(vgsr) == type(np.array([])):
        ax3 = fig.add_subplot(1, nplots, 3)

    data_orbit.icrs()
    data_orbit_rev.icrs()

    #TODO: Unwrap should really be a orbit data method
    o_unwrapped = unwrap([data_orbit.ra, data_orbit.dec, data_orbit.d, data_orbit.vgsr])
    o_rev_unwrapped = unwrap([data_orbit_rev.ra, data_orbit_rev.dec, data_orbit_rev.d, data_orbit_rev.vgsr])

    #panel 1: sky positions
    for o_ra, o_dec in zip(o_unwrapped[0], o_unwrapped[1]):
        ax1.plot(o_ra, o_dec, c='b')
    for o_ra, o_dec in zip(o_rev_unwrapped[0], o_rev_unwrapped[1]):
        ax1.plot(o_ra, o_dec, c='r')
    ax1.scatter(ra, dec, c='k')
    ax1.set_xlim(360, 0)
    ax1.set_ylim(-90, 90)
    ax1.set_xlabel('ra')
    ax1.set_ylabel('dec')

    #panel 2: heliocentric distances
    for o_ra, o_d in zip(o_unwrapped[0], o_unwrapped[2]):
        ax2.plot(o_ra, o_d, c='b')
    for o_ra, o_d in zip(o_rev_unwrapped[0], o_rev_unwrapped[2]):
        ax2.plot(o_ra, o_d, c='r')
    ax2.scatter(ra, d, c='k')
    ax2.set_xlim(360, 0)
    ax2.set_xlabel('ra')
    ax2.set_ylabel('d (helio)')

    #panel 3 (optional): line-of-sight velocities
    if type(vgsr) == type(np.array([])):
        #BUGFIX: the velocity curves were previously drawn on ax1 (the
        #position panel) and the scatter used d instead of vgsr; route the
        #whole panel to ax3 to match plotOrbitgal
        for o_ra, o_vgsr in zip(o_unwrapped[0], o_unwrapped[-1]):
            ax3.plot(o_ra, o_vgsr, c='b')
        for o_ra, o_vgsr in zip(o_rev_unwrapped[0], o_rev_unwrapped[-1]):
            ax3.plot(o_ra, o_vgsr, c='r')
        ax3.scatter(ra, vgsr, c='k')
        ax3.set_xlim(360, 0)
        ax3.set_xlabel('ra')
        ax3.set_ylabel('vgsr (km/s)')

    plt.show()
'''
================================================================================
TESTING
================================================================================
'''
def test():
    """Smoke test: fit an orbit to a small hand-made stream and plot it."""
    stream_l = np.array([0, 20, 40, 60, 80, 100, 120, 140, 160, 180])
    stream_b = np.array([0, 10, 20, 10, 0, -10, -20, -10, 0, 10])
    stream_d = np.array([20, 18, 15, 12, 10, 12, 15, 18, 20, 23])
    unit_err = np.full(10, 1) #uniform 1-unit uncertainties

    params, x2 = fit_orbit(stream_l, stream_b, unit_err, stream_d, unit_err)
    plotOrbitgal(stream_l, stream_b, stream_d, params)
'''
================================================================================
RUNTIME
================================================================================
'''
#run the built-in smoke test when this module is executed as a script
if __name__ == "__main__":
    test()
| [
"numpy.abs",
"numpy.flip",
"scipy.optimize.differential_evolution",
"numpy.where",
"astropy.coordinates.SkyCoord",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.split",
"numpy.cos",
"numpy.min",
"numpy.sin",
"galpy.orbit.Orbit",
"matplotlib.pyplot.show"
] | [((1275, 1315), 'numpy.linspace', 'np.linspace', (['(0)', 't_length'], {'num': 'resolution'}), '(0, t_length, num=resolution)\n', (1286, 1315), True, 'import numpy as np\n'), ((4089, 4106), 'numpy.abs', 'np.abs', (['(Lam - val)'], {}), '(Lam - val)\n', (4095, 4106), True, 'import numpy as np\n'), ((4115, 4124), 'numpy.min', 'np.min', (['L'], {}), '(L)\n', (4121, 4124), True, 'import numpy as np\n'), ((4135, 4151), 'numpy.where', 'np.where', (['(L == m)'], {}), '(L == m)\n', (4143, 4151), True, 'import numpy as np\n'), ((7838, 7971), 'galpy.orbit.Orbit', 'Orbit', ([], {'vxvv': '[params[0], params[1], params[2], params[3], params[4] - 220, params[5]]', 'uvw': '(True)', 'lb': '(True)', 'ro': '(8.0)', 'vo': '(220.0)', 'zo': '(0.0)'}), '(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220,\n params[5]], uvw=True, lb=True, ro=8.0, vo=220.0, zo=0.0)\n', (7843, 7971), False, 'from galpy.orbit import Orbit\n'), ((13388, 13513), 'galpy.orbit.Orbit', 'Orbit', ([], {'vxvv': '[params[0], params[1], params[2], params[3], params[4] - 220, params[5]]', 'uvw': '(True)', 'lb': '(True)', 'ro': '(8.0)', 'vo': '(220.0)'}), '(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220,\n params[5]], uvw=True, lb=True, ro=8.0, vo=220.0)\n', (13393, 13513), False, 'from galpy.orbit import Orbit\n'), ((13845, 13872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 6)'}), '(figsize=(24, 6))\n', (13855, 13872), True, 'import matplotlib.pyplot as plt\n'), ((14861, 14871), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14869, 14871), True, 'import matplotlib.pyplot as plt\n'), ((14929, 14982), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['l', 'b'], {'frame': '"""galactic"""', 'unit': '(u.deg, u.deg)'}), "(l, b, frame='galactic', unit=(u.deg, u.deg))\n", (14937, 14982), False, 'from astropy.coordinates import SkyCoord\n'), ((15053, 15178), 'galpy.orbit.Orbit', 'Orbit', ([], {'vxvv': '[params[0], params[1], params[2], params[3], params[4] - 
220, params[5]]', 'uvw': '(True)', 'lb': '(True)', 'ro': '(8.0)', 'vo': '(220.0)'}), '(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220,\n params[5]], uvw=True, lb=True, ro=8.0, vo=220.0)\n', (15058, 15178), False, 'from galpy.orbit import Orbit\n'), ((15510, 15537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 6)'}), '(figsize=(24, 6))\n', (15520, 15537), True, 'import matplotlib.pyplot as plt\n'), ((17167, 17177), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17175, 17177), True, 'import matplotlib.pyplot as plt\n'), ((17378, 17432), 'numpy.array', 'np.array', (['[0, 20, 40, 60, 80, 100, 120, 140, 160, 180]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140, 160, 180])\n', (17386, 17432), True, 'import numpy as np\n'), ((17441, 17491), 'numpy.array', 'np.array', (['[0, 10, 20, 10, 0, -10, -20, -10, 0, 10]'], {}), '([0, 10, 20, 10, 0, -10, -20, -10, 0, 10])\n', (17449, 17491), True, 'import numpy as np\n'), ((17504, 17544), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (17512, 17544), True, 'import numpy as np\n'), ((17553, 17603), 'numpy.array', 'np.array', (['[20, 18, 15, 12, 10, 12, 15, 18, 20, 23]'], {}), '([20, 18, 15, 12, 10, 12, 15, 18, 20, 23])\n', (17561, 17603), True, 'import numpy as np\n'), ((17616, 17656), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (17624, 17656), True, 'import numpy as np\n'), ((2550, 2613), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['self.l', 'self.b'], {'frame': '"""galactic"""', 'unit': '(u.deg, u.deg)'}), "(self.l, self.b, frame='galactic', unit=(u.deg, u.deg))\n", (2558, 2613), False, 'from astropy.coordinates import SkyCoord\n'), ((3522, 3534), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3530, 3534), True, 'import numpy as np\n'), ((3536, 3548), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3544, 3548), True, 'import numpy as np\n'), ((3550, 3562), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3558, 3562), True, 'import numpy as np\n'), ((3564, 3576), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3572, 3576), True, 'import numpy as np\n'), ((3578, 3590), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3586, 3590), True, 'import numpy as np\n'), ((3592, 3604), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3600, 3604), True, 'import numpy as np\n'), ((3879, 3891), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3887, 3891), True, 'import numpy as np\n'), ((3893, 3905), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3901, 3905), True, 'import numpy as np\n'), ((3907, 3919), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3915, 3919), True, 'import numpy as np\n'), ((3921, 3933), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3929, 3933), True, 'import numpy as np\n'), ((3935, 3947), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3943, 3947), True, 'import numpy as np\n'), ((3949, 3961), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3957, 3961), True, 'import numpy as np\n'), ((3963, 3975), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3971, 3975), True, 'import numpy as np\n'), ((5595, 5635), 'numpy.linspace', 'np.linspace', (['(0)', 't_length'], {'num': 'resolution'}), '(0, t_length, num=resolution)\n', (5606, 5635), True, 'import numpy as np\n'), ((5988, 6013), 'numpy.flip', 'np.flip', (['data_orbit_rev.l'], {}), '(data_orbit_rev.l)\n', (5995, 6013), True, 'import numpy as np\n'), ((6224, 6249), 'numpy.flip', 'np.flip', (['data_orbit_rev.b'], {}), '(data_orbit_rev.b)\n', (6231, 6249), True, 'import numpy as np\n'), ((6347, 6372), 'numpy.flip', 'np.flip', (['data_orbit_rev.d'], {}), '(data_orbit_rev.d)\n', (6354, 6372), True, 'import numpy as np\n'), ((10386, 10627), 'scipy.optimize.differential_evolution', 'scopt.differential_evolution', (['chiSquared', 'bounds'], {'args': '(data_opt,)', 'strategy': '"""rand1bin"""', 'maxiter': 'max_it', 'popsize': 
'pop_size', 'mutation': 'diff_scaling_factor', 'recombination': 'crossover_rate', 'workers': '(-1)', 'disp': '(not flags.verbose)'}), "(chiSquared, bounds, args=(data_opt,), strategy\n ='rand1bin', maxiter=max_it, popsize=pop_size, mutation=\n diff_scaling_factor, recombination=crossover_rate, workers=-1, disp=not\n flags.verbose, **kwargs)\n", (10414, 10627), True, 'import scipy.optimize as scopt\n'), ((2381, 2409), 'numpy.cos', 'np.cos', (['(np.pi / 180 * self.b)'], {}), '(np.pi / 180 * self.b)\n', (2387, 2409), True, 'import numpy as np\n'), ((2430, 2458), 'numpy.sin', 'np.sin', (['(np.pi / 180 * self.b)'], {}), '(np.pi / 180 * self.b)\n', (2436, 2458), True, 'import numpy as np\n'), ((6279, 6317), 'numpy.array', 'np.array', (['[Bet[p] for p in point_list]'], {}), '([Bet[p] for p in point_list])\n', (6287, 6317), True, 'import numpy as np\n'), ((6402, 6438), 'numpy.array', 'np.array', (['[D[p] for p in point_list]'], {}), '([D[p] for p in point_list])\n', (6410, 6438), True, 'import numpy as np\n'), ((6489, 6515), 'numpy.flip', 'np.flip', (['data_orbit_rev.vx'], {}), '(data_orbit_rev.vx)\n', (6496, 6515), True, 'import numpy as np\n'), ((6691, 6717), 'numpy.flip', 'np.flip', (['data_orbit_rev.vy'], {}), '(data_orbit_rev.vy)\n', (6698, 6717), True, 'import numpy as np\n'), ((6893, 6919), 'numpy.flip', 'np.flip', (['data_orbit_rev.vz'], {}), '(data_orbit_rev.vz)\n', (6900, 6919), True, 'import numpy as np\n'), ((7099, 7127), 'numpy.flip', 'np.flip', (['data_orbit_rev.vgsr'], {}), '(data_orbit_rev.vgsr)\n', (7106, 7127), True, 'import numpy as np\n'), ((9568, 9580), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9576, 9580), True, 'import numpy as np\n'), ((11248, 11260), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11256, 11260), True, 'import numpy as np\n'), ((11330, 11342), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11338, 11342), True, 'import numpy as np\n'), ((11412, 11424), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11420, 
11424), True, 'import numpy as np\n'), ((11496, 11508), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11504, 11508), True, 'import numpy as np\n'), ((11695, 11735), 'numpy.linspace', 'np.linspace', (['(0)', 't_length'], {'num': 'resolution'}), '(0, t_length, num=resolution)\n', (11706, 11735), True, 'import numpy as np\n'), ((13914, 13926), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13922, 13926), True, 'import numpy as np\n'), ((14055, 14067), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14063, 14067), True, 'import numpy as np\n'), ((14591, 14603), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14599, 14603), True, 'import numpy as np\n'), ((15577, 15589), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15585, 15589), True, 'import numpy as np\n'), ((15714, 15726), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15722, 15726), True, 'import numpy as np\n'), ((16792, 16804), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (16800, 16804), True, 'import numpy as np\n'), ((2303, 2331), 'numpy.cos', 'np.cos', (['(np.pi / 180 * self.b)'], {}), '(np.pi / 180 * self.b)\n', (2309, 2331), True, 'import numpy as np\n'), ((2356, 2384), 'numpy.sin', 'np.sin', (['(np.pi / 180 * self.l)'], {}), '(np.pi / 180 * self.l)\n', (2362, 2384), True, 'import numpy as np\n'), ((6551, 6588), 'numpy.array', 'np.array', (['[vx[p] for p in point_list]'], {}), '([vx[p] for p in point_list])\n', (6559, 6588), True, 'import numpy as np\n'), ((6753, 6790), 'numpy.array', 'np.array', (['[vy[p] for p in point_list]'], {}), '([vy[p] for p in point_list])\n', (6761, 6790), True, 'import numpy as np\n'), ((6955, 6992), 'numpy.array', 'np.array', (['[vz[p] for p in point_list]'], {}), '([vz[p] for p in point_list])\n', (6963, 6992), True, 'import numpy as np\n'), ((7167, 7206), 'numpy.array', 'np.array', (['[vgsr[p] for p in point_list]'], {}), '([vgsr[p] for p in point_list])\n', (7175, 7206), True, 'import numpy as np\n'), ((13041, 13061), 'numpy.split', 
'np.split', (['arr', 'split'], {}), '(arr, split)\n', (13049, 13061), True, 'import numpy as np\n'), ((13100, 13115), 'numpy.array', 'np.array', (['[arr]'], {}), '([arr])\n', (13108, 13115), True, 'import numpy as np\n'), ((2278, 2306), 'numpy.cos', 'np.cos', (['(np.pi / 180 * self.l)'], {}), '(np.pi / 180 * self.l)\n', (2284, 2306), True, 'import numpy as np\n'), ((12910, 12938), 'numpy.abs', 'np.abs', (['(a[0][:-1] - a[0][1:])'], {}), '(a[0][:-1] - a[0][1:])\n', (12916, 12938), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020-2022 <NAME>. All Rights Reserved.
See LICENCE file for details
"""
import numpy as np
import sys
import scipy.stats as stats
import random
import pandas as pd
import matplotlib.pyplot as plt
sys.path.append('../../')
from Likelihood import log_likelihood_models
from PDE_solver import SIR_PDEroutine
if __name__=="__main__":
    #fixed seeds: the event-time subsampling below is random, so pin it
    np.random.seed(20131989)
    random.seed(4011994)
    #daily COVID-19 case counts for India
    path='../Data/case_time_series.csv'
    full_data = pd.read_csv(path)
    date_start = '2021-02-15' #first day to consider
    date_end = '2021-07-01' #last day (excluded)
    #date_start = '2020-03-01'
    #date_end = '2020-10-20'
    #row indices in the CSV corresponding to the chosen date window
    day_start= np.where(full_data["Date_YMD"] == date_start)[0][0]
    day_end = np.where(full_data["Date_YMD"] == date_end)[0][0]
    #These instructions are specific to the data set, should be self-explanatory
    date_datetime = np.array(full_data["Date_YMD"][day_start:day_end])
    date_days = np.arange(day_start,day_end,1)
    infected = np.array(full_data["Daily Confirmed"][day_start:day_end])
    recov_deceased = np.array(full_data["Daily Recovered"][day_start:day_end])+ \
                     np.array(full_data["Daily Deceased"][day_start:day_end])
    total_recov = np.array(full_data["Total Recovered"][day_start:day_end])+ \
                  np.array(full_data["Total Deceased"][day_start:day_end])
    total_deaths = np.array(full_data["Total Deceased"][day_start:day_end])
    total_infected = np.array(full_data["Total Confirmed"][day_start:day_end])
    deaths = np.array(full_data["Daily Deceased"][day_start:day_end])
    recov = np.array(full_data["Daily Recovered"][day_start:day_end])
    tot_rec = np.array(full_data["Total Recovered"][day_start:day_end])
    #collected view of the series, indexed by date
    dataframe = pd.DataFrame({'I':infected, 'I_t':total_infected, \
                               'RD':recov_deceased,'RD_t':total_recov,\
                               'R':recov,'R_t':tot_rec, 'D':deaths, \
                               'D_t':total_deaths},index=date_datetime)
    day = np.arange(day_start, day_end,1)
    day = day - day[0] #Shift of initial day to match PDE
    #Distribute infection and recovery times
    #uniformly through the days (to avoid having atoms of probability)
    time_I_extended = np.zeros(sum(infected))
    time_R_extended = np.zeros(sum(recov_deceased))
    position=0
    '''
    #Room for improvement here, just a quick and dirty loop
    for dailycases in range(len(infected)):
        random_times = np.sort(np.random.uniform(0,1, infected[dailycases]))
        for time in random_times:
            if dailycases ==0:
                time_I_extended[position] = time
            else:
                time_I_extended[position] = day[dailycases-1]+time*(day[dailycases]-day[dailycases-1])
            position = position+1
    position=0
    for dailycases in range(len(recov_deceased)):
        random_times = np.sort(np.random.uniform(0,1, recov_deceased[dailycases]))
        for time in random_times:
            if dailycases ==0:
                time_R_extended[position] = time
            else:
                time_R_extended[position] = day[dailycases-1]+time*(day[dailycases]-day[dailycases-1])
            position = position+1
    np.save("Data/India_infected",time_I_extended)
    np.save("Data/India_rec",time_R_extended)
    '''
    #load the per-case event times precomputed by the (commented-out) loop above
    time_I_total = np.load('../Data/India_infected.npy')
    time_R_total = np.load('../Data/India_rec.npy')
    time_I_extended = np.sort(time_I_total)
    time_R_extended = np.sort(time_R_total)
    #take 10000 samples equispaced in the space of indices
    #NOTE(review): `size` is actually 3000 here, not 10000 as the comment above says
    size = 3000
    time_I_extended = np.sort(np.random.choice(time_I_extended,size,replace=False))
    time_R_extended = np.sort(np.random.choice(time_R_extended,size,replace=False))
    #sample_every = int(time_I_total.size/size)
    #time_I_extended = time_I_extended[::sample_every]
    #sample_every = int(time_R_total.size/size)
    #time_R_extended = time_R_extended[::sample_every]
    def rec_haz(u, *recovDistParams):
        #hazard rate of a Gamma recovery distribution given (mean, variance)
        a = float(recovDistParams[0])**2/float(recovDistParams[1])
        scale = float(recovDistParams[1])/float(recovDistParams[0])
        tol = 1e-10
        #Basically: use de l'hopital when the ratio becomes 0/0
        #Otherwise go with definition. This regularises a lot the numerics
        x = np.where(stats.gamma.cdf(u,a=a,scale=scale)>1-tol,
                     1/scale - (a-1)/u,
                     stats.gamma.pdf(u,a=a,scale=scale)/(1- stats.gamma.cdf(u,a=a,scale=scale)))
        return x
    def rec_distr(u, *recovDistParams):
        #pdf of the Gamma recovery distribution given (mean, variance)
        a = float(recovDistParams[0])**2/float(recovDistParams[1])
        scale = float(recovDistParams[1])/float(recovDistParams[0])
        #a = float(recovDistParams[0])
        #scale = float(recovDistParams[1])
        return stats.gamma.pdf(u,a=a,scale=scale)
    def inf_distr(u,*CIdistParms):
        #hazard rate of the Gamma infectiousness profile given (shape, scale)
        a = float(CIdistParms[0])
        scale = float(CIdistParms[1])
        tol = 1e-10
        #Basically: use de l'hopital when the ratio becomes 0/0
        #Otherwise go with definition. This regularises a lot the numerics
        x = np.where(stats.gamma.cdf(u,a=a,scale=scale)>1-tol,
                     1/scale - (a-1)/u,
                     stats.gamma.pdf(u,a=a,scale=scale)/(1- stats.gamma.cdf(u,a=a,scale=scale)))
        #the hazard is singular at u=0; copy the neighbouring value instead
        x[0]=x[1]
        return x
grids=3000
ll=log_likelihood_models(grids,hazard_inf=inf_distr,hazard_rec=rec_haz, rec_distr = rec_distr,
T=day[-1], infect_times=time_I_extended, recov_times=time_R_extended, hazard_inf_par=2,rec_parms=2)
result = ll.minimize_likelihood(np.array([1e-4,1,1,3,2]), np.array([1e-2,10,6,12, 25]), maxiter=100,swarmsize=500)
#print(result)
print(result)
#result_x=[4.06914629e-04, 2.81346094e+00, 1.57537281e+00, 5.69340232e+00,
# 1.89918160e+01]
# 26642.522743303794)
result_x=result[0]
pde= SIR_PDEroutine(result_x[0], CIdist=inf_distr, CIdistParms=[result_x[1], result_x[2]],\
recovDist=rec_haz, recovDistParms=[result_x[3],result_x[4]],\
nTgrid=grids, nUgrid=grids, T=day[-1])
initialcondition1=np.exp(-pde.tgrids)
X,Y=pde.finDiffUpdate(initialcond=initialcondition1)
'''
#Plots: I(t)
plt.figure()
plt.plot(pde.tgrids,Y, color='blue')
plt.xlabel('time')
plt.ylabel('cases')
'''
#Plots: Recovery distribution
plt.figure()
x = np.linspace(0,30,grids)
plt.plot(x,rec_distr(x,*[result_x[3],result_x[4]]), label = 'Gamma: mean %.3f, var %.3f)'%(result_x[3],result_x[4]))
plt.title("Foot and mouth")
plt.xlabel('time')
plt.legend()
plt.ylabel('Recovery pdf')
plt.title('time to recovery')
#Plots: Infectiousness distribution
x = np.linspace(0,30,grids)
plt.figure()
plt.plot(x,stats.gamma.pdf(x,a=result_x[1],scale=result_x[2]), label = 'Gamma: mean %.3f, var %.3f'%(result_x[1]*result_x[2],result_x[1]*result_x[2]**2))
plt.xlabel('time')
plt.ylabel('Infection pdf')
plt.legend()
plt.title("infectiousness in time")
#Plots: f_t
from scipy.stats import gaussian_kde
dense = gaussian_kde(time_I_extended)
denseval = list(dense(x) for x in day)
x=np.linspace(0,day,3000)
plt.figure()
empirical_density = -(np.diff(X)/pde.dx) /(1-X[-1])
plt.plot(pde.tgrids[1:], empirical_density, color='b',label='MLE density uniform in cond' )
plt.scatter(day,denseval, color='black', label = 'Empirical density')
plt.xlabel("T")
plt.ylabel("Conditional density")
plt.legend()
#Gamma gamma
#Likelihood 26557
#[4.93174744e-04, 5.05115136e+00, 1.00000000e+00, 6.05378065e+00,1.93962008e+01]
#Likelihood 26505
#result_x=[5.44645257e-04, 3.59625285e+00, 1.64603099e+00, 8.21525320e+00,2.11575966e+01]
#These are the best results for the first 250 days
#GAMMA
#array([3.16410516e-05, 1.41516534e-01, 8.36858743e+00, 7.32729328e+00])
#date_start = '2020-03-01'
#date_end = '2020-10-20'
#[2.75066276e-04, 2.07164788e-01, 6.42516240e+00, 1.05904792e+01]
#date_start = '2021-02-15' #first day to consider
#date_end = '2021-06-15' #last day (excluded)
#array([8.63431947e-04, 2.10000000e+00, 6.83694671e+00, 7.82871068e+00,
# 1.58827459e+01])
#[5.92080750e-04 1.50000000e+00 5.60343470e+00 6.91161839e+00
# 2.10000000e+01]
'''
import matplotlib.pyplot as plt
pdeObj_1 = SIR_PDEroutine(result[0], CIdist=inf_distr, CIdistParms=[result[1]],\
recovDist=rec_haz, recovDistParms=[result[2],result[3]],\
nTgrid=10000, nUgrid=10000, T=day[-1])
X,Y=pdeObj_1.finDiffUpdate()
#Effective N for DSA
effective_N=(total_deaths[-1]+total_infected[-1])/(1-X[-1])
print(effective_N/10**6)
plt.plot(day, total_deaths+total_infected, color='r', label='cumulative cases observed')
plt.plot(pdeObj_1.tgrids, effective_N*(1-X), label='estimate from PDE')
plt.xlabel("Time (days)")
plt.ylabel("Cumulative cases")
plt.legend()
#plt.savefig("India_fit_pde.pdf", format='pdf')
plt.figure()
#This is - \dot{S}/(1-X[-1]), should give the empirical density of infected
empirical_density = -(np.diff(X)/pdeObj_1.dx) /(1-X[-1])
from scipy.stats import gaussian_kde
dense = gaussian_kde(time_I_extended)
denseval = list(dense(x) for x in day)
plt.plot(pdeObj_1.tgrids[1:], empirical_density, color='b',label='PDE density' )
plt.scatter(day,denseval, color='r', label = 'Empirical density')
plt.xlabel("T")
plt.ylabel("Conditional density")
plt.legend()
#Plot the recovery distribution as inferred from ML
plt.figure()
x = np.linspace(0,20,1000)
plt.plot(x,rec_distr(x,*[result[2],result[3]]), label = 'Weibull(%.3f, %.3f)'%(result[2],result[3]))
plt.title("India - first 250 days")
plt.legend()
plt.xlabel('Time')
plt.ylabel('pdf')
plt.xlim(0,30)
'''
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"PDE_solver.SIR_PDEroutine",
"sys.path.append",
"numpy.arange",
"scipy.stats.gaussian_kde",
"numpy.where",
"numpy.sort",
"Likelihood.log_likelihood_models",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"scipy.stats.gamma.... | [((262, 287), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (277, 287), False, 'import sys\n'), ((404, 428), 'numpy.random.seed', 'np.random.seed', (['(20131989)'], {}), '(20131989)\n', (418, 428), True, 'import numpy as np\n'), ((433, 453), 'random.seed', 'random.seed', (['(4011994)'], {}), '(4011994)\n', (444, 453), False, 'import random\n'), ((525, 542), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (536, 542), True, 'import pandas as pd\n'), ((953, 1003), 'numpy.array', 'np.array', (["full_data['Date_YMD'][day_start:day_end]"], {}), "(full_data['Date_YMD'][day_start:day_end])\n", (961, 1003), True, 'import numpy as np\n'), ((1020, 1052), 'numpy.arange', 'np.arange', (['day_start', 'day_end', '(1)'], {}), '(day_start, day_end, 1)\n', (1029, 1052), True, 'import numpy as np\n'), ((1071, 1128), 'numpy.array', 'np.array', (["full_data['Daily Confirmed'][day_start:day_end]"], {}), "(full_data['Daily Confirmed'][day_start:day_end])\n", (1079, 1128), True, 'import numpy as np\n'), ((1463, 1519), 'numpy.array', 'np.array', (["full_data['Total Deceased'][day_start:day_end]"], {}), "(full_data['Total Deceased'][day_start:day_end])\n", (1471, 1519), True, 'import numpy as np\n'), ((1541, 1598), 'numpy.array', 'np.array', (["full_data['Total Confirmed'][day_start:day_end]"], {}), "(full_data['Total Confirmed'][day_start:day_end])\n", (1549, 1598), True, 'import numpy as np\n'), ((1612, 1668), 'numpy.array', 'np.array', (["full_data['Daily Deceased'][day_start:day_end]"], {}), "(full_data['Daily Deceased'][day_start:day_end])\n", (1620, 1668), True, 'import numpy as np\n'), ((1687, 1744), 'numpy.array', 'np.array', (["full_data['Daily Recovered'][day_start:day_end]"], {}), "(full_data['Daily Recovered'][day_start:day_end])\n", (1695, 1744), True, 'import numpy as np\n'), ((1760, 1817), 'numpy.array', 'np.array', (["full_data['Total Recovered'][day_start:day_end]"], {}), "(full_data['Total 
Recovered'][day_start:day_end])\n", (1768, 1817), True, 'import numpy as np\n'), ((1844, 2030), 'pandas.DataFrame', 'pd.DataFrame', (["{'I': infected, 'I_t': total_infected, 'RD': recov_deceased, 'RD_t':\n total_recov, 'R': recov, 'R_t': tot_rec, 'D': deaths, 'D_t': total_deaths}"], {'index': 'date_datetime'}), "({'I': infected, 'I_t': total_infected, 'RD': recov_deceased,\n 'RD_t': total_recov, 'R': recov, 'R_t': tot_rec, 'D': deaths, 'D_t':\n total_deaths}, index=date_datetime)\n", (1856, 2030), True, 'import pandas as pd\n'), ((2131, 2163), 'numpy.arange', 'np.arange', (['day_start', 'day_end', '(1)'], {}), '(day_start, day_end, 1)\n', (2140, 2163), True, 'import numpy as np\n'), ((3481, 3518), 'numpy.load', 'np.load', (['"""../Data/India_infected.npy"""'], {}), "('../Data/India_infected.npy')\n", (3488, 3518), True, 'import numpy as np\n'), ((3538, 3570), 'numpy.load', 'np.load', (['"""../Data/India_rec.npy"""'], {}), "('../Data/India_rec.npy')\n", (3545, 3570), True, 'import numpy as np\n'), ((3598, 3619), 'numpy.sort', 'np.sort', (['time_I_total'], {}), '(time_I_total)\n', (3605, 3619), True, 'import numpy as np\n'), ((3642, 3663), 'numpy.sort', 'np.sort', (['time_R_total'], {}), '(time_R_total)\n', (3649, 3663), True, 'import numpy as np\n'), ((5651, 5851), 'Likelihood.log_likelihood_models', 'log_likelihood_models', (['grids'], {'hazard_inf': 'inf_distr', 'hazard_rec': 'rec_haz', 'rec_distr': 'rec_distr', 'T': 'day[-1]', 'infect_times': 'time_I_extended', 'recov_times': 'time_R_extended', 'hazard_inf_par': '(2)', 'rec_parms': '(2)'}), '(grids, hazard_inf=inf_distr, hazard_rec=rec_haz,\n rec_distr=rec_distr, T=day[-1], infect_times=time_I_extended,\n recov_times=time_R_extended, hazard_inf_par=2, rec_parms=2)\n', (5672, 5851), False, 'from Likelihood import log_likelihood_models\n'), ((6217, 6412), 'PDE_solver.SIR_PDEroutine', 'SIR_PDEroutine', (['result_x[0]'], {'CIdist': 'inf_distr', 'CIdistParms': '[result_x[1], result_x[2]]', 'recovDist': 'rec_haz', 
'recovDistParms': '[result_x[3], result_x[4]]', 'nTgrid': 'grids', 'nUgrid': 'grids', 'T': 'day[-1]'}), '(result_x[0], CIdist=inf_distr, CIdistParms=[result_x[1],\n result_x[2]], recovDist=rec_haz, recovDistParms=[result_x[3], result_x[\n 4]], nTgrid=grids, nUgrid=grids, T=day[-1])\n', (6231, 6412), False, 'from PDE_solver import SIR_PDEroutine\n'), ((6496, 6515), 'numpy.exp', 'np.exp', (['(-pde.tgrids)'], {}), '(-pde.tgrids)\n', (6502, 6515), True, 'import numpy as np\n'), ((6775, 6787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6785, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6796, 6821), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', 'grids'], {}), '(0, 30, grids)\n', (6807, 6821), True, 'import numpy as np\n'), ((6945, 6972), 'matplotlib.pyplot.title', 'plt.title', (['"""Foot and mouth"""'], {}), "('Foot and mouth')\n", (6954, 6972), True, 'import matplotlib.pyplot as plt\n'), ((6977, 6995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (6987, 6995), True, 'import matplotlib.pyplot as plt\n'), ((7000, 7012), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7010, 7012), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7043), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recovery pdf"""'], {}), "('Recovery pdf')\n", (7027, 7043), True, 'import matplotlib.pyplot as plt\n'), ((7052, 7081), 'matplotlib.pyplot.title', 'plt.title', (['"""time to recovery"""'], {}), "('time to recovery')\n", (7061, 7081), True, 'import matplotlib.pyplot as plt\n'), ((7135, 7160), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', 'grids'], {}), '(0, 30, grids)\n', (7146, 7160), True, 'import numpy as np\n'), ((7168, 7180), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7178, 7180), True, 'import matplotlib.pyplot as plt\n'), ((7343, 7361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (7353, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7366, 7393), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Infection pdf"""'], {}), "('Infection pdf')\n", (7376, 7393), True, 'import matplotlib.pyplot as plt\n'), ((7398, 7410), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7408, 7410), True, 'import matplotlib.pyplot as plt\n'), ((7415, 7450), 'matplotlib.pyplot.title', 'plt.title', (['"""infectiousness in time"""'], {}), "('infectiousness in time')\n", (7424, 7450), True, 'import matplotlib.pyplot as plt\n'), ((7520, 7549), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['time_I_extended'], {}), '(time_I_extended)\n', (7532, 7549), False, 'from scipy.stats import gaussian_kde\n'), ((7608, 7633), 'numpy.linspace', 'np.linspace', (['(0)', 'day', '(3000)'], {}), '(0, day, 3000)\n', (7619, 7633), True, 'import numpy as np\n'), ((7636, 7648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7646, 7648), True, 'import matplotlib.pyplot as plt\n'), ((7714, 7810), 'matplotlib.pyplot.plot', 'plt.plot', (['pde.tgrids[1:]', 'empirical_density'], {'color': '"""b"""', 'label': '"""MLE density uniform in cond"""'}), "(pde.tgrids[1:], empirical_density, color='b', label=\n 'MLE density uniform in cond')\n", (7722, 7810), True, 'import matplotlib.pyplot as plt\n'), ((7810, 7878), 'matplotlib.pyplot.scatter', 'plt.scatter', (['day', 'denseval'], {'color': '"""black"""', 'label': '"""Empirical density"""'}), "(day, denseval, color='black', label='Empirical density')\n", (7821, 7878), True, 'import matplotlib.pyplot as plt\n'), ((7884, 7899), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (7894, 7899), True, 'import matplotlib.pyplot as plt\n'), ((7904, 7937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Conditional density"""'], {}), "('Conditional density')\n", (7914, 7937), True, 'import matplotlib.pyplot as plt\n'), ((7942, 7954), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7952, 7954), True, 'import matplotlib.pyplot as plt\n'), ((1150, 1207), 'numpy.array', 
'np.array', (["full_data['Daily Recovered'][day_start:day_end]"], {}), "(full_data['Daily Recovered'][day_start:day_end])\n", (1158, 1207), True, 'import numpy as np\n'), ((1231, 1287), 'numpy.array', 'np.array', (["full_data['Daily Deceased'][day_start:day_end]"], {}), "(full_data['Daily Deceased'][day_start:day_end])\n", (1239, 1287), True, 'import numpy as np\n'), ((1306, 1363), 'numpy.array', 'np.array', (["full_data['Total Recovered'][day_start:day_end]"], {}), "(full_data['Total Recovered'][day_start:day_end])\n", (1314, 1363), True, 'import numpy as np\n'), ((1387, 1443), 'numpy.array', 'np.array', (["full_data['Total Deceased'][day_start:day_end]"], {}), "(full_data['Total Deceased'][day_start:day_end])\n", (1395, 1443), True, 'import numpy as np\n'), ((3789, 3843), 'numpy.random.choice', 'np.random.choice', (['time_I_extended', 'size'], {'replace': '(False)'}), '(time_I_extended, size, replace=False)\n', (3805, 3843), True, 'import numpy as np\n'), ((3878, 3932), 'numpy.random.choice', 'np.random.choice', (['time_R_extended', 'size'], {'replace': '(False)'}), '(time_R_extended, size, replace=False)\n', (3894, 3932), True, 'import numpy as np\n'), ((5051, 5087), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (5066, 5087), True, 'import scipy.stats as stats\n'), ((5916, 5946), 'numpy.array', 'np.array', (['[0.0001, 1, 1, 3, 2]'], {}), '([0.0001, 1, 1, 3, 2])\n', (5924, 5946), True, 'import numpy as np\n'), ((5942, 5973), 'numpy.array', 'np.array', (['[0.01, 10, 6, 12, 25]'], {}), '([0.01, 10, 6, 12, 25])\n', (5950, 5973), True, 'import numpy as np\n'), ((7196, 7248), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['x'], {'a': 'result_x[1]', 'scale': 'result_x[2]'}), '(x, a=result_x[1], scale=result_x[2])\n', (7211, 7248), True, 'import scipy.stats as stats\n'), ((735, 780), 'numpy.where', 'np.where', (["(full_data['Date_YMD'] == date_start)"], {}), "(full_data['Date_YMD'] == date_start)\n", (743, 
780), True, 'import numpy as np\n'), ((801, 844), 'numpy.where', 'np.where', (["(full_data['Date_YMD'] == date_end)"], {}), "(full_data['Date_YMD'] == date_end)\n", (809, 844), True, 'import numpy as np\n'), ((4537, 4573), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (4552, 4573), True, 'import scipy.stats as stats\n'), ((4642, 4678), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (4657, 4678), True, 'import scipy.stats as stats\n'), ((5399, 5435), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (5414, 5435), True, 'import scipy.stats as stats\n'), ((5494, 5530), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (5509, 5530), True, 'import scipy.stats as stats\n'), ((7680, 7690), 'numpy.diff', 'np.diff', (['X'], {}), '(X)\n', (7687, 7690), True, 'import numpy as np\n'), ((4681, 4717), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (4696, 4717), True, 'import scipy.stats as stats\n'), ((5533, 5569), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['u'], {'a': 'a', 'scale': 'scale'}), '(u, a=a, scale=scale)\n', (5548, 5569), True, 'import scipy.stats as stats\n')] |
import numpy as np
from scipy.stats import beta, chi2
from scipy.stats import combine_pvalues as scipy_combine_pvalues
from scipy.stats import ks_1samp, uniform
def combine_pvalues(pvalues, method="fisher"):
    """Combine several p-values into a single (statistic, p-value) pair.

    Supported methods are the scipy ones ("fisher", "pearson", "tippett",
    "stouffer", "mudholkar_george") plus two experimental ones ("eric",
    "min").  "pearson" and "tippett" are computed locally to work around a
    scipy bug (see https://github.com/scipy/scipy/pull/15452).
    """
    pvals = np.array(pvalues)
    n_tests = len(pvals)
    if method == "pearson":
        # HACK: local implementation until scipy/scipy#15452 is released.
        statistic = 2 * np.sum(np.log1p(-pvals))
        return statistic, chi2.cdf(-statistic, 2 * n_tests)
    if method == "tippett":
        # HACK: local implementation until scipy/scipy#15452 is released.
        statistic = np.min(pvals)
        return statistic, beta.cdf(statistic, 1, n_tests)
    if method in ("fisher", "pearson", "tippett", "stouffer", "mudholkar_george"):
        statistic, combined_p = scipy_combine_pvalues(pvals, method=method)
        return statistic, combined_p
    if method == "eric":
        # Experimental; provided for some experiments, not recommended.
        statistic, combined_p = ks_1samp(pvals, uniform(0, 1).cdf, alternative="greater")
        return statistic, combined_p
    if method == "min":
        # Experimental Bonferroni-style combination; very similar to Tippett's.
        combined_p = min(pvals.min() * n_tests, 1)
        return combined_p, combined_p
    raise NotImplementedError()
| [
"scipy.stats.uniform",
"numpy.array",
"scipy.stats.combine_pvalues",
"numpy.min",
"numpy.log1p"
] | [((224, 241), 'numpy.array', 'np.array', (['pvalues'], {}), '(pvalues)\n', (232, 241), True, 'import numpy as np\n'), ((644, 659), 'numpy.min', 'np.min', (['pvalues'], {}), '(pvalues)\n', (650, 659), True, 'import numpy as np\n'), ((477, 495), 'numpy.log1p', 'np.log1p', (['(-pvalues)'], {}), '(-pvalues)\n', (485, 495), True, 'import numpy as np\n'), ((766, 811), 'scipy.stats.combine_pvalues', 'scipy_combine_pvalues', (['pvalues'], {'method': 'method'}), '(pvalues, method=method)\n', (787, 811), True, 'from scipy.stats import combine_pvalues as scipy_combine_pvalues\n'), ((930, 943), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (937, 943), False, 'from scipy.stats import ks_1samp, uniform\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Stinespring representation of a Quantum Channel.
"""
import copy
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_identity_matrix
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.channel.kraus import Kraus
from qiskit.quantum_info.operators.channel.choi import Choi
from qiskit.quantum_info.operators.channel.superop import SuperOp
from qiskit.quantum_info.operators.channel.transformations import _to_stinespring
class Stinespring(QuantumChannel):
    r"""Stinespring representation of a quantum channel.

    The Stinespring representation of a quantum channel :math:`\mathcal{E}`
    is a rectangular matrix :math:`A` such that the evolution of a
    :class:`~qiskit.quantum_info.DensityMatrix` :math:`\rho` is given by

    .. math::

        \mathcal{E}(ρ) = \mbox{Tr}_2\left[A ρ A^\dagger\right]

    where :math:`\mbox{Tr}_2` is the :func:`partial_trace` over subsystem 2.

    A general operator map :math:`\mathcal{G}` can also be written using the
    generalized Stinespring representation which is given by two matrices
    :math:`A`, :math:`B` such that

    .. math::

        \mathcal{G}(ρ) = \mbox{Tr}_2\left[A ρ B^\dagger\right]

    See reference [1] for further details.

    References:
        1. C. J. Wood, J. D. Biamonte, D. G. Cory, *Tensor networks and
           graphical calculus for open quantum systems*,
           Quant. Inf. Comp. 15, 0579-0811 (2015).
           `arXiv:1111.6950 [quant-ph] <https://arxiv.org/abs/1111.6950>`_
    """

    def __init__(self, data, input_dims=None, output_dims=None):
        """Initialize a quantum channel Stinespring operator.

        Args:
            data (QuantumCircuit or
                  Instruction or
                  BaseOperator or
                  matrix): data to initialize superoperator.
            input_dims (tuple): the input subsystem dimensions.
                                [Default: None]
            output_dims (tuple): the output subsystem dimensions.
                                 [Default: None]

        Raises:
            QiskitError: if input data cannot be initialized as a
                         list of Kraus matrices.

        Additional Information:
            If the input or output dimensions are None, they will be
            automatically determined from the input data. This can fail for the
            Stinespring operator if the output dimension cannot be automatically
            determined.
        """
        # A tuple is interpreted as a pair of general Stinespring matrices
        # (A, B); a list or ndarray is interpreted as a single matrix A.
        if isinstance(data, (list, tuple, np.ndarray)):
            if not isinstance(data, tuple):
                # Convert single Stinespring matrix to a length-1 pair.
                stine = (np.asarray(data, dtype=complex), None)
            elif len(data) == 2:
                if data[1] is None:
                    stine = (np.asarray(data[0], dtype=complex), None)
                else:
                    stine = (np.asarray(data[0], dtype=complex),
                             np.asarray(data[1], dtype=complex))
            else:
                # BUG FIX: a tuple of any length other than 2 previously
                # fell through with ``stine`` unbound and raised a
                # confusing NameError instead of a channel error.
                raise QiskitError("Invalid Stinespring input.")

            dim_left, dim_right = stine[0].shape
            # If two Stinespring matrices were given they must be same shape.
            if stine[1] is not None:
                if stine[1].shape != (dim_left, dim_right):
                    raise QiskitError("Invalid Stinespring input.")
            input_dim = dim_right
            if output_dims:
                # np.product is deprecated (removed in NumPy 2.0); use np.prod.
                output_dim = np.prod(output_dims)
            else:
                output_dim = input_dim
            if dim_left % output_dim != 0:
                raise QiskitError("Invalid output_dim")
        else:
            # Otherwise we initialize by conversion from another Qiskit
            # object into the QuantumChannel.
            if isinstance(data, (QuantumCircuit, Instruction)):
                # If the input is a Terra QuantumCircuit or Instruction we
                # convert it to a SuperOp.
                data = SuperOp._init_instruction(data)
            else:
                # We use the QuantumChannel init transform to initialize
                # other objects into a QuantumChannel or Operator object.
                data = self._init_transformer(data)
            # NOTE(review): applied a second time as in the original code;
            # presumably idempotent on QuantumChannel/Operator — confirm.
            data = self._init_transformer(data)
            input_dim, output_dim = data.dim
            # Now that the input is an operator we convert it to a
            # Stinespring operator.
            rep = getattr(data, '_channel_rep', 'Operator')
            stine = _to_stinespring(rep, data._data, input_dim, output_dim)
            if input_dims is None:
                input_dims = data.input_dims()
            if output_dims is None:
                output_dims = data.output_dims()
        # Check and format input and output dimensions.
        input_dims = self._automatic_dims(input_dims, input_dim)
        output_dims = self._automatic_dims(output_dims, output_dim)
        # Initialize either single or general Stinespring.
        if stine[1] is None or (stine[1] == stine[0]).all():
            # Standard Stinespring map
            super().__init__((stine[0], None),
                             input_dims=input_dims,
                             output_dims=output_dims,
                             channel_rep='Stinespring')
        else:
            # General (non-CPTP) Stinespring map
            super().__init__(stine,
                             input_dims=input_dims,
                             output_dims=output_dims,
                             channel_rep='Stinespring')

    @property
    def data(self):
        """Return the Stinespring data: a single matrix for a standard map,
        or the (A, B) pair for a generalized map."""
        if self._data[1] is None:
            return self._data[0]
        else:
            return self._data

    def is_cptp(self, atol=None, rtol=None):
        """Return True if completely-positive trace-preserving."""
        if atol is None:
            atol = self.atol
        if rtol is None:
            rtol = self.rtol
        if self._data[1] is not None:
            # A generalized (two-matrix) Stinespring map is never CPTP.
            return False
        # CPTP iff A^dagger A is the identity on the input space.
        check = np.dot(np.transpose(np.conj(self._data[0])), self._data[0])
        # BUG FIX: use the (defaulted) atol/rtol arguments instead of
        # always re-reading self.atol/self.rtol, which silently ignored
        # caller-supplied tolerances.
        return is_identity_matrix(check, rtol=rtol, atol=atol)

    def conjugate(self):
        """Return the conjugate of the QuantumChannel."""
        # pylint: disable=assignment-from-no-return
        stine_l = np.conjugate(self._data[0])
        stine_r = None
        if self._data[1] is not None:
            stine_r = np.conjugate(self._data[1])
        return Stinespring((stine_l, stine_r), self.input_dims(),
                           self.output_dims())

    def transpose(self):
        """Return the transpose of the QuantumChannel."""
        din, dout = self.dim
        # Dimension of the traced-out ancilla subsystem.
        dtr = self._data[0].shape[0] // dout
        stine = [None, None]
        for i, mat in enumerate(self._data):
            if mat is not None:
                stine[i] = np.reshape(
                    np.transpose(np.reshape(mat, (dout, dtr, din)), (2, 1, 0)),
                    (din * dtr, dout))
        return Stinespring(tuple(stine),
                           input_dims=self.output_dims(),
                           output_dims=self.input_dims())

    def compose(self, other, qargs=None, front=False):
        """Return the composed quantum channel self @ other.

        Args:
            other (QuantumChannel): a quantum channel.
            qargs (list or None): a list of subsystem positions to apply
                                  other on. If None apply on all
                                  subsystems [default: None].
            front (bool): If True compose using right operator multiplication,
                          instead of left multiplication [default: False].

        Returns:
            Stinespring: The quantum channel self @ other.

        Raises:
            QiskitError: if other cannot be converted to a Stinespring or has
                         incompatible dimensions.

        Additional Information:
            Composition (``@``) is defined as `left` matrix multiplication for
            :class:`SuperOp` matrices. That is that ``A @ B`` is equal to ``B * A``.
            Setting ``front=True`` returns `right` matrix multiplication
            ``A * B`` and is equivalent to the :meth:`dot` method.
        """
        if qargs is None:
            qargs = getattr(other, 'qargs', None)
        if qargs is not None:
            # Subsystem composition is delegated to the SuperOp representation.
            return Stinespring(
                SuperOp(self).compose(other, qargs=qargs, front=front))
        # Otherwise we convert via Kraus representation rather than
        # superoperator to avoid unnecessary representation conversions.
        return Stinespring(Kraus(self).compose(other, front=front))

    def dot(self, other, qargs=None):
        """Return the right multiplied quantum channel self * other.

        Args:
            other (QuantumChannel): a quantum channel.
            qargs (list or None): a list of subsystem positions to apply
                                  other on. If None apply on all
                                  subsystems [default: None].

        Returns:
            Stinespring: The quantum channel self * other.

        Raises:
            QiskitError: if other cannot be converted to a Stinespring or has
                         incompatible dimensions.
        """
        return super().dot(other, qargs=qargs)

    def power(self, n):
        """The matrix power of the channel.

        Args:
            n (int): compute the matrix power of the superoperator matrix.

        Returns:
            Stinespring: the matrix power of the SuperOp converted to a
                         Stinespring channel.

        Raises:
            QiskitError: if the input and output dimensions of the
                         QuantumChannel are not equal, or the power is not
                         an integer.
        """
        if n > 0:
            return super().power(n)
        # Non-positive powers are computed via the SuperOp representation.
        return Stinespring(SuperOp(self).power(n))

    def tensor(self, other):
        """Return the tensor product channel self ⊗ other.

        Args:
            other (QuantumChannel): a quantum channel subclass.

        Returns:
            Stinespring: the tensor product channel self ⊗ other as a
                         Stinespring object.

        Raises:
            QiskitError: if other cannot be converted to a channel.
        """
        # DOC FIX: this returns self ⊗ other (reverse=False), not other ⊗ self.
        return self._tensor_product(other, reverse=False)

    def expand(self, other):
        """Return the tensor product channel other ⊗ self.

        Args:
            other (QuantumChannel): a quantum channel subclass.

        Returns:
            Stinespring: the tensor product channel other ⊗ self as a
                         Stinespring object.

        Raises:
            QiskitError: if other cannot be converted to a channel.
        """
        return self._tensor_product(other, reverse=True)

    def _add(self, other, qargs=None):
        """Return the QuantumChannel self + other.

        If ``qargs`` are specified the other operator will be added
        assuming it is identity on all other subsystems.

        Args:
            other (QuantumChannel): a quantum channel subclass.
            qargs (None or list): optional subsystems to add on
                                  (Default: None)

        Returns:
            Stinespring: the linear addition channel self + other.

        Raises:
            QiskitError: if other cannot be converted to a channel or
                         has incompatible dimensions.
        """
        # Since we cannot directly add two channels in the Stinespring
        # representation we convert to the Choi representation.
        return Stinespring(Choi(self)._add(other, qargs=qargs))

    def _multiply(self, other):
        """Return the QuantumChannel other * self.

        Args:
            other (complex): a complex number.

        Returns:
            Stinespring: the scalar multiplication other * self.

        Raises:
            QiskitError: if other is not a valid scalar.
        """
        if not isinstance(other, Number):
            raise QiskitError("other is not a number")
        ret = copy.copy(self)
        # If the number is complex or negative we need to convert to
        # general Stinespring representation so we first convert to
        # the Choi representation.
        if isinstance(other, complex) or other < 1:
            # Convert to Choi-matrix
            ret._data = Stinespring(Choi(self)._multiply(other))._data
            return ret
        # If the number is real we can update the Stinespring operators
        # directly by scaling with sqrt(other).
        num = np.sqrt(other)
        stine_l = num * self._data[0]
        stine_r = None if self._data[1] is None else num * self._data[1]
        ret._data = (stine_l, stine_r)
        return ret

    def _evolve(self, state, qargs=None):
        """Evolve a quantum state by the quantum channel.

        Args:
            state (DensityMatrix or Statevector): The input state.
            qargs (list): a list of quantum state subsystem positions to apply
                           the quantum channel on.

        Returns:
            DensityMatrix: the output quantum state as a density matrix.

        Raises:
            QiskitError: if the quantum channel dimension does not match the
                         specified quantum state subsystem dimensions.
        """
        return SuperOp(self)._evolve(state, qargs)

    def _tensor_product(self, other, reverse=False):
        """Return the tensor product channel.

        Args:
            other (QuantumChannel): a quantum channel subclass.
            reverse (bool): If False return self ⊗ other, if True return
                            other ⊗ self [Default: False]

        Returns:
            Stinespring: the tensor product channel as a Stinespring object.

        Raises:
            QiskitError: if other cannot be converted to a channel.
        """
        # Convert other to Stinespring
        if not isinstance(other, Stinespring):
            other = Stinespring(other)

        # Tensor Stinespring ops
        sa_l, sa_r = self._data
        sb_l, sb_r = other._data

        # Reshuffle tensor dimensions: each Stinespring matrix carries an
        # output and an ancilla (traced-out) index that must be interleaved.
        din_a, dout_a = self.dim
        din_b, dout_b = other.dim
        dtr_a = sa_l.shape[0] // dout_a
        dtr_b = sb_l.shape[0] // dout_b
        if reverse:
            shape_in = (dout_b, dtr_b, dout_a, dtr_a, din_b * din_a)
            shape_out = (dout_b * dtr_b * dout_a * dtr_a, din_b * din_a)
        else:
            shape_in = (dout_a, dtr_a, dout_b, dtr_b, din_a * din_b)
            shape_out = (dout_a * dtr_a * dout_b * dtr_b, din_a * din_b)

        # Compute left Stinespring op
        if reverse:
            input_dims = self.input_dims() + other.input_dims()
            output_dims = self.output_dims() + other.output_dims()
            sab_l = np.kron(sb_l, sa_l)
        else:
            input_dims = other.input_dims() + self.input_dims()
            output_dims = other.output_dims() + self.output_dims()
            sab_l = np.kron(sa_l, sb_l)
        # Reravel indices
        sab_l = np.reshape(
            np.transpose(np.reshape(sab_l, shape_in), (0, 2, 1, 3, 4)),
            shape_out)

        # Compute right Stinespring op
        if sa_r is None and sb_r is None:
            sab_r = None
        else:
            if sa_r is None:
                sa_r = sa_l
            elif sb_r is None:
                sb_r = sb_l
            if reverse:
                sab_r = np.kron(sb_r, sa_r)
            else:
                sab_r = np.kron(sa_r, sb_r)
            # Reravel indices
            sab_r = np.reshape(
                np.transpose(np.reshape(sab_r, shape_in), (0, 2, 1, 3, 4)),
                shape_out)
        return Stinespring((sab_l, sab_r), input_dims, output_dims)
| [
"numpy.product",
"qiskit.quantum_info.operators.predicates.is_identity_matrix",
"numpy.sqrt",
"numpy.reshape",
"numpy.conj",
"numpy.conjugate",
"numpy.asarray",
"qiskit.exceptions.QiskitError",
"numpy.kron",
"qiskit.quantum_info.operators.channel.transformations._to_stinespring",
"qiskit.quantum... | [((6973, 7030), 'qiskit.quantum_info.operators.predicates.is_identity_matrix', 'is_identity_matrix', (['check'], {'rtol': 'self.rtol', 'atol': 'self.atol'}), '(check, rtol=self.rtol, atol=self.atol)\n', (6991, 7030), False, 'from qiskit.quantum_info.operators.predicates import is_identity_matrix\n'), ((7185, 7212), 'numpy.conjugate', 'np.conjugate', (['self._data[0]'], {}), '(self._data[0])\n', (7197, 7212), True, 'import numpy as np\n'), ((12959, 12974), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (12968, 12974), False, 'import copy\n'), ((13428, 13442), 'numpy.sqrt', 'np.sqrt', (['other'], {}), '(other)\n', (13435, 13442), True, 'import numpy as np\n'), ((5349, 5404), 'qiskit.quantum_info.operators.channel.transformations._to_stinespring', '_to_stinespring', (['rep', 'data._data', 'input_dim', 'output_dim'], {}), '(rep, data._data, input_dim, output_dim)\n', (5364, 5404), False, 'from qiskit.quantum_info.operators.channel.transformations import _to_stinespring\n'), ((7296, 7323), 'numpy.conjugate', 'np.conjugate', (['self._data[1]'], {}), '(self._data[1])\n', (7308, 7323), True, 'import numpy as np\n'), ((12907, 12943), 'qiskit.exceptions.QiskitError', 'QiskitError', (['"""other is not a number"""'], {}), "('other is not a number')\n", (12918, 12943), False, 'from qiskit.exceptions import QiskitError\n'), ((15768, 15787), 'numpy.kron', 'np.kron', (['sb_l', 'sa_l'], {}), '(sb_l, sa_l)\n', (15775, 15787), True, 'import numpy as np\n'), ((15953, 15972), 'numpy.kron', 'np.kron', (['sa_l', 'sb_l'], {}), '(sa_l, sb_l)\n', (15960, 15972), True, 'import numpy as np\n'), ((4310, 4333), 'numpy.product', 'np.product', (['output_dims'], {}), '(output_dims)\n', (4320, 4333), True, 'import numpy as np\n'), ((4456, 4489), 'qiskit.exceptions.QiskitError', 'QiskitError', (['"""Invalid output_dim"""'], {}), "('Invalid output_dim')\n", (4467, 4489), False, 'from qiskit.exceptions import QiskitError\n'), ((4826, 4857), 
'qiskit.quantum_info.operators.channel.superop.SuperOp._init_instruction', 'SuperOp._init_instruction', (['data'], {}), '(data)\n', (4851, 4857), False, 'from qiskit.quantum_info.operators.channel.superop import SuperOp\n'), ((6918, 6940), 'numpy.conj', 'np.conj', (['self._data[0]'], {}), '(self._data[0])\n', (6925, 6940), True, 'import numpy as np\n'), ((14276, 14289), 'qiskit.quantum_info.operators.channel.superop.SuperOp', 'SuperOp', (['self'], {}), '(self)\n', (14283, 14289), False, 'from qiskit.quantum_info.operators.channel.superop import SuperOp\n'), ((16052, 16079), 'numpy.reshape', 'np.reshape', (['sab_l', 'shape_in'], {}), '(sab_l, shape_in)\n', (16062, 16079), True, 'import numpy as np\n'), ((16407, 16426), 'numpy.kron', 'np.kron', (['sb_r', 'sa_r'], {}), '(sb_r, sa_r)\n', (16414, 16426), True, 'import numpy as np\n'), ((16469, 16488), 'numpy.kron', 'np.kron', (['sa_r', 'sb_r'], {}), '(sa_r, sb_r)\n', (16476, 16488), True, 'import numpy as np\n'), ((3579, 3610), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'complex'}), '(data, dtype=complex)\n', (3589, 3610), True, 'import numpy as np\n'), ((4177, 4218), 'qiskit.exceptions.QiskitError', 'QiskitError', (['"""Invalid Stinespring input."""'], {}), "('Invalid Stinespring input.')\n", (4188, 4218), False, 'from qiskit.exceptions import QiskitError\n'), ((9506, 9517), 'qiskit.quantum_info.operators.channel.kraus.Kraus', 'Kraus', (['self'], {}), '(self)\n', (9511, 9517), False, 'from qiskit.quantum_info.operators.channel.kraus import Kraus\n'), ((10777, 10790), 'qiskit.quantum_info.operators.channel.superop.SuperOp', 'SuperOp', (['self'], {}), '(self)\n', (10784, 10790), False, 'from qiskit.quantum_info.operators.channel.superop import SuperOp\n'), ((12495, 12505), 'qiskit.quantum_info.operators.channel.choi.Choi', 'Choi', (['self'], {}), '(self)\n', (12499, 12505), False, 'from qiskit.quantum_info.operators.channel.choi import Choi\n'), ((16580, 16607), 'numpy.reshape', 'np.reshape', (['sab_r', 
'shape_in'], {}), '(sab_r, shape_in)\n', (16590, 16607), True, 'import numpy as np\n'), ((3742, 3776), 'numpy.asarray', 'np.asarray', (['data[0]'], {'dtype': 'complex'}), '(data[0], dtype=complex)\n', (3752, 3776), True, 'import numpy as np\n'), ((3835, 3869), 'numpy.asarray', 'np.asarray', (['data[0]'], {'dtype': 'complex'}), '(data[0], dtype=complex)\n', (3845, 3869), True, 'import numpy as np\n'), ((3900, 3934), 'numpy.asarray', 'np.asarray', (['data[1]'], {'dtype': 'complex'}), '(data[1], dtype=complex)\n', (3910, 3934), True, 'import numpy as np\n'), ((7773, 7806), 'numpy.reshape', 'np.reshape', (['mat', '(dout, dtr, din)'], {}), '(mat, (dout, dtr, din))\n', (7783, 7806), True, 'import numpy as np\n'), ((9282, 9295), 'qiskit.quantum_info.operators.channel.superop.SuperOp', 'SuperOp', (['self'], {}), '(self)\n', (9289, 9295), False, 'from qiskit.quantum_info.operators.channel.superop import SuperOp\n'), ((13271, 13281), 'qiskit.quantum_info.operators.channel.choi.Choi', 'Choi', (['self'], {}), '(self)\n', (13275, 13281), False, 'from qiskit.quantum_info.operators.channel.choi import Choi\n')] |
from . import Kmeans
from . import GM
from . import radarPlot
from . import heatmap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pa
from scipy.sparse import csr_matrix, isspmatrix
from scipy.sparse import csgraph
from sklearn.preprocessing import normalize
from sklearn.metrics import pairwise_distances
import mpl_toolkits.mplot3d.axes3d as p3
import pylab as p
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
import sys
import os
import time
from matplotlib.lines import Line2D
from matplotlib.pyplot import cm
from collections import Counter
from gprofiler import gprofiler
import copy
import operator
import scipy
import seaborn as sns
import random
from gensim.models import HdpModel,LdaModel
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn import metrics
class MICTI:
    """Marker gene Identification in Cell-Type Identity (MICTI).

    Wraps a cells-by-genes expression matrix together with a clustering
    of the cells, and provides ICF weighting, marker-gene statistics,
    enrichment queries, clustering and plotting helpers.
    """
    def __init__(self,data,geneNames,cellNames,k=None,cluster_label=None,cluster_assignment=None, th=0,seed=None, ensembel=False, organisum="hsapiens"):
        """Store the inputs, compute the ICF matrix and set up cluster colours.

        Parameters
        ----------
        data : cells-by-genes matrix; methods call ``.toarray()`` on it,
            so a scipy sparse matrix is expected.
        geneNames, cellNames : labels for the genes / cells of ``data``.
        k : number of clusters.
        cluster_label : display name for each cluster id.
        cluster_assignment : cluster id for every cell.
        th : expression threshold used by the ICF weighting.
        seed : stored; not used by the methods visible in this class.
        ensembel : if True, gene ids are Ensembl IDs and are translated to
            symbols for display (note the parameter's spelling).
        organisum : organism key ("hsapiens" or "mmusculus") for ID mapping.
        """
        self.data=data
        self.k=k
        self.th=th
        self.geneNames=geneNames
        self.cellNames=cellNames
        self.seed=seed
        self.ensembl=ensembel
        self.organsm=organisum
        self.cluster_assignment=cluster_assignment
        self.cluster_label=cluster_label
        # Placeholder; immediately rebuilt by initialize_colors() below.
        self.color=cluster_assignment
        self.color_dict={}
        # ICF-weighted expression matrix (cells x genes).
        self.data_ICF=self.ICF(self.data)
        self.initialize_colors()
def get_cluster_assignment(self):
return self.cluster_assignment
def initialize_colors(self):
colors=['#ffe119','#0082c8','#f58231','#911eb4','#46f0f0','#f032e6','#d2f53c','#fabebe','#008080','#e6beff',
'#aa6e28','#fffac8','#800000','#aaffc3','#808000','#ffd8b1','#000080','#808080','#FFFFFF','#000000'][:self.k]
cell_type=pa.Series([self.cluster_label[j] for j in self.cluster_assignment])
cell_type=cell_type.sort_values()
lut2=dict(zip(cell_type.sort_values().unique(), colors))
lut2=dict(sorted(lut2.items()))
col_colors= cell_type.map(lut2)
col_colors.index=pa.Series(self.cellNames)[cell_type.index]
mycol=[{k:tuple(np.array(self.hex_to_rgb(v))/255)} for k, v in lut2.items()]
self.color_dict={}
[self.color_dict.update(c) for c in mycol]
self.color=[lut2[self.cluster_label[i]] for i in self.cluster_assignment]
self.color_dict=lut2
return None
def cellMatrix2cellCorpus(self, datamatrix):
cell_Courpus=[]
for k in range(datamatrix.shape[0]):
cell_Courpus.append([(i,j) for i, j in enumerate(datamatrix.iloc[k,:])])
id2gene={i:j for i,j in enumerate(datamatrix.columns)}
id2cell={i:j for i,j in enumerate(datamatrix.index)}
return cell_Courpus, id2gene, id2cell
    def gene_symbol_to_ENSEMBLID(self, symbol, organisum="hsapiens"):
        """Map gene symbols to Ensembl stable gene IDs.

        Downloads a BioMart export table from GitHub on every call (network
        access required) and looks up ``symbol`` in it.

        Parameters
        ----------
        symbol : label or list of gene symbols to look up.
        organisum : "hsapiens" or "mmusculus"; any other value returns
            ``symbol`` unchanged.

        Returns the matching "Gene stable ID" values with NaNs and
        duplicates dropped, or the input untouched for unknown organisms.
        """
        if(organisum=="hsapiens"):
            # Human symbol -> Ensembl ID table, fetched remotely each call.
            genes=pa.read_csv("https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt", sep="\t")
            genes.index=genes["Gene name"]
            ENSEBMLID=genes.loc[symbol,"Gene stable ID"]
            Genes=ENSEBMLID.dropna().drop_duplicates()
        elif(organisum=="mmusculus"):
            # Mouse mapping table.
            genes=pa.read_csv("https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt", sep="\t")
            genes.index=genes["Gene name"]
            ENSEBMLID=genes.loc[symbol,"Gene stable ID"]
            Genes=ENSEBMLID.dropna().drop_duplicates()
        else:
            # Unsupported organism: pass the input through unchanged.
            Genes=symbol
            #print("give organisum")
        return Genes
    def ENSEMBLID_to_geneSymbol(self, ENSEMBL, organisum="hsapiens"):
        """Map Ensembl stable gene IDs back to gene symbols.

        Inverse of :meth:`gene_symbol_to_ENSEMBLID`; downloads the same
        BioMart export table from GitHub on every call (network required).

        Parameters
        ----------
        ENSEMBL : label or list of Ensembl stable IDs to look up.
        organisum : "hsapiens" or "mmusculus"; any other value returns
            ``ENSEMBL`` unchanged.

        Returns the matching "Gene name" values with NaNs and duplicates
        dropped, or the input untouched for unknown organisms.
        """
        if(organisum=="hsapiens"):
            # Human ID -> symbol table, fetched remotely each call.
            genes=pa.read_csv("https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt", sep="\t")
            genes.index=genes["Gene stable ID"]
            gene_symbol=genes.loc[ENSEMBL,"Gene name"]
            Genes=gene_symbol.dropna().drop_duplicates()
        elif(organisum=="mmusculus"):
            # Mouse mapping table.
            genes=pa.read_csv("https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt", sep="\t")
            genes.index=genes["Gene stable ID"]
            gene_symbol=genes.loc[ENSEMBL,"Gene name"]
            Genes=gene_symbol.dropna().drop_duplicates()
        else:
            # Unsupported organism: pass the input through unchanged.
            Genes=ENSEMBL
            #print("give organisum")
        return Genes
def ICF(self,data):
matrixx=pa.DataFrame((data.T.toarray()))
totalCells=matrixx.shape[1]
idf=np.log(totalCells/np.array(matrixx[matrixx > self.th].count(axis=1).add(1)))
icf_matrix=matrixx.T*np.array(idf)
return np.array(icf_matrix)
def get_Visualization(self,dim=2,method="PCA"):
if method=="PCA":
if dim>3:
print ("Please give at most three dimentions")
else:
svd = TruncatedSVD(n_components=dim)
if isspmatrix(self.data):
svd_fit = svd.fit(self.data.toarray())
svdTransform=svd.fit_transform(self.data.toarray())
else:
svd_fit = svd.fit(self.data)
svdTransform=svd.fit_transform(self.data)
if dim==3:
fig=p.figure()
ax = p3.Axes3D(fig)
ax.scatter(svdTransform[:,0], svdTransform[:,1], svdTransform[:,2], c=self.color)
ax.set_xlabel("PCA1")
ax.set_ylabel("PCA2")
ax.set_zlabel("PCA3")
fig.add_axes(ax)
p.show()
elif dim==2:
plt.scatter(svdTransform[:,0], svdTransform[:,1], c=self.color)
plt.xlabel("PCA1")
plt.ylabel("PCA2")
plt.suptitle("MICTI with k={0:d}".format(self.k), fontsize=8)
plt.legend(bbox_to_anchor=(1.65, 1.65), loc='center', ncol=1)
plt.legend(list(self.cluster_assignment))
plt.show()
else:
print ("dimentionality error")
elif method=="tsne":
if dim>3:
print ("Please give at most three dimentions")
else:
svd = TruncatedSVD(n_components=5)
if isspmatrix(self.data):
svd_fit = svd.fit(self.data.toarray())
svdTransformTsne=svd.fit_transform(self.data.toarray())
else:
svd_fit = svd.fit(self.data)
svdTransformTsne=svd.fit_transform(self.data)
X_tsne=TSNE(n_components=dim, random_state=0)
x_tsne=X_tsne.fit_transform(svdTransformTsne)
if dim==3:
fig=p.figure()
ax = p3.Axes3D(fig)
ax.scatter(x_tsne[:,0], x_tsne[:,1], x_tsne[:,2], c=self.color)
ax.set_xlabel("tsne1")
ax.set_xlabel("tsne2")
ax.set_xlabel("tsne3")
fig.add_axes(ax)
p.show()
elif dim==2:
data = pa.DataFrame(columns=['tsne_1','tsne_2','cell type'])
data['cell type']=[self.cluster_label[i] for i in list(self.cluster_assignment)]
data['tsne_1']=x_tsne[:,0]
data['tsne_2']=x_tsne[:,1]
if len(self.color_dict)>0:
facet = sns.lmplot(data=data, x='tsne_1', y='tsne_2', hue='cell type', fit_reg=False, legend=True, legend_out=True, palette=self.color_dict, order=5)
else:
facet = sns.lmplot(data=data, x='tsne_1', y='tsne_2', hue='cell type', fit_reg=False, legend=True, legend_out=True)
plt.savefig("MICTI_Plot.pdf", format="pdf", dpi=300, bbox_inches='tight')
plt.show()
else:
print ("dimetionality error")
else:
print ("Please give method==pca or method=tsne")
def get_cluster_data(self, cluster_number):
return self.data.toarray()[np.in1d(self.cluster_assignment, cluster_number),:], self.cellNames[np.in1d(self.cluster_assignment, cluster_number)]
def get_cluster_ICF_data(self, cluster_number):
return self.ICF(self.data[np.in1d(self.cluster_assignment, cluster_number),:])
    def get_cluster_CF_data(self,cluster_number):
        """CF-weighted data for the cells in ``cluster_number``.

        NOTE(review): ``self.CF`` is not defined anywhere in this class as
        visible here -- this call raises AttributeError unless CF is
        provided elsewhere (e.g. a missing sibling of ICF). Confirm before
        relying on this method.
        """
        return self.CF(self.data[np.in1d(self.cluster_assignment, cluster_number),:])
    def get_selected_cluster_marker(self, clusters):
        """Print the top-18 ICF-weighted genes for each cluster in ``clusters``.

        For every requested cluster, ranks genes by their mean ICF weight
        (computed on the subset of cells in ``clusters``) and prints the 18
        highest together with their weights (symbols translated from Ensembl
        IDs when ``self.ensembl`` is set).

        NOTE(review): ``self.CF`` is not defined in this class as visible
        here, so the ``dat_common`` line raises AttributeError -- confirm.
        NOTE(review): the return value is ``(datt, idxx)`` from the LAST loop
        iteration only, i.e. data for the final cluster in ``clusters``;
        the per-cluster gene lists accumulated in ``idd_j`` are discarded.
        """
        # Subset of cells belonging to any of the requested clusters.
        datta=self.data[np.in1d(np.array(self.cluster_assignment), clusters),:]
        index=self.cluster_assignment[np.in1d(np.array(self.cluster_assignment), clusters)]
        dat_common=self.CF(datta)
        dat_identity=self.ICF(datta)
        idd_com=[]
        idd_j=[]
        for j in clusters:
            # Mean ICF weight per gene over the cells of cluster j.
            datt=dat_identity[np.in1d(np.array(index), [j]),:]
            idxx=np.mean(datt, axis=0)
            idxx=np.array(idxx).reshape(idxx.shape[0],)
            idx = idxx.argsort()[::-1]
            iD=[]
            print('Cluster identifier',j)
            if self.ensembl:
                for i in range(18): # Print each gene along with the feature-encoding weight
                    print('{0:s}:{1:.2e}'.format(list(self.ENSEMBLID_to_geneSymbol([self.geneNames[idx[i]]]))[0], idxx[idx[i]]))
                    iD.append(list(self.ENSEMBLID_to_geneSymbol([self.geneNames[idx[i]]]))[0])
            else:
                for i in range(18): # Print each gene along with the feature-encoding weight
                    print('{0:s}:{1:.2e}'.format(self.geneNames[idx[i]], idxx[idx[i]]))
                    iD.append(self.geneNames[idx[i]])
            idd_j.append(iD)
        return datt, idxx
    def get_gene_over_representation(self,topn=10):
        """g:Profiler enrichment of each cluster's top-``topn`` ICF genes.

        For every cluster, ranks genes by their mean ICF weight over the
        cluster's cells, translates the top ``topn`` to symbols if needed,
        and queries g:Profiler (network access required).

        Returns {cluster index: enrichment DataFrame sorted by p-value}.
        Also prints the gene lists and per-cluster headers as a side effect.
        """
        enrichmentTable={}
        for i in range(self.k):
            top10Genes=[]
            print('Cluster {0:s} ({1:d} cells)'.format(self.cluster_label[i], int(np.sum(self.cluster_assignment==i))))
            # Mean ICF weight per gene across the cells of cluster i.
            idxx=np.mean(self.data_ICF[self.cluster_assignment==i,:], axis=0)
            idxx=np.array(idxx).reshape(idxx.shape[0],)
            idx = idxx.argsort()[::-1]
            for j in range(topn):
                top10Genes.append(self.geneNames[idx[j]])
            if self.ensembl:
                # Translate Ensembl IDs to symbols for the enrichment query.
                top10Genes=list(self.ENSEMBLID_to_geneSymbol(top10Genes,organisum=self.organsm))
                print(top10Genes)
            else:
                top10Genes=top10Genes
                print(top10Genes)
            enrichment = gprofiler(top10Genes, organism=self.organsm)
            enrichmentTable[i]=enrichment.sort_values(by=['p.value'])[["term.id","p.value","domain","term.name","intersection"]]
            print('')
        return enrichmentTable
    def get_MICTI_standardized_mean_over_var(self, clusters):
        """Per-gene MICTI score: z-scored mean / log-variance of ICF values.

        For the cells belonging to ``clusters``, computes per gene
        mean(ICF) / log(var(ICF) + 2) and standardizes the resulting vector
        to zero mean / unit (population) variance.
        """
        # Rows (cells) of the precomputed ICF matrix for the cluster(s).
        datta=self.data_ICF[np.in1d(np.array(self.cluster_assignment), clusters),:]
        # NOTE(review): ``(df==0).all(axis=0)`` is indexed by COLUMNS (genes)
        # but is used here as a ROW (cell) selector via .loc -- it only
        # aligns when the cell and gene counts coincide, and it never drops
        # all-zero genes. Looks like it was meant to be ``.loc[:, ~...]``;
        # confirm intent before changing (callers align the result with
        # self.geneNames, so the output length must stay n_genes).
        ccc=np.array(pa.DataFrame(datta).loc[~(pa.DataFrame(datta)==0).all(axis=0)])
        val=np.mean(ccc, axis=0)/(np.log(np.var(ccc, axis=0)+2))
        # Standardize across genes.
        z_score=(val-np.mean(val))/np.sqrt(np.var(val))
        return z_score
def calculate_pvalue(self, scores):
return 2*(1-scipy.special.ndtr(abs(scores)))
def FDR_BH(self, p):
"""Benjamini-Hochberg p-value correction for multiple hypothesis testing."""
p = np.asfarray(p)
by_descend = p.argsort()[::-1]
by_orig = by_descend.argsort()
steps = float(len(p)) / np.arange(len(p), 0, -1)
q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))
return q[by_orig]
def marker_gene_FDR_p_value(self, clusterNo):
z_score=self.get_MICTI_standardized_mean_over_var(clusterNo)
p_val=self.calculate_pvalue(z_score)
FDR_pvalue=self.FDR_BH(p_val)
result=pa.DataFrame({"Z_scores":z_score,"p_value":p_val,"Adj P-value":FDR_pvalue}, index=self.geneNames)
return result.sort_values("Adj P-value")
    def get_gene_over_representation_for_topn_genes(self,topn=10):
        """g:Profiler enrichment of each cluster's top-``topn`` marker genes.

        Unlike :meth:`get_gene_over_representation`, the ranking comes from
        :meth:`marker_gene_FDR_p_value` (adjusted p-value order) rather than
        raw mean ICF weight. Requires network access for g:Profiler.

        Returns {cluster index: enrichment DataFrame sorted by p-value};
        prints the gene lists and per-cluster headers as a side effect.
        """
        enrichmentTable={}
        for i in range(self.k):
            print('Cluster {0:s} ({1:d} cells)'.format(str(self.cluster_label[i]), int(np.sum(self.cluster_assignment==i))))
            # Genes already sorted by adjusted p-value; take the head.
            genes=list(self.marker_gene_FDR_p_value(i).index)
            top10Genes=genes[:topn]
            if self.ensembl:
                # Translate Ensembl IDs to symbols for the enrichment query.
                top10Genes=list(self.ENSEMBLID_to_geneSymbol(top10Genes,organisum=self.organsm))
                print(top10Genes)
            else:
                top10Genes=top10Genes
                print(top10Genes)
            enrichment = gprofiler(top10Genes, organism=self.organsm)
            enrichmentTable[i]=enrichment.sort_values(by=['p.value'])[["term.id","p.value","domain","term.name","intersection"]]
            print('')
        return enrichmentTable
def get_gene_list_over_representation_analysis(self, gene_list):
enrichment = gprofiler(gene_list, organism=self.organsm)
enrichmentTable=enrichment.sort_values(by=['p.value'])
return enrichmentTable
def get_markers_by_Pvalues_and_Zscore(self,cluster,threshold_pvalue=.01, threshold_z_score=0):
result=self.marker_gene_FDR_p_value(cluster)
genenames=result.loc[list(np.array(result["Adj P-value"]<threshold_pvalue) & np.array(result["Z_scores"]>threshold_z_score)),:].sort_values("Adj P-value")
genenames = genenames[~genenames.index.duplicated(keep='first')]
return genenames
def cluster_cells(self, numberOfCluster=None,subspace=False, min_sample=10, method="kmeans", maxiter=10e3, alpha=1, gamma=1, eta=0.01, eps=0.5, min_samples=5, metric='euclidean', xi=.05, min_cluster_size=.05):
if(subspace==False):
data=self.data
else:
svd = TruncatedSVD(n_components=500)
data=svd.fit_transform(mictiObject_1.data.toarray())
if method=="kmeans":
kmean=Kmeans.Kmeans(data, numberOfCluster, self.geneNames, self.cellNames)
_, self.cluster_assignment=kmean.kmeans_multiple_runs(maxiter,5)
self.k=len(set(self.cluster_assignment))
elif method=="GM":
EM_GM=GM.GM(data, numberOfCluster, self.geneNames, self.cellNames)
EM_GMM=EM_GM.EM_for_high_dimension()
self.cluster_assignment=np.argmax(EM_GMM["resp"], axis=1)
self.k=len(set(self.cluster_assignment))
elif method=="hdp":
corpusData=pa.DataFrame(data.toarray())
corpusData.columns=self.geneNames
corpusData.index=self.cellNames
cc, id2g,id2c =self.cellMatrix2cellCorpus(corpusData)
hdp=HdpModel(cc,id2g, alpha=alpha, gamma=gamma, eta=eta)
tp_dist=hdp.__getitem__(cc)
cell_tp=[max(dict(i), key=dict(i).get) for i in tp_dist]
low_conf_cluster=np.where(np.bincount(cell_tp)<min_sample)
filter_noise=[False if i in low_conf_cluster[0] else True for i in cell_tp]
new_assignment=np.array([cell_tp[i] if filter_noise[i] else 100 for i in range(len(filter_noise))])
new_assignment[new_assignment > sorted(set(new_assignment))[-2]] = sorted(set(new_assignment))[-2]+1
self.cluster_assignment=new_assignment
self.k=len(new_assignment)
elif method=="lda":
corpusData=pa.DataFrame(data.toarray())
corpusData.columns=self.geneNames
corpusData.index=self.cellNames
cc, id2g,id2c =self.cellMatrix2cellCorpus(corpusData)
lda = LdaModel(corpus=cc, id2word=id2g, num_topics=numberOfCluster, update_every=1, passes=1, alpha=alpha, eta=eta)
cell_type=lda.get_document_topics(cc)
cell_type_lda=[max(dict(i), key=dict(i).get) for i in cell_type]
self.cluster_assignment=cell_type_lda
self.k=len(set(cell_type_lda))
elif method=="aggl":
aggl_clustering=cluster.AgglomerativeClustering(n_clusters=numberOfCluster).fit(data.toarray())
self.cluster_assignment=aggl_clustering.labels_
self.k=len(set(aggl_clustering.labels_))
elif method=="birch":
birch_clustering=cluster.Birch(n_clusters=numberOfCluster).fit(data.toarray())
self.cluster_assignment=birch_clustering.predict(data.toarray())
self.k=len(set(list(self.cluster_assignment)))
elif method=="dbscan":
dbscan_clustering=cluster.DBSCAN(eps=eps, min_samples=min_samples, metric=metric).fit(data.toarray())
dbscan_lables=dbscan_clustering.labels_
dbscan_lables[dbscan_lables < 0] = dbscan_lables.max()+1
self.cluster_assignment=dbscan_lables
self.k=len(set(dbscan_lables))
elif method=="knn":
knn_sparce_connectivity=kneighbors_graph(data.toarray(), min_sample)
n_components, labels = csgraph.connected_components(knn_sparce_connectivity)
labels[labels < 0] = labels.max()+1
self.cluster_assignment=labels
self.k=len(set(labels))
elif method=="optics":
optics_clustering=cluster.OPTICS(min_samples=min_samples, xi=xi, min_cluster_size=min_cluster_size, metric=metric).fit(data.toarray())
optics_label=optics_clustering.labels_[optics_clustering.ordering_]
optics_label[optics_label < 0] = optics_label.max()+1
self.cluster_assignment = optics_label
self.k=len(set(optics_label))
self.cluster_label=[str(i) for i in range(self.k)]
return None
    def get_Radar_plot(self):
        """Radar plot of marker-gene expression shares per cell type.

        For every cluster label, collects its significant markers (default
        thresholds of get_markers_by_Pvalues_and_Zscore), averages their
        expression per cell type, normalizes each gene's row to sum to 1,
        and hands everything to ``radarPlot.radarPlot``.

        Returns ``(data, genes_by_cell_type)`` where ``data`` is
        ``[sorted labels, (label, per-type matrix), ...]`` and
        ``genes_by_cell_type`` lists each label's marker genes.
        """
        # {cluster label: list of its significant marker genes}.
        sig_genes=[dict({self.cluster_label[i]:list(self.get_markers_by_Pvalues_and_Zscore(i).index)}) for i in range(len(self.cluster_label))]
        sig_genes=dict(j for i in sig_genes for j in i.items())
        #sig_genes=sorted(sig_genes)
        genes_by_cell_type=[]
        data=[]
        data.append(sorted(list(sig_genes.keys())))
        #print(sig_genes)
        cell_typ=[self.cluster_label[j] for j in self.cluster_assignment]
        for k in sorted(sig_genes.keys()):
            #print(k,v)
            genes_by_cell_type.append(sig_genes[k])
            try:
                my_data=self.get_selected_data(sig_genes[k]).T
                if(my_data.empty):
                    # No expression rows for this label's markers.
                    data.append((k, np.array([])))
                    #data[0].remove(k)
                    continue
                my_data["cell_type"]=cell_typ
                #print(my_data.head())
                # Mean expression of each marker per cell type, then
                # normalize each gene across cell types to fractions.
                my_data=my_data.groupby("cell_type").mean().T
                my_data=(my_data.T/my_data.sum(axis=1)).T
                data.append((k, np.array(my_data)))
                del my_data
            except ValueError:
                print(k, "does not have markers")
        #print(genes_by_cell_type,data)
        radarPlot.radarPlot(data,genes_by_cell_type)
        return data,genes_by_cell_type
def get_selected_data(self, geneLists):
my_data=pa.DataFrame(self.data.toarray()).T
my_data.index=self.geneNames
my_data.columns=self.cellNames
return my_data.loc[geneLists,:]
    def heatMap(self, cluster_marker=None, row_cluster=False, col_cluster=False):
        """Marker-gene heatmap with cluster colour annotations.

        With ``cluster_marker=None``, plots the significant markers of every
        cluster (rows) against all cells (columns), with row and column
        colour bars keyed by cluster; otherwise plots only the markers of
        the given cluster, without a row colour bar.

        Side effects: updates ``self.color`` / ``self.color_dict``, writes
        'MICTI_heatmap.pdf' to the working directory and shows the figure.

        NOTE(review): ``row_cluster`` / ``col_cluster`` are accepted but
        never used in this body -- presumably meant for heatmap.heatmap;
        confirm. In the first branch ``self.color_dict`` is briefly set to
        RGB tuples and then overwritten with the hex map.
        """
        colors=['#ffe119','#0082c8','#f58231','#911eb4','#46f0f0','#f032e6','#d2f53c','#fabebe','#008080','#e6beff',
                '#aa6e28','#fffac8','#800000','#aaffc3','#808000','#ffd8b1','#000080','#808080','#FFFFFF','#000000'][:self.k]
        # Per-cell cluster label, sorted so cells group by cluster.
        cell_type=pa.Series([self.cluster_label[j] for j in self.cluster_assignment])
        cell_type=cell_type.sort_values()
        lut2=dict(zip(cell_type.sort_values().unique(), colors))
        lut2=dict(sorted(lut2.items()))
        col_colors= cell_type.map(lut2)
        col_colors.index=pa.Series(self.cellNames)[cell_type.index]
        if(cluster_marker==None):
            # All clusters: stack every cluster's significant markers.
            markers=[list(self.get_markers_by_Pvalues_and_Zscore(i, threshold_pvalue=.01,threshold_z_score=0).index) for i in range(self.k)]
            markers_label=[list(np.repeat(self.cluster_label[i], len(markers[i]), axis=0)) for i in range(len(markers))]
            markers = pa.Series(sum(markers, []))
            markers_label=pa.Series(sum(markers_label, []))
            cell_type=pa.Series([self.cluster_label[j] for j in self.cluster_assignment])
            lut = dict(zip(markers_label.sort_values().unique(), colors[:self.k]))
            row_colors = markers_label.map(lut)
            marker_data=self.get_selected_data(markers)
            row_colors.index=list(markers)
            # Reorder cells to the cluster-sorted column order.
            marker_data=marker_data.T.loc[(col_colors.index),:].T
            marker_data=marker_data[~marker_data.index.duplicated(keep='first')]
            row_colors=row_colors[~row_colors.index.duplicated(keep='first')]
            mycol=[{k:tuple(np.array(self.hex_to_rgb(v))/255)} for k, v in lut2.items()]
            self.color_dict={}
            [self.color_dict.update(c) for c in mycol]
            g=heatmap.heatmap(marker_data, row_color=row_colors, col_color=col_colors, color_label=lut2)
            self.color=[lut2[self.cluster_label[i]] for i in self.cluster_assignment]
            self.color_dict=lut2
            plt.savefig('MICTI_heatmap.pdf', format="pdf", dpi=300, bbox_inches='tight')
        else:
            # Single cluster: its markers only, no row colour bar.
            markers=self.get_markers_by_Pvalues_and_Zscore(cluster_marker, threshold_pvalue=.01,threshold_z_score=0).index
            marker_data=self.get_selected_data(list(markers))
            marker_data=marker_data.T.loc[(col_colors.index),:].T
            self.color_dict=lut2
            mycol=[{k:tuple(np.array(self.hex_to_rgb(v))/255)} for k, v in lut2.items()]
            self.color_dict={}
            [self.color_dict.update(c) for c in mycol]
            g=heatmap.heatmap(marker_data, row_color=None, col_color=col_colors, color_label=lut2)
            self.color=[lut2[self.cluster_label[i]] for i in self.cluster_assignment]
            plt.savefig('MICTI_heatmap.pdf', format="pdf", dpi=300, bbox_inches='tight')
        return plt.show()
def cluster_extrinsic_evaluation(self, trueLable):
return dict(
Jaccard_score=metrics.jaccard_similarity_score(trueLable, self.cluster_assignment),
FM_index=metrics.fowlkes_mallows_score(trueLable, self.cluster_assignment),
F_measure=metrics.f1_score(trueLable, self.cluster_assignment, average="weighted"),
V_measure=metrics.v_measure_score(trueLable, self.cluster_assignment),
ARI=metrics.adjusted_rand_score(trueLable, self.cluster_assignment),
AMI=metrics.adjusted_mutual_info_score(trueLable, self.cluster_assignment)
)
    def cluster_intrinsic_evaluation(self):
        """Internal (label-free) clustering quality scores.

        Returns a dict with the silhouette coefficient (euclidean),
        Davies-Bouldin index and Calinski-Harabasz index, all computed on
        the densified data matrix against ``self.cluster_assignment``.
        """
        return dict(
            silhouette_score=metrics.silhouette_score(self.data.toarray(), self.cluster_assignment, metric='euclidean'),
            DB_index=metrics.davies_bouldin_score(self.data.toarray(), self.cluster_assignment) ,
            CH_index=metrics.calinski_harabasz_score(self.data.toarray(), self.cluster_assignment)
        )
def hex_to_rgb(self, hex):
hex = hex.lstrip('#')
hlen = len(hex)
return tuple(int(hex[i:i+hlen//3], 16) for i in range(0, hlen, hlen//3))
| [
"gprofiler.gprofiler",
"pandas.read_csv",
"sklearn.metrics.jaccard_similarity_score",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.adjusted_rand_score",
"numpy.asfarray",
"numpy.array",
"sklearn.cluster.DBSCAN",
"numpy.mean",
"sklearn.cluster.AgglomerativeClustering",
"scipy.sparse.isspmatrix",
... | [((1870, 1937), 'pandas.Series', 'pa.Series', (['[self.cluster_label[j] for j in self.cluster_assignment]'], {}), '([self.cluster_label[j] for j in self.cluster_assignment])\n', (1879, 1937), True, 'import pandas as pa\n'), ((4878, 4898), 'numpy.array', 'np.array', (['icf_matrix'], {}), '(icf_matrix)\n', (4886, 4898), True, 'import numpy as np\n'), ((12576, 12590), 'numpy.asfarray', 'np.asfarray', (['p'], {}), '(p)\n', (12587, 12590), True, 'import numpy as np\n'), ((13041, 13147), 'pandas.DataFrame', 'pa.DataFrame', (["{'Z_scores': z_score, 'p_value': p_val, 'Adj P-value': FDR_pvalue}"], {'index': 'self.geneNames'}), "({'Z_scores': z_score, 'p_value': p_val, 'Adj P-value':\n FDR_pvalue}, index=self.geneNames)\n", (13053, 13147), True, 'import pandas as pa\n'), ((14164, 14207), 'gprofiler.gprofiler', 'gprofiler', (['gene_list'], {'organism': 'self.organsm'}), '(gene_list, organism=self.organsm)\n', (14173, 14207), False, 'from gprofiler import gprofiler\n'), ((20790, 20857), 'pandas.Series', 'pa.Series', (['[self.cluster_label[j] for j in self.cluster_assignment]'], {}), '([self.cluster_label[j] for j in self.cluster_assignment])\n', (20799, 20857), True, 'import pandas as pa\n'), ((23511, 23521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23519, 23521), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2175), 'pandas.Series', 'pa.Series', (['self.cellNames'], {}), '(self.cellNames)\n', (2159, 2175), True, 'import pandas as pa\n'), ((2996, 3138), 'pandas.read_csv', 'pa.read_csv', (['"""https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt"""'], {'sep': '"""\t"""'}), "(\n 'https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt'\n , sep='\\t')\n", (3007, 3138), True, 'import pandas as pa\n'), ((3867, 4009), 'pandas.read_csv', 'pa.read_csv', 
(['"""https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt"""'], {'sep': '"""\t"""'}), "(\n 'https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_stable_genes_human.txt'\n , sep='\\t')\n", (3878, 4009), True, 'import pandas as pa\n'), ((4849, 4862), 'numpy.array', 'np.array', (['idf'], {}), '(idf)\n', (4857, 4862), True, 'import numpy as np\n'), ((10079, 10100), 'numpy.mean', 'np.mean', (['datt'], {'axis': '(0)'}), '(datt, axis=0)\n', (10086, 10100), True, 'import numpy as np\n'), ((11203, 11266), 'numpy.mean', 'np.mean', (['self.data_ICF[self.cluster_assignment == i, :]'], {'axis': '(0)'}), '(self.data_ICF[self.cluster_assignment == i, :], axis=0)\n', (11210, 11266), True, 'import numpy as np\n'), ((11740, 11784), 'gprofiler.gprofiler', 'gprofiler', (['top10Genes'], {'organism': 'self.organsm'}), '(top10Genes, organism=self.organsm)\n', (11749, 11784), False, 'from gprofiler import gprofiler\n'), ((12224, 12244), 'numpy.mean', 'np.mean', (['ccc'], {'axis': '(0)'}), '(ccc, axis=0)\n', (12231, 12244), True, 'import numpy as np\n'), ((12612, 12623), 'pylab.argsort', 'p.argsort', ([], {}), '()\n', (12621, 12623), True, 'import pylab as p\n'), ((12752, 12796), 'numpy.minimum.accumulate', 'np.minimum.accumulate', (['(steps * p[by_descend])'], {}), '(steps * p[by_descend])\n', (12773, 12796), True, 'import numpy as np\n'), ((13847, 13891), 'gprofiler.gprofiler', 'gprofiler', (['top10Genes'], {'organism': 'self.organsm'}), '(top10Genes, organism=self.organsm)\n', (13856, 13891), False, 'from gprofiler import gprofiler\n'), ((15031, 15061), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(500)'}), '(n_components=500)\n', (15043, 15061), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((21070, 21095), 'pandas.Series', 'pa.Series', (['self.cellNames'], {}), '(self.cellNames)\n', (21079, 21095), True, 'import pandas as pa\n'), ((21585, 21652), 
'pandas.Series', 'pa.Series', (['[self.cluster_label[j] for j in self.cluster_assignment]'], {}), '([self.cluster_label[j] for j in self.cluster_assignment])\n', (21594, 21652), True, 'import pandas as pa\n'), ((22585, 22661), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MICTI_heatmap.pdf"""'], {'format': '"""pdf"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('MICTI_heatmap.pdf', format='pdf', dpi=300, bbox_inches='tight')\n", (22596, 22661), True, 'import matplotlib.pyplot as plt\n'), ((23410, 23486), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MICTI_heatmap.pdf"""'], {'format': '"""pdf"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('MICTI_heatmap.pdf', format='pdf', dpi=300, bbox_inches='tight')\n", (23421, 23486), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3482), 'pandas.read_csv', 'pa.read_csv', (['"""https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt"""'], {'sep': '"""\t"""'}), "(\n 'https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt'\n , sep='\\t')\n", (3352, 3482), True, 'import pandas as pa\n'), ((4217, 4358), 'pandas.read_csv', 'pa.read_csv', (['"""https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt"""'], {'sep': '"""\t"""'}), "(\n 'https://media.githubusercontent.com/media/insilicolife/micti/master/data/mart_export_mouse_stable_gene.txt'\n , sep='\\t')\n", (4228, 4358), True, 'import pandas as pa\n'), ((5141, 5171), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'dim'}), '(n_components=dim)\n', (5153, 5171), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((5199, 5220), 'scipy.sparse.isspmatrix', 'isspmatrix', (['self.data'], {}), '(self.data)\n', (5209, 5220), False, 'from scipy.sparse import csr_matrix, isspmatrix\n'), ((9289, 9337), 'numpy.in1d', 'np.in1d', (['self.cluster_assignment', 'cluster_number'], {}), 
'(self.cluster_assignment, cluster_number)\n', (9296, 9337), True, 'import numpy as np\n'), ((9818, 9851), 'numpy.array', 'np.array', (['self.cluster_assignment'], {}), '(self.cluster_assignment)\n', (9826, 9851), True, 'import numpy as np\n'), ((12298, 12310), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (12305, 12310), True, 'import numpy as np\n'), ((12320, 12331), 'numpy.var', 'np.var', (['val'], {}), '(val)\n', (12326, 12331), True, 'import numpy as np\n'), ((15577, 15610), 'numpy.argmax', 'np.argmax', (["EM_GMM['resp']"], {'axis': '(1)'}), "(EM_GMM['resp'], axis=1)\n", (15586, 15610), True, 'import numpy as np\n'), ((23638, 23706), 'sklearn.metrics.jaccard_similarity_score', 'metrics.jaccard_similarity_score', (['trueLable', 'self.cluster_assignment'], {}), '(trueLable, self.cluster_assignment)\n', (23670, 23706), False, 'from sklearn import metrics\n'), ((23729, 23794), 'sklearn.metrics.fowlkes_mallows_score', 'metrics.fowlkes_mallows_score', (['trueLable', 'self.cluster_assignment'], {}), '(trueLable, self.cluster_assignment)\n', (23758, 23794), False, 'from sklearn import metrics\n'), ((23818, 23890), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['trueLable', 'self.cluster_assignment'], {'average': '"""weighted"""'}), "(trueLable, self.cluster_assignment, average='weighted')\n", (23834, 23890), False, 'from sklearn import metrics\n'), ((23914, 23973), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['trueLable', 'self.cluster_assignment'], {}), '(trueLable, self.cluster_assignment)\n', (23937, 23973), False, 'from sklearn import metrics\n'), ((23991, 24054), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['trueLable', 'self.cluster_assignment'], {}), '(trueLable, self.cluster_assignment)\n', (24018, 24054), False, 'from sklearn import metrics\n'), ((24072, 24142), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['trueLable', 'self.cluster_assignment'], {}), '(trueLable, 
self.cluster_assignment)\n', (24106, 24142), False, 'from sklearn import metrics\n'), ((5630, 5640), 'pylab.figure', 'p.figure', ([], {}), '()\n', (5638, 5640), True, 'import pylab as p\n'), ((5678, 5692), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['fig'], {}), '(fig)\n', (5687, 5692), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((6050, 6058), 'pylab.show', 'p.show', ([], {}), '()\n', (6056, 6058), True, 'import pylab as p\n'), ((6871, 6899), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(5)'}), '(n_components=5)\n', (6883, 6899), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((6927, 6948), 'scipy.sparse.isspmatrix', 'isspmatrix', (['self.data'], {}), '(self.data)\n', (6937, 6948), False, 'from scipy.sparse import csr_matrix, isspmatrix\n'), ((7322, 7360), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'dim', 'random_state': '(0)'}), '(n_components=dim, random_state=0)\n', (7326, 7360), False, 'from sklearn.manifold import TSNE\n'), ((9221, 9269), 'numpy.in1d', 'np.in1d', (['self.cluster_assignment', 'cluster_number'], {}), '(self.cluster_assignment, cluster_number)\n', (9228, 9269), True, 'import numpy as np\n'), ((9430, 9478), 'numpy.in1d', 'np.in1d', (['self.cluster_assignment', 'cluster_number'], {}), '(self.cluster_assignment, cluster_number)\n', (9437, 9478), True, 'import numpy as np\n'), ((9571, 9619), 'numpy.in1d', 'np.in1d', (['self.cluster_assignment', 'cluster_number'], {}), '(self.cluster_assignment, cluster_number)\n', (9578, 9619), True, 'import numpy as np\n'), ((9724, 9757), 'numpy.array', 'np.array', (['self.cluster_assignment'], {}), '(self.cluster_assignment)\n', (9732, 9757), True, 'import numpy as np\n'), ((10118, 10132), 'numpy.array', 'np.array', (['idxx'], {}), '(idxx)\n', (10126, 10132), True, 'import numpy as np\n'), ((11281, 11295), 'numpy.array', 'np.array', (['idxx'], {}), '(idxx)\n', (11289, 11295), True, 'import numpy as np\n'), ((12079, 12112), 'numpy.array', 
'np.array', (['self.cluster_assignment'], {}), '(self.cluster_assignment)\n', (12087, 12112), True, 'import numpy as np\n'), ((12148, 12167), 'pandas.DataFrame', 'pa.DataFrame', (['datta'], {}), '(datta)\n', (12160, 12167), True, 'import pandas as pa\n'), ((12253, 12272), 'numpy.var', 'np.var', (['ccc'], {'axis': '(0)'}), '(ccc, axis=0)\n', (12259, 12272), True, 'import numpy as np\n'), ((15916, 15969), 'gensim.models.HdpModel', 'HdpModel', (['cc', 'id2g'], {'alpha': 'alpha', 'gamma': 'gamma', 'eta': 'eta'}), '(cc, id2g, alpha=alpha, gamma=gamma, eta=eta)\n', (15924, 15969), False, 'from gensim.models import HdpModel, LdaModel\n'), ((6128, 6193), 'matplotlib.pyplot.scatter', 'plt.scatter', (['svdTransform[:, 0]', 'svdTransform[:, 1]'], {'c': 'self.color'}), '(svdTransform[:, 0], svdTransform[:, 1], c=self.color)\n', (6139, 6193), True, 'import matplotlib.pyplot as plt\n'), ((6224, 6242), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""PCA1"""'], {}), "('PCA1')\n", (6234, 6242), True, 'import matplotlib.pyplot as plt\n'), ((6275, 6293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PCA2"""'], {}), "('PCA2')\n", (6285, 6293), True, 'import matplotlib.pyplot as plt\n'), ((6420, 6481), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.65, 1.65)', 'loc': '"""center"""', 'ncol': '(1)'}), "(bbox_to_anchor=(1.65, 1.65), loc='center', ncol=1)\n", (6430, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6588, 6598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6596, 6598), True, 'import matplotlib.pyplot as plt\n'), ((7502, 7512), 'pylab.figure', 'p.figure', ([], {}), '()\n', (7510, 7512), True, 'import pylab as p\n'), ((7550, 7564), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['fig'], {}), '(fig)\n', (7559, 7564), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((7907, 7915), 'pylab.show', 'p.show', ([], {}), '()\n', (7913, 7915), True, 'import pylab as p\n'), ((10037, 10052), 'numpy.array', 'np.array', (['index'], {}), 
'(index)\n', (10045, 10052), True, 'import numpy as np\n'), ((11148, 11184), 'numpy.sum', 'np.sum', (['(self.cluster_assignment == i)'], {}), '(self.cluster_assignment == i)\n', (11154, 11184), True, 'import numpy as np\n'), ((13423, 13459), 'numpy.sum', 'np.sum', (['(self.cluster_assignment == i)'], {}), '(self.cluster_assignment == i)\n', (13429, 13459), True, 'import numpy as np\n'), ((16807, 16921), 'gensim.models.LdaModel', 'LdaModel', ([], {'corpus': 'cc', 'id2word': 'id2g', 'num_topics': 'numberOfCluster', 'update_every': '(1)', 'passes': '(1)', 'alpha': 'alpha', 'eta': 'eta'}), '(corpus=cc, id2word=id2g, num_topics=numberOfCluster, update_every=\n 1, passes=1, alpha=alpha, eta=eta)\n', (16815, 16921), False, 'from gensim.models import HdpModel, LdaModel\n'), ((19928, 19945), 'numpy.array', 'np.array', (['my_data'], {}), '(my_data)\n', (19936, 19945), True, 'import numpy as np\n'), ((7992, 8047), 'pandas.DataFrame', 'pa.DataFrame', ([], {'columns': "['tsne_1', 'tsne_2', 'cell type']"}), "(columns=['tsne_1', 'tsne_2', 'cell type'])\n", (8004, 8047), True, 'import pandas as pa\n'), ((8840, 8913), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MICTI_Plot.pdf"""'], {'format': '"""pdf"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('MICTI_Plot.pdf', format='pdf', dpi=300, bbox_inches='tight')\n", (8851, 8913), True, 'import matplotlib.pyplot as plt\n'), ((8946, 8956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8954, 8956), True, 'import matplotlib.pyplot as plt\n'), ((16116, 16136), 'numpy.bincount', 'np.bincount', (['cell_tp'], {}), '(cell_tp)\n', (16127, 16136), True, 'import numpy as np\n'), ((19608, 19620), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (19616, 19620), True, 'import numpy as np\n'), ((8446, 8592), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'data', 'x': '"""tsne_1"""', 'y': '"""tsne_2"""', 'hue': '"""cell type"""', 'fit_reg': '(False)', 'legend': '(True)', 'legend_out': '(True)', 'palette': 'self.color_dict', 
'order': '(5)'}), "(data=data, x='tsne_1', y='tsne_2', hue='cell type', fit_reg=\n False, legend=True, legend_out=True, palette=self.color_dict, order=5)\n", (8456, 8592), True, 'import seaborn as sns\n'), ((8670, 8782), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'data', 'x': '"""tsne_1"""', 'y': '"""tsne_2"""', 'hue': '"""cell type"""', 'fit_reg': '(False)', 'legend': '(True)', 'legend_out': '(True)'}), "(data=data, x='tsne_1', y='tsne_2', hue='cell type', fit_reg=\n False, legend=True, legend_out=True)\n", (8680, 8782), True, 'import seaborn as sns\n'), ((12174, 12193), 'pandas.DataFrame', 'pa.DataFrame', (['datta'], {}), '(datta)\n', (12186, 12193), True, 'import pandas as pa\n'), ((14488, 14538), 'numpy.array', 'np.array', (["(result['Adj P-value'] < threshold_pvalue)"], {}), "(result['Adj P-value'] < threshold_pvalue)\n", (14496, 14538), True, 'import numpy as np\n'), ((14539, 14587), 'numpy.array', 'np.array', (["(result['Z_scores'] > threshold_z_score)"], {}), "(result['Z_scores'] > threshold_z_score)\n", (14547, 14587), True, 'import numpy as np\n'), ((17194, 17253), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'n_clusters': 'numberOfCluster'}), '(n_clusters=numberOfCluster)\n', (17225, 17253), False, 'from sklearn import cluster\n'), ((17446, 17487), 'sklearn.cluster.Birch', 'cluster.Birch', ([], {'n_clusters': 'numberOfCluster'}), '(n_clusters=numberOfCluster)\n', (17459, 17487), False, 'from sklearn import cluster\n'), ((18148, 18201), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['knn_sparce_connectivity'], {}), '(knn_sparce_connectivity)\n', (18176, 18201), False, 'from scipy.sparse import csgraph\n'), ((17705, 17768), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': 'eps', 'min_samples': 'min_samples', 'metric': 'metric'}), '(eps=eps, min_samples=min_samples, metric=metric)\n', (17719, 17768), False, 'from sklearn import cluster\n'), ((18391, 18492), 
'sklearn.cluster.OPTICS', 'cluster.OPTICS', ([], {'min_samples': 'min_samples', 'xi': 'xi', 'min_cluster_size': 'min_cluster_size', 'metric': 'metric'}), '(min_samples=min_samples, xi=xi, min_cluster_size=\n min_cluster_size, metric=metric)\n', (18405, 18492), False, 'from sklearn import cluster\n')] |
################################################################################
## Copyright (c) 2019, <NAME>
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice
## this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and#or other materials provided with the distribution.
## 3. Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
################################################################################
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
def get_map(filename):
    """Read a whitespace-separated grid of integers from *filename*.

    :param filename: path to a text file with one grid row per line.
    :return: 2-D numpy array of shape (height, width); width is taken from
             the first row, height from the number of lines.
    """
    # Context manager guarantees the file handle is closed (the old version
    # never closed it).
    with open(filename, 'r') as f:
        lines = f.readlines()
    width = len(lines[0].split())
    height = len(lines)
    # Bug fix: on Python 3, map() returns a lazy iterator and
    # np.array(<map object>) builds a 0-d object array, so the reshape below
    # raised.  Materialize the values in a list first.
    values = [int(item) for line in lines for item in line.split()]
    return np.array(values).reshape(height, width)
def plot_path(filename, ax):
    """Read an (x, y) waypoint list from *filename* and draw it on *ax* as a yellow line.

    :param filename: path to a text file with one "x y" integer pair per line.
    :param ax: matplotlib Axes (any object exposing ``plot``) to draw on.
    """
    # Close the file deterministically (the old version leaked the handle).
    with open(filename, 'r') as f:
        lines = f.readlines()
    # Bug fix: on Python 3, np.array([map(int, ...), ...]) builds an object
    # array of map iterators, so sol_values[:, 0] raised.  Build concrete
    # integer rows instead.
    sol_values = np.array([[int(v) for v in line.split()] for line in lines])
    ax.plot(sol_values[:, 0], sol_values[:, 1], 'y-')
if __name__ == '__main__':
    # Usage: <script> <map_file> <path_file>
    # Render the occupancy grid, then overlay the solution path on the same axes.
    grid = get_map(sys.argv[1])
    height, width = grid.shape
    fig, ax = plt.subplots(figsize=(10, 10))
    plt.imshow(grid, vmin=0, vmax=1)
    plt.ylim([0, height])
    plt.xlim([0, width])
    plot_path(sys.argv[2], ax)
    plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.array",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2502, 2532), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2514, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2574), 'matplotlib.pyplot.imshow', 'plt.imshow', (['map_values'], {'vmin': '(0)', 'vmax': '(1)'}), '(map_values, vmin=0, vmax=1)\n', (2546, 2574), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2613), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, map_values.shape[0]]'], {}), '([0, map_values.shape[0]])\n', (2587, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2652), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, map_values.shape[1]]'], {}), '([0, map_values.shape[1]])\n', (2626, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2697, 2699), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2168), 'numpy.array', 'np.array', (['flattened_values'], {}), '(flattened_values)\n', (2150, 2168), True, 'import numpy as np\n')] |
import os
from typing import List
from typing import Tuple
import logging
from collections import defaultdict
from collections import Counter
import json
import torch
import numpy as np
from GroundedScan.dataset import GroundedScan
# Default compute device for all tensors created below: CUDA when a GPU is
# visible to torch, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Module-level logger named after this module, per the logging convention.
logger = logging.getLogger(__name__)
class Vocabulary(object):
    """
    Object that maps words in string form to indices to be processed by numerical models.
    """

    def __init__(self, sos_token="<SOS>", eos_token="<EOS>", pad_token="<PAD>"):
        """
        NB: <PAD> token is by construction idx 0.

        :param sos_token: start-of-sequence marker, assigned idx 1.
        :param eos_token: end-of-sequence marker, assigned idx 2.
        :param pad_token: padding marker, assigned idx 0.
        """
        self.sos_token = sos_token
        self.eos_token = eos_token
        self.pad_token = pad_token
        self._idx_to_word = [pad_token, sos_token, eos_token]
        # Unknown words resolve to the padding index (always 0 by construction).
        self._word_to_idx = defaultdict(lambda: self._idx_to_word.index(self.pad_token))
        self._word_to_idx[sos_token] = 1
        self._word_to_idx[eos_token] = 2
        self._word_frequencies = Counter()

    def word_to_idx(self, word: str) -> int:
        """Return the index of `word`, or the pad index (0) for unknown words."""
        return self._word_to_idx[word]

    def idx_to_word(self, idx: int) -> str:
        """Return the word stored at index `idx`."""
        return self._idx_to_word[idx]

    def contains_word(self, word: str) -> bool:
        """Return True iff `word` has been added to this vocabulary.

        Bug fix: the previous implementation indexed the defaultdict, which
        *inserted* every queried unknown word with the pad index 0.  A later
        add_sentence() then saw the word as already present and never
        registered it in `_idx_to_word`, permanently mapping it to <PAD>
        while still counting its frequency.  `.get()` leaves the mapping
        untouched.
        """
        return self._word_to_idx.get(word, 0) != 0

    def add_sentence(self, sentence: List[str]):
        """Register every word of `sentence` and update its frequency count."""
        for word in sentence:
            if word not in self._word_to_idx:
                self._word_to_idx[word] = self.size
                self._idx_to_word.append(word)
            self._word_frequencies[word] += 1

    def most_common(self, n=10):
        """Return the `n` most frequent (word, count) pairs."""
        return self._word_frequencies.most_common(n=n)

    @property
    def pad_idx(self) -> int:
        return self.word_to_idx(self.pad_token)

    @property
    def sos_idx(self) -> int:
        return self.word_to_idx(self.sos_token)

    @property
    def eos_idx(self) -> int:
        return self.word_to_idx(self.eos_token)

    @property
    def size(self) -> int:
        return len(self._idx_to_word)

    @classmethod
    def load(cls, path: str):
        """Restore a vocabulary previously written by `save`."""
        assert os.path.exists(path), "Trying to load a vocabulary from a non-existing file {}".format(path)
        with open(path, 'r') as infile:
            all_data = json.load(infile)
            sos_token = all_data["sos_token"]
            eos_token = all_data["eos_token"]
            pad_token = all_data["pad_token"]
            vocab = cls(sos_token=sos_token, eos_token=eos_token, pad_token=pad_token)
            vocab._idx_to_word = all_data["idx_to_word"]
            # int() defaults to 0, which equals the pad index -- same effect as
            # the factory installed by __init__.
            vocab._word_to_idx = defaultdict(int)
            for word, idx in all_data["word_to_idx"].items():
                vocab._word_to_idx[word] = idx
            vocab._word_frequencies = Counter(all_data["word_frequencies"])
        return vocab

    def to_dict(self) -> dict:
        """Serialize the vocabulary state to a JSON-compatible dict."""
        return {
            "sos_token": self.sos_token,
            "eos_token": self.eos_token,
            "pad_token": self.pad_token,
            "idx_to_word": self._idx_to_word,
            "word_to_idx": self._word_to_idx,
            "word_frequencies": self._word_frequencies
        }

    def save(self, path: str) -> str:
        """Write the vocabulary as JSON to `path` and return `path`."""
        with open(path, 'w') as outfile:
            json.dump(self.to_dict(), outfile, indent=4)
        return path
class GroundedScanDataset(object):
"""
Loads a GroundedScan instance from a specified location.
"""
    def __init__(self, path_to_data: str, save_directory: str, k: int, upsample_isolated=100, split="train",
                 input_vocabulary_file="", target_vocabulary_file="", generate_vocabulary=False,
                 isolate_examples_with="cautiously", simplified_objective=False):
        """
        Load one split of a GroundedScan dataset and prepare (or load) its vocabularies.

        :param path_to_data: path to the gSCAN dataset file on disk.
        :param save_directory: directory for vocabularies and dataset bookkeeping.
        :param k: number of isolated (adverb) examples; together with upsample_isolated this fixes
                  the expected count of `isolate_examples_with` examples in the training set.
        :param upsample_isolated: upsampling factor applied to the isolated examples.
        :param split: which split to read ("train", "dev" or "test").
        :param input_vocabulary_file: filename of a saved input vocabulary (required unless generating).
        :param target_vocabulary_file: filename of a saved target vocabulary (required unless generating).
        :param generate_vocabulary: when True, build vocabularies from the data instead of loading them.
        :param isolate_examples_with: adverb whose examples are isolated and upsampled.
        :param simplified_objective: debugging-only flag; asserted False below.
        """
        assert os.path.exists(path_to_data), "Trying to read a gSCAN dataset from a non-existing file {}.".format(
            path_to_data)
        self.simplified_objective = simplified_objective
        assert not simplified_objective, "Simplified objective for debugging purposes only."
        # When loading (not generating) vocabularies, both files must already exist.
        if not generate_vocabulary:
            assert os.path.exists(os.path.join(save_directory, input_vocabulary_file)) and os.path.exists(
                os.path.join(save_directory, target_vocabulary_file)), \
                "Trying to load vocabularies from non-existing files."
        if split == "test" and generate_vocabulary:
            logger.warning("WARNING: generating a vocabulary from the test set.")
        self.dataset = GroundedScan.load_dataset_from_file(path_to_data, save_directory=save_directory, k=k,
                                                           upsample_isolated=upsample_isolated,
                                                           isolate_examples_with=isolate_examples_with)
        # Log verb/adverb co-occurrence statistics when the dataset tracked them.
        if self.dataset._data_statistics.get("adverb_1"):
            logger.info("Verb-adverb combinations in training set: ")
            for adverb, items in self.dataset._data_statistics["train"]["verb_adverb_combinations"].items():
                logger.info("Verbs for adverb: {}".format(adverb))
                for key, count in items.items():
                    logger.info("  {}: {} occurrences.".format(key, count))
            logger.info("Verb-adverb combinations in dev set: ")
            for adverb, items in self.dataset._data_statistics["dev"]["verb_adverb_combinations"].items():
                logger.info("Verbs for adverb: {}".format(adverb))
                for key, count in items.items():
                    logger.info("  {}: {} occurrences.".format(key, count))
        # Sanity check: the isolated adverb must occur exactly k * upsample_isolated
        # times in the training data (only meaningful for train/dev splits).
        actual_k = self.dataset._data_statistics["train"]["manners_in_command"][isolate_examples_with]
        expected_k = k * upsample_isolated
        if split in ["train", "dev"]:
            assert actual_k == expected_k, \
                "Chose k=%d and upsample=%d (expected k=%d) but actual number of examples with %s in training set is %d." % (
                    k, upsample_isolated, expected_k, isolate_examples_with, actual_k
                )
        # image_dimensions is filled in lazily the first time an image is read.
        self.image_dimensions = None
        self.image_channels = 16
        self.split = split
        self.directory = save_directory
        # Keeping track of data.
        self._examples = np.array([])
        self._input_lengths = np.array([])
        self._target_lengths = np.array([])
        if generate_vocabulary:
            logger.info("Generating vocabularies...")
            self.input_vocabulary = Vocabulary()
            self.target_vocabulary = Vocabulary()
            self.read_vocabularies()
            logger.info("Done generating vocabularies.")
        else:
            logger.info("Loading vocabularies...")
            self.input_vocabulary = Vocabulary.load(os.path.join(save_directory, input_vocabulary_file))
            self.target_vocabulary = Vocabulary.load(os.path.join(save_directory, target_vocabulary_file))
            logger.info("Done loading vocabularies.")
def convert_target_to_simple(self, example):
verb_in_command = example["input_command"][0]
adverb_in_command = example["input_command"][-1]
if adverb_in_command not in ["while spinning", "while zigzagging", "cautiously", "hesitantly"]:
adverb_in_command = ""
if verb_in_command == "push" or verb_in_command == "pull":
interactions = [command for command in example["target_command"] if command == verb_in_command]
else:
interactions = []
interaction_target = []
if verb_in_command not in interaction_target:
interaction_target += interactions
if adverb_in_command == "while zigzagging":
interaction_target = interactions
return interaction_target
def read_vocabularies(self) -> {}:
"""
Loop over all examples in the dataset and add the words in them to the vocabularies.
"""
logger.info("Populating vocabulary...")
for i, example in enumerate(self.dataset.get_examples_with_image(self.split,
simple_situation_representation=True)):
self.input_vocabulary.add_sentence(example["input_command"])
if not self.simplified_objective:
self.target_vocabulary.add_sentence(example["target_command"])
else:
interaction_target = self.convert_target_to_simple(example)
self.target_vocabulary.add_sentence(interaction_target)
def save_vocabularies(self, input_vocabulary_file: str, target_vocabulary_file: str):
self.input_vocabulary.save(os.path.join(self.directory, input_vocabulary_file))
self.target_vocabulary.save(os.path.join(self.directory, target_vocabulary_file))
def get_vocabulary(self, vocabulary: str) -> Vocabulary:
if vocabulary == "input":
vocab = self.input_vocabulary
elif vocabulary == "target":
vocab = self.target_vocabulary
else:
raise ValueError("Specified unknown vocabulary in sentence_to_array: {}".format(vocabulary))
return vocab
def shuffle_data(self) -> {}:
"""
Reorder the data examples and reorder the lengths of the input and target commands accordingly.
"""
random_permutation = np.random.permutation(len(self._examples))
self._examples = self._examples[random_permutation]
self._target_lengths = self._target_lengths[random_permutation]
self._input_lengths = self._input_lengths[random_permutation]
    def get_data_iterator(self, batch_size=None, max_examples=None,
                          simple_situation_representation=True, shuffle=False) -> {}:
        """
        Generator: loop over the data examples in GroundedScan, convert each to tensors (with the
        lengths needed for padding) and yield full batches via make_batch.

        :param batch_size: number of examples per yielded batch (required, must be int).
        :param max_examples: how many examples to read maximally, read all if None.
        :param simple_situation_representation: whether to read the full situation image in RGB or the
            simplified smaller representation.
        :param shuffle: whether the underlying dataset iterator should shuffle examples.

        NOTE(review): a trailing partial batch (when the example count is not a multiple of
        batch_size) is never yielded -- confirm this truncation is intended.
        """
        assert isinstance(batch_size, int), "Provide a batch size."
        logger.info("Converting dataset to tensors...")
        # Accumulators for the batch currently being filled; reset after each yield.
        current_examples_batch = np.array([])
        current_input_lengths = np.array([])
        current_target_lengths = np.array([])
        for i, example in enumerate(self.dataset.get_examples_with_image(self.split,
                                                                         shuffle=shuffle,
                                                                         simple_situation_representation=simple_situation_representation,
                                                                         adverb_inputs=False)):
            # NOTE(review): this guard checks self._examples, which this method never appends
            # to (only read_dataset does), so max_examples likely has no effect here -- verify.
            if max_examples:
                if len(self._examples) > max_examples:
                    return
            empty_example = {}
            input_commands = example["input_command"]
            if not self.simplified_objective:
                target_commands = example["target_command"]
            else:
                target_commands = self.convert_target_to_simple(example)
            # Book-keeping carried alongside the tensors for evaluation/inspection.
            example_information = {
                # "adverb": example["adverb"],
                # "type_adverb": example["type_adverb"],
                "original_input": input_commands,
                "original_output": target_commands,
                "gscan_final_target": example["target_command"],
                # "verb_in_command": example["verb_in_command"],
                "derivation_representation": example["derivation_representation"],
                "situation_representation": example["situation_representation"]
            }
            # Convert token sequences to index arrays, then to (1, seq_len) long tensors.
            input_array = self.sentence_to_array(input_commands, vocabulary="input")
            target_array = self.sentence_to_array(target_commands, vocabulary="target")
            empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["situation_image"] = torch.tensor(example["situation_image"],
                                                           dtype=torch.float, device=device).unsqueeze(dim=0)
            empty_example["example_information"] = example_information
            # Record unpadded lengths; make_batch pads to the batch maximum.
            current_input_lengths = np.append(current_input_lengths, [len(input_array)])
            current_target_lengths = np.append(current_target_lengths, [len(target_array)])
            current_examples_batch = np.append(current_examples_batch, [empty_example])
            if len(current_examples_batch) == batch_size:
                yield self.make_batch(current_examples_batch, current_input_lengths, current_target_lengths)
                current_examples_batch = np.array([])
                current_input_lengths = np.array([])
                current_target_lengths = np.array([])
def make_batch(self, examples, input_lengths, target_lengths) -> Tuple[torch.Tensor, List[int],
torch.Tensor, List[dict],
torch.Tensor, List[int]]:
"""
Iterate over batches of example tensors, pad them to the max length in the batch and yield.
:param batch_size: how many examples to put in each batch.
:return: tuple of input commands batch, corresponding input lengths, adverb batch,
target commands batch and corresponding target lengths.
"""
max_input_length = np.max(input_lengths)
max_target_length = np.max(target_lengths)
input_batch = []
adverb_batch = []
target_batch = []
situation_representation_batch = []
derivation_representation_batch = []
agent_positions_batch = []
target_positions_batch = []
situation_batch = []
original_input_batch = []
original_output_batch = []
verb_in_command_batch = []
adverb_type_batch = []
gscan_final_targets_batch = []
for example in examples:
to_pad_input = max_input_length - example["input_tensor"].size(1)
to_pad_target = max_target_length - example["target_tensor"].size(1)
padded_input = torch.cat([
example["input_tensor"],
torch.zeros(int(to_pad_input), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
padded_target = torch.cat([
example["target_tensor"],
torch.zeros(int(to_pad_target), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
input_batch.append(padded_input)
target_batch.append(padded_target)
# adverb_batch.append(example["adverb_input"])
situation_repr = example["example_information"]["situation_representation"]
situation_representation_batch.append(situation_repr)
situation_batch.append(example["situation_image"])
agent_position = torch.tensor(
(int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["agent_position"]["column"]), dtype=torch.long,
device=device).unsqueeze(dim=0)
agent_positions_batch.append(agent_position)
target_position = torch.tensor(
(int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["target_object"]["position"]["column"]),
dtype=torch.long, device=device).unsqueeze(dim=0)
target_positions_batch.append(target_position)
# adverb_type_batch.append(example["example_information"]["type_adverb"])
derivation_representation_batch.append(example["example_information"]["derivation_representation"])
# original_input_batch.append(example["example_information"]["original_input"])
# original_output_batch.append(example["example_information"]["original_output"])
# verb_in_command_batch.append(example["example_information"]["verb_in_command"])
# gscan_final_targets_batch.append(example["example_information"]["gscan_final_target"])
return (torch.cat(input_batch, dim=0), input_lengths, derivation_representation_batch,
torch.cat(situation_batch, dim=0), situation_representation_batch, torch.cat(target_batch, dim=0),
target_lengths, torch.cat(agent_positions_batch, dim=0), torch.cat(target_positions_batch, dim=0))
    def read_dataset(self, max_examples=None, simple_situation_representation=True) -> None:
        """
        Loop over the data examples in GroundedScan and convert them to tensors, also save the lengths
        for input and target sequences that are needed for padding.

        Each converted example is appended to ``self._examples`` as a dict of
        tensors plus bookkeeping metadata.

        :param max_examples: how many examples to read maximally, read all if None.
        :param simple_situation_representation: whether to read the full situation image in RGB or the simplified
        smaller representation.
        """
        logger.info("Converting dataset to tensors...")
        for i, example in enumerate(self.dataset.get_examples_with_image(self.split, simple_situation_representation)):
            # Stop once the requested number of examples has been collected.
            if max_examples:
                if len(self._examples) > max_examples:
                    return
            empty_example = {}
            input_commands = example["input_command"]
            target_commands = example["target_command"]
            #equivalent_target_commands = example["equivalent_target_command"]
            situation_image = example["situation_image"]
            if i == 0:
                # Cache image geometry from the first example; assumes all
                # situation images in the split share one size -- TODO confirm.
                self.image_dimensions = situation_image.shape[0]
                self.image_channels = situation_image.shape[-1]
            situation_repr = example["situation_representation"]
            input_array = self.sentence_to_array(input_commands, vocabulary="input")
            target_array = self.sentence_to_array(target_commands, vocabulary="target")
            #equivalent_target_array = self.sentence_to_array(equivalent_target_commands, vocabulary="target")
            # Command sequences as [1, seq_len] long tensors (batch dim added).
            empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
                dim=0)
            #empty_example["equivalent_target_tensor"] = torch.tensor(equivalent_target_array, dtype=torch.long,
            #                                                         device=device).unsqueeze(dim=0)
            empty_example["situation_tensor"] = torch.tensor(situation_image, dtype=torch.float, device=device
                                                             ).unsqueeze(dim=0)
            empty_example["situation_representation"] = situation_repr
            empty_example["derivation_representation"] = example["derivation_representation"]
            # Flatten (row, column) grid coordinates into a single cell index:
            # index = row * grid_size + column.
            empty_example["agent_position"] = torch.tensor(
                (int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["agent_position"]["column"]), dtype=torch.long,
                device=device).unsqueeze(dim=0)
            empty_example["target_position"] = torch.tensor(
                (int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
                int(situation_repr["target_object"]["position"]["column"]),
                dtype=torch.long, device=device).unsqueeze(dim=0)
            # NOTE(review): np.append copies the whole array on every call
            # (quadratic in dataset size); kept as-is to preserve behavior.
            self._input_lengths = np.append(self._input_lengths, [len(input_array)])
            self._target_lengths = np.append(self._target_lengths, [len(target_array)])
            self._examples = np.append(self._examples, [empty_example])
def sentence_to_array(self, sentence: List[str], vocabulary: str) -> List[int]:
"""
Convert each string word in a sentence to the corresponding integer from the vocabulary and append
a start-of-sequence and end-of-sequence token.
:param sentence: the sentence in words (strings)
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in integers.
"""
vocab = self.get_vocabulary(vocabulary)
sentence_array = [vocab.sos_idx]
for word in sentence:
sentence_array.append(vocab.word_to_idx(word))
sentence_array.append(vocab.eos_idx)
return sentence_array
def array_to_sentence(self, sentence_array: List[int], vocabulary: str) -> List[str]:
"""
Translate each integer in a sentence array to the corresponding word.
:param sentence_array: array with integers representing words from the vocabulary.
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in words.
"""
vocab = self.get_vocabulary(vocabulary)
return [vocab.idx_to_word(word_idx) for word_idx in sentence_array]
    @property
    def num_examples(self):
        """Number of examples currently loaded into this dataset."""
        return len(self._examples)
    @property
    def input_vocabulary_size(self):
        """Size of the input vocabulary."""
        return self.input_vocabulary.size
    @property
    def target_vocabulary_size(self):
        """Size of the target vocabulary."""
        return self.target_vocabulary.size
| [
"logging.getLogger",
"os.path.exists",
"os.path.join",
"numpy.max",
"collections.Counter",
"numpy.array",
"numpy.append",
"torch.cuda.is_available",
"collections.defaultdict",
"torch.tensor",
"json.load",
"GroundedScan.dataset.GroundedScan.load_dataset_from_file",
"torch.cat"
] | [((313, 340), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (330, 340), False, 'import logging\n'), ((266, 291), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (289, 291), False, 'import torch\n'), ((1002, 1011), 'collections.Counter', 'Counter', ([], {}), '()\n', (1009, 1011), False, 'from collections import Counter\n'), ((2087, 2107), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2101, 2107), False, 'import os\n'), ((3703, 3731), 'os.path.exists', 'os.path.exists', (['path_to_data'], {}), '(path_to_data)\n', (3717, 3731), False, 'import os\n'), ((4423, 4599), 'GroundedScan.dataset.GroundedScan.load_dataset_from_file', 'GroundedScan.load_dataset_from_file', (['path_to_data'], {'save_directory': 'save_directory', 'k': 'k', 'upsample_isolated': 'upsample_isolated', 'isolate_examples_with': 'isolate_examples_with'}), '(path_to_data, save_directory=\n save_directory, k=k, upsample_isolated=upsample_isolated,\n isolate_examples_with=isolate_examples_with)\n', (4458, 4599), False, 'from GroundedScan.dataset import GroundedScan\n'), ((6159, 6171), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6167, 6171), True, 'import numpy as np\n'), ((6202, 6214), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6210, 6214), True, 'import numpy as np\n'), ((6246, 6258), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6254, 6258), True, 'import numpy as np\n'), ((10271, 10283), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10279, 10283), True, 'import numpy as np\n'), ((10316, 10328), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10324, 10328), True, 'import numpy as np\n'), ((10362, 10374), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10370, 10374), True, 'import numpy as np\n'), ((13720, 13741), 'numpy.max', 'np.max', (['input_lengths'], {}), '(input_lengths)\n', (13726, 13741), True, 'import numpy as np\n'), ((13770, 13792), 'numpy.max', 'np.max', (['target_lengths'], {}), 
'(target_lengths)\n', (13776, 13792), True, 'import numpy as np\n'), ((2243, 2260), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (2252, 2260), False, 'import json\n'), ((2576, 2592), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2587, 2592), False, 'from collections import defaultdict\n'), ((2740, 2777), 'collections.Counter', 'Counter', (["all_data['word_frequencies']"], {}), "(all_data['word_frequencies'])\n", (2747, 2777), False, 'from collections import Counter\n'), ((8546, 8597), 'os.path.join', 'os.path.join', (['self.directory', 'input_vocabulary_file'], {}), '(self.directory, input_vocabulary_file)\n', (8558, 8597), False, 'import os\n'), ((8635, 8687), 'os.path.join', 'os.path.join', (['self.directory', 'target_vocabulary_file'], {}), '(self.directory, target_vocabulary_file)\n', (8647, 8687), False, 'import os\n'), ((12664, 12714), 'numpy.append', 'np.append', (['current_examples_batch', '[empty_example]'], {}), '(current_examples_batch, [empty_example])\n', (12673, 12714), True, 'import numpy as np\n'), ((16442, 16471), 'torch.cat', 'torch.cat', (['input_batch'], {'dim': '(0)'}), '(input_batch, dim=0)\n', (16451, 16471), False, 'import torch\n'), ((16536, 16569), 'torch.cat', 'torch.cat', (['situation_batch'], {'dim': '(0)'}), '(situation_batch, dim=0)\n', (16545, 16569), False, 'import torch\n'), ((16603, 16633), 'torch.cat', 'torch.cat', (['target_batch'], {'dim': '(0)'}), '(target_batch, dim=0)\n', (16612, 16633), False, 'import torch\n'), ((16666, 16705), 'torch.cat', 'torch.cat', (['agent_positions_batch'], {'dim': '(0)'}), '(agent_positions_batch, dim=0)\n', (16675, 16705), False, 'import torch\n'), ((16707, 16747), 'torch.cat', 'torch.cat', (['target_positions_batch'], {'dim': '(0)'}), '(target_positions_batch, dim=0)\n', (16716, 16747), False, 'import torch\n'), ((19985, 20027), 'numpy.append', 'np.append', (['self._examples', '[empty_example]'], {}), '(self._examples, [empty_example])\n', (19994, 20027), True, 
'import numpy as np\n'), ((6655, 6706), 'os.path.join', 'os.path.join', (['save_directory', 'input_vocabulary_file'], {}), '(save_directory, input_vocabulary_file)\n', (6667, 6706), False, 'import os\n'), ((6761, 6813), 'os.path.join', 'os.path.join', (['save_directory', 'target_vocabulary_file'], {}), '(save_directory, target_vocabulary_file)\n', (6773, 6813), False, 'import os\n'), ((12923, 12935), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12931, 12935), True, 'import numpy as np\n'), ((12976, 12988), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12984, 12988), True, 'import numpy as np\n'), ((13030, 13042), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13038, 13042), True, 'import numpy as np\n'), ((4049, 4100), 'os.path.join', 'os.path.join', (['save_directory', 'input_vocabulary_file'], {}), '(save_directory, input_vocabulary_file)\n', (4061, 4100), False, 'import os\n'), ((4138, 4190), 'os.path.join', 'os.path.join', (['save_directory', 'target_vocabulary_file'], {}), '(save_directory, target_vocabulary_file)\n', (4150, 4190), False, 'import os\n'), ((11944, 12002), 'torch.tensor', 'torch.tensor', (['input_array'], {'dtype': 'torch.long', 'device': 'device'}), '(input_array, dtype=torch.long, device=device)\n', (11956, 12002), False, 'import torch\n'), ((12082, 12141), 'torch.tensor', 'torch.tensor', (['target_array'], {'dtype': 'torch.long', 'device': 'device'}), '(target_array, dtype=torch.long, device=device)\n', (12094, 12141), False, 'import torch\n'), ((12223, 12297), 'torch.tensor', 'torch.tensor', (["example['situation_image']"], {'dtype': 'torch.float', 'device': 'device'}), "(example['situation_image'], dtype=torch.float, device=device)\n", (12235, 12297), False, 'import torch\n'), ((18375, 18433), 'torch.tensor', 'torch.tensor', (['input_array'], {'dtype': 'torch.long', 'device': 'device'}), '(input_array, dtype=torch.long, device=device)\n', (18387, 18433), False, 'import torch\n'), ((18513, 18572), 'torch.tensor', 
'torch.tensor', (['target_array'], {'dtype': 'torch.long', 'device': 'device'}), '(target_array, dtype=torch.long, device=device)\n', (18525, 18572), False, 'import torch\n'), ((18870, 18933), 'torch.tensor', 'torch.tensor', (['situation_image'], {'dtype': 'torch.float', 'device': 'device'}), '(situation_image, dtype=torch.float, device=device)\n', (18882, 18933), False, 'import torch\n')] |
import os
import sys
import numpy as np
import scipy.io
from PIL import Image
sys.path.append('libs')
import utils
# Number of images available per dataset split.
nImgs_map = {'train': 11264, 'val': 2273, 'test': 541}
# Category names; list index + 1 becomes the class id passed to add_class().
# NOTE(review): "cloud" appears twice in this list -- confirm whether the
# second occurrence should be a different category.
cate = ["bicycle", "car", "motorcycle", "airplane", "fire hydrant", "traffic light", "cat", "dog",
        "horse", "sheep", "cow", "elephant", "other", "zebra", "giraffe", "cloud", "grass", "cloud", "tree"]
class SketchDataset(utils.Dataset):
    """Generates the sketchyscene dataset."""

    def __init__(self, dataset_base_dir):
        self.dataset_base_dir = dataset_base_dir
        super(SketchDataset, self).__init__()

    def load_sketches(self, mode):
        """Register every class and image entry for the given split."""
        assert mode in ["train", "val", "test"]
        # One class per category name; class ids start at 1.
        for cat_idx, cat_name in enumerate(cate):
            self.add_class("sketchyscene", cat_idx + 1, cat_name)
        # One image entry per sample; the actual file path is resolved
        # lazily in load_image(), so only the split ("mode") is stored.
        for i in range(nImgs_map[mode]):
            self.add_image("sketchyscene", image_id=i, path="", mode=mode)

    def load_image(self, image_id):
        """Load the specified image and return a [H,W,3] Numpy array.
        """
        info = self.image_info[image_id]
        image_name = str(image_id + 1) + '.png'
        image_path = os.path.join(self.dataset_base_dir, info['mode'],
                                  'DRAWING_GT', image_name)
        rgb_image = Image.open(image_path).convert("RGB")
        return np.array(rgb_image, dtype=np.float32)  # shape = [H, W, 3]

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] != "sketchyscene":
            super(self.__class__).image_reference(self, image_id)
        else:
            return info['mode']

    def load_mask(self, image_id):
        """Load instance masks for the given image.
        Returns:
            masks: A bool array of shape [height, width, instance count] with
                a binary mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        info = self.image_info[image_id]
        mode = info['mode']
        gt_name = str(image_id + 1) + '.mat'
        mask_class_path = os.path.join(self.dataset_base_dir, mode, 'CLASS_GT', gt_name)
        mask_instance_path = os.path.join(self.dataset_base_dir, mode, 'INSTANCE_GT', gt_name)

        INSTANCE_GT = scipy.io.loadmat(mask_instance_path)['INSTANCE_GT']
        INSTANCE_GT = np.array(INSTANCE_GT, dtype=np.uint8)  # shape=(750, 750)
        CLASS_GT = scipy.io.loadmat(mask_class_path)['CLASS_GT']  # (750, 750)

        # Instance ids present in the map in ascending order (0 = background).
        instance_ids = np.unique(INSTANCE_GT)
        instance_ids = instance_ids[instance_ids != 0]

        n_instances = len(instance_ids)
        mask_set = np.zeros([n_instances, INSTANCE_GT.shape[0], INSTANCE_GT.shape[1]], dtype=np.uint8)
        class_id_set = np.zeros([n_instances], dtype=np.uint8)

        for out_idx, instance_id in enumerate(instance_ids):
            mask = (INSTANCE_GT == instance_id).astype(np.uint8)
            mask_set[out_idx] = mask
            # Most frequent nonzero class label inside this instance mask.
            label_counts = np.bincount((CLASS_GT * mask).flatten())[1:]
            class_id_set[out_idx] = np.argmax(label_counts) + 1

        return np.transpose(mask_set, (1, 2, 0)), class_id_set
| [
"PIL.Image.open",
"os.path.join",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.transpose",
"sys.path.append"
] | [((79, 102), 'sys.path.append', 'sys.path.append', (['"""libs"""'], {}), "('libs')\n", (94, 102), False, 'import sys\n'), ((1286, 1341), 'os.path.join', 'os.path.join', (['self.dataset_base_dir', 'mode', '"""DRAWING_GT"""'], {}), "(self.dataset_base_dir, mode, 'DRAWING_GT')\n", (1298, 1341), False, 'import os\n'), ((1363, 1404), 'os.path.join', 'os.path.join', (['images_base_dir', 'image_name'], {}), '(images_base_dir, image_name)\n', (1375, 1404), False, 'import os\n'), ((1449, 1471), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1459, 1471), False, 'from PIL import Image\n'), ((1525, 1558), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (1533, 1558), True, 'import numpy as np\n'), ((2476, 2529), 'os.path.join', 'os.path.join', (['self.dataset_base_dir', 'mode', '"""CLASS_GT"""'], {}), "(self.dataset_base_dir, mode, 'CLASS_GT')\n", (2488, 2529), False, 'import os\n'), ((2558, 2614), 'os.path.join', 'os.path.join', (['self.dataset_base_dir', 'mode', '"""INSTANCE_GT"""'], {}), "(self.dataset_base_dir, mode, 'INSTANCE_GT')\n", (2570, 2614), False, 'import os\n'), ((2641, 2686), 'os.path.join', 'os.path.join', (['class_base_dir', 'mask_class_name'], {}), '(class_base_dir, mask_class_name)\n', (2653, 2686), False, 'import os\n'), ((2716, 2767), 'os.path.join', 'os.path.join', (['instance_base_dir', 'mask_instance_name'], {}), '(instance_base_dir, mask_instance_name)\n', (2728, 2767), False, 'import os\n'), ((2865, 2902), 'numpy.array', 'np.array', (['INSTANCE_GT'], {'dtype': 'np.uint8'}), '(INSTANCE_GT, dtype=np.uint8)\n', (2873, 2902), True, 'import numpy as np\n'), ((3261, 3293), 'numpy.count_nonzero', 'np.count_nonzero', (['instance_count'], {}), '(instance_count)\n', (3277, 3293), True, 'import numpy as np\n'), ((3394, 3484), 'numpy.zeros', 'np.zeros', (['[nonzero_count, INSTANCE_GT.shape[0], INSTANCE_GT.shape[1]]'], {'dtype': 'np.uint8'}), '([nonzero_count, INSTANCE_GT.shape[0], 
INSTANCE_GT.shape[1]], dtype\n =np.uint8)\n', (3402, 3484), True, 'import numpy as np\n'), ((3503, 3544), 'numpy.zeros', 'np.zeros', (['[nonzero_count]'], {'dtype': 'np.uint8'}), '([nonzero_count], dtype=np.uint8)\n', (3511, 3544), True, 'import numpy as np\n'), ((4269, 4302), 'numpy.transpose', 'np.transpose', (['mask_set', '(1, 2, 0)'], {}), '(mask_set, (1, 2, 0))\n', (4281, 4302), True, 'import numpy as np\n'), ((3761, 3831), 'numpy.zeros', 'np.zeros', (['[INSTANCE_GT.shape[0], INSTANCE_GT.shape[1]]'], {'dtype': 'np.uint8'}), '([INSTANCE_GT.shape[0], INSTANCE_GT.shape[1]], dtype=np.uint8)\n', (3769, 3831), True, 'import numpy as np\n'), ((4126, 4154), 'numpy.argmax', 'np.argmax', (['class_gt_filtered'], {}), '(class_gt_filtered)\n', (4135, 4154), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019 Karlsruhe Institute of Technology - Steinbuch Centre for Computing
# This code is distributed under the MIT License
# Please, see the LICENSE file
#
import os
import numpy as np
import dogs_breed_det.config as cfg
import dogs_breed_det.dataset.data_utils as dutils
def set_features_file(dataset_type, network='Resnet50', return_type='path'):
    """ Function
    Returns according to the dataset_type and network either
    directory with the file, filename, or full path to the file (default)
    """
    # Directory holding the features file (relative, not the full path).
    file_dir = os.path.join('data', 'bottleneck_features')
    # Bare file name, e.g. Dogs_Resnet50_features_train.npz
    file_name = 'Dogs_{}_features_{}.npz'.format(network, dataset_type)
    # Absolute location of the file.
    file_path = os.path.join(cfg.BASE_DIR, file_dir, file_name)

    if return_type == 'dir':
        return file_dir
    if return_type == 'file':
        return file_name
    return file_path
def build_features(data_type, network='Resnet50'):
    """Build bottleneck_features for set of files"""
    # Map network name to its feature-extraction function.
    nets = {'VGG16': extract_VGG16,
            'VGG19': extract_VGG19,
            'Resnet50': extract_Resnet50,
            'InceptionV3': extract_InceptionV3,
            'Xception': extract_Xception,
            }

    data_dir = os.path.join(cfg.BASE_DIR, 'data', cfg.Dog_DataDir, data_type)
    img_files = dutils.load_data_files(data_dir)
    print("[DEBUG] build_features, img_files: ", img_files[:5])

    extract = nets[network]
    bottleneck_features = extract(dutils.paths_to_tensor(img_files))

    bottleneck_path = set_features_file(data_type, network,
                                       return_type='path')
    # Store features under a key matching the dataset split; any other
    # split name falls back to the generic "features" key.
    key = data_type if data_type in ('train', 'test', 'valid') else 'features'
    np.savez(bottleneck_path, **{key: bottleneck_features})

    print("[INFO] Bottleneck features size (build_features):",
          bottleneck_features.shape)
    return bottleneck_features
def load_features(data_type, network='Resnet50'):
    """Load features from the file
    Only one dataset, e.g. train, valid, test is loaded
    """
    bottleneck_path = set_features_file(data_type, network)
    print("[INFO] Using %s" % bottleneck_path)
    archive = np.load(bottleneck_path)
    return archive[data_type]
def extract_VGG16(tensor):
    """Return VGG16 (ImageNet weights, no top) bottleneck features."""
    from keras.applications.vgg16 import VGG16, preprocess_input
    model = VGG16(weights='imagenet', include_top=False)
    return model.predict(preprocess_input(tensor))


def extract_VGG19(tensor):
    """Return VGG19 (ImageNet weights, no top) bottleneck features."""
    from keras.applications.vgg19 import VGG19, preprocess_input
    model = VGG19(weights='imagenet', include_top=False)
    return model.predict(preprocess_input(tensor))


def extract_Resnet50(tensor):
    """Return ResNet50 (ImageNet weights, no top) bottleneck features."""
    from keras.applications.resnet50 import ResNet50, preprocess_input
    model = ResNet50(weights='imagenet', include_top=False)
    return model.predict(preprocess_input(tensor))


def extract_Xception(tensor):
    """Return Xception (ImageNet weights, no top) bottleneck features."""
    from keras.applications.xception import Xception, preprocess_input
    model = Xception(weights='imagenet', include_top=False)
    return model.predict(preprocess_input(tensor))


def extract_InceptionV3(tensor):
    """Return InceptionV3 (ImageNet weights, no top) bottleneck features."""
    from keras.applications.inception_v3 import InceptionV3, preprocess_input
    model = InceptionV3(weights='imagenet', include_top=False)
    return model.predict(preprocess_input(tensor))
| [
"numpy.savez",
"keras.applications.vgg16.VGG16",
"dogs_breed_det.dataset.data_utils.load_data_files",
"keras.applications.inception_v3.preprocess_input",
"os.path.join",
"keras.applications.xception.Xception",
"dogs_breed_det.dataset.data_utils.paths_to_tensor",
"keras.applications.vgg19.VGG19",
"ke... | [((632, 675), 'os.path.join', 'os.path.join', (['"""data"""', '"""bottleneck_features"""'], {}), "('data', 'bottleneck_features')\n", (644, 675), False, 'import os\n'), ((814, 861), 'os.path.join', 'os.path.join', (['cfg.BASE_DIR', 'file_dir', 'file_name'], {}), '(cfg.BASE_DIR, file_dir, file_name)\n', (826, 861), False, 'import os\n'), ((1340, 1402), 'os.path.join', 'os.path.join', (['cfg.BASE_DIR', '"""data"""', 'cfg.Dog_DataDir', 'data_type'], {}), "(cfg.BASE_DIR, 'data', cfg.Dog_DataDir, data_type)\n", (1352, 1402), False, 'import os\n'), ((1418, 1450), 'dogs_breed_det.dataset.data_utils.load_data_files', 'dutils.load_data_files', (['data_dir'], {}), '(data_dir)\n', (1440, 1450), True, 'import dogs_breed_det.dataset.data_utils as dutils\n'), ((1556, 1589), 'dogs_breed_det.dataset.data_utils.paths_to_tensor', 'dutils.paths_to_tensor', (['img_files'], {}), '(img_files)\n', (1578, 1589), True, 'import dogs_breed_det.dataset.data_utils as dutils\n'), ((1749, 1801), 'numpy.savez', 'np.savez', (['bottleneck_path'], {'train': 'bottleneck_features'}), '(bottleneck_path, train=bottleneck_features)\n', (1757, 1801), True, 'import numpy as np\n'), ((2481, 2505), 'numpy.load', 'np.load', (['bottleneck_path'], {}), '(bottleneck_path)\n', (2488, 2505), True, 'import numpy as np\n'), ((2700, 2724), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['tensor'], {}), '(tensor)\n', (2716, 2724), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((2877, 2901), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['tensor'], {}), '(tensor)\n', (2893, 2901), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((3066, 3090), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['tensor'], {}), '(tensor)\n', (3082, 3090), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((3255, 3279), 
'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['tensor'], {}), '(tensor)\n', (3271, 3279), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((3457, 3481), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['tensor'], {}), '(tensor)\n', (3473, 3481), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((1840, 1891), 'numpy.savez', 'np.savez', (['bottleneck_path'], {'test': 'bottleneck_features'}), '(bottleneck_path, test=bottleneck_features)\n', (1848, 1891), True, 'import numpy as np\n'), ((2647, 2691), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2652, 2691), False, 'from keras.applications.vgg16 import VGG16, preprocess_input\n'), ((2824, 2868), 'keras.applications.vgg19.VGG19', 'VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2829, 2868), False, 'from keras.applications.vgg19 import VGG19, preprocess_input\n'), ((3010, 3057), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3018, 3057), False, 'from keras.applications.resnet50 import ResNet50, preprocess_input\n'), ((3199, 3246), 'keras.applications.xception.Xception', 'Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3207, 3246), False, 'from keras.applications.xception import Xception, preprocess_input\n'), ((3398, 3448), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3409, 3448), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((1931, 1983), 'numpy.savez', 'np.savez', 
(['bottleneck_path'], {'valid': 'bottleneck_features'}), '(bottleneck_path, valid=bottleneck_features)\n', (1939, 1983), True, 'import numpy as np\n'), ((2002, 2057), 'numpy.savez', 'np.savez', (['bottleneck_path'], {'features': 'bottleneck_features'}), '(bottleneck_path, features=bottleneck_features)\n', (2010, 2057), True, 'import numpy as np\n')] |
"""Example calculation of coating SWPR and power enhancement. Makes figure
from paper.
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from scipy.interpolate import interp1d
from pvarc import thin_film_reflectance
from pvarc.materials import refractive_index_porous_silica, \
refractive_index_glass
from pvarc.metrics import solar_weighted_photon_reflectance
# Set thickness and porosity values.
thickness = np.arange(0, 196, 5).astype('float')
porosity = np.arange(0, 0.51, 0.025).astype('float')
swpr = np.zeros((len(thickness), len(porosity)))
wavelength = np.linspace(200,1250,200)
# Integration limits for SWPR
swpr_wavelength_min = 400
swpr_wavelength_max = 1100
for k in tqdm(range(len(porosity))):
index_film = refractive_index_porous_silica(wavelength, porosity[k])
index_substrate = refractive_index_glass(wavelength)
for j in range(len(thickness)):
# Calculate reflectance at rough.
reflectance = thin_film_reflectance(index_film=index_film,
index_substrate=index_substrate,
film_thickness=thickness[j],
aoi=8,
wavelength=wavelength)
swpr[j, k] = solar_weighted_photon_reflectance(
wavelength,
reflectance,
wavelength_min=swpr_wavelength_min,
wavelength_max=swpr_wavelength_max)
power_enhancement = swpr[0, 0] - swpr
plt.figure(6, figsize=(3.7, 3))
plt.clf()
plt.contourf(thickness, porosity * 100,
power_enhancement.transpose() * 100,
levels=15
)
cbar = plt.colorbar()
cbar.set_label('Nominal Power Enhancement (%)', fontsize=9)
plt.xlabel('Coating Thickness (nm)', fontsize=9)
plt.ylabel('Porosity (%)', fontsize=9)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
plt.show()
plt.savefig('figure_power_enhancement_thickness_porosity.pdf',
bbox_inches='tight',
pad_inches=0)
# Make a table of optimal performance
plt.figure(11)
plt.clf()
dfm = pd.DataFrame()
x_smooth = np.linspace(thickness.min(), thickness.max(),1000)
for j in [0, 2,4,6,8, 10,12,14,16]:
x = thickness
y = power_enhancement[:,j]
y2 = swpr[:,j]
f = interp1d(x,y, 'cubic')
f2 = interp1d(x,y2, 'cubic')
idx_max = np.argmax(f(x_smooth))
thickness_max_pe = x_smooth[idx_max]
plt.plot(thickness, power_enhancement[:,j]*100)
plt.plot(thickness_max_pe, f(x_smooth[idx_max])*100,'r.')
dfm.loc[j,'Porosity'] = '{:.0%}'.format(porosity[j])
dfm.loc[j, 'Max NPE (%)'] = '{:.1%}'.format(f(x_smooth).max())
dfm.loc[j, 'Min SWPR (%)'] = '{:.1%}'.format(f2(x_smooth).min())
dfm.loc[j,'Thickness'] = '{:.1f}'.format(thickness_max_pe)
# print('Porosity: {}, Thickness of max PE: {}'.format(porosity[j], thickness_max_pe))
plt.xlabel('Thickness (nm)')
plt.ylabel('Power Enhancement (%)')
plt.show()
print(dfm.to_latex(index=False))
| [
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"pandas.DataFrame",
"pvarc.materials.refractive_index_glass",
"matplotlib.pyplot.savefig",
"pvarc.thin_film_reflectance",
... | [((131, 154), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (145, 154), False, 'import matplotlib\n'), ((661, 688), 'numpy.linspace', 'np.linspace', (['(200)', '(1250)', '(200)'], {}), '(200, 1250, 200)\n', (672, 688), True, 'import numpy as np\n'), ((1595, 1626), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {'figsize': '(3.7, 3)'}), '(6, figsize=(3.7, 3))\n', (1605, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1636), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1786), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1784, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coating Thickness (nm)"""'], {'fontsize': '(9)'}), "('Coating Thickness (nm)', fontsize=9)\n", (1857, 1895), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Porosity (%)"""'], {'fontsize': '(9)'}), "('Porosity (%)', fontsize=9)\n", (1906, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1957), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(9)'}), '(fontsize=9)\n', (1945, 1957), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1980), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(9)'}), '(fontsize=9)\n', (1968, 1980), True, 'import matplotlib.pyplot as plt\n'), ((1982, 1992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1990, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2095), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure_power_enhancement_thickness_porosity.pdf"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('figure_power_enhancement_thickness_porosity.pdf', bbox_inches=\n 'tight', pad_inches=0)\n", (2004, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2170), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {}), '(11)\n', 
(2166, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2171, 2180), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2178, 2180), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2201), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2199, 2201), True, 'import pandas as pd\n'), ((2976, 3004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Thickness (nm)"""'], {}), "('Thickness (nm)')\n", (2986, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Enhancement (%)"""'], {}), "('Power Enhancement (%)')\n", (3015, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3041, 3051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3049, 3051), True, 'import matplotlib.pyplot as plt\n'), ((827, 882), 'pvarc.materials.refractive_index_porous_silica', 'refractive_index_porous_silica', (['wavelength', 'porosity[k]'], {}), '(wavelength, porosity[k])\n', (857, 882), False, 'from pvarc.materials import refractive_index_porous_silica, refractive_index_glass\n'), ((905, 939), 'pvarc.materials.refractive_index_glass', 'refractive_index_glass', (['wavelength'], {}), '(wavelength)\n', (927, 939), False, 'from pvarc.materials import refractive_index_porous_silica, refractive_index_glass\n'), ((2378, 2401), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y', '"""cubic"""'], {}), "(x, y, 'cubic')\n", (2386, 2401), False, 'from scipy.interpolate import interp1d\n'), ((2410, 2434), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y2', '"""cubic"""'], {}), "(x, y2, 'cubic')\n", (2418, 2434), False, 'from scipy.interpolate import interp1d\n'), ((2518, 2568), 'matplotlib.pyplot.plot', 'plt.plot', (['thickness', '(power_enhancement[:, j] * 100)'], {}), '(thickness, power_enhancement[:, j] * 100)\n', (2526, 2568), True, 'import matplotlib.pyplot as plt\n'), ((509, 529), 'numpy.arange', 'np.arange', (['(0)', '(196)', '(5)'], {}), '(0, 196, 5)\n', (518, 529), True, 'import numpy as np\n'), ((557, 
582), 'numpy.arange', 'np.arange', (['(0)', '(0.51)', '(0.025)'], {}), '(0, 0.51, 0.025)\n', (566, 582), True, 'import numpy as np\n'), ((1041, 1182), 'pvarc.thin_film_reflectance', 'thin_film_reflectance', ([], {'index_film': 'index_film', 'index_substrate': 'index_substrate', 'film_thickness': 'thickness[j]', 'aoi': '(8)', 'wavelength': 'wavelength'}), '(index_film=index_film, index_substrate=\n index_substrate, film_thickness=thickness[j], aoi=8, wavelength=wavelength)\n', (1062, 1182), False, 'from pvarc import thin_film_reflectance\n'), ((1372, 1507), 'pvarc.metrics.solar_weighted_photon_reflectance', 'solar_weighted_photon_reflectance', (['wavelength', 'reflectance'], {'wavelength_min': 'swpr_wavelength_min', 'wavelength_max': 'swpr_wavelength_max'}), '(wavelength, reflectance, wavelength_min=\n swpr_wavelength_min, wavelength_max=swpr_wavelength_max)\n', (1405, 1507), False, 'from pvarc.metrics import solar_weighted_photon_reflectance\n')] |
#
# Copyright (C) 2020 RFI
#
# Author: <NAME>
#
# This code is distributed under the GPLv3 license, a copy of
# which is included in the root directory of this package.
#
import logging
import numpy
import scipy.ndimage.morphology
from maptools.util import read, write
# Get the logger
logger = logging.getLogger(__name__)
def array_dilate(data, kernel=3, num_iter=1):
    """
    Dilate the map
    Args:
        data (array): The array
        kernel (int): The kernel size
        num_iter (int): The number of iterations
    Returns:
        array: The binary-dilated array (boolean)
    """
    # Build a spherical structuring element of diameter `kernel`: offsets on a
    # kernel^3 grid centred at the origin, kept where radius <= kernel // 2.
    z, y, x = numpy.mgrid[0:kernel, 0:kernel, 0:kernel]
    z = z - kernel // 2
    y = y - kernel // 2
    x = x - kernel // 2
    r = numpy.sqrt(x ** 2 + y ** 2 + z ** 2)
    mask = r <= kernel // 2
    # Do the dilation. Call scipy.ndimage directly: the scipy.ndimage.morphology
    # namespace is deprecated (SciPy >= 1.8) and forwards to the same function,
    # so behaviour is unchanged.
    return scipy.ndimage.binary_dilation(data, mask, num_iter)
def mapfile_dilate(input_map_filename, output_map_filename, kernel=3, num_iter=1):
    """
    Dilate the map

    Reads the input map, binary-dilates its data with a spherical kernel of
    the given size (see array_dilate), and writes the result as uint8 to the
    output map file, copying metadata from the input file.

    Args:
        input_map_filename (str): The input map filename
        output_map_filename (str): The output map filename
        kernel (int): The kernel size
        num_iter (int): The number of iterations
    """
    # Open the input file
    infile = read(input_map_filename)
    # Get the subset of data
    logger.info("Dilating map")
    data = array_dilate(infile.data, kernel=kernel, num_iter=num_iter)
    # Write the output file
    # (dilation yields a boolean array; stored as uint8 so the map format can hold it)
    write(output_map_filename, data.astype("uint8"), infile=infile)
def dilate(*args, **kwargs):
    """
    Dilate the map

    Dispatches to mapfile_dilate when the first positional argument is a
    filename (str) or when input_map_filename is passed as a keyword, and
    to array_dilate otherwise. Accepts the union of both signatures.
    """
    # Bug fix: the original tested `type(args[0]) == "str"`, which compares a
    # type object against the literal string "str" and is always False, so a
    # filename passed positionally was wrongly routed to array_dilate.
    if (len(args) > 0 and isinstance(args[0], str)) or "input_map_filename" in kwargs:
        func = mapfile_dilate
    else:
        func = array_dilate
    return func(*args, **kwargs)
| [
"logging.getLogger",
"numpy.sqrt",
"maptools.util.read"
] | [((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n'), ((697, 733), 'numpy.sqrt', 'numpy.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (707, 733), False, 'import numpy\n'), ((1236, 1260), 'maptools.util.read', 'read', (['input_map_filename'], {}), '(input_map_filename)\n', (1240, 1260), False, 'from maptools.util import read, write\n')] |
import numpy as np
import os
import pickle
import re
import sys
import argparse
class Preprocess():
    """Preprocess the bAbI (tasks 1-20, en-valid-10k) question-answering corpus.

    Reads the raw task files, splits each story into (context, label,
    question, answer) tuples, builds word indices, zero-pads ("masks") the
    sequences to fixed sizes, and pickles the resulting train/val/test
    datasets plus an answer-word dictionary into ``self.path_to_processed``.
    """
    def __init__(self, path_to_babi):
        # path_to_babi example: '././babi_original'
        self.path_to_babi = os.path.join(path_to_babi, "tasks_1-20_v1-2/en-valid-10k")
        # Per-split lists of task file paths; populated by set_path().
        self.train_paths = None
        self.val_paths = None
        self.test_paths = None
        self.path_to_processed = "./babi_processed"
        # Vocabularies for contexts / questions / answers and their union;
        # populated by _set_word_set().
        self._c_word_set = set()
        self._q_word_set = set()
        self._a_word_set = set()
        self._cqa_word_set = set()
        # Maximum number of supporting sentences kept per question.
        self.c_max_len = 20
        # Max sentence / question token lengths, measured on the training
        # split inside load('train').
        self.s_max_len = 0
        self.q_max_len = 0
        # Padding value used when masking sequences to fixed length.
        self.mask_index = 0
    def set_path(self):
        """
        set list of train, val, and test dataset paths
        Returns
            train_paths: list of train dataset paths for all task 1 to 20
            val_paths: list of val dataset paths for all task 1 to 20
            test_paths: list of test dataset paths for all task 1 to 20
        """
        train_paths = []
        val_paths = []
        test_paths= []
        for dirpath, dirnames, filenames in os.walk(self.path_to_babi):
            for filename in filenames:
                if 'train' in filename:
                    train_paths.append(os.path.join(dirpath, filename))
                elif 'val' in filename:
                    val_paths.append(os.path.join(dirpath, filename))
                else:
                    test_paths.append(os.path.join(dirpath, filename))
        # Sort so the task ordering is deterministic across runs/platforms.
        self.train_paths = sorted(train_paths)
        self.val_paths = sorted(val_paths)
        self.test_paths = sorted(test_paths)
    def _split_paragraphs(self, path_to_file):
        """
        split into paragraphs as babi dataset consists of multiple 1~n sentences
        Args
            file_path: path of the data
        Returns
            paragraphs: list of paragraph
        """
        with open(path_to_file, 'r') as f:
            babi = f.readlines()
        paragraph = []
        paragraphs = []
        alphabet = re.compile('[a-zA-Z]')
        # bAbI numbers sentences within a story; a line starting with '1 '
        # marks the beginning of a new paragraph/story.
        # NOTE(review): the final paragraph of a file is only appended when a
        # following '1 ' line is seen, so the last story appears to be dropped
        # — confirm this is intended.
        for d in babi:
            if d.startswith('1 '):
                if paragraph:
                    paragraphs.append(paragraph)
                paragraph = []
            # Strip the leading sentence number: keep from the first letter on.
            mark = re.search(alphabet, d).span()[0]
            paragraph.append(d[mark:])
        return paragraphs
    def _split_clqa(self, paragraphs, show_print= True):
        """
        for each paragraph, split into context, label, question and answer
        Args
            paragraphs: list of paragraphs
        Returns
            context: list of contexts
            label: list of labels
            question: list of questions
            answer: list of answers
        """
        context = []
        label = []
        question = []
        answer = []
        for paragraph in paragraphs:
            for i, sent in enumerate(paragraph):
                if '?' in sent:
                    # Context = all preceding non-question sentences, most
                    # recent first, capped at the 20 latest sentences.
                    related_para = [para.strip().lower() for para in paragraph[:i] if '?' not in para][::-1]
                    if len(related_para) > 20:
                        related_para = related_para[:20]
                    context.append(related_para)
                    label.append([i for i in range(len(related_para))])
                    # bAbI question lines are tab-separated:
                    # question \t answer \t supporting-fact ids
                    q_a_ah = sent.split('\t')
                    question.append(q_a_ah[0].strip().lower())
                    answer.append(q_a_ah[1].strip().lower())
        # check
        if show_print:
            if (len(question) == len(answer)) & (len(answer) == len(context)) & (len(context) == len(label)):
                print("bAbI is well separated into question, answer, context, and label!")
                print("total: {}".format(len(label)))
            else:
                print("Something is missing! check again")
                print("the number of questions: {}".format(len(question)))
                print("the number of answers: {}".format(len(answer)))
                print("the number of contexts: {}".format(len(context)))
                print("the number of labels: {}".format(len(label)))
        return context, label, question, answer
    def split_all_clqa(self, paths, show_print= True):
        """
        merge all 20 babi tasks into one dataset
        Args
            paths: list of path of 1 to 20 task dataset
        Returns
            contexts: list of contexts of all 20 tasks
            labels: list of labels of all 20 tasks
            questions: list of questions of all 20 tasks
            answers: list of answers of all 20 tasks
        """
        # NOTE(review): when paths is None this method only prints and falls
        # through, implicitly returning None; callers that unpack four values
        # would then raise TypeError — confirm this is acceptable.
        if paths == None:
            print('path is None, run set_path() first!')
        else:
            contexts = []
            labels = []
            questions = []
            answers = []
            for path in paths:
                if show_print:
                    print('=================')
                paragraphs = self._split_paragraphs(path)
                if show_print:
                    print("data: {}".format(os.path.basename(path)))
                context, label, question, answer = self._split_clqa(paragraphs, show_print=show_print)
                contexts.extend(context)
                labels.extend(label)
                questions.extend(question)
                answers.extend(answer)
            return contexts, labels, questions, answers
    def _set_word_set(self):
        """Build the context/question/answer vocabularies over all three splits."""
        c_word_set = set()
        q_word_set = set()
        a_word_set = set()
        train_context, train_label, train_question, train_answer = self.split_all_clqa(self.train_paths, show_print=False)
        val_context, val_label, val_question, val_answer = self.split_all_clqa(self.val_paths, show_print=False)
        test_context, test_label, test_question, test_answer = self.split_all_clqa(self.test_paths, show_print=False)
        list_of_context = [train_context, val_context, test_context]
        list_of_question = [train_question, val_question, test_question]
        list_of_answer = [train_answer, val_answer, test_answer]
        for list_ in list_of_context:
            for para in list_:
                for sent in para:
                    # Separate terminal punctuation so '.' and '?' become tokens.
                    sent = sent.replace(".", " .")
                    sent = sent.replace("?", " ?")
                    sent = sent.split()
                    c_word_set.update(sent)
        for list_ in list_of_question:
            for sent in list_:
                sent = sent.replace(".", " .")
                sent = sent.replace("?", " ?")
                sent = sent.split()
                q_word_set.update(sent)
        for answers in list_of_answer:
            for answer in answers:
                # Multi-answer tasks separate answers with commas.
                answer = answer.split(',')
                a_word_set.update(answer)
        a_word_set.add(',')
        self._c_word_set = c_word_set
        self._q_word_set = q_word_set
        self._a_word_set = a_word_set
        # Union vocabulary, used when one-hot encoding answers.
        self._cqa_word_set = c_word_set.union(q_word_set).union(a_word_set)
    def _index_context(self, contexts):
        """Map every context word to a 1-based integer index and encode contexts.

        NOTE(review): enumerating a set gives an order that can vary between
        interpreter runs, so word->index assignments are not reproducible
        across separate processes — confirm this is acceptable.
        """
        c_word_index = dict()
        for i, word in enumerate(self._c_word_set):
            c_word_index[word] = i+1 # index 0 for zero padding
        indexed_cs = []
        for context in contexts:
            indexed_c = []
            for sentence in context:
                sentence = sentence.replace(".", " .")
                sentence = sentence.replace("?", " ?")
                sentence = sentence.split()
                indexed_s = []
                for word in sentence:
                    indexed_s.append(c_word_index[word])
                indexed_c.append(indexed_s)
            indexed_cs.append(np.array(indexed_c))
        return indexed_cs
    def _index_label(self, labels):
        """One-hot encode each label list as rows of a c_max_len identity matrix."""
        indexed_ls = []
        for label in labels:
            indexed_ls.append(np.eye(self.c_max_len)[label])
        return indexed_ls
    def _index_question(self, questions):
        """Map every question word to a 1-based integer index and encode questions."""
        q_word_index = dict()
        for i, word in enumerate(self._q_word_set):
            q_word_index[word] = i+1 # index 0 for zero padding
        indexed_qs = []
        for sentence in questions:
            sentence = sentence.replace(".", " .")
            sentence = sentence.replace("?", " ?")
            sentence = sentence.split()
            indexed_s = []
            for word in sentence:
                indexed_s.append(q_word_index[word])
            indexed_qs.append(np.array(indexed_s))
        return indexed_qs
    def _index_answer(self, answers):
        """One-hot encode answers over the joint c/q/a vocabulary.

        Also pickles the index->word dictionary to
        path_to_processed/answer_word_dict.pkl as a side effect.
        """
        a_word_index = dict()
        a_word_dict = dict()
        for i, word in enumerate(self._cqa_word_set):
            a_word_dict[i] = word
            if word in self._a_word_set:
                answer_one_hot = np.zeros(len(self._cqa_word_set), dtype=np.float32)
                answer_one_hot[i] = 1
                a_word_index[word] = answer_one_hot
        indexed_as = []
        for answer in answers:
            if ',' in answer:
                # Multi-answer: sum the per-word one-hot vectors, including
                # the vector for ',' itself.
                multiple_answer = [a_word_index[',']]
                for a in answer.split(','):
                    indexed_a = a_word_index[a]
                    multiple_answer.append(indexed_a)
                indexed_as.append(np.sum(multiple_answer, axis=0))
            else:
                indexed_a = a_word_index[answer]
                indexed_as.append(indexed_a)
        if not os.path.exists(self.path_to_processed):
            os.makedirs(self.path_to_processed)
        with open(os.path.join(self.path_to_processed, 'answer_word_dict.pkl'), 'wb') as f:
            pickle.dump(a_word_dict, f)
        return indexed_as
    def masking(self, context_index, label_index, question_index):
        """Zero-pad contexts/questions/labels to fixed sizes, tracking true lengths.

        Returns
            context_masked, question_masked, label_masked,
            context_real_len, question_real_len
        """
        context_masked = []
        question_masked = []
        label_masked = []
        context_real_len = []
        question_real_len = []
        # cs: one context
        for cs, l, q in zip(context_index, label_index, question_index):
            context_masked_tmp = []
            context_real_length_tmp = []
            # cs: many sentences
            for context in cs:
                context_real_length_tmp.append(len(context))
                diff = self.s_max_len - len(context)
                if (diff > 0):
                    context_mask = np.append(context, [self.mask_index]*diff, axis=0)
                    context_masked_tmp.append(context_mask.tolist())
                else:
                    context_masked_tmp.append(context)
            # Pad missing sentences with all-zero rows up to c_max_len.
            diff_c = self.c_max_len - len(cs)
            context_masked_tmp.extend([[0]*self.s_max_len]*diff_c)
            context_masked.append(context_masked_tmp)
            diff_q = self.q_max_len - len(q)
            question_real_len.append(len(q))
            question_masked_tmp = np.array(np.append(q, [self.mask_index]*diff_q, axis=0))
            question_masked.append(question_masked_tmp.tolist())
            diff_l = self.c_max_len - len(l)
            label_masked_tmp = np.append(l, np.zeros((diff_l, self.c_max_len)), axis= 0)
            label_masked.append(label_masked_tmp.tolist())
            context_real_length_tmp.extend([0]*diff_l)
            context_real_len.append(context_real_length_tmp)
        return context_masked, question_masked, label_masked, context_real_len, question_real_len
    def load(self, mode):
        """Preprocess one split ('train'/'val'/'test') and pickle it to disk.

        The training split also measures s_max_len and q_max_len, which the
        masking of every split depends on, so load('train') must run before
        load('val') / load('test').
        """
        if mode == 'train':
            path = self.train_paths
        elif mode == 'val':
            path = self.val_paths
        else:
            path = self.test_paths
        contexts, labels, questions, answers = self.split_all_clqa(path)
        context_index = self._index_context(contexts)
        label_index = self._index_label(labels)
        question_index = self._index_question(questions)
        answer_index = self._index_answer(answers)
        if mode == 'train':
            # check max sentence length
            for context in context_index:
                for sentence in context:
                    if len(sentence) > self.s_max_len:
                        self.s_max_len = len(sentence)
            # check max question length
            for question in question_index:
                if len(question) > self.q_max_len:
                    self.q_max_len = len(question)
        context_masked, question_masked, label_masked, context_real_len, question_real_len = self.masking(context_index, label_index, question_index)
        # check masking
        cnt = 0
        for c, q, l in zip(context_masked, question_masked, label_masked):
            for context in c :
                if (len(context) != self.s_max_len) | (len(q) != self.q_max_len) | (len(l) != self.c_max_len):
                    cnt += 1
        if cnt == 0:
            print("Masking success!")
        else:
            print("Masking process error")
        dataset = (question_masked, answer_index, context_masked, label_masked, context_real_len, question_real_len)
        if not os.path.exists(self.path_to_processed):
            os.makedirs(self.path_to_processed)
        with open(os.path.join(self.path_to_processed, mode + '_dataset.pkl'), 'wb') as f:
            pickle.dump(dataset, f)
def get_args_parser():
    """Build the CLI argument parser.

    Usage example:
        python preprocessing.py --path ../ --batch_size 64 --hidden_units 32 \
            --learning_rate 2e-4 --iter_time 150 --display_step 100

    :return: the configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    # Each option is registered together with its (sometimes identical) alias.
    option_aliases = (
        ('--path', '--path_to_babi'),
        ('--batch_size', '--batch_size'),
        ('--hidden_units', '--hidden_units'),
        ('--learning_rate', '--learning_rate'),
        ('--iter_time', '--iter_time'),
        ('--display_step', '--display_step'),
    )
    for option, alias in option_aliases:
        parser.add_argument(option, alias)
    return parser
def default_write(f, string, default_value):
    """Write `string` to file object `f`, followed by a tab.

    Falls back to `default_value` when `string` is None (e.g. the CLI
    argument was not supplied).
    """
    # Use the identity test `is None` instead of the original `== None`:
    # it is the idiomatic check (PEP 8) and cannot be fooled by a custom
    # __eq__ on `string`.
    value = default_value if string is None else string
    f.write(str(value) + "\t")
def main():
    """Parse CLI args, preprocess the bAbI corpus, and persist a config file."""
    args = get_args_parser().parse_args()
    prep = Preprocess(args.path)
    prep.set_path()
    prep._set_word_set()
    # Train must be loaded first: it measures the max sentence/question lengths.
    for split in ('train', 'val', 'test'):
        prep.load(mode=split)
    with open(os.path.join('config.txt'), 'w') as f:
        # Fixed, tab-separated layout: lengths, output dir, then CLI overrides
        # (with hard-coded fallbacks when an option was not supplied).
        for measured in (prep.c_max_len, prep.s_max_len, prep.q_max_len,
                         prep.path_to_processed):
            f.write(str(measured) + "\t")
        for cli_value, fallback in ((args.batch_size, 64),
                                    (args.hidden_units, 32),
                                    (args.learning_rate, 2e-4),
                                    (args.iter_time, 150),
                                    (args.display_step, 100)):
            default_write(f, cli_value, fallback)
if __name__ == '__main__':
main()
| [
"os.path.exists",
"numpy.eye",
"pickle.dump",
"argparse.ArgumentParser",
"re.compile",
"os.makedirs",
"os.path.join",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"os.path.basename",
"os.walk",
"re.search"
] | [((13229, 13254), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13252, 13254), False, 'import argparse\n'), ((221, 279), 'os.path.join', 'os.path.join', (['path_to_babi', '"""tasks_1-20_v1-2/en-valid-10k"""'], {}), "(path_to_babi, 'tasks_1-20_v1-2/en-valid-10k')\n", (233, 279), False, 'import os\n'), ((1121, 1147), 'os.walk', 'os.walk', (['self.path_to_babi'], {}), '(self.path_to_babi)\n', (1128, 1147), False, 'import os\n'), ((2046, 2068), 're.compile', 're.compile', (['"""[a-zA-Z]"""'], {}), "('[a-zA-Z]')\n", (2056, 2068), False, 'import re\n'), ((9288, 9326), 'os.path.exists', 'os.path.exists', (['self.path_to_processed'], {}), '(self.path_to_processed)\n', (9302, 9326), False, 'import os\n'), ((9340, 9375), 'os.makedirs', 'os.makedirs', (['self.path_to_processed'], {}), '(self.path_to_processed)\n', (9351, 9375), False, 'import os\n'), ((9481, 9508), 'pickle.dump', 'pickle.dump', (['a_word_dict', 'f'], {}), '(a_word_dict, f)\n', (9492, 9508), False, 'import pickle\n'), ((12818, 12856), 'os.path.exists', 'os.path.exists', (['self.path_to_processed'], {}), '(self.path_to_processed)\n', (12832, 12856), False, 'import os\n'), ((12870, 12905), 'os.makedirs', 'os.makedirs', (['self.path_to_processed'], {}), '(self.path_to_processed)\n', (12881, 12905), False, 'import os\n'), ((13009, 13032), 'pickle.dump', 'pickle.dump', (['dataset', 'f'], {}), '(dataset, f)\n', (13020, 13032), False, 'import pickle\n'), ((14047, 14073), 'os.path.join', 'os.path.join', (['"""config.txt"""'], {}), "('config.txt')\n", (14059, 14073), False, 'import os\n'), ((7601, 7620), 'numpy.array', 'np.array', (['indexed_c'], {}), '(indexed_c)\n', (7609, 7620), True, 'import numpy as np\n'), ((8359, 8378), 'numpy.array', 'np.array', (['indexed_s'], {}), '(indexed_s)\n', (8367, 8378), True, 'import numpy as np\n'), ((9395, 9455), 'os.path.join', 'os.path.join', (['self.path_to_processed', '"""answer_word_dict.pkl"""'], {}), "(self.path_to_processed, 
'answer_word_dict.pkl')\n", (9407, 9455), False, 'import os\n'), ((10665, 10713), 'numpy.append', 'np.append', (['q', '([self.mask_index] * diff_q)'], {'axis': '(0)'}), '(q, [self.mask_index] * diff_q, axis=0)\n', (10674, 10713), True, 'import numpy as np\n'), ((10880, 10914), 'numpy.zeros', 'np.zeros', (['(diff_l, self.c_max_len)'], {}), '((diff_l, self.c_max_len))\n', (10888, 10914), True, 'import numpy as np\n'), ((12924, 12983), 'os.path.join', 'os.path.join', (['self.path_to_processed', "(mode + '_dataset.pkl')"], {}), "(self.path_to_processed, mode + '_dataset.pkl')\n", (12936, 12983), False, 'import os\n'), ((7768, 7790), 'numpy.eye', 'np.eye', (['self.c_max_len'], {}), '(self.c_max_len)\n', (7774, 7790), True, 'import numpy as np\n'), ((9127, 9158), 'numpy.sum', 'np.sum', (['multiple_answer'], {'axis': '(0)'}), '(multiple_answer, axis=0)\n', (9133, 9158), True, 'import numpy as np\n'), ((10167, 10219), 'numpy.append', 'np.append', (['context', '([self.mask_index] * diff)'], {'axis': '(0)'}), '(context, [self.mask_index] * diff, axis=0)\n', (10176, 10219), True, 'import numpy as np\n'), ((1267, 1298), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (1279, 1298), False, 'import os\n'), ((2256, 2278), 're.search', 're.search', (['alphabet', 'd'], {}), '(alphabet, d)\n', (2265, 2278), False, 'import re\n'), ((1377, 1408), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (1389, 1408), False, 'import os\n'), ((1470, 1501), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (1482, 1501), False, 'import os\n'), ((5017, 5039), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5033, 5039), False, 'import os\n')] |
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Tests for forcefield class
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import copy
import os
from simtk import openmm, unit
import numpy as np
import pytest
from tempfile import NamedTemporaryFile
from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry
from openforcefield.utils import get_data_file_path
from openforcefield.topology import Molecule, Topology
from openforcefield.typing.engines.smirnoff import ForceField, IncompatibleParameterError, SMIRNOFFSpecError
from openforcefield.typing.engines.smirnoff import XMLParameterIOHandler
#======================================================================
# GLOBAL CONSTANTS
#======================================================================
# File paths.
TIP3P_SDF_FILE_PATH = get_data_file_path(os.path.join('systems', 'monomers', 'water.sdf'))
XML_FF_GENERICS = """<?xml version='1.0' encoding='ASCII'?>
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<Bonds version="0.3">
<Bond smirks="[*:1]~[*:2]" id="b1" k="680.0 * kilocalories_per_mole/angstrom**2" length="1.09 * angstrom"/>
</Bonds>
<Angles version="0.3">
<Angle smirks="[*:1]~[*:2]~[*:3]" angle="109.5 * degree" id="a1" k="100.0 * kilocalories_per_mole/radian**2"/>
</Angles>
<ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Proper smirks="[*:1]~[*:2]~[*:3]-[*:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>
</ProperTorsions>
<ImproperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Improper smirks="[*:1]~[*:2](~[*:3])~[*:4]" id="i1" k1="1.1 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
</ImproperTorsions>
<vdW version="0.3" potential="Lennard-Jones-12-6" combining_rules="Lorentz-Berthelot" scale12="0.0" scale13="0.0" scale14="0.5" scale15="1" switch_width="1.0 * angstrom" cutoff="9.0 * angstrom" method="cutoff">
<Atom smirks="[*:1]" epsilon="0.0157 * kilocalories_per_mole" id="n1" rmin_half="0.6000 * angstrom"/>
</vdW>
<Electrostatics version="0.3" method="PME" scale12="0.0" scale13="0.0" scale14="0.833333" cutoff="9.0 * angstrom"/>
<ToolkitAM1BCC version="0.3"/>
</SMIRNOFF>
"""
simple_xml_ff = str.encode('''<?xml version='1.0' encoding='ASCII'?>
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<Bonds version="0.3">
<Bond smirks="[#6X4:1]-[#6X4:2]" id="b1" k="620.0 * kilocalories_per_mole/angstrom**2" length="1.526 * angstrom"/>
<Bond smirks="[#6X4:1]-[#6X3:2]" id="b2" k="634.0 * kilocalories_per_mole/angstrom**2" length="1.51 * angstrom"/>
</Bonds>
<Angles version="0.3">
<Angle smirks="[*:1]~[#6X4:2]-[*:3]" angle="109.5 * degree" id="a1" k="100.0 * kilocalories_per_mole/radian**2"/>
<Angle smirks="[#1:1]-[#6X4:2]-[#1:3]" angle="109.5 * degree" id="a2" k="70.0 * kilocalories_per_mole/radian**2"/>
</Angles>
<ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Proper smirks="[*:1]-[#6X4:2]-[#6X4:3]-[*:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>
<Proper smirks="[#6X4:1]-[#6X4:2]-[#6X4:3]-[#6X4:4]" id="t2" idivf1="1" k1="0.180 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree" periodicity2="2" phase2="180.0 * degree" idivf2="1" k2="0.250 * kilocalories_per_mole" periodicity3="1" phase3="180.0 * degree" idivf3="1" k3="0.200 * kilocalories_per_mole"/>
</ProperTorsions>
<ImproperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Improper smirks="[*:1]~[#6X3:2](~[*:3])~[*:4]" id="i1" k1="1.1 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
<Improper smirks="[*:1]~[#6X3:2](~[#8X1:3])~[#8:4]" id="i2" k1="10.5 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
</ImproperTorsions>
<vdW version="0.3" potential="Lennard-Jones-12-6" combining_rules="Lorentz-Berthelot" scale12="0.0" scale13="0.0" scale14="0.5" scale15="1" switch_width="1.0 * angstrom" cutoff="9.0 * angstrom" method="cutoff">
<Atom smirks="[#1:1]" epsilon="0.0157 * kilocalories_per_mole" id="n1" rmin_half="0.6000 * angstrom"/>
<Atom smirks="[#1:1]-[#6X4]" epsilon="0.0157 * kilocalories_per_mole" id="n2" rmin_half="1.4870 * angstrom"/>
</vdW>
<Electrostatics version="0.3" method="PME" scale12="0.0" scale13="0.0" scale14="0.833333" cutoff="9.0 * angstrom"/>
<ToolkitAM1BCC version="0.3"/>
</SMIRNOFF>
''')
xml_ff_w_comments = '''<?xml version='1.0' encoding='ASCII'?>
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<!-- SMIRNOFF (SMIRKS Native Open Force Field) template file -->
<Date>2018-07-14</Date>
<Author><NAME>, OpenEye/UC Irvine; <NAME>, UC Irvine; <NAME>, UC Irvine</Author>
<!-- This file is meant for processing via openforcefield.typing.engines.smirnoff -->
<!-- WARNING: AMBER functional forms drop the factor of 2 in the bond energy term, so cross-comparing this file with a corresponding .frcmod file, it will appear that the values here are twice as large as they should be. -->
<Bonds version="0.3">
<Bond smirks="[#6X4:1]-[#6X4:2]" id="b1" k="620.0 * kilocalories_per_mole/angstrom**2" length="1.526 * angstrom" />
<Bond smirks="[#6X4:1]-[#6X3:2]" id="b2" k="634.0 * kilocalories_per_mole/angstrom**2" length="1.51 * angstrom"/>
</Bonds>
<!-- WARNING: AMBER functional forms drop the factor of 2 in the angle energy term, so cross-comparing this file with a corresponding .frcmod file, it will appear that the values here are twice as large as they should be. -->
<Angles version="0.3">
<Angle smirks="[*:1]~[#6X4:2]-[*:3]" angle="109.5 * degree" id="a1" k="100.0 * kilocalories_per_mole/radian**2"/>
<Angle smirks="[#1:1]-[#6X4:2]-[#1:3]" angle="109.5 * degree" id="a2" k="70.0 * kilocalories_per_mole/radian**2"/>
</Angles>
<ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Proper smirks="[*:1]-[#6X4:2]-[#6X4:3]-[*:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>
<Proper smirks="[#6X4:1]-[#6X4:2]-[#6X4:3]-[#6X4:4]" id="t2" idivf1="1" k1="0.180 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree" periodicity2="2" phase2="180.0 * degree" idivf2="1" k2="0.250 * kilocalories_per_mole" periodicity3="1" phase3="180.0 * degree" idivf3="1" k3="0.200 * kilocalories_per_mole"/>
</ProperTorsions>
<ImproperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Improper smirks="[*:1]~[#6X3:2](~[*:3])~[*:4]" id="i1" k1="1.1 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
<Improper smirks="[*:1]~[#6X3:2](~[#8X1:3])~[#8:4]" id="i2" k1="10.5 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
</ImproperTorsions>
<vdW version="0.3" potential="Lennard-Jones-12-6" combining_rules="Lorentz-Berthelot" scale12="0.0" scale13="0.0" scale14="0.5" scale15="1" switch_width="1.0 * angstrom" cutoff="9.0 * angstrom" method="cutoff">
<Atom smirks="[#1:1]" epsilon="0.0157 * kilocalories_per_mole" id="n1" rmin_half="0.6000 * angstrom"/>
<Atom smirks="[#1:1]-[#6X4]" epsilon="0.0157 * kilocalories_per_mole" id="n2" rmin_half="1.4870 * angstrom"/>
</vdW>
<Electrostatics version="0.3" method="PME" scale12="0.0" scale13="0.0" scale14="0.833333" scale15="1" cutoff="9.0 * angstrom" pme_tolerance="0.00001"/>
<ToolkitAM1BCC version="0.3"/>
</SMIRNOFF>
'''
xml_ff_w_cosmetic_elements = '''<?xml version='1.0' encoding='ASCII'?>
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<!-- SMIRNOFF (SMIRKS Native Open Force Field) template file -->
<Date>MMXVIII-VII-XIV</Date>
<Author><NAME></Author>
<!-- This file is meant for processing via openforcefield.typing.engines.smirnoff -->
<!-- WARNING: AMBER functional forms drop the factor of 2 in the bond energy term, so cross-comparing this file with a corresponding .frcmod file, it will appear that the values here are twice as large as they should be. -->
<Bonds version="0.3">
<Bond smirks="[#6X4:1]-[#6X4:2]" id="b1" k="620.0 * kilocalories_per_mole/angstrom**2" length="1.526 * angstrom" parameters="k, length" parameterize_eval="blah=blah2"/>
<Bond smirks="[#6X4:1]-[#6X3:2]" id="b2" k="634.0 * kilocalories_per_mole/angstrom**2" length="1.51 * angstrom"/>
</Bonds>
<!-- WARNING: AMBER functional forms drop the factor of 2 in the angle energy term, so cross-comparing this file with a corresponding .frcmod file, it will appear that the values here are twice as large as they should be. -->
<Angles version="0.3" cosmetic_element="why not?">
<Angle smirks="[*:1]~[#6X4:2]-[*:3]" angle="109.5 * degree" id="a1" k="100.0 * kilocalories_per_mole/radian**2"/>
<Angle smirks="[#1:1]-[#6X4:2]-[#1:3]" angle="109.5 * degree" id="a2" k="70.0 * kilocalories_per_mole/radian**2"/>
</Angles>
<ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Proper smirks="[*:1]-[#6X4:2]-[#6X4:3]-[*:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>
<Proper smirks="[#6X4:1]-[#6X4:2]-[#6X4:3]-[#6X4:4]" id="t2" idivf1="1" k1="0.180 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree" periodicity2="2" phase2="180.0 * degree" idivf2="1" k2="0.250 * kilocalories_per_mole" periodicity3="1" phase3="180.0 * degree" idivf3="1" k3="0.200 * kilocalories_per_mole"/>
</ProperTorsions>
<ImproperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Improper smirks="[*:1]~[#6X3:2](~[*:3])~[*:4]" id="i1" k1="1.1 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
<Improper smirks="[*:1]~[#6X3:2](~[#8X1:3])~[#8:4]" id="i2" k1="10.5 * kilocalories_per_mole" periodicity1="2" phase1="180. * degree"/>
</ImproperTorsions>
<vdW version="0.3" potential="Lennard-Jones-12-6" combining_rules="Lorentz-Berthelot" scale12="0.0" scale13="0.0" scale14="0.5" scale15="1" switch_width="1.0 * angstrom" cutoff="9.0 * angstrom" method="cutoff">
<Atom smirks="[#1:1]" epsilon="0.0157 * kilocalories_per_mole" id="n1" rmin_half="0.6000 * angstrom"/>
<Atom smirks="[#1:1]-[#6X4]" epsilon="0.0157 * kilocalories_per_mole" id="n2" rmin_half="1.4870 * angstrom"/>
</vdW>
<Electrostatics version="0.3" method="PME" scale12="0.0" scale13="0.0" scale14="0.833333" scale15="1" cutoff="9.0 * angstrom" pme_tolerance="0.00001"/>
<ToolkitAM1BCC version="0.3"/>
</SMIRNOFF>
'''
xml_toolkitam1bcc_ff = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<ToolkitAM1BCC version="0.3"/>
</SMIRNOFF>
'''
xml_ethanol_library_charges_ff = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge smirks="[#1:1]-[#6:2](-[#1:3])(-[#1:4])-[#6:5](-[#1:6])(-[#1:7])-[#8:8]-[#1:9]" charge1="-0.02*elementary_charge" charge2="-0.2*elementary_charge" charge3="-0.02*elementary_charge" charge4="-0.02*elementary_charge" charge5="-0.1*elementary_charge" charge6="-0.01*elementary_charge" charge7="-0.01*elementary_charge" charge8="0.3*elementary_charge" charge9="0.08*elementary_charge" />
</LibraryCharges>
</SMIRNOFF>
'''
xml_ethanol_library_charges_in_parts_ff = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<!-- Note that the oxygen is covered twice here. The correct behavior should be to take the charge from the SECOND LibraryCharge, as it should overwrite the first -->
<LibraryCharge smirks="[#1:1]-[#6:2](-[#1:3])(-[#1:4])-[#6:5](-[#1:6])(-[#1:7])-[#8:8]" charge1="-0.02*elementary_charge" charge2="-0.2*elementary_charge" charge3="-0.02*elementary_charge" charge4="-0.02*elementary_charge" charge5="-0.1*elementary_charge" charge6="-0.01*elementary_charge" charge7="-0.01*elementary_charge" charge8="-999*elementary_charge" />
<LibraryCharge smirks="[#8:1]-[#1:2]" charge1="0.3*elementary_charge" charge2="0.08*elementary_charge" />
</LibraryCharges>
</SMIRNOFF>
'''
xml_ethanol_library_charges_by_atom_ff = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge smirks="[#1:1]-[#6]" charge1="-0.02*elementary_charge" />
<LibraryCharge smirks="[#6X4:1]" charge1="-0.2*elementary_charge" />
<LibraryCharge smirks="[#1:1]-[#6]-[#8]" charge1="-0.01*elementary_charge" />
<LibraryCharge smirks="[#6X4:1]-[#8]" charge1="-0.1*elementary_charge" />
<LibraryCharge smirks="[#8X2:1]" charge1="0.3*elementary_charge" />
<LibraryCharge smirks="[#1:1]-[#8]" charge1="0.08*elementary_charge" />
</LibraryCharges>
</SMIRNOFF>
'''
xml_OH_library_charges_xml = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge smirks="[#1:1]" charge1="1.*elementary_charge" />
<LibraryCharge smirks="[#8:1]" charge1="-2.*elementary_charge" />
</LibraryCharges>
</SMIRNOFF>
'''
xml_CH_zeroes_library_charges_xml = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge smirks="[#1:1]" charge1="0.*elementary_charge" />
<LibraryCharge smirks="[#6:1]" charge1="0.*elementary_charge" />
</LibraryCharges>
</SMIRNOFF>
'''
xml_spec_docs_ala_library_charges_xml = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge name="ALA" smirks="[NX3:1]([#1:2])([#6])[#6H1:3]([#1:4])([#6:5]([#1:6])([#1:7])[#1:8])[#6:9](=[#8:10])[#7]" charge1="-0.4157*elementary_charge" charge2="0.2719*elementary_charge" charge3="0.0337*elementary_charge" charge4="0.0823*elementary_charge" charge5="-0.1825*elementary_charge" charge6="0.0603*elementary_charge" charge7="0.0603*elementary_charge" charge8="0.0603*elementary_charge" charge9="0.5973*elementary_charge" charge10="-0.5679*elementary_charge"/>
</LibraryCharges>
</SMIRNOFF>
'''
xml_spec_docs_tip3p_library_charges_xml = '''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<LibraryCharges version="0.3">
<LibraryCharge name="TIP3P" smirks="[#1:1]-[#8X2H2+0:2]-[#1:3]" charge1="0.417*elementary_charge" charge2="-0.834*elementary_charge" charge3="0.417*elementary_charge"/>
</LibraryCharges>
</SMIRNOFF>
'''
#======================================================================
# TEST UTILITY FUNCTIONS
#======================================================================
def round_charge(xml):
    """Round charge fields in a serialized OpenMM system to 2 decimal places."""
    # Example Particle line: <Particle eps=".4577296" q="-.09709000587463379" sig=".1908"/>
    separator = ' q="'
    head, *fragments = xml.split(separator)
    rebuilt = [head]
    for fragment in fragments:
        # Everything before the closing '" sig' is the charge value.
        charge_text, marker, tail = fragment.partition('" sig')
        rebuilt.append(f"{float(charge_text):.2f}" + marker + tail)
    return separator.join(rebuilt)
def create_ethanol():
    """
    Creates an openforcefield.topology.Molecule representation of
    ethanol without the use of a cheminformatics toolkit
    """
    mol = Molecule()
    # Atomic numbers, in index order: C0, C1, O2, then hydrogens H3-H8
    for atomic_number in (6, 6, 8, 1, 1, 1, 1, 1, 1):
        mol.add_atom(atomic_number, 0, False)
    # Single bonds: C0-C1, C1-O2, C0-H3, C0-H4, C0-H5, C1-H6, C1-H7, O2-H8
    for atom1, atom2 in ((0, 1), (1, 2), (0, 3), (0, 4),
                         (0, 5), (1, 6), (1, 7), (2, 8)):
        mol.add_bond(atom1, atom2, 1, False)
    # Arbitrary (non-physical) partial charges, one per atom in index order
    mol.partial_charges = unit.Quantity(
        np.array([-0.4, -0.3, -0.2, -0.1, 0.00001, 0.1, 0.2, 0.3, 0.4]),
        unit.elementary_charge)
    return mol
def create_reversed_ethanol():
    """
    Creates an openforcefield.topology.Molecule representation of
    ethanol without the use of a cheminformatics toolkit. This function
    reverses the atom indexing of create_ethanol
    """
    mol = Molecule()
    # Atomic numbers, in index order: hydrogens H0-H5, then O6, C7, C8
    for atomic_number in (1, 1, 1, 1, 1, 1, 8, 6, 6):
        mol.add_atom(atomic_number, 0, False)
    # Single bonds: C8-C7, C7-O6, C8-H5, C8-H4, C8-H3, C7-H2, C7-H1, O6-H0
    for atom1, atom2 in ((8, 7), (7, 6), (8, 5), (8, 4),
                         (8, 3), (7, 2), (7, 1), (6, 0)):
        mol.add_bond(atom1, atom2, 1, False)
    # Partial charges mirror create_ethanol's, reversed to match the indexing
    mol.partial_charges = unit.Quantity(
        np.array([0.4, 0.3, 0.2, 0.1, 0.00001, -0.1, -0.2, -0.3, -0.4]),
        unit.elementary_charge)
    return mol
def create_benzene_no_aromatic():
    """
    Creates an openforcefield.topology.Molecule representation of benzene through the API with aromatic bonds
    not defined, used to test the levels of isomorphic matching.
    """
    mol = Molecule()
    # Six ring carbons (C0-C5) followed by six hydrogens (H6-H11)
    for atomic_number in (6, 6, 6, 6, 6, 6, 1, 1, 1, 1, 1, 1):
        mol.add_atom(atomic_number, 0, False)
    # Ring bonds, then one C-H bond per carbon; all deliberately single,
    # non-aromatic
    for atom1, atom2 in ((0, 5), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
                         (0, 6), (1, 7), (2, 8), (3, 9), (4, 10), (5, 11)):
        mol.add_bond(atom1, atom2, 1, False)
    return mol
def create_acetaldehyde():
    """
    Creates an openforcefield.topology.Molecule representation of acetaldehyde through the API
    """
    mol = Molecule()
    # Atomic numbers, in index order: C0, C1, O2, then hydrogens H3-H6
    for atomic_number in (6, 6, 8, 1, 1, 1, 1):
        mol.add_atom(atomic_number, 0, False)
    # (atom1, atom2, bond_order): note the C1=O2 double bond
    for atom1, atom2, order in ((0, 1, 1), (1, 2, 2), (0, 3, 1),
                                (0, 4, 1), (0, 5, 1), (1, 6, 1)):
        mol.add_bond(atom1, atom2, order, False)
    # All-zero partial charges, one per atom
    mol.partial_charges = unit.Quantity(np.array([0, 0, 0, 0, 0, 0, 0]),
                                        unit.elementary_charge)
    return mol
def create_cyclohexane():
    """
    Creates an openforcefield.topology.Molecule representation of
    cyclohexane without the use of a cheminformatics toolkit
    """
    mol = Molecule()
    # Six ring carbons (C0-C5) followed by twelve hydrogens (H6-H17)
    for atomic_number in (6,) * 6 + (1,) * 12:
        mol.add_atom(atomic_number, 0, False)
    # Ring bonds C0-C1 ... C5-C0, then two C-H bonds per carbon
    for atom1, atom2 in ((0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0),
                         (0, 6), (0, 7), (1, 8), (1, 9), (2, 10), (2, 11),
                         (3, 12), (3, 13), (4, 14), (4, 15), (5, 16), (5, 17)):
        mol.add_bond(atom1, atom2, 1, False)
    return mol
# Every combination of (vdW method, electrostatics method, periodicity) that
# test_nonbonded_method_resolution exercises. Each entry records either the
# OpenMM NonbondedForce method expected in the created system ('omm_force'),
# or the exception type (and message substring) expected to be raised.
nonbonded_resolution_matrix = [
    {'vdw_method': 'cutoff', 'electrostatics_method': 'Coulomb', 'has_periodic_box': True,
     'omm_force': None, 'exception': IncompatibleParameterError, 'exception_match': ''},
    {'vdw_method': 'cutoff', 'electrostatics_method': 'Coulomb', 'has_periodic_box': False,
     'omm_force': openmm.NonbondedForce.NoCutoff, 'exception': None, 'exception_match': ''},
    {'vdw_method': 'cutoff', 'electrostatics_method': 'reaction-field', 'has_periodic_box': True,
     'omm_force': None, 'exception': SMIRNOFFSpecError, 'exception_match': 'reaction-field'},
    {'vdw_method': 'cutoff', 'electrostatics_method': 'reaction-field', 'has_periodic_box': False,
     'omm_force': None, 'exception': SMIRNOFFSpecError, 'exception_match': 'reaction-field'},
    {'vdw_method': 'cutoff', 'electrostatics_method': 'PME', 'has_periodic_box': True,
     'omm_force': openmm.NonbondedForce.PME, 'exception': None, 'exception_match': ''},
    {'vdw_method': 'cutoff', 'electrostatics_method': 'PME', 'has_periodic_box': False,
     'omm_force': openmm.NonbondedForce.NoCutoff, 'exception': None, 'exception_match': ''},
    {'vdw_method': 'PME', 'electrostatics_method': 'Coulomb', 'has_periodic_box': True,
     'omm_force': None, 'exception': IncompatibleParameterError, 'exception_match': ''},
    {'vdw_method': 'PME', 'electrostatics_method': 'Coulomb', 'has_periodic_box': False,
     'omm_force': openmm.NonbondedForce.NoCutoff, 'exception': None, 'exception_match': ''},
    {'vdw_method': 'PME', 'electrostatics_method': 'reaction-field', 'has_periodic_box': True,
     'omm_force': None, 'exception': SMIRNOFFSpecError, 'exception_match': 'reaction-field'},
    {'vdw_method': 'PME', 'electrostatics_method': 'reaction-field', 'has_periodic_box': False,
     'omm_force': None, 'exception': SMIRNOFFSpecError, 'exception_match': 'reaction-field'},
    {'vdw_method': 'PME', 'electrostatics_method': 'PME', 'has_periodic_box': True,
     'omm_force': openmm.NonbondedForce.LJPME, 'exception': None, 'exception_match': ''},
    {'vdw_method': 'PME', 'electrostatics_method': 'PME', 'has_periodic_box': False,
     'omm_force': openmm.NonbondedForce.NoCutoff, 'exception': None, 'exception_match': ''},
]
#=============================================================================================
# TESTS
#=============================================================================================
# (registry, description) pairs used to parametrize toolkit-dependent tests.
# Only registries whose underlying toolkits are importable on this machine are
# added, so tests are silently narrowed on hosts missing OpenEye or RDKit.
toolkit_registries = []
if OpenEyeToolkitWrapper.is_available():
    toolkit_registries.append((ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper]), "OE"))
if RDKitToolkitWrapper.is_available() and AmberToolsToolkitWrapper.is_available():
    toolkit_registries.append((ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]),
                               'RDKit+AmberTools'))
class TestForceField():
"""Test the ForceField class"""
def test_create_forcefield_no_args(self):
    """Test empty constructor: default handlers exist, unknown ones raise KeyError."""
    forcefield = ForceField()
    # Should find BondHandler and AngleHandler, since they're default classes
    forcefield.get_parameter_handler('Bonds')
    forcefield.get_parameter_handler('Angles')
    # Shouldn't find InvalidKey handler, since it doesn't exist
    with pytest.raises(KeyError) as excinfo:
        forcefield.get_parameter_handler('InvalidKey')
def test_create_forcefield_custom_handler_classes(self):
    """Test constructor given specific classes to register"""
    from openforcefield.typing.engines.smirnoff import BondHandler
    # Restricting parameter_handler_classes replaces the default handler set
    forcefield = ForceField(parameter_handler_classes=[BondHandler])
    # Should find BondHandler, since we registered it
    forcefield.get_parameter_handler('Bonds')
    # Shouldn't find AngleHandler, since we didn't allow that to be registered
    with pytest.raises(KeyError) as excinfo:
        forcefield.get_parameter_handler('Angles')
def test_create_forcefield_from_file(self):
    """Test basic file loading in constructor"""
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    # Expected number of parameters per handler in smirnoff99Frosst
    expected_counts = {'Bonds': 87,
                       'Angles': 38,
                       'ProperTorsions': 158,
                       'ImproperTorsions': 4,
                       'vdW': 35}
    for tag, count in expected_counts.items():
        assert len(forcefield._parameter_handlers[tag]._parameters) == count
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_forcefield_from_file_list(self):
    """Test creating a ForceField from a list of offxml file paths."""
    # These offxml files are located in package data path, which is automatically installed and searched
    file_paths = [smirnoff99Frosst_offxml_file_path, tip3p_offxml_file_path]
    # Create a forcefield from multiple offxml files
    forcefield = ForceField(file_paths)
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_forcefield_from_file_path_iterator(self):
    """Test creating a ForceField from an iterator of offxml file paths."""
    # These offxml files are located in package data path, which is automatically installed and searched
    file_paths = [smirnoff99Frosst_offxml_file_path, tip3p_offxml_file_path]
    # A generator should work as well
    forcefield = ForceField(iter(file_paths))
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_gbsa(self):
    """Test reading of ffxml files with GBSA support.
    """
    # Fix: the original definition omitted `self`; pytest invokes test
    # methods as bound methods, so this would raise TypeError if unskipped.
    forcefield = ForceField('test_forcefields/Frosst_AlkEthOH_GBSA.offxml')
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_forcefield_from_url(self):
    """Test creating a ForceField from a single URL."""
    urls = [
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/smirnoff99Frosst.offxml',
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/tip3p.offxml'
    ]
    # Test creation with smirnoff99frosst URL
    forcefield = ForceField(urls[0])
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_forcefield_from_url_list(self):
    """Test creating a ForceField from a list of URLs."""
    urls = [
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/smirnoff99Frosst.offxml',
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/tip3p.offxml'
    ]
    # Test creation with multiple URLs
    forcefield = ForceField(urls)
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_forcefield_from_url_iterator(self):
    """Test creating a ForceField from an iterator of URLs."""
    urls = [
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/smirnoff99Frosst.offxml',
        'https://raw.githubusercontent.com/openforcefield/openforcefield/master/openforcefield/data/test_forcefields/tip3p.offxml'
    ]
    # A generator should work as well
    forcefield = ForceField(iter(urls))
def test_create_forcefield_from_xml_string(self):
    """Verify handler parameter counts after constructing from an XML string."""
    forcefield = ForceField(simple_xml_ff)
    # simple_xml_ff defines exactly two parameters per handler section
    for tag in ('Bonds', 'Angles', 'ProperTorsions', 'ImproperTorsions', 'vdW'):
        assert len(forcefield._parameter_handlers[tag]._parameters) == 2
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_deep_copy(self):
    """Ensure a deep-copied ForceField compares equal to the original."""
    forcefield = ForceField(smirnoff99Frosst_offxml_file_path)
    # Deep copy. Fix: the original referenced the undefined name
    # `cls.forcefield`; use the local variable instead.
    forcefield2 = copy.deepcopy(forcefield)
    assert_forcefields_equal(forcefield, forcefield2,
                             "ForceField deep copy does not match original ForceField")
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
# TODO: This should check the output of forcefield.to_dict
def test_serialize(self):
    """Round-trip a ForceField through __getstate__/__setstate__."""
    forcefield = ForceField(smirnoff99Frosst_offxml_file_path)
    # Serialize/deserialize. Fix: the original referenced the undefined name
    # `cls.forcefield`; use the local variable instead.
    serialized_forcefield = forcefield.__getstate__()
    # NOTE(review): calling __setstate__ on the class (not an instance) looks
    # suspect — confirm the intended restore API when un-skipping this test.
    forcefield2 = ForceField.__setstate__(serialized_forcefield)
    assert_forcefields_equal(forcefield, forcefield2,
                             "Deserialized serialized ForceField does not match original ForceField")
def test_pickle(self):
    """
    Test pickling and unpickling a forcefield
    """
    import pickle
    original = ForceField(simple_xml_ff)
    # Round-trip through pickle in one step
    restored = pickle.loads(pickle.dumps(original))
    assert original.to_string() == restored.to_string()
def test_pickle_with_cosmetic_attributes(self):
    """
    Test pickling and unpickling a forcefield with cosmetic attributes
    """
    import pickle
    original = ForceField(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)
    # Round-trip through pickle in one step
    restored = pickle.loads(pickle.dumps(original))
    assert original.to_string() == restored.to_string()
    # Ensure that the cosmetic attributes stuck around
    assert 'blah=blah2' in restored.to_string()
def test_xml_string_roundtrip(self):
    """
    Test writing a ForceField to an XML string
    """
    # Serialize, re-parse, and serialize again: the two strings must match
    first_pass = ForceField(simple_xml_ff).to_string('XML')
    second_pass = ForceField(first_pass).to_string('XML')
    assert first_pass == second_pass
def test_xml_string_roundtrip_keep_cosmetic(self):
    """
    Test roundtripping a forcefield to an XML string with and without retaining cosmetic elements
    """
    # Ensure an exception is raised if we try to read the XML string with cosmetic attributes
    # NOTE: "[(]" / "[)]" escape literal parentheses for pytest's regex match
    with pytest.raises(SMIRNOFFSpecError, match="Unexpected kwarg [(]parameters: k, length[)] passed") as excinfo:
        forcefield = ForceField(xml_ff_w_cosmetic_elements)
    # Create a forcefield from XML successfully, by explicitly permitting cosmetic attributes
    forcefield_1 = ForceField(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)
    # Convert the forcefield back to XML
    string_1 = forcefield_1.to_string('XML', discard_cosmetic_attributes=False)
    # Ensure that the new XML string has cosmetic attributes in it
    assert 'cosmetic_element="why not?"' in string_1
    assert 'parameterize_eval="blah=blah2"' in string_1
    with pytest.raises(SMIRNOFFSpecError, match="Unexpected kwarg [(]parameters: k, length[)] passed") as excinfo:
        forcefield = ForceField(string_1, allow_cosmetic_attributes=False)
    # Complete the forcefield_1 --> string --> forcefield_2 roundtrip
    forcefield_2 = ForceField(string_1, allow_cosmetic_attributes=True)
    # Ensure that the forcefield remains the same after the roundtrip
    string_2 = forcefield_2.to_string('XML', discard_cosmetic_attributes=False)
    assert string_1 == string_2
    # Discard the cosmetic attributes and ensure that the string is different
    string_3 = forcefield_2.to_string('XML', discard_cosmetic_attributes=True)
    assert string_1 != string_3
    # Ensure that the new XML string does NOT have cosmetic attributes in it
    assert 'cosmetic_element="why not?"' not in string_3
    assert 'parameterize_eval="blah=blah2"' not in string_3
def test_read_0_1_smirnoff(self):
    """Test reading an 0.1 spec OFFXML file"""
    # Loading must succeed without error; no further assertions needed
    ff = ForceField('test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml')
def test_read_0_1_smirff(self):
    """Test reading an 0.1 spec OFFXML file, enclosed by the legacy "SMIRFF" tag"""
    # Loading must succeed without error; no further assertions needed
    ff = ForceField('test_forcefields/smirff99Frosst_reference_0_1_spec.offxml')
def test_read_0_2_smirnoff(self):
    """Test reading an 0.2 spec OFFXML file"""
    # Loading must succeed without error; no further assertions needed
    ff = ForceField('test_forcefields/smirnoff99Frosst_reference_0_2_spec.offxml')
@pytest.mark.parametrize('file_path_extension', ['xml', 'XML', 'offxml', 'OFFXML'])
@pytest.mark.parametrize('specified_format', [None, 'xml', 'XML', '.xml', '.XML',
                                              'offxml', 'OFFXML', '.offxml', '.OFFXML',
                                              XMLParameterIOHandler()])
def test_xml_file_roundtrip(self, file_path_extension, specified_format):
    """
    Test roundtripping a ForceField to and from an XML file, across every
    combination of file extension and explicit io_format specifier.
    """
    # These files will be deleted once garbage collection runs (end of this function)
    iofile1 = NamedTemporaryFile(suffix='.' + file_path_extension)
    iofile2 = NamedTemporaryFile(suffix='.' + file_path_extension)
    forcefield_1 = ForceField(simple_xml_ff)
    forcefield_1.to_file(iofile1.name, io_format=specified_format)
    forcefield_2 = ForceField(iofile1.name)
    forcefield_2.to_file(iofile2.name, io_format=specified_format)
    # Write -> read -> write must reproduce the file byte-for-byte
    assert open(iofile1.name).read() == open(iofile2.name).read()
@pytest.mark.parametrize('file_path_extension', ['xml', 'XML', 'offxml', 'OFFXML'])
@pytest.mark.parametrize('specified_format', [None, 'xml', 'XML', '.xml', '.XML',
                                              'offxml', 'OFFXML', '.offxml', '.OFFXML',
                                              XMLParameterIOHandler()])
def test_xml_file_roundtrip_keep_cosmetic(self, file_path_extension, specified_format):
    """
    Test roundtripping a forcefield to an XML file with and without retaining cosmetic elements
    """
    # These files will be deleted once garbage collection runs (end of this function)
    iofile1 = NamedTemporaryFile(suffix='.' + file_path_extension)
    iofile2 = NamedTemporaryFile(suffix='.' + file_path_extension)
    iofile3 = NamedTemporaryFile(suffix='.' + file_path_extension)
    # Ensure an exception is raised if we try to read the XML string with cosmetic attributes
    with pytest.raises(SMIRNOFFSpecError, match="Unexpected kwarg [(]parameters: k, length[)] passed") as excinfo:
        forcefield = ForceField(xml_ff_w_cosmetic_elements)
    # Create a forcefield from XML successfully
    forcefield_1 = ForceField(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)
    # Convert the forcefield back to XML, keeping cosmetic attributes
    forcefield_1.to_file(iofile1.name, discard_cosmetic_attributes=False, io_format=specified_format)
    # Ensure that the new XML string has cosmetic attributes in it
    assert 'cosmetic_element="why not?"' in open(iofile1.name).read()
    assert 'parameterize_eval="blah=blah2"' in open(iofile1.name).read()
    with pytest.raises(SMIRNOFFSpecError, match="Unexpected kwarg [(]parameters: k, length[)] passed") as excinfo:
        forcefield = ForceField(iofile1.name, allow_cosmetic_attributes=False)
    # Complete the forcefield_1 --> file --> forcefield_2 roundtrip
    forcefield_2 = ForceField(iofile1.name, allow_cosmetic_attributes=True)
    # Ensure that the forcefield remains the same after the roundtrip
    forcefield_2.to_file(iofile2.name, discard_cosmetic_attributes=False, io_format=specified_format)
    assert open(iofile1.name).read() == open(iofile2.name).read()
    # Discard the cosmetic attributes and ensure that the string is different
    forcefield_2.to_file(iofile3.name, discard_cosmetic_attributes=True, io_format=specified_format)
    assert open(iofile1.name).read() != open(iofile3.name).read()
    # Ensure that the new XML string does NOT have cosmetic attributes in it
    assert 'cosmetic_element="why not?"' not in open(iofile3.name).read()
    assert 'parameterize_eval="blah=blah2"' not in open(iofile3.name).read()
def test_load_section_without_section_version(self):
    """Ensure that a SMIRNOFFSpecError is raised if we try to load a SMIRNOFF section without a version.
    Section versions are a requirement added in the 0.3 spec."""
    # The <ToolkitAM1BCC/> element below deliberately omits its version attribute
    with pytest.raises(SMIRNOFFSpecError, match="Missing version while trying to construct "
                                                "<class 'openforcefield.typing.engines."
                                                "smirnoff.parameters.ToolkitAM1BCCHandler'>.") as excinfo:
        ff = ForceField('<?xml version="1.0" encoding="ASCII"?>'
                        '<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">'
                        '  <ToolkitAM1BCC/>'
                        '</SMIRNOFF>')
def test_load_two_sources(self):
    """Test loading data from two SMIRNOFF data sources"""
    ff = ForceField(simple_xml_ff, xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)
    # Each source contributes two bond parameters, so the merged total is 4
    assert len(ff.get_parameter_handler('Bonds').parameters) == 4
def test_load_two_sources_authors_dates(self):
    """Test that authors and dates are handled properly"""
    ff = ForceField(xml_ff_w_cosmetic_elements, xml_ff_w_comments, allow_cosmetic_attributes=True)
    xml_str = ff.to_string('XML')
    # Author/Date fields from the two sources are concatenated with ' AND '
    assert '<Author>Alice and Bob AND <NAME>, OpenEye/UC Irvine; <NAME>, ' \
           'UC Irvine; <NAME>, UC Irvine</Author>' in xml_str
    assert '<Date>MMXVIII-VII-XIV AND 2018-07-14</Date>' in xml_str
    # Test property getters
    assert 'Alice and Bob AND <NAME>, OpenEye/UC Irvine; <NAME>, ' \
           'UC Irvine; <NAME>, UC Irvine' == ff.author
    assert 'MMXVIII-VII-XIV AND 2018-07-14' == ff.date
    # Test property setters
    ff.author = 'Me'
    ff.date = 'yesteryear'
    xml_str = ff.to_string('XML')
    assert '<Author>Me</Author>' in xml_str
    assert '<Date>yesteryear</Date>' in xml_str
    # Unset both author and date and ensure they don't get written out.
    ff.author = None
    ff.date = None
    xml_str = ff.to_string('XML')
    assert '<Author>' not in xml_str
    assert '<Date>' not in xml_str
def test_load_two_sources_incompatible_tags(self):
    """Test loading data from two SMIRNOFF data sources which have incompatible physics"""
    # Make an XML forcefield with a modified vdW 1-4 scaling factor
    nonstandard_xml_ff = xml_ff_w_comments.replace('scale14="0.5"', 'scale14="1.0"')
    # Merging sources with conflicting handler-level attributes must fail
    with pytest.raises(IncompatibleParameterError, match="handler value: 0.5, incompatible value: 1.0") as excinfo:
        ff = ForceField(simple_xml_ff, nonstandard_xml_ff)
def test_gbsahandler_sa_model_none(self):
    """
    Ensure that string values of "None" are correctly interpreted in the GBSAHandler's sa_model field
    """
    # sa_model="None" below is the literal string, which the handler must
    # map to Python None rather than reject
    gbsa_ff_xml = '''<?xml version='1.0' encoding='ASCII'?>
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<GBSA version="0.3" gb_model="HCT" solvent_dielectric="78.5" solute_dielectric="1" sa_model="None" surface_area_penalty="5.4*calories/mole/angstroms**2" solvent_radius="1.4*angstroms">
<Atom smirks="[*:1]" radius="0.15*nanometer" scale="0.8"/>
</GBSA>
</SMIRNOFF>
'''
    from openforcefield.typing.engines.smirnoff import ForceField
    ff = ForceField(gbsa_ff_xml)
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
def test_parameterize_ethanol(self, toolkit_registry, registry_description):
    """Parameterize a single ethanol with each available toolkit registry."""
    from simtk.openmm import app
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
    molecules = [create_ethanol()]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    # Success criterion is simply that system creation does not raise
    omm_system = forcefield.create_openmm_system(topology, toolkit_registry=toolkit_registry)
@pytest.fixture()
def create_circular_handler_dependencies(self):
    """Yield-fixture that temporarily makes BondHandler and AngleHandler
    depend on each other (a dependency cycle), restoring the original
    class-level _DEPENDENCIES afterwards."""
    from openforcefield.typing.engines.smirnoff.parameters import BondHandler, AngleHandler, ConstraintHandler
    # Modify the BondHandler and AngleHandler classes to depend on the other one running first during
    # system parameterization. Unfortunately, I can't figure out how to do this just to these _instances_
    # of the Handlers, so I modify them at the class level, and then un-modify them at the end of the test.
    orig_bh_depends = copy.deepcopy(BondHandler._DEPENDENCIES)
    orig_ah_depends = copy.deepcopy(AngleHandler._DEPENDENCIES)
    BondHandler._DEPENDENCIES = [ConstraintHandler, AngleHandler]
    AngleHandler._DEPENDENCIES = [ConstraintHandler, BondHandler]
    # The tests run here. Regardless of outcome, the code after `yield` runs after the test completes
    yield
    # Return handler dependencies to their original states
    BondHandler._DEPENDENCIES = orig_bh_depends
    AngleHandler._DEPENDENCIES = orig_ah_depends
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
def test_parameterize_ethanol_handler_dependency_loop(self, create_circular_handler_dependencies, toolkit_registry, registry_description):
    """Test parameterizing ethanol, but failing because custom handler classes can not resolve
    which order to run in"""
    from simtk.openmm import app
    # from openforcefield.typing.engines.smirnoff.parameters import BondHandler, AngleHandler, ConstraintHandler
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
    molecules = [create_ethanol()]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    # The create_circular_handler_dependencies fixture has injected a
    # BondHandler<->AngleHandler cycle, so ordering resolution must fail
    with pytest.raises(RuntimeError, match="Unable to resolve order in which to run ParameterHandlers. "
                                           "Dependencies do not form a directed acyclic graph") as excinfo:
        omm_system = forcefield.create_openmm_system(topology, toolkit_registry=toolkit_registry)
def test_parameterize_ethanol_missing_torsion(self):
    """Parameterizing with a forcefield whose only torsion cannot match
    ethanol must raise UnassignedProperTorsionParameterException."""
    from simtk.openmm import app
    from openforcefield.typing.engines.smirnoff.parameters import UnassignedProperTorsionParameterException
    # Element #99 (einsteinium) guarantees the SMIRKS matches nothing in ethanol
    forcefield = ForceField('''
<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">
<ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">
<Proper smirks="[#99:1]-[#99X4:2]-[#99:3]-[#99:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>
</ProperTorsions>
</SMIRNOFF>
''')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
    molecules = [create_ethanol()]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    with pytest.raises(UnassignedProperTorsionParameterException,
                       match='- Topology indices [(]5, 0, 1, 6[)]: '
                             'names and elements [(](H\d+)? H[)], [(](C\d+)? C[)], [(](C\d+)? C[)], [(](H\d+)? H[)],') \
            as excinfo:
        omm_system = forcefield.create_openmm_system(topology)
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
def test_parameterize_1_cyclohexane_1_ethanol(self, toolkit_registry, registry_description):
    """Test parameterizing a periodic system of two distinct molecules"""
    from simtk.openmm import app
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_cyclohexane_1_ethanol.pdb'))
    # toolkit_wrapper = RDKitToolkitWrapper()
    molecules = [create_ethanol(), create_cyclohexane()]
    # molecules = [Molecule.from_file(get_data_file_path(name)) for name in ('molecules/ethanol.mol2',
    #                                                                      'molecules/cyclohexane.mol2')]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    # Success criterion is simply that system creation does not raise
    omm_system = forcefield.create_openmm_system(topology)
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
def test_parameterize_1_cyclohexane_1_ethanol_vacuum(self, toolkit_registry, registry_description):
    """Test parametrizing a nonperiodic system of two distinct molecules"""
    from simtk.openmm import app
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_cyclohexane_1_ethanol.pdb'))
    molecules = [create_ethanol(), create_cyclohexane()]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    # Clearing the box vectors makes the topology nonperiodic
    topology.box_vectors = None
    omm_system = forcefield.create_openmm_system(topology)
@pytest.mark.slow
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
@pytest.mark.parametrize("box", ['ethanol_water.pdb',
                                 'cyclohexane_water.pdb',
                                 'cyclohexane_ethanol_0.4_0.6.pdb',
                                 'propane_methane_butanol_0.2_0.3_0.5.pdb'])
def test_parameterize_large_system(self, toolkit_registry, registry_description, box):
    """Test parameterizing a large system of several distinct molecules.
    This test is very slow, so it is only run if the --runslow option is provided to pytest.
    """
    from simtk.openmm import app
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    box_file_path = get_data_file_path(os.path.join('systems', 'packmol_boxes', box))
    pdbfile = app.PDBFile(box_file_path)
    # Reference molecules for every species that can occur in the boxes above
    mol_names = ['water', 'cyclohexane', 'ethanol', 'propane', 'methane', 'butanol']
    sdf_files = [get_data_file_path(os.path.join('systems', 'monomers', name+'.sdf')) for name in mol_names]
    molecules = [Molecule.from_file(sdf_file) for sdf_file in sdf_files]
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules, )
    omm_system = forcefield.create_openmm_system(topology, toolkit_registry=toolkit_registry)
    # TODO: Add check to ensure system energy is finite
@pytest.mark.skipif( not(OpenEyeToolkitWrapper.is_available()), reason='Test requires OE toolkit')
def test_parameterize_ethanol_different_reference_ordering_openeye(self):
    """
    Test parameterizing the same PDB, using reference mol2s that have different atom orderings.
    The results of both should be identical.
    """
    toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
    from simtk.openmm import app
    from simtk.openmm import XmlSerializer
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
    # Load the unique molecules with one atom ordering
    molecules1 = [Molecule.from_file(get_data_file_path('molecules/ethanol.sdf'))]
    topology1 = Topology.from_openmm(pdbfile.topology,
                                     unique_molecules=molecules1,
                                     )
    omm_system1 = forcefield.create_openmm_system(topology1,
                                                  toolkit_registry=toolkit_registry)
    # Load the unique molecules with a different atom ordering
    molecules2 = [Molecule.from_file(get_data_file_path('molecules/ethanol_reordered.sdf'))]
    topology2 = Topology.from_openmm(pdbfile.topology,
                                     unique_molecules=molecules2,
                                     )
    omm_system2 = forcefield.create_openmm_system(topology2,
                                                  toolkit_registry=toolkit_registry)
    serialized_1 = XmlSerializer.serialize(omm_system1)
    serialized_2 = XmlSerializer.serialize(omm_system2)
    # Round charges to 2 decimals so AM1-BCC numerical noise can't cause
    # spurious mismatches between the two serialized systems
    serialized_1 = round_charge(serialized_1)
    serialized_2 = round_charge(serialized_2)
    assert serialized_1 == serialized_2
@pytest.mark.skipif(not RDKitToolkitWrapper.is_available(), reason='Test requires RDKit toolkit')
def test_parameterize_ethanol_different_reference_ordering_rdkit(self):
    """
    Test parameterizing the same PDB, using reference mol2s that have different atom orderings.
    The results of both should be identical.
    """
    from simtk.openmm import app
    from simtk.openmm import XmlSerializer
    toolkit_registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper])
    forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
    pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
    # Load the unique molecules with one atom ordering
    molecules1 = [Molecule.from_file(get_data_file_path('molecules/ethanol.sdf'))]
    topology1 = Topology.from_openmm(pdbfile.topology,
                                     unique_molecules=molecules1,
                                     )
    omm_system1 = forcefield.create_openmm_system(topology1,
                                                  toolkit_registry=toolkit_registry)
    # Load the unique molecules with a different atom ordering
    molecules2 = [Molecule.from_file(get_data_file_path('molecules/ethanol_reordered.sdf'))]
    topology2 = Topology.from_openmm(pdbfile.topology,
                                     unique_molecules=molecules2,
                                     )
    omm_system2 = forcefield.create_openmm_system(topology2,
                                                  toolkit_registry=toolkit_registry)
    serialized_1 = XmlSerializer.serialize(omm_system1)
    serialized_2 = XmlSerializer.serialize(omm_system2)
    # Round charges to 2 decimals so AM1-BCC numerical noise can't cause
    # spurious mismatches between the two serialized systems
    serialized_1 = round_charge(serialized_1)
    serialized_2 = round_charge(serialized_2)
    assert serialized_1 == serialized_2
@pytest.mark.skip(reason="We will not support going directly to ParmEd for now."
"We will instead feed OpenMM System objects to ParmEd "
"for further processing.")
def test_parameterize_ethanol_to_parmed(self):
from simtk.openmm import app
forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
#toolkit_wrapper = RDKitToolkitWrapper()
molecules = [ Molecule.from_file(get_data_file_path(name)) for name in ('molecules/ethanol.mol2',) ]
topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
parmed_system = forcefield.create_parmed_structure(topology, positions=pdbfile.getPositions())
@pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
def test_pass_invalid_kwarg_to_create_openmm_system(self, toolkit_registry, registry_description):
"""Test to ensure an exception is raised when an unrecognized kwarg is passed """
from simtk.openmm import app
file_path = get_data_file_path('test_forcefields/smirnoff99Frosst.offxml')
forcefield = ForceField(file_path)
pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
molecules = []
molecules.append(Molecule.from_smiles('CCO'))
topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
with pytest.raises(ValueError, match=".* not used by any registered force Handler: {'invalid_kwarg'}.*") as e:
omm_system = forcefield.create_openmm_system(topology, invalid_kwarg='aaa', toolkit_registry=toolkit_registry)
@pytest.mark.parametrize("inputs", nonbonded_resolution_matrix)
def test_nonbonded_method_resolution(self,
inputs
):
"""Test predefined permutations of input options to ensure nonbonded handling is correctly resolved"""
from simtk.openmm import app
vdw_method = inputs['vdw_method']
electrostatics_method = inputs['electrostatics_method']
has_periodic_box = inputs['has_periodic_box']
omm_force = inputs['omm_force']
exception = inputs['exception']
exception_match= inputs['exception_match']
molecules = [create_ethanol()]
forcefield = ForceField('test_forcefields/smirnoff99Frosst.offxml')
pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
if not(has_periodic_box):
topology.box_vectors = None
if exception is None:
# The method is validated and may raise an exception if it's not supported.
forcefield.get_parameter_handler('vdW', {}).method = vdw_method
forcefield.get_parameter_handler('Electrostatics', {}).method = electrostatics_method
omm_system = forcefield.create_openmm_system(topology)
nonbond_method_matched = False
for f_idx in range(omm_system.getNumForces()):
force = omm_system.getForce(f_idx)
if isinstance(force, openmm.NonbondedForce):
if force.getNonbondedMethod() == omm_force:
nonbond_method_matched = True
assert nonbond_method_matched
else:
with pytest.raises(exception, match=exception_match) as excinfo:
# The method is validated and may raise an exception if it's not supported.
forcefield.get_parameter_handler('vdW', {}).method = vdw_method
forcefield.get_parameter_handler('Electrostatics', {}).method = electrostatics_method
omm_system = forcefield.create_openmm_system(topology)
class TestForceFieldChargeAssignment:
    """Tests of partial-charge assignment in ForceField.create_openmm_system.

    Covers the charge-assignment hierarchy exercised here: the
    ``charge_from_molecules`` kwarg takes precedence, then LibraryCharges
    (in last-loaded-wins order), then the ToolkitAM1BCC handler.
    """
    @pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
    def test_charges_from_molecule(self, toolkit_registry, registry_description):
        """Test skipping charge generation and instead getting charges from the original Molecule"""
        # Create an ethanol molecule without using a toolkit
        molecules = [create_ethanol()]
        from simtk.openmm import app, NonbondedForce
        file_path = get_data_file_path('test_forcefields/smirnoff99Frosst.offxml')
        forcefield = ForceField(file_path)
        pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
        topology = Topology.from_openmm(pdbfile.topology,
                                        unique_molecules=molecules)
        omm_system = forcefield.create_openmm_system(topology,
                                                     charge_from_molecules=molecules,
                                                     toolkit_registry=toolkit_registry)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        # These are the dummy charges assigned by create_ethanol() for C, C, O.
        expected_charges = ((0, -0.4 * unit.elementary_charge),
                            (1, -0.3 * unit.elementary_charge),
                            (2, -0.2 * unit.elementary_charge),
                            )
        for particle_index, expected_charge in expected_charges:
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
        # In 1_ethanol_reordered.pdb, the first three atoms go O-C-C instead of C-C-O. This part of the test ensures
        # that the charges are correctly mapped according to this PDB in the resulting system.
        pdbfile2 = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol_reordered.pdb'))
        topology2 = Topology.from_openmm(pdbfile2.topology,
                                         unique_molecules=molecules)
        omm_system2 = forcefield.create_openmm_system(topology2,
                                                      charge_from_molecules=molecules,
                                                      toolkit_registry=toolkit_registry)
        nonbondedForce2 = [f for f in omm_system2.getForces() if type(f) == NonbondedForce][0]
        expected_charges2 = ((0, -0.2*unit.elementary_charge),
                             (1, -0.4*unit.elementary_charge),
                             (2, -0.3*unit.elementary_charge),
                             )
        for particle_index, expected_charge in expected_charges2:
            q, sigma, epsilon = nonbondedForce2.getParticleParameters(particle_index)
            assert q == expected_charge
    @pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
    def test_nonintegral_charge_exception(self, toolkit_registry, registry_description):
        """Test skipping charge generation and instead getting charges from the original Molecule"""
        from simtk.openmm import app
        from openforcefield.typing.engines.smirnoff.parameters import NonintegralMoleculeChargeException
        # Create an ethanol molecule without using a toolkit
        ethanol = create_ethanol()
        # Bump one partial charge so the molecular charge sum becomes nonintegral.
        ethanol.partial_charges[0] = 1. * unit.elementary_charge
        file_path = get_data_file_path('test_forcefields/smirnoff99Frosst.offxml')
        forcefield = ForceField(file_path)
        pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_ethanol.pdb'))
        topology = Topology.from_openmm(pdbfile.topology,
                                        unique_molecules=[ethanol])
        # Fail because nonintegral charges aren't allowed
        with pytest.raises(NonintegralMoleculeChargeException,
                           match="Partial charge sum [(]1.40001 e[)] for molecule"):
            omm_system = forcefield.create_openmm_system(topology,
                                                         charge_from_molecules=[ethanol],
                                                         toolkit_registry=toolkit_registry)
        # Pass when the `allow_nonintegral_charges` keyword is included
        omm_system = forcefield.create_openmm_system(topology,
                                                     charge_from_molecules=[ethanol],
                                                     toolkit_registry=toolkit_registry,
                                                     allow_nonintegral_charges=True)
    @pytest.mark.parametrize("toolkit_registry,registry_description", toolkit_registries)
    def test_some_charges_from_molecule(self, toolkit_registry, registry_description):
        """
        Test creating an OpenMM system where some charges come from a Molecule, but others come from toolkit
        calculation
        """
        ethanol = create_ethanol()
        cyclohexane = create_cyclohexane()
        molecules = [ethanol, cyclohexane]
        from simtk.openmm import app, NonbondedForce
        file_path = get_data_file_path('test_forcefields/smirnoff99Frosst.offxml')
        forcefield = ForceField(file_path)
        pdbfile = app.PDBFile(get_data_file_path('systems/test_systems/1_cyclohexane_1_ethanol.pdb'))
        topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules, )
        # Only ethanol's charges come from charge_from_molecules; cyclohexane's
        # must come from toolkit calculation.
        omm_system = forcefield.create_openmm_system(topology,
                                                     charge_from_molecules=[ethanol],
                                                     toolkit_registry=toolkit_registry)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        # Particles 18-20 are the start of the ethanol (after 18 cyclohexane atoms).
        expected_charges = ((18, -0.4 * unit.elementary_charge),
                            (19, -0.3 * unit.elementary_charge),
                            (20, -0.2 * unit.elementary_charge),
                            )
        for particle_index, expected_charge in expected_charges:
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
        for particle_index in range(topology.n_topology_particles):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q != (0. * unit.elementary_charge)
    def test_library_charges_to_single_water(self):
        """Test assigning charges to one water molecule using library charges"""
        from simtk.openmm import NonbondedForce
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', 'test_forcefields/tip3p.offxml')
        mol = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers','water.sdf')))
        omm_system = ff.create_openmm_system(mol.to_topology())
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        # TIP3P charges: O, H, H.
        expected_charges = [-0.834, 0.417, 0.417] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_parse_library_charges_from_spec_docs(self):
        """Ensure that the examples for librarycharges in the SMIRNOFF spec page are still valid"""
        # TODO: This test is practically useless while the XML strings are hard-coded at the top of this file.
        # We should implement something like doctests for the XML snippets on the SMIRNOFF spec page.
        ff = ForceField(xml_spec_docs_ala_library_charges_xml)
        ff = ForceField(xml_spec_docs_tip3p_library_charges_xml)
    def test_library_charge_hierarchy(self):
        """Test assigning charges to one water molecule using library charges, where two LCs match and the
        assignment is determined by order they are added to the force field"""
        from simtk.openmm import NonbondedForce
        # Test with xml_OH_library_charges_xml loaded last, which should assign dummy partial charges
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml',
                        'test_forcefields/tip3p.offxml',
                        xml_OH_library_charges_xml)
        mol = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers','water.sdf')))
        omm_system = ff.create_openmm_system(mol.to_topology())
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        expected_charges = [-2., 1., 1.] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
        # Test again, but with tip3p.offxml loaded last (loading the correct partial charges)
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_OH_library_charges_xml, 'test_forcefields/tip3p.offxml', )
        omm_system = ff.create_openmm_system(mol.to_topology())
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        expected_charges = [-0.834, 0.417, 0.417] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_library_charges_to_two_waters(self):
        """Test assigning charges to two water molecules using library charges"""
        from simtk.openmm import NonbondedForce
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', 'test_forcefields/tip3p.offxml')
        mol = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers','water.sdf')))
        top = Topology.from_molecules([mol, mol])
        omm_system = ff.create_openmm_system(top)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        expected_charges = [-0.834, 0.417, 0.417, -0.834, 0.417, 0.417] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_library_charges_to_two_ethanols_different_atom_ordering(self):
        """Test assigning charges to two ethanols with different atom orderings"""
        from simtk.openmm import NonbondedForce
        # Define a library charge parameter for ethanol (C1-C2-O3) where C1 has charge -0.2, and its Hs have -0.02,
        # C2 has charge -0.1 and its Hs have -0.01, and O3 has charge 0.3, and its H has charge 0.08
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_ethanol_library_charges_ff)
        # ethanol.sdf
        #      H5   H8
        #      |    |
        # H6 - C1 - C2 - O3 - H4
        #      |    |
        #      H7   H9
        #
        # ethanol_reordered.sdf (The middle C and O switch indices)
        #      H5   H8
        #      |    |
        # H6 - C1 - C3 - O2 - H4
        #      |    |
        #      H7   H9
        molecules = [Molecule.from_file(get_data_file_path('molecules/ethanol.sdf')),
                     Molecule.from_file(get_data_file_path('molecules/ethanol_reordered.sdf'))]
        top = Topology.from_molecules(molecules)
        omm_system = ff.create_openmm_system(top)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        # Second ethanol's C/O charges appear swapped in index order, matching
        # the reordered atom indices above.
        expected_charges = [-0.2, -0.1, 0.3, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01, -0.2,
                            0.3, -0.1, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_charge_method_hierarchy(self):
        """Ensure that molecules are parameterized by charge_from_molecules first, then library charges
        if not applicable, then AM1BCC otherwise"""
        from simtk.openmm import NonbondedForce
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml',
                        xml_CH_zeroes_library_charges_xml,
                        'test_forcefields/tip3p.offxml'
                        )
        cyclohexane = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers','cyclohexane.sdf')))
        butanol = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers', 'butanol.sdf')))
        propane = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers', 'propane.sdf')))
        water = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers', 'water.sdf')))
        ethanol = Molecule.from_file(get_data_file_path(os.path.join('systems', 'monomers', 'ethanol.sdf')))
        # Assign dummy partial charges to cyclohexane, which we expect to find in the final system since it
        # is included in the charge_from_molecules kwarg to create_openmm_system
        cyclohexane.partial_charges = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2,
                                                0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                                                0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) * unit.elementary_charge
        # There were previously known issues when parameterizing molecules with all zero charges,
        # so test this explicitly with butanol. Since butanol will be in the charge_from_molecules kwarg,
        # we expect to find these charges in the final system.
        butanol.partial_charges = np.array([0.0] * 15) * unit.elementary_charge
        # Add dummy partial charges to propane, which should be IGNORED since it
        # isn't in the charge_from_molecules kwarg
        propane.partial_charges = np.array([99.] * 11) * unit.elementary_charge
        # Add dummy partial charges to water, which should be IGNORED since it
        # isn't in the charge_from_molecules kwarg
        water.partial_charges = np.array([99.] * 3) * unit.elementary_charge
        #            molecule     correct charge method
        molecules = [cyclohexane, # charge_from_molecules kwarg
                     butanol,     # charge_from_molecules kwarg
                     propane,     # library charges
                     water,       # library charges
                     ethanol]     # AM1-BCC
        top = Topology.from_molecules(molecules)
        omm_system = ff.create_openmm_system(top, charge_from_molecules=[cyclohexane, butanol])
        existing = [f for f in omm_system.getForces() if type(f) == NonbondedForce]
        # Ensure that the handlers do not make multiple NonbondedForce objects
        assert len(existing) == 1
        nonbondedForce = existing[0]
        expected_charges = [# cyclohexane (18 atoms) should have the following values from charge_from_mols
                            -0.2, -0.2, -0.2, -0.2, -0.2, -0.2,
                            0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                            0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                            # butanol (15 atoms) should have the following values from charge_from_mols
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0,
                            # propane (11 atoms) should have the following values from xml_CH_zeroes_library_charges_xml
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0,
                            # water (3 atoms) should have the following charges from tip3p.offxml
                            -0.834, 0.417, 0.417] * unit.elementary_charge
        # Ensure that the first three molecules have exactly the charges we intended
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
        # Ensure the last molecule (ethanol) had _some_ nonzero charge assigned by an AM1BCC implementation
        for particle_index in range(len(expected_charges), top.n_topology_atoms):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q != 0 * unit.elementary_charge
    def test_assign_charges_to_molecule_in_parts_using_multiple_library_charges(self):
        """Test assigning charges to parts of a molecule using two library charge lines. Note that these LibraryCharge
        SMIRKS have partial overlap, so this also tests that the hierarchy is correctly obeyed."""
        from simtk.openmm import NonbondedForce
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_ethanol_library_charges_in_parts_ff)
        molecules = [Molecule.from_file(get_data_file_path('molecules/ethanol.sdf')),
                     Molecule.from_file(get_data_file_path('molecules/ethanol_reordered.sdf'))]
        top = Topology.from_molecules(molecules)
        omm_system = ff.create_openmm_system(top)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        expected_charges = [-0.2, -0.1, 0.3, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01, -0.2,
                            0.3, -0.1, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_assign_charges_using_library_charges_by_single_atoms(self):
        """Test assigning charges to parts of a molecule using per-atom library charges. Note that these LibraryCharge
        SMIRKS will match multiple atoms, so this is also a test of correct usage of the parameter hierarchy.."""
        from simtk.openmm import NonbondedForce
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_ethanol_library_charges_by_atom_ff)
        molecules = [Molecule.from_file(get_data_file_path('molecules/ethanol.sdf')),
                     Molecule.from_file(get_data_file_path('molecules/ethanol_reordered.sdf'))]
        top = Topology.from_molecules(molecules)
        omm_system = ff.create_openmm_system(top)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        expected_charges = [-0.2, -0.1, 0.3, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01, -0.2,
                            0.3, -0.1, 0.08, -0.02, -0.02, -0.02, -0.01, -0.01] * unit.elementary_charge
        for particle_index, expected_charge in enumerate(expected_charges):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q == expected_charge
    def test_library_charges_dont_parameterize_molecule_because_of_incomplete_coverage(self):
        """Fail to assign charges to a molecule because not all atoms can be assigned"""
        from simtk.openmm import NonbondedForce
        from openforcefield.typing.engines.smirnoff.parameters import UnassignedMoleculeChargeException
        molecules = [Molecule.from_file(get_data_file_path('molecules/toluene.sdf'))]
        top = Topology.from_molecules(molecules)
        # The library charges in the FF should not be able to fully cover toluene
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_ethanol_library_charges_by_atom_ff)
        # Delete the ToolkitAM1BCCHandler so the molecule won't get charges from anywhere
        del ff._parameter_handlers['ToolkitAM1BCC']
        with pytest.raises(UnassignedMoleculeChargeException,
                           match="did not have charges assigned by any ParameterHandler") as excinfo:
            omm_system = ff.create_openmm_system(top)
        # If we do NOT delete the ToolkiAM1BCCHandler, then toluene should be assigned some nonzero partial charges.
        # The exact value will vary by toolkit, so we don't test that here.
        ff = ForceField('test_forcefields/smirnoff99Frosst.offxml', xml_ethanol_library_charges_by_atom_ff)
        omm_system = ff.create_openmm_system(top)
        nonbondedForce = [f for f in omm_system.getForces() if type(f) == NonbondedForce][0]
        for particle_index in range(top.n_topology_atoms):
            q, sigma, epsilon = nonbondedForce.getParticleParameters(particle_index)
            assert q != 0 * unit.elementary_charge
#======================================================================
# TEST CONSTRAINTS
#======================================================================
class TestForceFieldConstraints:
    """Tests that constraints are correctly applied and behave correctly."""

    @classmethod
    def check_molecule_constraints(cls, molecule, system, bond_elements, bond_length):
        """Check that the bonds in the molecule is correctly constrained."""
        n_constraints = system.getNumConstraints()
        for idx in range(n_constraints):
            atom1, atom2, distance = system.getConstraintParameters(idx)
            # Every constraint must join the expected element pair at the
            # expected length.
            elements = {molecule.atoms[atom1].element.symbol,
                        molecule.atoms[atom2].element.symbol}
            assert elements == bond_elements
            assert np.isclose(distance / unit.angstrom, bond_length / unit.angstrom)

    def test_constraints_hbonds(self):
        """Test that hydrogen bonds constraints are applied correctly to a ethane molecule."""
        # Parameterize an ethane molecule with H-bond constraints enabled.
        ethane = Molecule.from_smiles('CC')
        ff = ForceField(XML_FF_GENERICS, 'test_forcefields/old/hbonds.offxml')
        system = ff.create_openmm_system(Topology.from_molecules([ethane]))
        # Every C-H bond should be constrained to the force-field bond length.
        self.check_molecule_constraints(ethane, system,
                                        bond_elements={'C', 'H'},
                                        bond_length=1.09 * unit.angstrom)
#======================================================================
# TEST PARAMETER ASSIGNMENT
#======================================================================
def generate_alkethoh_parameters_assignment_cases():
    """Create dynamically all test cases that should be ran for the AlkEthOH set."""
    import tarfile

    # These AlkEthOH molecules are always run by test_alkethoh_parameters_assignment.
    fast_test_cases = ['r0', 'r12', 'r118', 'c38', 'c100', 'c1161', 'c1266']

    def extract_id(file_path):
        """Extract the AlkEthOH molecule ID from the file path."""
        # e.g. AlkEthOH_tripos/AlkEthOH_chain_filt1/AlkEthOH_c555.crd -> c555
        # (strip the directory, the extension, and the 'AlkEthOH_' prefix).
        return os.path.splitext(os.path.basename(file_path))[0][9:]

    # Gather all molecule IDs from the tarball (extracted in conftest.py when
    # slow tests are activated), skipping the duplicates in test_filt1.
    tar_path = get_data_file_path(os.path.join('molecules', 'AlkEthOH_tripos.tar.gz'))
    with tarfile.open(tar_path, 'r:gz') as tar:
        slow_ids = {extract_id(member.name) for member in tar.getmembers()
                    if 'crd' in member.name and 'test_filt1' not in member.name}

    # Drop the fast cases (already covered) and water (c1302), which was
    # reparameterized in AlkEthOH to be TIP3P (not covered by
    # Frosst_AlkEthOH_parmAtFrosst).
    for case_id in fast_test_cases + ['c1302']:
        slow_ids.remove(case_id)

    # Everything that is not a fast case gets the slow mark.
    slow_test_cases = [pytest.param(case_id, marks=pytest.mark.slow)
                       for case_id in sorted(slow_ids)]
    return fast_test_cases + slow_test_cases
def generate_freesolv_parameters_assignment_cases():
    """Create dynamically all test cases that should be ran for the FreeSolv set."""
    import tarfile

    # UndefinedStereochemistryError is ignored for these molecules; their
    # chirality was manually checked (see issue #175).
    ignore_undefined_stereo = {'2501588', '3266352', '7829570'}

    # These molecules are always tested by test_freesolv_parameters_assignment().
    # Each test case is (freesolv_id, force_field_version, allow_undefined_stereo).
    fast_test_cases = [
        ('1019269', '0_0_4_fixed', False),
        ('63712', '0_0_2', False),    # The XML was regenerated after fixing the issue described in #179.
        ('1723043', '0_0_2', False),
        ('2501588', '0_0_2', True),   # Test impropers and undefined stereochemistry.
        ('3323117', '0_0_2', False),  # The XML was regenerated after fixing the issue described in #179.
        ('1107178', '0_0_2', False),  # Molecule with iodine
        ('1036761', '0_0_2', False),  # Molecule with primary amine
    ]

    def extract_id(file_path):
        """Extract the FreeSolv ID and force field version from the file subpath."""
        # e.g. FreeSolv/xml_0_0_4_fixed/mobley_7913234_vacuum.xml
        freesolv_id = os.path.basename(file_path).split('_')[1]
        ff_version = os.path.basename(os.path.dirname(file_path))[4:]
        return (freesolv_id, ff_version, freesolv_id in ignore_undefined_stereo)

    # Get all the tarball XML files available. The tarball is extracted in
    # conftest.py if slow tests are activated.
    tar_path = get_data_file_path(os.path.join('molecules', 'FreeSolv.tar.gz'))
    with tarfile.open(tar_path, 'r:gz') as tar:
        slow_ids = {extract_id(member.name) for member in tar.getmembers()
                    if '.xml' in member.name}

    # Remove the fast cases to avoid duplicate tests, then mark the rest slow.
    for fast_case in fast_test_cases:
        slow_ids.remove(fast_case)
    slow_test_cases = [pytest.param(*case, marks=pytest.mark.slow)
                       for case in sorted(slow_ids)]
    return fast_test_cases + slow_test_cases
class TestForceFieldParameterAssignment:
"""Regression tests checking that parameters are assigned correctly."""
@pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
reason='Test requires OE toolkit to read mol2 files')
@pytest.mark.parametrize('alkethoh_id', generate_alkethoh_parameters_assignment_cases())
def test_alkethoh_parameters_assignment(self, alkethoh_id):
"""Test that ForceField assign parameters correctly in the AlkEthOH set.
The test compares the System parameters of a AlkEthOH molecule
parameterized with AMBER and Frosst_AlkEthOH_parmAtFrosst.offxml.
The AMBER files were prepared following the pipeline described here:
https://github.com/openforcefield/open-forcefield-data/tree/master/Model-Systems/AlkEthOH_distrib/
They were built for the SMIRNOFF parametrization to yield exact same
parameters.
The AlkEthOH set, however, does not have impropers, which should be
tested separately. Currently, test_freesolv_parameters_assignment
does the job.
"""
from openforcefield.tests.utils import get_alkethoh_file_path, compare_amber_smirnoff
# Obtain the path to the input files.
alkethoh_name = 'AlkEthOH_' + alkethoh_id
mol2_filepath, top_filepath, crd_filepath = get_alkethoh_file_path(alkethoh_name, get_amber=True)
# Load molecule.
molecule = Molecule.from_file(mol2_filepath)
# Load forcefield
forcefield = ForceField('test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml')
# Compare parameters. Skip the energy checks as the parameter check should be
# sufficient. We test both energies and parameters in the slow test.
# We ignore the charges for now as they are not included in the force field.
# TODO: Reactivate the charge check when we'll be able to load charges from files.
compare_amber_smirnoff(top_filepath, crd_filepath, forcefield, molecule,
check_energies=False, ignore_charges=True)
    @pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
                        reason='Test requires OE toolkit to read mol2 files')
    def test_multi_alkethoh_parameters_assignment(self):
        """Test that systems with multiple reference molecules are parametrized correctly.
        The test relies on the fact that we have already verified we can
        parametrize correctly single AlkEthOH molecules in
        test_alkethoh_parameters_assignment(). We use ParmEd to merge
        the AMBER files to be used as reference parameters.
        """
        import parmed
        from openforcefield.tests.utils import (get_alkethoh_file_path,
                                                compare_system_parameters,
                                                compare_system_energies)
        # The AlkEthOH molecule ids to mix in the systems.
        alketoh_ids = ['r0', 'c38', 'c1161']
        # Load molecules and structures.
        molecules = []
        structures = []
        for alkethoh_id in alketoh_ids:
            mol2_filepath, top_filepath, crd_filepath = get_alkethoh_file_path(
                'AlkEthOH_'+alkethoh_id, get_amber=True)
            molecules.append(Molecule.from_file(mol2_filepath))
            amber_parm = parmed.load_file(top_filepath, crd_filepath)
            # Convert this into a real structure as mixing AmberParm objects is bugged (see ParmEd#1045).
            structures.append(amber_parm.copy(parmed.Structure))
        # Merge the structures into a single system with two copies of the last molecule.
        # (structures[-1] is the same as structures[2], so the last molecule appears twice.)
        structure_mixture = structures[0] + structures[1] + structures[2] + structures[-1]
        amber_system = structure_mixture.createSystem(nonbondedMethod=openmm.app.NoCutoff)
        # Create the OpenFF System through ForceField.
        topology = Topology.from_openmm(structure_mixture.topology, unique_molecules=molecules)
        topology.box_vectors = None
        ff = ForceField('test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml')
        off_system = ff.create_openmm_system(topology)
        # Translate the molecules a little to avoid overlapping atoms.
        positions = copy.deepcopy(structure_mixture.positions)
        translate_vectors = [
            np.array([1.0, 0.0, 0.0])*unit.nanometer,
            np.array([0.0, 1.0, 0.0])*unit.nanometer,
            np.array([0.0, 0.0, 1.0])*unit.nanometer,
            # Leave the fourth molecule where it is.
        ]
        # Shift each of the first three molecules by its vector; atoms are laid
        # out contiguously per molecule, so track a running atom offset.
        current_atom_idx = 0
        for mol_idx, (translate_vector, mol) in enumerate(zip(translate_vectors, molecules)):
            n_mol_atoms = len(mol.atoms)
            positions[current_atom_idx:current_atom_idx+n_mol_atoms] += translate_vector
            current_atom_idx += n_mol_atoms
        # Compare parameters and systems.
        # TODO: Reactivate charges comparison when we'll be able to read them from the file.
        compare_system_parameters(amber_system, off_system,
                                  systems_labels=('AMBER', 'SMIRNOFF'),
                                  ignore_charges=True)
        compare_system_energies(amber_system, off_system, positions,
                                ignore_charges=True)
@pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
reason='Test requires OE toolkit to read mol2 files')
@pytest.mark.parametrize(('freesolv_id', 'forcefield_version', 'allow_undefined_stereo'),
generate_freesolv_parameters_assignment_cases())
def test_freesolv_parameters_assignment(self, freesolv_id, forcefield_version, allow_undefined_stereo):
"""Regression test on parameters assignment based on the FreeSolv set used in the 0.1 paper.
This, contrarily to the similar AlkEthOH test, checks also constraints
and improper torsions.
"""
from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters
mol2_file_path, xml_file_path = get_freesolv_file_path(freesolv_id, forcefield_version)
# Load molecules.
molecule = Molecule.from_file(mol2_file_path, allow_undefined_stereo=allow_undefined_stereo)
# Create OpenFF System with the current toolkit.
forcefield_file_path = 'test_forcefields/old/test_ff_' + forcefield_version + '_spec_0_2.offxml'
ff = ForceField(forcefield_file_path, 'test_forcefields/old/hbonds.offxml')
ff_system = ff.create_openmm_system(molecule.to_topology())
# Load OpenMM System created with the 0.1 version of the toolkit.
from simtk import openmm
with open(xml_file_path, 'r') as f:
xml_system = openmm.XmlSerializer.deserialize(f.read())
# Compare parameters. We ignore the improper folds as in 0.0.3 we
# used a six-fold implementation while we now use a three-fold way.
# TODO: Reactivate charge comparison once we'll be able to read them from file.
compare_system_parameters(ff_system, xml_system,
systems_labels=('current OpenFF', 'SMIRNOFF 0.0.4'),
ignore_charges=True, ignore_improper_folds=True)
@pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
                    reason='Test requires OE toolkit to read mol2 files')
@pytest.mark.parametrize(('is_periodic'), (False, True))
@pytest.mark.parametrize(('gbsa_model'), ['HCT', 'OBC1', 'OBC2'])
@pytest.mark.parametrize(('freesolv_id', 'forcefield_version', 'allow_undefined_stereo'),
                         generate_freesolv_parameters_assignment_cases())
def test_freesolv_gbsa_energies(self, gbsa_model, is_periodic, freesolv_id, forcefield_version, allow_undefined_stereo):
    """
    Regression test on HCT, OBC1, and OBC2 GBSA models. This test ensures that the
    SMIRNOFF-based GBSA models match the equivalent OpenMM implementations.

    Strategy: build the same molecule twice -- once through the OpenFF toolkit
    with a SMIRNOFF GBSA offxml, and once through ParmEd/AMBER prmtop files fed
    to OpenMM's implicit-solvent machinery -- then compare the GBSA energy
    component (isolated in force group 1) and the total system energy.
    """
    from openforcefield.tests.utils import (get_freesolv_file_path,
                                            compare_system_energies, create_system_from_amber,
                                            get_context_potential_energy
                                            )
    import parmed as pmd
    from simtk import openmm
    from simtk.openmm import Platform

    mol2_file_path, _ = get_freesolv_file_path(freesolv_id, forcefield_version)
    # Load molecules.
    molecule = Molecule.from_file(mol2_file_path, allow_undefined_stereo=allow_undefined_stereo)
    # Give each atom a unique name, otherwise OpenMM will complain
    for idx, atom in enumerate(molecule.atoms):
        atom.name = f'{atom.element.symbol}{idx}'
    positions = molecule.conformers[0]

    off_gbsas = {'HCT': 'test_forcefields/GBSA_HCT-1.0.offxml',
                 'OBC1': 'test_forcefields/GBSA_OBC1-1.0.offxml',
                 'OBC2': 'test_forcefields/GBSA_OBC2-1.0.offxml'
                 }
    # Create OpenFF System with the current toolkit.
    ff = ForceField('test_forcefields/smirnoff99Frosst.offxml',
                    off_gbsas[gbsa_model])
    off_top = molecule.to_topology()
    if is_periodic:
        # 30 A cubic box; only set when testing the periodic code path.
        off_top.box_vectors = ((30., 0, 0), (0, 30., 0), (0, 0, 30.)) * unit.angstrom
    else:
        off_top.box_vectors = None

    off_omm_system = ff.create_openmm_system(off_top, charge_from_molecules=[molecule])
    off_nonbonded_force = [force for force in off_omm_system.getForces() if
                           isinstance(force, openmm.NonbondedForce)][0]

    # Round-trip the OpenFF system through ParmEd to AMBER prmtop/inpcrd files.
    # NOTE: the NamedTemporaryFile objects are kept referenced so the files
    # survive until create_system_from_amber reads them.
    omm_top = off_top.to_openmm()
    pmd_struct = pmd.openmm.load_topology(omm_top, off_omm_system, positions)
    prmtop_file = NamedTemporaryFile(suffix='.prmtop')
    inpcrd_file = NamedTemporaryFile(suffix='.inpcrd')
    pmd_struct.save(prmtop_file.name, overwrite=True)
    pmd_struct.save(inpcrd_file.name, overwrite=True)

    openmm_gbsas = {'HCT': openmm.app.HCT,
                    'OBC1': openmm.app.OBC1,
                    'OBC2': openmm.app.OBC2,
                    }
    # The functional form of the nonbonded force will change depending on whether the cutoff
    # is None during initialization. Therefore, we need to figure that out here.
    # WARNING: The NonbondedMethod enums at openmm.app.forcefield and openmm.CustomGBForce
    # aren't necessarily the same, and could be misinterpreted if the wrong one is used. For
    # create_system_from_amber, we must provide the app.forcefield version.
    if is_periodic:
        amber_nb_method = openmm.app.forcefield.CutoffPeriodic
        amber_cutoff = off_nonbonded_force.getCutoffDistance()
    else:
        amber_nb_method = openmm.app.forcefield.NoCutoff
        amber_cutoff = None

    (amber_omm_system,
     amber_omm_topology,
     amber_positions) = create_system_from_amber(prmtop_file.name,
                                                inpcrd_file.name,
                                                implicitSolvent=openmm_gbsas[gbsa_model],
                                                nonbondedMethod=amber_nb_method,
                                                nonbondedCutoff=amber_cutoff,
                                                gbsaModel='ACE',
                                                implicitSolventKappa=0.,
                                                )

    # Retrieve the GBSAForce from both the AMBER and OpenForceField systems
    off_gbsa_forces = [force for force in off_omm_system.getForces() if
                       (isinstance(force, openmm.GBSAOBCForce) or
                        isinstance(force, openmm.openmm.CustomGBForce))]
    assert len(off_gbsa_forces) == 1
    off_gbsa_force = off_gbsa_forces[0]
    amber_gbsa_forces = [force for force in amber_omm_system.getForces() if
                         (isinstance(force, openmm.GBSAOBCForce) or
                          isinstance(force, openmm.openmm.CustomGBForce))]
    assert len(amber_gbsa_forces) == 1
    amber_gbsa_force = amber_gbsa_forces[0]

    # We get radius and screen values from each model's getStandardParameters method
    if gbsa_model == 'HCT':
        gb_params = openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters(omm_top)
    elif gbsa_model == 'OBC1':
        gb_params = openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters(omm_top)
    elif gbsa_model == 'OBC2':
        gb_params = openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters(omm_top)

    # Use GB params from OpenMM GBSA classes to populate parameters
    for idx, (radius, screen) in enumerate(gb_params):
        # Keep the charge, but throw out the old radius and screen values
        q, old_radius, old_screen = amber_gbsa_force.getParticleParameters(idx)
        if isinstance(amber_gbsa_force, openmm.GBSAOBCForce):
            # Note that in GBSAOBCForce, the per-particle parameters are separate
            # arguments, while in CustomGBForce they're a single iterable
            amber_gbsa_force.setParticleParameters(idx, q, radius, screen)
        elif isinstance(amber_gbsa_force, openmm.CustomGBForce):
            # !!! WARNING: CustomAmberGBForceBase expects different per-particle parameters
            # depending on whether you use addParticle or setParticleParameters. In
            # setParticleParameters, we have to apply the offset and scale BEFORE setting
            # parameters, whereas in addParticle, it is applied afterwards, and the particle
            # parameters are not set until an auxiliary finalize() method is called. !!!
            amber_gbsa_force.setParticleParameters(idx, (q, radius - 0.009, screen * (radius - 0.009)))

    # Put the GBSA force into a separate group so we can specifically compare GBSA energies
    amber_gbsa_force.setForceGroup(1)
    off_gbsa_force.setForceGroup(1)

    # Some manual overrides to get the OFF system's NonbondedForce matched up with the AMBER system
    if is_periodic:
        off_nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.CutoffPeriodic)
    else:
        off_nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
    off_nonbonded_force.setReactionFieldDielectric(1.0)

    # Not sure if zeroing the switching width is essential -- This might only make a difference
    # in the energy if we tested on a molecule larger than the 9A cutoff
    #off_nonbonded_force.setSwitchingDistance(0)

    # Create Contexts (deepcopy the integrator since one instance cannot be
    # bound to two Contexts)
    integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
    platform = Platform.getPlatformByName('Reference')
    amber_context = openmm.Context(amber_omm_system, integrator, platform)
    off_context = openmm.Context(off_omm_system, copy.deepcopy(integrator), platform)

    # Get context energies
    amber_energy = get_context_potential_energy(amber_context, positions)
    off_energy = get_context_potential_energy(off_context, positions)
    # Very handy for debugging
    # print(openmm.XmlSerializer.serialize(off_gbsa_force))
    # print(openmm.XmlSerializer.serialize(amber_gbsa_force))

    # Ensure that the GBSA energies (which we put into ForceGroup 1) are identical
    # For Platform=OpenCL, we do get "=="-level identical numbers, but for "Reference", we don't.
    #assert amber_energy[1] == off_energy[1]
    assert abs(amber_energy[1] - off_energy[1]) < 1e-5 * unit.kilojoule/unit.mole
    # Ensure that all system energies are the same
    compare_system_energies(off_omm_system, amber_omm_system, positions, by_force_type=False)
@pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
                    reason='Test requires OE toolkit to read mol2 files')
@pytest.mark.parametrize('zero_charges', [True, False])
@pytest.mark.parametrize(('gbsa_model'), ['HCT', 'OBC1', 'OBC2'])
def test_molecule_energy_gb_no_sa(self, zero_charges, gbsa_model):
    """Test creating a GBSA system without a surface energy term, and validate its energy
    against the same system made using OpenMM's AMBER GBSA functionality.

    The topology contains two copies of the same molecule (one displaced by
    10 A) so that inter-molecule GB interactions are also exercised. When
    ``zero_charges`` is True, all partial charges are zeroed, in which case the
    GB energy component must come out exactly zero.
    """
    from openforcefield.tests.utils import (compare_system_energies, create_system_from_amber,
                                            get_context_potential_energy
                                            )
    import parmed as pmd
    from simtk import openmm
    from simtk.openmm import Platform
    import numpy as np

    # Load an arbitrary molecule from the freesolv set
    molecule = Molecule.from_file(get_data_file_path('molecules/FreeSolv/mol2files_sybyl/mobley_1036761.mol2'))
    molecule.name = 'mobley_1036761' # Name the molecule, otherwise OpenMM will complain
    if zero_charges:
        molecule.partial_charges = np.zeros(molecule.n_atoms) * unit.elementary_charge
    # Give each atom a unique name, otherwise OpenMM will complain
    for idx, atom in enumerate(molecule.atoms):
        atom.name = f'{atom.element.symbol}{idx}'
    # Two copies of the molecule: the second one shifted by 10 A.
    positions = np.concatenate((molecule.conformers[0], molecule.conformers[0] + (10 * unit.angstrom)))

    # Create OpenFF System with the current toolkit.
    off_gbsas = {'HCT': 'test_forcefields/GBSA_HCT-1.0.offxml',
                 'OBC1': 'test_forcefields/GBSA_OBC1-1.0.offxml',
                 'OBC2': 'test_forcefields/GBSA_OBC2-1.0.offxml'
                 }
    ff = ForceField('test_forcefields/smirnoff99Frosst.offxml',
                    off_gbsas[gbsa_model])
    # Disable the surface-area (SA) term; this test compares the GB term only.
    ff.get_parameter_handler('GBSA').sa_model = None
    off_top = Topology.from_molecules([molecule, molecule])
    off_omm_system = ff.create_openmm_system(off_top, charge_from_molecules=[molecule])

    # Round-trip through ParmEd to AMBER prmtop/inpcrd files. The
    # NamedTemporaryFile objects are kept referenced so the files survive
    # until create_system_from_amber reads them.
    omm_top = off_top.to_openmm()
    pmd_struct = pmd.openmm.load_topology(omm_top, off_omm_system, positions)
    prmtop_file = NamedTemporaryFile(suffix='.prmtop')
    inpcrd_file = NamedTemporaryFile(suffix='.inpcrd')
    pmd_struct.save(prmtop_file.name, overwrite=True)
    pmd_struct.save(inpcrd_file.name, overwrite=True)

    openmm_gbsas = {'HCT': openmm.app.HCT,
                    'OBC1': openmm.app.OBC1,
                    'OBC2': openmm.app.OBC2,
                    }
    (amber_omm_system,
     amber_omm_topology,
     amber_positions) = create_system_from_amber(prmtop_file.name,
                                                inpcrd_file.name,
                                                implicitSolvent=openmm_gbsas[gbsa_model],
                                                nonbondedMethod = openmm.app.forcefield.NoCutoff,
                                                nonbondedCutoff=None,
                                                gbsaModel=None,
                                                implicitSolventKappa=0.,
                                                )

    # Retrieve the GBSAForce from both the AMBER and OpenForceField systems
    off_gbsa_forces = [force for force in off_omm_system.getForces() if
                       (isinstance(force, openmm.GBSAOBCForce) or
                        isinstance(force, openmm.openmm.CustomGBForce))]
    assert len(off_gbsa_forces) == 1
    off_gbsa_force = off_gbsa_forces[0]
    amber_gbsa_forces = [force for force in amber_omm_system.getForces() if
                         (isinstance(force, openmm.GBSAOBCForce) or
                          isinstance(force, openmm.openmm.CustomGBForce))]
    assert len(amber_gbsa_forces) == 1
    amber_gbsa_force = amber_gbsa_forces[0]

    # We get radius and screen values from each model's getStandardParameters method
    if gbsa_model == 'HCT':
        gb_params = openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters(omm_top)
    elif gbsa_model == 'OBC1':
        gb_params = openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters(omm_top)
    elif gbsa_model == 'OBC2':
        gb_params = openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters(omm_top)

    # This is only necessary until https://github.com/openmm/openmm/pull/2362 is bundled into a conda release
    amber_gbsa_force.setSurfaceAreaEnergy(0)

    # Use GB params from OpenMM GBSA classes to populate parameters
    for idx, (radius, screen) in enumerate(gb_params):
        # Keep the charge, but throw out the old radius and screen values
        q, old_radius, old_screen = amber_gbsa_force.getParticleParameters(idx)
        if isinstance(amber_gbsa_force, openmm.GBSAOBCForce):
            # Note that in GBSAOBCForce, the per-particle parameters are separate
            # arguments, while in CustomGBForce they're a single iterable
            amber_gbsa_force.setParticleParameters(idx, q, radius, screen)
        elif isinstance(amber_gbsa_force, openmm.CustomGBForce):
            # !!! WARNING: CustomAmberGBForceBase expects different per-particle parameters
            # depending on whether you use addParticle or setParticleParameters. In
            # setParticleParameters, we have to apply the offset and scale BEFORE setting
            # parameters, whereas in addParticle, it is applied afterwards, and the particle
            # parameters are not set until an auxiliary finalize() method is called. !!!
            amber_gbsa_force.setParticleParameters(idx, (q, radius - 0.009, screen * (radius - 0.009)))

    # Put the GBSA force into a separate group so we can specifically compare GBSA energies
    amber_gbsa_force.setForceGroup(1)
    off_gbsa_force.setForceGroup(1)

    # Create Contexts (deepcopy the integrator since one instance cannot be
    # bound to two Contexts)
    integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
    platform = Platform.getPlatformByName('Reference')
    amber_context = openmm.Context(amber_omm_system, integrator, platform)
    off_context = openmm.Context(off_omm_system, copy.deepcopy(integrator), platform)

    # Get context energies
    amber_energy = get_context_potential_energy(amber_context, positions)
    off_energy = get_context_potential_energy(off_context, positions)
    # Very handy for debugging
    # print(openmm.XmlSerializer.serialize(off_gbsa_force))
    # print(openmm.XmlSerializer.serialize(amber_gbsa_force))

    # Ensure that the GBSA energies (which we put into ForceGroup 1) are identical
    # For Platform=OpenCL, we do get "=="-level identical numbers, but for "Reference", we don't.
    #assert amber_energy[1] == off_energy[1]
    assert abs(amber_energy[1] - off_energy[1]) < 1e-5 * unit.kilojoule/unit.mole

    # If charges are zero, the GB energy component should be 0, so the total GBSA energy should be 0
    if zero_charges:
        assert amber_energy[1] == 0. * unit.kilojoule / unit.mole
    else:
        assert amber_energy[1] != 0. * unit.kilojoule / unit.mole

    # Ensure that all system energies are the same
    compare_system_energies(off_omm_system, amber_omm_system, positions, by_force_type=False)
class TestSmirnoffVersionConverter:
    """Tests that older SMIRNOFF spec versions are read and up-converted so
    that they parameterize molecules identically to the current spec."""

    @pytest.mark.skipif(not OpenEyeToolkitWrapper.is_available(),
                        reason='Test requires OE toolkit to read mol2 files')
    @pytest.mark.parametrize(('freesolv_id', 'forcefield_version', 'allow_undefined_stereo'),
                             generate_freesolv_parameters_assignment_cases())
    @pytest.mark.parametrize(('spec'), ['0_1', '0_2', '0_3'])
    def test_read_smirnoff_spec_freesolv(self, freesolv_id, forcefield_version, allow_undefined_stereo, spec):
        """
        Test reading an 0.2 smirnoff spec file, by reading an 0.1 spec representation of a set of parameters,
        and ensuring that it parameterizes molecules identically to the same FF in the most recent spec

        Regression test on parameters assignment based on the FreeSolv set used in the 0.1 paper.
        This, contrarily to the similar AlkEthOH test, checks also constraints
        and improper torsions.

        The AlkEthOH set, however, does not have impropers, which should be
        tested separately. Currently, test_freesolv_parameters_assignment
        does the job.
        """
        from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters
        mol2_file_path, xml_file_path = get_freesolv_file_path(freesolv_id, forcefield_version)

        # Load molecules.
        molecule = Molecule.from_file(mol2_file_path, allow_undefined_stereo=allow_undefined_stereo)

        # Create OpenFF System with the current toolkit.
        #forcefield_file_path = 'test_forcefields/old/smirnoff99Frosst_1_0_8_spec_0_2.offxml'
        forcefield_file_path = f'test_forcefields/old/test_ff_{forcefield_version}_spec_{spec}.offxml'
        ff = ForceField(forcefield_file_path, 'test_forcefields/old/hbonds.offxml')
        ff_system = ff.create_openmm_system(molecule.to_topology())

        # Load OpenMM System created with the 0.1 version of the toolkit.
        from simtk import openmm
        with open(xml_file_path, 'r') as f:
            xml_system = openmm.XmlSerializer.deserialize(f.read())

        # Compare parameters. We ignore the improper folds as in 0.0.3 we
        # used a six-fold implementation while we now use a three-fold way.
        # TODO: Reactivate charge comparison once we'll be able to read them from file.
        compare_system_parameters(ff_system, xml_system,
                                  systems_labels=('current OpenFF', 'SMIRNOFF 0.0.4'),
                                  ignore_charges=True, ignore_improper_folds=True)
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_electrostatics_options(self):
    """Test parameter assignment using smirnoff99Frosst on laromustine with various long-range electrostatics options.
    """
    mol2_path = get_data_file_path('molecules/laromustine_tripos.mol2')
    mol = openforcefield.topology.Molecule.from_file(mol2_path)
    ff = ForceField([smirnoff99Frosst_offxml_file_path, chargeincrement_offxml_file_path])
    for electrostatics_method in ('PME', 'reaction-field', 'Coulomb'):
        # Swap in the long-range electrostatics treatment under test.
        ff.forces['Electrostatics'].method = electrostatics_method
        check_fn = partial(check_system_creation_from_molecule, ff, mol)
        check_fn.description = 'Testing {} parameter assignment using molecule {}'.format(offxml_file_path, mol.name)
        # NOTE(review): `offxml_file_path` looks undefined in this scope; the
        # test is skipped pending the 0.2.0 rewrite, so it never executes.
        # yield check_fn  (old nose-style generator test, left disabled)
# TODO: Implement a similar test, where we compare OpenMM energy evals from an
# AMBER-parameterized system to OFF-parameterized systems
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_chargeincrement(self):
    """Test parameter assignment using smirnoff99Frosst on laromustine with ChargeIncrementModel.
    """
    mol2_path = get_data_file_path('molecules/laromustine_tripos.mol2')
    mol = openforcefield.topology.Molecule.from_file(mol2_path)
    # Combine the base force field with the charge-increment test handler.
    ff = ForceField(['test_forcefields/smirnoff99Frosst.offxml', 'chargeincrement-test'])
    check_system_creation_from_molecule(ff, mol)
# TODO: We can't implement a test for chargeincrement yet because we
# haven't settled on a SMIRNOFF spec for chargeincrementmodel
@pytest.mark.skip(reason='Needs to be updated for 0.2.0 syntax')
def test_create_system_molecules_parmatfrosst_gbsa(self):
    """Test creation of a System object from small molecules to test parm@frosst forcefield with GBSA support.
    """
    mol2_path = get_data_file_path('molecules/AlkEthOH_test_filt1_tripos.mol2')
    check_parameter_assignment(
        offxml_file_path='test_forcefields/Frosst_AlkEthOH_GBSA.offxml',
        molecules_file_path=mol2_path,
    )
# TODO: Figure out if we just want to check that energy is finite (this is what the original test did,
# or compare numerically to a reference system.
# TODO: test_get_new_parameterhandler
# TODO: test_get_existing_parameterhandler
# TODO: test_get_parameter
# TODO: test_add_parameter
# TODO: test_add_parameter_fractional_bondorder
# TODO: test_create_force_fractional_bondorder
# TODO: test_store_cosmetic_attribs
# TODO: test_write_cosmetic_attribs
# TODO: test_store_cosmetic_elements (eg. Author)
# TODO: test_write_cosmetic_elements (eg. Author)
# TODO: add_handler_with_incompatible_kwargs (for example different scale14 vals)
# TODO: test_invalid aromaticity_model
# TODO: test_invalid_file_version
# TODO: test_library_charges
# TODO: test_forcefield_to_dict (ensure that ParameterHandlers serialize without collisions
# and header-level attribs include handler attribs as well as attached units,
# note that header attribs are not ordered)
# TODO: test_create_gbsa
| [
"tarfile.open",
"openforcefield.tests.utils.compare_system_energies",
"simtk.openmm.VerletIntegrator",
"openforcefield.utils.toolkits.AmberToolsToolkitWrapper.is_available",
"simtk.openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters",
"pickle.dumps",
"simtk.openmm.Platform.getPlatform... | [((24623, 24659), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (24657, 24659), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((109331, 109394), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (109347, 109394), False, 'import pytest\n'), ((110341, 110404), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (110357, 110404), False, 'import pytest\n'), ((111012, 111075), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (111028, 111075), False, 'import pytest\n'), ((1246, 1294), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""water.sdf"""'], {}), "('systems', 'monomers', 'water.sdf')\n", (1258, 1294), False, 'import os\n'), ((15728, 15738), 'openforcefield.topology.Molecule', 'Molecule', ([], {}), '()\n', (15736, 15738), False, 'from openforcefield.topology import Molecule, Topology\n'), ((16964, 16974), 'openforcefield.topology.Molecule', 'Molecule', ([], {}), '()\n', (16972, 16974), False, 'from openforcefield.topology import Molecule, Topology\n'), ((18133, 18143), 'openforcefield.topology.Molecule', 'Molecule', ([], {}), '()\n', (18141, 18143), False, 'from openforcefield.topology import Molecule, Topology\n'), ((19384, 19394), 'openforcefield.topology.Molecule', 'Molecule', ([], {}), '()\n', (19392, 19394), False, 'from openforcefield.topology import Molecule, Topology\n'), ((20369, 20379), 'openforcefield.topology.Molecule', 'Molecule', ([], {}), '()\n', (20377, 20379), False, 'from openforcefield.topology 
import Molecule, Topology\n'), ((24763, 24797), 'openforcefield.utils.toolkits.RDKitToolkitWrapper.is_available', 'RDKitToolkitWrapper.is_available', ([], {}), '()\n', (24795, 24797), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((24802, 24841), 'openforcefield.utils.toolkits.AmberToolsToolkitWrapper.is_available', 'AmberToolsToolkitWrapper.is_available', ([], {}), '()\n', (24839, 24841), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((26714, 26777), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (26730, 26777), False, 'import pytest\n'), ((27128, 27191), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (27144, 27191), False, 'import pytest\n'), ((27542, 27605), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (27558, 27605), False, 'import pytest\n'), ((27790, 27853), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (27806, 27853), False, 'import pytest\n'), ((28307, 28370), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (28323, 28370), False, 'import pytest\n'), ((28819, 28882), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (28835, 28882), False, 'import pytest\n'), ((29847, 29910), 'pytest.mark.skip', 'pytest.mark.skip', ([], 
{'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (29863, 29910), False, 'import pytest\n'), ((30241, 30304), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Needs to be updated for 0.2.0 syntax"""'}), "(reason='Needs to be updated for 0.2.0 syntax')\n", (30257, 30304), False, 'import pytest\n'), ((34517, 34603), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""file_path_extension"""', "['xml', 'XML', 'offxml', 'OFFXML']"], {}), "('file_path_extension', ['xml', 'XML', 'offxml',\n 'OFFXML'])\n", (34540, 34603), False, 'import pytest\n'), ((35568, 35654), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""file_path_extension"""', "['xml', 'XML', 'offxml', 'OFFXML']"], {}), "('file_path_extension', ['xml', 'XML', 'offxml',\n 'OFFXML'])\n", (35591, 35654), False, 'import pytest\n'), ((41790, 41878), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (41813, 41878), False, 'import pytest\n'), ((42388, 42404), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (42402, 42404), False, 'import pytest\n'), ((43472, 43560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (43495, 43560), False, 'import pytest\n'), ((45755, 45843), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (45778, 45843), False, 'import pytest\n'), ((46715, 46803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", 
(46738, 46803), False, 'import pytest\n'), ((47477, 47565), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (47500, 47565), False, 'import pytest\n'), ((47567, 47731), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""box"""', "['ethanol_water.pdb', 'cyclohexane_water.pdb',\n 'cyclohexane_ethanol_0.4_0.6.pdb',\n 'propane_methane_butanol_0.2_0.3_0.5.pdb']"], {}), "('box', ['ethanol_water.pdb',\n 'cyclohexane_water.pdb', 'cyclohexane_ethanol_0.4_0.6.pdb',\n 'propane_methane_butanol_0.2_0.3_0.5.pdb'])\n", (47590, 47731), False, 'import pytest\n'), ((52760, 52926), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""We will not support going directly to ParmEd for now.We will instead feed OpenMM System objects to ParmEd for further processing."""'}), "(reason=\n 'We will not support going directly to ParmEd for now.We will instead feed OpenMM System objects to ParmEd for further processing.'\n )\n", (52776, 52926), False, 'import pytest\n'), ((53591, 53679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (53614, 53679), False, 'import pytest\n'), ((54534, 54596), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputs"""', 'nonbonded_resolution_matrix'], {}), "('inputs', nonbonded_resolution_matrix)\n", (54557, 54596), False, 'import pytest\n'), ((56758, 56846), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (56781, 56846), False, 'import pytest\n'), ((59458, 59546), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], 
{}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (59481, 59546), False, 'import pytest\n'), ((61235, 61323), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""toolkit_registry,registry_description"""', 'toolkit_registries'], {}), "('toolkit_registry,registry_description',\n toolkit_registries)\n", (61258, 61323), False, 'import pytest\n'), ((90470, 90523), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_periodic"""', '(False, True)'], {}), "('is_periodic', (False, True))\n", (90493, 90523), False, 'import pytest\n'), ((90531, 90593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gbsa_model"""', "['HCT', 'OBC1', 'OBC2']"], {}), "('gbsa_model', ['HCT', 'OBC1', 'OBC2'])\n", (90554, 90593), False, 'import pytest\n'), ((99272, 99326), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""zero_charges"""', '[True, False]'], {}), "('zero_charges', [True, False])\n", (99295, 99326), False, 'import pytest\n'), ((99332, 99394), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gbsa_model"""', "['HCT', 'OBC1', 'OBC2']"], {}), "('gbsa_model', ['HCT', 'OBC1', 'OBC2'])\n", (99355, 99394), False, 'import pytest\n'), ((107122, 107176), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spec"""', "['0_1', '0_2', '0_3']"], {}), "('spec', ['0_1', '0_2', '0_3'])\n", (107145, 107176), False, 'import pytest\n'), ((109587, 109642), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/laromustine_tripos.mol2"""'], {}), "('molecules/laromustine_tripos.mol2')\n", (109605, 109642), False, 'from openforcefield.utils import get_data_file_path\n'), ((109739, 109824), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['[smirnoff99Frosst_offxml_file_path, chargeincrement_offxml_file_path]'], {}), '([smirnoff99Frosst_offxml_file_path,\n chargeincrement_offxml_file_path])\n', (109749, 109824), False, 'from openforcefield.typing.engines.smirnoff import 
ForceField\n'), ((110569, 110624), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/laromustine_tripos.mol2"""'], {}), "('molecules/laromustine_tripos.mol2')\n", (110587, 110624), False, 'from openforcefield.utils import get_data_file_path\n'), ((110721, 110806), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (["['test_forcefields/smirnoff99Frosst.offxml', 'chargeincrement-test']"], {}), "(['test_forcefields/smirnoff99Frosst.offxml', 'chargeincrement-test']\n )\n", (110731, 110806), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((111279, 111342), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/AlkEthOH_test_filt1_tripos.mol2"""'], {}), "('molecules/AlkEthOH_test_filt1_tripos.mol2')\n", (111297, 111342), False, 'from openforcefield.utils import get_data_file_path\n'), ((16511, 16572), 'numpy.array', 'np.array', (['[-0.4, -0.3, -0.2, -0.1, 1e-05, 0.1, 0.2, 0.3, 0.4]'], {}), '([-0.4, -0.3, -0.2, -0.1, 1e-05, 0.1, 0.2, 0.3, 0.4])\n', (16519, 16572), True, 'import numpy as np\n'), ((17747, 17808), 'numpy.array', 'np.array', (['[0.4, 0.3, 0.2, 0.1, 1e-05, -0.1, -0.2, -0.3, -0.4]'], {}), '([0.4, 0.3, 0.2, 0.1, 1e-05, -0.1, -0.2, -0.3, -0.4])\n', (17755, 17808), True, 'import numpy as np\n'), ((20056, 20087), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (20064, 20087), True, 'import numpy as np\n'), ((25178, 25190), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', ([], {}), '()\n', (25188, 25190), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((25772, 25823), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', ([], {'parameter_handler_classes': '[BondHandler]'}), '(parameter_handler_classes=[BondHandler])\n', (25782, 25823), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((26244, 26298), 
'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (26254, 26298), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((27099, 27121), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['file_paths'], {}), '(file_paths)\n', (27109, 27121), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((27725, 27783), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/Frosst_AlkEthOH_GBSA.offxml"""'], {}), "('test_forcefields/Frosst_AlkEthOH_GBSA.offxml')\n", (27735, 27783), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((28281, 28300), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['urls[0]'], {}), '(urls[0])\n', (28291, 28300), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((28796, 28812), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['urls'], {}), '(urls)\n', (28806, 28812), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((29411, 29436), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff'], {}), '(simple_xml_ff)\n', (29421, 29436), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((29962, 30007), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['smirnoff99Frosst_offxml_file_path'], {}), '(smirnoff99Frosst_offxml_file_path)\n', (29972, 30007), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((30050, 30079), 'copy.deepcopy', 'copy.deepcopy', (['cls.forcefield'], {}), '(cls.forcefield)\n', (30063, 30079), False, 'import copy\n'), ((30420, 30465), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['smirnoff99Frosst_offxml_file_path'], {}), '(smirnoff99Frosst_offxml_file_path)\n', (30430, 30465), False, 
'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((30582, 30628), 'openforcefield.typing.engines.smirnoff.ForceField.__setstate__', 'ForceField.__setstate__', (['serialized_forcefield'], {}), '(serialized_forcefield)\n', (30605, 30628), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((30944, 30969), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff'], {}), '(simple_xml_ff)\n', (30954, 30969), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((30988, 31014), 'pickle.dumps', 'pickle.dumps', (['forcefield_1'], {}), '(forcefield_1)\n', (31000, 31014), False, 'import pickle\n'), ((31038, 31059), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (31050, 31059), False, 'import pickle\n'), ((31325, 31395), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements'], {'allow_cosmetic_attributes': '(True)'}), '(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)\n', (31335, 31395), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((31414, 31440), 'pickle.dumps', 'pickle.dumps', (['forcefield_1'], {}), '(forcefield_1)\n', (31426, 31440), False, 'import pickle\n'), ((31464, 31485), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (31476, 31485), False, 'import pickle\n'), ((31809, 31834), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff'], {}), '(simple_xml_ff)\n', (31819, 31834), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((31907, 31927), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['string_1'], {}), '(string_1)\n', (31917, 31927), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((32600, 32670), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements'], {'allow_cosmetic_attributes': '(True)'}), 
'(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)\n', (32610, 32670), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((33287, 33339), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['string_1'], {'allow_cosmetic_attributes': '(True)'}), '(string_1, allow_cosmetic_attributes=True)\n', (33297, 33339), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((34047, 34120), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml')\n", (34057, 34120), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((34260, 34331), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirff99Frosst_reference_0_1_spec.offxml"""'], {}), "('test_forcefields/smirff99Frosst_reference_0_1_spec.offxml')\n", (34270, 34331), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((34436, 34509), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst_reference_0_2_spec.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst_reference_0_2_spec.offxml')\n", (34446, 34509), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((35128, 35180), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': "('.' + file_path_extension)"}), "(suffix='.' + file_path_extension)\n", (35146, 35180), False, 'from tempfile import NamedTemporaryFile\n'), ((35199, 35251), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': "('.' + file_path_extension)"}), "(suffix='.' 
+ file_path_extension)\n", (35217, 35251), False, 'from tempfile import NamedTemporaryFile\n'), ((35275, 35300), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff'], {}), '(simple_xml_ff)\n', (35285, 35300), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((35395, 35419), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['iofile1.name'], {}), '(iofile1.name)\n', (35405, 35419), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((36229, 36281), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': "('.' + file_path_extension)"}), "(suffix='.' + file_path_extension)\n", (36247, 36281), False, 'from tempfile import NamedTemporaryFile\n'), ((36300, 36352), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': "('.' + file_path_extension)"}), "(suffix='.' + file_path_extension)\n", (36318, 36352), False, 'from tempfile import NamedTemporaryFile\n'), ((36371, 36423), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': "('.' + file_path_extension)"}), "(suffix='.' 
+ file_path_extension)\n", (36389, 36423), False, 'from tempfile import NamedTemporaryFile\n'), ((36783, 36853), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements'], {'allow_cosmetic_attributes': '(True)'}), '(xml_ff_w_cosmetic_elements, allow_cosmetic_attributes=True)\n', (36793, 36853), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((37557, 37613), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['iofile1.name'], {'allow_cosmetic_attributes': '(True)'}), '(iofile1.name, allow_cosmetic_attributes=True)\n', (37567, 37613), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((39267, 39356), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff', 'xml_ff_w_cosmetic_elements'], {'allow_cosmetic_attributes': '(True)'}), '(simple_xml_ff, xml_ff_w_cosmetic_elements,\n allow_cosmetic_attributes=True)\n', (39277, 39356), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((39552, 39645), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements', 'xml_ff_w_comments'], {'allow_cosmetic_attributes': '(True)'}), '(xml_ff_w_cosmetic_elements, xml_ff_w_comments,\n allow_cosmetic_attributes=True)\n', (39562, 39645), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((41759, 41782), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['gbsa_ff_xml'], {}), '(gbsa_ff_xml)\n', (41769, 41782), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((42015, 42069), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (42025, 42069), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((42216, 42282), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', 
(['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (42236, 42282), False, 'from openforcefield.topology import Molecule, Topology\n'), ((42927, 42967), 'copy.deepcopy', 'copy.deepcopy', (['BondHandler._DEPENDENCIES'], {}), '(BondHandler._DEPENDENCIES)\n', (42940, 42967), False, 'import copy\n'), ((42994, 43035), 'copy.deepcopy', 'copy.deepcopy', (['AngleHandler._DEPENDENCIES'], {}), '(AngleHandler._DEPENDENCIES)\n', (43007, 43035), False, 'import copy\n'), ((44009, 44063), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (44019, 44063), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((44211, 44277), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (44231, 44277), False, 'from openforcefield.topology import Molecule, Topology\n'), ((44831, 45182), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""\n<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">\n <ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">\n <Proper smirks="[#99:1]-[#99X4:2]-[#99:3]-[#99:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>\n </ProperTorsions>\n</SMIRNOFF>\n"""'], {}), '(\n """\n<SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL">\n <ProperTorsions version="0.3" potential="k*(1+cos(periodicity*theta-phase))">\n <Proper smirks="[#99:1]-[#99X4:2]-[#99:3]-[#99:4]" id="t1" idivf1="1" k1="0.156 * kilocalories_per_mole" periodicity1="3" phase1="0.0 * degree"/>\n </ProperTorsions>\n</SMIRNOFF>\n"""\n )\n', (44841, 45182), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((45319, 45385), 
'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (45339, 45385), False, 'from openforcefield.topology import Molecule, Topology\n'), ((46074, 46128), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (46084, 46128), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((46578, 46644), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (46598, 46644), False, 'from openforcefield.topology import Molecule, Topology\n'), ((47043, 47097), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (47053, 47097), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((47280, 47346), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (47300, 47346), False, 'from openforcefield.topology import Molecule, Topology\n'), ((48171, 48225), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (48181, 48225), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((48334, 48360), 'simtk.openmm.app.PDBFile', 'app.PDBFile', (['box_file_path'], {}), '(box_file_path)\n', (48345, 48360), False, 'from simtk.openmm import app, NonbondedForce\n'), ((48659, 48725), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 
'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (48679, 48725), False, 'from openforcefield.topology import Molecule, Topology\n'), ((49270, 49329), 'openforcefield.utils.toolkits.ToolkitRegistry', 'ToolkitRegistry', ([], {'toolkit_precedence': '[OpenEyeToolkitWrapper]'}), '(toolkit_precedence=[OpenEyeToolkitWrapper])\n', (49285, 49329), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((49436, 49490), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (49446, 49490), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((49745, 49812), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules1'}), '(pdbfile.topology, unique_molecules=molecules1)\n', (49765, 49812), False, 'from openforcefield.topology import Molecule, Topology\n'), ((50235, 50302), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules2'}), '(pdbfile.topology, unique_molecules=molecules2)\n', (50255, 50302), False, 'from openforcefield.topology import Molecule, Topology\n'), ((50565, 50601), 'simtk.openmm.XmlSerializer.serialize', 'XmlSerializer.serialize', (['omm_system1'], {}), '(omm_system1)\n', (50588, 50601), False, 'from simtk.openmm import XmlSerializer\n'), ((50625, 50661), 'simtk.openmm.XmlSerializer.serialize', 'XmlSerializer.serialize', (['omm_system2'], {}), '(omm_system2)\n', (50648, 50661), False, 'from simtk.openmm import XmlSerializer\n'), ((51273, 51360), 'openforcefield.utils.toolkits.ToolkitRegistry', 'ToolkitRegistry', ([], {'toolkit_precedence': '[RDKitToolkitWrapper, AmberToolsToolkitWrapper]'}), '(toolkit_precedence=[RDKitToolkitWrapper,\n AmberToolsToolkitWrapper])\n', (51288, 
51360), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((51378, 51432), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (51388, 51432), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((51688, 51755), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules1'}), '(pdbfile.topology, unique_molecules=molecules1)\n', (51708, 51755), False, 'from openforcefield.topology import Molecule, Topology\n'), ((52180, 52247), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules2'}), '(pdbfile.topology, unique_molecules=molecules2)\n', (52200, 52247), False, 'from openforcefield.topology import Molecule, Topology\n'), ((52510, 52546), 'simtk.openmm.XmlSerializer.serialize', 'XmlSerializer.serialize', (['omm_system1'], {}), '(omm_system1)\n', (52533, 52546), False, 'from simtk.openmm import XmlSerializer\n'), ((52570, 52606), 'simtk.openmm.XmlSerializer.serialize', 'XmlSerializer.serialize', (['omm_system2'], {}), '(omm_system2)\n', (52593, 52606), False, 'from simtk.openmm import XmlSerializer\n'), ((53091, 53145), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (53101, 53145), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((53411, 53477), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (53431, 53477), False, 'from openforcefield.topology import Molecule, Topology\n'), ((53927, 53989), 
'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (53945, 53989), False, 'from openforcefield.utils import get_data_file_path\n'), ((54011, 54032), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['file_path'], {}), '(file_path)\n', (54021, 54032), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((54217, 54283), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (54237, 54283), False, 'from openforcefield.topology import Molecule, Topology\n'), ((55237, 55291), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (55247, 55291), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((55400, 55466), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (55420, 55466), False, 'from openforcefield.topology import Molecule, Topology\n'), ((57201, 57263), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (57219, 57263), False, 'from openforcefield.utils import get_data_file_path\n'), ((57285, 57306), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['file_path'], {}), '(file_path)\n', (57295, 57306), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((57414, 57480), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, 
unique_molecules=molecules)\n', (57434, 57480), False, 'from openforcefield.topology import Molecule, Topology\n'), ((58595, 58662), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile2.topology'], {'unique_molecules': 'molecules'}), '(pdbfile2.topology, unique_molecules=molecules)\n', (58615, 58662), False, 'from openforcefield.topology import Molecule, Topology\n'), ((60058, 60120), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (60076, 60120), False, 'from openforcefield.utils import get_data_file_path\n'), ((60142, 60163), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['file_path'], {}), '(file_path)\n', (60152, 60163), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((60271, 60337), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': '[ethanol]'}), '(pdbfile.topology, unique_molecules=[ethanol])\n', (60291, 60337), False, 'from openforcefield.topology import Molecule, Topology\n'), ((61756, 61818), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""test_forcefields/smirnoff99Frosst.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml')\n", (61774, 61818), False, 'from openforcefield.utils import get_data_file_path\n'), ((61840, 61861), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['file_path'], {}), '(file_path)\n', (61850, 61861), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((61983, 62049), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['pdbfile.topology'], {'unique_molecules': 'molecules'}), '(pdbfile.topology, unique_molecules=molecules)\n', (62003, 62049), False, 'from openforcefield.topology import Molecule, Topology\n'), ((63201, 63292), 
'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', '"""test_forcefields/tip3p.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n 'test_forcefields/tip3p.offxml')\n", (63211, 63292), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((64214, 64263), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_spec_docs_ala_library_charges_xml'], {}), '(xml_spec_docs_ala_library_charges_xml)\n', (64224, 64263), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((64277, 64328), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_spec_docs_tip3p_library_charges_xml'], {}), '(xml_spec_docs_tip3p_library_charges_xml)\n', (64287, 64328), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((64725, 64844), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', '"""test_forcefields/tip3p.offxml"""', 'xml_OH_library_charges_xml'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n 'test_forcefields/tip3p.offxml', xml_OH_library_charges_xml)\n", (64735, 64844), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((65523, 65642), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_OH_library_charges_xml', '"""test_forcefields/tip3p.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_OH_library_charges_xml, 'test_forcefields/tip3p.offxml')\n", (65533, 65642), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((66269, 66360), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', '"""test_forcefields/tip3p.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n 'test_forcefields/tip3p.offxml')\n", (66279, 66360), 
False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((66473, 66508), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['[mol, mol]'], {}), '([mol, mol])\n', (66496, 66508), False, 'from openforcefield.topology import Molecule, Topology\n'), ((67390, 67480), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_ethanol_library_charges_ff'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_ethanol_library_charges_ff)\n", (67400, 67480), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((68021, 68055), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['molecules'], {}), '(molecules)\n', (68044, 68055), False, 'from openforcefield.topology import Molecule, Topology\n'), ((68860, 68986), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_CH_zeroes_library_charges_xml', '"""test_forcefields/tip3p.offxml"""'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_CH_zeroes_library_charges_xml, 'test_forcefields/tip3p.offxml')\n", (68870, 68986), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((71171, 71205), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['molecules'], {}), '(molecules)\n', (71194, 71205), False, 'from openforcefield.topology import Molecule, Topology\n'), ((73481, 73580), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_ethanol_library_charges_in_parts_ff'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_ethanol_library_charges_in_parts_ff)\n", (73491, 73580), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((73775, 73809), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['molecules'], {}), 
'(molecules)\n', (73798, 73809), False, 'from openforcefield.topology import Molecule, Topology\n'), ((74719, 74817), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_ethanol_library_charges_by_atom_ff'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_ethanol_library_charges_by_atom_ff)\n", (74729, 74817), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((75011, 75045), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['molecules'], {}), '(molecules)\n', (75034, 75045), False, 'from openforcefield.topology import Molecule, Topology\n'), ((76024, 76058), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['molecules'], {}), '(molecules)\n', (76047, 76058), False, 'from openforcefield.topology import Molecule, Topology\n'), ((76155, 76253), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_ethanol_library_charges_by_atom_ff'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_ethanol_library_charges_by_atom_ff)\n", (76165, 76253), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((76817, 76915), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'xml_ethanol_library_charges_by_atom_ff'], {}), "('test_forcefields/smirnoff99Frosst.offxml',\n xml_ethanol_library_charges_by_atom_ff)\n", (76827, 76915), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((78334, 78360), 'openforcefield.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['"""CC"""'], {}), "('CC')\n", (78354, 78360), False, 'from openforcefield.topology import Molecule, Topology\n'), ((78380, 78413), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['[ethane]'], {}), '([ethane])\n', (78403, 78413), False, 
'from openforcefield.topology import Molecule, Topology\n'), ((78427, 78492), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['XML_FF_GENERICS', '"""test_forcefields/old/hbonds.offxml"""'], {}), "(XML_FF_GENERICS, 'test_forcefields/old/hbonds.offxml')\n", (78437, 78492), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((79813, 79864), 'os.path.join', 'os.path.join', (['"""molecules"""', '"""AlkEthOH_tripos.tar.gz"""'], {}), "('molecules', 'AlkEthOH_tripos.tar.gz')\n", (79825, 79864), False, 'import os\n'), ((79875, 79919), 'tarfile.open', 'tarfile.open', (['alkethoh_tar_file_path', '"""r:gz"""'], {}), "(alkethoh_tar_file_path, 'r:gz')\n", (79887, 79919), False, 'import tarfile\n'), ((80529, 80571), 'pytest.param', 'pytest.param', (['case'], {'marks': 'pytest.mark.slow'}), '(case, marks=pytest.mark.slow)\n', (80541, 80571), False, 'import pytest\n'), ((82453, 82497), 'os.path.join', 'os.path.join', (['"""molecules"""', '"""FreeSolv.tar.gz"""'], {}), "('molecules', 'FreeSolv.tar.gz')\n", (82465, 82497), False, 'import os\n'), ((82508, 82552), 'tarfile.open', 'tarfile.open', (['freesolv_tar_file_path', '"""r:gz"""'], {}), "(freesolv_tar_file_path, 'r:gz')\n", (82520, 82552), False, 'import tarfile\n'), ((82874, 82917), 'pytest.param', 'pytest.param', (['*case'], {'marks': 'pytest.mark.slow'}), '(*case, marks=pytest.mark.slow)\n', (82886, 82917), False, 'import pytest\n'), ((84386, 84439), 'openforcefield.tests.utils.get_alkethoh_file_path', 'get_alkethoh_file_path', (['alkethoh_name'], {'get_amber': '(True)'}), '(alkethoh_name, get_amber=True)\n', (84408, 84439), False, 'from openforcefield.tests.utils import get_alkethoh_file_path, compare_system_parameters, compare_system_energies\n'), ((84485, 84518), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['mol2_filepath'], {}), '(mol2_filepath)\n', (84503, 84518), False, 'from openforcefield.topology import Molecule, Topology\n'), ((84567, 84633), 
'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml"""'], {}), "('test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml')\n", (84577, 84633), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((84982, 85101), 'openforcefield.tests.utils.compare_amber_smirnoff', 'compare_amber_smirnoff', (['top_filepath', 'crd_filepath', 'forcefield', 'molecule'], {'check_energies': '(False)', 'ignore_charges': '(True)'}), '(top_filepath, crd_filepath, forcefield, molecule,\n check_energies=False, ignore_charges=True)\n', (85004, 85101), False, 'from openforcefield.tests.utils import get_alkethoh_file_path, compare_amber_smirnoff\n'), ((86965, 87041), 'openforcefield.topology.Topology.from_openmm', 'Topology.from_openmm', (['structure_mixture.topology'], {'unique_molecules': 'molecules'}), '(structure_mixture.topology, unique_molecules=molecules)\n', (86985, 87041), False, 'from openforcefield.topology import Molecule, Topology\n'), ((87091, 87157), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml"""'], {}), "('test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml')\n", (87101, 87157), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((87305, 87347), 'copy.deepcopy', 'copy.deepcopy', (['structure_mixture.positions'], {}), '(structure_mixture.positions)\n', (87318, 87347), False, 'import copy\n'), ((88044, 88158), 'openforcefield.tests.utils.compare_system_parameters', 'compare_system_parameters', (['amber_system', 'off_system'], {'systems_labels': "('AMBER', 'SMIRNOFF')", 'ignore_charges': '(True)'}), "(amber_system, off_system, systems_labels=('AMBER',\n 'SMIRNOFF'), ignore_charges=True)\n", (88069, 88158), False, 'from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters\n'), ((88231, 88317), 'openforcefield.tests.utils.compare_system_energies', 
'compare_system_energies', (['amber_system', 'off_system', 'positions'], {'ignore_charges': '(True)'}), '(amber_system, off_system, positions, ignore_charges\n =True)\n', (88254, 88317), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((89133, 89188), 'openforcefield.tests.utils.get_freesolv_file_path', 'get_freesolv_file_path', (['freesolv_id', 'forcefield_version'], {}), '(freesolv_id, forcefield_version)\n', (89155, 89188), False, 'from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters\n'), ((89235, 89321), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['mol2_file_path'], {'allow_undefined_stereo': 'allow_undefined_stereo'}), '(mol2_file_path, allow_undefined_stereo=\n allow_undefined_stereo)\n', (89253, 89321), False, 'from openforcefield.topology import Molecule, Topology\n'), ((89493, 89563), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['forcefield_file_path', '"""test_forcefields/old/hbonds.offxml"""'], {}), "(forcefield_file_path, 'test_forcefields/old/hbonds.offxml')\n", (89503, 89563), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((90099, 90258), 'openforcefield.tests.utils.compare_system_parameters', 'compare_system_parameters', (['ff_system', 'xml_system'], {'systems_labels': "('current OpenFF', 'SMIRNOFF 0.0.4')", 'ignore_charges': '(True)', 'ignore_improper_folds': '(True)'}), "(ff_system, xml_system, systems_labels=(\n 'current OpenFF', 'SMIRNOFF 0.0.4'), ignore_charges=True,\n ignore_improper_folds=True)\n", (90124, 90258), False, 'from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters\n'), ((91516, 91571), 'openforcefield.tests.utils.get_freesolv_file_path', 'get_freesolv_file_path', (['freesolv_id', 'forcefield_version'], {}), '(freesolv_id, forcefield_version)\n', (91538, 91571), False, 'from openforcefield.tests.utils 
import get_freesolv_file_path, compare_system_parameters\n'), ((91618, 91704), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['mol2_file_path'], {'allow_undefined_stereo': 'allow_undefined_stereo'}), '(mol2_file_path, allow_undefined_stereo=\n allow_undefined_stereo)\n', (91636, 91704), False, 'from openforcefield.topology import Molecule, Topology\n'), ((92226, 92303), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'off_gbsas[gbsa_model]'], {}), "('test_forcefields/smirnoff99Frosst.offxml', off_gbsas[gbsa_model])\n", (92236, 92303), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((92848, 92908), 'parmed.openmm.load_topology', 'pmd.openmm.load_topology', (['omm_top', 'off_omm_system', 'positions'], {}), '(omm_top, off_omm_system, positions)\n', (92872, 92908), True, 'import parmed as pmd\n'), ((92931, 92967), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".prmtop"""'}), "(suffix='.prmtop')\n", (92949, 92967), False, 'from tempfile import NamedTemporaryFile\n'), ((92990, 93026), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".inpcrd"""'}), "(suffix='.inpcrd')\n", (93008, 93026), False, 'from tempfile import NamedTemporaryFile\n'), ((94122, 94343), 'openforcefield.tests.utils.create_system_from_amber', 'create_system_from_amber', (['prmtop_file.name', 'inpcrd_file.name'], {'implicitSolvent': 'openmm_gbsas[gbsa_model]', 'nonbondedMethod': 'amber_nb_method', 'nonbondedCutoff': 'amber_cutoff', 'gbsaModel': '"""ACE"""', 'implicitSolventKappa': '(0.0)'}), "(prmtop_file.name, inpcrd_file.name,\n implicitSolvent=openmm_gbsas[gbsa_model], nonbondedMethod=\n amber_nb_method, nonbondedCutoff=amber_cutoff, gbsaModel='ACE',\n implicitSolventKappa=0.0)\n", (94146, 94343), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), 
((98015, 98063), 'simtk.openmm.VerletIntegrator', 'openmm.VerletIntegrator', (['(1.0 * unit.femtoseconds)'], {}), '(1.0 * unit.femtoseconds)\n', (98038, 98063), False, 'from simtk import openmm\n'), ((98083, 98122), 'simtk.openmm.Platform.getPlatformByName', 'Platform.getPlatformByName', (['"""Reference"""'], {}), "('Reference')\n", (98109, 98122), False, 'from simtk.openmm import Platform\n'), ((98147, 98201), 'simtk.openmm.Context', 'openmm.Context', (['amber_omm_system', 'integrator', 'platform'], {}), '(amber_omm_system, integrator, platform)\n', (98161, 98201), False, 'from simtk import openmm\n'), ((98347, 98401), 'openforcefield.tests.utils.get_context_potential_energy', 'get_context_potential_energy', (['amber_context', 'positions'], {}), '(amber_context, positions)\n', (98375, 98401), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((98423, 98475), 'openforcefield.tests.utils.get_context_potential_energy', 'get_context_potential_energy', (['off_context', 'positions'], {}), '(off_context, positions)\n', (98451, 98475), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((99032, 99125), 'openforcefield.tests.utils.compare_system_energies', 'compare_system_energies', (['off_omm_system', 'amber_omm_system', 'positions'], {'by_force_type': '(False)'}), '(off_omm_system, amber_omm_system, positions,\n by_force_type=False)\n', (99055, 99125), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((100584, 100674), 'numpy.concatenate', 'np.concatenate', (['(molecule.conformers[0], molecule.conformers[0] + 10 * unit.angstrom)'], {}), '((molecule.conformers[0], molecule.conformers[0] + 10 * unit.\n angstrom))\n', (100598, 100674), True, 'import numpy as np\n'), ((100978, 101055), 
'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""test_forcefields/smirnoff99Frosst.offxml"""', 'off_gbsas[gbsa_model]'], {}), "('test_forcefields/smirnoff99Frosst.offxml', off_gbsas[gbsa_model])\n", (100988, 101055), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((101155, 101200), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['[molecule, molecule]'], {}), '([molecule, molecule])\n', (101178, 101200), False, 'from openforcefield.topology import Molecule, Topology\n'), ((101353, 101413), 'parmed.openmm.load_topology', 'pmd.openmm.load_topology', (['omm_top', 'off_omm_system', 'positions'], {}), '(omm_top, off_omm_system, positions)\n', (101377, 101413), True, 'import parmed as pmd\n'), ((101436, 101472), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".prmtop"""'}), "(suffix='.prmtop')\n", (101454, 101472), False, 'from tempfile import NamedTemporaryFile\n'), ((101495, 101531), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".inpcrd"""'}), "(suffix='.inpcrd')\n", (101513, 101531), False, 'from tempfile import NamedTemporaryFile\n'), ((101905, 102132), 'openforcefield.tests.utils.create_system_from_amber', 'create_system_from_amber', (['prmtop_file.name', 'inpcrd_file.name'], {'implicitSolvent': 'openmm_gbsas[gbsa_model]', 'nonbondedMethod': 'openmm.app.forcefield.NoCutoff', 'nonbondedCutoff': 'None', 'gbsaModel': 'None', 'implicitSolventKappa': '(0.0)'}), '(prmtop_file.name, inpcrd_file.name,\n implicitSolvent=openmm_gbsas[gbsa_model], nonbondedMethod=openmm.app.\n forcefield.NoCutoff, nonbondedCutoff=None, gbsaModel=None,\n implicitSolventKappa=0.0)\n', (101929, 102132), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((105371, 105419), 'simtk.openmm.VerletIntegrator', 'openmm.VerletIntegrator', (['(1.0 * unit.femtoseconds)'], {}), '(1.0 * 
unit.femtoseconds)\n', (105394, 105419), False, 'from simtk import openmm\n'), ((105439, 105478), 'simtk.openmm.Platform.getPlatformByName', 'Platform.getPlatformByName', (['"""Reference"""'], {}), "('Reference')\n", (105465, 105478), False, 'from simtk.openmm import Platform\n'), ((105503, 105557), 'simtk.openmm.Context', 'openmm.Context', (['amber_omm_system', 'integrator', 'platform'], {}), '(amber_omm_system, integrator, platform)\n', (105517, 105557), False, 'from simtk import openmm\n'), ((105703, 105757), 'openforcefield.tests.utils.get_context_potential_energy', 'get_context_potential_energy', (['amber_context', 'positions'], {}), '(amber_context, positions)\n', (105731, 105757), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((105779, 105831), 'openforcefield.tests.utils.get_context_potential_energy', 'get_context_potential_energy', (['off_context', 'positions'], {}), '(off_context, positions)\n', (105807, 105831), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((106672, 106765), 'openforcefield.tests.utils.compare_system_energies', 'compare_system_energies', (['off_omm_system', 'amber_omm_system', 'positions'], {'by_force_type': '(False)'}), '(off_omm_system, amber_omm_system, positions,\n by_force_type=False)\n', (106695, 106765), False, 'from openforcefield.tests.utils import compare_system_energies, create_system_from_amber, get_context_potential_energy\n'), ((108049, 108104), 'openforcefield.tests.utils.get_freesolv_file_path', 'get_freesolv_file_path', (['freesolv_id', 'forcefield_version'], {}), '(freesolv_id, forcefield_version)\n', (108071, 108104), False, 'from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters\n'), ((108151, 108237), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['mol2_file_path'], 
{'allow_undefined_stereo': 'allow_undefined_stereo'}), '(mol2_file_path, allow_undefined_stereo=\n allow_undefined_stereo)\n', (108169, 108237), False, 'from openforcefield.topology import Molecule, Topology\n'), ((108501, 108571), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['forcefield_file_path', '"""test_forcefields/old/hbonds.offxml"""'], {}), "(forcefield_file_path, 'test_forcefields/old/hbonds.offxml')\n", (108511, 108571), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((109107, 109266), 'openforcefield.tests.utils.compare_system_parameters', 'compare_system_parameters', (['ff_system', 'xml_system'], {'systems_labels': "('current OpenFF', 'SMIRNOFF 0.0.4')", 'ignore_charges': '(True)', 'ignore_improper_folds': '(True)'}), "(ff_system, xml_system, systems_labels=(\n 'current OpenFF', 'SMIRNOFF 0.0.4'), ignore_charges=True,\n ignore_improper_folds=True)\n", (109132, 109266), False, 'from openforcefield.tests.utils import get_freesolv_file_path, compare_system_parameters\n'), ((24692, 24751), 'openforcefield.utils.toolkits.ToolkitRegistry', 'ToolkitRegistry', ([], {'toolkit_precedence': '[OpenEyeToolkitWrapper]'}), '(toolkit_precedence=[OpenEyeToolkitWrapper])\n', (24707, 24751), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((24874, 24961), 'openforcefield.utils.toolkits.ToolkitRegistry', 'ToolkitRegistry', ([], {'toolkit_precedence': '[RDKitToolkitWrapper, AmberToolsToolkitWrapper]'}), '(toolkit_precedence=[RDKitToolkitWrapper,\n AmberToolsToolkitWrapper])\n', (24889, 24961), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((25457, 25480), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (25470, 25480), False, 'import pytest\n'), ((26030, 26053), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), 
'(KeyError)\n', (26043, 26053), False, 'import pytest\n'), ((32307, 32406), 'pytest.raises', 'pytest.raises', (['SMIRNOFFSpecError'], {'match': '"""Unexpected kwarg [(]parameters: k, length[)] passed"""'}), "(SMIRNOFFSpecError, match=\n 'Unexpected kwarg [(]parameters: k, length[)] passed')\n", (32320, 32406), False, 'import pytest\n'), ((32439, 32477), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements'], {}), '(xml_ff_w_cosmetic_elements)\n', (32449, 32477), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((33003, 33102), 'pytest.raises', 'pytest.raises', (['SMIRNOFFSpecError'], {'match': '"""Unexpected kwarg [(]parameters: k, length[)] passed"""'}), "(SMIRNOFFSpecError, match=\n 'Unexpected kwarg [(]parameters: k, length[)] passed')\n", (33016, 33102), False, 'import pytest\n'), ((33135, 33188), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['string_1'], {'allow_cosmetic_attributes': '(False)'}), '(string_1, allow_cosmetic_attributes=False)\n', (33145, 33188), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((34828, 34851), 'openforcefield.typing.engines.smirnoff.XMLParameterIOHandler', 'XMLParameterIOHandler', ([], {}), '()\n', (34849, 34851), False, 'from openforcefield.typing.engines.smirnoff import XMLParameterIOHandler\n'), ((36536, 36635), 'pytest.raises', 'pytest.raises', (['SMIRNOFFSpecError'], {'match': '"""Unexpected kwarg [(]parameters: k, length[)] passed"""'}), "(SMIRNOFFSpecError, match=\n 'Unexpected kwarg [(]parameters: k, length[)] passed')\n", (36549, 36635), False, 'import pytest\n'), ((36668, 36706), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['xml_ff_w_cosmetic_elements'], {}), '(xml_ff_w_cosmetic_elements)\n', (36678, 36706), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((37271, 37370), 'pytest.raises', 'pytest.raises', (['SMIRNOFFSpecError'], {'match': 
'"""Unexpected kwarg [(]parameters: k, length[)] passed"""'}), "(SMIRNOFFSpecError, match=\n 'Unexpected kwarg [(]parameters: k, length[)] passed')\n", (37284, 37370), False, 'import pytest\n'), ((37403, 37460), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['iofile1.name'], {'allow_cosmetic_attributes': '(False)'}), '(iofile1.name, allow_cosmetic_attributes=False)\n', (37413, 37460), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((35879, 35902), 'openforcefield.typing.engines.smirnoff.XMLParameterIOHandler', 'XMLParameterIOHandler', ([], {}), '()\n', (35900, 35902), False, 'from openforcefield.typing.engines.smirnoff import XMLParameterIOHandler\n'), ((38613, 38788), 'pytest.raises', 'pytest.raises', (['SMIRNOFFSpecError'], {'match': '"""Missing version while trying to construct <class \'openforcefield.typing.engines.smirnoff.parameters.ToolkitAM1BCCHandler\'>."""'}), '(SMIRNOFFSpecError, match=\n "Missing version while trying to construct <class \'openforcefield.typing.engines.smirnoff.parameters.ToolkitAM1BCCHandler\'>."\n )\n', (38626, 38788), False, 'import pytest\n'), ((38918, 39068), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['"""<?xml version="1.0" encoding="ASCII"?><SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL"> <ToolkitAM1BCC/></SMIRNOFF>"""'], {}), '(\n \'<?xml version="1.0" encoding="ASCII"?><SMIRNOFF version="0.3" aromaticity_model="OEAroModel_MDL"> <ToolkitAM1BCC/></SMIRNOFF>\'\n )\n', (38928, 39068), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((40918, 41017), 'pytest.raises', 'pytest.raises', (['IncompatibleParameterError'], {'match': '"""handler value: 0.5, incompatible value: 1.0"""'}), "(IncompatibleParameterError, match=\n 'handler value: 0.5, incompatible value: 1.0')\n", (40931, 41017), False, 'import pytest\n'), ((41042, 41087), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['simple_xml_ff', 
'nonstandard_xml_ff'], {}), '(simple_xml_ff, nonstandard_xml_ff)\n', (41052, 41087), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((42100, 42156), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (42118, 42156), False, 'from openforcefield.utils import get_data_file_path\n'), ((44095, 44151), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (44113, 44151), False, 'from openforcefield.utils import get_data_file_path\n'), ((44291, 44446), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Unable to resolve order in which to run ParameterHandlers. Dependencies do not form a directed acyclic graph"""'}), "(RuntimeError, match=\n 'Unable to resolve order in which to run ParameterHandlers. Dependencies do not form a directed acyclic graph'\n )\n", (44304, 44446), False, 'import pytest\n'), ((45203, 45259), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (45221, 45259), False, 'from openforcefield.utils import get_data_file_path\n'), ((45399, 45602), 'pytest.raises', 'pytest.raises', (['UnassignedProperTorsionParameterException'], {'match': '"""- Topology indices [(]5, 0, 1, 6[)]: names and elements [(](H\\\\d+)? H[)], [(](C\\\\d+)? C[)], [(](C\\\\d+)? C[)], [(](H\\\\d+)? H[)],"""'}), "(UnassignedProperTorsionParameterException, match=\n '- Topology indices [(]5, 0, 1, 6[)]: names and elements [(](H\\\\d+)? H[)], [(](C\\\\d+)? C[)], [(](C\\\\d+)? C[)], [(](H\\\\d+)? 
H[)],'\n )\n", (45412, 45602), False, 'import pytest\n'), ((46159, 46229), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_cyclohexane_1_ethanol.pdb"""'], {}), "('systems/test_systems/1_cyclohexane_1_ethanol.pdb')\n", (46177, 46229), False, 'from openforcefield.utils import get_data_file_path\n'), ((47128, 47198), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_cyclohexane_1_ethanol.pdb"""'], {}), "('systems/test_systems/1_cyclohexane_1_ethanol.pdb')\n", (47146, 47198), False, 'from openforcefield.utils import get_data_file_path\n'), ((48269, 48314), 'os.path.join', 'os.path.join', (['"""systems"""', '"""packmol_boxes"""', 'box'], {}), "('systems', 'packmol_boxes', box)\n", (48281, 48314), False, 'import os\n'), ((48584, 48612), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['sdf_file'], {}), '(sdf_file)\n', (48602, 48612), False, 'from openforcefield.topology import Molecule, Topology\n'), ((49521, 49577), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (49539, 49577), False, 'from openforcefield.utils import get_data_file_path\n'), ((48918, 48954), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (48952, 48954), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((51463, 51519), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (51481, 51519), False, 'from openforcefield.utils import get_data_file_path\n'), ((50838, 50872), 'openforcefield.utils.toolkits.RDKitToolkitWrapper.is_available', 'RDKitToolkitWrapper.is_available', ([], {}), '()\n', (50870, 50872), False, 
'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((53176, 53232), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (53194, 53232), False, 'from openforcefield.utils import get_data_file_path\n'), ((54063, 54119), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (54081, 54119), False, 'from openforcefield.utils import get_data_file_path\n'), ((54169, 54196), 'openforcefield.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['"""CCO"""'], {}), "('CCO')\n", (54189, 54196), False, 'from openforcefield.topology import Molecule, Topology\n'), ((54298, 54402), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".* not used by any registered force Handler: {\'invalid_kwarg\'}.*"""'}), '(ValueError, match=\n ".* not used by any registered force Handler: {\'invalid_kwarg\'}.*")\n', (54311, 54402), False, 'import pytest\n'), ((55323, 55379), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (55341, 55379), False, 'from openforcefield.utils import get_data_file_path\n'), ((57337, 57393), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (57355, 57393), False, 'from openforcefield.utils import get_data_file_path\n'), ((58507, 58573), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol_reordered.pdb"""'], {}), "('systems/test_systems/1_ethanol_reordered.pdb')\n", (58525, 58573), False, 'from openforcefield.utils import get_data_file_path\n'), ((60194, 60250), 
'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_ethanol.pdb"""'], {}), "('systems/test_systems/1_ethanol.pdb')\n", (60212, 60250), False, 'from openforcefield.utils import get_data_file_path\n'), ((60450, 60561), 'pytest.raises', 'pytest.raises', (['NonintegralMoleculeChargeException'], {'match': '"""Partial charge sum [(]1.40001 e[)] for molecule"""'}), "(NonintegralMoleculeChargeException, match=\n 'Partial charge sum [(]1.40001 e[)] for molecule')\n", (60463, 60561), False, 'import pytest\n'), ((61892, 61962), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""systems/test_systems/1_cyclohexane_1_ethanol.pdb"""'], {}), "('systems/test_systems/1_cyclohexane_1_ethanol.pdb')\n", (61910, 61962), False, 'from openforcefield.utils import get_data_file_path\n'), ((69831, 69941), 'numpy.array', 'np.array', (['[-0.2, -0.2, -0.2, -0.2, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,\n 0.1, 0.1, 0.1, 0.1]'], {}), '([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,\n 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n', (69839, 69941), True, 'import numpy as np\n'), ((70355, 70375), 'numpy.array', 'np.array', (['([0.0] * 15)'], {}), '([0.0] * 15)\n', (70363, 70375), True, 'import numpy as np\n'), ((70568, 70589), 'numpy.array', 'np.array', (['([99.0] * 11)'], {}), '([99.0] * 11)\n', (70576, 70589), True, 'import numpy as np\n'), ((70777, 70797), 'numpy.array', 'np.array', (['([99.0] * 3)'], {}), '([99.0] * 3)\n', (70785, 70797), True, 'import numpy as np\n'), ((76405, 76521), 'pytest.raises', 'pytest.raises', (['UnassignedMoleculeChargeException'], {'match': '"""did not have charges assigned by any ParameterHandler"""'}), "(UnassignedMoleculeChargeException, match=\n 'did not have charges assigned by any ParameterHandler')\n", (76418, 76521), False, 'import pytest\n'), ((78078, 78143), 'numpy.isclose', 'np.isclose', (['(distance / unit.angstrom)', '(bond_length / unit.angstrom)'], {}), '(distance / 
unit.angstrom, bond_length / unit.angstrom)\n', (78088, 78143), True, 'import numpy as np\n'), ((83172, 83208), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (83206, 83208), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((86231, 86296), 'openforcefield.tests.utils.get_alkethoh_file_path', 'get_alkethoh_file_path', (["('AlkEthOH_' + alkethoh_id)"], {'get_amber': '(True)'}), "('AlkEthOH_' + alkethoh_id, get_amber=True)\n", (86253, 86296), False, 'from openforcefield.tests.utils import get_alkethoh_file_path, compare_system_parameters, compare_system_energies\n'), ((86401, 86445), 'parmed.load_file', 'parmed.load_file', (['top_filepath', 'crd_filepath'], {}), '(top_filepath, crd_filepath)\n', (86417, 86445), False, 'import parmed\n'), ((85159, 85195), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (85193, 85195), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((88375, 88411), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (88409, 88411), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((95574, 95652), 'simtk.openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (95643, 95652), False, 'from simtk import openmm\n'), ((98255, 98280), 'copy.deepcopy', 'copy.deepcopy', (['integrator'], {}), '(integrator)\n', (98268, 98280), False, 'import copy\n'), ((90349, 90385), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 
'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (90383, 90385), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((100097, 100173), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/FreeSolv/mol2files_sybyl/mobley_1036761.mol2"""'], {}), "('molecules/FreeSolv/mol2files_sybyl/mobley_1036761.mol2')\n", (100115, 100173), False, 'from openforcefield.utils import get_data_file_path\n'), ((103365, 103443), 'simtk.openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAHCTForce.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (103434, 103443), False, 'from simtk import openmm\n'), ((105611, 105636), 'copy.deepcopy', 'copy.deepcopy', (['integrator'], {}), '(integrator)\n', (105624, 105636), False, 'import copy\n'), ((99151, 99187), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (99185, 99187), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((106829, 106865), 'openforcefield.utils.toolkits.OpenEyeToolkitWrapper.is_available', 'OpenEyeToolkitWrapper.is_available', ([], {}), '()\n', (106863, 106865), False, 'from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, ToolkitRegistry\n'), ((48490, 48540), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', "(name + '.sdf')"], {}), "('systems', 'monomers', name + '.sdf')\n", (48502, 48540), False, 'import os\n'), ((49679, 49722), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (49697, 49722), False, 'from openforcefield.utils import get_data_file_path\n'), ((50159, 50212), 'openforcefield.utils.get_data_file_path', 
'get_data_file_path', (['"""molecules/ethanol_reordered.sdf"""'], {}), "('molecules/ethanol_reordered.sdf')\n", (50177, 50212), False, 'from openforcefield.utils import get_data_file_path\n'), ((51622, 51665), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (51640, 51665), False, 'from openforcefield.utils import get_data_file_path\n'), ((52104, 52157), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol_reordered.sdf"""'], {}), "('molecules/ethanol_reordered.sdf')\n", (52122, 52157), False, 'from openforcefield.utils import get_data_file_path\n'), ((53324, 53348), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['name'], {}), '(name)\n', (53342, 53348), False, 'from openforcefield.utils import get_data_file_path\n'), ((56307, 56354), 'pytest.raises', 'pytest.raises', (['exception'], {'match': 'exception_match'}), '(exception, match=exception_match)\n', (56320, 56354), False, 'import pytest\n'), ((63341, 63389), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""water.sdf"""'], {}), "('systems', 'monomers', 'water.sdf')\n", (63353, 63389), False, 'import os\n'), ((64941, 64989), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""water.sdf"""'], {}), "('systems', 'monomers', 'water.sdf')\n", (64953, 64989), False, 'import os\n'), ((66409, 66457), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""water.sdf"""'], {}), "('systems', 'monomers', 'water.sdf')\n", (66421, 66457), False, 'import os\n'), ((67865, 67908), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (67883, 67908), False, 'from openforcefield.utils import get_data_file_path\n'), ((67951, 68004), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol_reordered.sdf"""'], {}), 
"('molecules/ethanol_reordered.sdf')\n", (67969, 68004), False, 'from openforcefield.utils import get_data_file_path\n'), ((69116, 69170), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""cyclohexane.sdf"""'], {}), "('systems', 'monomers', 'cyclohexane.sdf')\n", (69128, 69170), False, 'import os\n'), ((69228, 69278), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""butanol.sdf"""'], {}), "('systems', 'monomers', 'butanol.sdf')\n", (69240, 69278), False, 'import os\n'), ((69337, 69387), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""propane.sdf"""'], {}), "('systems', 'monomers', 'propane.sdf')\n", (69349, 69387), False, 'import os\n'), ((69444, 69492), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""water.sdf"""'], {}), "('systems', 'monomers', 'water.sdf')\n", (69456, 69492), False, 'import os\n'), ((69551, 69601), 'os.path.join', 'os.path.join', (['"""systems"""', '"""monomers"""', '"""ethanol.sdf"""'], {}), "('systems', 'monomers', 'ethanol.sdf')\n", (69563, 69601), False, 'import os\n'), ((73619, 73662), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (73637, 73662), False, 'from openforcefield.utils import get_data_file_path\n'), ((73705, 73758), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol_reordered.sdf"""'], {}), "('molecules/ethanol_reordered.sdf')\n", (73723, 73758), False, 'from openforcefield.utils import get_data_file_path\n'), ((74855, 74898), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (74873, 74898), False, 'from openforcefield.utils import get_data_file_path\n'), ((74941, 74994), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol_reordered.sdf"""'], {}), "('molecules/ethanol_reordered.sdf')\n", 
(74959, 74994), False, 'from openforcefield.utils import get_data_file_path\n'), ((75964, 76007), 'openforcefield.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/toluene.sdf"""'], {}), "('molecules/toluene.sdf')\n", (75982, 76007), False, 'from openforcefield.utils import get_data_file_path\n'), ((82104, 82130), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (82119, 82130), False, 'import os\n'), ((86341, 86374), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['mol2_filepath'], {}), '(mol2_filepath)\n', (86359, 86374), False, 'from openforcefield.topology import Molecule, Topology\n'), ((87390, 87415), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (87398, 87415), True, 'import numpy as np\n'), ((87444, 87469), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (87452, 87469), True, 'import numpy as np\n'), ((87498, 87523), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (87506, 87523), True, 'import numpy as np\n'), ((95712, 95791), 'simtk.openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (95782, 95791), False, 'from simtk import openmm\n'), ((100333, 100359), 'numpy.zeros', 'np.zeros', (['molecule.n_atoms'], {}), '(molecule.n_atoms)\n', (100341, 100359), True, 'import numpy as np\n'), ((103503, 103582), 'simtk.openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAOBC1Force.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (103573, 103582), False, 'from simtk import openmm\n'), ((79583, 79610), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (79599, 79610), False, 'import os\n'), ((82015, 82042), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (82031, 82042), 
False, 'import os\n'), ((95851, 95930), 'simtk.openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (95921, 95930), False, 'from simtk import openmm\n'), ((103642, 103721), 'simtk.openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters', 'openmm.app.internal.customgbforces.GBSAOBC2Force.getStandardParameters', (['omm_top'], {}), '(omm_top)\n', (103712, 103721), False, 'from simtk import openmm\n')] |
#!/usr/bin/env python
# ==============================================================================
# MIT License
#
# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import importlib
import os
import sys
from datetime import datetime
import numpy as np
import cv2
import tensorflow as tf
import configargparse
import utils
# parse parameters from config file or CLI
parser = configargparse.ArgParser()
parser.add("-c", "--config", is_config_file=True, help="config file")
parser.add("-it", "--input-training", type=str, required=True, nargs="+", help="directory/directories of input samples for training")
parser.add("-lt", "--label-training", type=str, required=True, help="directory of label samples for training")
parser.add("-nt", "--max-samples-training", type=int, default=None, help="maximum number of training samples")
parser.add("-iv", "--input-validation", type=str, required=True, nargs="+", help="directory/directories of input samples for validation")
parser.add("-lv", "--label-validation", type=str, required=True, help="directory of label samples for validation")
parser.add("-nv", "--max-samples-validation", type=int, default=None, help="maximum number of validation samples")
parser.add("-is", "--image-shape", type=int, required=True, nargs=2, help="image dimensions (HxW) of inputs and labels for network")
parser.add("-ohi", "--one-hot-palette-input", type=str, required=True, help="xml-file for one-hot-conversion of input images")
parser.add("-ohl", "--one-hot-palette-label", type=str, required=True, help="xml-file for one-hot-conversion of label images")
parser.add("-m", "--model", type=str, required=True, help="Python file defining the neural network")
parser.add("-uh", "--unetxst-homographies", type=str, default=None, help="Python file defining a list H of homographies to be used in uNetXST model")
parser.add("-e", "--epochs", type=int, required=True, help="number of epochs for training")
parser.add("-bs", "--batch-size", type=int, required=True, help="batch size for training")
parser.add("-lr", "--learning-rate", type=float, default=1e-4, help="learning rate of Adam optimizer for training")
parser.add("-lw", "--loss-weights", type=float, default=None, nargs="+", help="factors for weighting classes differently in loss function")
parser.add("-esp", "--early-stopping-patience", type=int, default=10, help="patience for early-stopping due to converged validation mIoU")
parser.add("-si", "--save-interval", type=int, default=5, help="epoch interval between exports of the model")
parser.add("-o", "--output-dir", type=str, required=True, help="output dir for TensorBoard and models")
parser.add("-mw", "--model-weights", type=str, default=None, help="weights file of trained model for training continuation")
conf, unknown = parser.parse_known_args()
# determine absolute filepaths
conf.input_training = [utils.abspath(path) for path in conf.input_training]
conf.label_training = utils.abspath(conf.label_training)
conf.input_validation = [utils.abspath(path) for path in conf.input_validation]
conf.label_validation = utils.abspath(conf.label_validation)
conf.one_hot_palette_input = utils.abspath(conf.one_hot_palette_input)
conf.one_hot_palette_label = utils.abspath(conf.one_hot_palette_label)
conf.model = utils.abspath(conf.model)
conf.unetxst_homographies = utils.abspath(conf.unetxst_homographies) if conf.unetxst_homographies is not None else conf.unetxst_homographies
conf.model_weights = utils.abspath(conf.model_weights) if conf.model_weights is not None else conf.model_weights
conf.output_dir = utils.abspath(conf.output_dir)
# load network architecture module
architecture = utils.load_module(conf.model)
# get max_samples_training random training samples
n_inputs = len(conf.input_training)
files_train_input = [utils.get_files_in_folder(folder) for folder in conf.input_training]
files_train_label = utils.get_files_in_folder(conf.label_training)
_, idcs = utils.sample_list(files_train_label, n_samples=conf.max_samples_training)
files_train_input = [np.take(f, idcs) for f in files_train_input]
files_train_label = np.take(files_train_label, idcs)
image_shape_original_input = utils.load_image(files_train_input[0][0]).shape[0:2]
image_shape_original_label = utils.load_image(files_train_label[0]).shape[0:2]
print(f"Found {len(files_train_label)} training samples")
# get max_samples_validation random validation samples
files_valid_input = [utils.get_files_in_folder(folder) for folder in conf.input_validation]
files_valid_label = utils.get_files_in_folder(conf.label_validation)
_, idcs = utils.sample_list(files_valid_label, n_samples=conf.max_samples_validation)
files_valid_input = [np.take(f, idcs) for f in files_valid_input]
files_valid_label = np.take(files_valid_label, idcs)
print(f"Found {len(files_valid_label)} validation samples")
# parse one-hot-conversion.xml
conf.one_hot_palette_input = utils.parse_convert_xml(conf.one_hot_palette_input)
conf.one_hot_palette_label = utils.parse_convert_xml(conf.one_hot_palette_label)
n_classes_input = len(conf.one_hot_palette_input)
n_classes_label = len(conf.one_hot_palette_label)
# build dataset pipeline parsing functions
def parse_sample(input_files, label_file):
# parse and process input images
inputs = []
for inp in input_files:
inp = utils.load_image_op(inp)
inp = utils.resize_image_op(inp, image_shape_original_input, conf.image_shape, interpolation=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
inp = utils.one_hot_encode_image_op(inp, conf.one_hot_palette_input)
inputs.append(inp)
inputs = inputs[0] if n_inputs == 1 else tuple(inputs)
# parse and process label image
label = utils.load_image_op(label_file)
label = utils.resize_image_op(label, image_shape_original_label, conf.image_shape, interpolation=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
label = utils.one_hot_encode_image_op(label, conf.one_hot_palette_label)
return inputs, label
# build training data pipeline
dataTrain = tf.data.Dataset.from_tensor_slices((tuple(files_train_input), files_train_label))
dataTrain = dataTrain.shuffle(buffer_size=conf.max_samples_training, reshuffle_each_iteration=True)
dataTrain = dataTrain.map(parse_sample, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataTrain = dataTrain.batch(conf.batch_size, drop_remainder=True)
dataTrain = dataTrain.repeat(conf.epochs)
dataTrain = dataTrain.prefetch(1)
print("Built data pipeline for training")
# build validation data pipeline
dataValid = tf.data.Dataset.from_tensor_slices((tuple(files_valid_input), files_valid_label))
dataValid = dataValid.map(parse_sample, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataValid = dataValid.batch(1)
dataValid = dataValid.repeat(conf.epochs)
dataValid = dataValid.prefetch(1)
print("Built data pipeline for validation")
# build model
if conf.unetxst_homographies is not None:
uNetXSTHomographies = utils.load_module(conf.unetxst_homographies)
model = architecture.get_network((conf.image_shape[0], conf.image_shape[1], n_classes_input), n_classes_label, n_inputs=n_inputs, thetas=uNetXSTHomographies.H)
else:
model = architecture.get_network((conf.image_shape[0], conf.image_shape[1], n_classes_input), n_classes_label)
if conf.model_weights is not None:
model.load_weights(conf.model_weights)
optimizer = tf.keras.optimizers.Adam(learning_rate=conf.learning_rate)
if conf.loss_weights is not None:
loss = utils.weighted_categorical_crossentropy(conf.loss_weights)
else:
loss = tf.keras.losses.CategoricalCrossentropy()
metrics = [tf.keras.metrics.CategoricalAccuracy(), utils.MeanIoUWithOneHotLabels(num_classes=n_classes_label)]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
print(f"Compiled model {os.path.basename(conf.model)}")
# create output directories
model_output_dir = os.path.join(conf.output_dir, datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
tensorboard_dir = os.path.join(model_output_dir, "TensorBoard")
checkpoint_dir = os.path.join(model_output_dir, "Checkpoints")
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# create callbacks to be called after each epoch
tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_dir, update_freq="epoch", profile_batch=0)
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(os.path.join(checkpoint_dir, "e{epoch:03d}_weights.hdf5"), period=conf.save_interval, save_weights_only=True)
best_checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(os.path.join(checkpoint_dir, "best_weights.hdf5"), save_best_only=True, monitor="val_mean_io_u_with_one_hot_labels", mode="max", save_weights_only=True)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor="val_mean_io_u_with_one_hot_labels", mode="max", patience=conf.early_stopping_patience, verbose=1)
callbacks = [tensorboard_cb, checkpoint_cb, best_checkpoint_cb, early_stopping_cb]
# start training
print("Starting training...")
n_batches_train = len(files_train_label) // conf.batch_size
n_batches_valid = len(files_valid_label)
model.fit(dataTrain,
epochs=conf.epochs, steps_per_epoch=n_batches_train,
validation_data=dataValid, validation_freq=1, validation_steps=n_batches_valid,
callbacks=callbacks)
| [
"utils.parse_convert_xml",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.losses.CategoricalCrossentropy",
"os.path.exists",
"utils.load_module",
"numpy.take",
"utils.resize_image_op",
"utils.load_image_op",
"utils.sample_list",
"utils.get_files_in_folder",
"tensorflow.keras.callb... | [((1552, 1578), 'configargparse.ArgParser', 'configargparse.ArgParser', ([], {}), '()\n', (1576, 1578), False, 'import configargparse\n'), ((4465, 4499), 'utils.abspath', 'utils.abspath', (['conf.label_training'], {}), '(conf.label_training)\n', (4478, 4499), False, 'import utils\n'), ((4616, 4652), 'utils.abspath', 'utils.abspath', (['conf.label_validation'], {}), '(conf.label_validation)\n', (4629, 4652), False, 'import utils\n'), ((4683, 4724), 'utils.abspath', 'utils.abspath', (['conf.one_hot_palette_input'], {}), '(conf.one_hot_palette_input)\n', (4696, 4724), False, 'import utils\n'), ((4755, 4796), 'utils.abspath', 'utils.abspath', (['conf.one_hot_palette_label'], {}), '(conf.one_hot_palette_label)\n', (4768, 4796), False, 'import utils\n'), ((4827, 4852), 'utils.abspath', 'utils.abspath', (['conf.model'], {}), '(conf.model)\n', (4840, 4852), False, 'import utils\n'), ((5148, 5178), 'utils.abspath', 'utils.abspath', (['conf.output_dir'], {}), '(conf.output_dir)\n', (5161, 5178), False, 'import utils\n'), ((5231, 5260), 'utils.load_module', 'utils.load_module', (['conf.model'], {}), '(conf.model)\n', (5248, 5260), False, 'import utils\n'), ((5460, 5506), 'utils.get_files_in_folder', 'utils.get_files_in_folder', (['conf.label_training'], {}), '(conf.label_training)\n', (5485, 5506), False, 'import utils\n'), ((5517, 5590), 'utils.sample_list', 'utils.sample_list', (['files_train_label'], {'n_samples': 'conf.max_samples_training'}), '(files_train_label, n_samples=conf.max_samples_training)\n', (5534, 5590), False, 'import utils\n'), ((5677, 5709), 'numpy.take', 'np.take', (['files_train_label', 'idcs'], {}), '(files_train_label, idcs)\n', (5684, 5709), True, 'import numpy as np\n'), ((6097, 6145), 'utils.get_files_in_folder', 'utils.get_files_in_folder', (['conf.label_validation'], {}), '(conf.label_validation)\n', (6122, 6145), False, 'import utils\n'), ((6156, 6231), 'utils.sample_list', 'utils.sample_list', 
(['files_valid_label'], {'n_samples': 'conf.max_samples_validation'}), '(files_valid_label, n_samples=conf.max_samples_validation)\n', (6173, 6231), False, 'import utils\n'), ((6318, 6350), 'numpy.take', 'np.take', (['files_valid_label', 'idcs'], {}), '(files_valid_label, idcs)\n', (6325, 6350), True, 'import numpy as np\n'), ((6473, 6524), 'utils.parse_convert_xml', 'utils.parse_convert_xml', (['conf.one_hot_palette_input'], {}), '(conf.one_hot_palette_input)\n', (6496, 6524), False, 'import utils\n'), ((6554, 6605), 'utils.parse_convert_xml', 'utils.parse_convert_xml', (['conf.one_hot_palette_label'], {}), '(conf.one_hot_palette_label)\n', (6577, 6605), False, 'import utils\n'), ((8906, 8964), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'conf.learning_rate'}), '(learning_rate=conf.learning_rate)\n', (8930, 8964), True, 'import tensorflow as tf\n'), ((9501, 9546), 'os.path.join', 'os.path.join', (['model_output_dir', '"""TensorBoard"""'], {}), "(model_output_dir, 'TensorBoard')\n", (9513, 9546), False, 'import os\n'), ((9565, 9610), 'os.path.join', 'os.path.join', (['model_output_dir', '"""Checkpoints"""'], {}), "(model_output_dir, 'Checkpoints')\n", (9577, 9610), False, 'import os\n'), ((9828, 9917), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', (['tensorboard_dir'], {'update_freq': '"""epoch"""', 'profile_batch': '(0)'}), "(tensorboard_dir, update_freq='epoch',\n profile_batch=0)\n", (9858, 9917), True, 'import tensorflow as tf\n'), ((10313, 10462), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_mean_io_u_with_one_hot_labels"""', 'mode': '"""max"""', 'patience': 'conf.early_stopping_patience', 'verbose': '(1)'}), "(monitor=\n 'val_mean_io_u_with_one_hot_labels', mode='max', patience=conf.\n early_stopping_patience, verbose=1)\n", (10345, 10462), True, 'import tensorflow as tf\n'), ((4382, 4401), 'utils.abspath', 'utils.abspath', 
(['path'], {}), '(path)\n', (4395, 4401), False, 'import utils\n'), ((4531, 4550), 'utils.abspath', 'utils.abspath', (['path'], {}), '(path)\n', (4544, 4550), False, 'import utils\n'), ((4883, 4923), 'utils.abspath', 'utils.abspath', (['conf.unetxst_homographies'], {}), '(conf.unetxst_homographies)\n', (4896, 4923), False, 'import utils\n'), ((5026, 5059), 'utils.abspath', 'utils.abspath', (['conf.model_weights'], {}), '(conf.model_weights)\n', (5039, 5059), False, 'import utils\n'), ((5371, 5404), 'utils.get_files_in_folder', 'utils.get_files_in_folder', (['folder'], {}), '(folder)\n', (5396, 5404), False, 'import utils\n'), ((5612, 5628), 'numpy.take', 'np.take', (['f', 'idcs'], {}), '(f, idcs)\n', (5619, 5628), True, 'import numpy as np\n'), ((6006, 6039), 'utils.get_files_in_folder', 'utils.get_files_in_folder', (['folder'], {}), '(folder)\n', (6031, 6039), False, 'import utils\n'), ((6253, 6269), 'numpy.take', 'np.take', (['f', 'idcs'], {}), '(f, idcs)\n', (6260, 6269), True, 'import numpy as np\n'), ((7266, 7297), 'utils.load_image_op', 'utils.load_image_op', (['label_file'], {}), '(label_file)\n', (7285, 7297), False, 'import utils\n'), ((7310, 7442), 'utils.resize_image_op', 'utils.resize_image_op', (['label', 'image_shape_original_label', 'conf.image_shape'], {'interpolation': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(label, image_shape_original_label, conf.image_shape,\n interpolation=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n', (7331, 7442), False, 'import utils\n'), ((7451, 7515), 'utils.one_hot_encode_image_op', 'utils.one_hot_encode_image_op', (['label', 'conf.one_hot_palette_label'], {}), '(label, conf.one_hot_palette_label)\n', (7480, 7515), False, 'import utils\n'), ((8492, 8536), 'utils.load_module', 'utils.load_module', (['conf.unetxst_homographies'], {}), '(conf.unetxst_homographies)\n', (8509, 8536), False, 'import utils\n'), ((9010, 9068), 'utils.weighted_categorical_crossentropy', 'utils.weighted_categorical_crossentropy', 
(['conf.loss_weights'], {}), '(conf.loss_weights)\n', (9049, 9068), False, 'import utils\n'), ((9086, 9127), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (9125, 9127), True, 'import tensorflow as tf\n'), ((9139, 9177), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (9175, 9177), True, 'import tensorflow as tf\n'), ((9179, 9237), 'utils.MeanIoUWithOneHotLabels', 'utils.MeanIoUWithOneHotLabels', ([], {'num_classes': 'n_classes_label'}), '(num_classes=n_classes_label)\n', (9208, 9237), False, 'import utils\n'), ((9618, 9649), 'os.path.exists', 'os.path.exists', (['tensorboard_dir'], {}), '(tensorboard_dir)\n', (9632, 9649), False, 'import os\n'), ((9655, 9683), 'os.makedirs', 'os.makedirs', (['tensorboard_dir'], {}), '(tensorboard_dir)\n', (9666, 9683), False, 'import os\n'), ((9691, 9721), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9705, 9721), False, 'import os\n'), ((9727, 9754), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9738, 9754), False, 'import os\n'), ((9971, 10028), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""e{epoch:03d}_weights.hdf5"""'], {}), "(checkpoint_dir, 'e{epoch:03d}_weights.hdf5')\n", (9983, 10028), False, 'import os\n'), ((10138, 10187), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""best_weights.hdf5"""'], {}), "(checkpoint_dir, 'best_weights.hdf5')\n", (10150, 10187), False, 'import os\n'), ((5739, 5780), 'utils.load_image', 'utils.load_image', (['files_train_input[0][0]'], {}), '(files_train_input[0][0])\n', (5755, 5780), False, 'import utils\n'), ((5821, 5859), 'utils.load_image', 'utils.load_image', (['files_train_label[0]'], {}), '(files_train_label[0])\n', (5837, 5859), False, 'import utils\n'), ((6889, 6913), 'utils.load_image_op', 'utils.load_image_op', (['inp'], {}), '(inp)\n', (6908, 6913), False, 'import 
utils\n'), ((6928, 7058), 'utils.resize_image_op', 'utils.resize_image_op', (['inp', 'image_shape_original_input', 'conf.image_shape'], {'interpolation': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(inp, image_shape_original_input, conf.image_shape,\n interpolation=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n', (6949, 7058), False, 'import utils\n'), ((7069, 7131), 'utils.one_hot_encode_image_op', 'utils.one_hot_encode_image_op', (['inp', 'conf.one_hot_palette_input'], {}), '(inp, conf.one_hot_palette_input)\n', (7098, 7131), False, 'import utils\n'), ((9326, 9354), 'os.path.basename', 'os.path.basename', (['conf.model'], {}), '(conf.model)\n', (9342, 9354), False, 'import os\n'), ((9437, 9451), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9449, 9451), False, 'from datetime import datetime\n')] |
import argparse
import numpy as np
from BeesEtAl.MartinGaddy import MartinGaddy
from BeesEtAl.Schwefel import Schwefel
from BeesEtAl.Viennet import Viennet
parser = argparse.ArgumentParser(description="Simple tests for the Bees Algorithm.")
parser.add_argument('--function', help='Function to be optimised [Martin-Gaddy].', default='Martin-Gaddy', choices=['Martin-Gaddy', 'Schwefel', 'Viennet'])
parser.add_argument('--iterations', help='How many iterations to do [100].', default=100, type=int)
parser.add_argument('--neighborhood', help='Shape of neighborhood [gauss].', default='gauss', choices=['gauss', 'cube', 'ball', 'sphere'])
parser.add_argument('--fail-at', help='Abandon patch at specified number of failures [6].', default=6, type=int)
parser.add_argument('--non-dynamic', help='Do not update patch before all new bees have been evaluated.', action='store_true')
parser.add_argument('--history-out', help='Save expanded patch plot history to specified file.', default=None, dest='out', type=str)
parser.add_argument('--f3', help='Use F3 optimiser instead of BA.', action='store_true')
args = parser.parse_args()
test = args.function
save = args.out
hood = args.neighborhood
fail = args.fail_at
Nit = args.iterations
if args.non_dynamic:
bDynamic = False
else:
bDynamic = True
# These functions normalise the costs to the range 0-1 for the expanded patch plot BA_Plotter.history()
def MG_norm(cost):
return np.arctan(cost[0]) * 2 / np.pi
def Schwefel_norm(cost):
return np.arctan((cost[0] + 2513.9) / 2513.9) * 2 / np.pi
def Viennet_norm(cost):
return np.arctan(np.linalg.norm(cost - [0,15,-0.1])) * 2 / np.pi
# To set up the optimiser, first need to define the design variable ranges and choose patch priorities
# Plotting is optional, but if there are more than two variables, need to decide which to plot
if test == 'Martin-Gaddy':
minima, maxima = MartinGaddy.extents()
plotaxes = [0, 1]
if test == 'Schwefel':
minima, maxima = Schwefel.extents(2)
plotaxes = [5, 2]
if test == 'Viennet':
minima, maxima = Viennet.extents()
plotaxes = [0, 1]
if args.f3:
from BeesEtAl.F3_Garden import F3_Garden
from BeesEtAl.F3_Plotter import F3_Plotter
flies_bees = [2,6,2]
G = F3_Garden(minima, maxima, flies_bees)
P = F3_Plotter(G, plotaxes)
params = { 'neighborhood': hood }
else:
from BeesEtAl.BA_Garden import BA_Garden
from BeesEtAl.BA_Plotter import BA_Plotter
priorities = [5,2,2,1]
# priorities = [5,2,2,1]; # 3 active patches, one extra scout
# the last item is the number of extra scouts
# the other items are the number of bees in each elite patch
G = BA_Garden(minima, maxima, priorities)
P = BA_Plotter(G, plotaxes)
# initial radius, cooling factor, number of failures allowed, etc.
rf = 0.01 # smallest patch radius
r0, sf = G.initial_radius_and_shrinking(fail, rf, hood) # or set your own initial radius & shrinking factor
params = { 'radius': r0, 'shrink': sf, 'fail_at': fail, 'neighborhood': hood, 'dynamic': bDynamic }
G.set_search_params(**params)
# you can also specify that only a subset of the design variables are to
# be varied by the optimiser and that the rest should have default values:
#G.set_mask_and_defaults([1,0], [0,6])
# the cost function must subclass Base_Coster (which is very easy to do)
if test == 'Martin-Gaddy':
G.costfn = MartinGaddy(G)
norm_fn = MG_norm
if test == 'Schwefel':
G.costfn = Schwefel(G)
norm_fn = Schwefel_norm
if test == 'Viennet':
G.costfn = Viennet(G)
norm_fn = Viennet_norm
# ==== We're ready to optimise ====
for it in range(1, (Nit+1)):
solver_runs = G.iterate()
best_cost, best_X = G.best()
print('Iteration {:4d}: Global best = {c} @ {x}'.format(it, c=best_cost, x=best_X))
# ==== Plot the results ====
if not args.f3:
# first an expanded patch plot:
G.flush_history()
P.history((45, 315), 'blue', norm_fn)
if save is not None:
P.save(save)
# for multi-objective optimisation, i.e., when the cost is not a scalar, it's more interesting
# to look at the set of pareto-optimal solutions; you can choose two or three cost indices to plot
if test == 'Viennet':
P.pareto([0,1,2])
P.sync(10)
| [
"argparse.ArgumentParser",
"BeesEtAl.Viennet.Viennet",
"numpy.linalg.norm",
"BeesEtAl.BA_Garden.BA_Garden",
"BeesEtAl.MartinGaddy.MartinGaddy.extents",
"BeesEtAl.BA_Plotter.BA_Plotter",
"BeesEtAl.Viennet.Viennet.extents",
"BeesEtAl.MartinGaddy.MartinGaddy",
"BeesEtAl.Schwefel.Schwefel",
"BeesEtAl.... | [((183, 258), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple tests for the Bees Algorithm."""'}), "(description='Simple tests for the Bees Algorithm.')\n", (206, 258), False, 'import argparse\n'), ((2104, 2125), 'BeesEtAl.MartinGaddy.MartinGaddy.extents', 'MartinGaddy.extents', ([], {}), '()\n', (2123, 2125), False, 'from BeesEtAl.MartinGaddy import MartinGaddy\n'), ((2197, 2216), 'BeesEtAl.Schwefel.Schwefel.extents', 'Schwefel.extents', (['(2)'], {}), '(2)\n', (2213, 2216), False, 'from BeesEtAl.Schwefel import Schwefel\n'), ((2287, 2304), 'BeesEtAl.Viennet.Viennet.extents', 'Viennet.extents', ([], {}), '()\n', (2302, 2304), False, 'from BeesEtAl.Viennet import Viennet\n'), ((2479, 2516), 'BeesEtAl.F3_Garden.F3_Garden', 'F3_Garden', (['minima', 'maxima', 'flies_bees'], {}), '(minima, maxima, flies_bees)\n', (2488, 2516), False, 'from BeesEtAl.F3_Garden import F3_Garden\n'), ((2526, 2549), 'BeesEtAl.F3_Plotter.F3_Plotter', 'F3_Plotter', (['G', 'plotaxes'], {}), '(G, plotaxes)\n', (2536, 2549), False, 'from BeesEtAl.F3_Plotter import F3_Plotter\n'), ((2920, 2957), 'BeesEtAl.BA_Garden.BA_Garden', 'BA_Garden', (['minima', 'maxima', 'priorities'], {}), '(minima, maxima, priorities)\n', (2929, 2957), False, 'from BeesEtAl.BA_Garden import BA_Garden\n'), ((2967, 2990), 'BeesEtAl.BA_Plotter.BA_Plotter', 'BA_Plotter', (['G', 'plotaxes'], {}), '(G, plotaxes)\n', (2977, 2990), False, 'from BeesEtAl.BA_Plotter import BA_Plotter\n'), ((3676, 3690), 'BeesEtAl.MartinGaddy.MartinGaddy', 'MartinGaddy', (['G'], {}), '(G)\n', (3687, 3690), False, 'from BeesEtAl.MartinGaddy import MartinGaddy\n'), ((3756, 3767), 'BeesEtAl.Schwefel.Schwefel', 'Schwefel', (['G'], {}), '(G)\n', (3764, 3767), False, 'from BeesEtAl.Schwefel import Schwefel\n'), ((3838, 3848), 'BeesEtAl.Viennet.Viennet', 'Viennet', (['G'], {}), '(G)\n', (3845, 3848), False, 'from BeesEtAl.Viennet import Viennet\n'), ((1631, 1649), 'numpy.arctan', 'np.arctan', 
(['cost[0]'], {}), '(cost[0])\n', (1640, 1649), True, 'import numpy as np\n'), ((1702, 1740), 'numpy.arctan', 'np.arctan', (['((cost[0] + 2513.9) / 2513.9)'], {}), '((cost[0] + 2513.9) / 2513.9)\n', (1711, 1740), True, 'import numpy as np\n'), ((1802, 1838), 'numpy.linalg.norm', 'np.linalg.norm', (['(cost - [0, 15, -0.1])'], {}), '(cost - [0, 15, -0.1])\n', (1816, 1838), True, 'import numpy as np\n')] |
import math
import pandas
import random
import os
import numpy as np
import tensorflow
from pandas import DataFrame
from tensorflow.keras.utils import Sequence
class DataSequence(Sequence):
"""
Keras Sequence object to train a model on a list of csv files
"""
def __init__(self, rootdir, batch_size, shuffle=False, class_format='categorical', classes=['Electronic', 'Experimental', 'Folk', 'Hip-Hop', 'Instrumental', 'International', 'Pop', 'Rock']):
"""
df = dataframe with two columns: the labels and a list of filenames
"""
df = DataFrame(columns=['file_names', 'label'])
for root, subdirs, files in os.walk(rootdir):
for subdir in subdirs:
for r, s, f in os.walk(os.path.join(root, subdir)):
paths = [os.path.join(r, name) for name in f]
temp = DataFrame(data=paths, columns=['file_names'])
temp['label'] = classes.index(subdir)
df = df.append(temp, ignore_index=True)
self.df = df
self.classes = classes
self.bsz = batch_size
self.shuffle = shuffle
self.n = self.round(len(df.index), batch_size)
# self.indexes = random.sample(range(self.n), k=self.n)
self.indexes = range(self.n)
# Take labels and a list of image locations in memory
self.labels = tensorflow.keras.utils.to_categorical(self.df['label'].values, num_classes=len(self.classes)) if class_format=='categorical' else self.df['label'].values
self.file_list = self.df['file_names']
def __len__(self):
return int(math.floor(self.n / float(self.bsz)))
def round(self, n, multiple):
# Smaller multiple
a = (n // multiple) * multiple
# Return of closest of two
return a
def on_epoch_end(self):
self.indexes = range(self.n)
if self.shuffle:
# Shuffles indexes after each epoch if in training mode
self.indexes = random.sample(self.indexes, k=len(self.indexes))
def get_batch_labels(self, idx, arr):
# Fetch a batch of labels
return arr[idx * self.bsz: (idx + 1) * self.bsz]
def get_batch_features(self, arr):
# Fetch a batch of inputs
feats = np.array([self.read_csv_data(f) for f in arr])
return feats
def __getitem__(self, idx):
indexes = self.indexes[idx*self.bsz:(idx+1)*self.bsz]
files_temp = np.array([self.file_list[k] for k in indexes])
y = np.array([self.labels[k] for k in indexes])
batch_x = self.get_batch_features(files_temp)
return batch_x, y
def read_csv_data(self, filename):
df = pandas.read_csv(filename, index_col=0).fillna(0.00000000000000001)
df = self.normalize(df)
return df.values
def normalize(self, df: DataFrame):
return (df - df.mean()) / (df.std())
if __name__=='__main__':
DATASET_DIR = "dataset/"
cwd = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(cwd, DATASET_DIR, 'mfcc_fma_small', 'train')
gen = DataSequence(base_dir, 64, True)
print(int(np.floor(gen.n / float(64))))
#
for i in range(gen.__len__() - 1, 0,-1):
x, y = gen.__getitem__(i)
print("{}: {}".format(i, x.shape)) | [
"pandas.read_csv",
"os.path.join",
"os.path.realpath",
"numpy.array",
"pandas.DataFrame",
"os.walk"
] | [((3052, 3109), 'os.path.join', 'os.path.join', (['cwd', 'DATASET_DIR', '"""mfcc_fma_small"""', '"""train"""'], {}), "(cwd, DATASET_DIR, 'mfcc_fma_small', 'train')\n", (3064, 3109), False, 'import os\n'), ((584, 626), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['file_names', 'label']"}), "(columns=['file_names', 'label'])\n", (593, 626), False, 'from pandas import DataFrame\n'), ((663, 679), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (670, 679), False, 'import os\n'), ((2477, 2523), 'numpy.array', 'np.array', (['[self.file_list[k] for k in indexes]'], {}), '([self.file_list[k] for k in indexes])\n', (2485, 2523), True, 'import numpy as np\n'), ((2536, 2579), 'numpy.array', 'np.array', (['[self.labels[k] for k in indexes]'], {}), '([self.labels[k] for k in indexes])\n', (2544, 2579), True, 'import numpy as np\n'), ((3009, 3035), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3025, 3035), False, 'import os\n'), ((2715, 2753), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {'index_col': '(0)'}), '(filename, index_col=0)\n', (2730, 2753), False, 'import pandas\n'), ((755, 781), 'os.path.join', 'os.path.join', (['root', 'subdir'], {}), '(root, subdir)\n', (767, 781), False, 'import os\n'), ((877, 922), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'paths', 'columns': "['file_names']"}), "(data=paths, columns=['file_names'])\n", (886, 922), False, 'from pandas import DataFrame\n'), ((813, 834), 'os.path.join', 'os.path.join', (['r', 'name'], {}), '(r, name)\n', (825, 834), False, 'import os\n')] |
#!/usr/bin/env python
#
# cropimage.py - The CropImagePanel class
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`CropImagePanel` class.
The ``CropImagePanel`` is a a FSLeyes control which is used in conjunction
with the :class:`.OrthoCropProfile`, allowing the user to crop an image.
This module also provides the standalone :func:`loadCropParameters` function,
for loading cropping parameters from a file.
"""
import os
import os.path as op
import itertools as it
import wx
import numpy as np
import fsl.utils.idle as idle
import fsl.data.image as fslimage
import fsleyes_props as props
import fsleyes_widgets.rangeslider as rslider
import fsleyes_widgets.utils.status as status
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.displaycontext as displaycontext
import fsleyes.views.orthopanel as orthopanel
import fsleyes.strings as strings
import fsleyes.actions as actions
import fsleyes.actions.copyoverlay as copyoverlay
import fsleyes.controls.displayspacewarning as dswarning
import fsleyes.plugins.profiles.orthocropprofile as orthocropprofile
class CropImageAction(actions.ToggleControlPanelAction):
"""The ``CropImageAction`` just toggles a :class:`.CropImagePanel`. It is
added under the FSLeyes Tools menu.
"""
@staticmethod
def supportedViews():
"""The ``CropImageAction`` is restricted for use with
:class:`.OrthoPanel` views.
"""
return [orthopanel.OrthoPanel]
def __init__(self, overlayList, displayCtx, ortho):
"""Create ``CropImageAction``. """
super().__init__(overlayList, displayCtx, ortho, CropImagePanel)
self.__ortho = ortho
self.__name = '{}_{}'.format(type(self).__name__, id(self))
displayCtx.addListener('selectedOverlay', self.__name,
self.__selectedOverlayChanged)
def destroy(self):
"""Called when the :class:`.OrthoPanel` that owns this action is
closed. Clears references, removes listeners, and calls the base
class ``destroy`` method.
"""
if self.destroyed:
return
self.__ortho = None
self.displayCtx.removeListener('selectedOverlay', self.__name)
super().destroy()
def __selectedOverlayChanged(self, *a):
"""Called when the selected overlay changes. Enables/disables this
action (and hence the bound Tools menu item) depending on whether the
overlay is an image.
"""
ovl = self.displayCtx.getSelectedOverlay()
self.enabled = isinstance(ovl, fslimage.Image)
class CropImagePanel(ctrlpanel.ControlPanel):
    """The ``CropImagePanel`` class is a FSLeyes control for use in an
    :class:`.OrthoPanel`, with the associated :class:`.CropImageProfile`. It
    contains controls allowing the user to define a cropping box for the
    currently selected overlay (if it is an :class:`.Image`), and "Crop",
    "Load", "Save", and "Cancel" buttons.
    """
    @staticmethod
    def supportedViews():
        """Overrides :meth:`.ControlMixin.supportedViews`. The
        ``CropImagePanel`` is only intended to be added to
        :class:`.OrthoPanel` views.
        """
        from fsleyes.views.orthopanel import OrthoPanel
        return [OrthoPanel]
    @staticmethod
    def ignoreControl():
        """Tells FSLeyes not to add the ``CropImagePanel`` as an option to
        the Settings menu. Instead, the :class:`CropImageAction` is added as
        an option to the Tools menu.
        """
        return True
    @staticmethod
    def profileCls():
        """Returns the :class:`.OrthoCropProfile` class, which needs to be
        activated in conjunction with the ``CropImagePanel``.
        """
        return orthocropprofile.OrthoCropProfile
    @staticmethod
    def defaultLayout():
        """Returns a dictionary containing layout settings to be passed to
        :class:`.ViewPanel.togglePanel`.
        """
        return {'floatPane' : True,
                'floatOnly' : True}
    def __init__(self, parent, overlayList, displayCtx, ortho):
        """Create a ``CropImagePanel``.
        :arg parent: The :mod:`wx` parent object.
        :arg overlayList: The :class:`.OverlayList` instance.
        :arg displayCtx: The :class:`.DisplayContext` instance.
        :arg ortho: The :class:`.OrthoPanel` instance.
        """
        ctrlpanel.ControlPanel.__init__(
            self, parent, overlayList, displayCtx, ortho)
        profile = ortho.currentProfile
        self.__ortho = ortho
        self.__profile = profile
        self.__overlay = None
        # Bound widget for the x/y/z crop box on the active crop profile.
        self.__cropBoxWidget = props.makeWidget(
            self,
            profile,
            'cropBox',
            showLimits=False,
            labels=['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'])
        # Volume (t) range selector - only enabled for 4D images
        # (see __registerOverlay).
        self.__volumeWidget = rslider.RangeSliderSpinPanel(
            self,
            minValue=0,
            maxValue=1,
            minDistance=1,
            lowLabel='tmin',
            highLabel='tmax',
            style=rslider.RSSP_INTEGER)
        self.__dsWarning = dswarning.DisplaySpaceWarning(
            self,
            overlayList,
            displayCtx,
            ortho.frame,
            strings.messages[self, 'dsWarning'],
            'not like overlay',
            'overlay')
        self.__cropLabel = wx.StaticText(self)
        self.__sizeLabel = wx.StaticText(self)
        self.__cropButton = wx.Button( self, id=wx.ID_OK)
        self.__robustFovButton = wx.Button( self)
        self.__loadButton = wx.Button( self)
        self.__saveButton = wx.Button( self)
        self.__cancelButton = wx.Button( self, id=wx.ID_CANCEL)
        self.__cropButton .SetLabel(strings.labels[self, 'crop'])
        self.__robustFovButton.SetLabel(strings.labels[self, 'robustFov'])
        self.__loadButton .SetLabel(strings.labels[self, 'load'])
        self.__saveButton .SetLabel(strings.labels[self, 'save'])
        self.__cancelButton .SetLabel(strings.labels[self, 'cancel'])
        # Vertical layout: name label, warning, crop box, volume range,
        # size label, then the button row.
        self.__sizer = wx.BoxSizer(wx.VERTICAL)
        self.__btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.__sizer.Add((1, 10))
        self.__sizer.Add(self.__cropLabel, flag=wx.CENTRE)
        self.__sizer.Add((1, 10))
        self.__sizer.Add(self.__dsWarning, flag=wx.CENTRE)
        self.__sizer.Add((1, 10), proportion=1)
        self.__sizer.Add(self.__cropBoxWidget, flag=wx.EXPAND)
        self.__sizer.Add(self.__volumeWidget, flag=wx.EXPAND)
        self.__sizer.Add((1, 10))
        self.__sizer.Add(self.__sizeLabel, flag=wx.CENTRE, proportion=1)
        self.__sizer.Add((1, 10))
        self.__sizer.Add(self.__btnSizer, flag=wx.CENTRE)
        self.__sizer.Add((1, 10))
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND,
                            proportion=1)
        self.__btnSizer.Add(self.__cropButton, flag=wx.EXPAND)
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND)
        self.__btnSizer.Add(self.__robustFovButton, flag=wx.EXPAND)
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND)
        self.__btnSizer.Add(self.__loadButton, flag=wx.EXPAND)
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND)
        self.__btnSizer.Add(self.__saveButton, flag=wx.EXPAND)
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND)
        self.__btnSizer.Add(self.__cancelButton, flag=wx.EXPAND)
        self.__btnSizer.Add((10, 1), flag=wx.EXPAND,
                            proportion=1)
        self.SetSizer(self.__sizer)
        self.SetMinSize(self.__sizer.GetMinSize())
        self.__cropButton.SetDefault()
        self.__cropButton .Bind(wx.EVT_BUTTON, self.__onCrop)
        self.__loadButton .Bind(wx.EVT_BUTTON, self.__onLoad)
        self.__saveButton .Bind(wx.EVT_BUTTON, self.__onSave)
        self.__cancelButton.Bind(wx.EVT_BUTTON, self.__onCancel)
        self.__volumeWidget.Bind(rslider.EVT_RANGE, self.__onVolume)
        self.__volumeWidget.Bind(rslider.EVT_LOW_RANGE, self.__onVolume)
        self.__volumeWidget.Bind(rslider.EVT_HIGH_RANGE, self.__onVolume)
        # The robustfov action on the profile drives the robustFov button.
        profile.robustfov.bindToWidget(self,
                                       wx.EVT_BUTTON,
                                       self.__robustFovButton)
        # Listeners are removed in destroy() - keep the two in sync.
        displayCtx .addListener('selectedOverlay',
                                self.name,
                                self.__selectedOverlayChanged)
        overlayList.addListener('overlays',
                                self.name,
                                self.__selectedOverlayChanged)
        profile .addListener('cropBox',
                             self.name,
                             self.__cropBoxChanged)
        # Initialise labels/state for the currently selected overlay.
        self.__selectedOverlayChanged()
        self.__cropBoxChanged()
    def destroy(self):
        """Must be called when this ``CropImagePanel`` is no longer needed.
        Removes property listeners and clears references.
        """
        profile = self.__profile
        displayCtx = self.displayCtx
        overlayList = self.overlayList
        dsWarning = self.__dsWarning
        profile .removeListener('cropBox', self.name)
        displayCtx .removeListener('selectedOverlay', self.name)
        overlayList.removeListener('overlays', self.name)
        self.__ortho = None
        self.__profile = None
        self.__dsWarning = None
        dsWarning.destroy()
        ctrlpanel.ControlPanel.destroy(self)
    def __registerOverlay(self, overlay):
        """Called by :meth:`__selectedOverlayChanged`. Registers the
        given overlay.
        """
        self.__overlay = overlay
        display = self.displayCtx.getDisplay(overlay)
        is4D = overlay.ndim >= 4
        if is4D:
            # NOTE(review): the upper limit/range is set to shape[3] -
            # confirm whether an inclusive upper bound is intended here.
            self.__volumeWidget.SetLimits(0, overlay.shape[3])
            self.__volumeWidget.SetRange( 0, overlay.shape[3])
        self.__volumeWidget.Enable(is4D)
        display.addListener('name', self.name, self.__overlayNameChanged)
        self.__overlayNameChanged()
    def __deregisterOverlay(self):
        """Called by :meth:`__selectedOverlayChanged`. Deregisters the
        current overlay.
        """
        if self.__overlay is None:
            return
        try:
            display = self.displayCtx.getDisplay(self.__overlay)
            display.removeListener('name', self.name)
        except displaycontext.InvalidOverlayError:
            # The overlay may already have been removed from the list.
            pass
        self.__cropLabel.SetLabel(strings.labels[self, 'image.noImage'])
        self.__overlay = None
    def __overlayNameChanged(self, *a):
        """Called when the :attr:`.Display.name` of the currently selected
        overlay changes. Updates the name label.
        """
        display = self.displayCtx.getDisplay(self.__overlay)
        label = strings.labels[self, 'image']
        label = label.format(display.name)
        self.__cropLabel.SetLabel(label)
    def __selectedOverlayChanged(self, *a):
        """Called when the :attr:`.DisplayContext.selectedOverlay` changes.
        Updates labels appropriately.
        """
        displayCtx = self.displayCtx
        overlay = displayCtx.getSelectedOverlay()
        if overlay is self.__overlay:
            return
        self.__deregisterOverlay()
        # Cropping only makes sense for Image overlays.
        if not isinstance(overlay, fslimage.Image):
            self.Disable()
        else:
            self.Enable()
            self.__registerOverlay(overlay)
    def __updateSizeLabel(self):
        """Called by the crop region and volume widget event handlers. Updates
        a label which displays the current crop region size.
        """
        overlay = self.__overlay
        profile = self.__profile
        xlen = profile.cropBox.xlen
        ylen = profile.cropBox.ylen
        zlen = profile.cropBox.zlen
        tlo = self.__volumeWidget.GetLow()
        thi = self.__volumeWidget.GetHigh()
        tlen = thi - tlo
        if overlay.ndim >= 4:
            label = strings.labels[self, 'cropSize4d']
            label = label.format(xlen, ylen, zlen, tlen)
        else:
            label = strings.labels[self, 'cropSize3d']
            label = label.format(xlen, ylen, zlen)
        self.__sizeLabel.SetLabel(label)
    def __cropBoxChanged(self, *a):
        """Called when the :attr:`.OrthoCropProfile.cropBox` changes.
        Updates labels appropriately.
        """
        self.__updateSizeLabel()
    def __onVolume(self, ev):
        """Called when the user changes the volume limit, for 4D images.
        Updates the label which displays the crop region size.
        """
        self.__updateSizeLabel()
    def __onLoad(self, ev):
        """Called when the Load button is pushed. Prompts the user to select
        a file to load crop parameters from.
        """
        overlay = self.__overlay
        cropBox = self.__profile.cropBox
        fileName = '{}_crop.txt'.format(overlay.name)
        if overlay.dataSource is not None:
            dirName = op.dirname(overlay.dataSource)
        else:
            dirName = os.getcwd()
        # Only pre-fill the file name if a previously saved file exists.
        if not op.exists(op.join(dirName, fileName)):
            fileName = ''
        # NOTE(review): this load dialog re-uses the 'saveCrop' message key -
        # confirm whether a dedicated 'loadCrop' message was intended.
        dlg = wx.FileDialog(
            self,
            defaultDir=dirName,
            defaultFile=fileName,
            message=strings.messages[self, 'saveCrop'],
            style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if dlg.ShowModal() != wx.ID_OK:
            return
        filePath = dlg.GetPath()
        errTitle = strings.titles[ self, 'loadError']
        errMsg = strings.messages[self, 'loadError']
        with status.reportIfError(errTitle, errMsg, raiseError=False):
            params = loadCropParameters(filePath, overlay)
            cropBox[:] = params[:6]
            if overlay.ndim >= 4:
                tlo, thi = params[6:]
                self.__volumeWidget.SetLow(tlo)
                self.__volumeWidget.SetHigh(thi)
    def __onSave(self, ev):
        """Called when the Save button is pushed. Saves the current crop
        parameters to a text file.
        """
        overlay = self.__overlay
        cropBox = self.__profile.cropBox
        fileName = '{}_crop.txt'.format(overlay.name)
        if overlay.dataSource is not None:
            dirName = op.dirname(overlay.dataSource)
        else:
            dirName = os.getcwd()
        dlg = wx.FileDialog(
            self,
            defaultDir=dirName,
            defaultFile=fileName,
            message=strings.messages[self, 'saveCrop'],
            style=wx.FD_SAVE)
        if dlg.ShowModal() != wx.ID_OK:
            return
        filePath = dlg.GetPath()
        # The crop parameters are saved
        # in a fslroi-compatible manner (lo, len pairs per dimension).
        params = [cropBox.xlo,
                  cropBox.xhi - cropBox.xlo,
                  cropBox.ylo,
                  cropBox.yhi - cropBox.ylo,
                  cropBox.zlo,
                  cropBox.zhi - cropBox.zlo]
        if overlay.ndim >= 4:
            tlo = self.__volumeWidget.GetLow()
            thi = self.__volumeWidget.GetHigh()
            params.extend((tlo, thi - tlo))
        errTitle = strings.titles[ self, 'saveError']
        errMsg = strings.messages[self, 'saveError']
        with status.reportIfError(errTitle, errMsg, raiseError=False):
            np.savetxt(filePath, [params], fmt='%i')
    def __onCancel(self, ev=None):
        """Called when the Cancel button is pushed. Calls
        :meth:`.OrthoPanel.togglePanel` - this will result in
        this ``CropImagePanel`` being destroyed.
        This method is also called programmatically from the :meth:`__onCrop`
        method after the image is cropped.
        """
        # Do asynchronously, because we don't want
        # this CropImagePanel being destroyed from
        # its own event handler.
        idle.idle(self.__ortho.togglePanel, CropImagePanel)
    def __onCrop(self, ev):
        """Crops the selected image. This is done via a call to
        :func:`.copyoverlay.copyImage`. Also calls :meth:`__onCancel`,
        to finish cropping.
        """
        overlayList = self.overlayList
        displayCtx = self.displayCtx
        overlay = displayCtx.getSelectedOverlay()
        display = displayCtx.getDisplay(overlay)
        name = '{}_roi'.format(display.name)
        cropBox = self.__profile.cropBox
        roi = [cropBox.x,
               cropBox.y,
               cropBox.z]
        if overlay.ndim >= 4:
            roi.append(self.__volumeWidget.GetRange())
        copyoverlay.copyImage(
            overlayList,
            displayCtx,
            overlay,
            createMask=False,
            copy4D=True,
            copyDisplay=True,
            name=name,
            roi=roi)
        self.__onCancel()
def loadCropParameters(filename, overlay):
    """Load in crop values from a text file assumed to contain ``fslroi``-
    compatible parameters. Any parameters which may be passed to ``fslroi``
    are accepted::
        fslroi in out tmin tlen
        fslroi in out xmin xlen ymin ylen zmin zlen
        fslroi in out xmin xlen ymin ylen zmin zlen tmin tlen
    Any of the ``len`` parameters may be equal to -1, in which case it is
    interpreted as extending to the end of the corresponding dimension.
    :arg filename: File to load crop parameters from.
    :arg overlay:  An :class:`.Image` which is the cropping target.
    :returns:      A sequence of ``lo, hi`` crop parameters, one interleaved
                   pair per dimension.
    :raises ValueError: If the file contains the wrong number of parameters,
                        or any parameter is out of bounds for the image.
    """
    is4D = overlay.ndim >= 4
    shape = overlay.shape[:4]
    params = list(np.loadtxt(filename).flatten())
    # Two parameters (tmin tlen) or eight parameters only make
    # sense for a 4D image.
    if len(params) not in (2, 6, 8):
        raise ValueError('File contains the wrong number of crop parameters')
    if len(params) in (2, 8) and not is4D:
        raise ValueError('File contains the wrong number of crop parameters')
    # Normalise to the full (lo, len)-per-dimension form.
    if len(params) == 2:
        params = [0, -1, 0, -1, 0, -1] + params
    if is4D and len(params) == 6:
        params = params + [0, -1]
    los = []
    his = []
    for dim in range(len(shape)):
        dlo = params[dim * 2]
        dlen = params[dim * 2 + 1]
        # A length of -1 means "to the end of this dimension".
        if dlen == -1:
            dlen = shape[dim] - dlo
        dhi = dlo + dlen
        los.append(dlo)
        his.append(dhi)
    for lo, hi, lim in zip(los, his, shape):
        if lo < 0 or hi > lim:
            # Bug fix: the message was missing its closing parenthesis.
            raise ValueError('Crop parameters are out of bounds for image '
                             'shape ({} < 0 or {} > {})'.format(lo, hi, lim))
    return list(it.chain(*zip(los, his)))
| [
"wx.Button",
"fsleyes.controls.controlpanel.ControlPanel.__init__",
"fsleyes.controls.displayspacewarning.DisplaySpaceWarning",
"wx.BoxSizer",
"os.path.join",
"fsleyes_props.makeWidget",
"wx.FileDialog",
"os.getcwd",
"fsleyes_widgets.rangeslider.RangeSliderSpinPanel",
"wx.StaticText",
"fsl.utils... | [((4683, 4760), 'fsleyes.controls.controlpanel.ControlPanel.__init__', 'ctrlpanel.ControlPanel.__init__', (['self', 'parent', 'overlayList', 'displayCtx', 'ortho'], {}), '(self, parent, overlayList, displayCtx, ortho)\n', (4714, 4760), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((4941, 5062), 'fsleyes_props.makeWidget', 'props.makeWidget', (['self', 'profile', '"""cropBox"""'], {'showLimits': '(False)', 'labels': "['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax']"}), "(self, profile, 'cropBox', showLimits=False, labels=['xmin',\n 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'])\n", (4957, 5062), True, 'import fsleyes_props as props\n'), ((5151, 5291), 'fsleyes_widgets.rangeslider.RangeSliderSpinPanel', 'rslider.RangeSliderSpinPanel', (['self'], {'minValue': '(0)', 'maxValue': '(1)', 'minDistance': '(1)', 'lowLabel': '"""tmin"""', 'highLabel': '"""tmax"""', 'style': 'rslider.RSSP_INTEGER'}), "(self, minValue=0, maxValue=1, minDistance=1,\n lowLabel='tmin', highLabel='tmax', style=rslider.RSSP_INTEGER)\n", (5179, 5291), True, 'import fsleyes_widgets.rangeslider as rslider\n'), ((5401, 5546), 'fsleyes.controls.displayspacewarning.DisplaySpaceWarning', 'dswarning.DisplaySpaceWarning', (['self', 'overlayList', 'displayCtx', 'ortho.frame', "strings.messages[self, 'dsWarning']", '"""not like overlay"""', '"""overlay"""'], {}), "(self, overlayList, displayCtx, ortho.frame,\n strings.messages[self, 'dsWarning'], 'not like overlay', 'overlay')\n", (5430, 5546), True, 'import fsleyes.controls.displayspacewarning as dswarning\n'), ((5662, 5681), 'wx.StaticText', 'wx.StaticText', (['self'], {}), '(self)\n', (5675, 5681), False, 'import wx\n'), ((5715, 5734), 'wx.StaticText', 'wx.StaticText', (['self'], {}), '(self)\n', (5728, 5734), False, 'import wx\n'), ((5768, 5796), 'wx.Button', 'wx.Button', (['self'], {'id': 'wx.ID_OK'}), '(self, id=wx.ID_OK)\n', (5777, 5796), False, 'import wx\n'), ((5834, 5849), 'wx.Button', 'wx.Button', (['self'], {}), 
'(self)\n', (5843, 5849), False, 'import wx\n'), ((5887, 5902), 'wx.Button', 'wx.Button', (['self'], {}), '(self)\n', (5896, 5902), False, 'import wx\n'), ((5940, 5955), 'wx.Button', 'wx.Button', (['self'], {}), '(self)\n', (5949, 5955), False, 'import wx\n'), ((5993, 6025), 'wx.Button', 'wx.Button', (['self'], {'id': 'wx.ID_CANCEL'}), '(self, id=wx.ID_CANCEL)\n', (6002, 6025), False, 'import wx\n'), ((6415, 6439), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (6426, 6439), False, 'import wx\n'), ((6466, 6492), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (6477, 6492), False, 'import wx\n'), ((9917, 9953), 'fsleyes.controls.controlpanel.ControlPanel.destroy', 'ctrlpanel.ControlPanel.destroy', (['self'], {}), '(self)\n', (9947, 9953), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((13665, 13822), 'wx.FileDialog', 'wx.FileDialog', (['self'], {'defaultDir': 'dirName', 'defaultFile': 'fileName', 'message': "strings.messages[self, 'saveCrop']", 'style': '(wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)'}), "(self, defaultDir=dirName, defaultFile=fileName, message=\n strings.messages[self, 'saveCrop'], style=wx.FD_OPEN | wx.\n FD_FILE_MUST_EXIST)\n", (13678, 13822), False, 'import wx\n'), ((14861, 14989), 'wx.FileDialog', 'wx.FileDialog', (['self'], {'defaultDir': 'dirName', 'defaultFile': 'fileName', 'message': "strings.messages[self, 'saveCrop']", 'style': 'wx.FD_SAVE'}), "(self, defaultDir=dirName, defaultFile=fileName, message=\n strings.messages[self, 'saveCrop'], style=wx.FD_SAVE)\n", (14874, 14989), False, 'import wx\n'), ((16348, 16399), 'fsl.utils.idle.idle', 'idle.idle', (['self.__ortho.togglePanel', 'CropImagePanel'], {}), '(self.__ortho.togglePanel, CropImagePanel)\n', (16357, 16399), True, 'import fsl.utils.idle as idle\n'), ((17084, 17212), 'fsleyes.actions.copyoverlay.copyImage', 'copyoverlay.copyImage', (['overlayList', 'displayCtx', 'overlay'], {'createMask': '(False)', 'copy4D': '(True)', 
'copyDisplay': '(True)', 'name': 'name', 'roi': 'roi'}), '(overlayList, displayCtx, overlay, createMask=False,\n copy4D=True, copyDisplay=True, name=name, roi=roi)\n', (17105, 17212), True, 'import fsleyes.actions.copyoverlay as copyoverlay\n'), ((13490, 13520), 'os.path.dirname', 'op.dirname', (['overlay.dataSource'], {}), '(overlay.dataSource)\n', (13500, 13520), True, 'import os.path as op\n'), ((13557, 13568), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13566, 13568), False, 'import os\n'), ((14092, 14148), 'fsleyes_widgets.utils.status.reportIfError', 'status.reportIfError', (['errTitle', 'errMsg'], {'raiseError': '(False)'}), '(errTitle, errMsg, raiseError=False)\n', (14112, 14148), True, 'import fsleyes_widgets.utils.status as status\n'), ((14767, 14797), 'os.path.dirname', 'op.dirname', (['overlay.dataSource'], {}), '(overlay.dataSource)\n', (14777, 14797), True, 'import os.path as op\n'), ((14834, 14845), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14843, 14845), False, 'import os\n'), ((15753, 15809), 'fsleyes_widgets.utils.status.reportIfError', 'status.reportIfError', (['errTitle', 'errMsg'], {'raiseError': '(False)'}), '(errTitle, errMsg, raiseError=False)\n', (15773, 15809), True, 'import fsleyes_widgets.utils.status as status\n'), ((15823, 15863), 'numpy.savetxt', 'np.savetxt', (['filePath', '[params]'], {'fmt': '"""%i"""'}), "(filePath, [params], fmt='%i')\n", (15833, 15863), True, 'import numpy as np\n'), ((13595, 13621), 'os.path.join', 'op.join', (['dirName', 'fileName'], {}), '(dirName, fileName)\n', (13602, 13621), True, 'import os.path as op\n'), ((18092, 18112), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (18102, 18112), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve
from skfem.helpers import dd, ddot
from skfem.mesh import MeshQuad, MeshTri, MeshLine
from skfem.element import (ElementQuadBFS, ElementTriArgyris,
ElementTriMorley, ElementLineHermite,
ElementTri15ParamPlate)
from skfem.assembly import InteriorBasis
class ConvergenceMorley(TestCase):
    """Solves a plate bending problem with a sinusoidal load on a sequence
    of uniformly refined meshes, and checks that the slope of the L2 error
    (on a log-log scale against mesh parameter) lies within ``limits``, and
    that the finest-mesh error is below ``abs_limit``.
    """
    # (mesh type, element type) pair under test
    case = (MeshTri, ElementTriMorley)
    # number of uniform refinements applied before the convergence loop
    prerefs = 3
    # acceptable range for the observed convergence rate
    limits = (1.9, 2.1)
    abs_limit = 8e-5
    def runTest(self):
        m = self.case[0]().refined(self.prerefs)
        hs = []
        L2s = []
        for itr in range(3):
            e = self.case[1]()
            ib = InteriorBasis(m, e)
            # material parameters; D is the plate bending stiffness
            t = 1.
            E = 1.
            nu = 0.3
            D = E * t ** 3 / (12. * (1. - nu ** 2))
            @BilinearForm
            def bilinf(u, v, w):
                def C(T):
                    trT = T[0, 0] + T[1, 1]
                    return E / (1. + nu) * \
                        np.array([[T[0, 0] + nu / (1. - nu) * trT, T[0, 1]],
                                  [T[1, 0], T[1, 1] + nu / (1. - nu) * trT]])
                return t ** 3 / 12.0 * ddot(C(dd(u)), dd(v))
            def load(x):
                return np.sin(np.pi * x[0]) * np.sin(np.pi * x[1])
            @LinearForm
            def linf(v, w):
                return load(w.x) * v
            K = asm(bilinf, ib)
            f = asm(linf, ib)
            # TODO fix boundary conditions
            # u_x should be zero on top/bottom
            # u_y should be zero on left/right
            x = solve(*condense(K, f, D=ib.get_dofs().all('u')))
            X = ib.interpolate(x)
            # closed-form reference solution for the sinusoidal load
            def exact(x):
                return 1. / (4. * D * np.pi ** 4) * load(x)
            @Functional
            def error(w):
                return (w.w - exact(w.x)) ** 2
            L2 = np.sqrt(error.assemble(ib, w=X))
            L2s.append(L2)
            hs.append(m.param())
            m = m.refined()
        # fit error vs. mesh parameter in log-log space; the slope is
        # the observed convergence rate
        hs = np.array(hs)
        L2s = np.array(L2s)
        pfit = np.polyfit(np.log10(hs), np.log10(L2s), 1)
        self.assertGreater(pfit[0], self.limits[0])
        self.assertLess(pfit[0], self.limits[1])
        self.assertLess(L2s[-1], self.abs_limit)
class ConvergenceArgyris(ConvergenceMorley):
    # NOTE(review): the base class reads ``self.prerefs`` - this ``preref``
    # (no trailing 's') never takes effect, so the inherited value 3 is used.
    # Confirm whether ``prerefs = 0`` was intended; ``limits``/``abs_limit``
    # may have been tuned against the current, inherited behaviour.
    case = (MeshTri, ElementTriArgyris)
    preref = 0
    limits = (2.9, 3.1)
    abs_limit = 5e-7
class Convergence15Param(ConvergenceMorley):
    # NOTE(review): ``preref`` does not override the base class ``prerefs``
    # (note the missing trailing 's'), so the inherited value 3 is used.
    # Confirm whether this is intentional.
    case = (MeshTri, ElementTri15ParamPlate)
    preref = 1
    limits = (1.9, 2.1)
    abs_limit = 5e-6
class ConvergenceBFS(ConvergenceMorley):
    # NOTE(review): ``preref`` does not override the base class ``prerefs``
    # (note the missing trailing 's'), so the inherited value 3 is used.
    # Confirm whether this is intentional.
    case = (MeshQuad, ElementQuadBFS)
    preref = 1
    limits = (3.9, 4.5)
    abs_limit = 5e-9
class ConvergenceHermite(TestCase):
    """Solves the 1D biharmonic problem u'''' = 1 with Hermite elements on
    uniformly refined meshes, and checks that the slope of the L2 error (on
    a log-log scale against mesh parameter) lies within ``limits``, and that
    the finest-mesh error is below ``abs_limit``.
    """
    case = (MeshLine, ElementLineHermite)
    # number of uniform refinements applied before the convergence loop
    prerefs = 3
    # acceptable range for the observed convergence rate
    limits = (3.9, 4.1)
    abs_limit = 8e-5
    def runTest(self):
        m = self.case[0]().refined(self.prerefs)
        hs = []
        L2s = []
        for itr in range(3):
            e = self.case[1]()
            ib = InteriorBasis(m, e)
            @BilinearForm
            def bilinf(u, v, w):
                return ddot(dd(u), dd(v))
            @LinearForm
            def linf(v, w):
                return 1. * v
            K = asm(bilinf, ib)
            f = asm(linf, ib)
            x = solve(*condense(K, f, D=ib.get_dofs().all()))
            X = ib.interpolate(x)
            # closed-form reference solution for the unit load
            def exact(x):
                return (x ** 2 - 2. * x ** 3 + x ** 4) / 24.
            @Functional
            def error(w):
                return (w.w - exact(w.x)) ** 2
            L2 = np.sqrt(error.assemble(ib, w=X))
            L2s.append(L2)
            hs.append(m.param())
            m = m.refined()
        # fit error vs. mesh parameter in log-log space; the slope is
        # the observed convergence rate
        hs = np.array(hs)
        L2s = np.array(L2s)
        pfit = np.polyfit(np.log10(hs), np.log10(L2s), 1)
        self.assertGreater(pfit[0], self.limits[0])
        self.assertLess(pfit[0], self.limits[1])
        self.assertLess(L2s[-1], self.abs_limit)
| [
"numpy.log10",
"skfem.asm",
"skfem.assembly.InteriorBasis",
"numpy.array",
"numpy.sin",
"skfem.helpers.dd"
] | [((2103, 2115), 'numpy.array', 'np.array', (['hs'], {}), '(hs)\n', (2111, 2115), True, 'import numpy as np\n'), ((2130, 2143), 'numpy.array', 'np.array', (['L2s'], {}), '(L2s)\n', (2138, 2143), True, 'import numpy as np\n'), ((3828, 3840), 'numpy.array', 'np.array', (['hs'], {}), '(hs)\n', (3836, 3840), True, 'import numpy as np\n'), ((3855, 3868), 'numpy.array', 'np.array', (['L2s'], {}), '(L2s)\n', (3863, 3868), True, 'import numpy as np\n'), ((755, 774), 'skfem.assembly.InteriorBasis', 'InteriorBasis', (['m', 'e'], {}), '(m, e)\n', (768, 774), False, 'from skfem.assembly import InteriorBasis\n'), ((1480, 1495), 'skfem.asm', 'asm', (['bilinf', 'ib'], {}), '(bilinf, ib)\n', (1483, 1495), False, 'from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve\n'), ((1512, 1525), 'skfem.asm', 'asm', (['linf', 'ib'], {}), '(linf, ib)\n', (1515, 1525), False, 'from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve\n'), ((2170, 2182), 'numpy.log10', 'np.log10', (['hs'], {}), '(hs)\n', (2178, 2182), True, 'import numpy as np\n'), ((2184, 2197), 'numpy.log10', 'np.log10', (['L2s'], {}), '(L2s)\n', (2192, 2197), True, 'import numpy as np\n'), ((3122, 3141), 'skfem.assembly.InteriorBasis', 'InteriorBasis', (['m', 'e'], {}), '(m, e)\n', (3135, 3141), False, 'from skfem.assembly import InteriorBasis\n'), ((3344, 3359), 'skfem.asm', 'asm', (['bilinf', 'ib'], {}), '(bilinf, ib)\n', (3347, 3359), False, 'from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve\n'), ((3376, 3389), 'skfem.asm', 'asm', (['linf', 'ib'], {}), '(linf, ib)\n', (3379, 3389), False, 'from skfem import BilinearForm, LinearForm, Functional, asm, condense, solve\n'), ((3895, 3907), 'numpy.log10', 'np.log10', (['hs'], {}), '(hs)\n', (3903, 3907), True, 'import numpy as np\n'), ((3909, 3922), 'numpy.log10', 'np.log10', (['L2s'], {}), '(L2s)\n', (3917, 3922), True, 'import numpy as np\n'), ((1329, 1349), 'numpy.sin', 'np.sin', (['(np.pi * x[0])'], 
{}), '(np.pi * x[0])\n', (1335, 1349), True, 'import numpy as np\n'), ((1352, 1372), 'numpy.sin', 'np.sin', (['(np.pi * x[1])'], {}), '(np.pi * x[1])\n', (1358, 1372), True, 'import numpy as np\n'), ((3230, 3235), 'skfem.helpers.dd', 'dd', (['u'], {}), '(u)\n', (3232, 3235), False, 'from skfem.helpers import dd, ddot\n'), ((3237, 3242), 'skfem.helpers.dd', 'dd', (['v'], {}), '(v)\n', (3239, 3242), False, 'from skfem.helpers import dd, ddot\n'), ((1087, 1190), 'numpy.array', 'np.array', (['[[T[0, 0] + nu / (1.0 - nu) * trT, T[0, 1]], [T[1, 0], T[1, 1] + nu / (1.0 -\n nu) * trT]]'], {}), '([[T[0, 0] + nu / (1.0 - nu) * trT, T[0, 1]], [T[1, 0], T[1, 1] + \n nu / (1.0 - nu) * trT]])\n', (1095, 1190), True, 'import numpy as np\n'), ((1273, 1278), 'skfem.helpers.dd', 'dd', (['v'], {}), '(v)\n', (1275, 1278), False, 'from skfem.helpers import dd, ddot\n'), ((1265, 1270), 'skfem.helpers.dd', 'dd', (['u'], {}), '(u)\n', (1267, 1270), False, 'from skfem.helpers import dd, ddot\n')] |
# imports
import numpy as np
import tensorflow as tf
from numpy import random
import math
import time
import matplotlib.pyplot as plt
"""Part 1 - Forward Propagation"""
def initialize_parameters(layer_dims):
    """
    Description: Initialize the network's weights (He initialization) and
    biases (zeros).
    :param layer_dims: an array of the dimensions of each layer in the
    network (layer 0 is the size of the flattened input, layer L is the output softmax)
    :return: a dictionary containing the initialized W and b parameters of each layer (W1…WL, b1…bL).
    """
    params = {}
    layer_pairs = zip(layer_dims[:-1], layer_dims[1:])
    for idx, (fan_in, fan_out) in enumerate(layer_pairs, start=1):
        # He initialization: scale by sqrt(2 / fan_in) for ReLU layers.
        params[f'W{idx}'] = np.random.randn(fan_out, fan_in) * np.sqrt(2 / fan_in)
        params[f'b{idx}'] = np.zeros((fan_out, 1))
    return params
def linear_forward(A, W, b):
    """
    Description: Implement the linear part of a layer's forward propagation.
    :param A: the activations of the previous layer
    :param W: the weight matrix of the current layer (of shape [size of current layer, size of previous layer])
    :param b: the bias vector of the current layer (of shape [size of current layer, 1])
    :return: Z: the linear component of the activation function (i.e., the value before applying the non-linear function)
    :return: linear_cache: a dictionary containing A, W, b (stored for making the backpropagation easier to compute)
    """
    # Cache the inputs for the backward pass.
    linear_cache = {'A': A, 'W': W, 'b': b}
    Z = W @ A + b
    return Z, linear_cache
def softmax(Z):
    """
    Description: Numerically stable implementation of the softmax function.
    The maximum of each column is subtracted before exponentiation; this
    leaves the result unchanged mathematically but avoids overflow in
    np.exp for large values of Z.
    :param Z: the linear component of the activation function
    :return: A: the activations of the layer (columns sum to 1)
    :return: activation_cache: returns Z, which will be useful for the backpropagation
    """
    # Shift by the per-column max so exp never overflows.
    shifted = Z - np.max(Z, axis=0, keepdims=True)
    numerator = np.exp(shifted)
    denominator = np.sum(numerator, axis=0, keepdims=True)
    A = numerator / denominator
    # Cache the original (unshifted) Z for the backward pass.
    activation_cache = Z
    return A, activation_cache
def relu(Z):
    """
    Description: Implementation of the ReLU function, max(0, z) elementwise.
    :param Z: the linear component of the activation function
    :return: A: the activations of the layer
    :return: activation_cache: returns Z, which will be useful for the backpropagation
    """
    # Keep positive entries, zero out the rest.
    return np.where(Z > 0, Z, 0), Z
def linear_activation_forward(A_prev, W, B, activation):
    """
    Description: Implement the forward propagation for the LINEAR->ACTIVATION layer.
    The activation is dispatched explicitly (instead of via a globals()
    lookup, which was fragile and would silently pick up any same-named
    global object).
    :param A_prev: activations of the previous layer
    :param W: the weights matrix of the current layer
    :param B: the bias vector of the current layer
    :param activation: the activation function to be used (a string, either “softmax” or “relu”)
    :return: A: the activations of the current layer
    :return: cache: a joint dictionary containing both linear_cache and activation_cache
    :raises NotImplementedError: if ``activation`` is not one of the supported names
    """
    if activation not in ('relu', 'softmax'):
        raise NotImplementedError(
            "The given actiavtion function was not implemented. please choose one between {relu} and {softmax}")
    Z, linear_cache = linear_forward(A_prev, W, B)
    if activation == 'relu':
        A, activation_cache = relu(Z)
    else:
        A, activation_cache = softmax(Z)
    cache = {'linear_cache': linear_cache, 'activation_cache': activation_cache}
    return A, cache
def l_model_forward(X, parameters, use_batchnorm):
    """
    Description: Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SOFTMAX computation
    :param X: the data, numpy array of shape (input size, number of examples)
    :param parameters: the initialized W and b parameters of each layer
    :param use_batchnorm: a boolean flag used to determine whether to apply batchnorm after the activation
    :return: AL: the last post-activation value
    :return: caches: a list of all the cache objects generated by the linear_forward function
    """
    caches = []
    # Two entries (W, b) per layer.
    depth = len(parameters) // 2
    A = X
    # Hidden layers: LINEAR -> RELU (optionally followed by batchnorm).
    for layer in range(1, depth):
        A, cache = linear_activation_forward(
            A, parameters[f'W{layer}'], parameters[f'b{layer}'], 'relu')
        if use_batchnorm:
            A = apply_batchnorm(A)
        caches.append(cache)
    # Output layer: LINEAR -> SOFTMAX.
    AL, cache = linear_activation_forward(
        A, parameters[f'W{depth}'], parameters[f'b{depth}'], 'softmax')
    caches.append(cache)
    return AL, caches
def compute_cost(AL, Y):
    """
    Description: Compute the categorical cross-entropy cost.
    :param AL: probability vector corresponding to your label predictions, shape (num_of_classes, number of examples)
    :param Y: the labels vector (i.e. the ground truth)
    :return: cost: the cross-entropy cost
    """
    m = AL.shape[1]
    # Per-sample cross entropy: sum over classes of y * log(a).
    per_class = np.sum(Y * np.log(AL), axis=0, keepdims=True)
    # Sum over the batch, then average and negate.
    total = np.sum(per_class, axis=1)
    return -1 / m * total
def apply_batchnorm(A):
    """
    Description: performs batchnorm on the received activation values of a given layer.
    :param A: the activation values of a given layer
    :return: NA: the normalized activation values (zero mean, ~unit variance per column)
    """
    eps = 0.01  # avoids division by zero for constant activations
    mean = np.mean(A, axis=0, keepdims=True)
    var = np.var(A, axis=0, keepdims=True)
    return (A - mean) / np.sqrt(var + eps)
"""Part 2 - Backward Propagation"""
def linear_backward(dZ, cache):
    """
    Description: Implements the linear part of the backward propagation process for a single layer
    :param dZ: the gradient of the cost with respect to the linear output of the current layer (layer l)
    :param cache: tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
    :return: dA_prev: Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    :return: dW: Gradient of the cost with respect to W (current layer l), same shape as W
    :return: db: Gradient of the cost with respect to b (current layer l), same shape as b
    """
    batch = dZ.shape[1]
    # Parameter gradients are averaged over the batch.
    dW = (1 / batch) * (dZ @ cache['A'].T)
    db = (1 / batch) * dZ.sum(axis=1, keepdims=True)
    # Gradient flowing back to the previous layer's activations.
    dA_prev = cache['W'].T @ dZ
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """
    Description: Implements the backward propagation for the LINEAR->ACTIVATION layer.
    First recovers dZ via the activation-specific backward function, then
    delegates to linear_backward for the remaining gradients.
    :param dA: post activation gradient of the current layer
    :param cache: contains both the linear cache and the activation cache
    :param activation: the activation function name = ['relu' or 'softmax']
    :return: dA_prev: Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    :return: dW: Gradient of the cost with respect to W (current layer l), same shape as W
    :return: db: Gradient of the cost with respect to b (current layer l), same shape as b
    """
    act_cache = cache['activation_cache']
    if activation == 'relu':
        dZ = relu_backward(dA, act_cache)
    elif activation == 'softmax':
        dZ = softmax_backward(dA, act_cache)
    return linear_backward(dZ, cache['linear_cache'])
def relu_backward(dA, activation_cache):
    """
    Description: Implements backward propagation for a ReLU unit.
    The original implementation overwrote the cached Z array in place
    (setting its entries to 0/1), corrupting the forward cache for any
    later use; this version leaves the cache untouched.
    :param dA: the post-activation gradient
    :param activation_cache: contains Z (stored during the forward propagation)
    :return: dZ: gradient of the cost with respect to Z
    """
    Z = activation_cache
    # Gradient passes through where Z >= 0, is zero elsewhere.
    # (>= preserves the original convention of treating Z == 0 as active.)
    dZ = dA * (Z >= 0)
    return dZ
def softmax_backward(dA, activation_cache):
    """
    Description: Implements backward propagation for a softmax unit combined
    with cross-entropy loss, for which the gradient simplifies to AL - Y.
    :param dA: the post-activation gradient (unused for this combined form)
    :param activation_cache: contains AL and Y (stored during the forward propagation)
    :return: dZ: gradient of the cost with respect to Z
    """
    return activation_cache['AL'] - activation_cache['Y']
def l_model_backward(AL, Y, caches):
    """
    Description: Implement the backward propagation process for the entire network.
    :param AL: the probabilities vector, the output of the forward propagation (L_model_forward)
    :param Y: the true labels vector (the "ground truth" - true classifications)
    :param caches: list of caches containing for each layer: a) the linear cache; b) the activation cache
    :return: grads: a dictionary with the gradients
                 grads["dA" + str(l)] = ...
                 grads["dW" + str(l)] = ...
                 grads["db" + str(l)] = ...
    """
    grads = {}
    num_of_layers = len(caches)
    # The softmax/cross-entropy gradient needs AL and Y, so wrap them into the
    # last layer's activation cache (NOTE: this mutates caches[-1] in place).
    caches[-1]['activation_cache'] = {'AL': AL, 'Y': Y, 'Z': caches[-1]['activation_cache']}
    # dA is passed as None because the softmax backward step ignores it.
    dA_prev, dW, db = linear_activation_backward(None, caches[-1], "softmax")
    grads[f"dA{num_of_layers}"] = dA_prev
    grads[f"dW{num_of_layers}"] = dW
    grads[f"db{num_of_layers}"] = db
    # Every remaining layer (L-1 .. 1) uses the ReLU activation.
    for layer in range(num_of_layers - 1, 0, -1):
        dA_prev, dW, db = linear_activation_backward(dA_prev, caches[layer - 1], "relu")
        grads[f"dA{layer}"] = dA_prev
        grads[f"dW{layer}"] = dW
        grads[f"db{layer}"] = db
    return grads
def update_parameters(parameters, grads, learning_rate):
    """
    Description: Updates parameters using gradient descent
    :param parameters: a python dictionary containing the DNN architecture's parameters
    :param grads: a python dictionary containing the gradients (generated by L_model_backward)
    :param learning_rate: the learning rate used to update the parameters (the "alpha")
    :return: parameters: the updated values of the parameters object provided as input
    """
    # Each layer contributes two entries ("W{l}" and "b{l}"), hence // 2.
    num_of_layers = len(parameters) // 2
    for layer in range(1, num_of_layers + 1):
        for prefix in ("W", "b"):
            key = f"{prefix}{layer}"
            # standard gradient-descent step: theta = theta - alpha * d_theta
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]
    return parameters
"""Part 3 - Train and Predict"""
def train_validation_split(X, Y, train_size):
    """
    Description: (auxiliary function) split the train set into train and validation sets
    :param X: train set samples
    :param Y: train set labels
    :param train_size: percentage of the train set
    :return: tuples of (x_train, y_train), (x_val, y_val)
    """
    # Shuffle the sample (column) indices so the split is random.
    indices = list(range(X.shape[1]))
    random.shuffle(indices)
    n_train = math.ceil(X.shape[1] * train_size)
    train_idx = indices[:n_train]
    val_idx = indices[n_train:]
    return (X[:, train_idx], Y[:, train_idx]), (X[:, val_idx], Y[:, val_idx])
def l_layer_model(X, Y, layers_dims, learning_rate, num_iterations, batch_size):
    """
    Description: Implements a L-layer neural network. All layers but the last should have the ReLU activation function,
    and the final layer will apply the softmax activation function. The size of the output layer should be equal to
    the number of labels in the data. Please select a batch size that enables your code to run well
    (i.e. no memory overflows while still running relatively fast).
    the function should use the earlier functions in the following order:
    initialize -> L_model_forward -> compute_cost -> L_model_backward -> update parameters
    :param X: the input data, a numpy array of shape (height*width , number_of_examples)
    :param Y: the "real" labels of the data, a vector of shape (num_of_classes, number of examples)
    :param layers_dims: a list containing the dimensions of each layer, including the input
    :param learning_rate: alpha parameter used by update_parameters
    :param num_iterations: number of iterations - each iteration equals to one batch
    :param batch_size: the number of examples in a single training batch
    :return: parameters: the parameters learnt by the system during the training (the same parameters that were updated in the update_parameters function).
    :return: costs: the values of the cost function (calculated by the compute_cost function). One value is to be saved after each 100 training iterations (e.g. 3000 iterations -> 30 values).
    """
    costs_val = []
    costs_train = []
    parameters = initialize_parameters(layers_dims)
    is_batchnorm = True  # set to False to train without batchnorm
    epoch = 0
    val_accuracy = -1  # -1 marks "not evaluated yet" (first evaluation happens at iteration 0)
    train_accuracy = -1
    log = []
    # hold out 20% of the data as a validation set
    (x_train, y_train), (x_val, y_val) = train_validation_split(X, Y, 0.8)
    # split x and y train sets to batches (last batch may be smaller)
    num_of_batches = math.ceil(x_train.shape[1] / batch_size)
    batches_x = np.array_split(x_train, num_of_batches, axis=1)
    batches_y = np.array_split(y_train, num_of_batches, axis=1)
    for num_of_iteration in range(0, num_iterations):
        batch_num = num_of_iteration % num_of_batches  # cycle through batches
        current_batch_x, current_batch_y = batches_x[batch_num], batches_y[batch_num]  # get current batches
        # one full forward -> backward -> update step on this batch
        AL, caches = l_model_forward(current_batch_x, parameters, is_batchnorm)
        grads = l_model_backward(AL, current_batch_y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)
        # each hundred iterations compute costs for current batch and validation set, and accuracy for validation and
        # train sets
        if num_of_iteration % 100 == 0:
            AL_val, caches_val = l_model_forward(x_val, parameters, is_batchnorm)
            costs_val.append(compute_cost(AL_val, y_val))
            val_accuracy = predict(x_val, y_val, parameters)
            AL_batch, caches_train = l_model_forward(current_batch_x, parameters, is_batchnorm)
            costs_train.append(compute_cost(AL_batch, current_batch_y))
            log.append(f"Iteration: {num_of_iteration}, Cost: {costs_train[-1]}")
            train_accuracy = predict(x_train, y_train, parameters)
            print(
                f"Epoch: {epoch}, Iteration: {num_of_iteration}, batch_loss: {costs_train[-1]}, train_accuracy: {train_accuracy}, val_loss: {costs_val[-1]}, validation_accuracy: {val_accuracy}")
            # stopping criterion - validation cost rose two checks in a row, threshold = 0.005
            if len(costs_val) > 2 and (costs_val[-1] - costs_val[-2] >= 0.005) and (costs_val[-2] - costs_val[-3] >= 0.005):
                print("Early stopping reached.")
                break
        # count epochs (one epoch = one full pass over all batches)
        if num_of_iteration % num_of_batches == 0 and num_of_iteration > 0:
            epoch += 1
    print(f"val_accuracy {val_accuracy}")
    plot_model_history(costs_val, "Validation")
    print(f"train_accuracy {train_accuracy}")
    plot_model_history(costs_train, "Training")
    print(*log, sep="\n")  # for report
    return parameters, costs_train
def predict(X, Y, parameters):
    """
    Description: The function receives an input data and the true labels and calculates the accuracy of the trained neural network on the data.
    :param X: the input data, a numpy array of shape (height*width, number_of_examples)
    :param Y: the "real" labels of the data, a vector of shape (num_of_classes, number of examples)
    :param parameters: a python dictionary containing the DNN architecture's parameters
    :return: accuracy - the percentage of samples for which the correct label
        receives the highest confidence score (softmax output).
    """
    use_batchnorm = True  # flip to False to evaluate without batchnorm
    AL, _ = l_model_forward(X, parameters, use_batchnorm)
    # A sample counts as correct when the true class has the maximal score.
    predictions = (AL == np.amax(AL, axis=0)).astype(int)
    return np.sum(predictions * Y) / AL.shape[1]
def plot_model_history(costs_list, type):
    """
    Description: (auxiliary function) Plot graph of cost per 100 iterations
    :param costs_list: cost values sampled once every 100 iterations
    :param type: str - validation or training
    """
    # Costs were recorded every 100 iterations, so the x axis steps by 100.
    iterations = range(0, 100 * len(costs_list), 100)
    plt.plot(iterations, costs_list)
    plt.title(f'{type} Model Costs')
    plt.ylabel('Costs')
    plt.xlabel('Iterations')
    plt.show()
"""Part 4 - MNIST classification - W/out batchnorm"""
# load MNIST data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path='mnist.npz')
# normalize train and test sets
x_train = x_train / 255
x_test = x_test / 255
# flatten matrices
x_train = x_train.reshape(x_train.shape[0], 784).T
x_test = x_test.reshape(x_test.shape[0], 784).T
# encode y vectors to one hot vector
num_of_classes = len(np.unique(y_train))
y_train_one_hot = np.zeros((num_of_classes, y_train.shape[0]))
y_train_one_hot[y_train, np.arange(y_train.shape[0])] = 1
y_test_one_hot = np.zeros((num_of_classes, y_test.shape[0]))
y_test_one_hot[y_test, np.arange(y_test.shape[0])] = 1
# run network
layers_dims = [x_train.shape[0], 20, 7, 5, 10]
learning_rate = 0.009
num_iterations = 50000
batch_size = 32
start = time.time()
parameters, costs = l_layer_model(x_train, y_train_one_hot, layers_dims, learning_rate, num_iterations, batch_size)
accuracy = predict(x_test, y_test_one_hot, parameters)
print(f"Total time: {(time.time() - start)} seconds.")
print(f"Test Accuracy: {accuracy}") | [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array_split",
"numpy.arange",
"numpy.mean",
"tensorflow.keras.datasets.mnist.load_data",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.dot",
"numpy.maximum",
"matplotlib.pyplot.title",
"time.time",
... | [((16830, 16881), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {'path': '"""mnist.npz"""'}), "(path='mnist.npz')\n", (16863, 16881), True, 'import tensorflow as tf\n'), ((17177, 17221), 'numpy.zeros', 'np.zeros', (['(num_of_classes, y_train.shape[0])'], {}), '((num_of_classes, y_train.shape[0]))\n', (17185, 17221), True, 'import numpy as np\n'), ((17297, 17340), 'numpy.zeros', 'np.zeros', (['(num_of_classes, y_test.shape[0])'], {}), '((num_of_classes, y_test.shape[0]))\n', (17305, 17340), True, 'import numpy as np\n'), ((17528, 17539), 'time.time', 'time.time', ([], {}), '()\n', (17537, 17539), False, 'import time\n'), ((1829, 1838), 'numpy.exp', 'np.exp', (['Z'], {}), '(Z)\n', (1835, 1838), True, 'import numpy as np\n'), ((1857, 1897), 'numpy.sum', 'np.sum', (['numerator'], {'axis': '(0)', 'keepdims': '(True)'}), '(numerator, axis=0, keepdims=True)\n', (1863, 1897), True, 'import numpy as np\n'), ((2269, 2285), 'numpy.maximum', 'np.maximum', (['(0)', 'Z'], {}), '(0, Z)\n', (2279, 2285), True, 'import numpy as np\n'), ((4931, 4964), 'numpy.sum', 'np.sum', (['inner_sum_classes'], {'axis': '(1)'}), '(inner_sum_classes, axis=1)\n', (4937, 4964), True, 'import numpy as np\n'), ((5321, 5354), 'numpy.mean', 'np.mean', (['A'], {'axis': '(0)', 'keepdims': '(True)'}), '(A, axis=0, keepdims=True)\n', (5328, 5354), True, 'import numpy as np\n'), ((5370, 5402), 'numpy.var', 'np.var', (['A'], {'axis': '(0)', 'keepdims': '(True)'}), '(A, axis=0, keepdims=True)\n', (5376, 5402), True, 'import numpy as np\n'), ((6222, 6246), 'numpy.dot', 'np.dot', (["cache['W'].T", 'dZ'], {}), "(cache['W'].T, dZ)\n", (6228, 6246), True, 'import numpy as np\n'), ((10770, 10793), 'numpy.random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (10784, 10793), False, 'from numpy import random\n'), ((10823, 10857), 'math.ceil', 'math.ceil', (['(X.shape[1] * train_size)'], {}), '(X.shape[1] * train_size)\n', (10832, 10857), False, 'import 
math\n'), ((13030, 13070), 'math.ceil', 'math.ceil', (['(x_train.shape[1] / batch_size)'], {}), '(x_train.shape[1] / batch_size)\n', (13039, 13070), False, 'import math\n'), ((13087, 13134), 'numpy.array_split', 'np.array_split', (['x_train', 'num_of_batches'], {'axis': '(1)'}), '(x_train, num_of_batches, axis=1)\n', (13101, 13134), True, 'import numpy as np\n'), ((13151, 13198), 'numpy.array_split', 'np.array_split', (['y_train', 'num_of_batches'], {'axis': '(1)'}), '(y_train, num_of_batches, axis=1)\n', (13165, 13198), True, 'import numpy as np\n'), ((16581, 16610), 'matplotlib.pyplot.plot', 'plt.plot', (['x_index', 'costs_list'], {}), '(x_index, costs_list)\n', (16589, 16610), True, 'import matplotlib.pyplot as plt\n'), ((16615, 16647), 'matplotlib.pyplot.title', 'plt.title', (['f"""{type} Model Costs"""'], {}), "(f'{type} Model Costs')\n", (16624, 16647), True, 'import matplotlib.pyplot as plt\n'), ((16652, 16671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Costs"""'], {}), "('Costs')\n", (16662, 16671), True, 'import matplotlib.pyplot as plt\n'), ((16676, 16700), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (16686, 16700), True, 'import matplotlib.pyplot as plt\n'), ((16705, 16715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16713, 16715), True, 'import matplotlib.pyplot as plt\n'), ((17139, 17157), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (17148, 17157), True, 'import numpy as np\n'), ((756, 790), 'numpy.zeros', 'np.zeros', ([], {'shape': '(layer_dims[l], 1)'}), '(shape=(layer_dims[l], 1))\n', (764, 790), True, 'import numpy as np\n'), ((1438, 1450), 'numpy.dot', 'np.dot', (['W', 'A'], {}), '(W, A)\n', (1444, 1450), True, 'import numpy as np\n'), ((5442, 5469), 'numpy.sqrt', 'np.sqrt', (['(variance + epsilon)'], {}), '(variance + epsilon)\n', (5449, 5469), True, 'import numpy as np\n'), ((6266, 6290), 'numpy.dot', 'np.dot', (['dZ', "cache['A'].T"], {}), "(dZ, 
cache['A'].T)\n", (6272, 6290), True, 'import numpy as np\n'), ((6310, 6343), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (6316, 6343), True, 'import numpy as np\n'), ((16230, 16251), 'numpy.sum', 'np.sum', (['(y_predict * Y)'], {}), '(y_predict * Y)\n', (16236, 16251), True, 'import numpy as np\n'), ((17247, 17274), 'numpy.arange', 'np.arange', (['y_train.shape[0]'], {}), '(y_train.shape[0])\n', (17256, 17274), True, 'import numpy as np\n'), ((17364, 17390), 'numpy.arange', 'np.arange', (['y_test.shape[0]'], {}), '(y_test.shape[0])\n', (17373, 17390), True, 'import numpy as np\n'), ((643, 692), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (658, 692), True, 'import numpy as np\n'), ((695, 725), 'numpy.sqrt', 'np.sqrt', (['(2 / layer_dims[l - 1])'], {}), '(2 / layer_dims[l - 1])\n', (702, 725), True, 'import numpy as np\n'), ((4872, 4882), 'numpy.log', 'np.log', (['AL'], {}), '(AL)\n', (4878, 4882), True, 'import numpy as np\n'), ((16124, 16143), 'numpy.amax', 'np.amax', (['AL'], {'axis': '(0)'}), '(AL, axis=0)\n', (16131, 16143), True, 'import numpy as np\n'), ((17735, 17746), 'time.time', 'time.time', ([], {}), '()\n', (17744, 17746), False, 'import time\n')] |
from contextualized_topic_models.models.ctm import CombinedTM
from contextualized_topic_models.utils.data_preparation import QuickText
from contextualized_topic_models.utils.data_preparation import bert_embeddings_from_file
from contextualized_topic_models.datasets.dataset import CTMDataset
from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing
import pickle, json
import numpy as np
N_component = 10

# Load the aligned dataset: one record per user, each with a list of
# documents ("model_input"), a prediction, a label and a dataset type.
with open("aligned_ds_details.pkl", "rb") as f_in:
    data = pickle.load(f_in)
documents = []
profiles = []
for item in data:
    profiles.append({
        "model_input_range": len(item["model_input"]),
        "pred": item["pred"],
        "label": item["label"],
        "dataset_type": item["dataset_type"]
    })
    documents.extend(item["model_input"])

sp = WhiteSpacePreprocessing(documents)
preprocessed_docs, unpreprocessed_doc, vocab = sp.preprocess()
qt = QuickText("bert-base-nli-mean-tokens",
           text_for_bert=preprocessed_docs,
           text_for_bow=unpreprocessed_doc)
training_dataset = qt.load_dataset()
# Use N_component so the topic count is configured in one place
# (was hard-coded to 10 alongside the unused constant).
ctm = CombinedTM(input_size=len(qt.vocab), bert_input_size=768, n_components=N_component)
ctm.fit(training_dataset)  # run the model
ctm.save("models/ctm.pth")
print("------Topic-Word Distribution-------")
print(ctm.get_topics())
doc_topic_distribution = ctm.get_doc_topic_distribution(training_dataset)
user_dist = dict()
start = 0
label_user_dict = dict()
pred_user_dict = dict()
confusion_user_dict = dict()
for i, item in enumerate(profiles):
    # Per-user topic distribution = mean over that user's documents only.
    # axis=0 keeps the topic axis (the old call collapsed everything to a
    # scalar), and `start` must advance, otherwise every user was computed
    # from the same leading slice of doc_topic_distribution.
    user_dist[i] = np.mean(doc_topic_distribution[start: start + item["model_input_range"]], axis=0)
    start += item["model_input_range"]
    label, pred = item["label"], item["pred"]
    if label != pred:
        key = (label, pred)
        if key not in confusion_user_dict:
            confusion_user_dict[key] = list()
        confusion_user_dict[key].append(i)
    if label not in label_user_dict:
        label_user_dict[label] = list()
    if pred not in pred_user_dict:
        pred_user_dict[pred] = list()
    label_user_dict[label].append(i)
    pred_user_dict[pred].append(i)
print("------Category-Topic Distribution (True Label)-------")
label_topic_dist = []
for i in range(4):
    # Average the per-user topic vectors of each true-label category.
    label_topic_dist.append(np.mean([user_dist[x] for x in label_user_dict[i]], axis=0))
print(np.stack(label_topic_dist, axis=0))
print("------Category-Topic Distribution (Pred Label)-------")
pred_topic_dist = []
for i in range(4):
    pred_topic_dist.append(np.mean([user_dist[x] for x in pred_user_dict[i]], axis=0))
print(np.stack(pred_topic_dist, axis=0))
print("------Category-Topic Distribution (Confusion)-------")
for key in confusion_user_dict:
    _key = "{} (label) -> {} (pred)".format(key[0], key[1])
    _item = {
        "confusion_type": _key,
        # numpy arrays are not JSON serializable; convert to plain lists
        "label_dist": label_topic_dist[key[0]].tolist(),
        "pred_dist": pred_topic_dist[key[1]].tolist()
    }
    print(json.dumps(_item, indent=4))
| [
"numpy.mean",
"json.dumps",
"pickle.load",
"contextualized_topic_models.utils.preprocessing.WhiteSpacePreprocessing",
"numpy.stack",
"contextualized_topic_models.utils.data_preparation.QuickText"
] | [((838, 872), 'contextualized_topic_models.utils.preprocessing.WhiteSpacePreprocessing', 'WhiteSpacePreprocessing', (['documents'], {}), '(documents)\n', (861, 872), False, 'from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing\n'), ((942, 1050), 'contextualized_topic_models.utils.data_preparation.QuickText', 'QuickText', (['"""bert-base-nli-mean-tokens"""'], {'text_for_bert': 'preprocessed_docs', 'text_for_bow': 'unpreprocessed_doc'}), "('bert-base-nli-mean-tokens', text_for_bert=preprocessed_docs,\n text_for_bow=unpreprocessed_doc)\n", (951, 1050), False, 'from contextualized_topic_models.utils.data_preparation import QuickText\n'), ((494, 511), 'pickle.load', 'pickle.load', (['f_in'], {}), '(f_in)\n', (505, 511), False, 'import pickle, json\n'), ((1575, 1649), 'numpy.mean', 'np.mean', (["[doc_topic_distribution[start:start + item['model_input_range']]]"], {}), "([doc_topic_distribution[start:start + item['model_input_range']]])\n", (1582, 1649), True, 'import numpy as np\n'), ((2293, 2327), 'numpy.stack', 'np.stack', (['label_topic_dist'], {'axis': '(0)'}), '(label_topic_dist, axis=0)\n', (2301, 2327), True, 'import numpy as np\n'), ((2518, 2551), 'numpy.stack', 'np.stack', (['pred_topic_dist'], {'axis': '(0)'}), '(pred_topic_dist, axis=0)\n', (2526, 2551), True, 'import numpy as np\n'), ((2234, 2285), 'numpy.mean', 'np.mean', (['[user_dist[x] for x in label_user_dict[i]]'], {}), '([user_dist[x] for x in label_user_dict[i]])\n', (2241, 2285), True, 'import numpy as np\n'), ((2460, 2510), 'numpy.mean', 'np.mean', (['[user_dist[x] for x in pred_user_dict[i]]'], {}), '([user_dist[x] for x in pred_user_dict[i]])\n', (2467, 2510), True, 'import numpy as np\n'), ((2954, 2981), 'json.dumps', 'json.dumps', (['_item'], {'indent': '(4)'}), '(_item, indent=4)\n', (2964, 2981), False, 'import pickle, json\n')] |
import numpy as np
import matplotlib.pyplot as plt
import csv
file = open("data/aggregated/agg-weeKdays.csv")
csvreader = csv.reader(file)
setEQUIPMENTID = set()
for row in csvreader:
if row[0] != 'EQUIPMENTID':
setEQUIPMENTID.add(row[0])
print(setEQUIPMENTID)
daysOfWeek = {'Mon': 1, 'Tue': 2, 'Wed':3, 'Thu':4, 'Fri':5 }
for direction in ['C', 'D']:
for equipmentID in setEQUIPMENTID:
x = {1: [], 2: [], 3: [], 4: [], 5: []}
y = {1: [], 2: [], 3: [], 4: [], 5: []}
file = open("data/aggregated/agg-weeKdays.csv")
csvreader = csv.reader(file)
for row in csvreader:
if row[0] == equipmentID and row[1] == direction:
x[daysOfWeek[row[2]]].append(row[3])
y[daysOfWeek[row[2]]].append(round(np.double(row[4])))
# set width of bar
barWidth = 0.15
fig = plt.subplots(figsize =(20, 10))
# Set position of bar on X axis
br1 = np.arange(len(y[1]))
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
br4 = [x + barWidth for x in br3]
br5 = [x + barWidth for x in br4]
# Make the plot
plt.bar(br1, y[1], color ='r', width = barWidth,
edgecolor ='grey', label ='Mon')
plt.bar(br2, y[2], color ='g', width = barWidth,
edgecolor ='grey', label ='Tue')
plt.bar(br3, y[3], color ='b', width = barWidth,
edgecolor ='grey', label ='Wed')
plt.bar(br4, y[4], color ='y', width = barWidth,
edgecolor ='grey', label ='Thu')
plt.bar(br5, y[5], color ='c', width = barWidth,
edgecolor ='grey', label ='Fri')
# Adding Xticks
plt.xlabel('Hour', fontweight ='bold', fontsize = 15)
plt.ylabel('Total volume', fontweight ='bold', fontsize = 15)
plt.title('Number of cars in the week days per hour in equipment id:' + equipmentID + " and direction:" + direction)
plt.xticks([r + barWidth for r in range(len(y[1]))], x[1] )
plt.legend()
# plt.show()
plt.savefig('plots/equipment_id_'+ equipmentID + '_direction_' + direction + '.png')
file.close()
| [
"matplotlib.pyplot.savefig",
"numpy.double",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.title",
"csv.reader",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((123, 139), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (133, 139), False, 'import csv\n'), ((593, 609), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (603, 609), False, 'import csv\n'), ((913, 943), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (925, 943), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1298), 'matplotlib.pyplot.bar', 'plt.bar', (['br1', 'y[1]'], {'color': '"""r"""', 'width': 'barWidth', 'edgecolor': '"""grey"""', 'label': '"""Mon"""'}), "(br1, y[1], color='r', width=barWidth, edgecolor='grey', label='Mon')\n", (1229, 1298), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1404), 'matplotlib.pyplot.bar', 'plt.bar', (['br2', 'y[2]'], {'color': '"""g"""', 'width': 'barWidth', 'edgecolor': '"""grey"""', 'label': '"""Tue"""'}), "(br2, y[2], color='g', width=barWidth, edgecolor='grey', label='Tue')\n", (1335, 1404), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1510), 'matplotlib.pyplot.bar', 'plt.bar', (['br3', 'y[3]'], {'color': '"""b"""', 'width': 'barWidth', 'edgecolor': '"""grey"""', 'label': '"""Wed"""'}), "(br3, y[3], color='b', width=barWidth, edgecolor='grey', label='Wed')\n", (1441, 1510), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1616), 'matplotlib.pyplot.bar', 'plt.bar', (['br4', 'y[4]'], {'color': '"""y"""', 'width': 'barWidth', 'edgecolor': '"""grey"""', 'label': '"""Thu"""'}), "(br4, y[4], color='y', width=barWidth, edgecolor='grey', label='Thu')\n", (1547, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1722), 'matplotlib.pyplot.bar', 'plt.bar', (['br5', 'y[5]'], {'color': '"""c"""', 'width': 'barWidth', 'edgecolor': '"""grey"""', 'label': '"""Fri"""'}), "(br5, y[5], color='c', width=barWidth, edgecolor='grey', label='Fri')\n", (1653, 1722), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hour"""'], {'fontweight': '"""bold"""', 'fontsize': '(15)'}), "('Hour', 
fontweight='bold', fontsize=15)\n", (1787, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total volume"""'], {'fontweight': '"""bold"""', 'fontsize': '(15)'}), "('Total volume', fontweight='bold', fontsize=15)\n", (1849, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1909, 2029), 'matplotlib.pyplot.title', 'plt.title', (["('Number of cars in the week days per hour in equipment id:' + equipmentID +\n ' and direction:' + direction)"], {}), "('Number of cars in the week days per hour in equipment id:' +\n equipmentID + ' and direction:' + direction)\n", (1918, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2115), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2113, 2115), True, 'import matplotlib.pyplot as plt\n'), ((2145, 2234), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/equipment_id_' + equipmentID + '_direction_' + direction + '.png')"], {}), "('plots/equipment_id_' + equipmentID + '_direction_' + direction +\n '.png')\n", (2156, 2234), True, 'import matplotlib.pyplot as plt\n'), ((827, 844), 'numpy.double', 'np.double', (['row[4]'], {}), '(row[4])\n', (836, 844), True, 'import numpy as np\n')] |
from argparse import ArgumentError
import os
import numpy as np
import gym
# import cv2
import matplotlib.pyplot as plt
gym.logger.set_level(40)  # 40 = ERROR: silence gym's info/warning chatter
os.environ['GYM_CONFIG_CLASS'] = 'Train'
# Simulator settings exported via the environment; presumably read by
# gym_collision_avoidance during the imports below — TODO confirm.
os.environ["global_timeout"] = "1000"
os.environ["global_experiment_number"] = "1"
os.environ["global_dataset_name"] = "custom"
os.environ["global_population_density"] = "0.7"
# os.environ['GYM_CONFIG_CLASS'] = 'Example'
# os.environ['GYM_CONFIG_PATH'] = '/home/mjana/ws/src/Social-Navigation-Simulator/gym_collision_avoidance/experiments/src/train_basic.py'
from gym_collision_avoidance.envs import test_cases as tc
from gym_collision_avoidance.envs import Config
# from gym_collision_avoidance.envs.config import Config as EnvConfig
from gym_collision_avoidance.experiments.src.env_utils import create_env
def distance_penalty(start, goal, state):
    """Return the perpendicular distance from `state` to the line through
    `start` and `goal` (all (x, y) positions), via the standard
    point-to-line formula |cross| / |goal - start|.
    """
    dx = goal[0] - start[0]
    dy = goal[1] - start[1]
    cross = dx * (start[1] - state[1]) - dy * (start[0] - state[0])
    return np.abs(cross) / np.sqrt(dx * dx + dy * dy)
def main():
    '''
    Minimum working example:
    2 agents: 1 training a DQN, 1 running external policy.

    Sets up the collision-avoidance environment, trains agent 0's DQN policy
    against a heading-controlled agent 1, and plots reward / success-rate /
    time-to-goal curves. Returns True when training finished.

    Fix: the inner state-copy loop used to reuse the step counter `i`
    (for i in range(8, num_state_vector)), so after every step `i` was 12:
    `time_to_goal = i` always recorded 12 and the `if i > 1` learn gate was
    always true. The inner loop now uses its own index.
    '''
    env, one_env = create_env()
    # In case you want to save plots, choose the directory
    one_env.set_plot_save_dir(
        os.path.dirname(os.path.realpath(__file__)) + '/../../experiments/results/train/')
    # Set agent configuration (start/goal pos, radius, size, policy)
    agents = tc.get_testcase_one_train()
    init_state = []
    # Record each agent's initial (x, y, heading) from its state vector.
    for i in range(len(agents)):
        state = []
        state.append(agents[i].to_vector()[0][1])
        state.append(agents[i].to_vector()[0][2])
        state.append(agents[i].to_vector()[0][10])
        init_state.append(state)
    [agent.policy.initialize_network() for agent in agents if hasattr(agent.policy, 'initialize_network')]
    one_env.set_agents(agents)
    # Training environment parameters
    num_episodes = 500
    num_steps = 150
    eps_start = 0.95 # exploration probability at start
    eps_end = 0.1 # exploration probability at end
    eps_dec = 0.993 # exploration probability decay factor
    eps = eps_start # exploration probability
    init_pos_min_x = -1.5 # minimum initial position
    init_pos_min_y = -1.5
    init_pos_max_x = 1.5 # maximum initial position
    init_pos_max_y = 1.5
    distance_reward_factor = -0.005 # reward factor for deviating from straight line path
    # Proportional heading controller for the non-learning agent (agent 1).
    set_point = -0.74*np.pi
    P = 1.0
    int_err = 0.0
    err = 0.0
    # Learning agent parameters
    num_actions = 6
    num_state_vector = 13
    agents[0].policy.init_network(num_actions, num_state_vector)
    next_state = np.zeros(num_state_vector, dtype=np.float32)
    curr_state = np.zeros(num_state_vector, dtype=np.float32)
    goal_count = 0
    gx = 1
    # training metrics
    scores = []
    success_rate_list = []
    time_to_goal_list = []
    end = False
    for k in range(num_episodes):
        obs = one_env.reset() # Get agents' initial observations
        # Randomize start/goal of every agent except the last one.
        for j in range(len(agents)-1):
            agents[j].reset(px=np.random.uniform(init_pos_min_x, init_pos_max_x),
                            py=np.random.uniform(init_pos_min_y, init_pos_max_y),
                            # heading=np.random.uniform(0, 2*np.pi),
                            gx = np.random.uniform(init_pos_min_x, init_pos_max_x),
                            gy = np.random.uniform(init_pos_min_y, init_pos_max_y))
            agents[j].is_done = False
            agents[j].is_out_of_bounds = False
            agents[j].ran_out_of_time = False
        cumul_reward = 0
        print("agent, x,y,theta, theta_global: ", agents[0].pos_global_frame, agents[0].heading_ego_frame, agents[0].heading_global_frame)
        print("goal: ", agents[0].goal_global_frame)
        time_to_goal = num_steps
        start = agents[0].pos_global_frame
        goal = agents[0].goal_global_frame
        for i in range(num_steps):
            # Query the external agents' policies
            # e.g., actions[0] = external_policy(dict_obs[0])
            actions = {}
            print(":::::::::::::::::")
            print("\nepisode number", k+1, "agents: ", len(agents))
            p_err = err
            curr = agents[1].heading_global_frame
            err = set_point - agents[1].heading_global_frame
            # P-control on heading error, mapped into the [0, 1] action range.
            control = np.clip(P*err, -1, 1)/2.0 + 0.5
            actions[1] = np.array([0.0, control])
            curr_state = next_state.copy()
            # Epsilon-greedy action from the learning policy.
            rl_action = agents[0].policy.get_action(curr_state, eps)
            actions[0] = rl_action
            # Run a simulation step (check for collisions, move sim agents)
            obs, rewards, game_over, which_agents_done = one_env.step(actions)
            # rewards+= distance_reward_factor*distance_penalty(init_state[0], init_state[1], agents[0].pos_global_frame)
            if agents[0].is_at_goal:
                rewards-= i/num_steps  # reward reaching the goal sooner
            cumul_reward += rewards
            print("done 0? ", agents[0].is_at_goal, agents[0].is_done, agents[0].ran_out_of_time, agents[0].is_out_of_bounds)
            print("done 1? ", agents[1].is_at_goal, agents[1].is_done, agents[1].ran_out_of_time, agents[1].is_out_of_bounds)
            print("which agents done: ",which_agents_done)
            # Assemble the next state vector for the learner.
            next_state[0] = obs[0]['dist_to_goal']
            next_state[1] = agents[0].pos_global_frame[0]
            next_state[2] = agents[0].pos_global_frame[1]
            next_state[3] = agents[0].vel_global_frame[0]
            next_state[4] = agents[0].vel_global_frame[1]
            next_state[5] = obs[0]['heading_ego_frame']
            next_state[6] = agents[0].goal_global_frame[0]
            next_state[7] = agents[0].goal_global_frame[1]
            # BUGFIX: use a dedicated index so the step counter `i` is not clobbered.
            for s_idx in range(8, num_state_vector):
                next_state[s_idx] = obs[0]['other_agent_states'][s_idx-6]
            print("action control", agents[0].policy.external_action_to_action(agents[0], actions[0]))
            print("s", curr_state)
            print("a", actions[0])
            print("r", rewards)
            print("s'", next_state)
            print("other agents", obs)
            print("cumul_reward", cumul_reward)
            print("at goal?:::::::", agents[0].is_at_goal)
            print("eps = ", eps)
            print("sucess rate%", 100*goal_count/(k+1))
            # Train on the transition (skip the first steps where next_state
            # was not yet populated from real observations).
            if i > 1:
                agents[0].policy.learn_step(curr_state, actions[0], rewards, next_state)
            if(agents[0].is_at_goal):
                goal_count+=1
                print("Agent has reached goal")
                time_to_goal = i
                break
            if agents[0].is_done:
                print("agent stopped working")
                end = True
                # break
            # if game_over:
            #     print("All agents finished!")
            #     break
        # if end:
        #     break
        success_rate_list.append(100*goal_count/(k+1))
        time_to_goal_list.append(time_to_goal)
        # Decay exploration every 80 episodes, never below eps_end.
        if (k+1)%80 == 0:
            eps = max(eps_end, eps - 0.15)
        scores.append(cumul_reward)
        env.reset()
    agents[0].policy.save_checkpoint("sns_dqn_1ag_free")
    plt.figure(figsize=(12,8))
    plt.plot(range(num_episodes), scores)
    plt.xlim(-1, num_episodes+1)
    plt.ylim(-2,3)
    plt.xlabel('Episode')
    plt.ylabel('Cumulative Reward')
    plt.title('Cumulative Reward vs Episode')
    plt.figure(figsize=(12,8))
    plt.plot(range(num_episodes), success_rate_list)
    plt.xlim(-1, num_episodes+1)
    plt.ylim(0,100)
    plt.xlabel('Episode')
    plt.ylabel('Success Rate')
    plt.title('Success Rate vs Episode')
    plt.figure(figsize=(12,8))
    plt.plot(range(num_episodes), time_to_goal_list)
    plt.xlim(-1, num_episodes+1)
    plt.ylim(0,255)
    plt.xlabel('Episode')
    plt.ylabel('Time to goal')
    plt.title('Time to goal vs Episode')
    plt.show()
    return True
if __name__ == '__main__':
main()
print("Experiment over.") | [
"numpy.clip",
"numpy.abs",
"matplotlib.pyplot.title",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.realpath",
"numpy.array",
"numpy.zeros",
"gym_collision_avoidance.envs.test_cases.get_testcase_one_train",
"matplotlib.pyplot.figure",
"gym.logger.set_level",
... | [((120, 144), 'gym.logger.set_level', 'gym.logger.set_level', (['(40)'], {}), '(40)\n', (140, 144), False, 'import gym\n'), ((1145, 1157), 'gym_collision_avoidance.experiments.src.env_utils.create_env', 'create_env', ([], {}), '()\n', (1155, 1157), False, 'from gym_collision_avoidance.experiments.src.env_utils import create_env\n'), ((1422, 1449), 'gym_collision_avoidance.envs.test_cases.get_testcase_one_train', 'tc.get_testcase_one_train', ([], {}), '()\n', (1447, 1449), True, 'from gym_collision_avoidance.envs import test_cases as tc\n'), ((2844, 2888), 'numpy.zeros', 'np.zeros', (['num_state_vector'], {'dtype': 'np.float32'}), '(num_state_vector, dtype=np.float32)\n', (2852, 2888), True, 'import numpy as np\n'), ((2906, 2950), 'numpy.zeros', 'np.zeros', (['num_state_vector'], {'dtype': 'np.float32'}), '(num_state_vector, dtype=np.float32)\n', (2914, 2950), True, 'import numpy as np\n'), ((7813, 7840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (7823, 7840), True, 'import matplotlib.pyplot as plt\n'), ((7886, 7916), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(num_episodes + 1)'], {}), '(-1, num_episodes + 1)\n', (7894, 7916), True, 'import matplotlib.pyplot as plt\n'), ((7919, 7934), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(3)'], {}), '(-2, 3)\n', (7927, 7934), True, 'import matplotlib.pyplot as plt\n'), ((7938, 7959), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (7948, 7959), True, 'import matplotlib.pyplot as plt\n'), ((7964, 7995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Reward"""'], {}), "('Cumulative Reward')\n", (7974, 7995), True, 'import matplotlib.pyplot as plt\n'), ((8000, 8041), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Reward vs Episode"""'], {}), "('Cumulative Reward vs Episode')\n", (8009, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8047, 8074), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (8057, 8074), True, 'import matplotlib.pyplot as plt\n'), ((8131, 8161), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(num_episodes + 1)'], {}), '(-1, num_episodes + 1)\n', (8139, 8161), True, 'import matplotlib.pyplot as plt\n'), ((8164, 8180), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (8172, 8180), True, 'import matplotlib.pyplot as plt\n'), ((8184, 8205), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (8194, 8205), True, 'import matplotlib.pyplot as plt\n'), ((8210, 8236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Success Rate"""'], {}), "('Success Rate')\n", (8220, 8236), True, 'import matplotlib.pyplot as plt\n'), ((8241, 8277), 'matplotlib.pyplot.title', 'plt.title', (['"""Success Rate vs Episode"""'], {}), "('Success Rate vs Episode')\n", (8250, 8277), True, 'import matplotlib.pyplot as plt\n'), ((8283, 8310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (8293, 8310), True, 'import matplotlib.pyplot as plt\n'), ((8367, 8397), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(num_episodes + 1)'], {}), '(-1, num_episodes + 1)\n', (8375, 8397), True, 'import matplotlib.pyplot as plt\n'), ((8400, 8416), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(255)'], {}), '(0, 255)\n', (8408, 8416), True, 'import matplotlib.pyplot as plt\n'), ((8420, 8441), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (8430, 8441), True, 'import matplotlib.pyplot as plt\n'), ((8446, 8472), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time to goal"""'], {}), "('Time to goal')\n", (8456, 8472), True, 'import matplotlib.pyplot as plt\n'), ((8477, 8513), 'matplotlib.pyplot.title', 'plt.title', (['"""Time to goal vs Episode"""'], {}), "('Time to goal vs Episode')\n", (8486, 8513), True, 'import matplotlib.pyplot as plt\n'), ((8520, 8530), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (8528, 8530), True, 'import matplotlib.pyplot as plt\n'), ((870, 973), 'numpy.abs', 'np.abs', (['((goal[0] - start[0]) * (start[1] - state[1]) - (goal[1] - start[1]) * (\n start[0] - state[0]))'], {}), '((goal[0] - start[0]) * (start[1] - state[1]) - (goal[1] - start[1]) *\n (start[0] - state[0]))\n', (876, 973), True, 'import numpy as np\n'), ((956, 1018), 'numpy.sqrt', 'np.sqrt', (['((goal[0] - start[0]) ** 2 + (goal[1] - start[1]) ** 2)'], {}), '((goal[0] - start[0]) ** 2 + (goal[1] - start[1]) ** 2)\n', (963, 1018), True, 'import numpy as np\n'), ((4629, 4653), 'numpy.array', 'np.array', (['[0.0, control]'], {}), '([0.0, control])\n', (4637, 4653), True, 'import numpy as np\n'), ((1272, 1298), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1288, 1298), False, 'import os\n'), ((3261, 3310), 'numpy.random.uniform', 'np.random.uniform', (['init_pos_min_x', 'init_pos_max_x'], {}), '(init_pos_min_x, init_pos_max_x)\n', (3278, 3310), True, 'import numpy as np\n'), ((3343, 3392), 'numpy.random.uniform', 'np.random.uniform', (['init_pos_min_y', 'init_pos_max_y'], {}), '(init_pos_min_y, init_pos_max_y)\n', (3360, 3392), True, 'import numpy as np\n'), ((3496, 3545), 'numpy.random.uniform', 'np.random.uniform', (['init_pos_min_x', 'init_pos_max_x'], {}), '(init_pos_min_x, init_pos_max_x)\n', (3513, 3545), True, 'import numpy as np\n'), ((3581, 3630), 'numpy.random.uniform', 'np.random.uniform', (['init_pos_min_y', 'init_pos_max_y'], {}), '(init_pos_min_y, init_pos_max_y)\n', (3598, 3630), True, 'import numpy as np\n'), ((4539, 4562), 'numpy.clip', 'np.clip', (['(P * err)', '(-1)', '(1)'], {}), '(P * err, -1, 1)\n', (4546, 4562), True, 'import numpy as np\n')] |
# imports from core
from pyjamas_core import Supermodel
from pyjamas_core.util import Input, Output
# imports for database
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Models.Technology.European_power_plant.V001.db.db_declarative import Base, Kraftwerk, Co2Preis
# all other tables are used indirectly starting at Kraftwerk
# general imports
import time
import numpy as np
from scipy.interpolate import griddata
from dotenv import load_dotenv
from os import environ
import ast
# define the model class and inherit from class "Supermodel"
class Model(Supermodel):
    """European power-plant park ("Kraftwerkspark") model.

    On the first periodic call the model queries plant master data from the
    database, interpolates all time- and location-dependent prices onto each
    plant, derives the marginal generation cost and caches the assembled
    park; every call emits the cached park on output 'kw_park'.
    """
    # model constructor
    def __init__(self, model_id, name: str):
        """Register inputs/outputs and initialise persistent state."""
        # instantiate supermodel
        super(Model, self).__init__(model_id, name)
        # define inputs
        self.inputs['t'] = Input('Zeit')
        # define outputs
        self.outputs['kw_park'] = Output('Kraftwerkspark')
        # define persistent variables
        self.db = None  # database session, opened in func_birth()
        self.kwp = None  # cached power-plant park dict, built in func_peri()
    async def func_birth(self):
        """One-time setup hook: open the database session."""
        # start database connection
        self.db = start_db()
    async def func_peri(self, prep_to_peri=None):
        """Periodic hook: assemble the power-plant park once, then re-emit it.

        Stages: database queries -> interpolation of prices onto each
        plant's location and the first input timestamp -> cost
        calculation -> packing of the output dict.
        """
        # the park is computed only once and cached in self.kwp
        if self.kwp is None:
            # get inputs
            t_in = await self.get_input('t')
            # use only first time value for interpolation (as list)
            kw_time = [t_in[0]]
            time0 = time.time()
            print(f"start database queries")
            """
            Naming conventions for queried and interpolated data:
            db_ : Queried directly from database or taken from another db_ value.
            kw_ : Value valid for a single power plant
            _int : interpolated values
            """
            # ---------------------- QUERYS -----------------------------------------------
            # query Kraftwerk; ordering by id keeps all per-plant lists below index-aligned
            db_kw = self.db.query(Kraftwerk).order_by(Kraftwerk.id).all()
            db_kw_id = [i.id for i in db_kw]
            db_kw_bez = [i.bezeichnung for i in db_kw]
            db_kw_fk_kwt = [i.fk_kraftwerkstyp for i in db_kw]
            db_kw_long = [i.long for i in db_kw]
            db_kw_lat = [i.lat for i in db_kw]
            # query Kraftwerkstyp (plant type) via relationship, one entry per plant
            db_kwt_id = [i.kraftwerkstyp.id for i in db_kw]
            db_kwt_bez = [i.kraftwerkstyp.bezeichnung for i in db_kw]
            db_kwt_bez_subtyp = [i.kraftwerkstyp.bezeichnung_subtyp for i in db_kw]
            db_kwt_fk_brennstofftyp = [i.kraftwerkstyp.fk_brennstofftyp for i in db_kw]
            db_kwt_wirkungsgrad = [i.kraftwerkstyp.wirkungsgrad for i in db_kw]
            db_kwt_p_typisch = [i.kraftwerkstyp.p_typisch for i in db_kw]
            db_kwt_spez_info = [ast.literal_eval(i.kraftwerkstyp.spez_info) for i in db_kw]  # change string to dict
            # query Brennstofftyp (fuel type)
            db_bst_id = [i.kraftwerkstyp.brennstofftyp.id for i in db_kw]
            db_bst_bez = [i.kraftwerkstyp.brennstofftyp.bezeichnung for i in db_kw]
            db_bst_co2emissfakt = [i.kraftwerkstyp.brennstofftyp.co2emissFakt for i in db_kw]
            # query Co2Preis (CO2 price time series; has no location columns)
            db_co2 = self.db.query(Co2Preis).all()
            db_co2_t = [i.datetime for i in db_co2]
            db_co2_preis = [i.preis for i in db_co2]
            time1 = time.time()
            d_time = time1 - time0
            print(f"-> database queries finished successfully in {d_time}s")
            print("start interpolation")
            # ---------------------- INTERPOLATION ----------------------------------------
            # Brennstoffpreis (fuel price) interpolation, per plant in time and space
            bs_preis_int = []
            for kw in db_kw:
                if kw.kraftwerkstyp.brennstofftyp.bezeichnung == "None":
                    kw_bs_preis = [float(0)]  # Brennstoffpreis to zero if type equals "None"
                else:
                    db_bsp = kw.kraftwerkstyp.brennstofftyp.brennstoffpreise
                    db_bsp_t = [i.datetime for i in db_bsp]
                    db_bsp_lat = [i.lat for i in db_bsp]
                    db_bsp_long = [i.long for i in db_bsp]
                    db_bsp_preis = [i.preis for i in db_bsp]
                    kw_bs_preis = self.interpol_3d(db_bsp_t, db_bsp_lat, db_bsp_long, db_bsp_preis,
                                                   kw.lat, kw.long, kw_time)
                # append new kw_bs_preis (list) to existing list
                bs_preis_int = bs_preis_int + kw_bs_preis
            # CO2-Preis interpolation: same time-interpolated price for every plant
            co2_preis_int = [self.interpol_1d(db_co2_t, db_co2_preis, kw_time)[0] for _ in db_kw]
            # Entsorgungspreis (disposal price) interpolation
            ents_preis_int = []
            for kw in db_kw:
                db_ents = kw.kraftwerkstyp.entsorgungspreise
                # check if values are present (some powerplant types don't have a value, e.g. wind, solar,...)
                if len(db_ents) == 0:
                    kw_ents = [float(0)]  # set to zero if no values present
                else:
                    db_ents_t = [i.datetime for i in db_ents]
                    db_ents_lat = [i.lat for i in db_ents]
                    db_ents_long = [i.long for i in db_ents]
                    db_ents_preis = [i.preis for i in db_ents]
                    kw_ents = self.interpol_3d(db_ents_t, db_ents_lat, db_ents_long, db_ents_preis,
                                               kw.lat, kw.long, kw_time)
                # append new ents_preis_kw (list) to existing list
                ents_preis_int = ents_preis_int + kw_ents
            # Installed power interpolation (time only)
            pinst_int = []
            for kw in db_kw:
                db_pinst = kw.kraftwerksleistungen
                db_pinst_t = [i.datetime for i in db_pinst]
                db_pinst_p = [i.power_inst for i in db_pinst]
                # append new pinst (list) to existing list
                pinst_int = pinst_int + self.interpol_1d(db_pinst_t, db_pinst_p, kw_time)
            # Variable Opex interpolation (time only)
            varopex_int = []
            for kw in db_kw:
                db_varopex = kw.kraftwerkstyp.var_opex
                db_varopex_t = [i.datetime for i in db_varopex]
                db_varopex_preis = [i.preis for i in db_varopex]
                # append new opex (list) to existing list
                varopex_int = varopex_int + self.interpol_1d(db_varopex_t, db_varopex_preis, kw_time)
            # Capex interpolation (time only)
            capex_int = []
            for kw in db_kw:
                db_capex = kw.kraftwerkstyp.capex
                db_capex_t = [i.datetime for i in db_capex]
                db_capex_preis = [i.preis for i in db_capex]
                # append new opex (list) to existing list
                capex_int = capex_int + self.interpol_1d(db_capex_t, db_capex_preis, kw_time)
            time2 = time.time()
            d_time = time2 - time1
            print(f"-> interpolation finished successfully in {d_time}s")
            print("start calculation")
            # ---------------------- CALCULATION ------------------------------------------
            # calculation CO2-Kosten: price * emission factor / efficiency
            co2_kosten = [a*b/c for a, b, c in zip(co2_preis_int, db_bst_co2emissfakt, db_kwt_wirkungsgrad)]
            # calculation Entsorgungskosten: disposal price / efficiency
            ents_kosten = [a/b for a, b in zip(ents_preis_int, db_kwt_wirkungsgrad)]
            # calculation Brennstoffkosten: fuel price / efficiency
            bs_kosten = [a/b for a, b in zip(bs_preis_int, db_kwt_wirkungsgrad)]
            # calculation Grenzkosten (Marginal Cost) = var. opex + fuel + CO2 + disposal
            grenz_kosten = [a+b+c+d for a, b, c, d in zip(varopex_int, bs_kosten, co2_kosten, ents_kosten)]
            time3 = time.time()
            d_time = time3 - time2
            print(f"-> calculation finished successfully in {d_time}s")
            print("start defining output")
            # ---------------------- DEFINE OUTPUTS ---------------------------------------
            # output sorted by id, units in comments
            kwp = {"id": db_kw_id,  # [-]
                   "kw_bezeichnung": db_kw_bez,  # [-]
                   "lat": db_kw_lat,  # [deg]
                   "long": db_kw_long,  # [deg]
                   "p_inst": pinst_int,  # [W]
                   "fk_kraftwerkstyp": db_kw_fk_kwt,  # [-]
                   "kwt_id": db_kwt_id,  # [-]
                   "bez_kraftwerkstyp": db_kwt_bez,  # [-]
                   "bez_subtyp": db_kwt_bez_subtyp,  # [-]
                   "wirkungsgrad": db_kwt_wirkungsgrad,  # [-]
                   "var_opex": varopex_int,  # [€/J]
                   "capex": capex_int,  # [€/W_el]
                   "p_typisch": db_kwt_p_typisch,  # [W]
                   "spez_info": db_kwt_spez_info,  # dict with "NH" [m] and "Z0" [m]
                   "entsorgungspreis": ents_preis_int,  # [€/J_bs]
                   "fk_brennstofftyp": db_kwt_fk_brennstofftyp,  # [-]
                   "brennstofftyp_id": db_bst_id,  # [-]
                   "bez_brennstofftyp": db_bst_bez,  # [-]
                   "co2emissfakt": db_bst_co2emissfakt,  # [kg_CO2/J_bs]
                   "bs_preis": bs_preis_int,  # [€/J_bs]
                   "co2_preis": co2_preis_int,  # [€/kg_CO2]
                   "co2_kosten": co2_kosten,  # [€/J_el]
                   "entsorgungskosten": ents_kosten,  # [€/J_el]
                   "brennstoffkosten": bs_kosten,  # [€/J_el]
                   "grenzkosten": grenz_kosten,  # [€/J_el]
                   }
            time4 = time.time()
            d_time = time4 - time3
            print(f"-> defining output finished successfully in {d_time}s")
            d_time = time4 - time0
            print(f"-> -> -> eu power plant finished successfully in {d_time}s")
            print("")
            self.kwp = kwp
        # emit the (possibly cached) park every cycle
        self.set_output("kw_park", self.kwp)
    # 3D Interpolation
    def interpol_3d(self, db_time, db_lat, db_long, db_values, kw_lat, kw_long, kw_time):
        """
        This function interpolates in a grid of points (db_lat,db_long,db_time) with assigned values (db_values).
        It interpolates for points given by (kw_lat, kw_long, kw_time) and outputs their corresponding value.
        Values inside the grid are interpolated linearly and values outside of the grid are interpolated to the
        nearest point of the grid.
        ATTENTION: If there are less than 4 points in db_... no grid can be formed and everything will be "interpolated"
        to nearest.
        Also, it is not allowed to have all points forming a plane, they must span a 3dimensional space
        | "db_" inputs are things as prices or similar
        | "kw_" inputs denote the power plants
        INPUTS:
            | db_lat: Latitude, list of [float]; nx1
            | db_long: Longitude, list of [float]; nx1
            | db_time: Time, list of [float](timestamp in [s]); nx1
            | db_values: list of [float]; nx1
            | kw_lat: Latitude, list of [float]; jx1
            | kw_long: Longitude, list of [float]; jx1
            | kw_time: Time, list of [float](timestamp in [s]); jx1
        OUTPUTS:
            kw_values: list of [float]; jx1
        """
        # change to ndarray for usage in griddata
        db_lat = np.asarray(db_lat)
        db_long = np.asarray(db_long)
        db_time = np.asarray(db_time)
        db_values = np.asarray(db_values)
        kw_lat = np.asarray(kw_lat)
        kw_long = np.asarray(kw_long)
        kw_time = np.asarray(kw_time)
        # arrange inputs for griddata
        xi = np.vstack((kw_lat, kw_long, kw_time))
        gridpoints = np.vstack((db_lat, db_long, db_time))
        # interpolate
        interp_nearest = griddata(gridpoints.T, db_values.T, xi.T, method='nearest')
        # if not enough db-points present only interpolate nearest (see docstring)
        if db_values.size < 4:
            kw_values = interp_nearest
        else:
            interp_linear = griddata(gridpoints.T, db_values.T, xi.T, method='linear')
            # replace Nan (out of range values) in linear with nearest
            kw_values = np.where(np.isnan(interp_linear), interp_nearest, interp_linear)
        # make output list
        kw_values = kw_values.tolist()
        return kw_values
    # 2D Interpolation
    def interpol_2d(self, db_lat, db_long, db_values, kw_lat, kw_long):
        """
        This function interpolates in a grid of points (db_lat,db_long) with assigned values (db_values).
        It interpolates for points given by (kw_lat, kw_long) and outputs their corresponding value.
        Values inside the grid are interpolated linearly and values outside of the grid are interpolated
        to the nearest point of the grid.
        ATTENTION: If there are less than 3 points in db_... no grid can be formed and everything will be "interpolated"
        to nearest.
        Also, it is not allowed to have all points forming a line, they must span a 2dimensional space
        | "db_" inputs are things as prices or similar
        | "kw_" inputs denote the power plants
        INPUTS:
            | db_lat: Latitude, list of [float]; nx1
            | db_long: Longitude, list of [float]; nx1
            | db_values: list of [float]; nx1
            | kw_lat: Latitude, list of [float]; jx1
            | kw_long: Longitude, list of [float]; jx1
        OUTPUTS:
            kw_values: list of [float]; jx1
        """
        # change to ndarray for usage in griddata
        db_lat = np.asarray(db_lat)
        db_long = np.asarray(db_long)
        db_values = np.asarray(db_values)
        kw_lat = np.asarray(kw_lat)
        kw_long = np.asarray(kw_long)
        # arrange inputs for griddata
        xi = np.vstack((kw_lat, kw_long))
        gridpoints = np.vstack((db_lat, db_long))
        # interpolate
        interp_nearest = griddata(gridpoints.T, db_values.T, xi.T, method='nearest')
        # if not enough db-points present only interpolate nearest (see docstring)
        if db_values.size < 3:
            kw_values = interp_nearest
        else:
            interp_linear = griddata(gridpoints.T, db_values.T, xi.T, method='linear')
            # replace Nan (out of range values) in linear with nearest
            kw_values = np.where(np.isnan(interp_linear), interp_nearest, interp_linear)
        # make output list
        kw_values = kw_values.tolist()
        return kw_values
    # 1D Interpolation
    def interpol_1d(self, db_time, db_values, kw_time):
        """
        This function interpolates in one dimension.
        | X: time
        | Y: values
        | xi: kw_time
        | yi: kw_values (output)
        Values inside [X(min), X(max)] are interpolated linearly,
        values outside of it are interpolated to the nearest X.
        If only one value for X and Y is provided, the output array is filled with the input value (nearest)
        INPUTS:
            | time: list of [float](timestamp in [s]); nx1
            | values: list of [float]; nx1
            | kw_time: list of [float](timestamp in [s]); mx1
        OUTPUTS:
            kw_values: list of [float]; mx1
        """
        # change to ndarray for usage in griddata
        db_time = np.asarray(db_time)
        db_values = np.asarray(db_values)
        kw_time = np.asarray(kw_time)
        if db_time.size > 1:
            # interpolate
            interp_nearest = griddata(db_time.T, db_values.T, kw_time.T, method='nearest')
            interp_linear = griddata(db_time.T, db_values.T, kw_time.T, method='linear')
            # replace Nan (out of range values) in linear with nearest
            kw_values = np.where(np.isnan(interp_linear), interp_nearest, interp_linear)
        else:
            # if only one time and one value is provided, set output to nearest (which in this case is the input value)
            kw_values = np.full(kw_time.size, db_values[0])
        # make output list
        kw_values = kw_values.tolist()
        return kw_values
# Start database connection from path provided in .env
def start_db():
    """Open a SQLAlchemy session to the power-plant database.

    The database URL is read from the ``KW_DB`` environment variable,
    populated from the local ``.env`` file via python-dotenv.
    """
    load_dotenv()
    # The engine represents the actual database.
    engine = create_engine(environ.get("KW_DB"))
    # Bind the declarative base, then hand out a fresh session.
    Base.metadata.bind = engine
    return sessionmaker(bind=engine)()
if __name__ == "__main__":
    # Build a 24 h input horizon: 96 quarter-hour timestamps starting "now".
    step = 900
    start = time.time()
    t = [start + step * i for i in range(96)]
    inputs = {'t': t}
    properties = {}
    # Run the model's built-in test harness with these inputs.
    test = Model.test(inputs, properties)
    stop = 1  # anchor for a debugger breakpoint
| [
"pyjamas_core.util.Input",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"scipy.interpolate.griddata",
"os.environ.get",
"numpy.asarray",
"dotenv.load_dotenv",
"ast.literal_eval",
"pyjamas_core.util.Output",
"numpy.isnan",
"numpy.vstack",
"numpy.full",
"time.time"
] | [((16137, 16150), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (16148, 16150), False, 'from dotenv import load_dotenv\n'), ((16165, 16185), 'os.environ.get', 'environ.get', (['"""KW_DB"""'], {}), "('KW_DB')\n", (16176, 16185), False, 'from os import environ\n'), ((16231, 16253), 'sqlalchemy.create_engine', 'create_engine', (['db_path'], {}), '(db_path)\n', (16244, 16253), False, 'from sqlalchemy import create_engine\n'), ((16488, 16499), 'time.time', 'time.time', ([], {}), '()\n', (16497, 16499), False, 'import time\n'), ((810, 823), 'pyjamas_core.util.Input', 'Input', (['"""Zeit"""'], {}), "('Zeit')\n", (815, 823), False, 'from pyjamas_core.util import Input, Output\n'), ((884, 908), 'pyjamas_core.util.Output', 'Output', (['"""Kraftwerkspark"""'], {}), "('Kraftwerkspark')\n", (890, 908), False, 'from pyjamas_core.util import Input, Output\n'), ((11262, 11280), 'numpy.asarray', 'np.asarray', (['db_lat'], {}), '(db_lat)\n', (11272, 11280), True, 'import numpy as np\n'), ((11299, 11318), 'numpy.asarray', 'np.asarray', (['db_long'], {}), '(db_long)\n', (11309, 11318), True, 'import numpy as np\n'), ((11337, 11356), 'numpy.asarray', 'np.asarray', (['db_time'], {}), '(db_time)\n', (11347, 11356), True, 'import numpy as np\n'), ((11377, 11398), 'numpy.asarray', 'np.asarray', (['db_values'], {}), '(db_values)\n', (11387, 11398), True, 'import numpy as np\n'), ((11416, 11434), 'numpy.asarray', 'np.asarray', (['kw_lat'], {}), '(kw_lat)\n', (11426, 11434), True, 'import numpy as np\n'), ((11453, 11472), 'numpy.asarray', 'np.asarray', (['kw_long'], {}), '(kw_long)\n', (11463, 11472), True, 'import numpy as np\n'), ((11491, 11510), 'numpy.asarray', 'np.asarray', (['kw_time'], {}), '(kw_time)\n', (11501, 11510), True, 'import numpy as np\n'), ((11563, 11600), 'numpy.vstack', 'np.vstack', (['(kw_lat, kw_long, kw_time)'], {}), '((kw_lat, kw_long, kw_time))\n', (11572, 11600), True, 'import numpy as np\n'), ((11622, 11659), 'numpy.vstack', 'np.vstack', (['(db_lat, 
db_long, db_time)'], {}), '((db_lat, db_long, db_time))\n', (11631, 11659), True, 'import numpy as np\n'), ((11708, 11767), 'scipy.interpolate.griddata', 'griddata', (['gridpoints.T', 'db_values.T', 'xi.T'], {'method': '"""nearest"""'}), "(gridpoints.T, db_values.T, xi.T, method='nearest')\n", (11716, 11767), False, 'from scipy.interpolate import griddata\n'), ((13535, 13553), 'numpy.asarray', 'np.asarray', (['db_lat'], {}), '(db_lat)\n', (13545, 13553), True, 'import numpy as np\n'), ((13572, 13591), 'numpy.asarray', 'np.asarray', (['db_long'], {}), '(db_long)\n', (13582, 13591), True, 'import numpy as np\n'), ((13612, 13633), 'numpy.asarray', 'np.asarray', (['db_values'], {}), '(db_values)\n', (13622, 13633), True, 'import numpy as np\n'), ((13651, 13669), 'numpy.asarray', 'np.asarray', (['kw_lat'], {}), '(kw_lat)\n', (13661, 13669), True, 'import numpy as np\n'), ((13688, 13707), 'numpy.asarray', 'np.asarray', (['kw_long'], {}), '(kw_long)\n', (13698, 13707), True, 'import numpy as np\n'), ((13760, 13788), 'numpy.vstack', 'np.vstack', (['(kw_lat, kw_long)'], {}), '((kw_lat, kw_long))\n', (13769, 13788), True, 'import numpy as np\n'), ((13810, 13838), 'numpy.vstack', 'np.vstack', (['(db_lat, db_long)'], {}), '((db_lat, db_long))\n', (13819, 13838), True, 'import numpy as np\n'), ((13887, 13946), 'scipy.interpolate.griddata', 'griddata', (['gridpoints.T', 'db_values.T', 'xi.T'], {'method': '"""nearest"""'}), "(gridpoints.T, db_values.T, xi.T, method='nearest')\n", (13895, 13946), False, 'from scipy.interpolate import griddata\n'), ((15278, 15297), 'numpy.asarray', 'np.asarray', (['db_time'], {}), '(db_time)\n', (15288, 15297), True, 'import numpy as np\n'), ((15318, 15339), 'numpy.asarray', 'np.asarray', (['db_values'], {}), '(db_values)\n', (15328, 15339), True, 'import numpy as np\n'), ((15358, 15377), 'numpy.asarray', 'np.asarray', (['kw_time'], {}), '(kw_time)\n', (15368, 15377), True, 'import numpy as np\n'), ((16352, 16377), 'sqlalchemy.orm.sessionmaker', 
'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (16364, 16377), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1365, 1376), 'time.time', 'time.time', ([], {}), '()\n', (1374, 1376), False, 'import time\n'), ((3273, 3284), 'time.time', 'time.time', ([], {}), '()\n', (3282, 3284), False, 'import time\n'), ((6844, 6855), 'time.time', 'time.time', ([], {}), '()\n', (6853, 6855), False, 'import time\n'), ((7682, 7693), 'time.time', 'time.time', ([], {}), '()\n', (7691, 7693), False, 'import time\n'), ((9493, 9504), 'time.time', 'time.time', ([], {}), '()\n', (9502, 9504), False, 'import time\n'), ((11964, 12022), 'scipy.interpolate.griddata', 'griddata', (['gridpoints.T', 'db_values.T', 'xi.T'], {'method': '"""linear"""'}), "(gridpoints.T, db_values.T, xi.T, method='linear')\n", (11972, 12022), False, 'from scipy.interpolate import griddata\n'), ((14143, 14201), 'scipy.interpolate.griddata', 'griddata', (['gridpoints.T', 'db_values.T', 'xi.T'], {'method': '"""linear"""'}), "(gridpoints.T, db_values.T, xi.T, method='linear')\n", (14151, 14201), False, 'from scipy.interpolate import griddata\n'), ((15463, 15524), 'scipy.interpolate.griddata', 'griddata', (['db_time.T', 'db_values.T', 'kw_time.T'], {'method': '"""nearest"""'}), "(db_time.T, db_values.T, kw_time.T, method='nearest')\n", (15471, 15524), False, 'from scipy.interpolate import griddata\n'), ((15553, 15613), 'scipy.interpolate.griddata', 'griddata', (['db_time.T', 'db_values.T', 'kw_time.T'], {'method': '"""linear"""'}), "(db_time.T, db_values.T, kw_time.T, method='linear')\n", (15561, 15613), False, 'from scipy.interpolate import griddata\n'), ((15932, 15967), 'numpy.full', 'np.full', (['kw_time.size', 'db_values[0]'], {}), '(kw_time.size, db_values[0])\n', (15939, 15967), True, 'import numpy as np\n'), ((2694, 2737), 'ast.literal_eval', 'ast.literal_eval', (['i.kraftwerkstyp.spez_info'], {}), '(i.kraftwerkstyp.spez_info)\n', (2710, 2737), False, 'import ast\n'), ((12127, 12150), 
'numpy.isnan', 'np.isnan', (['interp_linear'], {}), '(interp_linear)\n', (12135, 12150), True, 'import numpy as np\n'), ((14306, 14329), 'numpy.isnan', 'np.isnan', (['interp_linear'], {}), '(interp_linear)\n', (14314, 14329), True, 'import numpy as np\n'), ((15718, 15741), 'numpy.isnan', 'np.isnan', (['interp_linear'], {}), '(interp_linear)\n', (15726, 15741), True, 'import numpy as np\n')] |
"""
Usage:
1. training
python run_this.py --action='train'\
-p ./train -v ./val
2. prediction
python run_this.py --action='predict'\
-p /test/**.jpg
"""
import time
import json
import argparse
import sys
sys.path.append('..')
from model.mobilenet import MobileNet
import numpy as np
import cv2
import os
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard,SGDLearningRateTracker
from keras.models import load_model
def augmean(img, imgNorm, width, height):
    """Normalise an image for network input.

    Modes:
      - "sub_and_divide": resize to (width, height) and scale pixels to [-1, 1].
      - "sub_mean": resize and subtract per-channel BGR means.
      - "divide": scale pixel values to [-1, 1] without resizing.
    Any other mode returns the image unchanged.
    """
    if imgNorm == "sub_and_divide":
        resized = cv2.resize(img, (width, height))
        img = np.float32(resized) / 127.5 - 1
    elif imgNorm == "sub_mean":
        img = cv2.resize(img, (width, height)).astype(np.float32)
        # channel means in BGR order (OpenCV's default channel layout)
        for channel, mean in enumerate((103.939, 116.779, 123.68)):
            img[:, :, channel] -= mean
    elif imgNorm == "divide":
        # NOTE: this mode intentionally skips the resize step.
        img = img.astype(np.float32) / 255.0
        img -= 0.5
        img *= 2.
    return img
def preprocess_input_aug(x):
    """Preprocessing hook for augmented training images (mean subtraction)."""
    return augmean(x, "sub_mean", width=224, height=224)
def preprocess_input(x):
    """Preprocessing hook for validation/prediction images (mean subtraction)."""
    return augmean(x, "sub_mean", width=224, height=224)
def parse_json(fname):
    """Parse the input profile.

    @param fname: input profile path
    @return data: a dictionary with user-defined data for training
    """
    with open(fname) as handle:
        return json.load(handle)
class NumpyAwareJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy types.

    Generalizes the original (which handled only 1-D ndarrays) to:
      - any ndarray, serialized via tolist() (nested lists for >1-D), and
      - NumPy scalar types (np.int64, np.float32, ...), via item().
    Everything else falls through to the base class, which raises TypeError.
    """
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            # tolist() recursively converts any rank to nested Python lists
            return obj.tolist()
        if isinstance(obj, np.generic):
            # NumPy scalars (e.g. np.float32) -> native Python scalars
            return obj.item()
        return json.JSONEncoder.default(self, obj)
def write_json(data, fname='./output.json'):
    """Write data to json.

    @param data: object to be written

    Keyword arguments:
    fname -- output filename (default './output.json')
    """
    with open(fname, 'w') as out_file:
        json.dump(data, out_file, cls=NumpyAwareJSONEncoder)
def print_time(t0, s):
    """Report the time spent since *t0* and return a fresh timestamp.

    @param t0: previous timestamp
    @param s: description of this step
    """
    elapsed = time.time() - t0
    print("%.5f seconds to %s" % (elapsed, s))
    return time.time()
def main():
    """CLI entry point: train the MobileNet classifier or run prediction.

    --action train:   fit on directory-structured image data, log to
                      TensorBoard, save weights and model parameters.
    --action predict: reload saved weights and classify images under the
                      validation path, reporting per-image and overall accuracy.
    """
    parser = argparse.ArgumentParser(
        description="MobileNet example."
    )
    parser.add_argument(
        "--batch-size", type=int, default=32, dest='batchsize',
        help="Size of the mini batch. Default: 32."
    )
    parser.add_argument(
        "--action", type=str, default='train',
        help="Action to be performed, train/predict"
    )
    parser.add_argument(
        "--epochs", type=int, default=100,
        help="Number of epochs, default 20."
    )
    parser.add_argument(
        "--lr", type=float, default=0.01,
        help="Learning rate of SGD, default 0.001."
    )
    parser.add_argument(
        "--epsilon", type=float, default=1e-8,
        help="Epsilon of Adam epsilon, default 1e-8."
    )
    parser.add_argument(
        "-p", "--path", type=str, default='./train', dest='trainpath',#required=True,
        help="Path where the images are. Default: $PWD."
    )
    parser.add_argument(
        "-v", "--val-path", type=str,default='./val',
        dest='valpath', help="Path where the val images are. Default: $PWD."
    )
    parser.add_argument(
        "--img-width", type=int, default=224, dest='width',
        help="Rows of the images, default: 224."
    )
    parser.add_argument(
        "--img-height", type=int, default=224, dest='height',
        help="Columns of the images, default: 224."
    )
    parser.add_argument(
        "--channels", type=int, default=3,
        help="Channels of the images, default: 3."
    )
    args = parser.parse_args()
    # SGD with weight decay and momentum; the Adam alternative is kept below, disabled.
    sgd = SGD(lr=args.lr, decay=0.0001,momentum=0.9)#,
    #adma=Adam(args.lr,beta_1=0.9,beta_2=0.999,epsilon=1e-8)
    t0 = time.time()
    if args.action == 'train':
        # Augmented generator for training; validation only gets mean subtraction.
        train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input_aug,
            #rescale=1./255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)
        validation_datagen=ImageDataGenerator(preprocessing_function=preprocess_input)
        #rescale=1./255)
        # Class labels are inferred from the subdirectory structure.
        train_generator=train_datagen.flow_from_directory(args.trainpath,target_size=(224, 224),batch_size=args.batchsize)
        validation_generator=validation_datagen.flow_from_directory(args.valpath,target_size=(224, 224),batch_size=args.batchsize)
        classes = train_generator.class_indices
        nb_train_samples = train_generator.samples
        nb_val_samples = validation_generator.samples
        print("[demo] N training samples: %i " % nb_train_samples)
        print("[demo] N validation samples: %i " % nb_val_samples)
        nb_class = train_generator.num_classes
        print('[demo] Total classes are %i' % nb_class)
        t0 = print_time(t0, 'initialize data')
        # Slim MobileNet variant (width multiplier alpha=0.25).
        model = MobileNet(input_shape=(args.height, args.width,args.channels),alpha=0.25,classes=nb_class)
        # dp.visualize_model(model)
        t0 = print_time(t0, 'build the model')
        model.compile(
            #optimizer=sgd, loss='categorical_crossentropy',
            optimizer=sgd, loss='binary_crossentropy',
            metrics=['accuracy'])
        t0 = print_time(t0, 'compile model')
        tb_cb = TensorBoard(log_dir='./logs',
                write_images=True, histogram_freq=0, write_graph=True)
        model.fit_generator(
            train_generator,
            steps_per_epoch=nb_train_samples//args.batchsize,
            epochs=args.epochs,
            validation_data=validation_generator,
            validation_steps=nb_val_samples//args.batchsize,
            callbacks=[tb_cb,SGDLearningRateTracker('./learning.txt')])
        t0 = print_time(t0, 'train model')
        model.save_weights('./weights/weights.h5', overwrite=True)
        # Persist the run configuration so prediction can reload it.
        model_parms = {'nb_class': nb_class,
                       'nb_train_samples': nb_train_samples,
                       'nb_val_samples': nb_val_samples,
                       'classes': classes,
                       'channels': args.channels,
                       'height': args.height,
                       'width': args.width}
        write_json(model_parms, fname='./logs/model_parms.json')
        t0 = print_time(t0, 'save model')
    elif args.action == 'predict':
        _parms = parse_json('./logs/model_parms.json')
        # NOTE(review): classes is hard-coded to 2 here instead of _parms['nb_class'] -- confirm intended.
        model = MobileNet(input_shape=(args.height, args.width,args.channels),alpha=0.25,classes=2)
        weights_path='./weights/weights.h5'
        model.load_weights(weights_path)
        model.compile(
            optimizer=sgd, loss='binary_crossentropy',
            metrics=['accuracy'])
        t0 = print_time(t0, 'prepare data')
        # Walk the validation tree; each subdirectory name doubles as the class label.
        subdirs=os.listdir(args.valpath)
        imagenames=[]
        imagesarray=[]
        m_class=[]
        for subdir in subdirs:
            images=os.listdir(args.valpath+'/'+str(subdir))
            for image in images:
                # NOTE(review): cv2.COLOR_BGR2RGB is a color-conversion code, not an
                # imread flag -- passing it here likely does not do what it suggests; verify.
                img=cv2.imread(args.valpath+'/'+subdir+'/'+image,cv2.COLOR_BGR2RGB)
                img=cv2.resize(img,(224,224))
                imagenames.append(image)
                m_class.append(subdir)
                img=preprocess_input(img)
                imagesarray.append(img)
        classes = _parms['classes']
        accpreds=0
        # Predict one image at a time and count label matches (subdir name == class index).
        for i,m_c in zip(range(len(imagenames)),m_class):
            cruentimg=imagesarray[i][np.newaxis,:,:,:]
            print(cruentimg.shape)
            results = model.predict(imagesarray[i][np.newaxis,:,:,:])
            _cls=np.argmax(results,axis=1)
            print(m_c)
            print(_cls[0])
            if(_cls[0]==int(m_c)):
                accpreds=accpreds+1
            max_prob = results[0][_cls]
            print('[demo] %s: %s (%.2f)' % (str(imagenames[i]), str(_cls), max_prob))
        print(accpreds)
        print(accpreds/len(m_class))
        # NOTE(review): the line below passes the value as a second print() argument
        # instead of %-formatting it, so the literal "(%.3f)" is printed -- likely a bug.
        print("total acc :(%.3f)", float(accpreds/len(m_class)))
        t0 = print_time(t0, 'predict')
# Script entry point: parse CLI arguments and run training or prediction.
if __name__ == '__main__':
    main()
| [
"os.listdir",
"cv2.imread",
"json.JSONEncoder.default",
"argparse.ArgumentParser",
"model.mobilenet.MobileNet",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"keras.callbacks.TensorBoard",
"keras.optimizers.SGD",
"time.time",
"json.load",
"cv2.resize",
"sys.path.append",
... | [((212, 233), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (227, 233), False, 'import sys\n'), ((2294, 2305), 'time.time', 'time.time', ([], {}), '()\n', (2303, 2305), False, 'import time\n'), ((2333, 2390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MobileNet example."""'}), "(description='MobileNet example.')\n", (2356, 2390), False, 'import argparse\n'), ((4073, 4116), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'args.lr', 'decay': '(0.0001)', 'momentum': '(0.9)'}), '(lr=args.lr, decay=0.0001, momentum=0.9)\n', (4076, 4116), False, 'from keras.optimizers import SGD\n'), ((4189, 4200), 'time.time', 'time.time', ([], {}), '()\n', (4198, 4200), False, 'import time\n'), ((1532, 1552), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1541, 1552), False, 'import json\n'), ((1750, 1785), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1774, 1785), False, 'import json\n'), ((2027, 2073), 'json.dump', 'json.dump', (['data', 'fp'], {'cls': 'NumpyAwareJSONEncoder'}), '(data, fp, cls=NumpyAwareJSONEncoder)\n', (2036, 2073), False, 'import json\n'), ((4254, 4377), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input_aug', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(preprocessing_function=preprocess_input_aug, shear_range\n =0.2, zoom_range=0.2, horizontal_flip=True)\n', (4272, 4377), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4584, 4643), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (4602, 4643), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((5413, 5510), 'model.mobilenet.MobileNet', 'MobileNet', ([], {'input_shape': '(args.height, args.width, args.channels)', 'alpha': 
'(0.25)', 'classes': 'nb_class'}), '(input_shape=(args.height, args.width, args.channels), alpha=0.25,\n classes=nb_class)\n', (5422, 5510), False, 'from model.mobilenet import MobileNet\n'), ((5822, 5910), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./logs"""', 'write_images': '(True)', 'histogram_freq': '(0)', 'write_graph': '(True)'}), "(log_dir='./logs', write_images=True, histogram_freq=0,\n write_graph=True)\n", (5833, 5910), False, 'from keras.callbacks import TensorBoard, SGDLearningRateTracker\n'), ((741, 773), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (751, 773), False, 'import cv2\n'), ((6941, 7031), 'model.mobilenet.MobileNet', 'MobileNet', ([], {'input_shape': '(args.height, args.width, args.channels)', 'alpha': '(0.25)', 'classes': '(2)'}), '(input_shape=(args.height, args.width, args.channels), alpha=0.25,\n classes=2)\n', (6950, 7031), False, 'from model.mobilenet import MobileNet\n'), ((7282, 7306), 'os.listdir', 'os.listdir', (['args.valpath'], {}), '(args.valpath)\n', (7292, 7306), False, 'import os\n'), ((8086, 8112), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (8095, 8112), True, 'import numpy as np\n'), ((642, 674), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (652, 674), False, 'import cv2\n'), ((2260, 2271), 'time.time', 'time.time', ([], {}), '()\n', (2269, 2271), False, 'import time\n'), ((6228, 6268), 'keras.callbacks.SGDLearningRateTracker', 'SGDLearningRateTracker', (['"""./learning.txt"""'], {}), "('./learning.txt')\n", (6250, 6268), False, 'from keras.callbacks import TensorBoard, SGDLearningRateTracker\n'), ((7524, 7596), 'cv2.imread', 'cv2.imread', (["(args.valpath + '/' + subdir + '/' + image)", 'cv2.COLOR_BGR2RGB'], {}), "(args.valpath + '/' + subdir + '/' + image, cv2.COLOR_BGR2RGB)\n", (7534, 7596), False, 'import cv2\n'), ((7608, 7635), 'cv2.resize', 'cv2.resize', (['img', 
'(224, 224)'], {}), '(img, (224, 224))\n', (7618, 7635), False, 'import cv2\n')] |
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer
from sklearn.svm import SVR
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"  # top-level output folder
FIGURE_ID = "Results/FigureFiles"  # saved plots go here
DATA_ID = "DataFiles/"  # NOTE(review): not nested under Results/ — confirm intended location
# Create output folders on first run (guards make this idempotent).
if not os.path.exists(PROJECT_ROOT_DIR):
    os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
    os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
    os.makedirs(DATA_ID)
def image_path(fig_id):
    """Return the path of figure file *fig_id* inside the FIGURE_ID folder."""
    figure_dir = FIGURE_ID
    return os.path.join(figure_dir, fig_id)
def data_path(dat_id):
    """Return the path of data file *dat_id* inside the DATA_ID folder."""
    data_dir = DATA_ID
    return os.path.join(data_dir, dat_id)
def save_fig(fig_id):
    """Write the current matplotlib figure to the FIGURE_ID folder as a PNG."""
    out_file = image_path(fig_id) + ".png"
    plt.savefig(out_file, format='png')
def FrankeFunction(x, y):
    """Evaluate the Franke test surface (a weighted sum of four Gaussians) at (x, y)."""
    peak_a = 0.75 * np.exp(-(0.25 * (9 * x - 2) ** 2) - 0.25 * ((9 * y - 2) ** 2))
    peak_b = 0.75 * np.exp(-((9 * x + 1) ** 2) / 49.0 - 0.1 * (9 * y + 1))
    peak_c = 0.5 * np.exp(-(9 * x - 7) ** 2 / 4.0 - 0.25 * ((9 * y - 3) ** 2))
    dip = -0.2 * np.exp(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)
    return peak_a + peak_b + peak_c + dip
def create_X(x, y, n):
    """Build the 2-D polynomial design matrix of total degree *n* for points (x, y).

    Columns are ordered 1, x, y, x^2, xy, y^2, ... (degree-major, then
    descending power of x within each degree).
    """
    if len(x.shape) > 1:
        x = np.ravel(x)
        y = np.ravel(y)
    num_points = len(x)
    num_terms = int((n + 1) * (n + 2) / 2)  # number of monomials up to degree n
    design = np.ones((num_points, num_terms))
    col = 1
    for degree in range(1, n + 1):
        for k in range(degree + 1):
            design[:, col] = (x ** (degree - k)) * (y ** k)
            col += 1
    return design
# Making meshgrid of datapoints and compute Franke's function
n = 5  # polynomial degree of the design matrix
N = 1000  # number of sample points
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)  # noise-free targets
X = create_X(x, y, n=n)
X_train, X_test, y_train, y_test = train_test_split(X,z,test_size=0.2)
# Baseline: fit an RBF SVR on the raw (unscaled) polynomial features.
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train, y_train)
# The mean squared error and R2 score
print("MSE before scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test), y_test)))
print("R2 score before scaling {:.2f}".format(svm.score(X_test,y_test)))
# Standardize features using statistics from the training split only.
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("Feature min values before scaling:\n {}".format(X_train.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train.max(axis=0)))
print("Feature min values after scaling:\n {}".format(X_train_scaled.min(axis=0)))
print("Feature max values after scaling:\n {}".format(X_train_scaled.max(axis=0)))
# Refit the same SVR on standardized features to compare performance.
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train_scaled, y_train)
print("MSE after scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test_scaled), y_test)))
print("Test set accuracy scaled data: {:.2f}".format(svm.score(X_test_scaled,y_test)))
| [
"os.path.exists",
"numpy.ones",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"numpy.exp",
"os.mkdir",
"numpy.random.uniform",
"numpy.ravel",
"sklearn.svm.SVR"
] | [((1693, 1730), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'z'], {'test_size': '(0.2)'}), '(X, z, test_size=0.2)\n', (1709, 1730), False, 'from sklearn.model_selection import train_test_split\n'), ((1737, 1762), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""auto"""', 'C': '(10.0)'}), "(gamma='auto', C=10.0)\n", (1740, 1762), False, 'from sklearn.svm import SVR\n'), ((2003, 2019), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2017, 2019), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer\n'), ((2455, 2480), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""auto"""', 'C': '(10.0)'}), "(gamma='auto', C=10.0)\n", (2458, 2480), False, 'from sklearn.svm import SVR\n'), ((476, 508), 'os.path.exists', 'os.path.exists', (['PROJECT_ROOT_DIR'], {}), '(PROJECT_ROOT_DIR)\n', (490, 508), False, 'import os\n'), ((514, 540), 'os.mkdir', 'os.mkdir', (['PROJECT_ROOT_DIR'], {}), '(PROJECT_ROOT_DIR)\n', (522, 540), False, 'import os\n'), ((549, 574), 'os.path.exists', 'os.path.exists', (['FIGURE_ID'], {}), '(FIGURE_ID)\n', (563, 574), False, 'import os\n'), ((580, 602), 'os.makedirs', 'os.makedirs', (['FIGURE_ID'], {}), '(FIGURE_ID)\n', (591, 602), False, 'import os\n'), ((611, 634), 'os.path.exists', 'os.path.exists', (['DATA_ID'], {}), '(DATA_ID)\n', (625, 634), False, 'import os\n'), ((640, 660), 'os.makedirs', 'os.makedirs', (['DATA_ID'], {}), '(DATA_ID)\n', (651, 660), False, 'import os\n'), ((697, 728), 'os.path.join', 'os.path.join', (['FIGURE_ID', 'fig_id'], {}), '(FIGURE_ID, fig_id)\n', (709, 728), False, 'import os\n'), ((764, 793), 'os.path.join', 'os.path.join', (['DATA_ID', 'dat_id'], {}), '(DATA_ID, dat_id)\n', (776, 793), False, 'import os\n'), ((1317, 1332), 'numpy.ones', 'np.ones', (['(N, l)'], {}), '((N, l))\n', (1324, 1332), True, 'import numpy as np\n'), ((1537, 1563), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1554, 1563), True, 
'import numpy as np\n'), ((1577, 1603), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1594, 1603), True, 'import numpy as np\n'), ((917, 977), 'numpy.exp', 'np.exp', (['(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)'], {}), '(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)\n', (923, 977), True, 'import numpy as np\n'), ((978, 1030), 'numpy.exp', 'np.exp', (['(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))'], {}), '(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))\n', (984, 1030), True, 'import numpy as np\n'), ((1032, 1089), 'numpy.exp', 'np.exp', (['(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)'], {}), '(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)\n', (1038, 1089), True, 'import numpy as np\n'), ((1090, 1134), 'numpy.exp', 'np.exp', (['(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)'], {}), '(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)\n', (1096, 1134), True, 'import numpy as np\n'), ((1215, 1226), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (1223, 1226), True, 'import numpy as np\n'), ((1233, 1244), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (1241, 1244), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# <NAME>
# 7 April 2017
#
# Pacific Biosciences
# Applications Lab
###########################################################################################################
# import libraries
from argparse import ArgumentParser
import csv
import numpy as np
import subprocess
import string
###########################################################################################################
# define command line arguments, program description, and help
# Command-line interface: a single positional argument naming a text file
# that lists one .coords file per line.
desc ='''Identify nested haplotigs.'''
parser = ArgumentParser(description=desc)
parser.add_argument("fileList", help="list of coords files")
args = parser.parse_args()
###########################################################################################################
# get filenames
inFiles = args.fileList
###########################################################################################################
# define functions
def file2list(textFile):
    """Return the lines of *textFile* as a list, trailing newlines stripped.

    Fix: the original iterated over a bare ``open(textFile)`` and never
    closed the handle; use a context manager so it is released
    deterministically.
    """
    with open(textFile) as fh:
        return [line.rstrip('\n') for line in fh]
def file_len(fname):
    """Count newline characters in *fname* (same semantics as ``wc -l``).

    Fix: the original spawned a ``wc -l`` subprocess, which is slow and
    non-portable (fails where ``wc`` is absent, e.g. Windows). Count b'\\n'
    bytes directly in chunks instead. A missing/unreadable file raises
    OSError (== IOError in Python 3), matching the original's error type.
    """
    count = 0
    with open(fname, 'rb') as fh:
        # Read in 64 KiB chunks so arbitrarily large files stay cheap on memory.
        for chunk in iter(lambda: fh.read(65536), b''):
            count += chunk.count(b'\n')
    return count
def bed(file):
    """Summarise a nucmer .coords file as [primary_contig, start, end, haplotig].

    Skips the 4-line header, then takes the haplotig name (column 11), the
    primary contig name (column 10), and the min/max of the first two
    coordinate columns across all alignment rows.

    Returns None (implicitly) when the file has fewer than 5 lines, i.e. a
    header with no alignment rows — callers must handle that.

    Fix: the original passed an unclosed ``open(file, "rb")`` handle to
    np.loadtxt; open it in a ``with`` block so it is always closed.
    """
    if file_len(file) >= 5:
        output=['primaryContig','pstart', 'pend','haplotig']
        with open(file, "rb") as fh:
            d = np.loadtxt(fh, skiprows=4, dtype="str", ndmin=2)
        output[3] = d[0,10] # haplotig
        output[0] = d[0,9] #primary contig
        p = np.array(d[:,[0,1]],dtype=int)
        output[1] = np.amin(p) # pstart
        output[2] = np.amax(p) # pstop
        return output
def nested(a, b):
    """Return 1 if interval a (a[1], a[2]) lies strictly inside interval b, else 0.

    Both arguments are [contig, start, end, haplotig] records as produced by
    bed(); only the start/end coordinates are compared, and all comparisons
    are strict (touching boundaries do not count as nested).
    """
    inside = b[1] < a[1] < b[2] and b[1] < a[2] < b[2]
    return 1 if inside else 0
coordsFileList = file2list(inFiles)
nestedHaplotigs = [0] * len(coordsFileList)  # 1 => haplotig is nested inside another
# Pairwise O(n^2) comparison of every coords file against every other.
# NOTE(review): bed() re-parses each file on every inner iteration; the
# parsed records could be cached once per file for a large speedup.
for i in range(len(coordsFileList)):
    for j in range(i):
        bedi = bed(coordsFileList[i])
        bedj = bed(coordsFileList[j])
        if nested(bedi,bedj):
            nestedHaplotigs[i] = 1
        if nested(bedj,bedi):
            nestedHaplotigs[j] = 1
# ready to write to file
# NOTE(review): this rebinds the name `nested` from the function above to a
# file handle; harmless here because the function is no longer called, but
# fragile if code is later added below. Files are opened in append mode, so
# repeated runs accumulate lines.
nested = open('nested_haplotigs.txt', 'a')
retained = open('retained_contigs_haplotigs.txt', 'a')
for k in range(len(coordsFileList)):
    haplotig = str(coordsFileList[k])
    haplotig = haplotig.replace(".coords","")  # strip extension to recover the contig name
    if nestedHaplotigs[k] == 1:
        nested.write(str(haplotig) + "\n")
    else:
        retained.write(str(haplotig) + "\n")
nested.close()
retained.close()
| [
"numpy.amin",
"argparse.ArgumentParser",
"subprocess.Popen",
"numpy.array",
"numpy.amax"
] | [((539, 571), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (553, 571), False, 'from argparse import ArgumentParser\n'), ((1132, 1222), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', fname]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['wc', '-l', fname], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n", (1148, 1222), False, 'import subprocess\n'), ((1571, 1604), 'numpy.array', 'np.array', (['d[:, [0, 1]]'], {'dtype': 'int'}), '(d[:, [0, 1]], dtype=int)\n', (1579, 1604), True, 'import numpy as np\n'), ((1616, 1626), 'numpy.amin', 'np.amin', (['p'], {}), '(p)\n', (1623, 1626), True, 'import numpy as np\n'), ((1650, 1660), 'numpy.amax', 'np.amax', (['p'], {}), '(p)\n', (1657, 1660), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 21 16:45:24 2021
@author: f004swn
"""
import os
import sys
import os.path as osp
#sys.path.append('psypose/MEVA')
#sys.path.append('psypose/MEVA/meva')
#sys.path.append('psypose/MEVA/meva/cfg')
os.environ['PYOPENGL_PLATFORM'] = 'egl'
import cv2
import time
import torch
import joblib
import shutil
import colorsys
import numpy as np
import atexit
from tqdm import tqdm
from multi_person_tracker import MPT
from torch.utils.data import DataLoader
from psypose.MEVA.meva.lib.meva_model import MEVA_demo
from psypose.MEVA.meva.utils.renderer import Renderer
from psypose.MEVA.meva.dataloaders.inference import Inference
from psypose.MEVA.meva.utils.video_config import update_cfg
from psypose.MEVA.meva.utils.demo_utils import (
convert_crop_cam_to_orig_img,
prepare_rendering_results,
images_to_video
)
from psypose.utils import PSYPOSE_DATA_DIR, video_to_images
out_dir = os.getcwd()
dir_name = osp.dirname(__file__)+'/MEVA/'
def estimate_pose(pose, save_pkl=False, image_folder=None, output_path=None, tracking_method='bbox',
vibe_batch_size=225, tracker_batch_size=12, mesh_out=False, run_smplify=False, render=False, wireframe=False,
sideview=False, display=False, save_obj=False, gpu_id=0, output_folder='MEVA_outputs',
detector='yolo', yolo_img_size=416, exp='train_meva_2', cfg='train_meva_2', num_workers=None):
    """Run multi-person tracking + MEVA 3D pose estimation on pose.vid_path.

    Pipeline: dump video frames to image_folder -> track people with MPT ->
    run the pretrained MEVA model on each tracklet -> optionally render the
    meshes back onto the frames and save a result video.

    Returns a dict keyed by tracklet/person id; each value holds pred_cam,
    orig_cam, pose (SMPL thetas), betas, joints3d, bboxes and frame_ids.

    NOTE(review): save_pkl, tracking_method, mesh_out, run_smplify and exp
    are accepted but never used in the current body — confirm intent.
    """
    #return_dir = os.getcwd()
    #os.chdir('MEVA')
    if not image_folder:
        # Default frame-dump location under the psypose data directory.
        image_folder = osp.join(PSYPOSE_DATA_DIR, pose.vid_name)
    video_file = pose.vid_path
    # setting minimum number of frames to reflect minimum track length to half a second
    MIN_NUM_FRAMES = 25
    #MIN_NUM_FRAMES = round(pose.fps/2)
    if torch.cuda.is_available():
        torch.cuda.set_device(gpu_id)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    if not os.path.isfile(video_file):
        exit(f'Input video \"{video_file}\" does not exist!')
    filename = os.path.splitext(os.path.basename(video_file))[0]
    #output_path = os.path.join(output_folder, filename)
    #os.makedirs(output_path, exist_ok=True)
    # Extract every frame of the video to image_folder and get its geometry.
    image_folder, num_frames, img_shape = video_to_images(video_file, img_folder=image_folder, return_info=True)
    print(f'Input video number of frames {num_frames}')
    orig_height, orig_width = img_shape[:2]
    total_time = time.time()
    # ========= Run tracking ========= #
    #print("\n")
    # run multi object tracker
    mot = MPT(
        device=device,
        batch_size=tracker_batch_size,
        display=display,
        detector_type=detector,
        output_format='dict',
        yolo_img_size=yolo_img_size,
    )
    tracking_results = mot(image_folder)
    # remove tracklets if num_frames is less than MIN_NUM_FRAMES
    for person_id in list(tracking_results.keys()):
        if tracking_results[person_id]['frames'].shape[0] < MIN_NUM_FRAMES:
            del tracking_results[person_id]
    # print('Track lengths: /n')
    # for person_id in list(tracking_results.keys()):
    #     print(str(tracking_results[person_id]['frames'].shape[0]))
    # ========= MEVA Model ========= #
    # NOTE(review): the checkpoint path is hard-coded to 'train_meva_2' and
    # ignores the `exp` parameter — confirm whether `exp` should be used here.
    pretrained_file = PSYPOSE_DATA_DIR.joinpath("meva_data", "results", "meva", "train_meva_2", "model_best.pth.tar")
    config_file = osp.join(dir_name, "meva", "cfg", f"{cfg}.yml")
    cfg = update_cfg(config_file)  # `cfg` is rebound from a name string to a config object here
    model = MEVA_demo(
        n_layers=cfg.MODEL.TGRU.NUM_LAYERS,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        seqlen=cfg.DATASET.SEQLEN,
        hidden_size=cfg.MODEL.TGRU.HIDDEN_SIZE,
        add_linear=cfg.MODEL.TGRU.ADD_LINEAR,
        bidirectional=cfg.MODEL.TGRU.BIDIRECTIONAL,
        use_residual=cfg.MODEL.TGRU.RESIDUAL,
        cfg = cfg.VAE_CFG,
    ).to(device)
    ckpt = torch.load(pretrained_file, map_location=device)
    # print(f'Performance of pretrained model on 3DPW: {ckpt["performance"]}')
    ckpt = ckpt['gen_state_dict']
    model.load_state_dict(ckpt)
    model.eval()
    print(f'\nLoaded pretrained weights from \"{pretrained_file}\"')
    # ========= MEVA Model ========= #
    # ========= Run MEVA on each person ========= #
    bbox_scale = 1.2  # enlarge detector boxes before cropping
    print('\nRunning MEVA on each tracklet...\n', flush=True)
    vibe_time = time.time()
    meva_results = {}
    for person_id in tqdm(list(tracking_results.keys())):
        bboxes = joints2d = None
        bboxes = tracking_results[person_id]['bbox']
        frames = tracking_results[person_id]['frames']
        # if len(frames) < 90:
        #     print(f"!!!tracklet < 90 frames: {len(frames)} frames")
        #     continue
        dataset = Inference(
            image_folder=image_folder,
            frames=frames,
            bboxes=bboxes,
            scale=bbox_scale,
        )
        bboxes = dataset.bboxes
        frames = dataset.frames
        if num_workers==None:
            num_workers=16
        # NOTE(review): `dataloader` is constructed but the loop below iterates
        # dataset.iter_data() instead — confirm whether it is dead code.
        dataloader = DataLoader(dataset, batch_size=vibe_batch_size, num_workers=num_workers, shuffle = False)
        with torch.no_grad():
            pred_cam, pred_pose, pred_betas, pred_joints3d = [], [], [], []
            data_chunks = dataset.iter_data()
            for idx in range(len(data_chunks)):
                batch = data_chunks[idx]
                batch_image = batch['batch'].unsqueeze(0)
                cl = batch['cl']  # (start, end) slice of valid frames within the chunk
                batch_image = batch_image.to(device)
                batch_size, seqlen = batch_image.shape[:2]
                output = model(batch_image)[-1]
                # theta layout: [:3] weak-perspective cam, [3:75] SMPL pose, [75:] betas
                pred_cam.append(output['theta'][0, cl[0]: cl[1], :3])
                pred_pose.append(output['theta'][0,cl[0]: cl[1],3:75])
                pred_betas.append(output['theta'][0, cl[0]: cl[1],75:])
                pred_joints3d.append(output['kp_3d'][0, cl[0]: cl[1]])
            pred_cam = torch.cat(pred_cam, dim=0)
            pred_pose = torch.cat(pred_pose, dim=0)
            pred_betas = torch.cat(pred_betas, dim=0)
            pred_joints3d = torch.cat(pred_joints3d, dim=0)
            del batch_image
        # ========= Save results to a pickle file ========= #
        pred_cam = pred_cam.cpu().numpy()
        pred_pose = pred_pose.cpu().numpy()
        pred_betas = pred_betas.cpu().numpy()
        pred_joints3d = pred_joints3d.cpu().numpy()
        # Convert crop-space camera parameters back to original image space.
        orig_cam = convert_crop_cam_to_orig_img(
            cam=pred_cam,
            bbox=bboxes,
            img_width=orig_width,
            img_height=orig_height
        )
        output_dict = {
            'pred_cam': pred_cam,
            'orig_cam': orig_cam,
            'pose': pred_pose,
            'betas': pred_betas,
            'joints3d': pred_joints3d,
            'bboxes': bboxes,
            'frame_ids': frames,
        }
        meva_results[person_id] = output_dict
    del model
    end = time.time()
    fps = num_frames / (end - vibe_time)
    print(f'VIBE FPS: {fps:.2f}')
    total_time = time.time() - total_time
    print(f'Total time spent: {total_time:.2f} seconds (including model loading time).')
    print(f'Total FPS (including model loading time): {num_frames / total_time:.2f}.')
    # if save_pkl:
    #     print(f'Saving output results to \"{os.path.join(output_path, "meva_output.pkl")}\".')
    #     joblib.dump(meva_results, os.path.join(output_path, "meva_output.pkl"))
    # meva_results = joblib.load(os.path.join(output_path, "meva_output.pkl"))
    #if render_preview or not len(meva_results) == 0:
    if render:
        # ========= Render results as a single video ========= #
        renderer = Renderer(resolution=(orig_width, orig_height), orig_img=True, wireframe=wireframe)
        output_img_folder = f'{image_folder}_output'
        os.makedirs(output_img_folder, exist_ok=True)
        print(f'Rendering output video, writing frames to {output_img_folder}')
        # prepare results for rendering
        frame_results = prepare_rendering_results(meva_results, num_frames)
        # One random (but fixed per person) hue for each tracklet's mesh.
        mesh_color = {k: colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) for k in meva_results.keys()}
        image_file_names = sorted([
            os.path.join(image_folder, x)
            for x in os.listdir(image_folder)
            if x.endswith('.png') or x.endswith('.jpg')
        ])
        for frame_idx in tqdm(range(len(image_file_names))):
            img_fname = image_file_names[frame_idx]
            img = cv2.imread(img_fname)
            # img = np.zeros(img.shape)
            if sideview:
                side_img = np.zeros_like(img)
            for person_id, person_data in frame_results[frame_idx].items():
                frame_verts = person_data['verts']
                frame_cam = person_data['cam']
                mc = mesh_color[person_id]
                mesh_filename = None
                if save_obj:
                    mesh_folder = os.path.join(output_path, 'meshes', f'{person_id:04d}')
                    os.makedirs(mesh_folder, exist_ok=True)
                    mesh_filename = os.path.join(mesh_folder, f'{frame_idx:06d}.obj')
                img = renderer.render(
                    img,
                    frame_verts,
                    cam=frame_cam,
                    color=mc,
                    mesh_filename=mesh_filename,
                )
                # Fixed frontal camera for the side-by-side comparison view.
                frame_cam = np.array([ 0.5, 1., 0, 0])
                if sideview:
                    side_img = renderer.render(
                        side_img,
                        frame_verts,
                        cam=frame_cam,
                        color=mc,
                        mesh_filename=mesh_filename,
                        # angle=270,
                        # axis=[0,1,0],
                    )
            if sideview:
                img = np.concatenate([img, side_img], axis=1)
            cv2.imwrite(os.path.join(output_img_folder, f'{frame_idx:06d}.png'), img)
            if display:
                cv2.imshow('Video', img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        if display:
            cv2.destroyAllWindows()
        # ========= Save rendered video ========= #
        vid_name = os.path.basename(video_file)
        save_name = f'{vid_name.replace(".mp4", "")}_meva_result.mp4'
        save_name = os.path.join(output_path, save_name)
        print(f'Saving result video to {save_name}')
        images_to_video(img_folder=output_img_folder, output_vid_file=save_name)
        shutil.rmtree(output_img_folder)
    def clean_image_folder():
        # Remove the extracted-frames folder even if the process exits abnormally.
        if osp.exists(image_folder) and osp.isdir(image_folder):
            shutil.rmtree(image_folder)
    atexit.register(clean_image_folder)
    shutil.rmtree(image_folder)
    #os.chdir(return_dir)
    print('========FINISHED POSE ESTIMATION========')
    return meva_results
| [
"numpy.random.rand",
"psypose.MEVA.meva.utils.demo_utils.prepare_rendering_results",
"psypose.MEVA.meva.lib.meva_model.MEVA_demo",
"multi_person_tracker.MPT",
"cv2.imshow",
"numpy.array",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"os.path.exists",
"os.listdir",
"psypose.MEVA.meva.utils... | [((963, 974), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (972, 974), False, 'import os\n'), ((986, 1007), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (997, 1007), True, 'import os.path as osp\n'), ((1768, 1793), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1791, 1793), False, 'import torch\n'), ((2233, 2303), 'psypose.utils.video_to_images', 'video_to_images', (['video_file'], {'img_folder': 'image_folder', 'return_info': '(True)'}), '(video_file, img_folder=image_folder, return_info=True)\n', (2248, 2303), False, 'from psypose.utils import PSYPOSE_DATA_DIR, video_to_images\n'), ((2423, 2434), 'time.time', 'time.time', ([], {}), '()\n', (2432, 2434), False, 'import time\n'), ((2541, 2686), 'multi_person_tracker.MPT', 'MPT', ([], {'device': 'device', 'batch_size': 'tracker_batch_size', 'display': 'display', 'detector_type': 'detector', 'output_format': '"""dict"""', 'yolo_img_size': 'yolo_img_size'}), "(device=device, batch_size=tracker_batch_size, display=display,\n detector_type=detector, output_format='dict', yolo_img_size=yolo_img_size)\n", (2544, 2686), False, 'from multi_person_tracker import MPT\n'), ((3241, 3340), 'psypose.utils.PSYPOSE_DATA_DIR.joinpath', 'PSYPOSE_DATA_DIR.joinpath', (['"""meva_data"""', '"""results"""', '"""meva"""', '"""train_meva_2"""', '"""model_best.pth.tar"""'], {}), "('meva_data', 'results', 'meva', 'train_meva_2',\n 'model_best.pth.tar')\n", (3266, 3340), False, 'from psypose.utils import PSYPOSE_DATA_DIR, video_to_images\n'), ((3356, 3403), 'os.path.join', 'osp.join', (['dir_name', '"""meva"""', '"""cfg"""', 'f"""{cfg}.yml"""'], {}), "(dir_name, 'meva', 'cfg', f'{cfg}.yml')\n", (3364, 3403), True, 'import os.path as osp\n'), ((3414, 3437), 'psypose.MEVA.meva.utils.video_config.update_cfg', 'update_cfg', (['config_file'], {}), '(config_file)\n', (3424, 3437), False, 'from psypose.MEVA.meva.utils.video_config import update_cfg\n'), ((3834, 3882), 
'torch.load', 'torch.load', (['pretrained_file'], {'map_location': 'device'}), '(pretrained_file, map_location=device)\n', (3844, 3882), False, 'import torch\n'), ((4310, 4321), 'time.time', 'time.time', ([], {}), '()\n', (4319, 4321), False, 'import time\n'), ((6863, 6874), 'time.time', 'time.time', ([], {}), '()\n', (6872, 6874), False, 'import time\n'), ((10676, 10711), 'atexit.register', 'atexit.register', (['clean_image_folder'], {}), '(clean_image_folder)\n', (10691, 10711), False, 'import atexit\n'), ((10716, 10743), 'shutil.rmtree', 'shutil.rmtree', (['image_folder'], {}), '(image_folder)\n', (10729, 10743), False, 'import shutil\n'), ((1525, 1566), 'os.path.join', 'osp.join', (['PSYPOSE_DATA_DIR', 'pose.vid_name'], {}), '(PSYPOSE_DATA_DIR, pose.vid_name)\n', (1533, 1566), True, 'import os.path as osp\n'), ((1803, 1832), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu_id'], {}), '(gpu_id)\n', (1824, 1832), False, 'import torch\n'), ((1850, 1870), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1862, 1870), False, 'import torch\n'), ((1898, 1917), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1910, 1917), False, 'import torch\n'), ((1931, 1957), 'os.path.isfile', 'os.path.isfile', (['video_file'], {}), '(video_file)\n', (1945, 1957), False, 'import os\n'), ((4684, 4773), 'psypose.MEVA.meva.dataloaders.inference.Inference', 'Inference', ([], {'image_folder': 'image_folder', 'frames': 'frames', 'bboxes': 'bboxes', 'scale': 'bbox_scale'}), '(image_folder=image_folder, frames=frames, bboxes=bboxes, scale=\n bbox_scale)\n', (4693, 4773), False, 'from psypose.MEVA.meva.dataloaders.inference import Inference\n'), ((4973, 5064), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'vibe_batch_size', 'num_workers': 'num_workers', 'shuffle': '(False)'}), '(dataset, batch_size=vibe_batch_size, num_workers=num_workers,\n shuffle=False)\n', (4983, 5064), False, 'from torch.utils.data import 
DataLoader\n'), ((6360, 6466), 'psypose.MEVA.meva.utils.demo_utils.convert_crop_cam_to_orig_img', 'convert_crop_cam_to_orig_img', ([], {'cam': 'pred_cam', 'bbox': 'bboxes', 'img_width': 'orig_width', 'img_height': 'orig_height'}), '(cam=pred_cam, bbox=bboxes, img_width=\n orig_width, img_height=orig_height)\n', (6388, 6466), False, 'from psypose.MEVA.meva.utils.demo_utils import convert_crop_cam_to_orig_img, prepare_rendering_results, images_to_video\n'), ((6968, 6979), 'time.time', 'time.time', ([], {}), '()\n', (6977, 6979), False, 'import time\n'), ((7604, 7691), 'psypose.MEVA.meva.utils.renderer.Renderer', 'Renderer', ([], {'resolution': '(orig_width, orig_height)', 'orig_img': '(True)', 'wireframe': 'wireframe'}), '(resolution=(orig_width, orig_height), orig_img=True, wireframe=\n wireframe)\n', (7612, 7691), False, 'from psypose.MEVA.meva.utils.renderer import Renderer\n'), ((7749, 7794), 'os.makedirs', 'os.makedirs', (['output_img_folder'], {'exist_ok': '(True)'}), '(output_img_folder, exist_ok=True)\n', (7760, 7794), False, 'import os\n'), ((7941, 7992), 'psypose.MEVA.meva.utils.demo_utils.prepare_rendering_results', 'prepare_rendering_results', (['meva_results', 'num_frames'], {}), '(meva_results, num_frames)\n', (7966, 7992), False, 'from psypose.MEVA.meva.utils.demo_utils import convert_crop_cam_to_orig_img, prepare_rendering_results, images_to_video\n'), ((10204, 10232), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (10220, 10232), False, 'import os\n'), ((10323, 10359), 'os.path.join', 'os.path.join', (['output_path', 'save_name'], {}), '(output_path, save_name)\n', (10335, 10359), False, 'import os\n'), ((10421, 10493), 'psypose.MEVA.meva.utils.demo_utils.images_to_video', 'images_to_video', ([], {'img_folder': 'output_img_folder', 'output_vid_file': 'save_name'}), '(img_folder=output_img_folder, output_vid_file=save_name)\n', (10436, 10493), False, 'from psypose.MEVA.meva.utils.demo_utils import 
convert_crop_cam_to_orig_img, prepare_rendering_results, images_to_video\n'), ((10502, 10534), 'shutil.rmtree', 'shutil.rmtree', (['output_img_folder'], {}), '(output_img_folder)\n', (10515, 10534), False, 'import shutil\n'), ((2054, 2082), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (2070, 2082), False, 'import os\n'), ((3450, 3752), 'psypose.MEVA.meva.lib.meva_model.MEVA_demo', 'MEVA_demo', ([], {'n_layers': 'cfg.MODEL.TGRU.NUM_LAYERS', 'batch_size': 'cfg.TRAIN.BATCH_SIZE', 'seqlen': 'cfg.DATASET.SEQLEN', 'hidden_size': 'cfg.MODEL.TGRU.HIDDEN_SIZE', 'add_linear': 'cfg.MODEL.TGRU.ADD_LINEAR', 'bidirectional': 'cfg.MODEL.TGRU.BIDIRECTIONAL', 'use_residual': 'cfg.MODEL.TGRU.RESIDUAL', 'cfg': 'cfg.VAE_CFG'}), '(n_layers=cfg.MODEL.TGRU.NUM_LAYERS, batch_size=cfg.TRAIN.\n BATCH_SIZE, seqlen=cfg.DATASET.SEQLEN, hidden_size=cfg.MODEL.TGRU.\n HIDDEN_SIZE, add_linear=cfg.MODEL.TGRU.ADD_LINEAR, bidirectional=cfg.\n MODEL.TGRU.BIDIRECTIONAL, use_residual=cfg.MODEL.TGRU.RESIDUAL, cfg=cfg\n .VAE_CFG)\n', (3459, 3752), False, 'from psypose.MEVA.meva.lib.meva_model import MEVA_demo\n'), ((5077, 5092), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5090, 5092), False, 'import torch\n'), ((5870, 5896), 'torch.cat', 'torch.cat', (['pred_cam'], {'dim': '(0)'}), '(pred_cam, dim=0)\n', (5879, 5896), False, 'import torch\n'), ((5921, 5948), 'torch.cat', 'torch.cat', (['pred_pose'], {'dim': '(0)'}), '(pred_pose, dim=0)\n', (5930, 5948), False, 'import torch\n'), ((5974, 6002), 'torch.cat', 'torch.cat', (['pred_betas'], {'dim': '(0)'}), '(pred_betas, dim=0)\n', (5983, 6002), False, 'import torch\n'), ((6031, 6062), 'torch.cat', 'torch.cat', (['pred_joints3d'], {'dim': '(0)'}), '(pred_joints3d, dim=0)\n', (6040, 6062), False, 'import torch\n'), ((8420, 8441), 'cv2.imread', 'cv2.imread', (['img_fname'], {}), '(img_fname)\n', (8430, 8441), False, 'import cv2\n'), ((10108, 10131), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', 
(10129, 10131), False, 'import cv2\n'), ((10577, 10601), 'os.path.exists', 'osp.exists', (['image_folder'], {}), '(image_folder)\n', (10587, 10601), True, 'import os.path as osp\n'), ((10606, 10629), 'os.path.isdir', 'osp.isdir', (['image_folder'], {}), '(image_folder)\n', (10615, 10629), True, 'import os.path as osp\n'), ((10643, 10670), 'shutil.rmtree', 'shutil.rmtree', (['image_folder'], {}), '(image_folder)\n', (10656, 10670), False, 'import shutil\n'), ((8038, 8054), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8052, 8054), True, 'import numpy as np\n'), ((8145, 8174), 'os.path.join', 'os.path.join', (['image_folder', 'x'], {}), '(image_folder, x)\n', (8157, 8174), False, 'import os\n'), ((8535, 8553), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (8548, 8553), True, 'import numpy as np\n'), ((9352, 9378), 'numpy.array', 'np.array', (['[0.5, 1.0, 0, 0]'], {}), '([0.5, 1.0, 0, 0])\n', (9360, 9378), True, 'import numpy as np\n'), ((9802, 9841), 'numpy.concatenate', 'np.concatenate', (['[img, side_img]'], {'axis': '(1)'}), '([img, side_img], axis=1)\n', (9816, 9841), True, 'import numpy as np\n'), ((9867, 9922), 'os.path.join', 'os.path.join', (['output_img_folder', 'f"""{frame_idx:06d}.png"""'], {}), "(output_img_folder, f'{frame_idx:06d}.png')\n", (9879, 9922), False, 'import os\n'), ((9970, 9994), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'img'], {}), "('Video', img)\n", (9980, 9994), False, 'import cv2\n'), ((8196, 8220), 'os.listdir', 'os.listdir', (['image_folder'], {}), '(image_folder)\n', (8206, 8220), False, 'import os\n'), ((8875, 8930), 'os.path.join', 'os.path.join', (['output_path', '"""meshes"""', 'f"""{person_id:04d}"""'], {}), "(output_path, 'meshes', f'{person_id:04d}')\n", (8887, 8930), False, 'import os\n'), ((8951, 8990), 'os.makedirs', 'os.makedirs', (['mesh_folder'], {'exist_ok': '(True)'}), '(mesh_folder, exist_ok=True)\n', (8962, 8990), False, 'import os\n'), ((9027, 9076), 'os.path.join', 'os.path.join', 
(['mesh_folder', 'f"""{frame_idx:06d}.obj"""'], {}), "(mesh_folder, f'{frame_idx:06d}.obj')\n", (9039, 9076), False, 'import os\n'), ((10014, 10028), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10025, 10028), False, 'import cv2\n')] |
from sklearn.neural_network import MLPClassifier
import numpy as np
#import elice_utils
def main():
    """Load the demo dataset, fit the MLP classifier, and report its accuracy."""
    # Swap in a different case_*.txt file to experiment with other grids.
    features, labels = read_data('case_2.txt')
    classifier = train_MLP_classifier(features, labels)
    report_clf_stats(classifier, features, labels)
def train_MLP_classifier(X, Y):
    """Fit an MLP with six hidden layers of 1000 units each and return it."""
    # Adjust the layer count/width here to explore different topologies.
    topology = (1000,) * 6
    model = MLPClassifier(hidden_layer_sizes=topology)
    model.fit(X, Y)
    return model
def report_clf_stats(clf, X, Y):
    """Print the accuracy of *clf* on the labelled set (X, Y) as a percentage.

    Fixes: predict all samples in one batched call instead of one
    ``clf.predict([x])`` call per sample (dramatically faster for sklearn
    estimators), and guard against an empty dataset, where the original
    raised ZeroDivisionError.
    """
    if len(Y) == 0:
        print("Accuracy: undefined (empty dataset)")
        return
    predictions = clf.predict(X)  # one batched call for the whole set
    hit = sum(1 for pred, true in zip(predictions, Y) if pred == true)
    miss = len(Y) - hit
    print("Accuracy: %.1lf%%" % float(100 * hit / (hit + miss)))
def read_data(filename):
    """Parse a grid file into (X, Y) arrays.

    The first line holds 'N M' (rows, cols); each of the next N lines holds
    M integer labels. X collects [row, col] coordinates, Y the labels.
    """
    coords = []
    labels = []
    with open(filename) as fp:
        n_rows, n_cols = (int(tok) for tok in fp.readline().split())
        for r in range(n_rows):
            row_vals = fp.readline().split()
            for c in range(n_cols):
                coords.append([r, c])
                labels.append(int(row_vals[c]))
    return (np.array(coords), np.array(labels))
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"numpy.array",
"sklearn.neural_network.MLPClassifier"
] | [((335, 405), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(1000, 1000, 1000, 1000, 1000, 1000)'}), '(hidden_layer_sizes=(1000, 1000, 1000, 1000, 1000, 1000))\n', (348, 405), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1088, 1099), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1096, 1099), True, 'import numpy as np\n'), ((1108, 1119), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1116, 1119), True, 'import numpy as np\n')] |
# Importing Libraries
import pathlib
import warnings
from sklearn.model_selection import train_test_split
from tqdm import notebook
import time
import numpy as np
import pandas as pd
from sklearn.ensemble import *
from sklearn.preprocessing import StandardScaler
import socket
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from scipy.stats import pearsonr
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import shap
import time
from joblib import Parallel, delayed
from sklearn.svm import LinearSVR
import random
import optuna
import mlflow
import sys
import os
from mlflow import log_metric, log_param, log_artifacts
# Ignore warnings (unless the user passed -W on the command line).
if not sys.warnoptions:
    warnings.simplefilter("ignore")
    os.environ["PYTHONWARNINGS"] = "ignore" # Also affect subprocesses
#Calculate regression results
def regressionResult(y_true, predicted):
    """Composite fitness score: 1 - MAE + |Pearson r|. Higher is better."""
    corr_coeff = pearsonr(y_true, predicted)[0]
    abs_err = mean_absolute_error(y_true, predicted)
    return 1 - abs_err + np.abs(corr_coeff)
#Objective function for Bayesian Search
def objective(trial, data, target):
    """Optuna objective: score an SVR pipeline on a 75/25 hold-out split.

    Returns the composite score from regressionResult (maximized by the study).

    Fix: the original built the sampled ``param`` dict but never applied it
    to the pipeline, so every trial trained identical default
    hyperparameters and the Bayesian search was a no-op. Apply the sampled
    values with ``Pipeline.set_params`` before fitting.

    NOTE(review): ML_instances is a module-level registry assumed to map
    "SVR" to a LinearSVR instance (its epsilon/C match the search space and
    the later LinearSVR(**study.best_params) call) — confirm.
    """
    train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25, random_state=42)
    # Sampled search space for the SVR step of the pipeline ('clf__' prefix
    # routes each value to the 'clf' step).
    param = {
        'clf__epsilon': trial.suggest_loguniform('epsilon', 1e-5, 1e-1),
        'clf__C': trial.suggest_loguniform('C', 1e0, 1e4)
    }
    model = Pipeline([('scale', StandardScaler()), ('clf', ML_instances["SVR"])])
    model.set_params(**param)  # the actual fix: use the sampled hyperparameters
    bst = model.fit(train_x, train_y)
    preds = bst.predict(valid_x)
    mae = regressionResult(valid_y, preds)
    return mae
#Importance Scores from SVR
def feature_importance(cc,column):
    """Estimate SVR-based importance of candidate regulators for one target gene.

    Repeatedly subsamples rows and columns of the global ``train_df``
    (``samples`` times), tunes a LinearSVR per subsample via Optuna (using
    ``objective``) and counts, per feature, how often it ranks among the top
    ``rankThreshold`` absolute coefficients.

    NOTE(review): relies on the module-level globals ``samples``,
    ``train_df``, ``fourth_line``, ``tf_list`` and ``notrial`` that are set
    in the ``__main__`` block.

    :param cc: name of the ML method (only used for logging here)
    :param column: target gene/column whose regulators are scored
    :return: DataFrame (indexed "<feature>_<target>") summing, per feature,
             how often it ranked in the top ``rankThreshold``
    """
    print(cc,column)
    flag=0
    noofsamples=samples
    rankThreshold=5
    df_sum=pd.DataFrame()
    for k in range(noofsamples):
        # Fresh random row/column fractions for this subsample.
        rowfrac=random.uniform(0.2, 0.8)
        colfrac=random.uniform(0.2, 0.8)
        if fourth_line !="":
            # A TF list is available: restrict predictors to transcription
            # factors, keeping the target column itself if it is not a TF.
            if(column in tf_list):
                Ntrain_df=train_df[tf_list].copy()
            else:
                Ntrain_df=pd.concat([train_df[tf_list],train_df[column]],axis=1)
        else:
            Ntrain_df=train_df.copy()
        Ntrain_df=Ntrain_df.sample(frac=rowfrac)
        # Subsample the predictor columns but always keep the target column.
        Ntrain_df=pd.concat([Ntrain_df.drop(column, axis = 1).sample(frac=colfrac,axis="columns"),Ntrain_df[column]],axis=1)
        y_train=Ntrain_df[column].to_numpy()
        X_train = Ntrain_df.drop(column, axis = 1).to_numpy()
        New_index=Ntrain_df.drop(column, axis = 1).columns+"_"+column
        optuna.logging.set_verbosity(optuna.logging.WARNING)
        study = optuna.create_study(direction="maximize")
        study.optimize(lambda trial: objective(trial, X_train, y_train), n_trials=notrial, timeout=40,show_progress_bar=False)
        # print(study.best_params)
        dic=study.best_params
        # Refit a LinearSVR with the best hyperparameters on the whole subsample.
        model = make_pipeline(StandardScaler(),LinearSVR(**dic))
        clf = model.fit(X_train, y_train)
        vals = np.abs(clf[1].coef_)
        coeff=pd.DataFrame(vals, index=New_index, columns=['feat_importance'])
        coeff.sort_values(by="feat_importance", inplace=True,ascending= False )
        # Binarize the ranking: top features get 1, the rest 0, then accumulate.
        coeff[0:rankThreshold]=1
        coeff[rankThreshold:len(coeff)]=0
        if flag==0:
            df_sum=coeff.copy()
            flag=1
        else:
            df_sum = df_sum.add( coeff, fill_value=0)
    return df_sum
# %%
#Importance Scores from ETR and RFR
def feature_importance2(cc,column):
    """Compute tree-based importance of candidate regulators for one target gene.

    Fits the global ``ML_instances[cc]`` regressor (RFR or ETR) inside a
    scaling pipeline to predict ``column`` from the other features, then
    extracts both mean |SHAP| values and the model's built-in
    ``feature_importances_``.

    NOTE(review): the output column labels look swapped — the SHAP-derived
    values are stored under 'feat_importance' and the tree
    ``feature_importances_`` under 'shap'.  Downstream code consumes the
    labels consistently (``evalresults`` uses "shap_Proba"), so renaming
    them would change which signal drives the final combination; confirm
    intent before changing.

    :param cc: key into the global ``ML_instances`` dict ("RFR" or "ETR")
    :param column: target gene/column whose regulators are scored
    :return: DataFrame indexed "<feature>_<target>" with both score columns
    """
    print(cc,column)
    y_train=train_df[column].to_numpy()
    if fourth_line !="":
        # A TF list is available: only transcription factors are predictors.
        tftrain_df=train_df[tf_list]
    else:
        tftrain_df=train_df.copy()
    if column in tftrain_df.columns:
        X_train = tftrain_df.drop(column, axis = 1).to_numpy()
        New_index=tftrain_df.drop(column, axis = 1).columns+"_"+column
    else:
        X_train = tftrain_df.to_numpy()
        New_index=tftrain_df.columns+"_"+column
    model = Pipeline([('scale', StandardScaler()),('clf', ML_instances[cc]),])
    clf =model.fit(X_train, y_train)
    explainer = shap.TreeExplainer(clf[1])
    shap_values = explainer.shap_values(X_train,check_additivity=False)
    # Mean absolute SHAP value per feature across all samples.
    vals1 = np.abs(shap_values).mean(0)
    vals2 =clf[1].feature_importances_
    df= pd.concat([pd.DataFrame(vals1, index=New_index, columns=['feat_importance']) , pd.DataFrame(vals2, index=New_index, columns=['shap'])],axis=1)
    return df
#Calculate Classificiation Accuracy
def classificationResult(y, predicted,predicted_proba,Output_file,FileName,MethodName,flag=None):
    """Report AUROC/AUPR of the predicted probabilities and log them to MLflow.

    :param y: binary ground-truth labels
    :param predicted: thresholded predictions (kept for interface
           compatibility; not used by the metrics below)
    :param predicted_proba: predicted probabilities/scores
    :param Output_file: open file handle the results are appended to
    :param FileName: MLflow run name
    :param MethodName: logged as the "Method" parameter in MLflow
    :param flag: if None (the default), the metrics are also printed to the
           console and to ``Output_file``
    :return: arithmetic mean of AUROC and AUPR
    """
    auc_score = round(roc_auc_score(y, predicted_proba), 4)
    aucPR_score = round(average_precision_score(y, predicted_proba), 4)
    if flag is None:
        # Echo the metrics both to the console and to the results file.
        print("AUCROC (%),", round(auc_score, 3))
        print("AUCPR (%),", round(aucPR_score, 3))
        print("Average (%),", round((auc_score + aucPR_score) / 2, 3))
        print("AUCROC (%),", round(auc_score, 3), file=Output_file)
        print("AUCPR (%),", round(aucPR_score, 3), file=Output_file)
        print("Average (%),", round((auc_score + aucPR_score) / 2, 3), file=Output_file)
    mlflow.start_run(run_name=FileName)
    mlflow.log_param("Method", MethodName)
    # BUG FIX: the metric names were swapped (AUPR was logged with the ROC
    # score and AUROC with the PR score); each score now gets its own name.
    log_metric("AUPR", aucPR_score)
    log_metric("AUROC", auc_score)
    log_metric("Average", (auc_score + aucPR_score) / 2)
    mlflow.end_run()
    return (auc_score + aucPR_score) / 2
#Calculate the AUROC and AUPR based on the groudtruth data
def evalresults(groundtruth_path,result_path,Output_file,FileName):
    """Evaluate the combined importance scores against a ground-truth network.

    Loads the per-method score files written earlier (ETR.csv, RFR.csv and,
    if the user chose option 2, SVR.csv), combines their normalized scores
    into one probability, thresholds it at 0.5 and reports AUROC/AUPR via
    ``classificationResult``.

    NOTE(review): depends on the global ``usr_input`` set in ``__main__``.
    NOTE(review): ``fillna(0)`` is applied to "Comb" *after* the threshold
    comparisons, so rows with a missing combined score keep a NaN "predict"
    value — confirm whether those edges should be predicted 0 instead.

    :param groundtruth_path: TSV with columns (regulator, target, label)
    :param result_path: directory containing the per-method score CSVs
    :param Output_file: open file handle the results are appended to
    :param FileName: MLflow run name forwarded to ``classificationResult``
    """
    ground_truth=pd.read_csv(groundtruth_path.strip(),sep='\t',header=None)
    # Index edges as "<regulator>_<target>" to align with the score files.
    new_index=ground_truth[0]+"_"+ground_truth[1]
    ground_truth.index=new_index
    ground_truth=ground_truth.drop([0,1], axis = 1)
    ground_truth=ground_truth.sort_index()
    ground_truth=ground_truth.rename(columns={2: "GroundTruth"})
    ground_truth
    ETR=pd.read_csv("./"+result_path+"/ETR.csv",index_col=0)
    ETR=ETR.sort_index()
    RFR=pd.read_csv("./"+result_path+"/RFR.csv",index_col=0)
    RFR=RFR.sort_index()
    if int(usr_input)==2:
        SVR=pd.read_csv("./"+result_path+"/SVR.csv",index_col=0)
        SVR=SVR.sort_index()
        threshold=0.5
        # Fixed blending weights: RFR is weighted highest, then ETR, then SVR.
        dic={"a":10,"b":5,"c":1}
        print("ShapBasedOnETR+ShapBasedOnRFR+SVR",file=Output_file)
        ground_truth["Comb"]=(dic["a"]*RFR["shap_Proba"]+dic["b"]*ETR["shap_Proba"]+dic["c"]*SVR["SVRProba"])/(dic["a"]+dic["b"]+dic["c"])
        ground_truth.loc[ground_truth["Comb"]>=threshold,"predict"]=1
        ground_truth.loc[ground_truth["Comb"]<threshold,"predict"]=0
        ground_truth["Comb"]=ground_truth["Comb"].fillna(0)
        classificationResult(ground_truth['GroundTruth'].to_numpy(),ground_truth["predict"].to_numpy(), ground_truth["Comb"].to_numpy(),Output_file,FileName,"ShapBasedOnETR+ShapBasedOnRFR+SVR")
        print("*****************************************************************************************\n")
    else:
        threshold=0.5
        print("ShapBasedOnETR+ShapBasedOnRFR",file=Output_file)
        # Option 1: plain average of the two tree-based scores.
        ground_truth["Comb"]=(RFR["shap_Proba"]+ETR["shap_Proba"])/2
        ground_truth.loc[ground_truth["Comb"]>=threshold,"predict"]=1
        ground_truth.loc[ground_truth["Comb"]<threshold,"predict"]=0
        ground_truth["Comb"]=ground_truth["Comb"].fillna(0)
        classificationResult(ground_truth['GroundTruth'].to_numpy(),ground_truth["predict"].to_numpy(), ground_truth["Comb"].to_numpy(),Output_file,FileName,"ShapBasedOnETR+ShapBasedOnRFR")
        print("*****************************************************************************************\n")
if __name__ == "__main__":
# Output Directory
FileName="Output"
# Input file
inputfile='input.txt'
# Test the script for errors
Test=False
# Set parameter based on test flag.
if Test:
samples=10
notrial=10
noofestimator=10
Number_of_processor=1
else:
samples=600
notrial=150
noofestimator=1000
Number_of_processor=1
# Take user choice as input
print("Please select one of the options:")
print("1. ShapBasedOnETR+ShapBasedOnRFR")
print("2. ShapBasedOnETR+ShapBasedOnRFR+SVR")
usr_input = ''
while usr_input not in ['1', '2']:
usr_input = input("Enter Choice: ")
# Ignore wanrings
optuna.logging.set_verbosity(optuna.logging.WARNING)
warnings.filterwarnings("ignore")
#Assign Seed
np.random.seed(100)
# Create output directory and file
result_path="./"+FileName+"/"
pathlib.Path(result_path).mkdir(parents=True, exist_ok=True)
Output_file=open(result_path+FileName+"_Results.txt","w")
t0 = time.time()
host_name = socket.gethostname()
print("Hostname : ",host_name,file=Output_file)
# Read from the file
with open(inputfile) as f:
first_line = f.readline()
second_line = f.readline()
third_line = f.readline()
fourth_line = f.readline()
print("\nFile Location: \n",first_line)
train_df = pd.read_csv(first_line.strip(),sep='\t')
print("train_df Shape:" ,train_df.shape)
train = train_df.values
# print(pd.DataFrame(train).head(5))
if fourth_line !="":
# Read decoy list if exists
decoy=pd.read_csv(fourth_line.strip(),sep='\t')
decoy_list=decoy[decoy.Name.str.startswith('decoy')]["#ID"].tolist()
train_df=train_df[train_df.columns[~train_df.columns.isin(decoy_list)]]
# Read transcription factor list if exist
tf=pd.read_csv(third_line.strip(),sep='\t',header=None)
tf_list=list(np.setdiff1d( tf[0], decoy_list))
# Create ML instances
ML_instances = {}
ML_instances["RFR"] = RandomForestRegressor(n_estimators=noofestimator,random_state=42, n_jobs=Number_of_processor)
ML_instances["ETR"] = ExtraTreesRegressor(n_estimators=noofestimator,random_state=42, n_jobs=Number_of_processor)
ML_instances["SVR"] = LinearSVR(max_iter=noofestimator)
# %%
# Extract Importance scores from RFR and ETR
flag=0
ML=["RFR","ETR"]
for c in ML:
df_feature_importance_all=pd.DataFrame()
print("ML Method Name: ",c)
t = time.time()
results = Parallel(n_jobs=64)(delayed(feature_importance2)(c,column) for column in notebook.tqdm(train_df.columns))
df_feature_importance_all=pd.concat(results)
df_feature_ranking=df_feature_importance_all.copy()
scaler = MinMaxScaler()
scaler.fit(df_feature_ranking["feat_importance"].to_numpy().reshape(-1,1))
z=scaler.transform(df_feature_ranking["feat_importance"].to_numpy().reshape(-1,1))
df_feature_ranking["feat_Proba"]=z
scaler.fit(df_feature_ranking["shap"].to_numpy().reshape(-1,1))
z=scaler.transform(df_feature_ranking["shap"].to_numpy().reshape(-1,1))
df_feature_ranking["shap_Proba"]=z
df_feature_ranking.to_csv(result_path+c+".csv")
print(c+" Time: ", time.time() - t,file=Output_file)
Output_file.flush()
# %%
# Extract Importance scores from SVR
# print(usr_input)
if int(usr_input)==2:
flag2=0
ML=[ "SVR"]
for c in ML:
print("ML Method Name: ",c)
t = time.time()
results = Parallel(n_jobs=64)(delayed(feature_importance)(c,column) for column in notebook.tqdm(train_df.columns))
df_feature_importance_all_SVR=pd.concat(results)
scaler = MinMaxScaler()
scaler.fit(df_feature_importance_all_SVR["feat_importance"].to_numpy().reshape(-1,1))
z=scaler.transform(df_feature_importance_all_SVR["feat_importance"].to_numpy().reshape(-1,1))
df_feature_importance_all_SVR["SVRProba"]=z
df_feature_importance_all_SVR.to_csv(result_path+c+".csv")
print("SVR Time: ", time.time() - t,file=Output_file)
Output_file.flush()
# %%
if int(usr_input)==1:
print("1. ShapBasedOnETR+ShapBasedOnRFR Results:")
else:
print("2. ShapBasedOnETR+ShapBasedOnRFR+SVR Results:")
evalresults(second_line,result_path,Output_file,FileName)
print("Total Time: ", time.time() - t0,file=Output_file)
Output_file.flush()
Output_file.close()
# %%
| [
"pandas.read_csv",
"sklearn.ensemble.ExtraTreesRegressor",
"mlflow.log_param",
"sklearn.metrics.roc_auc_score",
"shap.TreeExplainer",
"scipy.stats.pearsonr",
"tqdm.notebook.tqdm",
"sklearn.ensemble.RandomForestRegressor",
"pathlib.Path",
"mlflow.log_metric",
"mlflow.end_run",
"numpy.random.see... | [((1048, 1079), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1069, 1079), False, 'import warnings\n'), ((1244, 1271), 'scipy.stats.pearsonr', 'pearsonr', (['y_true', 'predicted'], {}), '(y_true, predicted)\n', (1252, 1271), False, 'from scipy.stats import pearsonr\n'), ((1283, 1321), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'predicted'], {}), '(y_true, predicted)\n', (1302, 1321), False, 'from sklearn.metrics import mean_absolute_error\n'), ((1514, 1577), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(data, target, test_size=0.25, random_state=42)\n', (1530, 1577), False, 'from sklearn.model_selection import train_test_split\n'), ((2173, 2187), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2185, 2187), True, 'import pandas as pd\n'), ((4978, 5004), 'shap.TreeExplainer', 'shap.TreeExplainer', (['clf[1]'], {}), '(clf[1])\n', (4996, 5004), False, 'import shap\n'), ((6859, 6916), 'pandas.read_csv', 'pd.read_csv', (["('./' + result_path + '/ETR.csv')"], {'index_col': '(0)'}), "('./' + result_path + '/ETR.csv', index_col=0)\n", (6870, 6916), True, 'import pandas as pd\n'), ((6951, 7008), 'pandas.read_csv', 'pd.read_csv', (["('./' + result_path + '/RFR.csv')"], {'index_col': '(0)'}), "('./' + result_path + '/RFR.csv', index_col=0)\n", (6962, 7008), True, 'import pandas as pd\n'), ((9387, 9439), 'optuna.logging.set_verbosity', 'optuna.logging.set_verbosity', (['optuna.logging.WARNING'], {}), '(optuna.logging.WARNING)\n', (9415, 9439), False, 'import optuna\n'), ((9445, 9478), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (9468, 9478), False, 'import warnings\n'), ((9504, 9523), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (9518, 9523), True, 'import numpy as np\n'), ((9744, 9755), 'time.time', 'time.time', 
([], {}), '()\n', (9753, 9755), False, 'import time\n'), ((9773, 9793), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (9791, 9793), False, 'import socket\n'), ((10808, 10907), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'noofestimator', 'random_state': '(42)', 'n_jobs': 'Number_of_processor'}), '(n_estimators=noofestimator, random_state=42, n_jobs=\n Number_of_processor)\n', (10829, 10907), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((10930, 11027), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {'n_estimators': 'noofestimator', 'random_state': '(42)', 'n_jobs': 'Number_of_processor'}), '(n_estimators=noofestimator, random_state=42, n_jobs=\n Number_of_processor)\n', (10949, 11027), False, 'from sklearn.ensemble import ExtraTreesRegressor\n'), ((11054, 11087), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {'max_iter': 'noofestimator'}), '(max_iter=noofestimator)\n', (11063, 11087), False, 'from sklearn.svm import LinearSVR\n'), ((1344, 1362), 'numpy.abs', 'np.abs', (['pearson[0]'], {}), '(pearson[0])\n', (1350, 1362), True, 'import numpy as np\n'), ((2262, 2286), 'random.uniform', 'random.uniform', (['(0.2)', '(0.8)'], {}), '(0.2, 0.8)\n', (2276, 2286), False, 'import random\n'), ((2312, 2336), 'random.uniform', 'random.uniform', (['(0.2)', '(0.8)'], {}), '(0.2, 0.8)\n', (2326, 2336), False, 'import random\n'), ((3108, 3160), 'optuna.logging.set_verbosity', 'optuna.logging.set_verbosity', (['optuna.logging.WARNING'], {}), '(optuna.logging.WARNING)\n', (3136, 3160), False, 'import optuna\n'), ((3188, 3229), 'optuna.create_study', 'optuna.create_study', ([], {'direction': '"""maximize"""'}), "(direction='maximize')\n", (3207, 3229), False, 'import optuna\n'), ((3604, 3624), 'numpy.abs', 'np.abs', (['clf[1].coef_'], {}), '(clf[1].coef_)\n', (3610, 3624), True, 'import numpy as np\n'), ((3648, 3712), 'pandas.DataFrame', 'pd.DataFrame', (['vals'], {'index': 'New_index', 
'columns': "['feat_importance']"}), "(vals, index=New_index, columns=['feat_importance'])\n", (3660, 3712), True, 'import pandas as pd\n'), ((5522, 5555), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'predicted_proba'], {}), '(y, predicted_proba)\n', (5535, 5555), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((5585, 5628), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y', 'predicted_proba'], {}), '(y, predicted_proba)\n', (5608, 5628), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((6080, 6115), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': 'FileName'}), '(run_name=FileName)\n', (6096, 6115), False, 'import mlflow\n'), ((6125, 6163), 'mlflow.log_param', 'mlflow.log_param', (['"""Method"""', 'MethodName'], {}), "('Method', MethodName)\n", (6141, 6163), False, 'import mlflow\n'), ((6173, 6202), 'mlflow.log_metric', 'log_metric', (['"""AUPR"""', 'auc_score'], {}), "('AUPR', auc_score)\n", (6183, 6202), False, 'from mlflow import log_metric, log_param, log_artifacts\n'), ((6212, 6244), 'mlflow.log_metric', 'log_metric', (['"""AUROC"""', 'aucPR_score'], {}), "('AUROC', aucPR_score)\n", (6222, 6244), False, 'from mlflow import log_metric, log_param, log_artifacts\n'), ((6254, 6306), 'mlflow.log_metric', 'log_metric', (['"""Average"""', '((auc_score + aucPR_score) / 2)'], {}), "('Average', (auc_score + aucPR_score) / 2)\n", (6264, 6306), False, 'from mlflow import log_metric, log_param, log_artifacts\n'), ((6312, 6328), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (6326, 6328), False, 'import mlflow\n'), ((7078, 7135), 'pandas.read_csv', 'pd.read_csv', (["('./' + result_path + '/SVR.csv')"], {'index_col': '(0)'}), "('./' + result_path + '/SVR.csv', index_col=0)\n", (7089, 7135), True, 'import pandas as pd\n'), ((11246, 11260), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11258, 11260), True, 'import pandas as pd\n'), ((11321, 11332), 
'time.time', 'time.time', ([], {}), '()\n', (11330, 11332), False, 'import time\n'), ((11493, 11511), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (11502, 11511), True, 'import pandas as pd\n'), ((11598, 11612), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (11610, 11612), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3494, 3510), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3508, 3510), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3511, 3527), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '(**dic)\n', (3520, 3527), False, 'from sklearn.svm import LinearSVR\n'), ((5106, 5125), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (5112, 5125), True, 'import numpy as np\n'), ((5204, 5269), 'pandas.DataFrame', 'pd.DataFrame', (['vals1'], {'index': 'New_index', 'columns': "['feat_importance']"}), "(vals1, index=New_index, columns=['feat_importance'])\n", (5216, 5269), True, 'import pandas as pd\n'), ((5272, 5326), 'pandas.DataFrame', 'pd.DataFrame', (['vals2'], {'index': 'New_index', 'columns': "['shap']"}), "(vals2, index=New_index, columns=['shap'])\n", (5284, 5326), True, 'import pandas as pd\n'), ((9606, 9631), 'pathlib.Path', 'pathlib.Path', (['result_path'], {}), '(result_path)\n', (9618, 9631), False, 'import pathlib\n'), ((10695, 10726), 'numpy.setdiff1d', 'np.setdiff1d', (['tf[0]', 'decoy_list'], {}), '(tf[0], decoy_list)\n', (10707, 10726), True, 'import numpy as np\n'), ((11352, 11371), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(64)'}), '(n_jobs=64)\n', (11360, 11371), False, 'from joblib import Parallel, delayed\n'), ((12432, 12443), 'time.time', 'time.time', ([], {}), '()\n', (12441, 12443), False, 'import time\n'), ((12629, 12647), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (12638, 12647), True, 'import pandas as pd\n'), ((12671, 12685), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), 
'()\n', (12683, 12685), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((13391, 13402), 'time.time', 'time.time', ([], {}), '()\n', (13400, 13402), False, 'import time\n'), ((1806, 1822), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1820, 1822), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2549, 2605), 'pandas.concat', 'pd.concat', (['[train_df[tf_list], train_df[column]]'], {'axis': '(1)'}), '([train_df[tf_list], train_df[column]], axis=1)\n', (2558, 2605), True, 'import pandas as pd\n'), ((4858, 4874), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4872, 4874), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12120, 12131), 'time.time', 'time.time', ([], {}), '()\n', (12129, 12131), False, 'import time\n'), ((12467, 12486), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(64)'}), '(n_jobs=64)\n', (12475, 12486), False, 'from joblib import Parallel, delayed\n'), ((11372, 11400), 'joblib.delayed', 'delayed', (['feature_importance2'], {}), '(feature_importance2)\n', (11379, 11400), False, 'from joblib import Parallel, delayed\n'), ((11425, 11456), 'tqdm.notebook.tqdm', 'notebook.tqdm', (['train_df.columns'], {}), '(train_df.columns)\n', (11438, 11456), False, 'from tqdm import notebook\n'), ((13054, 13065), 'time.time', 'time.time', ([], {}), '()\n', (13063, 13065), False, 'import time\n'), ((12487, 12514), 'joblib.delayed', 'delayed', (['feature_importance'], {}), '(feature_importance)\n', (12494, 12514), False, 'from joblib import Parallel, delayed\n'), ((12539, 12570), 'tqdm.notebook.tqdm', 'notebook.tqdm', (['train_df.columns'], {}), '(train_df.columns)\n', (12552, 12570), False, 'from tqdm import notebook\n')] |
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from pandapipes import pp_dir
from pandapower.io_utils import JSONSerializableClass
# Prefer the pandaplan logging wrapper when it is installed; fall back to the
# standard library logging module otherwise.
try:
    import pandaplan.core.pplog as logging
except ImportError:
    import logging
logger = logging.getLogger(__name__)
class Fluid(JSONSerializableClass):
    """
    Container for a named fluid and its physical properties.

    A fluid has a name, a fluid type ("gas" or "liquid") and an arbitrary set
    of :class:`FluidProperty` objects (e.g. density, viscosity) stored in
    ``all_properties`` and queried through :meth:`get_property` or the
    dedicated ``get_*`` convenience accessors.
    """

    def __init__(self, name, fluid_type, **kwargs):
        """
        :param name: name of the fluid
        :type name: str
        :param fluid_type: either "gas" or "liquid" (case-insensitive); other
                           values are accepted but trigger a warning
        :type fluid_type: str
        :param kwargs: mapping of property name to :class:`FluidProperty`
        :type kwargs: dict
        """
        super(Fluid, self).__init__()
        self.name = name
        if not isinstance(fluid_type, str) or fluid_type.lower() not in ["gas", "liquid"]:
            logger.warning("The fluid %s has the fluid type %s which might cause problems in the "
                           "pipeflow calculation, as it expects either 'gas' or 'liquid'."
                           % (name, fluid_type))
        self.fluid_type = fluid_type.lower()
        # Convenience flag used by the pipeflow to switch between gas and
        # liquid formulations.
        self.is_gas = self.fluid_type == "gas"
        self.all_properties = kwargs
        for prop_name, prop in self.all_properties.items():
            if not isinstance(prop, FluidProperty):
                logger.warning("The property %s was not defined as a fluid property. This might "
                               "cause problems when trying to ask for values." % prop_name)

    def __repr__(self):
        """
        Definition of fluid representation in the console.

        :return: representation of fluid in the console
        :rtype: str
        """
        r = "Fluid %s (%s) with properties:" % (self.name, self.fluid_type)
        for key in self.all_properties.keys():
            # Strip the "FluidProperty" prefix from the class name for brevity.
            r += "\n   - %s (%s)" % (key, self.all_properties[key].__class__.__name__[13:])
        return r

    def add_property(self, property_name, prop, overwrite=True, warn_on_duplicates=True):
        """
        This function adds a new property.

        :param property_name: Name of the new property
        :type property_name: str
        :param prop: Values for the property, for example a curve or just a constant value
        :type prop: pandapipes.FluidProperty
        :param overwrite: True if existing property with the same name shall be overwritten
        :type overwrite: bool
        :param warn_on_duplicates: True, if a warning of properties with the same name should be
                                   returned
        :type warn_on_duplicates: bool

        :Example:
            >>> fluid.add_property('water_density', pandapipes.FluidPropertyConstant(998.2061),\
                                   overwrite=True, warn_on_duplicates=False)

        """
        if property_name in self.all_properties:
            if warn_on_duplicates:
                ow_string = "It will be overwritten." if overwrite else "It will not be replaced."
                logger.warning("The property %s already exists. %s" % (property_name, ow_string))
            if not overwrite:
                return
        self.all_properties[property_name] = prop

    def get_property(self, property_name, *at_values):
        """
        This function returns the value of the requested property.

        :param property_name: Name of the searched property
        :type property_name: str
        :param at_values: Value for which the property should be returned
        :type at_values:
        :return: Returns property at the certain value
        :rtype: pandapipes.FluidProperty
        """

        if property_name not in self.all_properties:
            raise UserWarning("The property %s was not defined for the fluid %s"
                              % (property_name, self.name))
        return self.all_properties[property_name].get_at_value(*at_values)

    def get_density(self, temperature):
        """
        This function returns the density at a certain temperature.

        :param temperature: Temperature at which the density is queried
        :type temperature: float
        :return: Density at the required temperature

        """

        return self.get_property("density", temperature)

    def get_viscosity(self, temperature):
        """
        This function returns the viscosity at a certain temperature.

        :param temperature: Temperature at which the viscosity is queried
        :type temperature: float
        :return: Viscosity at the required temperature

        """

        return self.get_property("viscosity", temperature)

    def get_heat_capacity(self, temperature):
        """
        This function returns the heat capacity at a certain temperature.

        :param temperature: Temperature at which the heat capacity is queried
        :type temperature: float
        :return: Heat capacity at the required temperature

        """

        return self.get_property("heat_capacity", temperature)

    def get_molar_mass(self):
        """
        This function returns the molar mass.

        :return: molar mass

        """

        return self.get_property("molar_mass")

    def get_compressibility(self, p_bar):
        """
        This function returns the compressibility at a certain pressure.

        :param p_bar: pressure at which the compressibility is queried
        :type p_bar: float or array of floats
        :return: compressibility at the required pressure

        """

        return self.get_property("compressibility", p_bar)

    def get_der_compressibility(self):
        """
        This function returns the derivative of the compressibility with respect to pressure.

        :return: derivative of the compressibility

        """

        return self.get_property("der_compressibility")
class FluidProperty(JSONSerializableClass):
    """
    Abstract base class for all fluid properties.

    Concrete subclasses must override :meth:`get_at_value` and
    :meth:`get_at_integral_value`.
    """

    def __init__(self):
        """Initialize the serializable base class."""
        super().__init__()

    def get_at_value(self, *args):
        """
        Return the property value(s) at the given argument(s).

        :param args: value(s) at which the property is evaluated
        :raises NotImplementedError: always; subclasses must override
        """
        raise NotImplementedError("Please implement a proper fluid property!")

    def get_at_integral_value(self, *args):
        """
        Return the integral of the property between the given limits.

        :param args: limit value(s) for the integration
        :raises NotImplementedError: always; subclasses must override
        """
        raise NotImplementedError("Please implement a proper fluid property!")
class FluidPropertyInterExtra(FluidProperty):
    """
    Fluid property defined by sampled (x, y) points and evaluated by linear
    interpolation, optionally extrapolating outside the sampled range.
    """
    json_excludes = JSONSerializableClass.json_excludes + ["prop_getter"]
    # Maps attributes of the internal interp1d object to its constructor
    # arguments, used for (de)serialization in to_dict/from_dict.
    prop_getter_entries = {"x": "x", "y": "y", "_fill_value_orig": "fill_value"}

    def __init__(self, x_values, y_values, method="interpolate_extrapolate"):
        """
        :param x_values: sampled x-values (e.g. temperature or pressure)
        :param y_values: property values at the sampled x-values
        :param method: "interpolate_extrapolate" to allow extrapolation
                       outside the sampled range; any other value restricts
                       evaluation to the sampled range
        :type method: str
        """
        super(FluidPropertyInterExtra, self).__init__()
        if method.lower() == "interpolate_extrapolate":
            self.prop_getter = interp1d(x_values, y_values, fill_value="extrapolate")
        else:
            self.prop_getter = interp1d(x_values, y_values)

    def get_at_value(self, arg):
        """
        :param arg: one or more x-values for which the y-values of the
                    property are to be returned
        :type arg: float or array
        :return: y-value/s
        :rtype: float, array
        """
        return self.prop_getter(arg)

    def get_at_integral_value(self, upper_limit_arg, lower_limit_arg):
        """
        Approximate the integral of the property between the two limits using
        the trapezoidal rule.

        :param upper_limit_arg: upper integration limit(s)
        :param lower_limit_arg: lower integration limit(s)
        :return: integral between the limits
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["heat_capacity"].get_at_integral_value(t_upper_k, t_lower_k)

        """
        # BUG FIX: the mean previously evaluated the property twice at the
        # upper limit; the trapezoidal rule requires averaging the values at
        # both the upper and the lower limit.
        mean = (self.prop_getter(upper_limit_arg) + self.prop_getter(lower_limit_arg)) / 2
        return mean * (upper_limit_arg - lower_limit_arg)

    @classmethod
    def from_path(cls, path, method="interpolate_extrapolate"):
        """
        Reads a text file with temperature values in the first column and
        property values in the second column.

        :param path: Target path of the txt file
        :type path: str
        :param method: Method with which the values are to be interpolated
        :type method: str
        :return: interpolated values
        :rtype: pandapipes.FluidProperty
        """
        values = np.loadtxt(path)
        return cls(values[:, 0], values[:, 1], method=method)

    def to_dict(self):
        """Serialize, flattening the interp1d state into plain entries."""
        d = super(FluidPropertyInterExtra, self).to_dict()
        d.update({k: self.prop_getter.__dict__[k] for k in self.prop_getter_entries.keys()})
        return d

    @classmethod
    def from_dict(cls, d):
        """Deserialize, rebuilding the interp1d object from its flat entries."""
        obj = JSONSerializableClass.__new__(cls)
        d2 = {cls.prop_getter_entries[k]: v for k, v in d.items()
              if k in cls.prop_getter_entries.keys()}
        d3 = {k: v for k, v in d.items() if k not in cls.prop_getter_entries.keys()}
        d3["prop_getter"] = interp1d(**d2)
        obj.__dict__.update(d3)
        return obj
class FluidPropertyConstant(FluidProperty):
    """
    Fluid property that is independent of any state variable, i.e. a single
    constant value.
    """

    def __init__(self, value, warn_dependent_variables=False):
        """
        :param value: the constant property value
        :param warn_dependent_variables: if True, log a warning whenever the
               property is queried with a state variable it does not depend on
        :type warn_dependent_variables: bool
        """
        super(FluidPropertyConstant, self).__init__()
        self.value = value
        self.warn_dependent_variables = warn_dependent_variables

    def get_at_value(self, *args):
        """
        Return the constant value, broadcast to the shape of the (optional)
        query argument.

        :param args: at most one array-like of query points; the constant is
                     repeated once per query point
        :return: array holding the constant value
        :rtype: numpy.ndarray
        :raises UserWarning: if more than one argument is passed

        :Example:
            >>> heat_capacity = get_fluid(net).all_properties["heat_capacity"].get_at_value(293.15)
        """
        if len(args) > 1:
            raise UserWarning('Please define either none or an array-like argument')
        if not args:
            return np.array([self.value])
        if self.warn_dependent_variables:
            logger.warning('Constant property received several input variables, although it is'
                           'independent of these')
        # Broadcast the constant over the length of the query argument.
        return np.ones(len(args[0])) * np.array([self.value])

    def get_at_integral_value(self, upper_limit_arg, lower_limit_arg):
        """
        Integral of a constant between the limits: value * (upper - lower).

        :param upper_limit_arg: upper integration limit(s)
        :param lower_limit_arg: lower integration limit(s)
        :return: integral between the limits
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["heat_capacity"].get_at_integral_value(t_upper_k, t_lower_k)

        """
        def _scaled(limit):
            # pandas Series are reduced to their underlying numpy values.
            if isinstance(limit, pd.Series):
                return self.value * limit.values
            return self.value * np.array(limit)

        return _scaled(upper_limit_arg) - _scaled(lower_limit_arg)

    @classmethod
    def from_path(cls, path):
        """
        Reads a single constant property value from a text file.

        :param path: path of the txt file
        :type path: str
        :return: constant property
        :rtype: pandapipes.FluidProperty
        """
        return cls(np.loadtxt(path).item())

    @classmethod
    def from_dict(cls, d):
        """Deserialize; older dumps may lack ``warn_dependent_variables``."""
        obj = super().from_dict(d)
        obj.__dict__.setdefault("warn_dependent_variables", False)
        return obj
class FluidPropertyLinear(FluidProperty):
    """
    Fluid property following a linear function: value = offset + slope * x.
    """

    def __init__(self, slope, offset):
        """
        :param slope: slope of the linear function
        :param offset: offset (property value at x = 0)
        """
        super(FluidPropertyLinear, self).__init__()
        self.slope = slope
        self.offset = offset

    def get_at_value(self, arg):
        """
        :param arg: one or more x-values for which the function of the
                    property should be calculated
        :type arg: float or array
        :return: y-value or function values
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["compressibility"].get_at_value(p_bar)

        """
        if isinstance(arg, pd.Series):
            return self.offset + self.slope * arg.values
        else:
            return self.offset + self.slope * np.array(arg)

    def get_at_integral_value(self, upper_limit_arg, lower_limit_arg):
        """
        Analytic integral of the linear function between the two limits:
        offset * x + 0.5 * slope * x**2, evaluated at each limit.

        :param upper_limit_arg: upper integration limit(s)
        :param lower_limit_arg: lower integration limit(s)
        :type upper_limit_arg: float or list-like objects
        :return: integral between the limits
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["heat_capacity"].get_at_integral_value(t_upper_k, t_lower_k)

        """
        # BUG FIX: the non-Series branches previously accessed ``.values`` on
        # plain scalars/arrays, which raised an AttributeError. Both limit
        # types are now normalized to numpy arrays first.
        if isinstance(upper_limit_arg, pd.Series):
            upper = upper_limit_arg.values
        else:
            upper = np.array(upper_limit_arg)
        if isinstance(lower_limit_arg, pd.Series):
            lower = lower_limit_arg.values
        else:
            lower = np.array(lower_limit_arg)
        ul = self.offset * upper + 0.5 * self.slope * np.power(upper, 2)
        ll = self.offset * lower + 0.5 * self.slope * np.power(lower, 2)
        return ul - ll

    @classmethod
    def from_path(cls, path):
        """
        Reads slope and offset of a linear property from a text file.

        :param path: path of the txt file
        :type path: str
        :return: linear property
        :rtype: pandapipes.FluidProperty
        """
        slope, offset = np.loadtxt(path)
        return cls(slope, offset)
class FluidPropertyPolynominal(FluidProperty):
    """
    Fluid property represented by a polynomial fitted to sampled (x, y)
    points; the antiderivative is precomputed for integral queries.
    """

    def __init__(self, x_values, y_values, polynominal_degree):
        """
        :param x_values: sampled x-values
        :param y_values: property values at the sampled x-values
        :param polynominal_degree: degree of the fitted polynomial
        :type polynominal_degree: int
        """
        super(FluidPropertyPolynominal, self).__init__()
        coefficients = np.polyfit(x_values, y_values, polynominal_degree)
        self.prop_getter = np.poly1d(coefficients)
        # Antiderivative, used by get_at_integral_value.
        self.prop_int_getter = np.polyint(self.prop_getter)

    def get_at_value(self, arg):
        """
        Evaluate the fitted polynomial.

        :param arg: one or more x-values at which to evaluate the property
        :type arg: float or list-like objects
        :return: y-value or function values
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["heat_capacity"].get_at_value(t_k)

        """
        return self.prop_getter(arg)

    def get_at_integral_value(self, upper_limit_arg, lower_limit_arg):
        """
        Exact integral via the precomputed antiderivative.

        :param upper_limit_arg: upper integration limit(s)
        :param lower_limit_arg: lower integration limit(s)
        :type upper_limit_arg: float or list-like objects
        :return: integral between the limits
        :rtype: float, array

        :Example:
            >>> comp_fact = get_fluid(net).all_properties["heat_capacity"].get_at_integral_value(t_upper_k, t_lower_k)

        """
        return self.prop_int_getter(upper_limit_arg) - self.prop_int_getter(lower_limit_arg)

    @classmethod
    def from_path(cls, path, polynominal_degree):
        """
        Reads a text file with temperature values in the first column and
        property values in the second column, then fits a polynomial.

        :param path: Target path of the txt file
        :type path: str
        :param polynominal_degree: degree of the polynominal
        :type polynominal_degree: int
        :return: Fluid object
        :rtype: pandapipes.FluidProperty
        """
        data = np.loadtxt(path)
        return cls(data[:, 0], data[:, 1], polynominal_degree)
def create_constant_property(net, property_name, value, overwrite=True, warn_on_duplicates=True):
    """
    Attach a constant-valued property to the net's fluid.

    :param net: network whose fluid receives the property
    :type net: pandapipesNet
    :param property_name: name under which the property is registered
    :type property_name: str
    :param value: constant value of the property
    :type value: float
    :param overwrite: replace an existing property of the same name
    :type overwrite: bool
    :param warn_on_duplicates: warn when a property of the same name exists
    :type warn_on_duplicates: bool
    :return: the created property object
    :rtype: FluidPropertyConstant
    """
    new_property = FluidPropertyConstant(value)
    fluid = get_fluid(net)
    fluid.add_property(property_name, new_property, overwrite=overwrite,
                       warn_on_duplicates=warn_on_duplicates)
    return new_property
def create_linear_property(net, property_name, slope, offset, overwrite=True,
                           warn_on_duplicates=True):
    """
    Attach a linearly varying property to the net's fluid.

    :param net: network whose fluid receives the property
    :type net: pandapipesNet
    :param property_name: name under which the property is registered
    :type property_name: str
    :param slope: slope of the linear correlation
    :type slope: float
    :param offset: offset of the linear function
    :type offset: float
    :param overwrite: replace an existing property of the same name
    :type overwrite: bool
    :param warn_on_duplicates: warn when a property of the same name exists
    :type warn_on_duplicates: bool
    :return: the created property object
    :rtype: FluidPropertyLinear
    """
    new_property = FluidPropertyLinear(slope, offset)
    fluid = get_fluid(net)
    fluid.add_property(property_name, new_property, overwrite=overwrite,
                       warn_on_duplicates=warn_on_duplicates)
    return new_property
def create_constant_fluid(name=None, fluid_type=None, **kwargs):
    """
    Build a Fluid whose properties are all constants.

    :param name: Name of the fluid
    :type name: str
    :param fluid_type: Type of the fluid
    :type fluid_type: str
    :param kwargs: property-name -> constant-value pairs
    :return: Fluid
    :rtype: Fluid
    """
    # wrap every given value in a constant property object
    properties = {str(key): FluidPropertyConstant(val)
                  for key, val in kwargs.items()}
    return Fluid(name=name, fluid_type=fluid_type, **properties)
def call_lib(fluid_name):
    """
    Creates a fluid with default fluid properties.

    :param fluid_name: Fluid which should be used
    :type fluid_name: str
    :return: Fluid - Chosen fluid with default fluid properties
    :rtype: Fluid
    :raises AttributeError: if the fluid is not part of the library
    """
    def interextra_property(prop):
        # inter-/extrapolated property read from the library text files
        return FluidPropertyInterExtra.from_path(
            os.path.join(pp_dir, "properties", fluid_name, prop + ".txt"))

    def constant_property(prop):
        # constant property read from the library text files
        return FluidPropertyConstant.from_path(
            os.path.join(pp_dir, "properties", fluid_name, prop + ".txt"))

    def linear_property(prop):
        # linear property read from the library text files
        return FluidPropertyLinear.from_path(
            os.path.join(pp_dir, "properties", fluid_name, prop + ".txt"))

    liquids = ["water"]
    gases = ["air", "lgas", "hgas", "hydrogen", "methane"]

    if fluid_name == "natural_gas":
        # fixed typo in the logged message ("ambigious" -> "ambiguous")
        logger.error("'natural_gas' is ambiguous. Please choose 'hgas' or 'lgas' "
                     "(high- or low calorific natural gas)")
    if fluid_name not in liquids and fluid_name not in gases:
        raise AttributeError("Fluid '%s' not found in the fluid library. It might not be "
                             "implemented yet." % fluid_name)

    phase = "liquid" if fluid_name in liquids else "gas"
    properties = dict(
        density=interextra_property("density"),
        viscosity=interextra_property("viscosity"),
        heat_capacity=interextra_property("heat_capacity"),
        molar_mass=constant_property("molar_mass"),
        der_compressibility=constant_property("der_compressibility"),
        compressibility=linear_property("compressibility"),
    )
    # heating values are only tabulated for combustible gases
    # (was: `(phase == 'gas') & (fluid_name != 'air')` -- bitwise & on bools)
    if phase == "gas" and fluid_name != "air":
        properties["lhv"] = constant_property("lower_heating_value")
        properties["hhv"] = constant_property("higher_heating_value")
    return Fluid(fluid_name, phase, **properties)
def get_fluid(net):
    """
    Return the fluid stored in the given net.

    :param net: Current network
    :type net: pandapipesNet
    :return: Fluid - Name of the fluid which is used in the current network
    :rtype: Fluid
    :raises AttributeError: if no fluid is defined on the net
    """
    has_fluid = "fluid" in net and net["fluid"] is not None
    if not has_fluid:
        raise AttributeError("There is no fluid defined for the given net!")
    fluid = net["fluid"]
    if isinstance(fluid, Fluid):
        return fluid
    # not a pandapipes Fluid: hand it back anyway, but warn the caller
    logger.warning("The fluid in this net is not of the pandapipes Fluid type. This could lead"
                   " to errors, as some components might depend on this structure")
    return fluid
def _add_fluid_to_net(net, fluid, overwrite=True):
"""
Adds a fluid to a net. If overwrite is False, a warning is printed and the fluid is not set.
:param net: The pandapipes network for which to set fluid
:type net: pandapipesNet
:param fluid: fluid which to insert into the network
:type fluid: Fluid
:param overwrite: If True, an existing fluid will just be overwritten, otherwise a warning is\
printed out and the fluid is not reset.
:type overwrite: bool, default True
:return: No output.
:type: None
"""
if "fluid" in net and net["fluid"] is not None and not overwrite:
fluid_msg = "an existing fluid" if not hasattr(net["fluid"], "name") \
else "the fluid %s" % net["fluid"].name
logger.warning("The fluid %s would replace %s and thus cannot be created. Try to set "
"overwrite to True" % (fluid.name, fluid_msg))
return
if isinstance(fluid, str):
logger.warning("Instead of a pandapipes.Fluid, a string ('%s') was passed to the fluid "
"argument. Internally, it will be passed to call_lib(fluid) to get the "
"respective pandapipes.Fluid." % fluid)
fluid = call_lib(fluid)
net["fluid"] = fluid
| [
"logging.getLogger",
"numpy.polyint",
"pandapower.io_utils.JSONSerializableClass.__new__",
"numpy.polyfit",
"numpy.power",
"os.path.join",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.poly1d",
"numpy.loadtxt"
] | [((537, 564), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (554, 564), False, 'import logging\n'), ((8859, 8875), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (8869, 8875), True, 'import numpy as np\n'), ((9410, 9444), 'pandapower.io_utils.JSONSerializableClass.__new__', 'JSONSerializableClass.__new__', (['cls'], {}), '(cls)\n', (9439, 9444), False, 'from pandapower.io_utils import JSONSerializableClass\n'), ((9678, 9692), 'scipy.interpolate.interp1d', 'interp1d', ([], {}), '(**d2)\n', (9686, 9692), False, 'from scipy.interpolate import interp1d\n'), ((14839, 14855), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (14849, 14855), True, 'import numpy as np\n'), ((15333, 15383), 'numpy.polyfit', 'np.polyfit', (['x_values', 'y_values', 'polynominal_degree'], {}), '(x_values, y_values, polynominal_degree)\n', (15343, 15383), True, 'import numpy as np\n'), ((15411, 15427), 'numpy.poly1d', 'np.poly1d', (['const'], {}), '(const)\n', (15420, 15427), True, 'import numpy as np\n'), ((15459, 15487), 'numpy.polyint', 'np.polyint', (['self.prop_getter'], {}), '(self.prop_getter)\n', (15469, 15487), True, 'import numpy as np\n'), ((17031, 17047), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (17041, 17047), True, 'import numpy as np\n'), ((7237, 7291), 'scipy.interpolate.interp1d', 'interp1d', (['x_values', 'y_values'], {'fill_value': '"""extrapolate"""'}), "(x_values, y_values, fill_value='extrapolate')\n", (7245, 7291), False, 'from scipy.interpolate import interp1d\n'), ((7337, 7365), 'scipy.interpolate.interp1d', 'interp1d', (['x_values', 'y_values'], {}), '(x_values, y_values)\n', (7345, 7365), False, 'from scipy.interpolate import interp1d\n'), ((19913, 19974), 'os.path.join', 'os.path.join', (['pp_dir', '"""properties"""', 'fluid_name', "(prop + '.txt')"], {}), "(pp_dir, 'properties', fluid_name, prop + '.txt')\n", (19925, 19974), False, 'import os\n'), ((20070, 20131), 'os.path.join', 
'os.path.join', (['pp_dir', '"""properties"""', 'fluid_name', "(prop + '.txt')"], {}), "(pp_dir, 'properties', fluid_name, prop + '.txt')\n", (20082, 20131), False, 'import os\n'), ((20223, 20284), 'os.path.join', 'os.path.join', (['pp_dir', '"""properties"""', 'fluid_name', "(prop + '.txt')"], {}), "(pp_dir, 'properties', fluid_name, prop + '.txt')\n", (20235, 20284), False, 'import os\n'), ((10879, 10901), 'numpy.array', 'np.array', (['[self.value]'], {}), '([self.value])\n', (10887, 10901), True, 'import numpy as np\n'), ((11586, 11611), 'numpy.array', 'np.array', (['upper_limit_arg'], {}), '(upper_limit_arg)\n', (11594, 11611), True, 'import numpy as np\n'), ((11760, 11785), 'numpy.array', 'np.array', (['lower_limit_arg'], {}), '(lower_limit_arg)\n', (11768, 11785), True, 'import numpy as np\n'), ((12133, 12149), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (12143, 12149), True, 'import numpy as np\n'), ((10797, 10819), 'numpy.array', 'np.array', (['[self.value]'], {}), '([self.value])\n', (10805, 10819), True, 'import numpy as np\n'), ((13367, 13380), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (13375, 13380), True, 'import numpy as np\n'), ((14021, 14056), 'numpy.power', 'np.power', (['upper_limit_arg.values', '(2)'], {}), '(upper_limit_arg.values, 2)\n', (14029, 14056), True, 'import numpy as np\n'), ((14102, 14127), 'numpy.array', 'np.array', (['upper_limit_arg'], {}), '(upper_limit_arg)\n', (14110, 14127), True, 'import numpy as np\n'), ((14338, 14373), 'numpy.power', 'np.power', (['lower_limit_arg.values', '(2)'], {}), '(lower_limit_arg.values, 2)\n', (14346, 14373), True, 'import numpy as np\n'), ((14419, 14444), 'numpy.array', 'np.array', (['lower_limit_arg'], {}), '(lower_limit_arg)\n', (14427, 14444), True, 'import numpy as np\n'), ((14175, 14210), 'numpy.power', 'np.power', (['upper_limit_arg.values', '(2)'], {}), '(upper_limit_arg.values, 2)\n', (14183, 14210), True, 'import numpy as np\n'), ((14492, 14527), 'numpy.power', 
'np.power', (['lower_limit_arg.values', '(2)'], {}), '(lower_limit_arg.values, 2)\n', (14500, 14527), True, 'import numpy as np\n')] |
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
import math as m
import matplotlib as mlp
# Global matplotlib font configuration applied to every figure created below.
pgf_with_rc_fonts = {
    "font.family": "serif",
    "font.size": 16,
    "legend.fontsize": 16,
    "font.sans-serif": ["DejaVu Sans"],  # use a specific sans-serif font
}
mlp.rcParams.update(pgf_with_rc_fonts)
def estimate_time(x, y, vel_rot=None, vel_walk=None):
    """Estimate a shifted walking-time score for every (x, y) target.

    The robot first rotates towards the target, then walks straight to it.
    The raw time is damped as ``1.5 * t * exp(-0.1 * t)``, capped at 5 and
    shifted by -5, so results lie in [-5, 0] (0 for far-away targets).

    This is a vectorized rewrite of the original nested Python loops; the
    old loops indexed ``total_time[d1, d2]`` with both indices running over
    row counts, which was only correct for square meshgrids.

    :param x: x coordinates [mm] (array, e.g. from np.meshgrid)
    :param y: y coordinates [mm] (same shape as x)
    :param vel_rot: rotation speed [deg/s]; defaults to module-level velRot
    :param vel_walk: walking speed [mm/s]; defaults to module-level velWalk
    :return: array of shifted time estimates, same shape as x
    """
    if vel_rot is None:
        vel_rot = velRot
    if vel_walk is None:
        vel_walk = velWalk
    angle = np.degrees(np.arctan2(y, x))
    rot_time = np.abs(angle / vel_rot)
    # calculate the distance
    distance = np.hypot(x, y)
    total_time = distance / vel_walk + rot_time
    # damped time, then cap at 5 and shift down by 5
    total_time = 1.5 * total_time * np.exp(-0.1 * total_time)
    return np.minimum(total_time, 5.0) - 5.0
if __name__ == "__main__":
    # Constants for robot
    velRot = 60  # degrees per second
    velWalk = 200  # mm per second
    # half-width of the evaluated grid [mm]
    size = 1000
    x_val = np.arange(-size, size, 10)
    y_val = np.arange(-size, size, 10)
    xm, ym = np.meshgrid(x_val, y_val)
    # time estimate for every grid cell
    times = estimate_time(xm, ym)
    # plot
    fig = plt.figure(frameon=False)
    ax = fig.gca()
    ax.set_aspect("equal")
    ax.set_xlabel("x [mm]")
    ax.set_ylabel("y [mm]")
    ax.axis('on')
    ax.set_xlim([-size, size])
    ax.set_ylim([-size, size])
    # put the axes spines through the origin
    ax.spines['left'].set_position(('axes', 0.0))
    ax.spines['bottom'].set_position(('axes', 0.0))
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.get_xaxis().tick_bottom()  # remove unneeded ticks
    ax.get_yaxis().tick_left()
    # filled contour plot of the time estimate, with labelled contour lines
    CS1 = plt.contourf(x_val, y_val, times, 10, alpha=0.5, cmap="coolwarm", frameon=False)
    CS = plt.contour(CS1, levels=CS1.levels)
    plt.clabel(CS, inline=1, fontsize=10)
    plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.contourf",
"matplotlib.rcParams.update",
"numpy.hypot",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.figure",
"numpy.arctan2",
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"math.exp",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((305, 343), 'matplotlib.rcParams.update', 'mlp.rcParams.update', (['pgf_with_rc_fonts'], {}), '(pgf_with_rc_fonts)\n', (324, 343), True, 'import matplotlib as mlp\n'), ((427, 449), 'numpy.abs', 'np.abs', (['(angle / velRot)'], {}), '(angle / velRot)\n', (433, 449), True, 'import numpy as np\n'), ((495, 509), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (503, 509), True, 'import numpy as np\n'), ((1061, 1087), 'numpy.arange', 'np.arange', (['(-size)', 'size', '(10)'], {}), '(-size, size, 10)\n', (1070, 1087), True, 'import numpy as np\n'), ((1100, 1126), 'numpy.arange', 'np.arange', (['(-size)', 'size', '(10)'], {}), '(-size, size, 10)\n', (1109, 1126), True, 'import numpy as np\n'), ((1140, 1165), 'numpy.meshgrid', 'np.meshgrid', (['x_val', 'y_val'], {}), '(x_val, y_val)\n', (1151, 1165), True, 'import numpy as np\n'), ((1223, 1248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (1233, 1248), True, 'from matplotlib import pyplot as plt\n'), ((1717, 1802), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x_val', 'y_val', 'times', '(10)'], {'alpha': '(0.5)', 'cmap': '"""coolwarm"""', 'frameon': '(False)'}), "(x_val, y_val, times, 10, alpha=0.5, cmap='coolwarm', frameon=False\n )\n", (1729, 1802), True, 'from matplotlib import pyplot as plt\n'), ((1808, 1843), 'matplotlib.pyplot.contour', 'plt.contour', (['CS1'], {'levels': 'CS1.levels'}), '(CS1, levels=CS1.levels)\n', (1819, 1843), True, 'from matplotlib import pyplot as plt\n'), ((1848, 1885), 'matplotlib.pyplot.clabel', 'plt.clabel', (['CS'], {'inline': '(1)', 'fontsize': '(10)'}), '(CS, inline=1, fontsize=10)\n', (1858, 1885), True, 'from matplotlib import pyplot as plt\n'), ((1891, 1901), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1899, 1901), True, 'from matplotlib import pyplot as plt\n'), ((394, 410), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (404, 410), True, 'import numpy as np\n'), ((734, 766), 'math.exp', 
'm.exp', (['(-total_time[d1, d2] * 0.1)'], {}), '(-total_time[d1, d2] * 0.1)\n', (739, 766), True, 'import math as m\n')] |
from ctypes import *
import numpy as np
######## Legs collisions
# Init C types and store function call in a vector y of dim (nb_motors + 1)*nb_pairs
def getLegsCollisionsResults(q, cdll_func, nb_motors, nb_pairs, witnessPoints=False):
n_wp = 6 if witnessPoints else 0
ny = (nb_motors+1+n_wp)*nb_pairs
DoubleArrayIn = c_double*nb_motors
DoubleArrayOut = c_double*ny
y = np.zeros(ny).tolist()
q = DoubleArrayIn(*q)
y = DoubleArrayOut(*y)
cdll_func.solo_autocollision_legs_legs_forward_zero(q, y)
return y
# Extract distances from results vector
def getLegsDistances(legsCollResults, nb_motors, nb_pairs, witnessPoints=False):
    """Pull the per-pair distance entries out of a raw legs-collision result."""
    stride = 1 + nb_motors + (6 if witnessPoints else 0)
    return np.array([legsCollResults[pair * stride] for pair in range(nb_pairs)])
# Extract jacobians from results vector
def getLegsJacobians(legsCollResults, nb_motors, nb_pairs, witnessPoints=False):
    """Stack the per-pair jacobian rows from a raw legs-collision result."""
    stride = 1 + nb_motors + (6 if witnessPoints else 0)
    rows = []
    for pair in range(nb_pairs):
        start = pair * stride + 1  # jacobian entries follow the distance
        rows.append(legsCollResults[start:start + nb_motors])
    return np.vstack(rows)
def getLegsWitnessPoints(legsCollResults, nb_motors, nb_pairs):
    """Extract the two 3D witness points stored for each collision pair."""
    stride = 1 + nb_motors + 6
    points = []
    for pair in range(nb_pairs):
        # witness coordinates follow the distance and jacobian entries
        base = pair * stride + 1 + nb_motors
        first = legsCollResults[base:base + 3]
        second = legsCollResults[base + 3:base + 6]
        points.append([first, second])
    return points
######## Shoulders collisions
# Init C types and store function call in a vector y of dim q_dim+1
def getShoulderCollisionsResults(q, cdll_func, q_dim):
    """Evaluate the shoulder collision network on (cos(q), sin(q)) features."""
    InArray = c_double * (2 * q_dim)
    OutArray = c_double * (1 + q_dim)
    features = np.concatenate((np.cos(q), np.sin(q))).tolist()
    x_c = InArray(*features)
    y_c = OutArray(*([0.0] * (1 + q_dim)))
    cdll_func.solo_autocollision_nn_shoulder_forward_zero(x_c, y_c)
    return y_c
# Extract distance from results vector
def getShoulderDistance(shoulderCollResult, offset=0):
    """Return the network's distance output, shifted by an optional offset."""
    raw_distance = np.array(shoulderCollResult[0])
    return raw_distance - offset
# Extract jacobian from results vector
def getShoulderJacobian(shoulderCollResult):
    """Return the jacobian entries (everything after the distance) as an array."""
    jac_entries = shoulderCollResult[1:]
    return np.array(jac_entries)
def getAllShouldersCollisionsResults(q, cdll_func, q_dim=2, offset=0):
    """Evaluate the shoulder collision network for all four shoulders.

    The network is trained for one reference shoulder; the other three are
    mapped onto it by flipping joint signs, and the resulting jacobians are
    flipped back and scattered into full-configuration vectors.
    """
    # per-shoulder sign flips mapping each shoulder onto the reference one
    mirror_signs = [[1, 1, 1], [-1, 1, 1], [1, -1, -1], [-1, -1, -1]]
    all_distances = []
    all_jacobians = []
    for shoulder, signs in enumerate(mirror_signs):
        joint_idx = list(range(3 * shoulder, 3 * shoulder + q_dim))
        mirrored_q = np.array(q[joint_idx].copy())
        for axis in range(min(q_dim, 3)):
            mirrored_q[axis] = signs[axis] * mirrored_q[axis]
        result = getShoulderCollisionsResults(mirrored_q, cdll_func, q_dim)
        jac = np.array(getShoulderJacobian(result))
        # undo the mirroring on the jacobian entries
        for axis in range(min(q_dim, 3)):
            jac[axis] = signs[axis] * jac[axis]
        full_jac = np.zeros(len(q))
        full_jac[joint_idx] = jac
        all_distances.append(getShoulderDistance(result, offset=offset))
        all_jacobians.append(full_jac)
    return np.array(all_distances), np.vstack(all_jacobians)
| [
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.vstack",
"numpy.sin"
] | [((2137, 2169), 'numpy.array', 'np.array', (['shoulderCollResult[1:]'], {}), '(shoulderCollResult[1:])\n', (2145, 2169), True, 'import numpy as np\n'), ((1999, 2030), 'numpy.array', 'np.array', (['shoulderCollResult[0]'], {}), '(shoulderCollResult[0])\n', (2007, 2030), True, 'import numpy as np\n'), ((3187, 3206), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (3195, 3206), True, 'import numpy as np\n'), ((3208, 3228), 'numpy.vstack', 'np.vstack', (['jacobians'], {}), '(jacobians)\n', (3217, 3228), True, 'import numpy as np\n'), ((395, 407), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (403, 407), True, 'import numpy as np\n'), ((1728, 1747), 'numpy.zeros', 'np.zeros', (['(1 + q_dim)'], {}), '(1 + q_dim)\n', (1736, 1747), True, 'import numpy as np\n'), ((1688, 1697), 'numpy.cos', 'np.cos', (['q'], {}), '(q)\n', (1694, 1697), True, 'import numpy as np\n'), ((1699, 1708), 'numpy.sin', 'np.sin', (['q'], {}), '(q)\n', (1705, 1708), True, 'import numpy as np\n')] |
import numpy as np
from numpy import genfromtxt
def splitGlobalTrajectory(globalPoints, stepSize):
    """Discretize a piecewise-linear trajectory into points ~stepSize apart.

    Each segment between consecutive waypoints is sampled with
    ``int(length / stepSize) + 1`` evenly spaced points; shared segment
    endpoints are therefore duplicated, matching the original behaviour.
    The original hard-coded exactly three segments (up / straight / down);
    this version handles any number of waypoints.

    :param globalPoints: waypoints as an (N, 3) matrix/array, one row per point
    :param stepSize: desired spacing between interpolated points
    :return: (M, 3) np.matrix of interpolated [x, y, z] rows
    """
    coords = [[], [], []]  # accumulated x, y and z samples
    n_points = globalPoints.shape[0]
    for seg in range(n_points - 1):
        seg_length = np.linalg.norm(globalPoints[seg] - globalPoints[seg + 1])
        n_inc = int(seg_length / stepSize)
        for axis in range(3):
            coords[axis].append(np.linspace(globalPoints[seg, axis],
                                            globalPoints[seg + 1, axis],
                                            n_inc + 1))
    x, y, z = (np.concatenate(samples) for samples in coords)
    return np.matrix([x, y, z]).T
def addCharacteristics(steps, flyingCharacteristics, characterGain, everyXth):
    """Overlay the recorded flying characteristic onto the trajectory steps.

    The characteristic is tiled periodically over the steps and added with
    gain ``characterGain``. The (currently disabled) ``speed`` switch would
    instead emit differences between consecutive points every ``everyXth``
    sample.
    """
    speed = False
    n_frames = flyingCharacteristics.shape[0]
    n_steps = steps.shape[0]
    blended = []
    previous = 0
    for idx in range(n_steps):
        # idx % n_frames tiles the characteristic over the whole trajectory
        point = steps[idx] + characterGain * flyingCharacteristics[idx % n_frames]
        if speed and idx != 0 and idx % everyXth == 0:
            blended.append(point - previous)
        elif not speed:
            blended.append(point)
        previous = point
    return np.vstack(blended)
if __name__ == "__main__":
    # Init parameters.
    filename = 'data/input/Lost.csv'
    stepSize = 0.03
    characterGain = 0.000  # 0.0 disables the characteristic overlay
    everyXth = 1
    # Create global trajectory points.
    globalPoints = np.matrix([
        [0.0, 0.0, 1.0],
        [0.0, 0.0, 5.0],
        [10.0, 0.0, 5.0],
        [10.0, 0.0, 2.0]])
    # Read flying characteristics (one frame per row, ';'-separated).
    flyingCharacteristics = genfromtxt(filename, delimiter=';')
    # Split global trajectory according to step size.
    steps = splitGlobalTrajectory(globalPoints, stepSize)
    # Add flying characteristics to steps.
    newSteps = addCharacteristics(
        steps, flyingCharacteristics, characterGain, everyXth)
    # Save matrix to txt file.
    np.savetxt('data/matlab/Original.txt', newSteps)
| [
"numpy.linspace",
"numpy.vstack",
"numpy.savetxt",
"numpy.linalg.norm",
"numpy.matrix",
"numpy.genfromtxt"
] | [((127, 176), 'numpy.linalg.norm', 'np.linalg.norm', (['(globalPoints[0] - globalPoints[1])'], {}), '(globalPoints[0] - globalPoints[1])\n', (141, 176), True, 'import numpy as np\n'), ((232, 307), 'numpy.linspace', 'np.linspace', (['globalPoints[0, 0]', 'globalPoints[1, 0]', '(numberOfIncrements + 1)'], {}), '(globalPoints[0, 0], globalPoints[1, 0], numberOfIncrements + 1)\n', (243, 307), True, 'import numpy as np\n'), ((334, 409), 'numpy.linspace', 'np.linspace', (['globalPoints[0, 1]', 'globalPoints[1, 1]', '(numberOfIncrements + 1)'], {}), '(globalPoints[0, 1], globalPoints[1, 1], numberOfIncrements + 1)\n', (345, 409), True, 'import numpy as np\n'), ((436, 511), 'numpy.linspace', 'np.linspace', (['globalPoints[0, 2]', 'globalPoints[1, 2]', '(numberOfIncrements + 1)'], {}), '(globalPoints[0, 2], globalPoints[1, 2], numberOfIncrements + 1)\n', (447, 511), True, 'import numpy as np\n'), ((559, 608), 'numpy.linalg.norm', 'np.linalg.norm', (['(globalPoints[1] - globalPoints[2])'], {}), '(globalPoints[1] - globalPoints[2])\n', (573, 608), True, 'import numpy as np\n'), ((1068, 1117), 'numpy.linalg.norm', 'np.linalg.norm', (['(globalPoints[2] - globalPoints[3])'], {}), '(globalPoints[2] - globalPoints[3])\n', (1082, 1117), True, 'import numpy as np\n'), ((2343, 2362), 'numpy.vstack', 'np.vstack', (['newSteps'], {}), '(newSteps)\n', (2352, 2362), True, 'import numpy as np\n'), ((2574, 2660), 'numpy.matrix', 'np.matrix', (['[[0.0, 0.0, 1.0], [0.0, 0.0, 5.0], [10.0, 0.0, 5.0], [10.0, 0.0, 2.0]]'], {}), '([[0.0, 0.0, 1.0], [0.0, 0.0, 5.0], [10.0, 0.0, 5.0], [10.0, 0.0, \n 2.0]])\n', (2583, 2660), True, 'import numpy as np\n'), ((2753, 2788), 'numpy.genfromtxt', 'genfromtxt', (['filename'], {'delimiter': '""";"""'}), "(filename, delimiter=';')\n", (2763, 2788), False, 'from numpy import genfromtxt\n'), ((3080, 3128), 'numpy.savetxt', 'np.savetxt', (['"""data/matlab/Original.txt"""', 'newSteps'], {}), "('data/matlab/Original.txt', newSteps)\n", (3090, 3128), True, 
'import numpy as np\n'), ((677, 752), 'numpy.linspace', 'np.linspace', (['globalPoints[1, 0]', 'globalPoints[2, 0]', '(numberOfIncrements + 1)'], {}), '(globalPoints[1, 0], globalPoints[2, 0], numberOfIncrements + 1)\n', (688, 752), True, 'import numpy as np\n'), ((806, 881), 'numpy.linspace', 'np.linspace', (['globalPoints[1, 1]', 'globalPoints[2, 1]', '(numberOfIncrements + 1)'], {}), '(globalPoints[1, 1], globalPoints[2, 1], numberOfIncrements + 1)\n', (817, 881), True, 'import numpy as np\n'), ((935, 1010), 'numpy.linspace', 'np.linspace', (['globalPoints[1, 2]', 'globalPoints[2, 2]', '(numberOfIncrements + 1)'], {}), '(globalPoints[1, 2], globalPoints[2, 2], numberOfIncrements + 1)\n', (946, 1010), True, 'import numpy as np\n'), ((1186, 1261), 'numpy.linspace', 'np.linspace', (['globalPoints[2, 0]', 'globalPoints[3, 0]', '(numberOfIncrements + 1)'], {}), '(globalPoints[2, 0], globalPoints[3, 0], numberOfIncrements + 1)\n', (1197, 1261), True, 'import numpy as np\n'), ((1315, 1390), 'numpy.linspace', 'np.linspace', (['globalPoints[2, 1]', 'globalPoints[3, 1]', '(numberOfIncrements + 1)'], {}), '(globalPoints[2, 1], globalPoints[3, 1], numberOfIncrements + 1)\n', (1326, 1390), True, 'import numpy as np\n'), ((1444, 1519), 'numpy.linspace', 'np.linspace', (['globalPoints[2, 2]', 'globalPoints[3, 2]', '(numberOfIncrements + 1)'], {}), '(globalPoints[2, 2], globalPoints[3, 2], numberOfIncrements + 1)\n', (1455, 1519), True, 'import numpy as np\n'), ((1564, 1584), 'numpy.matrix', 'np.matrix', (['[x, y, z]'], {}), '([x, y, z])\n', (1573, 1584), True, 'import numpy as np\n')] |
import pickle
import tensorflow as tf
import numpy as np
from baselines.ddpg.memory import Memory
from baselines.ddpg.ddpg import normalize, denormalize
from baselines.ddpg.models import Discriminator
class Expert:
    """
    Container for expert demonstration data used for imitation-style losses.

    Transitions are stored in a baselines replay ``Memory`` and can be loaded
    either from a pickled episode list (``load_file``) or from a TRPO-style
    trajectory archive (``load_file_trpo``). ``set_tf`` builds the TensorFlow
    losses that compare the learner against the stored expert data.
    """

    def __init__(self, limit, env):
        # limit: maximum number of expert transitions to keep (memory size)
        # env: gym-style environment providing observation/action spaces
        self.limit = limit
        self.env = env
        self.memory = Memory(limit=self.limit,
                             action_shape=self.env.action_space.shape,
                             observation_shape=self.env.observation_space.shape)
        self.file_dir = None

    def load_file(self, file_dir, print_reward=False):
        """
        Load pickled expert episodes into the replay memory.

        The pickle is expected to hold a list of episodes, each a list of
        5-tuples fed straight into ``Memory.append`` -- presumably
        (obs0, action, reward, obs1, terminal); confirm against the data
        producer. At most ``self.limit`` transitions are stored.

        :param file_dir: path of the pickle file
        :param print_reward: if True, print the average episode reward seen
        """
        self.file_dir = file_dir
        expert_file = open(self.file_dir, 'rb')
        expert_data = pickle.load(expert_file)
        expert_file.close()
        k = 0  # number of transitions stored so far
        if print_reward:
            total_rew = 0.
            ep_rew = 0.
            nep = 1.
        for episode_sample in expert_data:
            for step_sample in episode_sample:
                k = k+1
                if k <= self.limit:
                    if print_reward:
                        ep_rew += step_sample[2]
                        if step_sample[4]:  # episode terminated
                            nep += 1
                            total_rew += ep_rew
                            ep_rew = 0
                    self.memory.append(step_sample[0], step_sample[1], step_sample[2], step_sample[3], step_sample[4])
                else:
                    # memory limit reached: report and stop loading
                    if print_reward:
                        print('Successfully loaded expert files, average reward ',total_rew/nep)
                    return
        if print_reward:
            print('Successfully loaded expert files, average reward ',total_rew/nep)

    def load_file_trpo(self, file_dir):
        """
        Load a TRPO-style trajectory archive into the replay memory.

        The file must be ``np.load``-able with "obs"/"acs" arrays of
        per-episode sequences. Rewards and next-observations are stored as 0
        and only the last step of each episode is marked done.

        :param file_dir: path of the .npz/.npy trajectory file
        """
        self.file_dir = file_dir
        traj_data = np.load(file_dir)
        if self.limit is None:
            obs = traj_data["obs"][:]
            acs = traj_data["acs"][:]
        else:
            # NOTE(review): the limit truncates *episodes* here, not steps
            obs = traj_data["obs"][:self.limit]
            acs = traj_data["acs"][:self.limit]
        episode_num = len(acs)
        '''
        step_num = 0
        for i in range(episode_num):
            step_num += len(acs[i])
        print("Total Step is:", step_num, "\nTotal_Episode is:", episode_num)
        '''
        for i in range(episode_num):
            episode_len = len(acs[i])
            for j in range(episode_len):
                done = True if (j == episode_len - 1) else False
                self.memory.append(obs[i][j], acs[i][j], 0., 0., done)

    def sample(self, batch_size):
        """Draw a random batch of expert transitions from the memory."""
        return self.memory.sample(batch_size)

    def set_tf(self, actor, critic, obs0, actions, obs_rms, ret_rms, observation_range, return_range, supervise=False, critic_only=False,
               actor_only=False, both_ours_sup = False, gail = False, pofd = False):
        """
        Build the TF placeholders and losses tying the learner to expert data.

        Exactly one loss flavour is selected by the boolean switches:
        behaviour cloning (``supervise``), margin-style Q losses (default,
        optionally restricted via ``critic_only``/``actor_only``), a
        combination (``both_ours_sup``), or an adversarial discriminator
        (``gail`` / ``pofd``).
        NOTE(review): construction order matters -- actor/critic are
        re-entered with reuse=True on the expert placeholders.
        """
        self.expert_state = tf.placeholder(tf.float32, shape=(None,) + self.env.observation_space.shape,
                                           name='expert_state')
        self.expert_action = tf.placeholder(tf.float32, shape=(None,) + self.env.action_space.shape,
                                            name='expert_action')
        # normalize and clip expert observations the same way as learner obs
        normalized_state = tf.clip_by_value(normalize(self.expert_state, obs_rms),
                                            observation_range[0], observation_range[1])
        expert_actor = actor(normalized_state, reuse=True)
        normalized_q_with_expert_data = critic(normalized_state, self.expert_action, reuse=True)
        normalized_q_with_expert_actor = critic(normalized_state, expert_actor, reuse=True)
        self.Q_with_expert_data = denormalize(
            tf.clip_by_value(normalized_q_with_expert_data, return_range[0], return_range[1]), ret_rms)
        self.Q_with_expert_actor = denormalize(
            tf.clip_by_value(normalized_q_with_expert_actor, return_range[0], return_range[1]), ret_rms)
        if supervise:
            # behaviour cloning: L2 distance between expert and policy actions
            self.actor_loss = tf.nn.l2_loss(self.expert_action-expert_actor)
            self.critic_loss = 0
        else:
            # margin loss: expert actions should not be undervalued by the critic
            self.critic_loss = tf.reduce_mean(tf.nn.relu(self.Q_with_expert_actor - self.Q_with_expert_data))
            self.actor_loss = -tf.reduce_mean(self.Q_with_expert_actor)
        if critic_only:
            self.actor_loss = 0
        if actor_only:
            self.critic_loss = 0
        #self.dist = tf.reduce_mean(self.Q_with_expert_data - self.Q_with_expert_actor)
        if both_ours_sup:
            # combine behaviour cloning with the Q-based actor loss
            self.actor_loss = tf.nn.l2_loss(self.expert_action-expert_actor) - tf.reduce_mean(self.Q_with_expert_actor)
            self.critic_loss = tf.reduce_mean(tf.nn.relu(self.Q_with_expert_actor - self.Q_with_expert_data))
        if gail or pofd:
            # adversarial losses: D is pushed towards 1 on expert data and 0 on
            # generated data; the actor maximizes log D on its own data
            discriminator = Discriminator()
            d_with_expert_data = discriminator(normalized_state, self.expert_action)
            d_with_gen_data = discriminator(obs0, actions, reuse=True)
            self.discriminator_loss = tf.reduce_mean(tf.log(d_with_gen_data))+tf.reduce_mean(tf.log(1-d_with_expert_data))
            self.actor_loss = -tf.reduce_mean(tf.log(d_with_gen_data))
| [
"baselines.ddpg.memory.Memory",
"tensorflow.nn.relu",
"baselines.ddpg.ddpg.normalize",
"tensorflow.placeholder",
"pickle.load",
"tensorflow.nn.l2_loss",
"baselines.ddpg.models.Discriminator",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"numpy.load",
"tensorflow.log"
] | [((325, 447), 'baselines.ddpg.memory.Memory', 'Memory', ([], {'limit': 'self.limit', 'action_shape': 'self.env.action_space.shape', 'observation_shape': 'self.env.observation_space.shape'}), '(limit=self.limit, action_shape=self.env.action_space.shape,\n observation_shape=self.env.observation_space.shape)\n', (331, 447), False, 'from baselines.ddpg.memory import Memory\n'), ((690, 714), 'pickle.load', 'pickle.load', (['expert_file'], {}), '(expert_file)\n', (701, 714), False, 'import pickle\n'), ((1763, 1780), 'numpy.load', 'np.load', (['file_dir'], {}), '(file_dir)\n', (1770, 1780), True, 'import numpy as np\n'), ((2810, 2911), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '((None,) + self.env.observation_space.shape)', 'name': '"""expert_state"""'}), "(tf.float32, shape=(None,) + self.env.observation_space.shape,\n name='expert_state')\n", (2824, 2911), True, 'import tensorflow as tf\n'), ((2980, 3077), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '((None,) + self.env.action_space.shape)', 'name': '"""expert_action"""'}), "(tf.float32, shape=(None,) + self.env.action_space.shape,\n name='expert_action')\n", (2994, 3077), True, 'import tensorflow as tf\n'), ((3162, 3199), 'baselines.ddpg.ddpg.normalize', 'normalize', (['self.expert_state', 'obs_rms'], {}), '(self.expert_state, obs_rms)\n', (3171, 3199), False, 'from baselines.ddpg.ddpg import normalize, denormalize\n'), ((3596, 3681), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['normalized_q_with_expert_data', 'return_range[0]', 'return_range[1]'], {}), '(normalized_q_with_expert_data, return_range[0],\n return_range[1])\n', (3612, 3681), True, 'import tensorflow as tf\n'), ((3748, 3834), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['normalized_q_with_expert_actor', 'return_range[0]', 'return_range[1]'], {}), '(normalized_q_with_expert_actor, return_range[0],\n return_range[1])\n', (3764, 3834), True, 'import tensorflow as tf\n'), ((3893, 3941), 
'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.expert_action - expert_actor)'], {}), '(self.expert_action - expert_actor)\n', (3906, 3941), True, 'import tensorflow as tf\n'), ((4707, 4722), 'baselines.ddpg.models.Discriminator', 'Discriminator', ([], {}), '()\n', (4720, 4722), False, 'from baselines.ddpg.models import Discriminator\n'), ((4033, 4095), 'tensorflow.nn.relu', 'tf.nn.relu', (['(self.Q_with_expert_actor - self.Q_with_expert_data)'], {}), '(self.Q_with_expert_actor - self.Q_with_expert_data)\n', (4043, 4095), True, 'import tensorflow as tf\n'), ((4128, 4168), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.Q_with_expert_actor'], {}), '(self.Q_with_expert_actor)\n', (4142, 4168), True, 'import tensorflow as tf\n'), ((4441, 4489), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.expert_action - expert_actor)'], {}), '(self.expert_action - expert_actor)\n', (4454, 4489), True, 'import tensorflow as tf\n'), ((4490, 4530), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.Q_with_expert_actor'], {}), '(self.Q_with_expert_actor)\n', (4504, 4530), True, 'import tensorflow as tf\n'), ((4577, 4639), 'tensorflow.nn.relu', 'tf.nn.relu', (['(self.Q_with_expert_actor - self.Q_with_expert_data)'], {}), '(self.Q_with_expert_actor - self.Q_with_expert_data)\n', (4587, 4639), True, 'import tensorflow as tf\n'), ((4932, 4955), 'tensorflow.log', 'tf.log', (['d_with_gen_data'], {}), '(d_with_gen_data)\n', (4938, 4955), True, 'import tensorflow as tf\n'), ((4972, 5002), 'tensorflow.log', 'tf.log', (['(1 - d_with_expert_data)'], {}), '(1 - d_with_expert_data)\n', (4978, 5002), True, 'import tensorflow as tf\n'), ((5048, 5071), 'tensorflow.log', 'tf.log', (['d_with_gen_data'], {}), '(d_with_gen_data)\n', (5054, 5071), True, 'import tensorflow as tf\n')] |
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import trapz
from astropy import units as u, constants as c, table as t
from speclite import filters
import astropy.io
from warnings import warn
l_eff_d = {'r': 6166. * u.AA, 'i': 7480. * u.AA, 'z': 8932. * u.AA}
l_wid_d = {'r': 550. * u.AA, 'i': 1300. * u.AA, 'z': 1000. * u.AA}
absmag_sun_band = {'u': 6.39, 'g': 5.12, 'r': 4.64, 'i': 4.53, 'z': 4.51, 'V': 4.81}
# from Conroy
class Spec2PhotWarning(UserWarning):
    """Warning raised when a spectrum had to be zero-padded to cover the
    loaded filter curves, which can produce unreliable magnitudes.

    The previous ``__init__`` only forwarded to ``super().__init__`` and
    was redundant boilerplate; the inherited constructor is identical.
    """
    pass
class Spec2Phot(object):
    '''
    Convert spectra into synthetic photometric (AB) magnitudes.

    Loads a speclite filter family, zero-pads the input spectrum so it
    covers every filter curve, and precomputes the AB magnitude in each
    band; `color` then returns magnitude differences.
    '''
    def __init__(self, lam, flam, family='sdss2010-*', axis=0,
                 redshift=None):
        '''
        Parameters
        ----------
        lam :
            wavelength array of the spectrum
        flam :
            flux-density array whose spectral dimension is `axis`
        family : str
            speclite filter-family pattern passed to `load_filters`
        axis : int
            index of the spectral dimension in `flam`
        redshift :
            if not None, each filter is replaced with a band-shifted
            version (`create_shifted(band_shift=redshift)`)
        '''
        self.filters = filters.load_filters(family)
        if redshift is not None:
            # replace each response curve with its band-shifted version
            self.filters = filters.FilterSequence(
                [f_.create_shifted(band_shift=redshift)
                 for f_ in self.filters])
        else:
            pass
        # spectral dimension has to be the final one
        nl0 = flam.shape[axis]
        flam, self.lam = self.filters.pad_spectrum(
            spectrum=np.moveaxis(flam, axis, -1),
            wavelength=lam, method='zero')
        # move the spectral dimension back to its original position
        self.flam = np.moveaxis(flam, -1, axis)
        # pad_spectrum grows the spectral axis when the input did not cover
        # a filter; zero-padded magnitudes are unreliable, so warn.
        if self.flam.shape[axis] > nl0:
            warn('spectrum has been padded, bad mags possible',
                 Spec2PhotWarning)
        self.ABmags = self.filters.get_ab_magnitudes(
            spectrum=self.flam, wavelength=self.lam,
            axis=axis).as_array()
    def color(self, b1, b2):
        '''Return the color (AB magnitude difference) `b1` - `b2`.'''
        return self.ABmags[b1] - self.ABmags[b2]
def l_eff(lam, band):
    '''
    Calculate the effective (pivot) wavelength of a response function.

    The filter curve is read from 'filters/<band>.res' and evaluated on
    `lam`; wavelengths outside the tabulated range get zero response.
    '''
    # load the tabulated filter response from disk
    resp_tab = t.Table.read('filters/{}.res'.format(band),
                            names=['lam', 'f'], format='ascii')
    # zero-padded interpolator over the tabulated curve
    resp_fn = interp1d(x=resp_tab['lam'].quantity.value,
                       y=resp_tab['f'], fill_value=0.,
                       bounds_error=False)
    resp = resp_fn(lam)
    # pivot wavelength: sqrt( int(R * lam) / int(R / lam) )
    numer = np.trapz(x=lam, y=resp * lam)
    denom = np.trapz(x=lam, y=resp / lam)
    return np.sqrt(numer / denom)
def spec2mag(lam, Flam, band):
    '''
    Convolve a source spectrum with a filter to get a magnitude
    Parameters
    ----------
    lam :
        wavelength array of source spectrum
    Flam :
        Flux density, units sim to erg/s/cm2/AA
    band :
        chosen band
    '''
    # load the filter response and build a zero-padded interpolator
    resp_tab = t.Table.read('filters/{}.res'.format(band),
                            names=['lam', 'f'], format='ascii')
    resp_fn = interp1d(x=resp_tab['lam'].quantity.value,
                       y=resp_tab['f'], fill_value=0.,
                       bounds_error=False)
    # evaluate the response BEFORE converting units (matches how the
    # interpolator was built), then work in Angstroms
    resp = resp_fn(lam)
    lam = lam.to('AA')
    # pivot wavelength of this bandpass
    pivot = l_eff(lam=lam, band=band)
    # bandpass-averaged flux density ...
    flam_avg = (np.trapz(x=lam, y=lam * resp * Flam, axis=-1) /
                np.trapz(x=lam, y=resp * lam, axis=-1))
    # ... converted to F_nu at the pivot wavelength
    fnu_avg = (flam_avg * (pivot**2. / c.c)).to('Jy')
    # AB magnitude against the 3631 Jy zero point
    return -2.5 * np.log10((fnu_avg / (3631. * u.Jy)).to('').value)
def lumspec2absmag(lam, Llam, band):
    '''
    Convolve a source (luminosity) spectrum with a filter to get
    an absolute magnitude
    Parameters
    ----------
    lam :
        wavelength array of source spectrum
    Llam :
        Luminosity density, units sim to Lsun/AA
    band :
        chosen band
    '''
    # an absolute magnitude is the apparent magnitude at d = 10 pc, so
    # first turn the luminosity density into a flux density at 10 pc
    flam_10pc = (Llam / (4 * np.pi * (10. * u.pc)**2.)).to('erg s-1 cm-2 AA-1')
    return spec2mag(lam=lam, Flam=flam_10pc, band=band)
def lumspec2lsun(lam, Llam, band):
    '''Convert a luminosity spectrum into band luminosity in solar units.'''
    absmag = lumspec2absmag(lam=lam, Llam=Llam, band=band)
    # L / L_sun follows from the absolute-magnitude offset from the Sun
    return 10.**(-0.4 * (absmag - absmag_sun_band[band]))
def color(hdulist, band1='g', band2='r'):
    '''
    Calculate the color of a MaNGA galaxy, based on two bandpasses
    By convention, C_br = b - r, i.e., band1 - band2
    '''
    # per-pixel flux images for the two bands
    flux1 = hdulist['{}IMG'.format(band1)].data
    flux2 = hdulist['{}IMG'.format(band2)].data
    # magnitude difference between the two images
    return -2.5 * np.log10(flux1 / flux2)
def kcorr(l_o, fl_o, band, z, axis=0):
    '''
    Compute the K-correction of an observed spectrum in one band.

    Parameters
    ----------
    l_o :
        observed-frame wavelength array (astropy Quantity; converted to AA)
    fl_o :
        observed-frame flux density, sampled on `l_o`
    band : str
        bandpass name; response is read from 'filters/{band}_SDSS.res'
    z : float
        redshift of the source
    axis : int
        spectral axis for the trapezoidal integrals

    NOTE(review): this appears to implement the single-band K-correction
    integral (cf. Hogg et al. 2002), with the same filter used in both
    frames -- confirm the intended convention.
    '''
    # read in filter table
    band_tab = t.Table.read('filters/{}_SDSS.res'.format(band),
                            names=['lam', 'f'], format='ascii')
    # set up interpolator (zero response outside the tabulated range)
    band_interp = interp1d(x=band_tab['lam'].quantity.value,
                           y=band_tab['f'], fill_value=0.,
                           bounds_error=False)
    l_o = l_o.to('AA')
    # emitted-frame wavelength grid
    l_e = l_o / (1. + z)
    # filter response sampled in the observed and emitted frames
    R_o = band_interp(l_o)
    R_e = band_interp(l_e)
    # interpolators for the flux density on each wavelength grid
    fl_e_ = interp1d(x=l_e, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')
    fl_o_ = interp1d(x=l_o, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')
    # numerator: observed-frame integral of R * lam * f(lam / (1+z))
    n = np.trapz(x=l_o,
                 y=(R_o * l_o * fl_o_(l_o / (1. + z))),
                 axis=axis)
    # denominator: emitted-frame integral of R * lam * f(lam)
    d = np.trapz(x=l_e,
                 y=(R_e * l_e * fl_e_(l_e)),
                 axis=axis)
    F = n / d
    K_QR = -2.5 * np.log10(F.to('').value / (1. + z))
    return K_QR
# Color -> stellar mass-to-light-ratio conversion coefficients:
# for each color C (first column), one (a, b) pair per band, to be used
# as log10(M/L_band) = a_band + b_band * C.
# NOTE(review): these look like Bell et al. (2003)-style color-M/L
# relations -- confirm the source before relying on the values.
color_ml_conv = '''
C a_g b_g a_r b_r a_i b_i a_z b_z a_J b_J a_H b_H a_K b_K
'ug' -.221 .485 -.099 .345 -.053 .268 -.105 .226 -.128 .169 -.209 .133 -.260 .123
'ur' -.390 .417 -.223 .299 -.151 .233 -.178 .192 -.172 .138 -.237 .104 -.273 .091
'ui' -.375 .359 -.212 .257 -.144 .201 -.171 .165 -.169 .119 -.233 .090 -.267 .077
'uz' -.400 .332 -.232 .239 -.161 .187 -.179 .151 -.163 .105 -.205 .071 -.232 .056
'gr' -.499 1.519 -.306 1.097 -.222 0.864 -.223 .689 -.172 .444 -.189 .266 -.209 .197
'gi' -.379 .914 -.220 .661 -.152 .518 -.175 .421 -.153 .283 -.186 .179 -.211 .137
'gz' -.367 .698 -.215 .508 -.153 .402 -.171 .322 -.097 .175 -.117 .083 -.138 .047
'ri' -.106 1.982 -.022 1.431 .006 1.114 -.052 .923 -.079 .650 -.148 .437 -.186 .349
'rz' -.124 1.067 -.041 .780 -.018 .623 -.041 .463 -.011 .224 -.059 .076 -.092 .019
'BV' -.942 1.737 -.628 1.305 -.520 1.094 -.399 .824 -.261 .433 -.209 .210 -.206 .135
'BR' -.976 1.111 -.633 .816 -.523 .683 -.405 .518 -.289 .297 -.262 .180 -.264 .138
'''
# parse the whitespace-delimited table; quotechar keeps color names intact
C_ML_conv_t = astropy.io.ascii.read(color_ml_conv, guess=True, quotechar="'")
# index by color so rows can be looked up via .loc['gr'] etc.
C_ML_conv_t.add_index('C')
class PowerLaw(object):
    """Normalized power law: y(x) = y0 * (x / x0) ** -n."""

    def __init__(self, x0, y0, n):
        # reference point (x0, y0) and the (negated) exponent n
        self.x0 = x0
        self.y0 = y0
        self.n = n

    def __call__(self, x):
        """Evaluate the power law at `x`."""
        return self.y0 * (x / self.x0)**-self.n
def reddener(c1, c2):
    """Return the change in two colors induced by a power-law attenuation.

    `c1` and `c2` are band-name pairs forwarded to `Spec2Phot.color`.
    """
    grid = np.linspace(3800., 10500., 10000)
    flat = np.ones_like(grid)
    atten = PowerLaw(5500., 1. / np.e, -0.7)
    # synthetic photometry of the flat (unattenuated) spectrum
    phot_nat = Spec2Phot(lam=grid * u.AA,
                         flam=flat * u.Unit('erg s-1 cm-2 AA-1'))
    c0_nat, c1_nat = phot_nat.color(*c1), phot_nat.color(*c2)
    # ... and of the same spectrum with the attenuation curve applied
    phot_att = Spec2Phot(lam=grid * u.AA,
                         flam=flat * u.Unit('erg s-1 cm-2 AA-1') *
                         atten(grid))
    c0_att, c1_att = phot_att.color(*c1), phot_att.color(*c2)
    return c0_att - c0_nat, c1_att - c1_nat
| [
"numpy.ones_like",
"numpy.trapz",
"numpy.log10",
"astropy.units.Unit",
"speclite.filters.load_filters",
"scipy.interpolate.interp1d",
"numpy.linspace",
"numpy.moveaxis",
"warnings.warn"
] | [((1991, 2090), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': "band_tab['lam'].quantity.value", 'y': "band_tab['f']", 'fill_value': '(0.0)', 'bounds_error': '(False)'}), "(x=band_tab['lam'].quantity.value, y=band_tab['f'], fill_value=0.0,\n bounds_error=False)\n", (1999, 2090), False, 'from scipy.interpolate import interp1d\n'), ((2800, 2899), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': "band_tab['lam'].quantity.value", 'y': "band_tab['f']", 'fill_value': '(0.0)', 'bounds_error': '(False)'}), "(x=band_tab['lam'].quantity.value, y=band_tab['f'], fill_value=0.0,\n bounds_error=False)\n", (2808, 2899), False, 'from scipy.interpolate import interp1d\n'), ((4758, 4857), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': "band_tab['lam'].quantity.value", 'y': "band_tab['f']", 'fill_value': '(0.0)', 'bounds_error': '(False)'}), "(x=band_tab['lam'].quantity.value, y=band_tab['f'], fill_value=0.0,\n bounds_error=False)\n", (4766, 4857), False, 'from scipy.interpolate import interp1d\n'), ((5023, 5092), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': 'l_e', 'y': 'fl_o', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(x=l_e, y=fl_o, bounds_error=False, fill_value='extrapolate')\n", (5031, 5092), False, 'from scipy.interpolate import interp1d\n'), ((5126, 5195), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': 'l_o', 'y': 'fl_o', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(x=l_o, y=fl_o, bounds_error=False, fill_value='extrapolate')\n", (5134, 5195), False, 'from scipy.interpolate import interp1d\n'), ((7005, 7040), 'numpy.linspace', 'np.linspace', (['(3800.0)', '(10500.0)', '(10000)'], {}), '(3800.0, 10500.0, 10000)\n', (7016, 7040), True, 'import numpy as np\n'), ((7050, 7067), 'numpy.ones_like', 'np.ones_like', (['lam'], {}), '(lam)\n', (7062, 7067), True, 'import numpy as np\n'), ((806, 834), 'speclite.filters.load_filters', 'filters.load_filters', (['family'], {}), '(family)\n', (826, 834), False, 
'from speclite import filters\n'), ((1298, 1325), 'numpy.moveaxis', 'np.moveaxis', (['flam', '(-1)', 'axis'], {}), '(flam, -1, axis)\n', (1309, 1325), True, 'import numpy as np\n'), ((3178, 3223), 'numpy.trapz', 'np.trapz', ([], {'x': 'lam', 'y': '(lam * Rlam * Flam)', 'axis': '(-1)'}), '(x=lam, y=lam * Rlam * Flam, axis=-1)\n', (3186, 3223), True, 'import numpy as np\n'), ((3242, 3280), 'numpy.trapz', 'np.trapz', ([], {'x': 'lam', 'y': '(Rlam * lam)', 'axis': '(-1)'}), '(x=lam, y=Rlam * lam, axis=-1)\n', (3250, 3280), True, 'import numpy as np\n'), ((4461, 4482), 'numpy.log10', 'np.log10', (['(img1 / img2)'], {}), '(img1 / img2)\n', (4469, 4482), True, 'import numpy as np\n'), ((1378, 1447), 'warnings.warn', 'warn', (['"""spectrum has been padded, bad mags possible"""', 'Spec2PhotWarning'], {}), "('spectrum has been padded, bad mags possible', Spec2PhotWarning)\n", (1382, 1447), False, 'from warnings import warn\n'), ((2214, 2243), 'numpy.trapz', 'np.trapz', ([], {'x': 'lam', 'y': '(Rlam * lam)'}), '(x=lam, y=Rlam * lam)\n', (2222, 2243), True, 'import numpy as np\n'), ((2266, 2295), 'numpy.trapz', 'np.trapz', ([], {'x': 'lam', 'y': '(Rlam / lam)'}), '(x=lam, y=Rlam / lam)\n', (2274, 2295), True, 'import numpy as np\n'), ((1206, 1233), 'numpy.moveaxis', 'np.moveaxis', (['flam', 'axis', '(-1)'], {}), '(flam, axis, -1)\n', (1217, 1233), True, 'import numpy as np\n'), ((7167, 7194), 'astropy.units.Unit', 'u.Unit', (['"""erg s-1 cm-2 AA-1"""'], {}), "('erg s-1 cm-2 AA-1')\n", (7173, 7194), True, 'from astropy import units as u, constants as c, table as t\n'), ((7308, 7335), 'astropy.units.Unit', 'u.Unit', (['"""erg s-1 cm-2 AA-1"""'], {}), "('erg s-1 cm-2 AA-1')\n", (7314, 7335), True, 'from astropy import units as u, constants as c, table as t\n')] |
import numpy as np
from scipy.stats import ortho_group
# Draw `size` random orthogonal matrices of dimension `d` (Haar-distributed).
d = 4
seed = 1
size = 2
# NOTE(review): np.int64(...) truncates the orthogonal matrices to integer
# entries, so the determinant / orthogonality checks below will generally
# NOT show +-1 / identity -- presumably intentional for demonstration;
# confirm before reusing.
a, b = np.int64(ortho_group.rvs(size=size, dim=d, random_state=seed))
print(a)
print(np.linalg.det(a))
print(a @ a.T) | [
"numpy.linalg.det",
"scipy.stats.ortho_group.rvs"
] | [((95, 147), 'scipy.stats.ortho_group.rvs', 'ortho_group.rvs', ([], {'size': 'size', 'dim': 'd', 'random_state': 'seed'}), '(size=size, dim=d, random_state=seed)\n', (110, 147), False, 'from scipy.stats import ortho_group\n'), ((164, 180), 'numpy.linalg.det', 'np.linalg.det', (['a'], {}), '(a)\n', (177, 180), True, 'import numpy as np\n')] |
import os
import sys
import unicodedata
from dateutil import parser
from keras.applications.densenet import DenseNet121
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.mobilenet import MobileNet
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
import numpy as np
import pandas as pd
import scipy as sp
from six import text_type
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.externals.joblib import Parallel, delayed
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import (FeatureUnion, _fit_one_transformer,
_fit_transform_one, _transform_one)
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
if sys.version_info.major == 3:
import pickle
else:
import cPickle as pickle
class HeterogeneousFeatureUnion(FeatureUnion):
    """FeatureUnion variant where each transformer picks its own input.

    Unlike sklearn's FeatureUnion, every transformer must additionally
    implement ``select_item(X)`` which extracts the slice of `X` (e.g. a
    DataFrame column) that the transformer operates on. The fit/transform
    plumbing mirrors sklearn's implementation, with `select_item` applied
    before each underlying call.
    """
    def fit(self, X, y=None):
        """Fit all transformers using X.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data, used to fit transformers.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        self : HeterogeneousFeatureUnion
            This estimator
        """
        self.transformer_list = list(self.transformer_list)
        self._validate_transformers()
        # each transformer is fitted on its own selection of X, in parallel
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, trans.select_item(X), y)
            for _, trans, _ in self._iter())
        self._update_transformer_list(transformers)
        return self
    def _validate_transformers(self):
        """Check that every transformer exposes the required interface."""
        names, transformers = zip(*self.transformer_list)
        # validate names
        self._validate_names(names)
        # validate estimators: fit/fit_transform, transform AND select_item
        for t in transformers:
            if t is None:
                continue
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform") or not hasattr(t, "select_item")):
                raise TypeError("All estimators should implement fit and "
                                "transform and select_item. "
                                "'%s' (type %s) doesn't" % (t, type(t)))
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers, transform the data and concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        self._validate_transformers()
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, weight, trans.select_item(X), y,
                                        **fit_params)
            for name, trans, weight in self._iter())
        if not result:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # keep the stack sparse if any transformer produced sparse output
        if any(sp.sparse.issparse(f) for f in Xs):
            Xs = sp.sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, weight, trans.select_item(X))
            for name, trans, weight in self._iter())
        if not Xs:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        # keep the stack sparse if any transformer produced sparse output
        if any(sp.sparse.issparse(f) for f in Xs):
            Xs = sp.sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
class ColumnTransformer(BaseEstimator, TransformerMixin):
    """Base transformer bound to a single pre-set DataFrame column.

    Subclasses implement fit/transform; `select_item` extracts the bound
    column in the representation the subclass expects.
    """
    def __init__(self, colname):
        """Init
        Args:
            colname (str): column name of an input DataFrame
        """
        self.colname = colname
        self.tolist_flag = False
        if 'Vectorizer' in self.__class__.__name__:
            # Vectorizer-style subclasses (TfidfVectorizer, CountVectorizer,
            # TextLengthVectorizer, ...) expect a list of raw documents
            # rather than a 2-D array.
            self.tolist_flag = True

    def select_item(self, X):
        """Return the bound column of `X` as a list or (n, 1) array."""
        if self.tolist_flag:
            return X[self.colname].tolist()
        else:
            # BUGFIX: Series.as_matrix() was removed in pandas 1.0;
            # .values is the long-supported equivalent.
            return X[self.colname].values[None].T
class DummyTransformer(ColumnTransformer):
    """Identity transformer: hands its input back untouched."""

    def fit(self, X, y=None):
        # stateless; nothing to learn
        return self

    def transform(self, X):
        # identity mapping
        return X

    def get_feature_names(self):
        return [u"Value"]
class CategoryOneHotEncoder(ColumnTransformer):
    """One-hot encoder for string-valued (categorical) columns.

    Values are first mapped to integer codes with a LabelEncoder, then
    expanded into a one-hot matrix with a OneHotEncoder.
    """

    def __init__(self, colname):
        super(CategoryOneHotEncoder, self).__init__(colname)
        self.label_encoder = LabelEncoder()
        self.onehot_encoder = OneHotEncoder()

    def _encode(self, X, fitting=False):
        # helper: flatten, integer-encode, and reshape to a single column
        flat = X.ravel()
        if fitting:
            codes = self.label_encoder.fit_transform(flat)
        else:
            codes = self.label_encoder.transform(flat)
        return codes.reshape(-1, 1)

    def fit(self, X, y=None):
        self.onehot_encoder.fit(self._encode(X, fitting=True))
        return self

    def transform(self, X):
        return self.onehot_encoder.transform(self._encode(X))

    def get_feature_names(self):
        # one feature per learned class, rendered as text
        return [text_type(c) for c in list(self.label_encoder.classes_)]
class DateTransformer(ColumnTransformer):
    """Transforms pandas datetime64 to feature vectors
    Args:
        weekday (boolean): weekday extraction flag. Default: True
        timeoftheday (boolean): specifies if we include hours and minutes.
            Default: True
        seconds (boolean): seconds extraction flag. Default: False
        microseconds (boolean): microseconds extraction flag. Default: False
        days_in_month (boolean): days in this month. Default: False
        is_leap_year (boolean): leap year indicator. Default: False
        month_start_end (boolean): month start and end indicators.
            Default: False
        nweek (boolean): ordinal of the week from the begginning of the year.
            Default: False
    TODO ideas:
        #idea: what we can do is apply all the possible parameters and filter
        out the ones that has equal values (say no milliseconds at all)
        holiday markers?
        season markers?
        time zones?
    """
    def __init__(self, colname,
                 weekday=True, timeoftheday=True, seconds=False,
                 microseconds=False, days_in_month=False,
                 is_leap_year=False, month_start_end=False, nweek=False):
        super(DateTransformer, self).__init__(colname)
        self.weekday = weekday
        self.timeoftheday = timeoftheday
        self.seconds = seconds
        self.microseconds = microseconds
        self.days_in_month = days_in_month
        self.is_leap_year = is_leap_year
        self.month_start_end = month_start_end
        self.nweek = nweek
    def fit(self, X, y=None):
        """No-op: date feature extraction is stateless.
        Args:
            X (pandas dataframe): column to transform
        Returns:
            self
        """
        return self
    def transform(self, X):
        """Expand the datetime column into numeric calendar features.
        Args:
            X (pandas.DataFrame): df to pick the self.colname column
        Returns:
            numpy.ndarray with one column per enabled feature
        """
        dates = pd.DatetimeIndex(X[self.colname])
        result_df = pd.DataFrame(dates.year.values, columns=['Year'])
        result_df['Month'] = dates.month.values
        result_df['Day'] = dates.day.values
        result_df['DayOfYear'] = dates.dayofyear.values  # ordinal day of year
        if self.weekday:
            result_df['Weekday'] = dates.dayofweek.values
        if self.days_in_month:
            result_df['Days_in_month'] = dates.days_in_month.values
        if self.is_leap_year:
            # *1 is converting bool to int
            result_df['is_leap_year'] = dates.is_leap_year * 1
        if self.month_start_end:
            # *1 is converting bool to int
            result_df['Month_start'] = dates.is_month_start * 1
            result_df['Month_end'] = dates.is_month_end * 1
        if self.nweek:
            result_df['Nweek'] = dates.week.values  # ordinal week of the year
        if self.timeoftheday:
            result_df['Hour'] = dates.hour.values
            result_df['Minutes'] = dates.minute.values
        if self.seconds:
            result_df['Seconds'] = dates.second.values
        if self.microseconds:
            result_df['Microseconds'] = dates.microsecond.values
        # unix date (int64 view of datetime64[ns])
        result_df['Unix'] = dates.astype(np.int64)
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
        # .values is the long-supported equivalent.
        return result_df.values
    def get_feature_names(self):
        """Provides column names for features."""
        # The following columns initialization is needed to make
        # get_feature_names function work properly before fit/transform
        self.columns = ['Year', 'Month', 'Day', 'DayOfYear']
        if self.weekday:
            self.columns.append('Weekday')
        if self.days_in_month:
            self.columns.append('Days_in_month')
        if self.is_leap_year:
            self.columns.append('is_leap_year')
        if self.month_start_end:
            self.columns.append('Month_start')
            self.columns.append('Month_end')
        if self.nweek:
            self.columns.append('Nweek')
        if self.timeoftheday:
            self.columns.append('Hour')
            self.columns.append('Minutes')
        if self.seconds:
            self.columns.append('Seconds')
        if self.microseconds:
            self.columns.append('Microseconds')
        self.columns.append('Unix')
        return [text_type(x) for x in self.columns]
    def select_item(self, X):
        # Hand over the whole DataFrame: transform() needs the datetime
        # column as a pandas object, not a plain numpy array.
        return X
class SklearnVectorizer(ColumnTransformer):
    """Wraps an arbitrary sklearn-style vectorizer class around one column.

    Args:
        colname (str): column name of an input DataFrame
        vectorizer (class): vectorizer class to instantiate, e.g.
            CountVectorizer or TfidfVectorizer. Required.
        **kwargs: keyword arguments forwarded to the vectorizer constructor
    """
    def __init__(self, colname, vectorizer=None, **kwargs):
        super(SklearnVectorizer, self).__init__(colname)
        if vectorizer is None:
            # Previously this crashed with an opaque
            # "'NoneType' object is not callable"; fail with a clear message
            # (still a TypeError, so existing handlers keep working).
            raise TypeError("SklearnVectorizer requires a vectorizer class, "
                            "e.g. vectorizer=CountVectorizer")
        self.vectorizer = vectorizer(**kwargs)

    def fit(self, X, y=None):
        """Fit the wrapped vectorizer on the raw documents in X."""
        self.vectorizer.fit(X)
        return self

    def transform(self, X):
        """Transform X with the wrapped vectorizer."""
        return self.vectorizer.transform(X)

    def get_feature_names(self):
        """Delegate feature naming to the wrapped vectorizer."""
        return self.vectorizer.get_feature_names()
class LDAVectorizer(ColumnTransformer):
    """Converts a text column into LDA topic distributions.

    Documents are first turned into term counts with a CountVectorizer,
    then mapped to topic proportions with LatentDirichletAllocation.
    """
    def __init__(self, colname, kwargs_dict=None):
        """Initializer
        Args:
            kwargs_dict (dict): kwargs parameters to CountVectorizer and
                LatentDirichletAllocation, keyed by class name
        """
        super(LDAVectorizer, self).__init__(colname)
        if kwargs_dict is None:
            kwargs_dict = {'CountVectorizer': {},
                           'LatentDirichletAllocation': {}}
        self.vectorizer = CountVectorizer(**kwargs_dict['CountVectorizer'])
        self.lda = LatentDirichletAllocation(
            **kwargs_dict['LatentDirichletAllocation'])

    def fit(self, X, y=None):
        assert type(X) == list
        # learn the vocabulary, then the topic model on the counts
        self.vectorizer.fit(X)
        self.lda.fit(self.vectorizer.transform(X))
        return self

    def transform(self, X):
        assert type(X) == list
        counts = self.vectorizer.transform(X)
        return self.lda.transform(counts)

    def get_feature_names(self):
        # To make sure self.lda is fitted (is there any better way?)
        assert self.lda._n_components is not None
        clsname = self.__class__.__name__
        return [u"{}_{}".format(clsname, i)
                for i in range(self.lda.n_components)]
class TextLengthVectorizer(ColumnTransformer):
    """Extracts the character length of each text entry.

    Future work: Word length vs character length
    """
    def __init__(self, colname, null_value=0):
        """Initialization
        Args:
            null_value (int): Default value for empty string
        """
        super(TextLengthVectorizer, self).__init__(colname)
        self.null_value = null_value

    def fit(self, X, y=None):
        # stateless: nothing to learn
        return self

    def transform(self, X):
        """Map each string in X to its length.
        Args:
            X (list): list of string objects
        Returns:
            np.array of shape (len(X), 1)
        """
        assert type(X) == list
        lengths = [len(text) for text in X]
        return np.array(lengths).reshape(-1, 1)

    def get_feature_names(self):
        return [u"text_length"]
class PreTrainedModel:
    """Lazily-built registry of pre-trained Keras CNN backbones.

    Entries live in the class attribute ``model_dict`` shared by all
    instances; each model is constructed on first access via the matching
    ``_add_<key>`` classmethod.
    """
    model_dict = {}

    def __getitem__(self, key):
        # build and cache the entry on first request
        if key not in self.model_dict:
            builder = getattr(self, "_add_{}".format(key))
            builder()
        return self.model_dict[key]

    @classmethod
    def _add_resnet50(cls):
        cls.model_dict["resnet50"] = {
            "name": "resnet50",
            "model": ResNet50(weights='imagenet', include_top=False),
            "image_size": (224, 224)
        }

    @classmethod
    def _add_densenet121(cls):
        cls.model_dict["densenet121"] = {
            "name": "densenet121",
            "model": DenseNet121(weights='imagenet', include_top=False),
            "image_size": (224, 224)
        }

    @classmethod
    def _add_mobilenet(cls):
        cls.model_dict["mobilenet"] = {
            "name": "mobilenet",
            "model": MobileNet(
                weights='imagenet', include_top=False,
                input_shape=(224, 224, 3), pooling="avg"
            ),
            "image_size": (224, 224)
        }

    @staticmethod
    def get_output_dimension(model):
        """Return the size of the model's final output axis."""
        out_shape = model.output.shape
        return out_shape[len(out_shape) - 1]
class ImageVectorizer(ColumnTransformer):
    """Extracts image feature vectors with a pre-trained Keras CNN.

    Default model: mobilenet (ImageNet weights).
    Input: list/array of image file paths.
    Output: the backbone's last-layer vector per batch.
    """
    def __init__(self, colname, way="mobilenet", batch_size=512):
        super(ImageVectorizer, self).__init__(colname)
        # lazily-built shared backbone registry
        self.trained_model = PreTrainedModel()[way]
        self.batch_size = batch_size

    def fit(self, X, y=None):
        # stateless: the backbone is already trained
        return self

    def transform(self, X):
        """Vectorize all images in batches of at most self.batch_size.
        Args:
            X (np.array): list of image path strings
        Returns:
            np.array
        """
        X = np.array(X)
        n_batches = X.shape[0] // self.batch_size
        n_batches += ((X.shape[0] % self.batch_size) > 0)
        # BUGFIX: np.split requires an exact equal division and raised
        # ValueError whenever len(X) was not a multiple of the batch count;
        # np.array_split accepts uneven batches.
        return np.array([self.img2vec(batch)
                         for batch in np.array_split(X, n_batches)])

    def img2vec(self, path_list):
        """Load, preprocess and embed one batch of images.
        Args:
            path_list (np.array): path url list
        Returns:
            np.array
        """
        img_list = []
        target_size = self.trained_model["image_size"]
        for path in path_list:
            # resolve relative paths against the current working directory
            if not path.startswith("/"):
                path = os.getcwd() + "/" + path
            img = image.load_img(path, target_size=target_size)
            x = image.img_to_array(img)
            x = preprocess_input(x)
            img_list.append(x)
        img_arr = np.array(img_list)
        pred = self.trained_model["model"].predict(img_arr)
        # NOTE(review): this repeatedly takes element [0], so for rank>2
        # backbone outputs only the first image's features survive --
        # confirm this is the intended batch behavior.
        for i in range(len(self.trained_model["model"].output.shape) - 1):
            pred = pred[0]
        return pred

    def get_feature_names(self):
        """One feature name per output dimension of the backbone."""
        clsname = self.__class__.__name__
        dim = PreTrainedModel.get_output_dimension(self.trained_model["model"])
        return [u"{}_{}_{}".format(clsname, self.trained_model["name"], x)
                for x in range(dim)]
class AutoConverter():
def __init__(self,
target,
strategy='auto',
coltype_converters={},
column_converters={},
use_column_converter_only=True,
n_jobs=1):
"""Big wrapping class for convertors
Args:
target (str): target column name
strategy (str): {'auto'}
coltype_converters (dict): dict of customized Transformers
column_converters (dict): dict of customized column transformers
use_column_converter_only (bool): Use only column converter or not
n_jobs (int): n_jobs parameter for FeatureUnion
column_converters will be applied to columns on a priority basis.
If use_column_converter == True (default value),
pre-defined transformers in TransformerCatalog will NOT be applied.
Therefore, giving an empty list to a column can be used to "ignore"
the column for feature extraction.
In the following example, only TfIdfVectorizer with default parameters
will be applied to "Name" column and no transformer will be applied to
"Age" column.
column_converters={"Name": [(TfIdfVectorizer, {})],
"Age": []}
"""
self.target = target
self.strategy = strategy
self.feature_names = []
self.X = None
self.y = None
self.hasdata = False
self.target_le = LabelEncoder()
self.subtables_ = None
self.converter_catalog = None
self.set_converter(coltype_converters)
self.column_converters = column_converters
self.use_column_converter_only = use_column_converter_only
self.n_jobs = n_jobs
def set_converter(self, coltype_converters):
"""Insert customized transformers into self.converter_catalog."""
# TODO(Yoshi): Technically, dict.update overwrite existing entry
# We might want to "append" instead. To be discussed.
self.converter_catalog = (DefaultTransformerCatalog.transformer_dict
.copy())
self.converter_catalog.update(coltype_converters)
    def fit(self, df, subtables=None, y=None, custom_types={}):
        """Fits the data to the custom_converters
        Args:
            df (pd.DataFrame): main dataframe table
            subtables (dictionary): dictionary of subtables with keys for
            linking them to main table. Default: None.
                subtables =
                {tabname(str) : { "table": (pd.Dataframe),
                                  "link_key": (str) main table column name,
                    "group_key": (str) this table column name,
                    "custom_aggregator": (dict) col_type:aggregator_class}}
                Example:
                {"school_table": {"table": school_df,
                                  "link_key": "school_id",
                                  "group_key": "id",
                                  "custom_aggregator": {"text":
                                                        CustomTextAggregator()}
                                  }
                }
            custom_types (dictionary): dictionary of col_types that forcibly
                overrides the col_type_dict inferred by the auto converter
        Returns:
            self
        """
        assert self.target in df
        # filtering None
        # NOTE(review): dropna(inplace=True) mutates the caller's DataFrame
        df.dropna(subset=[self.target], inplace=True)
        # filtering NaN
        df = df[df[self.target].notnull()]
        # NOTE(review): as_matrix() was removed in pandas >= 1.0;
        # .values would be the safe equivalent.
        self.target_le.fit(df[self.target].as_matrix())
        X_df = df.drop(self.target, axis=1)
        # 1. typing columns (inferred types, then forced overrides)
        self.colname_type_dict = type_columns(X_df)
        if isinstance(custom_types, dict):
            self.colname_type_dict.update(custom_types)
        # 2. Pre-imputing missing values for textual columns with the
        # literal string "NaN"
        for colname in X_df.columns:
            if (self.colname_type_dict[colname] == 'text'
                    or self.colname_type_dict[colname] == 'categorical'
                    or self.colname_type_dict[colname] == 'text_ja'):
                X_df.loc[:, colname] = X_df[colname].fillna("NaN").astype(str)
        # 3. create feature union: user-supplied per-column converters
        # first, then catalog defaults keyed by inferred column type
        transformer_list = []
        for colname in X_df.columns:
            if colname in self.column_converters:
                for transformer_cls, kwargs in self.column_converters[colname]:
                    transformer_list.append(
                        (u"{}.{}".format(colname, transformer_cls.__name__),
                         transformer_cls(colname, **kwargs))
                    )
                if self.use_column_converter_only:
                    # Since transformer(s) are defined by users,
                    # skip automatic assignment of transformers for this column
                    continue
            assert colname in self.colname_type_dict
            coltype = self.colname_type_dict[colname]
            if coltype == 'ignore':
                continue
            if coltype == 'date':
                # we don't want to pass np array to date transformer,
                # instead we pass pandas df
                # TODO(Yoshi): This is hard-coded??
                d = DateTransformer(colname=colname)
                transformer_list.append((u"{}.{}".format(colname, 'date'), d))
                continue
            t_dict = self.converter_catalog[coltype]
            for transformer in t_dict:
                transformer_cls = transformer[0]
                kwargs = transformer[1]
                transformer_list.append(
                    (u"{}.{}".format(colname, transformer_cls.__name__),
                     transformer_cls(colname, **kwargs))
                )
        # 4. fit feature union
        if transformer_list:  # if there's something to transform
            self.fu = HeterogeneousFeatureUnion(transformer_list,
                                                n_jobs=self.n_jobs)
            self.fu.fit(X_df)
            # main-table features are namespaced with the "main.." prefix
            feature_names = list(map(lambda x: 'main..' + text_type(x),
                                     self.fu.get_feature_names()))
        else:  # empty main table (only target and ignore types)
            # we assume there exists information in subtables then
            if not subtables:
                raise ValueError("There's nothing to transform")
            self.fu = None
            feature_names = []
        # defining Aggregator structure and fitting the tables in
        if subtables:
            self.subtables_ = subtables
            # sort keys so the feature order is deterministic
            for key in sorted(list(subtables.keys())):
                subtable_dict = subtables[key]
                if subtable_dict['link_key'] not in X_df.columns:
                    raise KeyError("Link key " + subtable_dict['link_key'] +
                                   " does not exist in the main table")
                aggr = AutoAggregator(
                    group_key=subtable_dict['group_key'],
                    custom_aggregators=subtables.get("custom_aggregator", {}))
                self.subtables_[key]['aggr'] = aggr
                aggr.fit(subtable_dict['table'])
                self.colname_type_dict[key] = aggr.colname_type_dict.copy()
                # gathering feature names from subtables,
                # namespaced as "<subtable>..<feature>"
                append_list = list(
                    map(lambda x: text_type(key) + '..' + text_type(x),
                        aggr.feature_names))
                feature_names.extend(append_list)
        self.feature_names = feature_names
        return self
    def transform(self,
                  df,
                  subtables=None,
                  prediction=False):
        """Transforms data to feature matrix
        Args:
            df (pandas.DataFrame): data to transform
            subtables (dictionary): dictionary of subtables with keys for
            linking them to main table. Default: None.
                subtables =
                {tabname(str) : { "table": (pd.Dataframe),
                                  "link_key": (str) main table column name,
                                  "group_key": (str) this table column name }}
                Example:
                {"school_table": {"table": shool_pd},
                                  "link_key": "school_id",
                                  "group_key": "id" }
                }
            prediction (bool): Returns only X if True
        Returns:
            X (numpy.ndarray): feature matrix
            y (array-like of shape [n_samples]): target vector
                (only when prediction is False)
        """
        if not prediction:
            # filtering None
            # NOTE(review): dropna(inplace=True) mutates the caller's frame
            df.dropna(subset=[self.target], inplace=True)
            # filtering NaN
            df = df[df[self.target].notnull()]
            # TODO(Yoshi): Should display Warning message when transform
            # is called with prediction=False if self.hasdata is True
            if self.hasdata:
                print("[WARNING] This instance already has been fitted.")
            assert self.target in df
            y_unique = df[self.target].unique()
            if len(y_unique) == 1 and np.isnan(y_unique[0]):
                # this just leaves y equal to a np.nan vector of the same size
                # TODO(Yoshi): This should raise exception.
                # Will revise here after specifying exceptions
                y = df[self.target]
            else:
                # NOTE(review): as_matrix() was removed in pandas >= 1.0
                y = self.target_le.transform(df[self.target].as_matrix())
            X_df = df.drop(self.target, axis=1)
        else:
            # Prediction: subtables must be supplied if they were fitted
            if self.subtables_ is not None:
                assert subtables is not None
            if self.target in df:
                X_df = df.drop(self.target, axis=1)
            else:
                X_df = df
        # TODO(later): Pre-imputing. This part could be redundant
        for colname in X_df.columns:
            if self.colname_type_dict[colname] in ['categorical',
                                                   'text',
                                                   'text_ja']:
                X_df.loc[:, colname] = X_df[colname].fillna("NaN").astype(str)
        if self.fu:
            X = self.fu.transform(X_df)
        else:
            # Creating the empty matrix of the same size to use it later during
            # data aggregation, since we can't use feature union in absence of
            # features
            X = np.empty([X_df.shape[0], 0])
        # Ad-hoc way to convert sparse matrix into numpy.array and replace NaN
        # values with 0.0
        if type(X) == sp.sparse.csr.csr_matrix:
            X = X.toarray()
            X[np.isnan(X)] = 0.0
        # transforming subtables and concating them with main table feature
        # matrix
        if subtables:
            # TODO(Kate): make sure that subtables passed and subtables stored
            # have the same structure. Any ideas?
            X_gather = pd.DataFrame(X)
            # sorted keys keep the feature order consistent with fit()
            for key in sorted(list(subtables.keys())):
                subtable = subtables[key]
                aggr = subtable['aggr']
                link_key = subtable['link_key']
                X_sub = aggr.transform(subtable['table'])
                # combine X_gather with subtable['link_key']
                if link_key in X_gather.columns.tolist():
                    raise KeyError(
                        'column already exists in a dataframe' + link_key)
                X_gather[link_key] = df[link_key]
                # X_sub is already a pd.DataFrame with group_key included
                # as index
                X_gather = X_gather.merge(X_sub, how='left', left_on=link_key,
                                          right_index=True)
                # make sure we don't leave anything(index) behind ;)
                del X_gather[link_key]
            # do something with get_feature_names
            # NOTE(review): as_matrix() was removed in pandas >= 1.0
            X = X_gather.as_matrix()
        # TODO(Yoshi): Post pre-processing such as missing value imputation
        # TODO(Yoshi): Tentative naive replacement of NaN values
        X = np.nan_to_num(X)
        if not prediction:
            self.X = X
            self.y = y
            self.hasdata = True
            return [X, y]
        else:
            return X
def fit_transform(self, df, subtables=None, y=None):
"""Fit + Transform
Args:
df (pandas.DataFrame): main df
subtables (dict): dictionary of subtables
Returns:
X (numpy.ndarray): feature matrix
y (array-like of shape [n_samples]): target vector
"""
return self.fit(df, subtables).transform(df, subtables)
def index2label(self, predictions):
"""Transforms predictions from numerical format back to labels
Args:
predictions (np.array): array of label numbers
Returns:
labels (np.array): array of label values
"""
return self.target_le.inverse_transform(predictions)
def get_feature_names(self, colname=None):
"""Returns feature names
Args:
colname (str or tuple): column name
if colname is a tuple (subtable name, colname)
if None returns all feature names
(default: None)
Returns:
feature_names (list)
"""
if colname is None:
if len(self.feature_names) == 0:
# TODO(Yoshi): Do we want to use a "trained" flag instead?
print("[WARNING]:",
"AutoConverter instance has extracted no feature.",
"Probably, it has not been fit to data yet.")
return self.feature_names
# Use tuple (or list) to handle subtable feature names
if type(colname) in [tuple, list]:
# TODO(Yoshi): replace with Exception
assert len(colname) == 2
colname_ = "..".join(colname)
else:
# colname is in main table
colname_ = "main..{}".format(colname)
colname_idx_list = list(filter(lambda x: colname_ in x[1],
enumerate(self.feature_names)))
colname_list = list(map(lambda x: x[1], colname_idx_list))
return colname_list
def save(self,
filepath,
overwrite=False):
"""Save AutoConverter object as pickle file
Args:
filepath (str): Output pickle filepath
overwrite (bool): Overwrites a file with the same name if true
Returns:
success_flag (bool)
"""
if not overwrite and os.path.exists(filepath):
# TODO(Yoshi): Warning handling
print("File already exists. Skip.")
return False
with open(filepath, "wb") as fout:
pickle.dump(self, fout)
return True
@classmethod
def load(cls, filepath):
"""Load AutoConverter object from pickle file
Args:
filepath (str): Input pickle filepath
Returns:
AutoLearn object
"""
with open(filepath, "rb") as fin:
obj = pickle.load(fin)
assert obj.__class__.__name__ == 'AutoConverter'
return obj
class DefaultTransformerCatalog:
    """Default mapping from inferred column type to feature transformers.

    Each entry maps a column type (as produced by ``type_column``) to a
    list of ``(TransformerClass, kwargs)`` pairs; one transformer instance
    is created per matching column when the feature union is built.
    """
    # TODO(later): boolean
    transformer_dict = {
        'text': [
            (SklearnVectorizer, {"vectorizer": TfidfVectorizer,
                                 "max_features": 500}),
            (LDAVectorizer, {}),
            (TextLengthVectorizer, {})
        ],
        'text_ja': [
            # char_wb 2-3 grams: character n-grams for Japanese text
            (SklearnVectorizer, {"vectorizer": TfidfVectorizer,
                                 "max_features": 500,
                                 "analyzer": "char_wb",
                                 "ngram_range": (2, 3)}),
            (TextLengthVectorizer, {})
        ],
        # numerical columns pass through unchanged
        'numerical': [(DummyTransformer, {})],
        'categorical': [(CategoryOneHotEncoder, {})],
        'date': [(DateTransformer, {})]
    }
class AutoAggregator():
    """Extract features from a subsidiary table and aggregate them by a key.

    Feature extraction is performed per column via the aggregator catalog,
    and the resulting rows are grouped by ``group_key`` so they can later be
    joined back onto the main table.
    """

    def __init__(self,
                 group_key,
                 custom_aggregators=None,
                 n_jobs=1):
        """Initialization
        Args:
            group_key (string): column name of the aggregated table. This
                column will be used for 'grouping'/aggregating the values in
                the dataframe
            custom_aggregators (dict): mapping from column type to customized
                Aggregators. ``None`` (the default) means no customization.
                NOTE: the former mutable default ``{}`` was replaced by
                ``None`` to avoid the shared-mutable-default pitfall;
                passing ``{}`` explicitly behaves identically.
            n_jobs (int): n_jobs parameter for FeatureUnion
        Class performs feature extraction from the subsidiary table and then
        groups the rows of the data by group_key
        """
        self.group_key = group_key
        self.feature_names = []
        self.set_aggregator_catalog(custom_aggregators)
        self.n_jobs = n_jobs

    def set_aggregator_catalog(self, custom_aggregators):
        """Insert customized aggregators to self.aggregator_catalog."""
        # TODO(Yoshi): Technically, dict.update overwrite existing entry
        # We might want to "append" instead. To be discussed.
        self.aggregator_catalog = (DefaultAggregatorCatalog.transformer_dict
                                   .copy())
        if custom_aggregators:
            self.aggregator_catalog.update(custom_aggregators)

    def fit(self, sec_df):
        """Fits the data
        Args:
            sec_df (pandas.DataFrame): subsidiary table
        Returns:
            self
        """
        # TODO(Kate): make sure that df has group_key as index so that when we
        # pass the whole thing to transformers, we could group by it
        # 1. typing columns
        self.colname_type_dict = type_columns(sec_df)
        # 2. Pre-imputing missing values for textual column
        # NOTE(review): this mutates the caller's DataFrame in place.
        for colname in sec_df.columns:
            if self.colname_type_dict[colname] in ('text', 'categorical',
                                                   'text_ja'):
                sec_df.loc[:, colname] = sec_df[colname].fillna("NaN")
        # 3. create feature union
        transformer_list = []
        for colname in sec_df.columns:
            assert colname in self.colname_type_dict
            coltype = self.colname_type_dict[colname]
            if colname == self.group_key:
                # we are ignoring this column, since it should be included
                # from the main table
                continue
            if coltype == 'ignore':
                continue
            if coltype in self.aggregator_catalog:
                for agg_cls, kwargs in self.aggregator_catalog[coltype]:
                    agg = agg_cls(colname=colname,
                                  group_key=self.group_key,
                                  **kwargs)
                    transformer_list.append(
                        (u"{}.{}".format(colname, coltype), agg)
                    )
        # 4. fit feature union
        # we need to make sure that we still store id values from this one
        self.fu = HeterogeneousFeatureUnion(transformer_list,
                                            n_jobs=self.n_jobs)
        self.fu.fit(sec_df)
        self.feature_names = self.fu.get_feature_names()
        return self

    def transform(self, sec_df):
        """Transformer
        Args:
            sec_df (pandas.DataFrame): secondary dataframe for transformation
                and aggregation
        Returns:
            X_df (pandas.DataFrame): aggregated features indexed by the
                sorted unique values of ``group_key``
        """
        X = self.fu.transform(sec_df)
        # change X back to pandas.DataFrame
        X_df = pd.DataFrame(X)
        # NOTE(review): assumes every aggregator emits one row per group key
        # in sorted-key order — verify against the aggregator implementations.
        X_df.index = sorted(sec_df[self.group_key].unique())
        return X_df

    def fit_transform(self, df, y=None):
        """Fit on *df*, then transform it (scikit-learn convention)."""
        return self.fit(df).transform(df)
# I copied this from AutoConvertor. We may want to move it out of both classes
def is_japanese(string):
    """Return whether the text includes Japanese.

    A character counts as Japanese when its Unicode name contains
    "CJK UNIFIED", "HIRAGANA" or "KATAKANA".

    Args:
        string (str): value
    Returns:
        is_ja (bool): whether the text include Japanese or not
    """
    if not isinstance(string, text_type):
        return False
    if hasattr(string, 'decode'):
        # In Python 2.x, string has to be decoded as Unicode
        string = string.decode("utf-8")
    for ch in string:
        # unicodedata.name(ch, "") returns "" for unnamed code points,
        # replacing the previous ValueError-driven control flow that
        # printed every undefined character (see old TODO(Yoshi): Logger).
        name = unicodedata.name(ch, "")
        if any(symbol in name for symbol in ("CJK UNIFIED",
                                             "HIRAGANA",
                                             "KATAKANA")):
            return True
    return False
def is_japanese_col(sr, check_num=500):
    """Return whether a text Series contains Japanese.

    Args:
        sr (pd.Series): text Series
        check_num (int): number of leading rows to check
    Returns:
        is_ja_column (bool): whether the series is Japanese or not
    """
    # Iterate values directly: Series.iteritems() was deprecated and removed
    # in pandas 2.0, and the index was unused anyway.
    for value in sr[:check_num]:
        if is_japanese(value):
            return True
    return False
def type_column(s, cat_proportion_threshold=0.01,
                cat_absolute_threshold=100, id_threshold=0.99):
    """Returns type of column
    Args:
        s (pd.Series): column values
        cat_proportion_threshold (float): percentage of unique values in
            dataset. Default 0.01
        cat_absolute_threshold (int): absolute value count. Default = 100.
            The minimum of cat_proportion_threshhold and
            cat_absolute_threshold is used to define the actual threshhold.
        id_threshold (float): threshold to ignore id columns.
            Default = 0.99
    Returns:
        coltype (str): type of the column. Current possible values:
            'numerical', 'categorical', 'text', 'date', 'ignore'. The latter
            column type is used to ignore the column when extracting features.
            Currently used for id-containing columns.
    """
    # NOTE: the elif ordering below is intentional — the categorical check
    # must run before the integer check so low-cardinality ints stay
    # categorical.
    coltype = None
    # Float dtypes are always treated as numerical.
    if s.dtype in ['float16', 'float32', 'float64']:
        coltype = 'numerical'
    elif s.dtype == 'datetime64[ns]':
        coltype = 'date'
    # Few distinct values (relative OR absolute cap) => categorical.
    elif (s.value_counts().size - 1 <
          min(cat_proportion_threshold * s.size, cat_absolute_threshold)):
        # valid both for integer or text values
        coltype = 'categorical'
    elif s.dtype in ['intc', 'intp', 'int8', 'int16', 'int32', 'int64',
                     'uint8', 'uint16', 'uint32', 'uint64']:
        # Nearly-all-unique integers look like an id column.
        if s.value_counts().size - 1 > id_threshold * s.size:
            # this is probably id and should be ignored
            coltype = 'ignore'
        else:
            coltype = 'numerical'
    else:
        # object type
        try:
            # Another chance to determine if "date" type
            # TODO(Yoshi): Will make the sampling size a parameter
            # parser.parse() raises Exception if it fails to parse
            s[:100].apply(lambda x: parser.parse(x))
            coltype = 'date'
        except Exception:
            # NOTE(review): bare `None` below is a no-op placeholder
            # (equivalent to `pass`); a parse failure simply falls through
            # to the text/id heuristics.
            None
        if coltype is None:
            # Values without any whitespace suggest identifiers, not text.
            if s.str.count(' ').sum() == 0:
                # seems like id
                coltype = 'ignore'
            else:
                if is_japanese_col(s):
                    coltype = 'text_ja'
                else:
                    coltype = 'text'
    return coltype
def type_columns(df):
    """Infer a column type for every column of *df*.

    Args:
        df (pd.DataFrame): input data
    Returns:
        colname_type_dict (dict): column name -> type string produced by
            ``type_column``
    """
    # we need to make sure we add aggr columns here ----------------------
    return {colname: type_column(df[colname]) for colname in df.columns}
class AggregateTransformer(BaseEstimator, TransformerMixin):
    """Base transformer bound to one target column and one grouping key."""

    def __init__(self, colname, group_key):
        """Init

        Args:
            colname (str): column name of an input DataFrame
            group_key (str): column name used for grouping
        """
        self.colname = colname
        self.group_key = group_key

    def select_item(self, X):
        """Return only the (group_key, colname) columns of *X*."""
        wanted = [self.group_key, self.colname]
        return X[wanted]
class NumericalAggregator(AggregateTransformer):
    """Transforms numerical values to basic statistics.

    Per ``group_key`` group, the configured statistics (by default
    sum / mean / std / count) are computed and returned as columns
    sorted by function name.
    """
    def __init__(self, colname, group_key, functions=None):
        """Initialization
        Args:
            colname (str): Column name used for aggregation
            group_key (str): Column name used for groupby
            functions (dict): Dictionary of function names and functions
                Default: {'sum': np.sum,
                          'mean': np.mean,
                          'std': np.std,
                          'count': 'count'}
        """
        super(NumericalAggregator, self).__init__(colname, group_key)
        if functions is None:
            functions = {'sum': np.sum,
                         'mean': np.mean,
                         'std': np.std,
                         'count': 'count'}
        self.functions = functions
        assert colname != group_key, "group key equal to colname"
    def fit(self, X, y=None):
        """Fit function
        Args:
            X (pandas dataframe): df to pick colname from
        Returns:
            self
        """
        # Stateless: plain statistics need nothing learned at fit time.
        return self
    def transform(self, X):
        """Transform function
        Args:
            X (pandas.DataFrame): df to pick colname from
        Returns:
            result (pandas.DataFrame): aggregated values
        """
        grouped = X.groupby(self.group_key)
        # NOTE(review): passing dict_values directly to .agg works on the
        # pandas version this was written for; newer pandas may require an
        # explicit list — verify against the pinned pandas version.
        result_df = grouped.agg(self.functions.values())
        # Drop top-level columns to sort by function names
        result_df.columns = result_df.columns.droplevel()
        result_df = result_df[sorted(self.functions.keys())]
        # Make sure returned columns are consistent with key names of functions
        assert sorted(self.functions.keys()) == result_df.columns.tolist()
        return result_df
    def get_feature_names(self):
        """Provides column names for features."""
        return[text_type(x) for x in sorted(self.functions.keys())]
class TextualAggregator(AggregateTransformer):
    """Aggregates textual values.

    All texts belonging to one group are whitespace-joined into a single
    document, which is then vectorized.
    """
    def __init__(self, colname, group_key, vectorizer=None):
        """Initialization
        Args:
            colname (str): Column name used for aggregation
            group_key (str): Column name used for groupby
            vectorizer (Vectorizer):
                sklearn.feature_extraction.text.
                CountVectorizer()
        """
        super(TextualAggregator, self).__init__(colname, group_key)
        assert colname != group_key
        if vectorizer is None:
            self.vectorizer = CountVectorizer()
        else:
            self.vectorizer = vectorizer
    def fit(self, df, y=None):
        """Fit function
        Args:
            df (pd.DataFrame): df to pick colname from
        Returns:
            self
        """
        assert self.colname in df.columns
        assert self.group_key in df.columns
        # Concatenate textual contents of a user into a single string
        texts = df.groupby([self.group_key]).agg(
            {self.colname: " ".join})[self.colname].tolist()
        self.vectorizer.fit(texts)
        return self
    def transform(self, df):
        """Transform function
        Args:
            df (pd.DataFrame): df to pick colname from
        Returns:
            result_df (pd.DataFrame): Extracted features
        """
        assert self.colname in df.columns
        assert self.group_key in df.columns
        # Same above
        agg_df = df.groupby([self.group_key]).agg({self.colname: " ".join})
        texts = agg_df[self.colname].tolist()
        # todense() yields a dense matrix so it can back a DataFrame
        X = self.vectorizer.transform(texts).todense()
        result_df = pd.DataFrame(X)
        # Restore the group keys as the index so callers can align rows.
        result_df.index = agg_df.index
        # The original order of the DataFrame should be preserved.
        return result_df
    def get_feature_names(self):
        """Returns feature names."""
        return self.vectorizer.get_feature_names()
class CategoryAggregator(AggregateTransformer):
    """Transforms a column of categorical values to a row of tfidf values."""
    def __init__(self, colname, group_key, vectorizer=None):
        """Initialization
        Args:
            colname (str): Column name used for aggregation
            group_key (str): Column name used for groupby
            vectorizer (Vectorizer): defaults to a TfidfVectorizer whose
                token_pattern also keeps single-character tokens (the
                default pattern would drop them)
        """
        super(CategoryAggregator, self).__init__(colname, group_key)
        assert colname != group_key, "group key equal to colname"
        if vectorizer is None:
            self.vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
        else:
            self.vectorizer = vectorizer
    def fit(self, X, y=None):
        """Fit function
        Args:
            X (pandas dataframe): df to pick colname from
        Returns:
            self
        """
        df = X[[self.group_key, self.colname]]
        cat_values = list(df[self.colname])
        # cat values might be numbers, so we force them to strings
        cat_values = list(map(lambda x: text_type(x), cat_values))
        # NOTE(review): the vectorizer is fit on ONE document containing all
        # category tokens, which makes every idf weight equal — confirm this
        # is intended.
        text = ' '.join(cat_values)
        self.vectorizer.fit([text])
        return self
    def transform(self, X):
        """Transform function
        Args:
            X (pandas.DataFrame): df to pick colname from
        Returns:
            result (pandas.DataFrame): aggregated values
        """
        df = X[[self.group_key, self.colname]]
        grouped = df.groupby(self.group_key)
        # One whitespace-joined document per group, then vectorize it.
        grouped_agg = grouped.agg(" ".join)
        result = self.vectorizer.transform(grouped_agg[self.colname])
        # NOTE(review): unlike TextualAggregator, the group keys are NOT
        # restored as the index here; rows are positional. Verify alignment
        # with the caller before relying on it.
        result_df = pd.DataFrame(result.todense())
        return result_df
    def get_feature_names(self):
        """Provides column names for features."""
        return self.vectorizer.get_feature_names()
class DefaultAggregatorCatalog:
    """Default mapping from column type to subtable aggregators.

    Analogous to DefaultTransformerCatalog but for subsidiary tables:
    each entry maps a column type to (AggregatorClass, kwargs) pairs.
    NOTE(review): there is no 'date' entry, so date columns in subtables
    are skipped when the aggregation feature union is built.
    """
    # character 2-3 grams (char_wb) used for Japanese text columns
    ja_vec = CountVectorizer(analyzer="char_wb", ngram_range=(2, 3))
    transformer_dict = {'text': [(TextualAggregator, {}),
                                 (LDAVectorizer, {})],
                        'text_ja': [(TextualAggregator,
                                     {"vectorizer": ja_vec})],
                        'numerical': [(NumericalAggregator, {})],
                        'categorical': [(CategoryAggregator, {})]}
def check_transformer(input_df, colname, transformer):
    """Smoke-test a user-defined transformer on one column.

    Args:
        input_df (pd.DataFrame): data containing *colname*
        colname (str): target column name
        transformer (Transformer): Transformer instance
    Returns:
        The extracted feature matrix, or None when the transformer raised.
    """
    # TODO(Yoshi): Replace with exception
    assert colname in input_df
    try:
        features = transformer.fit_transform(transformer.select_item(input_df))
    except Exception as err:
        print("Something wrong happened. :(")
        print(err)
        return None
    print("Feature(s) successfully extracted! :)")
    print("# of features={}".format(features.shape[1]))
    return features
| [
"keras.preprocessing.image.img_to_array",
"sklearn.preprocessing.LabelEncoder",
"sklearn.externals.joblib.delayed",
"numpy.hstack",
"numpy.array",
"sklearn.externals.joblib.Parallel",
"six.text_type",
"os.path.exists",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.empty",
"pandas.Dat... | [((46751, 46806), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""char_wb"""', 'ngram_range': '(2, 3)'}), "(analyzer='char_wb', ngram_range=(2, 3))\n", (46766, 46806), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((5843, 5857), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5855, 5857), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5888, 5903), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (5901, 5903), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((8418, 8451), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['X[self.colname]'], {}), '(X[self.colname])\n', (8434, 8451), True, 'import pandas as pd\n'), ((8472, 8521), 'pandas.DataFrame', 'pd.DataFrame', (['dates.year.values'], {'columns': "['Year']"}), "(dates.year.values, columns=['Year'])\n", (8484, 8521), True, 'import pandas as pd\n'), ((12176, 12225), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), "(**kwargs_dict['CountVectorizer'])\n", (12191, 12225), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((12258, 12327), 'sklearn.decomposition.LatentDirichletAllocation', 'LatentDirichletAllocation', ([], {}), "(**kwargs_dict['LatentDirichletAllocation'])\n", (12283, 12327), False, 'from sklearn.decomposition import LatentDirichletAllocation\n'), ((15691, 15702), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (15699, 15702), True, 'import numpy as np\n'), ((16452, 16470), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (16460, 16470), True, 'import numpy as np\n'), ((18497, 18511), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (18509, 18511), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((29166, 29182), 'numpy.nan_to_num', 'np.nan_to_num', (['X'], {}), '(X)\n', (29179, 29182), True, 'import numpy as 
np\n'), ((36568, 36583), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (36580, 36583), True, 'import pandas as pd\n'), ((44811, 44826), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (44823, 44826), True, 'import pandas as pd\n'), ((1602, 1630), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (1610, 1630), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((3160, 3188), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (3168, 3188), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((3460, 3485), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 0)'], {}), '((X.shape[0], 0))\n', (3468, 3485), True, 'import numpy as np\n'), ((3706, 3719), 'numpy.hstack', 'np.hstack', (['Xs'], {}), '(Xs)\n', (3715, 3719), True, 'import numpy as np\n'), ((4264, 4292), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (4272, 4292), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((4498, 4523), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 0)'], {}), '((X.shape[0], 0))\n', (4506, 4523), True, 'import numpy as np\n'), ((4652, 4665), 'numpy.hstack', 'np.hstack', (['Xs'], {}), '(Xs)\n', (4661, 4665), True, 'import numpy as np\n'), ((10787, 10799), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', (10796, 10799), False, 'from six import text_type\n'), ((14206, 14253), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (14214, 14253), False, 'from keras.applications.resnet50 import ResNet50\n'), ((14449, 14499), 'keras.applications.densenet.DenseNet121', 'DenseNet121', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (14460, 14499), False, 'from keras.applications.densenet import 
DenseNet121\n'), ((14689, 14783), 'keras.applications.mobilenet.MobileNet', 'MobileNet', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(weights='imagenet', include_top=False, input_shape=(224, 224, 3),\n pooling='avg')\n", (14698, 14783), False, 'from keras.applications.mobilenet import MobileNet\n'), ((16227, 16293), 'keras.preprocessing.image.load_img', 'image.load_img', (['path'], {'target_size': "self.trained_model['image_size']"}), "(path, target_size=self.trained_model['image_size'])\n", (16241, 16293), False, 'from keras.preprocessing import image\n'), ((16343, 16366), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (16361, 16366), False, 'from keras.preprocessing import image\n'), ((16383, 16402), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (16399, 16402), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), ((27521, 27549), 'numpy.empty', 'np.empty', (['[X_df.shape[0], 0]'], {}), '([X_df.shape[0], 0])\n', (27529, 27549), True, 'import numpy as np\n'), ((27742, 27753), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (27750, 27753), True, 'import numpy as np\n'), ((28029, 28044), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (28041, 28044), True, 'import pandas as pd\n'), ((31723, 31747), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (31737, 31747), False, 'import os\n'), ((31922, 31945), 'cPickle.dump', 'pickle.dump', (['self', 'fout'], {}), '(self, fout)\n', (31933, 31945), True, 'import cPickle as pickle\n'), ((32253, 32269), 'cPickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (32264, 32269), True, 'import cPickle as pickle\n'), ((37301, 37321), 'unicodedata.name', 'unicodedata.name', (['ch'], {}), '(ch)\n', (37317, 37321), False, 'import unicodedata\n'), ((43056, 43068), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', 
(43065, 43068), False, 'from six import text_type\n'), ((43716, 43733), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (43731, 43733), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((45469, 45516), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'token_pattern': '"""(?u)\\\\b\\\\w+\\\\b"""'}), "(token_pattern='(?u)\\\\b\\\\w+\\\\b')\n", (45484, 45516), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((3593, 3614), 'scipy.sparse.issparse', 'sp.sparse.issparse', (['f'], {}), '(f)\n', (3611, 3614), True, 'import scipy as sp\n'), ((4539, 4560), 'scipy.sparse.issparse', 'sp.sparse.issparse', (['f'], {}), '(f)\n', (4557, 4560), True, 'import scipy as sp\n'), ((26202, 26223), 'numpy.isnan', 'np.isnan', (['y_unique[0]'], {}), '(y_unique[0])\n', (26210, 26223), True, 'import numpy as np\n'), ((1644, 1673), 'sklearn.externals.joblib.delayed', 'delayed', (['_fit_one_transformer'], {}), '(_fit_one_transformer)\n', (1651, 1673), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((3202, 3229), 'sklearn.externals.joblib.delayed', 'delayed', (['_fit_transform_one'], {}), '(_fit_transform_one)\n', (3209, 3229), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((3646, 3666), 'scipy.sparse.hstack', 'sp.sparse.hstack', (['Xs'], {}), '(Xs)\n', (3662, 3666), True, 'import scipy as sp\n'), ((4306, 4329), 'sklearn.externals.joblib.delayed', 'delayed', (['_transform_one'], {}), '(_transform_one)\n', (4313, 4329), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((4592, 4612), 'scipy.sparse.hstack', 'sp.sparse.hstack', (['Xs'], {}), '(Xs)\n', (4608, 4612), True, 'import scipy as sp\n'), ((6371, 6383), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', (6380, 6383), False, 'from six import text_type\n'), ((15862, 15884), 'numpy.split', 'np.split', (['X', 'loop_size'], {}), '(X, 
loop_size)\n', (15870, 15884), True, 'import numpy as np\n'), ((45945, 45957), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', (45954, 45957), False, 'from six import text_type\n'), ((16184, 16195), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16193, 16195), False, 'import os\n'), ((23140, 23152), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', (23149, 23152), False, 'from six import text_type\n'), ((24456, 24468), 'six.text_type', 'text_type', (['x'], {}), '(x)\n', (24465, 24468), False, 'from six import text_type\n'), ((39906, 39921), 'dateutil.parser.parse', 'parser.parse', (['x'], {}), '(x)\n', (39918, 39921), False, 'from dateutil import parser\n'), ((24432, 24446), 'six.text_type', 'text_type', (['key'], {}), '(key)\n', (24441, 24446), False, 'from six import text_type\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
D3net Architecture definition for MSS.
'''
import math
import os
import sys
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.parameter import get_parameter_or_create
import nnabla.initializer as I
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from d3net_basic_blocks import D3NetBase
def stft(x, n_fft=4096, n_hop=1024, center=True, patch_length=None):
    '''
    Multichannel STFT.

    Input: (nb_samples, nb_channels, nb_timesteps)
    Output: real and imaginary parts, each shaped
        (nb_samples, nb_channels, nb_bins, nb_frames)
    '''
    nb_samples, nb_channels, _ = x.shape
    # Fold the channel axis into the batch axis so one F.stft call
    # covers every channel.
    folded = F.reshape(x, (nb_samples * nb_channels, -1))
    real, imag = F.stft(folded, n_fft, n_hop, n_fft,
                        window_type='hanning', center=center, pad_mode='reflect')
    spec_shape = (nb_samples, nb_channels, n_fft // 2 + 1, -1)
    real = F.reshape(real, spec_shape)
    imag = F.reshape(imag, spec_shape)
    if patch_length is None:
        return real, imag
    # keep only the leading patch_length frames (e.g. 256 of 259)
    return real[..., :patch_length], imag[..., :patch_length]
def spectogram(real, imag, power=1, mono=True):
    '''
    Convert real/imag STFT parts into a power-law spectrogram.

    Input: (nb_samples, nb_channels, nb_bins, nb_frames),
           (nb_samples, nb_channels, nb_bins, nb_frames)
    Output: (nb_samples, nb_frames, nb_channels, nb_bins)
    '''
    magnitude = (real ** 2 + imag ** 2) ** (power / 2.0)
    if mono:
        # average over the channel axis, keeping it for broadcasting
        magnitude = F.mean(magnitude, axis=1, keepdims=True)
    return F.transpose(magnitude, (0, 3, 1, 2))
class D3NetMSS(D3NetBase):
    """D3Net architecture for music source separation (MSS).

    NOTE(review): ``__call__`` transposes the input with (0, 2, 1, 3), so
    the expected layout appears to match the output of ``spectogram`` —
    (nb_samples, nb_frames, nb_channels, nb_bins); confirm against callers.
    """
    def __init__(self, hparams, comm=None, test=False, recompute=False, init_method=None, input_mean=None, input_scale=None):
        """Create input-normalization and output-decoding parameters.

        Args:
            hparams (dict): architecture hyper-parameters (fft_size,
                valid_signal_idx, per-band configuration, ...)
            comm / test / recompute / init_method: forwarded to D3NetBase
            input_mean, input_scale: per-frequency-bin statistics used to
                normalize the input; identity (mean 0, scale 1) when absent
        """
        super(D3NetMSS, self).__init__(comm=comm, test=test,
                                       recompute=recompute, init_method=init_method)
        self.hparams = hparams
        if input_mean is None or input_scale is None:
            input_mean = np.zeros((1, 1, 1, self.hparams['fft_size']//2+1))
            input_scale = np.ones((1, 1, 1, self.hparams['fft_size']//2+1))
        else:
            input_mean = input_mean.reshape(
                (1, 1, 1, self.hparams['fft_size']//2+1))
            input_scale = input_scale.reshape(
                (1, 1, 1, self.hparams['fft_size']//2+1))
        self.in_offset = get_parameter_or_create(
            'in_offset', shape=input_mean.shape, initializer=input_mean)
        self.in_scale = get_parameter_or_create(
            'in_scale', shape=input_scale.shape, initializer=input_scale)
        # Output de-normalization parameters over the valid frequency range,
        # both initialized to 1.
        self.decode_scale = get_parameter_or_create(
            'decode_scale', (1, 1, 1, self.hparams['valid_signal_idx']), initializer=I.ConstantInitializer(value=1))
        self.decode_bias = get_parameter_or_create(
            'decode_bias', (1, 1, 1, self.hparams['valid_signal_idx']), initializer=I.ConstantInitializer(value=1))
    def dilated_dense_block_2(self, inp, growth_rate, num_layers, scope_name, name='bottleneck'):
        '''
        Dilated Dense Block-2: a BN+conv bottleneck ('initial_layer')
        followed by the base class's dilated dense block, all created
        under ``scope_name``.
        '''
        with nn.parameter_scope(scope_name):
            with nn.parameter_scope('initial_layer'):
                out = self.bn_conv_block(
                    inp, growth_rate*num_layers, name=name)
            return self.dilated_dense_block(out, growth_rate, num_layers, name=name)
    def upsampling_layer(self, inp, num_input_features, comp_rate=1.):
        '''
        Define Upsampling Layer: batch norm followed by a stride-2
        transposed convolution; ``comp_rate`` < 1 compresses the number
        of output channels.
        '''
        num_filters = int(math.ceil(comp_rate * num_input_features))
        with nn.parameter_scope('upsample'):
            out = self.batch_norm(inp, name='norm')
            out = PF.deconvolution(out, num_filters, kernel=(
                2, 2), stride=(2, 2), name='transconv')
            return out
    def d3_block(self, inp, growth_rate, num_layers, n_blocks):
        '''
        Define D3Block: ``n_blocks`` nested dilated dense blocks whose
        growth_rate-sized output slices are accumulated onto each other.
        '''
        out = self.dilated_dense_block_2(
            inp, growth_rate*n_blocks, num_layers, scope_name='initial_block')
        if n_blocks > 1:
            # Split the initial output into n_blocks slices of growth_rate
            # channels each.
            lst = []
            for i in range(n_blocks):
                lst.append(out[:, i*growth_rate:(i+1)*growth_rate])
            def update(inp_, n):
                # add block n's output slices onto all following slices
                for j in range(n_blocks-n-1):
                    lst[j+1+n] += inp_[:, j*growth_rate:(j+1)*growth_rate]
            for i in range(n_blocks-1):
                tmp = self.dilated_dense_block_2(
                    lst[i], growth_rate*(n_blocks-i-1), num_layers, scope_name='layers/layer%s' % (i+1))
                update(tmp, i)
            out = F.concatenate(*lst, axis=1)
        # only the last growth_rate channels form the block output
        return out[:, -growth_rate:]
    def md3_block_ds(self, inp, in_channels, ks, n_layers, n_blocks, comp_rates, name=''):
        '''
        Define MD3BlockDS: a U-Net-like encoder/decoder built from D3
        blocks. Returns the list of intermediate outputs (bottleneck
        first, then one per up-sampling stage).
        '''
        if not len(ks) == len(n_layers):
            print('length of ks and n_layers should be match.')
        if min(len(ks), len(n_layers)) % 2 == 0:
            sys.stderr.write(
                'length of ks, n_layers and comp_rates must be odd.')
        # first half of each config list describes the down-sampling path,
        # the middle entry the bottleneck, the rest the up-sampling path
        ds_len = (len(n_layers) - 1) // 2
        ds = []
        out = inp
        out_layers = []
        with nn.parameter_scope(name + '/ds_layers'):
            # Down-sampling path
            n_channels = in_channels
            dense_blk_cnt = 0
            ds_concat_channels = []
            for k, nl, comp, b in zip(ks[:ds_len], n_layers[:ds_len], comp_rates[:ds_len], n_blocks[:ds_len]):
                with nn.parameter_scope('dense_block%s' % dense_blk_cnt):
                    out = self.d3_block(out, k, nl, b)
                ds_concat_channels.append(k)
                n_channels = k
                # keep the pre-pooling activation for the skip connection
                ds.append(out)
                out = F.average_pooling(out, kernel=(2, 2), stride=(2, 2))
                if comp < 1.:
                    n_channels = int(math.ceil(comp * n_channels))
                dense_blk_cnt += 1
        # concatenation happens in reverse order, so reverse the list
        ds = ds[::-1]
        # bottleneck block
        with nn.parameter_scope(name + '/bottleneck_block'):
            out = self.d3_block(
                out, ks[ds_len], n_layers[ds_len], n_blocks[ds_len])
            dense_blk_cnt += 1
            out_layers.append(out)
        ds_concat_channels = ds_concat_channels[::-1]
        n_channels = ks[ds_len]
        # Up-sampling path
        cnt = 0
        with nn.parameter_scope(name + '/us_layers'):
            for k, nl, comp, b, i in zip(ks[ds_len + 1:], n_layers[ds_len + 1:], comp_rates[ds_len:], n_blocks[ds_len:], range(ds_len)):
                with nn.parameter_scope('upsample%s' % i):
                    out = self.upsampling_layer(out, n_channels, comp)
                # skip connection with the matching down-sampling output
                out = F.concatenate(out, ds[cnt], axis=1)
                cnt += 1
                if comp < 1.:
                    n_channels = int(math.ceil(comp * n_channels))
                n_channels += ds_concat_channels[i]
                with nn.parameter_scope('dense_block%s' % dense_blk_cnt):
                    out = self.d3_block(out, k, nl, b)
                    out_layers.append(out)
                n_channels = k
                dense_blk_cnt += 1
        return out_layers
    def __call__(self, inp):
        '''
        Define D3Net: normalize the input, run per-band and full-band MD3
        blocks, merge them, and decode a sigmoid-gated output.
        '''
        valid_signal_idx = self.hparams['valid_signal_idx']
        band_split_idxs = self.hparams['band_split_idxs'] + \
            [self.hparams['valid_signal_idx']]
        # move the frequency axis last so band slicing and per-bin
        # scaling broadcast correctly
        inp = F.transpose(inp, (0, 2, 1, 3))
        scaled_inp = (inp - self.in_offset)/self.in_scale
        # widest final growth rate across bands; narrower band outputs are
        # projected up to this channel count before concatenation
        max_final_k = 0
        for k in self.hparams['dens_k']:
            if max_final_k < k[-1]:
                max_final_k = k[-1]
        i = 0
        band_idx_start = 0
        band_out = []
        band_dense_out = []
        # Low ~ middle bands
        for num_init_features, dens_k, num_layer_block, b_n_block, comp_rates in zip(self.hparams['num_init_features'], self.hparams['dens_k'], self.hparams['num_layer_blocks'], self.hparams['b_n_blocks'], self.hparams['comp_rates']):
            x_band = scaled_inp[:, :, :, band_idx_start:band_split_idxs[i]]
            x_band = self.conv2d(x_band, num_init_features, kernel_size=3,
                                 stride=1, name='features_init/%s' % i, pad=1)
            dense_band = self.md3_block_ds(
                x_band, num_init_features, dens_k, num_layer_block, b_n_block, comp_rates, name='dense_band/%s' % i)
            band_dense_out.append(dense_band[::-1])
            if max_final_k > self.hparams['dens_k'][i][-1]:
                # 1x1 conv to match the widest band's channel count
                h = self.batch_norm(
                    band_dense_out[-1][0], name='match_fm_conv/%s/norm' % i)
                out = self.conv2d(h, max_final_k, kernel_size=1,
                                  stride=1, name='match_fm_conv/%s/conv' % i)
                band_out.append(out)
            else:
                band_out.append(band_dense_out[-1][0])
            band_idx_start = band_split_idxs[i]
            i += 1
        # full bands
        full = self.conv2d(scaled_inp[:, :, :, :valid_signal_idx], self.hparams['f_num_init_features'], kernel_size=3,
                           stride=1, name='features_init_full', pad=1)
        full = self.md3_block_ds(full, self.hparams['f_num_init_features'], self.hparams['f_dens_k'], self.hparams['f_num_layer_block'],
                                 self.hparams['f_n_blocks'], self.hparams['f_comp_rates'], name='dense_full')
        # concat low~middle bands and then with full bands
        concat_bands = F.concatenate(*band_out, axis=3)
        concat_full = F.concatenate(*[concat_bands, full[-1]], axis=1)
        # Final dense block
        final = self.dilated_dense_block_2(
            concat_full, self.hparams['ttl_dens_k'], self.hparams['ttl_num_layer_block'], scope_name='final_dense')
        # Define BNC_Gate : Batch-Normalization, Convolution and Sigmoid Gate
        with nn.parameter_scope('out_gate'):
            bn_out = self.batch_norm(final, name='bn')
            gate = F.sigmoid(self.conv2d(
                bn_out, self.hparams['n_channels'], kernel_size=1, stride=1, name='conv_gate/conv'))
            filt = self.conv2d(
                bn_out, self.hparams['n_channels'], kernel_size=1, stride=1, name='conv_filt/conv')
            out = gate * filt
        # de-normalize the valid range, then append the untouched upper bins
        out = out * self.decode_scale + self.decode_bias
        out = F.relu(out)
        out = F.concatenate(*[out, inp[:, :, :, valid_signal_idx:]], axis=3)
        out = F.transpose(out, (0, 2, 1, 3))
        return out
| [
"nnabla.parameter.get_parameter_or_create",
"nnabla.functions.transpose",
"math.ceil",
"numpy.ones",
"nnabla.initializer.ConstantInitializer",
"nnabla.functions.concatenate",
"nnabla.parametric_functions.deconvolution",
"os.path.dirname",
"numpy.zeros",
"nnabla.parameter_scope",
"nnabla.function... | [((1309, 1353), 'nnabla.functions.reshape', 'F.reshape', (['x', '(nb_samples * nb_channels, -1)'], {}), '(x, (nb_samples * nb_channels, -1))\n', (1318, 1353), True, 'import nnabla.functions as F\n'), ((1369, 1461), 'nnabla.functions.stft', 'F.stft', (['x', 'n_fft', 'n_hop', 'n_fft'], {'window_type': '"""hanning"""', 'center': 'center', 'pad_mode': '"""reflect"""'}), "(x, n_fft, n_hop, n_fft, window_type='hanning', center=center,\n pad_mode='reflect')\n", (1375, 1461), True, 'import nnabla.functions as F\n'), ((1493, 1555), 'nnabla.functions.reshape', 'F.reshape', (['real', '(nb_samples, nb_channels, n_fft // 2 + 1, -1)'], {}), '(real, (nb_samples, nb_channels, n_fft // 2 + 1, -1))\n', (1502, 1555), True, 'import nnabla.functions as F\n'), ((1567, 1629), 'nnabla.functions.reshape', 'F.reshape', (['imag', '(nb_samples, nb_channels, n_fft // 2 + 1, -1)'], {}), '(imag, (nb_samples, nb_channels, n_fft // 2 + 1, -1))\n', (1576, 1629), True, 'import nnabla.functions as F\n'), ((2182, 2213), 'nnabla.functions.transpose', 'F.transpose', (['spec', '(0, 3, 1, 2)'], {}), '(spec, (0, 3, 1, 2))\n', (2193, 2213), True, 'import nnabla.functions as F\n'), ((2134, 2169), 'nnabla.functions.mean', 'F.mean', (['spec'], {'axis': '(1)', 'keepdims': '(True)'}), '(spec, axis=1, keepdims=True)\n', (2140, 2169), True, 'import nnabla.functions as F\n'), ((3002, 3091), 'nnabla.parameter.get_parameter_or_create', 'get_parameter_or_create', (['"""in_offset"""'], {'shape': 'input_mean.shape', 'initializer': 'input_mean'}), "('in_offset', shape=input_mean.shape, initializer=\n input_mean)\n", (3025, 3091), False, 'from nnabla.parameter import get_parameter_or_create\n'), ((3124, 3214), 'nnabla.parameter.get_parameter_or_create', 'get_parameter_or_create', (['"""in_scale"""'], {'shape': 'input_scale.shape', 'initializer': 'input_scale'}), "('in_scale', shape=input_scale.shape, initializer=\n input_scale)\n", (3147, 3214), False, 'from nnabla.parameter import 
get_parameter_or_create\n'), ((8079, 8109), 'nnabla.functions.transpose', 'F.transpose', (['inp', '(0, 2, 1, 3)'], {}), '(inp, (0, 2, 1, 3))\n', (8090, 8109), True, 'import nnabla.functions as F\n'), ((10142, 10174), 'nnabla.functions.concatenate', 'F.concatenate', (['*band_out'], {'axis': '(3)'}), '(*band_out, axis=3)\n', (10155, 10174), True, 'import nnabla.functions as F\n'), ((10197, 10245), 'nnabla.functions.concatenate', 'F.concatenate', (['*[concat_bands, full[-1]]'], {'axis': '(1)'}), '(*[concat_bands, full[-1]], axis=1)\n', (10210, 10245), True, 'import nnabla.functions as F\n'), ((10987, 10998), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (10993, 10998), True, 'import nnabla.functions as F\n'), ((11013, 11075), 'nnabla.functions.concatenate', 'F.concatenate', (['*[out, inp[:, :, :, valid_signal_idx:]]'], {'axis': '(3)'}), '(*[out, inp[:, :, :, valid_signal_idx:]], axis=3)\n', (11026, 11075), True, 'import nnabla.functions as F\n'), ((11090, 11120), 'nnabla.functions.transpose', 'F.transpose', (['out', '(0, 2, 1, 3)'], {}), '(out, (0, 2, 1, 3))\n', (11101, 11120), True, 'import nnabla.functions as F\n'), ((907, 932), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (922, 932), False, 'import os\n'), ((2627, 2681), 'numpy.zeros', 'np.zeros', (["(1, 1, 1, self.hparams['fft_size'] // 2 + 1)"], {}), "((1, 1, 1, self.hparams['fft_size'] // 2 + 1))\n", (2635, 2681), True, 'import numpy as np\n'), ((2704, 2757), 'numpy.ones', 'np.ones', (["(1, 1, 1, self.hparams['fft_size'] // 2 + 1)"], {}), "((1, 1, 1, self.hparams['fft_size'] // 2 + 1))\n", (2711, 2757), True, 'import numpy as np\n'), ((3727, 3757), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope_name'], {}), '(scope_name)\n', (3745, 3757), True, 'import nnabla as nn\n'), ((4154, 4195), 'math.ceil', 'math.ceil', (['(comp_rate * num_input_features)'], {}), '(comp_rate * num_input_features)\n', (4163, 4195), False, 'import math\n'), ((4210, 4240), 
'nnabla.parameter_scope', 'nn.parameter_scope', (['"""upsample"""'], {}), "('upsample')\n", (4228, 4240), True, 'import nnabla as nn\n'), ((4312, 4399), 'nnabla.parametric_functions.deconvolution', 'PF.deconvolution', (['out', 'num_filters'], {'kernel': '(2, 2)', 'stride': '(2, 2)', 'name': '"""transconv"""'}), "(out, num_filters, kernel=(2, 2), stride=(2, 2), name=\n 'transconv')\n", (4328, 4399), True, 'import nnabla.parametric_functions as PF\n'), ((5215, 5242), 'nnabla.functions.concatenate', 'F.concatenate', (['*lst'], {'axis': '(1)'}), '(*lst, axis=1)\n', (5228, 5242), True, 'import nnabla.functions as F\n'), ((5588, 5658), 'sys.stderr.write', 'sys.stderr.write', (['"""length of ks, n_layers and comp_rates must be odd."""'], {}), "('length of ks, n_layers and comp_rates must be odd.')\n", (5604, 5658), False, 'import sys\n'), ((5790, 5829), 'nnabla.parameter_scope', 'nn.parameter_scope', (["(name + '/ds_layers')"], {}), "(name + '/ds_layers')\n", (5808, 5829), True, 'import nnabla as nn\n'), ((6655, 6701), 'nnabla.parameter_scope', 'nn.parameter_scope', (["(name + '/bottleneck_block')"], {}), "(name + '/bottleneck_block')\n", (6673, 6701), True, 'import nnabla as nn\n'), ((7006, 7045), 'nnabla.parameter_scope', 'nn.parameter_scope', (["(name + '/us_layers')"], {}), "(name + '/us_layers')\n", (7024, 7045), True, 'import nnabla as nn\n'), ((10527, 10557), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""out_gate"""'], {}), "('out_gate')\n", (10545, 10557), True, 'import nnabla as nn\n'), ((3361, 3391), 'nnabla.initializer.ConstantInitializer', 'I.ConstantInitializer', ([], {'value': '(1)'}), '(value=1)\n', (3382, 3391), True, 'import nnabla.initializer as I\n'), ((3529, 3559), 'nnabla.initializer.ConstantInitializer', 'I.ConstantInitializer', ([], {'value': '(1)'}), '(value=1)\n', (3550, 3559), True, 'import nnabla.initializer as I\n'), ((3776, 3811), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""initial_layer"""'], {}), "('initial_layer')\n", 
(3794, 3811), True, 'import nnabla as nn\n'), ((6336, 6388), 'nnabla.functions.average_pooling', 'F.average_pooling', (['out'], {'kernel': '(2, 2)', 'stride': '(2, 2)'}), '(out, kernel=(2, 2), stride=(2, 2))\n', (6353, 6388), True, 'import nnabla.functions as F\n'), ((6099, 6150), 'nnabla.parameter_scope', 'nn.parameter_scope', (["('dense_block%s' % dense_blk_cnt)"], {}), "('dense_block%s' % dense_blk_cnt)\n", (6117, 6150), True, 'import nnabla as nn\n'), ((7205, 7241), 'nnabla.parameter_scope', 'nn.parameter_scope', (["('upsample%s' % i)"], {}), "('upsample%s' % i)\n", (7223, 7241), True, 'import nnabla as nn\n'), ((7340, 7375), 'nnabla.functions.concatenate', 'F.concatenate', (['out', 'ds[cnt]'], {'axis': '(1)'}), '(out, ds[cnt], axis=1)\n', (7353, 7375), True, 'import nnabla.functions as F\n'), ((7575, 7626), 'nnabla.parameter_scope', 'nn.parameter_scope', (["('dense_block%s' % dense_blk_cnt)"], {}), "('dense_block%s' % dense_blk_cnt)\n", (7593, 7626), True, 'import nnabla as nn\n'), ((6456, 6484), 'math.ceil', 'math.ceil', (['(comp * n_channels)'], {}), '(comp * n_channels)\n', (6465, 6484), False, 'import math\n'), ((7472, 7500), 'math.ceil', 'math.ceil', (['(comp * n_channels)'], {}), '(comp * n_channels)\n', (7481, 7500), False, 'import math\n')] |
import tempfile
import numpy as np
import pydicom
import pytest
from pydicom_seg import __version__
from pydicom_seg.dicom_utils import DimensionOrganizationSequence
from pydicom_seg.segmentation_dataset import (
SegmentationDataset,
SegmentationFractionalType,
SegmentationType
)
class TestSegmentationDataset:
    """Unit tests for ``SegmentationDataset``.

    Every test starts from a fresh 1x1 BINARY segmentation with one dummy
    segment (see ``setup``).  The ``test_mandatory_*`` tests assert the
    presence of data elements required by the DICOM standard — each docstring
    URL points to the corresponding table in PS3.3.  The remaining tests cover
    frame addition, source-image referencing and dimension organizations.
    """
    def setup(self):
        # Invoked by pytest before each test method.
        self.dataset = SegmentationDataset(
            rows=1,
            columns=1,
            segmentation_type=SegmentationType.BINARY
        )
        self.setup_dummy_segment(self.dataset)
    def setup_dummy_segment(self, dataset: pydicom.Dataset):
        """Append a minimal segment (SegmentNumber=1) to ``dataset.SegmentSequence``."""
        ds = pydicom.Dataset()
        ds.SegmentNumber = 1
        dataset.SegmentSequence.append(ds)
    def generate_dummy_source_image(self) -> pydicom.Dataset:
        """Create a stand-in CT source image with freshly generated instance/series UIDs."""
        ds = pydicom.Dataset()
        ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage
        ds.SOPInstanceUID = pydicom.uid.generate_uid()
        ds.SeriesInstanceUID = pydicom.uid.generate_uid()
        return ds
    def test_dataset_is_writable(self):
        """The dataset can be serialized to disk without raising."""
        with tempfile.NamedTemporaryFile() as ofile:
            self.dataset.save_as(ofile.name)
    def test_dataset_has_valid_file_meta(self):
        """The generated file meta information passes pydicom's validation."""
        pydicom.dataset.validate_file_meta(self.dataset.file_meta)
    def test_mandatory_sop_common(self):
        # '1.2.840.10008.5.1.4.1.1.66.4' is the Segmentation Storage SOP class.
        assert self.dataset.SOPClassUID == '1.2.840.10008.5.1.4.1.1.66.4'
        assert 'SOPInstanceUID' in self.dataset
    def test_mandatory_enhanced_equipment_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.5.2.html#table_C.7-8b"""
        assert self.dataset.Manufacturer == 'pydicom-seg'
        assert self.dataset.ManufacturerModelName == 'git@github.com/razorx89/pydicom-seg.git'
        assert self.dataset.DeviceSerialNumber == '0'
        assert self.dataset.SoftwareVersions == __version__
    def test_mandatory_frame_of_reference_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.4.html#table_C.7-6"""
        assert 'FrameOfReferenceUID' in self.dataset
    def test_mandatory_gernal_series_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.3.html#table_C.7-5a"""
        assert self.dataset.Modality == 'SEG'
        assert 'SeriesInstanceUID' in self.dataset
    def test_mandatory_segmentation_series_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.html#table_C.8.20-1"""
        assert self.dataset.Modality == 'SEG'
        assert self.dataset.SeriesNumber
    def test_mandatory_image_pixel_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.3.html#table_C.7-11a"""
        assert self.dataset.SamplesPerPixel >= 1
        assert self.dataset.PhotometricInterpretation in ['MONOCHROME1', 'MONOCHROME2']
        assert 'Rows' in self.dataset
        assert 'Columns' in self.dataset
        assert self.dataset.BitsAllocated in [1, 8, 16]
        assert 0 < self.dataset.BitsStored <= self.dataset.BitsAllocated
        assert self.dataset.HighBit == self.dataset.BitsStored - 1
        assert self.dataset.PixelRepresentation in [0, 1]
    def test_mandatory_and_common_segmentation_image_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
        assert 'ImageType' in self.dataset
        assert all([a == b for a, b in zip(self.dataset.ImageType, ['DERIVED', 'PRIMARY'])])
        assert self.dataset.InstanceNumber
        assert self.dataset.ContentLabel == 'SEGMENTATION'
        assert 'ContentCreatorName' in self.dataset
        assert 'ContentDescription' in self.dataset
        assert self.dataset.SamplesPerPixel == 1
        assert self.dataset.PhotometricInterpretation == 'MONOCHROME2'
        assert self.dataset.PixelRepresentation == 0
        assert self.dataset.LossyImageCompression == '00'
        assert 'SegmentSequence' in self.dataset
    def test_mandatory_binary_segmentation_image_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
        # BINARY segmentations are bit-packed: one bit per pixel.
        assert self.dataset.BitsAllocated == 1
        assert self.dataset.BitsStored == 1
        assert self.dataset.HighBit == 0
        assert self.dataset.SegmentationType == 'BINARY'
    @pytest.mark.parametrize('fractional_type', ['PROBABILITY', 'OCCUPANCY'])
    def test_mandatory_fractional_segmentation_image_elements(self, fractional_type):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.20.2.html#table_C.8.20-2"""
        dataset = SegmentationDataset(
            rows=1,
            columns=1,
            segmentation_type=SegmentationType.FRACTIONAL,
            segmentation_fractional_type=SegmentationFractionalType(fractional_type)
        )
        # FRACTIONAL segmentations use one byte per pixel.
        assert dataset.BitsAllocated == 8
        assert dataset.BitsStored == 8
        assert dataset.HighBit == 7 # Little Endian
        assert dataset.SegmentationType == 'FRACTIONAL'
        assert dataset.SegmentationFractionalType == fractional_type
        assert dataset.MaximumFractionalValue == 255
    def test_mandatory_multi_frame_functional_groups_elements(self):
        """http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.16.html#table_C.7.6.16-1"""
        assert 'SharedFunctionalGroupsSequence' in self.dataset
        assert len(self.dataset.SharedFunctionalGroupsSequence) == 1
        assert 'PerFrameFunctionalGroupsSequence' in self.dataset
        assert self.dataset.NumberOfFrames == 0
        assert self.dataset.InstanceNumber
        assert 'ContentDate' in self.dataset
        assert 'ContentTime' in self.dataset
    def test_timestamps_exist(self):
        """Instance creation, series and content timestamps exist and agree."""
        assert 'InstanceCreationDate' in self.dataset
        assert 'InstanceCreationTime' in self.dataset
        assert self.dataset.InstanceCreationDate == self.dataset.SeriesDate
        assert self.dataset.InstanceCreationTime == self.dataset.SeriesTime
        assert self.dataset.InstanceCreationDate == self.dataset.ContentDate
        assert self.dataset.InstanceCreationTime == self.dataset.ContentTime
    def test_exception_on_invalid_image_dimensions(self):
        with pytest.raises(ValueError, match='.*must be larger than zero'):
            SegmentationDataset(
                rows=0,
                columns=0,
                segmentation_type=SegmentationType.BINARY
            )
    @pytest.mark.parametrize('max_fractional_value', [-1, 0, 256])
    def test_exception_on_invalid_max_fractional_value(self, max_fractional_value):
        # -1, 0 and 256 lie outside the accepted single-byte value range.
        with pytest.raises(ValueError, match='Invalid maximum fractional value.*'):
            SegmentationDataset(
                rows=1,
                columns=1,
                segmentation_type=SegmentationType.FRACTIONAL,
                max_fractional_value=max_fractional_value,
            )
    def test_exception_when_adding_frame_with_wrong_rank(self):
        with pytest.raises(ValueError, match='.*expecting 2D image'):
            self.dataset.add_frame(np.zeros((1, 1, 1), dtype=np.uint8), 1)
    def test_exception_when_adding_frame_with_wrong_shape(self):
        with pytest.raises(ValueError, match='.*expecting \\d+x\\d+ images'):
            self.dataset.add_frame(np.zeros((2, 1), dtype=np.uint8), 1)
    @pytest.mark.parametrize('segmentation_type,dtype', [
        (SegmentationType.BINARY, np.float32),
        (SegmentationType.FRACTIONAL, np.uint8)
    ])
    def test_exception_when_adding_frame_with_wrong_data_type(self, segmentation_type, dtype):
        # dtype is deliberately mismatched to the segmentation type (see parametrize).
        dataset = SegmentationDataset(
            rows=1,
            columns=1,
            segmentation_type=segmentation_type
        )
        with pytest.raises(ValueError, match='.*requires.*?data type'):
            dataset.add_frame(np.zeros((1, 1), dtype=dtype), 1)
    def test_adding_frame_increases_number_of_frames(self):
        old_count = self.dataset.NumberOfFrames
        print(type(old_count))  # NOTE(review): leftover debug output -- consider removing
        self.dataset.add_frame(np.zeros((1, 1), dtype=np.uint8), 1)
        assert self.dataset.NumberOfFrames == old_count + 1
    def test_adding_binary_frame_modifies_pixel_data(self):
        # Binary frames are bit-packed: one 2x2 frame is 4 bits (1 byte);
        # three frames are 12 bits, i.e. only 2 bytes in total.
        dataset = SegmentationDataset(
            rows=2,
            columns=2,
            segmentation_type=SegmentationType.BINARY
        )
        self.setup_dummy_segment(dataset)
        assert len(dataset.PixelData) == 0
        dataset.add_frame(np.zeros((2, 2), dtype=np.uint8), 1)
        assert len(dataset.PixelData) == 1
        for _ in range(2):
            dataset.add_frame(np.ones((2, 2), dtype=np.uint8), 1)
        assert len(dataset.PixelData) == 2
    def test_adding_fractional_frame_modifies_pixel_data(self):
        # Fractional frames use one byte per pixel: each 2x2 frame adds 4 bytes.
        dataset = SegmentationDataset(
            rows=2,
            columns=2,
            segmentation_type=SegmentationType.FRACTIONAL
        )
        self.setup_dummy_segment(dataset)
        assert len(dataset.PixelData) == 0
        dataset.add_frame(np.zeros((2, 2), dtype=np.float32), 1)
        assert len(dataset.PixelData) == 4
        for _ in range(2):
            dataset.add_frame(np.ones((2, 2), dtype=np.float32), 1)
        assert len(dataset.PixelData) == 12
    def test_adding_frame_with_reference_creates_referenced_series_sequence(self):
        """Passing a source image to add_frame populates ReferencedSeriesSequence."""
        assert 'ReferencedSeriesSequence' not in self.dataset
        dummy = self.generate_dummy_source_image()
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy])
        assert 'ReferencedSeriesSequence' in self.dataset
        series_sequence = self.dataset.ReferencedSeriesSequence
        assert len(series_sequence) == 1
        assert series_sequence[0].SeriesInstanceUID == dummy.SeriesInstanceUID
        assert 'ReferencedInstanceSequence' in series_sequence[0]
        instance_sequence = series_sequence[0].ReferencedInstanceSequence
        assert len(instance_sequence) == 1
        assert instance_sequence[0].ReferencedSOPClassUID == dummy.SOPClassUID
        assert instance_sequence[0].ReferencedSOPInstanceUID == dummy.SOPInstanceUID
    def test_adding_frames_with_different_references_from_same_series(self):
        # Two instances of the same series share one ReferencedSeriesSequence item.
        dummy1 = self.generate_dummy_source_image()
        dummy2 = self.generate_dummy_source_image()
        dummy2.SeriesInstanceUID = dummy1.SeriesInstanceUID
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy1])
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy2])
        series_sequence = self.dataset.ReferencedSeriesSequence
        assert len(series_sequence) == 1
        assert series_sequence[0].SeriesInstanceUID == dummy1.SeriesInstanceUID
        instance_sequence = series_sequence[0].ReferencedInstanceSequence
        assert len(instance_sequence) == 2
        assert instance_sequence[0].ReferencedSOPInstanceUID == dummy1.SOPInstanceUID
        assert instance_sequence[1].ReferencedSOPInstanceUID == dummy2.SOPInstanceUID
    def test_adding_frames_with_different_references_from_different_series(self):
        # Instances from different series create one sequence item per series.
        dummies = [self.generate_dummy_source_image() for _ in range(2)]
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummies[0]])
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummies[1]])
        series_sequence = self.dataset.ReferencedSeriesSequence
        assert len(series_sequence) == 2
        assert series_sequence[0].SeriesInstanceUID == dummies[0].SeriesInstanceUID
        assert series_sequence[1].SeriesInstanceUID == dummies[1].SeriesInstanceUID
        instance_sequence = series_sequence[0].ReferencedInstanceSequence
        assert len(instance_sequence) == 1
        assert instance_sequence[0].ReferencedSOPInstanceUID == dummies[0].SOPInstanceUID
        instance_sequence = series_sequence[1].ReferencedInstanceSequence
        assert len(instance_sequence) == 1
        assert instance_sequence[0].ReferencedSOPInstanceUID == dummies[1].SOPInstanceUID
    def test_adding_instance_reference_multiple_times(self):
        # Re-adding the same instance is a no-op and reports False.
        dummy = self.generate_dummy_source_image()
        item_added = self.dataset.add_instance_reference(dummy)
        assert item_added
        item_added = self.dataset.add_instance_reference(dummy)
        assert not item_added
        series_sequence = self.dataset.ReferencedSeriesSequence
        assert len(series_sequence) == 1
        assert series_sequence[0].SeriesInstanceUID == dummy.SeriesInstanceUID
        assert len(series_sequence[0].ReferencedInstanceSequence) == 1
    def test_adding_frame_increases_count_of_per_functional_groups_sequence(self):
        assert len(self.dataset.PerFrameFunctionalGroupsSequence) == 0
        self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
        assert len(self.dataset.PerFrameFunctionalGroupsSequence) == 1
    def test_adding_frame_with_reference_adds_source_image_sequence_to_per_frame_functional_group_item(self):
        # SourceImageSequence appears only when source images are passed.
        frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
        assert 'SourceImageSequence' not in frame_item
        dummy = self.generate_dummy_source_image()
        frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1, [dummy])
        assert 'SourceImageSequence' in frame_item
        assert len(frame_item.SourceImageSequence) == 1
    def test_adding_frame_adds_referenced_segment_to_per_frame_functional_group_item(self):
        frame_item = self.dataset.add_frame(np.zeros((1, 1), np.uint8), 1)
        assert 'SegmentIdentificationSequence' in frame_item
        assert len(frame_item.SegmentIdentificationSequence) == 1
        segment_id_item = frame_item.SegmentIdentificationSequence[0]
        assert 'ReferencedSegmentNumber' in segment_id_item
        assert segment_id_item.ReferencedSegmentNumber == 1
    def test_exception_on_adding_frame_with_non_existing_segment(self):
        # Only segment number 1 exists (see setup); referencing 2 must raise.
        with pytest.raises(IndexError, match='Segment not found.*'):
            self.dataset.add_frame(np.zeros((1, 1), np.uint8), 2)
    def test_add_dimension_organization(self):
        """Adding an organization creates both organization and index sequences."""
        assert 'DimensionOrganizationSequence' not in self.dataset
        assert 'DimensionIndexSequence' not in self.dataset
        seq = DimensionOrganizationSequence()
        seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
        seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
        self.dataset.add_dimension_organization(seq)
        assert len(self.dataset.DimensionOrganizationSequence) == 1
        assert len(self.dataset.DimensionIndexSequence) == 2
        assert self.dataset.DimensionIndexSequence[0].DimensionDescriptionLabel == 'ReferencedSegmentNumber'
        assert self.dataset.DimensionIndexSequence[1].DimensionDescriptionLabel == 'ImagePositionPatient'
    def test_add_dimension_organization_duplicate(self):
        # Adding the same organization object twice must be rejected.
        seq = DimensionOrganizationSequence()
        seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
        seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
        self.dataset.add_dimension_organization(seq)
        with pytest.raises(ValueError, match='Dimension organization with UID.*'):
            self.dataset.add_dimension_organization(seq)
    def test_add_multiple_dimension_organizations(self):
        # Distinct organizations accumulate in both sequences.
        for _ in range(2):
            seq = DimensionOrganizationSequence()
            seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
            seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')
            self.dataset.add_dimension_organization(seq)
        assert len(self.dataset.DimensionOrganizationSequence) == 2
        assert len(self.dataset.DimensionIndexSequence) == 4
| [
"pydicom_seg.segmentation_dataset.SegmentationFractionalType",
"numpy.ones",
"pydicom_seg.dicom_utils.DimensionOrganizationSequence",
"pydicom_seg.segmentation_dataset.SegmentationDataset",
"pydicom.uid.generate_uid",
"pytest.mark.parametrize",
"numpy.zeros",
"pytest.raises",
"pydicom.Dataset",
"t... | [((4391, 4463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fractional_type"""', "['PROBABILITY', 'OCCUPANCY']"], {}), "('fractional_type', ['PROBABILITY', 'OCCUPANCY'])\n", (4414, 4463), False, 'import pytest\n'), ((6523, 6584), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_fractional_value"""', '[-1, 0, 256]'], {}), "('max_fractional_value', [-1, 0, 256])\n", (6546, 6584), False, 'import pytest\n'), ((7409, 7546), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""segmentation_type,dtype"""', '[(SegmentationType.BINARY, np.float32), (SegmentationType.FRACTIONAL, np.uint8)\n ]'], {}), "('segmentation_type,dtype', [(SegmentationType.\n BINARY, np.float32), (SegmentationType.FRACTIONAL, np.uint8)])\n", (7432, 7546), False, 'import pytest\n'), ((372, 458), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(1)', 'columns': '(1)', 'segmentation_type': 'SegmentationType.BINARY'}), '(rows=1, columns=1, segmentation_type=SegmentationType.\n BINARY)\n', (391, 458), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((622, 639), 'pydicom.Dataset', 'pydicom.Dataset', ([], {}), '()\n', (637, 639), False, 'import pydicom\n'), ((769, 786), 'pydicom.Dataset', 'pydicom.Dataset', ([], {}), '()\n', (784, 786), False, 'import pydicom\n'), ((888, 914), 'pydicom.uid.generate_uid', 'pydicom.uid.generate_uid', ([], {}), '()\n', (912, 914), False, 'import pydicom\n'), ((946, 972), 'pydicom.uid.generate_uid', 'pydicom.uid.generate_uid', ([], {}), '()\n', (970, 972), False, 'import pydicom\n'), ((1187, 1245), 'pydicom.dataset.validate_file_meta', 'pydicom.dataset.validate_file_meta', (['self.dataset.file_meta'], {}), '(self.dataset.file_meta)\n', (1221, 1245), False, 'import pydicom\n'), ((7677, 7752), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(1)', 'columns': '(1)', 
'segmentation_type': 'segmentation_type'}), '(rows=1, columns=1, segmentation_type=segmentation_type)\n', (7696, 7752), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((8282, 8368), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(2)', 'columns': '(2)', 'segmentation_type': 'SegmentationType.BINARY'}), '(rows=2, columns=2, segmentation_type=SegmentationType.\n BINARY)\n', (8301, 8368), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((8826, 8916), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(2)', 'columns': '(2)', 'segmentation_type': 'SegmentationType.FRACTIONAL'}), '(rows=2, columns=2, segmentation_type=SegmentationType.\n FRACTIONAL)\n', (8845, 8916), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((14234, 14265), 'pydicom_seg.dicom_utils.DimensionOrganizationSequence', 'DimensionOrganizationSequence', ([], {}), '()\n', (14263, 14265), False, 'from pydicom_seg.dicom_utils import DimensionOrganizationSequence\n'), ((14897, 14928), 'pydicom_seg.dicom_utils.DimensionOrganizationSequence', 'DimensionOrganizationSequence', ([], {}), '()\n', (14926, 14928), False, 'from pydicom_seg.dicom_utils import DimensionOrganizationSequence\n'), ((1045, 1074), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1072, 1074), False, 'import tempfile\n'), ((6298, 6359), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*must be larger than zero"""'}), "(ValueError, match='.*must be larger than zero')\n", (6311, 6359), False, 'import pytest\n'), ((6373, 6459), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(0)', 'columns': '(0)', 'segmentation_type': 
'SegmentationType.BINARY'}), '(rows=0, columns=0, segmentation_type=SegmentationType.\n BINARY)\n', (6392, 6459), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((6682, 6751), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid maximum fractional value.*"""'}), "(ValueError, match='Invalid maximum fractional value.*')\n", (6695, 6751), False, 'import pytest\n'), ((6765, 6898), 'pydicom_seg.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'rows': '(1)', 'columns': '(1)', 'segmentation_type': 'SegmentationType.FRACTIONAL', 'max_fractional_value': 'max_fractional_value'}), '(rows=1, columns=1, segmentation_type=SegmentationType.\n FRACTIONAL, max_fractional_value=max_fractional_value)\n', (6784, 6898), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((7055, 7110), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*expecting 2D image"""'}), "(ValueError, match='.*expecting 2D image')\n", (7068, 7110), False, 'import pytest\n'), ((7266, 7329), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*expecting \\\\d+x\\\\d+ images"""'}), "(ValueError, match='.*expecting \\\\d+x\\\\d+ images')\n", (7279, 7329), False, 'import pytest\n'), ((7812, 7869), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*requires.*?data type"""'}), "(ValueError, match='.*requires.*?data type')\n", (7825, 7869), False, 'import pytest\n'), ((8106, 8138), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {'dtype': 'np.uint8'}), '((1, 1), dtype=np.uint8)\n', (8114, 8138), True, 'import numpy as np\n'), ((8522, 8554), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.uint8'}), '((2, 2), dtype=np.uint8)\n', (8530, 8554), True, 'import numpy as np\n'), ((9070, 9104), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', 
(9078, 9104), True, 'import numpy as np\n'), ((9534, 9560), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (9542, 9560), True, 'import numpy as np\n'), ((10438, 10464), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (10446, 10464), True, 'import numpy as np\n'), ((10510, 10536), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (10518, 10536), True, 'import numpy as np\n'), ((11214, 11240), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (11222, 11240), True, 'import numpy as np\n'), ((11290, 11316), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (11298, 11316), True, 'import numpy as np\n'), ((12764, 12790), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (12772, 12790), True, 'import numpy as np\n'), ((13021, 13047), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (13029, 13047), True, 'import numpy as np\n'), ((13204, 13230), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (13212, 13230), True, 'import numpy as np\n'), ((13488, 13514), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (13496, 13514), True, 'import numpy as np\n'), ((13922, 13976), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Segment not found.*"""'}), "(IndexError, match='Segment not found.*')\n", (13935, 13976), False, 'import pytest\n'), ((15156, 15224), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Dimension organization with UID.*"""'}), "(ValueError, match='Dimension organization with UID.*')\n", (15169, 15224), False, 'import pytest\n'), ((15386, 15417), 'pydicom_seg.dicom_utils.DimensionOrganizationSequence', 'DimensionOrganizationSequence', ([], {}), '()\n', (15415, 15417), False, 'from pydicom_seg.dicom_utils import DimensionOrganizationSequence\n'), ((4844, 
4887), 'pydicom_seg.segmentation_dataset.SegmentationFractionalType', 'SegmentationFractionalType', (['fractional_type'], {}), '(fractional_type)\n', (4870, 4887), False, 'from pydicom_seg.segmentation_dataset import SegmentationDataset, SegmentationFractionalType, SegmentationType\n'), ((7147, 7182), 'numpy.zeros', 'np.zeros', (['(1, 1, 1)'], {'dtype': 'np.uint8'}), '((1, 1, 1), dtype=np.uint8)\n', (7155, 7182), True, 'import numpy as np\n'), ((7366, 7398), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {'dtype': 'np.uint8'}), '((2, 1), dtype=np.uint8)\n', (7374, 7398), True, 'import numpy as np\n'), ((7901, 7930), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {'dtype': 'dtype'}), '((1, 1), dtype=dtype)\n', (7909, 7930), True, 'import numpy as np\n'), ((8660, 8691), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.uint8'}), '((2, 2), dtype=np.uint8)\n', (8667, 8691), True, 'import numpy as np\n'), ((9210, 9243), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (9217, 9243), True, 'import numpy as np\n'), ((14013, 14039), 'numpy.zeros', 'np.zeros', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (14021, 14039), True, 'import numpy as np\n')] |
""" This module provides the functionality to calculate ephemeris for two bodies problem
also in the case of perturbed methods. More advance pertubed methods will be handled
in other module
"""
# Standard library imports
import logging
from math import isclose
from typing import ForwardRef
# Third party imports
import pandas as pd
import numpy as np
from numpy.linalg import norm
from toolz import pipe
# Local application imports
from myorbit.util.general import my_range, NoConvergenceError, my_isclose
import myorbit.data_catalog as dc
from myorbit.util.timeut import mjd2str_date
from myorbit.planets import g_xyz_equat_sun_j2000
from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver
from myorbit.kepler.ellipitical import calc_rv_for_elliptic_orbit, calc_M
from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0
from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors
from myorbit.pert_cowels import calc_eph_by_cowells
from myorbit.two_body import calc_eph_planet
from myorbit.util.timeut import EQX_B1950, EQX_J2000
from myorbit.ephemeris_input import EphemrisInput
from myorbit.pert_enckes import calc_eph_by_enckes
from myorbit.two_body import calc_eph_twobody
from myorbit.util.constants import *
logger = logging.getLogger(__name__)
def calc_tp(M0, a, epoch):
    """Return the epoch of the next perihelion passage.

    The mean anomaly at *epoch* is converted into the time remaining until
    perihelion using the orbital period TWOPI*sqrt(a**3/GM).

    Parameters
    ----------
    M0 : float
        Mean anomaly at *epoch* [rad].
    a : float
        Semi-major axis (units assumed consistent with GM).
    epoch : float
        Reference epoch (e.g. MJD).

    Returns
    -------
    float
        Time of perihelion passage, in the same time scale as *epoch*.
    """
    time_to_perihelion = TWOPI*np.sqrt(pow(a, 3)/GM)*(1 - M0/TWOPI)
    return time_to_perihelion + epoch
def calc_comets_that_no_converge(delta_days):
    """The orbit of all comets is studied around the perihelion [-days, +days]

    For every comet in the catalog, the state vector is propagated with the
    universal-variable method (calc_rv_from_r0v0) starting delta_days before
    the perihelion passage, checking along the way that the angular momentum
    and the eccentricity vector stay constant.  The names of the comets whose
    propagation fails to converge are printed at the end.

    Parameters
    ----------
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    df = dc.DF_COMETS
    not_converged=[]
    for idx, name in enumerate(df['Name']):
        obj = dc.read_comet_elms_for(name,df)
        msg = f'Testing Object: {obj.name}'
        print (msg)
        logger.info(msg)
        # Catalog entries may or may not carry a mean anomaly at epoch.
        if hasattr(obj,'M0') :
            M_at_epoch = obj.M0
        else :
            M_at_epoch = None
        # from 20 days before perihelion passage to 20 days after 20 days perihelion passage
        solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd, M_at_epoch=M_at_epoch)
        T0_MJD = obj.tp_mjd-delta_days
        # Initial state vector at the start of the studied interval.
        r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
        hs = []
        es = []
        for dt in range(2,delta_days*2,2):
            clock_mjd = T0_MJD + dt
            try :
                # Universal-variable propagation from the initial state vector.
                r_xyz, rdot_xyz, h_xyz, f = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
                hs.append(np.linalg.norm(h_xyz))
                es.append(np.linalg.norm(calc_eccentricity_vector(r_xyz, rdot_xyz,h_xyz)))
            except NoConvergenceError :
                print (f"===== Object {name} doest not converged at {clock_mjd} MJD")
                not_converged.append(name)
        # On a Keplerian orbit both the angular momentum and the eccentricity
        # vector must be conserved; report otherwise.
        if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
            msg = f'The angular momentum is NOT constant in the orbit'
            print (msg)
            logger.error(msg)
        if not all(isclose(ec, es[0], abs_tol=1e-12) for ec in es):
            msg = f'The eccentric vector is NOT constant in the orbit'
            print (msg)
            logger.error(msg)
    print (not_converged)
def test_all_bodies(delta_days):
    """Check angular-momentum conservation for every minor body in the catalog.

    Each body is propagated in [tp - delta_days, tp + delta_days] around its
    computed perihelion passage time; the names of the bodies whose
    propagation does not converge are printed at the end.

    Parameters
    ----------
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    df = dc.DF_BODIES
    not_converged=[]
    for idx, name in enumerate(df['Name']):
        body = dc.read_body_elms_for(name,df)
        msg = f'Testing Object: {body.name}'
        solver = KeplerianStateSolver.make(e=body.e, a=body.a, epoch=body.epoch_mjd, M_at_epoch=body.M0)
        # Perihelion passage time derived from the mean anomaly at epoch.
        tp = calc_tp(body.M0, body.a, body.epoch_mjd)
        hs = []
        try :
            for clock_mjd in my_range(tp-delta_days, tp+delta_days, 2):
                r_xyz, rdot_xyz, r, h = solver.calc_rv(clock_mjd)
                hs.append(h)
            # The angular momentum must stay constant along a Keplerian orbit.
            if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
                msg = f'The angular momentum is NOT constant in the orbit'
                print (msg)
                logger.error(msg)
        except NoConvergenceError :
            print (f"===========> NOT converged for object {name}")
            not_converged.append(name)
        # Progress indicator (the catalog holds many thousands of bodies).
        if idx % 1000 == 0 :
            print (f"================================================>> {idx}")
    print (not_converged)
def test_almost_parabolical(delta_days):
    """Check angular-momentum conservation for a set of near-parabolic comets.

    Only the comets hard-coded in *names* (orbits with eccentricity very close
    to 1) are propagated with the elliptical state solver in the interval
    [tp - delta_days, tp + delta_days] around the perihelion passage.

    Parameters
    ----------
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    df = dc.DF_COMETS
    not_converged=[]
    names = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
    #names = ['C/2020 U5 (PANSTARRS)']
    df = df[df.Name.isin(names)]
    for idx, name in enumerate(df['Name']):
        if name not in names :
            continue
        obj = dc.read_comet_elms_for(name,df)
        msg = f'Testing Object: {obj.name} with Tp:{mjd2str_date(obj.tp_mjd)}'
        print (msg)
        logger.info(msg)
        # NOTE(review): M_at_epoch is computed here but never passed to the
        # solver below — confirm whether it should be.
        if hasattr(obj,'M0') :
            M_at_epoch = obj.M0
        else :
            M_at_epoch = None
        # from 20 days before perihelion passage to 20 days after 20 days perihelion passage
        #solver = ParabolicalStateSolver(obj.tp_mjd, obj.q, obj.e)
        solver = EllipticalStateSolver(q=obj.q, a=obj.a, e=obj.e, tp_mjd=obj.tp_mjd, epoch_mjd=obj.epoch_mjd)
        hs = []
        for clock_mjd in my_range(obj.tp_mjd-delta_days, obj.tp_mjd+delta_days, 2):
            r_xyz, rdot_xyz, r, h_xyz, *others = solver.calc_rv(clock_mjd)
            hs.append(h_xyz)
            print(mjd2str_date(clock_mjd))
        # The angular momentum vector must stay constant along the orbit.
        if not all(np.allclose(h_xyz, hs[0], atol=1e-12) for h_xyz in hs):
            msg = f'The angular momentum is NOT constant in the orbit'
            print (msg)
            logger.error(msg)
    print (not_converged)
def test_comets_convergence(delta_days=50):
    """Count, per comet, the non-convergences of the two propagation methods.

    Every comet (sorted by decreasing eccentricity) is propagated around its
    perihelion with both the Keplerian solver and the universal-variable
    method; the number of NoConvergenceError raised by each method is
    accumulated, and comets with at least one failure are written to
    convergence_problems.csv.

    Parameters
    ----------
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    df = dc.DF_COMETS
    #FILTERED_OBJS = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
    #FILTERED_OBJS=['C/1827 P1 (Pons)']
    FILTERED_OBJS=[]
    # An empty FILTERED_OBJS means "study the whole catalog".
    if len(FILTERED_OBJS) != 0:
        df = df[df.Name.isin(FILTERED_OBJS)]
    result = []
    df = df.sort_values('e', ascending=False)
    for idx, name in enumerate(df['Name']):
        obj = dc.read_comet_elms_for(name,df)
        solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
        T0_MJD = obj.tp_mjd-delta_days
        r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
        # kep_nc / uni_nc count the non-convergences of each method.
        kep_nc = uni_nc = 0
        #print (f"Object {name} with e={obj.e}")
        for dt in range(2,delta_days*2,2):
            r1_xyz = rdot1_xyz = f1 = None
            try :
                # Kepler-equation propagation.
                r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
            except NoConvergenceError :
                kep_nc += 1
            r2_xyz = rdot2_xyz = f2 = None
            try :
                # Universal-variable propagation from the initial state.
                r2_xyz, rdot2_xyz, h_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
            except NoConvergenceError :
                uni_nc += 1
                print (f"The noconvergence was with e: {obj.e}")
        if (kep_nc >0) or (uni_nc > 0) :
            row = {}
            row['name'] = name
            row['e'] = obj.e
            row['kep_nc'] = kep_nc
            row['uni_nc'] = uni_nc
            result.append(row)
    df_out = pd.DataFrame(result)
    if len(df_out) > 0:
        print (f'There are {len(df_out)} comets with convergence problems')
        df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
        df_out.to_csv('convergence_problems.csv',index=False,header=True)
    else :
        print ("Undetected no-convergences")
def test_universal_kepler(delta_days=50):
    """Cross-check the Kepler-equation solver against the universal-variable method.

    For every comet (sorted by decreasing eccentricity) the state vector is
    computed with both methods at each step around the perihelion; position,
    velocity and true anomaly are compared and the number of disagreements and
    non-convergences is accumulated and written to kepler_universal.csv.

    Parameters
    ----------
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    df = dc.DF_COMETS
    FILTERED_OBJS=[]
    #FILTERED_OBJS=['C/1933 D1 (Peltier)','C/1989 R1 (Helin-Roman)','C/2007 M5 (SOHO)','C/1988 M1 (SMM)','C/2008 C5 (SOHO)']
    #FILTERED_OBJS=['C/2007 M5 (SOHO)']
    # C/2000 O1 (Koehn)
    # This one has high nonconverence with 500 C/2000 O1 (Koehn)
    if len(FILTERED_OBJS) != 0:
        df = df[df.Name.isin(FILTERED_OBJS)]
    df = df.sort_values('e', ascending=False)
    result = []
    for idx, name in enumerate(df['Name']):
        obj = dc.read_comet_elms_for(name,df)
        #print (name)
        solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
        T0_MJD = obj.tp_mjd-delta_days
        r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
        # Failure counters: true anomaly, position, velocity and non-convergence.
        r_failed = v_failed = f_failed = nc_failed= 0
        for dt in range(2,delta_days*2,2):
            try :
                r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
                r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
                # A third estimate of the true anomaly, from the eccentricity vector.
                e_xyz = calc_eccentricity_vector(r1_xyz, rdot1_xyz, h1_xyz)
                f3 = angle_between_vectors(e_xyz, r1_xyz)
                if not isclose(f1,f2,rel_tol=0, abs_tol=1e-03):
                    f_failed += 1
                    msg=f"name: {obj.name}, TWOPI - f univ: {TWOPI-f2} f Universal: {f2} f Kepler: {f1} e:{obj.e} f Excentricity: {f3} f Excentricity: {TWOPI-f3}"
                    logger.error(msg)
                if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-03):
                    msg = f"name: {obj.name}, e: {obj.e}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
                    logger.error(msg)
                    r_failed += 1
                if not my_isclose (rdot1_xyz, rdot2_xyz, abs_tol=1e-03) :
                    v_failed += 1
            except NoConvergenceError :
                nc_failed += 1
        if (f_failed >0) or (r_failed > 0) or (v_failed > 0) or (nc_failed > 0):
            row = {}
            row['name'] = name
            row['e'] = obj.e
            row['f_failed'] = f_failed
            row['r_failed'] = r_failed
            row['v_failed'] = v_failed
            row['nc_failed'] = nc_failed
            result.append(row)
    df_out = pd.DataFrame(result)
    if len(df_out) > 0:
        print (f'There are {len(df_out)} comets with convergence problems')
        #df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
        df_out.to_csv('kepler_universal.csv',index=False,header=True)
        print (df_out)
    else :
        print ("No problems detected")
def test_enckes():
    """Smoke-test Enckes' perturbation method on comet C/2003 M3 (SOHO)."""
    ephem_input = EphemrisInput(
        from_date="2001.03.01.0",
        to_date="2005.08.31.0",
        step_dd_hh_hhh="02 00.0",
        equinox_name=EQX_J2000,
    )
    # The returned ephemeris DataFrame is not inspected here; this call only
    # verifies that the integration completes.
    calc_eph_by_enckes(dc.C_2003_M3_SOHO, ephem_input)
def test_comet(name, delta_days=50):
    """Compare Kepler-equation and universal-variable propagation for one comet.

    The comet is propagated from delta_days before its perihelion passage,
    stepping every 2 days; the true anomaly and the position computed by the
    two methods are compared, and any disagreement is printed and logged.

    Parameters
    ----------
    name : str
        Name of the comet as it appears in dc.DF_COMETS.
    delta_days : int
        Half-width, in days, of the studied interval around perihelion.
    """
    obj = dc.read_comet_elms_for(name,dc.DF_COMETS)
    solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
    T0_MJD = obj.tp_mjd-delta_days
    #print (f"Time interval considered: from:{mjd2str_date(T0_MJD-delta_days)} to {mjd2str_date(T0_MJD+delta_days)}")
    # Initial state vector for the universal-variable propagation.
    r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
    max_diff_r = 0
    # BUG FIX: nc_failed was incremented in the except clause without ever
    # being initialised, raising NameError on the first non-convergence.
    nc_failed = 0
    for dt in range(2,delta_days*2,2):
        try :
            print (f"{mjd2str_date(T0_MJD+dt)}")
            r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
            r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
            # Compare the true anomaly from the two methods.
            if not isclose(f1,f2, rel_tol=0, abs_tol=1e-03):
                msg=f"{mjd2str_date(T0_MJD+dt)} f Uni:{f2} f Kepler:{f1} TWOPI-f:{TWOPI-f1}"
                print (msg)
                logger.error(msg)
            # Compare the position vectors, tracking the worst disagreement.
            if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-07):
                diff_rxyz = np.linalg.norm(r1_xyz- r2_xyz)
                if diff_rxyz > max_diff_r :
                    max_diff_r = diff_rxyz
                    print (f"Maximun distance at time:{mjd2str_date(T0_MJD+dt)}")
                msg = f"{mjd2str_date(T0_MJD+dt)}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
                print (msg)
                logger.error(msg)
        except NoConvergenceError :
            nc_failed += 1
def test_near_parabollic():
    """Compute and print a two-body ephemeris for C/2007 M5 (SOHO), forcing the near-parabolical solver."""
    ephem_input = EphemrisInput(
        from_date="2007.06.15.0",
        to_date="2007.07.15.0",
        step_dd_hh_hhh="02 00.0",
        equinox_name=EQX_J2000,
    )
    ephemeris_df = calc_eph_twobody(dc.C_2007_M5_SOHO, ephem_input, force_orbit='near_parabolical')
    #df = calc_eph_twobody(obj, eph)
    print (ephemeris_df)
def change_reference_frame(heliocentric_orbs, name):
    """Re-express all heliocentric orbits relative to the body called *name*.

    The body itself (matched case-insensitively) is excluded from the result;
    every other body's coordinates get the reference body's coordinates
    subtracted.  *name* must be an exact key of *heliocentric_orbs* for the
    subtraction.
    """
    reference_coords = heliocentric_orbs[name]
    return {
        body: coords - reference_coords
        for body, coords in heliocentric_orbs.items()
        if body.lower() != name.lower()
    }
# Lower-cased planet names (the keys of the GM_by_planet constant table),
# used for case-insensitive planet lookup in calc_orbits_heliocentric_data.
PLANET_NAMES= [x.lower() for x in GM_by_planet.keys()]
def _resolve_eph_df(name, eph):
    """Resolve *name* to a (key, ephemeris DataFrame) pair, or (name, None) if unknown.

    *name* may be a BodyElms/CometElms instance (used directly, keyed by its
    .name attribute), the name of a planet, or the name of a comet or minor
    body present in the data catalogs.
    """
    if not isinstance(name, str):
        # An orbital-elements object (BodyElms or CometElms) was passed directly.
        return name.name, calc_eph_by_cowells(name, eph, include_osc=False)
    if name.lower() in PLANET_NAMES:
        return name, calc_eph_planet(name, eph)
    # Try the comet catalog first, then the minor-bodies catalog.
    obj = dc.read_comet_elms_for(name, dc.DF_COMETS)
    if obj is None:
        obj = dc.read_body_elms_for(name, dc.DF_BODIES)
    if obj is None:
        return name, None
    return name, calc_eph_by_cowells(obj, eph, include_osc=False)

def calc_orbits_heliocentric_data(eph, obj_names):
    """
    Computes the orbits of the planets, minor bodies and comets.

    Parameters
    ----------
    eph : EphemrisInput
        Time interval and step at which the positions are computed.
    obj_names : iterable
        Names of planets/comets/minor bodies, or BodyElms/CometElms objects.

    Returns
    -------
    orbs : dict
        Maps each resolved object name to an (n, 3) matrix with the
        heliocentric coordinates h_x, h_y, h_z, one row per date.
    dfs : list of DataFrame
        The full ephemeris DataFrame of each resolved object, in input order.
    date_refs : list
        The dates at which the coordinates were computed (assumed identical
        for every object, so taken from the first one).

    Raises
    ------
    ValueError
        If none of the requested objects could be resolved.
    """
    orbs = {}
    dfs = []
    for name in obj_names:
        key, df = _resolve_eph_df(name, eph)
        if df is None:
            print (f"Object {name} not found")
            continue
        orbs[key] = df
        dfs.append(df)
    if not orbs:
        # Fail early with a clear message instead of an obscure IndexError below.
        raise ValueError("None of the requested objects could be resolved")
    # Assumed that the ['date'] column of each ephemeris is the same for every
    # object, so the list of dates is taken from the first one.
    first_key = next(iter(orbs))
    date_refs = orbs[first_key]['date'].to_list()
    cols = ['h_x','h_y','h_z']
    for k, df in orbs.items():
        # Keep only the heliocentric ecliptic coordinates, as an (n, 3) matrix:
        # [[x1,y1,z1],
        #  [x2,y2,z2],
        #   ...
        #  [xn,yn,zn]]
        orbs[k] = df[cols].to_numpy()
    return orbs, dfs, date_refs
def calc_dangerous_asteroids(eph, n_objects=10):
    """Append, for each minor body, its minimum distance to the Earth to a CSV file.

    For every body not already present in the output CSV, the heliocentric
    orbit is integrated (Cowell's method) over the ephemeris interval, the
    geocentric distance is evaluated at every step, and the minimum distance
    together with its date is appended to the file (which therefore acts as a
    resumable checkpoint).

    Parameters
    ----------
    eph : EphemrisInput
        Time interval and step used for the orbit integration.
    n_objects : int
        Approximate maximum number of bodies processed in this run.
    """
    # NOTE(review): hard-coded, user-specific output path — consider making it a parameter.
    fname='/home/benito/PERSONAL/dangerous.csv'
    print (eph)
    # Bodies already processed in previous runs are filtered out below.
    df_out = pd.read_csv(fname,sep='|',names=['name', 'min_date','min_distance'])
    prev_len = len(dc.DF_BODIES)
    df = dc.DF_BODIES[~dc.DF_BODIES.Name.isin(df_out.name.values)]
    print (f"Filtered out {prev_len-len(df)} bodies")
    print ("Calculating Earth orbit data")
    orb_earth_from_Sun, *others = calc_orbits_heliocentric_data(eph, ['Earth'])
    with open(fname, 'at') as f:
        for idx, name in enumerate(df['Name']):
            body = dc.read_body_elms_for(name,df)
            print (f"Processing {name}, Processed:{idx+1}, Remaining:{len(df)-idx}")
            orb_obj_from_Sun, _, date_refs = calc_orbits_heliocentric_data(eph, [name])
            # Geocentric position = heliocentric position of the body minus Earth's.
            orb_obj_from_Earth = orb_obj_from_Sun[name] - orb_earth_from_Sun['Earth']
            distances_from_Earth = np.linalg.norm(orb_obj_from_Earth,axis=1)
            min_index = np.argmin(distances_from_Earth, axis=0)
            min_distance = distances_from_Earth[min_index]
            min_date = date_refs[min_index]
            # One pipe-separated row per body; flush so progress survives interruption.
            f.write(f"{body.name}|{min_date}|{min_distance}\n")
            f.flush()
            if (idx > n_objects):
                break
if __name__ == "__main__":
    # Manual drivers kept for reference; uncomment the one to run.
    #test_all_comets()
    #test_all_bodies()
    #test_almost_parabolical(50)
    #test_universal()
    #calc_comets_that_no_converge(20)
    #import logging.config
    #logging.config.fileConfig(CONFIG_INI, disable_existing_loggers=False)
    #test_comets_convergence(5000)
    #test_universal_kepler(5000)
    #test_comet('C/2007 M5 (SOHO)',2500)
    #test_enckes()
    #test_near_parabollic()
    # Current entry point: scan the minor-body catalog for close Earth approaches
    # over 2021-2060, sampling every 5 days.
    eph = EphemrisInput(from_date="2021.01.01.0",
                        to_date = "2060.12.01.0",
                        step_dd_hh_hhh = "05 00.0",
                        equinox_name = "J2000")
    calc_dangerous_asteroids(eph,n_objects=3000000)
| [
"logging.getLogger",
"pandas.read_csv",
"myorbit.data_catalog.read_comet_elms_for",
"numpy.linalg.norm",
"myorbit.data_catalog.read_body_elms_for",
"myorbit.util.general.my_isclose",
"myorbit.two_body.calc_eph_twobody",
"myorbit.data_catalog.DF_BODIES.Name.isin",
"myorbit.kepler.keplerian.KeplerianS... | [((1324, 1351), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1341, 1351), False, 'import logging\n'), ((8068, 8088), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (8080, 8088), True, 'import pandas as pd\n'), ((10816, 10836), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (10828, 10836), True, 'import pandas as pd\n'), ((11215, 11332), 'myorbit.ephemeris_input.EphemrisInput', 'EphemrisInput', ([], {'from_date': '"""2001.03.01.0"""', 'to_date': '"""2005.08.31.0"""', 'step_dd_hh_hhh': '"""02 00.0"""', 'equinox_name': 'EQX_J2000'}), "(from_date='2001.03.01.0', to_date='2005.08.31.0',\n step_dd_hh_hhh='02 00.0', equinox_name=EQX_J2000)\n", (11228, 11332), False, 'from myorbit.ephemeris_input import EphemrisInput\n'), ((11418, 11446), 'myorbit.pert_enckes.calc_eph_by_enckes', 'calc_eph_by_enckes', (['obj', 'eph'], {}), '(obj, eph)\n', (11436, 11446), False, 'from myorbit.pert_enckes import calc_eph_by_enckes\n'), ((11504, 11546), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'dc.DF_COMETS'], {}), '(name, dc.DF_COMETS)\n', (11526, 11546), True, 'import myorbit.data_catalog as dc\n'), ((11559, 11655), 'myorbit.kepler.keplerian.KeplerianStateSolver.make', 'KeplerianStateSolver.make', ([], {'e': 'obj.e', 'a': 'obj.a', 'tp_mjd': 'obj.tp_mjd', 'q': 'obj.q', 'epoch': 'obj.epoch_mjd'}), '(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q,\n epoch=obj.epoch_mjd)\n', (11584, 11655), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((13023, 13140), 'myorbit.ephemeris_input.EphemrisInput', 'EphemrisInput', ([], {'from_date': '"""2007.06.15.0"""', 'to_date': '"""2007.07.15.0"""', 'step_dd_hh_hhh': '"""02 00.0"""', 'equinox_name': 'EQX_J2000'}), "(from_date='2007.06.15.0', to_date='2007.07.15.0',\n step_dd_hh_hhh='02 00.0', equinox_name=EQX_J2000)\n", (13036, 13140), 
False, 'from myorbit.ephemeris_input import EphemrisInput\n'), ((13225, 13283), 'myorbit.two_body.calc_eph_twobody', 'calc_eph_twobody', (['obj', 'eph'], {'force_orbit': '"""near_parabolical"""'}), "(obj, eph, force_orbit='near_parabolical')\n", (13241, 13283), False, 'from myorbit.two_body import calc_eph_twobody\n'), ((16690, 16761), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""|"""', 'names': "['name', 'min_date', 'min_distance']"}), "(fname, sep='|', names=['name', 'min_date', 'min_distance'])\n", (16701, 16761), True, 'import pandas as pd\n'), ((18268, 18383), 'myorbit.ephemeris_input.EphemrisInput', 'EphemrisInput', ([], {'from_date': '"""2021.01.01.0"""', 'to_date': '"""2060.12.01.0"""', 'step_dd_hh_hhh': '"""05 00.0"""', 'equinox_name': '"""J2000"""'}), "(from_date='2021.01.01.0', to_date='2060.12.01.0',\n step_dd_hh_hhh='05 00.0', equinox_name='J2000')\n", (18281, 18383), False, 'from myorbit.ephemeris_input import EphemrisInput\n'), ((1770, 1802), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'df'], {}), '(name, df)\n', (1792, 1802), True, 'import myorbit.data_catalog as dc\n'), ((2109, 2228), 'myorbit.kepler.keplerian.KeplerianStateSolver.make', 'KeplerianStateSolver.make', ([], {'e': 'obj.e', 'a': 'obj.a', 'tp_mjd': 'obj.tp_mjd', 'q': 'obj.q', 'epoch': 'obj.epoch_mjd', 'M_at_epoch': 'M_at_epoch'}), '(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q,\n epoch=obj.epoch_mjd, M_at_epoch=M_at_epoch)\n', (2134, 2228), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((3457, 3488), 'myorbit.data_catalog.read_body_elms_for', 'dc.read_body_elms_for', (['name', 'df'], {}), '(name, df)\n', (3478, 3488), True, 'import myorbit.data_catalog as dc\n'), ((3550, 3641), 'myorbit.kepler.keplerian.KeplerianStateSolver.make', 'KeplerianStateSolver.make', ([], {'e': 'body.e', 'a': 'body.a', 'epoch': 'body.epoch_mjd', 'M_at_epoch': 'body.M0'}), '(e=body.e, a=body.a, 
epoch=body.epoch_mjd,\n M_at_epoch=body.M0)\n', (3575, 3641), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((5101, 5133), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'df'], {}), '(name, df)\n', (5123, 5133), True, 'import myorbit.data_catalog as dc\n'), ((5542, 5638), 'myorbit.kepler.keplerian.EllipticalStateSolver', 'EllipticalStateSolver', ([], {'q': 'obj.q', 'a': 'obj.a', 'e': 'obj.e', 'tp_mjd': 'obj.tp_mjd', 'epoch_mjd': 'obj.epoch_mjd'}), '(q=obj.q, a=obj.a, e=obj.e, tp_mjd=obj.tp_mjd,\n epoch_mjd=obj.epoch_mjd)\n', (5563, 5638), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((5676, 5737), 'myorbit.util.general.my_range', 'my_range', (['(obj.tp_mjd - delta_days)', '(obj.tp_mjd + delta_days)', '(2)'], {}), '(obj.tp_mjd - delta_days, obj.tp_mjd + delta_days, 2)\n', (5684, 5737), False, 'from myorbit.util.general import my_range, NoConvergenceError, my_isclose\n'), ((6910, 6942), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'df'], {}), '(name, df)\n', (6932, 6942), True, 'import myorbit.data_catalog as dc\n'), ((6959, 7055), 'myorbit.kepler.keplerian.KeplerianStateSolver.make', 'KeplerianStateSolver.make', ([], {'e': 'obj.e', 'a': 'obj.a', 'tp_mjd': 'obj.tp_mjd', 'q': 'obj.q', 'epoch': 'obj.epoch_mjd'}), '(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q,\n epoch=obj.epoch_mjd)\n', (6984, 7055), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((8942, 8974), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'df'], {}), '(name, df)\n', (8964, 8974), True, 'import myorbit.data_catalog as dc\n'), ((9013, 9109), 'myorbit.kepler.keplerian.KeplerianStateSolver.make', 'KeplerianStateSolver.make', ([], {'e': 'obj.e', 'a': 'obj.a', 'tp_mjd': 'obj.tp_mjd', 'q': 
'obj.q', 'epoch': 'obj.epoch_mjd'}), '(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q,\n epoch=obj.epoch_mjd)\n', (9038, 9109), False, 'from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver\n'), ((3756, 3801), 'myorbit.util.general.my_range', 'my_range', (['(tp - delta_days)', '(tp + delta_days)', '(2)'], {}), '(tp - delta_days, tp + delta_days, 2)\n', (3764, 3801), False, 'from myorbit.util.general import my_range, NoConvergenceError, my_isclose\n'), ((12121, 12173), 'myorbit.lagrange.lagrange_coeff.calc_rv_from_r0v0', 'calc_rv_from_r0v0', (['mu_Sun', 'r0_xyz', 'rdot0_xyz', 'dt', 'f0'], {}), '(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)\n', (12138, 12173), False, 'from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0\n'), ((14962, 15010), 'myorbit.pert_cowels.calc_eph_by_cowells', 'calc_eph_by_cowells', (['obj', 'eph'], {'include_osc': '(False)'}), '(obj, eph, include_osc=False)\n', (14981, 15010), False, 'from myorbit.pert_cowels import calc_eph_by_cowells\n'), ((15160, 15186), 'myorbit.two_body.calc_eph_planet', 'calc_eph_planet', (['name', 'eph'], {}), '(name, eph)\n', (15175, 15186), False, 'from myorbit.two_body import calc_eph_planet\n'), ((15275, 15317), 'myorbit.data_catalog.read_comet_elms_for', 'dc.read_comet_elms_for', (['name', 'dc.DF_COMETS'], {}), '(name, dc.DF_COMETS)\n', (15297, 15317), True, 'import myorbit.data_catalog as dc\n'), ((16815, 16857), 'myorbit.data_catalog.DF_BODIES.Name.isin', 'dc.DF_BODIES.Name.isin', (['df_out.name.values'], {}), '(df_out.name.values)\n', (16837, 16857), True, 'import myorbit.data_catalog as dc\n'), ((17137, 17168), 'myorbit.data_catalog.read_body_elms_for', 'dc.read_body_elms_for', (['name', 'df'], {}), '(name, df)\n', (17158, 17168), True, 'import myorbit.data_catalog as dc\n'), ((17475, 17517), 'numpy.linalg.norm', 'np.linalg.norm', (['orb_obj_from_Earth'], {'axis': '(1)'}), '(orb_obj_from_Earth, axis=1)\n', (17489, 17517), True, 'import numpy as np\n'), ((17541, 
17580), 'numpy.argmin', 'np.argmin', (['distances_from_Earth'], {'axis': '(0)'}), '(distances_from_Earth, axis=0)\n', (17550, 17580), True, 'import numpy as np\n'), ((2513, 2565), 'myorbit.lagrange.lagrange_coeff.calc_rv_from_r0v0', 'calc_rv_from_r0v0', (['mu_Sun', 'r0_xyz', 'rdot0_xyz', 'dt', 'f0'], {}), '(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)\n', (2530, 2565), False, 'from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0\n'), ((5185, 5209), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['obj.tp_mjd'], {}), '(obj.tp_mjd)\n', (5197, 5209), False, 'from myorbit.util.timeut import mjd2str_date\n'), ((5872, 5895), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['clock_mjd'], {}), '(clock_mjd)\n', (5884, 5895), False, 'from myorbit.util.timeut import mjd2str_date\n'), ((7633, 7685), 'myorbit.lagrange.lagrange_coeff.calc_rv_from_r0v0', 'calc_rv_from_r0v0', (['mu_Sun', 'r0_xyz', 'rdot0_xyz', 'dt', 'f0'], {}), '(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)\n', (7650, 7685), False, 'from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0\n'), ((9475, 9527), 'myorbit.lagrange.lagrange_coeff.calc_rv_from_r0v0', 'calc_rv_from_r0v0', (['mu_Sun', 'r0_xyz', 'rdot0_xyz', 'dt', 'f0'], {}), '(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)\n', (9492, 9527), False, 'from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0\n'), ((9552, 9603), 'myorbit.util.general.calc_eccentricity_vector', 'calc_eccentricity_vector', (['r1_xyz', 'rdot1_xyz', 'h1_xyz'], {}), '(r1_xyz, rdot1_xyz, h1_xyz)\n', (9576, 9603), False, 'from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors\n'), ((9625, 9661), 'myorbit.util.general.angle_between_vectors', 'angle_between_vectors', (['e_xyz', 'r1_xyz'], {}), '(e_xyz, r1_xyz)\n', (9646, 9661), False, 'from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors\n'), ((12193, 12234), 'math.isclose', 'isclose', (['f1', 'f2'], {'rel_tol': '(0)', 'abs_tol': '(0.001)'}), '(f1, f2, rel_tol=0, 
abs_tol=0.001)\n', (12200, 12234), False, 'from math import isclose\n'), ((12410, 12451), 'myorbit.util.general.my_isclose', 'my_isclose', (['r1_xyz', 'r2_xyz'], {'abs_tol': '(1e-07)'}), '(r1_xyz, r2_xyz, abs_tol=1e-07)\n', (12420, 12451), False, 'from myorbit.util.general import my_range, NoConvergenceError, my_isclose\n'), ((12481, 12512), 'numpy.linalg.norm', 'np.linalg.norm', (['(r1_xyz - r2_xyz)'], {}), '(r1_xyz - r2_xyz)\n', (12495, 12512), True, 'import numpy as np\n'), ((15379, 15427), 'myorbit.pert_cowels.calc_eph_by_cowells', 'calc_eph_by_cowells', (['obj', 'eph'], {'include_osc': '(False)'}), '(obj, eph, include_osc=False)\n', (15398, 15427), False, 'from myorbit.pert_cowels import calc_eph_by_cowells\n'), ((15531, 15572), 'myorbit.data_catalog.read_body_elms_for', 'dc.read_body_elms_for', (['name', 'dc.DF_BODIES'], {}), '(name, dc.DF_BODIES)\n', (15552, 15572), True, 'import myorbit.data_catalog as dc\n'), ((2592, 2613), 'numpy.linalg.norm', 'np.linalg.norm', (['h_xyz'], {}), '(h_xyz)\n', (2606, 2613), True, 'import numpy as np\n'), ((2924, 2956), 'math.isclose', 'isclose', (['h', 'hs[0]'], {'abs_tol': '(1e-12)'}), '(h, hs[0], abs_tol=1e-12)\n', (2931, 2956), False, 'from math import isclose\n'), ((3115, 3148), 'math.isclose', 'isclose', (['ec', 'es[0]'], {'abs_tol': '(1e-12)'}), '(ec, es[0], abs_tol=1e-12)\n', (3122, 3148), False, 'from math import isclose\n'), ((5921, 5958), 'numpy.allclose', 'np.allclose', (['h_xyz', 'hs[0]'], {'atol': '(1e-12)'}), '(h_xyz, hs[0], atol=1e-12)\n', (5932, 5958), True, 'import numpy as np\n'), ((9685, 9726), 'math.isclose', 'isclose', (['f1', 'f2'], {'rel_tol': '(0)', 'abs_tol': '(0.001)'}), '(f1, f2, rel_tol=0, abs_tol=0.001)\n', (9692, 9726), False, 'from math import isclose\n'), ((9988, 10029), 'myorbit.util.general.my_isclose', 'my_isclose', (['r1_xyz', 'r2_xyz'], {'abs_tol': '(0.001)'}), '(r1_xyz, r2_xyz, abs_tol=0.001)\n', (9998, 10029), False, 'from myorbit.util.general import my_range, NoConvergenceError, 
my_isclose\n'), ((10283, 10330), 'myorbit.util.general.my_isclose', 'my_isclose', (['rdot1_xyz', 'rdot2_xyz'], {'abs_tol': '(0.001)'}), '(rdot1_xyz, rdot2_xyz, abs_tol=0.001)\n', (10293, 10330), False, 'from myorbit.util.general import my_range, NoConvergenceError, my_isclose\n'), ((15634, 15682), 'myorbit.pert_cowels.calc_eph_by_cowells', 'calc_eph_by_cowells', (['obj', 'eph'], {'include_osc': '(False)'}), '(obj, eph, include_osc=False)\n', (15653, 15682), False, 'from myorbit.pert_cowels import calc_eph_by_cowells\n'), ((2656, 2704), 'myorbit.util.general.calc_eccentricity_vector', 'calc_eccentricity_vector', (['r_xyz', 'rdot_xyz', 'h_xyz'], {}), '(r_xyz, rdot_xyz, h_xyz)\n', (2680, 2704), False, 'from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors\n'), ((3925, 3957), 'math.isclose', 'isclose', (['h', 'hs[0]'], {'abs_tol': '(1e-12)'}), '(h, hs[0], abs_tol=1e-12)\n', (3932, 3957), False, 'from math import isclose\n'), ((11968, 11993), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['(T0_MJD + dt)'], {}), '(T0_MJD + dt)\n', (11980, 11993), False, 'from myorbit.util.timeut import mjd2str_date\n'), ((12258, 12283), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['(T0_MJD + dt)'], {}), '(T0_MJD + dt)\n', (12270, 12283), False, 'from myorbit.util.timeut import mjd2str_date\n'), ((12706, 12731), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['(T0_MJD + dt)'], {}), '(T0_MJD + dt)\n', (12718, 12731), False, 'from myorbit.util.timeut import mjd2str_date\n'), ((12744, 12775), 'numpy.linalg.norm', 'np.linalg.norm', (['(r1_xyz - r2_xyz)'], {}), '(r1_xyz - r2_xyz)\n', (12758, 12775), True, 'import numpy as np\n'), ((12792, 12829), 'numpy.linalg.norm', 'np.linalg.norm', (['(rdot1_xyz - rdot2_xyz)'], {}), '(rdot1_xyz - rdot2_xyz)\n', (12806, 12829), True, 'import numpy as np\n'), ((10101, 10132), 'numpy.linalg.norm', 'np.linalg.norm', (['(r1_xyz - r2_xyz)'], {}), '(r1_xyz - r2_xyz)\n', (10115, 10132), True, 'import numpy as 
np\n'), ((10149, 10186), 'numpy.linalg.norm', 'np.linalg.norm', (['(rdot1_xyz - rdot2_xyz)'], {}), '(rdot1_xyz - rdot2_xyz)\n', (10163, 10186), True, 'import numpy as np\n'), ((12654, 12679), 'myorbit.util.timeut.mjd2str_date', 'mjd2str_date', (['(T0_MJD + dt)'], {}), '(T0_MJD + dt)\n', (12666, 12679), False, 'from myorbit.util.timeut import mjd2str_date\n')] |
import pylab as plt
import numpy as np

# Each row of data.txt holds six space-separated values: x y z yaw pitch roll.
data = np.genfromtxt('data.txt', delimiter=' ')

# Sample index used as the time axis.
X = np.arange(0, len(data))

# One figure per channel (figure numbers 0..5, same order as the columns).
# The six hand-copied figure blocks of the original are folded into one loop.
CHANNELS = ('X', 'Y', 'Z', 'Yaw', 'Pitch', 'Roll')
for fig_num, label in enumerate(CHANNELS):
    plt.figure(fig_num)
    plt.xlabel('Time')
    plt.ylabel(label)
    plt.plot(X, data[:, fig_num])

plt.show()
| [
"pylab.plot",
"pylab.xlabel",
"pylab.figure",
"numpy.genfromtxt",
"pylab.ylabel",
"pylab.show"
] | [((49, 89), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.txt"""'], {'delimiter': '""" """'}), "('data.txt', delimiter=' ')\n", (62, 89), True, 'import numpy as np\n'), ((218, 231), 'pylab.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (228, 231), True, 'import pylab as plt\n'), ((232, 250), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (242, 250), True, 'import pylab as plt\n'), ((251, 266), 'pylab.ylabel', 'plt.ylabel', (['"""X"""'], {}), "('X')\n", (261, 266), True, 'import pylab as plt\n'), ((267, 281), 'pylab.plot', 'plt.plot', (['X', 'x'], {}), '(X, x)\n', (275, 281), True, 'import pylab as plt\n'), ((283, 296), 'pylab.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (293, 296), True, 'import pylab as plt\n'), ((297, 315), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (307, 315), True, 'import pylab as plt\n'), ((316, 331), 'pylab.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (326, 331), True, 'import pylab as plt\n'), ((332, 346), 'pylab.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (340, 346), True, 'import pylab as plt\n'), ((348, 361), 'pylab.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (358, 361), True, 'import pylab as plt\n'), ((362, 380), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (372, 380), True, 'import pylab as plt\n'), ((381, 396), 'pylab.ylabel', 'plt.ylabel', (['"""Z"""'], {}), "('Z')\n", (391, 396), True, 'import pylab as plt\n'), ((397, 411), 'pylab.plot', 'plt.plot', (['X', 'z'], {}), '(X, z)\n', (405, 411), True, 'import pylab as plt\n'), ((413, 426), 'pylab.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (423, 426), True, 'import pylab as plt\n'), ((427, 445), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (437, 445), True, 'import pylab as plt\n'), ((446, 463), 'pylab.ylabel', 'plt.ylabel', (['"""Yaw"""'], {}), "('Yaw')\n", (456, 463), True, 'import pylab as plt\n'), ((464, 480), 'pylab.plot', 'plt.plot', (['X', 'yaw'], {}), '(X, yaw)\n', 
(472, 480), True, 'import pylab as plt\n'), ((482, 495), 'pylab.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (492, 495), True, 'import pylab as plt\n'), ((496, 514), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (506, 514), True, 'import pylab as plt\n'), ((515, 534), 'pylab.ylabel', 'plt.ylabel', (['"""Pitch"""'], {}), "('Pitch')\n", (525, 534), True, 'import pylab as plt\n'), ((535, 553), 'pylab.plot', 'plt.plot', (['X', 'pitch'], {}), '(X, pitch)\n', (543, 553), True, 'import pylab as plt\n'), ((555, 568), 'pylab.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (565, 568), True, 'import pylab as plt\n'), ((569, 587), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (579, 587), True, 'import pylab as plt\n'), ((588, 606), 'pylab.ylabel', 'plt.ylabel', (['"""Roll"""'], {}), "('Roll')\n", (598, 606), True, 'import pylab as plt\n'), ((607, 624), 'pylab.plot', 'plt.plot', (['X', 'roll'], {}), '(X, roll)\n', (615, 624), True, 'import pylab as plt\n'), ((626, 636), 'pylab.show', 'plt.show', ([], {}), '()\n', (634, 636), True, 'import pylab as plt\n')] |
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import tflearn
from matplotlib import pyplot as plt
from sklearn.metrics import classification_report
# Load the competition data; submission "type" is reset to a placeholder.
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
submission = pd.read_csv("../input/sample_submission.csv")
submission["type"] = "Unknown"
#sns.pairplot(train.drop("id",axis=1), hue="type", diag_kind="kde")
import itertools
# All pairwise products of the numeric feature columns (id/color/type excluded).
comb = list(itertools.combinations(train.drop(["id", "color", "type"], axis=1).columns, 2))
try_comb = pd.DataFrame()
for c in comb:
    try_comb[c[0]+"_x_"+c[1]] = train[c[0]].values *train[c[1]].values
try_comb["type"] = train.type
#sns.pairplot(try_comb, hue="type", diag_kind="kde")
try_comb = None
# Keep only three of the product features (indices 1, 2 and the last combo)
# and add them to both train and test.
for i in [1,2,-1]:
    train[comb[i][0]+"_x_"+comb[i][1]] = train[comb[i][0]].values * train[comb[i][1]].values
    test[comb[i][0]+"_x_"+comb[i][1]] = test[comb[i][0]].values * test[comb[i][1]].values
from sklearn.decomposition import KernelPCA
# 2-D RBF-kernel PCA projection, used both for visualization and as features.
kPCA = KernelPCA(n_components=2, kernel="rbf", gamma=1)
transf = kPCA.fit_transform(train.drop(["id", "color", "type"], axis=1))
plt.figure(figsize=(10,8))
for label,marker,color in zip(["Ghost", "Ghoul", "Goblin"],('x', 'o', '^'),('blue', 'red', 'green')):
    plt.scatter(x=transf[:,0][(train.type == label).values],
                y=transf[:,1][(train.type == label).values],
                marker=marker,
                color=color,
                alpha=0.7,
                label='class {}'.format(label)
                )
plt.legend()
plt.title('KernelPCA projection')
plt.show()
# Append the two kPCA components as extra features (kPCA was fit on train only).
train["kPCA_0"] = transf[:,0]
train["kPCA_1"] = transf[:,1]
transf_test = kPCA.transform(test.drop(["id", "color"], axis=1).values)
test["kPCA_0"] = transf_test[:,0]
test["kPCA_1"] = transf_test[:,1]
#sns.pairplot(train.drop(["id", "color"],axis=1), hue="type", diag_kind="kde")
# One-hot encode the target; X has 9 columns to match the network input below.
X_train = train.drop(["id", "color", "type"], axis=1)
y_train = pd.get_dummies(train["type"]).values
X_test = test.drop(["id", "color"], axis=1)
run = True
from sklearn.model_selection import KFold
# 5-fold CV: one tflearn DNN per fold; test predictions are accumulated
# weighted by each fold's validation accuracy (score[0]).
kf = KFold(n_splits=5)
scores = []
predictions = np.zeros((X_test.values.shape[0], 3))
for train_idx, val_idx in kf.split(X_train):
    # Fresh TF graph per fold so tflearn layers do not collide across folds.
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None, 9])
        net = tflearn.fully_connected(net, 1024,
                                      activation='relu',
                                      weights_init='xavier',
                                      regularizer='L2')
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, 3, activation='softmax')
        net = tflearn.regression(net)
        model = tflearn.DNN(net, tensorboard_verbose=0)
        model.fit(X_train.values[train_idx],
                  y_train[train_idx],
                  n_epoch=150)
        score = model.evaluate(X_train.values[val_idx], y_train[val_idx])
        scores.append(score[0])
        print("\n", "SCORE:", score[0], "\n\n")
        prediction = np.array(model.predict(X_test))
        predictions += prediction * score[0]
scores
# argmax over the 3 class columns; indices follow pd.get_dummies' alphabetical
# ordering: Ghost, Ghoul, Goblin.
test_pred = np.argmax(predictions, axis=1).astype(str)
test_pred[test_pred=="0"] = "Ghost"
test_pred[test_pred=="1"] = "Ghoul"
test_pred[test_pred=="2"] = "Goblin"
test_pred
submission["type"] = test_pred
submission.to_csv("NN.csv", index=False)
| [
"tensorflow.Graph",
"tflearn.fully_connected",
"tflearn.regression",
"pandas.read_csv",
"tflearn.DNN",
"numpy.argmax",
"pandas.get_dummies",
"matplotlib.pyplot.figure",
"sklearn.decomposition.KernelPCA",
"numpy.zeros",
"tflearn.dropout",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"skle... | [((197, 230), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (208, 230), True, 'import pandas as pd\n'), ((238, 270), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {}), "('../input/test.csv')\n", (249, 270), True, 'import pandas as pd\n'), ((285, 330), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv"""'], {}), "('../input/sample_submission.csv')\n", (296, 330), True, 'import pandas as pd\n'), ((550, 564), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (562, 564), True, 'import pandas as pd\n'), ((1004, 1052), 'sklearn.decomposition.KernelPCA', 'KernelPCA', ([], {'n_components': '(2)', 'kernel': '"""rbf"""', 'gamma': '(1)'}), "(n_components=2, kernel='rbf', gamma=1)\n", (1013, 1052), False, 'from sklearn.decomposition import KernelPCA\n'), ((1127, 1154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1137, 1154), True, 'from matplotlib import pyplot as plt\n'), ((1533, 1545), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1543, 1545), True, 'from matplotlib import pyplot as plt\n'), ((1546, 1579), 'matplotlib.pyplot.title', 'plt.title', (['"""KernelPCA projection"""'], {}), "('KernelPCA projection')\n", (1555, 1579), True, 'from matplotlib import pyplot as plt\n'), ((1581, 1591), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1589, 1591), True, 'from matplotlib import pyplot as plt\n'), ((2076, 2093), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (2081, 2093), False, 'from sklearn.model_selection import KFold\n'), ((2120, 2157), 'numpy.zeros', 'np.zeros', (['(X_test.values.shape[0], 3)'], {}), '((X_test.values.shape[0], 3))\n', (2128, 2157), True, 'import numpy as np\n'), ((1936, 1965), 'pandas.get_dummies', 'pd.get_dummies', (["train['type']"], {}), "(train['type'])\n", (1950, 1965), True, 'import pandas as pd\n'), ((2252, 2287), 
'tflearn.input_data', 'tflearn.input_data', ([], {'shape': '[None, 9]'}), '(shape=[None, 9])\n', (2270, 2287), False, 'import tflearn\n'), ((2303, 2401), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(1024)'], {'activation': '"""relu"""', 'weights_init': '"""xavier"""', 'regularizer': '"""L2"""'}), "(net, 1024, activation='relu', weights_init='xavier',\n regularizer='L2')\n", (2326, 2401), False, 'import tflearn\n'), ((2526, 2551), 'tflearn.dropout', 'tflearn.dropout', (['net', '(0.5)'], {}), '(net, 0.5)\n', (2541, 2551), False, 'import tflearn\n'), ((2567, 2620), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(3)'], {'activation': '"""softmax"""'}), "(net, 3, activation='softmax')\n", (2590, 2620), False, 'import tflearn\n'), ((2636, 2659), 'tflearn.regression', 'tflearn.regression', (['net'], {}), '(net)\n', (2654, 2659), False, 'import tflearn\n'), ((2677, 2716), 'tflearn.DNN', 'tflearn.DNN', (['net'], {'tensorboard_verbose': '(0)'}), '(net, tensorboard_verbose=0)\n', (2688, 2716), False, 'import tflearn\n'), ((3108, 3138), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3117, 3138), True, 'import numpy as np\n'), ((2212, 2222), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2220, 2222), True, 'import tensorflow as tf\n')] |
'''
CHypre (Complex Hypre)
CHypreVec : ParVector
CHypreMat : ParCSR
container object to support complex using
real value hypre
it should work with pure real or pure imaginary
case too.
   it follows the method naming convention used
   in scipy.sparse. However, since it inherits the list
object, __setitem__ can not be used for accessing
array elements. Use set_element, instead.
'''
import numpy as np
from numbers import Number
from scipy.sparse import csr_matrix, coo_matrix, lil_matrix
from mfem.common.parcsr_extra import *
# DO NOT IMPORT MPI in Global, since some routines will be used
# in serial mode too.
try:
import mfem.par
MFEM_PAR = True
except BaseException:
MFEM_PAR = False
class CHypreVec(list):
    """Complex parallel Hypre vector.

    Stored as a 2-element list: self[0] is the real part and self[1]
    the imaginary part; each element is a HypreParVec or None (a None
    part means that part is identically zero).  ``horizontal`` marks a
    1 x n row vector; otherwise the vector is an n x 1 column.
    """

    def __init__(self, r=None, i=None, horizontal=False):
        list.__init__(self, [None] * 2)
        self._horizontal = horizontal

        if isinstance(r, np.ndarray):
            self[0] = ToHypreParVec(r)
        else:
            self[0] = r
        if isinstance(i, np.ndarray):
            self[1] = ToHypreParVec(i)
        else:
            self[1] = i

    def __repr__(self):
        if self[0] is not None:
            part = self[0].GetPartitioningArray()
        elif self[1] is not None:
            part = self[1].GetPartitioningArray()
        else:
            return "CHypreVec (empty)"
        return "CHypreVec" + str(self.shape) + \
            "[" + str(part[1] - part[0]) + "]"

    @property
    def imag(self):
        return self[1]

    @imag.setter
    def imag(self, value):
        self[1] = value

    @property
    def real(self):
        return self[0]

    @real.setter
    def real(self, value):
        self[0] = value

    @property
    def shape(self):
        # global shape; (1, n) for a horizontal (row) vector, (n, 1) otherwise
        if self[0] is not None:
            size = self[0].GlobalSize()
        elif self[1] is not None:
            size = self[1].GlobalSize()
        else:
            size = 0.0

        if self._horizontal:
            return 1, size
        else:
            return size, 1

    def isComplex(self):
        return not (self[1] is None)

    def GetPartitioningArray(self):
        """Return the MPI partitioning array of whichever part exists."""
        if self[0] is not None:
            part = self[0].GetPartitioningArray()
            #part[2] = self[0].GlobalSize()
        elif self[1] is not None:
            # BUG FIX: this branch used to assign to "prat", leaving
            # ``part`` unbound and raising NameError at the return below.
            part = self[1].GetPartitioningArray()
            #part[2] = self[1].GlobalSize()
        else:
            raise ValueError("CHypreVec is empty")
        return part

    def __imul__(self, other):
        # in-place scalar multiplication; vector * vector is not defined here
        if isinstance(other, CHypreVec):
            assert False, "CHypreVec *= vector is not supported. Use dot"
        elif np.iscomplexobj(other):
            #other = complex(other)
            i = other.imag
            r = other.real
            if self[0] is not None and self[1] is not None:
                rr = self[0].GetDataArray() * r - self[1].GetDataArray() * i
                ii = self[0].GetDataArray() * i + self[1].GetDataArray() * r
                self[0] = ToHypreParVec(rr)
                self[1] = ToHypreParVec(ii)
            elif self[0] is not None:
                if np.any(i != 0.):
                    self[1] = ToHypreParVec(i * self[0].GetDataArray())
                if np.any(r != 0.):
                    tmp = self[0].GetDataArray()
                    tmp *= r
                else:
                    self[0] = None
            elif self[1] is not None:
                if np.any(i != 0.):
                    self[0] = ToHypreParVec(-i * self[1].GetDataArray())
                if np.any(r != 0.):
                    tmp = self[1].GetDataArray()
                    tmp *= r
                else:
                    self[1] = None
            else:
                # BUG FIX: was the garbled statement "passself[0] = None"
                # (NameError).  Both parts are None here, nothing to scale.
                pass
        else:
            other = float(other)
            if self[0] is not None:
                self[0] *= other
            if self[1] is not None:
                self[1] *= other
        return self

    def __mul__(self, other):
        # scalar multiplication returning a new vector
        if isinstance(other, CHypreVec):
            assert False, "CHypreVec *= vector is not supported. Use dot"
        elif np.iscomplexobj(other):
            other = complex(other)
            i = other.imag
            r = other.real
        else:
            r = float(other)
            i = 0.0

        rdata = self[0].GetDataArray() if self[0] is not None else 0
        idata = self[1].GetDataArray() if self[1] is not None else 0

        rr = rdata * r - idata * i
        ii = rdata * i + idata * r

        # note: for the real part we keep it even if it is zero
        # so that it conservs vector size information
        rr = ToHypreParVec(rr)
        ii = ToHypreParVec(ii) if np.count_nonzero(ii) != 0 else None
        return CHypreVec(rr, ii, horizontal=self._horizontal)

    def __add__(self, other):
        assert self._horizontal == other._horizontal, "can not add vertical and horizontal vector"

        if self[0] is not None and other[0] is not None:
            data = self[0].GetDataArray() + other[0].GetDataArray()
            r = ToHypreParVec(data)
        elif self[0] is not None:
            data = self[0].GetDataArray()
            r = ToHypreParVec(data)
        elif other[0] is not None:
            data = other[0].GetDataArray()
            r = ToHypreParVec(data)
        else:
            r = None
        if self[1] is not None and other[1] is not None:
            data = self[1].GetDataArray() + other[1].GetDataArray()
            i = ToHypreParVec(data)
        elif self[1] is not None:
            data = self[1].GetDataArray()
            i = ToHypreParVec(data)
        elif other[1] is not None:
            data = other[1].GetDataArray()
            i = ToHypreParVec(data)
        else:
            i = None
        return CHypreVec(r, i, horizontal=self._horizontal)

    def __sub__(self, other):
        assert self._horizontal == other._horizontal, "can not add vertical and horizontal vector"

        if self[0] is not None and other[0] is not None:
            data = self[0].GetDataArray() - other[0].GetDataArray()
            r = ToHypreParVec(data)
        elif self[0] is not None:
            data = self[0].GetDataArray()
            r = ToHypreParVec(data)
        elif other[0] is not None:
            data = -other[0].GetDataArray()
            r = ToHypreParVec(data)
        else:
            r = None
        if self[1] is not None and other[1] is not None:
            data = self[1].GetDataArray() - other[1].GetDataArray()
            i = ToHypreParVec(data)
        elif self[1] is not None:
            data = self[1].GetDataArray()
            i = ToHypreParVec(data)
        elif other[1] is not None:
            data = -other[1].GetDataArray()
            i = ToHypreParVec(data)
        else:
            i = None
        return CHypreVec(r, i, horizontal=self._horizontal)

    def dot(self, other):
        """Vec . Vec inner product, or (Mat^t * Vec)^t for a horizontal self."""
        if isinstance(other, CHypreVec):
            return InnerProductComplex(self, other)
        elif (isinstance(other, CHypreMat) and
              self._horizontal):
            ret = other.transpose().dot(self)
            ret._horizontal = True
            return ret
        else:
            raise ValueError(
                "CHypreVec::dot supports Vec*Vec (InnerProduct) and (Mat^t*Vec)^t ")

    def get_elements(self, idx):
        """Return values at the locally-owned subset of global indices idx."""
        part = self.GetPartitioningArray()
        idx = idx - part[0]
        idx = idx[idx < part[1]-part[0]]
        idx = idx[idx >= 0]
        if len(idx) == 0:
            return np.array([])
        ret = 0.0
        if self[0] is not None:
            ret = ret + self[0].GetDataArray()[idx]
        if self[1] is not None:
            ret = ret + 1j*self[1].GetDataArray()[idx]
        return ret

    def set_elements(self, idx, value):
        """Set values at the locally-owned subset of global indices idx."""
        part = self.GetPartitioningArray()
        idx = idx - part[0]
        idx = idx[idx < part[1]-part[0]]
        idx = idx[idx >= 0]

        rvalue = value.real if np.iscomplexobj(value) else value

        if len(idx) == 0:
            return

        if self[0] is not None:
            self[0].GetDataArray()[idx] = rvalue

        if np.iscomplexobj(value):
            if self[1] is None:
                # allocate an imaginary part on demand
                i = self[0].GetDataArray()*0.0
                self[1] = ToHypreParVec(i)
            self[1].GetDataArray()[idx] = value.imag

    def set_element(self, i, v):
        # no-op when global index i is not owned by this rank
        part = self.GetPartitioningArray()
        if part[0] <= i and i < part[1]:
            v = complex(v)
            if self[0] is not None:
                self[0][int(i - part[0])] = v.real
            if self[1] is not None:
                self[1][int(i - part[0])] = v.imag

    def get_element(self, i):
        # returns None implicitly when global index i is not owned locally
        part = self.GetPartitioningArray()
        if part[0] <= i and i < part[1]:
            if self[0] is not None:
                r = self[0][int(i - part[0])]
            else:
                r = 0
            if self[1] is not None:
                return r + 1j * self[1][int(i - part[0])]
            else:
                return r

    def copy_element(self, tdof, vec):
        for i in tdof:
            v = vec.get_element(i)
            self.set_element(i, v)
    '''
    def gather(self):
        from mpi4py import MPI
        myid = MPI.COMM_WORLD.rank
        vecr = 0.0; veci = 0.0
        if self[0] is not None:
            vecr = gather_vector(self[0].GetDataArray(), MPI.DOUBLE)
        if self[1] is not None:
            veci = gather_vector(self[1].GetDataArray(), MPI.DOUBLE)
        if myid == 0:
            if self[0] is None:
                return vecr
            else:
                return vecr + 1j*veci
    '''

    def get_squaremat_from_right(self):
        '''
        squre matrix which can be multipled from the right of self.
        '''
        if not self._horizontal:
            raise ValueError("Vector orientation is not right")
        part = self.GetPartitioningArray()
        width = self[1].GlobalSize()
        return SquareCHypreMat(width, part, real=(self[1] is None))

    def transpose(self):
        # NOTE: toggles orientation in place and returns self
        self._horizontal = not self._horizontal
        return self

    def _do_reset(self, v, idx):
        # zero the locally-owned entries listed in idx;
        # ownership is transferrd to a new vector.
        ownership = v.GetOwnership()
        data = v.GetDataArray()
        part = v.GetPartitioningArray()
        for i in idx:
            if i >= part[1]:
                continue
            if i < part[0]:
                continue
            data[i - part[0]] = 0
        ret = ToHypreParVec(data)
        ret.SetOwnership(ownership)
        v.SetOwnership(0)
        return ret

    def resetCol(self, idx):
        if self._horizontal:
            if self[0] is not None:
                self[0] = self._do_reset(self[0], idx)
            if self[1] is not None:
                self[1] = self._do_reset(self[1], idx)
        else:
            # a column vector has a single column; resetting it zeros everything
            if 0 in idx:
                self *= 0.0

    def resetRow(self, idx):
        if self._horizontal:
            # a row vector has a single row; resetting it zeros everything
            if 0 in idx:
                self *= 0.0
        else:
            if self[0] is not None:
                self[0] = self._do_reset(self[0], idx)
            if self[1] is not None:
                self[1] = self._do_reset(self[1], idx)

    def _do_select(self, v, lidx):
        # keep only local indices lidx;
        # ownership is transferrd to a new vector.
        ownership = v.GetOwnership()
        data = v.GetDataArray()
        data2 = data[lidx]
        ret = ToHypreParVec(data2)
        ret.SetOwnership(ownership)
        v.SetOwnership(0)
        return ret

    def selectRows(self, idx):
        '''
        idx is global index
        '''
        if self._horizontal:
            if not 0 in idx:
                raise ValueError("VectorSize becomes zero")
            return self

        part = self.GetPartitioningArray()
        idx = idx[idx >= part[0]]
        idx = idx[idx < part[1]]
        lidx = idx - part[0]

        r = None
        i = None
        if not self._horizontal:
            if self[0] is not None:
                r = self._do_select(self[0], lidx)
            if self[1] is not None:
                i = self._do_select(self[1], lidx)
        return CHypreVec(r, i, horizontal=self._horizontal)

    def selectCols(self, idx):
        '''
        idx is global index
        '''
        if not self._horizontal:
            if not 0 in idx:
                raise ValueError("VectorSize becomes zero")
            return self

        part = self.GetPartitioningArray()
        idx = idx[idx >= part[0]]
        idx = idx[idx < part[1]]
        lidx = idx - part[0]

        r = None
        i = None
        if self._horizontal:
            if self[0] is not None:
                r = self._do_select(self[0], lidx)
            if self[1] is not None:
                i = self._do_select(self[1], lidx)
        return CHypreVec(r, i, horizontal=self._horizontal)

    def GlobalVector(self):
        '''
        Here it is reimplmente using MPI allgather.

        This is because GlobalVactor does not work when the vector
        is too small so that some node does not have data.
        '''
        def gather_allvector(data):
            from mfem.common.mpi_dtype import get_mpi_datatype
            from mpi4py import MPI

            mpi_data_type = get_mpi_datatype(data)
            myid = MPI.COMM_WORLD.rank
            rcounts = data.shape[0]
            rcounts = np.array(MPI.COMM_WORLD.allgather(rcounts))
            for x in data.shape[1:]:
                rcounts = rcounts * x
            cm = np.hstack((0, np.cumsum(rcounts)))
            disps = list(cm[:-1])
            length = cm[-1]
            recvbuf = np.empty([length], dtype=data.dtype)
            recvdata = [recvbuf, rcounts, disps, mpi_data_type]
            senddata = [data.flatten(), data.flatten().shape[0]]
            MPI.COMM_WORLD.Allgatherv(senddata, recvdata)
            return recvbuf.reshape(-1, *data.shape[1:])

        if self[0] is not None:
            v1 = gather_allvector(self[0].GetDataArray().copy())
        else:
            v1 = 0.0
        if self[1] is not None:
            v2 = gather_allvector(self[1].GetDataArray().copy())
            v1 = v1 + 1j * v2
        return v1

    def toarray(self):
        '''
        numpy array of local vector
        '''
        if self[0] is not None:
            v = self[0].GetDataArray()
        else:
            v = 0.0
        if self[1] is not None:
            v = v + 1j * self[1].GetDataArray()
        return v

    def isAllZero(self):
        # NOTE(review): this returns True when ANY global entry is nonzero,
        # which contradicts the name (looks like a missing "not") — confirm
        # against callers before changing.
        return any(self.GlobalVector())

    def get_global_coo(self):
        """Return the local data as a global-shaped scipy coo_matrix."""
        data = self[0].GetDataArray()
        if self[1] is not None:
            data = data + 1j * self[1].GetDataArray()
        gcoo = coo_matrix(self.shape, dtype=data.dtype)
        gcoo.data = data
        part = self.GetPartitioningArray()
        if self._horizontal:
            gcoo.row = np.zeros(len(data))
            gcoo.col = np.arange(len(data)) + part[0]
        else:
            gcoo.col = np.zeros(len(data))
            gcoo.row = np.arange(len(data)) + part[0]
        return gcoo

    def true_nnz(self):
        '''
        more expensive version which reports nnz after
        eliminating all zero entries

        I can not use @property, since ScipyCoo can't
        have @property (class inherited from ndarray)
        '''
        data = self[0].GetDataArray()
        if self[1] is not None:
            data = data + 1j * self[1].GetDataArray()
        # NOTE(review): this counts ZERO entries (data == 0.), not nonzeros —
        # verify the intended semantics before relying on the value.
        local_nnz = np.sum(data == 0.)
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        nnz = comm.allgather(local_nnz)
        return nnz

    @property
    def isHypre(self):
        return True
class CHypreMat(list):
    """Complex parallel Hypre CSR matrix.

    Stored as a 2-element list: self[0] is the real part and self[1]
    the imaginary part; each element is a HypreParMatrix or None (a
    None part means that part is identically zero).
    """

    def __init__(self, r=None, i=None, col_starts=None):
        list.__init__(self, [None] * 2)
        if isinstance(r, csr_matrix):
            self[0] = ToHypreParCSR(r, col_starts=col_starts)
        elif isinstance(r, mfem.par.HypreParMatrix):
            self[0] = r
        elif r is None:
            self[0] = r
        else:
            assert False, "unknown matrix element"
        if isinstance(i, csr_matrix):
            self[1] = ToHypreParCSR(i, col_starts=col_starts)
        elif isinstance(i, mfem.par.HypreParMatrix):
            self[1] = i
        elif i is None:
            self[1] = i
        else:
            assert False, "unknown matrix element"

    def GetOwns(self):
        """Return the six ownership flags (diag/offd/colmap for each part)."""
        flags = [None] * 6
        if self[0] is not None:
            flags[0] = self[0].OwnsDiag()
            flags[1] = self[0].OwnsOffd()
            flags[2] = self[0].OwnsColMap()
        if self[1] is not None:
            flags[3] = self[1].OwnsDiag()
            flags[4] = self[1].OwnsOffd()
            flags[5] = self[1].OwnsColMap()
        return flags

    def __repr__(self):
        return "CHypreMat" + str(self.shape) + "[" + str(self.lshape) + "]"

    def isComplex(self):
        return not (self[1] is None)

    def __mul__(self, other):  # A * B or A * v
        if isinstance(other, CHypreMat):
            return CHypreMat(*ParMultComplex(self, other))
        elif isinstance(other, CHypreVec):
            v = CHypreVec(*ParMultVecComplex(self, other))
            v._horizontal = other._horizontal
            return v
        elif isinstance(other, Number):
            # scalar multiplication via mfem.par.Add with zero second term
            if np.iscomplexobj(other):
                other = complex(other)
                r = other.real
                i = other.imag
            else:
                r = other
                i = 0
            if self[0] is not None and self[1] is not None:
                R = mfem.par.Add(r, self[0], -i, self[1])
                I = mfem.par.Add(i, self[0], r, self[1])
            elif self[0] is not None:
                #R = mfem.par.Add(r, self[0], 0.0, self[0])
                R = mfem.par.Add(r, self[0], 0.0, self[0])
                if i != 0.0:
                    I = mfem.par.Add(i, self[0], 0.0, self[0])
                else:
                    I = None
            elif self[1] is not None:
                if r != 0.0:
                    I = mfem.par.Add(r, self[1], 0.0, self[1])
                else:
                    I = None
                R = mfem.par.Add(-i, self[1], 0.0, self[1])
            else:
                assert False, "this mode is not supported"
                R = None
                I = None

            if R is not None:
                R.CopyRowStarts()
                R.CopyColStarts()
            if I is not None:
                I.CopyRowStarts()
                I.CopyColStarts()
            return CHypreMat(R, I)
        raise ValueError("argument should be CHypreMat/Vec")

    def __rmul__(self, other):
        if not isinstance(other, CHypreMat):
            raise ValueError(
                "argument should be CHypreMat")
        return CHypreMat(*ParMultComplex(other, self))

    def __add__(self, other):  # A + B
        if not isinstance(other, CHypreMat):
            raise ValueError(
                "argument should be CHypreMat")

        if self[0] is not None and other[0] is not None:
            r = ParAdd(self[0], other[0])
        elif self[0] is not None:
            r = ToHypreParCSR(ToScipyCoo(self[0]).tocsr())
        elif other[0] is not None:
            r = ToHypreParCSR(ToScipyCoo(other[0]).tocsr())
        else:
            r = None

        if self[1] is not None and other[1] is not None:
            i = ParAdd(self[1], other[1])
            # i = mfem.par.add_hypre(1.0, self[1], 1.0, other[1])
        elif self[1] is not None:
            i = ToHypreParCSR(ToScipyCoo(self[1]).tocsr())
        elif other[1] is not None:
            i = ToHypreParCSR(ToScipyCoo(other[1]).tocsr())
        else:
            i = None

        if r is not None:
            r.CopyRowStarts()
            r.CopyColStarts()
        if i is not None:
            i.CopyRowStarts()
            i.CopyColStarts()

        return CHypreMat(r, i)

    def __sub__(self, other):  # A - B
        if not isinstance(other, CHypreMat):
            raise ValueError(
                "argument should be CHypreMat")

        if self[0] is not None and other[0] is not None:
            # negate other in place around ParAdd, then restore it
            other[0] *= -1
            r = ParAdd(self[0], other[0])
            other[0] *= -1
        elif self[0] is not None:
            r = ToHypreParCSR(ToScipyCoo(self[0]).tocsr())
        elif other[0] is not None:
            # BUG FIX: was "othe[0]" (NameError)
            r = mfem.par.Add(-1, other[0], 0.0, other[0])
        else:
            r = None

        if self[1] is not None and other[1] is not None:
            other[1] *= -1
            i = ParAdd(self[1], other[1])
            other[1] *= -1
        elif self[1] is not None:
            i = ToHypreParCSR(ToScipyCoo(self[1]).tocsr())
        elif other[1] is not None:
            # BUG FIX: was "othe[1]" (NameError)
            i = mfem.par.Add(-1, other[1], 0.0, other[1])
        else:
            i = None

        if r is not None:
            r.CopyRowStarts()
            r.CopyColStarts()
        if i is not None:
            i.CopyRowStarts()
            i.CopyColStarts()

        return CHypreMat(r, i)

    def __neg__(self):  # -B
        r = None
        i = None
        if self[0] is not None:
            r = mfem.par.Add(-1, self[0], 0.0, self[0])
            r.CopyRowStarts()
            r.CopyColStarts()
        if self[1] is not None:
            # BUG FIX: the zero-coefficient operand was self[0]; use self[1]
            # to match the pattern used everywhere else in this class.
            i = mfem.par.Add(-1, self[1], 0.0, self[1])
            i.CopyRowStarts()
            i.CopyColStarts()

        return CHypreMat(r, i)

    @property
    def imag(self):
        return self[1]

    @imag.setter
    def imag(self, value):
        self[1] = value

    @property
    def real(self):
        return self[0]

    @real.setter
    def real(self, value):
        self[0] = value

    def GetColPartArray(self):
        if self[0] is not None:
            return self[0].GetColPartArray()
        else:
            return self[1].GetColPartArray()

    def GetRowPartArray(self):
        if self[0] is not None:
            return self[0].GetRowPartArray()
        else:
            return self[1].GetRowPartArray()

    def __imul__(self, other):
        if self[0] is not None:
            self[0] *= other
        if self[1] is not None:
            self[1] *= other
        return self

    def dot(self, other):
        return self.__mul__(other)

    def transpose(self):
        '''
        transpose of matrix
        this method returns a new matrix
        '''
        R = self[0].Transpose() if self[0] is not None else None
        I = self[1].Transpose() if self[1] is not None else None
        return CHypreMat(R, I)
        # return CHypreMat(self[0].Transpose(), self[1].Transpose())

    def conj(self, inplace=True):
        '''
        complex conjugate
          if copy is on, imaginary part becomes different object
        '''
        if self[1] is None:
            return self
        if not inplace:
            self[1] = ToHypreParCSR(-ToScipyCoo(self[1]).tocsr())
        else:
            self[1] *= -1.0
        return self

    def rap(self, B):
        '''
        B^* A B
        '''
        ret = B.conj().transpose() * self
        ret = ret * (B.conj())
        # this should put B back to original
        return ret

    def setDiag(self, idx, value=1.0):
        """Set diagonal entries at global indices idx to ``value``."""
        if self[0] is not None:
            self[0] = ResetHypreDiag(self[0], idx, value=np.real(value))
        if self[1] is not None:
            self[1] = ResetHypreDiag(self[1], idx, value=np.imag(value))

    def getDiag(self, idx):
        """Read (complex) diagonal entries at global indices idx."""
        if self[0] is not None:
            diagvalue = ReadHypreDiag(self[0], idx)
        else:
            diagvalue = complex(0.0)

        if self[1] is not None:
            diagvalue = diagvalue + 1j*ReadHypreDiag(self[1], idx)
        else:
            diagvalue = diagvalue + 0j
        return diagvalue

    def resetCol(self, idx, inplace=True):
        if self[0] is not None:
            r = ResetHypreCol(self[0], idx)
        else:
            r = None
        if self[1] is not None:
            i = ResetHypreCol(self[1], idx)
        else:
            i = None
        if inplace:
            self[0] = r
            self[1] = i
            return self
        else:
            return CHypreMat(r, i)

    def resetRow(self, idx, inplace=True):
        if self[0] is not None:
            r = ResetHypreRow(self[0], idx)
        else:
            r = None
        if self[1] is not None:
            i = ResetHypreRow(self[1], idx)
        else:
            i = None
        if inplace:
            self[0] = r
            self[1] = i
            return self
        else:
            return CHypreMat(r, i)

    def selectRows(self, idx):
        '''
        idx is global index
        '''
        rpart = self[0].GetRowPartArray()
        #rpart[2] = self[0].GetGlobalNumRows()
        cpart = self[0].GetColPartArray()
        #cpart[2] = self[0].GetGlobalNumCols()

        idx = idx[idx >= rpart[0]]
        idx = idx[idx < rpart[1]]
        idx = idx - rpart[0]

        csr = ToScipyCoo(self[0]).tocsr()
        csr = csr[idx, :]
        r = ToHypreParCSR(csr, col_starts=cpart)
        if self[1] is not None:
            csr = ToScipyCoo(self[1]).tocsr()
            csr = csr[idx, :]
            i = ToHypreParCSR(csr, col_starts=cpart)
        else:
            i = None
        return CHypreMat(r, i)

    def selectCols(self, idx):
        '''
        idx is global index
        '''
        cpart = self[0].GetColPartArray()
        #cpart[2] = self[0].GetGlobalNumCols()
        rpart = self[0].GetRowPartArray()
        #rpart[2] = self[0].GetGlobalNumRows()

        idx = idx[idx >= cpart[0]]
        idx = idx[idx < cpart[1]]
        idx = idx - cpart[0]

        # select columns by selecting rows of the transpose, then transpose back
        mat = self.transpose()
        csr = ToScipyCoo(mat[0]).tocsr()
        csr = csr[idx, :]
        r = ToHypreParCSR(csr, col_starts=rpart)
        if self[1] is not None:
            csr = ToScipyCoo(mat[1]).tocsr()
            csr = csr[idx, :]
            i = ToHypreParCSR(csr, col_starts=rpart)
        else:
            i = None
        mat = CHypreMat(r, i).transpose()

        '''
        if (cpart == rpart).all():
            csr = ToScipyCoo(mat[0]).tocsr()
            mat[0] = ToHypreParCSR(csr, col_starts =rpart)
            if mat[1] is not None:
               csr = ToScipyCoo(mat[1]).tocsr()
               mat[1] = ToHypreParCSR(csr, col_starts =rpart)
        '''
        return mat

    @property
    def nnz(self):
        if self[0] is not None and self[1] is not None:
            return self[0].NNZ(), self[1].NNZ()
        if self[0] is not None:
            return self[0].NNZ()
        if self[1] is not None:
            return self[1].NNZ()

    def true_nnz(self):
        '''
        more expensive version which reports nnz after
        eliminating all zero entries
        '''
        #coo = self.get_local_coo()
        if self[0] is not None:
            nnz0, tnnz0 = self[0].get_local_true_nnz()
        else:
            nnz0 = 0
            tnnz0 = 0
        if self[1] is not None:
            nnz1, tnnz1 = self[1].get_local_true_nnz()
        else:
            nnz1 = 0
            tnnz1 = 0
        # print nnz0, tnnz0, nnz1, tnnz1
        return tnnz0, tnnz1

    def m(self):
        '''
        return global row number: two number should be the same
        '''
        ans = []
        if self[0] is not None:
            ans.append(self[0].M())
        if self[1] is not None:
            ans.append(self[1].M())
        if len(ans) == 2 and ans[0] != ans[1]:
            raise ValueError(
                "data format error, real and imag should have same size")
        return ans[0]

    def n(self):
        '''
        return global col number: two number should be the same
        '''
        ans = []
        if self[0] is not None:
            ans.append(self[0].N())
        if self[1] is not None:
            ans.append(self[1].N())
        if len(ans) == 2 and ans[0] != ans[1]:
            raise ValueError(
                "data format error, real and imag should have same size")
        return ans[0]

    @property
    def shape(self):
        # global (rows, cols)
        if self[0] is not None:
            return (self[0].GetGlobalNumRows(), self[0].GetGlobalNumCols())
        elif self[1] is not None:
            return (self[1].GetGlobalNumRows(), self[1].GetGlobalNumCols())
        else:
            return (0, 0)

    @property
    def lshape(self):
        # local (rows, cols)
        if self[0] is not None:
            return (self[0].GetNumRows(), self[0].GetNumCols())
        elif self[1] is not None:
            return (self[1].GetNumRows(), self[1].GetNumCols())
        else:
            return (0, 0)

    def get_local_coo(self):
        if self.isComplex():
            return (ToScipyCoo(self[0]) + 1j * ToScipyCoo(self[1])).tocoo()
        else:
            return ToScipyCoo(self[0])

    def get_global_coo(self):
        lcoo = self.get_local_coo()
        gcoo = coo_matrix(self.shape)
        gcoo.data = lcoo.data
        # shift local row indices to global; columns are already global
        gcoo.row = lcoo.row + self.GetRowPartArray()[0]
        gcoo.col = lcoo.col
        return gcoo

    def get_squaremat_from_right(self):
        '''
        squre matrix which can be multipled from the right of self.
        '''
        size = self.shape[1]
        if self[0] is not None:
            part = self[0].GetColPartArray()
            width = self[0].GetGlobalNumCols()
        elif self[1] is not None:
            part = self[1].GetColPartArray()
            width = self[1].GetGlobalNumCols()
        else:
            raise ValueError("CHypreMat is empty")
        return SquareCHypreMat(width, part, real=(self[1] is None))

    def elimination_matrix(self, idx):
        #  # local version
        #  ret = lil_matrix((len(nonzeros), self.shape[0]))
        #  for k, z in enumerate(nonzeros):
        #      ret[k, z] = 1.
        #  return ret.tocoo()
        cpart = self.GetColPartArray()
        rpart = self.GetRowPartArray()

        idx = idx[idx >= rpart[0]]
        idx = idx[idx < rpart[1]]
        idx = idx - rpart[0]

        shape = (len(idx), self.shape[1])
        # print shape, idx + rpart[0]
        elil = lil_matrix(shape)
        for i, j in enumerate(idx):
            elil[i, j + rpart[0]] = 1

        r = ToHypreParCSR(elil.tocsr(), col_starts=cpart)
        return CHypreMat(r, None)

    def eliminate_RowsCols(self, B, tdof, inplace=False, diagpolicy=0):
        """Eliminate rows/cols listed in tdof; returns (Ae, A, B)."""
        # note: tdof is a valued viewed in each node. MyTDoF offset is
        # subtracted
        tdof1 = mfem.par.intArray(tdof)

        if not inplace:
            #print("inplace flag off copying ParCSR")
            if self[0] is not None:
                r = ToHypreParCSR(ToScipyCoo(self[0]).tocsr())
            else:
                r = None
            if self[1] is not None:
                i = ToHypreParCSR(ToScipyCoo(self[1]).tocsr())
            else:
                i = None
            target = CHypreMat(r, i)
        else:
            target = self

        row0 = self.GetRowPartArray()[0]
        gtdof = list(np.array(tdof, dtype=int) + row0)

        if diagpolicy == 0:
            diagAe = target.getDiag(gtdof) - 1
            diagA = 1.0
        else:
            diagAe = 0.0
            diagA = target.getDiag(gtdof)

        if target[0] is not None:
            Aer = target[0].EliminateRowsCols(tdof1)
            Aer.CopyRowStarts()
            Aer.CopyColStarts()
            #row0 = Aer.GetRowPartArray()[0]
        else:
            Aer = None
        if target[1] is not None:
            Aei = target[1].EliminateRowsCols(tdof1)
            Aei.CopyRowStarts()
            Aei.CopyColStarts()
            #row0 = Aei.GetRowPartArray()[0]
        else:
            Aei = None

        Ae = CHypreMat(Aer, Aei)

        target.setDiag(gtdof, value=diagA)
        Ae.setDiag(gtdof, value=diagAe)

        # if diagpolicy == 0:
        #    part = B.GetPartitioningArray()
        #    xxx = np.ones(part[1] - part[0], dtype=complex)
        #    xxx[tdof] = diagA
        #    B *= xxx
        # if diagpolicy == 1:
        #    print(tdof)
        #    print(diagA.shape)
        #    diagA = diagA[tdof]
        B.set_elements(gtdof, diagA)

        return Ae, target, B

    @property
    def isHypre(self):
        return True
def SquareCHypreMat(width, part, real=False):
    """Build an empty CHypreMat with ``part[1]-part[0]`` local rows and
    ``width`` global columns; the imaginary part is omitted when ``real``."""
    from scipy.sparse import csr_matrix

    nlocal = part[1] - part[0]
    real_part = csr_matrix((nlocal, width))
    imag_part = None if real else csr_matrix((nlocal, width))
    return CHypreMat(real_part, imag_part)
def Array2CHypreVec(array, part=None, horizontal=False):
    '''
    convert array in rank =0 to
    distributed Hypre 1D Matrix (size = m x 1)
    '''
    from mpi4py import MPI

    # complex-ness is decided on rank 0 and broadcast so all ranks agree
    is_complex = MPI.COMM_WORLD.bcast(np.iscomplexobj(array), root=0)

    if is_complex:
        rarray = None if array is None else array.real
        iarray = None if array is None else array.imag
        return CHypreVec(Array2HypreVec(rarray, part),
                         Array2HypreVec(iarray, part), horizontal=horizontal)

    rarray = None if array is None else array
    return CHypreVec(Array2HypreVec(rarray, part),
                     None, horizontal=horizontal)
def CHypreVec2Array(array):
    """Gather a CHypreVec into a numpy array on rank 0 (None on other ranks)."""
    from mpi4py import MPI

    on_root = MPI.COMM_WORLD.rank == 0

    if array[0] is not None:
        r = HypreVec2Array(array[0])
    else:
        r = 0.0 if on_root else None

    if array[1] is None:
        return r

    i = HypreVec2Array(array[1])
    if i is None:
        return r
    # only rank 0 holds the gathered data
    return r + 1j * i if on_root else None
def CHypreMat2Coo(mat):
    """Deprecated: convert a CHypreMat to a scipy coo matrix."""
    print("CHYPREMat2Coo: deprecated, Use class method !!!!")
    if not mat.isComplex():
        return ToScipyCoo(mat.real)
    return ToScipyCoo(mat.real) + 1j * ToScipyCoo(mat.imag)
def LF2PyVec(rlf, ilf=None, horizontal=False):
    """Convert a (pair of) LinearForm(s) to a CHypreVec in parallel mode,
    or to a numpy row/column vector in serial mode."""
    if MFEM_PAR:
        # parallel: assemble each part into a HypreParVec
        rv = rlf.ParallelAssemble()
        rv.thisown = True
        iv = None
        if ilf is not None:
            iv = ilf.ParallelAssemble()
            iv.thisown = True
        return CHypreVec(rv, iv, horizontal=horizontal)

    vec = rlf.GetDataArray().copy()  # ; rlf.thisown = False
    if ilf is not None:
        vec = vec + 1j * ilf.GetDataArray()  # ; ilf.thisown = False
    shape = (1, -1) if horizontal else (-1, 1)
    return vec.reshape(shape)


LinearForm2PyVector = LF2PyVec
def MfemVec2PyVec(rlf, ilf=None, horizontal=False):
    """Wrap mfem vector data as a CHypreVec (parallel) or numpy vector (serial)."""
    rdata = rlf.GetDataArray().copy()  # ; rlf.thisown = False
    idata = None if ilf is None else ilf.GetDataArray()  # ; ilf.thisown = False

    if MFEM_PAR:
        rvec = ToHypreParVec(rdata)
        ivec = None if idata is None else ToHypreParVec(idata)
        return CHypreVec(rvec, ivec, horizontal=horizontal)

    if idata is not None:
        rdata = rdata + 1j * idata
    shape = (1, -1) if horizontal else (-1, 1)
    return rdata.reshape(shape)
def Array2PyVec(array, part=None, horizontal=False):
    '''
    Convert an array on rank 0 into a distributed Hypre 1D matrix
    (size = m x 1) in parallel builds, or reshape it into a row/column
    vector in serial builds.
    '''
    if MFEM_PAR:
        return Array2CHypreVec(array, part=part, horizontal=horizontal)
    shape = (1, -1) if horizontal else (-1, 1)
    return array.reshape(shape)
def BF2PyMat(rbf, ibf=None, finalize=False):
    '''
    Convert a pair of BilinearForms into a CHypreMat (parallel build)
    or a scipy sparse matrix (serial build).
    '''
    if finalize:
        rbf.Finalize()
        if ibf is not None:
            ibf.Finalize()
    if MFEM_PAR:
        real_mat = rbf.ParallelAssemble()
        real_mat.thisown = True
        imag_mat = None
        if ibf is not None:
            imag_mat = ibf.ParallelAssemble()
            imag_mat.thisown = True
        return CHypreMat(real_mat, imag_mat)
    from mfem.common.sparse_utils import sparsemat_to_scipycsr
    real_mat = rbf.SpMat()
    if ibf is None:
        return sparsemat_to_scipycsr(real_mat, dtype=float)
    # Combine real and imaginary sparse parts into one complex CSR matrix.
    lil_r = sparsemat_to_scipycsr(real_mat, dtype=float).tolil()
    lil_i = sparsemat_to_scipycsr(ibf.SpMat(), dtype=complex).tolil()
    return (lil_r + 1j * lil_i).tocsr()
BilinearForm2PyMatix = BF2PyMat
def MfemMat2PyMat(M1, M2=None):
    '''
    Convert a pair of SpMat/HypreParCSR matrices into a CHypreMat
    (parallel) or a scipy sparse matrix (serial). Simpler variant of
    BF2PyMat: it skips the BilinearForm -> matrix conversion step.
    '''
    from mfem.common.sparse_utils import sparsemat_to_scipycsr
    if MFEM_PAR:
        return CHypreMat(M1, M2)
    if M2 is None:
        return sparsemat_to_scipycsr(M1, dtype=float)
    lil_r = sparsemat_to_scipycsr(M1, dtype=float).tolil()
    lil_i = sparsemat_to_scipycsr(M2, dtype=complex).tolil()
    return (lil_r + 1j * lil_i).tocsr()
def EmptySquarePyMat(m, col_starts=None):
    """Return an empty (all-zero) m x m sparse matrix.

    Parallel build: a CHypreMat whose local row block is determined by
    ``col_starts`` (or the assumed partitioning when not given).
    Serial build: an empty scipy CSR matrix.

    :param m: matrix dimension
    :param col_starts: optional (start, stop) partitioning for the
        local rows in parallel builds
    """
    from scipy.sparse import csr_matrix
    if MFEM_PAR:
        if col_starts is None:
            col_starts = get_assumed_patitioning(m)
        rows = col_starts[1] - col_starts[0]
        m1 = csr_matrix((rows, m))
        return CHypreMat(m1, None, )
    # Serial path; csr_matrix is already imported above (the original
    # code re-imported it here redundantly).
    return csr_matrix((m, m))
def IdentityPyMat(m, col_starts=None, diag=1.0):
    """Build an m x m identity-like sparse matrix scaled by ``diag``.

    Serial build: a scipy CSR matrix. Parallel build: a CHypreMat with
    an optional imaginary part when ``diag`` is complex.
    """
    from scipy.sparse import coo_matrix, lil_matrix
    if not MFEM_PAR:
        out = coo_matrix((m, m))
        out.setdiag(np.zeros(m) + diag)
        return out.tocsr()
    if col_starts is None:
        col_starts = get_assumed_patitioning(m)
    start = col_starts[0]
    nrows = col_starts[1] - start
    # Split the diagonal value into real/imaginary components.
    if np.iscomplexobj(diag):
        re_val, im_val = diag.real, diag.imag
    else:
        re_val, im_val = float(np.real(diag)), 0.0
    m1 = lil_matrix((nrows, m))
    for r in range(nrows):
        m1[r, r + start] = re_val
    m1 = m1.tocsr()
    m2 = None
    if im_val != 0.0:
        m2 = lil_matrix((nrows, m))
        for r in range(nrows):
            m2[r, r + start] = im_val
        m2 = m2.tocsr()
    return CHypreMat(m1, m2, )
def HStackPyVec(vecs, col_starts=None):
    '''
    Horizontally stack vertical vectors into a PyMat
    (CHypreMat in parallel builds, scipy CSR matrix in serial builds).
    '''
    from scipy.sparse import csr_matrix
    if not MFEM_PAR:
        return csr_matrix(np.hstack(vecs))
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rows = vecs[0].GetPartitioningArray()
    if col_starts is None:
        col_starts = get_assumed_patitioning(len(vecs))
    has_imag = any(v.isComplex() for v in vecs)
    stacked = np.hstack([np.atleast_2d(v.toarray()).transpose() for v in vecs])
    if has_imag:
        m1, m2 = csr_matrix(stacked.real), csr_matrix(stacked.imag)
    else:
        m1, m2 = csr_matrix(stacked), None
    return CHypreMat(m1, m2, col_starts=col_starts)
def PyVec2PyMat(vec, col_starts=None):
    """Wrap a single (vertical) vector as a one-column PyMat."""
    from scipy.sparse import csr_matrix
    if not MFEM_PAR:
        return csr_matrix(vec)
    # Parallel build: vec must be a vertical CHypreVec.
    assert not vec._horizontal, "PyVec must be vertical"
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rows = vec.GetPartitioningArray()
    if col_starts is None:
        col_starts = get_assumed_patitioning(1)
    has_imag = vec.isComplex()
    col = np.atleast_2d(vec.toarray()).transpose()
    if has_imag:
        m1, m2 = csr_matrix(col.real), csr_matrix(col.imag)
    else:
        m1, m2 = csr_matrix(col), None
    return CHypreMat(m1, m2, col_starts=col_starts)
| [
"scipy.sparse.lil_matrix",
"numpy.hstack",
"mfem.common.sparse_utils.sparsemat_to_scipycsr",
"mpi4py.MPI.COMM_WORLD.allgather",
"numpy.any",
"numpy.count_nonzero",
"numpy.iscomplexobj",
"numpy.sum",
"numpy.array",
"mfem.common.mpi_dtype.get_mpi_datatype",
"numpy.empty",
"numpy.real",
"scipy.... | [((32235, 32258), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(lr, width)'], {}), '((lr, width))\n', (32245, 32258), False, 'from scipy.sparse import csr_matrix\n'), ((8105, 8127), 'numpy.iscomplexobj', 'np.iscomplexobj', (['value'], {}), '(value)\n', (8120, 8127), True, 'import numpy as np\n'), ((14653, 14693), 'scipy.sparse.coo_matrix', 'coo_matrix', (['self.shape'], {'dtype': 'data.dtype'}), '(self.shape, dtype=data.dtype)\n', (14663, 14693), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((15412, 15431), 'numpy.sum', 'np.sum', (['(data == 0.0)'], {}), '(data == 0.0)\n', (15418, 15431), True, 'import numpy as np\n'), ((28774, 28796), 'scipy.sparse.coo_matrix', 'coo_matrix', (['self.shape'], {}), '(self.shape)\n', (28784, 28796), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((29980, 29997), 'scipy.sparse.lil_matrix', 'lil_matrix', (['shape'], {}), '(shape)\n', (29990, 29997), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((32313, 32336), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(lr, width)'], {}), '((lr, width))\n', (32323, 32336), False, 'from scipy.sparse import csr_matrix\n'), ((32584, 32606), 'numpy.iscomplexobj', 'np.iscomplexobj', (['array'], {}), '(array)\n', (32599, 32606), True, 'import numpy as np\n'), ((37332, 37353), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(rows, m)'], {}), '((rows, m))\n', (37342, 37353), False, 'from scipy.sparse import csr_matrix\n'), ((37460, 37478), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(m, m)'], {}), '((m, m))\n', (37470, 37478), False, 'from scipy.sparse import csr_matrix\n'), ((37739, 37760), 'numpy.iscomplexobj', 'np.iscomplexobj', (['diag'], {}), '(diag)\n', (37754, 37760), True, 'import numpy as np\n'), ((37936, 37957), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(rows, m)'], {}), '((rows, m))\n', (37946, 37957), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((38324, 38342), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(m, m)'], {}), '((m, 
m))\n', (38334, 38342), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((40030, 40045), 'scipy.sparse.csr_matrix', 'csr_matrix', (['vec'], {}), '(vec)\n', (40040, 40045), False, 'from scipy.sparse import csr_matrix\n'), ((2626, 2648), 'numpy.iscomplexobj', 'np.iscomplexobj', (['other'], {}), '(other)\n', (2641, 2648), True, 'import numpy as np\n'), ((4098, 4120), 'numpy.iscomplexobj', 'np.iscomplexobj', (['other'], {}), '(other)\n', (4113, 4120), True, 'import numpy as np\n'), ((7496, 7508), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7504, 7508), True, 'import numpy as np\n'), ((7931, 7953), 'numpy.iscomplexobj', 'np.iscomplexobj', (['value'], {}), '(value)\n', (7946, 7953), True, 'import numpy as np\n'), ((13197, 13219), 'mfem.common.mpi_dtype.get_mpi_datatype', 'get_mpi_datatype', (['data'], {}), '(data)\n', (13213, 13219), False, 'from mfem.common.mpi_dtype import get_mpi_datatype\n'), ((13573, 13609), 'numpy.empty', 'np.empty', (['[length]'], {'dtype': 'data.dtype'}), '([length], dtype=data.dtype)\n', (13581, 13609), True, 'import numpy as np\n'), ((13751, 13796), 'mpi4py.MPI.COMM_WORLD.Allgatherv', 'MPI.COMM_WORLD.Allgatherv', (['senddata', 'recvdata'], {}), '(senddata, recvdata)\n', (13776, 13796), False, 'from mpi4py import MPI\n'), ((36106, 36144), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M1'], {'dtype': 'float'}), '(M1, dtype=float)\n', (36127, 36144), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((36831, 36869), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M1'], {'dtype': 'float'}), '(M1, dtype=float)\n', (36852, 36869), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((38098, 38119), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(rows, m)'], {}), '((rows, m))\n', (38108, 38119), False, 'from scipy.sparse import coo_matrix, lil_matrix\n'), ((38958, 38978), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat.real'], {}), 
'(mat.real)\n', (38968, 38978), False, 'from scipy.sparse import csr_matrix\n'), ((38996, 39016), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat.imag'], {}), '(mat.imag)\n', (39006, 39016), False, 'from scipy.sparse import csr_matrix\n'), ((39048, 39063), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat'], {}), '(mat)\n', (39058, 39063), False, 'from scipy.sparse import csr_matrix\n'), ((39196, 39211), 'numpy.hstack', 'np.hstack', (['vecs'], {}), '(vecs)\n', (39205, 39211), True, 'import numpy as np\n'), ((39744, 39764), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat.real'], {}), '(mat.real)\n', (39754, 39764), False, 'from scipy.sparse import csr_matrix\n'), ((39782, 39802), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat.imag'], {}), '(mat.imag)\n', (39792, 39802), False, 'from scipy.sparse import csr_matrix\n'), ((39834, 39849), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat'], {}), '(mat)\n', (39844, 39849), False, 'from scipy.sparse import csr_matrix\n'), ((4673, 4693), 'numpy.count_nonzero', 'np.count_nonzero', (['ii'], {}), '(ii)\n', (4689, 4693), True, 'import numpy as np\n'), ((13326, 13359), 'mpi4py.MPI.COMM_WORLD.allgather', 'MPI.COMM_WORLD.allgather', (['rcounts'], {}), '(rcounts)\n', (13350, 13359), False, 'from mpi4py import MPI\n'), ((30874, 30899), 'numpy.array', 'np.array', (['tdof'], {'dtype': 'int'}), '(tdof, dtype=int)\n', (30882, 30899), True, 'import numpy as np\n'), ((37859, 37872), 'numpy.real', 'np.real', (['diag'], {}), '(diag)\n', (37866, 37872), True, 'import numpy as np\n'), ((38362, 38373), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (38370, 38373), True, 'import numpy as np\n'), ((13468, 13486), 'numpy.cumsum', 'np.cumsum', (['rcounts'], {}), '(rcounts)\n', (13477, 13486), True, 'import numpy as np\n'), ((17241, 17263), 'numpy.iscomplexobj', 'np.iscomplexobj', (['other'], {}), '(other)\n', (17256, 17263), True, 'import numpy as np\n'), ((23220, 23234), 'numpy.real', 'np.real', (['value'], {}), '(value)\n', (23227, 23234), True, 
'import numpy as np\n'), ((23325, 23339), 'numpy.imag', 'np.imag', (['value'], {}), '(value)\n', (23332, 23339), True, 'import numpy as np\n'), ((36219, 36257), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M1'], {'dtype': 'float'}), '(M1, dtype=float)\n', (36240, 36257), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((36283, 36323), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M2'], {'dtype': 'complex'}), '(M2, dtype=complex)\n', (36304, 36323), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((36901, 36939), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M1'], {'dtype': 'float'}), '(M1, dtype=float)\n', (36922, 36939), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((36965, 37005), 'mfem.common.sparse_utils.sparsemat_to_scipycsr', 'sparsemat_to_scipycsr', (['M2'], {'dtype': 'complex'}), '(M2, dtype=complex)\n', (36986, 37005), False, 'from mfem.common.sparse_utils import sparsemat_to_scipycsr\n'), ((3101, 3117), 'numpy.any', 'np.any', (['(i != 0.0)'], {}), '(i != 0.0)\n', (3107, 3117), True, 'import numpy as np\n'), ((3209, 3225), 'numpy.any', 'np.any', (['(r != 0.0)'], {}), '(r != 0.0)\n', (3215, 3225), True, 'import numpy as np\n'), ((3419, 3435), 'numpy.any', 'np.any', (['(i != 0.0)'], {}), '(i != 0.0)\n', (3425, 3435), True, 'import numpy as np\n'), ((3528, 3544), 'numpy.any', 'np.any', (['(r != 0.0)'], {}), '(r != 0.0)\n', (3534, 3544), True, 'import numpy as np\n')] |
import imageio
import copy as cp
import numpy as np
import cv2
from PIL import Image
'''
###############
addBoundary(img, kernel)
convolve1(img, kernel, filter_type, mode='same')
convolve(img, kernel, filter_type, mode='same')
wise_element_sum(img, kernel, filter_type)
上面四个函数用于构建高斯滤波器,与我写的第二章节的作业中的滤波器一样(我是先做第二章作业再做第一章的)
'''
def addBoundary(img, kernel):
    '''
    Zero-pad an image so that a 'same' convolution with the given
    (square, odd-sized) kernel keeps the original size.
    :param img: input image
    :param kernel: convolution kernel
    :return: padded image
    '''
    pad = (kernel.shape[0] - 1) // 2
    return cv2.copyMakeBorder(img, pad, pad, pad, pad,
                              cv2.BORDER_CONSTANT, value=0)
def convolve1(img, kernel, filter_type, mode='same'):
    '''
    Convolve a single-channel image with a kernel (used for grayscale).
    :param img: single-channel image matrix
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter', 'Gauss_Fileter', 'mean', ...
        (forwarded to wise_element_sum)
    :param mode: 'same' pads the image so the output keeps its size
    :return: convolved image (uint8)
    '''
    if mode == 'same':
        img_ = addBoundary(img, kernel)
    kh = kernel.shape[0]
    kw = kernel.shape[1]
    # Number of valid kernel positions along each axis.
    out_h = img_.shape[0] - kh + 1
    out_w = img_.shape[1] - kw + 1
    conv = np.zeros((out_h, out_w), dtype='uint8')
    for r in range(out_h):
        for c in range(out_w):
            conv[r][c] = wise_element_sum(img_[r:r + kh, c:c + kw],
                                            kernel, filter_type)
    return conv
def convolve(img, kernel, filter_type, mode='same'):
    '''
    Three-channel convolution (for color images): convolve each channel
    independently and stack the results back together.

    :param img: input image array (H x W x 3)
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter', 'Gauss_Fileter', 'mean', ...
        (forwarded to convolve1/wise_element_sum)
    :param mode: convolution mode; 'same' pads to keep the size
    :return: convolved image array (H x W x 3)
    '''
    # Fix: the original also built np.mat copies of the three channels
    # (R, G, B) that were never used; that dead work is removed.
    conv_c0 = convolve1(img[:, :, 0], kernel, filter_type, mode)
    conv_c1 = convolve1(img[:, :, 1], kernel, filter_type, mode)
    conv_c2 = convolve1(img[:, :, 2], kernel, filter_type, mode)
    return np.dstack([conv_c0, conv_c1, conv_c2])
def wise_element_sum(img, kernel, filter_type):
    '''
    Compute the output pixel for one kernel position.

    :param img: image patch (same shape as the kernel)
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter' takes the (upper) median of the
        weighted patch; any other value ('mean', 'Gauss_Fileter', ...)
        takes the weighted sum
    :return: pixel value clamped to [0, 255]

    Fixes vs. the original: the hand-rolled flatten-into-a-list loop
    (which also shadowed the builtin ``list``) is replaced by
    ``np.sort`` on the raveled patch; the selected element is the same
    (index ``size // 2`` of the sorted values).
    '''
    if filter_type == 'medium_Filter':
        flat = np.sort((img * kernel).ravel())
        med = flat[flat.size // 2]
        if med > 255:
            return 255
        if med < 0:
            return 0
        return med
    # Mean / Gaussian / other weighted-sum filters.
    result = (img * kernel).sum()
    if result < 0:
        return 0
    if result > 255:
        return 255
    return result
def Gauss_Fileter(img, kernel_size, sigma):
    '''
    Gaussian filter.

    :param img: input image
    :param kernel_size: kernel size (odd)
    :param sigma: standard deviation of the Gaussian
    :return: filtered image

    Bug fixes vs. the original:
      * the exponent was ``(-(i-c)**2 + (j-c)**2)`` instead of
        ``-((i-c)**2 + (j-c)**2)``, so the weights *grew* away from the
        center along one axis instead of decaying radially;
      * the kernel center was ``kernel_size / 2`` (e.g. 2.5 for a 5x5
        kernel); the symmetric center is ``(kernel_size - 1) / 2``.
    '''
    if sigma == 0:
        sigma = 6  # avoid division by zero below
    kernel = np.zeros([kernel_size, kernel_size])
    center = (kernel_size - 1) / 2
    total = 0.0
    for i in range(kernel_size):
        for j in range(kernel_size):
            kernel[i, j] = np.exp(-((i - center) ** 2 + (j - center) ** 2)
                                   / (2 * sigma ** 2))
            total += kernel[i, j]
    kernel = kernel / total  # normalize so the weights sum to 1
    return convolve(img, kernel, filter_type='Gauss_Fileter', mode='same')
def white_balance(img):
    '''
    Gray-world white balance.

    :param img: image data as read by cv2.imread (H x W x 3, BGR order)
    :return: white-balanced image as a uint8 array

    Perf fix: the original clamped each pixel in a Python double loop
    (O(H*W) interpreter work); np.minimum performs the identical clamp
    vectorized. Only the upper bound (255) is clamped, as before.
    '''
    B, G, R = np.double(img[:, :, 0]), np.double(img[:, :, 1]), np.double(img[:, :, 2])
    B_ave, G_ave, R_ave = np.mean(B), np.mean(G), np.mean(R)
    # Gray-world assumption: scale each channel so its mean matches the
    # overall mean K.
    K = (B_ave + G_ave + R_ave) / 3
    Bnew = np.minimum(B * (K / B_ave), 255)
    Gnew = np.minimum(G * (K / G_ave), 255)
    Rnew = np.minimum(R * (K / R_ave), 255)
    dst_img = np.uint8(np.zeros_like(img))
    dst_img[:, :, 0] = Bnew
    dst_img[:, :, 1] = Gnew
    dst_img[:, :, 2] = Rnew
    return dst_img
def deMosaic(raw_image):
    '''
    Demosaic (interpolate) a single-channel Bayer image into RGB.
    :param raw_image: single-channel image
    :return: RGB image (cv2 BGR-ordered merge)
    '''
    h = raw_image.shape[0]
    w = raw_image.shape[1]
    red = cp.deepcopy(raw_image)
    green = cp.deepcopy(raw_image)
    blue = cp.deepcopy(raw_image)
    # All reads come from the untouched raw_image, and each channel is
    # written independently, so the original's three separate loops over
    # the same even grid can be fused into one.
    for i in range(0, h - 1, 2):
        for j in range(0, w - 1, 2):
            r_val = raw_image[i][j]
            red[i + 1][j] = r_val
            red[i + 1][j + 1] = r_val
            red[i][j + 1] = r_val
            g_val = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2
            green[i][j] = g_val
            green[i + 1][j + 1] = g_val
            green[i + 1][j] = g_val
            green[i][j + 1] = g_val
            b_val = raw_image[i + 1][j + 1]
            blue[i + 1][j] = b_val
            blue[i][j] = b_val
            blue[i][j + 1] = b_val
    return cv2.merge([blue, green, red])
def deMosaic1(raw_image):
    '''
    Alternative demosaicing (interpolation to RGB); kept mainly to
    compare its output against deMosaic().
    :param raw_image: single-channel Bayer image
    :return: RGB image (cv2 BGR-ordered merge)
    '''
    h = raw_image.shape[0]
    w = raw_image.shape[1]
    red = cp.deepcopy(raw_image)
    green = cp.deepcopy(raw_image)
    blue = cp.deepcopy(raw_image)
    # Red: average of the four diagonal neighbours above each sample,
    # plus a direct copy below it.
    for i in range(1, h - 1, 3):
        for j in range(1, w - 1, 3):
            diag_avg = (raw_image[i - 1][j - 1] + raw_image[i + 1][j - 1]
                        + raw_image[i - 1][j + 1] + raw_image[i + 1][j + 1]) / 4
            red[i - 1][j] = diag_avg
            red[i + 1][j] = raw_image[i][j]
    # Green: replicate each even-grid sample into its 2x2 block.
    for i in range(0, h - 1, 2):
        for j in range(0, w - 1, 2):
            g_val = raw_image[i][j]
            green[i][j] = g_val
            green[i + 1][j + 1] = g_val
            green[i + 1][j] = g_val
            green[i][j + 1] = g_val
    # Blue: copy odd-column samples into their horizontal neighbours.
    for i in range(0, h - 1, 2):
        for j in range(1, w - 1, 2):
            blue[i][j - 1] = raw_image[i][j]
            blue[i][j + 1] = raw_image[i][j]
    return cv2.merge([blue, green, red])
def deMosaic2(raw_image):
    '''
    Alternative demosaicing (interpolation to RGB); kept mainly to
    compare its output against deMosaic().
    :param raw_image: single-channel Bayer image
    :return: RGB image (cv2 BGR-ordered merge)

    Bug fix: the blue-channel loop contained a stray write into the red
    channel (``r_image[i][j] = raw_image[i + 1][j + 1]``), which
    clobbered the red values computed in the first loop. That line is
    removed; the surrounding blue assignments are unchanged.
    '''
    h = raw_image.shape[0]
    w = raw_image.shape[1]
    red = cp.deepcopy(raw_image)
    green = cp.deepcopy(raw_image)
    blue = cp.deepcopy(raw_image)
    # Red: keep the sample, spread half-strength copies to neighbours.
    for i in range(0, h - 1, 2):
        for j in range(0, w - 1, 2):
            red[i, j] = raw_image[i][j]
            red[i + 1][j] = raw_image[i][j] / 2
            red[i + 1][j + 1] = raw_image[i][j] / 2
            red[i][j + 1] = raw_image[i][j] / 2
    # Green: average of the two adjacent samples, halved at neighbours.
    for i in range(0, h - 1, 2):
        for j in range(0, w - 1, 2):
            g_val = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2
            green[i][j] = g_val
            green[i + 1][j + 1] = g_val / 2
            green[i + 1][j] = g_val / 2
            green[i][j + 1] = g_val / 2
    # Blue: spread half-strength copies of the odd-grid sample.
    for i in range(0, h - 1, 2):
        for j in range(0, w - 1, 2):
            blue[i + 1][j] = raw_image[i + 1][j + 1] / 2
            blue[i][j] = raw_image[i + 1][j + 1] / 2
            blue[i][j + 1] = raw_image[i + 1][j + 1] / 2
    return cv2.merge([blue, green, red])
def gamma_correct(img, gamma):
    '''
    Apply gamma correction to an RGB image.
    :param img: input image (values in [0, 255])
    :param gamma: gamma exponent
    :return: gamma-corrected image (values in [0, 255], float)
    '''
    normalized = img / 255.0
    return np.power(normalized, gamma) * 255
def main():
    """Load the Bayer-pattern-encoded raw image and run the ISP
    pipeline (demosaic -> white balance -> gamma -> Gaussian blur),
    saving each intermediate result as a JPEG file."""
    Image.MAX_IMAGE_PIXELS = None
    # Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``float`` is the documented replacement.
    img = cv2.imread("raw-data-BayerpatternEncodedImage.tif", 1).astype(float)
    single_img = img[:, :, 0]
    imageio.imsave('单通道图片.jpg', single_img)
    # Combination 1: demosaic -> white balance -> gamma -> Gaussian blur
    deMosaic_img = deMosaic(single_img)
    imageio.imsave('RGB图片1.jpg', deMosaic_img)
    balance_img = white_balance(deMosaic_img)
    imageio.imsave('白平衡1.jpg', balance_img)
    gamma_img = gamma_correct(balance_img, 1.2)
    imageio.imsave('gamma校正1.jpg', gamma_img)
    Filter_img = Gauss_Fileter(gamma_img, 5, 25)
    imageio.imsave('高斯滤波1-sigma=25.jpg', Filter_img)
    '''
    #组合二
    deMosaic_img = deMosaic(single_img)
    imageio.imsave('RGB图片2.jpg', deMosaic_img)
    Filter_img = Gauss_Fileter( deMosaic_img, 5, 25)
    imageio.imsave('高斯滤波2-sigma=25.jpg', Filter_img)
    balance_img = white_balance(Filter_img)
    imageio.imsave('白平衡2.jpg', balance_img)
    gamma_img = gamma_correct(balance_img, 1.2)
    imageio.imsave('gamma校正2.jpg', gamma_img)
    '''
    '''
    #组合1各步骤不同参数对图片处理的效果
    deMosaic_img = deMosaic(single_img)
    imageio.imsave('RGB图片0.jpg', deMosaic_img)
    deMosaic_img1 = deMosaic1(single_img)
    imageio.imsave('RGB图片1.jpg', deMosaic_img1)
    deMosaic_img2 = deMosaic2(single_img)
    imageio.imsave('RGB图片2.jpg', deMosaic_img2)
    balance_img = white_balance(deMosaic_img)
    imageio.imsave('白平衡3.jpg', balance_img)
    balance_img1 = white_balance(deMosaic_img1)
    imageio.imsave('白平衡4.jpg', balance_img1)
    balance_img2 = white_balance(deMosaic_img2)
    imageio.imsave('白平衡5.jpg', balance_img2)
    gamma_img = gamma_correct(balance_img, 1.2)
    imageio.imsave('gamma校正.jpg', gamma_img)
    gamma_img1 = gamma_correct(balance_img, 2)
    imageio.imsave('gamma校正1.jpg', gamma_img1)
    gamma_img2 = gamma_correct(balance_img, 4)
    imageio.imsave('gamma校正2.jpg', gamma_img2)
    gamma_img3 = gamma_correct(balance_img, 0.8)
    imageio.imsave('gamma校正3.jpg', gamma_img3)
    gamma_img4 = gamma_correct(balance_img, 0.5)
    imageio.imsave('gamma校正4.jpg', gamma_img4)
    gamma_img5 = gamma_correct(balance_img, 0.1)
    imageio.imsave('gamma校正5.jpg', gamma_img5)
    Filter_img = Gauss_Fileter(gamma_img, 5, 15)
    imageio.imsave('高斯滤波-sigma=15.jpg', Filter_img)
    Filter_img1 = Gauss_Fileter(gamma_img, 5, 25)
    imageio.imsave('高斯滤波-sigma=25.jpg', Filter_img1)
    Filter_img3 = Gauss_Fileter(gamma_img, 5, 5)
    imageio.imsave('高斯滤波-sigma=5.jpg', Filter_img3)
    '''
if __name__ == '__main__':
    main()
| [
"numpy.mat",
"numpy.dstack",
"cv2.merge",
"numpy.mean",
"numpy.double",
"imageio.imsave",
"numpy.power",
"cv2.copyMakeBorder",
"numpy.exp",
"numpy.zeros",
"copy.deepcopy",
"numpy.zeros_like",
"cv2.imread"
] | [((562, 656), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'addLine', 'addLine', 'addLine', 'addLine', 'cv2.BORDER_CONSTANT'], {'value': '(0)'}), '(img, addLine, addLine, addLine, addLine, cv2.\n BORDER_CONSTANT, value=0)\n', (580, 656), False, 'import cv2\n'), ((1187, 1237), 'numpy.zeros', 'np.zeros', (['(conv_height, conv_width)'], {'dtype': '"""uint8"""'}), "((conv_height, conv_width), dtype='uint8')\n", (1195, 1237), True, 'import numpy as np\n'), ((1666, 1686), 'numpy.mat', 'np.mat', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (1672, 1686), True, 'import numpy as np\n'), ((1695, 1715), 'numpy.mat', 'np.mat', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (1701, 1715), True, 'import numpy as np\n'), ((1724, 1744), 'numpy.mat', 'np.mat', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (1730, 1744), True, 'import numpy as np\n'), ((1953, 1988), 'numpy.dstack', 'np.dstack', (['[conv_B, conv_G, conv_R]'], {}), '([conv_B, conv_G, conv_R])\n', (1962, 1988), True, 'import numpy as np\n'), ((3072, 3108), 'numpy.zeros', 'np.zeros', (['[kernel_size, kernel_size]'], {}), '([kernel_size, kernel_size])\n', (3080, 3108), True, 'import numpy as np\n'), ((4717, 4731), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (4728, 4731), True, 'import copy as cp\n'), ((4746, 4760), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (4757, 4760), True, 'import copy as cp\n'), ((4775, 4789), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (4786, 4789), True, 'import copy as cp\n'), ((5549, 5587), 'cv2.merge', 'cv2.merge', (['[b_image, g_image, r_image]'], {}), '([b_image, g_image, r_image])\n', (5558, 5587), False, 'import cv2\n'), ((5829, 5843), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (5840, 5843), True, 'import copy as cp\n'), ((5858, 5872), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (5869, 5872), True, 'import copy as cp\n'), ((5887, 5901), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (5898, 5901), True, 'import copy as cp\n'), 
((6778, 6816), 'cv2.merge', 'cv2.merge', (['[b_image, g_image, r_image]'], {}), '([b_image, g_image, r_image])\n', (6787, 6816), False, 'import cv2\n'), ((7057, 7071), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (7068, 7071), True, 'import copy as cp\n'), ((7086, 7100), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (7097, 7100), True, 'import copy as cp\n'), ((7115, 7129), 'copy.deepcopy', 'cp.deepcopy', (['R'], {}), '(R)\n', (7126, 7129), True, 'import copy as cp\n'), ((7997, 8035), 'cv2.merge', 'cv2.merge', (['[b_image, g_image, r_image]'], {}), '([b_image, g_image, r_image])\n', (8006, 8035), False, 'import cv2\n'), ((8176, 8204), 'numpy.power', 'np.power', (['(img / 255.0)', 'gamma'], {}), '(img / 255.0, gamma)\n', (8184, 8204), True, 'import numpy as np\n'), ((8406, 8445), 'imageio.imsave', 'imageio.imsave', (['"""单通道图片.jpg"""', 'single_img'], {}), "('单通道图片.jpg', single_img)\n", (8420, 8445), False, 'import imageio\n'), ((8500, 8542), 'imageio.imsave', 'imageio.imsave', (['"""RGB图片1.jpg"""', 'deMosaic_img'], {}), "('RGB图片1.jpg', deMosaic_img)\n", (8514, 8542), False, 'import imageio\n'), ((8593, 8632), 'imageio.imsave', 'imageio.imsave', (['"""白平衡1.jpg"""', 'balance_img'], {}), "('白平衡1.jpg', balance_img)\n", (8607, 8632), False, 'import imageio\n'), ((8685, 8726), 'imageio.imsave', 'imageio.imsave', (['"""gamma校正1.jpg"""', 'gamma_img'], {}), "('gamma校正1.jpg', gamma_img)\n", (8699, 8726), False, 'import imageio\n'), ((8780, 8828), 'imageio.imsave', 'imageio.imsave', (['"""高斯滤波1-sigma=25.jpg"""', 'Filter_img'], {}), "('高斯滤波1-sigma=25.jpg', Filter_img)\n", (8794, 8828), False, 'import imageio\n'), ((3776, 3799), 'numpy.double', 'np.double', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (3785, 3799), True, 'import numpy as np\n'), ((3801, 3824), 'numpy.double', 'np.double', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (3810, 3824), True, 'import numpy as np\n'), ((3826, 3849), 'numpy.double', 'np.double', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', 
(3835, 3849), True, 'import numpy as np\n'), ((3876, 3886), 'numpy.mean', 'np.mean', (['B'], {}), '(B)\n', (3883, 3886), True, 'import numpy as np\n'), ((3888, 3898), 'numpy.mean', 'np.mean', (['G'], {}), '(G)\n', (3895, 3898), True, 'import numpy as np\n'), ((3900, 3910), 'numpy.mean', 'np.mean', (['R'], {}), '(R)\n', (3907, 3910), True, 'import numpy as np\n'), ((4399, 4417), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (4412, 4417), True, 'import numpy as np\n'), ((3289, 3374), 'numpy.exp', 'np.exp', (['((-(i - kernel_center) ** 2 + (j - kernel_center) ** 2) / (2 * sigma ** 2))'], {}), '((-(i - kernel_center) ** 2 + (j - kernel_center) ** 2) / (2 * sigma **\n 2))\n', (3295, 3374), True, 'import numpy as np\n'), ((8299, 8353), 'cv2.imread', 'cv2.imread', (['"""raw-data-BayerpatternEncodedImage.tif"""', '(1)'], {}), "('raw-data-BayerpatternEncodedImage.tif', 1)\n", (8309, 8353), False, 'import cv2\n')] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
import logging
import unittest
from collections import OrderedDict
from copy import deepcopy
import mxnet as mx
import numpy as np
from hypothesis import given, strategies as st
from hypothesis.extra import numpy as hpnp
from mxnet import nd
from data.Batch import Batch, ClosedVocabInput, CharCNNInput, GSCVocabInput
from experiments.utils import PaddedArray
# Configure logging for the test run and grab a logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()  # NOTE(review): this is the root logger; logging.getLogger(__name__) is the usual convention
class TestTask(unittest.TestCase):
    """Tests for Batch.recurse_move_to_context."""

    # hypothesis generates arbitrarily nested lists whose leaves are
    # mxnet NDArrays allocated on cpu(0).
    @given(input=st.recursive(st.builds(lambda x: nd.array(x, ctx=mx.cpu(0)),
                                          hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())), st.lists))
    def test_recursive_move_to_context_moves_all_elements(self, input):
        # NOTE(review): the parameter name ``input`` shadows the builtin.
        input = [input]
        # Check context via the string repr of the structure.
        self.assertNotIn('cpu(1)', str(input))  # Super hacky test...
        Batch.recurse_move_to_context(input, mx.cpu(1))
        self.assertNotIn('cpu(0)', str(input))  # Super hacky test...
class TestClosedVocabInput(unittest.TestCase):
    """Round-trip test for ClosedVocabInput: unpack() then repack()
    must reproduce every field of the original instance."""

    @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                               dict_class=OrderedDict, min_size=1),
           node_types=st.builds(lambda v, l: PaddedArray(v, l),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),
           node_names=st.builds(lambda v, l: PaddedArray(v, l),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),
           batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))
    def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):
        """unpack()/repack() round-trip preserves all fields."""
        inp = ClosedVocabInput(edges, node_types, node_names, batch_sizes, mx.cpu())
        originp = deepcopy(inp)
        inp.repack(*inp.unpack())
        inp.batch_sizes = inp.batch_sizes  # NOTE(review): self-assignment looks like a no-op -- confirm intent
        self.assertEqual(inp.edges.keys(), originp.edges.keys())
        for k in inp.edges.keys():
            np.testing.assert_equal(inp.edges[k], originp.edges[k])
        np.testing.assert_equal(inp.node_names.values, originp.node_names.values)
        np.testing.assert_equal(inp.node_names.value_lengths, originp.node_names.value_lengths)
        np.testing.assert_equal(inp.node_types.values, originp.node_types.values)
        np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)
        np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)
class TestCharCNNInput(unittest.TestCase):
    """Round-trip test for CharCNNInput: unpack() then repack() must
    reproduce every field of the original instance. (Unlike
    ClosedVocabInput, node_names here is a plain array.)"""

    @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                               dict_class=OrderedDict, min_size=1),
           node_types=st.builds(lambda v, l: PaddedArray(v, l),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),
           node_names=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
           batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))
    def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):
        """unpack()/repack() round-trip preserves all fields."""
        inp = CharCNNInput(edges, node_types, node_names, batch_sizes, mx.cpu())
        originp = deepcopy(inp)
        inp.repack(*inp.unpack())
        inp.batch_sizes = inp.batch_sizes  # NOTE(review): self-assignment looks like a no-op -- confirm intent
        self.assertEqual(inp.edges.keys(), originp.edges.keys())
        for k in inp.edges.keys():
            np.testing.assert_equal(inp.edges[k], originp.edges[k])
        np.testing.assert_equal(inp.node_names, originp.node_names)
        np.testing.assert_equal(inp.node_types.values, originp.node_types.values)
        np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)
        np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)
class TestGSCVocabInput(unittest.TestCase):
    """Round-trip test for GSCVocabInput: unpack() then repack() must
    reproduce every field of the original instance."""

    @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                               dict_class=OrderedDict, min_size=1),
           node_types=st.builds(lambda v, l: PaddedArray(v, l),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
                                hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),
           node_names=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),
           batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))
    def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):
        """unpack()/repack() round-trip preserves all fields."""
        inp = GSCVocabInput(edges, node_types, node_names, batch_sizes, mx.cpu())
        originp = deepcopy(inp)
        inp.repack(*inp.unpack())
        inp.batch_sizes = inp.batch_sizes  # NOTE(review): self-assignment looks like a no-op -- confirm intent
        self.assertEqual(inp.edges.keys(), originp.edges.keys())
        for k in inp.edges.keys():
            np.testing.assert_equal(inp.edges[k], originp.edges[k])
        np.testing.assert_equal(inp.node_names, originp.node_names)
        np.testing.assert_equal(inp.node_types.values, originp.node_types.values)
        np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)
        np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)
| [
"logging.basicConfig",
"logging.getLogger",
"numpy.testing.assert_equal",
"experiments.utils.PaddedArray",
"mxnet.cpu",
"hypothesis.strategies.characters",
"copy.deepcopy",
"numpy.dtype",
"hypothesis.extra.numpy.array_shapes"
] | [((437, 476), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (456, 476), False, 'import logging\n'), ((486, 505), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (503, 505), False, 'import logging\n'), ((2082, 2095), 'copy.deepcopy', 'deepcopy', (['inp'], {}), '(inp)\n', (2090, 2095), False, 'from copy import deepcopy\n'), ((2348, 2421), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_names.values', 'originp.node_names.values'], {}), '(inp.node_names.values, originp.node_names.values)\n', (2371, 2421), True, 'import numpy as np\n'), ((2430, 2522), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_names.value_lengths', 'originp.node_names.value_lengths'], {}), '(inp.node_names.value_lengths, originp.node_names.\n value_lengths)\n', (2453, 2522), True, 'import numpy as np\n'), ((2526, 2599), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_types.values', 'originp.node_types.values'], {}), '(inp.node_types.values, originp.node_types.values)\n', (2549, 2599), True, 'import numpy as np\n'), ((2608, 2700), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_types.value_lengths', 'originp.node_types.value_lengths'], {}), '(inp.node_types.value_lengths, originp.node_types.\n value_lengths)\n', (2631, 2700), True, 'import numpy as np\n'), ((2704, 2765), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.batch_sizes', 'originp.batch_sizes'], {}), '(inp.batch_sizes, originp.batch_sizes)\n', (2727, 2765), True, 'import numpy as np\n'), ((3634, 3647), 'copy.deepcopy', 'deepcopy', (['inp'], {}), '(inp)\n', (3642, 3647), False, 'from copy import deepcopy\n'), ((3900, 3959), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_names', 'originp.node_names'], {}), '(inp.node_names, originp.node_names)\n', (3923, 3959), True, 'import numpy as np\n'), ((3968, 4041), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['inp.node_types.values', 'originp.node_types.values'], {}), '(inp.node_types.values, originp.node_types.values)\n', (3991, 4041), True, 'import numpy as np\n'), ((4050, 4142), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_types.value_lengths', 'originp.node_types.value_lengths'], {}), '(inp.node_types.value_lengths, originp.node_types.\n value_lengths)\n', (4073, 4142), True, 'import numpy as np\n'), ((4146, 4207), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.batch_sizes', 'originp.batch_sizes'], {}), '(inp.batch_sizes, originp.batch_sizes)\n', (4169, 4207), True, 'import numpy as np\n'), ((5078, 5091), 'copy.deepcopy', 'deepcopy', (['inp'], {}), '(inp)\n', (5086, 5091), False, 'from copy import deepcopy\n'), ((5344, 5403), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_names', 'originp.node_names'], {}), '(inp.node_names, originp.node_names)\n', (5367, 5403), True, 'import numpy as np\n'), ((5412, 5485), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_types.values', 'originp.node_types.values'], {}), '(inp.node_types.values, originp.node_types.values)\n', (5435, 5485), True, 'import numpy as np\n'), ((5494, 5586), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.node_types.value_lengths', 'originp.node_types.value_lengths'], {}), '(inp.node_types.value_lengths, originp.node_types.\n value_lengths)\n', (5517, 5586), True, 'import numpy as np\n'), ((5590, 5651), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.batch_sizes', 'originp.batch_sizes'], {}), '(inp.batch_sizes, originp.batch_sizes)\n', (5613, 5651), True, 'import numpy as np\n'), ((951, 960), 'mxnet.cpu', 'mx.cpu', (['(1)'], {}), '(1)\n', (957, 960), True, 'import mxnet as mx\n'), ((2054, 2062), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2060, 2062), True, 'import mxnet as mx\n'), ((2284, 2339), 'numpy.testing.assert_equal', 'np.testing.assert_equal', 
(['inp.edges[k]', 'originp.edges[k]'], {}), '(inp.edges[k], originp.edges[k])\n', (2307, 2339), True, 'import numpy as np\n'), ((3606, 3614), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (3612, 3614), True, 'import mxnet as mx\n'), ((3836, 3891), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.edges[k]', 'originp.edges[k]'], {}), '(inp.edges[k], originp.edges[k])\n', (3859, 3891), True, 'import numpy as np\n'), ((5050, 5058), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (5056, 5058), True, 'import mxnet as mx\n'), ((5280, 5335), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['inp.edges[k]', 'originp.edges[k]'], {}), '(inp.edges[k], originp.edges[k])\n', (5303, 5335), True, 'import numpy as np\n'), ((1114, 1129), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (1127, 1129), True, 'from hypothesis import given, strategies as st\n'), ((2844, 2859), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (2857, 2859), True, 'from hypothesis import given, strategies as st\n'), ((4287, 4302), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (4300, 4302), True, 'from hypothesis import given, strategies as st\n'), ((1313, 1330), 'experiments.utils.PaddedArray', 'PaddedArray', (['v', 'l'], {}), '(v, l)\n', (1324, 1330), False, 'from experiments.utils import PaddedArray\n'), ((1576, 1593), 'experiments.utils.PaddedArray', 'PaddedArray', (['v', 'l'], {}), '(v, l)\n', (1587, 1593), False, 'from experiments.utils import PaddedArray\n'), ((1835, 1854), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1843, 1854), True, 'import numpy as np\n'), ((1862, 1881), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1879, 1881), True, 'from hypothesis.extra import numpy as hpnp\n'), ((3043, 3060), 'experiments.utils.PaddedArray', 'PaddedArray', (['v', 'l'], {}), '(v, l)\n', (3054, 3060), False, 'from experiments.utils import PaddedArray\n'), ((3301, 3320), 
'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3309, 3320), True, 'import numpy as np\n'), ((3328, 3347), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (3345, 3347), True, 'from hypothesis.extra import numpy as hpnp\n'), ((3391, 3410), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3399, 3410), True, 'import numpy as np\n'), ((3418, 3437), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (3435, 3437), True, 'from hypothesis.extra import numpy as hpnp\n'), ((4486, 4503), 'experiments.utils.PaddedArray', 'PaddedArray', (['v', 'l'], {}), '(v, l)\n', (4497, 4503), False, 'from experiments.utils import PaddedArray\n'), ((4744, 4763), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4752, 4763), True, 'import numpy as np\n'), ((4771, 4790), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (4788, 4790), True, 'from hypothesis.extra import numpy as hpnp\n'), ((4834, 4853), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4842, 4853), True, 'import numpy as np\n'), ((4861, 4880), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (4878, 4880), True, 'from hypothesis.extra import numpy as hpnp\n'), ((1149, 1168), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1157, 1168), True, 'import numpy as np\n'), ((1176, 1195), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1193, 1195), True, 'from hypothesis.extra import numpy as hpnp\n'), ((1382, 1401), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1390, 1401), True, 'import numpy as np\n'), ((1409, 1428), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1426, 1428), True, 'from hypothesis.extra import numpy as hpnp\n'), ((1481, 1500), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1489, 1500), True, 
'import numpy as np\n'), ((1508, 1527), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1525, 1527), True, 'from hypothesis.extra import numpy as hpnp\n'), ((1645, 1664), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1653, 1664), True, 'import numpy as np\n'), ((1672, 1691), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1689, 1691), True, 'from hypothesis.extra import numpy as hpnp\n'), ((1744, 1763), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1752, 1763), True, 'import numpy as np\n'), ((1771, 1790), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (1788, 1790), True, 'from hypothesis.extra import numpy as hpnp\n'), ((2879, 2898), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (2887, 2898), True, 'import numpy as np\n'), ((2906, 2925), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (2923, 2925), True, 'from hypothesis.extra import numpy as hpnp\n'), ((3112, 3131), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3120, 3131), True, 'import numpy as np\n'), ((3139, 3158), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (3156, 3158), True, 'from hypothesis.extra import numpy as hpnp\n'), ((3211, 3230), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3219, 3230), True, 'import numpy as np\n'), ((3238, 3257), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (3255, 3257), True, 'from hypothesis.extra import numpy as hpnp\n'), ((4322, 4341), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4330, 4341), True, 'import numpy as np\n'), ((4349, 4368), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (4366, 4368), True, 'from hypothesis.extra import numpy as hpnp\n'), ((4555, 4574), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), 
"('float32')\n", (4563, 4574), True, 'import numpy as np\n'), ((4582, 4601), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (4599, 4601), True, 'from hypothesis.extra import numpy as hpnp\n'), ((4654, 4673), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4662, 4673), True, 'import numpy as np\n'), ((4681, 4700), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (4698, 4700), True, 'from hypothesis.extra import numpy as hpnp\n'), ((679, 698), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (687, 698), True, 'import numpy as np\n'), ((706, 725), 'hypothesis.extra.numpy.array_shapes', 'hpnp.array_shapes', ([], {}), '()\n', (723, 725), True, 'from hypothesis.extra import numpy as hpnp\n'), ((609, 618), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (615, 618), True, 'import mxnet as mx\n')] |
from itertools import product
import numpy as np
import pandas as pd
import torch
from trains import Task
from models.detection.SSD.priorbox_optimization import PriorOptimizationInput, ImageSizeTuple
from models.detection.SSD.priorbox_optimization.bbox_clustering import get_box_pairwise_iou
def collect_ground_truth_stats(ground_truth_loader):
    """Collect ground-truth box statistics from a loader into a DataFrame.

    Each item yielded by the loader is a tuple whose last element is the
    per-image metadata; only that metadata is kept and forwarded to
    get_gt_df_from_gt.
    """
    meta_entries = [batch[-1] for batch in ground_truth_loader]
    return get_gt_df_from_gt(meta_entries)
def get_gt_df_from_gt(gt):
    """Flatten per-image ground-truth metadata into a box-statistics DataFrame.

    Crowd-annotated boxes and degenerate (zero-area) boxes are dropped.
    The returned DataFrame has columns ['width', 'height', 'label',
    'overlap_score'], where overlap_score is the summed IoU of a box with
    the other boxes of the same image (self-IoU of 1 subtracted).
    """
    def process_meta_element(element):
        boxes = element['boxes']
        iscrowd = element['iscrowd']
        labels = element['labels']
        # removing all "crowd" labels
        orig_boxes = [box for box, crowd in zip(boxes, iscrowd) if not crowd]
        orig_labels = [label for label, crowd in zip(labels, iscrowd) if not crowd]
        if len(orig_boxes) == 0:
            # all boxes in this image were crowd annotations; guard the
            # [:, i] indexing below, which raises on an empty 1-D array
            return []
        orig_boxes = np.around(orig_boxes)
        width = np.around(orig_boxes[:, 2] - orig_boxes[:, 0])
        height = np.around(orig_boxes[:, 3] - orig_boxes[:, 1])
        area = width * height
        good_boxes = np.where(area > 0)[0]
        if len(good_boxes) != len(orig_boxes):
            # keep only boxes with positive area
            boxes = orig_boxes[good_boxes]
            labels = np.array(orig_labels)[good_boxes].tolist()
            height = height[good_boxes]
            width = width[good_boxes]
        else:
            boxes = orig_boxes
            labels = orig_labels
        pairwise_iou = get_box_pairwise_iou(boxes)
        # overlap of each box with the rest of the image (remove self-IoU of 1)
        score = np.around(pairwise_iou.sum(axis=0) - 1, decimals=2)
        return [(w, h, label, q) for w, h, label, q in zip(width, height, labels, score)]

    processed_gt = [process_meta_element(el) for elem in gt for el in elem if len(el['boxes']) > 0]
    all_gt = [elem for elements in processed_gt for elem in elements]
    column_names = ['width', 'height', 'label', 'overlap_score']
    return pd.DataFrame(all_gt, columns=column_names)
def get_optimization_input(ground_truth_df, fmap_sizes, input_priors, image_size):
    """Pack ground-truth stats, feature-map FOVs and current priors into a
    PriorOptimizationInput, logging intermediate tables as task artifacts.

    :param ground_truth_df: DataFrame of ground-truth box statistics
        (as produced by get_gt_df_from_gt)
    :param fmap_sizes: per-resolution feature-map sizes (square maps assumed)
    :param input_priors: tensor whose rows are (match_group, width, height)
    :param image_size: input image size (single int; square image assumed)
    """
    def fmap_to_pixel_fov(fmap_sizes):
        # fm = [np.array([fmap, fmap]) for fmap in fmap_sizes]
        # fm_np = np.vstack(fm)
        # fm_in_pixels = np.array(image_size) / fm_np
        # Approximate field-of-view of each feature map in input pixels.
        # NOTE(review): the hard-coded 3/... factors index fmap_sizes[-7..-2],
        # i.e. they assume exactly 7 feature maps with 3x3 heads -- confirm
        # against the SSD model definition.
        fm_in_pixels = np.array(image_size) * \
                       np.array([3/fmap_sizes[-7], 3/fmap_sizes[-6], 3/(fmap_sizes[-5]+2), 3/(fmap_sizes[-4]+2),
                                 3/(fmap_sizes[-3]+2), 3/(fmap_sizes[-2]+2), 1])
        # expand each scalar FOV to a (width, height) pair
        fm_in_pixels = [np.array([fmap, fmap]) for fmap in fm_in_pixels]
        fm_in_pixels = np.vstack(fm_in_pixels)
        return pd.DataFrame(fm_in_pixels, columns=['width', 'height'])
    task = Task.current_task()
    fmap = [np.array([fmap, fmap]) for fmap in fmap_sizes]
    task.upload_artifact('feature_maps_sizes', pd.DataFrame(np.vstack(fmap), columns=['width', 'height']))
    fmap_df = fmap_to_pixel_fov(fmap_sizes)
    task.upload_artifact('feature_maps_pixel_fov', fmap_df)
    in_priors_df = pd.DataFrame(input_priors.numpy(), columns=['match_group', 'width', 'height'])
    target_image_size = ImageSizeTuple(w=image_size, h=image_size)
    return PriorOptimizationInput(
        target_image_size=target_image_size,
        gt_bbox=ground_truth_df,
        fmap_sizes=fmap_df,
        in_priors=in_priors_df,
    )
def convert_optimization_result_to_priors(fm_sizes, steps, opt_result):
    """Convert the prior-optimization output into concrete SSD prior boxes.

    For every feature-map cell, one box (cx, cy, w, h) is generated per
    optimized prior of that resolution. Resolutions that came back empty
    from the optimizer get a single default square prior covering one cell.

    :param fm_sizes: per-resolution feature-map sizes (square maps)
    :param steps: per-resolution stride of a feature-map cell in input pixels
    :param opt_result: optimization result with .out_priors DataFrame and
        .target_image_size
    :return: (boxes tensor of (cx, cy, w, h) rows,
              tensor of unique (resolution, w, h) priors,
              list of anchors per resolution)
    """
    priors_output = opt_result.out_priors
    by_resolution = list(priors_output.groupby('match_group'))
    num_anchors_per_resolution = [len(priors[-1]) for priors in by_resolution]
    if len(num_anchors_per_resolution) < len(fm_sizes):
        print('Some resolution were empty - setting default prior per empty resolution')
        curr_match_groups = opt_result.out_priors.match_group.to_list()
        curr_prior_number = len(curr_match_groups)
        empty_match_groups = list(set(range(len(fm_sizes))) - set(np.unique(curr_match_groups)))
        for empty_match_group in empty_match_groups:
            # default prior: a square covering one cell of this feature map
            prior_size = opt_result.target_image_size.w / fm_sizes[empty_match_group]
            new_prior = pd.DataFrame(np.array([empty_match_group, prior_size**2, 1, prior_size, prior_size]).reshape(1, 5),
                                     columns=['match_group', 'area', 'aspect_ratio', 'width', 'height'])
            new_prior['index'] = 'prior_{}'.format(curr_prior_number)
            new_prior = new_prior.set_index('index')
            # DataFrame.append was removed in pandas 2.0 - use pd.concat instead
            priors_output = pd.concat([priors_output, new_prior])
            curr_prior_number += 1
            by_resolution.append((empty_match_group, new_prior))
            num_anchors_per_resolution.append(1)
    Task.current_task().register_artifact('priors_output', priors_output.sort_values('match_group'))
    by_resolution = list(priors_output.groupby('match_group'))
    boxes = []
    priors = []
    for i, (fm_size, new_priors) in enumerate(zip(fm_sizes, by_resolution)):
        for cell_y, cell_x in product(range(fm_size), repeat=2):
            # box center = middle of the feature-map cell, in input pixels
            cx = (cell_x + 0.5) * steps[i]
            cy = (cell_y + 0.5) * steps[i]
            for _, prior_row in new_priors[-1].iterrows():
                w = prior_row.width
                h = prior_row.height
                boxes.append((cx, cy, w, h))
                priors.append((i, w, h))
    return torch.Tensor(boxes), torch.Tensor(np.unique(np.array(priors), axis=0)), num_anchors_per_resolution
| [
"numpy.unique",
"models.detection.SSD.priorbox_optimization.bbox_clustering.get_box_pairwise_iou",
"numpy.where",
"torch.Tensor",
"trains.Task.current_task",
"models.detection.SSD.priorbox_optimization.PriorOptimizationInput",
"numpy.array",
"numpy.around",
"numpy.vstack",
"pandas.DataFrame",
"m... | [((1921, 1963), 'pandas.DataFrame', 'pd.DataFrame', (['all_gt'], {'columns': 'column_names'}), '(all_gt, columns=column_names)\n', (1933, 1963), True, 'import pandas as pd\n'), ((2682, 2701), 'trains.Task.current_task', 'Task.current_task', ([], {}), '()\n', (2699, 2701), False, 'from trains import Task\n'), ((3096, 3138), 'models.detection.SSD.priorbox_optimization.ImageSizeTuple', 'ImageSizeTuple', ([], {'w': 'image_size', 'h': 'image_size'}), '(w=image_size, h=image_size)\n', (3110, 3138), False, 'from models.detection.SSD.priorbox_optimization import PriorOptimizationInput, ImageSizeTuple\n'), ((3151, 3284), 'models.detection.SSD.priorbox_optimization.PriorOptimizationInput', 'PriorOptimizationInput', ([], {'target_image_size': 'target_image_size', 'gt_bbox': 'ground_truth_df', 'fmap_sizes': 'fmap_df', 'in_priors': 'in_priors_df'}), '(target_image_size=target_image_size, gt_bbox=\n ground_truth_df, fmap_sizes=fmap_df, in_priors=in_priors_df)\n', (3173, 3284), False, 'from models.detection.SSD.priorbox_optimization import PriorOptimizationInput, ImageSizeTuple\n'), ((929, 950), 'numpy.around', 'np.around', (['orig_boxes'], {}), '(orig_boxes)\n', (938, 950), True, 'import numpy as np\n'), ((967, 1013), 'numpy.around', 'np.around', (['(orig_boxes[:, 2] - orig_boxes[:, 0])'], {}), '(orig_boxes[:, 2] - orig_boxes[:, 0])\n', (976, 1013), True, 'import numpy as np\n'), ((1031, 1077), 'numpy.around', 'np.around', (['(orig_boxes[:, 3] - orig_boxes[:, 1])'], {}), '(orig_boxes[:, 3] - orig_boxes[:, 1])\n', (1040, 1077), True, 'import numpy as np\n'), ((1486, 1513), 'models.detection.SSD.priorbox_optimization.bbox_clustering.get_box_pairwise_iou', 'get_box_pairwise_iou', (['boxes'], {}), '(boxes)\n', (1506, 1513), False, 'from models.detection.SSD.priorbox_optimization.bbox_clustering import get_box_pairwise_iou\n'), ((2575, 2598), 'numpy.vstack', 'np.vstack', (['fm_in_pixels'], {}), '(fm_in_pixels)\n', (2584, 2598), True, 'import numpy as np\n'), ((2614, 2669), 
'pandas.DataFrame', 'pd.DataFrame', (['fm_in_pixels'], {'columns': "['width', 'height']"}), "(fm_in_pixels, columns=['width', 'height'])\n", (2626, 2669), True, 'import pandas as pd\n'), ((2714, 2736), 'numpy.array', 'np.array', (['[fmap, fmap]'], {}), '([fmap, fmap])\n', (2722, 2736), True, 'import numpy as np\n'), ((5278, 5297), 'torch.Tensor', 'torch.Tensor', (['boxes'], {}), '(boxes)\n', (5290, 5297), False, 'import torch\n'), ((1130, 1148), 'numpy.where', 'np.where', (['(area > 0)'], {}), '(area > 0)\n', (1138, 1148), True, 'import numpy as np\n'), ((2260, 2280), 'numpy.array', 'np.array', (['image_size'], {}), '(image_size)\n', (2268, 2280), True, 'import numpy as np\n'), ((2308, 2473), 'numpy.array', 'np.array', (['[3 / fmap_sizes[-7], 3 / fmap_sizes[-6], 3 / (fmap_sizes[-5] + 2), 3 / (\n fmap_sizes[-4] + 2), 3 / (fmap_sizes[-3] + 2), 3 / (fmap_sizes[-2] + 2), 1]'], {}), '([3 / fmap_sizes[-7], 3 / fmap_sizes[-6], 3 / (fmap_sizes[-5] + 2),\n 3 / (fmap_sizes[-4] + 2), 3 / (fmap_sizes[-3] + 2), 3 / (fmap_sizes[-2] +\n 2), 1])\n', (2316, 2473), True, 'import numpy as np\n'), ((2503, 2525), 'numpy.array', 'np.array', (['[fmap, fmap]'], {}), '([fmap, fmap])\n', (2511, 2525), True, 'import numpy as np\n'), ((2821, 2836), 'numpy.vstack', 'np.vstack', (['fmap'], {}), '(fmap)\n', (2830, 2836), True, 'import numpy as np\n'), ((4650, 4669), 'trains.Task.current_task', 'Task.current_task', ([], {}), '()\n', (4667, 4669), False, 'from trains import Task\n'), ((5322, 5338), 'numpy.array', 'np.array', (['priors'], {}), '(priors)\n', (5330, 5338), True, 'import numpy as np\n'), ((3911, 3939), 'numpy.unique', 'np.unique', (['curr_match_groups'], {}), '(curr_match_groups)\n', (3920, 3939), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.array', 'np.array', (['orig_labels'], {}), '(orig_labels)\n', (1271, 1284), True, 'import numpy as np\n'), ((4118, 4191), 'numpy.array', 'np.array', (['[empty_match_group, prior_size ** 2, 1, prior_size, prior_size]'], {}), 
'([empty_match_group, prior_size ** 2, 1, prior_size, prior_size])\n', (4126, 4191), True, 'import numpy as np\n')] |
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import time
import math
import copy
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.onnx
import onnx
import datetime
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import random
import cv2
from colorama import Fore
import progiter
from packaging import version
import warnings
from torchvision.edgeailite import xnn
from torchvision.edgeailite import xvision
from torchvision.edgeailite.xvision.transforms import image_transforms
from torchvision.edgeailite.xvision import losses as pixel2pixel_losses
from .infer_pixel2pixel import compute_accuracy
##################################################
# suppress the noisy TracerWarnings that torch emits during jit tracing /
# ONNX export (this script exports ONNX models when args.save_onnx is set)
warnings.filterwarnings('ignore', category=torch.jit.TracerWarning)
##################################################
def get_config():
    """Build the default configuration for pixel2pixel training.

    Returns an ``xnn.utils.ConfigNode`` pre-populated with dataset, model,
    optimizer/scheduler, augmentation, logging and quantization defaults.
    Callers are expected to override individual fields before passing the
    config to ``main``.
    """
    args = xnn.utils.ConfigNode()
    # dataset config
    args.dataset_config = xnn.utils.ConfigNode()
    args.dataset_config.split_name = 'val'
    args.dataset_config.max_depth_bfr_scaling = 80
    args.dataset_config.depth_scale = 1
    args.dataset_config.train_depth_log = 1
    args.use_semseg_for_depth = False
    # model config
    args.model_config = xnn.utils.ConfigNode()
    args.model_config.output_type = ['segmentation']    # the network is used to predict flow or depth or sceneflow
    args.model_config.output_channels = None            # number of output channels
    args.model_config.prediction_channels = None        # intermediate number of channels before final output_channels
    args.model_config.input_channels = None             # number of input channels
    args.model_config.final_upsample = True             # use final upsample to input resolution or not
    args.model_config.output_range = None               # max range of output
    args.model_config.num_decoders = None               # number of decoders to use. [options: 0, 1, None]
    args.model_config.freeze_encoder = False            # do not update encoder weights
    args.model_config.freeze_decoder = False            # do not update decoder weights
    args.model_config.multi_task_type = 'learned'       # find out loss multiplier by learning, choices=[None, 'learned', 'uncertainty', 'grad_norm', 'dwa_grad_norm']
    args.model_config.target_input_ratio = 1            # Keep target size same as input size
    args.model_config.input_nv12 = False                # convert input to nv12 format
    args.model_config.enable_fp16 = False               # faster training if the GPU supports fp16
    args.model = None                                   # the model itself can be given from ouside
    args.model_name = 'deeplabv2lite_mobilenetv2'       # model architecture, overwritten if pretrained is specified
    args.dataset_name = 'cityscapes_segmentation'       # dataset type
    args.transforms = None                              # the transforms itself can be given from outside
    args.input_channel_reverse = False                  # reverse input channels, for example RGB to BGR
    args.data_path = './data/cityscapes'                # 'path to dataset'
    args.save_path = None                               # checkpoints save path
    args.phase = 'training'                             # training/calibration/validation
    args.date = None                                    # date to add to save path. if this is None, current date will be added.
    args.logger = None                                  # logger stream to output into
    args.show_gpu_usage = False                         # Shows gpu usage at the begining of each training epoch
    args.split_file = None                              # train_val split file
    args.split_files = None                             # split list files. eg: train.txt val.txt
    args.split_value = None                             # test_val split proportion (between 0 (only test) and 1 (only train))
    args.optimizer = 'adam'                             # optimizer algorithms, choices=['adam','sgd']
    args.scheduler = 'step'                             # scheduler algorithms, choices=['step','poly', 'cosine']
    args.workers = 8                                    # number of data loading workers
    args.epochs = 250                                   # number of total epochs to run
    args.start_epoch = 0                                # manual epoch number (useful on restarts)
    args.epoch_size = 0                                 # manual epoch size (will match dataset size if not specified)
    args.epoch_size_val = 0                             # manual epoch size (will match dataset size if not specified)
    args.batch_size = 12                                # mini_batch size
    args.total_batch_size = None                        # accumulated batch size. total_batch_size = batch_size*iter_size
    args.iter_size = 1                                  # iteration size. total_batch_size = batch_size*iter_size
    args.lr = 1e-4                                      # initial learning rate
    args.lr_clips = None                                # use args.lr itself if it is None
    args.lr_calib = 0.05                                # lr for bias calibration
    args.warmup_epochs = 5                              # number of epochs to warmup
    args.warmup_factor = 1e-3                           # max lr allowed for the first epoch during warmup (as a factor of initial lr)
    args.momentum = 0.9                                 # momentum for sgd, alpha parameter for adam
    args.beta = 0.999                                   # beta parameter for adam
    args.weight_decay = 1e-4                            # weight decay
    args.bias_decay = None                              # bias decay
    args.sparse = True                                  # avoid invalid/ignored target pixels from loss computation, use NEAREST for interpolation
    args.tensorboard_num_imgs = 5                       # number of imgs to display in tensorboard
    args.pretrained = None                              # path to pre_trained model
    args.resume = None                                  # path to latest checkpoint (default: none)
    args.no_date = False                                # don\'t append date timestamp to folder
    args.print_freq = 100                               # print frequency (default: 100)
    args.milestones = (100, 200)                        # epochs at which learning rate is divided by 2
    args.losses = ['segmentation_loss']                 # loss functions to mchoices=['step','poly', 'cosine'],loss multiplication factor')
    args.metrics = ['segmentation_metrics']             # metric/measurement/error functions for train/validation
    args.multi_task_factors = None                      # loss mult factors
    args.class_weights = None                           # class weights
    args.loss_mult_factors = None                       # fixed loss mult factors - per loss - not: this is different from multi_task_factors (which is per task)
    args.multistep_gamma = 0.5                          # steps for step scheduler
    args.polystep_power = 1.0                           # power for polynomial scheduler
    args.rand_seed = 1                                  # random seed
    args.img_border_crop = None                         # image border crop rectangle. can be relative or absolute
    args.target_mask = None                             # mask rectangle. can be relative or absolute. last value is the mask value
    args.rand_resize = None                             # random image size to be resized to during training
    args.rand_output_size = None                        # output size to be resized to during training
    args.rand_scale = (1.0, 2.0)                        # random scale range for training
    args.rand_crop = None                               # image size to be cropped to
    args.img_resize = None                              # image size to be resized to during evaluation
    args.output_size = None                             # target output size to be resized to
    args.count_flops = True                             # count flops and report
    args.shuffle = True                                 # shuffle or not
    args.shuffle_val = True                             # shuffle val dataset or not
    args.transform_rotation = 0.                        # apply rotation augumentation. value is rotation in degrees. 0 indicates no rotation
    args.is_flow = None                                 # whether entries in images and targets lists are optical flow or not
    args.upsample_mode = 'bilinear'                     # upsample mode to use, choices=['nearest','bilinear']
    args.image_prenorm = True                           # whether normalization is done before all other the transforms
    args.image_mean = (128.0,)                          # image mean for input image normalization
    args.image_scale = (1.0 / (0.25 * 256),)            # image scaling/mult for input iamge normalization
    args.max_depth = 80                                 # maximum depth to be used for visualization
    args.pivot_task_idx = 0                             # task id to select best model
    args.parallel_model = True                          # Usedata parallel for model
    args.parallel_criterion = True                      # Usedata parallel for loss and metric
    args.evaluate_start = True                          # evaluate right at the begining of training or not
    args.save_onnx = True                               # apply quantized inference or not
    args.print_model = False                            # print the model to text
    args.run_soon = True                                # To start training after generating configs/models
    # quantization config
    args.quantize = False                               # apply quantized inference or not
    #args.model_surgery = None                          # replace activations with PAct2 activation module. Helpful in quantized training.
    args.bitwidth_weights = 8                           # bitwidth for weights
    args.bitwidth_activations = 8                       # bitwidth for activations
    args.histogram_range = True                         # histogram range for calibration
    args.bias_calibration = True                        # apply bias correction during quantized inference calibration
    args.per_channel_q = False                          # apply separate quantizion factor for each channel in depthwise or not
    args.constrain_bias = None                          # constrain bias according to the constraints of convolution engine
    args.save_mod_files = False                         # saves modified files after last commit. Also stores commit id.
    args.make_score_zero_mean = False                   # make score zero mean while learning
    args.no_q_for_dws_layer_idx = 0                     # no_q_for_dws_layer_idx
    args.viz_colormap = 'rainbow'                       # colormap for tensorboard: 'rainbow', 'plasma', 'magma', 'bone'
    args.freeze_bn = False                              # freeze the statistics of bn
    args.tensorboard_enable = True                      # en/disable of TB writing
    args.print_train_class_iou = False                  # print per-class IoU during training
    args.print_val_class_iou = False                    # print per-class IoU during validation
    args.freeze_layers = None                           # layers to freeze during training
    args.opset_version = 11                             # onnx opset_version
    args.prob_color_to_gray = (0.0,0.0)                 # this will be used for controlling color 2 gray augmentation
    args.interpolation = None                           # interpolation method to be used for resize. one of cv2.INTER_
    return args
# ################################################
# to avoid hangs in data loader with multi threads
# this was observed after using cv2 image processing functions
# https://github.com/pytorch/pytorch/issues/1355
# (cv2's internal thread pool can deadlock in forked DataLoader workers;
# disabling it keeps cv2 single-threaded in each worker process)
cv2.setNumThreads(0)
# ################################################
def main(args):
    """Entry point for edgeailite pixel2pixel training/calibration/validation.

    Sets up output paths, seeds, datasets/dataloaders, the model (optionally
    wrapped for quantization), losses/metrics, optimizer and scheduler from
    *args*, then runs the phase selected by args.phase ('training',
    'calibration' or 'validation').
    """
    # ensure pytorch version is 1.1 or higher (the assertion below checks 1.1)
    assert version.parse(torch.__version__) >= version.parse('1.1'), \
        'torch version must be 1.1 or higher, due to the change in scheduler.step() and optimiser.step() call order'
    assert (not hasattr(args, 'evaluate')), 'args.evaluate is deprecated. use args.phase=training or calibration or validation'
    assert is_valid_phase(args.phase), f'invalid phase {args.phase}'
    assert not hasattr(args, 'model_surgery'), 'the argument model_surgery is deprecated, it is not needed now - remove it'
    if (args.phase == 'validation' and args.bias_calibration):
        args.bias_calibration = False
        warnings.warn('switching off bias calibration in validation')
    #
    #################################################
    # fall back to the resize size when rand/crop/output sizes are not given
    args.rand_resize = args.img_resize if args.rand_resize is None else args.rand_resize
    args.rand_crop = args.img_resize if args.rand_crop is None else args.rand_crop
    args.output_size = args.img_resize if args.output_size is None else args.output_size
    # resume has higher priority
    args.pretrained = None if (args.resume is not None) else args.pretrained
    # prob_color_to_gray will be used for controlling color 2 gray augmentation
    if 'tiad' in args.dataset_name and args.prob_color_to_gray == (0.0, 0.0):
        #override in case of 'tiad' if default values are used
        args.prob_color_to_gray = (0.5, 0.0)
    if args.save_path is None:
        save_path = get_save_path(args)
    else:
        save_path = args.save_path
    #
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if args.save_mod_files:
        #store all the files after the last commit.
        mod_files_path = save_path+'/mod_files'
        os.makedirs(mod_files_path)
        cmd = "git ls-files --modified | xargs -i cp {} {}".format("{}", mod_files_path)
        print("cmd:", cmd)
        os.system(cmd)
        #store last commit id.
        cmd = "git log -n 1 >> {}".format(mod_files_path + '/commit_id.txt')
        print("cmd:", cmd)
        os.system(cmd)
    #################################################
    if args.logger is None:
        log_file = os.path.splitext(os.path.basename(__file__))[0] + '.log'
        args.logger = xnn.utils.TeeLogger(filename=os.path.join(save_path,log_file))
    #################################################
    # global settings. rand seeds for repeatability
    random.seed(args.rand_seed)
    np.random.seed(args.rand_seed)
    torch.manual_seed(args.rand_seed)
    torch.cuda.manual_seed(args.rand_seed)
    ################################
    # args check and config
    if args.iter_size != 1 and args.total_batch_size is not None:
        warnings.warn("only one of --iter_size or --total_batch_size must be set")
    #
    # iter_size is the number of batches over which gradients are accumulated
    if args.total_batch_size is not None:
        args.iter_size = args.total_batch_size//args.batch_size
    else:
        args.total_batch_size = args.batch_size*args.iter_size
    #################################################
    # set some global flags and initializations
    # keep it in args for now - although they don't belong here strictly
    # using pin_memory is seen to cause issues, especially when when lot of memory is used.
    args.use_pinned_memory = False
    args.n_iter = 0
    args.best_metric = -1
    cudnn.benchmark = True
    # torch.autograd.set_detect_anomaly(True)
    ################################
    # reset character color, in case it is different
    print('{}'.format(Fore.RESET))
    # print everything for log
    print('=> args: {}'.format(args))
    print('\n'.join("%s: %s" % item for item in sorted(vars(args).items())))
    print('=> will save everything to {}'.format(save_path))
    #################################################
    train_writer = SummaryWriter(os.path.join(save_path,'train')) if args.tensorboard_enable else None
    val_writer = SummaryWriter(os.path.join(save_path,'val')) if args.tensorboard_enable else None
    transforms = get_transforms(args) if args.transforms is None else args.transforms
    assert isinstance(transforms, (list,tuple)) and len(transforms) == 2, 'incorrect transforms were given'
    print("=> fetching images in '{}'".format(args.data_path))
    split_arg = args.split_file if args.split_file else (args.split_files if args.split_files else args.split_value)
    train_dataset, val_dataset = xvision.datasets.__dict__[args.dataset_name](args.dataset_config, args.data_path, split=split_arg, transforms=transforms)
    #################################################
    print('=> {} samples found, {} train samples and {} test samples '.format(len(train_dataset)+len(val_dataset),
        len(train_dataset), len(val_dataset)))
    # epoch_size != 0 limits how many samples are drawn per epoch via a sampler
    train_sampler = get_dataset_sampler(train_dataset, args.epoch_size) if args.epoch_size != 0 else None
    shuffle_train = args.shuffle and (train_sampler is None)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
        num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=train_sampler, shuffle=shuffle_train)
    val_sampler = get_dataset_sampler(val_dataset, args.epoch_size_val) if args.epoch_size_val != 0 else None
    shuffle_val = args.shuffle_val and (val_sampler is None)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size,
        num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=val_sampler, shuffle=shuffle_val)
    #################################################
    if (args.model_config.input_channels is None):
        args.model_config.input_channels = (3,)
        print("=> input channels is not given - setting to {}".format(args.model_config.input_channels))
    if (args.model_config.output_channels is None):
        if ('num_classes' in dir(train_dataset)):
            args.model_config.output_channels = train_dataset.num_classes()
        else:
            args.model_config.output_channels = (2 if args.model_config.output_type == 'flow' else args.model_config.output_channels)
            xnn.utils.print_yellow("=> output channels is not given - setting to {} - not sure to work".format(args.model_config.output_channels))
        #
        if not isinstance(args.model_config.output_channels,(list,tuple)):
            args.model_config.output_channels = [args.model_config.output_channels]
    if (args.class_weights is None) and ('class_weights' in dir(train_dataset)):
        args.class_weights = train_dataset.class_weights()
        if not isinstance(args.class_weights, (list,tuple)):
            args.class_weights = [args.class_weights]
        #
        print("=> class weights available for dataset: {}".format(args.class_weights))
    #################################################
    pretrained_data = None
    model_surgery_quantize = False
    # NOTE(review): pretrained_data is assigned None twice - harmless duplication
    pretrained_data = None
    if args.pretrained and args.pretrained != "None":
        pretrained_data = []
        pretrained_files = args.pretrained if isinstance(args.pretrained,(list,tuple)) else [args.pretrained]
        for p in pretrained_files:
            if isinstance(p, dict):
                p_data = p
            else:
                if p.startswith('http://') or p.startswith('https://'):
                    p_file = xnn.utils.download_url(p, './data/downloads')
                else:
                    p_file = p
                #
                print(f'=> loading pretrained weights file: {p}')
                p_data = torch.load(p_file)
            #
            pretrained_data.append(p_data)
            # the last pretrained file decides model_surgery_quantize
            model_surgery_quantize = p_data['quantize'] if 'quantize' in p_data else False
    #
    #################################################
    # create model
    is_onnx_model = False
    if isinstance(args.model, torch.nn.Module):
        model, change_names_dict = args.model if isinstance(args.model, (list, tuple)) else (args.model, None)
        assert isinstance(model, torch.nn.Module), 'args.model, if provided must be a valid torch.nn.Module'
    elif isinstance(args.model, str) and args.model.endswith('.onnx'):
        model = xnn.onnx.import_onnx(args.model)
        is_onnx_model = True
    else:
        xnn.utils.print_yellow("=> creating model '{}'".format(args.model_name))
        model = xvision.models.pixel2pixel.__dict__[args.model_name](args.model_config)
        # check if we got the model as well as parameters to change the names in pretrained
        model, change_names_dict = model if isinstance(model, (list,tuple)) else (model,None)
    #
    if args.quantize:
        # dummy input is used by quantized models to analyze graph
        is_cuda = next(model.parameters()).is_cuda
        dummy_input = create_rand_inputs(args, is_cuda=is_cuda)
        #
        # choose the quantization wrapper according to the phase
        if 'training' in args.phase:
            model = xnn.quantize.QuantTrainModule(model, per_channel_q=args.per_channel_q,
                        histogram_range=args.histogram_range, bitwidth_weights=args.bitwidth_weights,
                        bitwidth_activations=args.bitwidth_activations, constrain_bias=args.constrain_bias,
                        dummy_input=dummy_input)
        elif 'calibration' in args.phase:
            model = xnn.quantize.QuantCalibrateModule(model, per_channel_q=args.per_channel_q,
                        bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,
                        histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,
                        bias_calibration=args.bias_calibration, dummy_input=dummy_input, lr_calib=args.lr_calib)
        elif 'validation' in args.phase:
            # Note: bias_calibration is not enabled
            model = xnn.quantize.QuantTestModule(model, per_channel_q=args.per_channel_q,
                        bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,
                        histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,
                        dummy_input=dummy_input, model_surgery_quantize=model_surgery_quantize)
        else:
            assert False, f'invalid phase {args.phase}'
    #
    # load pretrained model
    if pretrained_data is not None and not is_onnx_model:
        model_orig = get_model_orig(model)
        for (p_data,p_file) in zip(pretrained_data, pretrained_files):
            print("=> using pretrained weights from: {}".format(p_file))
            if hasattr(model_orig, 'load_weights'):
                model_orig.load_weights(pretrained=p_data, change_names_dict=change_names_dict)
            else:
                xnn.utils.load_weights(get_model_orig(model), pretrained=p_data, change_names_dict=change_names_dict)
            #
        #
    #
    #################################################
    if args.count_flops:
        count_flops(args, model)
    #################################################
    if args.save_onnx:
        write_onnx_model(args, get_model_orig(model), save_path, save_traced_model=False)
    #
    #################################################
    if args.print_model:
        print(model)
        print('\n')
    else:
        args.logger.debug(str(model))
        args.logger.debug('\n')
    #################################################
    if (not args.run_soon):
        print("Training not needed for now")
        close(args)
        exit()
    #################################################
    # DataParallel does not work for QuantCalibrateModule or QuantTestModule
    if args.parallel_model and (not isinstance(model, (xnn.quantize.QuantCalibrateModule, xnn.quantize.QuantTestModule))):
        model = torch.nn.DataParallel(model)
    #################################################
    model = model.cuda()
    #################################################
    # for help in debug/print
    for name, module in model.named_modules():
        module.name = name
    #################################################
    # instantiate loss modules per task, passing only the kwargs each loss declares
    args.loss_modules = copy.deepcopy(args.losses)
    for task_dx, task_losses in enumerate(args.losses):
        for loss_idx, loss_fn in enumerate(task_losses):
            kw_args = {}
            loss_args = pixel2pixel_losses.__dict__[loss_fn].args()
            for arg in loss_args:
                if arg == 'weight' and (args.class_weights is not None):
                    kw_args.update({arg:args.class_weights[task_dx]})
                elif arg == 'num_classes':
                    kw_args.update({arg:args.model_config.output_channels[task_dx]})
                elif arg == 'sparse':
                    kw_args.update({arg:args.sparse})
                elif arg == 'enable_fp16':
                    kw_args.update({arg:args.model_config.enable_fp16})
                #
            #
            loss_fn_raw = pixel2pixel_losses.__dict__[loss_fn](**kw_args)
            # NOTE(review): the inner conditional re-checks args.parallel_criterion redundantly
            if args.parallel_criterion:
                loss_fn = torch.nn.DataParallel(loss_fn_raw).cuda() if args.parallel_criterion else loss_fn_raw.cuda()
                loss_fn.info = loss_fn_raw.info
                loss_fn.clear = loss_fn_raw.clear
            else:
                loss_fn = loss_fn_raw.cuda()
            #
            args.loss_modules[task_dx][loss_idx] = loss_fn
    #
    # instantiate metric modules per task, mirroring the loss construction above
    args.metric_modules = copy.deepcopy(args.metrics)
    for task_dx, task_metrics in enumerate(args.metrics):
        for midx, metric_fn in enumerate(task_metrics):
            kw_args = {}
            loss_args = pixel2pixel_losses.__dict__[metric_fn].args()
            for arg in loss_args:
                if arg == 'weight':
                    kw_args.update({arg:args.class_weights[task_dx]})
                elif arg == 'num_classes':
                    kw_args.update({arg:args.model_config.output_channels[task_dx]})
                elif arg == 'sparse':
                    kw_args.update({arg:args.sparse})
                elif arg == 'enable_fp16':
                    kw_args.update({arg:args.model_config.enable_fp16})
                #
            #
            metric_fn_raw = pixel2pixel_losses.__dict__[metric_fn](**kw_args)
            if args.parallel_criterion:
                metric_fn = torch.nn.DataParallel(metric_fn_raw).cuda()
                metric_fn.info = metric_fn_raw.info
                metric_fn.clear = metric_fn_raw.clear
            else:
                metric_fn = metric_fn_raw.cuda()
            #
            args.metric_modules[task_dx][midx] = metric_fn
    #
    #################################################
    # validation-only phase: run once and return
    if args.phase=='validation':
        with torch.no_grad():
            validate(args, val_dataset, val_loader, model, 0, val_writer)
        #
        close(args)
        return
    #################################################
    assert(args.optimizer in ['adam', 'sgd'])
    print('=> setting {} optimizer'.format(args.optimizer))
    if args.lr_clips is not None:
        # separate parameter group for PAct2 clip values, with their own lr/decay
        learning_rate_clips = args.lr_clips if 'training' in args.phase else 0.0
        clips_decay = args.bias_decay if (args.bias_decay is not None and args.bias_decay != 0.0) else args.weight_decay
        clips_params = [p for n,p in model.named_parameters() if 'clips' in n]
        other_params = [p for n,p in model.named_parameters() if 'clips' not in n]
        param_groups = [{'params': clips_params, 'weight_decay': clips_decay, 'lr': learning_rate_clips},
                        {'params': other_params, 'weight_decay': args.weight_decay}]
    else:
        param_groups = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'weight_decay': args.weight_decay}]
    #
    # lr is 0.0 for calibration so that only the calibration statistics update
    learning_rate = args.lr if ('training'in args.phase) else 0.0
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(param_groups, learning_rate, betas=(args.momentum, args.beta))
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(param_groups, learning_rate, momentum=args.momentum)
    else:
        raise ValueError('Unknown optimizer type{}'.format(args.optimizer))
    #
    #################################################
    max_iter = args.epochs * len(train_loader)
    scheduler = xnn.optim.lr_scheduler.SchedulerWrapper(scheduler_type=args.scheduler, optimizer=optimizer,
                    epochs=args.epochs, start_epoch=args.start_epoch,
                    warmup_epochs=args.warmup_epochs, warmup_factor=args.warmup_factor,
                    max_iter=max_iter, polystep_power=args.polystep_power,
                    milestones=args.milestones, multistep_gamma=args.multistep_gamma)
    # optionally resume from a checkpoint
    if args.resume:
        if not os.path.isfile(args.resume):
            print("=> no checkpoint found at '{}'".format(args.resume))
        else:
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model = xnn.utils.load_weights(model, checkpoint)
            if args.start_epoch == 0:
                args.start_epoch = checkpoint['epoch']
            #
            if 'best_metric' in list(checkpoint.keys()):
                args.best_metric = checkpoint['best_metric']
            #
            if 'optimizer' in list(checkpoint.keys()):
                optimizer.load_state_dict(checkpoint['optimizer'])
            #
            if 'scheduler' in list(checkpoint.keys()):
                scheduler.load_state_dict(checkpoint['scheduler'])
            #
            if 'multi_task_factors' in list(checkpoint.keys()):
                args.multi_task_factors = checkpoint['multi_task_factors']
            #
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
    #################################################
    if args.evaluate_start:
        with torch.no_grad():
            validate(args, val_dataset, val_loader, model, args.start_epoch, val_writer)
    grad_scaler = torch.cuda.amp.GradScaler() if args.model_config.enable_fp16 else None
    for epoch in range(args.start_epoch, args.epochs):
        # epoch is needed to seed shuffling in DistributedSampler, every epoch.
        # otherwise seed of 0 is used every epoch, which seems incorrect.
        if train_sampler and isinstance(train_sampler, torch.utils.data.DistributedSampler):
            train_sampler.set_epoch(epoch)
        if val_sampler and isinstance(val_sampler, torch.utils.data.DistributedSampler):
            val_sampler.set_epoch(epoch)
        # train for one epoch
        train(args, train_dataset, train_loader, model, optimizer, epoch, train_writer, scheduler, grad_scaler)
        # evaluate on validation set
        with torch.no_grad():
            val_metric, metric_name = validate(args, val_dataset, val_loader, model, epoch, val_writer)
        if args.best_metric < 0:
            args.best_metric = val_metric
        # decide whether the metric improves when larger (iou/acc) or smaller (errors)
        if "iou" in metric_name.lower() or "acc" in metric_name.lower():
            is_best = val_metric >= args.best_metric
            args.best_metric = max(val_metric, args.best_metric)
        elif "error" in metric_name.lower() or "diff" in metric_name.lower() or "norm" in metric_name.lower() \
                or "loss" in metric_name.lower() or "outlier" in metric_name.lower():
            is_best = val_metric <= args.best_metric
            args.best_metric = min(val_metric, args.best_metric)
        else:
            raise ValueError("Metric is not known. Best model could not be saved.")
        #
        checkpoint_dict = { 'epoch': epoch + 1, 'model_name': args.model_name,
                            'state_dict': get_model_orig(model).state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'scheduler': scheduler.state_dict(),
                            'best_metric': args.best_metric,
                            'multi_task_factors': args.multi_task_factors,
                            'quantize' : args.quantize}
        save_checkpoint(args, save_path, get_model_orig(model), checkpoint_dict, is_best)
        if args.tensorboard_enable:
            train_writer.file_writer.flush()
            val_writer.file_writer.flush()
        # adjust the learning rate using lr scheduler
        if 'training' in args.phase:
            scheduler.step()
        #
    #
    # close and cleanup
    close(args)
#
###################################################################
def is_valid_phase(phase):
    """Return True if *phase* contains one of the supported phase keywords."""
    for keyword in ('training', 'calibration', 'validation'):
        if keyword in phase:
            return True
    return False
###################################################################
def train(args, train_dataset, train_loader, model, optimizer, epoch, train_writer, scheduler, grad_scaler):
    """Run one training epoch: forward, loss, backward with gradient accumulation,
    optimizer/scheduler stepping, metric tracking and tensorboard logging.

    Returns (output_metric, output_name) for the pivot task.
    """
    batch_time = xnn.utils.AverageMeter()
    data_time = xnn.utils.AverageMeter()
    # if the loss/ metric is already an average, no need to further average
    avg_loss = [xnn.utils.AverageMeter(print_avg=(not task_loss[0].info()['is_avg'])) for task_loss in args.loss_modules]
    avg_loss_orig = [xnn.utils.AverageMeter(print_avg=(not task_loss[0].info()['is_avg'])) for task_loss in args.loss_modules]
    avg_metric = [xnn.utils.AverageMeter(print_avg=(not task_metric[0].info()['is_avg'])) for task_metric in args.metric_modules]
    ##########################
    # switch to train mode
    model.train()
    # freeze bn and range after some epochs during quantization
    if args.freeze_bn or (args.quantize and epoch > 2 and epoch >= ((args.epochs//2)-1)):
        xnn.utils.print_once('Freezing BN for subsequent epochs')
        xnn.utils.freeze_bn(model)
    #
    if (args.quantize and epoch > 4 and epoch >= ((args.epochs//2)+1)):
        xnn.utils.print_once('Freezing ranges for subsequent epochs')
        xnn.layers.freeze_quant_range(model)
    #
    #freeze layers
    if args.freeze_layers is not None:
        # 'freeze_layer_name' could be part of 'name', i.e. 'name' need not be exact same as 'freeze_layer_name'
        # e.g. freeze_layer_name = 'encoder.0' then all layers like, 'encoder.0.0' 'encoder.0.1' will be frozen
        for freeze_layer_name in args.freeze_layers:
            for name, module in model.named_modules():
                if freeze_layer_name in name:
                    xnn.utils.print_once("Freezing the module : {}".format(name))
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
    ##########################
    # reset running state in all loss and metric modules
    for task_dx, task_losses in enumerate(args.loss_modules):
        for loss_idx, loss_fn in enumerate(task_losses):
            loss_fn.clear()
    for task_dx, task_metrics in enumerate(args.metric_modules):
        for midx, metric_fn in enumerate(task_metrics):
            metric_fn.clear()
    num_iter = len(train_loader)
    progress_bar = progiter.ProgIter(np.arange(num_iter), chunksize=1)
    metric_name = "Metric"
    metric_ctx = [None] * len(args.metric_modules)
    end_time = time.time()
    writer_idx = 0
    last_update_iter = -1
    # change color to yellow for calibration
    progressbar_color = (Fore.YELLOW if (('calibration' in args.phase) or ('training' in args.phase and args.quantize)) else Fore.WHITE)
    print('{}'.format(progressbar_color), end='')
    ##########################
    for iter_id, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end_time)
        lr = scheduler.get_lr()[0]
        # inputs may be tensors or lists of tensors (e.g. NV12 y/uv pairs)
        input_list = [[jj.cuda() for jj in img] if isinstance(img,(list,tuple)) else img.cuda() for img in inputs]
        target_list = [tgt.cuda(non_blocking=True) for tgt in targets]
        target_sizes = [tgt.shape for tgt in target_list]
        batch_size_cur = target_sizes[0][0]
        ##########################
        # compute output
        task_outputs = model(input_list)
        task_outputs = task_outputs if isinstance(task_outputs,(list,tuple)) else [task_outputs]
        # upsample output to target resolution
        if args.upsample_mode is not None:
            task_outputs = upsample_tensors(task_outputs, target_sizes, args.upsample_mode)
        if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:
            args.multi_task_factors, args.multi_task_offsets = xnn.layers.get_loss_scales(model)
        else:
            args.multi_task_factors = None
            args.multi_task_offsets = None
        loss_total, loss_list, loss_names, loss_types, loss_list_orig = \
            compute_task_objectives(args, args.loss_modules, input_list, task_outputs, target_list,
                         task_mults=args.multi_task_factors, task_offsets=args.multi_task_offsets,
                         loss_mult_factors=args.loss_mult_factors)
        if args.print_train_class_iou:
            metric_total, metric_list, metric_names, metric_types, _, confusion_matrix = \
                compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list,
                get_confusion_matrix=args.print_train_class_iou)
        else:
            metric_total, metric_list, metric_names, metric_types, _ = \
                compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list,
                get_confusion_matrix=args.print_train_class_iou)
        if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:
            xnn.layers.set_losses(model, loss_list_orig)
        if 'training' in args.phase:
            # accumulate gradients
            if args.model_config.enable_fp16:
                grad_scaler.scale(loss_total).backward()
            else:
                loss_total.backward()
            #
            # optimization step - only every iter_size iterations (gradient accumulation)
            if ((iter_id+1) % args.iter_size) == 0:
                if args.model_config.enable_fp16:
                    grad_scaler.step(optimizer)
                    grad_scaler.update()
                else:
                    optimizer.step()
                #
                # zero gradients so that we can accumulate gradients
                # setting grad=None is a faster alternative instead of optimizer.zero_grad()
                xnn.utils.clear_grad(model)
            #
        #
        # record loss.
        for task_idx, task_losses in enumerate(args.loss_modules):
            avg_loss[task_idx].update(float(loss_list[task_idx].cpu()), batch_size_cur)
            avg_loss_orig[task_idx].update(float(loss_list_orig[task_idx].cpu()), batch_size_cur)
            if args.tensorboard_enable:
                train_writer.add_scalar('Training/Task{}_{}_Loss_Iter'.format(task_idx,loss_names[task_idx]), float(loss_list[task_idx]), args.n_iter)
                if args.model_config.multi_task_type is not None and len(args.model_config.output_channels) > 1:
                    train_writer.add_scalar('Training/multi_task_Factor_Task{}_{}'.format(task_idx,loss_names[task_idx]), float(args.multi_task_factors[task_idx]), args.n_iter)
        # record error/accuracy.
        for task_idx, task_metrics in enumerate(args.metric_modules):
            avg_metric[task_idx].update(float(metric_list[task_idx].cpu()), batch_size_cur)
        ##########################
        if args.tensorboard_enable:
            write_output(args, 'Training_', num_iter, iter_id, epoch, train_dataset, train_writer, input_list, task_outputs, target_list, metric_name, writer_idx)
        if ((iter_id % args.print_freq) == 0) or (iter_id == (num_iter-1)):
            output_string = ''
            for task_idx, task_metrics in enumerate(args.metric_modules):
                output_string += '[{}={}]'.format(metric_names[task_idx], str(avg_metric[task_idx]))
            epoch_str = '{}/{}'.format(epoch + 1, args.epochs)
            progress_bar.set_description("{}=> {}  ".format(progressbar_color, args.phase))
            multi_task_factors_print = ['{:.3f}'.format(float(lmf)) for lmf in args.multi_task_factors] if args.multi_task_factors is not None else None
            progress_bar.set_postfix(Epoch=epoch_str, LR=lr, DataTime=str(data_time), LossMult=multi_task_factors_print, Loss=avg_loss, Output=output_string)
            progress_bar.update(iter_id-last_update_iter)
            last_update_iter = iter_id
        args.n_iter += 1
        end_time = time.time()
        writer_idx = (writer_idx + 1) % args.tensorboard_num_imgs
        # add onnx graph to tensorboard
        # commenting out due to issues in transitioning to pytorch 0.4
        # (bilinear mode in upsampling causes hang or crash - may be due to align_borders change, nearest is fine)
        #if epoch == 0 and iter_id == 0:
        #    input_zero = torch.zeros(input_var.shape)
        #    train_writer.add_graph(model, input_zero)
        #This cache operation slows down tranining
        #torch.cuda.empty_cache()
    #
    # NOTE(review): task_idx here is the leaked loop variable from the last loop above
    if args.print_train_class_iou:
        print_class_iou(args=args, confusion_matrix=confusion_matrix, task_idx=task_idx)
    progress_bar.close()
    # to print a new line - do not provide end=''
    print('{}'.format(Fore.RESET), end='')
    if args.tensorboard_enable:
        for task_idx, task_losses in enumerate(args.loss_modules):
            train_writer.add_scalar('Training/Task{}_{}_Loss_Epoch'.format(task_idx,loss_names[task_idx]), float(avg_loss[task_idx]), epoch)
        for task_idx, task_metrics in enumerate(args.metric_modules):
            train_writer.add_scalar('Training/Task{}_{}_Metric_Epoch'.format(task_idx,metric_names[task_idx]), float(avg_metric[task_idx]), epoch)
    output_name = metric_names[args.pivot_task_idx]
    output_metric = float(avg_metric[args.pivot_task_idx])
    ##########################
    if args.quantize:
        def debug_format(v):
            return ('{:.3f}'.format(v) if v is not None else 'None')
        #
        # log the activation clip values of all PAct2 layers for quantization debug
        clips_act = [m.get_clips_act()[1] for n,m in model.named_modules() if isinstance(m,xnn.layers.PAct2)]
        if len(clips_act) > 0:
            args.logger.debug('\nclips_act : ' + ' '.join(map(debug_format, clips_act)))
            args.logger.debug('')
    #
    return output_metric, output_name
###################################################################
def validate(args, val_dataset, val_loader, model, epoch, val_writer):
    """Run one validation pass over val_loader (no gradient updates).

    Returns (output_metric, output_name) for the pivot task.
    Caller is expected to wrap this in torch.no_grad().
    """
    data_time = xnn.utils.AverageMeter()
    # if the loss/ metric is already an average, no need to further average
    avg_metric = [xnn.utils.AverageMeter(print_avg=(not task_metric[0].info()['is_avg'])) for task_metric in args.metric_modules]
    ##########################
    # switch to evaluate mode
    model.eval()
    ##########################
    # reset running state in all metric modules
    for task_dx, task_metrics in enumerate(args.metric_modules):
        for midx, metric_fn in enumerate(task_metrics):
            metric_fn.clear()
    metric_name = "Metric"
    end_time = time.time()
    writer_idx = 0
    last_update_iter = -1
    metric_ctx = [None] * len(args.metric_modules)
    num_iter = len(val_loader)
    progress_bar = progiter.ProgIter(np.arange(num_iter), chunksize=1)
    # change color to green
    print('{}'.format(Fore.GREEN), end='')
    ##########################
    for iter_id, (inputs, targets) in enumerate(val_loader):
        data_time.update(time.time() - end_time)
        # inputs may be tensors or lists of tensors (e.g. NV12 y/uv pairs)
        input_list = [[jj.cuda() for jj in img] if isinstance(img,(list,tuple)) else img.cuda() for img in inputs]
        target_list = [j.cuda(non_blocking=True) for j in targets]
        target_sizes = [tgt.shape for tgt in target_list]
        batch_size_cur = target_sizes[0][0]
        # compute output
        task_outputs = model(input_list)
        task_outputs = task_outputs if isinstance(task_outputs, (list, tuple)) else [task_outputs]
        if args.upsample_mode is not None:
            task_outputs = upsample_tensors(task_outputs, target_sizes, args.upsample_mode)
        if args.print_val_class_iou:
            metric_total, metric_list, metric_names, metric_types, _, confusion_matrix = \
                compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list,
                get_confusion_matrix = args.print_val_class_iou)
        else:
            metric_total, metric_list, metric_names, metric_types, _ = \
                compute_task_objectives(args, args.metric_modules, input_list, task_outputs, target_list,
                get_confusion_matrix = args.print_val_class_iou)
        # record error/accuracy.
        for task_idx, task_metrics in enumerate(args.metric_modules):
            avg_metric[task_idx].update(float(metric_list[task_idx].cpu()), batch_size_cur)
        if args.tensorboard_enable:
            write_output(args, 'Validation_', num_iter, iter_id, epoch, val_dataset, val_writer, input_list, task_outputs, target_list, metric_names, writer_idx)
        if ((iter_id % args.print_freq) == 0) or (iter_id == (num_iter-1)):
            output_string = ''
            for task_idx, task_metrics in enumerate(args.metric_modules):
                output_string += '[{}={}]'.format(metric_names[task_idx], str(avg_metric[task_idx]))
            epoch_str = '{}/{}'.format(epoch + 1, args.epochs)
            progress_bar.set_description("=> validation")
            progress_bar.set_postfix(Epoch=epoch_str, DataTime=data_time, Output="{}".format(output_string))
            progress_bar.update(iter_id-last_update_iter)
            last_update_iter = iter_id
        #
        end_time = time.time()
        writer_idx = (writer_idx + 1) % args.tensorboard_num_imgs
    #
    # NOTE(review): task_idx here is the leaked loop variable from the last loop above
    if args.print_val_class_iou:
        print_class_iou(args = args, confusion_matrix = confusion_matrix, task_idx=task_idx)
    #
    #print_conf_matrix(conf_matrix=conf_matrix, en=False)
    progress_bar.close()
    # to print a new line - do not provide end=''
    print('{}'.format(Fore.RESET), end='')
    if args.tensorboard_enable:
        for task_idx, task_metrics in enumerate(args.metric_modules):
            val_writer.add_scalar('Validation/Task{}_{}_Metric_Epoch'.format(task_idx,metric_names[task_idx]), float(avg_metric[task_idx]), epoch)
    output_name = metric_names[args.pivot_task_idx]
    output_metric = float(avg_metric[args.pivot_task_idx])
    return output_metric, output_name
###################################################################
def close(args):
    """Release the logger held on *args* and reset the best-metric tracker."""
    has_logger = args.logger is not None
    if has_logger:
        # drop the reference first so the TeeLogger can be garbage collected
        del args.logger
        args.logger = None
    #
    args.best_metric = -1
#
def get_save_path(args, phase=None):
    """Build the checkpoint output directory path for this run.

    Layout: ./data/checkpoints/edgeailite/<dataset>/<date>_<dataset>_<model>_resize..._traincrop.../<phase>
    If *phase* is None, args.phase is used; if args.date is falsy, the current
    timestamp is used.
    """
    run_date = args.date if args.date else datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    run_name = run_date + '_' + args.dataset_name + '_' + args.model_name
    # note: resize/crop are formatted as WxH (index 1 first)
    suffix = '_resize{}x{}_traincrop{}x{}'.format(args.img_resize[1], args.img_resize[0], args.rand_crop[1], args.rand_crop[0])
    base_dir = os.path.join('./data/checkpoints/edgeailite', args.dataset_name, run_name) + suffix
    selected_phase = args.phase if phase is None else phase
    return os.path.join(base_dir, selected_phase)
def get_model_orig(model):
    """Unwrap DataParallel/DistributedDataParallel and quantization wrappers,
    returning the underlying plain model."""
    parallel_types = (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)
    unwrapped = model.module if isinstance(model, parallel_types) else model
    if isinstance(unwrapped, (xnn.quantize.QuantBaseModule)):
        unwrapped = unwrapped.module
    return unwrapped
def create_rand_inputs(args, is_cuda):
    """Create random dummy input tensors matching the model's input layout.

    Returns a list with one entry per configured input: a (1, C, H, W) tensor
    in the normal case, or a [Y, UV] pair of planes when input_nv12 is set.
    Tensors are moved to the GPU when is_cuda is True.
    """
    dummy_input = []
    height, width = args.img_resize[0], args.img_resize[1]
    if args.model_config.input_nv12:
        # NV12: full-resolution luma plane plus a half-height chroma plane
        for _ in args.model_config.input_channels:
            y_plane = torch.rand((1, 1, height, width))
            uv_plane = torch.rand((1, 1, height//2, width))
            if is_cuda:
                y_plane = y_plane.cuda()
                uv_plane = uv_plane.cuda()
            #
            dummy_input.append([y_plane, uv_plane])
    else:
        for num_ch in args.model_config.input_channels:
            x = torch.rand((1, num_ch, height, width))
            dummy_input.append(x.cuda() if is_cuda else x)
    return dummy_input
def count_flops(args, model):
    """Measure and print the model complexity (GFLOPs/GMACs) at the configured input size."""
    dummy_input = create_rand_inputs(args, next(model.parameters()).is_cuda)
    #
    model.eval()
    # forward once through the model, counting operations
    total_flops = xnn.utils.forward_count_flops(model, dummy_input)
    gflops = total_flops/1e9
    print('=> Size = {}, GFLOPs = {}, GMACs = {}'.format(args.img_resize, gflops, gflops/2))
def derive_node_name(input_name):
    """Derive an onnx node name from the last of its input tensor names.

    The final '.suffix' segment is stripped, e.g. ['conv1.weight'] -> 'conv1'.
    """
    last_input = input_name[-1]
    stem, sep, _tail = last_input.rpartition('.')
    # when there is no '.' the whole name is kept
    return stem if sep else last_input
#torch onnx export does not update names. Do it using onnx.save
def add_node_names(onnx_model_name):
    """Load an onnx model, assign readable node names derived from their inputs,
    and save the model back to the same file.

    torch.onnx.export does not set node names, so this is done here via onnx.
    """
    onnx_model = onnx.load(onnx_model_name)
    for node in onnx_model.graph.node:
        # strip any ':suffix' from the input tensor names
        for j in range(len(node.input)):
            node.input[j] = node.input[j].split(':')[0]
        #
        node.name = derive_node_name(node.input)
    #
    #update model inplace
    onnx.save(onnx_model, onnx_model_name)
def write_onnx_model(args, model, save_path, name='checkpoint.onnx', save_traced_model=False):
    """Export *model* to onnx under *save_path*, fix node names and infer shapes.

    Optionally also saves a torch.jit traced model alongside the onnx file.
    """
    onnx_file = os.path.join(save_path, name)
    on_gpu = next(model.parameters()).is_cuda
    input_list = create_rand_inputs(args, is_cuda=on_gpu)
    model.eval()
    torch.onnx.export(model, input_list, onnx_file, export_params=True, verbose=False,
                      do_constant_folding=True, opset_version=args.opset_version)
    # torch onnx export does not update names - do it using onnx.save
    add_node_names(onnx_model_name=onnx_file)
    # infer shapes
    onnx.shape_inference.infer_shapes_path(onnx_file, onnx_file)
    if save_traced_model:
        traced_model = torch.jit.trace(model, (input_list,))
        torch.jit.save(traced_model, os.path.join(save_path, 'traced_model.pth'))
    #
###################################################################
def write_output(args, prefix, val_epoch_size, iter_id, epoch, dataset, output_writer, input_images, task_outputs, task_targets, metric_names, writer_idx):
    """Write one random batch element's inputs, targets and outputs to tensorboard.

    Only a fraction of calls actually write: the write probability is chosen so
    that roughly args.tensorboard_num_imgs samples get logged per epoch. Each
    supported output_type (segmentation, depth/disparity, flow, interest_pt)
    gets its own visualization.

    NOTE(review): iter_id and metric_names are currently unused in this body.
    """
    # probabilistic throttle: log ~tensorboard_num_imgs images per epoch
    write_freq = (args.tensorboard_num_imgs / float(val_epoch_size))
    write_prob = np.random.random()
    if (write_prob > write_freq):
        return
    # NV12 input arrives as (Y, UV) pairs, so batch size sits one level deeper
    if args.model_config.input_nv12:
        batch_size = input_images[0][0].shape[0]
    else:
        batch_size = input_images[0].shape[0]
    # pick one random batch element to visualize
    b_index = random.randint(0, batch_size - 1)
    input_image = None
    for img_idx, img in enumerate(input_images):
        if args.model_config.input_nv12:
            #convert NV12 to BGR for tensorboard
            input_image = xvision.transforms.image_transforms_xv12.nv12_to_bgr_image(Y = input_images[img_idx][0][b_index], UV = input_images[img_idx][1][b_index],
                            image_scale=args.image_scale, image_mean=args.image_mean)
        else:
            input_image = input_images[img_idx][b_index].cpu().numpy().transpose((1, 2, 0))
            # convert back to original input range (0-255)
            input_image = input_image / args.image_scale + args.image_mean
        if args.is_flow and args.is_flow[0][img_idx]:
            #input corresponding to flow is assumed to have been generated by adding 128
            flow = input_image - 128
            flow_hsv = xnn.utils.flow2hsv(flow.transpose(2, 0, 1), confidence=False).transpose(2, 0, 1)
            #flow_hsv = (flow_hsv / 255.0).clip(0, 1) #TODO: check this
            output_writer.add_image(prefix +'Input{}/{}'.format(img_idx, writer_idx), flow_hsv, epoch)
        else:
            input_image = (input_image/255.0).clip(0,1) #.astype(np.uint8)
            output_writer.add_image(prefix + 'Input{}/{}'.format(img_idx, writer_idx), input_image.transpose((2,0,1)), epoch)
    # visualize targets/outputs per task; input_image below is the LAST input image
    # for sparse data, chroma blending does not look good
    for task_idx, output_type in enumerate(args.model_config.output_type):
        # metric_name = metric_names[task_idx]
        output = task_outputs[task_idx]
        target = task_targets[task_idx]
        if (output_type == 'segmentation') and hasattr(dataset, 'decode_segmap'):
            segmentation_target = dataset.decode_segmap(target[b_index,0].cpu().numpy())
            # argmax over classes when multi-channel, raw output otherwise
            segmentation_output = output.max(dim=1,keepdim=True)[1].data.cpu().numpy() if(output.shape[1]>1) else output.data.cpu().numpy()
            segmentation_output = dataset.decode_segmap(segmentation_output[b_index,0])
            segmentation_output_blend = xnn.utils.chroma_blend(input_image, segmentation_output)
            #
            output_writer.add_image(prefix+'Task{}_{}_GT/{}'.format(task_idx,output_type,writer_idx), segmentation_target.transpose(2,0,1), epoch)
            if not args.sparse:
                segmentation_target_blend = xnn.utils.chroma_blend(input_image, segmentation_target)
                output_writer.add_image(prefix + 'Task{}_{}_GT_ColorBlend/{}'.format(task_idx, output_type, writer_idx), segmentation_target_blend.transpose(2, 0, 1), epoch)
            #
            output_writer.add_image(prefix+'Task{}_{}_Output/{}'.format(task_idx,output_type,writer_idx), segmentation_output.transpose(2,0,1), epoch)
            output_writer.add_image(prefix+'Task{}_{}_Output_ColorBlend/{}'.format(task_idx,output_type,writer_idx), segmentation_output_blend.transpose(2,0,1), epoch)
        elif (output_type in ('depth', 'disparity')):
            depth_chanidx = 0
            output_writer.add_image(prefix+'Task{}_{}_GT_Color_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(target[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap).transpose(2,0,1), epoch)
            if not args.sparse:
                output_writer.add_image(prefix + 'Task{}_{}_GT_ColorBlend_Visualization/{}'.format(task_idx, output_type, writer_idx), xnn.utils.tensor2array(target[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap, input_blend=input_image).transpose(2, 0, 1), epoch)
            #
            output_writer.add_image(prefix+'Task{}_{}_Output_Color_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(output.data[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap).transpose(2,0,1), epoch)
            output_writer.add_image(prefix + 'Task{}_{}_Output_ColorBlend_Visualization/{}'.format(task_idx, output_type, writer_idx),xnn.utils.tensor2array(output.data[b_index][depth_chanidx].cpu(), max_value=args.max_depth, colormap=args.viz_colormap, input_blend=input_image).transpose(2, 0, 1), epoch)
        elif (output_type == 'flow'):
            max_value_flow = 10.0 # only for visualization
            output_writer.add_image(prefix+'Task{}_{}_GT/{}'.format(task_idx,output_type,writer_idx), xnn.utils.flow2hsv(target[b_index][:2].cpu().numpy(), max_value=max_value_flow).transpose(2,0,1), epoch)
            output_writer.add_image(prefix+'Task{}_{}_Output/{}'.format(task_idx,output_type,writer_idx), xnn.utils.flow2hsv(output.data[b_index][:2].cpu().numpy(), max_value=max_value_flow).transpose(2,0,1), epoch)
        elif (output_type == 'interest_pt'):
            score_chanidx = 0
            target_score_to_write = target[b_index][score_chanidx].cpu()
            output_score_to_write = output.data[b_index][score_chanidx].cpu()
            #if score is learnt as zero mean add offset to make it [0-255]
            if args.make_score_zero_mean:
                # target_score_to_write!=0 : value 0 indicates GT unavailble. Leave them to be 0.
                target_score_to_write[target_score_to_write!=0] += 128.0
                output_score_to_write += 128.0
            max_value_score = float(torch.max(target_score_to_write)) #0.002
            output_writer.add_image(prefix+'Task{}_{}_GT_Bone_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(target_score_to_write, max_value=max_value_score, colormap='bone').transpose(2,0,1), epoch)
            output_writer.add_image(prefix+'Task{}_{}_Output_Bone_Visualization/{}'.format(task_idx,output_type,writer_idx), xnn.utils.tensor2array(output_score_to_write, max_value=max_value_score, colormap='bone').transpose(2,0,1), epoch)
#
def print_conf_matrix(conf_matrix=None, en=False):
    """Print a confusion matrix, one row per line, framed by separator rules.

    Args:
        conf_matrix: 2D array (e.g. numpy ndarray) to print. Fix: the mutable
            default `[]` is replaced by None; the old default crashed on
            `.shape` when en was True.
        en: enable flag; nothing is printed when False.
    """
    # guard: disabled, or nothing to print
    if not en or conf_matrix is None or len(conf_matrix) == 0:
        return
    num_rows, num_cols = conf_matrix.shape
    print("-" * 64)
    for r_idx in range(num_rows):
        print("\n")
        # print one element at a time as a length-1 slice, matching the
        # original output format (the original's num_ele was fixed at 1)
        for c_idx in range(num_cols):
            print(conf_matrix[r_idx][c_idx:c_idx + 1], end="")
        print("\n")
    print("-" * 64)
def compute_task_objectives(args, objective_fns, input_var, task_outputs, task_targets, task_mults=None,
                            task_offsets=None, loss_mult_factors=None, get_confusion_matrix=False):
    """Evaluate per-task objective functions and combine them into one total.

    Args:
        args: config namespace (kept for interface compatibility).
        objective_fns: list (one entry per task) of lists of objective
            callables; each must provide .info() returning a dict with 'name'
            and 'is_avg' keys ('confusion_matrix' too when
            get_confusion_matrix is True).
        input_var: model input, forwarded unchanged to each objective fn.
        task_outputs, task_targets: per-task prediction / ground-truth tensors.
        task_mults, task_offsets: optional per-task scale / offset applied to
            the task sum before adding into the total (defaults 1.0 / 0.0).
        loss_mult_factors: optional per-task, per-objective scale factors.
        get_confusion_matrix: if True, append the last-seen confusion matrix
            to the returned list (None when no objective provided one).

    Returns:
        [objective_total, objective_list, objective_names, objective_types,
         objective_list_orig] (+ [confusion_matrix] when requested).
    """
    objective_total = torch.zeros_like(task_outputs[0].view(-1)[0])
    objective_list = []
    objective_list_orig = []
    objective_names = []
    objective_types = []
    # fix: initialize so an empty objective_fns cannot leave this name unbound
    # when get_confusion_matrix is True
    confusion_matrix = None
    for task_idx, task_objectives in enumerate(objective_fns):
        objective_sum_value = torch.zeros_like(task_outputs[task_idx].view(-1)[0])
        objective_sum_name = ''
        objective_sum_type = ''
        task_mult = task_mults[task_idx] if task_mults is not None else 1.0
        task_offset = task_offsets[task_idx] if task_offsets is not None else 0.0
        for oidx, objective_fn in enumerate(task_objectives):
            objective_batch = objective_fn(input_var, task_outputs[task_idx], task_targets[task_idx])
            # DataParallel returns one value per replica - average them
            objective_batch = objective_batch.mean() if isinstance(objective_fn, torch.nn.DataParallel) else objective_batch
            objective_name = objective_fn.info()['name']
            objective_type = objective_fn.info()['is_avg']
            if get_confusion_matrix:
                confusion_matrix = objective_fn.info()['confusion_matrix']
            loss_mult = loss_mult_factors[task_idx][oidx] if (loss_mult_factors is not None) else 1.0
            # guard against NaN batches so one bad batch does not poison the total
            objective_batch_not_nan = (objective_batch if not torch.isnan(objective_batch) else 0.0)
            objective_sum_value = objective_batch_not_nan * loss_mult + objective_sum_value
            objective_sum_name += (objective_name if (objective_sum_name == '') else ('+' + objective_name))
            assert (objective_sum_type == '' or objective_sum_type == objective_type), 'metric types (avg/val) for a given task should match'
            objective_sum_type = objective_type
        objective_list.append(objective_sum_value)
        objective_list_orig.append(objective_sum_value)
        objective_names.append(objective_sum_name)
        objective_types.append(objective_sum_type)
        objective_total = objective_sum_value * task_mult + task_offset + objective_total
    return_list = [objective_total, objective_list, objective_names, objective_types, objective_list_orig]
    if get_confusion_matrix:
        return_list.append(confusion_matrix)
    return return_list
def save_checkpoint(args, save_path, model, checkpoint_dict, is_best, filename='checkpoint.pth'):
    """Save the checkpoint dict; copy it to model_best.pth when is_best.

    When args.save_onnx is set, also export the model as ONNX (and a
    model_best.onnx when is_best).
    """
    checkpoint_file = os.path.join(save_path, filename)
    torch.save(checkpoint_dict, checkpoint_file)
    if is_best:
        shutil.copyfile(checkpoint_file, os.path.join(save_path, 'model_best.pth'))
    if args.save_onnx:
        write_onnx_model(args, model, save_path, name='checkpoint.onnx')
        if is_best:
            write_onnx_model(args, model, save_path, name='model_best.onnx')
def get_dataset_sampler(dataset_object, epoch_size):
    """Build a with-replacement RandomSampler drawing `epoch_size` samples.

    epoch_size < 1 is interpreted as a fraction of the dataset length;
    otherwise it is the absolute number of samples per epoch.
    """
    print('=> creating a random sampler as epoch_size is specified')
    total = len(dataset_object)
    num_samples = int(epoch_size * total) if epoch_size < 1 else int(epoch_size)
    return torch.utils.data.sampler.RandomSampler(
        data_source=dataset_object, replacement=True, num_samples=num_samples)
def get_train_transform(args):
    """Build the training-time augmentation pipeline.

    Mean/scale normalization happens either before (prenorm) or after
    (postnorm) the geometric transforms - never both. The random-crop size is
    args.rand_resize; an optional final rescale to args.rand_output_size is
    applied when it differs from the crop size.
    """
    mean = np.array(args.image_mean, dtype=np.float32)
    scale = np.array(args.image_scale, dtype=np.float32)
    prenorm = image_transforms.NormalizeMeanScale(mean=mean, scale=scale) if args.image_prenorm else None
    postnorm = None if prenorm else image_transforms.NormalizeMeanScale(mean=mean, scale=scale)
    # optional channel reversal (e.g. RGB<->BGR) and random grayscale conversion
    chan_reverse = image_transforms.ReverseImageChannels() if args.input_channel_reverse else None
    to_gray = None
    if args.prob_color_to_gray[0] != 0.0:
        to_gray = image_transforms.RandomColor2Gray(is_flow=args.is_flow, random_threshold=args.prob_color_to_gray[0])
    rotate = image_transforms.RandomRotate(args.transform_rotation, is_flow=args.is_flow) if args.transform_rotation else None
    # crop size used only for training; rescale the crop to the output size if they differ
    output_scaling = None
    if args.rand_output_size is not None and args.rand_output_size != args.rand_resize:
        output_scaling = image_transforms.Scale(args.rand_resize, target_size=args.rand_output_size, is_flow=args.is_flow)
    return image_transforms.Compose([
        chan_reverse,
        prenorm,
        image_transforms.AlignImages(interpolation=args.interpolation),
        image_transforms.MaskTarget(args.target_mask, 0),
        image_transforms.CropRect(args.img_border_crop),
        rotate,
        image_transforms.RandomScaleCrop(args.rand_resize, scale_range=args.rand_scale, is_flow=args.is_flow, interpolation=args.interpolation),
        image_transforms.RandomHorizontalFlip(is_flow=args.is_flow),
        image_transforms.RandomCrop(args.rand_crop),
        to_gray,
        output_scaling,
        postnorm,
        image_transforms.ConvertToTensor()])
def get_validation_transform(args):
    """Build the validation pipeline: deterministic resize, no random crops.

    Mean/scale normalization happens either before (prenorm) or after
    (postnorm) the other transforms - never both. The image is scaled to
    args.output_size so predictions can be evaluated at the target resolution.
    """
    mean = np.array(args.image_mean, dtype=np.float32)
    scale = np.array(args.image_scale, dtype=np.float32)
    prenorm = image_transforms.NormalizeMeanScale(mean=mean, scale=scale) if args.image_prenorm else None
    postnorm = None if prenorm else image_transforms.NormalizeMeanScale(mean=mean, scale=scale)
    # optional channel reversal (e.g. RGB<->BGR) and random grayscale conversion
    chan_reverse = image_transforms.ReverseImageChannels() if args.input_channel_reverse else None
    to_gray = None
    if args.prob_color_to_gray[1] != 0.0:
        to_gray = image_transforms.RandomColor2Gray(is_flow=args.is_flow, random_threshold=args.prob_color_to_gray[1])
    return image_transforms.Compose([
        chan_reverse,
        prenorm,
        image_transforms.AlignImages(interpolation=args.interpolation),
        image_transforms.MaskTarget(args.target_mask, 0),
        image_transforms.CropRect(args.img_border_crop),
        image_transforms.Scale(args.img_resize, target_size=args.output_size, is_flow=args.is_flow, interpolation=args.interpolation),
        to_gray,
        postnorm,
        image_transforms.ConvertToTensor()])
def get_transforms(args):
    """Return (train_transform, val_transform) for the run.

    Passing rand_scale starting at 0 trains with the validation transform
    (fixing the train-test resolution discrepancy,
    https://arxiv.org/abs/1906.06423).
    """
    if args.rand_scale[0] == 0:
        train_transform = get_validation_transform(args)
    else:
        train_transform = get_train_transform(args)
    val_transform = get_validation_transform(args)
    return train_transform, val_transform
def _upsample_impl(tensor, output_size, upsample_mode):
# upsample of long tensor is not supported currently. covert to float, just to avoid error.
# we can do thsi only in the case of nearest mode, otherwise output will have invalid values.
convert_to_float = False
if isinstance(tensor, (torch.LongTensor,torch.cuda.LongTensor)):
convert_to_float = True
original_dtype = tensor.dtype
tensor = tensor.float()
upsample_mode = 'nearest'
dim_added = False
if len(tensor.shape) < 4:
tensor = tensor[np.newaxis,...]
dim_added = True
if (tensor.size()[-2:] != output_size):
tensor = torch.nn.functional.interpolate(tensor, output_size, mode=upsample_mode)
if dim_added:
tensor = tensor[0,...]
if convert_to_float:
tensor = tensor.long() #tensor.astype(original_dtype)
return tensor
def upsample_tensors(tensors, output_sizes, upsample_mode):
    """Upsample a tensor, or each tensor of a list, to its target spatial size.

    For a list, entry i is resized in place to output_sizes[i][-2:]; a single
    tensor is resized to output_sizes[0][-2:].
    """
    if isinstance(tensors, (list, tuple)):
        for idx, t in enumerate(tensors):
            tensors[idx] = _upsample_impl(t, output_sizes[idx][-2:], upsample_mode)
        return tensors
    return _upsample_impl(tensors, output_sizes[0][-2:], upsample_mode)
#print IoU for each class
def print_class_iou(args=None, confusion_matrix=None, task_idx=0):
    """Print per-class IoU for the given task, computed from the confusion matrix."""
    n_classes = args.model_config.output_channels[task_idx]
    [accuracy, mean_iou, iou, f1_score] = compute_accuracy(args, confusion_matrix, n_classes)
    # one "value," entry per class, bracketed (trailing comma kept for parity)
    print("\n Class IoU: [", end="")
    print("".join("{:0.3f},".format(class_iou) for class_iou in iou), end="")
    print("]")
# Script entry point: build the run configuration and launch training.
if __name__ == '__main__':
    train_args = get_config()
    main(train_args)
| [
"torchvision.edgeailite.xvision.transforms.image_transforms.Scale",
"torchvision.edgeailite.xnn.utils.print_once",
"onnx.save",
"torch.max",
"torchvision.edgeailite.xnn.utils.freeze_bn",
"numpy.array",
"onnx.load",
"torch.nn.functional.interpolate",
"copy.deepcopy",
"torchvision.edgeailite.xnn.uti... | [((2284, 2351), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'torch.jit.TracerWarning'}), "('ignore', category=torch.jit.TracerWarning)\n", (2307, 2351), False, 'import warnings\n'), ((13496, 13516), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (13513, 13516), False, 'import cv2\n'), ((2433, 2455), 'torchvision.edgeailite.xnn.utils.ConfigNode', 'xnn.utils.ConfigNode', ([], {}), '()\n', (2453, 2455), False, 'from torchvision.edgeailite import xnn\n'), ((2483, 2505), 'torchvision.edgeailite.xnn.utils.ConfigNode', 'xnn.utils.ConfigNode', ([], {}), '()\n', (2503, 2505), False, 'from torchvision.edgeailite import xnn\n'), ((2766, 2788), 'torchvision.edgeailite.xnn.utils.ConfigNode', 'xnn.utils.ConfigNode', ([], {}), '()\n', (2786, 2788), False, 'from torchvision.edgeailite import xnn\n'), ((16040, 16067), 'random.seed', 'random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (16051, 16067), False, 'import random\n'), ((16072, 16102), 'numpy.random.seed', 'np.random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (16086, 16102), True, 'import numpy as np\n'), ((16107, 16140), 'torch.manual_seed', 'torch.manual_seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (16124, 16140), False, 'import torch\n'), ((16145, 16183), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (16167, 16183), False, 'import torch\n'), ((18530, 18716), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': 'args.use_pinned_memory', 'sampler': 'train_sampler', 'shuffle': 'shuffle_train'}), '(train_dataset, batch_size=args.batch_size,\n num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=\n train_sampler, shuffle=shuffle_train)\n', (18557, 18716), False, 'import torch\n'), ((18905, 19085), 
'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': 'args.use_pinned_memory', 'sampler': 'val_sampler', 'shuffle': 'shuffle_val'}), '(val_dataset, batch_size=args.batch_size,\n num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=\n val_sampler, shuffle=shuffle_val)\n', (18932, 19085), False, 'import torch\n'), ((25647, 25673), 'copy.deepcopy', 'copy.deepcopy', (['args.losses'], {}), '(args.losses)\n', (25660, 25673), False, 'import copy\n'), ((26924, 26951), 'copy.deepcopy', 'copy.deepcopy', (['args.metrics'], {}), '(args.metrics)\n', (26937, 26951), False, 'import copy\n'), ((29772, 30119), 'torchvision.edgeailite.xnn.optim.lr_scheduler.SchedulerWrapper', 'xnn.optim.lr_scheduler.SchedulerWrapper', ([], {'scheduler_type': 'args.scheduler', 'optimizer': 'optimizer', 'epochs': 'args.epochs', 'start_epoch': 'args.start_epoch', 'warmup_epochs': 'args.warmup_epochs', 'warmup_factor': 'args.warmup_factor', 'max_iter': 'max_iter', 'polystep_power': 'args.polystep_power', 'milestones': 'args.milestones', 'multistep_gamma': 'args.multistep_gamma'}), '(scheduler_type=args.scheduler,\n optimizer=optimizer, epochs=args.epochs, start_epoch=args.start_epoch,\n warmup_epochs=args.warmup_epochs, warmup_factor=args.warmup_factor,\n max_iter=max_iter, polystep_power=args.polystep_power, milestones=args.\n milestones, multistep_gamma=args.multistep_gamma)\n', (29811, 30119), False, 'from torchvision.edgeailite import xnn\n'), ((34278, 34302), 'torchvision.edgeailite.xnn.utils.AverageMeter', 'xnn.utils.AverageMeter', ([], {}), '()\n', (34300, 34302), False, 'from torchvision.edgeailite import xnn\n'), ((34319, 34343), 'torchvision.edgeailite.xnn.utils.AverageMeter', 'xnn.utils.AverageMeter', ([], {}), '()\n', (34341, 34343), False, 'from torchvision.edgeailite import xnn\n'), ((36521, 36532), 'time.time', 'time.time', ([], {}), '()\n', (36530, 36532), False, 
'import time\n'), ((43934, 43958), 'torchvision.edgeailite.xnn.utils.AverageMeter', 'xnn.utils.AverageMeter', ([], {}), '()\n', (43956, 43958), False, 'from torchvision.edgeailite import xnn\n'), ((44470, 44481), 'time.time', 'time.time', ([], {}), '()\n', (44479, 44481), False, 'import time\n'), ((48239, 48363), 'os.path.join', 'os.path.join', (['"""./data/checkpoints/edgeailite"""', 'args.dataset_name', "(date + '_' + args.dataset_name + '_' + args.model_name)"], {}), "('./data/checkpoints/edgeailite', args.dataset_name, date + '_' +\n args.dataset_name + '_' + args.model_name)\n", (48251, 48363), False, 'import os\n'), ((48565, 48595), 'os.path.join', 'os.path.join', (['save_path', 'phase'], {}), '(save_path, phase)\n', (48577, 48595), False, 'import os\n'), ((49810, 49859), 'torchvision.edgeailite.xnn.utils.forward_count_flops', 'xnn.utils.forward_count_flops', (['model', 'dummy_input'], {}), '(model, dummy_input)\n', (49839, 49859), False, 'from torchvision.edgeailite import xnn\n'), ((50352, 50378), 'onnx.load', 'onnx.load', (['onnx_model_name'], {}), '(onnx_model_name)\n', (50361, 50378), False, 'import onnx\n'), ((50936, 50974), 'onnx.save', 'onnx.save', (['onnx_model', 'onnx_model_name'], {}), '(onnx_model, onnx_model_name)\n', (50945, 50974), False, 'import onnx\n'), ((51194, 51223), 'os.path.join', 'os.path.join', (['save_path', 'name'], {}), '(save_path, name)\n', (51206, 51223), False, 'import os\n'), ((51245, 51392), 'torch.onnx.export', 'torch.onnx.export', (['model', 'input_list', 'onnx_file'], {'export_params': '(True)', 'verbose': '(False)', 'do_constant_folding': '(True)', 'opset_version': 'args.opset_version'}), '(model, input_list, onnx_file, export_params=True, verbose\n =False, do_constant_folding=True, opset_version=args.opset_version)\n', (51262, 51392), False, 'import torch\n'), ((51548, 51608), 'onnx.shape_inference.infer_shapes_path', 'onnx.shape_inference.infer_shapes_path', (['onnx_file', 'onnx_file'], {}), '(onnx_file, onnx_file)\n', 
(51586, 51608), False, 'import onnx\n'), ((52141, 52159), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (52157, 52159), True, 'import numpy as np\n'), ((52365, 52398), 'random.randint', 'random.randint', (['(0)', '(batch_size - 1)'], {}), '(0, batch_size - 1)\n', (52379, 52398), False, 'import random\n'), ((61826, 61938), 'torch.utils.data.sampler.RandomSampler', 'torch.utils.data.sampler.RandomSampler', ([], {'data_source': 'dataset_object', 'replacement': '(True)', 'num_samples': 'epoch_size'}), '(data_source=dataset_object,\n replacement=True, num_samples=epoch_size)\n', (61864, 61938), False, 'import torch\n'), ((62090, 62133), 'numpy.array', 'np.array', (['args.image_mean'], {'dtype': 'np.float32'}), '(args.image_mean, dtype=np.float32)\n', (62098, 62133), True, 'import numpy as np\n'), ((62152, 62196), 'numpy.array', 'np.array', (['args.image_scale'], {'dtype': 'np.float32'}), '(args.image_scale, dtype=np.float32)\n', (62160, 62196), True, 'import numpy as np\n'), ((63964, 64007), 'numpy.array', 'np.array', (['args.image_mean'], {'dtype': 'np.float32'}), '(args.image_mean, dtype=np.float32)\n', (63972, 64007), True, 'import numpy as np\n'), ((64026, 64070), 'numpy.array', 'np.array', (['args.image_scale'], {'dtype': 'np.float32'}), '(args.image_scale, dtype=np.float32)\n', (64034, 64070), True, 'import numpy as np\n'), ((13642, 13674), 'packaging.version.parse', 'version.parse', (['torch.__version__'], {}), '(torch.__version__)\n', (13655, 13674), False, 'from packaging import version\n'), ((13678, 13698), 'packaging.version.parse', 'version.parse', (['"""1.1"""'], {}), "('1.1')\n", (13691, 13698), False, 'from packaging import version\n'), ((14251, 14312), 'warnings.warn', 'warnings.warn', (['"""switching off bias calibration in validation"""'], {}), "('switching off bias calibration in validation')\n", (14264, 14312), False, 'import warnings\n'), ((15146, 15171), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', 
(15160, 15171), False, 'import os\n'), ((15181, 15203), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (15192, 15203), False, 'import os\n'), ((15341, 15368), 'os.makedirs', 'os.makedirs', (['mod_files_path'], {}), '(mod_files_path)\n', (15352, 15368), False, 'import os\n'), ((15506, 15520), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15515, 15520), False, 'import os\n'), ((15670, 15684), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15679, 15684), False, 'import os\n'), ((16324, 16398), 'warnings.warn', 'warnings.warn', (['"""only one of --iter_size or --total_batch_size must be set"""'], {}), "('only one of --iter_size or --total_batch_size must be set')\n", (16337, 16398), False, 'import warnings\n'), ((25300, 25328), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (25321, 25328), False, 'import torch\n'), ((29359, 29438), 'torch.optim.Adam', 'torch.optim.Adam', (['param_groups', 'learning_rate'], {'betas': '(args.momentum, args.beta)'}), '(param_groups, learning_rate, betas=(args.momentum, args.beta))\n', (29375, 29438), False, 'import torch\n'), ((30474, 30497), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (30484, 30497), False, 'import torch\n'), ((30514, 30555), 'torchvision.edgeailite.xnn.utils.load_weights', 'xnn.utils.load_weights', (['model', 'checkpoint'], {}), '(model, checkpoint)\n', (30536, 30555), False, 'from torchvision.edgeailite import xnn\n'), ((31458, 31485), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {}), '()\n', (31483, 31485), False, 'import torch\n'), ((35039, 35096), 'torchvision.edgeailite.xnn.utils.print_once', 'xnn.utils.print_once', (['"""Freezing BN for subsequent epochs"""'], {}), "('Freezing BN for subsequent epochs')\n", (35059, 35096), False, 'from torchvision.edgeailite import xnn\n'), ((35105, 35131), 'torchvision.edgeailite.xnn.utils.freeze_bn', 'xnn.utils.freeze_bn', (['model'], {}), '(model)\n', (35124, 35131), 
False, 'from torchvision.edgeailite import xnn\n'), ((35218, 35279), 'torchvision.edgeailite.xnn.utils.print_once', 'xnn.utils.print_once', (['"""Freezing ranges for subsequent epochs"""'], {}), "('Freezing ranges for subsequent epochs')\n", (35238, 35279), False, 'from torchvision.edgeailite import xnn\n'), ((35288, 35324), 'torchvision.edgeailite.xnn.layers.freeze_quant_range', 'xnn.layers.freeze_quant_range', (['model'], {}), '(model)\n', (35317, 35324), False, 'from torchvision.edgeailite import xnn\n'), ((36394, 36413), 'numpy.arange', 'np.arange', (['num_iter'], {}), '(num_iter)\n', (36403, 36413), True, 'import numpy as np\n'), ((41934, 41945), 'time.time', 'time.time', ([], {}), '()\n', (41943, 41945), False, 'import time\n'), ((44647, 44666), 'numpy.arange', 'np.arange', (['num_iter'], {}), '(num_iter)\n', (44656, 44666), True, 'import numpy as np\n'), ((47092, 47103), 'time.time', 'time.time', ([], {}), '()\n', (47101, 47103), False, 'import time\n'), ((51659, 51696), 'torch.jit.trace', 'torch.jit.trace', (['model', '(input_list,)'], {}), '(model, (input_list,))\n', (51674, 51696), False, 'import torch\n'), ((51724, 51767), 'os.path.join', 'os.path.join', (['save_path', '"""traced_model.pth"""'], {}), "(save_path, 'traced_model.pth')\n", (51736, 51767), False, 'import os\n'), ((51776, 51822), 'torch.jit.save', 'torch.jit.save', (['traced_model', 'traced_save_path'], {}), '(traced_model, traced_save_path)\n', (51790, 51822), False, 'import torch\n'), ((61201, 61234), 'os.path.join', 'os.path.join', (['save_path', 'filename'], {}), '(save_path, filename)\n', (61213, 61234), False, 'import os\n'), ((62217, 62288), 'torchvision.edgeailite.xvision.transforms.image_transforms.NormalizeMeanScale', 'image_transforms.NormalizeMeanScale', ([], {'mean': 'image_mean', 'scale': 'image_scale'}), '(mean=image_mean, scale=image_scale)\n', (62252, 62288), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((62342, 62413), 
'torchvision.edgeailite.xvision.transforms.image_transforms.NormalizeMeanScale', 'image_transforms.NormalizeMeanScale', ([], {'mean': 'image_mean', 'scale': 'image_scale'}), '(mean=image_mean, scale=image_scale)\n', (62377, 62413), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((62470, 62509), 'torchvision.edgeailite.xvision.transforms.image_transforms.ReverseImageChannels', 'image_transforms.ReverseImageChannels', ([], {}), '()\n', (62507, 62509), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((62569, 62674), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomColor2Gray', 'image_transforms.RandomColor2Gray', ([], {'is_flow': 'args.is_flow', 'random_threshold': 'args.prob_color_to_gray[0]'}), '(is_flow=args.is_flow, random_threshold=\n args.prob_color_to_gray[0])\n', (62602, 62674), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((62790, 62891), 'torchvision.edgeailite.xvision.transforms.image_transforms.Scale', 'image_transforms.Scale', (['args.rand_resize'], {'target_size': 'args.rand_output_size', 'is_flow': 'args.is_flow'}), '(args.rand_resize, target_size=args.rand_output_size,\n is_flow=args.is_flow)\n', (62812, 62891), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64091, 64162), 'torchvision.edgeailite.xvision.transforms.image_transforms.NormalizeMeanScale', 'image_transforms.NormalizeMeanScale', ([], {'mean': 'image_mean', 'scale': 'image_scale'}), '(mean=image_mean, scale=image_scale)\n', (64126, 64162), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64216, 64287), 'torchvision.edgeailite.xvision.transforms.image_transforms.NormalizeMeanScale', 'image_transforms.NormalizeMeanScale', ([], {'mean': 'image_mean', 'scale': 'image_scale'}), '(mean=image_mean, scale=image_scale)\n', (64251, 64287), False, 'from torchvision.edgeailite.xvision.transforms import 
image_transforms\n'), ((64344, 64383), 'torchvision.edgeailite.xvision.transforms.image_transforms.ReverseImageChannels', 'image_transforms.ReverseImageChannels', ([], {}), '()\n', (64381, 64383), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64443, 64548), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomColor2Gray', 'image_transforms.RandomColor2Gray', ([], {'is_flow': 'args.is_flow', 'random_threshold': 'args.prob_color_to_gray[1]'}), '(is_flow=args.is_flow, random_threshold=\n args.prob_color_to_gray[1])\n', (64476, 64548), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((66313, 66385), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['tensor', 'output_size'], {'mode': 'upsample_mode'}), '(tensor, output_size, mode=upsample_mode)\n', (66344, 66385), False, 'import torch\n'), ((17428, 17460), 'os.path.join', 'os.path.join', (['save_path', '"""train"""'], {}), "(save_path, 'train')\n", (17440, 17460), False, 'import os\n'), ((17529, 17559), 'os.path.join', 'os.path.join', (['save_path', '"""val"""'], {}), "(save_path, 'val')\n", (17541, 17559), False, 'import os\n'), ((21730, 21762), 'torchvision.edgeailite.xnn.onnx.import_onnx', 'xnn.onnx.import_onnx', (['args.model'], {}), '(args.model)\n', (21750, 21762), False, 'from torchvision.edgeailite import xnn\n'), ((22435, 22705), 'torchvision.edgeailite.xnn.quantize.QuantTrainModule', 'xnn.quantize.QuantTrainModule', (['model'], {'per_channel_q': 'args.per_channel_q', 'histogram_range': 'args.histogram_range', 'bitwidth_weights': 'args.bitwidth_weights', 'bitwidth_activations': 'args.bitwidth_activations', 'constrain_bias': 'args.constrain_bias', 'dummy_input': 'dummy_input'}), '(model, per_channel_q=args.per_channel_q,\n histogram_range=args.histogram_range, bitwidth_weights=args.\n bitwidth_weights, bitwidth_activations=args.bitwidth_activations,\n constrain_bias=args.constrain_bias, 
dummy_input=dummy_input)\n', (22464, 22705), False, 'from torchvision.edgeailite import xnn\n'), ((28211, 28226), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28224, 28226), False, 'import torch\n'), ((29493, 29561), 'torch.optim.SGD', 'torch.optim.SGD', (['param_groups', 'learning_rate'], {'momentum': 'args.momentum'}), '(param_groups, learning_rate, momentum=args.momentum)\n', (29508, 29561), False, 'import torch\n'), ((30261, 30288), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (30275, 30288), False, 'import os\n'), ((31333, 31348), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (31346, 31348), False, 'import torch\n'), ((32199, 32214), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32212, 32214), False, 'import torch\n'), ((37868, 37901), 'torchvision.edgeailite.xnn.layers.get_loss_scales', 'xnn.layers.get_loss_scales', (['model'], {}), '(model)\n', (37894, 37901), False, 'from torchvision.edgeailite import xnn\n'), ((39031, 39075), 'torchvision.edgeailite.xnn.layers.set_losses', 'xnn.layers.set_losses', (['model', 'loss_list_orig'], {}), '(model, loss_list_orig)\n', (39052, 39075), False, 'from torchvision.edgeailite import xnn\n'), ((49126, 49187), 'torch.rand', 'torch.rand', (['(1, i_ch, args.img_resize[0], args.img_resize[1])'], {}), '((1, i_ch, args.img_resize[0], args.img_resize[1]))\n', (49136, 49187), False, 'import torch\n'), ((49355, 49413), 'torch.rand', 'torch.rand', (['(1, 1, args.img_resize[0], args.img_resize[1])'], {}), '((1, 1, args.img_resize[0], args.img_resize[1]))\n', (49365, 49413), False, 'import torch\n'), ((49431, 49494), 'torch.rand', 'torch.rand', (['(1, 1, args.img_resize[0] // 2, args.img_resize[1])'], {}), '((1, 1, args.img_resize[0] // 2, args.img_resize[1]))\n', (49441, 49494), False, 'import torch\n'), ((52588, 52789), 'torchvision.edgeailite.xvision.transforms.image_transforms_xv12.nv12_to_bgr_image', 'xvision.transforms.image_transforms_xv12.nv12_to_bgr_image', ([], {'Y': 
'input_images[img_idx][0][b_index]', 'UV': 'input_images[img_idx][1][b_index]', 'image_scale': 'args.image_scale', 'image_mean': 'args.image_mean'}), '(Y=input_images[\n img_idx][0][b_index], UV=input_images[img_idx][1][b_index], image_scale\n =args.image_scale, image_mean=args.image_mean)\n', (52646, 52789), False, 'from torchvision.edgeailite import xvision\n'), ((54434, 54490), 'torchvision.edgeailite.xnn.utils.chroma_blend', 'xnn.utils.chroma_blend', (['input_image', 'segmentation_output'], {}), '(input_image, segmentation_output)\n', (54456, 54490), False, 'from torchvision.edgeailite import xnn\n'), ((61275, 61308), 'os.path.join', 'os.path.join', (['save_path', 'filename'], {}), '(save_path, filename)\n', (61287, 61308), False, 'import os\n'), ((61309, 61350), 'os.path.join', 'os.path.join', (['save_path', '"""model_best.pth"""'], {}), "(save_path, 'model_best.pth')\n", (61321, 61350), False, 'import os\n'), ((63099, 63161), 'torchvision.edgeailite.xvision.transforms.image_transforms.AlignImages', 'image_transforms.AlignImages', ([], {'interpolation': 'args.interpolation'}), '(interpolation=args.interpolation)\n', (63127, 63161), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63171, 63219), 'torchvision.edgeailite.xvision.transforms.image_transforms.MaskTarget', 'image_transforms.MaskTarget', (['args.target_mask', '(0)'], {}), '(args.target_mask, 0)\n', (63198, 63219), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63229, 63276), 'torchvision.edgeailite.xvision.transforms.image_transforms.CropRect', 'image_transforms.CropRect', (['args.img_border_crop'], {}), '(args.img_border_crop)\n', (63254, 63276), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63409, 63549), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomScaleCrop', 'image_transforms.RandomScaleCrop', (['args.rand_resize'], {'scale_range': 'args.rand_scale', 'is_flow': 
'args.is_flow', 'interpolation': 'args.interpolation'}), '(args.rand_resize, scale_range=args.\n rand_scale, is_flow=args.is_flow, interpolation=args.interpolation)\n', (63441, 63549), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63554, 63613), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomHorizontalFlip', 'image_transforms.RandomHorizontalFlip', ([], {'is_flow': 'args.is_flow'}), '(is_flow=args.is_flow)\n', (63591, 63613), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63623, 63666), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomCrop', 'image_transforms.RandomCrop', (['args.rand_crop'], {}), '(args.rand_crop)\n', (63650, 63666), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((63758, 63792), 'torchvision.edgeailite.xvision.transforms.image_transforms.ConvertToTensor', 'image_transforms.ConvertToTensor', ([], {}), '()\n', (63790, 63792), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64758, 64820), 'torchvision.edgeailite.xvision.transforms.image_transforms.AlignImages', 'image_transforms.AlignImages', ([], {'interpolation': 'args.interpolation'}), '(interpolation=args.interpolation)\n', (64786, 64820), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64830, 64878), 'torchvision.edgeailite.xvision.transforms.image_transforms.MaskTarget', 'image_transforms.MaskTarget', (['args.target_mask', '(0)'], {}), '(args.target_mask, 0)\n', (64857, 64878), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64888, 64935), 'torchvision.edgeailite.xvision.transforms.image_transforms.CropRect', 'image_transforms.CropRect', (['args.img_border_crop'], {}), '(args.img_border_crop)\n', (64913, 64935), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((64945, 65074), 
'torchvision.edgeailite.xvision.transforms.image_transforms.Scale', 'image_transforms.Scale', (['args.img_resize'], {'target_size': 'args.output_size', 'is_flow': 'args.is_flow', 'interpolation': 'args.interpolation'}), '(args.img_resize, target_size=args.output_size,\n is_flow=args.is_flow, interpolation=args.interpolation)\n', (64967, 65074), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((65126, 65160), 'torchvision.edgeailite.xvision.transforms.image_transforms.ConvertToTensor', 'image_transforms.ConvertToTensor', ([], {}), '()\n', (65158, 65160), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((15895, 15928), 'os.path.join', 'os.path.join', (['save_path', 'log_file'], {}), '(save_path, log_file)\n', (15907, 15928), False, 'import os\n'), ((21102, 21120), 'torch.load', 'torch.load', (['p_file'], {}), '(p_file)\n', (21112, 21120), False, 'import torch\n'), ((22827, 23170), 'torchvision.edgeailite.xnn.quantize.QuantCalibrateModule', 'xnn.quantize.QuantCalibrateModule', (['model'], {'per_channel_q': 'args.per_channel_q', 'bitwidth_weights': 'args.bitwidth_weights', 'bitwidth_activations': 'args.bitwidth_activations', 'histogram_range': 'args.histogram_range', 'constrain_bias': 'args.constrain_bias', 'bias_calibration': 'args.bias_calibration', 'dummy_input': 'dummy_input', 'lr_calib': 'args.lr_calib'}), '(model, per_channel_q=args.per_channel_q,\n bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.\n bitwidth_activations, histogram_range=args.histogram_range,\n constrain_bias=args.constrain_bias, bias_calibration=args.\n bias_calibration, dummy_input=dummy_input, lr_calib=args.lr_calib)\n', (22860, 23170), False, 'from torchvision.edgeailite import xnn\n'), ((36967, 36978), 'time.time', 'time.time', ([], {}), '()\n', (36976, 36978), False, 'import time\n'), ((39801, 39828), 'torchvision.edgeailite.xnn.utils.clear_grad', 'xnn.utils.clear_grad', (['model'], {}), '(model)\n', 
(39821, 39828), False, 'from torchvision.edgeailite import xnn\n'), ((44871, 44882), 'time.time', 'time.time', ([], {}), '()\n', (44880, 44882), False, 'import time\n'), ((48169, 48192), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (48190, 48192), False, 'import datetime\n'), ((54728, 54784), 'torchvision.edgeailite.xnn.utils.chroma_blend', 'xnn.utils.chroma_blend', (['input_image', 'segmentation_target'], {}), '(input_image, segmentation_target)\n', (54750, 54784), False, 'from torchvision.edgeailite import xnn\n'), ((63286, 63362), 'torchvision.edgeailite.xvision.transforms.image_transforms.RandomRotate', 'image_transforms.RandomRotate', (['args.transform_rotation'], {'is_flow': 'args.is_flow'}), '(args.transform_rotation, is_flow=args.is_flow)\n', (63315, 63362), False, 'from torchvision.edgeailite.xvision.transforms import image_transforms\n'), ((15804, 15830), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (15820, 15830), False, 'import os\n'), ((20894, 20939), 'torchvision.edgeailite.xnn.utils.download_url', 'xnn.utils.download_url', (['p', '"""./data/downloads"""'], {}), "(p, './data/downloads')\n", (20916, 20939), False, 'from torchvision.edgeailite import xnn\n'), ((23338, 23658), 'torchvision.edgeailite.xnn.quantize.QuantTestModule', 'xnn.quantize.QuantTestModule', (['model'], {'per_channel_q': 'args.per_channel_q', 'bitwidth_weights': 'args.bitwidth_weights', 'bitwidth_activations': 'args.bitwidth_activations', 'histogram_range': 'args.histogram_range', 'constrain_bias': 'args.constrain_bias', 'dummy_input': 'dummy_input', 'model_surgery_quantize': 'model_surgery_quantize'}), '(model, per_channel_q=args.per_channel_q,\n bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.\n bitwidth_activations, histogram_range=args.histogram_range,\n constrain_bias=args.constrain_bias, dummy_input=dummy_input,\n model_surgery_quantize=model_surgery_quantize)\n', (23366, 23658), False, 'from 
torchvision.edgeailite import xnn\n'), ((60135, 60163), 'torch.isnan', 'torch.isnan', (['objective_batch'], {}), '(objective_batch)\n', (60146, 60163), False, 'import torch\n'), ((27814, 27850), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['metric_fn_raw'], {}), '(metric_fn_raw)\n', (27835, 27850), False, 'import torch\n'), ((26564, 26598), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['loss_fn_raw'], {}), '(loss_fn_raw)\n', (26585, 26598), False, 'import torch\n'), ((57694, 57726), 'torch.max', 'torch.max', (['target_score_to_write'], {}), '(target_score_to_write)\n', (57703, 57726), False, 'import torch\n'), ((57856, 57949), 'torchvision.edgeailite.xnn.utils.tensor2array', 'xnn.utils.tensor2array', (['target_score_to_write'], {'max_value': 'max_value_score', 'colormap': '"""bone"""'}), "(target_score_to_write, max_value=max_value_score,\n colormap='bone')\n", (57878, 57949), False, 'from torchvision.edgeailite import xnn\n'), ((58096, 58189), 'torchvision.edgeailite.xnn.utils.tensor2array', 'xnn.utils.tensor2array', (['output_score_to_write'], {'max_value': 'max_value_score', 'colormap': '"""bone"""'}), "(output_score_to_write, max_value=max_value_score,\n colormap='bone')\n", (58118, 58189), False, 'from torchvision.edgeailite import xnn\n')] |
# -*- coding: utf-8 -*-
import datetime
import data_base_manipulation as db
import analysis_files_manipulation as fm
import caiman as cm
from caiman.source_extraction import cnmf
from caiman.source_extraction.cnmf import params as params
import caiman.base.rois
import logging
import numpy as np
import os
import psutil
#step = 'source_extraction'
#%% MAIN
def run_source_extraction(row, parameters, dview, session_wise = False):
'''
This is the function for source extraction.
Its goal is to take in a .mmap file,
perform source extraction on it using cnmf-e and save the cnmf object as a .pkl file.
This function is only runnable on the cn76 server because it requires parralel processing.
Args:
row: pd.DataFrame object
The row corresponding to the analysis state to be source extracted.
Returns:
row: pd.DataFrame object
The row corresponding to the source extracted analysis state.
'''
step_index = 4
row_local = row.copy()
row_local.loc['source_extraction_parameters'] = str(parameters)
row_local = db.set_version_analysis('source_extraction',row_local,session_wise)
index = row_local.name
# Determine input path
if parameters['session_wise']:
input_mmap_file_path = eval(row_local.loc['alignment_output'])['main']
else:
input_mmap_file_path = eval(row_local.loc['motion_correction_output'])['main']
if not os.path.isfile(input_mmap_file_path):
logging.error('Input file does not exist. Cancelling.')
return row_local
# Determine output paths
file_name = db.create_file_name(step_index, index)
data_dir = 'data/interim/source_extraction/session_wise/' if parameters['session_wise'] else 'data/interim/source_extraction/trial_wise/'
output_file_path = data_dir + f'main/{file_name}.hdf5'
# Create a dictionary with parameters
output = {
'main': output_file_path,
'meta':{
'analysis' : {
'analyst' : os.environ['ANALYST'],
'date' : datetime.datetime.today().strftime("%m-%d-%Y"),
'time' : datetime.datetime.today().strftime("%H:%M:%S"),
},
'duration': {}
}
}
# Load memmory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{index} .mmap file does not exist. Cancelling')
return row_local
# SOURCE EXTRACTION
# Check if the summary images are already there
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(index, gSig_abs = parameters['gSig'][0])
if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{index} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{index} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig = parameters['gSig'][0], swap_dim=False)
dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
output['meta']['duration']['summary_images'] = dt
logging.info(f'{index} Computed summary images. dt = {dt} min')
# Saving summary images as npy files
gSig = parameters['gSig'][0]
corr_npy_file_path = data_dir + f'/meta/corr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'/meta/pnr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
# Store the paths in the meta dictionary
output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}
output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}
# Calculate min, mean, max value for cn_filter and pnr
corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()
output['meta']['corr']['meta'] = {'min': corr_min, 'mean': corr_mean, 'max': corr_max}
pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()
output['meta']['pnr']['meta'] = {'min': pnr_min, 'mean': pnr_mean, 'max': pnr_max}
# If min_corr and min_pnr are specified via a linear equation, calculate
# this value
if type(parameters['min_corr']) == list:
min_corr = parameters['min_corr'][0]*corr_mean + parameters['min_corr'][1]
parameters['min_corr'] = min_corr
logging.info(f'{index} Automatically setting min_corr = {min_corr}')
if type(parameters['min_pnr']) == list:
min_pnr = parameters['min_pnr'][0]*pnr_mean + parameters['min_pnr'][1]
parameters['min_pnr'] = min_pnr
logging.info(f'{index} Automatically setting min_pnr = {min_pnr}')
# Set the parameters for caiman
opts = params.CNMFParams(params_dict = parameters)
# SOURCE EXTRACTION
logging.info(f'{index} Performing source extraction')
t0 = datetime.datetime.today()
n_processes = psutil.cpu_count()
logging.info(f'{index} n_processes: {n_processes}')
cnm = cnmf.CNMF(n_processes = n_processes, dview = dview, params = opts)
cnm.fit(images)
cnm.estimates.dims = dims
# Store the number of neurons
output['meta']['K'] = len(cnm.estimates.C)
# Calculate the center of masses
cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A, images.shape[1], images.shape[2])
# Save the cnmf object as a hdf5 file
logging.info(f'{index} Saving cnmf object')
cnm.save(output_file_path)
dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
output['meta']['duration']['source_extraction'] = dt
logging.info(f'{index} Source extraction finished. dt = {dt} min')
# Write necessary variables in row and return
row_local.loc['source_extraction_parameters'] = str(parameters)
row_local.loc['source_extraction_output'] = str(output)
return row_local
| [
"caiman.load_memmap",
"logging.warning",
"analysis_files_manipulation.get_corr_pnr_path",
"caiman.source_extraction.cnmf.params.CNMFParams",
"os.path.isfile",
"psutil.cpu_count",
"caiman.source_extraction.cnmf.CNMF",
"data_base_manipulation.create_file_name",
"datetime.datetime.today",
"caiman.sum... | [((1133, 1202), 'data_base_manipulation.set_version_analysis', 'db.set_version_analysis', (['"""source_extraction"""', 'row_local', 'session_wise'], {}), "('source_extraction', row_local, session_wise)\n", (1156, 1202), True, 'import data_base_manipulation as db\n'), ((1656, 1694), 'data_base_manipulation.create_file_name', 'db.create_file_name', (['step_index', 'index'], {}), '(step_index, index)\n', (1675, 1694), True, 'import data_base_manipulation as db\n'), ((2430, 2466), 'os.path.isfile', 'os.path.isfile', (['input_mmap_file_path'], {}), '(input_mmap_file_path)\n', (2444, 2466), False, 'import os\n'), ((2888, 2947), 'analysis_files_manipulation.get_corr_pnr_path', 'fm.get_corr_pnr_path', (['index'], {'gSig_abs': "parameters['gSig'][0]"}), "(index, gSig_abs=parameters['gSig'][0])\n", (2908, 2947), True, 'import analysis_files_manipulation as fm\n'), ((5365, 5406), 'caiman.source_extraction.cnmf.params.CNMFParams', 'params.CNMFParams', ([], {'params_dict': 'parameters'}), '(params_dict=parameters)\n', (5382, 5406), True, 'from caiman.source_extraction.cnmf import params as params\n'), ((5446, 5499), 'logging.info', 'logging.info', (['f"""{index} Performing source extraction"""'], {}), "(f'{index} Performing source extraction')\n", (5458, 5499), False, 'import logging\n'), ((5509, 5534), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5532, 5534), False, 'import datetime\n'), ((5553, 5571), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (5569, 5571), False, 'import psutil\n'), ((5576, 5627), 'logging.info', 'logging.info', (['f"""{index} n_processes: {n_processes}"""'], {}), "(f'{index} n_processes: {n_processes}')\n", (5588, 5627), False, 'import logging\n'), ((5638, 5698), 'caiman.source_extraction.cnmf.CNMF', 'cnmf.CNMF', ([], {'n_processes': 'n_processes', 'dview': 'dview', 'params': 'opts'}), '(n_processes=n_processes, dview=dview, params=opts)\n', (5647, 5698), False, 'from caiman.source_extraction 
import cnmf\n'), ((6038, 6081), 'logging.info', 'logging.info', (['f"""{index} Saving cnmf object"""'], {}), "(f'{index} Saving cnmf object')\n", (6050, 6081), False, 'import logging\n'), ((6255, 6321), 'logging.info', 'logging.info', (['f"""{index} Source extraction finished. dt = {dt} min"""'], {}), "(f'{index} Source extraction finished. dt = {dt} min')\n", (6267, 6321), False, 'import logging\n'), ((1479, 1515), 'os.path.isfile', 'os.path.isfile', (['input_mmap_file_path'], {}), '(input_mmap_file_path)\n', (1493, 1515), False, 'import os\n'), ((1525, 1580), 'logging.error', 'logging.error', (['"""Input file does not exist. Cancelling."""'], {}), "('Input file does not exist. Cancelling.')\n", (1538, 1580), False, 'import logging\n'), ((2490, 2526), 'caiman.load_memmap', 'cm.load_memmap', (['input_mmap_file_path'], {}), '(input_mmap_file_path)\n', (2504, 2526), True, 'import caiman as cm\n'), ((2672, 2737), 'logging.warning', 'logging.warning', (['f"""{index} .mmap file does not exist. Cancelling"""'], {}), "(f'{index} .mmap file does not exist. 
Cancelling')\n", (2687, 2737), False, 'import logging\n'), ((2993, 3027), 'os.path.isfile', 'os.path.isfile', (['corr_npy_file_path'], {}), '(corr_npy_file_path)\n', (3007, 3027), False, 'import os\n'), ((3081, 3137), 'logging.info', 'logging.info', (['f"""{index} Already computed summary images"""'], {}), "(f'{index} Already computed summary images')\n", (3093, 3137), False, 'import logging\n'), ((3158, 3185), 'numpy.load', 'np.load', (['corr_npy_file_path'], {}), '(corr_npy_file_path)\n', (3165, 3185), True, 'import numpy as np\n'), ((3200, 3226), 'numpy.load', 'np.load', (['pnr_npy_file_path'], {}), '(pnr_npy_file_path)\n', (3207, 3226), True, 'import numpy as np\n'), ((3283, 3308), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3306, 3308), False, 'import datetime\n'), ((3317, 3366), 'logging.info', 'logging.info', (['f"""{index} Computing summary images"""'], {}), "(f'{index} Computing summary images')\n", (3329, 3366), False, 'import logging\n'), ((3392, 3486), 'caiman.summary_images.correlation_pnr', 'cm.summary_images.correlation_pnr', (['images[::1]'], {'gSig': "parameters['gSig'][0]", 'swap_dim': '(False)'}), "(images[::1], gSig=parameters['gSig'][0],\n swap_dim=False)\n", (3425, 3486), True, 'import caiman as cm\n'), ((3637, 3700), 'logging.info', 'logging.info', (['f"""{index} Computed summary images. dt = {dt} min"""'], {}), "(f'{index} Computed summary images. 
dt = {dt} min')\n", (3649, 3700), False, 'import logging\n'), ((5009, 5077), 'logging.info', 'logging.info', (['f"""{index} Automatically setting min_corr = {min_corr}"""'], {}), "(f'{index} Automatically setting min_corr = {min_corr}')\n", (5021, 5077), False, 'import logging\n'), ((5250, 5316), 'logging.info', 'logging.info', (['f"""{index} Automatically setting min_pnr = {min_pnr}"""'], {}), "(f'{index} Automatically setting min_pnr = {min_pnr}')\n", (5262, 5316), False, 'import logging\n'), ((4047, 4068), 'numpy.save', 'np.save', (['f', 'cn_filter'], {}), '(f, cn_filter)\n', (4054, 4068), True, 'import numpy as np\n'), ((4130, 4145), 'numpy.save', 'np.save', (['f', 'pnr'], {}), '(f, pnr)\n', (4137, 4145), True, 'import numpy as np\n'), ((3837, 3866), 'data_base_manipulation.create_file_name', 'db.create_file_name', (['(3)', 'index'], {}), '(3, index)\n', (3856, 3866), True, 'import data_base_manipulation as db\n'), ((3937, 3966), 'data_base_manipulation.create_file_name', 'db.create_file_name', (['(3)', 'index'], {}), '(3, index)\n', (3956, 3966), True, 'import data_base_manipulation as db\n'), ((6127, 6152), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (6150, 6152), False, 'import datetime\n'), ((2148, 2173), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2171, 2173), False, 'import datetime\n'), ((2229, 2254), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2252, 2254), False, 'import datetime\n'), ((3503, 3528), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3526, 3528), False, 'import datetime\n')] |
import sys
import numpy as np
n, h, a, b, c, d, e = map(int, sys.stdin.read().split())
def cost(x, y):
return a * x + c * y
def main():
x = np.arange(n + 1, dtype=np.int64)
y = (n * e - (b + e) * x - h + (d + e)) // (d + e)
y = np.maximum(y, 0) # マイナス日食べることはできない
y = np.minimum(
y, n - x
) # xを固定した時にx+y(x+y >= n+1)日以上食べ続けないと体調を崩す場合でも、n日までのことだけを考えれば良い。
costs = cost(x, y)
return np.amin(costs)
if __name__ == "__main__":
ans = main()
print(ans)
| [
"numpy.amin",
"numpy.minimum",
"sys.stdin.read",
"numpy.maximum",
"numpy.arange"
] | [((166, 198), 'numpy.arange', 'np.arange', (['(n + 1)'], {'dtype': 'np.int64'}), '(n + 1, dtype=np.int64)\n', (175, 198), True, 'import numpy as np\n'), ((264, 280), 'numpy.maximum', 'np.maximum', (['y', '(0)'], {}), '(y, 0)\n', (274, 280), True, 'import numpy as np\n'), ((309, 329), 'numpy.minimum', 'np.minimum', (['y', '(n - x)'], {}), '(y, n - x)\n', (319, 329), True, 'import numpy as np\n'), ((448, 462), 'numpy.amin', 'np.amin', (['costs'], {}), '(costs)\n', (455, 462), True, 'import numpy as np\n'), ((67, 83), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (81, 83), False, 'import sys\n')] |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Callable
from functools import partial
from absl.testing import absltest
import numpy as np
from flax.core import Scope, Array, init, apply, unfreeze, lift, nn
import jax
from jax import random, numpy as jnp
def mlp_custom_grad(scope: Scope, x: Array,
sizes: Sequence[int] = (8, 1),
act_fn: Callable[[Array], Array] = nn.relu):
f = nn.dense
def fwd(scope, x, features):
y, vjp_fn = lift.vjp(partial(f, features=features), scope, x)
return y, vjp_fn
def bwd(features, res, y_t):
del features
vjp_fn = res
input_t, params_t = vjp_fn(y_t)
params_t = jax.tree_map(jnp.sign, params_t)
return input_t, params_t
dense_custom_grad = lift.custom_vjp(
f, forward_fn=fwd, backward_fn=bwd, nondiff_argnums=(2,))
# hidden layers
for size in sizes[:-1]:
x = scope.child(dense_custom_grad, prefix='hidden_')(x, size)
x = act_fn(x)
# output layer
return scope.child(dense_custom_grad, 'out')(x, sizes[-1])
class CustomVJPTest(absltest.TestCase):
def test_custom_vjp(self):
x = random.normal(random.PRNGKey(0), (1, 4))
y, variables = init(mlp_custom_grad)(random.PRNGKey(1), x)
param_shapes = unfreeze(
jax.tree_map(jnp.shape, variables['params']))
loss_fn = lambda p, x: jnp.mean(apply(mlp_custom_grad)(p, x) ** 2)
grad = jax.grad(loss_fn)(variables, x)
grad_shapes = unfreeze(
jax.tree_map(jnp.shape, grad['params']))
self.assertEqual(y.shape, (1, 1))
expected_param_shapes = {
'hidden_0': {'kernel': (4, 8), 'bias': (8,)},
'out': {'kernel': (8, 1), 'bias': (1,)},
}
self.assertEqual(param_shapes, expected_param_shapes)
self.assertEqual(grad_shapes, expected_param_shapes)
for g in jax.tree_leaves(grad):
self.assertTrue(np.all(g == np.sign(g)))
if __name__ == '__main__':
absltest.main()
| [
"flax.core.lift.custom_vjp",
"jax.random.PRNGKey",
"absl.testing.absltest.main",
"jax.tree_map",
"jax.tree_leaves",
"jax.grad",
"functools.partial",
"numpy.sign",
"flax.core.init",
"flax.core.apply"
] | [((1323, 1396), 'flax.core.lift.custom_vjp', 'lift.custom_vjp', (['f'], {'forward_fn': 'fwd', 'backward_fn': 'bwd', 'nondiff_argnums': '(2,)'}), '(f, forward_fn=fwd, backward_fn=bwd, nondiff_argnums=(2,))\n', (1338, 1396), False, 'from flax.core import Scope, Array, init, apply, unfreeze, lift, nn\n'), ((2476, 2491), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2489, 2491), False, 'from absl.testing import absltest\n'), ((1238, 1270), 'jax.tree_map', 'jax.tree_map', (['jnp.sign', 'params_t'], {}), '(jnp.sign, params_t)\n', (1250, 1270), False, 'import jax\n'), ((2375, 2396), 'jax.tree_leaves', 'jax.tree_leaves', (['grad'], {}), '(grad)\n', (2390, 2396), False, 'import jax\n'), ((1059, 1088), 'functools.partial', 'partial', (['f'], {'features': 'features'}), '(f, features=features)\n', (1066, 1088), False, 'from functools import partial\n'), ((1706, 1723), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1720, 1723), False, 'from jax import random, numpy as jnp\n'), ((1752, 1773), 'flax.core.init', 'init', (['mlp_custom_grad'], {}), '(mlp_custom_grad)\n', (1756, 1773), False, 'from flax.core import Scope, Array, init, apply, unfreeze, lift, nn\n'), ((1774, 1791), 'jax.random.PRNGKey', 'random.PRNGKey', (['(1)'], {}), '(1)\n', (1788, 1791), False, 'from jax import random, numpy as jnp\n'), ((1833, 1877), 'jax.tree_map', 'jax.tree_map', (['jnp.shape', "variables['params']"], {}), "(jnp.shape, variables['params'])\n", (1845, 1877), False, 'import jax\n'), ((1961, 1978), 'jax.grad', 'jax.grad', (['loss_fn'], {}), '(loss_fn)\n', (1969, 1978), False, 'import jax\n'), ((2029, 2068), 'jax.tree_map', 'jax.tree_map', (['jnp.shape', "grad['params']"], {}), "(jnp.shape, grad['params'])\n", (2041, 2068), False, 'import jax\n'), ((1915, 1937), 'flax.core.apply', 'apply', (['mlp_custom_grad'], {}), '(mlp_custom_grad)\n', (1920, 1937), False, 'from flax.core import Scope, Array, init, apply, unfreeze, lift, nn\n'), ((2432, 2442), 
'numpy.sign', 'np.sign', (['g'], {}), '(g)\n', (2439, 2442), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from astropy.utils.data import download_file
from jdaviz.app import Application
# This file is originally from
# https://data.sdss.org/sas/dr14/manga/spectro/redux/v2_1_2/7495/stack/manga-7495-12704-LOGCUBE.fits.gz
URL = 'https://stsci.box.com/shared/static/28a88k1qfipo4yxc4p4d40v4axtlal8y.fits'
""" The purpose of this test is to check that both methods:
- app.get_viewer('spectrum-viewer').data()
- app.get_data_from_viewer("spectrum-viewer")
return the same spectrum values.
"""
@pytest.fixture
def jdaviz_app():
return Application(configuration='cubeviz')
@pytest.mark.filterwarnings('ignore')
@pytest.mark.remote_data
def test_data_retrieval(jdaviz_app):
fn = download_file(URL, cache=True)
jdaviz_app.load_data(fn)
# two ways of retrieving data from the viewer.
# They should return the same spectral values
a1 = jdaviz_app.get_viewer('spectrum-viewer').data()
a2 = jdaviz_app.get_data_from_viewer("spectrum-viewer")
test_value_1 = a1[0].data
test_value_2 = list(a2.values())[0].data
assert np.allclose(test_value_1, test_value_2, atol=1e-5)
| [
"astropy.utils.data.download_file",
"jdaviz.app.Application",
"numpy.allclose",
"pytest.mark.filterwarnings"
] | [((628, 664), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (654, 664), False, 'import pytest\n'), ((588, 624), 'jdaviz.app.Application', 'Application', ([], {'configuration': '"""cubeviz"""'}), "(configuration='cubeviz')\n", (599, 624), False, 'from jdaviz.app import Application\n'), ((737, 767), 'astropy.utils.data.download_file', 'download_file', (['URL'], {'cache': '(True)'}), '(URL, cache=True)\n', (750, 767), False, 'from astropy.utils.data import download_file\n'), ((1104, 1155), 'numpy.allclose', 'np.allclose', (['test_value_1', 'test_value_2'], {'atol': '(1e-05)'}), '(test_value_1, test_value_2, atol=1e-05)\n', (1115, 1155), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from plot_one import plot_me_one
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.integrate import simps
M_sun=1.989*10**30;
R_sun=696340*10**3;
M=0.62*M_sun
r_star=0.0151*R_sun
G=6.67408*10**(-11);
####
####
#### This code will create the sandbox and allow user to play around with densities. To begin one needs a density to start with.
#### You can generate one by running one of the other programs.
#### The controls are:
####
#### c - switches between drawing circles and drawing by hand. Circles are drawn between inner and outer radius
#### B - sets color/density to 0
#### b - decreases current color/density by 1
#### w - increases current color/density by 1
#### backspace - Plot the emission lines from current density
#### Esc - close
####
img=np.load("density_todraw.npy")
# variables
ix = -1
iy = -1
drawing = False
size=img.shape[0]
color=1
circle=True
consts={'e':0.0,
'b':0.0,
'view_angle':np.pi/2,
'inclination_angle':np.pi/5,
'r_max':550*r_star,
'r_min':r_star
}
def on_change(val):
consts['b']=4*(val-100)/100
print(4*(val-100)/100)
def draw_rectangle_with_drag(event, x, y, flags, param):
    """OpenCV mouse callback that paints into the global density image ``img``.

    Two modes, selected by the global ``circle`` flag:
      * circle mode: first click records an anchor radius from the image
        centre; second click draws an annulus between the two radii.
      * freehand mode: click-drag draws thick line segments along the path.

    Mutates the module globals ix, iy, ir, drawing and img.
    """
    global ix, iy,ir, drawing, img
    if event == cv2.EVENT_LBUTTONDOWN and circle:
        # Circle mode: clicks alternate between "set anchor" and "draw annulus".
        if not drawing:
            # First click: remember the anchor point and its distance from centre.
            ix = x
            iy = y
            ir = np.sqrt((ix-size//2)**2+(iy-size//2)**2)
        if drawing:
            # Second click: draw a ring centred on the image centre whose mean
            # radius is (r+ir)/2 and whose thickness spans the two radii.
            r = np.sqrt((x-size//2)**2+(y-size//2)**2)
            print(r,ir)
            cv2.circle(img, (size//2, size//2), ((r+ir)/2).astype(int), color=color, thickness=np.abs((r-ir)/2).astype(int))
            print('drawn 1')
        print(x,y)
        # Toggle between the two click phases.
        drawing = not drawing
    if event == cv2.EVENT_LBUTTONDOWN and not circle:
        # Freehand mode: start a stroke at the press position.
        drawing = True
        ix=x
        iy=y
    elif event == cv2.EVENT_MOUSEMOVE and not circle:
        if drawing == True:
            # Extend the stroke with a 50-px-wide segment and advance the anchor.
            cv2.line(img,(ix,iy),(x,y),color,50)
            ix=x
            iy=y
    elif event == cv2.EVENT_LBUTTONUP and not circle:
        if(drawing):
            # Close out the stroke with a final segment to the release point.
            cv2.line(img,(ix,iy),(x,y),color,50)
        drawing = False
# --- GUI setup: density window, emissivity trackbar, mouse painting callback ---
cv2.namedWindow(winname = "Density of gas")
# Trackbar position 0-200 is remapped to b in [-4, 4] by on_change.
cv2.createTrackbar('Emissivity(b)', "Density of gas", 100, 200, on_change)
cv2.setMouseCallback("Density of gas",
                     draw_rectangle_with_drag)
# Matplotlib figure for the (normalised) observed line profiles plus model output.
fig_hist = plt.figure(1)
ax_hist = fig_hist.add_subplot(1, 1, 1)
plt.ion()  # interactive mode so plot_me_one updates appear without blocking
plt.xlabel("Velocity/Wavelength")
plt.ylabel("Flux")
# Overlay the observed SiII profiles from each instrument, continuum-subtracted
# and normalised to unit area (Simpson's rule integral).
inst_names=['Xshooter','MIKE2']
for j,inst_name in enumerate(inst_names):
    x,y=np.loadtxt('data/SiII'+'_'+inst_name+'.csv', delimiter=',', unpack=True)
    area = simps((y-1),x)
    y=(y-1)/area
    ax_hist.plot(x,y, linewidth=1,label=inst_name)
# --- Main event loop: show the density map and react to key presses ---
while True:
    # imgC = cv2.applyColorMap(img, cv2.COLORMAP_JET)
    # Rescale to [0, 1] for display; avoid division by zero on an all-zero map.
    if img.max()!=0: cv2.imshow("Density of gas", img/img.max())
    else: cv2.imshow("Density of gas", img)
    k = cv2.waitKey(33)  # ~30 fps poll; returns -1 when no key is pressed
    if k == 27:
        # Esc: quit.
        break
    elif k== ord(' '):
        # Space: render the emission-line profile for the current density.
        print('Plotting')
        plot_me_one(img,ax_hist,consts)
        plt.show()
        plt.pause(0.001)  # let the interactive figure redraw
    elif k== ord('B'):
        color=0
        print('Density now: '+str(color))
    elif k== ord('b'):
        # NOTE(review): no lower bound — color can go negative; confirm intended.
        color-=1
        print('Density now: '+str(color))
    elif k== ord('w'):
        color+=1
        print('Density now: '+str(color))
    elif k== ord('c'):
        # Toggle circle/freehand mode and cancel any in-progress stroke.
        circle = not circle
        drawing=False
        if(circle):
            print('Now in circle mode')
        else:
            print('Now in drawing mode')
cv2.destroyAllWindows()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.setMouseCallback",
"matplotlib.pyplot.xlabel",
"cv2.line",
"cv2.waitKey",
"numpy.abs",
"scipy.integrate.simps",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ion",
"cv2.createTrackbar",
"cv2.namedWindow",... | [((809, 838), 'numpy.load', 'np.load', (['"""density_todraw.npy"""'], {}), "('density_todraw.npy')\n", (816, 838), True, 'import numpy as np\n'), ((2112, 2153), 'cv2.namedWindow', 'cv2.namedWindow', ([], {'winname': '"""Density of gas"""'}), "(winname='Density of gas')\n", (2127, 2153), False, 'import cv2\n'), ((2156, 2230), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Emissivity(b)"""', '"""Density of gas"""', '(100)', '(200)', 'on_change'], {}), "('Emissivity(b)', 'Density of gas', 100, 200, on_change)\n", (2174, 2230), False, 'import cv2\n'), ((2231, 2295), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Density of gas"""', 'draw_rectangle_with_drag'], {}), "('Density of gas', draw_rectangle_with_drag)\n", (2251, 2295), False, 'import cv2\n'), ((2330, 2343), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2340, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2393), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2391, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2427), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Velocity/Wavelength"""'], {}), "('Velocity/Wavelength')\n", (2404, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2446), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (2438, 2446), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3519), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3517, 3519), False, 'import cv2\n'), ((2529, 2607), 'numpy.loadtxt', 'np.loadtxt', (["('data/SiII' + '_' + inst_name + '.csv')"], {'delimiter': '""","""', 'unpack': '(True)'}), "('data/SiII' + '_' + inst_name + '.csv', delimiter=',', unpack=True)\n", (2539, 2607), True, 'import numpy as np\n'), ((2614, 2629), 'scipy.integrate.simps', 'simps', (['(y - 1)', 'x'], {}), '(y - 1, x)\n', (2619, 2629), False, 'from scipy.integrate import simps\n'), ((2882, 2897), 'cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (2893, 
2897), False, 'import cv2\n'), ((2840, 2873), 'cv2.imshow', 'cv2.imshow', (['"""Density of gas"""', 'img'], {}), "('Density of gas', img)\n", (2850, 2873), False, 'import cv2\n'), ((1342, 1396), 'numpy.sqrt', 'np.sqrt', (['((ix - size // 2) ** 2 + (iy - size // 2) ** 2)'], {}), '((ix - size // 2) ** 2 + (iy - size // 2) ** 2)\n', (1349, 1396), True, 'import numpy as np\n'), ((1419, 1471), 'numpy.sqrt', 'np.sqrt', (['((x - size // 2) ** 2 + (y - size // 2) ** 2)'], {}), '((x - size // 2) ** 2 + (y - size // 2) ** 2)\n', (1426, 1471), True, 'import numpy as np\n'), ((2985, 3018), 'plot_one.plot_me_one', 'plot_me_one', (['img', 'ax_hist', 'consts'], {}), '(img, ax_hist, consts)\n', (2996, 3018), False, 'from plot_one import plot_me_one\n'), ((3025, 3035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3033, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3060), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3053, 3060), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1926), 'cv2.line', 'cv2.line', (['img', '(ix, iy)', '(x, y)', 'color', '(50)'], {}), '(img, (ix, iy), (x, y), color, 50)\n', (1892, 1926), False, 'import cv2\n'), ((2043, 2085), 'cv2.line', 'cv2.line', (['img', '(ix, iy)', '(x, y)', 'color', '(50)'], {}), '(img, (ix, iy), (x, y), color, 50)\n', (2051, 2085), False, 'import cv2\n'), ((1577, 1597), 'numpy.abs', 'np.abs', (['((r - ir) / 2)'], {}), '((r - ir) / 2)\n', (1583, 1597), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.