code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import attr
import struct
import msprime
import tskit
import kastore
import json
from collections import OrderedDict
import warnings
import numpy as np
from .slim_metadata import *
from .provenance import *
from .slim_metadata import _decode_mutation_pre_nucleotides
# Bit flags used by SLiM in the `flags` column of the individual table.
INDIVIDUAL_ALIVE = 2**16
INDIVIDUAL_REMEMBERED = 2**17
INDIVIDUAL_FIRST_GEN = 2**18
# A nucleotide k in mutation metadata actually means
# something that in reference_sequence is NUCLEOTIDES[k]
NUCLEOTIDES = ['A', 'C', 'G', 'T']
def load(path):
    '''
    Load the SLiM-compatible tree sequence found in the .trees file at ``path``.

    :param string path: The path to a .trees file.
    '''
    return SlimTreeSequence.load(path)
def load_tables(tables, **kwargs):
    '''
    See :func:`SlimTreeSequence.load_tables`.

    :param TableCollection tables: A set of tables.
    '''
    return SlimTreeSequence.load_tables(tables, **kwargs)
def annotate_defaults(ts, model_type, slim_generation, reference_sequence=None):
    '''
    Takes a tree sequence (as produced by msprime, for instance), and adds in the
    information necessary for SLiM to use it as an initial state, filling in
    mostly default values. Returns a :class:`SlimTreeSequence`.

    :param TreeSequence ts: A :class:`TreeSequence`.
    :param string model_type: SLiM model type: either "WF" or "nonWF".
    :param int slim_generation: What generation number in SLiM correponds to
        ``time=0`` in the tree sequence.
    :param string reference_sequence: An optional reference sequence for
        nucleotide models, passed through to the resulting tree sequence.
    '''
    # Work on a mutable copy of the tables, annotate it, then rebuild.
    new_tables = ts.dump_tables()
    annotate_defaults_tables(new_tables, model_type, slim_generation)
    return SlimTreeSequence.load_tables(
        new_tables, reference_sequence=reference_sequence)
def annotate_defaults_tables(tables, model_type, slim_generation):
    '''
    Does the work of :func:`annotate_defaults()`, but modifies the tables in place: so,
    takes tables as produced by ``msprime``, and makes them look like the
    tables as output by SLiM. See :func:`annotate_defaults` for details.

    :param TableCollection tables: The tables, modified in place.
    :param string model_type: SLiM model type: either "WF" or "nonWF".
    :param int slim_generation: The SLiM generation corresponding to ``time=0``.
    :raises ValueError: If ``slim_generation`` is not a positive integer or
        ``model_type`` is not "WF" or "nonWF".
    '''
    # isinstance() is the idiomatic type check; bool is excluded explicitly
    # because it is an int subclass and the original `type(x) is int` test
    # rejected it.
    if (not isinstance(slim_generation, int)
            or isinstance(slim_generation, bool)
            or slim_generation < 1):
        raise ValueError("SLiM generation must be an integer and at least 1.")
    # In WF models SLiM does not track age and stores -1; nonWF individuals
    # start life at age 0.
    if model_type == "WF":
        default_ages = -1
    elif model_type == "nonWF":
        default_ages = 0
    else:
        raise ValueError("Model type must be 'WF' or 'nonWF'")
    # set_nodes must come before set_populations
    _set_nodes_individuals(tables, age=default_ages)
    _set_populations(tables)
    _set_sites_mutations(tables)
    _set_provenance(tables, model_type=model_type, slim_generation=slim_generation)
class SlimTreeSequence(tskit.TreeSequence):
    '''
    This is just like a :class:`tskit.TreeSequence`, with a few more properties
    and methods, notably:
    - :meth:`.recapitate`
    You should create a :class:`.SlimTreeSequence` using one of
    - :meth:`.SlimTreeSequence.load_tables` :meth:`.SlimTreeSequence.load`,
    - :func:`.load`, or :func:`.load_tables`.

    :ivar slim_generation: The generation that the SLiM simulation was at upon writing;
        will be read from provenance if not provided.
    :ivar reference_sequence: None, or an string of length equal to the sequence
        length that gives the entire reference sequence for nucleotide models.
    :vartype slim_generation: int
    :vartype reference_sequence: string
    '''

    def __init__(self, ts, reference_sequence=None):
        # The SLiM file version and generation are recorded in provenance;
        # older versions (0.1/0.2) are upgraded in place below.
        provenance = get_provenance(ts)
        slim_generation = provenance.slim_generation
        if provenance.file_version != "0.3":
            warnings.warn("This is an v{} SLiM tree sequence.".format(provenance.file_version) +
                          " When you write this out, " +
                          "it will be converted to v0.3 (which you should do).")
            tables = ts.dump_tables()
            if provenance.file_version == "0.1" or provenance.file_version == "0.2":
                # add empty nucleotide slots to metadata
                mut_bytes = tskit.unpack_bytes(tables.mutations.metadata,
                                               tables.mutations.metadata_offset)
                mut_metadata = [_decode_mutation_pre_nucleotides(md)
                                for md in mut_bytes]
                annotate_mutation_metadata(tables, mut_metadata)
            if provenance.file_version == "0.1":
                # shift times: add slim_generation to node and migration times
                node_times = tables.nodes.time + slim_generation
                tables.nodes.set_columns(
                        flags=tables.nodes.flags,
                        time=node_times,
                        population=tables.nodes.population,
                        individual=tables.nodes.individual,
                        metadata=tables.nodes.metadata,
                        metadata_offset=tables.nodes.metadata_offset)
                migration_times = tables.migrations.time + slim_generation
                tables.migrations.set_columns(
                        left=tables.migrations.left,
                        right=tables.migrations.right,
                        node=tables.migrations.node,
                        source=tables.migrations.source,
                        dest=tables.migrations.dest,
                        time=migration_times)
            upgrade_slim_provenance(tables)
            ts = tables.tree_sequence()
            provenance = get_provenance(ts)
            assert(provenance.file_version == "0.3")
        self._ll_tree_sequence = ts._ll_tree_sequence
        self.slim_generation = slim_generation
        self.reference_sequence = reference_sequence
        # pre-extract individual metadata
        # Locations are stored flat; reshape to one (x, y, z) row per individual.
        self.individual_locations = ts.tables.individuals.location
        self.individual_locations.shape = (int(len(self.individual_locations)/3), 3)
        self.individual_ages = np.zeros(ts.num_individuals, dtype='int')
        # Ages are only meaningful in nonWF models; WF ages stay at zero.
        if self.slim_provenance.model_type != "WF":
            self.individual_ages = np.fromiter(map(lambda ind: decode_individual(ind.metadata).age, ts.individuals()), dtype='int64')
        self.individual_times = np.zeros(ts.num_individuals)
        self.individual_populations = np.repeat(np.int32(-1), ts.num_individuals)
        # Sanity check: each individual's nodes must agree on population and time,
        # otherwise the per-individual arrays below would be ambiguous.
        npops = [len(set(self.node(n).population for n in ind.nodes)) for ind in ts.individuals()]
        ntimes = [len(set(self.node(n).time for n in ind.nodes)) for ind in ts.individuals()]
        if max(npops) > 1:
            raise ValueError("Individual has nodes from more than one population.")
        if max(ntimes) > 1:
            raise ValueError("Individual has nodes from more than one time.")
        has_indiv = (ts.tables.nodes.individual >= 0)
        which_indiv = ts.tables.nodes.individual[has_indiv]
        # if we did not do the sanity check above then an individual with nodes in more than one pop
        # would get the pop of their last node in the list
        self.individual_populations[which_indiv] = ts.tables.nodes.population[has_indiv]
        self.individual_times[which_indiv] = ts.tables.nodes.time[has_indiv]

    @classmethod
    def load(cls, path):
        '''
        Load a :class:`SlimTreeSequence` from a .trees file on disk.

        :param string path: The path to a .trees file.
        :rtype SlimTreeSequence:
        '''
        ts = tskit.load(path)
        # extract the reference sequence from the kastore
        kas = kastore.load(path)
        if 'reference_sequence/data' in kas:
            int_rs = kas['reference_sequence/data']
            reference_sequence = int_rs.tostring().decode('ascii')
        else:
            reference_sequence = None
        return cls(ts, reference_sequence)

    @classmethod
    def load_tables(cls, tables, **kwargs):
        '''
        Creates the :class:`SlimTreeSequence` defined by the tables.

        :param TableCollection tables: A set of tables, as produced by SLiM
            or by annotate_defaults().
        :param TableCollection reference_sequence: An optional string of ACGT giving
            the reference sequence.
        :rtype SlimTreeSequence:
        '''
        # a roundabout way to copy the tables
        ts = tables.tree_sequence()
        return cls(ts, **kwargs)

    def simplify(self, *args, **kwargs):
        '''
        This is a wrapper for :meth:`tskit.TreeSequence.simplify`.
        The only difference is that this method returns the
        derived class :class:`.SlimTreeSequence`.

        :rtype SlimTreeSequence:
        '''
        sts = super(SlimTreeSequence, self).simplify(*args, **kwargs)
        # simplify() may return either a tree sequence or a (ts, node_map)
        # tuple, depending on the `map_nodes` argument.
        if (type(sts) == tuple):
            ret = (SlimTreeSequence(sts[0]), sts[1])
            ret[0].reference_sequence = self.reference_sequence
        else:
            ret = SlimTreeSequence(sts)
            ret.reference_sequence = self.reference_sequence
        return ret

    def population(self, id_):
        '''
        Returns the population whose ID is given by `id_`, as documented in
        :meth:`tskit.TreeSequence.population`, but with additional attributes::
            slim_id, selfing_fraction, female_cloning_fraction,
            male_cloning_fraction, sex_ratio,
            bounds_x0, bounds_x1, bounds_y0, bounds_y1, bounds_z0, bounds_z1,
            migration_records.
        These are all recorded by SLiM in the metadata.
        Note that SLiM populations are usually indexed starting from 1,
        but in tskit from zero, so there may be populations (e.g., with id_=0)
        that have no metadata and are not used by SLiM.

        :param int id_: The ID of the population (i.e., its index).
        '''
        pop = super(SlimTreeSequence, self).population(id_)
        try:
            pop.metadata = decode_population(pop.metadata)
        except:
            # best-effort decode: populations without SLiM metadata keep
            # their raw metadata bytes
            pass
        return pop

    def individual(self, id_):
        '''
        Returns the individual whose ID is given by `id_`, as documented in
        :meth:`tskit.TreeSequence.individual`, but with additional attributes::
            time, pedigree_id, age, slim_population, sex, slim_flags.
        The `time` and `population` properties are extracted from the nodes,
        and an error will be thrown if the individual's nodes derive from
        more than one population or more than one time.

        :param int id_: The ID of the individual (i.e., its index).
        '''
        ind = super(SlimTreeSequence, self).individual(id_)
        # population/time come from the arrays pre-computed in __init__
        ind.population = self.individual_populations[id_]
        ind.time = self.individual_times[id_]
        try:
            ind.metadata = decode_individual(ind.metadata)
        except:
            # best-effort decode: leave raw metadata if it is not SLiM-encoded
            pass
        return ind

    def node(self, id_):
        '''
        Returns the node whose ID is given by `id_`, as documented in
        :meth:`tskit.TreeSequence.node`, but with additional attributes::
            slim_id, is_null, genome_type.
        These are all recorded by SLiM in the metadata.

        :param int id_: The ID of the node (i.e., its index).
        '''
        node = super(SlimTreeSequence, self).node(id_)
        try:
            node.metadata = decode_node(node.metadata)
        except:
            # best-effort decode: leave raw metadata if it is not SLiM-encoded
            pass
        return node

    def mutation(self, id_):
        '''
        Returns the mutation whose ID is given by `id_`, as documented in
        :meth:`tskit.TreeSequence.mutation`, but with additional attributes::
            mutation_type, selection_coeff, population, slim_time, nucleotide.
        These are all recorded by SLiM in the metadata.

        :param int id_: The ID of the mutation (i.e., its index).
        '''
        mut = super(SlimTreeSequence, self).mutation(id_)
        try:
            mut.metadata = decode_mutation(mut.metadata)
        except:
            # best-effort decode: leave raw metadata if it is not SLiM-encoded
            pass
        return mut

    def recapitate(self, recombination_rate=None, keep_first_generation=False,
                   population_configurations=None, recombination_map=None, **kwargs):
        '''
        Returns a "recapitated" tree sequence, by using msprime to run a
        coalescent simulation from the "top" of this tree sequence, i.e.,
        allowing any uncoalesced lineages to coalesce.
        To allow this process, the first generation of the SLiM simulation has been
        recorded in the tree sequence, but are not currently marked as samples,
        so this process (or, simplify()) will remove any of these that are not needed.
        If you want to keep them, then set ``keep_first_generation`` to True;
        although this will make more work here.
        This also means that you must *not* simplify before you recapitate your
        SLiM-produced tree sequence.
        Note that ``Ne`` is not set automatically, so defaults to ``1.0``; you probably
        want to set it explicitly. Similarly, migration is not set up
        automatically, so that if there are uncoalesced lineages in more than
        one population, you will need to pass in a migration matrix to allow
        coalescence. In both cases, remember that population IDs in ``tskit`` begin
        with 0, so that if your SLiM simulation has populations ``p1`` and ``p2``,
        then the tree sequence will have three populations (but with no nodes
        assigned to population 0), so that migration rate of 1.0 between ``p1`` and
        ``p2`` needs a migration matrix of::
            [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]
        In general, all defaults are whatever the defaults of ``msprime.simulate`` are;
        this includes recombination rate, so that if neither ``recombination_rate``
        or a ``recombination_map`` are provided, there will be *no* recombination.
        However, if ``recombination_rate`` *is* provided, then recapitation will
        use a constant rate of recombination on a discretized map -- in other words,
        recombinations in the coalescent portion of the simulation will only occur
        at integer locations, just as in SLiM. If you do not want this to happen,
        you need to construct a ``recombination_map`` explicitly.

        :param float recombination_rate: A (constant) recombination rate,
            in units of crossovers per nucleotide per unit of time.
        :param bool keep_first_generation: Whether to keep the individuals (and genomes)
            corresponding to the first SLiM generation in the resulting tree sequence
        :param list population_configurations: See :meth:`msprime.simulate` for
            this argument; if not provided, each population will have zero growth rate
            and the same effective population size.
        :type recombination_map: :class`msprime.RecombinationMap`
        :param recombination_map: The recombination map, or None,
            if recombination_rate is specified.
        :param dict kwargs: Any other arguments to :meth:`msprime.simulate`.
        '''
        if recombination_rate is not None:
            if recombination_map is not None:
                raise ValueError("Cannot specify length/recombination_rate along with a recombination map")
            # num_loci = sequence_length discretizes recombination to integer
            # positions, matching SLiM's behavior.
            recombination_map = msprime.RecombinationMap(positions = [0.0, self.sequence_length],
                                                        rates = [recombination_rate, 0.0],
                                                        num_loci = int(self.sequence_length))
        if population_configurations is None:
            population_configurations = [msprime.PopulationConfiguration()
                                         for _ in range(self.num_populations)]
        if keep_first_generation:
            ts = self._mark_first_generation()
        else:
            ts = self
        recap = msprime.simulate(
                    from_ts = ts,
                    population_configurations = population_configurations,
                    recombination_map = recombination_map,
                    start_time = self.slim_generation,
                    **kwargs)
        ts = SlimTreeSequence.load_tables(recap.tables)
        ts.reference_sequence = self.reference_sequence
        return ts

    def mutation_at(self, node, position, time=None):
        '''
        Finds the mutation present in the genome of ``node`` at ``position``,
        returning -1 if there is no such mutation recorded in the tree
        sequence. Warning: if ``node`` is not actually in the tree sequence
        (e.g., not ancestral to any samples) at ``position``, then this
        function will return -1, possibly erroneously. If `time` is provided,
        returns the last mutation at ``position`` inherited by ``node`` that
        occurred at or before ``time`` ago (using the `slim_time` attribute of
        mutation metadata to infer this).

        :param int node: The index of a node in the tree sequence.
        :param float position: A position along the genome.
        :param int time: The time ago that we want the nucleotide, or None,
            in which case the ``time`` of ``node`` is used.
        :returns: Index of the mutation in question, or -1 if none.
        '''
        if position < 0 or position >= self.sequence_length:
            raise ValueError("Position {} not valid.".format(position))
        if node < 0 or node >= self.num_nodes:
            raise ValueError("Node {} not valid.".format(node))
        if time is None:
            time = self.node(node).time
        tree = self.at(position)
        slim_time = self.slim_generation - time
        # Mutation's slim_times are one less than the corresponding node's slim times
        # in WF models, but not in WF models, for some reason.
        if self.slim_provenance.model_type == "WF":
            slim_time -= 1.0
        site_pos = self.tables.sites.position
        out = tskit.NULL
        if position in site_pos:
            site_index = np.where(site_pos == position)[0][0]
            site = self.site(site_index)
            mut_nodes = []
            # look for only mutations that occurred before `time`
            # not strictly necessary if time was None
            for mut in site.mutations:
                if len(mut.metadata) == 0:
                    raise ValueError("All mutations must have SLiM metadata.")
                if max([u.slim_time for u in mut.metadata]) <= slim_time:
                    mut_nodes.append(mut.node)
            # walk up the tree from `node` until hitting a node carrying one
            # of the qualifying mutations (or the root, n == -1)
            n = node
            while n > -1 and n not in mut_nodes:
                n = tree.parent(n)
            if n >= 0:
                # do careful error checking here
                for mut in site.mutations:
                    if mut.node == n:
                        assert(out == tskit.NULL or out == mut.parent)
                        out = mut.id
        return out

    def nucleotide_at(self, node, position, time=None):
        '''
        Finds the nucleotide present in the genome of ``node`` at ``position``.
        Warning: if ``node`` is not actually in the tree sequence (e.g., not
        ancestral to any samples) at ``position``, then this function will
        return the reference sequence nucleotide, possibly erroneously. If
        `time` is provided, returns the last nucletide produced by a mutation
        at ``position`` inherited by ``node`` that occurred at or before
        ``time`` ago (using the `slim_time` attribute of mutation metadata
        to infer this).

        :param int node: The index of a node in the tree sequence.
        :param float position: A position along the genome.
        :param int time: The time ago that we want the nucleotide, or None,
            in which case the ``time`` of ``node`` is used.
        :returns: Index of the nucleotide in ``NUCLEOTIDES`` (0=A, 1=C, 2=G, 3=T).
        '''
        if self.reference_sequence is None:
            raise ValueError("This tree sequence has no reference sequence.")
        mut_id = self.mutation_at(node, position, time)
        if mut_id == tskit.NULL:
            # no mutation here: fall back on the reference sequence
            out = NUCLEOTIDES.index(self.reference_sequence[int(position)])
        else:
            mut = self.mutation(mut_id)
            # use the most recent (largest slim_time) entry in the stack
            k = np.argmax([u.slim_time for u in mut.metadata])
            out = mut.metadata[k].nucleotide
        return out

    @property
    def slim_provenance(self):
        '''
        Extracts model type, slim generation, and remembmered node count from the last
        entry in the provenance table that is tagged with "program"="SLiM".

        :rtype ProvenanceMetadata:
        '''
        return get_provenance(self)

    def _mark_first_generation(self):
        '''
        Mark all 'first generation' individuals' nodes as samples, and return
        the corresponding tree sequence.
        '''
        tables = self.dump_tables()
        # NOTE(review): `individual > 0` excludes nodes of individual 0;
        # confirm whether `>= 0` was intended here.
        first_gen_nodes = ((tables.nodes.individual > 0)
                           & ((tables.individuals.flags[tables.nodes.individual]
                               & INDIVIDUAL_FIRST_GEN) > 0))
        if sum(first_gen_nodes) == 0:
            warnings.warn("Tree sequence does not have the initial generation; " +
                          " did you simplify it after output from SLiM?")
        flags = tables.nodes.flags
        flags[first_gen_nodes] = (flags[first_gen_nodes] | tskit.NODE_IS_SAMPLE)
        tables.nodes.set_columns(flags=flags, population=tables.nodes.population,
                individual=tables.nodes.individual, time=tables.nodes.time,
                metadata=tables.nodes.metadata,
                metadata_offset=tables.nodes.metadata_offset)
        ts = load_tables(tables)
        ts.reference_sequence = self.reference_sequence
        return ts

    def individuals_alive_at(self, time):
        """
        Returns an array giving the IDs of all individuals that are known to be
        alive at the given time ago. This is determined by seeing if their age
        at `time`, determined since the time since they were born (their
        `.time` attribute) is less than or equal to their `age` attribute
        (which will reflect their age at the last time they were Remembered).

        :param float time: The time ago.
        """
        births = self.individual_times
        ages = self.individual_ages
        # alive at `time` iff born at or before `time` ago and not yet dead
        alive_bool = np.logical_and(births >= time, births - ages <= time)
        return np.where(alive_bool)[0]

    def individual_ages_at(self, time):
        """
        Returns the *ages* of each individual at the corresponding time ago,
        which will be `nan` if the individual is either not born yet or dead.
        This is computed as the time ago the individual was born (found by the
        `time` associated with the the individual's nodes) minus the `time`
        argument; while "death" is inferred from the individual's `age`,
        recorded in metadata.
        The age is the number of complete time steps the individual has lived
        through, so if they were born in time step `time`, then their age
        will be zero.

        :param float time: The reference time ago.
        """
        ages = np.repeat(np.nan, self.num_individuals)
        alive = self.individuals_alive_at(time)
        ages[alive] = self.individual_times[alive] - time
        return ages

    def first_generation_individuals(self):
        """
        Returns the IDs of the individuals remembered as part of the first SLiM generation,
        as determined by their flags.
        """
        return np.where(self.tables.individuals.flags & INDIVIDUAL_FIRST_GEN > 0)[0]
def _set_nodes_individuals(
        tables, node_ind=None, location=(0, 0, 0), age=0, ind_id=None,
        ind_population=None, ind_sex=INDIVIDUAL_TYPE_HERMAPHRODITE,
        ind_flags=INDIVIDUAL_ALIVE, slim_ind_flags=0, node_id=None,
        node_is_null=False, node_type=GENOME_TYPE_AUTOSOME):
    '''
    Adds to a TableCollection the information relevant to individuals required
    for SLiM to load in a tree sequence, that is found in Node and Individual
    tables. This will replace any existing Individual table, and will replace
    any information already in the individual, metadata, and population columns
    of the Node table.
    This is designed to make it easy to assign default values:
    - (node_ind) the 2*j-th and (2*j+1)-st `sample` nodes to individual j
    - (location) individual locations to (0, 0, 0)
    - (age) individual age to 0
    - (ind_id) SLiM individual pedigree IDs to sequential integers starting from 0
    - (ind_population) individual populations to 0
    - (node_id) SLiM genome IDs to sequential integers starting with samples from 0
    - (node_is_null) genomes to be non-null
    - (node_type) genome type to 0 (= autosome)
    - (ind_flags) INDIVIDUAL_ALIVE
    If you have other situations, like non-alive "remembered" individuals, you
    will need to edit the tables by hand, afterwards.
    '''
    # Sample nodes are the genomes that get paired into diploid individuals.
    samples = list(filter(lambda j: tables.nodes.flags[j] & tskit.NODE_IS_SAMPLE,
                          range(tables.nodes.num_rows)))
    if (len(samples) % 2) != 0:
        raise ValueError("There must be an even number of sampled nodes,"\
                         + "since organisms are diploid.")
    if node_ind is None:
        node_ind = [tskit.NULL for _ in range(tables.nodes.num_rows)]
        for j, k in enumerate(samples):
            # NOTE(review): this assigns node_ind[j] = int(k/2); if `samples`
            # is not simply 0..len-1 this may have been intended as
            # node_ind[k] = int(j/2) -- confirm against callers.
            node_ind[j] = int(k/2)
    num_individuals = max(node_ind) + 1
    num_nodes = tables.nodes.num_rows
    # Broadcast any scalar defaults to per-individual lists, then check lengths.
    if type(location) is tuple:
        location = [location for _ in range(num_individuals)]
    assert(len(location) == num_individuals)
    if type(age) is int or type(age) is float:
        age = [age for _ in range(num_individuals)]
    assert(len(age) == num_individuals)
    if ind_id is None:
        ind_id = list(range(num_individuals))
    assert(len(ind_id) == num_individuals)
    if type(ind_sex) is int:
        ind_sex = [ind_sex for _ in range(num_individuals)]
    assert(len(ind_sex) == num_individuals)
    if type(slim_ind_flags) is int:
        slim_ind_flags = [slim_ind_flags for _ in range(num_individuals)]
    assert(len(slim_ind_flags) == num_individuals)
    if type(ind_flags) is int:
        ind_flags = [ind_flags for _ in range(num_individuals)]
    assert(len(ind_flags) == num_individuals)
    if node_id is None:
        # Number sample genomes first, then the remaining nodes in sorted order.
        node_id = [-1 for _ in range(num_nodes)]
        for j, k in enumerate(list(samples)
                              + sorted(list(set(range(num_nodes))
                                            - set(samples)))):
            node_id[k] = j
    assert(len(node_id) == num_nodes)
    # Broadcast per-node defaults likewise.
    if type(node_is_null) is bool:
        node_is_null = [node_is_null for _ in range(num_nodes)]
    assert(len(node_is_null) == num_nodes)
    if type(node_type) is int:
        node_type = [node_type for _ in range(num_nodes)]
    assert(len(node_type) == tables.nodes.num_rows)
    if ind_population is None:
        # set the individual populations based on what's in the nodes
        ind_population = [tskit.NULL for _ in range(num_individuals)]
        for j, u in enumerate(node_ind):
            if u >= 0:
                ind_population[u] = tables.nodes.population[j]
    assert(len(ind_population) == num_individuals)
    # check for consistency: every individual has two nodes, and populations agree
    ploidy = [0 for _ in range(num_individuals)]
    for j in samples:
        u = node_ind[j]
        assert(u >= 0)
        ploidy[u] += 1
        if tables.nodes.population[j] != ind_population[u]:
            raise ValueError("Inconsistent populations: nodes and individuals do not agree.")
    if any([p != 2 for p in ploidy]):
        raise ValueError("Not all individuals have two assigned nodes.")
    # Rewrite the node table with the individual column filled in.
    tables.nodes.set_columns(flags=tables.nodes.flags, time=tables.nodes.time,
                             population=tables.nodes.population, individual=node_ind,
                             metadata=tables.nodes.metadata,
                             metadata_offset=tables.nodes.metadata_offset)
    loc_vec, loc_off = tskit.pack_bytes(location)
    tables.individuals.set_columns(
            flags=ind_flags, location=loc_vec, location_offset=loc_off)
    individual_metadata = [IndividualMetadata(*x) for x in
                           zip(ind_id, age, ind_population, ind_sex, slim_ind_flags)]
    # Only sample nodes get SLiM node metadata; the rest stay None.
    node_metadata = [None for _ in range(num_nodes)]
    for j in samples:
        node_metadata[j] = NodeMetadata(slim_id=node_id[j], is_null=node_is_null[j],
                                        genome_type=node_type[j])
    annotate_individual_metadata(tables, individual_metadata)
    annotate_node_metadata(tables, node_metadata)
def _set_populations(
        tables, pop_id=None, selfing_fraction=0.0, female_cloning_fraction=0.0,
        male_cloning_fraction=0.0, sex_ratio=0.5, bounds_x0=0.0, bounds_x1=0.0,
        bounds_y0=0.0, bounds_y1=0.0, bounds_z0=0.0, bounds_z1=0.0,
        migration_records=None):
    '''
    Adds to a TableCollection the information about populations required for SLiM
    to load a tree sequence. This will replace anything already in the Population
    table.

    Scalar arguments are broadcast to one value per population; list arguments
    must already have length equal to the number of populations.

    :raises ValueError: If individual metadata cannot be decoded, which usually
        means set_nodes_individuals() has not been run yet.
    '''
    num_pops = max(tables.nodes.population) + 1
    # Verify individual metadata is decodable and refers to valid populations.
    for md in tskit.unpack_bytes(tables.individuals.metadata,
                                 tables.individuals.metadata_offset):
        try:
            ind_md = decode_individual(md)
        except Exception as e:
            # BUG FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; chain the cause for debugging.
            raise ValueError("Individuals do not have metadata:"
                             + "need to run set_nodes_individuals() first?") from e
        assert(ind_md.population < num_pops)
    # Broadcast scalar defaults to one value per population.
    if pop_id is None:
        pop_id = list(range(num_pops))
    assert(len(pop_id) == num_pops)
    if type(selfing_fraction) is float:
        selfing_fraction = [selfing_fraction for _ in range(num_pops)]
    assert(len(selfing_fraction) == num_pops)
    if type(female_cloning_fraction) is float:
        female_cloning_fraction = [female_cloning_fraction for _ in range(num_pops)]
    assert(len(female_cloning_fraction) == num_pops)
    if type(male_cloning_fraction) is float:
        male_cloning_fraction = [male_cloning_fraction for _ in range(num_pops)]
    assert(len(male_cloning_fraction) == num_pops)
    if type(sex_ratio) is float:
        sex_ratio = [sex_ratio for _ in range(num_pops)]
    assert(len(sex_ratio) == num_pops)
    if type(bounds_x0) is float:
        bounds_x0 = [bounds_x0 for _ in range(num_pops)]
    assert(len(bounds_x0) == num_pops)
    if type(bounds_x1) is float:
        bounds_x1 = [bounds_x1 for _ in range(num_pops)]
    assert(len(bounds_x1) == num_pops)
    if type(bounds_y0) is float:
        bounds_y0 = [bounds_y0 for _ in range(num_pops)]
    assert(len(bounds_y0) == num_pops)
    if type(bounds_y1) is float:
        bounds_y1 = [bounds_y1 for _ in range(num_pops)]
    assert(len(bounds_y1) == num_pops)
    if type(bounds_z0) is float:
        bounds_z0 = [bounds_z0 for _ in range(num_pops)]
    assert(len(bounds_z0) == num_pops)
    if type(bounds_z1) is float:
        bounds_z1 = [bounds_z1 for _ in range(num_pops)]
    assert(len(bounds_z1) == num_pops)
    if migration_records is None:
        migration_records = [[] for _ in range(num_pops)]
    assert(len(migration_records) == num_pops)
    for mrl in migration_records:
        for mr in mrl:
            assert(type(mr) is PopulationMigrationMetadata)
    population_metadata = [PopulationMetadata(*x) for x in
                           zip(pop_id, selfing_fraction, female_cloning_fraction,
                               male_cloning_fraction, sex_ratio, bounds_x0,
                               bounds_x1, bounds_y0, bounds_y1, bounds_z0, bounds_z1,
                               migration_records)]
    annotate_population_metadata(tables, population_metadata)
def _set_sites_mutations(
        tables, mutation_id=None, mutation_type=1, selection_coeff=0.0,
        population=tskit.NULL, slim_time=None):
    '''
    Adds to a TableCollection the information relevant to mutations required
    for SLiM to load in a tree sequence. This means adding to the metadata column
    of the Mutation table, It will also
    - give SLiM IDs to each mutation
    - round Site positions to integer values
    - stack any mutations that end up at the same position as a result
    - replace ancestral states with ""
    This will replace any information already in the metadata or derived state
    columns of the Mutation table.
    '''
    num_mutations = tables.mutations.num_rows
    # Broadcast scalar defaults to one value per mutation, then check lengths.
    if mutation_id is None:
        mutation_id = list(range(num_mutations))
    # NOTE(review): mutation_id is validated here but not used below -- confirm.
    assert(len(mutation_id) == num_mutations)
    if type(mutation_type) is int:
        mutation_type = [mutation_type for _ in range(num_mutations)]
    assert(len(mutation_type) == num_mutations)
    if type(selection_coeff) is float:
        selection_coeff = [selection_coeff for _ in range(num_mutations)]
    assert(len(selection_coeff) == num_mutations)
    if type(population) is int:
        population = [population for _ in range(num_mutations)]
    assert(len(population) == num_mutations)
    if slim_time is None:
        ## This may *not* make sense because we have to round:
        # slim_time = [(-1) * int(tables.nodes.time[u]) for u in tables.mutations.node]
        slim_time = [0 for _ in range(num_mutations)]
    assert(len(slim_time) == num_mutations)
    # Each mutation gets a single-element "stack" of SLiM mutation metadata.
    mutation_metadata = [[MutationMetadata(*x)] for x in
                         zip(mutation_type, selection_coeff, population, slim_time)]
    annotate_mutation_metadata(tables, mutation_metadata)
############
# Provenance
############
# See provenances.py for the structure of a Provenance entry.
def _set_provenance(tables, model_type, slim_generation):
    '''
    Appends to the provenance table of a :class:`TableCollection` a record containing
    the information that SLiM expects to find there.

    :param TableCollection tables: The table collection.
    :param string model_type: The model type: either "WF" or "nonWF".
    :param int slim_generation: The "current" generation in the SLiM simulation.
    '''
    # Append the pyslim record first, then the SLiM record.
    for record in (make_pyslim_provenance_dict(),
                   make_slim_provenance_dict(model_type, slim_generation)):
        tables.provenances.add_row(json.dumps(record))
| [
"msprime.PopulationConfiguration",
"numpy.logical_and",
"numpy.argmax",
"tskit.load",
"numpy.zeros",
"json.dumps",
"tskit.pack_bytes",
"numpy.where",
"msprime.simulate",
"numpy.repeat",
"numpy.int32",
"tskit.unpack_bytes",
"warnings.warn",
"kastore.load"
] | [((27940, 27966), 'tskit.pack_bytes', 'tskit.pack_bytes', (['location'], {}), '(location)\n', (27956, 27966), False, 'import tskit\n'), ((29098, 29186), 'tskit.unpack_bytes', 'tskit.unpack_bytes', (['tables.individuals.metadata', 'tables.individuals.metadata_offset'], {}), '(tables.individuals.metadata, tables.individuals.\n metadata_offset)\n', (29116, 29186), False, 'import tskit\n'), ((5849, 5890), 'numpy.zeros', 'np.zeros', (['ts.num_individuals'], {'dtype': '"""int"""'}), "(ts.num_individuals, dtype='int')\n", (5857, 5890), True, 'import numpy as np\n'), ((6110, 6138), 'numpy.zeros', 'np.zeros', (['ts.num_individuals'], {}), '(ts.num_individuals)\n', (6118, 6138), True, 'import numpy as np\n'), ((7309, 7325), 'tskit.load', 'tskit.load', (['path'], {}), '(path)\n', (7319, 7325), False, 'import tskit\n'), ((7398, 7416), 'kastore.load', 'kastore.load', (['path'], {}), '(path)\n', (7410, 7416), False, 'import kastore\n'), ((15676, 15846), 'msprime.simulate', 'msprime.simulate', ([], {'from_ts': 'ts', 'population_configurations': 'population_configurations', 'recombination_map': 'recombination_map', 'start_time': 'self.slim_generation'}), '(from_ts=ts, population_configurations=\n population_configurations, recombination_map=recombination_map,\n start_time=self.slim_generation, **kwargs)\n', (15692, 15846), False, 'import msprime\n'), ((22174, 22227), 'numpy.logical_and', 'np.logical_and', (['(births >= time)', '(births - ages <= time)'], {}), '(births >= time, births - ages <= time)\n', (22188, 22227), True, 'import numpy as np\n'), ((22987, 23026), 'numpy.repeat', 'np.repeat', (['np.nan', 'self.num_individuals'], {}), '(np.nan, self.num_individuals)\n', (22996, 23026), True, 'import numpy as np\n'), ((34125, 34148), 'json.dumps', 'json.dumps', (['pyslim_dict'], {}), '(pyslim_dict)\n', (34135, 34148), False, 'import json\n'), ((34181, 34202), 'json.dumps', 'json.dumps', (['slim_dict'], {}), '(slim_dict)\n', (34191, 34202), False, 'import json\n'), ((6187, 
6199), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (6195, 6199), True, 'import numpy as np\n'), ((20065, 20111), 'numpy.argmax', 'np.argmax', (['[u.slim_time for u in mut.metadata]'], {}), '([u.slim_time for u in mut.metadata])\n', (20074, 20111), True, 'import numpy as np\n'), ((20948, 21070), 'warnings.warn', 'warnings.warn', (["('Tree sequence does not have the initial generation; ' +\n ' did you simplify it after output from SLiM?')"], {}), "('Tree sequence does not have the initial generation; ' +\n ' did you simplify it after output from SLiM?')\n", (20961, 21070), False, 'import warnings\n'), ((22243, 22263), 'numpy.where', 'np.where', (['alive_bool'], {}), '(alive_bool)\n', (22251, 22263), True, 'import numpy as np\n'), ((23367, 23433), 'numpy.where', 'np.where', (['(self.tables.individuals.flags & INDIVIDUAL_FIRST_GEN > 0)'], {}), '(self.tables.individuals.flags & INDIVIDUAL_FIRST_GEN > 0)\n', (23375, 23433), True, 'import numpy as np\n'), ((4012, 4091), 'tskit.unpack_bytes', 'tskit.unpack_bytes', (['tables.mutations.metadata', 'tables.mutations.metadata_offset'], {}), '(tables.mutations.metadata, tables.mutations.metadata_offset)\n', (4030, 4091), False, 'import tskit\n'), ((15428, 15461), 'msprime.PopulationConfiguration', 'msprime.PopulationConfiguration', ([], {}), '()\n', (15459, 15461), False, 'import msprime\n'), ((17821, 17851), 'numpy.where', 'np.where', (['(site_pos == position)'], {}), '(site_pos == position)\n', (17829, 17851), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 25 20:02:19 2016
@author: technologos
Thanks to mewo2 and amitp for inspiration and tutorials.
Thanks particularly to mewo2 for the framework for the regularization.
"""
import numpy
from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate
from numpy.random import random, randint, normal
from numpy.linalg import norm as magnitude
from matplotlib import pyplot, is_interactive
from matplotlib.collections import LineCollection
from matplotlib import colors as mpl_colors
from matplotlib import cm as colormap
from scipy.spatial import Voronoi
from math import degrees, acos
from functools import wraps
from time import time
from random import choice
from os import listdir, chdir, mkdir
from sys import platform
# When matplotlib is in interactive mode, switch it off so figures are
# only rendered on an explicit show()/savefig() call (bulk_generate
# relies on this when writing many PNGs).
if is_interactive():
    pyplot.ioff()
'''
helper functions
'''
def centroid(vertices):
    """Area centroid of the polygon whose corners are the rows of *vertices*.

    Implements the shoelace formula; the rows must be ordered around the
    polygon perimeter (either winding direction works).
    """
    xs = vertices[:, 0]
    ys = vertices[:, 1]
    # Each vertex paired with its successor (wrapping around).
    next_xs = concatenate((xs[1:], xs[:1]))
    next_ys = concatenate((ys[1:], ys[:1]))
    cross = xs * next_ys - ys * next_xs
    area = sum(cross / 2)
    c_x = ((xs + next_xs) * cross) / (6 * area)
    c_y = ((ys + next_ys) * cross) / (6 * area)
    return (sum(c_x), sum(c_y))
def angle(vector1, vector2):
    """Return the angle between two vectors, in degrees.

    The cosine is clamped to [-1.0, 1.0] before calling acos() so that
    floating-point round-off on (anti)parallel vectors cannot push the
    argument out of acos's domain and raise ValueError.
    """
    cosine = dot(vector1, vector2) / (magnitude(vector1) * magnitude(vector2))
    return degrees(acos(max(-1.0, min(1.0, cosine))))
def timed(function):
    """Decorator that prints the wall-clock runtime of each call.

    Fix: the previous version discarded the wrapped function's return
    value, so every ``@timed`` function silently returned None.  The
    result is now captured and propagated to the caller.
    """
    @wraps(function)
    def timed_function(*args, **kwargs):
        start_time = time()
        result = function(*args, **kwargs)
        end_time = time()
        print(round(end_time - start_time, 2), 'seconds')
        return result
    return timed_function
def bulk_generate(number, folder = '', size = 4096):
    """Generate *number* maps of *size* tiles and save each one as a PNG.

    Output goes into a freshly created, sequentially numbered subfolder
    of *folder* (or of a hard-coded platform-specific default when
    *folder* is empty).  Each image is named with its Unix timestamp.
    """
    if folder == '':
        if platform == 'darwin':
            folder = '/Users/CMilroy/Pictures/Maps/'
        elif platform == 'win32':
            folder = 'E:/Documents/Sandbox/WorldBuilder/Maps/'
        else:
            raise Exception('Platform not recognized')
    chdir(folder)
    # Existing run folders are assumed to have purely numeric names; a
    # visible non-numeric entry here would raise ValueError from int().
    folders = [int(name) for name in listdir() if name[0] != '.']
    if len(folders) == 0:
        new_folder = '1'
    else:
        max_existing_index = max(folders)
        new_folder = str(max_existing_index + 1)
    mkdir(new_folder)
    chdir(new_folder)
    for _ in range(number):
        try:
            testmap = Map(size)
            print('Building image...')
            testmap.display(show_tile_elevation = True, show_grid = False,
                            show_water = True, window = False)
            timestamp = int(time())
            pyplot.savefig(str(timestamp) + '.png')
            pyplot.close('all')
        # NOTE(review): bare except silently skips any failed map
        # (including KeyboardInterrupt); consider `except Exception`.
        except:
            continue
'''
map classes
'''
class MapTile():
    """One Voronoi region of the map.

    Cross-references (vertices, edges, neighbors, plate) are filled in
    after construction by the Map.generate_* methods.
    """
    def __init__(self, center):
        self.center = center # coordinates of defining point (~centroid, after relaxation)
        self.vertices = [] # MapVertex objects defining the region (set by Map.generate_vertices)
        self.vertex_coords = [] # array of the region's vertex (x, y) coordinates
        self.neighbors = [] # MapTile objects bordering this MapTile
        self.edges = [] # MapEdge objects bordering this MapTile
        self.biome = []
        self.plate = -1 # TectonicPlate this tile sits on (-1 until assigned)
        self.properties = []
        self.elevation = 0 # mean vertex elevation, set by Map.calculate_elevation
        self.boundary = False # is this tile on the outside of a plate?
        self.slope = 0 # max minus min vertex elevation, set by Map.calculate_elevation
        self.roughness = 0 # TODO: based on coplanarity (not yet computed)
        self.water_depth = 0 # set by Map.fill_oceans for ocean tiles
        self.ocean = False # set by Map.fill_oceans
class MapEdge():
    """Border between two adjacent MapTiles (a Voronoi ridge)."""
    def __init__(self, tiles):
        self.tiles = tiles # the two adjacent MapTile objects
        self.vertices = [] # MapVertex endpoints (ridges open to infinity keep fewer than two)
        self.type = []
        self.properties = []
        self.elevation = 0 # derived from adjacent plates' motion in Map.move_plates
        self.length = 0 # NOTE(review): never computed anywhere in this module
        self.boundary = False # True when the two tiles belong to different plates
        self.river = False
class MapVertex():
    """Corner point shared by neighboring Voronoi regions."""
    def __init__(self, coords):
        self.coords = coords # (x, y) position from the Voronoi diagram
        self.edges = [] # connected MapEdge objects
        self.elevation = -100 # sentinel: -100 means "not yet assigned"
class TectonicPlate():
    """A group of MapTiles that move together as one rigid plate.

    Seeded from a single tile; Map.generate_plates() grows the plate
    outward and fills in its boundary tiles.
    """
    def __init__(self, initial_tile):
        self.tiles = [initial_tile]
        self.center = initial_tile.center
        self.plate_type = []
        self.boundaries = []  # MapTiles on the rim of the plate
        # Random display color, then a base elevation offset.  The RNG is
        # consumed in the same order as before: random(3) first, normal() second.
        self.color = tuple(random(3))
        self.velocity = array([0, 0])  # overwritten by Map.generate_plates; do plates also rotate?
        self.elevation = normal(scale=.25)  # TODO: refine
class Map():
    """A procedurally generated terrain map.

    Construction runs the full pipeline: relaxed Voronoi tessellation,
    tectonic-plate seeding and growth, plate-motion-driven elevation,
    ocean flood-fill, and a (stub) tradewind latitude band.
    """
    def __init__(self,
                 number_of_tiles = 4096,
                 smoothing_strength = 2):
        # define the fundamental topology
        self.points = random((number_of_tiles, 2))
        self.ocean_elevation = random() - .5 # TODO: refine
        self.tiles = [] # MapTiles
        print('Generating tiles...')
        self.generate_tiles(smoothing_strength) # center, vertices, vertex_coords
        self.edges = [] # MapEdges
        print('Generating edges...')
        self.generate_adjacencies() # neighbors
        self.vertices = [] # MapVertexes
        print('Generating vertices...')
        self.generate_vertices()
        # create tectonic plates
        self.plates = [] # TectonicPlates
        self.boundaries = [] # MapEdges
        print('Generating plates...')
        self.generate_plates()
        # move plates
        print('Moving plates...')
        self.move_plates()
        # TODO: island-forming volcanoes?
        # update elevations
        print('Calculating elevation...')
        self.calculate_elevation()
        print('Filling oceans...')
        self.ocean = []
        self.fill_oceans()
        # generate tradewinds
        latitude_span = randint(0,10) + 1
        min_latitude = randint(-60, 61 - latitude_span)
        self.latitudes = (min_latitude, min_latitude + latitude_span)
        self.wind_tiles = []
        self.calculate_tradewinds()
        # TODO: generate base heat
        # TODO: run simple dynamic weather
        # note: stream length and basin size can be checked against Hack's Law
        # TODO: build biomes
        # TODO: select initial settlement locations
        # more... second-gen settlements?
    @timed
    def generate_tiles(self, smoothing_strength = 2):
        """Relax the random points, build the Voronoi diagram, and create
        one MapTile per point."""
        self.regularize_tiles(smoothing_strength)
        #self.points = numpy.append(self.points, [[0,0], [0,1], [1,0], [1,1]], axis = 0)
        self.voronoi = Voronoi(self.points)
        for index, point in enumerate(self.voronoi.points):
            new_tile = MapTile(point)
            self.tiles.append(new_tile)
    def regularize_tiles(self, number_of_iterations):
        """Lloyd relaxation: repeatedly move each point to the centroid of
        its Voronoi region (vertices clamped to the unit square first)."""
        for _ in range(number_of_iterations):
            vor = Voronoi(self.points)
            new_points = []
            for index in range(len(vor.points)):
                point = vor.points[index,:]
                region = vor.regions[vor.point_region[index]]
                if -1 in region:
                    # open (infinite) region: keep the point where it is
                    new_points.append(point)
                else:
                    region_vertices = asarray([vor.vertices[i,:] for i in region])
                    region_vertices[region_vertices < 0] = 0
                    region_vertices[region_vertices > 1] = 1
                    new_point = centroid(region_vertices)
                    new_points.append(new_point)
            self.points = asarray(new_points)
    @timed
    def generate_adjacencies(self):
        """Create one MapEdge per Voronoi ridge and cross-link the two
        adjacent MapTiles as neighbors."""
        for ridge in self.voronoi.ridge_points:
            tiles = [self.tiles[i] for i in ridge]
            new_edge = MapEdge(tiles)
            self.edges.append(new_edge)
            for index, tile in enumerate(tiles):
                tiles[index].neighbors.append(tiles[1 - index])
                tiles[index].edges.append(new_edge)
    @timed
    def generate_vertices(self):
        """Create MapVertexes and wire up vertex<->edge and tile->vertex
        references (Voronoi index -1 marks a vertex at infinity)."""
        for vertex in self.voronoi.vertices:
            new_vertex = MapVertex(vertex)
            self.vertices.append(new_vertex)
        for index, ridge in enumerate(self.voronoi.ridge_vertices):
            for vertex_index in ridge:
                if vertex_index != -1:
                    self.edges[index].vertices.append(self.vertices[vertex_index])
                    self.vertices[vertex_index].edges.append(self.edges[index])
        for index, tile in enumerate(self.tiles):
            vertex_indices = [i for i in self.voronoi.regions[self.voronoi.point_region[index]]
                              if i != -1]
            tile.vertices = [self.vertices[j] for j in vertex_indices]
            tile.vertex_coords = asarray([self.voronoi.vertices[k]
                                          for k in vertex_indices])
    @timed
    def generate_plates(self):
        """Seed plates at random tiles and flood-fill them outward until
        every tile belongs to a plate, then mark plate-boundary edges."""
        tiles_assigned = 0
        while tiles_assigned < len(self.tiles):
            focus_index = randint(len(self.tiles))
            focus_tile = self.tiles[focus_index]
            if focus_tile.plate == -1:
                new_plate = TectonicPlate(focus_tile)
                direction = random() * 2 * pi
                velocity_vector = array([cos(direction), sin(direction)])
                # NOTE(review): this local name shadows the module-level
                # `magnitude` (numpy.linalg.norm) inside this method only;
                # consider renaming it to e.g. `speed`.
                magnitude = random()
                new_plate.velocity = magnitude * velocity_vector
                focus_tile.plate = new_plate
                tiles_assigned += 1
                self.plates.append(new_plate)
            # grow every plate by one ring of unassigned neighbors
            for focus_plate in self.plates:
                tiles_to_add = []
                for tile in focus_plate.tiles:
                    for index, neighbor in enumerate(tile.neighbors):
                        if neighbor.plate == -1: # TODO: consider prob < 1
                            neighbor.plate = tile.plate
                            tiles_assigned += 1
                            tiles_to_add.append(neighbor)
                        elif neighbor.plate is not tile.plate:
                            tile.boundary = True
                            focus_plate.boundaries.append(tile)
                focus_plate.tiles.extend(tiles_to_add)
        # mark the edges that separate two different plates
        for edge in self.edges:
            if not edge.boundary:
                edge_plates = [tile.plate for tile in edge.tiles]
                if edge_plates[0] is not edge_plates[1]:
                    edge.boundary = True
                    self.boundaries.append(edge)
                    for tile in edge.tiles:
                        tile.boundary = True
    @timed
    def move_plates(self):
        """Convert relative plate motion at each boundary edge into an
        elevation contribution (converging plates raise the edge,
        diverging plates lower it)."""
        for edge in self.boundaries:
            for index, tile in enumerate(edge.tiles):
                normal_vector = edge.tiles[1-index].center - tile.center
                normal_vector /= magnitude(normal_vector) # sets magnitude to 1
                normal_force = dot(tile.plate.velocity, normal_vector)
                edge.elevation += (sign(normal_force) * sqrt(abs(normal_force))) # TODO: sqrt?
            edge.elevation += max([tile.plate.elevation for tile in edge.tiles]) # TODO: max?
    @timed
    def calculate_elevation(self):
        """Propagate elevation outward from plate-boundary vertices with a
        random decay, then average vertex elevations onto the tiles."""
        boundary_vertices = [vertex for boundary in self.boundaries
                             for vertex in boundary.vertices]
        boundary_vertices = list(set(boundary_vertices))
        for vertex in boundary_vertices:
            vertex.elevation = mean([edge.elevation for edge in vertex.edges
                                     if edge in self.boundaries])
        current_vertices = boundary_vertices
        completed_vertices = len(boundary_vertices)
        total_vertices = len(self.vertices)
        # log_map_size = 1 / log2(len(self.points))
        log_map_size = 8 / sqrt(len(self.points)) # TODO: calibrate
        while completed_vertices < total_vertices:
            # next breadth-first ring: vertices still at the -100 sentinel
            new_vertices = [new_vertex for current_vertex in current_vertices
                            for new_edge in current_vertex.edges
                            for new_vertex in new_edge.vertices
                            if new_vertex.elevation == -100]
            new_vertices = list(set(new_vertices))
            old_vertices = []
            to_remove = []
            for new_vertex in new_vertices:
                if random() < .5: # TODO: calibrate this
                    plate_elevation = new_vertex.edges[0].tiles[0].plate.elevation
                    new_vertex.elevation = (((.8 + random() * .2) ** log_map_size) *
                                            ((mean([vertex.elevation # TODO: coefficient
                                                    for edge in new_vertex.edges
                                                    for vertex in edge.vertices
                                                    if vertex.elevation != -100])) -
                                             plate_elevation)) + plate_elevation
                else:
                    # defer this vertex: revisit later from an assigned one
                    old_vertex = choice([old_vertex for old_edge in new_vertex.edges
                                         for old_vertex in old_edge.vertices
                                         if old_vertex.elevation != -100])
                    old_vertices.append(old_vertex)
                    to_remove.append(new_vertex)
            for vertex in to_remove:
                new_vertices.remove(vertex)
            completed_vertices += len(new_vertices)
            current_vertices = new_vertices
            current_vertices.extend(old_vertices)
        for tile in self.tiles:
            vertex_elevations = [vertex.elevation - self.ocean_elevation for vertex in tile.vertices]
            tile.elevation = mean(vertex_elevations) # TODO: refine
            tile.slope = max(vertex_elevations) - min(vertex_elevations)
            # TODO: tile.roughness based on coplanarity
    @timed
    def fill_oceans(self):
        """Flood-fill the ocean inward from below-sea-level border tiles."""
        for tile in self.tiles:
            if (max(tile.center) > .95 or min(tile.center) < .05) and tile.elevation < 0:
                tile.ocean = True
                self.ocean.append(tile)
        new_ocean = [neighbor for tile in self.ocean
                     for neighbor in tile.neighbors
                     if neighbor.elevation < 0 and not neighbor.ocean]
        # NOTE(review): the first ring collected above is only marked as
        # ocean if its tiles are reached again as neighbors inside the
        # loop below -- confirm whether these tiles should be added to
        # self.ocean directly.
        while len(new_ocean) > 0:
            future_ocean = [neighbor for tile in new_ocean
                            for neighbor in tile.neighbors
                            if neighbor.elevation < 0 and not neighbor.ocean]
            future_ocean = list(set(future_ocean))
            if len(future_ocean) > 0:
                self.ocean.extend(future_ocean)
                for tile in future_ocean:
                    tile.ocean = True
            new_ocean = future_ocean
        for tile in self.ocean:
            tile.water_depth = -tile.elevation
    def calculate_tradewinds(self):
        """Stub: tradewind simulation is not implemented yet."""
        # if abs(self.latitude) <= 5:
        pass
    @timed
    def display(self,
                show_grid = False,
                highlight_tile = [-1],
                show_centers = False,
                show_intersections = False,
                show_plates = False,
                show_plate_centers = False,
                show_plate_velocities = False,
                show_plate_boundaries = False,
                show_boundary_elevation = False,
                show_tile_elevation = True,
                show_vertex_elevation = False,
                show_tile_elevation_labels = False,
                show_water = True,
                clean = False,
                plate_test = False,
                elevation_test = False,
                xlim = [0.05, .95],
                ylim = [0.05, .95],
                window = True):
        """Render the map with matplotlib; show it or leave the figure
        open for saving depending on *window*.

        NOTE(review): the mutable list defaults (highlight_tile, xlim,
        ylim) are shared between calls.  They are only rebound, never
        mutated, so this is currently harmless, but None defaults would
        be safer.
        """
        if clean:
            # show_centers = False
            # show_intersections = False
            xlim = [.05, .95]
            ylim = [.05, .95]
        if plate_test:
            show_plates = True
            show_plate_centers = True
            show_plate_velocities = True
            show_plate_boundaries = True
            show_boundary_elevation = True
        if elevation_test:
            show_plate_boundaries = True
            show_boundary_elevation = True
            show_tile_elevation = True
        figure = pyplot.figure()
        axes = figure.gca()
        if show_boundary_elevation or show_tile_elevation or show_vertex_elevation:
            color_norm = mpl_colors.Normalize(vmin = -2, vmax = 2)
            color_map = pyplot.get_cmap('gist_earth')
            palette = colormap.ScalarMappable(norm = color_norm, cmap = color_map)
        if show_grid:
            line_segments = []
            for edge in self.edges:
                if len(edge.vertices) == 2:
                    line_segments.append([(x, y) for x, y in [vertex.coords
                                          for vertex in edge.vertices]])
            grid = LineCollection(line_segments,
                                  colors='k',
                                  lw=1.0,
                                  linestyle='solid')
            grid.set_alpha(1.0)
            axes.add_collection(grid)
        if show_plates:
            for plate in self.plates:
                for tile in plate.tiles:
                    pyplot.fill(*zip(*tile.vertex_coords), color = plate.color)
                if show_plate_centers:
                    pyplot.plot(plate.center[0], plate.center[1], 'ko')
                if show_plate_velocities:
                    pyplot.arrow(plate.center[0], plate.center[1],
                                 plate.velocity[0], plate.velocity[1],
                                 label = magnitude(plate.velocity))
        if show_plate_boundaries:
            line_segments = []
            colors = []
            for edge in self.boundaries:
                if len(edge.vertices) == 2:
                    line_segments.append([(x, y) for x, y in [vertex.coords
                                          for vertex in edge.vertices]])
                    if show_boundary_elevation:
                        colors.append(palette.to_rgba(edge.elevation))
                    else:
                        colors.append('k')
            borders = LineCollection(line_segments,
                                     colors=colors,
                                     lw=3.0,
                                     linestyle='solid')
            borders.set_alpha(1.0)
            axes.add_collection(borders)
        if show_tile_elevation or show_tile_elevation_labels:
            for tile in self.tiles:
                if show_tile_elevation:
                    water_divider = -.55
                    water_max = water_divider - .05
                    land_min = water_divider + .05
                    if show_water and tile.water_depth > 0:
                        pyplot.fill(*zip(*tile.vertex_coords),
                                    color = palette.to_rgba(water_max -
                                                            tile.water_depth / 4))
                    else:
                        pyplot.fill(*zip(*tile.vertex_coords),
                                    color = palette.to_rgba(tile.elevation *
                                                            (2 - land_min) / 4 - land_min))
                if show_tile_elevation_labels:
                    pyplot.text(tile.center[0], tile.center[1], round(tile.elevation, 2))
        if show_vertex_elevation:
            for vertex in self.vertices:
                pyplot.plot(vertex.coords[0], vertex.coords[1],
                            color = palette.to_rgba(vertex.elevation),
                            marker = 'o')
        highlight_tile = asarray(highlight_tile)
        if numpy.all(highlight_tile >= 0) and numpy.all(highlight_tile < len(self.tiles)):
            for tile in highlight_tile:
                pyplot.fill(*zip(*self.tiles[tile].vertex_coords), 'y')
        pyplot.xlim(xlim[0], xlim[1])
        pyplot.ylim(ylim[0], ylim[1])
        if window:
            pyplot.show()
        else:
            return
if __name__ == '__main__':
pass | [
"os.mkdir",
"scipy.spatial.Voronoi",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.mean",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.sin",
"os.chdir",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.close",
"matplotlib.cm.ScalarMappable",
"matplotlib.is_interactive",
... | [((816, 832), 'matplotlib.is_interactive', 'is_interactive', ([], {}), '()\n', (830, 832), False, 'from matplotlib import pyplot, is_interactive\n'), ((838, 851), 'matplotlib.pyplot.ioff', 'pyplot.ioff', ([], {}), '()\n', (849, 851), False, 'from matplotlib import pyplot, is_interactive\n'), ((957, 984), 'numpy.concatenate', 'concatenate', (['(x[1:], x[:1])'], {}), '((x[1:], x[:1]))\n', (968, 984), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((994, 1021), 'numpy.concatenate', 'concatenate', (['(y[1:], y[:1])'], {}), '((y[1:], y[:1]))\n', (1005, 1021), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((1350, 1365), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (1355, 1365), False, 'from functools import wraps\n'), ((1925, 1938), 'os.chdir', 'chdir', (['folder'], {}), '(folder)\n', (1930, 1938), False, 'from os import listdir, chdir, mkdir\n'), ((2161, 2178), 'os.mkdir', 'mkdir', (['new_folder'], {}), '(new_folder)\n', (2166, 2178), False, 'from os import listdir, chdir, mkdir\n'), ((2183, 2200), 'os.chdir', 'chdir', (['new_folder'], {}), '(new_folder)\n', (2188, 2200), False, 'from os import listdir, chdir, mkdir\n'), ((1428, 1434), 'time.time', 'time', ([], {}), '()\n', (1432, 1434), False, 'from time import time\n'), ((1488, 1494), 'time.time', 'time', ([], {}), '()\n', (1492, 1494), False, 'from time import time\n'), ((4114, 4127), 'numpy.array', 'array', (['[0, 0]'], {}), '([0, 0])\n', (4119, 4127), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((4257, 4275), 'numpy.random.normal', 'normal', ([], {'scale': '(0.25)'}), '(scale=0.25)\n', (4263, 4275), False, 'from numpy.random import random, randint, normal\n'), ((4499, 4527), 'numpy.random.random', 'random', (['(number_of_tiles, 2)'], {}), '((number_of_tiles, 2))\n', (4505, 4527), False, 'from numpy.random import random, randint, normal\n'), 
((5647, 5679), 'numpy.random.randint', 'randint', (['(-60)', '(61 - latitude_span)'], {}), '(-60, 61 - latitude_span)\n', (5654, 5679), False, 'from numpy.random import random, randint, normal\n'), ((6340, 6360), 'scipy.spatial.Voronoi', 'Voronoi', (['self.points'], {}), '(self.points)\n', (6347, 6360), False, 'from scipy.spatial import Voronoi\n'), ((16274, 16289), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (16287, 16289), False, 'from matplotlib import pyplot, is_interactive\n'), ((19832, 19855), 'numpy.asarray', 'asarray', (['highlight_tile'], {}), '(highlight_tile)\n', (19839, 19855), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((20089, 20118), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['xlim[0]', 'xlim[1]'], {}), '(xlim[0], xlim[1])\n', (20100, 20118), False, 'from matplotlib import pyplot, is_interactive\n'), ((20127, 20156), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['ylim[0]', 'ylim[1]'], {}), '(ylim[0], ylim[1])\n', (20138, 20156), False, 'from matplotlib import pyplot, is_interactive\n'), ((1976, 1985), 'os.listdir', 'listdir', ([], {}), '()\n', (1983, 1985), False, 'from os import listdir, chdir, mkdir\n'), ((2557, 2576), 'matplotlib.pyplot.close', 'pyplot.close', (['"""all"""'], {}), "('all')\n", (2569, 2576), False, 'from matplotlib import pyplot, is_interactive\n'), ((4180, 4189), 'numpy.random.random', 'random', (['(3)'], {}), '(3)\n', (4186, 4189), False, 'from numpy.random import random, randint, normal\n'), ((4559, 4567), 'numpy.random.random', 'random', ([], {}), '()\n', (4565, 4567), False, 'from numpy.random import random, randint, normal\n'), ((5606, 5620), 'numpy.random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (5613, 5620), False, 'from numpy.random import random, randint, normal\n'), ((6622, 6642), 'scipy.spatial.Voronoi', 'Voronoi', (['self.points'], {}), '(self.points)\n', (6629, 6642), False, 'from scipy.spatial import Voronoi\n'), ((7264, 7283), 
'numpy.asarray', 'asarray', (['new_points'], {}), '(new_points)\n', (7271, 7283), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((8474, 8533), 'numpy.asarray', 'asarray', (['[self.voronoi.vertices[k] for k in vertex_indices]'], {}), '([self.voronoi.vertices[k] for k in vertex_indices])\n', (8481, 8533), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((11256, 11330), 'numpy.mean', 'mean', (['[edge.elevation for edge in vertex.edges if edge in self.boundaries]'], {}), '([edge.elevation for edge in vertex.edges if edge in self.boundaries])\n', (11260, 11330), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((13582, 13605), 'numpy.mean', 'mean', (['vertex_elevations'], {}), '(vertex_elevations)\n', (13586, 13605), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((16436, 16473), 'matplotlib.colors.Normalize', 'mpl_colors.Normalize', ([], {'vmin': '(-2)', 'vmax': '(2)'}), '(vmin=-2, vmax=2)\n', (16456, 16473), True, 'from matplotlib import colors as mpl_colors\n'), ((16502, 16531), 'matplotlib.pyplot.get_cmap', 'pyplot.get_cmap', (['"""gist_earth"""'], {}), "('gist_earth')\n", (16517, 16531), False, 'from matplotlib import pyplot, is_interactive\n'), ((16554, 16610), 'matplotlib.cm.ScalarMappable', 'colormap.ScalarMappable', ([], {'norm': 'color_norm', 'cmap': 'color_map'}), '(norm=color_norm, cmap=color_map)\n', (16577, 16610), True, 'from matplotlib import cm as colormap\n'), ((16939, 17007), 'matplotlib.collections.LineCollection', 'LineCollection', (['line_segments'], {'colors': '"""k"""', 'lw': '(1.0)', 'linestyle': '"""solid"""'}), "(line_segments, colors='k', lw=1.0, linestyle='solid')\n", (16953, 17007), False, 'from matplotlib.collections import LineCollection\n'), ((18292, 18363), 'matplotlib.collections.LineCollection', 'LineCollection', (['line_segments'], {'colors': 
'colors', 'lw': '(3.0)', 'linestyle': '"""solid"""'}), "(line_segments, colors=colors, lw=3.0, linestyle='solid')\n", (18306, 18363), False, 'from matplotlib.collections import LineCollection\n'), ((19880, 19910), 'numpy.all', 'numpy.all', (['(highlight_tile >= 0)'], {}), '(highlight_tile >= 0)\n', (19889, 19910), False, 'import numpy\n'), ((20188, 20201), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (20199, 20201), False, 'from matplotlib import pyplot, is_interactive\n'), ((1225, 1246), 'numpy.dot', 'dot', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (1228, 1246), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((2485, 2491), 'time.time', 'time', ([], {}), '()\n', (2489, 2491), False, 'from time import time\n'), ((9088, 9096), 'numpy.random.random', 'random', ([], {}), '()\n', (9094, 9096), False, 'from numpy.random import random, randint, normal\n'), ((10607, 10631), 'numpy.linalg.norm', 'magnitude', (['normal_vector'], {}), '(normal_vector)\n', (10616, 10631), True, 'from numpy.linalg import norm as magnitude\n'), ((10685, 10724), 'numpy.dot', 'dot', (['tile.plate.velocity', 'normal_vector'], {}), '(tile.plate.velocity, normal_vector)\n', (10688, 10724), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((1275, 1293), 'numpy.linalg.norm', 'magnitude', (['vector1'], {}), '(vector1)\n', (1284, 1293), True, 'from numpy.linalg import norm as magnitude\n'), ((1296, 1314), 'numpy.linalg.norm', 'magnitude', (['vector2'], {}), '(vector2)\n', (1305, 1314), True, 'from numpy.linalg import norm as magnitude\n'), ((6964, 7009), 'numpy.asarray', 'asarray', (['[vor.vertices[i, :] for i in region]'], {}), '([vor.vertices[i, :] for i in region])\n', (6971, 7009), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((10761, 10779), 'numpy.sign', 'sign', (['normal_force'], {}), '(normal_force)\n', (10765, 10779), False, 
'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((12193, 12201), 'numpy.random.random', 'random', ([], {}), '()\n', (12199, 12201), False, 'from numpy.random import random, randint, normal\n'), ((12872, 12997), 'random.choice', 'choice', (['[old_vertex for old_edge in new_vertex.edges for old_vertex in old_edge.\n vertices if old_vertex.elevation != -100]'], {}), '([old_vertex for old_edge in new_vertex.edges for old_vertex in\n old_edge.vertices if old_vertex.elevation != -100])\n', (12878, 12997), False, 'from random import choice\n'), ((17440, 17491), 'matplotlib.pyplot.plot', 'pyplot.plot', (['plate.center[0]', 'plate.center[1]', '"""ko"""'], {}), "(plate.center[0], plate.center[1], 'ko')\n", (17451, 17491), False, 'from matplotlib import pyplot, is_interactive\n'), ((8968, 8976), 'numpy.random.random', 'random', ([], {}), '()\n', (8974, 8976), False, 'from numpy.random import random, randint, normal\n'), ((9027, 9041), 'numpy.cos', 'cos', (['direction'], {}), '(direction)\n', (9030, 9041), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((9043, 9057), 'numpy.sin', 'sin', (['direction'], {}), '(direction)\n', (9046, 9057), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((12446, 12560), 'numpy.mean', 'mean', (['[vertex.elevation for edge in new_vertex.edges for vertex in edge.vertices if\n vertex.elevation != -100]'], {}), '([vertex.elevation for edge in new_vertex.edges for vertex in edge.\n vertices if vertex.elevation != -100])\n', (12450, 12560), False, 'from numpy import sqrt, dot, sign, mean, pi, cos, sin, array, asarray, concatenate\n'), ((17730, 17755), 'numpy.linalg.norm', 'magnitude', (['plate.velocity'], {}), '(plate.velocity)\n', (17739, 17755), True, 'from numpy.linalg import norm as magnitude\n'), ((12365, 12373), 'numpy.random.random', 'random', ([], {}), '()\n', (12371, 12373), False, 'from numpy.random import 
random, randint, normal\n')] |
"""MiniMap widget.
"""
import math
import numpy as np
from qtpy.QtGui import QImage, QPixmap
from qtpy.QtWidgets import QLabel
from ....layers.image.experimental import OctreeIntersection
from ....layers.image.experimental.octree_image import OctreeImage
# Width (in pixels) of the minimap bitmap shown in the dockable widget.
MAP_WIDTH = 200
# Tiles are "seen" if they are visible within the current view, otherwise "unseen".
COLOR_SEEN = (255, 0, 0, 255)  # red
COLOR_UNSEEN = (80, 80, 80, 255)  # gray
# The view bounds rectangle is drawn on top of the seen/unseen tiles.
COLOR_VIEW = (227, 220, 111, 255)  # yellow
class MiniMap(QLabel):
    """A small bitmap that shows the view bounds and which tiles are seen.

    Parameters
    ----------
    viewer : Viewer
        The napari viewer.
    layer : OctreeImage
        The octree image we are viewing.
    """

    # Border between the tiles is twice this.
    HALF_BORDER = 1

    def __init__(self, viewer, layer: OctreeImage):
        super().__init__()
        self.viewer = viewer
        self.layer = layer

    @property
    def data_corners(self):
        """Return data corners for the current view in this layer."""
        # TODO_OCTREE: this calculation belongs in the layer; query the
        # layer for these corner pixels instead of computing them here.
        num_dims = self.layer.ndim
        world_corners = (
            self.viewer.window.qt_viewer._canvas_corners_in_world[:, -num_dims:]
        )
        transform = self.layer._transforms[1:].simplified
        return transform.inverse(world_corners)

    def update(self) -> None:
        """Update the minimap to show the latest intersection."""
        # Performing the intersection here is cheap.
        result = self.layer.get_intersection(self.data_corners)
        if result is None:
            return
        self._draw_map(result)

    def _draw_map(self, intersection: OctreeIntersection) -> None:
        """Render *intersection* into a pixmap and show it on this label.

        Parameters
        ----------
        intersection : OctreeIntersection
            The intersection we are drawing on the map.
        """
        bitmap = self._get_map_data(intersection)
        rows, cols = bitmap.shape[:2]
        qimage = QImage(bitmap, cols, rows, QImage.Format_RGBA8888)
        self.setPixmap(QPixmap.fromImage(qimage))

    def _get_map_data(self, intersection: OctreeIntersection) -> np.ndarray:
        """Build the RGBA image data to be drawn in the map.

        Parameters
        ----------
        intersection : OctreeIntersection
            Draw this intersection on the map.
        """
        level_shape = intersection.info.tile_shape
        aspect = intersection.info.octree_info.aspect
        map_height = math.ceil(MAP_WIDTH / aspect)
        # RGBA bitmap, initially all zeros.
        bitmap = np.zeros((MAP_WIDTH, map_height, 4), dtype=np.uint8)
        # One tile's extent in bitmap coordinates.
        tile_px = math.ceil(map_height / level_shape[1])
        # Leave a small gap between neighboring tiles.
        pad = self.HALF_BORDER
        # TODO_OCTREE: can these for loops be removed?  They iterate over
        # *tiles*, not pixels, but could still add up.
        for tile_row in range(0, level_shape[0]):
            for tile_col in range(0, level_shape[1]):
                color = (
                    COLOR_SEEN
                    if intersection.is_visible(tile_row, tile_col)
                    else COLOR_UNSEEN
                )
                top = tile_row * tile_px + pad
                bottom = top + tile_px - pad
                left = tile_col * tile_px + pad
                right = left + tile_px - pad
                bitmap[top:bottom, left:right, :] = color
        self._draw_view(bitmap, intersection)
        return bitmap

    def _draw_view(self, data, intersection: OctreeIntersection) -> None:
        """Paint the view rectangle onto the map data.

        Parameters
        ----------
        data : np.ndarray
            Draw the view into this data.
        intersection : OctreeIntersection
            Draw the view in this intersection.
        """
        last_row = data.shape[0] - 1
        last_col = data.shape[1] - 1
        row_span = (intersection.normalized_rows * last_row).astype(int)
        col_span = (intersection.normalized_cols * last_col).astype(int)
        data[row_span[0] : row_span[1], col_span[0] : col_span[1], :] = COLOR_VIEW
| [
"qtpy.QtGui.QImage",
"qtpy.QtGui.QPixmap.fromImage",
"numpy.zeros",
"math.ceil"
] | [((2229, 2280), 'qtpy.QtGui.QImage', 'QImage', (['data', 'width', 'height', 'QImage.Format_RGBA8888'], {}), '(data, width, height, QImage.Format_RGBA8888)\n', (2235, 2280), False, 'from qtpy.QtGui import QImage, QPixmap\n'), ((2929, 2967), 'numpy.zeros', 'np.zeros', (['bitmap_shape'], {'dtype': 'np.uint8'}), '(bitmap_shape, dtype=np.uint8)\n', (2937, 2967), True, 'import numpy as np\n'), ((3032, 3071), 'math.ceil', 'math.ceil', (['(map_shape[1] / tile_shape[1])'], {}), '(map_shape[1] / tile_shape[1])\n', (3041, 3071), False, 'import math\n'), ((2304, 2328), 'qtpy.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (2321, 2328), False, 'from qtpy.QtGui import QImage, QPixmap\n'), ((2773, 2802), 'math.ceil', 'math.ceil', (['(MAP_WIDTH / aspect)'], {}), '(MAP_WIDTH / aspect)\n', (2782, 2802), False, 'import math\n')] |
import cached_property
import numpy as np
from einops import rearrange
import pb_bss_eval
# TODO: Should mir_eval_sxr_selection stay in InputMetrics?
# TODO: Add SI-SDR even though there are arguments against it.
# TODO: Explain, why we compare BSS-Eval against source and not image.
# TODO: Explain, why invasive SXR does not work with, e.g., Nara-WPE.
def _get_err_msg(msg, metrics: 'OutputMetrics'):
    """Append a shape overview of *metrics* to an error message string."""
    parts = [
        f'{msg}',
        '\nShapes: (is shape) (symbolic shape)',
        f'\n\tspeech_prediction: {metrics.speech_prediction.shape} (K_target, N)',  # noqa
        f'\n\tspeech_source: {metrics.speech_source.shape} (K_source, N)',
    ]
    if metrics.speech_contribution is not None:
        parts.append(
            f'\n\tspeech_contribution: '
            f'{metrics.speech_contribution.shape} (K_source, K_target, N)'
        )
    if metrics.noise_contribution is not None:
        parts.append(
            f'\n\tnoise_contribution: '
            f'{metrics.noise_contribution.shape} (K_target, N)'
        )
    return ''.join(parts)
class VerboseKeyError(KeyError):
    """KeyError whose message lists close matches to the missing key.

    Expected args: (item, keys) or (item, keys, extra_message).  Any
    other arity falls back to the plain KeyError message.
    """

    def __str__(self):
        if len(self.args) not in (2, 3):
            return super().__str__()
        import difflib
        item, keys = self.args[0], self.args[1]
        # Suggestions are sorted by their similarity.
        matches = difflib.get_close_matches(
            item, keys, cutoff=0, n=100
        )
        text = f'{item!r}.\nClose matches: {matches!r}'
        if len(self.args) == 3:
            text = f'{text}\n{self.args[2]}'
        return text
class InputMetrics:
    def __init__(
            self,
            observation: 'Shape(D, N)',
            speech_source: 'Shape(K_source, N)',
            speech_image: 'Shape(K_source, D, N)'=None,
            noise_image: 'Shape(D, N)'=None,
            sample_rate: int = None,
            enable_si_sdr: bool = False,
    ):
        """
        Args:
            observation: When you pass D channels, you get D metrics per
                speaker. If you want to select a reference channel, you need
                to slice the input to just have a singleton channel dimension.
            speech_source:
            speech_image:
            noise_image:
            sample_rate:
            enable_si_sdr: Since SI-SDR is only well defined for non-reverb
                single-channel data, it is disabled by default.
        """
        self.observation = observation
        self.speech_source = speech_source
        self.speech_image = speech_image
        self.noise_image = noise_image
        self.sample_rate = sample_rate
        # Promote 1-D signals to 2-D before any shape is read below.
        self.enable_1d_inputs()
        # Invasive (image-based) metrics need both image signals.
        self._has_image_signals \
            = (speech_image is not None and noise_image is not None)
        self.samples = self.observation.shape[-1]
        self.channels = self.observation.shape[-2]
        self.K_source = self.speech_source.shape[0]
        self.enable_si_sdr = enable_si_sdr
        self.check_inputs()
def enable_1d_inputs(self):
if self.observation.ndim == 1:
self.observation = self.observation[None]
if self.speech_source.ndim == 1:
self.speech_source = self.speech_source[None]
    def check_inputs(self):
        """Validate that observation and speech_source are both 2-D
        (channels/sources x samples)."""
        assert self.observation.ndim == 2, self.observation.shape
        assert self.speech_source.ndim == 2, self.speech_source.shape
    @cached_property.cached_property
    def mir_eval(self):
        """BSS-Eval (mir_eval) score dict of the raw observation.

        Each source is paired with every observation channel, so every
        speaker gets one score per channel.  No permutation solving is
        performed (compute_permutation=False).
        """
        return pb_bss_eval.evaluation.mir_eval_sources(
            reference=rearrange(
                [self.speech_source] * self.channels,
                'channels sources samples -> sources channels samples'
            ),
            estimation=rearrange(
                [self.observation] * self.K_source,
                'sources channels samples -> sources channels samples'
            ),
            return_dict=True,
            compute_permutation=False,
        )
@cached_property.cached_property
def mir_eval_sdr(self):
return self.mir_eval['sdr']
@cached_property.cached_property
def mir_eval_sir(self):
return self.mir_eval['sir']
@cached_property.cached_property
def mir_eval_sar(self):
return self.mir_eval['sar']
@cached_property.cached_property
def pesq(self):
return pb_bss_eval.evaluation.pesq(
rearrange(
[self.speech_source] * self.channels,
'channels sources samples -> sources channels samples'
),
[self.observation] * self.K_source,
sample_rate=self.sample_rate,
)
@cached_property.cached_property
def invasive_sxr(self):
from pb_bss_eval.evaluation.sxr_module import input_sxr
invasive_sxr = input_sxr(
rearrange(
self.speech_image,
'sources sensors samples -> sources sensors samples'
),
rearrange(self.noise_image, 'sensors samples -> sensors samples'),
average_sources=False,
average_channels=False,
return_dict=True,
)
return invasive_sxr
@cached_property.cached_property
def invasive_sdr(self):
return self.invasive_sxr['sdr']
@cached_property.cached_property
def invasive_sir(self):
return self.invasive_sxr['sir']
@cached_property.cached_property
def invasive_snr(self):
return self.invasive_sxr['snr']
@cached_property.cached_property
def stoi(self):
scores = pb_bss_eval.evaluation.stoi(
reference=rearrange(
[self.speech_source] * self.channels,
'channels sources samples -> sources channels samples'
),
estimation=rearrange(
[self.observation] * self.K_source,
'sources channels samples -> sources channels samples'
),
sample_rate=self.sample_rate,
)
return scores
@cached_property.cached_property
def si_sdr(self):
if self.enable_si_sdr:
return pb_bss_eval.evaluation.si_sdr(
# Shape: (sources, 1, samples)
reference=self.speech_source[:, None, :],
# Shape: (1, channels, samples)
estimation=self.observation[None, :, :],
)
else:
raise ValueError(
'SI-SDR is disabled by default since it is only well-defined '
'for non-reverberant single-channel data. Enable it with '
'`enable_si_sdr=True`.'
)
def _available_metric_names(self):
metric_names = [
'pesq',
'stoi',
'mir_eval_sdr',
'mir_eval_sir',
'mir_eval_sar',
]
if self.enable_si_sdr:
metric_names.append('si_sdr')
if self._has_image_signals:
metric_names.append('invasive_sdr')
metric_names.append('invasive_snr')
metric_names.append('invasive_sir')
return tuple(metric_names)
def _disabled_metric_names(self):
disabled = []
if not self.enable_si_sdr:
disabled.append('si_sdr')
if not self._has_image_signals:
disabled.append('invasive_sdr')
disabled.append('invasive_snr')
disabled.append('invasive_sir')
return disabled
def as_dict(self):
return {name: self[name] for name in self._available_metric_names()}
# Aliases
@property
def sdr(self):
return self.mir_eval_sdr
@property
def sir(self):
return self.mir_eval_sir
@property
def sar(self):
return self.mir_eval_sar
def __getitem__(self, item):
if isinstance(item, list):
return {name: self[name] for name in item}
assert isinstance(item, str), (type(item), item)
try:
return getattr(self, item)
except AttributeError:
pass
raise VerboseKeyError(
item,
self._available_metric_names(),
f'Disabled: {self._disabled_metric_names()}',
)
class OutputMetrics:
    """Evaluation metrics of an enhanced (predicted) speech signal.

    Scores the speech prediction against the clean sources; optionally
    resolves the source/estimate permutation via BSS-Eval. All metrics
    are computed lazily and cached on first access.
    """
    def __init__(
            self,
            speech_prediction: 'Shape(K_target, N)',
            speech_source: 'Shape(K_source, N)',
            speech_contribution: 'Shape(K_source, K_target, N)'=None,
            noise_contribution: 'Shape(K_target, N)'=None,
            sample_rate: int = None,
            enable_si_sdr: bool = False,
            compute_permutation: bool = True,
    ):
        """
        Args:
            speech_prediction: Shape(K_target, N)
                The prediction of the source signal.
            speech_source: Shape(K_source, N)
                The true source signal, before the reverberation.
            speech_contribution: Shape(K_source, K_target, N)
                Optional for linear enhancements. See below.
            noise_contribution: Shape(K_target, N)
                Optional for linear enhancements. See below.
            sample_rate: int
                pesq and stoi need the sample rate.
                In pesq the sample rate defines the mode:
                    8000: narrow band (nb)
                    8000: wide band (wb)
            enable_si_sdr: Since SI-SDR is only well defined for non-reverb
                single-channel data, it is disabled by default.
            compute_permutation: whether to realign sources and estimates
                according to SDR values.
        speech_contribution and noise_contribution can only be calculated for
        linear system and are used for the calculation of invasive_sxr.
        Use speech image (reverberated speech source) and apply for each source
        the enhancement for each target speaker enhancement. The same for the
        noise and each target speaker.
        Example:
            >>> from IPython.lib.pretty import pprint
            >>> metrics = OutputMetrics(
            ...     speech_prediction=np.array([[1., 2., 3., 4.] * 1000,
            ...                                 [4., 3., 2., 1.] * 1000]),
            ...     speech_source=np.array([[1., 2., 2., 3., 2.] * 800,
            ...                             [4., 3., 3., 2., 3.] * 800]),
            ...     sample_rate=8000,
            ... )
        # Obtain all metrics (recommended)
            >>> with np.printoptions(precision=4):
            ...     pprint(metrics.as_dict())
            {'pesq': array([1.2235, 1.225 ]),
             'stoi': array([0.0503, 0.0638]),
             'mir_eval_sdr': array([7.2565, 7.3303]),
             'mir_eval_sir': array([25.6896, 46.638 ]),
             'mir_eval_sar': array([7.3309, 7.3309]),
             'mir_eval_selection': array([0, 1])}
        # Obtain particular metric (e.g. pesq)
            >>> metrics.pesq
            array([1.22345543, 1.2250005 ])
        # Obtain multiple metrics (e.g. pesq and stoi)
            >>> pprint({m: metrics[m] for m in ['pesq', 'stoi']})
            {'pesq': array([1.22345543, 1.2250005 ]),
             'stoi': array([0.05026565, 0.06377457])}
        """
        self.speech_prediction = speech_prediction
        self.speech_source = speech_source
        self.speech_contribution = speech_contribution
        self.noise_contribution = noise_contribution
        self.sample_rate = sample_rate
        self.compute_permutation = compute_permutation
        # Promote 1D inputs to 2D before any shape inspection below.
        self.enable_1d_inputs()
        # The invasive SXR metrics require both contribution signals.
        self._has_contribution_signals = (
            speech_contribution is not None
            and
            noise_contribution is not None
        )
        self.samples = self.speech_prediction.shape[-1]
        self.K_source = self.speech_source.shape[0]
        self.K_target = self.speech_prediction.shape[0]
        self.enable_si_sdr = enable_si_sdr
        self.check_inputs()
    def enable_1d_inputs(self):
        """Add a singleton leading axis to plain 1D input signals."""
        if self.speech_source.ndim == 1:
            self.speech_source = self.speech_source[None]
        if self.speech_prediction.ndim == 1:
            self.speech_prediction = self.speech_prediction[None]
    def check_inputs(self):
        """Validate shapes and the optional contribution signals."""
        assert self.speech_prediction.ndim == 2, self.speech_prediction.shape
        assert self.speech_source.ndim == 2, self.speech_source.shape
        assert self.K_source <= 5, _get_err_msg(
            f'Number of source speakers (K_source) of speech_source is '
            f'{self.K_source}. Expect a reasonable value of 5 or less.',
            self
        )
        assert self.K_target <= 5, _get_err_msg(
            f'Number of target speakers (K_target) of speech_prediction is '
            f'{self.K_target}. Expect a reasonable value of 5 or less.',
            self
        )
        # NOTE(review): the message below says "K_target + 1" although the
        # condition checks against K_source + 1; the message looks wrong.
        assert self.K_target in [self.K_source, self.K_source+1], _get_err_msg(
            f'Number of target speakers (K_target) should be equal to '
            f'number of source speakers (K_source) or K_target + 1',
            self
        )
        assert self.speech_source.shape[1] == self.samples, _get_err_msg(
            'Num samples (N) of speech_source does not fit to the'
            'shape from speech_prediction',
            self
        )
        if (
            self.speech_contribution is not None
            and self.noise_contribution is not None
        ):
            # Redundant: the enclosing condition already guarantees this.
            assert self.noise_contribution is not None, self.noise_contribution
            K_source_, K_target_, samples_ = self.speech_contribution.shape
            assert self.samples == samples_, _get_err_msg(
                'Num samples (N) of speech_contribution does not fit to the'
                'shape from speech_prediction',
                self
            )
            assert self.K_target == K_target_, _get_err_msg(
                'Num target speakers (K_target) of speech_contribution does '
                'not fit to the shape from speech_prediction',
                self
            )
            # NOTE(review): K_source_ from the unpacking above is never
            # compared; this was likely meant to assert
            # self.K_source == K_source_ -- confirm.
            assert self.K_source < 5, _get_err_msg(
                'Num source speakers (K_source) of speech_contribution does '
                'not fit to the shape from speech_source',
                self
            )
            K_target_, samples_ = self.noise_contribution.shape
            assert self.samples == samples_, _get_err_msg(
                'Num samples (N) of noise_contribution does not fit to the '
                'shape from speech_prediction',
                self
            )
            assert self.K_target == K_target_, _get_err_msg(
                'Num target speakers (K_target) of noise_contribution does '
                'not fit to the shape from speech_prediction',
                self
            )
            # For a linear system the prediction must equal the sum of all
            # contributions; allow only a small numerical deviation.
            deviation = np.std(np.abs(
                self.speech_prediction
                - np.sum(self.speech_contribution, axis=0)
                - self.noise_contribution
            ))
            assert deviation < 1e-3, (
                'The deviation of speech prediction and the sum of individual '
                f'contributions is expected to be low: {deviation}'
            )
        else:
            assert (
                self.speech_contribution is None
                and self.noise_contribution is None
            ), (
                'Expect that speech_contribution and noise_contribution are '
                'both None or given.\n'
                'Got:\n'
                f'speech_contribution: {self.speech_contribution}\n'
                f'noise_contribution: {self.noise_contribution}'
            )
    @cached_property.cached_property
    def mir_eval_selection(self):
        # Permutation that maps estimates to sources, from BSS-Eval.
        return self.mir_eval['selection']
    @cached_property.cached_property
    def speech_prediction_selection(self):
        """Prediction reordered to match the sources via mir_eval."""
        assert self.speech_prediction.ndim == 2, self.speech_prediction.shape
        assert self.speech_prediction.shape[0] < 10, self.speech_prediction.shape # NOQA
        if not self.compute_permutation:
            return self.speech_prediction
        # One extra estimate (e.g. a noise estimate) is tolerated.
        assert (
            self.speech_prediction.shape[0]
            in (len(self.mir_eval_selection), len(self.mir_eval_selection) + 1)
        ), self.speech_prediction.shape
        return self.speech_prediction[self.mir_eval_selection]
    @cached_property.cached_property
    def mir_eval(self):
        """BSS-Eval result dict (includes the source/estimate selection)."""
        return pb_bss_eval.evaluation.mir_eval_sources(
            reference=self.speech_source,
            estimation=self.speech_prediction,
            return_dict=True,
        )
    @cached_property.cached_property
    def mir_eval_sdr(self):
        # Signal to Distortion Ratio from BSS-Eval.
        return self.mir_eval['sdr']
    @cached_property.cached_property
    def mir_eval_sir(self):
        # Signal to Interference Ratio from BSS-Eval.
        return self.mir_eval['sir']
    @cached_property.cached_property
    def mir_eval_sar(self):
        # Signal to Artifacts Ratio from BSS-Eval.
        return self.mir_eval['sar']
    @cached_property.cached_property
    def pesq(self):
        """PESQ of the (permutation-aligned) prediction."""
        return pb_bss_eval.evaluation.pesq(
            reference=self.speech_source,
            estimation=self.speech_prediction_selection,
            sample_rate=self.sample_rate,
        )
    @cached_property.cached_property
    def invasive_sxr(self):
        """Invasive SDR/SIR/SNR dict from the contribution signals.

        The contributions are aligned with the mir_eval selection before
        being scored.
        """
        from pb_bss_eval.evaluation.sxr_module import output_sxr
        invasive_sxr = output_sxr(
            rearrange(
                self.speech_contribution,
                'sources targets samples -> sources targets samples'
            )[:, self.mir_eval_selection, :],
            rearrange(
                self.noise_contribution, 'targets samples -> targets samples'
            )[self.mir_eval_selection, :],
            average_sources=False,
            return_dict=True,
        )
        return invasive_sxr
    @cached_property.cached_property
    def invasive_sdr(self):
        return self.invasive_sxr['sdr']
    @cached_property.cached_property
    def invasive_sir(self):
        return self.invasive_sxr['sir']
    @cached_property.cached_property
    def invasive_snr(self):
        return self.invasive_sxr['snr']
    @cached_property.cached_property
    def stoi(self):
        """STOI of the (permutation-aligned) prediction."""
        return pb_bss_eval.evaluation.stoi(
            reference=self.speech_source,
            estimation=self.speech_prediction_selection,
            sample_rate=self.sample_rate,
        )
    @cached_property.cached_property
    def si_sdr(self):
        """Scale-Invariant SDR; raises unless ``enable_si_sdr`` is set."""
        if self.enable_si_sdr:
            return pb_bss_eval.evaluation.si_sdr(
                reference=self.speech_source,
                estimation=self.speech_prediction_selection,
            )
        else:
            raise ValueError(
                'SI-SDR is disabled by default since it is only well-defined '
                'for non-reverberant single-channel data. Enable it with '
                '`enable_si_sdr=True`.'
            )
    def _available_metric_names(self):
        """Names of the metrics computable from the given inputs."""
        metric_names = [
            'pesq',
            'stoi',
            'mir_eval_sdr',
            'mir_eval_sir',
            'mir_eval_sar',
        ]
        if self.compute_permutation:
            metric_names.append('mir_eval_selection')
        if self.enable_si_sdr:
            metric_names.append('si_sdr')
        if self._has_contribution_signals:
            metric_names.append('invasive_sdr')
            metric_names.append('invasive_snr')
            metric_names.append('invasive_sir')
        return tuple(metric_names)
    def _disabled_metric_names(self):
        """Names of the metrics the given inputs cannot support."""
        disabled = []
        if not self.compute_permutation:
            disabled.append('mir_eval_selection')
        if not self.enable_si_sdr:
            disabled.append('si_sdr')
        if not self._has_contribution_signals:
            disabled.append('invasive_sdr')
            disabled.append('invasive_snr')
            disabled.append('invasive_sir')
        return disabled
    def as_dict(self):
        """Compute and return all available metrics as ``{name: value}``."""
        return {name: self[name] for name in self._available_metric_names()}
    # Aliases
    @property
    def sdr(self):
        return self.mir_eval_sdr
    @property
    def sir(self):
        return self.mir_eval_sir
    @property
    def sar(self):
        return self.mir_eval_sar
    def __getitem__(self, item):
        """Dict-style metric access; a list of names yields a dict."""
        if isinstance(item, list):
            return {name: self[name] for name in item}
        assert isinstance(item, str), (type(item), item)
        try:
            return getattr(self, item)
        except AttributeError:
            pass
        # Raised outside the except block so the unrelated AttributeError
        # is not chained as the exception context.
        raise VerboseKeyError(
            item,
            self._available_metric_names(),
            f'Disabled: {self._disabled_metric_names()}',
        )
| [
"numpy.sum",
"difflib.get_close_matches",
"pb_bss_eval.evaluation.stoi",
"pb_bss_eval.evaluation.si_sdr",
"einops.rearrange",
"pb_bss_eval.evaluation.mir_eval_sources",
"pb_bss_eval.evaluation.pesq"
] | [((16593, 16719), 'pb_bss_eval.evaluation.mir_eval_sources', 'pb_bss_eval.evaluation.mir_eval_sources', ([], {'reference': 'self.speech_source', 'estimation': 'self.speech_prediction', 'return_dict': '(True)'}), '(reference=self.speech_source,\n estimation=self.speech_prediction, return_dict=True)\n', (16632, 16719), False, 'import pb_bss_eval\n'), ((17142, 17279), 'pb_bss_eval.evaluation.pesq', 'pb_bss_eval.evaluation.pesq', ([], {'reference': 'self.speech_source', 'estimation': 'self.speech_prediction_selection', 'sample_rate': 'self.sample_rate'}), '(reference=self.speech_source, estimation=self.\n speech_prediction_selection, sample_rate=self.sample_rate)\n', (17169, 17279), False, 'import pb_bss_eval\n'), ((18306, 18443), 'pb_bss_eval.evaluation.stoi', 'pb_bss_eval.evaluation.stoi', ([], {'reference': 'self.speech_source', 'estimation': 'self.speech_prediction_selection', 'sample_rate': 'self.sample_rate'}), '(reference=self.speech_source, estimation=self.\n speech_prediction_selection, sample_rate=self.sample_rate)\n', (18333, 18443), False, 'import pb_bss_eval\n'), ((1234, 1288), 'difflib.get_close_matches', 'difflib.get_close_matches', (['item', 'keys'], {'cutoff': '(0)', 'n': '(100)'}), '(item, keys, cutoff=0, n=100)\n', (1259, 1288), False, 'import difflib\n'), ((4614, 4721), 'einops.rearrange', 'rearrange', (['([self.speech_source] * self.channels)', '"""channels sources samples -> sources channels samples"""'], {}), "([self.speech_source] * self.channels,\n 'channels sources samples -> sources channels samples')\n", (4623, 4721), False, 'from einops import rearrange\n'), ((5061, 5147), 'einops.rearrange', 'rearrange', (['self.speech_image', '"""sources sensors samples -> sources sensors samples"""'], {}), "(self.speech_image,\n 'sources sensors samples -> sources sensors samples')\n", (5070, 5147), False, 'from einops import rearrange\n'), ((5203, 5268), 'einops.rearrange', 'rearrange', (['self.noise_image', '"""sensors samples -> sensors 
samples"""'], {}), "(self.noise_image, 'sensors samples -> sensors samples')\n", (5212, 5268), False, 'from einops import rearrange\n'), ((6360, 6476), 'pb_bss_eval.evaluation.si_sdr', 'pb_bss_eval.evaluation.si_sdr', ([], {'reference': 'self.speech_source[:, None, :]', 'estimation': 'self.observation[None, :, :]'}), '(reference=self.speech_source[:, None, :],\n estimation=self.observation[None, :, :])\n', (6389, 6476), False, 'import pb_bss_eval\n'), ((18596, 18705), 'pb_bss_eval.evaluation.si_sdr', 'pb_bss_eval.evaluation.si_sdr', ([], {'reference': 'self.speech_source', 'estimation': 'self.speech_prediction_selection'}), '(reference=self.speech_source, estimation=self\n .speech_prediction_selection)\n', (18625, 18705), False, 'import pb_bss_eval\n'), ((1593, 1647), 'difflib.get_close_matches', 'difflib.get_close_matches', (['item', 'keys'], {'cutoff': '(0)', 'n': '(100)'}), '(item, keys, cutoff=0, n=100)\n', (1618, 1647), False, 'import difflib\n'), ((3788, 3895), 'einops.rearrange', 'rearrange', (['([self.speech_source] * self.channels)', '"""channels sources samples -> sources channels samples"""'], {}), "([self.speech_source] * self.channels,\n 'channels sources samples -> sources channels samples')\n", (3797, 3895), False, 'from einops import rearrange\n'), ((3962, 4067), 'einops.rearrange', 'rearrange', (['([self.observation] * self.K_source)', '"""sources channels samples -> sources channels samples"""'], {}), "([self.observation] * self.K_source,\n 'sources channels samples -> sources channels samples')\n", (3971, 4067), False, 'from einops import rearrange\n'), ((5853, 5960), 'einops.rearrange', 'rearrange', (['([self.speech_source] * self.channels)', '"""channels sources samples -> sources channels samples"""'], {}), "([self.speech_source] * self.channels,\n 'channels sources samples -> sources channels samples')\n", (5862, 5960), False, 'from einops import rearrange\n'), ((6027, 6132), 'einops.rearrange', 'rearrange', (['([self.observation] * 
self.K_source)', '"""sources channels samples -> sources channels samples"""'], {}), "([self.observation] * self.K_source,\n 'sources channels samples -> sources channels samples')\n", (6036, 6132), False, 'from einops import rearrange\n'), ((17500, 17593), 'einops.rearrange', 'rearrange', (['self.speech_contribution', '"""sources targets samples -> sources targets samples"""'], {}), "(self.speech_contribution,\n 'sources targets samples -> sources targets samples')\n", (17509, 17593), False, 'from einops import rearrange\n'), ((17680, 17752), 'einops.rearrange', 'rearrange', (['self.noise_contribution', '"""targets samples -> targets samples"""'], {}), "(self.noise_contribution, 'targets samples -> targets samples')\n", (17689, 17752), False, 'from einops import rearrange\n'), ((15082, 15122), 'numpy.sum', 'np.sum', (['self.speech_contribution'], {'axis': '(0)'}), '(self.speech_contribution, axis=0)\n', (15088, 15122), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import copy
import collections as clc
import math
from warnings import warn
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
import pdb
import time
from skrules.rule import Rule
class CN2algorithm(BaseEstimator):
    """CN2 rule-induction classifier (sequential covering).

    Learns an ordered list of conjunctive rules ("complexes"). A complex
    is a list of ``(attribute, value)`` selectors; a row is covered when
    every selector satisfies ``attribute <= value`` (see
    ``check_rule_datapoint``).
    """
    def __init__(
        self,
        min_significance=0.5,
        max_star_size=5,
        remaining_data=1,
        entropy_threshold=0,
        max_num_rules=5,
        min_data_rule=1,
    ):
        """
        Args:
            min_significance: Minimum accepted rule significance (used by
                the legacy ``fit_old``).
            max_star_size: Maximum number of complexes kept in the beam
                ("star") for specialisation; also bounds the number of
                selectors per rule in ``find_best_complex``.
            remaining_data: Stop fitting once at most this many rows
                remain uncovered.
            entropy_threshold: Stop specialising a rule once the entropy
                gain drops to this value.
            max_num_rules: Maximum number of rules to learn.
            min_data_rule: Minimum number of rows required to keep
                specialising a rule.
        """
        self.min_significance = min_significance
        self.max_star_size = max_star_size
        self.remaining_data = remaining_data
        self.entropy_threshold = entropy_threshold
        self.max_num_rules = max_num_rules
        self.min_data_rule = min_data_rule
    def fit(self, X, y):
        """Learn an ordered rule list that covers the training data.

        Args:
            X: Feature matrix (array-like or DataFrame).
            y: Target vector (array-like or DataFrame).

        Returns:
            self
        """
        check_X_y(X, y)
        self.classes_ = unique_labels(y)
        self.rule_list = []
        if not isinstance(X, pd.DataFrame):
            X = self._convert_to_dataframe(X)
        if not isinstance(y, pd.DataFrame):
            y = self._convert_to_dataframe(y, target=True)
        self.X = X
        self.y = y
        X_rem = self.X
        y_rem = self.y
        # Sequential covering: learn a rule, drop the rows it covers,
        # repeat until (almost) everything is covered or the budget of
        # rules is exhausted.
        while (X_rem.shape[0] > self.remaining_data) and (
            len(self.rule_list) < self.max_num_rules
        ):
            best_cplx = self.find_best_complex(X_rem, y_rem)
            # BUGFIX: the remainder is now the complement of the rows the
            # whole complex covers; previously only the *last* selector of
            # the complex was taken into account when removing rows.
            X_cov, y_cov = self.complex_coverage(best_cplx, X_rem, y_rem)
            if X_cov.shape[0] == 0:
                # The best complex covers nothing: no further progress.
                break
            # Class probability of the covered rows (classification only).
            prob = sum(y_cov.values) / len(y_cov.values)
            X_rem = X_rem.drop(X_cov.index)
            y_rem = y_rem.drop(X_cov.index)
            self.rule_list.append([best_cplx, prob])
        return self
    def _convert_to_dataframe(self, data, target=False):
        """Wrap array-like ``data`` in a DataFrame with generated names."""
        if target:
            cols = ["target"]
        else:
            cols = ["col" + str(i) for i in range(len(data[0]))]
        return pd.DataFrame(data, columns=cols)
    def find_best_complex(self, X_data, y_data):
        """Greedily specialise a complex while entropy gain remains.

        Starting from the empty complex, repeatedly evaluates the beam of
        specialisations, keeps the best one and narrows the data to the
        rows it covers.

        Returns:
            The best complex found (list of ``(attribute, value)`` tuples).
        """
        cplx = []
        entropy_gain = 100  # Sentinel so the loop runs at least once.
        while (
            (X_data.shape[0] > self.min_data_rule)
            and (entropy_gain > self.entropy_threshold)
            and (len(cplx) < self.max_star_size)
        ):
            beam_results = self.evaluate_beam_rules(cplx, X_data, y_data)
            # The beam is ordered, so the first row holds the best rule.
            cplx = beam_results["rule"].iloc[0]
            entropy_gain = beam_results["entropy_gain"].iloc[0]
            X_data, y_data = self.complex_coverage(cplx, X_data, y_data)
        return cplx
    def evaluate_beam_rules(self, current_rules, X_rem, y_rem):
        """Specialise ``current_rules`` and score the resulting beam.

        Returns the top ``max_star_size`` specialisations as a DataFrame
        ordered by ``order_rules``. With empty ``current_rules`` the beam
        consists of all single-selector complexes.
        """
        specialised_rules = self.beam_search_complexes(current_rules)
        return self.apply_and_order_rules_by_score(
            specialised_rules, X_rem, y_rem
        ).head(self.max_star_size)
    def apply_and_order_rules_by_score(self, complexes, X_data, y_data):
        """Score a list of complexes against the data.

        Args:
            complexes: List of complexes (each a list of selector tuples).
            X_data, y_data: Data the rules are evaluated on.

        Returns:
            DataFrame with one row per complex (rule, predicted class,
            entropy, entropy gain, significance, length, coverage and
            specificity), ordered by ``order_rules``.
        """
        # Entropy of the parent node is loop-invariant: compute it once.
        base_entropy = self.rule_entropy(y_data)
        list_of_row_dicts = []
        for row in complexes:
            X_coverage, y_coverage = self.complex_coverage(row, X_data, y_data)
            rule_length = len(row)
            # Rules covering 0 examples get penalising sentinel values.
            if (type(X_coverage) == list) or (X_coverage.empty):
                row_dictionary = {
                    "rule": row,
                    "predict_class": "dud rule",
                    "entropy": 10,
                    # BUGFIX: key was missing, producing NaN in the sort
                    # and in find_best_complex's gain check.
                    "entropy_gain": 10,
                    "laplace_accuracy": 0,
                    "significance": 0,
                    "length": rule_length,
                    "num_insts_covered": 0,
                    "specificity": 0,
                }
            else:
                class_counts = y_coverage.value_counts()
                majority_class = class_counts.axes[0][0]
                rule_specificity = class_counts.values[0] / sum(class_counts)
                cov_entropy = self.rule_entropy(y_coverage)
                row_dictionary = {
                    "rule": row,
                    "predict_class": majority_class,
                    "entropy": cov_entropy,
                    "entropy_gain": cov_entropy - base_entropy,
                    "significance": self.rule_significance(X_coverage, y_coverage),
                    "length": rule_length,
                    "num_insts_covered": X_coverage.shape[0],
                    "specificity": rule_specificity,
                }
            list_of_row_dicts.append(row_dictionary)
        rules_and_stats = pd.DataFrame(list_of_row_dicts)
        return self.order_rules(rules_and_stats)
    def order_rules(self, dataFrame_of_rules):
        """Order rules by entropy (asc), then length and coverage (desc)."""
        return dataFrame_of_rules.sort_values(
            ["entropy", "length", "num_insts_covered"],
            ascending=[True, False, False],
        ).reset_index(drop=True)
    def get_splits(self, data):
        """Return the initial single-selector complexes.

        One ``[(attribute, value)]`` complex is produced for every
        distinct value of every attribute in ``data``.
        """
        attrib_value_pairs = []
        for att in data.columns.values.tolist():
            for possVal in set(data[att]):
                attrib_value_pairs.append([(att, possVal)])
        return attrib_value_pairs
    def beam_search_complexes(self, target_complexes):
        """Specialise every complex in the star with every selector.

        Appends each single selector to a copy of each complex and keeps
        only results without duplicated selectors. With an empty star the
        single-selector complexes themselves are returned.
        """
        selectors = self.get_splits(self.X)
        if len(target_complexes) == 0:
            return selectors
        provisional_specialisations = []
        for targ_complex in target_complexes:
            for selector in selectors:
                # A bare tuple is treated as a complex of length one.
                if type(targ_complex) == tuple:
                    comp_to_specialise = [copy.copy(targ_complex)]
                else:
                    comp_to_specialise = copy.copy(targ_complex)
                comp_to_specialise.append(selector[0])
                # Skip complexes repeating a selector, e.g. [(A,1),(A,1)].
                counts = clc.Counter(comp_to_specialise)
                if max(counts.values()) == 1:
                    provisional_specialisations.append(comp_to_specialise)
        return provisional_specialisations
    def check_rule_datapoint(self, data):
        """Return the stored probability of the first rule covering ``data``.

        Args:
            data: Single-row DataFrame; rules are applied with ``<=``.

        Returns:
            The probability stored with the matching rule, or ``[0]``
            (with a warning) when no rule matches.
        """
        if data.shape[0] != 1:
            raise ValueError("Datapoint is more than one.")
        for rule in self.rule_list:
            datapoint = data
            for cond in rule[0]:
                datapoint = datapoint[datapoint[cond[0]] <= cond[1]]
            if datapoint.shape[0] == 1:
                return rule[1]
        warn("Datapoint not in rules")
        return [0]
    def predict(self, X_test):
        """Predict by matching each row against the fitted rule list."""
        check_is_fitted(self)
        if not isinstance(X_test, pd.DataFrame):
            X_test = self._convert_to_dataframe(X_test)
        preds = []
        for index, row in X_test.iterrows():
            preds.append(self.check_rule_datapoint(pd.DataFrame([row])))
        return preds
    def build_rule(self, passed_complex):
        """Expand a complex into a dict rule.

        Attributes constrained by the complex map to a single-value list;
        every other attribute maps to the list of all its observed values.
        Returns ``False`` when the complex repeats an attribute.

        NOTE(review): not used by ``fit``/``predict``; kept for
        compatibility.
        """
        if len(passed_complex) < 1:
            warn("Passed a complex with length <1")
        atts_used_in_rule = [selector[0] for selector in passed_complex]
        if len(set(atts_used_in_rule)) < len(atts_used_in_rule):
            warn("THERE ARE DUPLICATED SELECTORS")
            return False
        rule = {}
        for col in self.X.columns.values.tolist():
            rule[col] = list(set(self.X[col]))
        for att, val in passed_complex:
            rule[att] = [val]
        return rule
    def complex_coverage(
        self, passed_complex, X_data, y_data, operator="<=", y_inverse=False
    ):
        """Return the rows of the data covered by ``passed_complex``.

        A complex is a conjunction: a row is covered only when EVERY
        selector ``(attribute, value)`` satisfies ``attribute <op> value``.

        Args:
            passed_complex: List of ``(attribute, value)`` selectors.
            X_data, y_data: Data to filter.
            operator: One of "<=", "<", ">=", ">".
            y_inverse: When True, additionally return the targets of the
                rows NOT covered by the complex.

        Returns:
            ``(X_covered, y_covered)``, plus ``y_uncovered`` when
            ``y_inverse`` is True. Empty complexes yield two empty frames.

        BUGFIX: each selector previously overwrote the previous filter,
        so only the last selector determined coverage (inconsistent with
        ``check_rule_datapoint``), and the ">=" branch crashed under
        ``y_inverse`` due to a misspelled variable name.
        """
        if len(passed_complex) < 1:
            warn("Empty complex")
            return pd.DataFrame(), pd.DataFrame()
        comparisons = {
            "<=": lambda col, val: col <= val,
            "<": lambda col, val: col < val,
            ">=": lambda col, val: col >= val,
            ">": lambda col, val: col > val,
        }
        if operator not in comparisons:
            raise ValueError(f"Unsupported operator: {operator!r}")
        compare = comparisons[operator]
        # Conjunction of all selectors over the ORIGINAL data.
        mask = pd.Series(True, index=X_data.index)
        for attribute, value in passed_complex:
            mask &= compare(X_data[attribute], value)
        X_rest = X_data[mask]
        y_rest = y_data[mask]
        if y_inverse:
            return X_rest, y_rest, y_data[~mask]
        return X_rest, y_rest
    def check_rule_datapoint_old(self, datapoint, complex):
        """Legacy equality-based coverage check.

        Args:
            datapoint: pandas Series.
            complex: ``(attribute, value)`` tuple or list of such tuples.

        Returns:
            True when every selector matches by equality.
        """
        if type(complex) == tuple:
            if datapoint[complex[0]] == complex[1]:
                return True
            else:
                return False
        if type(complex) == list:
            result = True
            for selector in complex:
                if datapoint[selector[0]] != selector[1]:
                    result = False
            return result
    def rule_entropy(self, y_data, base=None):
        """Shannon entropy of the class distribution of ``y_data``.

        Args:
            y_data: Target values covered by a rule.
            base: Logarithm base; natural logarithm by default.

        Returns:
            The entropy as a float.
        """
        # https://stackoverflow.com/questions/15450192/fastest-way-to-compute-entropy-in-python
        _, counts = np.unique(y_data, return_counts=True)
        norm_counts = counts / counts.sum()
        base = math.e if base is None else base
        return -(norm_counts * np.log(norm_counts) / np.log(base)).sum()
    def rule_significance(self, X_data, y_data):
        """Likelihood-ratio significance of a rule.

        Compares the class distribution of the covered rows against the
        class distribution of the full training data.
        """
        covered_counts = y_data.value_counts()
        covered_probs = covered_counts.divide(len(y_data))
        train_counts = self.y.value_counts()
        train_probs = train_counts.divide(len(self.y))
        # 2 * sum(p_covered * ln(p_covered / p_train))
        significance = (
            covered_probs.multiply(np.log(covered_probs.divide(train_probs))).sum() * 2
        )
        return significance
    def laplace_accuracy(self, y_data):
        """Laplace estimate for a rule, from the CN2 follow-up paper.

        Computes (n + k - n_c - 1) / (n + k) for n covered instances,
        k classes and n_c instances of the majority class, i.e. the
        Laplace *error* of predicting the majority class.
        """
        class_counts = y_data.value_counts()
        num_instances = len(y_data)
        num_classes = len(class_counts)
        num_pred_class = class_counts.iloc[0]
        return (num_instances + num_classes - num_pred_class - 1) / (
            num_instances + num_classes
        )
    def fit_old(self, X, y):
        """Legacy significance-driven fitting loop (kept for reference)."""
        self.X = X
        self.y = y
        X_rem = self.X
        y_rem = self.y
        rule_list = []
        # Loop until (almost) all data is covered or entropy is exhausted.
        while (X_rem.shape[0] > self.remaining_data) and (
            self.rule_entropy(y_rem) > self.entropy_threshold
        ):
            best_new_rule_significance = 1
            entropy_gain = 100
            rules_to_specialise = []
            existing_results = pd.DataFrame()
            # Specialise until the best new rule's significance falls
            # below the user-set minimum or the entropy gain is gone.
            while (best_new_rule_significance > self.min_significance) and (
                entropy_gain > 0
            ):
                trimmed_rule_results = self.evaluate_beam_rules(
                    rules_to_specialise, X_rem, y_rem
                )
                # Merge new rules with the current best and keep the top 2.
                # BUGFIX: pd.concat replaces DataFrame.append, which was
                # removed in pandas 2.0.
                existing_results = pd.concat(
                    [existing_results, trimmed_rule_results]
                )
                existing_results = self.order_rules(existing_results).iloc[0:2]
                rules_to_specialise = trimmed_rule_results["rule"]
                best_new_rule_significance = trimmed_rule_results[
                    "significance"
                ].values[0]
                entropy_gain = trimmed_rule_results["entropy_gain"].values[0]
            best_rule = (
                existing_results["rule"].iloc[0],
                existing_results["predict_class"].iloc[0],
                existing_results["num_insts_covered"].iloc[0],
            )
            X_rem, y_rem = self.complex_coverage(best_rule[0], X_rem, y_rem)
            rule_list.append(best_rule)
        self.rule_list = rule_list
        return self
    def test_fitted_model(self, rule_list, data_set="default"):
        """Evaluate a learned rule list on a test set.

        Returns a DataFrame with per-rule statistics and the overall
        accuracy (mean accuracy of rules with non-zero coverage).

        NOTE(review): legacy code -- it relies on ``self.test_set`` when
        no data set is passed and expects ``complex_coverage`` to return
        (indexes, frame), which no longer matches its signature; confirm
        before use.
        """
        if type(data_set) == str:
            data_set = self.test_set
        remaining_examples = data_set
        list_of_row_dicts = []
        for rule in rule_list:
            rule_coverage_indexes, rule_coverage_dataframe = self.complex_coverage(
                rule[0], remaining_examples
            )
            # Zero coverage can happen with noisy or tiny data sets.
            if len(rule_coverage_dataframe) == 0:
                row_dictionary = {
                    "rule": rule,
                    "pred_class": "zero coverage",
                    "rule_acc": 0,
                    "num_examples": 0,
                    "num_correct": 0,
                    "num_wrong": 0,
                }
                list_of_row_dicts.append(row_dictionary)
            else:
                # Score the rule, then remove its rows before the next one.
                class_of_covered_examples = rule_coverage_dataframe["class"]
                class_counts = class_of_covered_examples.value_counts()
                rule_accuracy = class_counts.values[0] / sum(class_counts)
                num_correctly_classified_examples = class_counts.values[0]
                num_incorrectly_classified_examples = (
                    sum(class_counts.values) - num_correctly_classified_examples
                )
                row_dictionary = {
                    "rule": rule,
                    "pred_class": rule[1],
                    "rule_acc": rule_accuracy,
                    "num_examples": len(rule_coverage_indexes),
                    "num_correct": num_correctly_classified_examples,
                    "num_wrong": num_incorrectly_classified_examples,
                }
                list_of_row_dicts.append(row_dictionary)
                remaining_examples = remaining_examples.drop(rule_coverage_indexes)
        results = pd.DataFrame(list_of_row_dicts)
        overall_accuracy = sum(results["rule_acc"]) / len(
            [r for r in results["rule_acc"] if r != 0]
        )
        return results, overall_accuracy
| [
"pandas.DataFrame",
"numpy.log",
"sklearn.utils.validation.check_X_y",
"collections.Counter",
"copy.copy",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.multiclass.unique_labels",
"warnings.warn",
"numpy.unique"
] | [((1573, 1588), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (1582, 1588), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((1613, 1629), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (1626, 1629), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((2716, 2748), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (2728, 2748), True, 'import pandas as pd\n'), ((6176, 6207), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_row_dicts'], {}), '(list_of_row_dicts)\n', (6188, 6207), True, 'import pandas as pd\n'), ((9733, 9763), 'warnings.warn', 'warn', (['"""Datapoint not in rules"""'], {}), "('Datapoint not in rules')\n", (9737, 9763), False, 'from warnings import warn\n'), ((9823, 9844), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (9838, 9844), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((14522, 14559), 'numpy.unique', 'np.unique', (['y_data'], {'return_counts': '(True)'}), '(y_data, return_counts=True)\n', (14531, 14559), True, 'import numpy as np\n'), ((20588, 20619), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_row_dicts'], {}), '(list_of_row_dicts)\n', (20600, 20619), True, 'import pandas as pd\n'), ((10536, 10575), 'warnings.warn', 'warn', (['"""Passed a complex with length <1"""'], {}), "('Passed a complex with length <1')\n", (10540, 10575), False, 'from warnings import warn\n'), ((10855, 10893), 'warnings.warn', 'warn', (['"""THERE ARE DUPLICATED SELECTORS"""'], {}), "('THERE ARE DUPLICATED SELECTORS')\n", (10859, 10893), False, 'from warnings import warn\n'), ((11655, 11676), 'warnings.warn', 'warn', (['"""Empty complex"""'], {}), "('Empty complex')\n", (11659, 11676), False, 'from warnings import warn\n'), ((16664, 16678), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16676, 16678), 
True, 'import pandas as pd\n'), ((8802, 8833), 'collections.Counter', 'clc.Counter', (['comp_to_specialise'], {}), '(comp_to_specialise)\n', (8813, 8833), True, 'import collections as clc\n'), ((11696, 11710), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11708, 11710), True, 'import pandas as pd\n'), ((11712, 11726), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11724, 11726), True, 'import pandas as pd\n'), ((8597, 8620), 'copy.copy', 'copy.copy', (['targ_complex'], {}), '(targ_complex)\n', (8606, 8620), False, 'import copy\n'), ((10072, 10091), 'pandas.DataFrame', 'pd.DataFrame', (['[row]'], {}), '([row])\n', (10084, 10091), True, 'import pandas as pd\n'), ((8509, 8532), 'copy.copy', 'copy.copy', (['targ_complex'], {}), '(targ_complex)\n', (8518, 8532), False, 'import copy\n'), ((14705, 14717), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (14711, 14717), True, 'import numpy as np\n'), ((14683, 14702), 'numpy.log', 'np.log', (['norm_counts'], {}), '(norm_counts)\n', (14689, 14702), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Unit-conversion constants.
PACKET_SIZE = 1500.0  # bytes
TIME_INTERVAL = 5.0
BITS_IN_BYTE = 8.0
MBITS_IN_BITS = 1000000.0
MILLISECONDS_IN_SECONDS = 1000.0
N = 100
LINK_FILE = './logs/report_bus_0010.log'
# Parse the trace file: field 1 is a millisecond timestamp, field 4 the
# number of bytes received, field 5 the receive duration.
time_ms = []
bytes_recv = []
recv_time = []
with open(LINK_FILE, 'rb') as trace:
    for record in trace:
        fields = record.split()
        time_ms.append(int(fields[1]))
        bytes_recv.append(float(fields[4]))
        recv_time.append(float(fields[5]))
time_ms = np.array(time_ms)
bytes_recv = np.array(bytes_recv)
recv_time = np.array(recv_time)
# Throughput in bytes/ms; rebase time to start at zero and convert units.
throughput_all = bytes_recv / recv_time
time_ms = (time_ms - time_ms[0]) / MILLISECONDS_IN_SECONDS
throughput_all = throughput_all * BITS_IN_BYTE / MBITS_IN_BITS * MILLISECONDS_IN_SECONDS
plt.plot(time_ms, throughput_all)
plt.xlabel('Time (second)')
plt.ylabel('Throughput (Mbit/sec)')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((463, 480), 'numpy.array', 'np.array', (['time_ms'], {}), '(time_ms)\n', (471, 480), True, 'import numpy as np\n'), ((494, 514), 'numpy.array', 'np.array', (['bytes_recv'], {}), '(bytes_recv)\n', (502, 514), True, 'import numpy as np\n'), ((527, 546), 'numpy.array', 'np.array', (['recv_time'], {}), '(recv_time)\n', (535, 546), True, 'import numpy as np\n'), ((753, 786), 'matplotlib.pyplot.plot', 'plt.plot', (['time_ms', 'throughput_all'], {}), '(time_ms, throughput_all)\n', (761, 786), True, 'import matplotlib.pyplot as plt\n'), ((787, 814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (second)"""'], {}), "('Time (second)')\n", (797, 814), True, 'import matplotlib.pyplot as plt\n'), ((815, 850), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Throughput (Mbit/sec)"""'], {}), "('Throughput (Mbit/sec)')\n", (825, 850), True, 'import matplotlib.pyplot as plt\n'), ((851, 861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (859, 861), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import operator
class KNN(object):
    """A minimal k-nearest-neighbours classifier (majority vote, squared L2 distance)."""
    def __init__(self, k=3):
        # Number of neighbours consulted for each prediction.
        self.k = k
    def fit(self, x, y):
        """Memorise the training data (KNN is a lazy learner).

        :param x: array-like of shape (n_samples, n_features).
        :param y: numpy array of class labels, shape (n_samples,).
        """
        self.x = x
        self.y = y
    def _square_distance(self, v1, v2):
        # Squared Euclidean distance; the square root is omitted since it
        # does not change the neighbour ordering.
        return np.sum(np.square(v1 - v2))
    def _vote(self, ys):
        # Majority vote among neighbour labels. Ties resolve to the label
        # encountered first, matching the previous stable-sort behaviour.
        # (Fix: removed the unused `ys_unique = np.unique(ys)` computation.)
        vote_dict = {}
        for y in ys:
            vote_dict[y] = vote_dict.get(y, 0) + 1
        return max(vote_dict.items(), key=operator.itemgetter(1))[0]
    def predict(self, x):
        """Predict a label for each row of ``x`` via majority vote of the
        ``k`` nearest training samples."""
        y_pred = []
        for sample in x:
            dist_arr = [self._square_distance(sample, xt) for xt in self.x]
            # Indices of the k closest training points.
            top_k_index = np.argsort(dist_arr)[:self.k]
            y_pred.append(self._vote(ys=self.y[top_k_index]))
        return np.array(y_pred)
    def score(self, y_true=None, y_pred=None):
        """Return classification accuracy; defaults to scoring the training set."""
        if y_true is None and y_pred is None:
            y_pred = self.predict(self.x)
            y_true = self.y
        # Fraction of matching labels.
        correct = sum(1 for t, p in zip(y_true, y_pred) if t == p)
        return correct / len(y_true)
| [
"numpy.square",
"numpy.argsort",
"numpy.array",
"operator.itemgetter",
"numpy.unique"
] | [((320, 333), 'numpy.unique', 'np.unique', (['ys'], {}), '(ys)\n', (329, 333), True, 'import numpy as np\n'), ((982, 998), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (990, 998), True, 'import numpy as np\n'), ((256, 274), 'numpy.square', 'np.square', (['(v1 - v2)'], {}), '(v1 - v2)\n', (265, 274), True, 'import numpy as np\n'), ((836, 856), 'numpy.argsort', 'np.argsort', (['dist_arr'], {}), '(dist_arr)\n', (846, 856), True, 'import numpy as np\n'), ((562, 584), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (581, 584), False, 'import operator\n')] |
import anndata
import dask.dataframe
import numpy as np
import os
import pandas as pd
import pickle
from typing import Dict, List, Tuple, Union
from sfaira.consts import AdataIdsSfaira
from sfaira.data.store.stores.base import StoreBase
from sfaira.data.store.stores.single import StoreSingleFeatureSpace, \
StoreDao, StoreAnndata
from sfaira.data.store.carts.multi import CartMulti
from sfaira.data.store.io.io_dao import read_dao
from sfaira.versions.genomes.genomes import GenomeContainer
class StoreMultipleFeatureSpaceBase(StoreBase):
    """
    Umbrella class for a dictionary over multiple instances DistributedStoreSingleFeatureSpace.
    Allows for operations on data sets that are defined in different feature spaces.
    """
    # Controlled vocabulary of sfaira AnnData field names (.uns keys etc.).
    _adata_ids_sfaira: AdataIdsSfaira
    # One single-feature-space store per key; keys are typically organisms.
    _stores: Dict[str, StoreSingleFeatureSpace]
    def __init__(self, stores: Dict[str, StoreSingleFeatureSpace]):
        """
        :param stores: Dictionary of single-feature-space stores, one per feature space.
        """
        self._stores = stores
    @property
    def stores(self) -> Dict[str, StoreSingleFeatureSpace]:
        """
        Only expose stores that contain observations.
        """
        return dict([(k, v) for k, v in self._stores.items() if v.n_obs > 0])
    @stores.setter
    def stores(self, x: Dict[str, StoreSingleFeatureSpace]):
        # Stores are fixed at construction time; replacing them is unsupported.
        raise NotImplementedError("cannot set this attribute, it s defined in constructor")
    @property
    def genome_container(self) -> Dict[str, Union[GenomeContainer, None]]:
        """
        Genome container (gene annotation) by store; None where unset.
        Note: iterates _stores, i.e. includes stores without observations.
        """
        return dict([(k, v.genome_container) for k, v in self._stores.items()])
    @genome_container.setter
    def genome_container(self, x: Union[GenomeContainer, Dict[str, GenomeContainer]]):
        if isinstance(x, GenomeContainer):
            # Transform into dictionary first.
            organisms = [k for k, v in self.stores.items()]
            # NOTE(review): `raise Warning(...)` raises an exception rather than
            # emitting a warning via warnings.warn() -- confirm this is intended.
            if isinstance(organisms, list) and len(organisms) == 0:
                raise Warning("found empty organism lists in genome_container.setter")
            if len(organisms) > 1:
                raise ValueError(f"Gave a single GenomeContainer for a store instance that has mulitiple organism: "
                                 f"{organisms}, either further subset the store or give a dictionary of "
                                 f"GenomeContainers")
            else:
                x = {organisms[0]: x}
        # Assign one genome container per matched store key.
        for k, v in x.items():
            self.stores[k].genome_container = v
    @property
    def indices(self) -> Dict[str, np.ndarray]:
        """
        Dictionary of indices of selected observations contained in all stores.
        """
        return dict([(kk, vv) for k, v in self.stores.items() for kk, vv in v.indices.items()])
    @property
    def adata_by_key(self) -> Dict[str, anndata.AnnData]:
        """
        Dictionary of all anndata instances for each selected data set in store, sub-setted by selected cells, for each
        stores.
        """
        return dict([(kk, vv) for k, v in self.stores.items() for kk, vv in v.adata_by_key.items()])
    @property
    def data_by_key(self):
        """
        Data matrix for each selected data set in store, sub-setted by selected cells.
        """
        return dict([(kk, vv) for k, v in self.stores.items() for kk, vv in v.data_by_key.items()])
    @property
    def obs_by_key(self) -> Dict[str, Union[pd.DataFrame, dask.dataframe.DataFrame]]:
        """
        Dictionary of the .obs tables of all selected data sets across stores,
        keyed by data set.
        """
        return dict([(k, v.obs) for k, v in self.adata_by_key.items()])
    @property
    def var_names(self) -> Dict[str, List[str]]:
        """
        Dictionary of variable names by store.
        """
        return dict([(k, v.var_names) for k, v in self.stores.items()])
    @property
    def n_vars(self) -> Dict[str, int]:
        """
        Dictionary of number of features by store.
        """
        return dict([(k, v.n_vars) for k, v in self.stores.items()])
    @property
    def n_obs(self) -> int:
        """
        Total number of observations summed across stores.
        """
        return np.asarray(np.sum([v.n_obs for v in self.stores.values()]), dtype="int32")
    @property
    def n_obs_dict(self) -> Dict[str, int]:
        """
        Dictionary of number of observations by store.
        """
        return dict([(k, v.n_obs) for k, v in self.stores.items()])
    @property
    def shape(self) -> Dict[str, Tuple[int, int]]:
        """
        Dictionary of full selected data matrix shape by store.
        """
        return dict([(k, v.shape) for k, v in self.stores.items()])
    def subset(self, attr_key, values: Union[str, List[str], None] = None,
               excluded_values: Union[str, List[str], None] = None, verbose: int = 1):
        """
        Subset list of adata objects based on cell-wise properties.
        Subsetting is done based on index vectors, the objects remain untouched.
        :param attr_key: Property to subset by. Options:
            - "assay_differentiation" points to self.assay_differentiation_obs_key
            - "assay_sc" points to self.assay_sc_obs_key
            - "assay_type_differentiation" points to self.assay_type_differentiation_obs_key
            - "cell_line" points to self.cell_line
            - "cell_type" points to self.cell_type_obs_key
            - "developmental_stage" points to self.developmental_stage_obs_key
            - "ethnicity" points to self.ethnicity_obs_key
            - "organ" points to self.organ_obs_key
            - "organism" points to self.organism_obs_key
            - "sample_source" points to self.sample_source_obs_key
            - "sex" points to self.sex_obs_key
            - "state_exact" points to self.state_exact_obs_key
        :param values: Classes to overlap to. Supply either values or excluded_values.
        :param excluded_values: Classes to exclude from match list. Supply either values or excluded_values.
        """
        # Delegate to every store; a store not matching the criterion becomes empty.
        for k in self.stores.keys():
            self.stores[k].subset(attr_key=attr_key, values=values, excluded_values=excluded_values, verbose=0)
        if self.n_obs == 0 and verbose > 0:
            print("WARNING: multi store is now empty.")
    def write_config(self, fn: Union[str, os.PathLike]):
        """
        Writes a config file that describes the current data sub-setting.
        This config file can be loaded later to recreate a sub-setting.
        This config file contains observation-wise subsetting information.
        :param fn: Output file without file type extension.
        """
        # Merge the index dictionaries of all stores into one flat dictionary.
        indices = {}
        for v in self.stores.values():
            indices.update(v.indices)
        # NOTE(review): write_config appends '.pickle' here, but load_config opens
        # `fn` verbatim -- callers must pass the full file name there; confirm.
        with open(fn + '.pickle', 'wb') as f:
            pickle.dump(indices, f)
    def load_config(self, fn: Union[str, os.PathLike]):
        """
        Load a config file and recreates a data sub-setting.
        This config file contains observation-wise subsetting information.
        :param fn: Output file without file type extension.
        """
        with open(fn, 'rb') as f:
            indices = pickle.load(f)
        # Distribute indices to corresponding stores by matched keys.
        keys_found = []
        for k, v in self.stores.items():
            indices_k = {}
            for kk, vv in indices.items():
                if kk in v.adata_by_key.keys():
                    indices_k[kk] = vv
                    keys_found.append(kk)
            self.stores[k].indices = indices_k
        # Make sure all declared data were assigned to stores:
        keys_not_found = list(set(list(indices.keys())).difference(set(keys_found)))
        if len(keys_not_found) > 0:
            raise ValueError(f"did not find object(s) with name(s) in store: {keys_not_found}")
    def checkout(
            self,
            idx: Union[Dict[str, Union[np.ndarray, None]], None] = None,
            intercalated: bool = True,
            **kwargs
    ) -> CartMulti:
        """
        Carts per store.
        See also DistributedStore*.checkout().
        :param idx:
        :param intercalated: Whether to do sequential or intercalated emission.
        :param kwargs: See parameters of DistributedStore*.generator().
        :return: Generator function which yields batch_size at every invocation.
            The generator returns a tuple of (.X, .obs).
        """
        # Default to "all observations" (None) for every store.
        if idx is None:
            idx = dict([(k, None) for k in self.stores.keys()])
        # Every store must have an entry in the index dictionary.
        for k in self.stores.keys():
            assert k in idx.keys(), (idx.keys(), self.stores.keys())
        carts = dict([(k, v.checkout(idx=idx[k], **kwargs)) for k, v in self.stores.items()])
        return CartMulti(carts=carts, intercalated=intercalated)
class StoresAnndata(StoreMultipleFeatureSpaceBase):
    """Multi-feature-space store built directly from in-memory AnnData objects,
    grouped into one single-feature-space store per organism."""
    def __init__(self, adatas: Union[anndata.AnnData, List[anndata.AnnData], Tuple[anndata.AnnData]]):
        # Collect all data loaders from files in directory:
        self._adata_ids_sfaira = AdataIdsSfaira()
        adata_by_key = {}
        indices = {}
        # Accept a single AnnData as well as a sequence of them.
        if isinstance(adatas, anndata.AnnData):
            adatas = [adatas]
        for i, adata in enumerate(adatas):
            # Check if adata has a unique ID, if not, add one:
            if self._adata_ids_sfaira.id not in adata.uns.keys():
                adata.uns[self._adata_ids_sfaira.id] = f"adata_{i}"
            if self._adata_ids_sfaira.organism in adata.uns.keys():
                organism = adata.uns[self._adata_ids_sfaira.organism]
            else:
                # Declare as unknown organism and genome and make a group of its own:
                organism = adata.uns[self._adata_ids_sfaira.id]
            # A length-one list of organisms collapses to its element; mixed
            # organisms within one AnnData are not supported.
            if isinstance(organism, list):
                if len(organism) == 1:
                    organism = organism[0]
                    assert isinstance(organism, str), organism
                else:
                    raise ValueError(f"tried to register mixed organism data set ({organism})")
            adata_id = adata.uns[self._adata_ids_sfaira.id]
            # Make up a new merged ID for data set indexing if there is a list of IDs in .uns.
            if isinstance(adata_id, list):
                adata_id = "_".join(adata_id)
            if organism not in adata_by_key.keys():
                adata_by_key[organism] = {}
                indices[organism] = {}
            try:
                # Select all observations of the data set by default.
                adata_by_key[organism][adata_id] = adata
                indices[organism][adata_id] = np.arange(0, adata.n_obs)
            except TypeError as e:
                raise TypeError(f"{e} for {organism} or {adata.uns[self._adata_ids_sfaira.id]}")
        # One in-memory single-feature-space store per organism group.
        stores = dict([
            (k, StoreAnndata(adata_by_key=adata_by_key[k], indices=indices[k], in_memory=True))
            for k in adata_by_key.keys()
        ])
        super(StoresAnndata, self).__init__(stores=stores)
class StoresDao(StoreMultipleFeatureSpaceBase):
    """Multi-feature-space store backed by DAO (zarr-based) store directories,
    grouped into one single-feature-space store per organism."""
    _dataset_weights: Union[None, Dict[str, float]]
    def __init__(self,
                 cache_path: Union[str, os.PathLike, List[str], List[os.PathLike]],
                 columns: Union[None, List[str]] = None):
        """
        :param cache_path: Store directory or list of store directories.
        :param columns: Which columns to read into the obs copy in the output, see pandas.read_parquet().
        """
        # Collect all data loaders from files in directory:
        self._adata_ids_sfaira = AdataIdsSfaira()
        adata_by_key = {}
        x_by_key = {}
        indices = {}
        # Fix: the previous condition `not isinstance(x, list) or isinstance(x, tuple)
        # or isinstance(x, np.ndarray)` also wrapped tuples/arrays in another list due
        # to operator precedence, breaking the directory iteration below. Only wrap
        # genuine scalar paths:
        if not isinstance(cache_path, (list, tuple, np.ndarray)):
            cache_path = [cache_path]
        for cache_path_i in cache_path:
            for f in np.sort(os.listdir(cache_path_i)):
                adata = None
                x = None
                trial_path = os.path.join(cache_path_i, f)
                if os.path.isdir(trial_path):
                    # zarr-backed anndata are saved as directories with the elements of the array group as further sub
                    # directories, e.g. a directory called "X", and a file ".zgroup" which identifies the zarr group.
                    adata, x = read_dao(trial_path, use_dask=True, columns=columns, obs_separate=False, x_separate=True)
                if adata is not None:
                    # Group data sets by organism; warn on duplicate IDs.
                    organism = adata.uns[self._adata_ids_sfaira.organism]
                    if organism not in adata_by_key.keys():
                        adata_by_key[organism] = {}
                        x_by_key[organism] = {}
                        indices[organism] = {}
                    if adata.uns[self._adata_ids_sfaira.id] in adata_by_key[organism].keys():
                        print(f"WARNING: overwriting store entry in {adata.uns[self._adata_ids_sfaira.id]} in store "
                              f"{cache_path_i}.")
                    adata_by_key[organism][adata.uns[self._adata_ids_sfaira.id]] = adata
                    x_by_key[organism][adata.uns[self._adata_ids_sfaira.id]] = x
                    indices[organism][adata.uns[self._adata_ids_sfaira.id]] = np.arange(0, adata.n_obs)
        stores = dict([
            (k, StoreDao(adata_by_key=adata_by_key[k], x_by_key=x_by_key[k], indices=indices[k],
                          obs_by_key=None))
            for k in adata_by_key.keys()
        ])
        self._x_by_key = x_by_key
        super(StoresDao, self).__init__(stores=stores)
class StoresH5ad(StoreMultipleFeatureSpaceBase):
    """Multi-feature-space store backed by .h5ad files found in cache directories,
    grouped into one single-feature-space store per organism."""
    def __init__(
            self,
            cache_path: Union[str, os.PathLike, List[str], List[os.PathLike]],
            in_memory: bool = False):
        """
        :param cache_path: Store directory or list of store directories containing .h5ad files.
        :param in_memory: Whether to fully load the AnnData objects into memory. If False,
            files are opened in backed ("r") mode and read lazily from disk.
        """
        # Collect all data loaders from files in directory:
        self._adata_ids_sfaira = AdataIdsSfaira()
        adata_by_key = {}
        indices = {}
        # Fix: only wrap genuine scalar paths; the previous condition also wrapped
        # tuples/arrays in another list because of operator precedence.
        if not isinstance(cache_path, (list, tuple, np.ndarray)):
            cache_path = [cache_path]
        for cache_path_i in cache_path:
            for f in np.sort(os.listdir(cache_path_i)):
                adata = None
                trial_path = os.path.join(cache_path_i, f)
                if os.path.isfile(trial_path):
                    # Narrow down to supported file types:
                    if f.split(".")[-1] == "h5ad":
                        try:
                            # Fix: the backed flag was inverted; in_memory=True must
                            # load fully (backed=None) while in_memory=False keeps the
                            # file on disk in backed read mode ("r").
                            adata = anndata.read_h5ad(
                                filename=trial_path,
                                backed=None if in_memory else "r",
                            )
                        except OSError as e:
                            # Skip unreadable files but report them.
                            adata = None
                            print(f"WARNING: for data set {f}: {e}")
                if adata is not None:
                    # Group data sets by organism; warn on duplicate IDs.
                    organism = adata.uns[self._adata_ids_sfaira.organism]
                    if organism not in adata_by_key.keys():
                        adata_by_key[organism] = {}
                        indices[organism] = {}
                    if adata.uns[self._adata_ids_sfaira.id] in adata_by_key[organism].keys():
                        print(f"WARNING: overwriting store entry in {adata.uns[self._adata_ids_sfaira.id]} in store "
                              f"{cache_path_i}.")
                    adata_by_key[organism][adata.uns[self._adata_ids_sfaira.id]] = adata
                    indices[organism][adata.uns[self._adata_ids_sfaira.id]] = np.arange(0, adata.n_obs)
        stores = dict([
            (k, StoreAnndata(adata_by_key=adata_by_key[k], indices=indices[k], in_memory=in_memory))
            for k in adata_by_key.keys()
        ])
        super(StoresH5ad, self).__init__(stores=stores)
| [
"anndata.read_h5ad",
"pickle.dump",
"os.path.isdir",
"sfaira.data.store.stores.single.StoreDao",
"sfaira.data.store.carts.multi.CartMulti",
"os.path.isfile",
"pickle.load",
"numpy.arange",
"sfaira.consts.AdataIdsSfaira",
"sfaira.data.store.stores.single.StoreAnndata",
"sfaira.data.store.io.io_da... | [((8672, 8721), 'sfaira.data.store.carts.multi.CartMulti', 'CartMulti', ([], {'carts': 'carts', 'intercalated': 'intercalated'}), '(carts=carts, intercalated=intercalated)\n', (8681, 8721), False, 'from sfaira.data.store.carts.multi import CartMulti\n'), ((8973, 8989), 'sfaira.consts.AdataIdsSfaira', 'AdataIdsSfaira', ([], {}), '()\n', (8987, 8989), False, 'from sfaira.consts import AdataIdsSfaira\n'), ((11392, 11408), 'sfaira.consts.AdataIdsSfaira', 'AdataIdsSfaira', ([], {}), '()\n', (11406, 11408), False, 'from sfaira.consts import AdataIdsSfaira\n'), ((13703, 13719), 'sfaira.consts.AdataIdsSfaira', 'AdataIdsSfaira', ([], {}), '()\n', (13717, 13719), False, 'from sfaira.consts import AdataIdsSfaira\n'), ((6737, 6760), 'pickle.dump', 'pickle.dump', (['indices', 'f'], {}), '(indices, f)\n', (6748, 6760), False, 'import pickle\n'), ((7095, 7109), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7106, 7109), False, 'import pickle\n'), ((10466, 10491), 'numpy.arange', 'np.arange', (['(0)', 'adata.n_obs'], {}), '(0, adata.n_obs)\n', (10475, 10491), True, 'import numpy as np\n'), ((11701, 11725), 'os.listdir', 'os.listdir', (['cache_path_i'], {}), '(cache_path_i)\n', (11711, 11725), False, 'import os\n'), ((11811, 11840), 'os.path.join', 'os.path.join', (['cache_path_i', 'f'], {}), '(cache_path_i, f)\n', (11823, 11840), False, 'import os\n'), ((11860, 11885), 'os.path.isdir', 'os.path.isdir', (['trial_path'], {}), '(trial_path)\n', (11873, 11885), False, 'import os\n'), ((13990, 14014), 'os.listdir', 'os.listdir', (['cache_path_i'], {}), '(cache_path_i)\n', (14000, 14014), False, 'import os\n'), ((14075, 14104), 'os.path.join', 'os.path.join', (['cache_path_i', 'f'], {}), '(cache_path_i, f)\n', (14087, 14104), False, 'import os\n'), ((14124, 14150), 'os.path.isfile', 'os.path.isfile', (['trial_path'], {}), '(trial_path)\n', (14138, 14150), False, 'import os\n'), ((10664, 10742), 'sfaira.data.store.stores.single.StoreAnndata', 
'StoreAnndata', ([], {'adata_by_key': 'adata_by_key[k]', 'indices': 'indices[k]', 'in_memory': '(True)'}), '(adata_by_key=adata_by_key[k], indices=indices[k], in_memory=True)\n', (10676, 10742), False, 'from sfaira.data.store.stores.single import StoreSingleFeatureSpace, StoreDao, StoreAnndata\n'), ((12155, 12248), 'sfaira.data.store.io.io_dao.read_dao', 'read_dao', (['trial_path'], {'use_dask': '(True)', 'columns': 'columns', 'obs_separate': '(False)', 'x_separate': '(True)'}), '(trial_path, use_dask=True, columns=columns, obs_separate=False,\n x_separate=True)\n', (12163, 12248), False, 'from sfaira.data.store.io.io_dao import read_dao\n'), ((13074, 13099), 'numpy.arange', 'np.arange', (['(0)', 'adata.n_obs'], {}), '(0, adata.n_obs)\n', (13083, 13099), True, 'import numpy as np\n'), ((13140, 13242), 'sfaira.data.store.stores.single.StoreDao', 'StoreDao', ([], {'adata_by_key': 'adata_by_key[k]', 'x_by_key': 'x_by_key[k]', 'indices': 'indices[k]', 'obs_by_key': 'None'}), '(adata_by_key=adata_by_key[k], x_by_key=x_by_key[k], indices=\n indices[k], obs_by_key=None)\n', (13148, 13242), False, 'from sfaira.data.store.stores.single import StoreSingleFeatureSpace, StoreDao, StoreAnndata\n'), ((15351, 15376), 'numpy.arange', 'np.arange', (['(0)', 'adata.n_obs'], {}), '(0, adata.n_obs)\n', (15360, 15376), True, 'import numpy as np\n'), ((15417, 15505), 'sfaira.data.store.stores.single.StoreAnndata', 'StoreAnndata', ([], {'adata_by_key': 'adata_by_key[k]', 'indices': 'indices[k]', 'in_memory': 'in_memory'}), '(adata_by_key=adata_by_key[k], indices=indices[k], in_memory=\n in_memory)\n', (15429, 15505), False, 'from sfaira.data.store.stores.single import StoreSingleFeatureSpace, StoreDao, StoreAnndata\n'), ((14327, 14400), 'anndata.read_h5ad', 'anndata.read_h5ad', ([], {'filename': 'trial_path', 'backed': "('r' if in_memory else None)"}), "(filename=trial_path, backed='r' if in_memory else None)\n", (14344, 14400), False, 'import anndata\n')] |
"""
animated_plots.py
Author: <NAME> / git: bencottier
Produce and display animated data.
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import math
def animate(i, anim, selections, results, values):
    """FuncAnimation callback: render frame ``i`` of the bandit animation.

    Each iteration of the bandit history is drawn over ``anim.frames_per_iter``
    frames, split into sections (show values / highlight selection / show
    result / interpolate towards the next iteration's values).
    """
    i_iter = i % anim.frames_per_iter # frames within current iteration
    j = int((i - i_iter) / anim.frames_per_iter) # current iteration
    sf = anim.section_frames
    if j == anim.num_iter: # End animation with neutral display
        anim.value(values[:, :, j])
        return
    s = selections[j]
    r = results[j]
    v = values[:, :, j] # barset, arms, iteration
    if i_iter < sf[0]:
        # Section 1: show current values
        if i_iter == 0:
            anim.value(v)
    # elif i_iter < sf[1]:
    #     # Section 2: animate change in auxilliary value by interpolating
    #     change = np.array([values[0, :, j + 1] - v[0], np.zeros_like(v[1])])
    #     frame = i_iter - sf[0]
    #     total_frames = sf[1] - sf[0]
    #     v_anim = v
    #     v_anim += frame * change / total_frames
    #     anim.update(v_anim)
    elif i_iter < sf[1]:
        # Section 3: indicate selection
        if i_iter == sf[0]:
            anim.select(s)
    elif i_iter < sf[2]:
        # Section 4: indicate result of selection
        if i_iter == sf[1]:
            anim.result(s, r)
    else: # i_iter >= sf[-1]
        # Last section: animate change in value by interpolating
        # NOTE(review): `v` is a numpy view into `values`, so `v_anim = v`
        # aliases it and `+=` mutates values[:, :, j] in place across frames;
        # `change` is then recomputed against the mutated slice each frame.
        # The interpolation appears to rely on this accumulation -- confirm.
        change = values[:, :, j + 1] - v
        frame = i_iter - sf[-2]
        total_frames = sf[-1] - sf[-2]
        v_anim = v
        v_anim += frame * change / total_frames
        anim.update(v_anim)
class Animation:
    """Base class driving a matplotlib FuncAnimation over bandit history.

    Subclasses override the value/select/result/update hooks to draw frames.
    """
    COLOUR_DEFAULT = 'C0' # 'C0' blue
    COLOUR_SELECT = 'C1' # 'C1' orange
    COLOUR_SUCCESS = 'C2' # 'C2' greed
    COLOUR_FAILURE = 'C3' # 'C3' red
    def __init__(self, selections, results, values, iter_start, iter_end,
            fps=30, speed=2):
        figure = plt.figure()
        # Clamp the requested iteration window to the available history.
        n = len(selections)
        if not (0 <= iter_start < n):
            iter_start = 0
        if iter_end < iter_start or iter_end >= n:
            iter_end = n
        selections = selections[iter_start:iter_end]
        results = results[iter_start:iter_end]
        values = values[:, :, iter_start:iter_end+1]
        self.num_iter = iter_end - iter_start
        self.fps = fps  # desired output frame rate
        # Cumulative section end-times in seconds (scaled by playback speed),
        # then converted into per-section frame boundaries.
        durations = 1./speed * np.array([1, 2, 3, 4])
        self.section_frames = (self.fps * durations).astype(np.int32)
        self.frames_per_iter = self.section_frames[-1]
        total_frames = self.frames_per_iter * (self.num_iter + 1)
        self.anim = animation.FuncAnimation(
            figure, animate, fargs=(self, selections, results, values),
            frames=total_frames, repeat=False, blit=False,
            interval=int(1000/self.fps))
    def save(self, filename):
        """Render the animation to *filename* via ffmpeg."""
        writer = animation.FFMpegWriter(fps=self.fps)
        self.anim.save(filename, writer=writer)
    def value(self, values):
        # Hook: draw the current values; overridden by subclasses.
        pass
    def select(self, selection):
        # Hook: highlight the selected arm; overridden by subclasses.
        pass
    def result(self, selection, result):
        # Hook: show the reward outcome; overridden by subclasses.
        pass
    def update(self, values):
        # Hook: draw interpolated value changes; overridden by subclasses.
        pass
class BarAnimation(Animation):
    """Animation that renders arm values as overlaid bar charts (one bar set
    per data series; series 0 is a grey background series)."""
    def __init__(self, selections, results, values, iter_start, iter_end,
            fps=30, speed=2, num_bars=None, num_series=1):
        super(BarAnimation, self).__init__(selections, results, values,
                iter_start, iter_end, fps, speed)
        if num_bars is None:
            num_bars = values.shape[1]
        self.x = np.arange(1, num_bars + 1)
        # One bar container per data series, all starting at zero height.
        self.bar_sets = []
        for _ in range(num_series):
            self.bar_sets.append(plt.bar(self.x, np.zeros(num_bars)))
        # The background/auxiliary series is drawn in grey.
        for bar in self.bar_sets[0]:
            bar.set_color('gray')
    def value(self, values):
        """Draw the neutral state: reset foreground colours, set all heights."""
        for series_idx, bar_set in enumerate(self.bar_sets):
            for arm_idx, bar in enumerate(bar_set):
                if series_idx == 1:
                    bar.set_color(self.COLOUR_DEFAULT)
                bar.set_height(values[series_idx][arm_idx])
    def select(self, selection):
        """Highlight the chosen arm in the foreground series."""
        self.bar_sets[1][selection].set_color(self.COLOUR_SELECT)
    def result(self, selection, result):
        """Colour the chosen arm by reward outcome (success vs failure)."""
        if result > 0:
            colour = self.COLOUR_SUCCESS
        else:
            colour = self.COLOUR_FAILURE
        self.bar_sets[1][selection].set_color(colour)
    def update(self, values):
        """Set every bar height without touching colours (interpolation frames)."""
        for series_idx, bar_set in enumerate(self.bar_sets):
            for arm_idx, bar in enumerate(bar_set):
                bar.set_height(values[series_idx][arm_idx])
if __name__ == '__main__':
    # Demo: run a UCB agent on a 10-armed Bernoulli bandit and animate the
    # evolution of its value estimates over the last few episodes.
    from agents import EpsilonGreedyAgent, FPLAgent, Exp3Agent, UCBAgent
    from bandits import Bandit
    bandit_probs = [0.10, 0.50, 0.60, 0.80, 0.10,
                    0.25, 0.60, 0.45, 0.75, 0.65] # success probability
    bandit = Bandit(bandit_probs)
    agent = UCBAgent(bandit, 1.0)
    N_episodes = 10000
    def ucb_value(ucb_value(ucb_agent) if False else ucb_agent):
        pass
    def ucb_value(ucb_agent):
        """Return (Q estimates, exploration bonus) of a UCB agent.

        Before every arm has been pulled once the bonus is 0.0; afterwards it
        is the agent's UCB exploration term per arm.
        """
        t = np.sum(ucb_agent.t)
        if t < len(ucb_agent.Q):
            return ucb_agent.Q, 0.0
        else:
            f = 1 + t * (math.log(t))**2
            explore = ucb_agent.param * np.sqrt(2 * math.log(f) * 1. / ucb_agent.t)
            return ucb_agent.Q, explore
    action_history = np.zeros(N_episodes, dtype=np.int32)
    reward_history = np.zeros(N_episodes)
    # values layout: series (Q+bonus / Q), arm, episode (episode 0 is all-zero).
    value_history = np.zeros((2, bandit.N, N_episodes + 1))
    for episode in range(N_episodes):
        # Choose action from agent (from current Q estimate)
        action = agent.get_action(bandit)
        # Pick up reward from bandit for chosen action
        reward = bandit.get_reward(action)
        # Update Q action-value estimates
        agent.update_Q(action, reward)
        # Append to history
        action_history[episode] = action
        reward_history[episode] = reward
        ucb_v = ucb_value(agent)
        value_history[0, :, episode + 1] = ucb_v[0] + ucb_v[1]
        value_history[1, :, episode + 1] = ucb_v[0]
    # Animate only the final ten episodes.
    anim = BarAnimation(action_history, reward_history, value_history,
                        iter_start=9990, iter_end=9999, speed=4, num_series=2)
    ax = plt.gca()
    plt.ylim([0.0, 2.0])
    plt.xticks(range(1, bandit.N + 1))
    # ax.yticks([])
    plt.xlabel("action")
    plt.ylabel("value")
    # ax.yaxis.grid(True)
    # anim.save("output/graphics/ucb.mp4")
    plt.show()
    # anim = BarAnimation(action_history, reward_history, value_history,
    #         iter_start=200, iter_end=210, speed=2, num_series=2)
    # ax = plt.gca()
    # plt.ylim([0.0, 100.0])
    # plt.xticks(range(1, bandit.N + 1))
    # # ax.yticks([])
    # plt.xlabel("action")
    # plt.ylabel("value")
    # # ax.yaxis.grid(True)
    # anim.save("output/graphics/fpl_q2.mp4")
    # # plt.show()
| [
"matplotlib.pyplot.show",
"numpy.sum",
"agents.UCBAgent",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"matplotlib.animation.FFMpegWriter",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.gca",
"bandits.Bandit",
"matplotlib.pyplot.ylabel",
"math.log",
"matplotlib.... | [((4774, 4794), 'bandits.Bandit', 'Bandit', (['bandit_probs'], {}), '(bandit_probs)\n', (4780, 4794), False, 'from bandits import Bandit\n'), ((4807, 4828), 'agents.UCBAgent', 'UCBAgent', (['bandit', '(1.0)'], {}), '(bandit, 1.0)\n', (4815, 4828), False, 'from agents import EpsilonGreedyAgent, FPLAgent, Exp3Agent, UCBAgent\n'), ((5185, 5221), 'numpy.zeros', 'np.zeros', (['N_episodes'], {'dtype': 'np.int32'}), '(N_episodes, dtype=np.int32)\n', (5193, 5221), True, 'import numpy as np\n'), ((5243, 5263), 'numpy.zeros', 'np.zeros', (['N_episodes'], {}), '(N_episodes)\n', (5251, 5263), True, 'import numpy as np\n'), ((5284, 5323), 'numpy.zeros', 'np.zeros', (['(2, bandit.N, N_episodes + 1)'], {}), '((2, bandit.N, N_episodes + 1))\n', (5292, 5323), True, 'import numpy as np\n'), ((6054, 6063), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6061, 6063), True, 'import matplotlib.pyplot as plt\n'), ((6068, 6088), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 2.0]'], {}), '([0.0, 2.0])\n', (6076, 6088), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6172), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""action"""'], {}), "('action')\n", (6162, 6172), True, 'import matplotlib.pyplot as plt\n'), ((6177, 6196), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (6187, 6196), True, 'import matplotlib.pyplot as plt\n'), ((6271, 6281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6279, 6281), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2013, 2015), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3638), 'numpy.arange', 'np.arange', (['(1)', '(num_bars + 1)'], {}), '(1, num_bars + 1)\n', (3621, 3638), True, 'import numpy as np\n'), ((4895, 4914), 'numpy.sum', 'np.sum', (['ucb_agent.t'], {}), '(ucb_agent.t)\n', (4901, 4914), True, 'import numpy as np\n'), ((2495, 2517), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 
4])\n', (2503, 2517), True, 'import numpy as np\n'), ((3009, 3045), 'matplotlib.animation.FFMpegWriter', 'animation.FFMpegWriter', ([], {'fps': 'self.fps'}), '(fps=self.fps)\n', (3031, 3045), False, 'from matplotlib import animation\n'), ((3680, 3698), 'numpy.zeros', 'np.zeros', (['num_bars'], {}), '(num_bars)\n', (3688, 3698), True, 'import numpy as np\n'), ((5023, 5034), 'math.log', 'math.log', (['t'], {}), '(t)\n', (5031, 5034), False, 'import math\n'), ((5091, 5102), 'math.log', 'math.log', (['f'], {}), '(f)\n', (5099, 5102), False, 'import math\n')] |
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from utils import data_loader
from sklearn.cluster import KMeans
from utils import save_images, make_dirs, acc, nmi, ari
class Encoder(tf.keras.Model):
    """Fully connected encoder: flattened input -> 500 -> 500 -> 2000 -> 10-d code."""
    def __init__(self):
        super(Encoder, self).__init__()
        # Layer attribute names are kept so checkpoints/variable ordering stay stable.
        self.flatten_layer = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(500, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(500, activation=tf.nn.relu)
        self.dense3 = tf.keras.layers.Dense(2000, activation=tf.nn.relu)
        self.bottleneck = tf.keras.layers.Dense(10)
    def call(self, inp):
        """Map a batch of inputs to their latent codes (no activation on the bottleneck)."""
        hidden = self.flatten_layer(inp)
        for layer in (self.dense1, self.dense2, self.dense3):
            hidden = layer(hidden)
        return self.bottleneck(hidden)
class Decoder(tf.keras.Model):
    """Fully connected decoder mirroring Encoder: 2000 -> 500 -> 500 -> 784 logits."""
    def __init__(self):
        super(Decoder, self).__init__()
        # Attribute names preserved so variable collection order is unchanged.
        self.dense4 = tf.keras.layers.Dense(2000, activation=tf.nn.relu)
        self.dense5 = tf.keras.layers.Dense(500, activation=tf.nn.relu)
        self.dense6 = tf.keras.layers.Dense(500, activation=tf.nn.relu)
        self.dense_final = tf.keras.layers.Dense(784)
    def call(self, inp):
        """Reconstruct a flattened 784-d output from a latent code (linear last layer)."""
        hidden = inp
        for layer in (self.dense4, self.dense5, self.dense6):
            hidden = layer(hidden)
        return self.dense_final(hidden)
class DEC(object):
    """Deep Embedded Clustering trainer: autoencoder pretraining, k-means
    initialization of cluster centers, then KL-divergence finetuning of the
    encoder and centers."""
    def __init__(self, config):
        # config is expected to provide n_clusters, latent_dim and batch_size
        # (read below and in finetune); data_loader consumes the rest.
        self.config = config
        self.enc = Encoder()
        self.dec = Decoder()
        self.alpha = 1.0
        self.latent_dim = 10
        #self.optim = tf.keras.optimizers.Adam(self.config.learning_rate)
        # Separate SGD optimizers for the two phases (lr=1 for pretraining,
        # lr=0.01 for finetuning, as configured here).
        self.pre_optim = tf.keras.optimizers.SGD(lr=1, momentum=0.9)
        self.fin_optim = tf.keras.optimizers.SGD(lr=0.01, momentum=0.9)
        self.global_step = tf.Variable(0, trainable=False)
        self.global_epoch = tf.Variable(0, trainable=False)
        # Trainable cluster centroids in latent space, filled in by initialize().
        self.cluster_centers = tf.Variable(tf.zeros([self.config.n_clusters, self.config.latent_dim]), trainable=True)
        self.x, self.y, self.trainloader = data_loader(config)
    def pretrain(self):
        """Train encoder+decoder as a plain autoencoder (MSE) for 200 epochs."""
        print('Pretraining start!')
        for epoch in tqdm(range(200)):
            epoch_loss = []
            for x_batch, _ in self.trainloader:
                with tf.GradientTape() as tape:
                    z = self.enc(x_batch)
                    x_rec = self.dec(z)
                    # Compare the reconstruction against the flattened input.
                    batch_loss = tf.reduce_mean(tf.keras.losses.mean_squared_error(tf.keras.layers.Flatten()(x_batch), x_rec))
                t_vars = self.enc.trainable_variables + self.dec.trainable_variables
                enc_grads = tape.gradient(batch_loss, t_vars)
                self.pre_optim.apply_gradients(zip(enc_grads, t_vars))
                epoch_loss.append(batch_loss)
            print('epoch_loss:{:.4f}'.format(tf.reduce_mean(epoch_loss).numpy()))
        print('Pretraining finish!')
    def initialize(self):
        """Embed the full training set, run k-means, and copy the resulting
        centroids into self.cluster_centers. Prints initial clustering metrics."""
        z = np.array([]).astype('float32').reshape(0, self.latent_dim)
        true = np.array([])
        for x_batch, labels in self.trainloader:
            latent = self.enc(x_batch)
            z = np.vstack((z, latent))
            true = np.append(true, labels)
        kmeans = KMeans(n_clusters=10, n_init=20).fit(z)
        self.cluster_centers.assign(kmeans.cluster_centers_)
        pred = kmeans.predict(z)
        acc_ = acc(true, pred)
        nmi_ = nmi(true, pred)
        ari_ = ari(true, pred)
        print('acc:{}, nmi:{}, ari:{}'.format(acc_, nmi_, ari_))
    def cluster_assign(self, z):
        """Soft assignment q: Student's t-kernel similarity between latent points
        and cluster centers, normalized over clusters (alpha fixed at 1 here)."""
        z = tf.expand_dims(z, axis=1)
        q = 1.0 + (tf.reduce_sum(tf.math.square(z - self.cluster_centers), axis=2) / 1.)
        q = q ** (- (1. + 1.0) / 2.0)
        q = q / tf.reduce_sum(q, axis=1, keepdims=True)
        return q
    def target_distribution(self, q):
        """Sharpened target distribution p from q (squares q, normalizes by
        per-cluster frequency, then renormalizes each row)."""
        weight = q ** 2 / tf.reduce_sum(q, axis=0)
        p = weight / tf.reduce_sum(weight, axis=1, keepdims=True)
        return p
    def finetune(self):
        """Minimize KL(p || q) over encoder weights and cluster centers for
        20 epochs; p is recomputed once per epoch over the whole dataset."""
        for epoch in tqdm(range(20)):
            z = self.enc(self.x)
            q = self.cluster_assign(z)
            p = self.target_distribution(q)
            epoch_loss = []
            pred = np.array([])
            true = np.array([])
            for index, (x_batch, labels) in enumerate(self.trainloader):
                with tf.GradientTape() as tape:
                    latent = self.enc(x_batch)
                    q_ = self.cluster_assign(latent)
                    kl = tf.keras.losses.KLDivergence()
                    # Slice the epoch-level target p to match this batch
                    # (assumes the loader yields batches in a fixed order —
                    # TODO confirm the loader is not shuffled between the
                    # p computation and this loop).
                    batch_loss = kl(p[index * self.config.batch_size: min((index+1) * self.config.batch_size, self.x.shape[0])], q_)
                t_vars = self.enc.trainable_variables + [self.cluster_centers]
                t_grads = tape.gradient(batch_loss, t_vars)
                self.fin_optim.apply_gradients(zip(t_grads, t_vars))
                epoch_loss.append(batch_loss)
                y_pred = np.argmax(q_, axis=1)
                pred = np.append(pred, y_pred)
                true = np.append(true, labels)
            acc_ = acc(true, pred)
            nmi_ = nmi(true, pred)
            ari_ = ari(true, pred)
            print('epoch:{}, epoch_loss:{:.4f}, acc:{:.4f}, nmi:{:.4f}, ari:{:.4f}'.format(
                epoch, tf.reduce_mean(epoch_loss).numpy(), acc_, nmi_, ari_))
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.optimizers.SGD",
"utils.ari",
"tensorflow.Variable",
"tensorflow.math.square",
"tensorflow.keras.layers.Flatten",
"sklearn.cluster.KMeans",
"numpy.append",
"utils.nmi",
"utils.data_loader",
"tensorflo... | [((424, 449), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (447, 449), True, 'import tensorflow as tf\n'), ((472, 521), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(500)'], {'activation': 'tf.nn.relu'}), '(500, activation=tf.nn.relu)\n', (493, 521), True, 'import tensorflow as tf\n'), ((544, 593), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(500)'], {'activation': 'tf.nn.relu'}), '(500, activation=tf.nn.relu)\n', (565, 593), True, 'import tensorflow as tf\n'), ((616, 666), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2000)'], {'activation': 'tf.nn.relu'}), '(2000, activation=tf.nn.relu)\n', (637, 666), True, 'import tensorflow as tf\n'), ((693, 718), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {}), '(10)\n', (714, 718), True, 'import tensorflow as tf\n'), ((1060, 1110), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2000)'], {'activation': 'tf.nn.relu'}), '(2000, activation=tf.nn.relu)\n', (1081, 1110), True, 'import tensorflow as tf\n'), ((1133, 1182), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(500)'], {'activation': 'tf.nn.relu'}), '(500, activation=tf.nn.relu)\n', (1154, 1182), True, 'import tensorflow as tf\n'), ((1205, 1254), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(500)'], {'activation': 'tf.nn.relu'}), '(500, activation=tf.nn.relu)\n', (1226, 1254), True, 'import tensorflow as tf\n'), ((1282, 1308), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(784)'], {}), '(784)\n', (1303, 1308), True, 'import tensorflow as tf\n'), ((1767, 1810), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(1)', 'momentum': '(0.9)'}), '(lr=1, momentum=0.9)\n', (1790, 1810), True, 'import tensorflow as tf\n'), ((1836, 1882), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)'}), '(lr=0.01, momentum=0.9)\n', (1859, 
1882), True, 'import tensorflow as tf\n'), ((1910, 1941), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (1921, 1941), True, 'import tensorflow as tf\n'), ((1970, 2001), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (1981, 2001), True, 'import tensorflow as tf\n'), ((2164, 2183), 'utils.data_loader', 'data_loader', (['config'], {}), '(config)\n', (2175, 2183), False, 'from utils import data_loader\n'), ((3158, 3170), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3166, 3170), True, 'import numpy as np\n'), ((3524, 3539), 'utils.acc', 'acc', (['true', 'pred'], {}), '(true, pred)\n', (3527, 3539), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((3555, 3570), 'utils.nmi', 'nmi', (['true', 'pred'], {}), '(true, pred)\n', (3558, 3570), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((3586, 3601), 'utils.ari', 'ari', (['true', 'pred'], {}), '(true, pred)\n', (3589, 3601), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((3721, 3746), 'tensorflow.expand_dims', 'tf.expand_dims', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (3735, 3746), True, 'import tensorflow as tf\n'), ((2045, 2103), 'tensorflow.zeros', 'tf.zeros', (['[self.config.n_clusters, self.config.latent_dim]'], {}), '([self.config.n_clusters, self.config.latent_dim])\n', (2053, 2103), True, 'import tensorflow as tf\n'), ((3275, 3297), 'numpy.vstack', 'np.vstack', (['(z, latent)'], {}), '((z, latent))\n', (3284, 3297), True, 'import numpy as np\n'), ((3317, 3340), 'numpy.append', 'np.append', (['true', 'labels'], {}), '(true, labels)\n', (3326, 3340), True, 'import numpy as np\n'), ((3890, 3929), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['q'], {'axis': '(1)', 'keepdims': '(True)'}), '(q, axis=1, keepdims=True)\n', (3903, 3929), True, 'import tensorflow as tf\n'), ((4016, 4040), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['q'], {'axis': 
'(0)'}), '(q, axis=0)\n', (4029, 4040), True, 'import tensorflow as tf\n'), ((4062, 4106), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weight'], {'axis': '(1)', 'keepdims': '(True)'}), '(weight, axis=1, keepdims=True)\n', (4075, 4106), True, 'import tensorflow as tf\n'), ((4363, 4375), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4371, 4375), True, 'import numpy as np\n'), ((4395, 4407), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4403, 4407), True, 'import numpy as np\n'), ((5238, 5253), 'utils.acc', 'acc', (['true', 'pred'], {}), '(true, pred)\n', (5241, 5253), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((5273, 5288), 'utils.nmi', 'nmi', (['true', 'pred'], {}), '(true, pred)\n', (5276, 5288), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((5308, 5323), 'utils.ari', 'ari', (['true', 'pred'], {}), '(true, pred)\n', (5311, 5323), False, 'from utils import save_images, make_dirs, acc, nmi, ari\n'), ((3358, 3390), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(10)', 'n_init': '(20)'}), '(n_clusters=10, n_init=20)\n', (3364, 3390), False, 'from sklearn.cluster import KMeans\n'), ((5102, 5123), 'numpy.argmax', 'np.argmax', (['q_'], {'axis': '(1)'}), '(q_, axis=1)\n', (5111, 5123), True, 'import numpy as np\n'), ((5147, 5170), 'numpy.append', 'np.append', (['pred', 'y_pred'], {}), '(pred, y_pred)\n', (5156, 5170), True, 'import numpy as np\n'), ((5194, 5217), 'numpy.append', 'np.append', (['true', 'labels'], {}), '(true, labels)\n', (5203, 5217), True, 'import numpy as np\n'), ((2382, 2399), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2397, 2399), True, 'import tensorflow as tf\n'), ((3780, 3820), 'tensorflow.math.square', 'tf.math.square', (['(z - self.cluster_centers)'], {}), '(z - self.cluster_centers)\n', (3794, 3820), True, 'import tensorflow as tf\n'), ((4502, 4519), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4517, 4519), True, 'import tensorflow 
as tf\n'), ((4654, 4684), 'tensorflow.keras.losses.KLDivergence', 'tf.keras.losses.KLDivergence', ([], {}), '()\n', (4682, 4684), True, 'import tensorflow as tf\n'), ((3084, 3096), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3092, 3096), True, 'import numpy as np\n'), ((2958, 2984), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['epoch_loss'], {}), '(epoch_loss)\n', (2972, 2984), True, 'import tensorflow as tf\n'), ((5442, 5468), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5456, 5468), True, 'import tensorflow as tf\n'), ((2574, 2599), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2597, 2599), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import pandas as pd
import trainSet as trainSet
import testSet as testSet
import numpy as np
import Encoder as Encoder
import encoder_lstm as encoder_lstm
import Decoder as Decoder
import matplotlib.pyplot as plt
import Decoder_lstm as Decoder_lstm
import encoder_gru as encodet_gru
import encoder_rnn as encoder_rnn
import os
import datetime
# Allow duplicate OpenMP runtimes to coexist (common MKL/Anaconda workaround).
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Clear any leftover TF1 graph state. The original called this twice in a row;
# the call is idempotent, so a single call suffices.
tf.reset_default_graph()
class parameter(object):
    """Container for the model/training hyper-parameters used by this script."""

    def __init__(self):
        """Populate every hyper-parameter with its default value."""
        self.batch_size = 64          # mini-batch size for training
        self.is_training = True       # training vs. inference mode flag
        self.encoder_layer = 1        # number of stacked encoder layers
        self.decoder_layer = 1        # number of stacked decoder layers
        self.encoder_nodes = 128      # hidden units per encoder cell
        self.prediction_size = 24     # hours predicted per sample
        self.learning_rate = 0.001
        self.time_size = 72           # input window length (hours)
        self.features = 15            # features per time step
class train(object):
    """Builds the TF1 seq2seq graph (encoder -> decoder) for pollutant
    forecasting, plus evaluation and plotting helpers."""
    def __init__(self,time_size,features,prediction_size):
        # Placeholders: x_input is [batch, time_size, features], y is the
        # [batch, prediction_size] target series.
        self.x_input=tf.placeholder(dtype=tf.float32,shape=[None,time_size,features],name='pollutant')
        self.y=tf.placeholder(dtype=tf.float32,shape=[None,prediction_size])
    def trains(self,batch_size,encoder_layer,decoder_layer,encoder_nodes,prediction_size,is_training):
        '''
        Build the training graph: encode the input series, decode a
        prediction, and define the RMSE-style loss in self.cross_entropy.

        :param batch_size: mini-batch size (e.g. 64)
        :param encoder_layer: number of encoder layers
        :param decoder_layer: number of decoder layers
        :param encoder_nodes: hidden units per encoder cell
        :param prediction_size: length of the predicted series
        :param is_training: True when building the training graph
        :return: nothing; side effect is setting self.cross_entropy
        '''
        # #this step use to encoding the input series data
        encoder_init=Encoder.encoder(self.x_input,batch_size,encoder_layer,encoder_nodes,is_training)
        (c_state, h_state)=encoder_init.encoding()
        # Alternative encoder backends kept for experimentation:
        #encoder_init=encoder_lstm.lstm(self.x_input,batch_size,encoder_layer,encoder_nodes,is_training)
        #encoder_init=encodet_gru.gru(self.x_input,batch_size,encoder_layer,encoder_nodes,is_training)
        # encoder_init=encoder_rnn.rnn(self.x_input,batch_size,encoder_layer,encoder_nodes,is_training)
        # h_state=encoder_init.encoding()
        # this step predicts the pollutant concentration from the encoder state
        decoder_init=Decoder_lstm.lstm(batch_size,prediction_size,decoder_layer,encoder_nodes,is_training)
        pre=decoder_init.decoding(h_state)
        # RMSE per output step, then averaged over steps.
        self.cross_entropy = tf.reduce_mean(tf.sqrt(tf.reduce_mean(tf.square(self.y - pre), axis=0)), axis=0)
        # backprocess and update the parameters
        # self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.cross_entropy)
        # return self.cross_entropy,self.train_op
    def test(self,batch_size,encoder_layer,decoder_layer,encoder_nodes,prediction_size,is_training):
        '''
        Build the inference graph and return the prediction tensor.

        :param batch_size: usually 1 at inference time
        :param encoder_layer: number of encoder layers
        :param decoder_layer: number of decoder layers
        :param encoder_nodes: hidden units per encoder cell
        :param prediction_size: length of the predicted series
        :param is_training: False at inference time
        :return: the prediction tensor (also stored in self.pre)
        '''
        encoder_init=Encoder.encoder(self.x_input,batch_size,encoder_layer,encoder_nodes,is_training)
        (c_state, h_state)=encoder_init.encoding()
        # encoder_init = encoder_lstm.lstm(self.x_input, batch_size, encoder_layer, encoder_nodes, is_training)
        # encoder_init = encodet_gru.gru(self.x_input, batch_size, encoder_layer, encoder_nodes, is_training)
        # encoder_init = encoder_rnn.rnn(self.x_input, batch_size, encoder_layer, encoder_nodes, is_training)
        # h_state = encoder_init.encoding()
        # this step predicts the pollutant concentration from the encoder state
        decoder_init=Decoder_lstm.lstm(batch_size,prediction_size,decoder_layer,encoder_nodes,is_training)
        self.pre=decoder_init.decoding(h_state)
        return self.pre
    def accuracy(self,Label,Predict,epoch,steps):
        '''
        Print and return MAE, RMSE and the correlation coefficient.

        :param Label: observed values
        :param Predict: predicted values
        :param epoch: epoch number (for logging only)
        :param steps: step number (for logging only)
        :return: (MAE, RMSE, correlation coefficient)
        '''
        error = Label - Predict
        average_Error = np.mean(np.fabs(error.astype(float)))
        print("After %d epochs and %d steps, MAE error is : %f" % (epoch, steps, average_Error))
        RMSE_Error = np.sqrt(np.mean(np.square(np.array(Label) - np.array(Predict))))
        print("After %d epochs and %d steps, RMSE error is : %f" % (epoch, steps, RMSE_Error))
        # Pearson correlation computed from centered products / std devs.
        cor = np.mean(np.multiply((Label - np.mean(Label)),
                                (Predict - np.mean(Predict)))) / (np.std(Predict) * np.std(Label))
        print('The correlation coefficient is: %f' % (cor))
        return average_Error,RMSE_Error,cor
    def describe(self,Label,Predict,epoch,prediction_size):
        # Plot observed vs. predicted for one day (hours 24-48), but only at
        # the selected checkpoint epochs to limit figure spam.
        if epoch == 10 or epoch == 30 or epoch == 50 or epoch == 70 or epoch == 90 or epoch == 100:
            plt.figure()
            # Label is observed value,Blue
            plt.plot(Label[24:48], 'b*:', label=u'actual value')
            # Predict is predicted value,Red
            plt.plot(Predict[24:48], 'r*:', label=u'predicted value')
            # use the legend
            # plt.legend()
            plt.xlabel("Time(hours)", fontsize=17)
            plt.ylabel("PM2.5(ug/m3)", fontsize=17)
            plt.title("The prediction of PM2.5 (epochs =" + str(epoch) + ")", fontsize=17)
            plt.show()
def begin():
    '''
    Restore each saved checkpoint (rlstmckpt/pollutant.ckpt-0 .. -9), run the
    inference graph over the test set and report MAE / RMSE / correlation,
    plus sample prediction windows and a plot.
    '''
    para = parameter()
    training = train(para.time_size,para.features,para.prediction_size)
    # Build the inference graph once (batch_size=1, is_training=False).
    pre=training.test(1,para.encoder_layer,para.decoder_layer,para.encoder_nodes,para.prediction_size,False)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # model_file = tf.train.latest_checkpoint('rlstmckpt/')
        # saver.restore(sess, model_file)
        for i in range(0,10):
            saver.restore(sess, 'rlstmckpt/pollutant.ckpt-'+str(i))
            para.batch_size = 1
            para.is_training=False
            # reading for the test sets
            Label = list()
            Predict = list()
            for x, label in testSet.train_data(para.batch_size, para.time_size, para.prediction_size):
                s = sess.run((pre),feed_dict={training.x_input: x})
                Label.append(label)
                Predict.append(s)
            # Flatten the collected batches into 1-d arrays for scoring.
            Label = np.reshape(np.array(Label), [1, -1])[0]
            Predict = np.reshape(np.array(Predict), [1, -1])[0]
            # Spot-check a few prediction windows.
            print(list(Predict[25:48]))
            print(list(Predict[100:124]))
            print(list(Predict[200:224]))
            print(list(Predict[300:324]))
            print(list(Predict[400:424]))
            print(list(Predict[500:524]))
            average_Error, RMSE_Error, cor=training.accuracy(Label,Predict,3,0)
            print('the average is %f:'%average_Error)
            print('the RMSE value is %f:'%RMSE_Error)
            print('the correlations value is %f:'%cor)
            training.describe(Label,Predict,100,para.prediction_size)
def main(argv=None):
    """Script entry point; ``argv`` is accepted but unused."""
    begin()
if __name__ == '__main__':
    main()
"matplotlib.pyplot.show",
"Encoder.encoder",
"tensorflow.train.Saver",
"matplotlib.pyplot.plot",
"Decoder_lstm.lstm",
"tensorflow.reset_default_graph",
"numpy.std",
"tensorflow.Session",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"tensorflow.square",
... | [((367, 391), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (389, 391), True, 'import tensorflow as tf\n'), ((435, 459), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (457, 459), True, 'import tensorflow as tf\n'), ((5544, 5560), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5558, 5560), True, 'import tensorflow as tf\n'), ((991, 1081), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, time_size, features]', 'name': '"""pollutant"""'}), "(dtype=tf.float32, shape=[None, time_size, features], name=\n 'pollutant')\n", (1005, 1081), True, 'import tensorflow as tf\n'), ((1088, 1151), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, prediction_size]'}), '(dtype=tf.float32, shape=[None, prediction_size])\n', (1102, 1151), True, 'import tensorflow as tf\n'), ((1561, 1649), 'Encoder.encoder', 'Encoder.encoder', (['self.x_input', 'batch_size', 'encoder_layer', 'encoder_nodes', 'is_training'], {}), '(self.x_input, batch_size, encoder_layer, encoder_nodes,\n is_training)\n', (1576, 1649), True, 'import Encoder as Encoder\n'), ((2127, 2220), 'Decoder_lstm.lstm', 'Decoder_lstm.lstm', (['batch_size', 'prediction_size', 'decoder_layer', 'encoder_nodes', 'is_training'], {}), '(batch_size, prediction_size, decoder_layer, encoder_nodes,\n is_training)\n', (2144, 2220), True, 'import Decoder_lstm as Decoder_lstm\n'), ((2920, 3008), 'Encoder.encoder', 'Encoder.encoder', (['self.x_input', 'batch_size', 'encoder_layer', 'encoder_nodes', 'is_training'], {}), '(self.x_input, batch_size, encoder_layer, encoder_nodes,\n is_training)\n', (2935, 3008), True, 'import Encoder as Encoder\n'), ((3508, 3601), 'Decoder_lstm.lstm', 'Decoder_lstm.lstm', (['batch_size', 'prediction_size', 'decoder_layer', 'encoder_nodes', 'is_training'], {}), '(batch_size, prediction_size, decoder_layer, encoder_nodes,\n is_training)\n', (3525, 3601), 
True, 'import Decoder_lstm as Decoder_lstm\n'), ((5570, 5582), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5580, 5582), True, 'import tensorflow as tf\n'), ((4720, 4732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4730, 4732), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4840), 'matplotlib.pyplot.plot', 'plt.plot', (['Label[24:48]', '"""b*:"""'], {'label': 'u"""actual value"""'}), "(Label[24:48], 'b*:', label=u'actual value')\n", (4796, 4840), True, 'import matplotlib.pyplot as plt\n'), ((4898, 4955), 'matplotlib.pyplot.plot', 'plt.plot', (['Predict[24:48]', '"""r*:"""'], {'label': 'u"""predicted value"""'}), "(Predict[24:48], 'r*:', label=u'predicted value')\n", (4906, 4955), True, 'import matplotlib.pyplot as plt\n'), ((5024, 5062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time(hours)"""'], {'fontsize': '(17)'}), "('Time(hours)', fontsize=17)\n", (5034, 5062), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5114), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PM2.5(ug/m3)"""'], {'fontsize': '(17)'}), "('PM2.5(ug/m3)', fontsize=17)\n", (5085, 5114), True, 'import matplotlib.pyplot as plt\n'), ((5219, 5229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5227, 5229), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6060), 'testSet.train_data', 'testSet.train_data', (['para.batch_size', 'para.time_size', 'para.prediction_size'], {}), '(para.batch_size, para.time_size, para.prediction_size)\n', (6005, 6060), True, 'import testSet as testSet\n'), ((4411, 4426), 'numpy.std', 'np.std', (['Predict'], {}), '(Predict)\n', (4417, 4426), True, 'import numpy as np\n'), ((4429, 4442), 'numpy.std', 'np.std', (['Label'], {}), '(Label)\n', (4435, 4442), True, 'import numpy as np\n'), ((2324, 2347), 'tensorflow.square', 'tf.square', (['(self.y - pre)'], {}), '(self.y - pre)\n', (2333, 2347), True, 'import tensorflow as tf\n'), ((6231, 6246), 'numpy.array', 'np.array', (['Label'], {}), '(Label)\n', (6239, 6246), True, 
'import numpy as np\n'), ((6293, 6310), 'numpy.array', 'np.array', (['Predict'], {}), '(Predict)\n', (6301, 6310), True, 'import numpy as np\n'), ((4148, 4163), 'numpy.array', 'np.array', (['Label'], {}), '(Label)\n', (4156, 4163), True, 'import numpy as np\n'), ((4166, 4183), 'numpy.array', 'np.array', (['Predict'], {}), '(Predict)\n', (4174, 4183), True, 'import numpy as np\n'), ((4326, 4340), 'numpy.mean', 'np.mean', (['Label'], {}), '(Label)\n', (4333, 4340), True, 'import numpy as np\n'), ((4388, 4404), 'numpy.mean', 'np.mean', (['Predict'], {}), '(Predict)\n', (4395, 4404), True, 'import numpy as np\n')] |
"""
A module for testing `amf.py`.
"""
from numpy.testing import assert_allclose
def test_Ɛ_bar(amf, 𝒫_bar, rtol, atol):
𝒫 = amf.𝒫
𝒫_bar_test = amf.Ɛ_bar(𝒫)
for actual, expected in zip(𝒫_bar_test, 𝒫_bar):
assert_allclose(actual, expected, rtol=rtol, atol=atol)
def test_Ɛ_tilde(amf, 𝒫_bar, 𝒫_tilde, rtol, atol):
𝒫_tilde_test = amf.Ɛ_tilde(𝒫_bar)
for actual, expected in zip(𝒫_tilde_test, 𝒫_tilde):
assert_allclose(actual, expected, rtol=rtol, atol=atol)
def test_iterate(amf, 𝒫_bar, rtol, atol):
T = 1
amf.iterate(T)
𝒫_tilde = amf.𝒫
test_objs = zip([amf.𝒫_t_bar_path[T], amf.𝒫_t_tilde_path[T]],
[𝒫_bar, 𝒫_tilde])
for 𝒫_test, 𝒫 in test_objs:
for actual, expected in zip(𝒫_test, 𝒫):
assert_allclose(actual, expected, rtol=rtol, atol=atol)
| [
"numpy.testing.assert_allclose"
] | [((230, 285), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual', 'expected'], {'rtol': 'rtol', 'atol': 'atol'}), '(actual, expected, rtol=rtol, atol=atol)\n', (245, 285), False, 'from numpy.testing import assert_allclose\n'), ((442, 497), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual', 'expected'], {'rtol': 'rtol', 'atol': 'atol'}), '(actual, expected, rtol=rtol, atol=atol)\n', (457, 497), False, 'from numpy.testing import assert_allclose\n'), ((789, 844), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual', 'expected'], {'rtol': 'rtol', 'atol': 'atol'}), '(actual, expected, rtol=rtol, atol=atol)\n', (804, 844), False, 'from numpy.testing import assert_allclose\n')] |
import numpy as np
def apply_foreground_mask(spots, mask, ratio):
    """Keep only spots whose scaled, rounded voxel lies in the mask foreground.

    ``spots`` is an (N, >=3) array whose first three columns are coordinates,
    ``mask`` a 3-d array whose foreground voxels equal 1, and ``ratio`` the
    scale taking spot coordinates into mask voxel coordinates.
    """
    voxels = np.round(spots[:, :3] * ratio).astype(np.uint16)
    # clamp rounded coordinates that landed just past the mask edge
    for axis in range(3):
        limit = mask.shape[axis]
        voxels[voxels[:, axis] >= limit, axis] = limit - 1
    keep = mask[voxels[:, 0], voxels[:, 1], voxels[:, 2]] == 1
    return spots[keep]
def filter_by_range(spots, origin, span):
    """Return a copy of ``spots`` restricted to an axis-aligned box.

    A spot is kept when, on each axis ``i``, its coordinate is strictly
    greater than ``origin[i] - 1`` and strictly less than ``origin[i] + span[i]``.
    """
    kept = np.copy(spots)
    for axis in range(3):
        lower = origin[axis] - 1
        upper = origin[axis] + span[axis]
        inside = (kept[:, axis] > lower) & (kept[:, axis] < upper)
        kept = kept[inside]
    return kept
| [
"numpy.round",
"numpy.copy"
] | [((538, 552), 'numpy.copy', 'np.copy', (['spots'], {}), '(spots)\n', (545, 552), True, 'import numpy as np\n'), ((144, 174), 'numpy.round', 'np.round', (['(spots[:, :3] * ratio)'], {}), '(spots[:, :3] * ratio)\n', (152, 174), True, 'import numpy as np\n')] |
# coding: utf-8
# # Apply vgg16 model and predict class for test data of https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition and submit prediction results to Kaggle.
# In[1]:
# Module versions
import sys
import keras
import theano
import numpy
import pandas
# Log interpreter and library versions for reproducibility of the run.
print("Python version:" + sys.version)
print("Keras version:" + keras.__version__)
print("Theano version:" + theano.__version__)
print("Numpy version:" + numpy.__version__)
# ## Setup Keras and its backend before using it
# In[2]:
# Configure the Keras backend before building any model: channels-first image
# layout, epsilon 1e-7 and float32, logging each value before/after the change.
print("Keras backend:" + keras.backend.backend())
print("Keras backend image data format:Before:" + keras.backend.image_data_format())
# change image_data_format to channels_first
keras.backend.set_image_data_format('channels_first')
print("Keras backend image data format:After:" + keras.backend.image_data_format())
print("Keras backend image_dim_ordering:After:" + keras.backend.image_dim_ordering())
print("Keras backend epsilon:Before:" + str(keras.backend.epsilon()))
# change epsilon to 1e-7
keras.backend.set_epsilon(1e-7)
print("Keras backend epsilon:After:" + str(keras.backend.epsilon()))
print("Keras backend floatx:Before:" + str(keras.backend.floatx()))
# change floatx to float32
keras.backend.set_floatx('float32')
print("Keras backend floatx:After:" + str(keras.backend.floatx()))
# In[3]:
# Re-import the local helper modules; reload (a builtin here, so this is
# presumably Python 2 — note the u'' strings below) picks up edits made while
# the notebook kernel stays alive.
import utils; reload(utils)
import vgg16; reload(vgg16)
#get_ipython().magic(u'matplotlib inline')
#from utils import plots
from vgg16 import Vgg16
# ### Training, Validation and Testing data path
# #### Enable either floydhub or local path
# In[4]:
# Select execution environment ("local" vs FloydHub) and dataset size
# ("sample" vs "global"); each choice sets the data/output paths below.
floyd_or_local = "floyd"
dataset = "global" # sample or global
# In[21]:
#path containing sample, train, valid and test1 directories
global_path = ""
# trimmed down version of train, validation and testing data from above path
sample_path = ""
# path to save any artifacts created
output_path = ""
if floyd_or_local == "local":
    global_path = "data/dogscats/"
    sample_path = "data/dogscats/sample/"
    output_path = "./"
    utils.set_keras_cache_dir("/home/shabeer/.keras/")
    # Prepare local global/sample test path
    # Ignore errors if this notebook is run in non-local environment.
    get_ipython().system(u'global_path="data/dogscats/"')
    get_ipython().system(u'mkdir -p $global_path/test1/unknown/')
    get_ipython().system(u'mv $global_path/test1/*.jpg $global_path/test1/unknown/')
    get_ipython().system(u'sample_path="data/dogscats/sample/"')
    get_ipython().system(u'mkdir -p $sample_path/test1/unknown/')
    get_ipython().system(u'cp $global_path/test1/unknown/4*09.jpg $sample_path/test1/unknown/ ')
else:
    # FloydHub mounts datasets under /input and collects artifacts from /output.
    global_path = "/input/dogscats/"
    sample_path = "/input/dogscats/sample/"
    output_path = "/output/"
    utils.set_keras_cache_dir("/input/models/")
if dataset == "sample":
    path = sample_path
else:
    path = global_path
# Test images are always taken from the full dataset, regardless of `dataset`.
test_path = global_path + "/test1/"
# ### Create vgg16 model with its weights loaded
# In[6]:
# Instantiate the pretrained VGG16 wrapper and prepare batch generators.
vgg = Vgg16()
# In[7]:
# Based on memory available, choosing a medium value. Max could be 64, above which could be - out of memory
batch_size = 16
train_batches = vgg.get_batches(path + '/train/', batch_size = batch_size, class_mode='categorical')
validation_batches = vgg.get_batches(path + '/valid/', batch_size = batch_size)
#test_batches = vgg.get_batches(path + '../test1/', batch_size = batch_size * 4)
# In[8]:
print("Number of classes in vgg model before fine tuning:" + str(len(vgg.classes)))
# fine tune vgg16 model to 2 classes - dogs and cats
vgg.finetune(train_batches)
print("Number of classes in vgg model before after tuning:" + str(len(vgg.classes)))
print("Classes after tuning:" + str(vgg.classes))
# ## TODO train and validate vgg16 model to 2 classes data.
# In[9]:
import datetime
# Time one epoch of fine-tuning over the train/validation generators.
time_before_starting_training = datetime.datetime.now()
print(time_before_starting_training)
# train & validate
vgg.fit(batches= train_batches, val_batches= validation_batches, nb_epoch=1)
time_after_training = datetime.datetime.now()
print(time_after_training)
train_imgs, train_labels = next(train_batches)
print("train_imgs shape:" + str(train_imgs.shape))
print("Time taken to train & validate: " + str(time_after_training - time_before_starting_training))
# ## Save model and weights
# In[10]:
print("Saving model configuration.")
model_json = vgg.model.to_json()
#print(model_json)
with open(output_path + '/model.json', 'w') as f:
    f.write(model_json)
print("Saved model configuration.")
#serialize weights to hdf5
# NOTE(review): save_weights returns None, so model_weights is always None;
# the call is kept for its side effect of writing the .h5 file.
model_weights = vgg.model.save_weights(output_path + '/model_weights.h5')
print("Saved model weights.")
# In[11]:
#?vgg.model.save_weights
# ## Test and predict labels
# In[22]:
# Smoke-test prediction on a single small batch before the full test-set run.
test_batches = vgg.get_batches(test_path, batch_size = 8, class_mode=None)
imgs = next(test_batches)
print(imgs.shape)
preds, idxs, classes = vgg.predict(imgs)
# In[23]:
#plots(imgs[0:11])
print(preds[0:11])
print(idxs[0:11])
print(classes[0:11])
# In[14]:
#?vgg.model.predict_generator
# In[24]:
# Time the full test-set prediction pass.
time_before_starting_testing = datetime.datetime.now()
print(time_before_starting_testing)
batch_size = 8
# both test_batches and predictions below are generators
#test_batches, predictions = vgg.test(test_path, batch_size = 8)
test_batches = vgg.get_batches(test_path, batch_size=batch_size, class_mode =None)
print("List of images across all test_batches: " + str(test_batches.samples))
import math
# ceil so that a final partial batch still gets its own prediction step
steps = int(math.ceil(test_batches.samples*1.0/batch_size))
print("Number of steps:" + str(steps))
predictions = vgg.model.predict_generator(test_batches, steps = steps, verbose=1)
time_after_testing = datetime.datetime.now()
print(time_after_testing)
# BUG FIX: previously subtracted time_before_starting_training, so the value
# printed under "Time taken to test" actually included training time as well.
# Measure the testing phase only.
time_taken = time_after_testing - time_before_starting_testing
print("Time taken to test: " + str(time_taken))
print(predictions.shape)
print(predictions[0:11])
print("Probability of image being a dog:" + str(predictions[0:11,1]))
# ## Construct Kaggle submission
# In[34]:
# Build the Kaggle submission CSV: one row per test image with the predicted
# probability that the image is a dog (column index 1 of predictions).
print(vgg.classes)
idx = numpy.argmax(predictions, axis = 1) # idx within each row of predictions, which contains max probability.
print(idx[0:11])
classes_predicted = map(lambda i: vgg.classes[i], idx)
# NOTE(review): slicing a map() result only works on Python 2, where map
# returns a list — consistent with the builtin reload used earlier.
print(classes_predicted[0:11])
from pandas import Series
from pandas import DataFrame
print(test_batches.filenames[0:11])
# Strip the "unknown/" directory prefix and ".jpg" suffix to get the image ids.
filenames = map(lambda f: f.replace('unknown/', '').replace('.jpg', ''), test_batches.filenames)
print(filenames[0:11])
# probability of image being a dog ( 1=dog, 0=cat)
dog_prob = [str("%.12f" % p) for p in predictions[:,1]]
p = pandas.concat([Series(filenames), Series(dog_prob)], axis = 1, keys = ['id', 'label'])
print(p[0:11])
p.to_csv(output_path + '/dogs-vs-cats-redux-kernels-edition_predictions_shabeer.csv', header= True, mode='w', index=False)
| [
"vgg16.Vgg16",
"keras.backend.set_floatx",
"keras.backend.image_data_format",
"numpy.argmax",
"math.ceil",
"keras.backend.backend",
"keras.backend.epsilon",
"keras.backend.floatx",
"utils.set_keras_cache_dir",
"keras.backend.set_image_data_format",
"keras.backend.image_dim_ordering",
"pandas.S... | [((687, 740), 'keras.backend.set_image_data_format', 'keras.backend.set_image_data_format', (['"""channels_first"""'], {}), "('channels_first')\n", (722, 740), False, 'import keras\n'), ((1007, 1039), 'keras.backend.set_epsilon', 'keras.backend.set_epsilon', (['(1e-07)'], {}), '(1e-07)\n', (1032, 1039), False, 'import keras\n'), ((1204, 1239), 'keras.backend.set_floatx', 'keras.backend.set_floatx', (['"""float32"""'], {}), "('float32')\n", (1228, 1239), False, 'import keras\n'), ((2978, 2985), 'vgg16.Vgg16', 'Vgg16', ([], {}), '()\n', (2983, 2985), False, 'from vgg16 import Vgg16\n'), ((3817, 3840), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3838, 3840), False, 'import datetime\n'), ((3998, 4021), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4019, 4021), False, 'import datetime\n'), ((5046, 5069), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5067, 5069), False, 'import datetime\n'), ((5623, 5646), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5644, 5646), False, 'import datetime\n'), ((5980, 6013), 'numpy.argmax', 'numpy.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (5992, 6013), False, 'import numpy\n'), ((2008, 2058), 'utils.set_keras_cache_dir', 'utils.set_keras_cache_dir', (['"""/home/shabeer/.keras/"""'], {}), "('/home/shabeer/.keras/')\n", (2033, 2058), False, 'import utils\n'), ((2744, 2787), 'utils.set_keras_cache_dir', 'utils.set_keras_cache_dir', (['"""/input/models/"""'], {}), "('/input/models/')\n", (2769, 2787), False, 'import utils\n'), ((5430, 5480), 'math.ceil', 'math.ceil', (['(test_batches.samples * 1.0 / batch_size)'], {}), '(test_batches.samples * 1.0 / batch_size)\n', (5439, 5480), False, 'import math\n'), ((531, 554), 'keras.backend.backend', 'keras.backend.backend', ([], {}), '()\n', (552, 554), False, 'import keras\n'), ((607, 640), 'keras.backend.image_data_format', 'keras.backend.image_data_format', ([], 
{}), '()\n', (638, 640), False, 'import keras\n'), ((790, 823), 'keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), '()\n', (821, 823), False, 'import keras\n'), ((875, 909), 'keras.backend.image_dim_ordering', 'keras.backend.image_dim_ordering', ([], {}), '()\n', (907, 909), False, 'import keras\n'), ((6530, 6547), 'pandas.Series', 'Series', (['filenames'], {}), '(filenames)\n', (6536, 6547), False, 'from pandas import Series\n'), ((6549, 6565), 'pandas.Series', 'Series', (['dog_prob'], {}), '(dog_prob)\n', (6555, 6565), False, 'from pandas import Series\n'), ((956, 979), 'keras.backend.epsilon', 'keras.backend.epsilon', ([], {}), '()\n', (977, 979), False, 'import keras\n'), ((1082, 1105), 'keras.backend.epsilon', 'keras.backend.epsilon', ([], {}), '()\n', (1103, 1105), False, 'import keras\n'), ((1152, 1174), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (1172, 1174), False, 'import keras\n'), ((1282, 1304), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (1302, 1304), False, 'import keras\n')] |
from tequila.simulators.simulator_base import BackendCircuit, QCircuit, BackendExpectationValue
from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction
from tequila import TequilaException
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
import qiskit, numpy
import qiskit.providers.aer.noise as qiskitnoise
from tequila.utils import to_float
import qiskit.test.mock.backends
def get_bit_flip(p):
    """
    Build a qiskit Pauli error channel that applies X with probability p
    and the identity with probability 1 - p.
    Parameters
    ----------
    p: float:
        a probability.
    Returns
    -------
    type:
        qiskit pauli error
    """
    channel = [('X', p), ('I', 1 - p)]
    return qiskitnoise.pauli_error(noise_ops=channel)
def get_phase_flip(p):
    """
    Build a qiskit Pauli error channel that applies Z with probability p
    and the identity with probability 1 - p.
    Parameters
    ----------
    p: float:
        a probability.
    Returns
    -------
    type:
        qiskit pauli error
    """
    channel = [('Z', p), ('I', 1 - p)]
    return qiskitnoise.pauli_error(noise_ops=channel)
# Number of qubits each qiskit gate name (or tequila gate category) acts on;
# used when mapping noise channels onto gates of matching arity.
gate_qubit_lookup = {
    'x': 1,
    'y': 1,
    'z': 1,
    'h': 1,
    'u1': 1,
    'u2': 1,
    'u3': 1,
    'cx': 2,
    'cy': 2,
    'cz': 2,
    'ch': 2,
    'cu3': 2,
    'ccx': 3,
    'r': 1,
    'single': 1,
    'control': 2,
    'multicontrol': 3
}
# Basis gate set handed to qiskit execution and noise-model construction.
full_basis = ['x', 'y', 'z', 'id', 'u1', 'u2', 'u3', 'h',
              'cx', 'cy', 'cz', 'cu3', 'ccx']
class TequilaQiskitException(TequilaException):
    """Raised for failures specific to the qiskit backend."""

    def __str__(self):
        return "Error in qiskit backend:" + self.message
class BackendCircuitQiskit(BackendCircuit):
    """
    Type representing circuits compiled for execution in qiskit.
    See BackendCircuit for documentation on inherited attributes and methods.
    Attributes
    ----------
    c: the number of classical channels in the circuit.
    classical_map:
        dictionary mapping qubits in tequila to classical registers representing measurement therefrom
    counter:
        counts how many distinct sympy.Symbol objects are employed in the circuit.
    noise_lookup: dict:
        dict mapping strings to qiskitnoise objects.
    numbering:
        tequila object for qubit order resolution.
    noise_model:
        a qiskit noise model built from a tequila NoiseModel
    op_lookup: dict:
        dictionary mapping strings (tequila gate names) to qiskit gate addition functions.
    pars_to_tq_: dict:
        dictionary mapping qiskit.Parameter objects back to tequila Variables and Objectives.
    q:
        the number of qubits in the circuit.
    qubit_map:
        mapping for qubit positions of gates to their location in a qiskit circuit
    resolver:
        dictionary for resolving parameters at runtime for circuits.
    tq_to_pars: dict:
        dictionary mapping tequila Variables and Objectives to qiskit.Parameters, for parameter resolution.
    Methods
    -------
    noise_model_converter:
        transform a tequila NoiseModel into a qiskit noise model.
    """
    # Flags telling the tequila compiler which abstract gate constructs must be
    # decomposed (True) before translation to qiskit primitives.
    compiler_arguments = {
        "trotterized": True,
        "swap": False,
        "multitarget": True,
        "controlled_rotation": True,
        "gaussian": True,
        "exponential_pauli": True,
        "controlled_exponential_pauli": True,
        "phase": True,
        "power": True,
        "hadamard_power": True,
        "controlled_power": True,
        "controlled_phase": False,
        "toffoli": False,
        "phase_to_z": False,
        "cc_max": False
    }
    # qiskit orders qubits least-significant-bit first.
    numbering = BitNumbering.LSB
def __init__(self, abstract_circuit: QCircuit, variables, qubit_map=None, noise=None,
device=None, *args, **kwargs):
"""
Parameters
----------
abstract_circuit: QCircuit:
the circuit to be compiled to qiskit.
variables: dict:
variables to compile the circuit with
qubit_map: dictionary:
a qubit map which maps the abstract qubits in the abstract_circuit to the qubits on the backend
there is no need to initialize the corresponding backend types
the dictionary should simply be {int:int} (preferred) or {int:name}
if None the default will map to qubits 0 ... n_qubits -1 in the backend
noise:
noise to apply to the circuit.
device:
device on which to (perhaps, via emulation) execute the circuit.
args
kwargs
"""
self.op_lookup = {
'I': (lambda c: c.iden),
'X': (lambda c: c.x, lambda c: c.cx, lambda c: c.ccx),
'Y': (lambda c: c.y, lambda c: c.cy, lambda c: c.ccy),
'Z': (lambda c: c.z, lambda c: c.cz, lambda c: c.ccz),
'H': (lambda c: c.h, lambda c: c.ch, lambda c: c.cch),
'Rx': (lambda c: c.rx, lambda c: c.mcrx),
'Ry': (lambda c: c.ry, lambda c: c.mcry),
'Rz': (lambda c: c.rz, lambda c: c.mcrz),
'Phase': (lambda c: c.u1, lambda c: c.cu1),
'SWAP': (lambda c: c.swap, lambda c: c.cswap),
}
self.resolver = {}
self.tq_to_pars = {}
self.counter = 0
if qubit_map is None:
n_qubits = len(abstract_circuit.qubits)
else:
n_qubits = max(qubit_map.values()) + 1
self.q = qiskit.QuantumRegister(n_qubits, "q")
self.c = qiskit.ClassicalRegister(n_qubits, "c")
super().__init__(abstract_circuit=abstract_circuit, variables=variables, noise=noise, device=device,
qubit_map=qubit_map, *args, **kwargs)
self.classical_map = self.make_classical_map(qubit_map=self.qubit_map)
if noise != None:
self.noise_lookup = {
'phase damp': qiskitnoise.phase_damping_error,
'amplitude damp': qiskitnoise.amplitude_damping_error,
'bit flip': get_bit_flip,
'phase flip': get_phase_flip,
'phase-amplitude damp': qiskitnoise.phase_amplitude_damping_error,
'depolarizing': qiskitnoise.depolarizing_error
}
if isinstance(noise, str):
if noise == 'device':
if device is not None:
self.noise_model = qiskitnoise.NoiseModel.from_backend(self.device)
else:
raise TequilaException('cannot get device noise without specifying a device!')
else:
raise TequilaException(
'The only allowed string for noise is \'device\'; recieved {}. Please try again!'.format(
str(noise)))
else:
nm = self.noise_model_converter(noise)
self.noise_model = nm
else:
self.noise_model = None
if len(self.tq_to_pars.keys()) is None:
self.pars_to_tq = None
self.resolver = None
else:
self.pars_to_tq = {v: k for k, v in self.tq_to_pars.items()}
self.resolver = {k: to_float(v(variables)) for k, v in self.pars_to_tq.items()}
def make_qubit_map(self, qubits: dict = None):
qubit_map = super().make_qubit_map(qubits=qubits)
mapped_qubits = [q.number for q in qubit_map.values()]
for k, v in qubit_map.items():
qubit_map[k].instance = self.q [v.number]
return qubit_map
def make_classical_map(self, qubit_map: dict):
mapped_qubits = [q.number for q in qubit_map.values()]
classical_map = {}
for k, v in qubit_map.items():
classical_map[k] = self.c[v.number]
return classical_map
    def do_simulate(self, variables, initial_state=0, *args, **kwargs) -> QubitWaveFunction:
        """
        Helper function for performing simulation.
        Parameters
        ----------
        variables:
            variables to pass to the circuit for simulation.
        initial_state:
            indicate initial state on which the unitary self.circuit should act.
        args
        kwargs
        Returns
        -------
        QubitWaveFunction:
            the result of simulation.
        """
        if self.noise_model is None:
            qiskit_backend = self.retrieve_device('statevector_simulator')
        else:
            raise TequilaQiskitException("wave function simulation with noise cannot be performed presently")
        optimization_level = None
        if "optimization_level" in kwargs:
            optimization_level = kwargs['optimization_level']
        opts = None
        if initial_state != 0:
            # build a one-hot statevector for the requested basis state; the
            # MSB-ordered tequila BitString is converted to qiskit's LSB order
            array = numpy.zeros(shape=[2 ** self.n_qubits])
            i = BitStringLSB.from_binary(BitString.from_int(integer=initial_state, nbits=self.n_qubits).binary)
            print(initial_state, " -> ", i)
            array[i.integer] = 1.0
            opts = {"initial_statevector": array}
            print(opts)
        backend_result = qiskit.execute(experiments=self.circuit, optimization_level=optimization_level,
                                        backend=qiskit_backend, parameter_binds=[self.resolver],
                                        backend_options=opts).result()
        return QubitWaveFunction.from_array(arr=backend_result.get_statevector(self.circuit), numbering=self.numbering)
    def do_sample(self, circuit: qiskit.QuantumCircuit, samples: int, read_out_qubits, *args, **kwargs) -> QubitWaveFunction:
        """
        Helper function for performing sampling.
        Parameters
        ----------
        circuit: qiskit.QuantumCircuit:
            the circuit from which to sample.
        samples:
            the number of samples to take.
        args
        kwargs
        Returns
        -------
        QubitWaveFunction:
            the result of sampling.
        """
        optimization_level = 1
        if 'optimization_level' in kwargs:
            optimization_level = kwargs['optimization_level']
        if self.device is None:
            qiskit_backend = self.retrieve_device('qasm_simulator')
        else:
            qiskit_backend = self.device
        # case 1: an Aer simulator or the remote IBMQ qasm simulator -- run
        # directly, passing the configured noise model and full basis gate set
        if qiskit_backend in qiskit.Aer.backends() or str(qiskit_backend).lower() == "ibmq_qasm_simulator":
            return self.convert_measurements(qiskit.execute(circuit, backend=qiskit_backend, shots=samples,
                                                            basis_gates=full_basis,
                                                            optimization_level=optimization_level,
                                                            noise_model=self.noise_model,
                                                            parameter_binds=[self.resolver]), target_qubits=read_out_qubits)
        else:
            # case 2: a mocked ("fake") device -- emulate it on the qasm
            # simulator using the fake device's coupling map and basis gates
            if isinstance(qiskit_backend, qiskit.test.mock.FakeBackend):
                coupling_map = qiskit_backend.configuration().coupling_map
                basis = qiskitnoise.NoiseModel.from_backend(qiskit_backend).basis_gates
                return self.convert_measurements(qiskit.execute(circuit, self.retrieve_device('qasm_simulator'),
                                                                shots=samples,
                                                                basis_gates=basis,
                                                                coupling_map=coupling_map,
                                                                noise_model=self.noise_model,
                                                                optimization_level=optimization_level,
                                                                parameter_binds=[self.resolver]),
                                                 target_qubits=read_out_qubits)
            else:
                # case 3: real hardware -- custom noise models do not apply
                if self.noise_model is not None:
                    print("WARNING: There are no noise models when running on real machines!")
                return self.convert_measurements(qiskit.execute(circuit, backend=qiskit_backend, shots=samples,
                                                                optimization_level=optimization_level,
                                                                parameter_binds=[self.resolver]))
def convert_measurements(self, backend_result, target_qubits=None) -> QubitWaveFunction:
"""
map backend results to QubitWaveFunction
Parameters
----------
backend_result:
the result returned directly qiskit simulation.
Returns
-------
QubitWaveFunction:
measurements converted into wave function form.
"""
qiskit_counts = backend_result.result().get_counts()
result = QubitWaveFunction()
# todo there are faster ways
for k, v in qiskit_counts.items():
converted_key = BitString.from_bitstring(other=BitStringLSB.from_binary(binary=k))
result._state[converted_key] = v
if target_qubits is not None:
mapped_target = [self.qubit_map[q].number for q in target_qubits]
mapped_full = [self.qubit_map[q].number for q in self.abstract_qubits]
keymap = KeyMapRegisterToSubregister(subregister=mapped_target, register=mapped_full)
result = result.apply_keymap(keymap=keymap)
return result
def no_translation(self, abstract_circuit):
return isinstance(abstract_circuit, qiskit.QuantumCircuit)
def initialize_circuit(self, *args, **kwargs):
"""
return an empty qiskit circuit.
Parameters
----------
args
kwargs
Returns
-------
qiskit.QuantumCircuit:
an empty qiskit circuit.
"""
return qiskit.QuantumCircuit(self.q, self.c)
def add_parametrized_gate(self, gate, circuit, *args, **kwargs):
"""
add a parametrized gate to a circuit.
Parameters
----------
gate: QGateImpl:
the gate to apply to the circuit.
circuit: qiskit.QuantumCircuit:
the circuit, to apply the gate to.
args
kwargs
Returns
-------
None
"""
ops = self.op_lookup[gate.name]
if len(gate.extract_variables()) > 0:
try:
par = self.tq_to_pars[gate.parameter]
except:
par = qiskit.circuit.parameter.Parameter(
'{}_{}'.format(self._name_variable_objective(gate.parameter), str(self.counter)))
self.tq_to_pars[gate.parameter] = par
self.counter += 1
else:
par = float(gate.parameter)
if gate.is_controlled():
if len(gate.control) > 2:
pass
# raise TequilaQiskitException("multi-controls beyond 2 not yet supported for the qiskit backend. Gate was:\n{}".format(gate) )
ops[1](circuit)(par, q_controls=[self.qubit(c) for c in gate.control],
q_target=self.qubit(gate.target[0]), q_ancillae=None, mode='noancilla')
else:
ops[0](circuit)(par, self.qubit(gate.target[0]))
def add_measurement(self, circuit, target_qubits, *args, **kwargs):
"""
add a measurement to a circuit.
Parameters
----------
circuit: qiskit.QuantumCircuit:
the circuit, to apply measurement to.
args
kwargs
Returns
-------
None
"""
target_qubits = sorted(target_qubits)
tq = [self.qubit(t) for t in target_qubits]
tc = [self.classical_map[t] for t in target_qubits]
measurement = self.initialize_circuit()
measurement.barrier(range(self.n_qubits))
measurement.measure(tq, tc)
result = circuit + measurement
return result
def add_basic_gate(self, gate, circuit, *args, **kwargs):
"""
add an unparametrized gate to a circuit.
Parameters
----------
gate: QGateImpl:
the gate to apply to the circuit.
circuit: qiskit.QuantumCircuit:
the circuit, to apply the gate to.
args
kwargs
Returns
-------
None
"""
ops = self.op_lookup[gate.name]
if gate.is_controlled():
if len(gate.control) > 2:
raise TequilaQiskitException(
"multi-controls beyond 2 not yet supported for the qiskit backend. Gate was:\n{}".format(gate))
ops[len(gate.control)](circuit)(*[self.qubit(q) for q in gate.control + gate.target])
else:
ops[0](circuit)(*[self.qubit(q) for q in gate.target])
def noise_model_converter(self, nm):
"""
Convert a tequila NoiseModel to the native qiskit type.
Parameters
----------
nm: NoiseModel:
a tequila noisemodel.
Returns
-------
qiskit.NoiseModel:
a qiskit noise model.
"""
if nm is None:
return None
basis_gates = full_basis
qnoise = qiskitnoise.NoiseModel(basis_gates)
for noise in nm.noises:
op = self.noise_lookup[noise.name]
if op is qiskitnoise.depolarizing_error:
active = op(noise.probs[0], noise.level)
else:
if noise.level == 1:
active = op(*noise.probs)
else:
active = op(*noise.probs)
action = op(*noise.probs)
for i in range(noise.level - 1):
active = active.tensor(action)
if noise.level == 2:
targets = ['cx',
'cy',
'cz',
'crz',
'crx',
'cry',
'cu3',
'ch']
elif noise.level == 1:
targets = ['x',
'y',
'z',
'u3',
'u1',
'u2',
'h']
elif noise.level == 3:
targets = ['ccx']
else:
raise TequilaQiskitException('Sorry, no support yet for qiskit for noise on more than 3 qubits.')
qnoise.add_all_qubit_quantum_error(active, targets)
return qnoise
def update_variables(self, variables):
"""
Update circuit variables for use in simulation or sampling
Parameters
----------
variables:
a new set of variables for use in the circuit.
Returns
-------
None
"""
if self.pars_to_tq is not None:
self.resolver = {k: to_float(v(variables)) for k, v in self.pars_to_tq.items()}
else:
self.resolver = None
def check_device(self, device):
"""
check if a device can be initialized
Parameters
----------
device:
qiskit device or string valid for get_backend.
Returns
-------
None
"""
if device is None:
return
if isinstance(device, qiskit.providers.basebackend.BaseBackend):
return
elif isinstance(device, dict):
try:
qiskit_provider = device['provider']
d = device['name'].lower()
qiskit_provider.get_backend(name=d)
return
except:
raise TequilaQiskitException('dictionary initialization with device = {} failed.'.format(str(device)))
elif isinstance(device, str):
l = device.lower()
if 'fake_' in l:
qiskit_provider = qiskit.test.mock.FakeProvider()
else:
if device in [str(x).lower() for x in qiskit.Aer.backends()] + [qiskit.Aer.backends()]:
qiskit_provider = qiskit.Aer
else:
if qiskit.IBMQ.active_account() is None:
qiskit.IBMQ.load_account()
qiskit_provider = qiskit.IBMQ.get_provider()
qiskit_provider.get_backend(name=device)
return
else:
raise TequilaQiskitException(
'received device {} of unrecognized type {}; only None, strings, dicts, and qiskit backends allowed'.format(
str(device), type(device)))
def retrieve_device(self, device):
"""
Attempt to retrieve an instantiated qiskit device object for use in sampling.
Parameters
----------
device:
qiskit device, or information that can be used to instantiate one.
Returns
-------
type
type is variable. Returns qiskit backend object.
"""
if device is None:
return device
if isinstance(device, qiskit.providers.basebackend.BaseBackend):
return device
elif isinstance(device, dict):
qiskit_provider = device['provider']
d = device['name'].lower()
return qiskit_provider.get_backend(name=d)
elif isinstance(device, str):
device = device.lower()
if device in [str(x).lower() for x in qiskit.Aer.backends()] + [qiskit.Aer.backends()]:
return qiskit.Aer.get_backend(device)
else:
if 'fake_' in device:
try:
return qiskit.test.mock.FakeProvider().get_backend(device)
except:
raise TequilaQiskitException('Unable to retrieve fake device {}'.format(device))
else:
if qiskit.IBMQ.active_account() is None:
qiskit.IBMQ.load_account()
qiskit_provider = qiskit.IBMQ.get_provider()
return qiskit_provider.get_backend(name=device)
else:
raise TequilaQiskitException(
'received device {} of unrecognized type {}; only None, strings, dicts, and qiskit backends allowed'.format(
str(device), type(device)))
class BackendExpectationValueQiskit(BackendExpectationValue):
    # Expectation values reuse the generic base implementation; only the
    # circuit backend type is qiskit-specific.
    BackendCircuitType = BackendCircuitQiskit
| [
"qiskit.IBMQ.active_account",
"qiskit.Aer.get_backend",
"tequila.utils.keymap.KeyMapRegisterToSubregister",
"tequila.BitString.from_int",
"qiskit.test.mock.FakeProvider",
"qiskit.execute",
"tequila.BitStringLSB.from_binary",
"tequila.wavefunction.qubit_wavefunction.QubitWaveFunction",
"qiskit.IBMQ.l... | [((669, 728), 'qiskit.providers.aer.noise.pauli_error', 'qiskitnoise.pauli_error', ([], {'noise_ops': "[('X', p), ('I', 1 - p)]"}), "(noise_ops=[('X', p), ('I', 1 - p)])\n", (692, 728), True, 'import qiskit.providers.aer.noise as qiskitnoise\n'), ((951, 1010), 'qiskit.providers.aer.noise.pauli_error', 'qiskitnoise.pauli_error', ([], {'noise_ops': "[('Z', p), ('I', 1 - p)]"}), "(noise_ops=[('Z', p), ('I', 1 - p)])\n", (974, 1010), True, 'import qiskit.providers.aer.noise as qiskitnoise\n'), ((5260, 5297), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['n_qubits', '"""q"""'], {}), "(n_qubits, 'q')\n", (5282, 5297), False, 'import qiskit, numpy\n'), ((5316, 5355), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['n_qubits', '"""c"""'], {}), "(n_qubits, 'c')\n", (5340, 5355), False, 'import qiskit, numpy\n'), ((12667, 12686), 'tequila.wavefunction.qubit_wavefunction.QubitWaveFunction', 'QubitWaveFunction', ([], {}), '()\n', (12684, 12686), False, 'from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction\n'), ((13698, 13735), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['self.q', 'self.c'], {}), '(self.q, self.c)\n', (13719, 13735), False, 'import qiskit, numpy\n'), ((17094, 17129), 'qiskit.providers.aer.noise.NoiseModel', 'qiskitnoise.NoiseModel', (['basis_gates'], {}), '(basis_gates)\n', (17116, 17129), True, 'import qiskit.providers.aer.noise as qiskitnoise\n'), ((8588, 8627), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '[2 ** self.n_qubits]'}), '(shape=[2 ** self.n_qubits])\n', (8599, 8627), False, 'import qiskit, numpy\n'), ((13128, 13204), 'tequila.utils.keymap.KeyMapRegisterToSubregister', 'KeyMapRegisterToSubregister', ([], {'subregister': 'mapped_target', 'register': 'mapped_full'}), '(subregister=mapped_target, register=mapped_full)\n', (13155, 13204), False, 'from tequila.utils.keymap import KeyMapRegisterToSubregister\n'), ((8919, 9087), 'qiskit.execute', 'qiskit.execute', ([], {'experiments': 
'self.circuit', 'optimization_level': 'optimization_level', 'backend': 'qiskit_backend', 'parameter_binds': '[self.resolver]', 'backend_options': 'opts'}), '(experiments=self.circuit, optimization_level=\n optimization_level, backend=qiskit_backend, parameter_binds=[self.\n resolver], backend_options=opts)\n', (8933, 9087), False, 'import qiskit, numpy\n'), ((10115, 10136), 'qiskit.Aer.backends', 'qiskit.Aer.backends', ([], {}), '()\n', (10134, 10136), False, 'import qiskit, numpy\n'), ((10239, 10437), 'qiskit.execute', 'qiskit.execute', (['circuit'], {'backend': 'qiskit_backend', 'shots': 'samples', 'basis_gates': 'full_basis', 'optimization_level': 'optimization_level', 'noise_model': 'self.noise_model', 'parameter_binds': '[self.resolver]'}), '(circuit, backend=qiskit_backend, shots=samples, basis_gates=\n full_basis, optimization_level=optimization_level, noise_model=self.\n noise_model, parameter_binds=[self.resolver])\n', (10253, 10437), False, 'import qiskit, numpy\n'), ((8669, 8731), 'tequila.BitString.from_int', 'BitString.from_int', ([], {'integer': 'initial_state', 'nbits': 'self.n_qubits'}), '(integer=initial_state, nbits=self.n_qubits)\n', (8687, 8731), False, 'from tequila import BitString, BitNumbering, BitStringLSB\n'), ((10886, 10937), 'qiskit.providers.aer.noise.NoiseModel.from_backend', 'qiskitnoise.NoiseModel.from_backend', (['qiskit_backend'], {}), '(qiskit_backend)\n', (10921, 10937), True, 'import qiskit.providers.aer.noise as qiskitnoise\n'), ((11917, 12055), 'qiskit.execute', 'qiskit.execute', (['circuit'], {'backend': 'qiskit_backend', 'shots': 'samples', 'optimization_level': 'optimization_level', 'parameter_binds': '[self.resolver]'}), '(circuit, backend=qiskit_backend, shots=samples,\n optimization_level=optimization_level, parameter_binds=[self.resolver])\n', (11931, 12055), False, 'import qiskit, numpy\n'), ((12826, 12860), 'tequila.BitStringLSB.from_binary', 'BitStringLSB.from_binary', ([], {'binary': 'k'}), '(binary=k)\n', (12850, 
12860), False, 'from tequila import BitString, BitNumbering, BitStringLSB\n'), ((6216, 6264), 'qiskit.providers.aer.noise.NoiseModel.from_backend', 'qiskitnoise.NoiseModel.from_backend', (['self.device'], {}), '(self.device)\n', (6251, 6264), True, 'import qiskit.providers.aer.noise as qiskitnoise\n'), ((6321, 6393), 'tequila.TequilaException', 'TequilaException', (['"""cannot get device noise without specifying a device!"""'], {}), "('cannot get device noise without specifying a device!')\n", (6337, 6393), False, 'from tequila import TequilaException\n'), ((19880, 19911), 'qiskit.test.mock.FakeProvider', 'qiskit.test.mock.FakeProvider', ([], {}), '()\n', (19909, 19911), False, 'import qiskit, numpy\n'), ((21508, 21538), 'qiskit.Aer.get_backend', 'qiskit.Aer.get_backend', (['device'], {}), '(device)\n', (21530, 21538), False, 'import qiskit, numpy\n'), ((20255, 20281), 'qiskit.IBMQ.get_provider', 'qiskit.IBMQ.get_provider', ([], {}), '()\n', (20279, 20281), False, 'import qiskit, numpy\n'), ((22008, 22034), 'qiskit.IBMQ.get_provider', 'qiskit.IBMQ.get_provider', ([], {}), '()\n', (22032, 22034), False, 'import qiskit, numpy\n'), ((20128, 20156), 'qiskit.IBMQ.active_account', 'qiskit.IBMQ.active_account', ([], {}), '()\n', (20154, 20156), False, 'import qiskit, numpy\n'), ((20190, 20216), 'qiskit.IBMQ.load_account', 'qiskit.IBMQ.load_account', ([], {}), '()\n', (20214, 20216), False, 'import qiskit, numpy\n'), ((21461, 21482), 'qiskit.Aer.backends', 'qiskit.Aer.backends', ([], {}), '()\n', (21480, 21482), False, 'import qiskit, numpy\n'), ((21881, 21909), 'qiskit.IBMQ.active_account', 'qiskit.IBMQ.active_account', ([], {}), '()\n', (21907, 21909), False, 'import qiskit, numpy\n'), ((21943, 21969), 'qiskit.IBMQ.load_account', 'qiskit.IBMQ.load_account', ([], {}), '()\n', (21967, 21969), False, 'import qiskit, numpy\n'), ((20010, 20031), 'qiskit.Aer.backends', 'qiskit.Aer.backends', ([], {}), '()\n', (20029, 20031), False, 'import qiskit, numpy\n'), ((21435, 21456), 
'qiskit.Aer.backends', 'qiskit.Aer.backends', ([], {}), '()\n', (21454, 21456), False, 'import qiskit, numpy\n'), ((19984, 20005), 'qiskit.Aer.backends', 'qiskit.Aer.backends', ([], {}), '()\n', (20003, 20005), False, 'import qiskit, numpy\n'), ((21651, 21682), 'qiskit.test.mock.FakeProvider', 'qiskit.test.mock.FakeProvider', ([], {}), '()\n', (21680, 21682), False, 'import qiskit, numpy\n')] |
import numpy as np
from onehot import onehot
from util import softmax, cosine
import time
class word2vec:
    """
    Minimal CBOW-style word2vec trained with plain-numpy gradient steps.

    The hidden value for a target word is the average of the input embeddings
    (rows of W) of the words in its +-n_gram context window; V holds the
    output-side embeddings fed into the softmax.
    """
    def __init__(self, size, skip_gram=3, n_gram=5):
        # size: embedding (hidden layer) dimension
        # n_gram: half-width of the context window around the target word
        self.__hidden_size = size
        self.__n_gram = n_gram
        # NOTE(review): skip_gram is stored but never used by this class
        self.__skip_gram = skip_gram

    def __loss(self, y_pred, y_true):
        """
        :y_pred is generated by the network
        :y_true is the index of the label
        """
        # negative log-likelihood of the true word under the softmax output
        return -np.log(y_pred[y_true, 0])

    def __loss_grad(self, y_pred, y_true):
        # softmax + NLL gradient: subtract 1 at the true index
        tmp = y_pred.copy()
        tmp[y_true, 0] -= 1
        return np.mat(tmp)

    def __V_grad(self, loss_grad, hidden_value):
        # gradient w.r.t. the output embeddings V (outer product)
        return loss_grad * hidden_value

    def __W_grad(self, loss_grad):
        # gradient w.r.t. the input embeddings W, spread over the context words
        return self.__tmp_x_input * (loss_grad.transpose() * self.__V_weight)

    def __input_to_hidden(self, input_index, sentence):
        # average the input embeddings of the context words around input_index
        left = max(input_index - self.__n_gram, 0)
        right = min(input_index + self.__n_gram, len(sentence) - 1)
        hidden_value = np.mat(np.zeros((1, self.__hidden_size)))
        self.__C = right - left
        self.__tmp_x_input = np.mat(np.zeros((self.__voca_size, 1)))
        for index in range(left, right + 1):
            if index != input_index:
                i = self.__voca.get_index_of_word(sentence[index])
                self.__tmp_x_input[i, 0] = 1
                hidden_value += self.__W_weight[i]
        return hidden_value / (right - left)

    def __hidden_to_output(self, hidden_value):
        return softmax(self.__V_weight * hidden_value.transpose())

    def train(self, sentences, lr=0.01):
        """Fit the embeddings on a list of tokenized sentences."""
        # BUGFIX: isinstance instead of an exact type() comparison, so that
        # list subclasses are accepted as well
        if not isinstance(sentences, list):
            print('Error type of sentences.')
            return
        self.__voca = onehot(sentences)
        self.__voca_size = self.__voca.get_voca_size()
        self.__W_weight = np.mat(np.random.rand(self.__voca_size, self.__hidden_size))
        self.__V_weight = np.mat(np.random.rand(self.__voca_size, self.__hidden_size))
        cnt = 0
        max_len = len(sentences)
        cnt_time = 0
        for sentence in sentences:
            time_start = time.time()
            for index in range(0, len(sentence)):
                hidden_value = self.__input_to_hidden(index, sentence)
                output_value = self.__hidden_to_output(hidden_value)
                loss_grad = self.__loss_grad(output_value, self.__voca.get_index_of_word(sentence[index]))
                W_grad = self.__W_grad(loss_grad)
                V_grad = self.__V_grad(loss_grad, hidden_value)
                # NOTE(review): the update adds lr * grad; confirm the sign
                # convention of __loss_grad matches (descent vs. ascent)
                self.__W_weight += lr * W_grad
                self.__V_weight += lr * V_grad
            time_end = time.time()
            print('A sentence time cost', time_end - time_start, 's')
            cnt_time += time_end - time_start
            cnt += 1
            if cnt % 100 == 0:
                loss = self.__loss(output_value, self.__voca.get_index_of_word(sentence[index]))
                print('{} / {} , loss is {}.'.format(cnt, max_len, loss))
        print('Avg time is {}'.format(cnt_time / max_len))

    def calc_similarity(self, w1, w2):
        """Cosine similarity between the learned input embeddings of w1 and w2."""
        index1 = self.__voca.get_index_of_word(w1)
        index2 = self.__voca.get_index_of_word(w2)
        return cosine(self.__W_weight[index1], self.__W_weight[index2])
| [
"numpy.log",
"util.cosine",
"numpy.zeros",
"time.time",
"onehot.onehot",
"numpy.random.rand",
"numpy.mat"
] | [((587, 598), 'numpy.mat', 'np.mat', (['tmp'], {}), '(tmp)\n', (593, 598), True, 'import numpy as np\n'), ((1748, 1765), 'onehot.onehot', 'onehot', (['sentences'], {}), '(sentences)\n', (1754, 1765), False, 'from onehot import onehot\n'), ((3253, 3309), 'util.cosine', 'cosine', (['self.__W_weight[index1]', 'self.__W_weight[index2]'], {}), '(self.__W_weight[index1], self.__W_weight[index2])\n', (3259, 3309), False, 'from util import softmax, cosine\n'), ((441, 466), 'numpy.log', 'np.log', (['y_pred[y_true, 0]'], {}), '(y_pred[y_true, 0])\n', (447, 466), True, 'import numpy as np\n'), ((1024, 1057), 'numpy.zeros', 'np.zeros', (['(1, self.__hidden_size)'], {}), '((1, self.__hidden_size))\n', (1032, 1057), True, 'import numpy as np\n'), ((1129, 1160), 'numpy.zeros', 'np.zeros', (['(self.__voca_size, 1)'], {}), '((self.__voca_size, 1))\n', (1137, 1160), True, 'import numpy as np\n'), ((1856, 1908), 'numpy.random.rand', 'np.random.rand', (['self.__voca_size', 'self.__hidden_size'], {}), '(self.__voca_size, self.__hidden_size)\n', (1870, 1908), True, 'import numpy as np\n'), ((1944, 1996), 'numpy.random.rand', 'np.random.rand', (['self.__voca_size', 'self.__hidden_size'], {}), '(self.__voca_size, self.__hidden_size)\n', (1958, 1996), True, 'import numpy as np\n'), ((2131, 2142), 'time.time', 'time.time', ([], {}), '()\n', (2140, 2142), False, 'import time\n'), ((2678, 2689), 'time.time', 'time.time', ([], {}), '()\n', (2687, 2689), False, 'import time\n')] |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under NVIDIA Simple Streamer License
import multiprocessing as mp
import numpy as np
import time
import socket
import cv2
import sys
# The binary framing and socket usage below rely on Python-3 bytes semantics.
if sys.version_info[0] < 3:
    raise Exception("Only Python 3 supported")
def adjust(quality, bandwidth, rate, lower, upper):
    """Step the jpeg quality toward the target rate: raise it by one (capped
    at upper) while measured bandwidth is under the target, otherwise lower
    it by one (floored at lower)."""
    under_target = bandwidth < rate
    return min(quality + 1, upper) if under_target else max(quality - 1, lower)
class Timer:
    """Exponential-moving-average tracker for frame rate and, optionally,
    bandwidth. Call update() once per processed batch."""

    def __init__(self, alpha=0.1, epsilon=1E-6):
        self.fps = 0.0
        self.bnw = 0.0
        self.alpha = alpha          # EMA decay factor
        self.ying = time.time()     # timestamp of the previous update
        self.epsilon = epsilon      # guards against division by ~zero intervals

    def update(self, batch_size=1, bandwidth=None):
        # instantaneous rate = items per elapsed second since the last call
        self.yang = time.time()
        elapsed = max(self.yang - self.ying, self.epsilon)
        self.rtau = batch_size / elapsed
        self.ying = self.yang
        # fold the instantaneous rate into the running EMA estimates
        self.fps += self.alpha * (self.rtau - self.fps)
        if bandwidth:
            self.bnw += self.alpha * (bandwidth * self.rtau - self.bnw)
            return (self.fps, self.bnw)
        return self.fps
class WebCamServer:
    """Receives jpeg-encoded webcam frames over TCP in a background process
    and exposes them through a bounded multiprocessing queue.

    Wire format per frame: an 8-byte big-endian unsigned length header
    followed by that many bytes of jpeg data.
    """
    def __init__(self, host=None, port=None, info=None, maxQ=None, chnk=None, ncon=None):
        self.host = host if host else ''
        self.port = port if port else 8089      # TCP port to listen on
        self.ncon = ncon if ncon else 1         # connection backlog
        self.chnk = chnk if chnk else 2**12     # socket recv chunk size
        self.maxQ = maxQ if maxQ else 16        # frame queue capacity
        self.info = info if info else False     # print fps/bandwidth stats
        self.skip = 8                           # length-header size in bytes
        self.queue = mp.Queue(self.maxQ)
        self.cache = None                       # last frame handed out by read_nowait
        self.worker = mp.Process(target=self.__listen__, args=(self.queue,))
        self.worker.start()

    def __listen__(self, queue):
        """Accept one client and push decoded frames into the queue forever."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind((self.host, self.port))
            server.listen(self.ncon)
            print("webcam server: waiting for connections")
            client, address = server.accept()
            print("webcam server: listening to client %s" % str(address))
            data = b''
            timer = Timer()
            while True:
                # read the 8-byte big-endian length header
                while len(data) < self.skip:
                    data += client.recv(self.chnk)
                packed_msg_size = data[:self.skip]
                data = data[self.skip:]
                msg_size = np.frombuffer(packed_msg_size, dtype=">u8")[0]
                # read the jpeg payload
                while len(data) < msg_size:
                    data += client.recv(self.chnk)
                frame_data = data[:msg_size]
                data = data[msg_size:]
                try:
                    # BUGFIX: np.frombuffer replaces the deprecated np.fromstring
                    frame = np.frombuffer(frame_data, np.uint8)
                    frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                    if not queue.full():
                        queue.put_nowait(frame)
                    elif self.info:
                        print("webcam server: skipped frame (queue full)")
                except Exception:  # BUGFIX: was bare except; best-effort drop
                    if self.info:
                        print("webcam server: skipped frame (jpeg decode error)")
                    continue
                # estimate moving frame rate and bandwidth statistics
                if self.info:
                    fps, bw = timer.update(bandwidth=len(frame_data))
                    print("webcam server reading data: %2.2f FPS \t %2.2f MiB/s" % (fps, bw/2**20))

    def __del__(self):
        self.worker.terminate()

    def read_nowait(self):
        """Return (fresh, frame): fresh is True only if a new frame was ready;
        otherwise the cached frame is returned (blocking once for the first)."""
        try:
            success = True
            self.cache = self.queue.get_nowait()
        except Exception:  # BUGFIX: was a bare `except:` (queue.Empty expected)
            success = False
        self.cache = self.queue.get() if self.cache is None else self.cache
        return success, self.cache

    def read_wait(self):
        """Block until a frame is available; always returns (True, frame)."""
        return True, self.queue.get()
class StreamServer:
    """Jpeg-encodes frames from a queue and streams them to one TCP client
    from a background process, optionally throttling jpeg quality to hit a
    target bandwidth (MBps)."""
    def __init__(self, host=None, port=None, info=None, maxQ=None, ncon=None, qual=None, MBps=None):
        self.host = host if host else ''
        self.port = port if port else 8090
        self.ncon = ncon if ncon else 1
        self.maxQ = maxQ if maxQ else 16
        self.info = info if info else False
        self.qual = qual if qual else 95            # initial jpeg quality (1-100)
        # target bandwidth in bytes/s; infinity means "no throttling requested"
        self.rate = MBps*2**20 if MBps else float("infinity")
        self.cache = None
        self.queue = mp.Queue(self.maxQ)
        self.worker = mp.Process(target=self.__listen__, args=(self.queue,))
        self.worker.start()

    def __listen__(self, queue):
        """Accept one client and stream jpeg frames from the queue forever."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind((self.host, self.port))
            server.listen(self.ncon)
            print("stream server: waiting for connections ")
            client, address = server.accept()
            print("stream server: listening to client %s" % str(address))
            timer = Timer()
            # BUGFIX: the original throttled on `if self.rate:`, which is always
            # truthy (infinity is truthy), so quality drifted to 100 even when
            # no target rate was requested. Throttle only for a finite rate.
            throttle = self.rate != float("infinity")
            while True:
                # read a frame from the queue
                frame = self.queue.get()
                # encode to jpeg with potentially dynamic quality
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), self.qual]
                success, a_numpy = cv2.imencode('.jpg', frame, encode_param)
                data = a_numpy.tobytes()  # tobytes replaces deprecated tostring
                # 8-byte big-endian length header, then the jpeg payload
                message_size = np.array([len(data)], dtype=">u8").tobytes()
                try:
                    client.sendall(message_size + data)
                except Exception:  # BUGFIX: was a bare `except:`
                    print("stream server: frame skipped (send failed)")
                    continue
                # estimate moving frame rate and bandwidth statistics
                if self.info or throttle:
                    fps, bw = timer.update(bandwidth=len(data))
                # useful stats
                if self.info:
                    print("stream server sending data: %2.2f FPS \t \
                            %2.2f/%2.2f MiB/s \t %d Quality" %
                            (fps, bw/2**20, self.rate/2**20, self.qual) )
                # adjust jpeg encoding quality if fixed bandwidth specified
                if throttle:
                    self.qual = adjust(self.qual, bw, self.rate, 10, 100)

    def __del__(self):
        self.worker.terminate()

    def write_nowait(self, frame):
        """Queue a frame for streaming; silently drop it if the queue is full."""
        try:
            self.queue.put_nowait(frame)
        except Exception:  # BUGFIX: was a bare `except:` (queue.Full expected)
            if self.info:
                print("stream server: frame skipped (queue full)")

    def write_wait(self, frame):
        """Queue a frame, blocking until space is available."""
        self.queue.put(frame)
class StreamClient:
    """
    Connect to a StreamServer, receive length-prefixed JPEG frames and
    display them with OpenCV. NOTE: __init__ blocks forever in the
    receive loop.
    """
    def __init__(self, host=None, port=None, info=None, chnk=None):
        self.host = host if host else "localhost"
        self.port = port if port else 8090
        self.info = info if info else True
        self.chnk = chnk if chnk else 2**12   # receive chunk size (bytes)
        self.skip = 8                         # length-prefix size (big-endian u8)
        # fix: self.rate was read below but never set, raising
        # AttributeError whenever info was falsy
        self.rate = None
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
            client.connect((self.host, self.port))
            data = b''
            # frame rate and bandwidth estimates, decay rate for EMA and time
            timer = Timer()
            while True:
                # accumulate bytes until the 8-byte length prefix is complete
                while len(data) < self.skip:
                    data += client.recv(self.chnk)
                packed_msg_size = data[:self.skip]
                data = data[self.skip:]
                msg_size = np.frombuffer(packed_msg_size, dtype=">u8")[0]
                # accumulate the JPEG payload
                while len(data) < msg_size:
                    data += client.recv(self.chnk)
                frame_data = data[:msg_size]
                data = data[msg_size:]
                try:
                    # frombuffer replaces the deprecated np.fromstring
                    frame = np.frombuffer(frame_data, np.uint8)
                    frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                    cv2.imshow('frame', frame)
                    cv2.waitKey(1)
                except Exception:
                    if self.info:
                        print("stream client: skipped frame (jpeg decode error)")
                    continue
                # estimate moving frame rate and bandwidth statistics
                if self.info or self.rate:
                    fps, bw = timer.update(bandwidth=len(frame_data))
                    # useful stats
                    if self.info:
                        print("stream client receiving data: %2.2f FPS \t %2.2f MiB/s" % (fps, bw/2**20))
class WebCamClient:
    """
    Capture frames from a local webcam and stream them, JPEG-encoded and
    length-prefixed, to a StreamServer. NOTE: __init__ blocks forever in
    the capture/send loop.
    """
    def __init__(self, host=None, port=None, cam=None, qual=None, info=None, MBps=None, wcfg=None):
        self.host = host if host else "localhost"
        self.port = port if port else 8089
        self.cam = cam if cam else 0
        self.qual = qual if qual else 95
        self.info = info if info else True
        self.wcfg = wcfg if wcfg else (640, 480, 30)   # width, height, fps
        # target bandwidth in bytes/s; unbounded when MBps not given
        self.rate = MBps*2**20 if MBps else float("infinity")
        self.capture = cv2.VideoCapture(self.cam)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.wcfg[0])
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.wcfg[1])
        self.capture.set(cv2.CAP_PROP_FPS, self.wcfg[2])
        # fps actually granted by the camera (may differ from requested)
        self.mfps = self.capture.get(cv2.CAP_PROP_FPS)
        print(self.capture.get(cv2.CAP_PROP_FPS))
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
            client.connect((self.host, self.port))
            # frame rate and bandwidth estimates, decay rate for EMA and time
            timer = Timer()
            while True:
                # read a frame from the webcam
                success, frame = self.capture.read()
                # encode to jpeg with potentially dynamic quality
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), self.qual]
                success, a_numpy = cv2.imencode('.jpg', frame, encode_param)
                # tobytes() replaces ndarray.tostring(), removed from numpy
                data = a_numpy.tobytes()
                # send the jpeg frame over the network, length-prefixed (>u8)
                message_size = np.array([len(data)], dtype=">u8").tobytes()
                try:
                    client.sendall(message_size + data)
                except OSError:
                    # socket errors are all OSError subclasses
                    if self.info:
                        print("webcam client: frame skipped (send failed)")
                    continue
                # estimate moving frame rate and bandwidth statistics
                if self.info or self.rate:
                    fps, bw = timer.update(bandwidth=len(data))
                    # useful stats
                    if self.info:
                        print("webcam client sending data: %2.2f/%2.2f FPS \t %2.2f/%2.2f MiB/s \t %d Quality" %
                              (fps, self.mfps, bw/2**20, self.rate/2**20, self.qual))
                    # adjust jpeg encoding quality if fixed bandwidth specified
                    if self.rate:
                        self.qual = adjust(self.qual, bw, self.rate, 10, 100)
"cv2.waitKey",
"numpy.frombuffer",
"socket.socket",
"cv2.imdecode",
"time.time",
"cv2.VideoCapture",
"multiprocessing.Queue",
"cv2.imencode",
"multiprocessing.Process",
"cv2.imshow",
"numpy.fromstring"
] | [((651, 662), 'time.time', 'time.time', ([], {}), '()\n', (660, 662), False, 'import time\n'), ((808, 819), 'time.time', 'time.time', ([], {}), '()\n', (817, 819), False, 'import time\n'), ((1595, 1614), 'multiprocessing.Queue', 'mp.Queue', (['self.maxQ'], {}), '(self.maxQ)\n', (1603, 1614), True, 'import multiprocessing as mp\n'), ((1663, 1717), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.__listen__', 'args': '(self.queue,)'}), '(target=self.__listen__, args=(self.queue,))\n', (1673, 1717), True, 'import multiprocessing as mp\n'), ((4512, 4531), 'multiprocessing.Queue', 'mp.Queue', (['self.maxQ'], {}), '(self.maxQ)\n', (4520, 4531), True, 'import multiprocessing as mp\n'), ((4554, 4608), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.__listen__', 'args': '(self.queue,)'}), '(target=self.__listen__, args=(self.queue,))\n', (4564, 4608), True, 'import multiprocessing as mp\n'), ((9221, 9247), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.cam'], {}), '(self.cam)\n', (9237, 9247), False, 'import cv2\n'), ((1794, 1843), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1807, 1843), False, 'import socket\n'), ((4694, 4743), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4707, 4743), False, 'import socket\n'), ((7230, 7279), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (7243, 7279), False, 'import socket\n'), ((9578, 9627), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (9591, 9627), False, 'import socket\n'), ((5426, 5467), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame', 'encode_param'], {}), "('.jpg', frame, encode_param)\n", (5438, 5467), False, 'import cv2\n'), ((10098, 10139), 'cv2.imencode', 'cv2.imencode', 
(['""".jpg"""', 'frame', 'encode_param'], {}), "('.jpg', frame, encode_param)\n", (10110, 10139), False, 'import cv2\n'), ((2490, 2533), 'numpy.frombuffer', 'np.frombuffer', (['packed_msg_size'], {'dtype': '""">u8"""'}), "(packed_msg_size, dtype='>u8')\n", (2503, 2533), True, 'import numpy as np\n'), ((2784, 2819), 'numpy.fromstring', 'np.fromstring', (['frame_data', 'np.uint8'], {}), '(frame_data, np.uint8)\n', (2797, 2819), True, 'import numpy as np\n'), ((2848, 2885), 'cv2.imdecode', 'cv2.imdecode', (['frame', 'cv2.IMREAD_COLOR'], {}), '(frame, cv2.IMREAD_COLOR)\n', (2860, 2885), False, 'import cv2\n'), ((7730, 7773), 'numpy.frombuffer', 'np.frombuffer', (['packed_msg_size'], {'dtype': '""">u8"""'}), "(packed_msg_size, dtype='>u8')\n", (7743, 7773), True, 'import numpy as np\n'), ((8024, 8059), 'numpy.fromstring', 'np.fromstring', (['frame_data', 'np.uint8'], {}), '(frame_data, np.uint8)\n', (8037, 8059), True, 'import numpy as np\n'), ((8088, 8125), 'cv2.imdecode', 'cv2.imdecode', (['frame', 'cv2.IMREAD_COLOR'], {}), '(frame, cv2.IMREAD_COLOR)\n', (8100, 8125), False, 'import cv2\n'), ((8146, 8172), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (8156, 8172), False, 'import cv2\n'), ((8193, 8207), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8204, 8207), False, 'import cv2\n')] |
import os
import sys
import numpy
import clint
import pickle
import zipfile
import requests
import subprocess
import matplotlib
matplotlib.use('Agg') #don't use X backend so headless servers don't die
from matplotlib import pyplot
def pickleload(filename):
    """
    Load and return the pickled object stored in ``filename``
    (decoded with latin1 for Python-2-era pickles).
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')
def compile_dict_results(files):
    """
    Compile a dict of lists of like results given a list of
    pickled result dictionaries.

    Inputs
    ------
    files: list of strings
        filenames of the pickled input dicts

    Returns
    -------
    compiled: dict of lists
        same keys as the input dicts; each value is the list of all
        results sharing that key, in file order
    """
    compiled = {}
    for fname in files:
        for key, value in pickleload(fname).items():
            compiled.setdefault(key, []).append(value)
    return compiled
def richardson_extrapolation(compiled_results):
    """
    Estimate the exact solvation energy via Richardson extrapolation,

        f_ex = (f1*f3 - f2^2) / (f3 - 2*f2 + f1),

    where f1 comes from the finest grid and f3 from the coarsest.
    Indices 5, 3, 2 pick the constant-refinement-ratio grids out of
    the assumed 6 runs (1, 2, 4, 8, 12, 16).
    """
    try:
        esolv = compiled_results['E_solv_kJ']
    except KeyError:
        print('No results found for solvation energy. \n'
              'Something has gone wrong.')
        sys.exit()
    fine, medium, coarse = esolv[5], esolv[3], esolv[2]
    return (fine * coarse - medium**2) / (coarse - 2 * medium + fine)
def generate_plot(compiled_results, filetype='pdf', repro=None):
    """
    Generate the three lysozyme figures (solvation energy vs N, time vs
    error, time vs N) comparing PyGBe runs against hard-coded APBS data.

    Inputs
    ------
    compiled_results: dict of lists, as built by compile_dict_results
    filetype: extension for the saved figures (default 'pdf')
    repro: if truthy, tag output filenames as K40 reproduction runs
    """
    res = compiled_results
    # hard-coded APBS (FEM) grid sizes, mesh spacings, timings and energies
    N_fem = numpy.array([97*65*97, 129*97*129, 161*129*161, 257*161*257, 385*257*385, 449*385*449, 513*449*513])
    Vfem = 50.*40.*50.
    Lfem = (Vfem/N_fem)**(1/3.)
    Lfem_aux = numpy.array([[0.521,0.625,0.521],[0.391,0.417,0.391],
                            [0.312,0.312,0.312],[0.195,0.250,0.195],
                            [0.130,0.156,0.130],[0.112,0.104,0.112],
                            [0.098,0.089,0.098]])
    timeAPB = numpy.array([4.3,8.6,17.7,54,161,352,768])
    EsolvAPB = numpy.array([-2237,-2172.9,-2142,-2121.5,-2102.6,-2093.7,-2090.7])
    apb_ext = -2070.47
    pyg_ext = richardson_extrapolation(res)
    font = {'family':'serif', 'size':7}
    # figure 1: solvation energy vs number of elements
    pyplot.figure(figsize=(3, 2), dpi=80)
    pyplot.rc('font', **font)
    # calc plot extremas for plotting
    xmax = N_fem[-1]*5
    xmin = res['total_elements'][0] / 1.5
    pyplot.semilogx(res['total_elements'], res['E_solv_kJ'],
                    c='k', marker='o', mfc='w', ms=3, ls='-', lw=0.5, label='PyGBe')
    pyplot.semilogx(N_fem, EsolvAPB,
                    c='k', marker='^', mfc='w', ms=3, ls='-', lw=0.5, label='APBS')
    # dotted horizontal guide lines at the two extrapolated values
    pyplot.semilogx([xmin,xmax], pyg_ext*numpy.ones(2), c='k', marker='', mfc='w', ms=1, ls='dotted', lw=0.2)
    pyplot.semilogx([xmin,xmax], apb_ext*numpy.ones(2), c='k', marker='', mfc='w', ms=1, ls='dotted', lw=0.2)
    # raw string avoids the invalid '\D' escape warning (same TeX markup)
    pyplot.ylabel(r'$\Delta G_{solv}$ [kJ/mol]', fontsize=10)
    pyplot.xlabel('N', fontsize=10)
    pyplot.text(5e5, pyg_ext - 25, 'PyGBe extrap.', fontsize=6, rotation=0)
    pyplot.text(1e7, -2067, 'APBS extrap.', fontsize=6, rotation=0)
    pyplot.subplots_adjust(left=0.22, bottom=0.21, right=0.96, top=0.95)
    pyplot.axis([xmin, xmax, -2450, -2040])
    pyplot.legend(loc='lower right')
    if repro:
        fname = 'Esolv_lys_K40repro.{}'.format(filetype)
    else:
        fname = 'Esolv_lys.{}'.format(filetype)
    print('Writing figure to "{}"'.format(fname))
    pyplot.savefig(fname)
    # figure 2: time to solution vs relative error against extrapolations
    pygbe_err = numpy.abs(numpy.array(res['E_solv_kJ']) - pyg_ext) / numpy.abs(pyg_ext)
    apb_err = numpy.abs(EsolvAPB - apb_ext) / numpy.abs(apb_ext)
    pyplot.figure(figsize=(3, 2), dpi=80)
    pyplot.loglog(pygbe_err, res['total_time'],
                  c='k', marker='o', mfc='w', ms=5,
                  ls='-', lw=0.5, label='PyGBe')
    pyplot.loglog(apb_err, timeAPB,
                  c='k', marker='^', mfc='w', ms=5,
                  ls='-', lw=0.5, label='APBS')
    pyplot.subplots_adjust(left=0.19, bottom=0.21, right=0.96, top=0.95)
    pyplot.ylabel('Time to solution [s]', fontsize=10)
    pyplot.xlabel('Error', fontsize=10)
    pyplot.legend(loc='lower left')
    if repro:
        fname = 'time_lys_K40repro.{}'.format(filetype)
    else:
        fname = 'time_lys.{}'.format(filetype)
    print('Writing figure to "{}"'.format(fname))
    pyplot.savefig(fname)
    # figure 3: time to solution vs number of elements
    pyplot.figure(figsize=(3, 2), dpi=80)
    pyplot.loglog(res['total_elements'], res['total_time'],
                  c='k', marker='o', mfc='w', ms=5,
                  ls='-', lw=0.5)
    pyplot.xlabel('Number of elements', fontsize=10)
    pyplot.ylabel('Time to solution [s]', fontsize=10)
    pyplot.subplots_adjust(left=0.19, bottom=0.21, right=0.96, top=0.95)
    if repro:
        fname = 'time_v_N_lys_K40repro.{}'.format(filetype)
    else:
        fname = 'time_v_N_lys.{}'.format(filetype)
    print('Writing figure to "{}"'.format(fname))
    pyplot.savefig(fname)
def check_mesh():
    """
    Ensure the lysozyme meshes are present locally.

    When no ``geometry`` folder exists in the current directory, offer
    to download and unpack the mesh archive from Zenodo.
    """
    if os.path.isdir('geometry'):
        return
    answer = input('The meshes for the performance check don\'t appear '
                   'to be loaded. Would you like to download them from '
                   'Zenodo? (~11MB) (y/n): ')
    if answer == 'y':
        mesh_url = 'https://zenodo.org/record/58308/files/lysozyme_meshes.zip'
        download_zip_with_progress_bar(mesh_url)
        unzip(mesh_url.split('/')[-1])
        print('Done!')
def download_zip_with_progress_bar(url):
    """
    Stream ``url`` into a local file named after its last path
    component, showing a clint progress bar.
    """
    r = requests.get(url, stream=True)
    path = url.rsplit('/', 1)[-1]
    # fix: a missing Content-Length header made int(None) raise TypeError
    total_length = int(r.headers.get('content-length') or 0)
    with open(path, 'wb') as f:
        for chunk in clint.textui.progress.bar(r.iter_content(chunk_size=1024),
                                               expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
def unzip(meshzip):
    """Extract ``meshzip`` into the current directory, then delete it."""
    # fix: the status message used to print only AFTER extraction finished
    print('Unzipping meshes ...')
    with zipfile.ZipFile(meshzip, 'r') as myzip:
        myzip.extractall(path='.')
    print('Removing zip file...')
    os.remove(meshzip)
def repro_fig():
    """
    Reproduce figure for latest performance report

    If pickled K40 results are present under ``results_K40``, offer to
    rebuild the published figure from them. May terminate the whole
    process (sys.exit) depending on the user's answers.
    """
    # make sure the results directory exists (OSError: already exists)
    try:
        os.mkdir('results_K40')
    except OSError:
        pass
    # only offer the reproduction when pickled K40 results are present
    if [a for a in os.listdir('results_K40') if 'pickle' in a]:
        run_check_yn = input('\n\n\n'
                  'You are about to reproduce the figure that shows the '
                  'results reported in the Jupyter notebook located in this '
                  'directory. '
                  'Do you want to reproduce it? If you select "no" '
                  'you will be asked if you want to re-run the tests '
                  'again using your hardware. '
                  'If you select "yes" you will proceed to reproduce the '
                  'figure (yes/no): ')
        if run_check_yn in ['No', 'no', 'n']:
            # decline: fall back to the caller's own-hardware run
            return
        elif run_check_yn in ['Yes', 'yes', 'y']:
            # gather the shipped K40 pickles and regenerate the figure
            files = [os.path.join('results_K40', a)
                     for a in os.listdir('results_K40') if 'pickle' in a]
            files.sort()
            compiled_results = compile_dict_results(files)
            generate_plot(compiled_results, filetype='pdf', repro=True)
            continue_check_yn = input('\n\n\n''Do you want to re-run the test with your '
                                      'local hardware? (yes/no): ')
            if continue_check_yn in ['No', 'no', 'n']:
                # user is done after reproducing: stop the whole script
                sys.exit()
            elif continue_check_yn in ['Yes', 'yes', 'y']:
                return
        else:
            # unrecognized answer to the first prompt: abort the script
            print('Didn\'t understand your response, exiting')
            sys.exit()
def run_check():
    """
    Run the 6 lysozyme cases via subprocess, asking first when results
    from a previous run already exist in the output directory.
    """
    try:
        os.mkdir('output')
    except OSError:
        pass
    existing = [a for a in os.listdir('output') if 'pickle' in a]
    if not existing:
        run_lysozome()
        return
    run_check_yn = input('\n\n\n'
              'There are already results in your output directory. '
              'Do you want to re-run the tests? If you select "no" '
              'then the plotting routine will still run. If you select '
              '"yes", note that this script is not smart enough to '
              'distinguish between "old" and "new" runs and will just '
              'jam everything together into one figure (yes/no): ')
    if run_check_yn in ['No', 'no', 'n']:
        return
    if run_check_yn in ['Yes', 'yes', 'y']:
        run_lysozome()
    else:
        print('Didn\'t understand your response, exiting')
        sys.exit()
def run_lysozome():
    """Run the six lysozyme PyGBe cases (meshes 1, 2, 4, 8, 12, 16) in order."""
    for mesh in ('', '2', '4', '8', '12', '16'):
        config = 'lys{}.config'.format(mesh)
        subprocess.call(['pygbe', '-c', config, '-p', 'lys.param', '.'])
def main():
    """
    Interactive driver: optionally reproduce the published figure,
    ensure meshes exist, run the six cases, then compile and plot
    whatever pickled results are in ``output``.
    """
    prompt = ('This will run 6 lysozyme cases in order to generate '
              'results necessary to generate a few figures. It '
              'takes around 10 minutes to run on a Tesla K40 '
              'and also time to download meshes from Zenodo (~11MB). '
              'Type "y" or some variant of yes to accept this: ')
    if input(prompt) in ['Yes', 'yes', 'y', 'Y']:
        # ask if user wants to reproduce fig in notebook
        repro_fig()
        # check that meshes are present
        check_mesh()
        # run the lysozome problems
        run_check()
    files = sorted(os.path.join('output', a)
                   for a in os.listdir('output') if 'pickle' in a)
    compiled_results = compile_dict_results(files)
    generate_plot(compiled_results, filetype='pdf')
    return compiled_results
# entry point when executed as a script
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.loglog",
"os.mkdir",
"os.remove",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.figure",
"pickle.load",
"os.path.join",
"matplotlib.pyplot.rc",
"requests.get",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.text",
"matplotlib.use",
"subprocess.call",
"matplotlib.pyplot... | [((129, 150), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (143, 150), False, 'import matplotlib\n'), ((2140, 2272), 'numpy.array', 'numpy.array', (['[97 * 65 * 97, 129 * 97 * 129, 161 * 129 * 161, 257 * 161 * 257, 385 * 257 *\n 385, 449 * 385 * 449, 513 * 449 * 513]'], {}), '([97 * 65 * 97, 129 * 97 * 129, 161 * 129 * 161, 257 * 161 * 257,\n 385 * 257 * 385, 449 * 385 * 449, 513 * 449 * 513])\n', (2151, 2272), False, 'import numpy\n'), ((2310, 2491), 'numpy.array', 'numpy.array', (['[[0.521, 0.625, 0.521], [0.391, 0.417, 0.391], [0.312, 0.312, 0.312], [\n 0.195, 0.25, 0.195], [0.13, 0.156, 0.13], [0.112, 0.104, 0.112], [0.098,\n 0.089, 0.098]]'], {}), '([[0.521, 0.625, 0.521], [0.391, 0.417, 0.391], [0.312, 0.312, \n 0.312], [0.195, 0.25, 0.195], [0.13, 0.156, 0.13], [0.112, 0.104, 0.112\n ], [0.098, 0.089, 0.098]])\n', (2321, 2491), False, 'import numpy\n'), ((2566, 2614), 'numpy.array', 'numpy.array', (['[4.3, 8.6, 17.7, 54, 161, 352, 768]'], {}), '([4.3, 8.6, 17.7, 54, 161, 352, 768])\n', (2577, 2614), False, 'import numpy\n'), ((2624, 2696), 'numpy.array', 'numpy.array', (['[-2237, -2172.9, -2142, -2121.5, -2102.6, -2093.7, -2090.7]'], {}), '([-2237, -2172.9, -2142, -2121.5, -2102.6, -2093.7, -2090.7])\n', (2635, 2696), False, 'import numpy\n'), ((2804, 2841), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 2)', 'dpi': '(80)'}), '(figsize=(3, 2), dpi=80)\n', (2817, 2841), False, 'from matplotlib import pyplot\n'), ((2846, 2871), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {}), "('font', **font)\n", (2855, 2871), False, 'from matplotlib import pyplot\n'), ((2980, 3105), 'matplotlib.pyplot.semilogx', 'pyplot.semilogx', (["res['total_elements']", "res['E_solv_kJ']"], {'c': '"""k"""', 'marker': '"""o"""', 'mfc': '"""w"""', 'ms': '(3)', 'ls': '"""-"""', 'lw': '(0.5)', 'label': '"""PyGBe"""'}), "(res['total_elements'], res['E_solv_kJ'], c='k', marker='o',\n mfc='w', ms=3, ls='-', lw=0.5, 
label='PyGBe')\n", (2995, 3105), False, 'from matplotlib import pyplot\n'), ((3126, 3226), 'matplotlib.pyplot.semilogx', 'pyplot.semilogx', (['N_fem', 'EsolvAPB'], {'c': '"""k"""', 'marker': '"""^"""', 'mfc': '"""w"""', 'ms': '(3)', 'ls': '"""-"""', 'lw': '(0.5)', 'label': '"""APBS"""'}), "(N_fem, EsolvAPB, c='k', marker='^', mfc='w', ms=3, ls='-',\n lw=0.5, label='APBS')\n", (3141, 3226), False, 'from matplotlib import pyplot\n'), ((3464, 3521), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""$\\\\Delta G_{solv}$ [kJ/mol]"""'], {'fontsize': '(10)'}), "('$\\\\Delta G_{solv}$ [kJ/mol]', fontsize=10)\n", (3477, 3521), False, 'from matplotlib import pyplot\n'), ((3525, 3556), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""N"""'], {'fontsize': '(10)'}), "('N', fontsize=10)\n", (3538, 3556), False, 'from matplotlib import pyplot\n'), ((3560, 3636), 'matplotlib.pyplot.text', 'pyplot.text', (['(500000.0)', '(pyg_ext - 25)', '"""PyGBe extrap."""'], {'fontsize': '(6)', 'rotation': '(0)'}), "(500000.0, pyg_ext - 25, 'PyGBe extrap.', fontsize=6, rotation=0)\n", (3571, 3636), False, 'from matplotlib import pyplot\n'), ((3633, 3703), 'matplotlib.pyplot.text', 'pyplot.text', (['(10000000.0)', '(-2067)', '"""APBS extrap."""'], {'fontsize': '(6)', 'rotation': '(0)'}), "(10000000.0, -2067, 'APBS extrap.', fontsize=6, rotation=0)\n", (3644, 3703), False, 'from matplotlib import pyplot\n'), ((3698, 3766), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'left': '(0.22)', 'bottom': '(0.21)', 'right': '(0.96)', 'top': '(0.95)'}), '(left=0.22, bottom=0.21, right=0.96, top=0.95)\n', (3720, 3766), False, 'from matplotlib import pyplot\n'), ((3771, 3810), 'matplotlib.pyplot.axis', 'pyplot.axis', (['[xmin, xmax, -2450, -2040]'], {}), '([xmin, xmax, -2450, -2040])\n', (3782, 3810), False, 'from matplotlib import pyplot\n'), ((3812, 3844), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3825, 3844), False, 
'from matplotlib import pyplot\n'), ((4029, 4050), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['fname'], {}), '(fname)\n', (4043, 4050), False, 'from matplotlib import pyplot\n'), ((4211, 4248), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 2)', 'dpi': '(80)'}), '(figsize=(3, 2), dpi=80)\n', (4224, 4248), False, 'from matplotlib import pyplot\n'), ((4253, 4366), 'matplotlib.pyplot.loglog', 'pyplot.loglog', (['pygbe_err', "res['total_time']"], {'c': '"""k"""', 'marker': '"""o"""', 'mfc': '"""w"""', 'ms': '(5)', 'ls': '"""-"""', 'lw': '(0.5)', 'label': '"""PyGBe"""'}), "(pygbe_err, res['total_time'], c='k', marker='o', mfc='w', ms=\n 5, ls='-', lw=0.5, label='PyGBe')\n", (4266, 4366), False, 'from matplotlib import pyplot\n'), ((4401, 4500), 'matplotlib.pyplot.loglog', 'pyplot.loglog', (['apb_err', 'timeAPB'], {'c': '"""k"""', 'marker': '"""^"""', 'mfc': '"""w"""', 'ms': '(5)', 'ls': '"""-"""', 'lw': '(0.5)', 'label': '"""APBS"""'}), "(apb_err, timeAPB, c='k', marker='^', mfc='w', ms=5, ls='-',\n lw=0.5, label='APBS')\n", (4414, 4500), False, 'from matplotlib import pyplot\n'), ((4536, 4604), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'left': '(0.19)', 'bottom': '(0.21)', 'right': '(0.96)', 'top': '(0.95)'}), '(left=0.19, bottom=0.21, right=0.96, top=0.95)\n', (4558, 4604), False, 'from matplotlib import pyplot\n'), ((4609, 4659), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Time to solution [s]"""'], {'fontsize': '(10)'}), "('Time to solution [s]', fontsize=10)\n", (4622, 4659), False, 'from matplotlib import pyplot\n'), ((4663, 4698), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Error"""'], {'fontsize': '(10)'}), "('Error', fontsize=10)\n", (4676, 4698), False, 'from matplotlib import pyplot\n'), ((4702, 4733), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (4715, 4733), False, 'from matplotlib import pyplot\n'), ((4917, 4938), 
'matplotlib.pyplot.savefig', 'pyplot.savefig', (['fname'], {}), '(fname)\n', (4931, 4938), False, 'from matplotlib import pyplot\n'), ((4944, 4981), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 2)', 'dpi': '(80)'}), '(figsize=(3, 2), dpi=80)\n', (4957, 4981), False, 'from matplotlib import pyplot\n'), ((4985, 5094), 'matplotlib.pyplot.loglog', 'pyplot.loglog', (["res['total_elements']", "res['total_time']"], {'c': '"""k"""', 'marker': '"""o"""', 'mfc': '"""w"""', 'ms': '(5)', 'ls': '"""-"""', 'lw': '(0.5)'}), "(res['total_elements'], res['total_time'], c='k', marker='o',\n mfc='w', ms=5, ls='-', lw=0.5)\n", (4998, 5094), False, 'from matplotlib import pyplot\n'), ((5130, 5178), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Number of elements"""'], {'fontsize': '(10)'}), "('Number of elements', fontsize=10)\n", (5143, 5178), False, 'from matplotlib import pyplot\n'), ((5183, 5233), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Time to solution [s]"""'], {'fontsize': '(10)'}), "('Time to solution [s]', fontsize=10)\n", (5196, 5233), False, 'from matplotlib import pyplot\n'), ((5238, 5306), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'left': '(0.19)', 'bottom': '(0.21)', 'right': '(0.96)', 'top': '(0.95)'}), '(left=0.19, bottom=0.21, right=0.96, top=0.95)\n', (5260, 5306), False, 'from matplotlib import pyplot\n'), ((5502, 5523), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['fname'], {}), '(fname)\n', (5516, 5523), False, 'from matplotlib import pyplot\n'), ((6231, 6261), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (6243, 6261), False, 'import requests\n'), ((6766, 6784), 'os.remove', 'os.remove', (['meshzip'], {}), '(meshzip)\n', (6775, 6784), False, 'import os\n'), ((392, 425), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (403, 425), False, 'import pickle\n'), ((4122, 4140), 'numpy.abs', 'numpy.abs', 
(['pyg_ext'], {}), '(pyg_ext)\n', (4131, 4140), False, 'import numpy\n'), ((4155, 4184), 'numpy.abs', 'numpy.abs', (['(EsolvAPB - apb_ext)'], {}), '(EsolvAPB - apb_ext)\n', (4164, 4184), False, 'import numpy\n'), ((4187, 4205), 'numpy.abs', 'numpy.abs', (['apb_ext'], {}), '(apb_ext)\n', (4196, 4205), False, 'import numpy\n'), ((5701, 5726), 'os.path.isdir', 'os.path.isdir', (['"""geometry"""'], {}), "('geometry')\n", (5714, 5726), False, 'import os\n'), ((6618, 6647), 'zipfile.ZipFile', 'zipfile.ZipFile', (['meshzip', '"""r"""'], {}), "(meshzip, 'r')\n", (6633, 6647), False, 'import zipfile\n'), ((6888, 6911), 'os.mkdir', 'os.mkdir', (['"""results_K40"""'], {}), "('results_K40')\n", (6896, 6911), False, 'import os\n'), ((8566, 8584), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (8574, 8584), False, 'import os\n'), ((9693, 9755), 'subprocess.call', 'subprocess.call', (["['pygbe', '-c', conf, '-p', 'lys.param', '.']"], {}), "(['pygbe', '-c', conf, '-p', 'lys.param', '.'])\n", (9708, 9755), False, 'import subprocess\n'), ((1797, 1807), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1805, 1807), False, 'import sys\n'), ((3283, 3296), 'numpy.ones', 'numpy.ones', (['(2)'], {}), '(2)\n', (3293, 3296), False, 'import numpy\n'), ((3391, 3404), 'numpy.ones', 'numpy.ones', (['(2)'], {}), '(2)\n', (3401, 3404), False, 'import numpy\n'), ((6964, 6989), 'os.listdir', 'os.listdir', (['"""results_K40"""'], {}), "('results_K40')\n", (6974, 6989), False, 'import os\n'), ((8637, 8657), 'os.listdir', 'os.listdir', (['"""output"""'], {}), "('output')\n", (8647, 8657), False, 'import os\n'), ((10398, 10423), 'os.path.join', 'os.path.join', (['"""output"""', 'a'], {}), "('output', a)\n", (10410, 10423), False, 'import os\n'), ((4079, 4108), 'numpy.array', 'numpy.array', (["res['E_solv_kJ']"], {}), "(res['E_solv_kJ'])\n", (4090, 4108), False, 'import numpy\n'), ((8436, 8446), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8444, 8446), False, 'import sys\n'), ((9466, 9476), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (9474, 9476), False, 'import sys\n'), ((10450, 10470), 'os.listdir', 'os.listdir', (['"""output"""'], {}), "('output')\n", (10460, 10470), False, 'import os\n'), ((7767, 7797), 'os.path.join', 'os.path.join', (['"""results_K40"""', 'a'], {}), "('results_K40', a)\n", (7779, 7797), False, 'import os\n'), ((8254, 8264), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8262, 8264), False, 'import sys\n'), ((7824, 7849), 'os.listdir', 'os.listdir', (['"""results_K40"""'], {}), "('results_K40')\n", (7834, 7849), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from networkx.drawing.nx_pydot import graphviz_layout
if __name__ == "__main__":
os.environ["PATH"] += ":/usr/local/bin"
map = {
0: r"$\alpha_1$",
1: r"$\alpha_2$",
2: r"$\alpha_3$",
3: "1",
4: "2",
5: r"$\beta_1$",
6: r"$\beta_2$",
7: r"$\beta_3$",
}
dod = {
"a1": {"1": {"weight": 1}, "2": {"weight": 3}},
"a2": {"1": {"weight": 2}, "2": {"weight": 3}},
"a3": {"2": {"weight": 1}, "b3": {"weight": 4}},
"1": {"2": {"weight": 1}, "b1": {"weight": 2}, "b2": {"weight": 4}},
"2": {"b1": {"weight": 3}, "b2": {"weight": 5}, "b3": {"weight": 2}},
}
adj_mat = np.array(
[
[0, 0, 0, 1, 3, 0, 0, 0],
[0, 0, 0, 2, 3, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 4],
[0, 0, 0, 0, 1, 2, 4, 0],
[0, 0, 0, 0, 0, 3, 5, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
)
# G = nx.DiGraph(adj_mat)
G = nx.DiGraph(dod)
G.node["a1"]["demand"] = -2
G.node["a2"]["demand"] = -2
G.node["a3"]["demand"] = -1
# G.node["1"]["demand"] = 0
# G.node["1"]["capacity"] = 0
# G.node["2"]["demand"] = 0
# G.node["2"]["capacity"] = 0
G.node["b1"]["demand"] = 2
G.node["b2"]["demand"] = 2
G.node["b3"]["demand"] = 1
# G = nx.relabel_nodes(G, map)
# pos = nx.random_layout(G)
pos = graphviz_layout(G, prog="dot")
# nx.draw(G, pos, node_color="lightblue")
# nx.draw_networkx_edge_labels(G, pos)
# nx.draw_networkx_labels(G, pos)
# plt.show()
# print(nx.dijkstra_predecessor_and_distance(G, "a1"))
# print(nx.dijkstra_predecessor_and_distance(G, "a2"))
# print(nx.dijkstra_predecessor_and_distance(G, "a3"))
# print(list(nx.bfs_edges(G, "a1")))
# print(list(nx.dfs_edges(G, "a1")))
# print(nx.algorithms.max_weight_matching(G, True))
print(nx.min_cost_flow(G))
print(G)
| [
"networkx.min_cost_flow",
"networkx.DiGraph",
"numpy.array",
"networkx.drawing.nx_pydot.graphviz_layout"
] | [((795, 1022), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 3, 0, 0, 0], [0, 0, 0, 2, 3, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, \n 4], [0, 0, 0, 0, 1, 2, 4, 0], [0, 0, 0, 0, 0, 3, 5, 2], [0, 0, 0, 0, 0,\n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 1, 3, 0, 0, 0], [0, 0, 0, 2, 3, 0, 0, 0], [0, 0, 0, 0, \n 1, 0, 0, 4], [0, 0, 0, 0, 1, 2, 4, 0], [0, 0, 0, 0, 0, 3, 5, 2], [0, 0,\n 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]])\n', (803, 1022), True, 'import numpy as np\n'), ((1174, 1189), 'networkx.DiGraph', 'nx.DiGraph', (['dod'], {}), '(dod)\n', (1184, 1189), True, 'import networkx as nx\n'), ((1588, 1618), 'networkx.drawing.nx_pydot.graphviz_layout', 'graphviz_layout', (['G'], {'prog': '"""dot"""'}), "(G, prog='dot')\n", (1603, 1618), False, 'from networkx.drawing.nx_pydot import graphviz_layout\n'), ((2091, 2110), 'networkx.min_cost_flow', 'nx.min_cost_flow', (['G'], {}), '(G)\n', (2107, 2110), True, 'import networkx as nx\n')] |
"""
collection of useful miscellaneous functions
"""
def get_dim_exp(exp):
    """
    Return the hard-coded data dimensions (lat-lon-lev-time)
    for the given simulation name.
    """
    short_run = "QSC5.TRACMIP.NH01.L.pos.Q0.300.lon0.150.lond.45.lat0.0.latd.30"
    if exp == short_run:
        from ds21grl import dim_aqua_short as dim
        return dim
    from ds21grl import dim_aqua as dim
    return dim
def tic():
    """Start the module-level stopwatch; read by the matching toc()."""
    # Homemade version of matlab tic function
    import time
    # store the start time in a module-level global so toc() can find it
    global startTime_for_tictoc
    startTime_for_tictoc = time.time()
def toc():
    """Print the elapsed time since the last tic() call (MATLAB-style toc)."""
    import time
    if 'startTime_for_tictoc' not in globals():
        print("Toc: start time not set")
        return
    elapsed = time.time() - startTime_for_tictoc
    print("Elapsed time is " + str(elapsed) + " seconds.")
def qflux_const(exp):
    """
    Return the predefined constants used to generate the qflux pattern
    of a named experiment.

    Returns
    -------
    (Q0, lon0, lond, lat0, latd, wavenum_flag) tuple of numbers.

    Raises
    ------
    ValueError for an unknown experiment name (the original if/elif
    chain fell through and crashed with UnboundLocalError).
    """
    prefix = 'QSC5.TRACMIP.NH01.'
    # experiment name -> (Q0, lon0, lond, lat0, latd, wavenum_flag)
    constants = {
        prefix + 'L.pos.Q0.150.lon0.150.lond.90.lat0.0.latd.30': (150, 150, 90, 0, 30, 0),
        prefix + 'L.neg.Q0.150.lon0.150.lond.90.lat0.0.latd.30': (-150, 150, 90, 0, 30, 0),
        prefix + 'U.pos.Q0.150.lon0.150.lond.90.lat0.0.latd.30': (150, 150, 90, 0, 30, 0),
        prefix + 'U.neg.Q0.150.lon0.150.lond.90.lat0.0.latd.30': (-150, 150, 90, 0, 30, 0),
        prefix + 'L.pos.Q0.300.lon0.150.lond.90.lat0.0.latd.30': (300, 150, 90, 0, 30, 0),
        prefix + 'Lk1.Q0.75.lon0.150.lat0.0.latd.30': (75, 150, 0, 0, 30, 1),
        prefix + 'U.pos.Q0.300.lon0.150.lond.90.lat0.0.latd.30': (300, 150, 90, 0, 30, 0),
        prefix + 'L.pos.Q0.300.lon0.150.lond.45.lat0.0.latd.30': (300, 150, 45, 0, 30, 0),
    }
    try:
        return constants[exp]
    except KeyError:
        raise ValueError('unknown experiment: %s' % exp)
def daysinmonths(month):
    """
    Return the number of days in the given month (1-12),
    ignoring leap-year days.
    """
    import numpy as np
    days_per_month = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    return days_per_month[month - 1]
def month_to_dayofyear(month):
    """
    Map a month index (0-11) onto the day of year (0-364) of the
    LAST day of that month; a negative month maps to 0.
    """
    import numpy as np
    days_per_month = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    if month < 0:
        return 0
    return days_per_month[:month + 1].sum(axis=0)
def dayofyear_to_month(dayofyear):
    """
    Map a zero-based day of year (0-364) onto a month (1-12).

    Note: the input is ZERO-based (the original docstring wrongly said
    1-365; the body converted from 0-364 internally).

    Raises
    ------
    ValueError if dayofyear is outside 0-364 (the original crashed
    with UnboundLocalError instead).
    """
    import numpy as np
    if not 0 <= dayofyear <= 364:
        raise ValueError('dayofyear must be in 0-364, got %r' % (dayofyear,))
    days_per_month = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    day = dayofyear + 1  # convert to one-based day of year (1-365)
    # cumulative sums give the last (one-based) day of each month;
    # the first month whose last day >= `day` is the answer
    last_days = np.cumsum(days_per_month)
    return int(np.searchsorted(last_days, day)) + 1
def leap_year_test(year):
    """
    Return 1 if ``year`` is a Gregorian leap year, else 0.

    Fixes the original logic, where the century exception was in an
    unreachable elif (any year divisible by 100 is also divisible
    by 4, so 1900 and 2100 were wrongly flagged as leap years).
    """
    flag = 0
    if (year % 4 == 0) and (year % 100 != 0 or year % 400 == 0):
        flag = 1
    return flag
def get_aqua_timestamp(iyear, ichunk, branch_flag):
    """
    Return a 'YYYY-MM-DD-00000' timestamp for one of the five 73-day
    chunks of a model year (ichunk 0-4). Branch runs (branch_flag != 0)
    start each chunk one day later.

    (Also drops the unused `import numpy` and collapses the ten
    duplicated if/elif branches into two lookup tables.)
    """
    if branch_flag == 0:
        chunk_start = ('01-01', '03-15', '05-27', '08-08', '10-20')
    else:
        # branch run chunk start days shifted by 1 day
        chunk_start = ('01-02', '03-16', '05-28', '08-09', '10-21')
    return format(iyear, "04") + '-' + chunk_start[ichunk] + '-00000'
def AxRoll(x, ax, invert=False):
    """
    Move axis ``ax`` of array ``x`` to the leading position
    (invert=False), or undo that rearrangement (invert=True).
    """
    import numpy as np
    axis = ax if ax >= 0 else len(x.shape) + ax
    if invert is False:
        return np.rollaxis(x, axis, 0)
    return np.rollaxis(x, 0, axis + 1)
def get_season_daily(data, season, ax):
    """
    Extract the days belonging to ``season`` from axis ``ax``, which is
    assumed to hold 365 calendar days (no leap day).
    """
    import numpy as np
    data = AxRoll(data, ax, invert=False)
    days = np.arange(0, 365, 1)
    # hardcoded day indices for each season
    if season == 'NDJFM':
        # rotate so Nov 1 (day 304) comes first, then take 151 days
        days = np.roll(days, 61, axis=0)
        index = days[0:151]
    elif season == 'MJJAS':
        index = days[120:273]
    elif season == 'ANNUAL':
        index = days
    data = data[index, :]
    return AxRoll(data, ax, invert=True)
def get_season_monthly(data, season, ax):
    """
    Extract the months corresponding to a given season from monthly
    data.  NOTE: ``ax`` is the month dimension.
    """
    import numpy as np
    arr = AxRoll(data, ax, invert=False)
    # 0-based month indices belonging to the requested season
    if season == 'NDJFM':
        index = np.array([0, 1, 2, 10, 11])
    elif season == 'MJJAS':
        index = np.arange(4, 9, 1)
    elif season == 'ANNUAL':
        index = np.arange(0, 12, 1)
    arr = arr[index, :]
    return AxRoll(arr, ax, invert=True)
def get_anomaly_daily_seasonal_cycle(data, dim):
    """
    Remove the climatological daily seasonal cycle from ``data`` in
    place and return it.
    NOTE: ``data`` must have years and days-of-year as the 1st and 2nd
    dimensions, respectively; ``dim.years.size`` gives the year count.
    """
    import numpy as np
    # climatological mean over years, computed before data is modified
    scycle = data.mean(axis=0)
    for year_idx in range(dim.years.size):
        data[year_idx, :] = data[year_idx, :] - scycle
    return data
def get_eddy(data, ax):
    """
    Return the deviation from the zonal mean; ``ax`` is the longitude
    axis.  A 1-D input yields a new array; for >1-D input the anomaly
    is written into ``data`` in place.
    """
    import numpy as np
    if data.ndim == 1:
        return data - data.mean(axis=0)
    data = AxRoll(data, ax, invert=False)  # longitude to leading dim
    zmean = data.mean(axis=0)
    for lon_idx in range(data.shape[0]):
        data[lon_idx, :] = data[lon_idx, :] - zmean[:]
    return AxRoll(data, ax, invert=True)
| [
"numpy.sum",
"numpy.roll",
"time.time",
"numpy.array",
"numpy.arange",
"numpy.rollaxis"
] | [((613, 624), 'time.time', 'time.time', ([], {}), '()\n', (622, 624), False, 'import time\n'), ((3294, 3352), 'numpy.array', 'np.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (3302, 3352), True, 'import numpy as np\n'), ((3560, 3618), 'numpy.array', 'np.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (3568, 3618), True, 'import numpy as np\n'), ((3981, 4039), 'numpy.array', 'np.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (3989, 4039), True, 'import numpy as np\n'), ((6474, 6494), 'numpy.arange', 'np.arange', (['(0)', '(365)', '(1)'], {}), '(0, 365, 1)\n', (6483, 6494), True, 'import numpy as np\n'), ((6198, 6218), 'numpy.rollaxis', 'np.rollaxis', (['x', 'n', '(0)'], {}), '(x, n, 0)\n', (6209, 6218), True, 'import numpy as np\n'), ((6239, 6263), 'numpy.rollaxis', 'np.rollaxis', (['x', '(0)', '(n + 1)'], {}), '(x, 0, n + 1)\n', (6250, 6263), True, 'import numpy as np\n'), ((6580, 6610), 'numpy.roll', 'np.roll', (['dayofyear', '(61)'], {'axis': '(0)'}), '(dayofyear, 61, axis=0)\n', (6587, 6610), True, 'import numpy as np\n'), ((7500, 7527), 'numpy.array', 'np.array', (['[1, 2, 3, 11, 12]'], {}), '([1, 2, 3, 11, 12])\n', (7508, 7527), True, 'import numpy as np\n'), ((7575, 7594), 'numpy.arange', 'np.arange', (['(5)', '(10)', '(1)'], {}), '(5, 10, 1)\n', (7584, 7594), True, 'import numpy as np\n'), ((4234, 4259), 'numpy.sum', 'np.sum', (['daysinmonths[0:i]'], {}), '(daysinmonths[0:i])\n', (4240, 4259), True, 'import numpy as np\n'), ((4279, 4308), 'numpy.sum', 'np.sum', (['daysinmonths[0:i + 1]'], {}), '(daysinmonths[0:i + 1])\n', (4285, 4308), True, 'import numpy as np\n'), ((7645, 7664), 'numpy.arange', 'np.arange', (['(1)', '(13)', '(1)'], {}), '(1, 13, 1)\n', (7654, 7664), True, 'import numpy as np\n'), ((838, 849), 'time.time', 
'time.time', ([], {}), '()\n', (847, 849), False, 'import time\n')] |
# coding=utf-8
import numpy as np
from scipy.misc import logsumexp
from pybasicbayes.util.stats import sample_discrete
from pyhsmm.internals.hmm_states import HMMStatesEigen
from pyslds.states import _SLDSStatesCountData, _SLDSStatesMaskedData
from rslds.util import one_hot, logistic
class InputHMMStates(HMMStatesEigen):
    """
    HMM state sequence whose transition matrix varies per time step as
    a function of the supplied covariates.
    """

    def __init__(self, covariates, *args, **kwargs):
        self.covariates = covariates
        super(InputHMMStates, self).__init__(*args, **kwargs)

    @property
    def trans_matrix(self):
        # one transition matrix per time step, driven by the covariates
        return self.model.trans_distn.get_trans_matrices(self.covariates)

    def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
        """
        Generate the discrete state sequence.  Note that the handling of
        'with_noise' differs slightly from the pySLDS implementation:
        rather than selecting the most likely discrete state, we randomly
        sample the discrete states.
        """
        if stateseq is not None:
            assert stateseq.shape == (self.T,)
            self.stateseq = stateseq.astype(np.int32)
            return
        trans_matrices = self.trans_matrix
        seq = np.full(self.T, -1, dtype=np.int32)
        # uniform draw for the initial state
        seq[0] = np.random.choice(self.num_states)
        for t in range(1, self.T):
            probs = trans_matrices[t - 1, seq[t - 1], :].ravel()
            seq[t] = sample_discrete(probs)
        self.stateseq = seq
###
# The recurrent SLDS is basically a combo of the Input HMM and SLDS
# However, we need a few enhancements: the latent state objects need to
# know how to update the continuous values given the discrete states.
#
class _RecurrentSLDSStatesBase(object):
    """
    Mixin for recurrent-SLDS state objects.  The discrete-state
    transitions depend on the previous continuous (Gaussian) state, so
    the multinomial transitions effectively act as additional
    observations of the continuous states.
    """
    def __init__(self, model, covariates=None, data=None, **kwargs):
        # By definition, the covariates are the latent gaussian states
        if covariates is not None:
            raise NotImplementedError("Not supporting exogenous inputs yet")
        super(_RecurrentSLDSStatesBase, self).\
            __init__(model, data=data, **kwargs)
        # Set the covariates to be the gaussian states: x_{0:T-2} drives
        # the transitions into z_{1:T-1}
        self.covariates = self.gaussian_states[:-1]
    @property
    def trans_distn(self):
        # recurrent (covariate-dependent) transition distribution owned
        # by the model
        return self.model.trans_distn
    def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
        """
        Jointly sample the discrete and continuous states.

        :param initial_condition: optional (z_0, x_0) pair; if None,
            z_0 is drawn uniformly and x_0 from the initial dynamics.
        :param with_noise: if True, sample; if False, take the argmax
            discrete state and the dynamics' point prediction.
        :param stateseq: optional pre-specified discrete sequence;
            entries equal to -1 are filled in during generation.
        """
        from pybasicbayes.util.stats import sample_discrete
        # Generate from the prior and raise exception if unstable
        T, K, n = self.T, self.num_states, self.D_latent
        # Initialize discrete state sequence
        dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq
        gss = np.empty((T,n), dtype='double')
        if initial_condition is None:
            # uniform initial distribution over discrete states
            init_state_distn = np.ones(self.num_states) / float(self.num_states)
            dss[0] = sample_discrete(init_state_distn.ravel())
            gss[0] = self.init_dynamics_distns[dss[0]].rvs()
        else:
            dss[0] = initial_condition[0]
            gss[0] = initial_condition[1]
        for t in range(1,T):
            # Sample discrete state given previous continuous state
            A = self.trans_distn.get_trans_matrices(gss[t-1:t])[0]
            if with_noise:
                # Sample discrete state from recurrent transition matrix
                if dss[t] == -1:
                    dss[t] = sample_discrete(A[dss[t-1], :])
                # Sample continuous state given current discrete state
                # NOTE(review): the dynamics are indexed by dss[t-1], not
                # dss[t] — confirm this matches the model's convention for
                # which discrete state governs the x_{t-1} -> x_t step.
                gss[t] = self.dynamics_distns[dss[t-1]].\
                    rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
                        return_xy=False)
            else:
                # Pick the most likely next discrete state and continuous state
                if dss[t] == -1:
                    dss[t] = np.argmax(A[dss[t-1], :])
                gss[t] = self.dynamics_distns[dss[t-1]]. \
                    predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))
            assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"
        self.stateseq = dss
        self.gaussian_states = gss
self.gaussian_states = gss
class PGRecurrentSLDSStates(_RecurrentSLDSStatesBase,
                            _SLDSStatesCountData,
                            InputHMMStates):
    """
    Use Pólya-gamma augmentation to perform Gibbs sampling with conjugate updates.

    The recurrent (covariate-dependent) transition likelihood becomes
    conditionally conjugate once per-transition PG auxiliary variables
    (``trans_omegas``) are introduced.
    """
    def __init__(self, model, covariates=None, data=None, mask=None,
                 stateseq=None, gaussian_states=None, **kwargs):
        super(PGRecurrentSLDSStates, self).\
            __init__(model, covariates=covariates, data=data, mask=mask,
                     stateseq=stateseq, gaussian_states=gaussian_states,
                     **kwargs)
        # Initialize the Polya gamma samplers if they haven't already been set
        if not hasattr(self, 'ppgs'):
            import pypolyagamma as ppg
            # Initialize the Polya-gamma samplers, one per OMP thread
            num_threads = ppg.get_omp_num_threads()
            seeds = np.random.randint(2 ** 16, size=num_threads)
            self.ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]
        # Initialize auxiliary variables for transitions
        self.trans_omegas = np.ones((self.T-1, self.num_states-1))
        # If discrete and continuous states are given, resample the auxiliary variables once
        if stateseq is not None and gaussian_states is not None:
            self.resample_transition_auxiliary_variables()
    @property
    def info_emission_params(self):
        # Augment the standard emission potentials with the transition
        # potentials on x_{0:T-2} coming from the recurrent weights.
        J_node, h_node, log_Z_node = super(PGRecurrentSLDSStates, self).info_emission_params
        J_node_trans, h_node_trans = self.info_trans_params
        J_node[:-1] += J_node_trans
        h_node[:-1] += h_node_trans
        return J_node, h_node, log_Z_node
    @property
    def info_trans_params(self):
        # Add the potential from the transitions (information form):
        # given the PG omegas, the transition likelihood is Gaussian in x.
        trans_distn, omega = self.trans_distn, self.trans_omegas
        prev_state = one_hot(self.stateseq[:-1], self.num_states)
        next_state = one_hot(self.stateseq[1:], self.num_states)
        # A: weights on the previous one-hot state; C: weights on x
        A = trans_distn.A[:, :self.num_states]
        C = trans_distn.A[:, self.num_states:self.num_states+self.D_latent]
        # D = trans_distn.A[:, self.num_states+self.D_latent:]
        b = trans_distn.b
        # per-class outer products c_k c_k^T, flattened for the omega dot
        CCT = np.array([np.outer(cp, cp) for cp in C]). \
            reshape((trans_distn.D_out, self.D_latent ** 2))
        J_node = np.dot(omega, CCT)
        kappa = trans_distn.kappa_func(next_state[:,:-1])
        h_node = kappa.dot(C)
        h_node -= (omega * b.T).dot(C)
        h_node -= (omega * prev_state.dot(A.T)).dot(C)
        # h_node[:-1] -= (omega * self.inputs.dot(D.T)).dot(C)
        # Restore J_node to its original shape
        J_node = J_node.reshape((self.T-1, self.D_latent, self.D_latent))
        return J_node, h_node
    def resample(self, niter=1):
        # Standard SLDS resampling followed by a PG auxiliary update.
        super(PGRecurrentSLDSStates, self).resample(niter=niter)
        self.resample_transition_auxiliary_variables()
    def resample_gaussian_states(self):
        super(PGRecurrentSLDSStates, self).resample_gaussian_states()
        # keep the transition covariates in sync with the new x samples
        self.covariates = self.gaussian_states[:-1].copy()
    def resample_transition_auxiliary_variables(self):
        # Resample the auxiliary variable for the transition matrix
        trans_distn = self.trans_distn
        prev_state = one_hot(self.stateseq[:-1], self.num_states)
        next_state = one_hot(self.stateseq[1:], self.num_states)
        A = trans_distn.A[:, :self.num_states]
        C = trans_distn.A[:, self.num_states:self.num_states + self.D_latent]
        # D = trans_distn.A[:, self.num_states+self.D_latent:]
        b = trans_distn.b
        # NOTE(review): the trailing backslash below joins the commented-out
        # inputs term onto this logical line; the expression ends at b.T.
        psi = prev_state.dot(A.T) \
              + self.covariates.dot(C.T) \
              + b.T \
              # + self.inputs.dot(D.T) \
        b_pg = trans_distn.b_func(next_state[:,:-1])
        import pypolyagamma as ppg
        # draw omega_{tk} ~ PG(b_pg, psi) in parallel, in place
        ppg.pgdrawvpar(self.ppgs, b_pg.ravel(), psi.ravel(), self.trans_omegas.ravel())
##
# Recurrent SLDS with softmax transition model.
# Use a variational lower bound on the softmax (the JJ96-style quadratic
# bound, cf. Minka, 2009) in order to perform conjugate updates of q(z) and q(x)
##
class _SoftmaxRecurrentSLDSStatesBase(_RecurrentSLDSStatesBase,
                                      _SLDSStatesMaskedData,
                                      InputHMMStates):
    """
    Variational EM with the JJ96 lower bound for updating q(z) and q(x).
    """
    def __init__(self, model, **kwargs):
        super(_SoftmaxRecurrentSLDSStatesBase, self).__init__(model, **kwargs)
        # auxiliary variables of the JJ96 bound, one (a_t, b_{tk}) per
        # transition
        self.a = np.zeros((self.T - 1,))
        self.bs = np.ones((self.T - 1, self.num_states))

    @property
    def lambda_bs(self):
        # JJ96 bound coefficient lambda(b) = (sigma(b) - 1/2) / (2b)
        return 0.5 / self.bs * (logistic(self.bs) - 0.5)

    def _set_expected_trans_stats(self):
        """
        Compute the expected sufficient statistics for the transition
        distribution update:
            (E[u z_{t+1}^T], E[u u^T], E[u], a, lambda(b))
        where u_t = [z_t, x_t] stacks the discrete one-hot state and the
        continuous state.
        """
        T, D, K = self.T, self.D_latent, self.num_states
        E_z = self.expected_states
        E_x = self.smoothed_mus
        # second moment of x from the smoothed covariances
        E_x_xT = self.smoothed_sigmas + E_x[:, :, None] * E_x[:, None, :]

        # E[u_t] = [E[z_t], E[x_t]] for t = 0..T-2
        E_u = np.concatenate((E_z[:-1], E_x[:-1]), axis=1)

        # E[u_t z_{t+1}^T]: stack E[z_t z_{t+1}^T] on top of E[x_t]E[z_{t+1}]^T
        E_x_zp1T = E_x[:-1, :, None] * E_z[1:, None, :]
        E_u_zp1T = np.concatenate((self.expected_joints, E_x_zp1T), axis=1)

        # E[u_t u_t^T] assembled as the block matrix
        #   [[ diag(E[z]),   E[z] E[x]^T ],
        #    [ E[x] E[z]^T,  E[x x^T]    ]]
        E_u_uT = np.zeros((T - 1, K + D, K + D))
        diag = np.arange(K)
        E_u_uT[:, diag, diag] = E_z[:-1]
        E_u_uT[:, :K, K:] = E_z[:-1, :, None] * E_x[:-1, None, :]
        E_u_uT[:, K:, :K] = E_x[:-1, :, None] * E_z[:-1, None, :]
        E_u_uT[:, K:, K:] = E_x_xT[:-1]

        self.E_trans_stats = (E_u_zp1T, E_u_uT, E_u, self.a, self.lambda_bs)
class _SoftmaxRecurrentSLDSStatesMeanField(_SoftmaxRecurrentSLDSStatesBase):
    """
    Mean-field (variational Bayes) updates for q(z) and q(x) under the
    JJ96 lower bound on the softmax transition likelihood, taking
    expectations over q(theta).
    """
    @property
    def expected_info_rec_params(self):
        """
        Compute the recurrent contributions (J_rec, h_rec) to the
        Gaussian information-form potentials on x.
        """
        E_z = self.expected_states
        E_W = self.trans_distn.expected_W
        E_WWT = self.trans_distn.expected_WWT
        E_logpi_WT = self.trans_distn.expected_logpi_WT
        # Eq (24) 2 * E[ W diag(lambda(b_t)) W^\trans ]
        J_rec = np.zeros((self.T, self.D_latent, self.D_latent))
        np.einsum('tk, kij -> tij', 2 * self.lambda_bs, E_WWT, out=J_rec[:-1])
        # Eq (25)
        h_rec = np.zeros((self.T, self.D_latent))
        h_rec[:-1] += E_z[1:].dot(E_W.T)
        h_rec[:-1] += -1 * (0.5 - 2 * self.a[:,None] * self.lambda_bs).dot(E_W.T)
        h_rec[:-1] += -2 * np.einsum('ti, tj, jid -> td', E_z[:-1], self.lambda_bs, E_logpi_WT)
        return J_rec, h_rec
    @property
    def expected_info_emission_params(self):
        """
        Fold the recurrent potentials into the standard expected
        emission potentials.
        """
        J_node, h_node, log_Z_node = \
            super(_SoftmaxRecurrentSLDSStatesMeanField, self).\
                expected_info_emission_params
        J_rec, h_rec = self.expected_info_rec_params
        return J_node + J_rec, h_node + h_rec, log_Z_node
    @property
    def mf_aBl(self):
        # Add in node potentials from transitions
        aBl = super(_SoftmaxRecurrentSLDSStatesMeanField, self).mf_aBl
        aBl += self._mf_aBl_rec
        return aBl
    @property
    def _mf_aBl_rec(self):
        # Compute the extra node _log_ potentials from the transition model
        aBl = np.zeros((self.T, self.num_states))
        # Eq (34): \psi_{t+1}^{rec} = E [ x_t^\trans W(\theta) ]
        E_x = self.smoothed_mus
        E_W = self.trans_distn.expected_W
        E_logpi = self.trans_distn.expected_logpi
        E_logpi_WT = self.trans_distn.expected_logpi_WT
        E_logpi_logpiT = self.trans_distn.expected_logpi_logpiT
        # diagonals of E[logpi logpi^T], i.e. E[(log pi_{ik})^2]
        E_logpisq = np.array([np.diag(Pk) for Pk in E_logpi_logpiT]).T
        aBl[1:] += E_x[:-1].dot(E_W)
        # Eq (36) transpose:
        #   -2 E[ x_t W \diag(\lambda(b_t) \log \pi^\trans ]
        aBl[:-1] += -2 * np.einsum('td, kid, tk -> ti', E_x[:-1], E_logpi_WT, self.lambda_bs)
        # Eq (37) transpose:
        #  (1/2 - 2 a_t \lambda(b_t)^\trans E[ \log \pi^\trans]
        a, bs = self.a, self.bs
        aBl[:-1] += -1 * (0.5 - 2*a[:,None] * self.lambda_bs).dot(E_logpi.T)
        # Eq (38)
        aBl[:-1] += -1 * self.lambda_bs.dot(E_logpisq.T)
        return aBl
    ### Updates for auxiliary variables of JJ96 bound (a and bs)
    def meanfield_update_auxiliary_vars(self, n_iter=10):
        """
        Update the JJ96 auxiliary variables a and bs via block
        coordinate updates (n_iter sweeps).
        """
        K = self.num_states
        E_z = self.expected_states
        # NOTE(review): this renormalizes expected_states in place
        E_z /= E_z.sum(1, keepdims=True)
        E_x = self.smoothed_mus
        E_xxT = self.smoothed_sigmas + E_x[:,:,None] * E_x[:,None,:]
        E_logpi = self.trans_distn.expected_logpi
        E_W = self.trans_distn.expected_W
        E_WWT = self.trans_distn.expected_WWT
        E_logpi_WT = self.trans_distn.expected_logpi_WT
        E_logpi_logpiT = self.trans_distn.expected_logpi_logpiT
        E_logpi_sq = np.array([np.diag(Pk) for Pk in E_logpi_logpiT]).T
        # Compute m_{tk} = E[v_{tk}]
        m = E_z[:-1].dot(E_logpi) + E_x[:-1].dot(E_W)
        # Compute s_{tk} = E[v_{tk}^2]
        # E[v_{tk}^2] = e_k^T E[\psi_1 + \psi_2 + \psi_3] e_k where
        # e_k^T \psi_1 e_k =
        #   = Tr(E[z_t z_t^T p_k p_k^T])  with p_k = P[:,k] (kth col of trans matrix)
        #   = Tr(diag(E[z_t]) \dot E[p_k p_k^T] )
        #   = Tr(A^T \dot B) with A = A^T = diag(E[z_t]), B = E[p_k p_k^T]
        #   = \sum_{ij} A_{ij} * B_{ij}
        #   = \sum_{i} E[z_{t,i}] * E[p_{ik}^2]
        psi_1 = E_z[:-1].dot(E_logpi_sq)
        # e_k^T \psi_2 e_k =
        #   = 2e_k^T E[W^T x_t z_t^T log pi] e_k
        # \psi_2 = 2 diag*(E[W^T x_t z_t^T log pi])
        #        = 2 E[(x_t^T W) * (z_t^T log pi)]
        # psi_2 = 2 * E_x[:-1].dot(E_W) * E_z[:-1].dot(E_logpi)
        psi_2 = 2 * np.einsum('td, ti, kid -> tk', E_x[:-1], E_z[:-1], E_logpi_WT)
        # e_k^T \psi_3 e_k =
        #   =Tr(E[x_t x_t^T w_k w_k^T])  with w_k = W[:,k]  (kth col of weight matrix)
        #   = Tr(E[x_t x_t^T] \dot E[w_k w_k^T])
        #   = Tr(A^T \dot B) with A = A^T = E[x_t x_t^T]), B = E[w_k w_k^T]
        #   = \sum_{ij} A_{ij} * B_{ij}
        psi_3 = np.einsum('tij, kij -> tk', E_xxT[:-1], E_WWT)
        # s_{tk} = E[v_{tk}^2]
        s = psi_1 + psi_2 + psi_3
        assert np.all(s > 0)
        for itr in range(n_iter):
            lambda_bs = self.lambda_bs
            # Eq (42)
            self.a = 2 * (m * lambda_bs).sum(axis=1) + K / 2.0 - 1.0
            self.a /= 2 * lambda_bs.sum(axis=1)
            # Eq (43)
            self.bs = np.sqrt(s - 2 * m * self.a[:, None] + self.a[:, None] ** 2)
    def meanfield_update_discrete_states(self):
        """
        Override the discrete state updates in pyhsmm to keep the necessary suff stats.

        Returns the variational entropy of q(z).
        """
        self.clear_caches()
        # Run the message passing algorithm
        trans_potential = self.trans_distn.exp_expected_logpi
        init_potential = self.mf_pi_0
        likelihood_potential = self.mf_aBl
        alphal = self._messages_forwards_log(trans_potential, init_potential, likelihood_potential)
        betal = self._messages_backwards_log(trans_potential, likelihood_potential)
        # Convert messages into expectations (normalized in a
        # numerically stable way by subtracting the per-row max)
        expected_states = alphal + betal
        expected_states -= expected_states.max(1)[:, None]
        np.exp(expected_states, out=expected_states)
        expected_states /= expected_states.sum(1)[:, None]
        Al = np.log(trans_potential)
        log_joints = alphal[:-1, :, None] + betal[1:, None, :] \
                     + likelihood_potential[1:, None, :] \
                     + Al[None, ...]
        log_joints -= log_joints.max(axis=(1, 2), keepdims=True)
        joints = np.exp(log_joints)
        joints /= joints.sum(axis=(1, 2), keepdims=True)
        # Compute the log normalizer log p(x_{1:T} | \theta, a, b)
        normalizer = logsumexp(alphal[0] + betal[0])
        # Save expected statistics
        self.expected_states = expected_states
        self.expected_joints = joints
        self.expected_transcounts = joints.sum(0)
        self._normalizer = normalizer
        # Update the "stateseq" variable too
        self.stateseq = self.expected_states.argmax(1).astype('int32')
        # And then there's this snapshot thing... yikes mattjj!
        self._mf_param_snapshot = \
            (np.log(trans_potential), np.log(init_potential),
             likelihood_potential, normalizer)
        # Compute the variational entropy
        from pyslds.util import hmm_entropy
        params = (np.log(trans_potential), np.log(init_potential), likelihood_potential, normalizer)
        stats = (expected_states, self.expected_transcounts, normalizer)
        return hmm_entropy(params, stats)
    def meanfieldupdate(self, niter=1):
        # Standard SLDS mean-field update plus the JJ96 auxiliary-variable
        # and transition-statistics updates.
        super(_SoftmaxRecurrentSLDSStatesMeanField, self).meanfieldupdate()
        self.meanfield_update_auxiliary_vars()
        self._set_expected_trans_stats()
    def get_vlb(self, most_recently_updated=False):
        """Return the variational lower bound (ELBO) contribution."""
        # E_{q(z)}[log p(z)]
        from pyslds.util import expected_hmm_logprob
        vlb = expected_hmm_logprob(
            self.mf_pi_0, self.trans_distn.exp_expected_logpi,
            (self.expected_states, self.expected_transcounts, self._normalizer))
        # E_{q(x)}[log p(y, x | z)] is given by aBl
        # To get E_{q(x)}[ aBl ] we multiply and sum
        vlb += np.sum(self.expected_states * self.mf_aBl)
        # Add the variational entropy
        vlb += self._variational_entropy
        return vlb
    def _init_mf_from_gibbs(self):
        # NOTE(review): super() is invoked from _SoftmaxRecurrentSLDSStatesBase,
        # deliberately skipping that class's own override in the MRO.
        super(_SoftmaxRecurrentSLDSStatesBase, self)._init_mf_from_gibbs()
        self.meanfield_update_auxiliary_vars()
        self.expected_joints = self.expected_states[:-1, :, None] * self.expected_states[1:, None, :]
        self._mf_param_snapshot = \
            (self.trans_distn.expected_logpi, np.log(self.mf_pi_0),
             self.mf_aBl, self._normalizer)
        self._set_expected_trans_stats()
class _SoftmaxRecurrentSLDSStatesVBEM(_SoftmaxRecurrentSLDSStatesBase):
    """
    Variational EM (point estimates of the transition parameters) under
    the JJ96 lower bound; mirrors the mean-field class but uses the
    transition distribution's current W and logpi directly rather than
    their posterior expectations.
    """
    def vb_E_step(self):
        # E-step: update q(z), q(x), then the JJ96 auxiliary variables
        # and the expected transition statistics.
        H_z = self.vb_E_step_discrete_states()
        H_x = self.vb_E_step_gaussian_states()
        self.vbem_update_auxiliary_vars()
        self._set_expected_trans_stats()
        self._variational_entropy = H_z + H_x
    ### Updates for q(x)
    @property
    def vbem_info_rec_params(self):
        """
        Compute the recurrent contributions (J_rec, h_rec) to the
        Gaussian information-form potentials on x.
        """
        E_z = self.expected_states
        W = self.trans_distn.W
        # per-class outer products w_k w_k^T
        WWT = np.array([np.outer(wk, wk) for wk in W.T])
        logpi = self.trans_distn.logpi
        logpi_WT = np.array([np.outer(lpk, wk) for lpk, wk in zip(logpi.T, W.T)])
        # Eq (24) 2 * E[ W diag(lambda(b_t)) W^\trans ]
        J_rec = np.zeros((self.T, self.D_latent, self.D_latent))
        np.einsum('tk, kij -> tij', 2 * self.lambda_bs, WWT, out=J_rec[:-1])
        # Eq (25)
        h_rec = np.zeros((self.T, self.D_latent))
        h_rec[:-1] += E_z[1:].dot(W.T)
        h_rec[:-1] += -1 * (0.5 - 2 * self.a[:,None] * self.lambda_bs).dot(W.T)
        h_rec[:-1] += -2 * np.einsum('ti, tj, jid -> td', E_z[:-1], self.lambda_bs, logpi_WT)
        return J_rec, h_rec
    @property
    def vbem_info_emission_params(self):
        """
        Fold the recurrent potentials into the standard emission
        potentials.
        """
        J_node, h_node, log_Z_node = \
            super(_SoftmaxRecurrentSLDSStatesVBEM, self). \
                vbem_info_emission_params
        J_rec, h_rec = self.vbem_info_rec_params
        return J_node + J_rec, h_node + h_rec, log_Z_node
    @property
    def vbem_aBl(self):
        aBl = super(_SoftmaxRecurrentSLDSStatesVBEM, self).vbem_aBl
        # Add in node potentials from transitions
        aBl += self._vbem_aBl_rec
        return aBl
    @property
    def _vbem_aBl_rec(self):
        # Compute the extra node *log* potentials from the transition model
        aBl = np.zeros((self.T, self.num_states))
        # Eq (34): \psi_{t+1}^{rec} = E [ x_t^\trans W(\theta) ]
        E_x = self.smoothed_mus
        W = self.trans_distn.W
        logpi = self.trans_distn.logpi
        logpi_WT = np.array([np.outer(lpk, wk) for lpk, wk in zip(logpi.T, W.T)])
        logpisq = logpi**2
        aBl[1:] += E_x[:-1].dot(W)
        # Eq (36) transpose:
        #   -2 E[ x_t W \diag(\lambda(b_t) \log \pi^\trans ]
        aBl[:-1] += -2 * np.einsum('td, kid, tk -> ti', E_x[:-1], logpi_WT, self.lambda_bs)
        # Eq (37) transpose:
        #  (1/2 - 2 a_t \lambda(b_t)^\trans E[ \log \pi^\trans]
        a, bs = self.a, self.bs
        aBl[:-1] += -1 * (0.5 - 2*a[:,None] * self.lambda_bs).dot(logpi.T)
        # Eq (38)
        aBl[:-1] += -1 * self.lambda_bs.dot(logpisq.T)
        return aBl
    ### Updates for auxiliary variables of JJ96 bound (a and bs)
    def vbem_update_auxiliary_vars(self, n_iter=10):
        """
        Update the JJ96 auxiliary variables a and bs via block
        coordinate updates (n_iter sweeps).
        """
        K = self.num_states
        E_z = self.expected_states
        # NOTE(review): this renormalizes expected_states in place
        E_z /= E_z.sum(1, keepdims=True)
        E_x = self.smoothed_mus
        E_xxT = self.smoothed_sigmas + E_x[:,:,None] * E_x[:,None,:]
        logpi = self.trans_distn.logpi
        W = self.trans_distn.W
        WWT = np.array([np.outer(wk, wk) for wk in W.T])
        logpi_WT = np.array([np.outer(lpk, wk) for lpk, wk in zip(logpi.T, W.T)])
        logpi_sq = logpi**2
        # Compute m_{tk} = E[v_{tk}]
        m = E_z[:-1].dot(logpi) + E_x[:-1].dot(W)
        # Compute s_{tk} = E[v_{tk}^2]
        # E[v_{tk}^2] = e_k^T E[\psi_1 + \psi_2 + \psi_3] e_k where
        # e_k^T \psi_1 e_k =
        #   = Tr(E[z_t z_t^T p_k p_k^T])  with p_k = P[:,k] (kth col of trans matrix)
        #   = Tr(diag(E[z_t]) \dot E[p_k p_k^T] )
        #   = Tr(A^T \dot B) with A = A^T = diag(E[z_t]), B = E[p_k p_k^T]
        #   = \sum_{ij} A_{ij} * B_{ij}
        #   = \sum_{i} E[z_{t,i}] * E[p_{ik}^2]
        psi_1 = E_z[:-1].dot(logpi_sq)
        # e_k^T \psi_2 e_k =
        #   = 2e_k^T E[W^T x_t z_t^T log pi] e_k
        # \psi_2 = 2 diag*(E[W^T x_t z_t^T log pi])
        #        = 2 E[(x_t^T W) * (z_t^T log pi)]
        # psi_2 = 2 * E_x[:-1].dot(E_W) * E_z[:-1].dot(E_logpi)
        psi_2 = 2 * np.einsum('td, ti, kid -> tk', E_x[:-1], E_z[:-1], logpi_WT)
        # e_k^T \psi_3 e_k =
        #   =Tr(E[x_t x_t^T w_k w_k^T])  with w_k = W[:,k]  (kth col of weight matrix)
        #   = Tr(E[x_t x_t^T] \dot E[w_k w_k^T])
        #   = Tr(A^T \dot B) with A = A^T = E[x_t x_t^T]), B = E[w_k w_k^T]
        #   = \sum_{ij} A_{ij} * B_{ij}
        psi_3 = np.einsum('tij, kij -> tk', E_xxT[:-1], WWT)
        # s_{tk} = E[v_{tk}^2]
        s = psi_1 + psi_2 + psi_3
        assert np.all(s >= 0)
        for itr in range(n_iter):
            lambda_bs = self.lambda_bs
            # Eq (42)
            self.a = 2 * (m * lambda_bs).sum(axis=1) + K / 2.0 - 1.0
            self.a /= 2 * lambda_bs.sum(axis=1)
            # Eq (43)
            self.bs = np.sqrt(s - 2 * m * self.a[:, None] + self.a[:, None] ** 2)
    def vb_E_step_discrete_states(self):
        """
        Override the discrete state updates in pyhsmm to keep the necessary suff stats.

        Returns the variational entropy of q(z).
        """
        self.clear_caches()
        # Run the message passing algorithm
        trans_potential = np.exp(self.trans_distn.logpi)
        init_potential = self.pi_0
        likelihood_potential = self.vbem_aBl
        alphal = self._messages_forwards_log(trans_potential, init_potential, likelihood_potential)
        betal = self._messages_backwards_log(trans_potential, likelihood_potential)
        # Convert messages into expectations (normalized in a
        # numerically stable way by subtracting the per-row max)
        expected_states = alphal + betal
        expected_states -= expected_states.max(1)[:, None]
        np.exp(expected_states, out=expected_states)
        expected_states /= expected_states.sum(1)[:, None]
        Al = np.log(trans_potential)
        log_joints = alphal[:-1, :, None] + betal[1:, None, :] \
                     + likelihood_potential[1:, None, :] + Al[None, :, :]
        log_joints -= log_joints.max(axis=(1, 2), keepdims=True)
        joints = np.exp(log_joints)
        joints /= joints.sum(axis=(1, 2), keepdims=True)
        # Compute the log normalizer log p(x_{1:T} | \theta, a, b)
        normalizer = logsumexp(alphal[0] + betal[0])
        # Save expected statistics
        self.expected_states = expected_states
        self.expected_joints = joints
        self.expected_transcounts = joints.sum(0)
        self._normalizer = normalizer
        # Update the "stateseq" variable too
        self.stateseq = self.expected_states.argmax(1).astype('int32')
        # Compute the entropy
        from pyslds.util import hmm_entropy
        params = (np.log(trans_potential), np.log(init_potential), likelihood_potential, normalizer)
        stats = (expected_states, self.expected_transcounts, normalizer)
        return hmm_entropy(params, stats)
    def expected_log_joint_probability(self):
        """
        Compute E_{q(z) q(x)} [log p(z) + log p(x | z) + log p(y | x, z)]
        """
        # E_{q(z)}[log p(z)]
        # todo: fix this to computed expected VLB instead
        elp = np.dot(self.expected_states[0], np.log(self.pi_0))
        elp += np.sum(self.expected_joints * np.log(self.trans_matrix + 1e-16))
        # E_{q(x)}[log p(y, x | z)] is given by aBl
        # To get E_{q(x)}[ aBl ] we multiply and sum
        elp += np.sum(self.expected_states * self.vbem_aBl)
        return elp
    def _init_vbem_from_gibbs(self):
        # NOTE(review): super() is invoked from _SoftmaxRecurrentSLDSStatesBase
        # and calls _init_mf_from_gibbs (not a vbem-specific initializer) —
        # this mirrors the mean-field class; confirm it is intentional.
        super(_SoftmaxRecurrentSLDSStatesBase, self)._init_mf_from_gibbs()
        self.vbem_update_auxiliary_vars()
        self.expected_joints = self.expected_states[:-1, :, None] * self.expected_states[1:, None, :]
        self._set_expected_trans_stats()
class SoftmaxRecurrentSLDSStates(_SoftmaxRecurrentSLDSStatesVBEM,
                                 _SoftmaxRecurrentSLDSStatesMeanField):
    """
    Concrete states class for the softmax recurrent SLDS, combining the
    VBEM and the mean-field update rules via its two parent classes.
    """
    pass
| [
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.einsum",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"pyslds.util.expected_hmm_logprob",
"pybasicbayes.util.stats.sample_discrete",
"numpy.isfinite",
"scipy.misc.logsumexp",
"numpy.random.choice",
"rs... | [((2803, 2835), 'numpy.empty', 'np.empty', (['(T, n)'], {'dtype': '"""double"""'}), "((T, n), dtype='double')\n", (2811, 2835), True, 'import numpy as np\n'), ((5349, 5391), 'numpy.ones', 'np.ones', (['(self.T - 1, self.num_states - 1)'], {}), '((self.T - 1, self.num_states - 1))\n', (5356, 5391), True, 'import numpy as np\n'), ((6108, 6152), 'rslds.util.one_hot', 'one_hot', (['self.stateseq[:-1]', 'self.num_states'], {}), '(self.stateseq[:-1], self.num_states)\n', (6115, 6152), False, 'from rslds.util import one_hot, logistic\n'), ((6174, 6217), 'rslds.util.one_hot', 'one_hot', (['self.stateseq[1:]', 'self.num_states'], {}), '(self.stateseq[1:], self.num_states)\n', (6181, 6217), False, 'from rslds.util import one_hot, logistic\n'), ((6568, 6586), 'numpy.dot', 'np.dot', (['omega', 'CCT'], {}), '(omega, CCT)\n', (6574, 6586), True, 'import numpy as np\n'), ((7493, 7537), 'rslds.util.one_hot', 'one_hot', (['self.stateseq[:-1]', 'self.num_states'], {}), '(self.stateseq[:-1], self.num_states)\n', (7500, 7537), False, 'from rslds.util import one_hot, logistic\n'), ((7559, 7602), 'rslds.util.one_hot', 'one_hot', (['self.stateseq[1:]', 'self.num_states'], {}), '(self.stateseq[1:], self.num_states)\n', (7566, 7602), False, 'from rslds.util import one_hot, logistic\n'), ((8728, 8751), 'numpy.zeros', 'np.zeros', (['(self.T - 1,)'], {}), '((self.T - 1,))\n', (8736, 8751), True, 'import numpy as np\n'), ((8770, 8808), 'numpy.ones', 'np.ones', (['(self.T - 1, self.num_states)'], {}), '((self.T - 1, self.num_states))\n', (8777, 8808), True, 'import numpy as np\n'), ((9412, 9456), 'numpy.concatenate', 'np.concatenate', (['(E_z[:-1], E_x[:-1])'], {'axis': '(1)'}), '((E_z[:-1], E_x[:-1]), axis=1)\n', (9426, 9456), True, 'import numpy as np\n'), ((9583, 9627), 'numpy.concatenate', 'np.concatenate', (['(E_z_zp1T, E_x_zp1T)'], {'axis': '(1)'}), '((E_z_zp1T, E_x_zp1T), axis=1)\n', (9597, 9627), True, 'import numpy as np\n'), ((9741, 9772), 'numpy.zeros', 'np.zeros', (['(T - 
1, K + D, K + D)'], {}), '((T - 1, K + D, K + D))\n', (9749, 9772), True, 'import numpy as np\n'), ((10522, 10570), 'numpy.zeros', 'np.zeros', (['(self.T, self.D_latent, self.D_latent)'], {}), '((self.T, self.D_latent, self.D_latent))\n', (10530, 10570), True, 'import numpy as np\n'), ((10579, 10649), 'numpy.einsum', 'np.einsum', (['"""tk, kij -> tij"""', '(2 * self.lambda_bs)', 'E_WWT'], {'out': 'J_rec[:-1]'}), "('tk, kij -> tij', 2 * self.lambda_bs, E_WWT, out=J_rec[:-1])\n", (10588, 10649), True, 'import numpy as np\n'), ((10685, 10718), 'numpy.zeros', 'np.zeros', (['(self.T, self.D_latent)'], {}), '((self.T, self.D_latent))\n', (10693, 10718), True, 'import numpy as np\n'), ((11694, 11729), 'numpy.zeros', 'np.zeros', (['(self.T, self.num_states)'], {}), '((self.T, self.num_states))\n', (11702, 11729), True, 'import numpy as np\n'), ((14708, 14754), 'numpy.einsum', 'np.einsum', (['"""tij, kij -> tk"""', 'E_xxT[:-1]', 'E_WWT'], {}), "('tij, kij -> tk', E_xxT[:-1], E_WWT)\n", (14717, 14754), True, 'import numpy as np\n'), ((14836, 14849), 'numpy.all', 'np.all', (['(s > 0)'], {}), '(s > 0)\n', (14842, 14849), True, 'import numpy as np\n'), ((15883, 15927), 'numpy.exp', 'np.exp', (['expected_states'], {'out': 'expected_states'}), '(expected_states, out=expected_states)\n', (15889, 15927), True, 'import numpy as np\n'), ((16001, 16024), 'numpy.log', 'np.log', (['trans_potential'], {}), '(trans_potential)\n', (16007, 16024), True, 'import numpy as np\n'), ((16268, 16286), 'numpy.exp', 'np.exp', (['log_joints'], {}), '(log_joints)\n', (16274, 16286), True, 'import numpy as np\n'), ((16433, 16464), 'scipy.misc.logsumexp', 'logsumexp', (['(alphal[0] + betal[0])'], {}), '(alphal[0] + betal[0])\n', (16442, 16464), False, 'from scipy.misc import logsumexp\n'), ((17277, 17303), 'pyslds.util.hmm_entropy', 'hmm_entropy', (['params', 'stats'], {}), '(params, stats)\n', (17288, 17303), False, 'from pyslds.util import hmm_entropy\n'), ((17659, 17804), 
'pyslds.util.expected_hmm_logprob', 'expected_hmm_logprob', (['self.mf_pi_0', 'self.trans_distn.exp_expected_logpi', '(self.expected_states, self.expected_transcounts, self._normalizer)'], {}), '(self.mf_pi_0, self.trans_distn.exp_expected_logpi, (\n self.expected_states, self.expected_transcounts, self._normalizer))\n', (17679, 17804), False, 'from pyslds.util import expected_hmm_logprob\n'), ((17947, 17989), 'numpy.sum', 'np.sum', (['(self.expected_states * self.mf_aBl)'], {}), '(self.expected_states * self.mf_aBl)\n', (17953, 17989), True, 'import numpy as np\n'), ((19310, 19358), 'numpy.zeros', 'np.zeros', (['(self.T, self.D_latent, self.D_latent)'], {}), '((self.T, self.D_latent, self.D_latent))\n', (19318, 19358), True, 'import numpy as np\n'), ((19367, 19435), 'numpy.einsum', 'np.einsum', (['"""tk, kij -> tij"""', '(2 * self.lambda_bs)', 'WWT'], {'out': 'J_rec[:-1]'}), "('tk, kij -> tij', 2 * self.lambda_bs, WWT, out=J_rec[:-1])\n", (19376, 19435), True, 'import numpy as np\n'), ((19471, 19504), 'numpy.zeros', 'np.zeros', (['(self.T, self.D_latent)'], {}), '((self.T, self.D_latent))\n', (19479, 19504), True, 'import numpy as np\n'), ((20462, 20497), 'numpy.zeros', 'np.zeros', (['(self.T, self.num_states)'], {}), '((self.T, self.num_states))\n', (20470, 20497), True, 'import numpy as np\n'), ((23258, 23302), 'numpy.einsum', 'np.einsum', (['"""tij, kij -> tk"""', 'E_xxT[:-1]', 'WWT'], {}), "('tij, kij -> tk', E_xxT[:-1], WWT)\n", (23267, 23302), True, 'import numpy as np\n'), ((23384, 23398), 'numpy.all', 'np.all', (['(s >= 0)'], {}), '(s >= 0)\n', (23390, 23398), True, 'import numpy as np\n'), ((23970, 24000), 'numpy.exp', 'np.exp', (['self.trans_distn.logpi'], {}), '(self.trans_distn.logpi)\n', (23976, 24000), True, 'import numpy as np\n'), ((24419, 24463), 'numpy.exp', 'np.exp', (['expected_states'], {'out': 'expected_states'}), '(expected_states, out=expected_states)\n', (24425, 24463), True, 'import numpy as np\n'), ((24537, 24560), 'numpy.log', 'np.log', 
(['trans_potential'], {}), '(trans_potential)\n', (24543, 24560), True, 'import numpy as np\n'), ((24773, 24791), 'numpy.exp', 'np.exp', (['log_joints'], {}), '(log_joints)\n', (24779, 24791), True, 'import numpy as np\n'), ((24938, 24969), 'scipy.misc.logsumexp', 'logsumexp', (['(alphal[0] + betal[0])'], {}), '(alphal[0] + betal[0])\n', (24947, 24969), False, 'from scipy.misc import logsumexp\n'), ((25560, 25586), 'pyslds.util.hmm_entropy', 'hmm_entropy', (['params', 'stats'], {}), '(params, stats)\n', (25571, 25586), False, 'from pyslds.util import hmm_entropy\n'), ((26086, 26130), 'numpy.sum', 'np.sum', (['(self.expected_states * self.vbem_aBl)'], {}), '(self.expected_states * self.vbem_aBl)\n', (26092, 26130), True, 'import numpy as np\n'), ((1115, 1148), 'numpy.random.choice', 'np.random.choice', (['self.num_states'], {}), '(self.num_states)\n', (1131, 1148), True, 'import numpy as np\n'), ((5105, 5130), 'pypolyagamma.get_omp_num_threads', 'ppg.get_omp_num_threads', ([], {}), '()\n', (5128, 5130), True, 'import pypolyagamma as ppg\n'), ((5151, 5195), 'numpy.random.randint', 'np.random.randint', (['(2 ** 16)'], {'size': 'num_threads'}), '(2 ** 16, size=num_threads)\n', (5168, 5195), True, 'import numpy as np\n'), ((10869, 10937), 'numpy.einsum', 'np.einsum', (['"""ti, tj, jid -> td"""', 'E_z[:-1]', 'self.lambda_bs', 'E_logpi_WT'], {}), "('ti, tj, jid -> td', E_z[:-1], self.lambda_bs, E_logpi_WT)\n", (10878, 10937), True, 'import numpy as np\n'), ((12265, 12333), 'numpy.einsum', 'np.einsum', (['"""td, kid, tk -> ti"""', 'E_x[:-1]', 'E_logpi_WT', 'self.lambda_bs'], {}), "('td, kid, tk -> ti', E_x[:-1], E_logpi_WT, self.lambda_bs)\n", (12274, 12333), True, 'import numpy as np\n'), ((14288, 14350), 'numpy.einsum', 'np.einsum', (['"""td, ti, kid -> tk"""', 'E_x[:-1]', 'E_z[:-1]', 'E_logpi_WT'], {}), "('td, ti, kid -> tk', E_x[:-1], E_z[:-1], E_logpi_WT)\n", (14297, 14350), True, 'import numpy as np\n'), ((15108, 15167), 'numpy.sqrt', 'np.sqrt', (['(s - 2 * m * 
self.a[:, None] + self.a[:, None] ** 2)'], {}), '(s - 2 * m * self.a[:, None] + self.a[:, None] ** 2)\n', (15115, 15167), True, 'import numpy as np\n'), ((16905, 16928), 'numpy.log', 'np.log', (['trans_potential'], {}), '(trans_potential)\n', (16911, 16928), True, 'import numpy as np\n'), ((16930, 16952), 'numpy.log', 'np.log', (['init_potential'], {}), '(init_potential)\n', (16936, 16952), True, 'import numpy as np\n'), ((17106, 17129), 'numpy.log', 'np.log', (['trans_potential'], {}), '(trans_potential)\n', (17112, 17129), True, 'import numpy as np\n'), ((17131, 17153), 'numpy.log', 'np.log', (['init_potential'], {}), '(init_potential)\n', (17137, 17153), True, 'import numpy as np\n'), ((18432, 18452), 'numpy.log', 'np.log', (['self.mf_pi_0'], {}), '(self.mf_pi_0)\n', (18438, 18452), True, 'import numpy as np\n'), ((19651, 19717), 'numpy.einsum', 'np.einsum', (['"""ti, tj, jid -> td"""', 'E_z[:-1]', 'self.lambda_bs', 'logpi_WT'], {}), "('ti, tj, jid -> td', E_z[:-1], self.lambda_bs, logpi_WT)\n", (19660, 19717), True, 'import numpy as np\n'), ((20927, 20993), 'numpy.einsum', 'np.einsum', (['"""td, kid, tk -> ti"""', 'E_x[:-1]', 'logpi_WT', 'self.lambda_bs'], {}), "('td, kid, tk -> ti', E_x[:-1], logpi_WT, self.lambda_bs)\n", (20936, 20993), True, 'import numpy as np\n'), ((22840, 22900), 'numpy.einsum', 'np.einsum', (['"""td, ti, kid -> tk"""', 'E_x[:-1]', 'E_z[:-1]', 'logpi_WT'], {}), "('td, ti, kid -> tk', E_x[:-1], E_z[:-1], logpi_WT)\n", (22849, 22900), True, 'import numpy as np\n'), ((23657, 23716), 'numpy.sqrt', 'np.sqrt', (['(s - 2 * m * self.a[:, None] + self.a[:, None] ** 2)'], {}), '(s - 2 * m * self.a[:, None] + self.a[:, None] ** 2)\n', (23664, 23716), True, 'import numpy as np\n'), ((25389, 25412), 'numpy.log', 'np.log', (['trans_potential'], {}), '(trans_potential)\n', (25395, 25412), True, 'import numpy as np\n'), ((25414, 25436), 'numpy.log', 'np.log', (['init_potential'], {}), '(init_potential)\n', (25420, 25436), True, 'import numpy as np\n'), 
((25865, 25882), 'numpy.log', 'np.log', (['self.pi_0'], {}), '(self.pi_0)\n', (25871, 25882), True, 'import numpy as np\n'), ((1052, 1083), 'numpy.ones', 'np.ones', (['self.T'], {'dtype': 'np.int32'}), '(self.T, dtype=np.int32)\n', (1059, 1083), True, 'import numpy as np\n'), ((2728, 2754), 'numpy.ones', 'np.ones', (['T'], {'dtype': 'np.int32'}), '(T, dtype=np.int32)\n', (2735, 2754), True, 'import numpy as np\n'), ((2905, 2929), 'numpy.ones', 'np.ones', (['self.num_states'], {}), '(self.num_states)\n', (2912, 2929), True, 'import numpy as np\n'), ((4147, 4166), 'numpy.isfinite', 'np.isfinite', (['gss[t]'], {}), '(gss[t])\n', (4158, 4166), True, 'import numpy as np\n'), ((5221, 5243), 'pypolyagamma.PyPolyaGamma', 'ppg.PyPolyaGamma', (['seed'], {}), '(seed)\n', (5237, 5243), True, 'import pypolyagamma as ppg\n'), ((8881, 8898), 'rslds.util.logistic', 'logistic', (['self.bs'], {}), '(self.bs)\n', (8889, 8898), False, 'from rslds.util import one_hot, logistic\n'), ((9791, 9803), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (9800, 9803), True, 'import numpy as np\n'), ((9805, 9817), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (9814, 9817), True, 'import numpy as np\n'), ((19083, 19099), 'numpy.outer', 'np.outer', (['wk', 'wk'], {}), '(wk, wk)\n', (19091, 19099), True, 'import numpy as np\n'), ((19184, 19201), 'numpy.outer', 'np.outer', (['lpk', 'wk'], {}), '(lpk, wk)\n', (19192, 19201), True, 'import numpy as np\n'), ((20695, 20712), 'numpy.outer', 'np.outer', (['lpk', 'wk'], {}), '(lpk, wk)\n', (20703, 20712), True, 'import numpy as np\n'), ((21785, 21801), 'numpy.outer', 'np.outer', (['wk', 'wk'], {}), '(wk, wk)\n', (21793, 21801), True, 'import numpy as np\n'), ((21847, 21864), 'numpy.outer', 'np.outer', (['lpk', 'wk'], {}), '(lpk, wk)\n', (21855, 21864), True, 'import numpy as np\n'), ((25929, 25962), 'numpy.log', 'np.log', (['(self.trans_matrix + 1e-16)'], {}), '(self.trans_matrix + 1e-16)\n', (25935, 25962), True, 'import numpy as np\n'), ((3504, 
3537), 'pybasicbayes.util.stats.sample_discrete', 'sample_discrete', (['A[dss[t - 1], :]'], {}), '(A[dss[t - 1], :])\n', (3519, 3537), False, 'from pybasicbayes.util.stats import sample_discrete\n'), ((3950, 3977), 'numpy.argmax', 'np.argmax', (['A[dss[t - 1], :]'], {}), '(A[dss[t - 1], :])\n', (3959, 3977), True, 'import numpy as np\n'), ((4064, 4125), 'numpy.hstack', 'np.hstack', (['(gss[t - 1][None, :], self.inputs[t - 1][None, :])'], {}), '((gss[t - 1][None, :], self.inputs[t - 1][None, :]))\n', (4073, 4125), True, 'import numpy as np\n'), ((12070, 12081), 'numpy.diag', 'np.diag', (['Pk'], {}), '(Pk)\n', (12077, 12081), True, 'import numpy as np\n'), ((13329, 13340), 'numpy.diag', 'np.diag', (['Pk'], {}), '(Pk)\n', (13336, 13340), True, 'import numpy as np\n'), ((3692, 3753), 'numpy.hstack', 'np.hstack', (['(gss[t - 1][None, :], self.inputs[t - 1][None, :])'], {}), '((gss[t - 1][None, :], self.inputs[t - 1][None, :]))\n', (3701, 3753), True, 'import numpy as np\n'), ((6456, 6472), 'numpy.outer', 'np.outer', (['cp', 'cp'], {}), '(cp, cp)\n', (6464, 6472), True, 'import numpy as np\n')] |
import numpy as np
from proteus import Domain, Context, Comm
from proteus.mprans import SpatialTools as st
import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow
import proteus.TwoPhaseFlow.utils.Parameters as Parameters
from proteus import WaveTools as wt
from proteus.Profiling import logEvent
from proteus.mbd import CouplingFSI as fsi
import os
import pychrono
# Fluid properties (assigned below to SystemPhysics: *_0 is water, *_1 is air).
rho_0 = 998.2     # water density
nu_0 = 1.004e-6   # water kinematic viscosity
rho_1 = 1.205     # air density
nu_1 = 1.5e-5     # air kinematic viscosity
sigma_01 = 0.     # surface tension coefficient (disabled)
he = 0.2          # characteristic mesh element size
tank_dim = [1., 1., 1.]  # tank extents in x, y, z
water_level = 0.5        # still-water elevation along z
genMesh = False          # False: reuse the pre-generated mesh files (see MeshOptions below)
rhor = 0.5               # body density as a fraction of the water density
# ---------------------------------------------------------------------------
# Domain
# All geometrical options go here (but not mesh options)
domain = Domain.PiecewiseLinearComplexDomain()
# ----- SHAPES ----- #
# TANK
tank = st.Tank3D(domain, tank_dim)
# CAISSON: cube of side 2*radius, centred in plan view, with its barycenter
# placed radius/10 above the still-water level.
radius = 0.1
caisson = st.Cuboid(domain,
          dim=[2*radius, 2*radius, 2*radius],
          coords=(tank_dim[0]/2., tank_dim[1]/2., water_level+radius/10.),
          barycenter=(tank_dim[0]/2., tank_dim[1]/2., water_level+radius/10.))
# Register a hole point at the barycenter so the mesher treats the caisson
# interior as empty rather than filling it with fluid elements.
caisson.setHoles([caisson.barycenter])
caisson.holes_ind = np.array([0])
# let gmsh know that the caisson is IN the tank
tank.setChildShape(caisson, 0)
# ---------------------------------------------------------------------------
# Boundary Conditions
tank.BC['z+'].setAtmosphere()   # open top
tank.BC['z-'].setFreeSlip()     # free-slip bottom
tank.BC['y+'].setFreeSlip()     # free-slip side walls
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()
# No-slip on every face of the floating body.
for tag, bc in caisson.BC.items():
    bc.setNoSlip()
# Fix the mesh nodes on all tank boundaries (used by the moving-mesh model).
for tag, bc in tank.BC.items():
    bc.setFixedNodes()
# ---------------------------------------------------------------------------
# Initial Conditions
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
smoothing = 1.5 * he  # interface-smoothing length scale
nd = domain.nd        # number of space dimensions
class P_IC:
    """Hydrostatic pressure initial condition for the two-phase column.

    Integrates the density profile (water below, air above, blended by a
    smoothed Heaviside over ``smoothing``) from the tank top down to ``x``.
    """
    def uOfXT(self, x, t):
        # Signed distances to the free surface at the tank top and at x.
        surface_phi = tank_dim[nd - 1] - water_level
        local_phi = x[nd - 1] - water_level
        # Smoothed-Heaviside integrals account for the water/air density jump.
        heaviside_term = (smoothedHeaviside_integral(smoothing, surface_phi)
                          - smoothedHeaviside_integral(smoothing, local_phi))
        column_weight = rho_0 * (surface_phi - local_phi) + (rho_1 - rho_0) * heaviside_term
        # Reference (atmospheric) pressure at the tank top is zero.
        return 0.0 - g[nd - 1] * column_weight
class Zero_IC:
    """Initial condition that is identically zero everywhere."""
    def uOfXT(self, x, t):
        # Constant field: independent of position and time.
        return 0.0
class U_IC:
    """Initial x-velocity: fluid starts at rest."""
    def uOfXT(self, x, t):
        return 0.0
class V_IC:
    """Initial y-velocity: fluid starts at rest."""
    def uOfXT(self, x, t):
        return 0.0
class W_IC:
    """Initial z-velocity: fluid starts at rest."""
    def uOfXT(self, x, t):
        return 0.0
class VF_IC:
    """Volume-of-fluid initial condition: smoothed 0/1 air fraction."""
    def uOfXT(self, x, t):
        signed_distance = x[nd - 1] - water_level
        return smoothedHeaviside(smoothing, signed_distance)
class PHI_IC:
    """Level-set initial condition: signed distance to the free surface."""
    def uOfXT(self, x, t):
        # Negative below the still-water level, positive above it.
        return x[nd - 1] - water_level
# ---------------------------------------------------------------------------
# Chrono
# System: multibody solver coupled to the flow via the FSI layer.
g = np.array([0., 0., -9.81])  # gravity vector
system = fsi.ProtChSystem()
system.ChSystem.Set_G_acc(pychrono.ChVectorD(g[0], g[1], g[2]))
system.setTimeStep(1e-5)  # Chrono internal time step
#system.setCouplingScheme("CSS", prediction="backwardEuler")
# Body: rigid body bound to the caisson geometry.
body = fsi.ProtChBody(system=system)
body.attachShape(caisson)
#body.Aij_factor = 1/width
chbod = body.ChBody
x, y, z = caisson.barycenter
pos = pychrono.ChVectorD(x, y, z)
# Mass = cube volume * water density * relative density ratio.
mass = (2.*radius)**3*rho_0*rhor
inertia = pychrono.ChVectorD(1., 1., 1.)
chbod.SetPos(pos)
chbod.SetMass(mass)
chbod.SetInertiaXX(inertia)
#chbod.SetBodyFixed(True)
# All translational and rotational degrees of freedom left free.
body.setConstraints(free_x=np.array([1.,1.,1.]), free_r=np.array([1.,1.,1.]))
# body.setInitialRot(rotation_init)
# body.rotation_init=np.array([np.cos(ang/2.), 0., 0., np.sin(ang/2.)*1.])
body.setRecordValues(all_values=True)
# ---------------------------------------------------------------------------
# Mesh Options
domain.MeshOptions.use_gmsh = genMesh
domain.MeshOptions.genMesh = genMesh
he = he  # NOTE(review): no-op self-assignment; safe to delete
domain.MeshOptions.he = he
# Pre-generated mesh files live next to this module.
modulepath = os.path.dirname(os.path.abspath(__file__))
mesh_fileprefix=modulepath+'/meshFloatingCube'
domain.MeshOptions.setOutputFiles(mesh_fileprefix)
st.assembleDomain(domain)
domain.use_gmsh = False
domain.geofile = mesh_fileprefix
# ---------------------------------------------------------------------------
# Numerics: assemble the two-phase flow problem.
myTpFlowProblem = TpFlow.TwoPhaseFlowProblem()
# Time stepping / output control.
myTpFlowProblem.outputStepping.final_time = 0.1
myTpFlowProblem.outputStepping.dt_init = 0.01
myTpFlowProblem.outputStepping.dt_output = 0.1
myTpFlowProblem.outputStepping.dt_fixed = 0.01
myTpFlowProblem.outputStepping.archiveAllSteps = True
myTpFlowProblem.domain = domain
myTpFlowProblem.SystemNumerics.useSuperlu=False
myTpFlowProblem.SystemNumerics.cfl=0.9
# Model stack: elastic mesh motion + default two-phase models + added mass.
myTpFlowProblem.SystemPhysics.setDefaults()
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelMoveMeshElastic,'move')
myTpFlowProblem.SystemPhysics.useDefaultModels()
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelAddedMass,'addedMass')
myTpFlowProblem.SystemPhysics.movingDomain = True
# line below needed for relaxation zones
# (!) hack
m = myTpFlowProblem.SystemPhysics.modelDict
m['flow'].auxiliaryVariables += domain.auxiliaryVariables['twp']
params = myTpFlowProblem.SystemPhysics
# Initial conditions for every model in the stack (classes defined above).
myTpFlowProblem.SystemPhysics.modelDict['move'].p.initialConditions['hx']=Zero_IC()
myTpFlowProblem.SystemPhysics.modelDict['move'].p.initialConditions['hy']=Zero_IC()
myTpFlowProblem.SystemPhysics.modelDict['move'].p.initialConditions['hz']=Zero_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['p']=P_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['u']=U_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['v']=V_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['w']=W_IC()
myTpFlowProblem.SystemPhysics.modelDict['vof'].p.initialConditions['vof']=VF_IC()
myTpFlowProblem.SystemPhysics.modelDict['ncls'].p.initialConditions['phi']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['rdls'].p.initialConditions['phid']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['mcorr'].p.initialConditions['phiCorr']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['addedMass'].p.initialConditions['addedMass']=Zero_IC()
# PHYSICAL PARAMETERS
params['rho_0'] = rho_0  # water
params['rho_1'] = rho_1  # air
params['nu_0'] = nu_0  # water
params['nu_1'] = nu_1  # air
params['gravity'] = np.array(g)
params['surf_tension_coeff'] = sigma_01
# Couple the Chrono system into the flow and added-mass models.
m['flow'].auxiliaryVariables += [system]
m['flow'].p.coefficients.eb_bc_penalty_constant = 10.#/nu_0#Re
m['addedMass'].auxiliaryVariables += [system.ProtChAddedMass]
m['flow'].p.coefficients.NONCONSERVATIVE_FORM=0.0
# Build a 0/1 mask over all boundary flags marking which flags belong to a
# rigid body, as required by the added-mass model.
# (Removed a dead `max_flag = 0` assignment that was immediately overwritten.)
max_flag = max(domain.vertexFlags)
max_flag = max(domain.segmentFlags+[max_flag])
max_flag = max(domain.facetFlags+[max_flag])
flags_rigidbody = np.zeros(max_flag+1, dtype='int32')
for s in system.subcomponents:
    # NOTE(review): exact type check (not isinstance) — subclasses of
    # ProtChBody would be skipped; confirm this is intended.
    if type(s) is fsi.ProtChBody:
        for i in s.boundaryFlags:
            flags_rigidbody[i] = 1
m['addedMass'].p.coefficients.flags_rigidbody = flags_rigidbody
| [
"proteus.Domain.PiecewiseLinearComplexDomain",
"os.path.abspath",
"proteus.mprans.SpatialTools.Tank3D",
"proteus.mbd.CouplingFSI.ProtChSystem",
"proteus.mprans.SpatialTools.Cuboid",
"pychrono.ChVectorD",
"proteus.mprans.SpatialTools.assembleDomain",
"numpy.zeros",
"proteus.ctransportCoefficients.smo... | [((787, 824), 'proteus.Domain.PiecewiseLinearComplexDomain', 'Domain.PiecewiseLinearComplexDomain', ([], {}), '()\n', (822, 824), False, 'from proteus import Domain, Context, Comm\n'), ((864, 891), 'proteus.mprans.SpatialTools.Tank3D', 'st.Tank3D', (['domain', 'tank_dim'], {}), '(domain, tank_dim)\n', (873, 891), True, 'from proteus.mprans import SpatialTools as st\n'), ((926, 1154), 'proteus.mprans.SpatialTools.Cuboid', 'st.Cuboid', (['domain'], {'dim': '[2 * radius, 2 * radius, 2 * radius]', 'coords': '(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0)', 'barycenter': '(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0)'}), '(domain, dim=[2 * radius, 2 * radius, 2 * radius], coords=(\n tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0),\n barycenter=(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius /\n 10.0))\n', (935, 1154), True, 'from proteus.mprans import SpatialTools as st\n'), ((1233, 1246), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1241, 1246), True, 'import numpy as np\n'), ((3741, 3768), 'numpy.array', 'np.array', (['[0.0, 0.0, -9.81]'], {}), '([0.0, 0.0, -9.81])\n', (3749, 3768), True, 'import numpy as np\n'), ((3776, 3794), 'proteus.mbd.CouplingFSI.ProtChSystem', 'fsi.ProtChSystem', ([], {}), '()\n', (3792, 3794), True, 'from proteus.mbd import CouplingFSI as fsi\n'), ((3959, 3988), 'proteus.mbd.CouplingFSI.ProtChBody', 'fsi.ProtChBody', ([], {'system': 'system'}), '(system=system)\n', (3973, 3988), True, 'from proteus.mbd import CouplingFSI as fsi\n'), ((4097, 4124), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['x', 'y', 'z'], {}), '(x, y, z)\n', (4115, 4124), False, 'import pychrono\n'), ((4168, 4201), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0)\n', (4186, 4201), False, 'import pychrono\n'), ((5120, 5145), 'proteus.mprans.SpatialTools.assembleDomain', 'st.assembleDomain', (['domain'], 
{}), '(domain)\n', (5137, 5145), True, 'from proteus.mprans import SpatialTools as st\n'), ((5459, 5487), 'proteus.TwoPhaseFlow.TwoPhaseFlowProblem.TwoPhaseFlowProblem', 'TpFlow.TwoPhaseFlowProblem', ([], {}), '()\n', (5485, 5487), True, 'import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow\n'), ((7569, 7580), 'numpy.array', 'np.array', (['g'], {}), '(g)\n', (7577, 7580), True, 'import numpy as np\n'), ((7996, 8033), 'numpy.zeros', 'np.zeros', (['(max_flag + 1)'], {'dtype': '"""int32"""'}), "(max_flag + 1, dtype='int32')\n", (8004, 8033), True, 'import numpy as np\n'), ((3821, 3857), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['g[0]', 'g[1]', 'g[2]'], {}), '(g[0], g[1], g[2])\n', (3839, 3857), False, 'import pychrono\n'), ((4995, 5020), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5010, 5020), False, 'import os\n'), ((3431, 3484), 'proteus.ctransportCoefficients.smoothedHeaviside', 'smoothedHeaviside', (['smoothing', '(x[nd - 1] - water_level)'], {}), '(smoothing, x[nd - 1] - water_level)\n', (3448, 3484), False, 'from proteus.ctransportCoefficients import smoothedHeaviside\n'), ((4318, 4343), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4326, 4343), True, 'import numpy as np\n'), ((4347, 4372), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4355, 4372), True, 'import numpy as np\n'), ((2985, 3029), 'proteus.ctransportCoefficients.smoothedHeaviside_integral', 'smoothedHeaviside_integral', (['smoothing', 'phi_L'], {}), '(smoothing, phi_L)\n', (3011, 3029), False, 'from proteus.ctransportCoefficients import smoothedHeaviside_integral\n'), ((3078, 3120), 'proteus.ctransportCoefficients.smoothedHeaviside_integral', 'smoothedHeaviside_integral', (['smoothing', 'phi'], {}), '(smoothing, phi)\n', (3104, 3120), False, 'from proteus.ctransportCoefficients import smoothedHeaviside_integral\n')] |
def ImportData(path="u.csv"):
    """Load a two-column CSV file into a DataFrame with columns 'x' and 'y'.

    Parameters
    ----------
    path : str, optional
        Path of the CSV file to read. Defaults to ``"u.csv"`` so existing
        zero-argument callers keep their behavior.

    Returns
    -------
    pandas.DataFrame
        The file contents with the columns renamed to 'x' and 'y'.
    """
    import pandas as pd

    mydata = pd.read_csv(path)
    # Rebuild the frame so the columns are always named 'x' and 'y',
    # regardless of the header row in the source file.
    # (Removed a dead df1 built from np.random.randn that was never used.)
    return pd.DataFrame(mydata.to_numpy(), columns=list('xy'))
| [
"pandas.read_csv",
"numpy.random.randn"
] | [((81, 101), 'pandas.read_csv', 'pd.read_csv', (['"""u.csv"""'], {}), "('u.csv')\n", (92, 101), True, 'import pandas as pd\n'), ((192, 213), 'numpy.random.randn', 'np.random.randn', (['(6)', '(2)'], {}), '(6, 2)\n', (207, 213), True, 'import numpy as np\n')] |
import sys
# Debug output: show where this interpreter resolves imports from.
# The two recorded outputs below compare the path under PyCharm's debugger
# (extra pydev/cython helper entries) against a plain interpreter run.
print(sys.path)
# ['/home/lanhai/Projects/second.pytorch', '/home/lanhai/pycharm-community-2018.2.3/helpers/pydev', '/home/lanhai/Projects/second.pytorch', '/home/lanhai/pycharm-community-2018.2.3/helpers/pydev', '/home/lanhai/.PyCharmCE2018.2/system/cythonExtensions', '/home/lanhai/anaconda3/envs/pytorch/lib/python37.zip', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7/lib-dynload', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7/site-packages', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7/site-packages/IPython/extensions']
# ['/home/lanhai/Projects/second.pytorch', '/home/lanhai/Projects/second.pytorch', '/home/lanhai/anaconda3/envs/pytorch/lib/python37.zip', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7/lib-dynload', '/home/lanhai/anaconda3/envs/pytorch/lib/python3.7/site-packages']
from numba import vectorize, float32
import numpy as np
import time
# Elementwise float32 addition: numba compiles this scalar body into a
# ufunc executed on the GPU (target='cuda'), mapped over whole arrays.
@vectorize([float32(float32, float32)], target='cuda')
def g(x, y):
    return x + y
def main():
    """Benchmark the GPU-vectorized addition `g` on two large float32 arrays."""
    N = 32000000
    A = np.ones(N, dtype=np.float32)
    B = np.ones(N, dtype=np.float32)
    # perf_counter measures elapsed wall-clock time, which is what matters
    # here; process_time counts only this process's CPU time and would
    # exclude any time spent blocked waiting on the GPU kernel.
    t0 = time.perf_counter()
    C = g(A, B)
    t1 = time.perf_counter()
    delta_t = t1 - t0
    print('g executed in {0} seconds'.format(delta_t))

if __name__ == '__main__':
    main()
"time.process_time",
"numpy.ones",
"numba.float32"
] | [((1298, 1326), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.float32'}), '(N, dtype=np.float32)\n', (1305, 1326), True, 'import numpy as np\n'), ((1335, 1363), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.float32'}), '(N, dtype=np.float32)\n', (1342, 1363), True, 'import numpy as np\n'), ((1374, 1393), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1391, 1393), False, 'import time\n'), ((1421, 1440), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1438, 1440), False, 'import time\n'), ((1187, 1212), 'numba.float32', 'float32', (['float32', 'float32'], {}), '(float32, float32)\n', (1194, 1212), False, 'from numba import vectorize, float32\n')] |
# MIT License
#
# Copyright (c) 2018 Capital One Services, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import filecmp
import os
import shutil
from pathlib import Path
import boto3
import numpy as np
import pandas as pd
import pytest
import snowflake.connector
import locopy
# DB-API drivers exercised by the parametrised integration tests.
DBAPIS = [snowflake.connector]
# Credentials are read from a YAML file in the user's home directory.
INTEGRATION_CREDS = str(Path.home()) + os.sep + ".locopy-sfrc"
S3_BUCKET = "locopy-integration-testing"
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
# Local fixture files shipped with the test suite.
LOCAL_FILE = os.path.join(CURR_DIR, "data", "mock_file.txt")
LOCAL_FILE_JSON = os.path.join(CURR_DIR, "data", "mock_file.json")
LOCAL_FILE_DL = os.path.join(CURR_DIR, "data", "mock_file_dl.txt")
TEST_DF = pd.read_csv(os.path.join(CURR_DIR, "data", "mock_dataframe.txt"), sep=",")
TEST_DF_2 = pd.read_csv(os.path.join(CURR_DIR, "data", "mock_dataframe_2.txt"), sep=",")
CREDS_DICT = locopy.utility.read_config_yaml(INTEGRATION_CREDS)
@pytest.fixture()
def s3_bucket():
    """Create the integration-test S3 bucket, yield a client, then tear it down."""
    session = boto3.Session(profile_name=CREDS_DICT["profile"])
    client = session.client("s3")
    client.create_bucket(Bucket=S3_BUCKET)
    yield client
    # Teardown: a bucket must be emptied before it can be deleted.
    bucket = session.resource("s3").Bucket(S3_BUCKET)
    bucket.objects.all().delete()
    bucket.delete()
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_snowflake_execute_single_rows(dbapi):
    """Execute a single-row SELECT and verify the returned dataframe."""
    expected = pd.DataFrame({"field_1": [1], "field_2": [2]})
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as conn:
        conn.execute("SELECT 1 AS field_1, 2 AS field_2 ")
        result = conn.to_dataframe()
        # Snowflake returns upper-case column names; normalise before comparing.
        result.columns = [name.lower() for name in result.columns]
        assert np.allclose(result["field_1"], expected["field_1"])
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_snowflake_execute_multiple_rows(dbapi):
    """Execute a two-row UNION SELECT and verify both columns."""
    expected = pd.DataFrame({"field_1": [1, 2], "field_2": [1, 2]})
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as conn:
        conn.execute(
            "SELECT 1 AS field_1, 1 AS field_2 UNION SELECT 2 AS field_1, 2 AS field_2"
        )
        result = conn.to_dataframe()
        # Snowflake returns upper-case column names; normalise before comparing.
        result.columns = [name.lower() for name in result.columns]
        assert np.allclose(result["field_1"], expected["field_1"])
        assert np.allclose(result["field_2"], expected["field_2"])
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_upload_download_internal(dbapi):
    """Round-trip a file through the Snowflake internal user stage.

    Uploads an uncompressed copy of LOCAL_FILE, verifies it is listed in the
    stage, downloads it back, and checks the copy is byte-identical.
    """
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as test:
        # delete if exists (leftover from a previous run)
        test.execute("REMOVE @~/staged/mock_file_dl.txt")
        # upload a throwaway copy so the original fixture file is never touched
        shutil.copy(LOCAL_FILE, LOCAL_FILE_DL)
        test.upload_to_internal(LOCAL_FILE_DL, "@~/staged/", auto_compress=False)
        test.execute("LIST @~/staged/mock_file_dl.txt")
        res = test.cursor.fetchall()
        assert res[0][0] == "staged/mock_file_dl.txt"
        test.download_from_internal(
            "@~/staged/mock_file_dl.txt", os.path.dirname(LOCAL_FILE_DL) + os.sep
        )
        # the downloaded copy must match the original byte-for-byte
        assert filecmp.cmp(LOCAL_FILE, LOCAL_FILE_DL)
        # clean up: remove the staged file and the local scratch copy
        test.execute("REMOVE @~/staged/mock_file_dl.txt")
        os.remove(LOCAL_FILE_DL)
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_copy(dbapi):
    """Upload a file to the internal stage and COPY it into a temp table."""
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as conn:
        conn.upload_to_internal(LOCAL_FILE, "@~/staged/")
        conn.execute("USE SCHEMA {}".format(CREDS_DICT["schema"]))
        conn.execute(
            "CREATE OR REPLACE TEMPORARY TABLE locopy_integration_testing (id INTEGER, variable VARCHAR(20))"
        )
        conn.copy(
            "locopy_integration_testing",
            "@~/staged/mock_file.txt.gz",
            copy_options=["PURGE = TRUE"],
        )
        conn.execute("SELECT * FROM locopy_integration_testing ORDER BY id")
        rows = conn.cursor.fetchall()
        # Non-ASCII characters verify the encoding survives the round trip.
        expected = [
            (1, "This iš line 1"),
            (2, "This is liné 2"),
            (3, "This is line 3"),
            (4, "This is lïne 4"),
        ]
        for i, row in enumerate(rows):
            assert row[0] == expected[i][0]
            assert row[1] == expected[i][1]
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_copy_json(dbapi):
    """COPY a staged JSON file into a VARIANT column and query nested fields."""
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as test:
        test.upload_to_internal(LOCAL_FILE_JSON, "@~/staged/")
        test.execute("USE SCHEMA {}".format(CREDS_DICT["schema"]))
        test.execute(
            "CREATE OR REPLACE TEMPORARY TABLE locopy_integration_testing (variable VARIANT)"
        )
        test.copy(
            "locopy_integration_testing",
            "@~/staged/mock_file.json.gz",
            file_type="json",
            copy_options=["PURGE = TRUE"],
        )
        # Path expressions (variable:location:city) extract nested JSON fields.
        test.execute(
            "SELECT variable:location:city, variable:price FROM locopy_integration_testing ORDER BY variable"
        )
        results = test.cursor.fetchall()
        # VARIANT values come back as JSON text, hence the embedded quotes.
        expected = [
            ('"Belmont"', '"92567"'),
            ('"Lexington"', '"75836"'),
            ('"Winchester"', '"89921"'),
        ]
        for i, result in enumerate(results):
            assert result[0] == expected[i][0]
            assert result[1] == expected[i][1]
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_to_dataframe(dbapi):
    """Fetch query results as a DataFrame, both in full and with a row limit."""
    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as test:
        test.upload_to_internal(LOCAL_FILE_JSON, "@~/staged/")
        test.execute("USE SCHEMA {}".format(CREDS_DICT["schema"]))
        test.execute(
            "CREATE OR REPLACE TEMPORARY TABLE locopy_integration_testing (variable VARIANT)"
        )
        test.copy(
            "locopy_integration_testing",
            "@~/staged/mock_file.json.gz",
            file_type="json",
            copy_options=["PURGE = TRUE"],
        )
        # get all rows
        test.execute(
            "SELECT variable:location:city, variable:price FROM locopy_integration_testing ORDER BY variable"
        )
        result = test.to_dataframe()
        # normalise Snowflake's upper-case column names before comparing
        result.columns = [c.lower() for c in result.columns]
        expected = pd.DataFrame(
            [('"Belmont"', '"92567"'), ('"Lexington"', '"75836"'), ('"Winchester"', '"89921"'),],
            columns=["variable:location:city", "variable:price"],
        )
        assert (result["variable:location:city"] == expected["variable:location:city"]).all()
        assert (result["variable:price"] == expected["variable:price"]).all()
        # with size of 2: only the first two rows should be returned
        test.execute(
            "SELECT variable:location:city, variable:price FROM locopy_integration_testing ORDER BY variable"
        )
        result = test.to_dataframe(size=2)
        result.columns = [c.lower() for c in result.columns]
        expected = pd.DataFrame(
            [('"Belmont"', '"92567"'), ('"Lexington"', '"75836"'),],
            columns=["variable:location:city", "variable:price"],
        )
        assert (result["variable:location:city"] == expected["variable:location:city"]).all()
        assert (result["variable:price"] == expected["variable:price"]).all()
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_insert_dataframe_to_table(dbapi):
    """Insert dataframes into new tables and verify the round-tripped rows.

    Exercises three cases: a frame with int/str/date columns (TEST_DF), a
    simple two-column frame (TEST_DF_2), and an inline frame mixing
    datetimes, numeric strings and Decimal values.
    """
    # Hoisted from the middle of the function body (PEP 8: imports at the top).
    from decimal import Decimal

    with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as test:
        test.insert_dataframe_to_table(TEST_DF, "test", create=True)
        test.execute("SELECT a, b, c FROM test ORDER BY a ASC")
        results = test.cursor.fetchall()
        test.execute("drop table if exists test")
        expected = [
            (1, "x", pd.to_datetime("2011-01-01").date()),
            (2, "y", pd.to_datetime("2001-04-02").date()),
        ]
        assert len(expected) == len(results)
        for i, result in enumerate(results):
            assert result[0] == expected[i][0]
            assert result[1] == expected[i][1]
            assert result[2] == expected[i][2]

        test.insert_dataframe_to_table(TEST_DF_2, "test_2", create=True)
        test.execute("SELECT col1, col2 FROM test_2 ORDER BY col1 ASC")
        results = test.cursor.fetchall()
        test.execute("drop table if exists test_2")
        expected = [(1, "a"), (2, "b"), (3, "c"), (4, "d"), (5, "e"), (6, "f"), (7, "g")]
        assert len(expected) == len(results)
        for i, result in enumerate(results):
            assert result[0] == expected[i][0]
            assert result[1] == expected[i][1]

        TEST_DF_3 = pd.DataFrame(
            {
                "a": [1, 2],
                "b": [pd.to_datetime("2013-01-01"), pd.to_datetime("2019-01-01")],
                "c": ["1.2", "3.5"],
                "d": [Decimal(2), Decimal(3)],
            }
        )
        test.insert_dataframe_to_table(TEST_DF_3, "test_3", create=True)
        test.execute("SELECT a, b FROM test_3 ORDER BY a ASC")
        results = test.cursor.fetchall()
        test.execute("drop table if exists test_3")
        # NOTE(review): these tuples carry four elements but only the first
        # two are compared below (the query selects columns a and b only).
        expected = [
            (1, pd.to_datetime("2013-01-01"), 1.2, 2),
            (2, pd.to_datetime("2019-01-01"), 3.5, 3),
        ]
        assert len(expected) == len(results)
        for i, result in enumerate(results):
            assert result[0] == expected[i][0]
            assert result[1] == expected[i][1]
| [
"pandas.DataFrame",
"os.path.abspath",
"os.remove",
"pathlib.Path.home",
"boto3.Session",
"decimal.Decimal",
"locopy.utility.read_config_yaml",
"numpy.allclose",
"os.path.dirname",
"pytest.fixture",
"locopy.Snowflake",
"pandas.to_datetime",
"pytest.mark.parametrize",
"filecmp.cmp",
"os.p... | [((1497, 1544), 'os.path.join', 'os.path.join', (['CURR_DIR', '"""data"""', '"""mock_file.txt"""'], {}), "(CURR_DIR, 'data', 'mock_file.txt')\n", (1509, 1544), False, 'import os\n'), ((1563, 1611), 'os.path.join', 'os.path.join', (['CURR_DIR', '"""data"""', '"""mock_file.json"""'], {}), "(CURR_DIR, 'data', 'mock_file.json')\n", (1575, 1611), False, 'import os\n'), ((1628, 1678), 'os.path.join', 'os.path.join', (['CURR_DIR', '"""data"""', '"""mock_file_dl.txt"""'], {}), "(CURR_DIR, 'data', 'mock_file_dl.txt')\n", (1640, 1678), False, 'import os\n'), ((1867, 1917), 'locopy.utility.read_config_yaml', 'locopy.utility.read_config_yaml', (['INTEGRATION_CREDS'], {}), '(INTEGRATION_CREDS)\n', (1898, 1917), False, 'import locopy\n'), ((1921, 1937), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1935, 1937), False, 'import pytest\n'), ((2219, 2259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (2242, 2259), False, 'import pytest\n'), ((2665, 2705), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (2688, 2705), False, 'import pytest\n'), ((3245, 3285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (3268, 3285), False, 'import pytest\n'), ((4091, 4131), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (4114, 4131), False, 'import pytest\n'), ((5098, 5138), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (5121, 5138), False, 'import pytest\n'), ((6174, 6214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (6197, 6214), False, 'import pytest\n'), ((8041, 8081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dbapi"""', 'DBAPIS'], {}), "('dbapi', DBAPIS)\n", (8064, 8081), False, 
'import pytest\n'), ((1457, 1482), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1472, 1482), False, 'import os\n'), ((1701, 1753), 'os.path.join', 'os.path.join', (['CURR_DIR', '"""data"""', '"""mock_dataframe.txt"""'], {}), "(CURR_DIR, 'data', 'mock_dataframe.txt')\n", (1713, 1753), False, 'import os\n'), ((1788, 1842), 'os.path.join', 'os.path.join', (['CURR_DIR', '"""data"""', '"""mock_dataframe_2.txt"""'], {}), "(CURR_DIR, 'data', 'mock_dataframe_2.txt')\n", (1800, 1842), False, 'import os\n'), ((1969, 2018), 'boto3.Session', 'boto3.Session', ([], {'profile_name': "CREDS_DICT['profile']"}), "(profile_name=CREDS_DICT['profile'])\n", (1982, 2018), False, 'import boto3\n'), ((2323, 2369), 'pandas.DataFrame', 'pd.DataFrame', (["{'field_1': [1], 'field_2': [2]}"], {}), "({'field_1': [1], 'field_2': [2]})\n", (2335, 2369), True, 'import pandas as pd\n'), ((2589, 2636), 'numpy.allclose', 'np.allclose', (["df['field_1']", "expected['field_1']"], {}), "(df['field_1'], expected['field_1'])\n", (2600, 2636), True, 'import numpy as np\n'), ((2771, 2823), 'pandas.DataFrame', 'pd.DataFrame', (["{'field_1': [1, 2], 'field_2': [1, 2]}"], {}), "({'field_1': [1, 2], 'field_2': [1, 2]})\n", (2783, 2823), True, 'import pandas as pd\n'), ((3110, 3157), 'numpy.allclose', 'np.allclose', (["df['field_1']", "expected['field_1']"], {}), "(df['field_1'], expected['field_1'])\n", (3121, 3157), True, 'import numpy as np\n'), ((3169, 3216), 'numpy.allclose', 'np.allclose', (["df['field_2']", "expected['field_2']"], {}), "(df['field_2'], expected['field_2'])\n", (3180, 3216), True, 'import numpy as np\n'), ((2379, 2422), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (2395, 2422), False, 'import locopy\n'), ((2833, 2876), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (2849, 2876), False, 'import locopy\n'), ((3338, 3381), 'locopy.Snowflake', 
'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (3354, 3381), False, 'import locopy\n'), ((3500, 3538), 'shutil.copy', 'shutil.copy', (['LOCAL_FILE', 'LOCAL_FILE_DL'], {}), '(LOCAL_FILE, LOCAL_FILE_DL)\n', (3511, 3538), False, 'import shutil\n'), ((3913, 3951), 'filecmp.cmp', 'filecmp.cmp', (['LOCAL_FILE', 'LOCAL_FILE_DL'], {}), '(LOCAL_FILE, LOCAL_FILE_DL)\n', (3924, 3951), False, 'import filecmp\n'), ((4038, 4062), 'os.remove', 'os.remove', (['LOCAL_FILE_DL'], {}), '(LOCAL_FILE_DL)\n', (4047, 4062), False, 'import os\n'), ((4164, 4207), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (4180, 4207), False, 'import locopy\n'), ((5176, 5219), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (5192, 5219), False, 'import locopy\n'), ((6255, 6298), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (6271, 6298), False, 'import locopy\n'), ((7029, 7189), 'pandas.DataFrame', 'pd.DataFrame', (['[(\'"Belmont"\', \'"92567"\'), (\'"Lexington"\', \'"75836"\'), (\'"Winchester"\',\n \'"89921"\')]'], {'columns': "['variable:location:city', 'variable:price']"}), '([(\'"Belmont"\', \'"92567"\'), (\'"Lexington"\', \'"75836"\'), (\n \'"Winchester"\', \'"89921"\')], columns=[\'variable:location:city\',\n \'variable:price\'])\n', (7041, 7189), True, 'import pandas as pd\n'), ((7681, 7807), 'pandas.DataFrame', 'pd.DataFrame', (['[(\'"Belmont"\', \'"92567"\'), (\'"Lexington"\', \'"75836"\')]'], {'columns': "['variable:location:city', 'variable:price']"}), '([(\'"Belmont"\', \'"92567"\'), (\'"Lexington"\', \'"75836"\')],\n columns=[\'variable:location:city\', \'variable:price\'])\n', (7693, 7807), True, 'import pandas as pd\n'), ((8135, 8178), 'locopy.Snowflake', 'locopy.Snowflake', ([], {'dbapi': 'dbapi'}), '(dbapi=dbapi, **CREDS_DICT)\n', (8151, 8178), False, 'import locopy\n'), ((1350, 1361), 
'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1359, 1361), False, 'from pathlib import Path\n'), ((3848, 3878), 'os.path.dirname', 'os.path.dirname', (['LOCAL_FILE_DL'], {}), '(LOCAL_FILE_DL)\n', (3863, 3878), False, 'import os\n'), ((9882, 9910), 'pandas.to_datetime', 'pd.to_datetime', (['"""2013-01-01"""'], {}), "('2013-01-01')\n", (9896, 9910), True, 'import pandas as pd\n'), ((9937, 9965), 'pandas.to_datetime', 'pd.to_datetime', (['"""2019-01-01"""'], {}), "('2019-01-01')\n", (9951, 9965), True, 'import pandas as pd\n'), ((9446, 9474), 'pandas.to_datetime', 'pd.to_datetime', (['"""2013-01-01"""'], {}), "('2013-01-01')\n", (9460, 9474), True, 'import pandas as pd\n'), ((9476, 9504), 'pandas.to_datetime', 'pd.to_datetime', (['"""2019-01-01"""'], {}), "('2019-01-01')\n", (9490, 9504), True, 'import pandas as pd\n'), ((9566, 9576), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (9573, 9576), False, 'from decimal import Decimal\n'), ((9578, 9588), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (9585, 9588), False, 'from decimal import Decimal\n'), ((8455, 8483), 'pandas.to_datetime', 'pd.to_datetime', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (8469, 8483), True, 'import pandas as pd\n'), ((8514, 8542), 'pandas.to_datetime', 'pd.to_datetime', (['"""2001-04-02"""'], {}), "('2001-04-02')\n", (8528, 8542), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
"""
Copyright (c) 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import argparse
import sys
# Command-line interface: the input image and the detection model's .xml
# description (the matching .bin weights file name is derived from it).
ap = argparse.ArgumentParser()
ap.add_argument("-i", required=True, dest="image_path", help="path to input image")
ap.add_argument("-m", required=True, dest="model_path", help="path to model's XML file")
args = ap.parse_args()  # NOTE: parses sys.argv at import time
class PixelLinkDecoder():
    """ Decoder for Intel's version of PixelLink "text-detection-0001".
    You will need OpenCV compiled with Inference Engine to use this.
    Example of usage:
    td = cv2.dnn.readNet('./text-detection-0001.xml','./text-detection-0001.bin')
    img = cv2.imread('tmp.jpg')
    blob = cv2.dnn.blobFromImage(img, 1, (1280,768))
    td.setInput(blob)
    a, b = td.forward(td.getUnconnectedOutLayersNames())
    dcd = PixelLinkDecoder()
    dcd.load(img, a, b)
    dcd.decode()  # results are in dcd.bboxes
    dcd.plot_result(img)
    """

    def __init__(self):
        # All decoder state is created in load(); nothing to do here.
        pass

    def load(self, image, pixel_scores, link_scores,
             pixel_conf_threshold=0.8, link_conf_threshold=0.8, four_neighbours=False):
        """Initialize decoder state from the raw network outputs.

        Args:
            image: original input image; only its shape (H, W) is used.
            pixel_scores: raw per-pixel text/background scores (NCHW).
            link_scores: raw 8-direction link scores (NCHW).
            pixel_conf_threshold (float): keep pixels whose softmaxed text
                probability is at least this value.
            link_conf_threshold (float): keep links whose softmaxed
                probability is at least this value.
            four_neighbours (bool): use 4- instead of 8-connectivity when
                joining pixels into components.
        """
        self.image_shape = image.shape[0:2]
        self.pixel_scores = self._set_pixel_scores(pixel_scores)
        self.link_scores = self._set_link_scores(link_scores)

        if four_neighbours:
            self._get_neighbours = self._get_neighbours_4
        else:
            self._get_neighbours = self._get_neighbours_8

        self.pixel_conf_threshold = pixel_conf_threshold
        self.link_conf_threshold = link_conf_threshold

        self.pixel_mask = self.pixel_scores >= self.pixel_conf_threshold
        self.link_mask = self.link_scores >= self.link_conf_threshold
        # (row, col) coordinates of every confident text pixel.
        self.points = list(zip(*np.where(self.pixel_mask)))
        self.h, self.w = np.shape(self.pixel_mask)
        # Union-find parent map; -1 marks a root.
        self.group_mask = dict.fromkeys(self.points, -1)
        self.bboxes = None
        self.root_map = None
        self.mask = None

    def _softmax(self, x, axis=None):
        """Numerically stable softmax along ``axis``."""
        return np.exp(x - self._logsumexp(x, axis=axis, keepdims=True))

    def _logsumexp(self, a, axis=None, b=None, keepdims=False, return_sign=False):
        """Stable log(sum(b * exp(a))) (ported from scipy.special.logsumexp)."""
        if b is not None:
            a, b = np.broadcast_arrays(a, b)
            if np.any(b == 0):
                a = a + 0.  # promote to at least float
                a[b == 0] = -np.inf

        a_max = np.amax(a, axis=axis, keepdims=True)

        if a_max.ndim > 0:
            a_max[~np.isfinite(a_max)] = 0
        elif not np.isfinite(a_max):
            a_max = 0

        if b is not None:
            b = np.asarray(b)
            tmp = b * np.exp(a - a_max)
        else:
            tmp = np.exp(a - a_max)

        # suppress warnings about log of zero
        with np.errstate(divide='ignore'):
            s = np.sum(tmp, axis=axis, keepdims=keepdims)
            if return_sign:
                sgn = np.sign(s)
                s *= sgn  # /= makes more sense but we need zero -> zero
            out = np.log(s)

        if not keepdims:
            a_max = np.squeeze(a_max, axis=axis)
        out += a_max

        if return_sign:
            return out, sgn
        else:
            return out

    def _set_pixel_scores(self, pixel_scores):
        "get softmaxed properly shaped pixel scores"
        tmp = np.transpose(pixel_scores, (0, 2, 3, 1))
        return self._softmax(tmp, axis=-1)[0, :, :, 1]

    def _set_link_scores(self, link_scores):
        "get softmaxed properly shaped links scores"
        tmp = np.transpose(link_scores, (0, 2, 3, 1))
        tmp_reshaped = tmp.reshape(tmp.shape[:-1] + (8, 2))
        return self._softmax(tmp_reshaped, axis=-1)[0, :, :, :, 1]

    def _find_root(self, point):
        """Union-find *find* with one-shot path compression."""
        root = point
        update_parent = False
        tmp = self.group_mask[root]
        # BUG FIX: was ``tmp is not -1`` -- an identity comparison against
        # an int literal that only worked via CPython's small-int cache
        # (and raises SyntaxWarning on modern Pythons).
        while tmp != -1:
            root = tmp
            tmp = self.group_mask[root]
            update_parent = True
        if update_parent:
            self.group_mask[point] = root
        return root

    def _join(self, p1, p2):
        """Union-find *union*: merge the components of p1 and p2."""
        root1 = self._find_root(p1)
        root2 = self._find_root(p2)

        if root1 != root2:
            self.group_mask[root2] = root1

    def _get_index(self, root):
        # Map each component root to a stable 1-based bbox index.
        if root not in self.root_map:
            self.root_map[root] = len(self.root_map) + 1
        return self.root_map[root]

    def _get_all(self):
        """Label every confident pixel with its component's bbox index."""
        self.root_map = {}
        self.mask = np.zeros_like(self.pixel_mask, dtype=np.int32)

        for point in self.points:
            point_root = self._find_root(point)
            bbox_idx = self._get_index(point_root)
            self.mask[point] = bbox_idx

    def _get_neighbours_8(self, x, y):
        """8-connected neighbours of (x, y) as (link_idx, nx, ny) tuples."""
        w, h = self.w, self.h
        tmp = [(0, x - 1, y - 1), (1, x, y - 1),
               (2, x + 1, y - 1), (3, x - 1, y),
               (4, x + 1, y), (5, x - 1, y + 1),
               (6, x, y + 1), (7, x + 1, y + 1)]

        return [i for i in tmp if i[1] >= 0 and i[1] < w and i[2] >= 0 and i[2] < h]

    def _get_neighbours_4(self, x, y):
        """4-connected neighbours of (x, y) as (link_idx, nx, ny) tuples."""
        w, h = self.w, self.h
        tmp = [(1, x, y - 1),
               (3, x - 1, y),
               (4, x + 1, y),
               (6, x, y + 1)]

        return [i for i in tmp if i[1] >= 0 and i[1] < w and i[2] >= 0 and i[2] < h]

    def _mask_to_bboxes(self, min_area=300, min_height=10):
        """Convert the component-label mask into rotated bounding boxes.

        Components smaller than ``min_area`` or thinner than ``min_height``
        (in original-image pixels) are discarded.
        """
        image_h, image_w = self.image_shape
        self.bboxes = []
        max_bbox_idx = self.mask.max()
        # Upscale the (low-res) label mask back to the input image size.
        mask_tmp = cv2.resize(self.mask, (image_w, image_h), interpolation=cv2.INTER_NEAREST)

        for bbox_idx in range(1, max_bbox_idx + 1):
            bbox_mask = mask_tmp == bbox_idx
            # NOTE: two-value unpacking matches the OpenCV 4.x API.
            cnts, _ = cv2.findContours(bbox_mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            if len(cnts) == 0:
                continue
            cnt = cnts[0]
            rect, w, h = self._min_area_rect(cnt)
            if min(w, h) < min_height:
                continue
            if w * h < min_area:
                continue
            self.bboxes.append(self._order_points(rect))

    def _min_area_rect(self, cnt):
        """Return (corner points, width, height) of the min-area rect of cnt."""
        rect = cv2.minAreaRect(cnt)
        w, h = rect[1]
        box = cv2.boxPoints(rect)
        # BUG FIX: np.int0 was an alias for np.intp removed in NumPy 2.0.
        box = box.astype(np.intp)
        return box, w, h

    def _order_points(self, rect):
        """ (x, y)
        Order: TL, TR, BR, BL
        """
        tmp = np.zeros_like(rect)
        sums = rect.sum(axis=1)
        tmp[0] = rect[np.argmin(sums)]  # top-left has the smallest x+y
        tmp[2] = rect[np.argmax(sums)]  # bottom-right has the largest x+y
        diff = np.diff(rect, axis=1)
        tmp[1] = rect[np.argmin(diff)]  # top-right has the smallest y-x
        tmp[3] = rect[np.argmax(diff)]  # bottom-left has the largest y-x
        return tmp

    def decode(self):
        """Join linked text pixels into components and extract bboxes."""
        for point in self.points:
            y, x = point
            neighbours = self._get_neighbours(x, y)
            for n_idx, nx, ny in neighbours:
                link_value = self.link_mask[y, x, n_idx]
                pixel_cls = self.pixel_mask[ny, nx]
                if link_value and pixel_cls:
                    self._join(point, (ny, nx))

        self._get_all()
        self._mask_to_bboxes()

    def plot_result(self, image):
        """Draw the detected boxes on a copy of ``image`` and display them."""
        img_tmp = image.copy()
        for box in self.bboxes:
            cv2.drawContours(img_tmp, [box], 0, (0, 0, 255), 2)
        cv2.imshow('Detected text', img_tmp)
        # BUG FIX: a stray second ``if cv2.waitKey():`` required two key
        # presses before the window closed; one wait is enough.
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def main():
    """Run PixelLink text detection on the image given on the command line."""
    if args.model_path.endswith('.xml'):
        # The IR weights file sits next to the XML: 'foo.xml' -> 'foo.bin'.
        td = cv2.dnn.readNet(args.model_path, args.model_path[:-3] + 'bin')
    else:
        print("Not valid model's XML file name (should be something like 'foo.xml')")
        # BUG FIX: exit with a non-zero status on error (bare sys.exit()
        # exits with status 0, i.e. success).
        sys.exit(1)
    img = cv2.imread(args.image_path)
    blob = cv2.dnn.blobFromImage(img, 1, (1280, 768))
    td.setInput(blob)
    a, b = td.forward(td.getUnconnectedOutLayersNames())
    dcd = PixelLinkDecoder()
    dcd.load(img, a, b)
    dcd.decode()  # results are in dcd.bboxes
    dcd.plot_result(img)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status (0 if None).
    sys.exit(main() or 0)
| [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.argmin",
"numpy.shape",
"cv2.boxPoints",
"numpy.exp",
"cv2.minAreaRect",
"cv2.imshow",
"numpy.zeros_like",
"cv2.dnn.blobFromImage",
"numpy.transpose",
"numpy.isfinite",
"cv2.drawContours",
"numpy.broadcast_arrays",
"cv2.des... | [((634, 659), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (657, 659), False, 'import argparse\n'), ((8143, 8170), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (8153, 8170), False, 'import cv2\n'), ((8182, 8224), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['img', '(1)', '(1280, 768)'], {}), '(img, 1, (1280, 768))\n', (8203, 8224), False, 'import cv2\n'), ((2357, 2382), 'numpy.shape', 'np.shape', (['self.pixel_mask'], {}), '(self.pixel_mask)\n', (2365, 2382), True, 'import numpy as np\n'), ((2927, 2963), 'numpy.amax', 'np.amax', (['a'], {'axis': 'axis', 'keepdims': '(True)'}), '(a, axis=axis, keepdims=True)\n', (2934, 2963), True, 'import numpy as np\n'), ((3852, 3892), 'numpy.transpose', 'np.transpose', (['pixel_scores', '(0, 2, 3, 1)'], {}), '(pixel_scores, (0, 2, 3, 1))\n', (3864, 3892), True, 'import numpy as np\n'), ((4061, 4100), 'numpy.transpose', 'np.transpose', (['link_scores', '(0, 2, 3, 1)'], {}), '(link_scores, (0, 2, 3, 1))\n', (4073, 4100), True, 'import numpy as np\n'), ((4969, 5015), 'numpy.zeros_like', 'np.zeros_like', (['self.pixel_mask'], {'dtype': 'np.int32'}), '(self.pixel_mask, dtype=np.int32)\n', (4982, 5015), True, 'import numpy as np\n'), ((6006, 6080), 'cv2.resize', 'cv2.resize', (['self.mask', '(image_w, image_h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(self.mask, (image_w, image_h), interpolation=cv2.INTER_NEAREST)\n', (6016, 6080), False, 'import cv2\n'), ((6648, 6668), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (6663, 6668), False, 'import cv2\n'), ((6706, 6725), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (6719, 6725), False, 'import cv2\n'), ((6740, 6752), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (6747, 6752), True, 'import numpy as np\n'), ((6893, 6912), 'numpy.zeros_like', 'np.zeros_like', (['rect'], {}), '(rect)\n', (6906, 6912), True, 'import numpy as np\n'), ((7038, 7059), 'numpy.diff', 
'np.diff', (['rect'], {'axis': '(1)'}), '(rect, axis=1)\n', (7045, 7059), True, 'import numpy as np\n'), ((7764, 7800), 'cv2.imshow', 'cv2.imshow', (['"""Detected text"""', 'img_tmp'], {}), "('Detected text', img_tmp)\n", (7774, 7800), False, 'import cv2\n'), ((7809, 7823), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7820, 7823), False, 'import cv2\n'), ((7835, 7848), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (7846, 7848), False, 'import cv2\n'), ((7954, 8016), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['args.model_path', "(args.model_path[:-3] + 'bin')"], {}), "(args.model_path, args.model_path[:-3] + 'bin')\n", (7969, 8016), False, 'import cv2\n'), ((8122, 8132), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8130, 8132), False, 'import sys\n'), ((2761, 2786), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['a', 'b'], {}), '(a, b)\n', (2780, 2786), True, 'import numpy as np\n'), ((2802, 2816), 'numpy.any', 'np.any', (['(b == 0)'], {}), '(b == 0)\n', (2808, 2816), True, 'import numpy as np\n'), ((3137, 3150), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (3147, 3150), True, 'import numpy as np\n'), ((3223, 3240), 'numpy.exp', 'np.exp', (['(a - a_max)'], {}), '(a - a_max)\n', (3229, 3240), True, 'import numpy as np\n'), ((3301, 3329), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (3312, 3329), True, 'import numpy as np\n'), ((3347, 3388), 'numpy.sum', 'np.sum', (['tmp'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(tmp, axis=axis, keepdims=keepdims)\n', (3353, 3388), True, 'import numpy as np\n'), ((3541, 3550), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (3547, 3550), True, 'import numpy as np\n'), ((3597, 3625), 'numpy.squeeze', 'np.squeeze', (['a_max'], {'axis': 'axis'}), '(a_max, axis=axis)\n', (3607, 3625), True, 'import numpy as np\n'), ((6967, 6982), 'numpy.argmin', 'np.argmin', (['sums'], {}), '(sums)\n', (6976, 6982), True, 'import numpy as np\n'), ((7006, 7021), 'numpy.argmax', 'np.argmax', 
(['sums'], {}), '(sums)\n', (7015, 7021), True, 'import numpy as np\n'), ((7082, 7097), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (7091, 7097), True, 'import numpy as np\n'), ((7121, 7136), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (7130, 7136), True, 'import numpy as np\n'), ((7704, 7755), 'cv2.drawContours', 'cv2.drawContours', (['img_tmp', '[box]', '(0)', '(0, 0, 255)', '(2)'], {}), '(img_tmp, [box], 0, (0, 0, 255), 2)\n', (7720, 7755), False, 'import cv2\n'), ((7862, 7885), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7883, 7885), False, 'import cv2\n'), ((3052, 3070), 'numpy.isfinite', 'np.isfinite', (['a_max'], {}), '(a_max)\n', (3063, 3070), True, 'import numpy as np\n'), ((3173, 3190), 'numpy.exp', 'np.exp', (['(a - a_max)'], {}), '(a - a_max)\n', (3179, 3190), True, 'import numpy as np\n'), ((3439, 3449), 'numpy.sign', 'np.sign', (['s'], {}), '(s)\n', (3446, 3449), True, 'import numpy as np\n'), ((2304, 2329), 'numpy.where', 'np.where', (['self.pixel_mask'], {}), '(self.pixel_mask)\n', (2312, 2329), True, 'import numpy as np\n'), ((3011, 3029), 'numpy.isfinite', 'np.isfinite', (['a_max'], {}), '(a_max)\n', (3022, 3029), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import logging
import numpy as np
import utool as ut
import pandas as pd
import itertools as it
import networkx as nx
import vtool as vt
from os.path import join # NOQA
from wbia.algo.graph import nx_utils as nxu
from wbia.algo.graph.nx_utils import e_
from wbia.algo.graph.state import POSTV, NEGTV, INCMP, UNREV # NOQA
from concurrent import futures
import tqdm
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def _cm_breaking_worker(cm_list, review_cfg={}, scoring='annot'):
ranks_top = review_cfg.get('ranks_top', None)
ranks_bot = review_cfg.get('ranks_bot', None)
# Construct K-broken graph
edges = []
if ranks_bot is None:
ranks_bot = 0
scoring = scoring.lower()
assert scoring in ['annot', 'name']
assert ranks_bot == 0
for count, cm in enumerate(cm_list):
score_list = cm.annot_score_list
# rank_list = ut.argsort(score_list)[::-1]
# sortx = ut.argsort(rank_list)
# top_sortx = sortx[:ranks_top]
# bot_sortx = sortx[len(sortx) - ranks_bot :]
# short_sortx = ut.unique(top_sortx + bot_sortx)
# daid_list = ut.take(cm.daid_list, short_sortx)
values = sorted(zip(score_list, cm.daid_list))[::-1]
keep = values[:ranks_top]
daid_list = ut.take_column(keep, 1)
for daid in daid_list:
u, v = (cm.qaid, daid)
if v < u:
u, v = v, u
edges.append((u, v))
return edges
def _make_rankings_worker(args):
    """Parallel worker: open the database and rank a chunk of query aids.

    ``args`` is a single tuple (so the function can be mapped over an
    executor) holding the database dir, the chunk of query aids, the
    database aids, the pipeline config, an optional custom name lookup,
    verbosity, cache flags and the number of top ranks to keep.

    Returns:
        set: candidate (aid1, aid2) edges produced by this chunk.
    """
    import wbia  # NOQA
    import tqdm  # NOQA

    (
        dbdir,
        qaids_chunk,
        daids,
        cfgdict,
        custom_nid_lookup,
        verbose,
        use_cache,
        invalidate_supercache,
        ranks_top,
    ) = args
    ibs = wbia.opendb(dbdir=dbdir)

    collected = set()
    for qaids in tqdm.tqdm(qaids_chunk):
        qreq_ = ibs.new_query_request(
            [qaids],
            daids,
            cfgdict=cfgdict,
            custom_nid_lookup=custom_nid_lookup,
            verbose=verbose,
        )
        # NOTE(review): ``invalidate_supercache`` is received but a literal
        # False is passed here, mirroring the original behavior.
        cm_list = qreq_.execute(
            prog_hook=None,
            use_cache=use_cache,
            invalidate_supercache=False,
        )
        collected.update(
            _cm_breaking_worker(cm_list, review_cfg={'ranks_top': ranks_top})
        )
    return collected
@ut.reloadable_class
class AnnotInfrMatching(object):
    """
    Methods for running matching algorithms.

    This mixin produces candidate edges via one-vs-many (LNBNN) rankings,
    attaches ranking scores to existing graph edges, and runs one-vs-one
    verification over edge subsets.
    """

    @profile
    def exec_matching(
        infr,
        qaids=None,
        daids=None,
        prog_hook=None,
        cfgdict=None,
        name_method='node',
        use_cache=True,
        invalidate_supercache=False,
        batch_size=None,
        ranks_top=5,
    ):
        """
        Loads chip matches into the inference structure
        Uses graph name labeling and ignores wbia labeling

        Args:
            qaids / daids: query and database annotation ids (default: all
                of ``infr.aids``).
            prog_hook: optional progress callback passed to the query engine.
            cfgdict (dict): pipeline configuration overrides.
            name_method (str): where name labels come from -- 'node', 'edge'
                or 'wbia' (see :meth:`_make_rankings`).
            use_cache / invalidate_supercache: query-engine cache controls.
            batch_size (int): if given, queries are chunked and run in
                parallel worker threads.
            ranks_top (int): number of top-ranked results kept per query.

        Returns:
            set: candidate (aid1, aid2) edges.
        """
        return infr._make_rankings(
            qaids,
            daids,
            prog_hook,
            cfgdict,
            name_method,
            use_cache=use_cache,
            invalidate_supercache=invalidate_supercache,
            batch_size=batch_size,
            ranks_top=ranks_top,
        )

    def _set_vsmany_info(infr, qreq_, cm_list):
        # Cache the one-vs-many query request and results so that
        # apply_match_edges / apply_match_scores / lookup_cm can use them.
        infr.vsmany_qreq_ = qreq_
        infr.vsmany_cm_list = cm_list
        infr.cm_list = cm_list
        infr.qreq_ = qreq_

    def _make_rankings(
        infr,
        qaids=None,
        daids=None,
        prog_hook=None,
        cfgdict=None,
        name_method='node',
        use_cache=None,
        invalidate_supercache=None,
        batch_size=None,
        ranks_top=5,
    ):
        """Run the ranking algorithm and return candidate edges.

        See :meth:`exec_matching` for parameter documentation.
        """
        # from wbia.algo.graph import graph_iden
        # TODO: expose other ranking algos like SMK
        rank_algo = 'LNBNN'
        infr.print('Exec {} ranking algorithm'.format(rank_algo), 1)
        ibs = infr.ibs
        if qaids is None:
            qaids = infr.aids
        qaids = ut.ensure_iterable(qaids)
        if daids is None:
            daids = infr.aids
        if cfgdict is None:
            cfgdict = {
                # 'can_match_samename': False,
                'can_match_samename': True,
                'can_match_sameimg': True,
                # 'augment_queryside_hack': True,
                'K': 3,
                'Knorm': 3,
                'prescore_method': 'csum',
                'score_method': 'csum',
            }
        cfgdict.update(infr.ranker_params)
        infr.print('Using LNBNN config = %r' % (cfgdict,))
        # hack for using current nids
        if name_method == 'node':
            aids = sorted(set(ut.aslist(qaids) + ut.aslist(daids)))
            custom_nid_lookup = infr.get_node_attrs('name_label', aids)
        elif name_method == 'edge':
            custom_nid_lookup = {
                aid: nid for nid, cc in infr.pos_graph._ccs.items() for aid in cc
            }
        elif name_method == 'wbia':
            custom_nid_lookup = None
        else:
            raise KeyError('Unknown name_method={}'.format(name_method))

        verbose = infr.verbose >= 2

        # <HACK FOR PIE V2>
        if cfgdict.get('pipeline_root', None) in ['PieTwo']:
            from wbia_pie_v2._plugin import distance_to_score

            # NOTE(review): debugging hack carried over from the original.
            globals().update(locals())

            edges = []
            for qaid in tqdm.tqdm(qaids):
                daids_ = list(set(daids) - set([qaid]))
                pie_annot_distances = ibs.pie_v2_predict_light_distance(
                    qaid,
                    daids_,
                )
                score_list = [
                    distance_to_score(pie_annot_distance, norm=500.0)
                    for pie_annot_distance in pie_annot_distances
                ]
                values = sorted(zip(score_list, daids_))[::-1]
                keep = values[:ranks_top]
                daid_list = ut.take_column(keep, 1)
                for daid in daid_list:
                    u, v = (qaid, daid)
                    if v < u:
                        u, v = v, u
                    edges.append((u, v))

            edges = set(edges)
            return edges
        # </HACK>

        if batch_size is not None:
            # Chunk queries and fan them out over worker threads.
            qaids_chunks = list(ut.ichunks(qaids, batch_size))
            num_chunks = len(qaids_chunks)
            arg_iter = list(
                zip(
                    [ibs.dbdir] * num_chunks,
                    qaids_chunks,
                    [daids] * num_chunks,
                    [cfgdict] * num_chunks,
                    [custom_nid_lookup] * num_chunks,
                    [verbose] * num_chunks,
                    [use_cache] * num_chunks,
                    [invalidate_supercache] * num_chunks,
                    [ranks_top] * num_chunks,
                )
            )
            nprocs = 8
            logger.info('Creating %d processes' % (nprocs,))
            executor = futures.ThreadPoolExecutor(nprocs)
            logger.info('Submitting workers')
            fs_chunk = []
            for args in ut.ProgIter(arg_iter, lbl='submit matching threads'):
                fs = executor.submit(_make_rankings_worker, args)
                fs_chunk.append(fs)
            results = []
            try:
                for fs in ut.ProgIter(fs_chunk, lbl='getting matching result'):
                    result = fs.result()
                    results.append(result)
            except Exception:
                raise
            finally:
                # Always release the worker threads, even on error.
                executor.shutdown(wait=True)
            assert len(results) == num_chunks
            edges = set(ut.flatten(results))
        else:
            qreq_ = ibs.new_query_request(
                qaids,
                daids,
                cfgdict=cfgdict,
                custom_nid_lookup=custom_nid_lookup,
                verbose=infr.verbose >= 2,
            )
            cm_list = qreq_.execute(
                prog_hook=prog_hook,
                use_cache=use_cache,
                invalidate_supercache=invalidate_supercache,
            )
            infr._set_vsmany_info(qreq_, cm_list)
            edges = set(_cm_breaking_worker(cm_list, review_cfg={'ranks_top': ranks_top}))
        return edges

    def _make_matches_from(infr, edges, config=None, prog_hook=None):
        """Build one-vs-one PairwiseMatch objects for the given edges."""
        from wbia.algo.verif import pairfeat

        if config is None:
            config = infr.verifier_params
        extr = pairfeat.PairwiseFeatureExtractor(infr.ibs, config=config)
        match_list = extr._exec_pairwise_match(edges, prog_hook=prog_hook)
        return match_list

    def exec_vsone_subset(infr, edges, prog_hook=None):
        r"""
        Args:
            prog_hook (None): (default = None)

        CommandLine:
            python -m wbia.algo.graph.core exec_vsone_subset

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.graph.core import *  # NOQA
            >>> infr = testdata_infr('testdb1')
            >>> infr.ensure_full()
            >>> edges = [(1, 2), (2, 3)]
            >>> result = infr.exec_vsone_subset(edges)
            >>> print(result)
        """
        # BUG FIX: prog_hook was previously passed positionally, landing in
        # the ``config`` parameter of _make_matches_from; pass it by keyword.
        match_list = infr._make_matches_from(edges, prog_hook=prog_hook)
        # TODO: is this code necessary anymore?
        vsone_matches = {e_(u, v): match for (u, v), match in zip(edges, match_list)}
        infr.vsone_matches.update(vsone_matches)
        edge_to_score = {e: match.fs.sum() for e, match in vsone_matches.items()}
        infr.graph.add_edges_from(edge_to_score.keys())
        infr.set_edge_attrs('score', edge_to_score)
        return match_list

    def lookup_cm(infr, aid1, aid2):
        """
        Get chipmatch object associated with an edge if one exists.

        Returns:
            tuple: (cm, aid1, aid2) where aid1 is always the query side; the
            aids may be swapped relative to the input order.

        Raises:
            KeyError: if neither orientation of the edge has a ChipMatch.
        """
        if infr.cm_list is None:
            return None, aid1, aid2
        # TODO: keep chip matches in dictionary by default?
        aid2_idx = ut.make_index_lookup([cm.qaid for cm in infr.cm_list])

        switch_order = False

        if aid1 in aid2_idx:
            idx = aid2_idx[aid1]
            cm = infr.cm_list[idx]
            if aid2 not in cm.daid2_idx:
                switch_order = True
                # raise KeyError('switch order')
        else:
            switch_order = True

        if switch_order:
            # switch order
            aid1, aid2 = aid2, aid1
            idx = aid2_idx[aid1]
            cm = infr.cm_list[idx]
            if aid2 not in cm.daid2_idx:
                raise KeyError('No ChipMatch for edge (%r, %r)' % (aid1, aid2))
        return cm, aid1, aid2

    @profile
    def apply_match_edges(infr, review_cfg=None):
        """
        Adds results from one-vs-many rankings as edges in the graph
        """
        # BUG FIX: mutable default argument ``review_cfg={}`` replaced.
        if review_cfg is None:
            review_cfg = {}
        if infr.cm_list is None:
            infr.print('apply_match_edges - matching has not been run!')
            return
        infr.print('apply_match_edges', 1)
        edges = infr._cm_breaking(review_cfg=review_cfg)
        # Create match-based graph structure
        infr.print('apply_match_edges adding %d edges' % len(edges), 1)
        infr.graph.add_edges_from(edges)
        infr.apply_match_scores()

    def _cm_breaking(infr, cm_list=None, review_cfg=None, scoring='annot'):
        """
        Delegate to the module-level worker (see _cm_breaking_worker).

        >>> from wbia.algo.graph.core import *  # NOQA
        >>> review_cfg = {}
        """
        # BUG FIX: mutable default argument ``review_cfg={}`` replaced.
        if review_cfg is None:
            review_cfg = {}
        if cm_list is None:
            cm_list = infr.cm_list
        return _cm_breaking_worker(
            cm_list=cm_list, review_cfg=review_cfg, scoring=scoring
        )

    def _cm_training_pairs(
        infr,
        qreq_=None,
        cm_list=None,
        top_gt=2,
        mid_gt=2,
        bot_gt=2,
        top_gf=2,
        mid_gf=2,
        bot_gf=2,
        rand_gt=2,
        rand_gf=2,
        rng=None,
    ):
        """
        Constructs training data for a pairwise classifier

        CommandLine:
            python -m wbia.algo.graph.core _cm_training_pairs

        Example:
            >>> # xdoctest: +REQUIRES(--slow)
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.graph.core import *  # NOQA
            >>> infr = testdata_infr('PZ_MTEST')
            >>> infr.exec_matching(cfgdict={
            >>>     'can_match_samename': True,
            >>>     'K': 4,
            >>>     'Knorm': 1,
            >>>     'prescore_method': 'csum',
            >>>     'score_method': 'csum'
            >>> })
            >>> from wbia.algo.graph.core import *  # NOQA
            >>> exec(ut.execstr_funckw(infr._cm_training_pairs))
            >>> rng = np.random.RandomState(42)
            >>> aid_pairs = np.array(infr._cm_training_pairs(rng=rng))
            >>> print(len(aid_pairs))
            >>> assert np.sum(aid_pairs.T[0] == aid_pairs.T[1]) == 0
        """
        if qreq_ is None:
            cm_list = infr.cm_list
            qreq_ = infr.qreq_
        ibs = infr.ibs
        aid_pairs = []
        dnids = qreq_.get_qreq_annot_nids(qreq_.daids)
        rng = ut.ensure_rng(rng)
        for cm in ut.ProgIter(cm_list, lbl='building pairs'):
            all_gt_aids = cm.get_top_gt_aids(ibs)
            all_gf_aids = cm.get_top_gf_aids(ibs)
            gt_aids = ut.take_percentile_parts(all_gt_aids, top_gt, mid_gt, bot_gt)
            gf_aids = ut.take_percentile_parts(all_gf_aids, top_gf, mid_gf, bot_gf)
            # get unscored examples
            unscored_gt_aids = [
                aid for aid in qreq_.daids[cm.qnid == dnids] if aid not in cm.daid2_idx
            ]
            rand_gt_aids = ut.random_sample(unscored_gt_aids, rand_gt, rng=rng)
            # BUG FIX: removed a dead duplicate assignment of _gf_aids that
            # was immediately overwritten by the .compress() call below.
            _gf_aids = qreq_.daids.compress(cm.qnid != dnids)
            # gf_aids = ibs.get_annot_groundfalse(cm.qaid, daid_list=qreq_.daids)
            rand_gf_aids = ut.random_sample(_gf_aids, rand_gf, rng=rng).tolist()
            chosen_daids = ut.unique(gt_aids + gf_aids + rand_gf_aids + rand_gt_aids)
            aid_pairs.extend([(cm.qaid, aid) for aid in chosen_daids if cm.qaid != aid])

        return aid_pairs

    def _get_cm_agg_aid_ranking(infr, cc):
        """Rank database aids for a connected component by best score."""
        aid_to_cm = {cm.qaid: cm for cm in infr.cm_list}
        all_scores = ut.ddict(list)
        for qaid in cc:
            cm = aid_to_cm[qaid]
            # should we be doing nids?
            for daid, score in zip(cm.get_top_aids(), cm.get_top_scores()):
                all_scores[daid].append(score)

        max_scores = sorted((max(scores), aid) for aid, scores in all_scores.items())[
            ::-1
        ]
        ranked_aids = ut.take_column(max_scores, 1)
        return ranked_aids

    def _get_cm_edge_data(infr, edges, cm_list=None):
        """Gather ranking score/rank for each edge, taking the best of the
        two directed ChipMatches when both exist."""
        symmetric = True

        if cm_list is None:
            cm_list = infr.cm_list
        # Find scores for the edges that exist in the graph
        edge_to_data = ut.ddict(dict)
        aid_to_cm = {cm.qaid: cm for cm in cm_list}
        for u, v in edges:
            if symmetric:
                u, v = e_(u, v)
            cm1 = aid_to_cm.get(u, None)
            cm2 = aid_to_cm.get(v, None)
            scores = []
            ranks = []
            for cm in ut.filter_Nones([cm1, cm2]):
                for aid in [u, v]:
                    idx = cm.daid2_idx.get(aid, None)
                    if idx is None:
                        continue
                    score = cm.annot_score_list[idx]
                    rank = cm.get_annot_ranks([aid])[0]
                    scores.append(score)
                    ranks.append(rank)
            if len(scores) == 0:
                score = None
                rank = None
            else:
                # Choose whichever one gave the best score
                idx = vt.safe_argmax(scores, nans=False)
                score = scores[idx]
                rank = ranks[idx]
            edge_to_data[(u, v)]['score'] = score
            edge_to_data[(u, v)]['rank'] = rank
        return edge_to_data

    @profile
    def apply_match_scores(infr):
        """
        Applies precomputed matching scores to edges that already exist in the
        graph. Typically you should run infr.apply_match_edges() before running
        this.

        CommandLine:
            python -m wbia.algo.graph.core apply_match_scores --show

        Example:
            >>> # xdoctest: +REQUIRES(--slow)
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.graph.core import *  # NOQA
            >>> infr = testdata_infr('PZ_MTEST')
            >>> infr.exec_matching()
            >>> infr.apply_match_edges()
            >>> infr.apply_match_scores()
            >>> infr.get_edge_attrs('score')
        """
        if infr.cm_list is None:
            infr.print('apply_match_scores - no scores to apply!')
            return
        infr.print('apply_match_scores', 1)
        edges = list(infr.graph.edges())
        edge_to_data = infr._get_cm_edge_data(edges)

        # Remove existing attrs
        ut.nx_delete_edge_attr(infr.graph, 'score')
        ut.nx_delete_edge_attr(infr.graph, 'rank')
        ut.nx_delete_edge_attr(infr.graph, 'normscore')

        edges = list(edge_to_data.keys())
        edge_scores = list(ut.take_column(edge_to_data.values(), 'score'))
        edge_scores = ut.replace_nones(edge_scores, np.nan)
        edge_scores = np.array(edge_scores)
        edge_ranks = np.array(ut.take_column(edge_to_data.values(), 'rank'))
        # take the inf-norm
        normscores = edge_scores / vt.safe_max(edge_scores, nans=False)

        # Add new attrs
        infr.set_edge_attrs('score', ut.dzip(edges, edge_scores))
        infr.set_edge_attrs('rank', ut.dzip(edges, edge_ranks))

        # Hack away zero probabilites
        # probs = np.vstack([p_nomatch, p_match, p_notcomp]).T + 1e-9
        # probs = vt.normalize(probs, axis=1, ord=1, out=probs)
        # entropy = -(np.log2(probs) * probs).sum(axis=1)
        infr.set_edge_attrs('normscore', dict(zip(edges, normscores)))
class InfrLearning(object):
# Mixin providing routines to train, deploy, and load pairwise verifier models.
def learn_deploy_verifiers(infr, publish=False):
"""
Uses current knowledge to train verifiers for new unseen pairs.
Example:
>>> # DISABLE_DOCTEST
>>> import wbia
>>> ibs = wbia.opendb('PZ_MTEST')
>>> infr = wbia.AnnotInference(ibs, aids='all')
>>> infr.ensure_mst()
>>> publish = False
>>> infr.learn_deploy_verifiers()
Ignore:
publish = True
"""
infr.print('learn_deploy_verifiers')
from wbia.algo.verif import vsone
pblm = vsone.OneVsOneProblem(infr, verbose=True)
pblm.primary_task_key = 'match_state'
pblm.default_clf_key = 'RF'
pblm.default_data_key = 'learn(sum,glob)'
pblm.setup()
# Models are deployed into the current working directory.
dpath = '.'
task_key = 'match_state'
pblm.deploy(dpath, task_key=task_key, publish=publish)
# Also deploy the photobomb classifier when setup produced one.
task_key = 'photobomb_state'
if task_key in pblm.eval_task_keys:
pblm.deploy(dpath, task_key=task_key)
def learn_evaluation_verifiers(infr):
"""
Creates a cross-validated ensemble of classifiers to evaluate
verifier error cases and groundtruth errors.
CommandLine:
python -m wbia.algo.graph.mixin_matching learn_evaluation_verifiers
Doctest:
>>> # xdoctest: +REQUIRES(module:wbia_cnn, --slow)
>>> import wbia
>>> infr = wbia.AnnotInference(
>>> 'PZ_MTEST', aids='all', autoinit='annotmatch',
>>> verbose=4)
>>> verifiers = infr.learn_evaluation_verifiers()
>>> edges = list(infr.edges())
>>> verif = verifiers['match_state']
>>> probs = verif.predict_proba_df(edges)
>>> print(probs)
"""
# NOTE: 'evaluataion' typo is inside a runtime log string; left untouched.
infr.print('learn_evaluataion_verifiers')
from wbia.algo.verif import vsone
pblm = vsone.OneVsOneProblem(infr, verbose=5)
pblm.primary_task_key = 'match_state'
pblm.eval_clf_keys = ['RF']
pblm.eval_data_keys = ['learn(sum,glob)']
pblm.setup_evaluation()
# Toggle point for the (currently always-on) evaluation report.
if True:
pblm.report_evaluation()
verifiers = pblm._make_evaluation_verifiers(pblm.eval_task_keys)
return verifiers
def load_published(infr):
"""
Downloads, caches, and loads pre-trained verifiers.
This is the default action.
"""
from wbia.algo.verif import deploy
ibs = infr.ibs
species = ibs.get_primary_database_species(infr.aids)
# Side effects: sets infr.task_thresh, and infr.verifiers on success.
infr.print('Loading task_thresh for species: %r' % (species,))
assert species in infr.task_thresh_dict
infr.task_thresh = infr.task_thresh_dict[species]
infr.print('infr.task_thresh: %r' % (infr.task_thresh,))
infr.print('Loading verifiers for species: %r' % (species,))
try:
infr.verifiers = deploy.Deployer().load_published(ibs, species)
message = 'Loaded verifiers %r' % (infr.verifiers,)
infr.print(message)
except TypeError as ex:
# NOTE(review): only TypeError is treated as a soft failure here;
# presumably raised when no published model exists for the species —
# confirm against deploy.Deployer.load_published.
message = 'Error: Failed to load verifiers for %r' % (species,)
ut.printex(
ex,
message,
iswarning=True,
tb=True,
)
infr.print(message)
def load_latest_classifiers(infr, dpath):
# Load the most recently deployed local classifiers found under ``dpath``
# and install them as infr.verifiers.
from wbia.algo.verif import deploy
task_clf_fpaths = deploy.Deployer(dpath).find_latest_local()
classifiers = {}
for task_key, fpath in task_clf_fpaths.items():
clf_info = ut.load_data(fpath)
# Sanity check: the saved model must match the task it is filed under.
assert (
clf_info['metadata']['task_key'] == task_key
), 'bad saved clf at fpath={}'.format(fpath)
classifiers[task_key] = clf_info
infr.verifiers = classifiers
# return classifiers
def photobomb_samples(infr):
# Return the edges whose 'tags' attribute contains 'photobomb'.
edges = list(infr.edges())
tags_list = list(infr.gen_edge_values('tags', edges=edges, default=[]))
flags = ut.filterflags_general_tags(tags_list, has_any=['photobomb'])
pb_edges = ut.compress(edges, flags)
return pb_edges
class _RedundancyAugmentation(object):
# Mixin: proposes edges that would raise PCCs (positive connected
# components) to k-positive or PCC pairs to k-negative redundancy.
# def rand_neg_check_edges(infr, c1_nodes, c2_nodes):
# """
# Find enough edges to between two pccs to make them k-negative complete
# """
# k = infr.params['redun.neg']
# existing_edges = nxu.edges_cross(infr.graph, c1_nodes, c2_nodes)
# reviewed_edges = {
# edge: state
# for edge, state in infr.get_edge_attrs(
# 'decision', existing_edges,
# default=UNREV).items()
# if state != UNREV
# }
# n_neg = sum([state == NEGTV for state in reviewed_edges.values()])
# if n_neg < k:
# # Find k random negative edges
# check_edges = existing_edges - set(reviewed_edges)
# if len(check_edges) < k:
# edges = it.starmap(nxu.e_, it.product(c1_nodes, c2_nodes))
# for edge in edges:
# if edge not in reviewed_edges:
# check_edges.add(edge)
# if len(check_edges) == k:
# break
# else:
# check_edges = {}
# return check_edges
def find_neg_augment_edges(infr, cc1, cc2, k=None):
"""
Find enough edges to between two pccs to make them k-negative complete
The two CCs should be disjoint and not have any positive edges between
them.
Args:
cc1 (set): nodes in one PCC
cc2 (set): nodes in another positive-disjoint PCC
k (int): redundnacy level (if None uses infr.params['redun.neg'])
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.graph import demo
>>> k = 2
>>> cc1, cc2 = {1}, {2, 3}
>>> # --- return an augmentation if feasible
>>> infr = demo.demodata_infr(ccs=[cc1, cc2], ignore_pair=True)
>>> edges = set(infr.find_neg_augment_edges(cc1, cc2, k=k))
>>> assert edges == {(1, 2), (1, 3)}
>>> # --- if infeasible return a partial augmentation
>>> infr.add_feedback((1, 2), INCMP)
>>> edges = set(infr.find_neg_augment_edges(cc1, cc2, k=k))
>>> assert edges == {(1, 3)}
"""
if k is None:
k = infr.params['redun.neg']
assert cc1 is not cc2, 'CCs should be disjoint (but they are the same)'
assert len(cc1.intersection(cc2)) == 0, 'CCs should be disjoint'
existing_edges = set(nxu.edges_cross(infr.graph, cc1, cc2))
reviewed_edges = {
edge: state
for edge, state in zip(
existing_edges, infr.edge_decision_from(existing_edges)
)
if state != UNREV
}
# Find how many negative edges we already have
num = sum([state == NEGTV for state in reviewed_edges.values()])
if num < k:
# Find k random negative edges
check_edges = existing_edges - set(reviewed_edges)
# Check the existing but unreviewed edges first
for edge in check_edges:
num += 1
yield edge
if num >= k:
return
# Check non-existing edges next
# Fixed seed (salted with the node ids when they are summable) so
# repeated calls propose the same pseudo-random augmentation.
seed = 2827295125
try:
seed += sum(cc1) + sum(cc2)
except Exception:
pass
rng = np.random.RandomState(seed)
cc1 = ut.shuffle(list(cc1), rng=rng)
cc2 = ut.shuffle(list(cc2), rng=rng)
# NOTE(review): cc1 is shuffled a second time here; looks redundant
# but is kept since it changes which pairs diag_product emits first.
cc1 = ut.shuffle(list(cc1), rng=rng)
for edge in it.starmap(nxu.e_, nxu.diag_product(cc1, cc2)):
if edge not in existing_edges:
num += 1
yield edge
if num >= k:
return
def find_pos_augment_edges(infr, pcc, k=None):
"""
Return a set of candidate edges that would make ``pcc`` k-edge-connected
in the positive graph (k defaults to infr.params['redun.pos']).
# [[1, 0], [0, 2], [1, 2], [3, 1]]
pos_sub = nx.Graph([[0, 1], [1, 2], [0, 2], [1, 3]])
"""
if k is None:
pos_k = infr.params['redun.pos']
else:
pos_k = k
pos_sub = infr.pos_graph.subgraph(pcc)
# TODO:
# weight by pairs most likely to be comparable
# First try to augment only with unreviewed existing edges
unrev_avail = list(nxu.edges_inside(infr.unreviewed_graph, pcc))
try:
check_edges = list(
nxu.k_edge_augmentation(
pos_sub, k=pos_k, avail=unrev_avail, partial=False
)
)
except nx.NetworkXUnfeasible:
check_edges = None
if not check_edges:
# Allow new edges to be introduced
full_sub = infr.graph.subgraph(pcc).copy()
new_avail = ut.estarmap(infr.e_, nx.complement(full_sub).edges())
full_avail = unrev_avail + new_avail
# Maximum possible number of edges over len(pos_sub) nodes.
n_max = (len(pos_sub) * (len(pos_sub) - 1)) // 2
n_complement = n_max - pos_sub.number_of_edges()
if len(full_avail) == n_complement:
# can use the faster algorithm
check_edges = list(
nxu.k_edge_augmentation(pos_sub, k=pos_k, partial=True)
)
else:
# have to use the slow approximate algo
check_edges = list(
nxu.k_edge_augmentation(
pos_sub, k=pos_k, avail=full_avail, partial=True
)
)
# Canonicalize each candidate edge to (min, max) node order.
check_edges = set(it.starmap(e_, check_edges))
return check_edges
@profile
def find_pos_redun_candidate_edges(infr, k=None, verbose=False):
r"""
Searches for augmenting edges that would make PCCs k-positive redundant
Doctest:
>>> from wbia.algo.graph.mixin_matching import * # NOQA
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_infr(ccs=[(1, 2, 3, 4, 5), (7, 8, 9, 10)])
>>> infr.add_feedback((2, 5), 'match')
>>> infr.add_feedback((1, 5), 'notcomp')
>>> infr.params['redun.pos'] = 2
>>> candidate_edges = list(infr.find_pos_redun_candidate_edges())
>>> result = ('candidate_edges = ' + ut.repr2(candidate_edges))
>>> print(result)
candidate_edges = []
"""
# Add random edges between exisiting non-redundant PCCs
if k is None:
k = infr.params['redun.pos']
# infr.find_non_pos_redundant_pccs(k=k, relax=True)
pcc_gen = list(infr.positive_components())
prog = ut.ProgIter(pcc_gen, enabled=verbose, freq=1, adjust=False)
for pcc in prog:
if not infr.is_pos_redundant(pcc, k=k, relax=True, assume_connected=True):
for edge in infr.find_pos_augment_edges(pcc, k=k):
yield nxu.e_(*edge)
@profile
def find_neg_redun_candidate_edges(infr, k=None):
"""
Get pairs of PCCs that are not complete.
Finds edges that might complete them.
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.graph.mixin_matching import * # NOQA
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_infr(ccs=[(1,), (2,), (3,)], ignore_pair=True)
>>> edges = list(infr.find_neg_redun_candidate_edges())
>>> assert len(edges) == 3, 'all should be needed here'
>>> infr.add_feedback_from(edges, evidence_decision=NEGTV)
>>> assert len(list(infr.find_neg_redun_candidate_edges())) == 0
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_infr(pcc_sizes=[3] * 20, ignore_pair=True)
>>> ccs = list(infr.positive_components())
>>> gen = infr.find_neg_redun_candidate_edges(k=2)
>>> for edge in gen:
>>> # What happens when we make ccs positive
>>> print(infr.node_labels(edge))
>>> infr.add_feedback(edge, evidence_decision=POSTV)
>>> import ubelt as ub
>>> infr = demo.demodata_infr(pcc_sizes=[1] * 30, ignore_pair=True)
>>> ccs = list(infr.positive_components())
>>> gen = infr.find_neg_redun_candidate_edges(k=3)
>>> for chunk in ub.chunks(gen, 2):
>>> for edge in chunk:
>>> # What happens when we make ccs positive
>>> print(infr.node_labels(edge))
>>> infr.add_feedback(edge, evidence_decision=POSTV)
list(gen)
"""
if k is None:
k = infr.params['redun.neg']
# Loop through all pairs
for cc1, cc2 in infr.find_non_neg_redun_pccs(k=k):
if len(cc1.intersection(cc2)) > 0:
# If there is modification of the underlying graph while we
# iterate, then two ccs may not be disjoint. Skip these cases.
continue
for u, v in infr.find_neg_augment_edges(cc1, cc2, k):
edge = e_(u, v)
infr.assert_edge(edge)
yield edge
class CandidateSearch(_RedundancyAugmentation):
"""Search for candidate edges"""
@profile
def find_lnbnn_candidate_edges(
infr,
# NOTE(review): mutable default list; benign here because it is only
# read (membership tests), never mutated.
desired_states=[UNREV],
can_match_samename=False,
can_match_sameimg=False,
K=5,
Knorm=5,
requery=True,
prescore_method='csum',
score_method='csum',
sv_on=True,
cfgdict_=None,
batch_size=None,
):
"""
Run an LNBNN one-vs-many ranking query and return the ranked edges
whose current decision state is in ``desired_states``.
Example:
>>> # DISABLE_DOCTEST
>>> # xdoctest: +REQUIRES(--slow)
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_mtest_infr()
>>> cand_edges = infr.find_lnbnn_candidate_edges()
>>> assert len(cand_edges) > 200, len(cand_edges)
"""
# Refresh the name labels
# TODO: abstract into a Ranker class
# do LNBNN query for new edges
# Use one-vs-many to establish candidate edges to classify
cfgdict = {
'resize_dim': 'width',
'dim_size': 700,
'requery': requery,
'can_match_samename': can_match_samename,
'can_match_sameimg': can_match_sameimg,
'K': K,
'Knorm': Knorm,
'sv_on': sv_on,
'prescore_method': prescore_method,
'score_method': score_method,
}
# Caller-supplied overrides take precedence over the defaults above.
if cfgdict_ is not None:
cfgdict.update(cfgdict_)
print('[find_lnbnn_candidate_edges] Using cfgdict = %s' % (ut.repr3(cfgdict),))
ranks_top = infr.params['ranking.ntop']
response = infr.exec_matching(
name_method='edge',
cfgdict=cfgdict,
batch_size=batch_size,
ranks_top=ranks_top,
)
if cfgdict_ is None:
# infr.apply_match_edges(review_cfg={'ranks_top': 5})
lnbnn_results = set(infr._cm_breaking(review_cfg={'ranks_top': ranks_top}))
else:
assert response is not None
lnbnn_results = set(response)
# Keep only results whose decision state matches what the caller wants.
# (Both zip arguments iterate the same set object, so they stay aligned.)
candidate_edges = {
edge
for edge, state in zip(lnbnn_results, infr.edge_decision_from(lnbnn_results))
if state in desired_states
}
infr.print(
'ranking alg found {}/{} {} edges'.format(
len(candidate_edges), len(lnbnn_results), desired_states
),
1,
)
return candidate_edges
def ensure_task_probs(infr, edges):
"""
Ensures that probabilities are assigned to the edges.
This gaurentees that infr.task_probs contains data for edges.
(Currently only the primary task is actually ensured)
CommandLine:
python -m wbia.algo.graph.mixin_matching ensure_task_probs
Doctest:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.graph.mixin_matching import *
>>> import wbia
>>> infr = wbia.AnnotInference('PZ_MTEST', aids='all',
>>> autoinit='staging')
>>> edges = list(infr.edges())[0:3]
>>> infr.load_published()
>>> assert len(infr.task_probs['match_state']) == 0
>>> infr.ensure_task_probs(edges)
>>> assert len(infr.task_probs['match_state']) == 3
>>> infr.ensure_task_probs(edges)
>>> assert len(infr.task_probs['match_state']) == 3
Doctest:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.graph.mixin_matching import *
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_infr(num_pccs=6, p_incon=.5, size_std=2)
>>> edges = list(infr.edges())
>>> infr.ensure_task_probs(edges)
>>> assert all([np.isclose(sum(p.values()), 1)
>>> for p in infr.task_probs['match_state'].values()])
"""
if not infr.verifiers:
raise Exception('Verifiers are needed to predict probabilities')
# Construct pairwise features on edges in infr
primary_task = 'match_state'
match_task = infr.task_probs[primary_task]
# Lazy: only edges missing from the primary task's cache are recomputed.
need_flags = [e not in match_task for e in edges]
if any(need_flags):
need_edges = ut.compress(edges, need_flags)
need_edges = list(set(need_edges))
infr.print(
'There are {} edges without probabilities'.format(len(need_edges)), 1
)
# Only recompute for the needed edges
task_probs = infr._make_task_probs(need_edges)
# Store task probs in internal data structure
# FIXME: this is slow
for task, probs in task_probs.items():
probs_dict = probs.to_dict(orient='index')
if task not in infr.task_probs:
infr.task_probs[task] = probs_dict
else:
infr.task_probs[task].update(probs_dict)
# Set edge task attribute as well
infr.set_edge_attrs(task, probs_dict)
@profile
def ensure_priority_scores(infr, priority_edges):
"""
Ensures that priority attributes are assigned to the edges.
This does not change the state of the queue.
Doctest:
>>> import wbia
>>> ibs = wbia.opendb('PZ_MTEST')
>>> infr = wbia.AnnotInference(ibs, aids='all')
>>> infr.ensure_mst()
>>> priority_edges = list(infr.edges())[0:1]
>>> infr.ensure_priority_scores(priority_edges)
Doctest:
>>> import wbia
>>> ibs = wbia.opendb('PZ_MTEST')
>>> infr = wbia.AnnotInference(ibs, aids='all')
>>> infr.ensure_mst()
>>> # infr.load_published()
>>> priority_edges = list(infr.edges())
>>> infr.ensure_priority_scores(priority_edges)
Doctest:
>>> from wbia.algo.graph import demo
>>> infr = demo.demodata_infr(num_pccs=6, p_incon=.5, size_std=2)
>>> edges = list(infr.edges())
>>> infr.ensure_priority_scores(edges)
"""
# Three priority sources, in order of preference:
# 1) verifier probabilities, 2) LNBNN normscores, 3) tiny uniform values.
infr.print('Checking for verifiers: %r' % (infr.verifiers,))
if infr.verifiers and infr.ibs is not None:
infr.print(
'Prioritizing {} edges with one-vs-one probs'.format(len(priority_edges)),
1,
)
infr.print('Using thresholds: %r' % (infr.task_thresh,))
infr.print(
'Using infr.params[autoreview.enabled] : %r'
% (infr.params['autoreview.enabled'],)
)
infr.print(
'Using infr.params[autoreview.prioritize_nonpos]: %r'
% (infr.params['autoreview.prioritize_nonpos'],)
)
infr.ensure_task_probs(priority_edges)
# NOTE(review): load_published is invoked after probabilities were
# already ensured; looks like it only refreshes task_thresh here —
# confirm the intended ordering.
infr.load_published()
primary_task = 'match_state'
match_probs = infr.task_probs[primary_task]
primary_thresh = infr.task_thresh[primary_task]
# Read match_probs into a DataFrame
primary_probs = pd.DataFrame(
ut.take(match_probs, priority_edges),
index=nxu.ensure_multi_index(priority_edges, ('aid1', 'aid2')),
)
# Convert match-state probabilities into priorities
prob_match = primary_probs[POSTV]
# Initialize priorities to probability of matching
default_priority = prob_match.copy()
# If the edges are currently between the same individual, then
# prioritize by non-positive probability (because those edges might
# expose an inconsistency)
already_pos = [
infr.pos_graph.node_label(u) == infr.pos_graph.node_label(v)
for u, v in priority_edges
]
default_priority[already_pos] = 1 - default_priority[already_pos]
if infr.params['autoreview.enabled']:
if infr.params['autoreview.prioritize_nonpos']:
# Give positives that pass automatic thresholds high priority
_probs = primary_probs[POSTV]
flags = _probs > primary_thresh[POSTV]
default_priority[flags] = (
np.maximum(default_priority[flags], _probs[flags]) + 1
)
# Give negatives that pass automatic thresholds high priority
_probs = primary_probs[NEGTV]
flags = _probs > primary_thresh[NEGTV]
default_priority[flags] = (
np.maximum(default_priority[flags], _probs[flags]) + 1
)
# Give not-comps that pass automatic thresholds high priority
_probs = primary_probs[INCMP]
flags = _probs > primary_thresh[INCMP]
default_priority[flags] = (
np.maximum(default_priority[flags], _probs[flags]) + 1
)
infr.set_edge_attrs('prob_match', prob_match.to_dict())
infr.set_edge_attrs('default_priority', default_priority.to_dict())
metric = 'default_priority'
priority = default_priority
elif infr.cm_list is not None:
infr.print(
'Prioritizing {} edges with one-vs-vsmany scores'.format(
len(priority_edges)
)
)
# Not given any deploy classifier, this is the best we can do
scores = infr._make_lnbnn_scores(priority_edges)
metric = 'normscore'
priority = scores
else:
infr.print(
'WARNING: No verifiers to prioritize {} edge(s)'.format(
len(priority_edges)
)
)
# Constant tiny priority: edges end up effectively unordered.
metric = 'random'
priority = np.zeros(len(priority_edges)) + 1e-6
infr.set_edge_attrs(metric, ut.dzip(priority_edges, priority))
return metric, priority
def ensure_prioritized(infr, priority_edges):
# Score the edges (see ensure_priority_scores) and push them onto the
# review queue with those scores.
priority_edges = list(priority_edges)
metric, priority = infr.ensure_priority_scores(priority_edges)
infr.prioritize(metric=metric, edges=priority_edges, scores=priority)
@profile
def add_candidate_edges(infr, candidate_edges):
# Insert candidate edges into the graph, prioritize the non-redundant
# ones, and return how many edges were prioritized.
candidate_edges = list(candidate_edges)
new_edges = infr.ensure_edges_from(candidate_edges)
if infr.test_mode:
infr.apply_edge_truth(new_edges)
if infr.params['redun.enabled']:
# Skip edges already covered by redundancy.
priority_edges = list(infr.filter_edges_flagged_as_redun(candidate_edges))
infr.print(
'Got {} candidate edges, {} are new, '
'and {} are non-redundant'.format(
len(candidate_edges), len(new_edges), len(priority_edges)
)
)
else:
infr.print(
'Got {} candidate edges and {} are new'.format(
len(candidate_edges), len(new_edges)
)
)
priority_edges = candidate_edges
if len(priority_edges) > 0:
infr.ensure_prioritized(priority_edges)
if hasattr(infr, 'on_new_candidate_edges'):
# hack callback for demo
infr.on_new_candidate_edges(infr, new_edges)
return len(priority_edges)
@profile
def refresh_candidate_edges(infr):
"""
Search for candidate edges.
Assign each edge a priority and add to queue.
"""
infr.print('refresh_candidate_edges', 1)
infr.assert_consistency_invariant()
if infr.ibs is not None:
# Normal path: LNBNN ranking against the database.
candidate_edges = infr.find_lnbnn_candidate_edges()
elif hasattr(infr, 'dummy_verif'):
# Demo/simulation path: a dummy verifier proposes edges.
infr.print('Searching for dummy candidates')
infr.print(
'dummy vsone params ='
+ ut.repr4(infr.dummy_verif.dummy_params, nl=1, si=True)
)
ranks_top = infr.params['ranking.ntop']
candidate_edges = infr.dummy_verif.find_candidate_edges(K=ranks_top)
else:
raise Exception('No method available to search for candidate edges')
infr.add_candidate_edges(candidate_edges)
infr.assert_consistency_invariant()
@profile
def _make_task_probs(infr, edges):
"""
Predict edge probs for each pairwise classifier task
"""
if infr.verifiers is None:
raise ValueError('no classifiers exist')
if not isinstance(infr.verifiers, dict):
raise NotImplementedError('need to deploy or implement eval prediction')
task_keys = list(infr.verifiers.keys())
task_probs = {}
# infr.print('[make_taks_probs] predict {} for {} edges'.format(
# ut.conj_phrase(task_keys, 'and'), len(edges)))
for task_key in task_keys:
infr.print('predict {} for {} edges'.format(task_key, len(edges)))
verif = infr.verifiers[task_key]
probs_df = verif.predict_proba_df(edges)
task_probs[task_key] = probs_df
return task_probs
@profile
def _make_lnbnn_scores(infr, edges):
# Fallback priority: per-edge LNBNN scores normalized by the maximum
# (missing scores become NaN).
edge_to_data = infr._get_cm_edge_data(edges)
edges = list(edge_to_data.keys())
edge_scores = list(ut.take_column(edge_to_data.values(), 'score'))
edge_scores = ut.replace_nones(edge_scores, np.nan)
edge_scores = np.array(edge_scores)
# take the inf-norm
normscores = edge_scores / vt.safe_max(edge_scores, nans=False)
return normscores
| [
"utool.ichunks",
"numpy.maximum",
"utool.replace_nones",
"utool.compress",
"utool.dzip",
"utool.unique",
"utool.repr3",
"utool.ProgIter",
"wbia.algo.graph.nx_utils.ensure_multi_index",
"utool.printex",
"wbia.algo.graph.nx_utils.edges_cross",
"utool.ensure_iterable",
"utool.inject2",
"numpy... | [((414, 434), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (424, 434), True, 'import utool as ut\n'), ((444, 469), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (461, 469), False, 'import logging\n'), ((1817, 1841), 'wbia.opendb', 'wbia.opendb', ([], {'dbdir': 'dbdir'}), '(dbdir=dbdir)\n', (1828, 1841), False, 'import wbia\n'), ((1880, 1902), 'tqdm.tqdm', 'tqdm.tqdm', (['qaids_chunk'], {}), '(qaids_chunk)\n', (1889, 1902), False, 'import tqdm\n'), ((1332, 1355), 'utool.take_column', 'ut.take_column', (['keep', '(1)'], {}), '(keep, 1)\n', (1346, 1355), True, 'import utool as ut\n'), ((3975, 4000), 'utool.ensure_iterable', 'ut.ensure_iterable', (['qaids'], {}), '(qaids)\n', (3993, 4000), True, 'import utool as ut\n'), ((8626, 8684), 'wbia.algo.verif.pairfeat.PairwiseFeatureExtractor', 'pairfeat.PairwiseFeatureExtractor', (['infr.ibs'], {'config': 'config'}), '(infr.ibs, config=config)\n', (8659, 8684), False, 'from wbia.algo.verif import pairfeat\n'), ((10072, 10126), 'utool.make_index_lookup', 'ut.make_index_lookup', (['[cm.qaid for cm in infr.cm_list]'], {}), '([cm.qaid for cm in infr.cm_list])\n', (10092, 10126), True, 'import utool as ut\n'), ((13158, 13176), 'utool.ensure_rng', 'ut.ensure_rng', (['rng'], {}), '(rng)\n', (13171, 13176), True, 'import utool as ut\n'), ((13195, 13237), 'utool.ProgIter', 'ut.ProgIter', (['cm_list'], {'lbl': '"""building pairs"""'}), "(cm_list, lbl='building pairs')\n", (13206, 13237), True, 'import utool as ut\n'), ((14410, 14424), 'utool.ddict', 'ut.ddict', (['list'], {}), '(list)\n', (14418, 14424), True, 'import utool as ut\n'), ((14781, 14810), 'utool.take_column', 'ut.take_column', (['max_scores', '(1)'], {}), '(max_scores, 1)\n', (14795, 14810), True, 'import utool as ut\n'), ((15065, 15079), 'utool.ddict', 'ut.ddict', (['dict'], {}), '(dict)\n', (15073, 15079), True, 'import utool as ut\n'), ((17169, 17212), 'utool.nx_delete_edge_attr', 
'ut.nx_delete_edge_attr', (['infr.graph', '"""score"""'], {}), "(infr.graph, 'score')\n", (17191, 17212), True, 'import utool as ut\n'), ((17221, 17263), 'utool.nx_delete_edge_attr', 'ut.nx_delete_edge_attr', (['infr.graph', '"""rank"""'], {}), "(infr.graph, 'rank')\n", (17243, 17263), True, 'import utool as ut\n'), ((17272, 17319), 'utool.nx_delete_edge_attr', 'ut.nx_delete_edge_attr', (['infr.graph', '"""normscore"""'], {}), "(infr.graph, 'normscore')\n", (17294, 17319), True, 'import utool as ut\n'), ((17460, 17497), 'utool.replace_nones', 'ut.replace_nones', (['edge_scores', 'np.nan'], {}), '(edge_scores, np.nan)\n', (17476, 17497), True, 'import utool as ut\n'), ((17520, 17541), 'numpy.array', 'np.array', (['edge_scores'], {}), '(edge_scores)\n', (17528, 17541), True, 'import numpy as np\n'), ((18800, 18841), 'wbia.algo.verif.vsone.OneVsOneProblem', 'vsone.OneVsOneProblem', (['infr'], {'verbose': '(True)'}), '(infr, verbose=True)\n', (18821, 18841), False, 'from wbia.algo.verif import vsone\n'), ((20132, 20170), 'wbia.algo.verif.vsone.OneVsOneProblem', 'vsone.OneVsOneProblem', (['infr'], {'verbose': '(5)'}), '(infr, verbose=5)\n', (20153, 20170), False, 'from wbia.algo.verif import vsone\n'), ((22242, 22303), 'utool.filterflags_general_tags', 'ut.filterflags_general_tags', (['tags_list'], {'has_any': "['photobomb']"}), "(tags_list, has_any=['photobomb'])\n", (22269, 22303), True, 'import utool as ut\n'), ((22323, 22348), 'utool.compress', 'ut.compress', (['edges', 'flags'], {}), '(edges, flags)\n', (22334, 22348), True, 'import utool as ut\n'), ((29015, 29074), 'utool.ProgIter', 'ut.ProgIter', (['pcc_gen'], {'enabled': 'verbose', 'freq': '(1)', 'adjust': '(False)'}), '(pcc_gen, enabled=verbose, freq=1, adjust=False)\n', (29026, 29074), True, 'import utool as ut\n'), ((45150, 45187), 'utool.replace_nones', 'ut.replace_nones', (['edge_scores', 'np.nan'], {}), '(edge_scores, np.nan)\n', (45166, 45187), True, 'import utool as ut\n'), ((45210, 45231), 
'numpy.array', 'np.array', (['edge_scores'], {}), '(edge_scores)\n', (45218, 45231), True, 'import numpy as np\n'), ((5359, 5375), 'tqdm.tqdm', 'tqdm.tqdm', (['qaids'], {}), '(qaids)\n', (5368, 5375), False, 'import tqdm\n'), ((6928, 6962), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['nprocs'], {}), '(nprocs)\n', (6954, 6962), False, 'from concurrent import futures\n'), ((7059, 7111), 'utool.ProgIter', 'ut.ProgIter', (['arg_iter'], {'lbl': '"""submit matching threads"""'}), "(arg_iter, lbl='submit matching threads')\n", (7070, 7111), True, 'import utool as ut\n'), ((9468, 9476), 'wbia.algo.graph.nx_utils.e_', 'e_', (['u', 'v'], {}), '(u, v)\n', (9470, 9476), False, 'from wbia.algo.graph.nx_utils import e_\n'), ((13361, 13422), 'utool.take_percentile_parts', 'ut.take_percentile_parts', (['all_gt_aids', 'top_gt', 'mid_gt', 'bot_gt'], {}), '(all_gt_aids, top_gt, mid_gt, bot_gt)\n', (13385, 13422), True, 'import utool as ut\n'), ((13445, 13506), 'utool.take_percentile_parts', 'ut.take_percentile_parts', (['all_gf_aids', 'top_gf', 'mid_gf', 'bot_gf'], {}), '(all_gf_aids, top_gf, mid_gf, bot_gf)\n', (13469, 13506), True, 'import utool as ut\n'), ((13705, 13757), 'utool.random_sample', 'ut.random_sample', (['unscored_gt_aids', 'rand_gt'], {'rng': 'rng'}), '(unscored_gt_aids, rand_gt, rng=rng)\n', (13721, 13757), True, 'import utool as ut\n'), ((14114, 14172), 'utool.unique', 'ut.unique', (['(gt_aids + gf_aids + rand_gf_aids + rand_gt_aids)'], {}), '(gt_aids + gf_aids + rand_gf_aids + rand_gt_aids)\n', (14123, 14172), True, 'import utool as ut\n'), ((15368, 15395), 'utool.filter_Nones', 'ut.filter_Nones', (['[cm1, cm2]'], {}), '([cm1, cm2])\n', (15383, 15395), True, 'import utool as ut\n'), ((17682, 17718), 'vtool.safe_max', 'vt.safe_max', (['edge_scores'], {'nans': '(False)'}), '(edge_scores, nans=False)\n', (17693, 17718), True, 'import vtool as vt\n'), ((17781, 17808), 'utool.dzip', 'ut.dzip', (['edges', 'edge_scores'], {}), '(edges, 
edge_scores)\n', (17788, 17808), True, 'import utool as ut\n'), ((17846, 17872), 'utool.dzip', 'ut.dzip', (['edges', 'edge_ranks'], {}), '(edges, edge_ranks)\n', (17853, 17872), True, 'import utool as ut\n'), ((21807, 21826), 'utool.load_data', 'ut.load_data', (['fpath'], {}), '(fpath)\n', (21819, 21826), True, 'import utool as ut\n'), ((24902, 24939), 'wbia.algo.graph.nx_utils.edges_cross', 'nxu.edges_cross', (['infr.graph', 'cc1', 'cc2'], {}), '(infr.graph, cc1, cc2)\n', (24917, 24939), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((25819, 25846), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (25840, 25846), True, 'import numpy as np\n'), ((26734, 26778), 'wbia.algo.graph.nx_utils.edges_inside', 'nxu.edges_inside', (['infr.unreviewed_graph', 'pcc'], {}), '(infr.unreviewed_graph, pcc)\n', (26750, 26778), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((27936, 27963), 'itertools.starmap', 'it.starmap', (['e_', 'check_edges'], {}), '(e_, check_edges)\n', (27946, 27963), True, 'import itertools as it\n'), ((35879, 35909), 'utool.compress', 'ut.compress', (['edges', 'need_flags'], {}), '(edges, need_flags)\n', (35890, 35909), True, 'import utool as ut\n'), ((41678, 41711), 'utool.dzip', 'ut.dzip', (['priority_edges', 'priority'], {}), '(priority_edges, priority)\n', (41685, 41711), True, 'import utool as ut\n'), ((45295, 45331), 'vtool.safe_max', 'vt.safe_max', (['edge_scores'], {'nans': '(False)'}), '(edge_scores, nans=False)\n', (45306, 45331), True, 'import vtool as vt\n'), ((5896, 5919), 'utool.take_column', 'ut.take_column', (['keep', '(1)'], {}), '(keep, 1)\n', (5910, 5919), True, 'import utool as ut\n'), ((6249, 6278), 'utool.ichunks', 'ut.ichunks', (['qaids', 'batch_size'], {}), '(qaids, batch_size)\n', (6259, 6278), True, 'import utool as ut\n'), ((7284, 7336), 'utool.ProgIter', 'ut.ProgIter', (['fs_chunk'], {'lbl': '"""getting matching result"""'}), "(fs_chunk, lbl='getting matching result')\n", (7295, 
7336), True, 'import utool as ut\n'), ((7611, 7630), 'utool.flatten', 'ut.flatten', (['results'], {}), '(results)\n', (7621, 7630), True, 'import utool as ut\n'), ((15208, 15216), 'wbia.algo.graph.nx_utils.e_', 'e_', (['u', 'v'], {}), '(u, v)\n', (15210, 15216), False, 'from wbia.algo.graph.nx_utils import e_\n'), ((15933, 15967), 'vtool.safe_argmax', 'vt.safe_argmax', (['scores'], {'nans': '(False)'}), '(scores, nans=False)\n', (15947, 15967), True, 'import vtool as vt\n'), ((21383, 21431), 'utool.printex', 'ut.printex', (['ex', 'message'], {'iswarning': '(True)', 'tb': '(True)'}), '(ex, message, iswarning=True, tb=True)\n', (21393, 21431), True, 'import utool as ut\n'), ((21660, 21682), 'wbia.algo.verif.deploy.Deployer', 'deploy.Deployer', (['dpath'], {}), '(dpath)\n', (21675, 21682), False, 'from wbia.algo.verif import deploy\n'), ((26037, 26063), 'wbia.algo.graph.nx_utils.diag_product', 'nxu.diag_product', (['cc1', 'cc2'], {}), '(cc1, cc2)\n', (26053, 26063), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((26841, 26916), 'wbia.algo.graph.nx_utils.k_edge_augmentation', 'nxu.k_edge_augmentation', (['pos_sub'], {'k': 'pos_k', 'avail': 'unrev_avail', 'partial': '(False)'}), '(pos_sub, k=pos_k, avail=unrev_avail, partial=False)\n', (26864, 26916), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((31537, 31545), 'wbia.algo.graph.nx_utils.e_', 'e_', (['u', 'v'], {}), '(u, v)\n', (31539, 31545), False, 'from wbia.algo.graph.nx_utils import e_\n'), ((38803, 38839), 'utool.take', 'ut.take', (['match_probs', 'priority_edges'], {}), '(match_probs, priority_edges)\n', (38810, 38839), True, 'import utool as ut\n'), ((5629, 5678), 'wbia_pie_v2._plugin.distance_to_score', 'distance_to_score', (['pie_annot_distance'], {'norm': '(500.0)'}), '(pie_annot_distance, norm=500.0)\n', (5646, 5678), False, 'from wbia_pie_v2._plugin import distance_to_score\n'), ((14033, 14077), 'utool.random_sample', 'ut.random_sample', (['_gf_aids', 'rand_gf'], {'rng': 'rng'}), 
'(_gf_aids, rand_gf, rng=rng)\n', (14049, 14077), True, 'import utool as ut\n'), ((21120, 21137), 'wbia.algo.verif.deploy.Deployer', 'deploy.Deployer', ([], {}), '()\n', (21135, 21137), False, 'from wbia.algo.verif import deploy\n'), ((27568, 27623), 'wbia.algo.graph.nx_utils.k_edge_augmentation', 'nxu.k_edge_augmentation', (['pos_sub'], {'k': 'pos_k', 'partial': '(True)'}), '(pos_sub, k=pos_k, partial=True)\n', (27591, 27623), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((27772, 27845), 'wbia.algo.graph.nx_utils.k_edge_augmentation', 'nxu.k_edge_augmentation', (['pos_sub'], {'k': 'pos_k', 'avail': 'full_avail', 'partial': '(True)'}), '(pos_sub, k=pos_k, avail=full_avail, partial=True)\n', (27795, 27845), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((33119, 33136), 'utool.repr3', 'ut.repr3', (['cfgdict'], {}), '(cfgdict)\n', (33127, 33136), True, 'import utool as ut\n'), ((38863, 38919), 'wbia.algo.graph.nx_utils.ensure_multi_index', 'nxu.ensure_multi_index', (['priority_edges', "('aid1', 'aid2')"], {}), "(priority_edges, ('aid1', 'aid2'))\n", (38885, 38919), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((4646, 4662), 'utool.aslist', 'ut.aslist', (['qaids'], {}), '(qaids)\n', (4655, 4662), True, 'import utool as ut\n'), ((4665, 4681), 'utool.aslist', 'ut.aslist', (['daids'], {}), '(daids)\n', (4674, 4681), True, 'import utool as ut\n'), ((27213, 27236), 'networkx.complement', 'nx.complement', (['full_sub'], {}), '(full_sub)\n', (27226, 27236), True, 'import networkx as nx\n'), ((29280, 29293), 'wbia.algo.graph.nx_utils.e_', 'nxu.e_', (['*edge'], {}), '(*edge)\n', (29286, 29293), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((39972, 40022), 'numpy.maximum', 'np.maximum', (['default_priority[flags]', '_probs[flags]'], {}), '(default_priority[flags], _probs[flags])\n', (39982, 40022), True, 'import numpy as np\n'), ((40313, 40363), 'numpy.maximum', 'np.maximum', (['default_priority[flags]', '_probs[flags]'], {}), 
'(default_priority[flags], _probs[flags])\n', (40323, 40363), True, 'import numpy as np\n'), ((40654, 40704), 'numpy.maximum', 'np.maximum', (['default_priority[flags]', '_probs[flags]'], {}), '(default_priority[flags], _probs[flags])\n', (40664, 40704), True, 'import numpy as np\n'), ((43664, 43718), 'utool.repr4', 'ut.repr4', (['infr.dummy_verif.dummy_params'], {'nl': '(1)', 'si': '(True)'}), '(infr.dummy_verif.dummy_params, nl=1, si=True)\n', (43672, 43718), True, 'import utool as ut\n')] |
from dk_metric import image_metrics
import os
from multiprocessing import Process, Lock, Manager
import numpy as np
import time
import sys
'''python3 main.py gt_folder pre_folder output_folder [optional startt endt stepsize]'''
gt_folder = sys.argv[1]
prop_folder = sys.argv[2]
output_csv = os.path.join(sys.argv[3], 'scores.csv')
startt, endt, stepsize = 0.05, 0.9, 0.01
if len(sys.argv) > 4:
startt, endt, stepsize = list(map(float, sys.argv[4:]))
radius = 3
Thread_Cnt = 16
files = os.listdir(prop_folder)
lock = Lock()
ALL_thresholds = []
ALL_precision, ALL_recall, ALL_F1, ALL_Jaccard, ALL_mod_prec, ALL_mod_recall, ALL_mod_F1 = [],[],[],[],[],[],[]
manager = Manager()
def cal_fp_tp(files, l, threshold):
# sTP, sFP, sFN, msTP, msFP, msFN
start_time = time.time()
sTP, sFP, sFN, msTP, msFP, msFN = 0, 0, 0, 0, 0, 0
for i, f in enumerate(files):
gt_path = os.path.join(gt_folder, f)
prop_path = os.path.join(prop_folder, f)
if i != 0 and i % 200 == 0:
print(os.getpid(), i, 'th file... use', time.time() - start_time, 'seconds.')
TP, FP, FN = image_metrics.get_TP_FP_FN(gt_path, prop_path, threshold=threshold)
mTP, mFP, mFN = image_metrics.get_mod_TP_FP_FN(gt_path, prop_path, radius=radius, threshold=threshold)
sTP += TP
sFP += FP
sFN += FN
msTP += mTP
msFP += mFP
msFN += mFN
with lock:
l[0] += sTP
l[1] += sFP
l[2] += sFN
l[3] += msTP
l[4] += msFP
l[5] += msFN
thresholds = np.arange(startt, endt, stepsize).tolist()
for threshold in thresholds:
ALL_thresholds.append(threshold)
print('-------------', threshold, '-------------')
threshold *= 255
l = manager.list([0, 0, 0, 0, 0, 0])
pool = []
files_threads = np.array_split(files, Thread_Cnt)
for i in range(Thread_Cnt):
pool.append(Process(target=cal_fp_tp, args=(files_threads[i].tolist(), l, threshold,)))
for t in pool:
t.start()
for t in pool:
t.join()
sTP, sFP, sFN, msTP, msFP, msFN = list(l)
Precision = sTP / (sTP + sFP) if (sTP + sFP != 0) else 1
Recall = sTP / (sTP + sFN) if(sTP + sFN != 0) else 1
Jaccard = 1 / (1/Precision + 1/Recall - 1) if (Precision > 0 and Recall > 0) else 0
F1 = 2 * Precision * Recall / (Precision + Recall) if (Precision > 0 and Recall > 0) else 0
ALL_precision.append(Precision)
ALL_recall.append(Recall)
ALL_Jaccard.append(Jaccard)
ALL_F1.append(F1)
mPrecision = msTP / (msTP + msFP) if (msTP + msFP != 0) else 1
mRecall = msTP / (msTP + msFN) if(msTP + msFN != 0) else 1
mF1 = 2 * mPrecision * mRecall / (mPrecision + mRecall) if (mPrecision > 0 and mRecall > 0) else 0
ALL_mod_prec.append(mPrecision)
ALL_mod_recall.append(mRecall)
ALL_mod_F1.append(mF1)
with open(output_csv, 'w') as output:
data_thre = 'Threshold,' + ','.join(['{:.6f}'.format(v) for v in ALL_thresholds])
data_pre = 'Precision,' + ','.join(['{:.6f}'.format(v) for v in ALL_precision])
data_rec = 'Recall,' + ','.join(['{:.6f}'.format(v) for v in ALL_recall])
data_jac = 'Jaccard,' + ','.join(['{:.6f}'.format(v) for v in ALL_Jaccard])
data_f1 = 'F1,' + ','.join(['{:.6f}'.format(v) for v in ALL_F1])
data_mpre = 'Mod_Prec,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_prec])
data_mrec = 'Mod_Rec,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_recall])
data_mf1 = 'Mod_F1,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_F1])
output.write('\n'.join([data_thre, data_pre, data_rec, data_jac, data_f1, data_mpre, data_mrec, data_mf1]))
| [
"os.getpid",
"multiprocessing.Lock",
"multiprocessing.Manager",
"dk_metric.image_metrics.get_TP_FP_FN",
"time.time",
"numpy.arange",
"numpy.array_split",
"dk_metric.image_metrics.get_mod_TP_FP_FN",
"os.path.join",
"os.listdir"
] | [((293, 332), 'os.path.join', 'os.path.join', (['sys.argv[3]', '"""scores.csv"""'], {}), "(sys.argv[3], 'scores.csv')\n", (305, 332), False, 'import os\n'), ((494, 517), 'os.listdir', 'os.listdir', (['prop_folder'], {}), '(prop_folder)\n', (504, 517), False, 'import os\n'), ((525, 531), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (529, 531), False, 'from multiprocessing import Process, Lock, Manager\n'), ((675, 684), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (682, 684), False, 'from multiprocessing import Process, Lock, Manager\n'), ((777, 788), 'time.time', 'time.time', ([], {}), '()\n', (786, 788), False, 'import time\n'), ((1827, 1860), 'numpy.array_split', 'np.array_split', (['files', 'Thread_Cnt'], {}), '(files, Thread_Cnt)\n', (1841, 1860), True, 'import numpy as np\n'), ((896, 922), 'os.path.join', 'os.path.join', (['gt_folder', 'f'], {}), '(gt_folder, f)\n', (908, 922), False, 'import os\n'), ((943, 971), 'os.path.join', 'os.path.join', (['prop_folder', 'f'], {}), '(prop_folder, f)\n', (955, 971), False, 'import os\n'), ((1120, 1187), 'dk_metric.image_metrics.get_TP_FP_FN', 'image_metrics.get_TP_FP_FN', (['gt_path', 'prop_path'], {'threshold': 'threshold'}), '(gt_path, prop_path, threshold=threshold)\n', (1146, 1187), False, 'from dk_metric import image_metrics\n'), ((1212, 1303), 'dk_metric.image_metrics.get_mod_TP_FP_FN', 'image_metrics.get_mod_TP_FP_FN', (['gt_path', 'prop_path'], {'radius': 'radius', 'threshold': 'threshold'}), '(gt_path, prop_path, radius=radius, threshold\n =threshold)\n', (1242, 1303), False, 'from dk_metric import image_metrics\n'), ((1566, 1599), 'numpy.arange', 'np.arange', (['startt', 'endt', 'stepsize'], {}), '(startt, endt, stepsize)\n', (1575, 1599), True, 'import numpy as np\n'), ((1026, 1037), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1035, 1037), False, 'import os\n'), ((1060, 1071), 'time.time', 'time.time', ([], {}), '()\n', (1069, 1071), False, 'import time\n')] |
import numpy as np
import multiprocessing
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_array, column_or_1d as c1d
from sklearn.model_selection import ParameterGrid
import tbats.error as error
class Estimator(BaseEstimator):
"""Base estimator for BATS and TBATS models
Methods
-------
fit(y)
Fit to y and select best performing model based on AIC criterion.
"""
def __init__(self, context,
use_box_cox=None, box_cox_bounds=(0, 1),
use_trend=None, use_damped_trend=None,
seasonal_periods=None, use_arma_errors=True,
n_jobs=None):
""" Class constructor
Parameters
----------
context: abstract.ContextInterface
For advanced users only. Provide this to override default behaviors
use_box_cox: bool or None, optional (default=None)
If Box-Cox transformation of original series should be applied.
When None both cases shall be considered and better is selected by AIC.
box_cox_bounds: tuple, shape=(2,), optional (default=(0, 1))
Minimal and maximal Box-Cox parameter values.
use_trend: bool or None, optional (default=None)
Indicates whether to include a trend or not.
When None both cases shall be considered and better is selected by AIC.
use_damped_trend: bool or None, optional (default=None)
Indicates whether to include a damping parameter in the trend or not.
Applies only when trend is used.
When None both cases shall be considered and better is selected by AIC.
seasonal_periods: iterable or array-like, optional (default=None)
Length of each of the periods (amount of observations in each period).
BATS accepts only int values here.
When None or empty array, non-seasonal model shall be fitted.
use_arma_errors: bool, optional (default=True)
When True BATS will try to improve the model by modelling residuals with ARMA.
Best model will be selected by AIC.
If False, ARMA residuals modeling will not be considered.
show_warnings: bool, optional (default=True)
If warnings should be shown or not.
Also see Model.warnings variable that contains all model related warnings.
n_jobs: int, optional (default=None)
How many jobs to run in parallel when fitting BATS model.
When not provided BATS shall try to utilize all available cpu cores.
"""
self.context = context
self.n_jobs = n_jobs
self.seasonal_periods = self._normalize_seasonal_periods(seasonal_periods)
self.use_box_cox = use_box_cox
self.box_cox_bounds = box_cox_bounds
self.use_arma_errors = use_arma_errors
self.use_trend = use_trend
if use_trend is False:
if use_damped_trend is True:
self.context.get_exception_handler().warn(
"When use_damped_trend can be used only with use_trend. Setting damped trend to False.",
error.InputArgsWarning
)
use_damped_trend = False
self.use_damped_trend = use_damped_trend
def _normalize_seasonal_periods(self, seasonal_periods):
# abstract method
raise NotImplementedError()
def _do_fit(self, y):
# abstract method
raise NotImplementedError()
def fit(self, y):
"""Fit model to observations ``y``.
:param y: array-like or iterable, shape=(n_samples,)
:return: abstract.Model, Fitted model
"""
y = self._validate(y)
if y is False:
# Input data is not valid and no exception was raised yet.
# This can happen only when one overrides default exception handler (see tbats.error.ExceptionHandler)
return None
if np.allclose(y, y[0]):
return self.context.create_constant_model(y[0]).fit(y)
best_model = self._do_fit(y)
for warning in best_model.warnings:
self.context.get_exception_handler().warn(warning, error.ModelWarning)
return best_model
def _validate(self, y):
"""Validates input time series. Also adjusts box_cox if necessary."""
try:
y = c1d(check_array(y, ensure_2d=False, force_all_finite=True, ensure_min_samples=1,
copy=True, dtype=np.float64)) # type: np.ndarray
except Exception as validation_exception:
self.context.get_exception_handler().exception(
"y series is invalid", error.InputArgsException, previous_exception=validation_exception
)
return False
if np.any(y <= 0):
if self.use_box_cox is True:
self.context.get_exception_handler().warn(
"Box-Cox transformation (use_box_cox) was forced to True "
"but there are negative values in input series. "
"Setting use_box_cox to False.",
error.InputArgsWarning
)
self.use_box_cox = False
return y
def _case_fit(self, components_combination):
"""Internal method used by parallel computation."""
case = self.context.create_case_from_dictionary(**components_combination)
return case.fit(self._y)
def _choose_model_from_possible_component_settings(self, y, components_grid):
"""Fits all models in a grid and returns best one by AIC
Returns
-------
abstract.Model
Best model by AIC
"""
self._y = y
# note n_jobs = None means to use cpu_count()
pool = multiprocessing.pool.Pool(processes=self.n_jobs)
models = pool.map(self._case_fit, components_grid)
pool.close()
self._y = None # clean-up
if len(models) == 0:
return None
best_model = models[0]
for model in models:
if model.aic < best_model.aic:
best_model = model
return best_model
def _prepare_components_grid(self, seasonal_harmonics=None):
"""Provides a grid of all allowed model component combinations.
Parameters
----------
seasonal_harmonics: array-like or None
When provided all component combinations shall contain those harmonics
"""
allowed_combinations = []
use_box_cox = self.use_box_cox
base_combination = {
'use_box_cox': self.__prepare_component_boolean_combinations(use_box_cox),
'box_cox_bounds': [self.box_cox_bounds],
'use_arma_errors': [self.use_arma_errors],
'seasonal_periods': [self.seasonal_periods],
}
if seasonal_harmonics is not None:
base_combination['seasonal_harmonics'] = [seasonal_harmonics]
if self.use_trend is not True: # False or None
allowed_combinations.append({
**base_combination,
**{
'use_trend': [False],
'use_damped_trend': [False], # Damped trend must be False when trend is False
}
})
if self.use_trend is not False: # True or None
allowed_combinations.append({
**base_combination,
**{
'use_trend': [True],
'use_damped_trend': self.__prepare_component_boolean_combinations(self.use_damped_trend),
}
})
return ParameterGrid(allowed_combinations)
def _prepare_non_seasonal_components_grid(self):
"""Provides a grid of all allowed non-season model component combinations."""
allowed_combinations = []
use_box_cox = self.use_box_cox
base_combination = {
'use_box_cox': self.__prepare_component_boolean_combinations(use_box_cox),
'box_cox_bounds': [self.box_cox_bounds],
'use_arma_errors': [self.use_arma_errors],
'seasonal_periods': [[]],
}
if self.use_trend is not True: # False or None
allowed_combinations.append({
**base_combination,
**{
'use_trend': [False],
'use_damped_trend': [False], # Damped trend must be False when trend is False
}
})
if self.use_trend is not False: # True or None
allowed_combinations.append({
**base_combination,
**{
'use_trend': [True],
'use_damped_trend': self.__prepare_component_boolean_combinations(self.use_damped_trend),
}
})
return ParameterGrid(allowed_combinations)
@staticmethod
def __prepare_component_boolean_combinations(param):
combinations = [param]
if param is None:
combinations = [False, True]
return combinations
def _normalize_seasonal_periods_to_type(self, seasonal_periods, dtype):
"""Validates seasonal periods and normalizes them
Normalization ensures periods are of proper type, unique and sorted.
"""
if seasonal_periods is not None:
try:
seasonal_periods = c1d(check_array(seasonal_periods, ensure_2d=False, force_all_finite=True,
ensure_min_samples=0,
copy=True, dtype=dtype))
except Exception as validation_exception:
self.context.get_exception_handler().exception("seasonal_periods definition is invalid",
error.InputArgsException,
previous_exception=validation_exception)
seasonal_periods = np.unique(seasonal_periods)
if len(seasonal_periods[np.where(seasonal_periods <= 1)]) > 0:
self.context.get_exception_handler().warn(
"All seasonal periods should be values greater than 1. "
"Ignoring all seasonal period values that do not meet this condition.",
error.InputArgsWarning
)
seasonal_periods = seasonal_periods[np.where(seasonal_periods > 1)]
seasonal_periods.sort()
if len(seasonal_periods) == 0:
seasonal_periods = None
return seasonal_periods
| [
"multiprocessing.pool.Pool",
"numpy.allclose",
"numpy.any",
"numpy.where",
"sklearn.model_selection.ParameterGrid",
"numpy.unique",
"sklearn.utils.validation.check_array"
] | [((3991, 4011), 'numpy.allclose', 'np.allclose', (['y', 'y[0]'], {}), '(y, y[0])\n', (4002, 4011), True, 'import numpy as np\n'), ((4838, 4852), 'numpy.any', 'np.any', (['(y <= 0)'], {}), '(y <= 0)\n', (4844, 4852), True, 'import numpy as np\n'), ((5840, 5888), 'multiprocessing.pool.Pool', 'multiprocessing.pool.Pool', ([], {'processes': 'self.n_jobs'}), '(processes=self.n_jobs)\n', (5865, 5888), False, 'import multiprocessing\n'), ((7706, 7741), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['allowed_combinations'], {}), '(allowed_combinations)\n', (7719, 7741), False, 'from sklearn.model_selection import ParameterGrid\n'), ((8913, 8948), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['allowed_combinations'], {}), '(allowed_combinations)\n', (8926, 8948), False, 'from sklearn.model_selection import ParameterGrid\n'), ((10076, 10103), 'numpy.unique', 'np.unique', (['seasonal_periods'], {}), '(seasonal_periods)\n', (10085, 10103), True, 'import numpy as np\n'), ((4413, 4522), 'sklearn.utils.validation.check_array', 'check_array', (['y'], {'ensure_2d': '(False)', 'force_all_finite': '(True)', 'ensure_min_samples': '(1)', 'copy': '(True)', 'dtype': 'np.float64'}), '(y, ensure_2d=False, force_all_finite=True, ensure_min_samples=1,\n copy=True, dtype=np.float64)\n', (4424, 4522), False, 'from sklearn.utils.validation import check_array, column_or_1d as c1d\n'), ((10516, 10546), 'numpy.where', 'np.where', (['(seasonal_periods > 1)'], {}), '(seasonal_periods > 1)\n', (10524, 10546), True, 'import numpy as np\n'), ((9473, 9592), 'sklearn.utils.validation.check_array', 'check_array', (['seasonal_periods'], {'ensure_2d': '(False)', 'force_all_finite': '(True)', 'ensure_min_samples': '(0)', 'copy': '(True)', 'dtype': 'dtype'}), '(seasonal_periods, ensure_2d=False, force_all_finite=True,\n ensure_min_samples=0, copy=True, dtype=dtype)\n', (9484, 9592), False, 'from sklearn.utils.validation import check_array, column_or_1d as c1d\n'), ((10140, 
10171), 'numpy.where', 'np.where', (['(seasonal_periods <= 1)'], {}), '(seasonal_periods <= 1)\n', (10148, 10171), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# --------------------------------------------------------
import numpy as np
import os
from os.path import join as pjoin
#from distutils.core import setup
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
#change for windows, by MrX
nvcc_bin = 'nvcc.exe'
lib_dir = 'lib/x64'
import distutils.msvc9compiler
distutils.msvc9compiler.VERSION = 14.0
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
ext_modules = [
# unix _compile: obj, src, ext, cc_args, extra_postargs, pp_opts
Extension(
"bbox",
sources=["bbox.pyx"],
extra_compile_args={},
include_dirs = [numpy_include]
),
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': build_ext},
)
| [
"distutils.extension.Extension",
"numpy.get_numpy_include",
"numpy.get_include",
"setuptools.setup"
] | [((1212, 1299), 'setuptools.setup', 'setup', ([], {'name': '"""fast_rcnn"""', 'ext_modules': 'ext_modules', 'cmdclass': "{'build_ext': build_ext}"}), "(name='fast_rcnn', ext_modules=ext_modules, cmdclass={'build_ext':\n build_ext})\n", (1217, 1299), False, 'from setuptools import setup\n'), ((888, 904), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (902, 904), True, 'import numpy as np\n'), ((1067, 1164), 'distutils.extension.Extension', 'Extension', (['"""bbox"""'], {'sources': "['bbox.pyx']", 'extra_compile_args': '{}', 'include_dirs': '[numpy_include]'}), "('bbox', sources=['bbox.pyx'], extra_compile_args={}, include_dirs\n =[numpy_include])\n", (1076, 1164), False, 'from distutils.extension import Extension\n'), ((950, 972), 'numpy.get_numpy_include', 'np.get_numpy_include', ([], {}), '()\n', (970, 972), True, 'import numpy as np\n')] |
import cv2
from dlr import DLRModel
import greengrasssdk
import logging
import numpy as np
import os
from threading import Timer
import time
import railController
import streamServer
import sys
import utils
WIDTH=640
HEIGHT=480
THRESH = 90
MODEL_PATH = os.environ.get("MODEL_PATH", "./model")
dlr_model = DLRModel(MODEL_PATH, "gpu")
current_milli_time = lambda: int(round(time.time() * 1000))
VIDEO_DEVICE = os.environ.get("VIDEO_DEVICE", "/dev/video0")
VIDEO_WIDTH = int(os.environ.get("VIDEO_WIDTH", "640"))
VIDEO_HEIGHT = int(os.environ.get("VIDEO_HEIGHT", "480"))
STREAM_IMAGE_PATH = "/tmp/bfsushi"
STREAM_IMAGE_FILE = STREAM_IMAGE_PATH + "/detected.jpg"
ANORMAL_COUNT = int(os.environ.get("ANORMAL_COUNT", "2"))
# Setup logging to stdout
formatter = "%(asctime)s : [%(levelname)s] %(message)s"
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=formatter)
# create stream temp dir
if not os.path.exists(STREAM_IMAGE_PATH):
os.mkdir(STREAM_IMAGE_PATH)
# synset.txt is a list of class name
synsets = None
with open("synset.txt", "r") as f:
synsets = [l.rstrip() for l in f]
client = greengrasssdk.client("iot-data")
railController = railController.RailController(client)
anormal_count = 0
def open_usb_camera():
gst_str = ("v4l2src device={} ! "
"video/x-raw, width=(int){}, height=(int){}, framerate=(fraction)30/1 ! "
"videoconvert ! video/x-raw, , format=(string)BGR ! appsink"
).format(
VIDEO_DEVICE, VIDEO_WIDTH, VIDEO_HEIGHT
)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
cap = open_usb_camera()
def predict(image_data):
global anormal_count
# use OpenCV to detect sushi saucer
rect = detect_sushi(image_data)
if rect is None:
cv2.imwrite(STREAM_IMAGE_FILE, image_data)
return
img = image_data[rect[0]:rect[1], rect[2]:rect[3]]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (224, 224))
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = img[np.newaxis, :]
img = np.array(img)
flattened_data = img.astype(np.float32)
t1 = current_milli_time()
prediction_scores = dlr_model.run({"data" : flattened_data})#.squeeze()
t2 = current_milli_time()
logger.info("finish predicting. duration: {}ms".format(t2 - t1))
max_score_id = np.argmax(prediction_scores)
max_score = np.max(prediction_scores)
predicted_class_name = synsets[max_score_id].split(" ")[1]
max_score = max_score * 100
if max_score < THRESH:
logger.debug("score is too low {:.2f}% {}".format(max_score, predicted_class_name))
cv2.imwrite(STREAM_IMAGE_FILE, image_data)
return
# Prepare result
color = (240, 168, 34)
if predicted_class_name.endswith("ship"):
anormal_count = 0
logger.debug("Ship detected ignore this.")
cv2.imwrite(STREAM_IMAGE_FILE, image_data)
return
elif predicted_class_name.endswith("anormal"):
# detect anormal sushi
anormal_count = anormal_count + 1
elif predicted_class_name.endswith("empty"):
# detect empty saucer
color = (90, 90, 90)
anormal_count = 0
else:
# normal sushi
anormal_count = 0
if anormal_count >= ANORMAL_COUNT:
color = (0, 0, 255)
anormal_count = 0
logger.debug("detect anormal. publish close message.")
# close the rail
railController.close_rail()
elif anormal_count > 0:
logger.debug("detect anormal {} times.".format(anormal_count))
# write predicted result on image
cv2.rectangle(image_data, (rect[2], rect[0]),(rect[3], rect[1]), color, thickness=2)
label = "{}: {:.2f}%".format(predicted_class_name, max_score)
cv2.putText(image_data, label, (rect[2], rect[0] + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
cv2.imwrite(STREAM_IMAGE_FILE, image_data)
# Send result
logger.info("Prediction Result: score:{:.2f}% {}".format(max_score, predicted_class_name))
# read image from camera and predict
def predict_from_cam():
ret,image_data = cap.read()
if ret:
predict(image_data)
else:
logger.error("image capture faild")
# Use OpenCV HoughCircles to find sushi saucer.
def detect_sushi(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT,
dp=1, minDist=100, param1=50, param2=30,
minRadius=170, maxRadius=400)
if circles is None:
return None
obj_width = 0
obj_height = 0
circles = np.uint16(np.around(circles))
for (x, y, r) in circles[0]:
x, y, r = int(x), int(y), int(r)
obj_top = int(y - r - 10)
if obj_top < 0:
obj_top = 0
obj_left = int(x - r - 10)
if obj_left < 0:
obj_left = 0
obj_width = int(r*2+20)
obj_right = obj_left + obj_width
if obj_right > WIDTH:
obj_right = WIDTH
obj_width = WIDTH - obj_left
obj_height = int(r*2+20)
obj_bottom = obj_top + obj_height
if obj_bottom > HEIGHT:
obj_bottom = HEIGHT
obj_height = HEIGHT - obj_top
break
if obj_width < 380 or obj_height < 380 or obj_width > 420 or obj_height > 420:
# skip if circle is small or too large
return None
# return the detected rectangle
return (obj_top, obj_bottom, obj_left, obj_right)
# infinite loop to detect sushi
def detection_run():
if dlr_model is not None:
predict_from_cam()
# Asynchronously schedule this function to be run again
Timer(0, detection_run).start()
detection_run()
# Distribute image stream
streamServer.main()
# Lambda Function is not use for event
def function_handler(event, context):
return | [
"os.mkdir",
"threading.Timer",
"numpy.argmax",
"railController.RailController",
"cv2.medianBlur",
"numpy.around",
"cv2.rectangle",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"dlr.DLRModel",
"numpy.max",
"numpy.swapaxes",
"cv2.resize",
"railController.close_rail",
"greengrasssdk.c... | [((255, 294), 'os.environ.get', 'os.environ.get', (['"""MODEL_PATH"""', '"""./model"""'], {}), "('MODEL_PATH', './model')\n", (269, 294), False, 'import os\n'), ((307, 334), 'dlr.DLRModel', 'DLRModel', (['MODEL_PATH', '"""gpu"""'], {}), "(MODEL_PATH, 'gpu')\n", (315, 334), False, 'from dlr import DLRModel\n'), ((410, 455), 'os.environ.get', 'os.environ.get', (['"""VIDEO_DEVICE"""', '"""/dev/video0"""'], {}), "('VIDEO_DEVICE', '/dev/video0')\n", (424, 455), False, 'import os\n'), ((811, 838), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (828, 838), False, 'import logging\n'), ((839, 915), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'formatter'}), '(stream=sys.stdout, level=logging.INFO, format=formatter)\n', (858, 915), False, 'import logging\n'), ((1152, 1184), 'greengrasssdk.client', 'greengrasssdk.client', (['"""iot-data"""'], {}), "('iot-data')\n", (1172, 1184), False, 'import greengrasssdk\n'), ((1202, 1239), 'railController.RailController', 'railController.RailController', (['client'], {}), '(client)\n', (1231, 1239), False, 'import railController\n'), ((5868, 5887), 'streamServer.main', 'streamServer.main', ([], {}), '()\n', (5885, 5887), False, 'import streamServer\n'), ((474, 510), 'os.environ.get', 'os.environ.get', (['"""VIDEO_WIDTH"""', '"""640"""'], {}), "('VIDEO_WIDTH', '640')\n", (488, 510), False, 'import os\n'), ((531, 568), 'os.environ.get', 'os.environ.get', (['"""VIDEO_HEIGHT"""', '"""480"""'], {}), "('VIDEO_HEIGHT', '480')\n", (545, 568), False, 'import os\n'), ((681, 717), 'os.environ.get', 'os.environ.get', (['"""ANORMAL_COUNT"""', '"""2"""'], {}), "('ANORMAL_COUNT', '2')\n", (695, 717), False, 'import os\n'), ((949, 982), 'os.path.exists', 'os.path.exists', (['STREAM_IMAGE_PATH'], {}), '(STREAM_IMAGE_PATH)\n', (963, 982), False, 'import os\n'), ((988, 1015), 'os.mkdir', 'os.mkdir', (['STREAM_IMAGE_PATH'], {}), 
'(STREAM_IMAGE_PATH)\n', (996, 1015), False, 'import os\n'), ((1596, 1640), 'cv2.VideoCapture', 'cv2.VideoCapture', (['gst_str', 'cv2.CAP_GSTREAMER'], {}), '(gst_str, cv2.CAP_GSTREAMER)\n', (1612, 1640), False, 'import cv2\n'), ((1946, 1982), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1958, 1982), False, 'import cv2\n'), ((1993, 2020), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (2003, 2020), False, 'import cv2\n'), ((2031, 2053), 'numpy.swapaxes', 'np.swapaxes', (['img', '(0)', '(2)'], {}), '(img, 0, 2)\n', (2042, 2053), True, 'import numpy as np\n'), ((2064, 2086), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(2)'], {}), '(img, 1, 2)\n', (2075, 2086), True, 'import numpy as np\n'), ((2126, 2139), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2134, 2139), True, 'import numpy as np\n'), ((2409, 2437), 'numpy.argmax', 'np.argmax', (['prediction_scores'], {}), '(prediction_scores)\n', (2418, 2437), True, 'import numpy as np\n'), ((2454, 2479), 'numpy.max', 'np.max', (['prediction_scores'], {}), '(prediction_scores)\n', (2460, 2479), True, 'import numpy as np\n'), ((3675, 3764), 'cv2.rectangle', 'cv2.rectangle', (['image_data', '(rect[2], rect[0])', '(rect[3], rect[1])', 'color'], {'thickness': '(2)'}), '(image_data, (rect[2], rect[0]), (rect[3], rect[1]), color,\n thickness=2)\n', (3688, 3764), False, 'import cv2\n'), ((3830, 3931), 'cv2.putText', 'cv2.putText', (['image_data', 'label', '(rect[2], rect[0] + 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(image_data, label, (rect[2], rect[0] + 15), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n', (3841, 3931), False, 'import cv2\n'), ((3931, 3973), 'cv2.imwrite', 'cv2.imwrite', (['STREAM_IMAGE_FILE', 'image_data'], {}), '(STREAM_IMAGE_FILE, image_data)\n', (3942, 3973), False, 'import cv2\n'), ((4360, 4397), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, 
cv2.COLOR_BGR2GRAY)\n', (4372, 4397), False, 'import cv2\n'), ((4409, 4432), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (4423, 4432), False, 'import cv2\n'), ((4447, 4564), 'cv2.HoughCircles', 'cv2.HoughCircles', (['blur', 'cv2.HOUGH_GRADIENT'], {'dp': '(1)', 'minDist': '(100)', 'param1': '(50)', 'param2': '(30)', 'minRadius': '(170)', 'maxRadius': '(400)'}), '(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=100, param1=50,\n param2=30, minRadius=170, maxRadius=400)\n', (4463, 4564), False, 'import cv2\n'), ((1822, 1864), 'cv2.imwrite', 'cv2.imwrite', (['STREAM_IMAGE_FILE', 'image_data'], {}), '(STREAM_IMAGE_FILE, image_data)\n', (1833, 1864), False, 'import cv2\n'), ((2702, 2744), 'cv2.imwrite', 'cv2.imwrite', (['STREAM_IMAGE_FILE', 'image_data'], {}), '(STREAM_IMAGE_FILE, image_data)\n', (2713, 2744), False, 'import cv2\n'), ((2940, 2982), 'cv2.imwrite', 'cv2.imwrite', (['STREAM_IMAGE_FILE', 'image_data'], {}), '(STREAM_IMAGE_FILE, image_data)\n', (2951, 2982), False, 'import cv2\n'), ((3505, 3532), 'railController.close_rail', 'railController.close_rail', ([], {}), '()\n', (3530, 3532), False, 'import railController\n'), ((4740, 4758), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (4749, 4758), True, 'import numpy as np\n'), ((5792, 5815), 'threading.Timer', 'Timer', (['(0)', 'detection_run'], {}), '(0, detection_run)\n', (5797, 5815), False, 'from threading import Timer\n'), ((374, 385), 'time.time', 'time.time', ([], {}), '()\n', (383, 385), False, 'import time\n')] |
#!/usr/bin/env python
"""
Test module for TwoPhaseFlow
"""
import pytest
import tables
import numpy as np
import proteus.defaults
from proteus import Context
from proteus import default_so
from proteus.iproteus import *
import os
import sys
Profiling.logLevel=1
Profiling.verbose=True
class TestTwoPhaseFlow(object):
def setup_method(self,method):
self._scriptdir = os.path.dirname(__file__)
self.path = proteus.__path__[0]+"/tests/TwoPhaseFlow/"
def teardown_method(self, method):
""" Tear down function """
FileList = ['marin.h5','marin.xmf'
'moses.h5','moses.xmf'
'damBreak.h5','damBreak.xmf'
'damBreak_solver_options.h5','damBreak_solver_options.xmf'
'TwoDimBucklingFlow.h5','TwoDimBucklingFlow.xmf'
'filling.h5','filling.xmf'
]
for file in FileList:
if os.path.isfile(file):
os.remove(file)
else:
pass
def compare_vs_saved_files(self,name):
actual = tables.open_file(name+'.h5','r')
expected_path = 'comparison_files/' + 'comparison_' + name + '_phi_t2.csv'
#write comparison file
#np.array(actual.root.phi_t2).tofile(os.path.join(self._scriptdir, expected_path),sep=",")
np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.phi_t2).flatten(),decimal=6)
expected_path = 'comparison_files/' + 'comparison_' + name + '_velocity_t2.csv'
#write comparison file
#np.array(actual.root.velocity_t2).tofile(os.path.join(self._scriptdir, expected_path),sep=",")
np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.velocity_t2).flatten(),decimal=6)
actual.close()
# *** 2D tests *** #
def test_risingBubble(self): #uses structured triangle mesh
os.system("parun --TwoPhaseFlow --path " + self.path + " "
"risingBubble.py -l5 -v -C 'final_time=0.1 dt_output=0.1 refinement=1'")
self.compare_vs_saved_files("risingBubble")
def test_damBreak(self):
os.system("parun --TwoPhaseFlow --path " + self.path + " "
"damBreak.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.1'")
self.compare_vs_saved_files("damBreak")
@pytest.mark.skip(reason="numerics are very sensitive, hashdist build doesn't pass but conda does")
def test_damBreak_solver_options(self):
os.system("parun --TwoPhaseFlow --path " + self.path + " "
"damBreak_solver_options.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.1'")
self.compare_vs_saved_files("damBreak_solver_options")
# @pytest.mark.skip(reason="long test")
def test_TwoDimBucklingFlow(self):
os.system("parun --TwoPhaseFlow --path " + self.path + " "
"TwoDimBucklingFlow.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.09'")
self.compare_vs_saved_files("TwoDimBucklingFlow")
# @pytest.mark.skip(reason="long test")
@pytest.mark.skip(reason="need to redo after history revision")
def test_fillingTank(self):
os.system("parun --TwoPhaseFlow --path " + self.path + " "
"fillingTank.py -l5 -v -C 'final_time=0.02 dt_output=0.02 he=0.01'")
self.compare_vs_saved_files("fillingTank")
# *** 3D tests *** #
    def test_marin(self):
        """Run the 3D MARIN dam-break benchmark via parun and diff outputs against saved files."""
        os.system("parun --TwoPhaseFlow --path " + self.path + " "
                  "marin.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.5'")
        self.compare_vs_saved_files("marin")
    def test_moses(self):
        """Run the 3D 'moses' benchmark via parun and diff outputs against saved files."""
        os.system("parun --TwoPhaseFlow --path " + self.path + " "
                  "moses.py -l5 -v -C 'final_time=0.1 dt_output=0.1 he=0.5'")
        self.compare_vs_saved_files("moses")
| [
"os.remove",
"os.path.dirname",
"os.system",
"os.path.isfile",
"numpy.array",
"tables.open_file",
"pytest.mark.skip",
"os.path.join"
] | [((2437, 2540), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""numerics are very sensitive, hashdist build doesn\'t pass but conda does"""'}), '(reason=\n "numerics are very sensitive, hashdist build doesn\'t pass but conda does")\n', (2453, 2540), False, 'import pytest\n'), ((3155, 3217), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""need to redo after history revision"""'}), "(reason='need to redo after history revision')\n", (3171, 3217), False, 'import pytest\n'), ((380, 405), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (395, 405), False, 'import os\n'), ((1095, 1130), 'tables.open_file', 'tables.open_file', (["(name + '.h5')", '"""r"""'], {}), "(name + '.h5', 'r')\n", (1111, 1130), False, 'import tables\n'), ((2003, 2135), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " risingBubble.py -l5 -v -C \'final_time=0.1 dt_output=0.1 refinement=1\'")'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " risingBubble.py -l5 -v -C \'final_time=0.1 dt_output=0.1 refinement=1\'")\n', (2012, 2135), False, 'import os\n'), ((2243, 2365), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " damBreak.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.1\'")'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " damBreak.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.1\'")\n', (2252, 2365), False, 'import os\n'), ((2588, 2730), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " damBreak_solver_options.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.1\'"\n )'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " damBreak_solver_options.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.1\'"\n )\n', (2597, 2730), False, 'import os\n'), ((2897, 3030), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " TwoDimBucklingFlow.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.09\'")'], {}), 
'(\'parun --TwoPhaseFlow --path \' + self.path +\n " TwoDimBucklingFlow.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.09\'")\n', (2906, 3030), False, 'import os\n'), ((3258, 3386), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " fillingTank.py -l5 -v -C \'final_time=0.02 dt_output=0.02 he=0.01\'")'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " fillingTank.py -l5 -v -C \'final_time=0.02 dt_output=0.02 he=0.01\'")\n', (3267, 3386), False, 'import os\n'), ((3515, 3634), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " marin.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.5\'")'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " marin.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.5\'")\n', (3524, 3634), False, 'import os\n'), ((3732, 3851), 'os.system', 'os.system', (['(\'parun --TwoPhaseFlow --path \' + self.path +\n " moses.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.5\'")'], {}), '(\'parun --TwoPhaseFlow --path \' + self.path +\n " moses.py -l5 -v -C \'final_time=0.1 dt_output=0.1 he=0.5\'")\n', (3741, 3851), False, 'import os\n'), ((941, 961), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (955, 961), False, 'import os\n'), ((979, 994), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (988, 994), False, 'import os\n'), ((1393, 1437), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (1405, 1437), False, 'import os\n'), ((1772, 1816), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (1784, 1816), False, 'import os\n'), ((1447, 1475), 'numpy.array', 'np.array', (['actual.root.phi_t2'], {}), '(actual.root.phi_t2)\n', (1455, 1475), True, 'import numpy as np\n'), ((1826, 1859), 'numpy.array', 'np.array', (['actual.root.velocity_t2'], {}), '(actual.root.velocity_t2)\n', (1834, 1859), True, 'import numpy as np\n')] |
# coding: utf-8
# 2021/5/29 @ tongshiwei
import numpy as np
from pathlib import PurePath
from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel
from gensim import corpora
import re
from .const import UNK, PAD
from .meta import Vector
class W2V(Vector):
    """Word-vector wrapper around a pretrained gensim model.

    Prepends a small constant vocabulary (UNK, PAD) before the pretrained
    vocabulary; constant tokens embed to the zero vector.
    """

    def __init__(self, filepath, method=None, binary=None):
        """
        Parameters
        ----------
        filepath:
            path to the pretrained model file
        method:
            gensim loader selector for binary files
            ("fasttext" -> FastText, anything else -> Word2Vec)
        binary:
            whether the file is a binary gensim model; inferred from a
            ".bin" suffix when not given
        """
        fp = PurePath(filepath)
        # Infer the binary flag from the file suffix unless explicitly given.
        self.binary = binary if binary is not None else (True if fp.suffix == ".bin" else False)
        if self.binary is True:
            if method == "fasttext":
                self.wv = FastText.load(filepath).wv
            else:
                self.wv = Word2Vec.load(filepath).wv
        else:
            self.wv = KeyedVectors.load(filepath, mmap="r")
        self.method = method
        # Constant tokens occupy the first indices, before the pretrained vocab.
        self.constants = {UNK: 0, PAD: 1}

    def __len__(self):
        return len(self.constants) + len(self.wv.key_to_index)

    def key_to_index(self, word):
        """Map a token to its integer index (constants first, then the vocab, UNK fallback)."""
        if word in self.constants:
            return self.constants[word]
        else:
            if word in self.wv.key_to_index:
                return self.wv.key_to_index[word] + len(self.constants)
            else:
                return self.constants[UNK]

    @property
    def vectors(self):
        # Zero rows for the constant tokens, followed by the pretrained vectors.
        return np.concatenate([np.zeros((len(self.constants), self.vector_size)), self.wv.vectors], axis=0)

    @property
    def vector_size(self):
        return self.wv.vector_size

    def __call__(self, *words):
        for word in words:
            yield self[word]

    def __getitem__(self, item):
        # Constant tokens (UNK, PAD) embed to the zero vector.
        return self.wv[item] if item not in self.constants else np.zeros((self.vector_size,))

    def infer_vector(self, items, agg="mean", *args, **kwargs) -> np.ndarray:
        """Embed each item and aggregate its token vectors with ``np.<agg>``.

        ``agg`` names a numpy reduction such as "mean" or "sum".  Resolved
        with ``getattr`` instead of ``eval`` so an invalid name raises a
        clear AttributeError and no arbitrary code can be executed.
        """
        tokens = self.infer_tokens(items, *args, **kwargs)
        return getattr(np, agg)(tokens, axis=1)

    def infer_tokens(self, items, *args, **kwargs) -> list:
        return [list(self(*item)) for item in items]
class BowLoader(object):
    """Bag-of-words vectorizer backed by a pretrained gensim dictionary."""

    def __init__(self, filepath):
        # Vocabulary mapping tokens to integer ids.
        self.dictionary = corpora.Dictionary.load(filepath)

    def infer_vector(self, item, return_vec=False):
        """Convert a token list to its bag-of-words representation.

        Returns sparse ``(id, count)`` pairs by default, or a dense count
        vector over the whole vocabulary when ``return_vec`` is True.
        """
        item = self.dictionary.doc2bow(item)
        if not return_vec:
            return item  # return dic as default
        # Dense zero vector over the full vocabulary (idiomatic list repetition
        # instead of a comprehension over range()).
        vec = [0] * len(self.dictionary.keys())
        for i, v in item:
            vec[i] = v
        return vec

    @property
    def vector_size(self):
        return len(self.dictionary.keys())
class TfidfLoader(object):
    """TF-IDF vectorizer backed by pretrained gensim models.

    A 'tfidf' model must be applied on top of the 'bow' dictionary it was
    trained with, so the matching dictionary is loaded from the sibling
    path obtained by substituting "tfidf" -> "bow" in ``filepath``.
    """

    def __init__(self, filepath):
        self.tfidf_model = TfidfModel.load(filepath)
        # 'tfidf' model should be used based on the 'bow' model's dictionary
        dictionary_path = re.sub(r"(.*)tfidf", r"\1bow", filepath)
        self.dictionary = corpora.Dictionary.load(dictionary_path)

    def infer_vector(self, item, return_vec=False):
        """Convert a token list to its TF-IDF representation.

        Returns sparse ``(id, weight)`` pairs by default, or a dense weight
        vector over the whole vocabulary when ``return_vec`` is True.
        """
        dic_item = self.dictionary.doc2bow(item)
        tfidf_item = self.tfidf_model[dic_item]
        # return dic as default
        if not return_vec:
            return tfidf_item  # pragma: no cover
        # Dense zero vector over the full vocabulary (idiomatic list repetition
        # instead of a comprehension over range()).
        vec = [0] * len(self.dictionary.keys())
        for i, v in tfidf_item:
            vec[i] = v
        return vec

    @property
    def vector_size(self):
        return len(self.dictionary.token2id)
class D2V(Vector):
    """Document-vector front end dispatching to d2v / bow / tfidf back ends.

    Parameters
    ----------
    filepath:
        path to the pretrained model file
    method:
        one of "d2v" (gensim Doc2Vec), "bow" (BowLoader) or "tfidf"
        (TfidfLoader); anything else raises ValueError
    """

    def __init__(self, filepath, method="d2v"):
        self._method = method
        self._filepath = filepath
        if self._method == "d2v":
            self.d2v = Doc2Vec.load(filepath)
        elif self._method == "bow":
            self.d2v = BowLoader(filepath)
        elif self._method == "tfidf":
            self.d2v = TfidfLoader(filepath)
        else:
            raise ValueError("Unknown method: %s" % method)

    def __call__(self, item):
        """Embed a single tokenized item as a dense vector."""
        if self._method == "d2v":
            return self.d2v.infer_vector(item)
        else:
            # bow/tfidf back ends need return_vec=True to yield a dense vector
            return self.d2v.infer_vector(item, return_vec=True)

    @property
    def vector_size(self):
        # All back ends expose the same attribute, so there is no need to
        # branch on self._method (the old per-method branches were identical
        # and fell through to an implicit None for unknown methods).
        return self.d2v.vector_size

    def infer_vector(self, items, *args, **kwargs) -> list:
        return [self(item) for item in items]

    def infer_tokens(self, item, *args, **kwargs) -> ...:
        raise NotImplementedError
| [
"gensim.models.FastText.load",
"gensim.models.Doc2Vec.load",
"gensim.models.KeyedVectors.load",
"numpy.zeros",
"pathlib.PurePath",
"gensim.models.Word2Vec.load",
"gensim.corpora.Dictionary.load",
"gensim.models.TfidfModel.load",
"re.sub"
] | [((511, 529), 'pathlib.PurePath', 'PurePath', (['filepath'], {}), '(filepath)\n', (519, 529), False, 'from pathlib import PurePath\n'), ((2185, 2218), 'gensim.corpora.Dictionary.load', 'corpora.Dictionary.load', (['filepath'], {}), '(filepath)\n', (2208, 2218), False, 'from gensim import corpora\n'), ((2698, 2723), 'gensim.models.TfidfModel.load', 'TfidfModel.load', (['filepath'], {}), '(filepath)\n', (2713, 2723), False, 'from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel\n'), ((2809, 2848), 're.sub', 're.sub', (['"""(.*)tfidf"""', '"""\\\\1bow"""', 'filepath'], {}), "('(.*)tfidf', '\\\\1bow', filepath)\n", (2815, 2848), False, 'import re\n'), ((2876, 2916), 'gensim.corpora.Dictionary.load', 'corpora.Dictionary.load', (['dictionary_path'], {}), '(dictionary_path)\n', (2899, 2916), False, 'from gensim import corpora\n'), ((856, 893), 'gensim.models.KeyedVectors.load', 'KeyedVectors.load', (['filepath'], {'mmap': '"""r"""'}), "(filepath, mmap='r')\n", (873, 893), False, 'from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel\n'), ((1765, 1794), 'numpy.zeros', 'np.zeros', (['(self.vector_size,)'], {}), '((self.vector_size,))\n', (1773, 1794), True, 'import numpy as np\n'), ((3589, 3611), 'gensim.models.Doc2Vec.load', 'Doc2Vec.load', (['filepath'], {}), '(filepath)\n', (3601, 3611), False, 'from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel\n'), ((722, 745), 'gensim.models.FastText.load', 'FastText.load', (['filepath'], {}), '(filepath)\n', (735, 745), False, 'from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel\n'), ((793, 816), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['filepath'], {}), '(filepath)\n', (806, 816), False, 'from gensim.models import KeyedVectors, Word2Vec, FastText, Doc2Vec, TfidfModel\n')] |
import tensorflow as tf
import numpy as np
import math
from util import dataset
##### HyperParam Setting####
# TransE training hyper-parameters: embedding dimensionality, minibatch
# size, ranking-loss margin, Adam learning rate and number of epochs.
embedding_size = 50
batch_size = 5000
margin = 1
learning_rate = 0.001
epochs = 1000
############################
####tensorflow setting####
# Cap per-process GPU memory so several jobs can share one device.
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.05 #using gpu mem
#########################
def trans_e_model( path ):
    """Train TransE entity/relation embeddings on the dataset under ``path``.

    Optimizes the margin-based ranking loss with an L1 energy (h + r should
    be close to t for true triples and far for corrupted ones), then saves
    the TF checkpoint and the learned embedding tables as .npy files under
    ``path + 'model/'``.
    """
    #read dataset
    ds = dataset( path )
    entity_size = ds.entity_nums + 1 #add 1 avoid out_of_dict
    relation_size = ds.relation_nums[0] + 1
    model_path = path + 'model/'
    #the distance of h r t
    def l1_energy(batch):
        #h = t+r
        # batch columns: [:,0]=head, [:,1]=tail, [:,2]=relation
        return tf.reduce_sum(tf.abs(batch[:,1,:] - batch[:,0,:] - batch[:,2,:]) ,1)
    with tf.device('/cpu:0'):
        e_embedding_table = tf.Variable(tf.truncated_normal([entity_size, embedding_size], stddev=1.0/math.sqrt(embedding_size)), name = 'e_embed')
        r_embedding_table = tf.Variable(tf.truncated_normal([relation_size, embedding_size], stddev=1.0/math.sqrt(embedding_size)), name = 'r_embed')
    postive_sample = tf.placeholder(tf.int32, shape=[batch_size,3], name='p_sample')
    negtive_sample = tf.placeholder(tf.int32, shape=[batch_size,3], name='n_sample')
    pos_embed_e = tf.nn.embedding_lookup(e_embedding_table, postive_sample[:,:2])
    pos_embed_r = tf.nn.embedding_lookup(r_embedding_table, postive_sample[:,-1:])
    pos_embed = tf.concat([pos_embed_e,pos_embed_r], axis = 1)
    neg_embed_e = tf.nn.embedding_lookup(e_embedding_table, negtive_sample[:,:2])
    neg_embed_r = tf.nn.embedding_lookup(r_embedding_table, negtive_sample[:,-1:])
    neg_embed = tf.concat([neg_embed_e,neg_embed_r], axis = 1)
    p_loss, n_loss = l1_energy(pos_embed), l1_energy(neg_embed)
    loss = tf.reduce_sum(tf.nn.relu(margin + p_loss - n_loss)) #loss of TransE
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss) #opt
    #session
    with tf.Session(config=tf_config) as sess:
        sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver(max_to_keep=None)
        print("start training with total {0} epochs and each batch size is{1}".format(epochs, batch_size))
        for e in range(epochs):
            # Use floor division: len(...)/batch_size is a float in Python 3
            # and range() rejects floats.
            for step in range(len(ds.train_pair)//batch_size):
                p, n = ds.get_next_batch(batch_size=batch_size, corpus=ds.train_pair)
                feed_dict = {postive_sample:p,negtive_sample:n}
                loss_val, _, e_emb, r_emb = sess.run([loss, optimizer, e_embedding_table, r_embedding_table], feed_dict=feed_dict)
                # {0} was missing from the original template, so `step` was
                # passed but never printed.
                print("step {0} loss_val {1} at epoch {2}".format(step, loss_val, e))
            saver.save(sess, save_path = model_path + '_TransE.model')
        np.save(model_path+"_TransE_ent.npy",e_emb)
        np.save(model_path+"_TransE_rel.npy",r_emb)
        print("Train Done!")
if __name__ == '__main__':
    # Train on the bundled dataset when run as a script.
    trans_e_model(path='./data/')
| [
"tensorflow.nn.relu",
"numpy.save",
"tensorflow.abs",
"tensorflow.train.Saver",
"math.sqrt",
"tensorflow.nn.embedding_lookup",
"tensorflow.device",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"util.dataset",
"tensorflow.initialize_all_variab... | [((286, 302), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (300, 302), True, 'import tensorflow as tf\n'), ((468, 481), 'util.dataset', 'dataset', (['path'], {}), '(path)\n', (475, 481), False, 'from util import dataset\n'), ((1148, 1212), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size, 3]', 'name': '"""p_sample"""'}), "(tf.int32, shape=[batch_size, 3], name='p_sample')\n", (1162, 1212), True, 'import tensorflow as tf\n'), ((1233, 1297), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size, 3]', 'name': '"""n_sample"""'}), "(tf.int32, shape=[batch_size, 3], name='n_sample')\n", (1247, 1297), True, 'import tensorflow as tf\n'), ((1316, 1380), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['e_embedding_table', 'postive_sample[:, :2]'], {}), '(e_embedding_table, postive_sample[:, :2])\n', (1338, 1380), True, 'import tensorflow as tf\n'), ((1398, 1463), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['r_embedding_table', 'postive_sample[:, -1:]'], {}), '(r_embedding_table, postive_sample[:, -1:])\n', (1420, 1463), True, 'import tensorflow as tf\n'), ((1479, 1524), 'tensorflow.concat', 'tf.concat', (['[pos_embed_e, pos_embed_r]'], {'axis': '(1)'}), '([pos_embed_e, pos_embed_r], axis=1)\n', (1488, 1524), True, 'import tensorflow as tf\n'), ((1544, 1608), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['e_embedding_table', 'negtive_sample[:, :2]'], {}), '(e_embedding_table, negtive_sample[:, :2])\n', (1566, 1608), True, 'import tensorflow as tf\n'), ((1626, 1691), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['r_embedding_table', 'negtive_sample[:, -1:]'], {}), '(r_embedding_table, negtive_sample[:, -1:])\n', (1648, 1691), True, 'import tensorflow as tf\n'), ((1707, 1752), 'tensorflow.concat', 'tf.concat', (['[neg_embed_e, neg_embed_r]'], {'axis': '(1)'}), '([neg_embed_e, neg_embed_r], 
axis=1)\n', (1716, 1752), True, 'import tensorflow as tf\n'), ((807, 826), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (816, 826), True, 'import tensorflow as tf\n'), ((1846, 1882), 'tensorflow.nn.relu', 'tf.nn.relu', (['(margin + p_loss - n_loss)'], {}), '(margin + p_loss - n_loss)\n', (1856, 1882), True, 'import tensorflow as tf\n'), ((2041, 2069), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (2051, 2069), True, 'import tensorflow as tf\n'), ((2143, 2175), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (2157, 2175), True, 'import tensorflow as tf\n'), ((2837, 2883), 'numpy.save', 'np.save', (["(model_path + '_TransE_ent.npy')", 'e_emb'], {}), "(model_path + '_TransE_ent.npy', e_emb)\n", (2844, 2883), True, 'import numpy as np\n'), ((2889, 2935), 'numpy.save', 'np.save', (["(model_path + '_TransE_rel.npy')", 'r_emb'], {}), "(model_path + '_TransE_rel.npy', r_emb)\n", (2896, 2935), True, 'import numpy as np\n'), ((742, 798), 'tensorflow.abs', 'tf.abs', (['(batch[:, 1, :] - batch[:, 0, :] - batch[:, 2, :])'], {}), '(batch[:, 1, :] - batch[:, 0, :] - batch[:, 2, :])\n', (748, 798), True, 'import tensorflow as tf\n'), ((1940, 1977), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (1962, 1977), True, 'import tensorflow as tf\n'), ((2096, 2125), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2123, 2125), True, 'import tensorflow as tf\n'), ((930, 955), 'math.sqrt', 'math.sqrt', (['embedding_size'], {}), '(embedding_size)\n', (939, 955), False, 'import math\n'), ((1080, 1105), 'math.sqrt', 'math.sqrt', (['embedding_size'], {}), '(embedding_size)\n', (1089, 1105), False, 'import math\n')] |
import queue
import time
from multiprocessing import Process, Queue
import cv2
import numpy as np
from joblib import Parallel, delayed
from stable_baselines import logger
class ExpertDataset(object):
    """Dataset of expert trajectories for imitation learning.

    Loads (or receives) a trajectory archive with keys 'obs', 'actions',
    'episode_starts' and 'episode_returns', optionally truncates it to a
    number of episodes, flattens observations/actions to 2-D, and splits
    the transitions into train/validation DataLoaders.
    """

    # Attributes holding live DataLoader objects (with queues/processes);
    # they are excluded from pickling and rebuilt lazily after unpickling.
    EXCLUDED_KEYS = {'dataloader', 'train_loader', 'val_loader'}

    def __init__(
        self,
        expert_path=None,
        traj_data=None,
        train_fraction=0.7,
        batch_size=64,
        traj_limitation=-1,
        randomize=True,
        verbose=1,
        sequential_preprocessing=False,
    ):
        """
        :param expert_path: path to an .npz trajectory archive (exclusive with traj_data)
        :param traj_data: already-loaded trajectory mapping (exclusive with expert_path)
        :param train_fraction: fraction of transitions assigned to the training split
        :param batch_size: minibatch size for both loaders
        :param traj_limitation: keep only this many episodes (-1 keeps all)
        :param randomize: shuffle minibatches each pass
        :param verbose: >0 prints array shapes, >=1 logs summary statistics
        :param sequential_preprocessing: load batches in-process instead of a worker
        """
        if traj_data is not None and expert_path is not None:
            raise ValueError("Cannot specify both 'traj_data' and 'expert_path'")
        if traj_data is None and expert_path is None:
            raise ValueError("Must specify one of 'traj_data' or 'expert_path'")
        if traj_data is None:
            # allow_pickle is needed for object arrays; only load trusted files.
            traj_data = np.load(expert_path, allow_pickle=True)
        if verbose > 0:
            for key, val in traj_data.items():
                print(key, val.shape)
        episode_starts = traj_data['episode_starts']
        traj_limit_idx = len(traj_data['obs'])
        if traj_limitation > 0:
            # Find the transition index where episode (traj_limitation + 1)
            # begins; everything from there on is dropped.
            n_episodes = 0
            for idx, episode_start in enumerate(episode_starts):
                n_episodes += int(episode_start)
                if n_episodes == (traj_limitation + 1):
                    traj_limit_idx = idx - 1
        observations = traj_data['obs'][:traj_limit_idx]
        actions = traj_data['actions'][:traj_limit_idx]
        # Flatten any extra dimensions (e.g. images) to (n_samples, features).
        if len(observations.shape) > 2:
            observations = np.reshape(
                observations, [-1, np.prod(observations.shape[1:])]
            )
        if len(actions.shape) > 2:
            actions = np.reshape(actions, [-1, np.prod(actions.shape[1:])])
        # Random train/validation split over transition indices.
        indices = np.random.permutation(len(observations)).astype(np.int64)
        train_indices = indices[: int(train_fraction * len(indices))]
        val_indices = indices[int(train_fraction * len(indices)) :]
        assert len(train_indices) > 0, "No sample for the training set"
        assert len(val_indices) > 0, "No sample for the validation set"
        self.observations = observations
        self.actions = actions
        self.returns = traj_data['episode_returns'][:traj_limit_idx]
        self.avg_ret = sum(self.returns) / len(self.returns)
        self.std_ret = np.std(np.array(self.returns))
        self.verbose = verbose
        assert len(self.observations) == len(self.actions), (
            "The number of actions and observations differ "
            "please check your expert dataset"
        )
        # NOTE(review): with the default traj_limitation=-1 this min() yields
        # -1 rather than the actual episode count — confirm intended.
        self.num_traj = min(traj_limitation, np.sum(episode_starts))
        self.num_transition = len(self.observations)
        self.randomize = randomize
        self.sequential_preprocessing = sequential_preprocessing
        self.dataloader = None
        self.train_loader = DataLoader(
            train_indices,
            self.observations,
            self.actions,
            batch_size,
            shuffle=self.randomize,
            start_process=False,
            sequential=sequential_preprocessing,
        )
        self.val_loader = DataLoader(
            val_indices,
            self.observations,
            self.actions,
            batch_size,
            shuffle=self.randomize,
            start_process=False,
            sequential=sequential_preprocessing,
        )
        if self.verbose >= 1:
            self.log_info()

    def init_dataloader(self, batch_size):
        """Create the full-dataset loader (all indices, no train/val split)."""
        indices = np.random.permutation(len(self.observations)).astype(np.int64)
        self.dataloader = DataLoader(
            indices,
            self.observations,
            self.actions,
            batch_size,
            shuffle=self.randomize,
            start_process=False,
            sequential=self.sequential_preprocessing,
        )

    def __del__(self):
        # Drop loader attributes so their worker processes/queues are torn down.
        for key in self.EXCLUDED_KEYS:
            if self.__dict__.get(key) is not None:
                del self.__dict__[key]

    def __getstate__(self):
        # Pickle everything except the unpicklable DataLoader objects.
        return {
            key: val
            for key, val in self.__dict__.items()
            if key not in self.EXCLUDED_KEYS
        }

    def __setstate__(self, state):
        # Loaders are not restored; call init_dataloader() after unpickling.
        self.__dict__.update(state)
        for excluded_key in self.EXCLUDED_KEYS:
            assert excluded_key not in state
        self.dataloader = None
        self.train_loader = None
        self.val_loader = None

    def log_info(self):
        """Log summary statistics of the loaded dataset."""
        logger.log("Total trajectories: {}".format(self.num_traj))
        logger.log("Total transitions: {}".format(self.num_transition))
        logger.log("Average returns: {}".format(self.avg_ret))
        logger.log("Std for returns: {}".format(self.std_ret))

    def get_next_batch(self, split=None):
        """Return the next (obs, actions) minibatch from the chosen split.

        :param split: None for the full dataset, 'train' or 'val' for a split.
        """
        dataloader = {
            None: self.dataloader,
            'train': self.train_loader,
            'val': self.val_loader,
        }[split]
        if dataloader.process is None:
            dataloader.start_process()
        try:
            return next(dataloader)
        except StopIteration:
            # Restart iteration once the loader is exhausted.
            dataloader = iter(dataloader)
            return next(dataloader)

    def plot(self):
        """Show a histogram of episode returns (imports matplotlib lazily)."""
        import matplotlib.pyplot as plt

        plt.hist(self.returns)
        plt.show()
class DataLoader(object):
    """Minibatch iterator over (observations, actions) transition arrays.

    Either feeds minibatches sequentially in-process (``sequential=True``)
    or fills a queue from a background worker process.  When observations
    are image paths (strings), images are loaded lazily per minibatch.
    """

    def __init__(
        self,
        indices,
        observations,
        actions,
        batch_size,
        n_workers=1,
        infinite_loop=True,
        max_queue_len=1,
        shuffle=False,
        start_process=True,
        backend='threading',
        sequential=False,
        partial_minibatch=True,
    ):
        """
        :param indices: int64 array of transition indices to iterate over
        :param observations: observation array (or array of image paths)
        :param actions: action array, aligned with observations
        :param batch_size: number of transitions per minibatch
        :param n_workers: joblib workers for image loading
        :param infinite_loop: worker keeps producing epochs forever
        :param max_queue_len: capacity of the inter-process queue
        :param shuffle: reshuffle indices at the start of each epoch
        :param start_process: spawn the worker process immediately
        :param backend: joblib backend for parallel image loading
        :param sequential: produce batches in-process instead of a worker
        :param partial_minibatch: include the final smaller minibatch
        """
        super(DataLoader, self).__init__()
        self.n_workers = n_workers
        self.infinite_loop = infinite_loop
        self.indices = indices
        self.original_indices = indices.copy()
        self.n_minibatches = len(indices) // batch_size
        # Count the trailing partial minibatch when allowed.
        if partial_minibatch and len(indices) % batch_size > 0:
            self.n_minibatches += 1
        self.batch_size = batch_size
        self.observations = observations
        self.actions = actions
        self.shuffle = shuffle
        self.queue = Queue(max_queue_len)
        self.process = None
        # String observations are interpreted as image file paths.
        self.load_images = isinstance(observations[0], str)
        self.backend = backend
        self.sequential = sequential
        self.start_idx = 0
        if start_process:
            self.start_process()

    def start_process(self):
        """Spawn the background worker process (no-op in sequential mode)."""
        if self.sequential:
            return
        self.process = Process(target=self._run)
        # Daemonize so the worker dies with the parent process.
        self.process.daemon = True
        self.process.start()

    @property
    def _minibatch_indices(self):
        # Slice of transition indices for the current minibatch.
        return self.indices[self.start_idx : self.start_idx + self.batch_size]

    def sequential_next(self):
        """Produce the next minibatch in-process (sequential mode)."""
        if self.start_idx > len(self.indices):
            raise StopIteration
        if self.start_idx == 0:
            if self.shuffle:
                np.random.shuffle(self.indices)
        obs = self.observations[self._minibatch_indices]
        if self.load_images:
            obs = np.concatenate(
                [self._make_batch_element(image_path) for image_path in obs], axis=0
            )
        actions = self.actions[self._minibatch_indices]
        self.start_idx += self.batch_size
        return obs, actions

    def _run(self):
        """Worker loop: push (obs, actions) minibatches onto the queue."""
        start = True
        with Parallel(
            n_jobs=self.n_workers, batch_size="auto", backend=self.backend
        ) as parallel:
            while start or self.infinite_loop:
                start = False
                if self.shuffle:
                    np.random.shuffle(self.indices)
                for minibatch_idx in range(self.n_minibatches):
                    self.start_idx = minibatch_idx * self.batch_size
                    obs = self.observations[self._minibatch_indices]
                    if self.load_images:
                        if self.n_workers <= 1:
                            obs = [
                                self._make_batch_element(image_path)
                                for image_path in obs
                            ]
                        else:
                            obs = parallel(
                                delayed(self._make_batch_element)(image_path)
                                for image_path in obs
                            )
                        obs = np.concatenate(obs, axis=0)
                    actions = self.actions[self._minibatch_indices]
                    self.queue.put((obs, actions))
                    del obs
                # None sentinel marks the end of an epoch.
                self.queue.put(None)

    @classmethod
    def _make_batch_element(cls, image_path):
        """Load one image as a (1, H, W, C) array, converting BGR to RGB."""
        image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        # Check for a failed load BEFORE touching image.shape: cv2.imread
        # returns None (it does not raise) when the file is missing, so the
        # original shape-first order crashed with AttributeError instead of
        # this intended ValueError.
        if image is None:
            raise ValueError(
                "Tried to load {}, but it was not found".format(image_path)
            )
        if len(image.shape) == 2:
            # Grayscale: add an explicit channel axis.
            image = image[:, :, np.newaxis]
        if image.shape[-1] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image.reshape((1,) + image.shape)
        return image

    def __len__(self):
        return self.n_minibatches

    def __iter__(self):
        # Restart from the first minibatch with the original index order.
        self.start_idx = 0
        self.indices = self.original_indices.copy()
        return self

    def __next__(self):
        if self.sequential:
            return self.sequential_next()
        if self.process is None:
            raise ValueError(
                "You must call .start_process() before using the dataloader"
            )
        # Busy-poll the queue; sleep briefly between attempts.
        while True:
            try:
                val = self.queue.get_nowait()
                break
            except queue.Empty:
                time.sleep(0.001)
                continue
        if val is None:
            raise StopIteration
        return val

    def __del__(self):
        if self.process is not None:
            self.process.terminate()
| [
"numpy.load",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.hist",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.prod",
"time.sleep",
"cv2.imread",
"numpy.array",
"multiprocessing.Queue",
"joblib.Parallel",
"multiprocessing.Process",
"joblib.delayed",
"numpy.random.shuffle"
] | [((5217, 5239), 'matplotlib.pyplot.hist', 'plt.hist', (['self.returns'], {}), '(self.returns)\n', (5225, 5239), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5256, 5258), True, 'import matplotlib.pyplot as plt\n'), ((6130, 6150), 'multiprocessing.Queue', 'Queue', (['max_queue_len'], {}), '(max_queue_len)\n', (6135, 6150), False, 'from multiprocessing import Process, Queue\n'), ((6493, 6518), 'multiprocessing.Process', 'Process', ([], {'target': 'self._run'}), '(target=self._run)\n', (6500, 6518), False, 'from multiprocessing import Process, Queue\n'), ((8639, 8683), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_UNCHANGED'], {}), '(image_path, cv2.IMREAD_UNCHANGED)\n', (8649, 8683), False, 'import cv2\n'), ((853, 892), 'numpy.load', 'np.load', (['expert_path'], {'allow_pickle': '(True)'}), '(expert_path, allow_pickle=True)\n', (860, 892), True, 'import numpy as np\n'), ((2351, 2373), 'numpy.array', 'np.array', (['self.returns'], {}), '(self.returns)\n', (2359, 2373), True, 'import numpy as np\n'), ((2631, 2653), 'numpy.sum', 'np.sum', (['episode_starts'], {}), '(episode_starts)\n', (2637, 2653), True, 'import numpy as np\n'), ((7331, 7403), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_workers', 'batch_size': '"""auto"""', 'backend': 'self.backend'}), "(n_jobs=self.n_workers, batch_size='auto', backend=self.backend)\n", (7339, 7403), False, 'from joblib import Parallel, delayed\n'), ((8961, 8999), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (8973, 8999), False, 'import cv2\n'), ((6899, 6930), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (6916, 6930), True, 'import numpy as np\n'), ((1603, 1634), 'numpy.prod', 'np.prod', (['observations.shape[1:]'], {}), '(observations.shape[1:])\n', (1610, 1634), True, 'import numpy as np\n'), ((1732, 1758), 'numpy.prod', 'np.prod', 
(['actions.shape[1:]'], {}), '(actions.shape[1:])\n', (1739, 1758), True, 'import numpy as np\n'), ((7569, 7600), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (7586, 7600), True, 'import numpy as np\n'), ((9655, 9672), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (9665, 9672), False, 'import time\n'), ((8347, 8374), 'numpy.concatenate', 'np.concatenate', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (8361, 8374), True, 'import numpy as np\n'), ((8187, 8220), 'joblib.delayed', 'delayed', (['self._make_batch_element'], {}), '(self._make_batch_element)\n', (8194, 8220), False, 'from joblib import Parallel, delayed\n')] |
# Copyright (c) 2018 <NAME>
# MIT License
"""
DDPGWithVAE inherits DDPG from stable-baselines
and reimplements learning method.
"""
import time
import os
import numpy as np
import pandas as pd
from mpi4py import MPI
from stable_baselines import logger
from stable_baselines.ddpg.ddpg import DDPG
class DDPGWithVAE(DDPG):
    """
    Modified learn method from stable-baselines
    - Stop rollout on episode done.
    - More verbosity.
    - Add VAE optimization step.
    """
    def learn(self, total_timesteps, callback=None, vae=None, skip_episodes=5):
        """Run the DDPG training loop, one full episode per rollout.

        :param total_timesteps: total number of environment steps to take
        :param callback: optional callable invoked each step with (locals, globals)
        :param vae: unused here — the VAE optimization call is commented out below
        :param skip_episodes: number of initial episodes during which no
            transitions are stored and no DDPG updates are performed
        """
        rank = MPI.COMM_WORLD.Get_rank()
        # we assume symmetric actions.
        assert np.all(np.abs(self.env.action_space.low) == self.env.action_space.high)
        self.episode_reward = np.zeros((1,))
        with self.sess.as_default(), self.graph.as_default():
            # Prepare everything.
            self._reset()
            episode_reward = 0.
            episode_step = 0
            episodes = 0
            step = 0
            total_steps = 0
            start_time = time.time()
            actor_losses = []
            critic_losses = []
            while True:
                obs = self.env.reset()
                # Rollout one episode.
                while True:
                    if total_steps >= total_timesteps:
                        return self
                    # Predict next action.
                    action, q_value = self._policy(obs, apply_noise=True, compute_q=True)
                    print(action)
                    assert action.shape == self.env.action_space.shape
                    # Execute next action.
                    if rank == 0 and self.render:
                        self.env.render()
                    # Scale the policy output (in [-1, 1]) back to the env's action range.
                    new_obs, reward, done, _ = self.env.step(action * np.abs(self.action_space.low))
                    step += 1
                    total_steps += 1
                    if rank == 0 and self.render:
                        self.env.render()
                    episode_reward += reward
                    episode_step += 1
                    # Book-keeping.
                    # Do not record observations, while we skip DDPG training.
                    if (episodes + 1) > skip_episodes:
                        self._store_transition(obs, action, reward, new_obs, done)
                    obs = new_obs
                    if callback is not None:
                        callback(locals(), globals())
                    if done:
                        print("episode finished. Reward: ", episode_reward)
                        # Episode done.
                        episode_reward = 0.
                        episode_step = 0
                        episodes += 1
                        self._reset()
                        obs = self.env.reset()
                        # Finish rollout on episode finish.
                        break
                print("rollout finished")
                # Train VAE.
                train_start = time.time()
                # vae.optimize()
                # print("VAE training duration:", time.time() - train_start)
                # Train DDPG.
                actor_losses = []
                critic_losses = []
                train_start = time.time()
                if episodes > skip_episodes:
                    for t_train in range(self.nb_train_steps):
                        critic_loss, actor_loss = self._train_step(0, None, log=t_train == 0)
                        critic_losses.append(critic_loss)
                        actor_losses.append(actor_loss)
                        self._update_target_net()
                    print("DDPG training duration:", time.time() - train_start)
                mpi_size = MPI.COMM_WORLD.Get_size()
                # Log stats.
                # XXX shouldn't call np.mean on variable length lists
                # (np.mean of an empty list is nan during the warm-up episodes).
                duration = time.time() - start_time
                stats = self._get_stats()
                combined_stats = stats.copy()
                combined_stats['train/loss_actor'] = np.mean(actor_losses)
                combined_stats['train/loss_critic'] = np.mean(critic_losses)
                combined_stats['total/duration'] = duration
                combined_stats['total/steps_per_second'] = float(step) / float(duration)
                combined_stats['total/episodes'] = episodes
                def as_scalar(scalar):
                    """
                    check and return the input if it is a scalar, otherwise raise ValueError
                    :param scalar: (Any) the object to check
                    :return: (Number) the scalar if x is a scalar
                    """
                    if isinstance(scalar, np.ndarray):
                        assert scalar.size == 1
                        return scalar[0]
                    elif np.isscalar(scalar):
                        return scalar
                    else:
                        raise ValueError('expected scalar, got %s' % scalar)
                # Average the statistics over all MPI workers.
                combined_stats_sums = MPI.COMM_WORLD.allreduce(
                    np.array([as_scalar(x) for x in combined_stats.values()]))
                combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}
                # Total statistics.
                combined_stats['total/steps'] = step
                for key in sorted(combined_stats.keys()):
                    logger.record_tabular(key, combined_stats[key])
                logger.dump_tabular()
                logger.info('')
                # Append this rollout's stats to logs.csv (header only on first write).
                df = pd.DataFrame([combined_stats])
                header = combined_stats.keys()
                if os.path.exists('logs.csv'):
                    header = False
                df.to_csv('logs.csv', mode='a', header=header, index=False)
"pandas.DataFrame",
"stable_baselines.logger.record_tabular",
"numpy.abs",
"stable_baselines.logger.info",
"numpy.isscalar",
"mpi4py.MPI.COMM_WORLD.Get_rank",
"numpy.zeros",
"os.path.exists",
"time.time",
"numpy.mean",
"stable_baselines.logger.dump_tabular",
"mpi4py.MPI.COMM_WORLD.Get_size"
] | [((578, 603), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (601, 603), False, 'from mpi4py import MPI\n'), ((761, 775), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (769, 775), True, 'import numpy as np\n'), ((1059, 1070), 'time.time', 'time.time', ([], {}), '()\n', (1068, 1070), False, 'import time\n'), ((665, 698), 'numpy.abs', 'np.abs', (['self.env.action_space.low'], {}), '(self.env.action_space.low)\n', (671, 698), True, 'import numpy as np\n'), ((3009, 3020), 'time.time', 'time.time', ([], {}), '()\n', (3018, 3020), False, 'import time\n'), ((3261, 3272), 'time.time', 'time.time', ([], {}), '()\n', (3270, 3272), False, 'import time\n'), ((3751, 3776), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (3774, 3776), False, 'from mpi4py import MPI\n'), ((4093, 4114), 'numpy.mean', 'np.mean', (['actor_losses'], {}), '(actor_losses)\n', (4100, 4114), True, 'import numpy as np\n'), ((4173, 4195), 'numpy.mean', 'np.mean', (['critic_losses'], {}), '(critic_losses)\n', (4180, 4195), True, 'import numpy as np\n'), ((5631, 5652), 'stable_baselines.logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (5650, 5652), False, 'from stable_baselines import logger\n'), ((5673, 5688), 'stable_baselines.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (5684, 5688), False, 'from stable_baselines import logger\n'), ((5714, 5744), 'pandas.DataFrame', 'pd.DataFrame', (['[combined_stats]'], {}), '([combined_stats])\n', (5726, 5744), True, 'import pandas as pd\n'), ((5819, 5845), 'os.path.exists', 'os.path.exists', (['"""logs.csv"""'], {}), "('logs.csv')\n", (5833, 5845), False, 'import os\n'), ((3915, 3926), 'time.time', 'time.time', ([], {}), '()\n', (3924, 3926), False, 'import time\n'), ((5563, 5610), 'stable_baselines.logger.record_tabular', 'logger.record_tabular', (['key', 'combined_stats[key]'], {}), '(key, combined_stats[key])\n', (5584, 5610), False, 'from stable_baselines import 
logger\n'), ((1800, 1829), 'numpy.abs', 'np.abs', (['self.action_space.low'], {}), '(self.action_space.low)\n', (1806, 1829), True, 'import numpy as np\n'), ((3692, 3703), 'time.time', 'time.time', ([], {}), '()\n', (3701, 3703), False, 'import time\n'), ((4935, 4954), 'numpy.isscalar', 'np.isscalar', (['scalar'], {}), '(scalar)\n', (4946, 4954), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import scipy.constants
import cPickle
from bunch import Bunch
import echolect as el
import radarmodel
import prx
# Sparse (l1-regularized) recovery of radar voltage data for the
# 'head_and_flare' data set, followed by reduction to range profiles.
# NOTE(review): legacy Python 2 script (cPickle, xrange).
basefilename = 'head_and_flare'
# Load the pickled measurement bunch; the attributes used below
# (vlt, ts, ipp, f0, codes, noise_sigma, t, r) are assumed to be
# present -- TODO confirm against whatever wrote the pickle.
with open(basefilename + '.pkl', 'rb') as f:
    data = cPickle.load(f)
n = 128  # number of Doppler/frequency bins in the recovery model
m = data.vlt.shape[-1]  # number of delay samples per pulse
# Doppler frequency bins (Hz) and the corresponding radial velocities (m/s).
freqs = np.fft.fftfreq(int(n), data.ts/np.timedelta64(1, 's'))
v = freqs/data.f0*sp.constants.c/2
# l1 regularization weight, scaled from the measured noise level.
lmbda = data.noise_sigma/np.sqrt(n)*2
# Build one forward/adjoint radar model pair per transmitted code.
As = []
Astars = []
nodelays = []
for k, code in enumerate(data.codes):
    # Normalize the code and match the measurement dtype.
    s = (code/np.linalg.norm(code)).astype(data.vlt.dtype)
    A = radarmodel.point.fastest_forward(s, n, m, 1)
    Astar = radarmodel.point.fastest_adjoint(s, n, m, 1)
    As.append(A)
    Astars.append(Astar)
    # Per-code delay offset; fall back to 0 when not provided.
    # NOTE(review): the bare except also hides unrelated errors.
    try:
        code_delay = data.code_delays[k]
    except:
        code_delay = 0
    delay = Astar.delays + code_delay
    # Slice selecting the delay indices that fall inside [0, m).
    nodelays.append(slice(np.searchsorted(delay, 0), np.searchsorted(delay, m)))
# Recovered signal/noise cubes (pulses x Doppler bins x delays) and the
# derived range profiles.
vlt_sig = np.zeros((data.vlt.shape[0], n, m), data.vlt.dtype)
vlt_noise = np.zeros_like(vlt_sig)
h = np.zeros_like(data.vlt.real)
h_sig = np.zeros_like(h)
h_noise = np.zeros_like(data.vlt)
# Warm-start vectors for the l1 solver, one per code.
x0s = [np.zeros(A.inshape, A.indtype) for A in As]
for p in xrange(data.vlt.shape[0]):
    y = data.vlt[p]
    # Codes cycle pulse-to-pulse; pick the model matching this pulse.
    A = As[p % len(As)]
    Astar = Astars[p % len(Astars)]
    # l1-regularized least squares, warm-started from the previous solution.
    x = prx.l1rls(A, Astar, y, lmbda=lmbda, x0=x0s[p % len(As)], printrate=100)
    # Residual projected back through the adjoint = estimated noise image.
    nz = Astar(y - A(x))
    # matched filter result with sidelobes removed is vlt_sig + vlt_noise
    nodelayslc = nodelays[p % len(nodelays)]
    vlt_sig[p] = x[:, nodelayslc]/np.sqrt(n)
    vlt_noise[p] = nz[:, nodelayslc]*np.sqrt(n)
    # Signal range profile: power summed over the Doppler axis.
    h_sig[p] = np.sqrt(np.sum(vlt_sig[p].real**2 + vlt_sig[p].imag**2, axis=0))
    # use zero Doppler noise since by definition noise is wideband with no Doppler shift
    h_noise[p] = np.abs(vlt_noise[p, 0])
    # sqrt(n) factor included in noise term by summing n terms of nz[0]
    h[p] = np.sqrt(np.sum(np.abs(vlt_sig[p] + nz[0, nodelayslc])**2, axis=0))
    x0s[p % len(As)] = x
# Bundle the full recovered cubes for pickling.
recovered = Bunch(vlt_sig=vlt_sig, vlt_noise=vlt_noise, t=data.t, f=freqs, v=v, r=data.r, n=n,
                  ts=data.ts, ipp=data.ipp, f0=data.f0,
                  noise_sigma=data.noise_sigma)
with open(basefilename + '_recovered.pkl', 'wb') as f:
    cPickle.dump(recovered, f, protocol=-1)
# Bundle only the range-profile reductions.
rec_range = Bunch(h=h, h_sig=h_sig, h_noise=h_noise, t=data.t, r=data.r, n=n,
                  ts=data.ts, ipp=data.ipp, f0=data.f0,
                  noise_sigma=data.noise_sigma)
with open(basefilename + '_recovered_range.pkl', 'wb') as f:
cPickle.dump(rec_range, f, protocol=-1) | [
"numpy.zeros_like",
"numpy.abs",
"numpy.sum",
"bunch.Bunch",
"numpy.zeros",
"cPickle.load",
"numpy.searchsorted",
"cPickle.dump",
"numpy.timedelta64",
"radarmodel.point.fastest_adjoint",
"numpy.linalg.norm",
"numpy.sqrt",
"radarmodel.point.fastest_forward"
] | [((940, 991), 'numpy.zeros', 'np.zeros', (['(data.vlt.shape[0], n, m)', 'data.vlt.dtype'], {}), '((data.vlt.shape[0], n, m), data.vlt.dtype)\n', (948, 991), True, 'import numpy as np\n'), ((1004, 1026), 'numpy.zeros_like', 'np.zeros_like', (['vlt_sig'], {}), '(vlt_sig)\n', (1017, 1026), True, 'import numpy as np\n'), ((1031, 1059), 'numpy.zeros_like', 'np.zeros_like', (['data.vlt.real'], {}), '(data.vlt.real)\n', (1044, 1059), True, 'import numpy as np\n'), ((1068, 1084), 'numpy.zeros_like', 'np.zeros_like', (['h'], {}), '(h)\n', (1081, 1084), True, 'import numpy as np\n'), ((1095, 1118), 'numpy.zeros_like', 'np.zeros_like', (['data.vlt'], {}), '(data.vlt)\n', (1108, 1118), True, 'import numpy as np\n'), ((2021, 2176), 'bunch.Bunch', 'Bunch', ([], {'vlt_sig': 'vlt_sig', 'vlt_noise': 'vlt_noise', 't': 'data.t', 'f': 'freqs', 'v': 'v', 'r': 'data.r', 'n': 'n', 'ts': 'data.ts', 'ipp': 'data.ipp', 'f0': 'data.f0', 'noise_sigma': 'data.noise_sigma'}), '(vlt_sig=vlt_sig, vlt_noise=vlt_noise, t=data.t, f=freqs, v=v, r=data.\n r, n=n, ts=data.ts, ipp=data.ipp, f0=data.f0, noise_sigma=data.noise_sigma)\n', (2026, 2176), False, 'from bunch import Bunch\n'), ((2322, 2460), 'bunch.Bunch', 'Bunch', ([], {'h': 'h', 'h_sig': 'h_sig', 'h_noise': 'h_noise', 't': 'data.t', 'r': 'data.r', 'n': 'n', 'ts': 'data.ts', 'ipp': 'data.ipp', 'f0': 'data.f0', 'noise_sigma': 'data.noise_sigma'}), '(h=h, h_sig=h_sig, h_noise=h_noise, t=data.t, r=data.r, n=n, ts=data.\n ts, ipp=data.ipp, f0=data.f0, noise_sigma=data.noise_sigma)\n', (2327, 2460), False, 'from bunch import Bunch\n'), ((241, 256), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (253, 256), False, 'import cPickle\n'), ((566, 610), 'radarmodel.point.fastest_forward', 'radarmodel.point.fastest_forward', (['s', 'n', 'm', '(1)'], {}), '(s, n, m, 1)\n', (598, 610), False, 'import radarmodel\n'), ((623, 667), 'radarmodel.point.fastest_adjoint', 'radarmodel.point.fastest_adjoint', (['s', 'n', 'm', '(1)'], {}), '(s, n, m, 1)\n', 
(655, 667), False, 'import radarmodel\n'), ((1126, 1156), 'numpy.zeros', 'np.zeros', (['A.inshape', 'A.indtype'], {}), '(A.inshape, A.indtype)\n', (1134, 1156), True, 'import numpy as np\n'), ((1804, 1827), 'numpy.abs', 'np.abs', (['vlt_noise[p, 0]'], {}), '(vlt_noise[p, 0])\n', (1810, 1827), True, 'import numpy as np\n'), ((2269, 2308), 'cPickle.dump', 'cPickle.dump', (['recovered', 'f'], {'protocol': '(-1)'}), '(recovered, f, protocol=-1)\n', (2281, 2308), False, 'import cPickle\n'), ((2559, 2598), 'cPickle.dump', 'cPickle.dump', (['rec_range', 'f'], {'protocol': '(-1)'}), '(rec_range, f, protocol=-1)\n', (2571, 2598), False, 'import cPickle\n'), ((328, 350), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (342, 350), True, 'import numpy as np\n'), ((413, 423), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (420, 423), True, 'import numpy as np\n'), ((1554, 1564), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1561, 1564), True, 'import numpy as np\n'), ((1602, 1612), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1609, 1612), True, 'import numpy as np\n'), ((1641, 1700), 'numpy.sum', 'np.sum', (['(vlt_sig[p].real ** 2 + vlt_sig[p].imag ** 2)'], {'axis': '(0)'}), '(vlt_sig[p].real ** 2 + vlt_sig[p].imag ** 2, axis=0)\n', (1647, 1700), True, 'import numpy as np\n'), ((874, 899), 'numpy.searchsorted', 'np.searchsorted', (['delay', '(0)'], {}), '(delay, 0)\n', (889, 899), True, 'import numpy as np\n'), ((901, 926), 'numpy.searchsorted', 'np.searchsorted', (['delay', 'm'], {}), '(delay, m)\n', (916, 926), True, 'import numpy as np\n'), ((513, 533), 'numpy.linalg.norm', 'np.linalg.norm', (['code'], {}), '(code)\n', (527, 533), True, 'import numpy as np\n'), ((1926, 1964), 'numpy.abs', 'np.abs', (['(vlt_sig[p] + nz[0, nodelayslc])'], {}), '(vlt_sig[p] + nz[0, nodelayslc])\n', (1932, 1964), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import itertools
import numpy as np
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestTranspose(Caffe2OnnxLayerTest):
    """Layer tests for the ONNX Transpose operator (direct and constant-folded)."""

    def create_net(self, shape, perm, ir_version):
        """
        ONNX net                                 IR net

        Input->Transpose->Sigmoid->Output   =>   Input->Permute->sigmoid

        :param shape: input tensor shape.
        :param perm: axes permutation, or None for the ONNX default
                     (reversed axis order).
        :param ir_version: IR version the reference net should target.
        :return: tuple of (ONNX model, reference net or None).
        """
        #
        #   Create ONNX model
        #

        from onnx import helper
        from onnx import TensorProto

        # Let numpy compute the post-transpose shape (perm=None reverses axes).
        output_shape = np.transpose(np.ones(shape), perm).shape
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        args = dict()
        if perm:
            args['perm'] = perm
        node_def = helper.make_node(
            'Transpose',
            inputs=['input'],
            outputs=['transpose'],
            **args
        )

        sigmoid_def = helper.make_node(
            'Sigmoid',
            inputs=['transpose'],
            outputs=['output']
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_def, sigmoid_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #
        ref_net = None
        if not perm:
            # Default ONNX Transpose behavior: reverse the axis order.
            perm = list(reversed(range(len(shape))))

        return onnx_net, ref_net

    def create_net_const(self, shape, perm, ir_version):
        """
        ONNX net                                         IR net

        Input->Concat(+transposed const)->Output   =>   Input->Concat(+const)

        :param shape: shape of the constant before transposition.
        :param perm: axes permutation, or None for the ONNX default.
        :param ir_version: IR version the reference net should target.
        :return: tuple of (ONNX model, reference net or None).
        """
        #
        #   Create ONNX model
        #

        from onnx import helper
        from onnx import TensorProto

        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent float64 dtype.
        constant = np.random.randint(-127, 127, shape).astype(float)
        constant_transposed = np.transpose(constant, perm)

        concat_axis = 0
        input_shape = list(constant_transposed.shape)
        output_shape = input_shape.copy()
        output_shape[concat_axis] *= 2

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        node_const_def = helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=constant.shape,
                vals=constant.flatten(),
            ),
        )

        args = dict()
        if perm:
            args['perm'] = perm
        node_def = helper.make_node(
            'Transpose',
            inputs=['const1'],
            outputs=['transpose'],
            **args
        )

        node_concat_def = helper.make_node(
            'Concat',
            inputs=['input', 'transpose'],
            outputs=['output'],
            axis=concat_axis
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_const_def, node_def, node_concat_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #
        ref_net = None

        return onnx_net, ref_net

    test_data_precommit = [dict(shape=[4, 6, 8, 10, 12], perm=None),
                           dict(shape=[8, 10, 12], perm=[2, 1, 0]),
                           dict(shape=[6, 8, 10, 12], perm=[0, 3, 1, 2]),
                           dict(shape=[4, 6, 8, 10, 12], perm=[1, 0, 4, 3, 2])]

    test_data = [dict(shape=[10, 12], perm=None),
                 dict(shape=[8, 10, 12], perm=None),
                 dict(shape=[6, 8, 10, 12], perm=None),
                 dict(shape=[4, 6, 8, 10, 12], perm=None)]

    # Exhaustively extend test_data with every axis permutation of each rank.
    for shape in [[10, 12], [8, 10, 12], [6, 8, 10, 12], [4, 6, 8, 10, 12]]:
        for perm in itertools.permutations(np.arange(len(shape))):
            test_data.append(dict(shape=shape, perm=list(perm)))

    @pytest.mark.parametrize("params", test_data_precommit)
    @pytest.mark.precommit
    def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir):
        self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
                   temp_dir=temp_dir)

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    def test_transpose(self, params, ie_device, precision, ir_version, temp_dir):
        self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
                   temp_dir=temp_dir)

    @pytest.mark.parametrize("params", test_data_precommit)
    @pytest.mark.nightly
    def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
        self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
                   temp_dir=temp_dir)

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir):
        self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
                   temp_dir=temp_dir)
temp_dir=temp_dir)
| [
"onnx.helper.make_node",
"onnx.helper.make_model",
"onnx.helper.make_tensor_value_info",
"numpy.transpose",
"numpy.ones",
"numpy.random.randint",
"pytest.mark.parametrize",
"onnx.helper.make_graph"
] | [((4438, 4492), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data_precommit'], {}), "('params', test_data_precommit)\n", (4461, 4492), False, 'import pytest\n'), ((4760, 4804), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data'], {}), "('params', test_data)\n", (4783, 4804), False, 'import pytest\n'), ((5060, 5114), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data_precommit'], {}), "('params', test_data_precommit)\n", (5083, 5114), False, 'import pytest\n'), ((5392, 5436), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""params"""', 'test_data'], {}), "('params', test_data)\n", (5415, 5436), False, 'import pytest\n'), ((657, 721), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input"""', 'TensorProto.FLOAT', 'shape'], {}), "('input', TensorProto.FLOAT, shape)\n", (686, 721), False, 'from onnx import helper\n'), ((739, 811), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.FLOAT', 'output_shape'], {}), "('output', TensorProto.FLOAT, output_shape)\n", (768, 811), False, 'from onnx import helper\n'), ((903, 981), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""'], {'inputs': "['input']", 'outputs': "['transpose']"}), "('Transpose', inputs=['input'], outputs=['transpose'], **args)\n", (919, 981), False, 'from onnx import helper\n'), ((1063, 1132), 'onnx.helper.make_node', 'helper.make_node', (['"""Sigmoid"""'], {'inputs': "['transpose']", 'outputs': "['output']"}), "('Sigmoid', inputs=['transpose'], outputs=['output'])\n", (1079, 1132), False, 'from onnx import helper\n'), ((1240, 1315), 'onnx.helper.make_graph', 'helper.make_graph', (['[node_def, sigmoid_def]', '"""test_model"""', '[input]', '[output]'], {}), "([node_def, sigmoid_def], 'test_model', [input], [output])\n", (1257, 1315), False, 'from onnx import helper\n'), ((1435, 1491), 
'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""test_model"""'}), "(graph_def, producer_name='test_model')\n", (1452, 1491), False, 'from onnx import helper\n'), ((2140, 2168), 'numpy.transpose', 'np.transpose', (['constant', 'perm'], {}), '(constant, perm)\n', (2152, 2168), True, 'import numpy as np\n'), ((2346, 2416), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input"""', 'TensorProto.FLOAT', 'input_shape'], {}), "('input', TensorProto.FLOAT, input_shape)\n", (2375, 2416), False, 'from onnx import helper\n'), ((2434, 2506), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.FLOAT', 'output_shape'], {}), "('output', TensorProto.FLOAT, output_shape)\n", (2463, 2506), False, 'from onnx import helper\n'), ((2944, 3023), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""'], {'inputs': "['const1']", 'outputs': "['transpose']"}), "('Transpose', inputs=['const1'], outputs=['transpose'], **args)\n", (2960, 3023), False, 'from onnx import helper\n'), ((3109, 3209), 'onnx.helper.make_node', 'helper.make_node', (['"""Concat"""'], {'inputs': "['input', 'transpose']", 'outputs': "['output']", 'axis': 'concat_axis'}), "('Concat', inputs=['input', 'transpose'], outputs=['output'\n ], axis=concat_axis)\n", (3125, 3209), False, 'from onnx import helper\n'), ((3324, 3423), 'onnx.helper.make_graph', 'helper.make_graph', (['[node_const_def, node_def, node_concat_def]', '"""test_model"""', '[input]', '[output]'], {}), "([node_const_def, node_def, node_concat_def], 'test_model',\n [input], [output])\n", (3341, 3423), False, 'from onnx import helper\n'), ((3539, 3595), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""test_model"""'}), "(graph_def, producer_name='test_model')\n", (3556, 3595), False, 'from onnx import helper\n'), ((613, 627), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (620, 627), True, 
'import numpy as np\n'), ((2057, 2092), 'numpy.random.randint', 'np.random.randint', (['(-127)', '(127)', 'shape'], {}), '(-127, 127, shape)\n', (2074, 2092), True, 'import numpy as np\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.stats as st
import numpy as np
def initialize(self, runInfoDict, inputFiles):
    """
    Method to generate the observed data.
    Seeds numpy's global RNG for reproducibility, then draws a diagonal
    covariance spanning several orders of magnitude and a random mean vector.
    @ In, runInfoDict, dict, the dictionary containing the runInfo (unused)
    @ In, inputFiles, list, the list of input files (unused)
    @ Out, None
    """
    self.dim = 10
    seed = 1086
    np.random.seed(seed)
    # Each variance is 10**(1.5 * standard normal), i.e. log-scale spread.
    self.cov = 10**(np.random.randn(self.dim)*1.5)
    # Mean components drawn from N(0, 10); rvs consumes the same global RNG.
    self.mu = st.norm(loc=0, scale=10).rvs(self.dim)
def run(self, inputDict):
    """
    Method required by RAVEN to run this as an external model.
    Evaluates the multivariate-normal log-likelihood at the sampled point
    and stores it on ``self.zout``.
    @ In, self, object, object to store members on (must carry ``mu`` and
        ``cov`` as produced by ``initialize``)
    @ In, inputDict, dict, dictionary containing inputs from RAVEN; keys
        'x1'..'x10' each map to an iterable of coordinate values
    @ Out, None
    """
    # Renamed from 'vars', which shadowed the builtin of the same name.
    var_names = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9', 'x10']
    xin = []
    for name in var_names:
        xin.extend(inputDict[name])
    xin = np.asarray(xin)
    # Outside the box (-500, 500)^d the likelihood is floored to a large
    # negative constant instead of evaluating the density.
    if np.all(xin < 500) and np.all(xin > -500):
        zout = st.multivariate_normal(mean=self.mu, cov=self.cov).logpdf(xin)
    else:
        zout = -1.0E6
    self.zout = np.atleast_1d(zout)
| [
"scipy.stats.norm",
"numpy.random.seed",
"numpy.random.randn",
"numpy.asarray",
"scipy.stats.multivariate_normal",
"numpy.atleast_1d",
"numpy.all"
] | [((902, 922), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (916, 922), True, 'import numpy as np\n'), ((1418, 1433), 'numpy.asarray', 'np.asarray', (['xin'], {}), '(xin)\n', (1428, 1433), True, 'import numpy as np\n'), ((1595, 1614), 'numpy.atleast_1d', 'np.atleast_1d', (['zout'], {}), '(zout)\n', (1608, 1614), True, 'import numpy as np\n'), ((1439, 1456), 'numpy.all', 'np.all', (['(xin < 500)'], {}), '(xin < 500)\n', (1445, 1456), True, 'import numpy as np\n'), ((1461, 1479), 'numpy.all', 'np.all', (['(xin > -500)'], {}), '(xin > -500)\n', (1467, 1479), True, 'import numpy as np\n'), ((941, 966), 'numpy.random.randn', 'np.random.randn', (['self.dim'], {}), '(self.dim)\n', (956, 966), True, 'import numpy as np\n'), ((984, 1008), 'scipy.stats.norm', 'st.norm', ([], {'loc': '(0)', 'scale': '(10)'}), '(loc=0, scale=10)\n', (991, 1008), True, 'import scipy.stats as st\n'), ((1492, 1542), 'scipy.stats.multivariate_normal', 'st.multivariate_normal', ([], {'mean': 'self.mu', 'cov': 'self.cov'}), '(mean=self.mu, cov=self.cov)\n', (1514, 1542), True, 'import scipy.stats as st\n')] |
import unittest
from mltoolkit.mldp.steps.transformers.nlp import WindowSlider
from mltoolkit.mldp.steps.transformers.nlp.helpers import create_new_field_name
from mltoolkit.mldp.utils.tools import DataChunk
import numpy as np
class TestWindowSlider(unittest.TestCase):
    """Unit tests for the WindowSlider transformer over ragged sequences."""

    def setUp(self):
        self.field_name = "dummy"
        self.suffix = "window"
        self.new_field_name = create_new_field_name(self.field_name,
                                                     suffix=self.suffix)

    @staticmethod
    def _ragged(rows):
        """Build a 1-D object ndarray whose elements are the given rows.

        ``np.array`` over ragged nested lists warned on NumPy 1.20+ and
        raises ValueError on 1.24+, so the object array is allocated and
        filled explicitly (the pattern the original scenario 3 used).
        """
        arr = np.empty(len(rows), dtype="object")
        for indx, row in enumerate(rows):
            arr[indx] = row
        return arr

    # TODO: more descriptive method names would be nice to have
    def test_scenario1(self):
        """Overlapping windows; trailing partial windows are kept."""
        window_size = 2
        step_size = 1
        only_full_windows = False

        input_seqs = self._ragged([list(range(6)), list(range(2))])
        input_chunk = DataChunk(**{self.field_name: input_seqs})

        expect_seqs = self._ragged([
            [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5]],
            [[0, 1]]])
        expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
                                        self.new_field_name: expect_seqs})

        self._test_window_setup(input_chunk, expected_output_chunk,
                                field_name=self.field_name, suffix=self.suffix,
                                window_size=window_size, step_size=step_size,
                                only_full_windows=only_full_windows)

    def test_scenario2(self):
        """Non-overlapping windows with a shorter tail window."""
        window_size = 3
        step_size = 3
        only_full_windows = False

        input_seqs = self._ragged([list(range(7)), list(range(2))])
        input_chunk = DataChunk(**{self.field_name: input_seqs})

        expect_seqs = self._ragged([
            [[0, 1, 2], [3, 4, 5], [6]],
            [[0, 1]]])
        expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
                                        self.new_field_name: expect_seqs})

        self._test_window_setup(input_chunk, expected_output_chunk,
                                field_name=self.field_name, suffix=self.suffix,
                                window_size=window_size, step_size=step_size,
                                only_full_windows=only_full_windows)

    def test_scenario3(self):
        """Step larger than any sequence: exactly one window per sequence."""
        window_size = 3
        step_size = 10
        only_full_windows = False

        input_seqs = self._ragged([list(range(3)), list(range(2))])
        input_chunk = DataChunk(**{self.field_name: input_seqs})

        expect_seqs = self._ragged([[[0, 1, 2]], [[0, 1]]])
        expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
                                        self.new_field_name: expect_seqs})

        self._test_window_setup(input_chunk, expected_output_chunk,
                                field_name=self.field_name, suffix=self.suffix,
                                window_size=window_size, step_size=step_size,
                                only_full_windows=only_full_windows)

    def test_scenario4(self):
        """only_full_windows=True drops all partial windows."""
        window_size = 2
        step_size = 1
        only_full_windows = True

        input_seqs = self._ragged([list(range(6)), list(range(3)),
                                   list(range(1))])
        input_chunk = DataChunk(**{self.field_name: input_seqs})

        expect_seqs = self._ragged([
            [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]],
            [[0, 1], [1, 2]],
            []
        ])
        expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
                                        self.new_field_name: expect_seqs})

        self._test_window_setup(input_chunk, expected_output_chunk,
                                field_name=self.field_name, suffix=self.suffix,
                                window_size=window_size, step_size=step_size,
                                only_full_windows=only_full_windows)

    def _test_window_setup(self, input_chunk, expected_output_chunk,
                           field_name, suffix,
                           window_size, step_size,
                           only_full_windows):
        """Run WindowSlider on input_chunk and compare against the expectation."""
        window_slider = WindowSlider(field_names=field_name,
                                     window_size=window_size,
                                     step_size=step_size,
                                     new_window_field_name_suffix=suffix,
                                     only_full_windows=only_full_windows)
        actual_output_chunk = window_slider(input_chunk)
        self.assertTrue(expected_output_chunk == actual_output_chunk)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.empty",
"mltoolkit.mldp.utils.tools.DataChunk",
"numpy.array",
"mltoolkit.mldp.steps.transformers.nlp.helpers.create_new_field_name",
"mltoolkit.mldp.steps.transformers.nlp.WindowSlider"
] | [((4553, 4568), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4566, 4568), False, 'import unittest\n'), ((388, 446), 'mltoolkit.mldp.steps.transformers.nlp.helpers.create_new_field_name', 'create_new_field_name', (['self.field_name'], {'suffix': 'self.suffix'}), '(self.field_name, suffix=self.suffix)\n', (409, 446), False, 'from mltoolkit.mldp.steps.transformers.nlp.helpers import create_new_field_name\n'), ((765, 807), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs})\n', (774, 807), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((830, 897), 'numpy.array', 'np.array', (['[[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5]], [[0, 1]]]'], {}), '([[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5]], [[0, 1]]])\n', (838, 897), True, 'import numpy as np\n'), ((955, 1031), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs, self.new_field_name: expect_seqs})\n', (964, 1031), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((1575, 1617), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs})\n', (1584, 1617), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((1640, 1689), 'numpy.array', 'np.array', (['[[[0, 1, 2], [3, 4, 5], [6]], [[0, 1]]]'], {}), '([[[0, 1, 2], [3, 4, 5], [6]], [[0, 1]]])\n', (1648, 1689), True, 'import numpy as np\n'), ((1747, 1823), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs, self.new_field_name: expect_seqs})\n', (1756, 1823), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((2373, 2415), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs})\n', (2382, 2415), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((2438, 2465), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': '"""object"""'}), "(2, dtype='object')\n", (2446, 2465), True, 'import numpy as 
np\n'), ((2569, 2645), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs, self.new_field_name: expect_seqs})\n', (2578, 2645), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((3214, 3256), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs})\n', (3223, 3256), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((3279, 3353), 'numpy.array', 'np.array', (['[[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]], [[0, 1], [1, 2]], []]'], {}), '([[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]], [[0, 1], [1, 2]], []])\n', (3287, 3353), True, 'import numpy as np\n'), ((3432, 3508), 'mltoolkit.mldp.utils.tools.DataChunk', 'DataChunk', ([], {}), '(**{self.field_name: input_seqs, self.new_field_name: expect_seqs})\n', (3441, 3508), False, 'from mltoolkit.mldp.utils.tools import DataChunk\n'), ((4087, 4253), 'mltoolkit.mldp.steps.transformers.nlp.WindowSlider', 'WindowSlider', ([], {'field_names': 'field_name', 'window_size': 'window_size', 'step_size': 'step_size', 'new_window_field_name_suffix': 'suffix', 'only_full_windows': 'only_full_windows'}), '(field_names=field_name, window_size=window_size, step_size=\n step_size, new_window_field_name_suffix=suffix, only_full_windows=\n only_full_windows)\n', (4099, 4253), False, 'from mltoolkit.mldp.steps.transformers.nlp import WindowSlider\n')] |
#!/usr/bin/env python2
# Face alignment + embedding extraction built on OpenFace.
# NOTE(review): Python 2 script; remote debugging is enabled below, so
# execution blocks until a ptvsd debugger attaches.
import ptvsd
# Allow other computers to attach to ptvsd at this IP address and port, using the secret
ptvsd.enable_attach("my_secret", address = ('0.0.0.0', 3000))
# Pause the program until a remote debugger is attached
ptvsd.wait_for_attach()
import time
start = time.time()
import argparse
import cv2
import itertools
import os
import numpy as np
np.set_printoptions(precision=2)
import openface
# Model files are resolved relative to this script's location.
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
# Command line: input images, output directory for aligned crops, and
# optional overrides for the dlib/Torch model paths.
parser = argparse.ArgumentParser()
parser.add_argument('imgs', type=str, nargs='+', help="Input images.")
parser.add_argument('outputDir', type=str, help="Output directory of aligned images.")
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
                    help="Default image dimension.", default=96)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
    print("Argument parsing and loading libraries took {} seconds.".format(
        time.time() - start))
start = time.time()
# Load the dlib face aligner and the OpenFace (Torch) embedding network.
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
if args.verbose:
    print("Loading the dlib and OpenFace models took {} seconds.".format(
        time.time() - start))
def getRep(imgPath):
    """Detect, align, and embed all faces in the image at imgPath.

    Writes each aligned crop to args.outputDir as '<name>-<i>.jpg',
    draws the detection boxes onto the source image in memory, and
    returns (reps, bbs): the embeddings from net.forward (one per
    aligned face) and the dlib bounding boxes.

    Raises Exception when the image cannot be read.
    Uses the module-level globals args, align, and net.
    """
    start = time.time()
    if args.verbose:
        print("Processing {}.".format(imgPath))
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image/frame")
    # dlib/OpenFace expect RGB; OpenCV loads BGR.
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    if args.verbose:
        print(" + Original size: {}".format(rgbImg.shape))
    if args.verbose:
        print("Loading the image took {} seconds.".format(time.time() - start))
    start = time.time()
    bbs = align.getAllFaceBoundingBoxes(rgbImg)
    if args.verbose:
        print("Face detection took {} seconds.".format(time.time() - start))
    start = time.time()
    # Align each detected face to a canonical pose before embedding.
    alignedFaces = []
    for box in bbs:
        alignedFaces.append(
            align.align(
                args.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
    # NOTE(review): alignedFaces is a list and never None, so this guard is
    # dead code; an image with no detected faces passes through as [].
    if alignedFaces is None:
        raise Exception("Unable to align the frame")
    if args.verbose:
        print("Alignment took {} seconds.".format(time.time() - start))
    start = time.time()
    reps = []
    i = 1
    for alignedFace in alignedFaces:
        # Save the aligned crop next to its siblings: <input-name>-<i>.jpg
        (inputImageName, ext) = os.path.splitext(os.path.basename(imgPath))
        outputImgPath = "{}.jpg".format(os.path.join(args.outputDir, inputImageName + '-' + str(i)))
        cv2.imwrite(outputImgPath, alignedFace)
        reps.append(net.forward(alignedFace))
        i = i + 1
    if args.verbose:
        print("Neural network forward pass took {} seconds.".format(
            time.time() - start))
    # Annotate the (BGR) source image with the detection rectangles.
    for bb in bbs:
        cv2.rectangle(bgrImg,(bb.left(), bb.top()), (bb.right(), bb.bottom()),(0,255,0),2)
    # cv2.imshow('image', bgrImg)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return (reps,bbs)
# Process every input image; each call writes the aligned crops to
# args.outputDir.  Only the last image's results remain bound afterwards.
for img in args.imgs:
    repsAndBBs = getRep(img)
    reps = repsAndBBs[0]  # embeddings, one per detected face
    bbs = repsAndBBs[1]   # corresponding dlib bounding boxes
| [
"openface.TorchNeuralNet",
"numpy.set_printoptions",
"ptvsd.enable_attach",
"argparse.ArgumentParser",
"os.path.basename",
"ptvsd.wait_for_attach",
"cv2.cvtColor",
"os.path.realpath",
"cv2.imwrite",
"time.time",
"openface.AlignDlib",
"cv2.imread",
"os.path.join"
] | [((126, 185), 'ptvsd.enable_attach', 'ptvsd.enable_attach', (['"""my_secret"""'], {'address': "('0.0.0.0', 3000)"}), "('my_secret', address=('0.0.0.0', 3000))\n", (145, 185), False, 'import ptvsd\n'), ((244, 267), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (265, 267), False, 'import ptvsd\n'), ((290, 301), 'time.time', 'time.time', ([], {}), '()\n', (299, 301), False, 'import time\n'), ((377, 409), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (396, 409), True, 'import numpy as np\n'), ((493, 530), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""models"""'], {}), "(fileDir, '..', 'models')\n", (505, 530), False, 'import os\n'), ((546, 576), 'os.path.join', 'os.path.join', (['modelDir', '"""dlib"""'], {}), "(modelDir, 'dlib')\n", (558, 576), False, 'import os\n'), ((596, 630), 'os.path.join', 'os.path.join', (['modelDir', '"""openface"""'], {}), "(modelDir, 'openface')\n", (608, 630), False, 'import os\n'), ((641, 666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (664, 666), False, 'import argparse\n'), ((1502, 1513), 'time.time', 'time.time', ([], {}), '()\n', (1511, 1513), False, 'import time\n'), ((1522, 1564), 'openface.AlignDlib', 'openface.AlignDlib', (['args.dlibFacePredictor'], {}), '(args.dlibFacePredictor)\n', (1540, 1564), False, 'import openface\n'), ((1571, 1626), 'openface.TorchNeuralNet', 'openface.TorchNeuralNet', (['args.networkModel', 'args.imgDim'], {}), '(args.networkModel, args.imgDim)\n', (1594, 1626), False, 'import openface\n'), ((454, 480), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (470, 480), False, 'import os\n'), ((1782, 1793), 'time.time', 'time.time', ([], {}), '()\n', (1791, 1793), False, 'import time\n'), ((1881, 1900), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (1891, 1900), False, 'import cv2\n'), ((1993, 2032), 'cv2.cvtColor', 'cv2.cvtColor', (['bgrImg', 
'cv2.COLOR_BGR2RGB'], {}), '(bgrImg, cv2.COLOR_BGR2RGB)\n', (2005, 2032), False, 'import cv2\n'), ((2229, 2240), 'time.time', 'time.time', ([], {}), '()\n', (2238, 2240), False, 'import time\n'), ((2401, 2412), 'time.time', 'time.time', ([], {}), '()\n', (2410, 2412), False, 'import time\n'), ((2846, 2857), 'time.time', 'time.time', ([], {}), '()\n', (2855, 2857), False, 'import time\n'), ((946, 1013), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (958, 1013), False, 'import os\n'), ((1128, 1178), 'os.path.join', 'os.path.join', (['openfaceModelDir', '"""nn4.small2.v1.t7"""'], {}), "(openfaceModelDir, 'nn4.small2.v1.t7')\n", (1140, 1178), False, 'import os\n'), ((3105, 3144), 'cv2.imwrite', 'cv2.imwrite', (['outputImgPath', 'alignedFace'], {}), '(outputImgPath, alignedFace)\n', (3116, 3144), False, 'import cv2\n'), ((2969, 2994), 'os.path.basename', 'os.path.basename', (['imgPath'], {}), '(imgPath)\n', (2985, 2994), False, 'import os\n'), ((1471, 1482), 'time.time', 'time.time', ([], {}), '()\n', (1480, 1482), False, 'import time\n'), ((1726, 1737), 'time.time', 'time.time', ([], {}), '()\n', (1735, 1737), False, 'import time\n'), ((2194, 2205), 'time.time', 'time.time', ([], {}), '()\n', (2203, 2205), False, 'import time\n'), ((2366, 2377), 'time.time', 'time.time', ([], {}), '()\n', (2375, 2377), False, 'import time\n'), ((2811, 2822), 'time.time', 'time.time', ([], {}), '()\n', (2820, 2822), False, 'import time\n'), ((3312, 3323), 'time.time', 'time.time', ([], {}), '()\n', (3321, 3323), False, 'import time\n')] |
import numpy as np
from collections import defaultdict
import space
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
from bc import Boundary
def ddx(f, dx):
    """Central-difference d/dx of f on interior points (drops one edge cell)."""
    forward = f[1:-1, 2:]
    backward = f[1:-1, :-2]
    return (forward - backward) / 2.0 / dx
def ddy(f, dy):
    """Central-difference d/dy of f on interior points (drops one edge cell)."""
    north = f[2:, 1:-1]
    south = f[:-2, 1:-1]
    return (north - south) / 2.0 / dy
def laplacian(f, dx, dy):
    """Five-point Laplacian of f on interior points."""
    d2fdx2 = (f[1:-1, 2:] - 2.0 * f[1:-1, 1:-1] + f[1:-1, :-2]) / dx / dx
    d2fdy2 = (f[2:, 1:-1] - 2.0 * f[1:-1, 1:-1] + f[:-2, 1:-1]) / dy / dy
    return d2fdx2 + d2fdy2
def div(u, v, dx, dy):
    """Divergence du/dx + dv/dy on interior points (central differences inlined)."""
    dudx = (u[1:-1, 2:] - u[1:-1, :-2]) / 2.0 / dx
    dvdy = (v[2:, 1:-1] - v[:-2, 1:-1]) / 2.0 / dy
    return dudx + dvdy
def momentum(u, v, dx, dy, nu):
    """RHS of the u- and v-momentum equations on interior points.

    u rhs: -d(uu)/dx - d(vu)/dy + nu * laplacian(u)
    v rhs: -d(uv)/dx - d(vv)/dy + nu * laplacian(v)
    The central-difference helpers are inlined as local closures so the
    expressions read like the governing equations.
    """
    def _dfdx(f):
        return (f[1:-1, 2:] - f[1:-1, :-2]) / 2.0 / dx

    def _dfdy(f):
        return (f[2:, 1:-1] - f[:-2, 1:-1]) / 2.0 / dy

    def _lap(f):
        return (f[1:-1, 2:] - 2.0 * f[1:-1, 1:-1] + f[1:-1, :-2]) / dx / dx + \
               (f[2:, 1:-1] - 2.0 * f[1:-1, 1:-1] + f[:-2, 1:-1]) / dy / dy

    rhs_u = -_dfdx(u * u) - _dfdy(v * u) + nu * _lap(u)
    rhs_v = -_dfdx(u * v) - _dfdy(v * v) + nu * _lap(v)
    return rhs_u, rhs_v
def momentum_staggered(u, v, dx, dy, nu):
    """Convective + diffusive RHS of the momentum equations on a staggered grid.

    u, v   -- ghost-padded velocity components; each scalar cell owns its
              minus face, so the fields are staggered in the negative
              direction (see NavierStokesFVM)
    dx, dy -- grid spacing
    nu     -- kinematic viscosity
    Returns (mx, my): RHS arrays for the interior u and v faces.
    """
    # x-momentum: only interior points are computed.
    # u is the horizontal component of velocity, varying along dimension 1.
    # Interior face range: LL = u[1,2] , UR = u[n,n]
    # Face-averaged velocities east/west/north/south of each u location:
    ue = 0.5*(u[1:-1, 2:-1] + u[1:-1, 3: ])
    uw = 0.5*(u[1:-1, 1:-2] + u[1:-1, 2:-1])
    un = 0.5*(u[1:-1, 2:-1] + u[2:, 2:-1])
    us = 0.5*(u[:-2, 2:-1] + u[1:-1, 2:-1])
    vn = 0.5*(v[2:, 1:-2] + v[2:, 2:-1])
    vs = 0.5*(v[1:-1, 1:-2] + v[1:-1, 2:-1])
    # conservative (divergence) form: -d(uu)/dx - d(uv)/dy
    convection = - (ue**2 - uw**2)/dx - (un*vn - us*vs)/dy
    # laplacian() drops one cell per edge; the extra [:,1:] slice shifts the
    # result onto the interior u faces
    diffusion = nu * laplacian(u,dx,dy)[:,1:]#[1:-1,2:-1]
    mx = convection + diffusion
    # y-momentum: only interior points.
    # v is the vertical component of velocity, staggered negative on dimension 0.
    # Interior range: v LL = v[2,1], UR = v[n,n]
    ve = 0.5*(v[2:-1, 1:-1] + v[2:-1, 2: ])
    vw = 0.5*(v[2:-1, :-2 ] + v[2:-1, 1:-1])
    vn = 0.5*(v[2:-1, 1:-1] + v[3:, 1:-1])
    vs = 0.5*(v[1:-2, 1:-1] + v[2:-1, 1:-1])
    ue = 0.5*(u[1:-2, 2: ] + u[2:-1, 2: ])
    uw = 0.5*(u[1:-2, 1:-1] + u[2:-1, 1:-1])
    convection = - (ue*ve - uw*vw)/dx - (vn**2 - vs**2)/dy
    diffusion = nu * laplacian(v,dx,dy)[1:,:]#[2:-1,1:-1]
    my = convection + diffusion
    return mx, my
def pressure_poisson(u, v, dx, dy, dt, tol, max_its, b=None, p=None, bcs=None):
    """Jacobi iteration for the pressure Poisson equation.

    u, v    -- (intermediate) velocity components
    tol     -- stop when the change between sweeps drops below this
    max_its -- iteration cap
    b, p    -- optional RHS / pressure work arrays (allocated when omitted;
               both are updated in place)
    bcs     -- optional list of boundary-condition objects applied to p
    Returns (p, err): the pressure field and the final residual norm.
    """
    bcs = bcs or []
    b = np.zeros_like(u) if b is None else b
    p = np.zeros_like(u) if p is None else p
    # RHS: divergence of the velocity field divided by dt (div() inlined)
    b[1:-1, 1:-1] = ((u[1:-1, 2:] - u[1:-1, :-2]) / 2.0 / dx
                     + (v[2:, 1:-1] - v[:-2, 1:-1]) / 2.0 / dy) / dt
    pn = p.copy()
    err = float("inf")
    sweeps = 0
    while sweeps < max_its and err > tol:
        for bc in bcs:
            bc.apply(p)
        np.copyto(pn, p)
        # standard five-point Jacobi update
        p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                          (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                         (2 * (dx**2 + dy**2)) -
                         dx**2 * dy**2 / (2 * (dx**2 + dy**2)) *
                         b[1:-1, 1:-1])
        err = np.linalg.norm(p - pn, 2)
        sweeps += 1
    return p, err
def sparse_pressure_matrix(nx, ny, dx, dy, bc_left, bc_right, bc_bottom, bc_top):
    """Assemble the five-point pressure Laplacian as a sparse CSR matrix.

    The four directional coefficient arrays are modified in place by the
    boundary-condition objects before the diagonals are built.
    a little optimistic that this generalizes to non-dirichlet bcs
    """
    inv_dx2 = 1.0/dx/dx
    inv_dy2 = 1.0/dy/dy
    east = inv_dx2*np.ones([ny,nx])
    south = inv_dy2*np.ones([ny,nx])
    north = inv_dy2*np.ones([ny,nx])
    west = inv_dx2*np.ones([ny,nx])
    bc_left.apply(west)
    bc_right.apply(east)
    bc_bottom.apply(south)
    bc_top.apply(north)
    center = -(west + east + north + south)
    n = nx * ny
    # flatten row-major; off-diagonals are shortened to fit their offsets
    diagonals = [center.reshape(n),
                 east.reshape(n)[:-1],
                 west.reshape(n)[1:],
                 north.reshape(n)[:-nx],
                 south.reshape(n)[nx:]]
    return scipy.sparse.diags(diagonals, [0, 1, -1, nx, -nx], format='csr')
def pressure_poisson_sparse(A1, u, v, dx, dy, dt, nx, ny):
    """Solve the pressure Poisson system with the sparse BiCG solver.

    A1     -- sparse Laplacian from sparse_pressure_matrix()
    u, v   -- ghost-padded intermediate (unprojected) velocities
    Returns the interior pressure field as an (ny, nx) array.
    """
    # RHS = 1/dt * div(uhat); only the interior points are filled, the
    # (ny+2, nx+2) shape just makes the indexing convenient
    divut = np.zeros([ny+2,nx+2])
    divut[1:-1,1:-1] = (u[1:-1,2:] - u[1:-1,1:-1])/dx + (v[2:,1:-1] - v[1:-1,1:-1])/dy
    prhs = 1.0/dt * divut
    rhs = prhs[1:-1,1:-1].ravel()
    # SciPy renamed bicg's ``tol`` keyword to ``rtol`` in 1.12 and removed
    # ``tol`` in 1.14; try the modern spelling first and fall back for
    # older installations.
    try:
        pt, info = scipy.sparse.linalg.bicg(A1, rhs, rtol=1e-10)
    except TypeError:
        pt, info = scipy.sparse.linalg.bicg(A1, rhs, tol=1e-10)
    # NOTE: a non-zero ``info`` (no convergence) is silently ignored, as in
    # the original implementation
    return pt.reshape([ny,nx])
class Fluid:
    """Base class for fluid solvers: holds a grid and named boundary conditions."""

    def __init__(self, space):
        self.space = space
        # maps a field name (e.g. 'u', 'v', 'p') to its list of BC objects
        self.bcs = defaultdict(list)

    def add_boundary_condition(self, name, bc, **kwargs):
        """Instantiate and register one BC.

        ``bc`` is a dict whose 'type' entry is a BC factory; the remaining
        entries (plus **kwargs) become keyword arguments of the factory.
        Note the 'type' key is popped, i.e. the dict is mutated.
        """
        factory = bc.pop('type')
        self.bcs[name].append(factory(space=self.space, **bc, **kwargs))

    def add_boundary_conditions(self, name, bcs, **kwargs):
        """Register several BC specs under the same field name."""
        for spec in bcs:
            self.add_boundary_condition(name, spec, **kwargs)

    def get_boundary_conditions(self, name):
        """All BCs registered under ``name`` (empty list when none)."""
        return self.bcs[name]

    def get_boundary_condition(self, name, dim, b):
        """First BC under ``name`` matching dimension ``dim`` and side ``b``, else None."""
        return next((bc for bc in self.bcs[name]
                     if bc.dim == dim and bc.b == b), None)

    def solve(self, dt, cb=None, **kwargs):
        raise NotImplementedError
class NavierStokesProjectionMethod(Fluid):
    """Incompressible Navier-Stokes solver using a projection method on a
    regular (collocated) grid.

    Each step advances an intermediate velocity with the momentum RHS,
    solves a pressure Poisson equation, then corrects the velocity with
    the pressure gradient.
    """
    def __init__(self, N, extent, rho, nu, f=None):
        super().__init__(space=space.RegularGrid(N, extent))
        self.rho = rho # density
        self.nu = nu # viscosity
        # body force; stored but not referenced by solve() below
        self.f = np.zeros(2) if f is None else f
        self.u = np.zeros(self.space.N)
        self.v = np.zeros(self.space.N)
        self.p = np.zeros(self.space.N)
        # scratch arrays: intermediate velocities uh, vh and Poisson RHS b
        self._x = [ np.zeros_like(self.p) for _ in range(3) ]
    def solve(self, dt, cb=None, its=100, p_tol=1e-3, p_max_its=50):
        """Advance ``its`` steps of size ``dt``.

        cb        -- optional callback cb(step, u, v, p) after each step
        p_tol     -- tolerance for the pressure Poisson iteration
        p_max_its -- iteration cap for the pressure Poisson iteration
        """
        cb = cb if cb is not None else lambda a,b,c,d: None
        u,v,p,uh,vh,b = self.u, self.v, self.p, self._x[0], self._x[1], self._x[2]
        dx,dy = self.space.delta
        p_bcs = self.get_boundary_conditions('p')
        u_bcs = self.get_boundary_conditions('u')
        v_bcs = self.get_boundary_conditions('v')
        for i in range(its):
            for bc in u_bcs:
                bc.apply(u)
            for bc in v_bcs:
                bc.apply(v)
            # predictor: advance the momentum RHS to intermediate velocities
            uRHS, vRHS = momentum(u,v,dx,dy,self.nu)
            uh[1:-1,1:-1] = u[1:-1,1:-1] + dt*uRHS
            vh[1:-1,1:-1] = v[1:-1,1:-1] + dt*vRHS
            # pressure solve (p and b are updated in place and p is returned)
            p,err = pressure_poisson(uh, vh, dx, dy, dt, b=b, p=p,
                                     tol=p_tol, max_its=p_max_its,
                                     bcs=p_bcs)
            # corrector: project to the true velocities
            # u_{n+1} = uh - dt*dpdx
            u[1:-1,1:-1] = uh[1:-1,1:-1] - dt*ddx(p,dx)
            v[1:-1,1:-1] = vh[1:-1,1:-1] - dt*ddy(p,dy)
            cb(i, u, v, p)
        np.copyto(self.u, u)
        np.copyto(self.v, v)
class NavierStokesFVM(Fluid):
    """Incompressible Navier-Stokes solver on a staggered grid (finite-volume)."""

    def __init__(self, N, extent, nu, beta):
        """
        N      -- grid resolution
        extent -- physical domain size
        nu     -- kinematic viscosity
        beta   -- stored but not used in this class -- TODO confirm purpose
        """
        super().__init__(space=space.StaggeredGrid(N, extent))
        self.nu = nu
        self.beta = beta
        # Velocities are staggered in the negative direction: a scalar cell
        # owns its minus face only. Each field therefore carries ghost cells
        # (e.g. u has a ghost cell at x0 - dx and a plus ghost cell at lx).
        self.u = np.zeros(self.space.N+2)   # x-velocity, with ghost cells
        self.v = np.zeros(self.space.N+2)   # y-velocity, with ghost cells
        self.ut = np.zeros_like(self.u)     # intermediate (unprojected) u
        self.vt = np.zeros_like(self.v)     # intermediate (unprojected) v
        self.p = np.zeros(self.space.N+2)   # pressure, with ghost cells

    def solve(self, dt, cb=None, its=100):
        """Advance the solution ``its`` steps of size ``dt``.

        cb -- optional callback invoked as cb(step, u, v, p) after each step
              (consistent with NavierStokesProjectionMethod).
        """
        ny,nx = self.space.N
        dy,dx = self.space.delta
        u,v,ut,vt,p = self.u, self.v, self.ut, self.vt, self.p
        u_bcs = self.get_boundary_conditions('u')
        v_bcs = self.get_boundary_conditions('v')
        p_bcs = [
            self.get_boundary_condition('p',dim=1,b=Boundary.MIN), # left
            self.get_boundary_condition('p',dim=1,b=Boundary.MAX), # right
            self.get_boundary_condition('p',dim=0,b=Boundary.MIN), # bottom
            self.get_boundary_condition('p',dim=0,b=Boundary.MAX), # top
        ]
        A1 = sparse_pressure_matrix(nx, ny, dx, dy, *p_bcs)
        # BUG FIX: the loop previously ran a hard-coded 1000 steps
        # (nsteps = 1000), silently ignoring the ``its`` argument.
        for n in range(its):
            for bc in u_bcs:
                bc.apply(u)
            for bc in v_bcs:
                bc.apply(v)
            # predictor: advance momentum to the unprojected velocities
            mx, my = momentum_staggered(u, v, dx, dy, self.nu)
            ut[1:-1,2:-1] = u[1:-1,2:-1] + dt * mx
            vt[2:-1,1:-1] = v[2:-1,1:-1] + dt * my
            # pressure solve on the interior (ghost cells zeroed first)
            p[:,:] = 0
            p[1:-1,1:-1] = pressure_poisson_sparse(A1, ut, vt, dx, dy, dt, nx, ny)
            # corrector: subtract the pressure gradient (time advance)
            u[1:-1,2:-1] = ut[1:-1,2:-1] - dt * (p[1:-1,2:-1] - p[1:-1,1:-2])/dx
            v[2:-1,1:-1] = vt[2:-1,1:-1] - dt * (p[2:-1,1:-1] - p[1:-2,1:-1])/dy
            if cb is not None:
                cb(n, u, v, p)
class LatticeBoltzmann(Fluid):
def __init__(self, N, extent, rho0, tau):
super().__init__(space=space.LatticeGrid(N, extent))
self.rho0 = 100 # average density
self.tau = 0.6 # collision timescale
self.F = np.zeros(np.append(self.space.N, [self.space.NL]))
self.rho = np.zeros(self.space.N)
self.ux = np.zeros(self.space.N)
self.uy = np.zeros(self.space.N)
self.object_mask = self.space.grid_coords[0] < 0
@property
def vorticity(self):
v = ( (np.roll(self.ux, -1, axis=0) - np.roll(self.ux, 1, axis=0)) -
(np.roll(self.uy, -1, axis=1) - np.roll(self.uy, 1, axis=1)) )
v[self.object_mask] = np.nan
return v
    def solve(self, dt, its, cb=None):
        """Run ``its`` lattice-Boltzmann steps (drift + collision).

        dt  -- accepted for interface compatibility but unused here
        its -- number of time steps
        cb  -- optional callback invoked every 10th step as cb(it, self)
        """
        Ny, Nx = self.space.N
        idxs, cxs, cys, weights = self.space.idxs, self.space.cxs, self.space.cys, self.space.weights
        # normalize the distributions so the density equals rho0
        self.rho = np.sum(self.F,2)
        for i in idxs:
            self.F[:,:,i] *= self.rho0 / self.rho
        # Simulation Main Loop
        for it in range(its):
            # Drift: advect each distribution one cell along its direction
            for i, cx, cy in zip(idxs, cxs, cys):
                self.F[:,:,i] = np.roll(self.F[:,:,i], cx, axis=1)
                self.F[:,:,i] = np.roll(self.F[:,:,i], cy, axis=0)
            # Set reflective boundaries: re-order the directions at solid
            # cells (presumably into their opposites, i.e. bounce-back --
            # depends on the direction ordering in self.space)
            bndryF = self.F[self.object_mask,:]
            bndryF = bndryF[:,[0,5,6,7,8,1,2,3,4]]
            # Calculate fluid variables (density and velocity moments)
            self.rho = np.sum(self.F,2)
            self.ux = np.sum(self.F*cxs,2) / self.rho
            self.uy = np.sum(self.F*cys,2) / self.rho
            # Apply Collision: relax each distribution towards its
            # equilibrium with timescale tau
            Feq = np.zeros(self.F.shape)
            for i, cx, cy, w in zip(idxs, cxs, cys, weights):
                Feq[:,:,i] = self.rho * w * ( 1 + 3*(cx*self.ux+cy*self.uy) +
                                              9*(cx*self.ux+cy*self.uy)**2/2 -
                                              3*(self.ux**2+self.uy**2)/2 )
            self.F += -(1.0/self.tau) * (self.F - Feq)
            # Apply boundary: restore the reflected distributions and zero
            # the velocity inside the object
            self.F[self.object_mask,:] = bndryF
            self.ux[self.object_mask] = 0
            self.uy[self.object_mask] = 0
            if it % 10 == 0:
                if cb:
cb(it, self) | [
"numpy.zeros_like",
"numpy.sum",
"numpy.roll",
"numpy.zeros",
"numpy.ones",
"space.LatticeGrid",
"collections.defaultdict",
"numpy.append",
"numpy.linalg.norm",
"space.RegularGrid",
"numpy.copyto",
"space.StaggeredGrid"
] | [((2942, 2960), 'numpy.zeros', 'np.zeros', (['[ny, nx]'], {}), '([ny, nx])\n', (2950, 2960), True, 'import numpy as np\n'), ((3751, 3777), 'numpy.zeros', 'np.zeros', (['[ny + 2, nx + 2]'], {}), '([ny + 2, nx + 2])\n', (3759, 3777), True, 'import numpy as np\n'), ((2184, 2200), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (2197, 2200), True, 'import numpy as np\n'), ((2232, 2248), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (2245, 2248), True, 'import numpy as np\n'), ((2439, 2455), 'numpy.copyto', 'np.copyto', (['pn', 'p'], {}), '(pn, p)\n', (2448, 2455), True, 'import numpy as np\n'), ((2780, 2805), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - pn)', '(2)'], {}), '(p - pn, 2)\n', (2794, 2805), True, 'import numpy as np\n'), ((2979, 2996), 'numpy.ones', 'np.ones', (['[ny, nx]'], {}), '([ny, nx])\n', (2986, 2996), True, 'import numpy as np\n'), ((3015, 3032), 'numpy.ones', 'np.ones', (['[ny, nx]'], {}), '([ny, nx])\n', (3022, 3032), True, 'import numpy as np\n'), ((3051, 3068), 'numpy.ones', 'np.ones', (['[ny, nx]'], {}), '([ny, nx])\n', (3058, 3068), True, 'import numpy as np\n'), ((3087, 3104), 'numpy.ones', 'np.ones', (['[ny, nx]'], {}), '([ny, nx])\n', (3094, 3104), True, 'import numpy as np\n'), ((4350, 4367), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4361, 4367), False, 'from collections import defaultdict\n'), ((5311, 5333), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (5319, 5333), True, 'import numpy as np\n'), ((5351, 5373), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (5359, 5373), True, 'import numpy as np\n'), ((5391, 5413), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (5399, 5413), True, 'import numpy as np\n'), ((6722, 6742), 'numpy.copyto', 'np.copyto', (['self.u', 'u'], {}), '(self.u, u)\n', (6731, 6742), True, 'import numpy as np\n'), ((6751, 6771), 'numpy.copyto', 'np.copyto', (['self.v', 'v'], {}), '(self.v, 
v)\n', (6760, 6771), True, 'import numpy as np\n'), ((7208, 7234), 'numpy.zeros', 'np.zeros', (['(self.space.N + 2)'], {}), '(self.space.N + 2)\n', (7216, 7234), True, 'import numpy as np\n'), ((7325, 7351), 'numpy.zeros', 'np.zeros', (['(self.space.N + 2)'], {}), '(self.space.N + 2)\n', (7333, 7351), True, 'import numpy as np\n'), ((7391, 7412), 'numpy.zeros_like', 'np.zeros_like', (['self.u'], {}), '(self.u)\n', (7404, 7412), True, 'import numpy as np\n'), ((7431, 7452), 'numpy.zeros_like', 'np.zeros_like', (['self.v'], {}), '(self.v)\n', (7444, 7452), True, 'import numpy as np\n'), ((7509, 7535), 'numpy.zeros', 'np.zeros', (['(self.space.N + 2)'], {}), '(self.space.N + 2)\n', (7517, 7535), True, 'import numpy as np\n'), ((9229, 9251), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (9237, 9251), True, 'import numpy as np\n'), ((9270, 9292), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (9278, 9292), True, 'import numpy as np\n'), ((9311, 9333), 'numpy.zeros', 'np.zeros', (['self.space.N'], {}), '(self.space.N)\n', (9319, 9333), True, 'import numpy as np\n'), ((9858, 9875), 'numpy.sum', 'np.sum', (['self.F', '(2)'], {}), '(self.F, 2)\n', (9864, 9875), True, 'import numpy as np\n'), ((5261, 5272), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5269, 5272), True, 'import numpy as np\n'), ((5435, 5456), 'numpy.zeros_like', 'np.zeros_like', (['self.p'], {}), '(self.p)\n', (5448, 5456), True, 'import numpy as np\n'), ((9168, 9208), 'numpy.append', 'np.append', (['self.space.N', '[self.space.NL]'], {}), '(self.space.N, [self.space.NL])\n', (9177, 9208), True, 'import numpy as np\n'), ((10466, 10483), 'numpy.sum', 'np.sum', (['self.F', '(2)'], {}), '(self.F, 2)\n', (10472, 10483), True, 'import numpy as np\n'), ((10654, 10676), 'numpy.zeros', 'np.zeros', (['self.F.shape'], {}), '(self.F.shape)\n', (10662, 10676), True, 'import numpy as np\n'), ((5139, 5167), 'space.RegularGrid', 'space.RegularGrid', (['N', 'extent'], 
{}), '(N, extent)\n', (5156, 5167), False, 'import space\n'), ((6879, 6909), 'space.StaggeredGrid', 'space.StaggeredGrid', (['N', 'extent'], {}), '(N, extent)\n', (6898, 6909), False, 'import space\n'), ((8993, 9021), 'space.LatticeGrid', 'space.LatticeGrid', (['N', 'extent'], {}), '(N, extent)\n', (9010, 9021), False, 'import space\n'), ((9457, 9485), 'numpy.roll', 'np.roll', (['self.ux', '(-1)'], {'axis': '(0)'}), '(self.ux, -1, axis=0)\n', (9464, 9485), True, 'import numpy as np\n'), ((9488, 9515), 'numpy.roll', 'np.roll', (['self.ux', '(1)'], {'axis': '(0)'}), '(self.ux, 1, axis=0)\n', (9495, 9515), True, 'import numpy as np\n'), ((9534, 9562), 'numpy.roll', 'np.roll', (['self.uy', '(-1)'], {'axis': '(1)'}), '(self.uy, -1, axis=1)\n', (9541, 9562), True, 'import numpy as np\n'), ((9565, 9592), 'numpy.roll', 'np.roll', (['self.uy', '(1)'], {'axis': '(1)'}), '(self.uy, 1, axis=1)\n', (9572, 9592), True, 'import numpy as np\n'), ((10128, 10164), 'numpy.roll', 'np.roll', (['self.F[:, :, i]', 'cx'], {'axis': '(1)'}), '(self.F[:, :, i], cx, axis=1)\n', (10135, 10164), True, 'import numpy as np\n'), ((10195, 10231), 'numpy.roll', 'np.roll', (['self.F[:, :, i]', 'cy'], {'axis': '(0)'}), '(self.F[:, :, i], cy, axis=0)\n', (10202, 10231), True, 'import numpy as np\n'), ((10506, 10529), 'numpy.sum', 'np.sum', (['(self.F * cxs)', '(2)'], {}), '(self.F * cxs, 2)\n', (10512, 10529), True, 'import numpy as np\n'), ((10561, 10584), 'numpy.sum', 'np.sum', (['(self.F * cys)', '(2)'], {}), '(self.F * cys, 2)\n', (10567, 10584), True, 'import numpy as np\n')] |
import torch.utils.data as data
import torch
import pandas as pd
from PIL import Image
from glob import glob
import torchvision.transforms as transforms
import numpy as np
class ImageTensorFolder(data.Dataset):
    """Dataset pairing input images with saved intermediate-representation
    tensors.

    Image filenames are derived from the tensor filenames (dropping any
    ``_rec`` suffix and swapping the extension), so only images that have a
    matching tensor are used.
    """

    def __init__(self, img_path, tensor_path, img_fmt="npy", tns_fmt="npy", transform=None):
        self.img_fmt = img_fmt
        self.tns_fmt = tns_fmt
        self.tensor_paths = self.get_all_files(tensor_path, file_format=tns_fmt)
        self.get_img_files_from_tensors(img_path, tensor_path)
        self.transform = transform
        self.to_tensor = transforms.ToTensor()
        self.to_pil = transforms.ToPILImage()

    def get_all_files(self, path, file_format="png"):
        """Return every file directly under ``path`` with the given extension."""
        filepaths = path + "/*.{}".format(file_format)
        files = glob(filepaths)
        print(files[0:10])  # debug: show a sample of what was found
        return files

    def get_img_files_from_tensors(self, img_path, tensor_path):
        """
        Only get image files corresponding to tensors for training
        """
        self.img_paths = []
        for tensor in self.tensor_paths:
            img_name = tensor.replace(tensor_path + '/', '').replace('_rec', '').replace(self.tns_fmt, self.img_fmt)
            self.img_paths.append(img_path + img_name)

    def load_img(self, filepath, file_format="png"):
        """Load one image as a PIL image, dropping any alpha channel.

        Raises ValueError for an unsupported ``file_format``; previously
        this printed a message and called exit(), killing the host process.
        """
        if file_format in ["png", "jpg", "jpeg"]:
            img = Image.open(filepath)
            img = img.resize((224, 224))  # TODO remove this -- hardcoded for celeba
            # Drop alpha channel
            if self.to_tensor(img).shape[0] == 4:
                img = self.to_tensor(img)[:3, :, :]
                img = self.to_pil(img)
        elif file_format == "npy":
            img = np.load(filepath)
            img = np.uint8(255 * img)
            img = self.to_pil(img)
        elif file_format == "pt":
            img = torch.load(filepath)
        else:
            raise ValueError("Unknown format: {}".format(file_format))
        return img

    def load_tensor(self, filepath, file_format="png"):
        """Load one intermediate-representation tensor with grad disabled.

        Raises ValueError for an unsupported ``file_format``; previously
        ``tensor`` was left unbound, producing an UnboundLocalError below.
        """
        if file_format in ["png", "jpg", "jpeg"]:
            tensor = Image.open(filepath)
            # Drop alpha channel
            if self.to_tensor(tensor).shape[0] == 4:
                tensor = self.to_tensor(tensor)[:3, :, :]
            else:
                tensor = self.to_tensor(tensor)
        elif file_format == "npy":
            tensor = np.load(filepath)
            tensor = self.to_tensor(tensor)
        elif file_format == "pt":
            tensor = torch.load(filepath)
        else:
            raise ValueError("Unknown format: {}".format(file_format))
        tensor.requires_grad = False
        return tensor

    def __getitem__(self, index):
        img = self.load_img(self.img_paths[index], file_format=self.img_fmt)
        img_num = self.img_paths[index].split("/")[-1].replace('_rec', '').split(".")[0]
        intermed_rep = self.load_tensor(self.tensor_paths[index], file_format=self.tns_fmt)
        if self.transform is not None:
            img = self.transform(img)
        return img, intermed_rep, img_num

    def __len__(self):
        return len(self.img_paths)
class TensorPredictionData(data.Dataset):
    """Dataset of intermediate-representation tensors paired with one
    attribute label: CelebA gender/smile, or race from a csv file.
    """

    def __init__(self, tensor_path, labels_path, pred_gender=False,
                 pred_smile=False, pred_race=False, tns_fmt="pt"):
        self.tensor_paths = self.get_all_files(tensor_path, file_format=tns_fmt)
        self.tns_fmt = tns_fmt
        self.pred_gender = pred_gender
        self.pred_smile = pred_smile
        self.pred_race = pred_race
        # column indices into the CelebA attribute vector
        self.gender_index = 20
        self.smile_index = 31
        if self.pred_gender or self.pred_smile:
            self.label_dict = get_celeba_attr_dict(labels_path)
        elif self.pred_race:
            label_csv = pd.read_csv(labels_path)
            self.label_csv = label_csv.set_index("file")
            self.label_mapping = {}
            self.label_mapping["race"] = {"East Asian": 0,
                                          "Indian": 1,
                                          "Black": 2,
                                          "White": 3,
                                          "Middle Eastern": 4,
                                          "Latino_Hispanic": 5,
                                          "Southeast Asian": 6}

    def get_all_files(self, path, file_format):
        """Return every file directly under ``path`` with the given extension."""
        filepaths = path + "/*.{}".format(file_format)
        files = glob(filepaths)
        return files

    def load_tensor(self, filepath, file_format="png"):
        """Load one tensor with grad disabled.

        BUG FIX: this class never created ``self.to_tensor`` (unlike
        ImageTensorFolder), so the png/npy branches raised AttributeError;
        a converter is now built locally. An unsupported format now raises
        ValueError instead of leaving ``tensor`` unbound.
        """
        if file_format in ["png", "jpg", "jpeg"]:
            to_tensor = transforms.ToTensor()
            tensor = Image.open(filepath)
            # Drop alpha channel
            if to_tensor(tensor).shape[0] == 4:
                tensor = to_tensor(tensor)[:3, :, :]
            else:
                tensor = to_tensor(tensor)
        elif file_format == "npy":
            tensor = transforms.ToTensor()(np.load(filepath))
        elif file_format == "pt":
            tensor = torch.load(filepath)
        else:
            raise ValueError("Unknown format: {}".format(file_format))
        tensor.requires_grad = False
        return tensor

    def __getitem__(self, index):
        img_num = self.tensor_paths[index].split("/")[-1].split(".")[0]
        intermed_rep = self.load_tensor(self.tensor_paths[index], file_format=self.tns_fmt)
        if self.pred_gender:
            label = int(self.label_dict[img_num][self.gender_index])
            label = 1 if label > 0 else 0
        elif self.pred_smile:
            label = int(self.label_dict[img_num][self.smile_index])
            label = 1 if label > 0 else 0
        elif self.pred_race:
            # NOTE(review): the csv row is looked up by dataset index, not by
            # the tensor's own filename -- verify this matches the tensors.
            filename = 'train/{}.jpg'.format(index+1)
            labels_row = self.label_csv.loc[filename]
            label = self.label_mapping["race"][labels_row['race']]
        else:
            raise ValueError("only gender prediction supported for now")
        return label, intermed_rep, img_num

    def __len__(self):
        return len(self.tensor_paths)
#returns dict of image_num to list of attributes
def get_celeba_attr_dict(attr_path):
    """Parse a CelebA attribute file into {image_number: attribute array}.

    The first line (image count) and second line (attribute-name header)
    are skipped; each remaining row is split on whitespace, with the first
    field being the image filename (its stem becomes the key) and the rest
    the attribute values (kept as strings).
    """
    # use a context manager so the file is always closed (was a bare
    # open/close pair); also removed the unused ``columns``/``df`` locals
    with open(attr_path, 'r') as rfile:
        texts = rfile.read().split("\n")
    label_dict = {}
    for txt in texts[2:]:
        if txt == '':
            continue
        row = np.array(txt.split(" "))
        row = row[row != ""]
        img_num = row[0].split('.')[0]
        label_dict[img_num] = row[1:]
    return label_dict
| [
"numpy.load",
"numpy.uint8",
"pandas.read_csv",
"torch.load",
"torchvision.transforms.ToPILImage",
"PIL.Image.open",
"glob.glob",
"torchvision.transforms.ToTensor"
] | [((650, 671), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (669, 671), True, 'import torchvision.transforms as transforms\n'), ((694, 717), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (715, 717), True, 'import torchvision.transforms as transforms\n'), ((844, 859), 'glob.glob', 'glob', (['filepaths'], {}), '(filepaths)\n', (848, 859), False, 'from glob import glob\n'), ((4487, 4502), 'glob.glob', 'glob', (['filepaths'], {}), '(filepaths)\n', (4491, 4502), False, 'from glob import glob\n'), ((1428, 1448), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (1438, 1448), False, 'from PIL import Image\n'), ((2243, 2263), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (2253, 2263), False, 'from PIL import Image\n'), ((4652, 4672), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (4662, 4672), False, 'from PIL import Image\n'), ((1761, 1778), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (1768, 1778), True, 'import numpy as np\n'), ((1899, 1918), 'numpy.uint8', 'np.uint8', (['(255 * img)'], {}), '(255 * img)\n', (1907, 1918), True, 'import numpy as np\n'), ((2530, 2547), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (2537, 2547), True, 'import numpy as np\n'), ((3832, 3856), 'pandas.read_csv', 'pd.read_csv', (['labels_path'], {}), '(labels_path)\n', (3843, 3856), True, 'import pandas as pd\n'), ((4939, 4956), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (4946, 4956), True, 'import numpy as np\n'), ((2006, 2026), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (2016, 2026), False, 'import torch\n'), ((2647, 2667), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (2657, 2667), False, 'import torch\n'), ((5056, 5076), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (5066, 5076), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from scipy import interpolate, signal
from scipy.stats import linregress
from pyproj import Proj, transform
def calcR2(H, T, slope, igflag=0):
    """Calculate 2% runup (R2), swash (S), setup, incident swash (Sinc),
    infragravity swash (SIG), Iribarren number (ir) and the 15% runup
    statistic (R16 = setup + swash/4, ~mean+sigma for a Gaussian).

    H      -- significant wave height, reverse shoaled to deep water
    T      -- deep-water peak wave period
    slope  -- beach slope (radians); sign is ignored
    igflag -- 0 (default): full equation for all data
              1: dissipative-specific calculation when Iribarren < 0.3
              2: dissipative-specific (IG energy) calculation for all data

    Based on Stockdon, Holman, Howd and Sallenger (2006), "Empirical
    parameterization of setup, swash, and runup", Coastal Engineering 53.
    """
    g = 9.81
    # make slopes positive!
    slope = np.abs(slope)
    # deep-water wavelength and Iribarren number
    L = (g * T**2) / (2. * np.pi)
    sqHL = np.sqrt(H * L)
    ir = slope / np.sqrt(H / L)
    # BUG FIX: the incident/IG swash partition is only defined by the
    # intermediate/reflective parameterization; initialise to NaN so the
    # dissipative branches no longer raise NameError at the return.
    Sinc = np.nan
    SIG = np.nan
    if igflag == 2:  # use dissipative equations (IG) for ALL data
        R2 = 1.1 * (0.039 * sqHL)
        S = 0.046 * sqHL
        setup = 0.016 * sqHL
    elif igflag == 1 and ir < 0.3:  # if dissipative site use diss equations
        R2 = 1.1 * (0.039 * sqHL)
        S = 0.046 * sqHL
        setup = 0.016 * sqHL
    else:  # if intermediate/reflective site, use full equations
        setup = 0.35 * slope * sqHL
        Sinc = 0.75 * slope * sqHL
        SIG = 0.06 * sqHL
        S = np.sqrt(Sinc**2 + SIG**2)
        R2 = 1.1 * (setup + S / 2.)
    R16 = 1.1 * (setup + S / 4.)
    return R2, S, setup, Sinc, SIG, ir, R16
def nanlsfit(x, y):
    """Least-squares linear fit of paired data, skipping pairs with NaNs.

    Prints a one-line summary and returns (n, slope, intercept, r, p, stderr),
    where n is the number of valid (non-NaN) pairs used.
    """
    mask = ~(np.isnan(x) | np.isnan(y))
    n = int(np.count_nonzero(mask))
    slope, intercept, r, p, stderr = linregress(x[mask], y[mask])
    print("n={}; slope, intercept= {:.4f},{:.4f}; r={:.4f} p={:.4f}, stderr={:.4f} ".format(n, slope, intercept, r, p, stderr))
    return n, slope, intercept, r, p, stderr
def stat_summary(x, iprint=False):
    """Summary statistics of ``x``, ignoring NaNs.

    x      -- array-like of values (may contain NaNs)
    iprint -- when True, print a two-line summary
    Returns a dict with keys n, nnan, nvalid, mean, std, min, max and the
    percentiles d5, d25, d50, d75, d95. All statistics are NaN when the
    input contains no valid values.
    """
    n = len(x)
    nnan = np.sum(np.isnan(x))
    nvalid = n - nnan
    if n > nnan:
        meanx = np.nanmean(x)
        stdx = np.nanstd(x)
        minx = np.nanmin(x)
        maxx = np.nanmax(x)
        # one vectorized call instead of five separate ones
        d5, d25, d50, d75, d95 = np.nanpercentile(x, [5., 25., 50., 75., 95.])
    else:
        # all-NaN input: every statistic is NaN.
        # (np.nan replaces np.NaN, an alias removed in NumPy 2.0)
        meanx = stdx = minx = maxx = np.nan
        d5 = d25 = d50 = d75 = d95 = np.nan
    # return it in a dict
    s = {'n': n, 'nnan': nnan, 'nvalid': nvalid, 'mean': meanx, 'std': stdx,
         'min': minx, 'max': maxx,
         'd5': d5, 'd25': d25, 'd50': d50, 'd75': d75, 'd95': d95}
    if iprint:
        print(" n, nnan, nvalid: ", s['n'], s['nnan'], s['nvalid'])
        print(" mean, std, min, max : {:.3f} {:.3f} {:.3f} {:.3f}".
              format(s['mean'], s['std'], s['min'], s['max']))
        print(" d5, d25, d50, d75, d95: {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".
              format(s['d5'], s['d25'], s['d50'], s['d75'], s['d95']))
    return s
def analyze_channels(x,diff,dx=1.,vthresh=0.5):
    """
    Calculate channel data from alongshore difference vector
    Input:
       x - vector of alongshore locations
       diff - vector of alongshore elevations (m); modified IN PLACE
              (values <= vthresh are zeroed)
       dx - spacing of points in diff (m)
       vthresh - vertical threshold for channel id (m)
    Returns:
       nc - number of channels found
       channel_ctr - center location of each channel
       channel_area, channel_width, channel_max_depth, channel_avg_depth
    Assumes diff is positive.
    NOTE: a horizontal (width) threshold is not implemented; every run of
    points above vthresh counts as a channel.
    """
    # simple calculation of total channel area: zero sub-threshold points
    # and integrate what remains
    diff[diff <= vthresh]=0.
    chana = np.cumsum(diff)*dx
    dlength = len(diff)*dx
    print('Total channel area m^2/m: {:.2f}'.format(chana[-1]/dlength) )
    nc = 0
    channel_strt = np.array([])
    channel_width = np.array([])
    channel_max_depth = np.array([])
    channel_avg_depth = np.array([])
    channel_area = np.array([])
    # state machine: ``run`` is True while we are inside a channel
    run = False
    nc = 0
    for i, z in enumerate(diff):
        if i == 0:
            if z >= vthresh:
                # handle first point: profile starts inside a channel
                run=True
                nc = nc+1
                channel_strt = np.append( channel_strt, x[i] )
                channel_width = np.append( channel_width, dx )
                channel_max_depth = np.append( channel_max_depth, z)
                channel_avg_depth = np.append( channel_avg_depth, z)
                channel_area = np.append( channel_area, z*dx )
                channel_sum_depth = z
        else:
            if z >= vthresh and run is False:
                # start new channel
                run = True
                nc = nc+1
                channel_strt = np.append( channel_strt, x[i] )
                channel_width = np.append( channel_width, dx )
                channel_max_depth = np.append( channel_max_depth, z)
                channel_avg_depth = np.append( channel_avg_depth, z)
                channel_area = np.append( channel_area, z*dx )
                channel_sum_depth = z
            elif z >= vthresh and run is True:
                # update existing channel: widen it and refresh the
                # max/average depth and area statistics
                run = True
                channel_width[nc-1] = channel_width[nc-1]+dx
                channel_max_depth[nc-1] = np.max( (channel_max_depth[nc-1], z) )
                channel_sum_depth = channel_sum_depth + z
                channel_avg_depth[nc-1] = channel_sum_depth/(channel_width[nc-1]/dx)
                channel_area[nc-1] = channel_avg_depth[nc-1]*channel_width[nc-1]
            elif z <= vthresh:
                # reset: we have left the channel
                run = False
    channel_ctr = channel_strt + 0.5*channel_width
    return nc, channel_ctr, channel_area, channel_width, channel_max_depth, channel_avg_depth
def pvol(dist,profs,pfill,dcrest_est,dback,
title_str,pnames,imethod='extend',
dx = 1.,
datum=0.4,
maxdist=200.,ztoe=2.4,zowp=1.25,nsmooth=51,
iverbose=True,iplot=True,iprint=True):
"""
Calculate cross-sectional volumes for barrier island profiles above datum.
Assumes distance increases from offshore landward, but plots with ocean to right.
This is not designed to analyze datum below zero. To do that, fill values that are zeros here should
be reconsidered...maybe turned into datum.
Input (lp is length of profiles, nmaps is number of profiles):
dist(lP) - cross-shore distance (m), starting from arbitrary offshore location, equally spaced at dx
profs(nmaps, lp) - multiple profiles elevations (m relative to some datum)
pfill(lp) - single profile used to fill gaps in other profiles (pre-storm profile)
dcrest_est - cross-shore location of dune crest (estimated)
dback - cross-shore location of barrier platform (estimated as 1.25-m contour)
title_str - string used for title in plots
pnames - strings with names (dates) of profiles
imethod -"extend" or "clip" TODO: check clip code...that code is stale
dx - profile spacing (m) TODO: check to make sure dx==1 is not assumed
datum - elevation used as floor to calculate volumes (m) (not same as profile datum)
ztoe=2.4 - elevation for estimating dune toe (m)
maxdist=200.,,zowp=1.25,nsmooth=51,
iverbose - "True" produces extra output
iplot - "True" produces plot
iprint - "True" saves plot
Returns:
v - volume of profile between first datum and back of island (m2)
vp - volume of profile between first datum and back of platform (m2)
cxcy - x,y pair with centriod location (cross-shore, elevation) (m, m) [misnamed: should be cxcz]
zmax - highest point in the profile (m)
dmax - profile distance to highest point (m)
zcrest - elevation of highest point near digitized dune line (m)
dcrest - profile distance to zcrest (m)
zcrest0 - elevation at same loction as dcrest[0] (m)
dtoe - profile distance to first elevation >= ztoe (m)
width_island - distance from first point above datum to back of island (m)
width_platform - distance from first point above datum to dback (m)
"""
# Colors from colorbrewer...but one more than needed so we can skip the first one (too light)
cols=['#feedde','#fdbe85','#fd8d3c','#e6550d','#a63603']
nmaps, lp = np.shape(profs)
if(iverbose):
print('dx: ',dx)
print("nmaps, length profiles: ",nmaps,lp)
print("Shape of dist: ",np.shape(dist))
print("Shape of profs: ",np.shape(profs))
print("Shape of pfill: ",np.shape(pfill))
if(iverbose and iplot):
fig=plt.figure(figsize=(12,8))
plt.plot(dist,pfill,':r')
for i in range(0,nmaps):
plt.plot(dist,profs[i,:],'-',c=cols[i+1])
# make a copy of the unchanged profiles for plotting
profr = profs.copy()
# find first good value (do this before fitting profile or filling)
ix = np.zeros((nmaps), dtype=int)
for i in range(0,nmaps):
try:
ix[i] = int(np.argwhere(np.isfinite(profs[i,:]))[0])
if iverbose:
print(i,ix[i],profs[i,ix[i]-3:ix[i]+3])
except:
# fails because entire profile is NaN
ix[i] = 0
# extend the profiles with linear fit or zeros
if(imethod is 'extend' ):
title_str = title_str+'_extended'
if iverbose:
print('extend')
npts = int(5/dx)
# fit a straight line to first 5 points
for i in range((nmaps)):
try:
# Not sure why one of these breaks down in
p = np.polyfit( dist[int(ix[i]+1):int(ix[i]+1+npts)],\
profs[i,int(ix[i]+1):int(ix[i]+1+npts)],1)
if iverbose:
print("Slope is: {:.4f}".format(p[0]))
# if slope is less than 1:50, replace
if(p[0]>0.02):
# if slope is positive, replace NaNs with line
profs[i,0:int(ix[i])]=np.polyval(p,dist[0:int(ix[i])])
else:
# if slope is not positive, replace NaNs with zeros
profs[i,0:int(ix[i])]=0.
# print("warning: replacing slope of {:.4f} with {:.4f}".format(p[0],0.02))
# p[0]=0.02
# profs[i,0:int(ix[i])]=np.polyval(p,dist[0:int(ix[i])])
except:
if iverbose:
print('cant calculate slope')
print('dist, profs',dist[int(ix[i]+1):int(ix[i]+1+npts)],\
profs[i,int(ix[i]+1):int(ix[i]+1+npts)])
# fill with zeros
profs[i,0:int(ix[i])]=0.
elif(imethod is 'clip'):
# truncate the profiles to start at common point (profile w/ least data)
title_str = title_str+'_clip'
if iverbose:
print('clipped')
imx = int(np.max(ix))
profs[:,0:imx]=0.
# determine first point >= datum (do this after fitting profile)
ixd = np.zeros((nmaps), dtype=int)
dshore = np.zeros((nmaps), dtype=np.float)
for i in range(0,nmaps):
try:
ixd[i] = int(np.argwhere((profs[i,:]>=datum))[0])
if iverbose:
print(i,ix[i],profs[i,ixd[i]-3:ixd[i]+3])
except:
# fails because entire profile is NaN
ixd[i] = 0
# replace NaNs with fill values from September
for i in range((nmaps)):
# replace NaNs the fill values
idx = np.isnan(profs[i,:])
profs[i,idx]=pfill[idx]
# replace any other NaNs with zero
for i in range(0,nmaps):
profs[i,np.isnan(profs[i,:])]=0.
# find the back of the island using datum
iisl = np.zeros((nmaps), dtype=int)
disl = np.ones((nmaps))*np.nan
for i in range((nmaps)):
try:
# find last point >= datum
#iisl = np.squeeze(np.where(profs[i,int(ix[i]):int(ix[i]+maxdist)]>=datum))[-1]
iisl[i] = np.squeeze(np.where(profs[i,int(ix[i]):-1]>=datum))[-1]
disl[i] = dist[int(ix[i]+iisl[i])]
except:
pass
if iverbose:
print("iisl, disl",iisl[i], disl[i])
# find the highest point in the profile
zmax = np.ones((nmaps))*np.nan
dmax = np.ones((nmaps))*np.nan
for i in range((nmaps)):
try:
imxh = int ( np.nanargmax(profs[i,:]) )
zmax[i] = profs[i,imxh]
dmax[i] = dist[imxh]
except:
pass
if iverbose:
print("i, zmax, dmax",i, zmax[i], dmax[i])
# find highest point within 10 meters of estimated dune crest
idc = np.ones((nmaps),dtype=int)
ni = 15
zcrest0 = np.ones((nmaps))*np.nan
zcrest = np.ones((nmaps))*np.nan
dcrest = np.ones((nmaps))*np.nan
if np.isfinite(dcrest_est) and dcrest_est >= 0:
idcrest = int(max(dcrest_est/dx,0.))
idcrest_min = int(max(idcrest-ni,0))
idcrest_max = int(min(idcrest+ni,lp))
if iverbose:
print('dcrest_est, idcrest: ',dcrest_est, idcrest)
for i in range((nmaps)):
try:
idc[i] = int ( np.nanargmax( profs[i,idcrest_min:idcrest_max]) )
if i == 0:
idc0 = idc[0]
zcrest[i] = profs[i,idc[i]+idcrest-ni]
zcrest0[i] = profs[i,idc0+idcrest-ni] # z at location os zmax in first map
dcrest[i] = dist[idc[i]+idcrest-ni]
except:
pass
if iverbose:
print("idc, zcrest, dcrest",idc[i], zcrest[i], dcrest[i])
# find dune toe as first point >= ztoe
idt = np.zeros((nmaps), dtype=int)
dtoe = np.ones((nmaps))*np.nan
for i in range((nmaps)):
try:
# have to squeeze because where returns (0,n). Want first one, so [0]
idt[i] = np.squeeze(np.where(profs[i,int(ix[i]):int(ix[i]+maxdist)]>=ztoe))[0]
dtoe[i] = dist[int(ix[i]+idt[i])]
except:
pass
if iverbose:
print("i, dtoe",idt, dtoe)
# find the back of the overwash platform using zowp
# this code is no longer used...back of platform now comes in as dback
# dowp = np.ones((nmaps))*np.nan
# for i in range((nmaps)):
# # smooth the profile
# ps = smooth(np.squeeze(profs[i,:]),nsmooth)
# # find last point >zowp
# # iowp = np.squeeze(np.where(profs[i,int(ix[i]):int(ix[i]+maxdist)]>=zowp))[-1]
# # iowp = np.squeeze(np.where(ps[int(ix[i]):int(ix[i]+maxdist)]>=zowp))[-1]
# try:
# iowp = np.squeeze(np.where(ps[int(ix[i]):-1]>=zowp))[-1]
# dowp[i] = dist[int(ix[i]+iowp)]
# if iverbose:
# print("i, dowp",i, dowp[i])
# except:
# if iverbose:
# print("i, dowp",i, dowp[i])
# # if back of platforn is not found, use half the distance from the crest to the back of the island
# if not np.isfinite(dowp[0]):
# if np.isfinite(disl[0]):
# dowp[0] = dmax[0]+0.5*(disl[0]-dmax[0])
# else:
# dowp[0] = dmax[0]
# calculate total width of Island
width_island = np.zeros((nmaps))*np.NaN
width_platform = np.zeros((nmaps))*np.NaN
for i in range((nmaps)):
try:
width_island[i] = disl[i]-dist[ixd[i]]
except:
pass
try:
# print('dback, ixd[i], dist[ixd[i]]: ',dback, ixd[i], dist[ixd[i]])
width_platform[i]= dback-dist[ixd[i]]
except:
pass
if iverbose:
print("width, platform width",width_island[i], width_platform[i])
# Calculate volumes
profd = profs.copy()-datum
profd[np.where(profd<=0.)]=0.
try:
v = np.sum(profd,1)*dx
except:
v = np.NaN
try:
vp = np.sum(profd[:,ixd[i]:int(dback/dx)],1)*dx
except:
vp = np.NaN
if iverbose:
print("Island volumes: ", v)
print('Platform volumes:', vp)
# Calculate centroids
cxcy = np.zeros((nmaps,2))
profc = profs.copy()
profc[np.where(profc<=datum)]=np.nan
for i in range(0,nmaps):
try:
cxcy[i,0],cxcy[i,1] = centroid(dist,profc[i,:])
except:
cxcy[i,0],cxcy[i,1] = np.nan, np.nan
if iverbose:
print("Centroids: \n",cxcy)
# nice plot if requested
if iplot:
fig=plt.figure(figsize=(12,8))
plt.plot(dist,np.ones_like(dist)*datum,'--',c='dimgray',linewidth=2)
for i in range(0,4):
lab = '{0} {1: .0f} m$^3$/m'.format(pnames[i],v[i])
plt.plot(dist,profr[i,:],'-',linewidth=3,c=cols[i+1],label=lab)
plt.plot(dist,profs[i,:],':',linewidth=3,c=cols[i+1])
for i in range(0,4):
plt.plot(cxcy[i,0],cxcy[i,1],'ok',ms=12)
plt.plot(cxcy[i,0],cxcy[i,1],'o',c=cols[i])
for i in range(0,4):
plt.plot(dmax[i],zmax[i],'or',ms=12)
plt.plot(dmax[i],zmax[i],'o',c=cols[i])
for i in range(0,4):
plt.plot(dtoe[i],ztoe,'ob',ms=12)
plt.plot(dtoe[i],ztoe,'o',c=cols[i])
for i in range(0,4):
plt.plot(dist[ixd[i]],datum,'vr',ms=12)
plt.plot(dist[ixd[i]],datum,'v',c=cols[i])
for i in range(0,4):
plt.plot(dcrest[i],zcrest[i],'^r',ms=12)
plt.plot(dcrest[i],zcrest[i],'^',c=cols[i])
plt.plot(dback,zowp,'vr',ms=12)
for i in range(0,4):
plt.plot(disl[i],datum,'<y',ms=12)
plt.plot(disl[i],datum,'<',c=cols[i])
plt.legend()
plt.ylim((-1., 6.))
plt.xlim((lp*dx,0)) # this plots xaxis backwards
plt.ylabel('Elevation (m NAVD88)')
plt.xlabel('Across-shore Distance (m)')
plt.title(title_str)
if iprint:
pfn = 'p_'+title_str+'.png'
plt.savefig(pfn,format='png',dpi=300)
return v, vp, cxcy, zmax, dmax, zcrest, dcrest, zcrest0, dtoe, width_island, width_platform
def running_mean(y, npts):
    '''
    Smooth a 1-d array with a centered moving average.
    https://stackoverflow.com/questions/20618804/how-to-smooth-a-curve-in-the-right-way
    Input:
       y - 1-d array
       npts - number of points to average
    Returns:
       ys - smoothed array, same length as y ('same' mode, so the edge
            values are averages over a partial window)
    '''
    # uniform kernel of weight 1/npts per point
    kernel = np.full(npts, 1.0 / npts)
    return np.convolve(y, kernel, mode='same')
def running_nanmean(y, npts):
    '''
    Smooth a 1-d array with a NaN-aware moving average.
    https://stackoverflow.com/questions/40773275/sliding-standard-deviation-on-a-1d-numpy-array
    Input:
       y - 1-d numpy array
       npts - window length (number of points to average)
    Returns:
       sy - smoothed array, same length as y; edge points without a full
            window remain NaN
    '''
    sy = np.ones_like(y)*np.nan
    nrows = y.size - npts + 1
    n = y.strides[0]
    # view y as nrows overlapping windows of length npts (no copy)
    y2D = np.lib.stride_tricks.as_strided(y, shape=(nrows, npts), strides=(n, n))
    nclip = (npts - 1) // 2
    # Assign by explicit length: the old sy[nclip:-nclip] slice produced an
    # empty target when nclip == 0 (npts == 1) and the wrong length for
    # even npts; sy[nclip:nclip+nrows] is identical for odd npts > 1.
    sy[nclip:nclip + nrows] = np.nanmean(y2D, 1)
    return sy
def running_nanmin(y, npts):
    '''
    Smooth a 1-d array with a NaN-aware moving minimum.
    https://stackoverflow.com/questions/40773275/sliding-standard-deviation-on-a-1d-numpy-array
    Input:
       y - 1-d numpy array
       npts - window length (number of points in the minimum)
    Returns:
       sy - filtered array, same length as y; edge points without a full
            window remain NaN
    '''
    sy = np.ones_like(y)*np.nan
    nrows = y.size - npts + 1
    n = y.strides[0]
    # view y as nrows overlapping windows of length npts (no copy)
    y2D = np.lib.stride_tricks.as_strided(y, shape=(nrows, npts), strides=(n, n))
    nclip = (npts - 1) // 2
    # Assign by explicit length: the old sy[nclip:-nclip] slice produced an
    # empty target when nclip == 0 (npts == 1) and the wrong length for
    # even npts; sy[nclip:nclip+nrows] is identical for odd npts > 1.
    sy[nclip:nclip + nrows] = np.nanmin(y2D, 1)
    return sy
def running_stddev(y, npts):
    """
    Compute a NaN-aware moving standard deviation over a window of npts.
    https://stackoverflow.com/questions/40773275/sliding-standard-deviation-on-a-1d-numpy-array
    Input:
       y - 1-d numpy array
       npts - window length (number of points per std. dev.)
    Returns:
       sy - array of running std. deviation, same length as y; edge points
            without a full window remain NaN
    """
    sy = np.ones_like(y)*np.nan
    nrows = y.size - npts + 1
    n = y.strides[0]
    # view y as nrows overlapping windows of length npts (no copy)
    y2D = np.lib.stride_tricks.as_strided(y, shape=(nrows, npts), strides=(n, n))
    nclip = (npts - 1) // 2
    # Assign by explicit length: the old sy[nclip:-nclip] slice produced an
    # empty target when nclip == 0 (npts == 1) and the wrong length for
    # even npts; sy[nclip:nclip+nrows] is identical for odd npts > 1.
    sy[nclip:nclip + nrows] = np.nanstd(y2D, 1)
    return sy
def centroid(x, z):
    """Return the (cx, cz) centroid of a profile z(x).

    cx is the elevation-weighted mean position; cz is the mean elevation.
    NaNs in z are ignored.
    """
    weighted_pos = np.nansum(x * z) / np.nansum(z)
    return (weighted_pos, np.nanmean(z))
def box2UTMh(x, y, x0, y0, theta):
    '''
    2D rotation and translation of x, y via homogeneous coordinates.
    Input:
       x, y - row vectors of original coordinates (must be same size)
       x0, y0 - offset (location of x, y = (0,0) in the new coordinate system)
       theta - angle of rotation (degrees, CCW from x-axis, Cartesian convention)
    Returns:
       xr, yr - rotated, offset coordinates (always 1-d arrays, even for
                scalar input, because of the homogeneous stacking)
    '''
    ang = np.radians(theta)
    cos_t = np.cos(ang)
    sin_t = np.sin(ang)
    # homogeneous rotation about the origin
    rot = np.array(((cos_t, -sin_t, 0.),
                    (sin_t, cos_t, 0.),
                    (0., 0., 1.)))
    # homogeneous translation by (x0, y0)
    tra = np.array(((1., 0., x0),
                    (0., 1., y0),
                    (0., 0., 1.)))
    # stack the points as homogeneous column vectors
    pts = np.vstack((x, y, np.ones_like(x)))
    # rotate first, then translate
    out = (tra @ rot) @ pts
    return out[0, :], out[1, :]
def pcoord(x, y):
    """
    Convert x, y to polar coordinates r, az (geographic convention,
    azimuth measured clockwise from north/+y).
    r,az = pcoord(x, y)
    """
    radius = np.hypot(x, y)
    azimuth = np.degrees(np.arctan2(x, y))
    # wrap azimuth into [0, 360)
    azimuth = np.mod(azimuth + 360.0, 360.0)
    return radius, azimuth
def xycoord(r, az):
    """
    Convert r, az [degrees, geographic convention] to rectangular coordinates.
    x,y = xycoord(r, az)
    """
    theta = np.radians(az)
    # geographic convention: az=0 points along +y (north)
    return r * np.sin(theta), r * np.cos(theta)
def UTM2Island(eutm, nutm, eoff=378489.45785127, noff=3855740.50113774, rot=42.):
    """
    Convert UTM NAD83 Zone 18N easting, northing to N. Core Banks
    alongshore, cross-shore coordinates.
    xisl, yisl = UTM2Island( eutm, nutm )
    """
    # shift to the island origin, rotate the azimuth, and convert back
    radius, azimuth = pcoord(eutm - eoff, nutm - noff)
    xisl, yisl = xycoord(radius, azimuth + rot)
    return xisl, yisl
def LatLon2UTM(lat, lon, initepsg='epsg:26918'):
    """
    Convert lat, lon (WGS84) to UTM easting, northing.
    Defaults to Zone 18N.
    TODO: Update to Proj 6 and correct this syntax
    """
    src = Proj(init='epsg:4326')
    dst = Proj(init=initepsg)
    # NOTE: pyproj expects lon, lat order here
    easting, northing = transform(src, dst, lon, lat)
    return easting, northing
def UTM2LatLon(easting, northing, initepsg='epsg:26918'):
    """
    Convert UTM easting, northing to lon, lat (WGS84).
    Defaults to Zone 18N.
    TODO: Update to Proj 6 and correct this syntax
    """
    src = Proj(init=initepsg)
    dst = Proj(init='epsg:4326')
    # NOTE: pyproj returns lon, lat order here
    lon, lat = transform(src, dst, easting, northing)
    return lon, lat
def UTM2rot(xutm,yutm,r):
    """
    Convert UTM coordinates to rotated box coordinates.

    Input:
       xutm, yutm - UTM coordinates to convert
       r - dict with keys 'e0', 'n0', 'theta' (box origin in UTM and
           rotation angle, as consumed by box2UTMh)
    Returns:
       xb, yb - 1-d arrays of rotated box coordinates

    Now deprecated by UTM2Island ... delete
    """
    # Convert origin to UTM
    xu,yu = box2UTMh(0.,0.,r['e0'],r['n0'],r['theta'])
    # reverse the calc to find the origin (UTM =0,0) in box coordinates.
    # First, just do the rotation to see where Box = 0,0 falls
    xb0,yb0 = box2UTMh(xu,yu,0.,0.,-r['theta'])
    # Then put in negative values for the offset
    #TODO: why does this return a list of arrays?
    xbl,ybl = box2UTMh(xutm,yutm,-xb0,-yb0,-r['theta'])
    # this fixes it...probably should fix box2UTMh
    # flatten the list-of-arrays result into plain 1-d arrays
    xb = np.concatenate(xbl).ravel()
    yb = np.concatenate(ybl).ravel()
    return xb, yb
def map_stats(mp, sfile):
    '''
    Calculate and print some basic statistics for a 3D map array.

    Input:
       mp - 3D array (individual maps stacked along axis 0)
       sfile - open file-like object the summary is printed to
    Returns:
       mean, mad - per-map mean and mean absolute value (NaN-aware)
    '''
    ax = (1, 2)
    avg = np.nanmean(mp, axis=ax)
    absdev = np.nanmean(np.abs(mp), axis=ax)
    lo = np.nanmin(mp, axis=ax)
    hi = np.nanmax(mp, axis=ax)
    root_ms = np.sqrt(np.nanmean(mp**2., axis=ax))
    dims = np.shape(mp)
    # per-map element counts and NaN counts
    sizes = [mp[i, :, :].size for i in range(dims[0])]
    nan_counts = [np.count_nonzero(np.isnan(mp[i, :, :])) for i in range(dims[0])]
    print("Shape: ", dims, file=sfile)
    print("mean", avg, file=sfile)
    print("mad", absdev, file=sfile)
    print("min", lo, file=sfile)
    print("max", hi, file=sfile)
    print("rms", root_ms, file=sfile)
    print("nans", nan_counts, file=sfile)
    print("size", sizes, file=sfile)
    return avg, absdev
def map_stats2d(mp, sfile):
    '''
    Calculate and print some basic statistics for a 2D map array.

    Input:
       mp - 2D array
       sfile - open file-like object the summary is printed to
    Returns:
       mean, mad - scalar mean and mean absolute value (NaN-aware)
    '''
    ax = (0, 1)
    avg = np.nanmean(mp, axis=ax)
    absdev = np.nanmean(np.abs(mp), axis=ax)
    lo = np.nanmin(mp, axis=ax)
    hi = np.nanmax(mp, axis=ax)
    root_ms = np.sqrt(np.nanmean(mp**2., axis=ax))
    dims = np.shape(mp)
    # total element count and NaN count for the single map
    size_total = mp[:, :].size
    nan_total = np.count_nonzero(np.isnan(mp[:, :]))
    print("Shape: ", dims, file=sfile)
    print("mean", avg, file=sfile)
    print("mad", absdev, file=sfile)
    print("min", lo, file=sfile)
    print("max", hi, file=sfile)
    print("rms", root_ms, file=sfile)
    print("nans", nan_total, file=sfile)
    print("size", size_total, file=sfile)
    return avg, absdev
| [
"matplotlib.pyplot.title",
"numpy.nanpercentile",
"numpy.abs",
"numpy.arctan2",
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.convolve",
"numpy.nanmean",
"numpy.isfinite",
"numpy.cumsum",
"numpy.append",
"numpy.max",
"scipy.s... | [((1441, 1454), 'numpy.abs', 'np.abs', (['slope'], {}), '(slope)\n', (1447, 1454), True, 'import numpy as np\n'), ((1536, 1550), 'numpy.sqrt', 'np.sqrt', (['(H * L)'], {}), '(H * L)\n', (1543, 1550), True, 'import numpy as np\n'), ((2420, 2438), 'scipy.stats.linregress', 'linregress', (['xx', 'yy'], {}), '(xx, yy)\n', (2430, 2438), False, 'from scipy.stats import linregress\n'), ((4575, 4587), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4583, 4587), True, 'import numpy as np\n'), ((4608, 4620), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4616, 4620), True, 'import numpy as np\n'), ((4645, 4657), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4653, 4657), True, 'import numpy as np\n'), ((4682, 4694), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4690, 4694), True, 'import numpy as np\n'), ((4714, 4726), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4722, 4726), True, 'import numpy as np\n'), ((9091, 9106), 'numpy.shape', 'np.shape', (['profs'], {}), '(profs)\n', (9099, 9106), True, 'import numpy as np\n'), ((9702, 9728), 'numpy.zeros', 'np.zeros', (['nmaps'], {'dtype': 'int'}), '(nmaps, dtype=int)\n', (9710, 9728), True, 'import numpy as np\n'), ((11807, 11833), 'numpy.zeros', 'np.zeros', (['nmaps'], {'dtype': 'int'}), '(nmaps, dtype=int)\n', (11815, 11833), True, 'import numpy as np\n'), ((11849, 11880), 'numpy.zeros', 'np.zeros', (['nmaps'], {'dtype': 'np.float'}), '(nmaps, dtype=np.float)\n', (11857, 11880), True, 'import numpy as np\n'), ((12514, 12540), 'numpy.zeros', 'np.zeros', (['nmaps'], {'dtype': 'int'}), '(nmaps, dtype=int)\n', (12522, 12540), True, 'import numpy as np\n'), ((13443, 13468), 'numpy.ones', 'np.ones', (['nmaps'], {'dtype': 'int'}), '(nmaps, dtype=int)\n', (13450, 13468), True, 'import numpy as np\n'), ((14455, 14481), 'numpy.zeros', 'np.zeros', (['nmaps'], {'dtype': 'int'}), '(nmaps, dtype=int)\n', (14463, 14481), True, 'import numpy as np\n'), ((16855, 16875), 'numpy.zeros', 'np.zeros', 
(['(nmaps, 2)'], {}), '((nmaps, 2))\n', (16863, 16875), True, 'import numpy as np\n'), ((19155, 19187), 'numpy.convolve', 'np.convolve', (['y', 'box'], {'mode': '"""same"""'}), "(y, box, mode='same')\n", (19166, 19187), True, 'import numpy as np\n'), ((19602, 19673), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['y'], {'shape': '(nrows, npts)', 'strides': '(n, n)'}), '(y, shape=(nrows, npts), strides=(n, n))\n', (19633, 19673), True, 'import numpy as np\n'), ((19740, 19758), 'numpy.nanmean', 'np.nanmean', (['y2D', '(1)'], {}), '(y2D, 1)\n', (19750, 19758), True, 'import numpy as np\n'), ((20171, 20242), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['y'], {'shape': '(nrows, npts)', 'strides': '(n, n)'}), '(y, shape=(nrows, npts), strides=(n, n))\n', (20202, 20242), True, 'import numpy as np\n'), ((20309, 20326), 'numpy.nanmin', 'np.nanmin', (['y2D', '(1)'], {}), '(y2D, 1)\n', (20318, 20326), True, 'import numpy as np\n'), ((20820, 20891), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['y'], {'shape': '(nrows, npts)', 'strides': '(n, n)'}), '(y, shape=(nrows, npts), strides=(n, n))\n', (20851, 20891), True, 'import numpy as np\n'), ((20958, 20975), 'numpy.nanstd', 'np.nanstd', (['y2D', '(1)'], {}), '(y2D, 1)\n', (20967, 20975), True, 'import numpy as np\n'), ((21018, 21031), 'numpy.nanmean', 'np.nanmean', (['z'], {}), '(z)\n', (21028, 21031), True, 'import numpy as np\n'), ((21493, 21510), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (21503, 21510), True, 'import numpy as np\n'), ((21596, 21650), 'numpy.array', 'np.array', (['((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0))'], {}), '(((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0)))\n', (21604, 21650), True, 'import numpy as np\n'), ((21734, 21793), 'numpy.array', 'np.array', (['((1.0, 0.0, x0), (0.0, 1.0, y0), (0.0, 0.0, 1.0))'], {}), '(((1.0, 0.0, x0), (0.0, 1.0, y0), (0.0, 0.0, 1.0)))\n', (21742, 21793), True, 'import 
numpy as np\n'), ((22171, 22195), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (22178, 22195), True, 'import numpy as np\n'), ((23074, 23096), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (23078, 23096), False, 'from pyproj import Proj, transform\n'), ((23111, 23130), 'pyproj.Proj', 'Proj', ([], {'init': 'initepsg'}), '(init=initepsg)\n', (23115, 23130), False, 'from pyproj import Proj, transform\n'), ((23145, 23181), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'lon', 'lat'], {}), '(inProj, outProj, lon, lat)\n', (23154, 23181), False, 'from pyproj import Proj, transform\n'), ((23401, 23423), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (23405, 23423), False, 'from pyproj import Proj, transform\n'), ((23437, 23456), 'pyproj.Proj', 'Proj', ([], {'init': 'initepsg'}), '(init=initepsg)\n', (23441, 23456), False, 'from pyproj import Proj, transform\n'), ((23469, 23514), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'easting', 'northing'], {}), '(inProj, outProj, easting, northing)\n', (23478, 23514), False, 'from pyproj import Proj, transform\n'), ((24345, 24372), 'numpy.nanmean', 'np.nanmean', (['mp'], {'axis': '(1, 2)'}), '(mp, axis=(1, 2))\n', (24355, 24372), True, 'import numpy as np\n'), ((24426, 24452), 'numpy.nanmin', 'np.nanmin', (['mp'], {'axis': '(1, 2)'}), '(mp, axis=(1, 2))\n', (24435, 24452), True, 'import numpy as np\n'), ((24462, 24488), 'numpy.nanmax', 'np.nanmax', (['mp'], {'axis': '(1, 2)'}), '(mp, axis=(1, 2))\n', (24471, 24488), True, 'import numpy as np\n'), ((24544, 24556), 'numpy.shape', 'np.shape', (['mp'], {}), '(mp)\n', (24552, 24556), True, 'import numpy as np\n'), ((25097, 25124), 'numpy.nanmean', 'np.nanmean', (['mp'], {'axis': '(0, 1)'}), '(mp, axis=(0, 1))\n', (25107, 25124), True, 'import numpy as np\n'), ((25178, 25204), 'numpy.nanmin', 'np.nanmin', (['mp'], {'axis': '(0, 1)'}), '(mp, axis=(0, 1))\n', (25187, 
25204), True, 'import numpy as np\n'), ((25214, 25240), 'numpy.nanmax', 'np.nanmax', (['mp'], {'axis': '(0, 1)'}), '(mp, axis=(0, 1))\n', (25223, 25240), True, 'import numpy as np\n'), ((25296, 25308), 'numpy.shape', 'np.shape', (['mp'], {}), '(mp)\n', (25304, 25308), True, 'import numpy as np\n'), ((1564, 1578), 'numpy.sqrt', 'np.sqrt', (['(H / L)'], {}), '(H / L)\n', (1571, 1578), True, 'import numpy as np\n'), ((2679, 2690), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2687, 2690), True, 'import numpy as np\n'), ((2774, 2787), 'numpy.nanmean', 'np.nanmean', (['x'], {}), '(x)\n', (2784, 2787), True, 'import numpy as np\n'), ((2803, 2815), 'numpy.nanstd', 'np.nanstd', (['x'], {}), '(x)\n', (2812, 2815), True, 'import numpy as np\n'), ((2831, 2843), 'numpy.nanmin', 'np.nanmin', (['x'], {}), '(x)\n', (2840, 2843), True, 'import numpy as np\n'), ((2857, 2881), 'numpy.nanpercentile', 'np.nanpercentile', (['x', '(5.0)'], {}), '(x, 5.0)\n', (2873, 2881), True, 'import numpy as np\n'), ((2894, 2919), 'numpy.nanpercentile', 'np.nanpercentile', (['x', '(25.0)'], {}), '(x, 25.0)\n', (2910, 2919), True, 'import numpy as np\n'), ((2932, 2957), 'numpy.nanpercentile', 'np.nanpercentile', (['x', '(50.0)'], {}), '(x, 50.0)\n', (2948, 2957), True, 'import numpy as np\n'), ((2970, 2995), 'numpy.nanpercentile', 'np.nanpercentile', (['x', '(75.0)'], {}), '(x, 75.0)\n', (2986, 2995), True, 'import numpy as np\n'), ((3008, 3033), 'numpy.nanpercentile', 'np.nanpercentile', (['x', '(95.0)'], {}), '(x, 95.0)\n', (3024, 3033), True, 'import numpy as np\n'), ((3047, 3059), 'numpy.nanmax', 'np.nanmax', (['x'], {}), '(x)\n', (3056, 3059), True, 'import numpy as np\n'), ((4425, 4440), 'numpy.cumsum', 'np.cumsum', (['diff'], {}), '(diff)\n', (4434, 4440), True, 'import numpy as np\n'), ((9389, 9416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (9399, 9416), True, 'import matplotlib.pyplot as plt\n'), ((9424, 9451), 
'matplotlib.pyplot.plot', 'plt.plot', (['dist', 'pfill', '""":r"""'], {}), "(dist, pfill, ':r')\n", (9432, 9451), True, 'import matplotlib.pyplot as plt\n'), ((12293, 12314), 'numpy.isnan', 'np.isnan', (['profs[i, :]'], {}), '(profs[i, :])\n', (12301, 12314), True, 'import numpy as np\n'), ((12554, 12568), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (12561, 12568), True, 'import numpy as np\n'), ((13035, 13049), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (13042, 13049), True, 'import numpy as np\n'), ((13070, 13084), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (13077, 13084), True, 'import numpy as np\n'), ((13496, 13510), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (13503, 13510), True, 'import numpy as np\n'), ((13533, 13547), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (13540, 13547), True, 'import numpy as np\n'), ((13570, 13584), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (13577, 13584), True, 'import numpy as np\n'), ((13601, 13624), 'numpy.isfinite', 'np.isfinite', (['dcrest_est'], {}), '(dcrest_est)\n', (13612, 13624), True, 'import numpy as np\n'), ((14495, 14509), 'numpy.ones', 'np.ones', (['nmaps'], {}), '(nmaps)\n', (14502, 14509), True, 'import numpy as np\n'), ((15992, 16007), 'numpy.zeros', 'np.zeros', (['nmaps'], {}), '(nmaps)\n', (16000, 16007), True, 'import numpy as np\n'), ((16038, 16053), 'numpy.zeros', 'np.zeros', (['nmaps'], {}), '(nmaps)\n', (16046, 16053), True, 'import numpy as np\n'), ((16532, 16554), 'numpy.where', 'np.where', (['(profd <= 0.0)'], {}), '(profd <= 0.0)\n', (16540, 16554), True, 'import numpy as np\n'), ((16910, 16934), 'numpy.where', 'np.where', (['(profc <= datum)'], {}), '(profc <= datum)\n', (16918, 16934), True, 'import numpy as np\n'), ((17217, 17244), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (17227, 17244), True, 'import matplotlib.pyplot as plt\n'), ((18230, 18264), 'matplotlib.pyplot.plot', 
'plt.plot', (['dback', 'zowp', '"""vr"""'], {'ms': '(12)'}), "(dback, zowp, 'vr', ms=12)\n", (18238, 18264), True, 'import matplotlib.pyplot as plt\n'), ((18396, 18408), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18406, 18408), True, 'import matplotlib.pyplot as plt\n'), ((18417, 18438), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0, 6.0)'], {}), '((-1.0, 6.0))\n', (18425, 18438), True, 'import matplotlib.pyplot as plt\n'), ((18445, 18467), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(lp * dx, 0)'], {}), '((lp * dx, 0))\n', (18453, 18467), True, 'import matplotlib.pyplot as plt\n'), ((18502, 18536), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Elevation (m NAVD88)"""'], {}), "('Elevation (m NAVD88)')\n", (18512, 18536), True, 'import matplotlib.pyplot as plt\n'), ((18545, 18584), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Across-shore Distance (m)"""'], {}), "('Across-shore Distance (m)')\n", (18555, 18584), True, 'import matplotlib.pyplot as plt\n'), ((18593, 18613), 'matplotlib.pyplot.title', 'plt.title', (['title_str'], {}), '(title_str)\n', (18602, 18613), True, 'import matplotlib.pyplot as plt\n'), ((19127, 19140), 'numpy.ones', 'np.ones', (['npts'], {}), '(npts)\n', (19134, 19140), True, 'import numpy as np\n'), ((19518, 19533), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (19530, 19533), True, 'import numpy as np\n'), ((20087, 20102), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (20099, 20102), True, 'import numpy as np\n'), ((20736, 20751), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (20748, 20751), True, 'import numpy as np\n'), ((21041, 21057), 'numpy.nansum', 'np.nansum', (['(z * x)'], {}), '(z * x)\n', (21050, 21057), True, 'import numpy as np\n'), ((21056, 21068), 'numpy.nansum', 'np.nansum', (['z'], {}), '(z)\n', (21065, 21068), True, 'import numpy as np\n'), ((21522, 21536), 'numpy.cos', 'np.cos', (['thetar'], {}), '(thetar)\n', (21528, 21536), True, 'import numpy as np\n'), ((21538, 
21552), 'numpy.sin', 'np.sin', (['thetar'], {}), '(thetar)\n', (21544, 21552), True, 'import numpy as np\n'), ((21957, 21974), 'numpy.matmul', 'np.matmul', (['Th', 'Rh'], {}), '(Th, Rh)\n', (21966, 21974), True, 'import numpy as np\n'), ((22213, 22229), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (22223, 22229), True, 'import numpy as np\n'), ((24392, 24402), 'numpy.abs', 'np.abs', (['mp'], {}), '(mp)\n', (24398, 24402), True, 'import numpy as np\n'), ((24505, 24539), 'numpy.nanmean', 'np.nanmean', (['(mp ** 2.0)'], {'axis': '(1, 2)'}), '(mp ** 2.0, axis=(1, 2))\n', (24515, 24539), True, 'import numpy as np\n'), ((25144, 25154), 'numpy.abs', 'np.abs', (['mp'], {}), '(mp)\n', (25150, 25154), True, 'import numpy as np\n'), ((25257, 25291), 'numpy.nanmean', 'np.nanmean', (['(mp ** 2.0)'], {'axis': '(0, 1)'}), '(mp ** 2.0, axis=(0, 1))\n', (25267, 25291), True, 'import numpy as np\n'), ((25363, 25381), 'numpy.isnan', 'np.isnan', (['mp[:, :]'], {}), '(mp[:, :])\n', (25371, 25381), True, 'import numpy as np\n'), ((2088, 2117), 'numpy.sqrt', 'np.sqrt', (['(Sinc ** 2 + SIG ** 2)'], {}), '(Sinc ** 2 + SIG ** 2)\n', (2095, 2117), True, 'import numpy as np\n'), ((2300, 2311), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2308, 2311), True, 'import numpy as np\n'), ((2315, 2326), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (2323, 2326), True, 'import numpy as np\n'), ((9233, 9247), 'numpy.shape', 'np.shape', (['dist'], {}), '(dist)\n', (9241, 9247), True, 'import numpy as np\n'), ((9282, 9297), 'numpy.shape', 'np.shape', (['profs'], {}), '(profs)\n', (9290, 9297), True, 'import numpy as np\n'), ((9332, 9347), 'numpy.shape', 'np.shape', (['pfill'], {}), '(pfill)\n', (9340, 9347), True, 'import numpy as np\n'), ((9495, 9542), 'matplotlib.pyplot.plot', 'plt.plot', (['dist', 'profs[i, :]', '"""-"""'], {'c': 'cols[i + 1]'}), "(dist, profs[i, :], '-', c=cols[i + 1])\n", (9503, 9542), True, 'import matplotlib.pyplot as plt\n'), ((16577, 16593), 'numpy.sum', 
'np.sum', (['profd', '(1)'], {}), '(profd, 1)\n', (16583, 16593), True, 'import numpy as np\n'), ((17426, 17497), 'matplotlib.pyplot.plot', 'plt.plot', (['dist', 'profr[i, :]', '"""-"""'], {'linewidth': '(3)', 'c': 'cols[i + 1]', 'label': 'lab'}), "(dist, profr[i, :], '-', linewidth=3, c=cols[i + 1], label=lab)\n", (17434, 17497), True, 'import matplotlib.pyplot as plt\n'), ((17502, 17562), 'matplotlib.pyplot.plot', 'plt.plot', (['dist', 'profs[i, :]', '""":"""'], {'linewidth': '(3)', 'c': 'cols[i + 1]'}), "(dist, profs[i, :], ':', linewidth=3, c=cols[i + 1])\n", (17510, 17562), True, 'import matplotlib.pyplot as plt\n'), ((17597, 17642), 'matplotlib.pyplot.plot', 'plt.plot', (['cxcy[i, 0]', 'cxcy[i, 1]', '"""ok"""'], {'ms': '(12)'}), "(cxcy[i, 0], cxcy[i, 1], 'ok', ms=12)\n", (17605, 17642), True, 'import matplotlib.pyplot as plt\n'), ((17650, 17698), 'matplotlib.pyplot.plot', 'plt.plot', (['cxcy[i, 0]', 'cxcy[i, 1]', '"""o"""'], {'c': 'cols[i]'}), "(cxcy[i, 0], cxcy[i, 1], 'o', c=cols[i])\n", (17658, 17698), True, 'import matplotlib.pyplot as plt\n'), ((17735, 17774), 'matplotlib.pyplot.plot', 'plt.plot', (['dmax[i]', 'zmax[i]', '"""or"""'], {'ms': '(12)'}), "(dmax[i], zmax[i], 'or', ms=12)\n", (17743, 17774), True, 'import matplotlib.pyplot as plt\n'), ((17784, 17826), 'matplotlib.pyplot.plot', 'plt.plot', (['dmax[i]', 'zmax[i]', '"""o"""'], {'c': 'cols[i]'}), "(dmax[i], zmax[i], 'o', c=cols[i])\n", (17792, 17826), True, 'import matplotlib.pyplot as plt\n'), ((17865, 17901), 'matplotlib.pyplot.plot', 'plt.plot', (['dtoe[i]', 'ztoe', '"""ob"""'], {'ms': '(12)'}), "(dtoe[i], ztoe, 'ob', ms=12)\n", (17873, 17901), True, 'import matplotlib.pyplot as plt\n'), ((17911, 17950), 'matplotlib.pyplot.plot', 'plt.plot', (['dtoe[i]', 'ztoe', '"""o"""'], {'c': 'cols[i]'}), "(dtoe[i], ztoe, 'o', c=cols[i])\n", (17919, 17950), True, 'import matplotlib.pyplot as plt\n'), ((17989, 18031), 'matplotlib.pyplot.plot', 'plt.plot', (['dist[ixd[i]]', 'datum', '"""vr"""'], {'ms': 
'(12)'}), "(dist[ixd[i]], datum, 'vr', ms=12)\n", (17997, 18031), True, 'import matplotlib.pyplot as plt\n'), ((18041, 18086), 'matplotlib.pyplot.plot', 'plt.plot', (['dist[ixd[i]]', 'datum', '"""v"""'], {'c': 'cols[i]'}), "(dist[ixd[i]], datum, 'v', c=cols[i])\n", (18049, 18086), True, 'import matplotlib.pyplot as plt\n'), ((18125, 18168), 'matplotlib.pyplot.plot', 'plt.plot', (['dcrest[i]', 'zcrest[i]', '"""^r"""'], {'ms': '(12)'}), "(dcrest[i], zcrest[i], '^r', ms=12)\n", (18133, 18168), True, 'import matplotlib.pyplot as plt\n'), ((18178, 18224), 'matplotlib.pyplot.plot', 'plt.plot', (['dcrest[i]', 'zcrest[i]', '"""^"""'], {'c': 'cols[i]'}), "(dcrest[i], zcrest[i], '^', c=cols[i])\n", (18186, 18224), True, 'import matplotlib.pyplot as plt\n'), ((18303, 18340), 'matplotlib.pyplot.plot', 'plt.plot', (['disl[i]', 'datum', '"""<y"""'], {'ms': '(12)'}), "(disl[i], datum, '<y', ms=12)\n", (18311, 18340), True, 'import matplotlib.pyplot as plt\n'), ((18350, 18390), 'matplotlib.pyplot.plot', 'plt.plot', (['disl[i]', 'datum', '"""<"""'], {'c': 'cols[i]'}), "(disl[i], datum, '<', c=cols[i])\n", (18358, 18390), True, 'import matplotlib.pyplot as plt\n'), ((18685, 18724), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pfn'], {'format': '"""png"""', 'dpi': '(300)'}), "(pfn, format='png', dpi=300)\n", (18696, 18724), True, 'import matplotlib.pyplot as plt\n'), ((21880, 21895), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (21892, 21895), True, 'import numpy as np\n'), ((22466, 22480), 'numpy.radians', 'np.radians', (['az'], {}), '(az)\n', (22476, 22480), True, 'import numpy as np\n'), ((22501, 22515), 'numpy.radians', 'np.radians', (['az'], {}), '(az)\n', (22511, 22515), True, 'import numpy as np\n'), ((24153, 24172), 'numpy.concatenate', 'np.concatenate', (['xbl'], {}), '(xbl)\n', (24167, 24172), True, 'import numpy as np\n'), ((24190, 24209), 'numpy.concatenate', 'np.concatenate', (['ybl'], {}), '(ybl)\n', (24204, 24209), True, 'import numpy as np\n'), ((4956, 
4985), 'numpy.append', 'np.append', (['channel_strt', 'x[i]'], {}), '(channel_strt, x[i])\n', (4965, 4985), True, 'import numpy as np\n'), ((5020, 5048), 'numpy.append', 'np.append', (['channel_width', 'dx'], {}), '(channel_width, dx)\n', (5029, 5048), True, 'import numpy as np\n'), ((5087, 5118), 'numpy.append', 'np.append', (['channel_max_depth', 'z'], {}), '(channel_max_depth, z)\n', (5096, 5118), True, 'import numpy as np\n'), ((5156, 5187), 'numpy.append', 'np.append', (['channel_avg_depth', 'z'], {}), '(channel_avg_depth, z)\n', (5165, 5187), True, 'import numpy as np\n'), ((5220, 5251), 'numpy.append', 'np.append', (['channel_area', '(z * dx)'], {}), '(channel_area, z * dx)\n', (5229, 5251), True, 'import numpy as np\n'), ((5470, 5499), 'numpy.append', 'np.append', (['channel_strt', 'x[i]'], {}), '(channel_strt, x[i])\n', (5479, 5499), True, 'import numpy as np\n'), ((5534, 5562), 'numpy.append', 'np.append', (['channel_width', 'dx'], {}), '(channel_width, dx)\n', (5543, 5562), True, 'import numpy as np\n'), ((5601, 5632), 'numpy.append', 'np.append', (['channel_max_depth', 'z'], {}), '(channel_max_depth, z)\n', (5610, 5632), True, 'import numpy as np\n'), ((5670, 5701), 'numpy.append', 'np.append', (['channel_avg_depth', 'z'], {}), '(channel_avg_depth, z)\n', (5679, 5701), True, 'import numpy as np\n'), ((5734, 5765), 'numpy.append', 'np.append', (['channel_area', '(z * dx)'], {}), '(channel_area, z * dx)\n', (5743, 5765), True, 'import numpy as np\n'), ((11689, 11699), 'numpy.max', 'np.max', (['ix'], {}), '(ix)\n', (11695, 11699), True, 'import numpy as np\n'), ((12431, 12452), 'numpy.isnan', 'np.isnan', (['profs[i, :]'], {}), '(profs[i, :])\n', (12439, 12452), True, 'import numpy as np\n'), ((13161, 13186), 'numpy.nanargmax', 'np.nanargmax', (['profs[i, :]'], {}), '(profs[i, :])\n', (13173, 13186), True, 'import numpy as np\n'), ((17266, 17284), 'numpy.ones_like', 'np.ones_like', (['dist'], {}), '(dist)\n', (17278, 17284), True, 'import numpy as np\n'), 
((24680, 24701), 'numpy.isnan', 'np.isnan', (['mp[i, :, :]'], {}), '(mp[i, :, :])\n', (24688, 24701), True, 'import numpy as np\n'), ((6023, 6061), 'numpy.max', 'np.max', (['(channel_max_depth[nc - 1], z)'], {}), '((channel_max_depth[nc - 1], z))\n', (6029, 6061), True, 'import numpy as np\n'), ((11950, 11983), 'numpy.argwhere', 'np.argwhere', (['(profs[i, :] >= datum)'], {}), '(profs[i, :] >= datum)\n', (11961, 11983), True, 'import numpy as np\n'), ((13951, 13998), 'numpy.nanargmax', 'np.nanargmax', (['profs[i, idcrest_min:idcrest_max]'], {}), '(profs[i, idcrest_min:idcrest_max])\n', (13963, 13998), True, 'import numpy as np\n'), ((9809, 9833), 'numpy.isfinite', 'np.isfinite', (['profs[i, :]'], {}), '(profs[i, :])\n', (9820, 9833), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import recall_score, precision_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neighbors import NearestNeighbors
from sklearn.naive_bayes import GaussianNB
class SpotMarketPrediction(object):
    """Predict which trader placed each bid/ask offer in a held-out market
    session, using a Gaussian Naive Bayes classifier trained on the
    Bid_Ask_History CSV exports of earlier sessions.

    Workflow: ``get_data()`` -> ``predict_market()`` -> ``display_info()``
    / ``graph_predictions()`` / ``give_trader_info()``.
    """

    def __init__(self):
        # Folder holding one sub-directory per recorded session; each
        # sub-directory contains a Bid_Ask_History.csv export.
        self.input_path = "C:\\Users\\Summer17\\Desktop\\Repos\\DoubleAuctionMisc\\period data\\"
        self.session_name = "AA_predict "     # prefix of the training sessions
        self.test_session = "AA_predict 5\\"  # held-out session to predict
        self.train_y = []          # training targets (trader ID, CSV column 1)
        self.train_x = []          # training feature rows
        self.test_x = []           # test feature rows
        self.test_y = []           # test targets
        self.bid_ask_list = []     # 'bid'/'ask' label of each test offer
        self.prediction_history = []   # [offer type, predicted trader] pairs
        self.input_X_train = None
        self.input_y_train = None
        self.input_X_test = None
        self.input_y_test = None
        self.y_hat = None          # predictions for the test session
        # Name kept for backward compatibility: after predict_market() this
        # holds the fitted classifier (a GaussianNB, despite the name).
        self.knn = None
        self.period_splits = None
        self.indices = []
        self.predicted_bids = []   # predicted traders for 'bid' offers
        self.predicted_asks = []   # predicted traders for 'ask' offers

    def get_data(self):
        """Load feature rows and trader-ID targets from training sessions
        1-4 and from the held-out test session."""
        for i in range(4):
            train_path = (self.input_path + self.session_name + str(i + 1)
                          + "\\" + "Bid_Ask_History.csv")
            input_file = pd.read_csv(train_path, header=None, delimiter=',')
            # ``.values`` replaces DataFrame.as_matrix(), which was
            # deprecated in pandas 0.23 and removed in 1.0.
            # NOTE(review): rows still include column 1 (the target), so the
            # target leaks into the features — confirm whether intentional.
            input_X = input_file._get_numeric_data().values
            for row in input_X:
                self.train_x.append(row)
                self.train_y.append(row[1])  # column 1 = trader ID target
        test_file = pd.read_csv(self.input_path + self.test_session +
                                "Bid_Ask_History.csv", header=None, delimiter=',')
        # Keep the raw 'bid'/'ask' labels (column 2 of the raw rows) so the
        # predictions can be split by offer type later.
        for row in test_file.values:
            self.bid_ask_list.append(row[2])
        test_X = test_file._get_numeric_data().values
        for row in test_X:
            self.test_x.append(row)
            self.test_y.append(row[1])

    def predict_market(self):
        """Fit a Gaussian Naive Bayes model on the training sessions and
        predict trader IDs for the held-out test session."""
        self.input_X_train = pd.DataFrame(self.train_x)
        self.input_y_train = self.train_y
        self.input_X_test = pd.DataFrame(self.test_x)
        self.input_y_test = self.test_y
        gnb = GaussianNB()
        gnb.fit(self.input_X_train, self.input_y_train)
        # BUG FIX: predict on the held-out test set. The original predicted
        # on the training matrix, so y_hat could not be compared against
        # input_y_test in display_info()/graph_predictions().
        self.y_hat = gnb.predict(self.input_X_test)
        # BUG FIX: keep the fitted model so display_info() can call
        # .score() (self.knn used to stay None and crash there).
        self.knn = gnb
        print(self.y_hat)
        # Split the predictions by offer type so give_trader_info() works.
        for label, prediction in zip(self.bid_ask_list, self.y_hat):
            self.prediction_history.append([label, prediction])
            if label == 'bid':
                self.predicted_bids.append(prediction)
            elif label == 'ask':
                self.predicted_asks.append(prediction)
            else:
                print("ERROR: predictions for given offer type DNE!")

    def give_trader_info(self, offer_type):
        """Return the predicted trader IDs for ``offer_type`` ('bid' or
        'ask'); prints an error and returns None otherwise."""
        if offer_type == 'bid':
            return self.predicted_bids
        elif offer_type == 'ask':
            return self.predicted_asks
        else:
            print("ERROR: given offer type DNE!")

    def display_info(self):
        """Print actual vs. predicted trader IDs plus accuracy statistics."""
        print("----------------------------------------------------------------------------------")
        print("Actual Values")
        print(self.input_y_test)
        print("Number of Values: " + str(len(self.input_y_test)))
        print()
        print("Predicted Values")
        print(self.y_hat.tolist())
        print("Number of Values: " + str(len(self.y_hat)))
        correct_count = 0
        count_one_off = 0
        count_rest_off = 0
        for predicted, actual in zip(self.y_hat, self.input_y_test):
            if predicted == actual:
                correct_count += 1
            elif abs(predicted - actual) == 1:
                count_one_off += 1
            else:
                count_rest_off += 1
        wrong_count = len(self.y_hat) - correct_count
        print()
        percent_correct = correct_count / len(self.y_hat)
        # BUG FIX: guard against ZeroDivisionError when every prediction
        # is correct (wrong_count == 0).
        percent_wrong_one = count_one_off / wrong_count if wrong_count else 0.0
        print("Correct Predictions: " + str(correct_count))
        print("Wrong Predictions: " + str(wrong_count))
        print("Number of Predictions Off by One: " + str(count_one_off))
        print("Number of Predictions Off more than 1: " + str(count_rest_off))
        print("Percentage of Right Predictions: " + str(percent_correct*100) + "%")
        print("Percentage of Wrong Predictions Off by Only One: " + str(percent_wrong_one*100) + "%")
        # self.knn now holds the fitted model (see predict_market()).
        print("Train Data Score: " + str(self.knn.score(self.input_X_train, self.input_y_train)))
        print("Test Data Score: " + str(self.knn.score(self.input_X_test, self.input_y_test)))
        print("-------------------------------------------------------------------------------------------")
        print("Market Bid Predictions")
        print(self.predicted_bids)
        print(len(self.predicted_bids))
        print()
        print("Market Ask Predictions")
        print(self.predicted_asks)
        print(len(self.predicted_asks))

    def graph_predictions(self):
        """Render an interactive scatter plot of actual vs. predicted
        trader IDs with plotly."""
        trace1 = go.Scatter(
            x=np.array(range(len(self.input_y_test))),
            y=np.array(self.input_y_test), name='Actual Amount',
            mode='markers',
            line=dict(color='rgba(152, 0, 0, .8)', width=4),
            marker=dict(size=10, color='rgba(152, 0, 0, .8)'))
        trace2 = go.Scatter(
            x=np.array(range(len(self.y_hat))),
            y=np.array(self.y_hat), name='Predicted Amount',
            mode='markers',
            line=dict(color='rgba(200, 150, 150, .9)', width=4),
            marker=dict(size=10, color='rgba(200, 150, 150, .9)'))
        data = [trace1, trace2]
        layout = go.Layout(plot_bgcolor='rgb(229,229,229)',
                           paper_bgcolor='rgb(255,255,255)',
                           title='Trader Predictions (6 datasets)',
                           xaxis=dict(title='Order of Data (Start-->Finish)',
                                      gridcolor='rgb(255,255,255)',
                                      showgrid=True,
                                      showline=False,
                                      showticklabels=True,
                                      tickcolor='rgb(127,127,127)',
                                      ticks='outside',
                                      zeroline=False,
                                      titlefont=dict(family='Courier New, monospace', size=18, color='#7f7f7f')),
                           yaxis=dict(title='Trader ID (22 traders, index start at 0) ',
                                      gridcolor='rgb(255,255,255)',
                                      showgrid=True,
                                      showline=False,
                                      showticklabels=True,
                                      tickcolor='rgb(127,127,127)',
                                      ticks='outside',
                                      zeroline=False,
                                      titlefont=dict(family='Courier New, monospace', size=18, color='#7f7f7f')))
        fig = go.Figure(data=data, layout=layout)
        # BUG FIX: ``py`` is already ``plotly.offline``, so the entry point
        # is py.plot, not py.offline.plot.
        py.plot(fig)
if __name__ == "__main__":
    # TODO see if it would be possible to employ this algorithm to shock markets to equilibrium
    # TODO build AI_trader that uses this algorithm to place better bids/asks
    # NOTE: several earlier prototype experiments (single-dataset bid/ask KNN,
    # contract-price prediction, strategy classification with KNN, and the
    # associated plotly graphing scripts) previously lived here as
    # commented-out scratch code; recover them from version control if needed.
    predictor = SpotMarketPrediction()
    predictor.get_data()
    predictor.predict_market()
    predictor.display_info()
    predictor.graph_predictions()
| [
"pandas.DataFrame",
"sklearn.naive_bayes.GaussianNB",
"pandas.read_csv",
"numpy.array",
"plotly.offline.offline.plot",
"plotly.graph_objs.Figure"
] | [((3226, 3330), 'pandas.read_csv', 'pd.read_csv', (["(self.input_path + self.test_session + 'Bid_Ask_History.csv')"], {'header': 'None', 'delimiter': '""","""'}), "(self.input_path + self.test_session + 'Bid_Ask_History.csv',\n header=None, delimiter=',')\n", (3237, 3330), True, 'import pandas as pd\n'), ((3751, 3777), 'pandas.DataFrame', 'pd.DataFrame', (['self.train_x'], {}), '(self.train_x)\n', (3763, 3777), True, 'import pandas as pd\n'), ((3964, 3989), 'pandas.DataFrame', 'pd.DataFrame', (['self.test_x'], {}), '(self.test_x)\n', (3976, 3989), True, 'import pandas as pd\n'), ((4630, 4642), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (4640, 4642), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10551, 10586), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (10560, 10586), True, 'import plotly.graph_objs as go\n'), ((10595, 10615), 'plotly.offline.offline.plot', 'py.offline.plot', (['fig'], {}), '(fig)\n', (10610, 10615), True, 'import plotly.offline as py\n'), ((8393, 8420), 'numpy.array', 'np.array', (['self.input_y_test'], {}), '(self.input_y_test)\n', (8401, 8420), True, 'import numpy as np\n'), ((8720, 8740), 'numpy.array', 'np.array', (['self.y_hat'], {}), '(self.y_hat)\n', (8728, 8740), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from ..Utils import getModel
from sklearn.svm import SVR
from sklearn import ensemble
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
class StackingRegressor:
    def __init__(self, **params):
        """
        Wrapper class for Stacking Regressor.
        Parameters
        ----------
        stack list[tuple]: List of tuples (model name, model object)
        params list[dict]: List of model parameters for stack
        """
        # Defaults
        self._estimator_type = 'regressor'  # sklearn-style estimator tag
        self.trained = False                # flipped to True at the end of fit()
        self.level_one = None               # final (meta) estimator, created in fit()
        self.model = None                   # fitted ensemble.StackingRegressor
        self.params = params                # raw kwargs, returned by get_params()
        self.stack = []                     # list of (model name, estimator) tuples
        self.n_samples = 0
        self.n_features = 0
        self.mean = None                    # per-feature mean, set in fit()
        self.std = None                     # per-feature std, set in fit()
        self.set_params(**params)
    def _add_default_models(self, stack: list) -> list:
        """
        Prepares the models stack
        Ensures the stack always contains KNN, decision-tree and linear
        regressors.  An SVR is appended only when n_samples < 5000, since
        SVR training scales poorly with sample count.
        """
        # Add default models
        models = [i[0] for i in stack]
        if 'KNeighborsRegressor' not in models:
            stack.append(('KNeighborsRegressor', KNeighborsRegressor()))
        if 'DecisionTreeRegressor' not in models:
            stack.append(('DecisionTreeRegressor', DecisionTreeRegressor()))
        if 'LinearRegression' not in models:
            stack.append(('LinearRegression', LinearRegression()))
        if 'SVR' not in models and self.n_samples < 5000:
            stack.append(('SVR', SVR()))
        return stack
    def fit(self, x: pd.DataFrame, y: pd.Series):
        """Normalizes x, completes the stack with default models, then fits
        an sklearn StackingRegressor with LinearRegression as meta-learner."""
        # Set info
        self.n_samples = x.shape[0]
        self.n_features = x.shape[1]
        self.mean = np.mean(x, axis=0)
        self.std = np.std(x, axis=0)
        # Constant features would cause division by zero below
        self.std[self.std == 0] = 1
        # Normalize
        x = (x - self.mean) / self.std
        # Create stack
        self.level_one = LinearRegression()
        self.stack = self._add_default_models(self.stack)
        self.model = ensemble.StackingRegressor(self.stack, final_estimator=self.level_one)
        # Fit
        self.model.fit(x, y)
        # Set flag
        self.trained = True
    def set_params(self, **params):
        """
        Set params for the models in the stack
        Parameters
        ----------
        params dict: Nested dictionary, first keys are model names, second params
        Returns
        -------
        self, for chaining (sklearn convention).
        """
        # Overwrite old params
        for k, v in params.items():
            self.params[k] = v
        # Set default
        # 'n_samples' / 'n_features' are size hints, not model names: consume
        # them here (pop) so the loop below only sees model-name keys.
        if 'n_samples' in params:
            self.n_samples = params['n_samples']
            params.pop('n_samples')
        if 'n_features' in params:
            self.n_features = params['n_features']
            params.pop('n_features')
        for model_name, param in params.items():
            # Get index
            ind = [i for i, x in enumerate(self.stack) if x[0] == model_name]
            # Add if not in stack
            if len(ind) == 0:
                model = getModel(model_name, mode='regression', samples=self.n_samples)
                self.stack.append((model_name, model.set_params(**param)))
            # Update otherwise
            else:
                self.stack[ind[0]][1].set_params(**param)
        return self
    def get_params(self, **args):
        """
        Returns a dictionary with all params.
        Note: includes the consumed 'n_samples'/'n_features' hints, so the
        output round-trips through set_params().
        """
        return self.params
    def predict(self, x):
        """Predicts with the fitted stack; x is normalized with the training
        mean/std before being passed to the ensemble. Returns a 1-D array."""
        assert self.trained
        return self.model.predict((x - self.mean) / self.std).reshape(-1)
| [
"sklearn.svm.SVR",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.tree.DecisionTreeRegressor",
"numpy.std",
"sklearn.linear_model.LinearRegression",
"numpy.mean",
"sklearn.ensemble.StackingRegressor"
] | [((1738, 1756), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1745, 1756), True, 'import numpy as np\n'), ((1776, 1793), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1782, 1793), True, 'import numpy as np\n'), ((1939, 1957), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1955, 1957), False, 'from sklearn.linear_model import LinearRegression\n'), ((2037, 2107), 'sklearn.ensemble.StackingRegressor', 'ensemble.StackingRegressor', (['self.stack'], {'final_estimator': 'self.level_one'}), '(self.stack, final_estimator=self.level_one)\n', (2063, 2107), False, 'from sklearn import ensemble\n'), ((1192, 1213), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (1211, 1213), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((1317, 1340), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1338, 1340), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1434, 1452), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1450, 1452), False, 'from sklearn.linear_model import LinearRegression\n'), ((1546, 1551), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (1549, 1551), False, 'from sklearn.svm import SVR\n')] |
from sklearn.model_selection import train_test_split
from synthesizer.utils.text import text_to_sequence
from synthesizer.infolog import log
import tensorflow as tf
import numpy as np
import threading
import time
import os
# Number of batches each background feeder thread prepares per group
# before enqueueing them (see Feeder._enqueue_next_train_group).
_batches_per_group = 16
class Feeder:
    """
    Feeds batches of data into queue on a background thread.
    """
    def __init__(self, coordinator, metadata_filename, hparams):
        """Loads metadata, splits it into train/test sets and builds the TF
        placeholders plus the train and eval FIFO queues.
        Args:
            coordinator: object polled via should_stop() to terminate the
                background feeder threads.
            metadata_filename: path to the "|"-separated metadata file; the
                "mels" and "embeds" directories are siblings of this file.
            hparams: hyperparameters (batch size, test split, mel dims, ...).
        """
        super(Feeder, self).__init__()
        self._coord = coordinator
        self._hparams = hparams
        self._cleaner_names = [x.strip() for x in hparams.cleaners.split(",")]
        self._train_offset = 0
        self._test_offset = 0
        # Load metadata
        self._mel_dir = os.path.join(os.path.dirname(metadata_filename), "mels")
        self._embed_dir = os.path.join(os.path.dirname(metadata_filename), "embeds")
        with open(metadata_filename, encoding="utf-8") as f:
            self._metadata = [line.strip().split("|") for line in f]
            # column 4 presumably holds the frame count; only used for the
            # logged duration estimate below -- TODO confirm against writer
            frame_shift_ms = hparams.hop_size / hparams.sample_rate
            hours = sum([int(x[4]) for x in self._metadata]) * frame_shift_ms / (3600)
            log("Loaded metadata for {} examples ({:.2f} hours)".format(len(self._metadata), hours))
        #Train test split
        if hparams.tacotron_test_size is None:
            assert hparams.tacotron_test_batches is not None
        test_size = (hparams.tacotron_test_size if hparams.tacotron_test_size is not None
            else hparams.tacotron_test_batches * hparams.tacotron_batch_size)
        indices = np.arange(len(self._metadata))
        train_indices, test_indices = train_test_split(
            indices, test_size=test_size, random_state=hparams.tacotron_data_random_state)
        #Make sure test_indices is a multiple of batch_size else round up
        len_test_indices = self._round_down(len(test_indices), hparams.tacotron_batch_size)
        extra_test = test_indices[len_test_indices:]
        test_indices = test_indices[:len_test_indices]
        # leftover test examples are folded back into the training set
        train_indices = np.concatenate([train_indices, extra_test])
        self._train_meta = list(np.array(self._metadata)[train_indices])
        self._test_meta = list(np.array(self._metadata)[test_indices])
        self.test_steps = len(self._test_meta) // hparams.tacotron_batch_size
        if hparams.tacotron_test_size is None:
            assert hparams.tacotron_test_batches == self.test_steps
        #pad input sequences with the <pad_token> 0 ( _ )
        self._pad = 0
        #explicitely setting the padding to a value that doesn"t originally exist in the spectogram
        #to avoid any possible conflicts, without affecting the output range of the model too much
        if hparams.symmetric_mels:
            self._target_pad = -hparams.max_abs_value
        else:
            self._target_pad = 0.
        #Mark finished sequences with 1s
        self._token_pad = 1.
        with tf.device("/cpu:0"):
            # Create placeholders for inputs and targets. Don"t specify batch size because we want
            # to be able to feed different batch sizes at eval time.
            self._placeholders = [
                tf.placeholder(tf.int32, shape=(None, None), name="inputs"),
                tf.placeholder(tf.int32, shape=(None, ), name="input_lengths"),
                tf.placeholder(tf.float32, shape=(None, None, hparams.num_mels),
                               name="mel_targets"),
                tf.placeholder(tf.float32, shape=(None, None), name="token_targets"),
                tf.placeholder(tf.int32, shape=(None, ), name="targets_lengths"),
                tf.placeholder(tf.int32, shape=(hparams.tacotron_num_gpus, None),
                               name="split_infos"),
                # SV2TTS
                tf.placeholder(tf.float32, shape=(None, hparams.speaker_embedding_size),
                               name="speaker_embeddings")
            ]
            # Create queue for buffering data
            queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32,
                             tf.int32, tf.int32, tf.float32], name="input_queue")
            self._enqueue_op = queue.enqueue(self._placeholders)
            self.inputs, self.input_lengths, self.mel_targets, self.token_targets, \
                self.targets_lengths, self.split_infos, self.speaker_embeddings = queue.dequeue()
            # dequeue() loses static shape info, so restore it from the placeholders
            self.inputs.set_shape(self._placeholders[0].shape)
            self.input_lengths.set_shape(self._placeholders[1].shape)
            self.mel_targets.set_shape(self._placeholders[2].shape)
            self.token_targets.set_shape(self._placeholders[3].shape)
            self.targets_lengths.set_shape(self._placeholders[4].shape)
            self.split_infos.set_shape(self._placeholders[5].shape)
            self.speaker_embeddings.set_shape(self._placeholders[6].shape)
            # Create eval queue for buffering eval data
            eval_queue = tf.FIFOQueue(1, [tf.int32, tf.int32, tf.float32, tf.float32,
                                  tf.int32, tf.int32, tf.float32], name="eval_queue")
            self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
            self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, \
                self.eval_token_targets, self.eval_targets_lengths, \
                self.eval_split_infos, self.eval_speaker_embeddings = eval_queue.dequeue()
            self.eval_inputs.set_shape(self._placeholders[0].shape)
            self.eval_input_lengths.set_shape(self._placeholders[1].shape)
            self.eval_mel_targets.set_shape(self._placeholders[2].shape)
            self.eval_token_targets.set_shape(self._placeholders[3].shape)
            self.eval_targets_lengths.set_shape(self._placeholders[4].shape)
            self.eval_split_infos.set_shape(self._placeholders[5].shape)
            self.eval_speaker_embeddings.set_shape(self._placeholders[6].shape)
    def start_threads(self, session):
        """Spawns the two daemon threads that keep the train and eval queues fed."""
        self._session = session
        thread = threading.Thread(name="background", target=self._enqueue_next_train_group)
        thread.daemon = True #Thread will close when parent quits
        thread.start()
        thread = threading.Thread(name="background", target=self._enqueue_next_test_group)
        thread.daemon = True #Thread will close when parent quits
        thread.start()
    def _get_test_groups(self):
        """Loads the next test example from disk: (input ids, mel, token
        targets, speaker embedding, mel length). Advances the test cursor."""
        meta = self._test_meta[self._test_offset]
        self._test_offset += 1
        text = meta[5]
        input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
        mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
        #Create parallel sequences containing zeros to represent a non finished sequence
        token_target = np.asarray([0.] * (len(mel_target) - 1))
        embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
        return input_data, mel_target, token_target, embed_target, len(mel_target)
    def make_test_batches(self):
        """Builds the fixed set of test batches once.
        Returns (batches, outputs_per_step)."""
        start = time.time()
        # Read a group of examples
        n = self._hparams.tacotron_batch_size
        r = self._hparams.outputs_per_step
        #Test on entire test set
        examples = [self._get_test_groups() for i in range(len(self._test_meta))]
        # Bucket examples based on similar output sequence length for efficiency
        examples.sort(key=lambda x: x[-1])
        batches = [examples[i: i+n] for i in range(0, len(examples), n)]
        np.random.shuffle(batches)
        log("\nGenerated %d test batches of size %d in %.3f sec" % (len(batches), n, time.time() - start))
        return batches, r
    def _enqueue_next_train_group(self):
        """Background loop: repeatedly builds shuffled, length-bucketed train
        batches and pushes them into the input queue until coordinator stops."""
        while not self._coord.should_stop():
            start = time.time()
            # Read a group of examples
            n = self._hparams.tacotron_batch_size
            r = self._hparams.outputs_per_step
            examples = [self._get_next_example() for i in range(n * _batches_per_group)]
            # Bucket examples based on similar output sequence length for efficiency
            examples.sort(key=lambda x: x[-1])
            batches = [examples[i: i+n] for i in range(0, len(examples), n)]
            np.random.shuffle(batches)
            log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
            for batch in batches:
                feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
                self._session.run(self._enqueue_op, feed_dict=feed_dict)
    def _enqueue_next_test_group(self):
        """Background loop: cycles forever over the precomputed test batches,
        feeding the eval queue."""
        #Create test batches once and evaluate on them for all test steps
        test_batches, r = self.make_test_batches()
        while not self._coord.should_stop():
            for batch in test_batches:
                feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
                self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
    def _get_next_example(self):
        """Gets a single example (input, mel_target, token_target, linear_target, mel_length) from_ disk
        """
        # Reshuffle the training metadata each time a full epoch has been consumed
        if self._train_offset >= len(self._train_meta):
            self._train_offset = 0
            np.random.shuffle(self._train_meta)
        meta = self._train_meta[self._train_offset]
        self._train_offset += 1
        text = meta[5]
        input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
        mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
        #Create parallel sequences containing zeros to represent a non finished sequence
        token_target = np.asarray([0.] * (len(mel_target) - 1))
        embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
        return input_data, mel_target, token_target, embed_target, len(mel_target)
    def _prepare_batch(self, batches, outputs_per_step):
        """Pads one batch and lays out per-GPU slices side by side (axis=1);
        returns the 7-tuple matching self._placeholders."""
        assert 0 == len(batches) % self._hparams.tacotron_num_gpus
        size_per_device = int(len(batches) / self._hparams.tacotron_num_gpus)
        np.random.shuffle(batches)
        inputs = None
        mel_targets = None
        token_targets = None
        targets_lengths = None
        split_infos = []
        targets_lengths = np.asarray([x[-1] for x in batches], dtype=np.int32) #Used to mask loss
        input_lengths = np.asarray([len(x[0]) for x in batches], dtype=np.int32)
        for i in range(self._hparams.tacotron_num_gpus):
            batch = batches[size_per_device*i:size_per_device*(i+1)]
            input_cur_device, input_max_len = self._prepare_inputs([x[0] for x in batch])
            inputs = np.concatenate((inputs, input_cur_device), axis=1) if inputs is not None else input_cur_device
            mel_target_cur_device, mel_target_max_len = self._prepare_targets([x[1] for x in batch], outputs_per_step)
            mel_targets = np.concatenate((mel_targets, mel_target_cur_device), axis=1) if mel_targets is not None else mel_target_cur_device
            #Pad sequences with 1 to infer that the sequence is done
            token_target_cur_device, token_target_max_len = self._prepare_token_targets([x[2] for x in batch], outputs_per_step)
            token_targets = np.concatenate((token_targets, token_target_cur_device),axis=1) if token_targets is not None else token_target_cur_device
            # per-device max lengths, needed to split the concatenation back apart
            split_infos.append([input_max_len, mel_target_max_len, token_target_max_len])
        split_infos = np.asarray(split_infos, dtype=np.int32)
        ### SV2TTS ###
        embed_targets = np.asarray([x[3] for x in batches])
        ##############
        return inputs, input_lengths, mel_targets, token_targets, targets_lengths, \
               split_infos, embed_targets
    def _prepare_inputs(self, inputs):
        """Pads integer text sequences to a common length; returns (stacked, max_len)."""
        max_len = max([len(x) for x in inputs])
        return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len
    def _prepare_targets(self, targets, alignment):
        """Pads mel targets to a common length rounded up to a multiple of alignment."""
        max_len = max([len(t) for t in targets])
        data_len = self._round_up(max_len, alignment)
        return np.stack([self._pad_target(t, data_len) for t in targets]), data_len
    def _prepare_token_targets(self, targets, alignment):
        """Pads stop-token targets (with one extra final step) to a multiple of alignment."""
        max_len = max([len(t) for t in targets]) + 1
        data_len = self._round_up(max_len, alignment)
        return np.stack([self._pad_token_target(t, data_len) for t in targets]), data_len
    def _pad_input(self, x, length):
        # right-pad 1-D input ids with the pad token
        return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=self._pad)
    def _pad_target(self, t, length):
        # right-pad the time axis only; mel channels untouched
        return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode="constant", constant_values=self._target_pad)
    def _pad_token_target(self, t, length):
        # right-pad with 1s, which mark "sequence finished"
        return np.pad(t, (0, length - t.shape[0]), mode="constant", constant_values=self._token_pad)
    def _round_up(self, x, multiple):
        """Rounds x up to the nearest multiple of `multiple`."""
        remainder = x % multiple
        return x if remainder == 0 else x + multiple - remainder
    def _round_down(self, x, multiple):
        """Rounds x down to the nearest multiple of `multiple`."""
        remainder = x % multiple
        return x if remainder == 0 else x - remainder
| [
"numpy.pad",
"threading.Thread",
"numpy.concatenate",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"os.path.dirname",
"tensorflow.device",
"synthesizer.utils.text.text_to_sequence",
"time.time",
"tensorflow.placeholder",
"numpy.array",
"tensorflow.FIFOQueue",
"os.path.join",
... | [((1632, 1732), 'sklearn.model_selection.train_test_split', 'train_test_split', (['indices'], {'test_size': 'test_size', 'random_state': 'hparams.tacotron_data_random_state'}), '(indices, test_size=test_size, random_state=hparams.\n tacotron_data_random_state)\n', (1648, 1732), False, 'from sklearn.model_selection import train_test_split\n'), ((2040, 2083), 'numpy.concatenate', 'np.concatenate', (['[train_indices, extra_test]'], {}), '([train_indices, extra_test])\n', (2054, 2083), True, 'import numpy as np\n'), ((6067, 6141), 'threading.Thread', 'threading.Thread', ([], {'name': '"""background"""', 'target': 'self._enqueue_next_train_group'}), "(name='background', target=self._enqueue_next_train_group)\n", (6083, 6141), False, 'import threading\n'), ((6249, 6322), 'threading.Thread', 'threading.Thread', ([], {'name': '"""background"""', 'target': 'self._enqueue_next_test_group'}), "(name='background', target=self._enqueue_next_test_group)\n", (6265, 6322), False, 'import threading\n'), ((7072, 7083), 'time.time', 'time.time', ([], {}), '()\n', (7081, 7083), False, 'import time\n'), ((7531, 7557), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (7548, 7557), True, 'import numpy as np\n'), ((10096, 10122), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (10113, 10122), True, 'import numpy as np\n'), ((10285, 10337), 'numpy.asarray', 'np.asarray', (['[x[-1] for x in batches]'], {'dtype': 'np.int32'}), '([x[-1] for x in batches], dtype=np.int32)\n', (10295, 10337), True, 'import numpy as np\n'), ((11501, 11540), 'numpy.asarray', 'np.asarray', (['split_infos'], {'dtype': 'np.int32'}), '(split_infos, dtype=np.int32)\n', (11511, 11540), True, 'import numpy as np\n'), ((11606, 11641), 'numpy.asarray', 'np.asarray', (['[x[3] for x in batches]'], {}), '([x[3] for x in batches])\n', (11616, 11641), True, 'import numpy as np\n'), ((12524, 12603), 'numpy.pad', 'np.pad', (['x', '(0, length - x.shape[0])'], 
{'mode': '"""constant"""', 'constant_values': 'self._pad'}), "(x, (0, length - x.shape[0]), mode='constant', constant_values=self._pad)\n", (12530, 12603), True, 'import numpy as np\n'), ((12658, 12758), 'numpy.pad', 'np.pad', (['t', '[(0, length - t.shape[0]), (0, 0)]'], {'mode': '"""constant"""', 'constant_values': 'self._target_pad'}), "(t, [(0, length - t.shape[0]), (0, 0)], mode='constant',\n constant_values=self._target_pad)\n", (12664, 12758), True, 'import numpy as np\n'), ((12815, 12905), 'numpy.pad', 'np.pad', (['t', '(0, length - t.shape[0])'], {'mode': '"""constant"""', 'constant_values': 'self._token_pad'}), "(t, (0, length - t.shape[0]), mode='constant', constant_values=self.\n _token_pad)\n", (12821, 12905), True, 'import numpy as np\n'), ((717, 751), 'os.path.dirname', 'os.path.dirname', (['metadata_filename'], {}), '(metadata_filename)\n', (732, 751), False, 'import os\n'), ((800, 834), 'os.path.dirname', 'os.path.dirname', (['metadata_filename'], {}), '(metadata_filename)\n', (815, 834), False, 'import os\n'), ((2925, 2944), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (2934, 2944), True, 'import tensorflow as tf\n'), ((4014, 4132), 'tensorflow.FIFOQueue', 'tf.FIFOQueue', (['(8)', '[tf.int32, tf.int32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32]'], {'name': '"""input_queue"""'}), "(8, [tf.int32, tf.int32, tf.float32, tf.float32, tf.int32, tf.\n int32, tf.float32], name='input_queue')\n", (4026, 4132), True, 'import tensorflow as tf\n'), ((4983, 5100), 'tensorflow.FIFOQueue', 'tf.FIFOQueue', (['(1)', '[tf.int32, tf.int32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32]'], {'name': '"""eval_queue"""'}), "(1, [tf.int32, tf.int32, tf.float32, tf.float32, tf.int32, tf.\n int32, tf.float32], name='eval_queue')\n", (4995, 5100), True, 'import tensorflow as tf\n'), ((6583, 6626), 'synthesizer.utils.text.text_to_sequence', 'text_to_sequence', (['text', 'self._cleaner_names'], {}), '(text, 
self._cleaner_names)\n', (6599, 6626), False, 'from synthesizer.utils.text import text_to_sequence\n'), ((6673, 6709), 'os.path.join', 'os.path.join', (['self._mel_dir', 'meta[1]'], {}), '(self._mel_dir, meta[1])\n', (6685, 6709), False, 'import os\n'), ((6895, 6933), 'os.path.join', 'os.path.join', (['self._embed_dir', 'meta[2]'], {}), '(self._embed_dir, meta[2])\n', (6907, 6933), False, 'import os\n'), ((7799, 7810), 'time.time', 'time.time', ([], {}), '()\n', (7808, 7810), False, 'import time\n'), ((8259, 8285), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (8276, 8285), True, 'import numpy as np\n'), ((9272, 9307), 'numpy.random.shuffle', 'np.random.shuffle', (['self._train_meta'], {}), '(self._train_meta)\n', (9289, 9307), True, 'import numpy as np\n'), ((9450, 9493), 'synthesizer.utils.text.text_to_sequence', 'text_to_sequence', (['text', 'self._cleaner_names'], {}), '(text, self._cleaner_names)\n', (9466, 9493), False, 'from synthesizer.utils.text import text_to_sequence\n'), ((9540, 9576), 'os.path.join', 'os.path.join', (['self._mel_dir', 'meta[1]'], {}), '(self._mel_dir, meta[1])\n', (9552, 9576), False, 'import os\n'), ((9762, 9800), 'os.path.join', 'os.path.join', (['self._embed_dir', 'meta[2]'], {}), '(self._embed_dir, meta[2])\n', (9774, 9800), False, 'import os\n'), ((2117, 2141), 'numpy.array', 'np.array', (['self._metadata'], {}), '(self._metadata)\n', (2125, 2141), True, 'import numpy as np\n'), ((2189, 2213), 'numpy.array', 'np.array', (['self._metadata'], {}), '(self._metadata)\n', (2197, 2213), True, 'import numpy as np\n'), ((3165, 3224), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, None)', 'name': '"""inputs"""'}), "(tf.int32, shape=(None, None), name='inputs')\n", (3179, 3224), True, 'import tensorflow as tf\n'), ((3242, 3303), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': '"""input_lengths"""'}), "(tf.int32, shape=(None,), 
name='input_lengths')\n", (3256, 3303), True, 'import tensorflow as tf\n'), ((3322, 3411), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, hparams.num_mels)', 'name': '"""mel_targets"""'}), "(tf.float32, shape=(None, None, hparams.num_mels), name=\n 'mel_targets')\n", (3336, 3411), True, 'import tensorflow as tf\n'), ((3456, 3524), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None)', 'name': '"""token_targets"""'}), "(tf.float32, shape=(None, None), name='token_targets')\n", (3470, 3524), True, 'import tensorflow as tf\n'), ((3542, 3605), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': '"""targets_lengths"""'}), "(tf.int32, shape=(None,), name='targets_lengths')\n", (3556, 3605), True, 'import tensorflow as tf\n'), ((3624, 3714), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(hparams.tacotron_num_gpus, None)', 'name': '"""split_infos"""'}), "(tf.int32, shape=(hparams.tacotron_num_gpus, None), name=\n 'split_infos')\n", (3638, 3714), True, 'import tensorflow as tf\n'), ((3801, 3904), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, hparams.speaker_embedding_size)', 'name': '"""speaker_embeddings"""'}), "(tf.float32, shape=(None, hparams.speaker_embedding_size),\n name='speaker_embeddings')\n", (3815, 3904), True, 'import tensorflow as tf\n'), ((10684, 10734), 'numpy.concatenate', 'np.concatenate', (['(inputs, input_cur_device)'], {'axis': '(1)'}), '((inputs, input_cur_device), axis=1)\n', (10698, 10734), True, 'import numpy as np\n'), ((10924, 10984), 'numpy.concatenate', 'np.concatenate', (['(mel_targets, mel_target_cur_device)'], {'axis': '(1)'}), '((mel_targets, mel_target_cur_device), axis=1)\n', (10938, 10984), True, 'import numpy as np\n'), ((11266, 11330), 'numpy.concatenate', 'np.concatenate', (['(token_targets, token_target_cur_device)'], {'axis': '(1)'}), '((token_targets, 
token_target_cur_device), axis=1)\n', (11280, 11330), True, 'import numpy as np\n'), ((7644, 7655), 'time.time', 'time.time', ([], {}), '()\n', (7653, 7655), False, 'import time\n'), ((8383, 8394), 'time.time', 'time.time', ([], {}), '()\n', (8392, 8394), False, 'import time\n')] |
import numpy as np
import re
import warnings
from .endf_data import reaction, atomic_relaxation
class endf_reader:
    """ENDF-6 format reader.
    See https://www.nndc.bnl.gov/csewg/docs/endf-manual.pdf for the specification.
    Only (MF=1, MT=451), MF=23, MF=26 are implemented.
    Properties:
    - MAT: Material identifier.
    - reactions: Dictionary of reaction instances.
        Key is MT number.
    - atomic_relaxation: Dictionary of atomic relaxation data.
        Key is subshell ID.
    """
    def __init__(self, file_handle):
        """Starts reading the file in file_handle,
        populating the reactions and atomic_relaxation members."""
        self._file = file_handle
        self.reactions = {}
        self.atomic_relaxation = {}
        # Determine MAT number for this file,
        # while skipping ahead to start of first header
        MF = 0
        while MF == 0:
            position = self._file.tell()
            line = self._file.readline()
            MF = int(line[70:72])
        self.MAT = int(line[66:70])
        self._file.seek(position)
        self._read()
    def _read(self):
        """Top-level loop: reads the header section, then dispatches every
        following section by its MF/MT numbers until end-of-material."""
        self._read_1_451()
        while True:
            # Next line should be one of
            # - Start of new section
            # - End of file (FEND)
            # - End of material (MEND)
            line = self._peekline()
            if self._is_FEND(line):
                self._FEND(self.MAT)
                continue
            if self._is_MEND(line):
                self._MEND()
                break
            # Next line is start of section.
            # Forward reading to relevant member function
            MF = endf_reader._eint(line[70:72])
            MT = endf_reader._eint(line[72:75])
            if MF == 23:
                self._read_23(MT)
            elif MF == 26:
                self._read_26(MT)
            elif MF == 28 and MT == 533:
                self._read_28_533()
            else:
                # Don't know how to read this file.
                warnings.warn("Not reading file {}".format(MF))
                self._seek_FEND()
    #
    # Seeking through file
    #
    def _peekline(self):
        """Returns the next line without consuming it."""
        pos = self._file.tell()
        line = self._file.readline()
        self._file.seek(pos)
        return line
    def _seek_FEND(self):
        """Consumes lines up to and including the next FEND record."""
        while not self._is_FEND(self._file.readline()):
            continue
    #
    # Reading individual files
    #
    # Header
    def _read_1_451(self):
        """Skips over the (MF=1, MT=451) descriptive header section."""
        # Doesn't actually read the data :)
        MMM = [self.MAT, 1, 451]
        self._print_debug(MMM)
        self._HEAD(MMM)
        self._CONT(MMM)
        self._CONT(MMM)
        # NWD: number of text lines; NXC: number of directory entries
        TEMP, _, LDRV, _, NWD, NXC = self._CONT(MMM)
        for _ in range(NWD):
            self._TEXT(MMM)
        for _ in range(NXC):
            self._CONT(MMM, blankC=True)
        self._SEND(MMM[0], 1)
        self._FEND(MMM[0])
    # Photo-atomic or electro-atomic interaction data
    def _read_23(self, MT):
        """Reads a smooth cross-section section (MF=23) into self.reactions[MT]."""
        MMM = [self.MAT, 23, MT]
        self._print_debug(MMM)
        if MT not in self.reactions:
            self.reactions[MT] = reaction(MT)
        rx = self.reactions[MT]
        self._HEAD(MMM)  # ZA, AWR, which are in File 1 anyway
        params, rx.cross_section = self._TAB1(MMM)
        if 534 <= MT <= 599:
            # Subshell ionization: the two C-fields carry subshell constants
            rx.binding_energy = params[0]
            rx.fluorescence_yield = params[1]
        self._SEND(MMM[0], MMM[1])
    # Secondary distributions
    def _read_26(self, MT):
        """Reads secondary-particle energy/angle distributions (MF=26,
        LAW=1/2/8) into self.reactions[MT].products."""
        MMM = [self.MAT, 26, MT]
        self._print_debug(MMM)
        if MT not in self.reactions:
            self.reactions[MT] = reaction(MT)
        rx = self.reactions[MT]
        # NK: number of reaction products in this section
        ZA, AWR, _, _, NK, _ = self._HEAD(MMM)
        for i in range(NK):
            product = {}
            rx.products.append(product)
            params, _yield = self._TAB1(MMM)
            product['ZAP'] = params[0]  # Product identifier
            product['LAW'] = params[3]
            if product['LAW'] == 1:
                # Continuum energy-angle distribution
                params, [NBT, INT] = self._TAB2(MMM)
                NE = params[5]
                product['LANG'] = params[2]
                product['LEP'] = params[3]
                product['E1'] = np.zeros(NE)
                product['ND'] = np.zeros(NE, dtype=int)  # Number of discrete energies
                product['Ep'] = []  # Outgoing energy, list of arrays, each with length ND[i]
                product['b'] = []   # Amplitude, list of (ND x a) 2D arrays
                for i in range(NE):
                    params, data = self._LIST(MMM)
                    # N2 rows of (Ep + NA+1 coefficients) per incident energy
                    data = data.reshape((params[5], params[3]+2))
                    product['E1'][i] = params[1]
                    product['ND'][i] = params[2]
                    product['Ep'].append(data[:,0])
                    product['b'].append(data[:,1:])
            elif product['LAW'] == 2:
                # Discrete two-body angular distribution
                params, [NBT, INT] = self._TAB2(MMM)
                NE = params[5]
                product['E1'] = np.zeros(NE)
                product['LANG'] = np.zeros(NE, dtype=int)
                product['Al'] = []
                for i in range(NE):
                    params, data = self._LIST(MMM)
                    product['E1'][i] = params[1]
                    product['LANG'][i] = params[2]
                    product['Al'].append(data)
            elif product['LAW'] == 8:
                # Energy transfer for excitation
                params, product['ET'] = self._TAB1(MMM)
        self._SEND(MMM[0], MMM[1])
    # Atomic relaxation
    def _read_28_533(self):
        """Reads atomic relaxation data (MF=28, MT=533): per-subshell binding
        energies, electron counts and transition lists."""
        MMM = [self.MAT, 28, 533]
        self._print_debug(MMM)
        ZA, AWR, _, _, NSS, _ = self._HEAD(MMM)
        # NSS: Number of subshells
        for i in range(NSS):
            params, data = self._LIST(MMM)
            SUBI = int(params[0])
            NTR = int(params[5])
            EBI = data[0]
            ELN = data[1]
            ar = atomic_relaxation(SUBI)
            self.atomic_relaxation[SUBI] = ar
            ar.binding_energy = EBI
            ar.number_electrons = ELN
            # NTR: Number of transitions
            for j in range(NTR):
                # each transition occupies one 6-value row after the header row
                SUBJ = int(data[6*(j+1) + 0])
                SUBK = int(data[6*(j+1) + 1])
                ETR = data[6*(j+1) + 2]
                FTR = data[6*(j+1) + 3]
                ar.transitions.append((SUBJ, SUBK, ETR, FTR))
        self._SEND(MMM[0], MMM[1])
    #
    # Reading individual records
    #
    def _TEXT(self, MMM):
        """Reads a TEXT record; returns its 66-character payload."""
        line = self._file.readline()
        self._verify_MMM(line, MMM)
        return line[:66]
    def _CONT(self, MMM, blankC=False):
        """Reads a CONT record: two floats and four ints.
        With blankC=True the first two fields are skipped (returned as None),
        as in directory entries where they are blank."""
        line = self._file.readline()
        self._verify_MMM(line, MMM)
        if blankC:
            C1 = None
            C2 = None
        else:
            C1 = self._efloat(line[:11])
            C2 = self._efloat(line[11:22])
        L1 = self._eint(line[22:33])
        L2 = self._eint(line[33:44])
        N1 = self._eint(line[44:55])
        N2 = self._eint(line[55:66])
        return [C1, C2, L1, L2, N1, N2]
    def _HEAD(self, MMM):
        """Reads a HEAD record; like CONT but C1 is the integer ZA identifier."""
        line = self._file.readline()
        self._verify_MMM(line, MMM)
        ZA = int(self._efloat(line[:11]))
        AWR = self._efloat(line[11:22])
        L1 = self._eint(line[22:33])
        L2 = self._eint(line[33:44])
        N1 = self._eint(line[44:55])
        N2 = self._eint(line[55:66])
        return [ZA, AWR, L1, L2, N1, N2]
    def _SEND(self, MAT, MF):
        """Consumes an end-of-section record (MT=0)."""
        self._verify_MMM(self._file.readline(), [MAT, MF, 0])
    def _FEND(self, MAT):
        """Consumes an end-of-file record (MF=MT=0)."""
        self._verify_MMM(self._file.readline(), [MAT, 0, 0])
    def _MEND(self):
        """Consumes an end-of-material record (MAT=MF=MT=0)."""
        self._verify_MMM(self._file.readline(), [0, 0, 0])
    def _TEND(self):
        """Consumes an end-of-tape record (MAT=-1)."""
        self._verify_MMM(self._file.readline(), [-1, 0, 0])
    def _DIR(self):
        raise NotImplementedError()
    def _LIST(self, MMM):
        """Reads a LIST record: a CONT header followed by NPL floats packed
        six per line. Returns (header items, data array)."""
        items = self._CONT(MMM)
        NPL = items[4]
        # Read tabulated data
        B = np.zeros(NPL)
        for ln in range((NPL - 1)//6 + 1):
            line = self._file.readline()
            self._verify_MMM(line, MMM)
            for col in range(min(6, NPL - 6*ln)):
                B[6*ln+col] = self._efloat(line[:11])
                line = line[11:]
        return items, B
    def _TAB1(self, MMM):
        """Reads a TAB1 record: interpolation regions plus (x, y) pairs.
        Returns ([C1, C2, L1, L2], TAB1 instance)."""
        C1, C2, L1, L2, NR, NP = self._CONT(MMM)
        # Read interpolation region data
        NBT = np.zeros(NR, dtype=int)
        INT = np.zeros(NR, dtype=int)
        for ln in range((NR - 1)//3 + 1):
            line = self._file.readline()
            self._verify_MMM(line, MMM)
            for col in range(min(3, NR - 3*ln)):
                NBT[3*ln+col] = self._eint(line[:11])
                INT[3*ln+col] = self._eint(line[11:22])
                line = line[22:]
        # Read tabulated data
        x = np.zeros(NP)
        y = np.zeros(NP)
        for ln in range((NP - 1)//3 + 1):
            line = self._file.readline()
            self._verify_MMM(line, MMM)
            for col in range(min(3, NP - 3*ln)):
                x[3*ln+col] = self._efloat(line[:11])
                y[3*ln+col] = self._efloat(line[11:22])
                line = line[22:]
        return [C1, C2, L1, L2], TAB1(x, y, NBT, INT)
    def _TAB2(self, MMM):
        """Reads a TAB2 record header (interpolation regions only; the NZ
        sub-records that follow are read by the caller)."""
        C1, C2, L1, L2, NR, NZ = self._CONT(MMM)
        # Read interpolation region data
        NBT = np.zeros(NR, dtype=int)
        INT = np.zeros(NR, dtype=int)
        for ln in range((NR - 1)//3 + 1):
            line = self._file.readline()
            self._verify_MMM(line, MMM)
            for col in range(min(3, NR - 3*ln)):
                NBT[3*ln+col] = self._eint(line[:11])
                INT[3*ln+col] = self._eint(line[11:22])
                line = line[22:]
        return [C1, C2, L1, L2, NR, NZ], \
               [NBT, INT]
    def _INTG(self):
        raise NotImplementedError()
    @staticmethod
    def _is_FEND(line):
        """True if line is an end-of-file record (MAT set, MF=MT=0)."""
        MAT = endf_reader._eint(line[66:70])
        MF = endf_reader._eint(line[70:72])
        MT = endf_reader._eint(line[72:75])
        return MAT != 0 and MF == 0 and MT == 0
    @staticmethod
    def _is_MEND(line):
        """True if line is an end-of-material record (MAT=MF=MT=0)."""
        MAT = endf_reader._eint(line[66:70])
        MF = endf_reader._eint(line[70:72])
        MT = endf_reader._eint(line[72:75])
        return MAT == 0 and MF == 0 and MT == 0
    @staticmethod
    def _verify_MMM(line, MMM):
        """Checks that line carries the expected [MAT, MF, MT] triple.
        Passing MMM=None skips the check. Raises ValueError on mismatch."""
        # Fix: identity comparison against the None sentinel (was `MMM == None`,
        # flagged by PEP 8 / E711).
        if MMM is None:
            return
        MAT = endf_reader._eint(line[66:70])
        MF = endf_reader._eint(line[70:72])
        MT = endf_reader._eint(line[72:75])
        if MAT != MMM[0] or MF != MMM[1] or MT != MMM[2]:
            raise ValueError('Unexpected MAT/MF/MT found in file!')
    @staticmethod
    def _efloat(string):
        """Parses an ENDF float, accepting E-less exponents ('1.234+5') and
        Fortran 'D' exponents ('1.5d3')."""
        # The re.sub deals with "E-less numbers", which are supposed to be the
        # default; but it turns out that, sometimes, a 'D' is printed...
        fixed_string = re.sub(r'([0-9]+)([\+|-])([0-9]+)',
                              r'\1E\2\3',
                              string.replace('d', 'e').replace('D', 'E'))
        return float(fixed_string)
    @staticmethod
    def _eint(string):
        """Parses an ENDF integer field."""
        return int(string)
    @staticmethod
    def _print_debug(MMM):
        # print("Reading MF=%d, MT=%d" % (MMM[1], MMM[2]))
        pass
class TAB1:
    """A TAB1 record from the ENDF file format: a tabulated function y(x).

    Attributes:
        x, y: tabulated abscissae and ordinates.
        NBT, INT: interpolation region boundaries and scheme codes.
    """

    def __init__(self, x, y, NBT, INT):
        self.x, self.y = x, y
        self.NBT, self.INT = NBT, INT
| [
"numpy.zeros"
] | [((6382, 6395), 'numpy.zeros', 'np.zeros', (['NPL'], {}), '(NPL)\n', (6390, 6395), True, 'import numpy as np\n'), ((6729, 6752), 'numpy.zeros', 'np.zeros', (['NR'], {'dtype': 'int'}), '(NR, dtype=int)\n', (6737, 6752), True, 'import numpy as np\n'), ((6761, 6784), 'numpy.zeros', 'np.zeros', (['NR'], {'dtype': 'int'}), '(NR, dtype=int)\n', (6769, 6784), True, 'import numpy as np\n'), ((7062, 7074), 'numpy.zeros', 'np.zeros', (['NP'], {}), '(NP)\n', (7070, 7074), True, 'import numpy as np\n'), ((7081, 7093), 'numpy.zeros', 'np.zeros', (['NP'], {}), '(NP)\n', (7089, 7093), True, 'import numpy as np\n'), ((7499, 7522), 'numpy.zeros', 'np.zeros', (['NR'], {'dtype': 'int'}), '(NR, dtype=int)\n', (7507, 7522), True, 'import numpy as np\n'), ((7531, 7554), 'numpy.zeros', 'np.zeros', (['NR'], {'dtype': 'int'}), '(NR, dtype=int)\n', (7539, 7554), True, 'import numpy as np\n'), ((3454, 3466), 'numpy.zeros', 'np.zeros', (['NE'], {}), '(NE)\n', (3462, 3466), True, 'import numpy as np\n'), ((3487, 3510), 'numpy.zeros', 'np.zeros', (['NE'], {'dtype': 'int'}), '(NE, dtype=int)\n', (3495, 3510), True, 'import numpy as np\n'), ((4051, 4063), 'numpy.zeros', 'np.zeros', (['NE'], {}), '(NE)\n', (4059, 4063), True, 'import numpy as np\n'), ((4086, 4109), 'numpy.zeros', 'np.zeros', (['NE'], {'dtype': 'int'}), '(NE, dtype=int)\n', (4094, 4109), True, 'import numpy as np\n')] |
import jax
import jax.numpy as np
from jax import random, jit
import matplotlib.pyplot as plt
from jax.scipy.stats import norm
import pickle as pkl
import numpy as onp
from scipy.stats import norm as onorm
from jax.experimental.optimizers import adam
import argparse
class MSC:
    """Importance-sampling-based variational fit for a Bayesian probit-style
    model, implemented with JAX.

    A factorized Gaussian proposal q(z) = N(mu, diag(exp(log_sigma))^2) over
    the n_latent latent weights is trained with Adam on an
    importance-weighted objective, optionally using Conditional Importance
    Sampling (CIS).  (Presumably an implementation of Markovian Score
    Climbing, per the class name -- confirm against the referenced paper.)
    """

    def __init__(self, seed, n_latent):
        # n_latent: dimensionality of the latent weight vector z.
        self.seed = seed
        self.key = random.PRNGKey(seed)
        self.n_latent = n_latent
        self.step_size = 0.01
        # adam() returns the (init, update, get_params) optimizer triple.
        self.opt_init, self.opt_update, self.get_params = adam(self.step_size)

    # Wrapper function
    def sample_from_normal(self, shape):
        """Draw N(0, 1) samples of *shape*, splitting (advancing) self.key."""
        if type(shape) == int:
            shape = (shape,)
        self.key, subkey = random.split(self.key)
        return random.normal(subkey, shape=shape)

    def init_params(self, random_init):
        """Initial (mu, log_sigma): fully random, or the small-scale scheme."""
        if random_init:
            return self.sample_from_normal(shape=self.n_latent), self.sample_from_normal(shape=self.n_latent)
        return 0.1 * self.sample_from_normal(shape=self.n_latent), 0.5 + 0.1 * self.sample_from_normal(
            shape=self.n_latent)

    # Sample examples from proposal. Shape of output : (n_latent, n_samples)
    def sample_from_proposal(self, mu, log_sigma, n_samples):
        noise = self.sample_from_normal(shape=(self.n_latent, n_samples))
        return mu.reshape(-1, 1) + np.exp(log_sigma).reshape(-1, 1) * noise

    # Randomly sample 1..N according to weights
    def sample_according_to_weights(self, weights):
        # Inverse-CDF sampling: draw u ~ U(0, 1) and locate it among the
        # cumulative weight bins.
        self.key, subkey = random.split(self.key)
        x = random.uniform(subkey)
        bins = np.cumsum(weights)
        return np.digitize(x, bins)

    # Log of the prior: log P(z) where P(z) ~ N(0,1)
    def log_prior(self, z):
        return np.sum(norm.logpdf(z), axis=0)

    # Log of proposal distribution
    def log_proposal(self, z, mu, log_sigma):
        sigma = np.exp(log_sigma)
        return np.sum(norm.logpdf(z, loc=mu.reshape(-1, 1), scale=sigma.reshape(-1, 1)), axis=0)

    # Log of the likelihood: log P(y|x, z)
    # SF : Survival Function = 1-CDF
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html
    def log_likelihood(self, y, x, z):
        return np.sum(y * norm.logcdf(np.dot(x, z)) + (1 - y) * onorm.logsf(np.dot(x, z)), axis=0)

    # Conditional Importance Sampling
    def cis(self, mu, log_sigma, z_old, n_samples, x, y):
        """One (C)IS round: sample z, compute normalized importance weights,
        and -- when z_old is given -- draw the next conditional sample."""
        # Sample n examples and replace the first example using z_old
        z = self.sample_from_proposal(mu, log_sigma, n_samples)
        # Replace the first sample of every latent variable with the conditional sample
        if z_old is not None:
            z = jax.ops.index_update(z, jax.ops.index[:, 0], z_old)
        # Compute importance weights : w = p(z) p(y|z,x)/q(z)
        # Shape of log_w : (n_samples,) -- each log term sums over the
        # latent axis (axis=0).
        log_w = self.log_prior(z) + self.log_likelihood(y, x, z) - self.log_proposal(z, mu, log_sigma)
        # Shift by the max before exponentiating for numerical stability.
        max_log_w = np.max(log_w)
        shifted_w = np.exp(log_w - max_log_w)
        importance_weights = shifted_w / np.sum(shifted_w)
        if z_old is not None:
            # Sample next conditional sample
            j = self.sample_according_to_weights(importance_weights)
            return z, z[:, j], importance_weights
        else:
            return z, None, importance_weights

    def objective(self, importance_weights, z, mu, log_sigma):
        # Negative importance-weighted log-proposal (the surrogate loss
        # whose gradient is taken w.r.t. mu and log_sigma).
        return -np.sum(importance_weights * self.log_proposal(z, mu, log_sigma))

    # # https://jax.readthedocs.io/en/latest/jax.experimental.optimizers.html
    def step(self, step, mu, log_sigma, importance_weights, z, opt_state):
        """One Adam update of (mu, log_sigma); returns (opt_state, params)."""
        # Differentiate wrt 3rd and 4th parameter
        gradient = self.derivative_of_objective()(importance_weights, z, mu, log_sigma)
        opt_state = self.opt_update(step, gradient, opt_state)
        return opt_state, self.get_params(opt_state)

    def derivative_of_objective(self):
        # JIT-compiled gradient of the objective w.r.t. (mu, log_sigma).
        return jax.jit(jax.grad(self.objective, (2, 3)))

    def init_conditional_sample(self, cis=True, random_init=False):
        """Initial conditional sample for CIS; None when plain IS is used."""
        if not cis:
            return None
        if random_init:
            return self.sample_from_normal(shape=self.n_latent)
        return 0.1 * self.sample_from_normal(shape=self.n_latent)

    def approximate(self, train_x, train_y, n_samples=10, n_iterations=1000, log_frequency=500, cis=False,
                    random_init=True):
        """Run n_iterations of (C)IS + Adam steps.

        Returns (mu, log_sigma, mu_history, log_sigma_history) where the
        histories contain the parameters after every iteration.
        """
        conditional_sample = self.init_conditional_sample(cis, random_init)
        mu, log_sigma = self.init_params(random_init)
        opt_state = self.opt_init((mu, log_sigma))
        mu_ = []
        log_sigma_ = []
        for k in range(n_iterations):
            z, conditional_sample, importance_weights = self.cis(mu, log_sigma, conditional_sample, n_samples, train_x,
                                                                 train_y)
            # Compute derivative wrt mu and log_sigma
            # opt_state, value = self.step(k, opt_state, opt_update, importance_weights, z)
            opt_state, (mu, log_sigma) = self.step(k, mu, log_sigma, importance_weights, z, opt_state)
            mu_.append(mu)
            log_sigma_.append(log_sigma)
            if k % log_frequency == 0:
                value = self.objective(importance_weights, z, mu, log_sigma)
                print(f"Iteration: {k}, Objective Value : {value}")
        return mu, log_sigma, mu_, log_sigma_
def train_test_split(features_data, target_data, test_percentage=0.1):
    """Randomly split features/targets into train and test subsets.

    A single random permutation of the row indices is drawn; the first
    ``int(n * test_percentage)`` indices form the test set.  Returns
    ``((train_x, train_y), (test_x, test_y))``.
    """
    total = features_data.shape[0]
    held_out = int(total * test_percentage)
    order = onp.random.permutation(total)
    test_idx, train_idx = order[:held_out], order[held_out:]
    return (
        (features_data[train_idx], target_data[train_idx]),
        (features_data[test_idx], target_data[test_idx]),
    )
def evaluate(x, y, mu, variance):
    """Test error of the Gaussian-approximate probit predictive.

    Follows https://rpubs.com/cakapourani/variational-bayes-bpr: the
    predictive probability is Phi(x.mu / sqrt(1 + x.Sigma.x)) per row;
    rows with probability > 0.5 are predicted as class 1.
    """
    scores = np.dot(x, mu)
    scale = np.sqrt(1 + np.sum(np.dot(x, variance) * x, axis=1))
    predictive_prob = norm.cdf(scores / scale)
    labels = (predictive_prob > 0.5).astype('float').reshape(-1, 1)
    accuracy = np.sum(labels == y) / len(y)
    return 1 - accuracy
def main(args):
    """Load the CSV dataset and run the MSC experiment args.n_experiments times.

    Rows containing a missing ('?') value are dropped; the last column is
    binarized into {0., 1.} targets and the rest are features.
    """
    def flag(value):
        # Boolean options arrive from the CLI as "true"/"false" strings.
        return value.lower() == "true"

    augment_bias = flag(args.augment_bias)
    # Load data, discarding every row with a missing entry.
    raw_data = onp.genfromtxt(args.file_path, missing_values='?', delimiter=',')
    raw_data = raw_data[~onp.isnan(raw_data).any(axis=1)]
    features_data = raw_data[:, :-1]
    if augment_bias:
        # Prepend a constant-1 column so the first weight acts as a bias.
        features_data = onp.insert(features_data, 0, 1, axis=1)
    target_data = (raw_data[:, -1] > 0).astype('float').reshape(-1, 1)
    n_latent = features_data.shape[1]
    cis = flag(args.cis)
    random_init = flag(args.random_init)
    print(f"Arguments: {args}, CIS: {cis}, Random Init: {random_init}")
    for _ in range(args.n_experiments):
        # Fresh random split per experiment; the model is re-seeded with the
        # same seed each time.
        (train_x, train_y), (test_x, test_y) = train_test_split(features_data, target_data)
        msc = MSC(seed=args.seed, n_latent=n_latent)
        mu, log_sigma, mu_history, log_sigma_history = msc.approximate(train_x, train_y, n_samples=args.n_samples,
                                                                       n_iterations=args.n_iterations, cis=cis,
                                                                       random_init=random_init)
        # Point estimate: average the last 150 iterates of each parameter.
        mu_opt = np.mean(np.array(mu_history[-150:]), axis=0)
        var_opt = np.diag(np.mean(np.exp(2 * np.array(log_sigma_history[-150:])), axis=0))
        test_error = evaluate(test_x, test_y, mu_opt, var_opt)
        print(f"Test error: {test_error}")
if __name__ == "__main__":
    # CLI entry point; boolean flags are passed as the strings "true"/"false".
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--file_path', type=str, help='Path of data file')
    parser.add_argument('--n_samples', type=int, default=10,
                        help='Number of samples to sample from proposal')
    parser.add_argument('--n_iterations', type=int, default=10000,
                        help='Number of gradient steps to run')
    parser.add_argument('--n_experiments', type=int, default=10,
                        help='Number of times to run the experiment')
    parser.add_argument('--seed', type=int, default=42, help='Seed RNG')
    parser.add_argument('--cis', type=str, default="true",
                        help='Whether to run conditional IS or IS')
    parser.add_argument('--random_init', type=str, default="true",
                        help='Whether to run with random initialization or initialization in paper')
    parser.add_argument('--augment_bias', type=str, default="true",
                        help='Append extra feature to include the bias term')
    main(parser.parse_args())
| [
"argparse.ArgumentParser",
"numpy.isnan",
"jax.random.PRNGKey",
"jax.experimental.optimizers.adam",
"jax.random.uniform",
"jax.random.normal",
"jax.numpy.cumsum",
"numpy.genfromtxt",
"numpy.insert",
"jax.scipy.stats.norm.logpdf",
"jax.numpy.sum",
"numpy.random.permutation",
"jax.numpy.array"... | [((5519, 5553), 'numpy.random.permutation', 'onp.random.permutation', (['n_examples'], {}), '(n_examples)\n', (5541, 5553), True, 'import numpy as onp\n'), ((6304, 6369), 'numpy.genfromtxt', 'onp.genfromtxt', (['args.file_path'], {'missing_values': '"""?"""', 'delimiter': '""","""'}), "(args.file_path, missing_values='?', delimiter=',')\n", (6318, 6369), True, 'import numpy as onp\n'), ((7695, 7756), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (7718, 7756), False, 'import argparse\n'), ((364, 384), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (378, 384), False, 'from jax import random, jit\n'), ((506, 526), 'jax.experimental.optimizers.adam', 'adam', (['self.step_size'], {}), '(self.step_size)\n', (510, 526), False, 'from jax.experimental.optimizers import adam\n'), ((679, 701), 'jax.random.split', 'random.split', (['self.key'], {}), '(self.key)\n', (691, 701), False, 'from jax import random, jit\n'), ((717, 751), 'jax.random.normal', 'random.normal', (['subkey'], {'shape': 'shape'}), '(subkey, shape=shape)\n', (730, 751), False, 'from jax import random, jit\n'), ((1482, 1504), 'jax.random.split', 'random.split', (['self.key'], {}), '(self.key)\n', (1494, 1504), False, 'from jax import random, jit\n'), ((1517, 1539), 'jax.random.uniform', 'random.uniform', (['subkey'], {}), '(subkey)\n', (1531, 1539), False, 'from jax import random, jit\n'), ((1555, 1573), 'jax.numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (1564, 1573), True, 'import jax.numpy as np\n'), ((1589, 1609), 'jax.numpy.digitize', 'np.digitize', (['x', 'bins'], {}), '(x, bins)\n', (1600, 1609), True, 'import jax.numpy as np\n'), ((1836, 1853), 'jax.numpy.exp', 'np.exp', (['log_sigma'], {}), '(log_sigma)\n', (1842, 1853), True, 'import jax.numpy as np\n'), ((2894, 2907), 'jax.numpy.max', 'np.max', (['log_w'], {}), '(log_w)\n', (2900, 
2907), True, 'import jax.numpy as np\n'), ((2928, 2953), 'jax.numpy.exp', 'np.exp', (['(log_w - max_log_w)'], {}), '(log_w - max_log_w)\n', (2934, 2953), True, 'import jax.numpy as np\n'), ((6510, 6549), 'numpy.insert', 'onp.insert', (['features_data', '(0)', '(1)'], {'axis': '(1)'}), '(features_data, 0, 1, axis=1)\n', (6520, 6549), True, 'import numpy as onp\n'), ((1714, 1728), 'jax.scipy.stats.norm.logpdf', 'norm.logpdf', (['z'], {}), '(z)\n', (1725, 1728), False, 'from jax.scipy.stats import norm\n'), ((2616, 2667), 'jax.ops.index_update', 'jax.ops.index_update', (['z', 'jax.ops.index[:, 0]', 'z_old'], {}), '(z, jax.ops.index[:, 0], z_old)\n', (2636, 2667), False, 'import jax\n'), ((2995, 3012), 'jax.numpy.sum', 'np.sum', (['shifted_w'], {}), '(shifted_w)\n', (3001, 3012), True, 'import jax.numpy as np\n'), ((3885, 3917), 'jax.grad', 'jax.grad', (['self.objective', '(2, 3)'], {}), '(self.objective, (2, 3))\n', (3893, 3917), False, 'import jax\n'), ((5981, 5994), 'jax.numpy.dot', 'np.dot', (['x', 'mu'], {}), '(x, mu)\n', (5987, 5994), True, 'import jax.numpy as np\n'), ((6144, 6167), 'jax.numpy.sum', 'np.sum', (['(prediction == y)'], {}), '(prediction == y)\n', (6150, 6167), True, 'import jax.numpy as np\n'), ((7419, 7446), 'jax.numpy.array', 'np.array', (['mu_history[-150:]'], {}), '(mu_history[-150:])\n', (7427, 7446), True, 'import jax.numpy as np\n'), ((6395, 6414), 'numpy.isnan', 'onp.isnan', (['raw_data'], {}), '(raw_data)\n', (6404, 6414), True, 'import numpy as onp\n'), ((1313, 1330), 'jax.numpy.exp', 'np.exp', (['log_sigma'], {}), '(log_sigma)\n', (1319, 1330), True, 'import jax.numpy as np\n'), ((2190, 2202), 'jax.numpy.dot', 'np.dot', (['x', 'z'], {}), '(x, z)\n', (2196, 2202), True, 'import jax.numpy as np\n'), ((2228, 2240), 'jax.numpy.dot', 'np.dot', (['x', 'z'], {}), '(x, z)\n', (2234, 2240), True, 'import jax.numpy as np\n'), ((7501, 7535), 'jax.numpy.array', 'np.array', (['log_sigma_history[-150:]'], {}), '(log_sigma_history[-150:])\n', (7509, 
7535), True, 'import jax.numpy as np\n'), ((6016, 6035), 'jax.numpy.dot', 'np.dot', (['x', 'variance'], {}), '(x, variance)\n', (6022, 6035), True, 'import jax.numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 13:30:36 2018
@author: engelen
"""
from glob import glob
import numpy as np
import os, sys
import netCDF4 as nc4
from datetime import datetime
import re
#%%
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
    """Sort key for "natural" ordering: runs of digits compare numerically.

    ``_nsre`` caches the compiled splitter across calls; callers should not
    pass it explicitly.
    """
    key = []
    for chunk in _nsre.split(s):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
def max_date(folder):
    """Return the latest timestamp among the head_*_l1_p001.idf files in *folder*.

    File names embed a 14-digit YYYYMMDDHHMMSS timestamp, which is parsed
    out of each matching path; the maximum is returned as an int.
    """
    pattern = os.path.join(folder, "head_*_l1_p001.idf")
    stamps = []
    for path in glob(pattern):
        stamps.append(int(re.search(r"\d{14}", path).group(0)))
    return max(stamps)
def wildcard_to_nr(folder, folder_glob):
    """Extract the number that replaces the '*' of *folder_glob* in *folder*.

    E.g. folder_glob "results_*" and folder "results_7" give 7.
    """
    prefix = folder_glob.split(sep="*")[0]
    suffix = folder.split(sep=prefix)[1]
    return int(suffix)
#%%
##Example arguments
#folder_glob = r"/home/path/results_*"
#inittxt = r"/home/path/init_times_test.txt"
#cont_nr = 1
# CLI: argv[1] = glob pattern of numbered result folders, argv[2] = path of
# the init-times text file, optional argv[3] = folder number to continue from.
folder_glob = sys.argv[1]
inittxt = sys.argv[2]
if len(sys.argv)>3:
    cont_nr = int(sys.argv[3])
else:
    cont_nr = None
# All matching result folders, sorted in natural (numeric) order.
folders = [f for f in glob(folder_glob) if os.path.isdir(f)]
folders.sort(key=natural_sort_key)
folders_cont = []
if cont_nr is not None:
    # Resume mode: keep the already-computed init times up to cont_nr and
    # only process folders numbered cont_nr or higher.
    init_times = list(np.loadtxt(inittxt, ndmin=1))[:cont_nr]
    for fol in folders:
        nr = wildcard_to_nr(fol, folder_glob)
        if nr >= cont_nr:
            folders_cont.append(fol)
else:
    init_times = [0.]
    folders_cont = folders
for i, f in enumerate(folders_cont):
    try:
        # The latest head_* timestamp marks the end of this run; convert it
        # to hours since 2000-01-01 and accumulate onto the previous total.
        dt = datetime.strptime(str(max_date(f)), '%Y%m%d%H%M%S')
        units = "hours since 2000-01-01 00:00:00.0"
        date = nc4.date2num(dt, units = units, calendar = "gregorian")
        init_time = init_times[-1] + date
        init_times.append(init_time)
    except ValueError:
        # Raised e.g. by max() on an empty folder (no head files) -- skip it.
        pass
np.savetxt(inittxt, np.array(init_times)) | [
"re.split",
"os.path.isdir",
"netCDF4.date2num",
"numpy.array",
"numpy.loadtxt",
"re.search",
"glob.glob",
"os.path.join",
"re.compile"
] | [((242, 264), 're.compile', 're.compile', (['"""([0-9]+)"""'], {}), "('([0-9]+)')\n", (252, 264), False, 'import re\n'), ((1668, 1688), 'numpy.array', 'np.array', (['init_times'], {}), '(init_times)\n', (1676, 1688), True, 'import numpy as np\n'), ((410, 452), 'os.path.join', 'os.path.join', (['folder', '"""head_*_l1_p001.idf"""'], {}), "(folder, 'head_*_l1_p001.idf')\n", (422, 452), False, 'import os, sys\n'), ((930, 947), 'glob.glob', 'glob', (['folder_glob'], {}), '(folder_glob)\n', (934, 947), False, 'from glob import glob\n'), ((951, 967), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (964, 967), False, 'import os, sys\n'), ((1476, 1527), 'netCDF4.date2num', 'nc4.date2num', (['dt'], {'units': 'units', 'calendar': '"""gregorian"""'}), "(dt, units=units, calendar='gregorian')\n", (1488, 1527), True, 'import netCDF4 as nc4\n'), ((349, 367), 're.split', 're.split', (['_nsre', 's'], {}), '(_nsre, s)\n', (357, 367), False, 'import re\n'), ((1070, 1098), 'numpy.loadtxt', 'np.loadtxt', (['inittxt'], {'ndmin': '(1)'}), '(inittxt, ndmin=1)\n', (1080, 1098), True, 'import numpy as np\n'), ((471, 494), 're.search', 're.search', (['"""\\\\d{14}"""', 'i'], {}), "('\\\\d{14}', i)\n", (480, 494), False, 'import re\n')] |
import copy
import intprim
import matplotlib.pyplot as plt
import matplotlib.animation
import numpy as np
import numpy.random
import sklearn.metrics
try:
import IPython.display
except:
pass
animation_plots = []
def create_2d_handwriting_data(num_trajectories, translation_mean, translation_std, noise_std, length_mean, length_std):
    """Generate noisy 2-D handwriting demonstrations from one template stroke.

    Each output trajectory is produced by fitting the hard-coded template to
    a Gaussian basis model, resampling it at a random length
    ~ N(length_mean, length_std), translating it by a random offset
    ~ N(translation_mean, translation_std), and adding i.i.d. noise
    ~ N(0, noise_std).  Returns a list of (2, length) numpy arrays.
    """
    # A single example of a handwriting trajectory
    xdata = np.array([
        2.52147861, 2.68261873, 2.84009521, 2.99269205, 3.13926385,
        3.27876056, 3.41025573, 3.5329778 , 3.64634321, 3.74998937,
        3.8438048 , 3.92795314, 4.00288777, 4.0693539 , 4.12837543,
        4.18122498, 4.22937664, 4.27444203, 4.31809201, 4.36196737,
        4.40758299, 4.4562309 , 4.50888808, 4.56613502, 4.62809093,
        4.69437067, 4.76406782, 4.83576665, 4.90758435, 4.97724312,
        5.04216954, 5.099617  , 5.14680484, 5.18106677, 5.1999997 ,
        5.20160394, 5.18440564, 5.14755368, 5.09088427, 5.01494897,
        4.92100438, 4.8109641 , 4.68731662, 4.55301474, 4.41134412,
        4.26577973, 4.11983926, 3.97694226, 3.84028296, 3.71272292,
        3.59670796, 3.4942117 , 3.4067061 , 3.33515726, 3.28004369,
        3.24139282, 3.21883106, 3.21164261, 3.21883242, 3.23918946,
        3.27134723, 3.31383944, 3.36515007, 3.42375745, 3.48817336,
        3.55697803, 3.62885243, 3.70260907, 3.77722187, 3.85185522,
        3.92589153, 3.99895578, 4.07093474, 4.14198835, 4.21255021,
        4.2833145 , 4.35520693, 4.42933819, 4.50693958, 4.5892814 ,
        4.67757669, 4.7728736 , 4.87594169, 4.98715824, 5.10640159,
        5.23295916, 5.36545793, 5.50182437, 5.63928031, 5.7743792 ,
        5.90308534, 6.02089593, 6.12300271, 6.20448725, 6.26054043,
        6.28669463, 6.27905489, 6.23451447, 6.15094057, 6.02731681
    ])
    ydata = np.array([
        2.60877965, 2.76485925, 2.91587601, 3.06074461, 3.19850088,
        3.32832259, 3.44955237, 3.56172269, 3.66458245, 3.75812375,
        3.84260667, 3.9185795 , 3.98689125, 4.04869382, 4.10543106,
        4.1588132 , 4.21077576, 4.26342334, 4.31895999, 4.37960871,
        4.44752397, 4.52470161, 4.61289081, 4.71351323, 4.82759375,
        4.95570667, 5.09794052, 5.25388323, 5.42262803, 5.60279957,
        5.79259769, 5.98985598, 6.19211079, 6.39667626, 6.60072087,
        6.80134129, 6.99563046, 7.18073763, 7.35391969, 7.51258424,
        7.6543261 , 7.77695956, 7.87854902, 7.95744025, 8.0122939 ,
        8.0421214 , 8.0463223 , 8.0247204 , 7.97759496, 7.90570262,
        7.81028529, 7.69306011, 7.55618819, 7.40222104, 7.23402506,
        7.05468668, 6.86740265, 6.67536129, 6.48162182, 6.28899902,
        6.09996034, 5.916542  , 5.74028898, 5.57222266, 5.41283782,
        5.26212897, 5.11964415, 4.98456294, 4.85579367, 4.73208409,
        4.61213865, 4.49473531, 4.37883468, 4.26367447, 4.14884334,
        4.0343288 , 3.9205359 , 3.80827461, 3.69871613, 3.59332021,
        3.49373739, 3.40169213, 3.31885379, 3.24670384, 3.18640788,
        3.13870115, 3.10379544, 3.08131435, 3.07026211, 3.06902906,
        3.07543489, 3.08680804, 3.10009753, 3.11201102, 3.11917145,
        3.1182827 , 3.10629444, 3.08055594, 3.03894936, 2.97999426
    ])
    new_data = []
    basis_model = intprim.basis.GaussianModel(8, 0.1, ["X", "Y"])
    # From this single example, create noisy demonstrations.
    # Approximate the original data with a basis model so that we can sub/super sample it to create
    # trajectories of different lengths while maintaining the same shape.
    # Generate num_trajectories demonstrations from the writing sample.
    for demo in range(num_trajectories):
        # Randomly generate a new length
        demonstration_length = int(np.round(np.random.normal(length_mean, length_std)))
        # Fit the single demonstration to the pre-defined basis model
        domain = np.linspace(0, 1, xdata.shape[0], dtype = intprim.constants.DTYPE)
        weights = basis_model.fit_basis_functions_linear_closed_form(domain, np.array([xdata, ydata]).T).T
        # Resample a new trajectory from the basis model with the desired length
        new_interaction = np.zeros((2, demonstration_length))
        domain = np.linspace(0, 1, demonstration_length, dtype = intprim.constants.DTYPE)
        for idx in range(demonstration_length):
            new_interaction[:, idx] = basis_model.apply_coefficients(domain[idx], weights)
        # Apply a random translation
        new_interaction = (new_interaction.T + np.random.normal(translation_mean, translation_std)).T
        new_interaction = np.random.normal(new_interaction, noise_std)
        new_data.append(new_interaction)
    return new_data
def train_model(primitive, training_trajectories):
    """Fit *primitive* on the demonstrations and return it.

    Standardization statistics are accumulated over every trajectory
    before any demonstration is added.
    """
    for demo in training_trajectories:
        primitive.compute_standardization(demo)
    for demo in training_trajectories:
        primitive.add_demonstration(demo)
    return primitive
def get_phase_stats(training_trajectories):
    """Mean and variance of the per-demonstration phase velocity (1 / length)."""
    phase_velocities = [1.0 / traj.shape[1] for traj in training_trajectories]
    return np.mean(phase_velocities), np.var(phase_velocities)
def get_observation_noise(basis_selector, basis_model, training_trajectories, bias):
    """Build a diagonal observation-noise matrix from basis-model MSE.

    The per-DoF model MSE is scaled by *bias*; entry [0, 0] is then
    overridden with a large constant (10000) -- presumably to effectively
    discount the first DoF during filtering (confirm against caller).
    """
    for demo in training_trajectories:
        basis_selector.add_demonstration(demo)
    dof_indices = np.array(range(training_trajectories[0].shape[0]))
    mse = basis_selector.get_model_mse(basis_model, dof_indices, 0.0, 1.0)
    noise = np.diag(mse) * bias
    noise[0, 0] = 10000
    return noise
def animate_results(generated_data, observed_data, mean_data):
    """Animate generated vs. observed trajectories over the mean trajectory.

    Each frame i draws generated_data[i] and observed_data[i] (arrays whose
    rows 0/1 are the X/Y coordinates); the mean trajectory is drawn once as
    a static backdrop by the blit init function.
    """
    fig = plt.figure()
    ax = plt.axes(xlim=(-5, 15), ylim=(-5, 15))
    # plot_lines = [plt.plot([], [])[0] for _ in range(3)]
    plot_lines = [
        plt.plot([], [], "--", color = "#ff6a6a", label = "Generated", linewidth = 2.0)[0],
        plt.plot([], [], color = "#6ba3ff", label = "Observed", linewidth = 2.0)[0],
        plt.plot([], [], color = "#85d87f", label = "Mean")[0]
    ]
    fig.suptitle('Probable trajectory')
    def init():
        # Blit baseline: clear the animated lines, draw the static mean once.
        plot_lines[0].set_data([], [])
        plot_lines[1].set_data([], [])
        plot_lines[2].set_data(mean_data[0], mean_data[1])
        return plot_lines
    def animate(i):
        # Per-frame update of the two animated lines.
        plot_lines[0].set_data(generated_data[i][0], generated_data[i][1])
        plot_lines[1].set_data(observed_data[i][0], observed_data[i][1])
        return plot_lines
    anim = matplotlib.animation.FuncAnimation(fig, animate, init_func = init,
        frames = len(generated_data), interval = 500, blit = True)
    # Keep a module-level reference so the animation object is not
    # garbage-collected before it plays.
    animation_plots.append(anim)
    plt.legend(loc = "upper left")
    plt.show()
def evaluate_trajectories(primitive, filter, test_trajectories, observation_noise, delay_prob = 0.0, delay_ratio = 0.0):
    """Evaluate a trained primitive on held-out trajectories.

    For each trajectory the first DoF is zeroed (treated as unobserved) and
    the data is fed to a fresh copy of *filter* in chunks of 8 samples.
    After each chunk the remainder of the trajectory is inferred and scored
    (per-DoF MSE and phase MAE).  With probability delay_prob an artificial
    delay -- the last observation repeated for delay_ratio * length steps --
    is injected to probe temporal robustness.  Partial results are plotted
    and the averaged errors printed per trajectory.
    """
    for test_trajectory in test_trajectories:
        test_trajectory_partial = np.array(test_trajectory, copy = True)
        # Hide the first DoF from the filter: it must be inferred.
        test_trajectory_partial[0, :] = 0.0
        # Fresh filter per trajectory so state does not leak between runs.
        new_filter = copy.deepcopy(filter)
        primitive.set_filter(new_filter)
        # all_gen_trajectories = []
        # all_test_trajectories = []
        mean_trajectory = primitive.get_mean_trajectory()
        mean_mse = 0.0
        phase_mae = 0.0
        mse_count = 0
        prev_observed_index = 0
        for observed_index in range(8, test_trajectory.shape[1], 8):
            # Condition on the newest 8-sample chunk and predict the rest.
            gen_trajectory, phase, mean, var = primitive.generate_probable_trajectory_recursive(test_trajectory_partial[:, prev_observed_index:observed_index], observation_noise, np.array([1]), num_samples = test_trajectory_partial.shape[1] - observed_index)
            mse = sklearn.metrics.mean_squared_error(test_trajectory[:, observed_index:], gen_trajectory)
            mean_mse += mse
            mse_count += 1
            # Ground-truth phase is taken as elapsed fraction of the trajectory.
            phase_mae += np.abs((float(observed_index) / test_trajectory.shape[1]) - phase)
            if(delay_prob > 0.0 and np.random.binomial(1, delay_prob) == 1):
                length = int(delay_ratio * test_trajectory.shape[1])
                # Repeat the last observation for delay_ratio times.
                delay_trajectory = np.tile(test_trajectory[:, observed_index - 1], (length, 1)).T
                gen_trajectory, phase, mean, var = primitive.generate_probable_trajectory_recursive(delay_trajectory, observation_noise, np.array([1]), num_samples = test_trajectory_partial.shape[1] - observed_index)
                mse = sklearn.metrics.mean_squared_error(test_trajectory[:, observed_index:], gen_trajectory)
                mean_mse += mse
                mse_count += 1
                phase_mae += np.abs((float(observed_index) / test_trajectory.shape[1]) - phase)
            # Plot the phase/phase velocity PDF for each time step? Want to show it for temporal non-linearity.
            intprim.util.visualization.plot_partial_trajectory(gen_trajectory, test_trajectory[:, :observed_index], mean_trajectory)
            # all_gen_trajectories.append(gen_trajectory)
            # all_test_trajectories.append(test_trajectory[:, :observed_index])
            prev_observed_index = observed_index
        print("Mean DoF MSE: " + str(mean_mse / mse_count) + ". Phase MAE: " + str(phase_mae / mse_count))
    # animate_results(all_gen_trajectories, all_test_trajectories, mean_trajectory)
| [
"copy.deepcopy",
"matplotlib.pyplot.show",
"numpy.random.binomial",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"intprim.basis.GaussianModel",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"numpy.... | [((406, 1676), 'numpy.array', 'np.array', (['[2.52147861, 2.68261873, 2.84009521, 2.99269205, 3.13926385, 3.27876056, \n 3.41025573, 3.5329778, 3.64634321, 3.74998937, 3.8438048, 3.92795314, \n 4.00288777, 4.0693539, 4.12837543, 4.18122498, 4.22937664, 4.27444203, \n 4.31809201, 4.36196737, 4.40758299, 4.4562309, 4.50888808, 4.56613502, \n 4.62809093, 4.69437067, 4.76406782, 4.83576665, 4.90758435, 4.97724312,\n 5.04216954, 5.099617, 5.14680484, 5.18106677, 5.1999997, 5.20160394, \n 5.18440564, 5.14755368, 5.09088427, 5.01494897, 4.92100438, 4.8109641, \n 4.68731662, 4.55301474, 4.41134412, 4.26577973, 4.11983926, 3.97694226,\n 3.84028296, 3.71272292, 3.59670796, 3.4942117, 3.4067061, 3.33515726, \n 3.28004369, 3.24139282, 3.21883106, 3.21164261, 3.21883242, 3.23918946,\n 3.27134723, 3.31383944, 3.36515007, 3.42375745, 3.48817336, 3.55697803,\n 3.62885243, 3.70260907, 3.77722187, 3.85185522, 3.92589153, 3.99895578,\n 4.07093474, 4.14198835, 4.21255021, 4.2833145, 4.35520693, 4.42933819, \n 4.50693958, 4.5892814, 4.67757669, 4.7728736, 4.87594169, 4.98715824, \n 5.10640159, 5.23295916, 5.36545793, 5.50182437, 5.63928031, 5.7743792, \n 5.90308534, 6.02089593, 6.12300271, 6.20448725, 6.26054043, 6.28669463,\n 6.27905489, 6.23451447, 6.15094057, 6.02731681]'], {}), '([2.52147861, 2.68261873, 2.84009521, 2.99269205, 3.13926385, \n 3.27876056, 3.41025573, 3.5329778, 3.64634321, 3.74998937, 3.8438048, \n 3.92795314, 4.00288777, 4.0693539, 4.12837543, 4.18122498, 4.22937664, \n 4.27444203, 4.31809201, 4.36196737, 4.40758299, 4.4562309, 4.50888808, \n 4.56613502, 4.62809093, 4.69437067, 4.76406782, 4.83576665, 4.90758435,\n 4.97724312, 5.04216954, 5.099617, 5.14680484, 5.18106677, 5.1999997, \n 5.20160394, 5.18440564, 5.14755368, 5.09088427, 5.01494897, 4.92100438,\n 4.8109641, 4.68731662, 4.55301474, 4.41134412, 4.26577973, 4.11983926, \n 3.97694226, 3.84028296, 3.71272292, 3.59670796, 3.4942117, 3.4067061, \n 3.33515726, 3.28004369, 3.24139282, 3.21883106, 
3.21164261, 3.21883242,\n 3.23918946, 3.27134723, 3.31383944, 3.36515007, 3.42375745, 3.48817336,\n 3.55697803, 3.62885243, 3.70260907, 3.77722187, 3.85185522, 3.92589153,\n 3.99895578, 4.07093474, 4.14198835, 4.21255021, 4.2833145, 4.35520693, \n 4.42933819, 4.50693958, 4.5892814, 4.67757669, 4.7728736, 4.87594169, \n 4.98715824, 5.10640159, 5.23295916, 5.36545793, 5.50182437, 5.63928031,\n 5.7743792, 5.90308534, 6.02089593, 6.12300271, 6.20448725, 6.26054043, \n 6.28669463, 6.27905489, 6.23451447, 6.15094057, 6.02731681])\n', (414, 1676), True, 'import numpy as np\n'), ((1875, 3144), 'numpy.array', 'np.array', (['[2.60877965, 2.76485925, 2.91587601, 3.06074461, 3.19850088, 3.32832259, \n 3.44955237, 3.56172269, 3.66458245, 3.75812375, 3.84260667, 3.9185795, \n 3.98689125, 4.04869382, 4.10543106, 4.1588132, 4.21077576, 4.26342334, \n 4.31895999, 4.37960871, 4.44752397, 4.52470161, 4.61289081, 4.71351323,\n 4.82759375, 4.95570667, 5.09794052, 5.25388323, 5.42262803, 5.60279957,\n 5.79259769, 5.98985598, 6.19211079, 6.39667626, 6.60072087, 6.80134129,\n 6.99563046, 7.18073763, 7.35391969, 7.51258424, 7.6543261, 7.77695956, \n 7.87854902, 7.95744025, 8.0122939, 8.0421214, 8.0463223, 8.0247204, \n 7.97759496, 7.90570262, 7.81028529, 7.69306011, 7.55618819, 7.40222104,\n 7.23402506, 7.05468668, 6.86740265, 6.67536129, 6.48162182, 6.28899902,\n 6.09996034, 5.916542, 5.74028898, 5.57222266, 5.41283782, 5.26212897, \n 5.11964415, 4.98456294, 4.85579367, 4.73208409, 4.61213865, 4.49473531,\n 4.37883468, 4.26367447, 4.14884334, 4.0343288, 3.9205359, 3.80827461, \n 3.69871613, 3.59332021, 3.49373739, 3.40169213, 3.31885379, 3.24670384,\n 3.18640788, 3.13870115, 3.10379544, 3.08131435, 3.07026211, 3.06902906,\n 3.07543489, 3.08680804, 3.10009753, 3.11201102, 3.11917145, 3.1182827, \n 3.10629444, 3.08055594, 3.03894936, 2.97999426]'], {}), '([2.60877965, 2.76485925, 2.91587601, 3.06074461, 3.19850088, \n 3.32832259, 3.44955237, 3.56172269, 3.66458245, 3.75812375, 3.84260667,\n 
3.9185795, 3.98689125, 4.04869382, 4.10543106, 4.1588132, 4.21077576, \n 4.26342334, 4.31895999, 4.37960871, 4.44752397, 4.52470161, 4.61289081,\n 4.71351323, 4.82759375, 4.95570667, 5.09794052, 5.25388323, 5.42262803,\n 5.60279957, 5.79259769, 5.98985598, 6.19211079, 6.39667626, 6.60072087,\n 6.80134129, 6.99563046, 7.18073763, 7.35391969, 7.51258424, 7.6543261, \n 7.77695956, 7.87854902, 7.95744025, 8.0122939, 8.0421214, 8.0463223, \n 8.0247204, 7.97759496, 7.90570262, 7.81028529, 7.69306011, 7.55618819, \n 7.40222104, 7.23402506, 7.05468668, 6.86740265, 6.67536129, 6.48162182,\n 6.28899902, 6.09996034, 5.916542, 5.74028898, 5.57222266, 5.41283782, \n 5.26212897, 5.11964415, 4.98456294, 4.85579367, 4.73208409, 4.61213865,\n 4.49473531, 4.37883468, 4.26367447, 4.14884334, 4.0343288, 3.9205359, \n 3.80827461, 3.69871613, 3.59332021, 3.49373739, 3.40169213, 3.31885379,\n 3.24670384, 3.18640788, 3.13870115, 3.10379544, 3.08131435, 3.07026211,\n 3.06902906, 3.07543489, 3.08680804, 3.10009753, 3.11201102, 3.11917145,\n 3.1182827, 3.10629444, 3.08055594, 3.03894936, 2.97999426])\n', (1883, 3144), True, 'import numpy as np\n'), ((3370, 3417), 'intprim.basis.GaussianModel', 'intprim.basis.GaussianModel', (['(8)', '(0.1)', "['X', 'Y']"], {}), "(8, 0.1, ['X', 'Y'])\n", (3397, 3417), False, 'import intprim\n'), ((5801, 5813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5811, 5813), True, 'import matplotlib.pyplot as plt\n'), ((5824, 5862), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(-5, 15)', 'ylim': '(-5, 15)'}), '(xlim=(-5, 15), ylim=(-5, 15))\n', (5832, 5862), True, 'import matplotlib.pyplot as plt\n'), ((6818, 6846), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6828, 6846), True, 'import matplotlib.pyplot as plt\n'), ((6853, 6863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6861, 6863), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4049), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', 'xdata.shape[0]'], {'dtype': 'intprim.constants.DTYPE'}), '(0, 1, xdata.shape[0], dtype=intprim.constants.DTYPE)\n', (3996, 4049), True, 'import numpy as np\n'), ((4267, 4302), 'numpy.zeros', 'np.zeros', (['(2, demonstration_length)'], {}), '((2, demonstration_length))\n', (4275, 4302), True, 'import numpy as np\n'), ((4320, 4390), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'demonstration_length'], {'dtype': 'intprim.constants.DTYPE'}), '(0, 1, demonstration_length, dtype=intprim.constants.DTYPE)\n', (4331, 4390), True, 'import numpy as np\n'), ((4698, 4742), 'numpy.random.normal', 'np.random.normal', (['new_interaction', 'noise_std'], {}), '(new_interaction, noise_std)\n', (4714, 4742), True, 'import numpy as np\n'), ((5261, 5286), 'numpy.mean', 'np.mean', (['phase_velocities'], {}), '(phase_velocities)\n', (5268, 5286), True, 'import numpy as np\n'), ((5288, 5312), 'numpy.var', 'np.var', (['phase_velocities'], {}), '(phase_velocities)\n', (5294, 5312), True, 'import numpy as np\n'), ((5639, 5653), 'numpy.diag', 'np.diag', (['error'], {}), '(error)\n', (5646, 5653), True, 'import numpy as np\n'), ((7066, 7102), 'numpy.array', 'np.array', (['test_trajectory'], {'copy': '(True)'}), '(test_trajectory, copy=True)\n', (7074, 7102), True, 'import numpy as np\n'), ((7171, 7192), 'copy.deepcopy', 'copy.deepcopy', (['filter'], {}), '(filter)\n', (7184, 7192), False, 'import copy\n'), ((5951, 6024), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""--"""'], {'color': '"""#ff6a6a"""', 'label': '"""Generated"""', 'linewidth': '(2.0)'}), "([], [], '--', color='#ff6a6a', label='Generated', linewidth=2.0)\n", (5959, 6024), True, 'import matplotlib.pyplot as plt\n'), ((6043, 6109), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'color': '"""#6ba3ff"""', 'label': '"""Observed"""', 'linewidth': '(2.0)'}), "([], [], color='#6ba3ff', label='Observed', linewidth=2.0)\n", (6051, 6109), True, 'import matplotlib.pyplot as plt\n'), ((6128, 
6175), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'color': '"""#85d87f"""', 'label': '"""Mean"""'}), "([], [], color='#85d87f', label='Mean')\n", (6136, 6175), True, 'import matplotlib.pyplot as plt\n'), ((8983, 9107), 'intprim.util.visualization.plot_partial_trajectory', 'intprim.util.visualization.plot_partial_trajectory', (['gen_trajectory', 'test_trajectory[:, :observed_index]', 'mean_trajectory'], {}), '(gen_trajectory,\n test_trajectory[:, :observed_index], mean_trajectory)\n', (9033, 9107), False, 'import intprim\n'), ((3853, 3894), 'numpy.random.normal', 'np.random.normal', (['length_mean', 'length_std'], {}), '(length_mean, length_std)\n', (3869, 3894), True, 'import numpy as np\n'), ((4617, 4668), 'numpy.random.normal', 'np.random.normal', (['translation_mean', 'translation_std'], {}), '(translation_mean, translation_std)\n', (4633, 4668), True, 'import numpy as np\n'), ((7717, 7730), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (7725, 7730), True, 'import numpy as np\n'), ((4129, 4153), 'numpy.array', 'np.array', (['[xdata, ydata]'], {}), '([xdata, ydata])\n', (4137, 4153), True, 'import numpy as np\n'), ((8089, 8122), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'delay_prob'], {}), '(1, delay_prob)\n', (8107, 8122), True, 'import numpy as np\n'), ((8303, 8363), 'numpy.tile', 'np.tile', (['test_trajectory[:, observed_index - 1]', '(length, 1)'], {}), '(test_trajectory[:, observed_index - 1], (length, 1))\n', (8310, 8363), True, 'import numpy as np\n'), ((8504, 8517), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8512, 8517), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import argparse
import os
import subprocess
import tempfile
import itertools
import numpy as np
from CMash import MinHash as MH
from scipy.sparse import csc_matrix, save_npz
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Creates a y-vector (i.e. sample vector) when presented with a fasta or fastq input WGS metagenome.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-k', '--k_size', type=int,
                        help="k-mer size to use")
    parser.add_argument('-m', '--max_ram', type=int,
                        help="max amount of RAM in GB", default=12)
    parser.add_argument('--ci', type=int,
                        help="minimum count of appearances for a k-mer to be included", default=0)
    parser.add_argument('--cs', type=int,
                        help="maximum count of appearances recorded for a k-mer", default=256)
    parser.add_argument('-c', '--count_complements', action="store_true",
                        help="count compliment of sequences as well", default=False)
    parser.add_argument('-i', '--input_file', type=str, help="File name of input data")
    parser.add_argument('-t', '--training_prefix', type=str,
                        help="File path to training files (only prefix)", required=True)
    parser.add_argument('-o', '--output_file', type=str,
                        help="Output file of the y-vector in .mat format.",
                        required=True)
    ## Read in the arguments
    args = parser.parse_args()
    k_size = args.k_size
    max_ram = args.max_ram
    ci = args.ci
    cs = args.cs
    count_rev = args.count_complements
    input_file_name = args.input_file
    output_file_name = args.output_file
    training_prefix = args.training_prefix
    ## Existence checks (input, kmc, kmc_tools, kmc_dump)
    # check if the input exists
    if not os.path.exists(input_file_name):
        raise Exception(f"The input file {input_file_name} does not appear to exist")
    # check that every required kmc binary is installed (same check and
    # message as before, consolidated into one loop)
    for binary in ("kmc", "kmc_tools", "kmc_dump"):
        res = subprocess.run(binary, shell=True, stdout=subprocess.DEVNULL)
        if res.returncode != 0:
            raise Exception(
                f"It appears that {binary} is not installed. Please consult the README, "
                f"install {binary}, and try again.")
    ## Run KMC and intersect
    with tempfile.TemporaryDirectory() as temp_dir:
        with tempfile.NamedTemporaryFile() as kmc_output:
            # Count k-mers (note: the output is named f"{kmc_output.name}.pre" and f"{kmc_output.name}.suf")
            # FIX: the original used `{~count_rev * '-b '}`, but ~False == -1
            # and ~True == -2, so the multiplier was always negative and the
            # '-b' (no canonical form) flag was NEVER passed to kmc.
            canonical_flag = '' if count_rev else '-b '
            to_run = f"kmc -k{k_size} {canonical_flag}-ci{ci} -cs{cs} -fm -m{max_ram} {input_file_name} {kmc_output.name} {temp_dir}"
            res = subprocess.run(to_run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
            if res.returncode != 0:
                raise Exception(
                    "An unexpected error was encountered while running kmc, please check the input FASTA file is in the "
                    "correct format. If errors persist, contact the developers.")
            with tempfile.NamedTemporaryFile() as intersect_file:
                # Intersect with training kmers, keeping counts of sample kmers
                to_run = f"kmc_tools simple {training_prefix} {kmc_output.name} intersect {intersect_file.name} -ocright"
                res = subprocess.run(to_run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
                if res.returncode != 0:
                    raise Exception("An unexpected error was encountered while running kmc_tools simple.")
                # Dump the intersected counted k-mers for reading into matrix
                to_run = f"kmc_dump -ci0 -cs100000 {intersect_file.name} {intersect_file.name}"
                res = subprocess.run(to_run, shell=True, stdout=subprocess.PIPE)
                if res.returncode != 0:
                    raise Exception("An unexpected error was encountered while running kmc_tools.")
                ## Load in list of k-mers
                database = MH.import_multiple_from_single_hdf5(training_prefix + ".h5")
                kmers = sorted(set(itertools.chain.from_iterable(genome._kmers for genome in database)))
                # FIX: precompute k-mer -> sorted position; the original called
                # list.index per dump line, an O(n) scan per lookup (O(n*m) total).
                kmer_index = {kmer: pos for pos, kmer in enumerate(kmers)}
                ## Iterate through KMC's dump file to extract k-mers and their counts while determining
                # their corresponding index
                indices = []
                data = []
                for line in intersect_file:
                    info = line.split()
                    try:
                        indices.append(kmer_index[info[0].decode("utf-8")])
                        data.append(int(info[1]))
                    except (KeyError, ValueError):
                        # KeyError: k-mer absent from the training set
                        # ValueError: unparsable count field
                        print("k-mer mismatch")  ## TODO: Identify why there are so many mismatches
                ## Sort the indices and data
                sorter = sorted(range(len(indices)), key=indices.__getitem__)
                indices = np.array([indices[i] for i in sorter])
                data = np.array([data[i] for i in sorter])
                data = data / np.sum(data)
                indptr = np.array([0, len(indices)])
                ## Create and save a csc_matrix as .npz file
                save_npz(output_file_name, csc_matrix((data, indices, indptr), shape=(len(indices), 1)), compressed=True)
| [
"subprocess.run",
"tempfile.NamedTemporaryFile",
"CMash.MinHash.import_multiple_from_single_hdf5",
"tempfile.TemporaryDirectory",
"argparse.ArgumentParser",
"numpy.sum",
"os.path.exists",
"numpy.array",
"itertools.chain.from_iterable"
] | [((238, 441), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creates a y-vector (i.e. sample vector) when presented with a fasta or fastq input WGS metagenome."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Creates a y-vector (i.e. sample vector) when presented with a fasta or fastq input WGS metagenome.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (261, 441), False, 'import argparse\n'), ((2091, 2151), 'subprocess.run', 'subprocess.run', (['"""kmc"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL'}), "('kmc', shell=True, stdout=subprocess.DEVNULL)\n", (2105, 2151), False, 'import subprocess\n'), ((2362, 2428), 'subprocess.run', 'subprocess.run', (['"""kmc_tools"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL'}), "('kmc_tools', shell=True, stdout=subprocess.DEVNULL)\n", (2376, 2428), False, 'import subprocess\n'), ((2650, 2715), 'subprocess.run', 'subprocess.run', (['"""kmc_dump"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL'}), "('kmc_dump', shell=True, stdout=subprocess.DEVNULL)\n", (2664, 2715), False, 'import subprocess\n'), ((5503, 5541), 'numpy.array', 'np.array', (['[indices[i] for i in sorter]'], {}), '([indices[i] for i in sorter])\n', (5511, 5541), True, 'import numpy as np\n'), ((5553, 5588), 'numpy.array', 'np.array', (['[data[i] for i in sorter]'], {}), '([data[i] for i in sorter])\n', (5561, 5588), True, 'import numpy as np\n'), ((1929, 1960), 'os.path.exists', 'os.path.exists', (['input_file_name'], {}), '(input_file_name)\n', (1943, 1960), False, 'import os\n'), ((2926, 2955), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2953, 2955), False, 'import tempfile\n'), ((5607, 5619), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (5613, 5619), True, 'import numpy as np\n'), ((2982, 3011), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3009, 3011), False, 'import 
tempfile\n'), ((3293, 3383), 'subprocess.run', 'subprocess.run', (['to_run'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.DEVNULL'}), '(to_run, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.DEVNULL)\n', (3307, 3383), False, 'import subprocess\n'), ((3670, 3699), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3697, 3699), False, 'import tempfile\n'), ((3944, 4034), 'subprocess.run', 'subprocess.run', (['to_run'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.DEVNULL'}), '(to_run, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.DEVNULL)\n', (3958, 4034), False, 'import subprocess\n'), ((4374, 4432), 'subprocess.run', 'subprocess.run', (['to_run'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(to_run, shell=True, stdout=subprocess.PIPE)\n', (4388, 4432), False, 'import subprocess\n'), ((4643, 4703), 'CMash.MinHash.import_multiple_from_single_hdf5', 'MH.import_multiple_from_single_hdf5', (["(training_prefix + '.h5')"], {}), "(training_prefix + '.h5')\n", (4678, 4703), True, 'from CMash import MinHash as MH\n'), ((4739, 4806), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(genome._kmers for genome in database)'], {}), '(genome._kmers for genome in database)\n', (4768, 4806), False, 'import itertools\n')] |
'''
TODO:
- warn when the entered Id already exists ✓
- save user images in one folder per user, with folder name == Id ✓
- when using a camera with a higher resolution, good
  lighting is required ✗
- if the input is empty, add error handling or provide default
  values for the gender and crime_status variables ✗
'''
from imutils.video import VideoStream
import numpy as np
import cv2 as cv
import imutils
import os
import time
import pymysql as psql
def add():
    """Register a new person for face recognition.

    Prompts for the person's details on stdin, stores the record in the
    MySQL ``People`` table, then captures 50 face image samples from the
    webcam into ``dataset/<Id>/``.
    """
    vId = input("Type your user Id: ")
    vName = input("Type your user Name: ")
    vGender = input("Input your Sex: ")
    vCrime_status = input("Input your Criminal State: ")
    # open database connection
    print("[INFO] preparing connection database")
    db = psql.connect("localhost", "admin", "12345", "fr")
    cursor = db.cursor()
    # make a directory of Ids; refuse a duplicate Id
    dirname = vId
    if os.path.exists("dataset/" + dirname):
        # FIX: the original called exit() before print(), so the message
        # was never shown
        print("directory sudah ada")
        exit()
    else:
        os.makedirs("dataset/" + dirname)
    # insert into database
    # FIX: parameterized query; the original concatenated raw user input
    # into the SQL string, which is vulnerable to SQL injection
    sql = "INSERT INTO People (Id, Name, Gender, Crime_status) VALUES (%s, %s, %s, %s)"
    try:
        # execute the sql insert command
        cursor.execute(sql, (vId, vName, str(vGender), str(vCrime_status)))
        # commit your changes in the database
        db.commit()
    except Exception:
        # rollback in case there is any error
        db.rollback()
    # load model from disk
    print("[INFO] loading model...")
    # FIX: the original split this literal with a backslash line
    # continuation, which embedded the next line's indentation (spaces)
    # inside the file name ("...iter_14    0000.caffemodel")
    net = cv.dnn.readNetFromCaffe(
        "assets/deploy.prototxt.txt",
        "assets/res10_300x300_ssd_iter_140000.caffemodel")
    # initialize the video stream and warming up camera
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0, resolution=(640, 480)).start()
    time.sleep(2.0)
    # loop over the frames from the video stream
    sampleNum = 0
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 800 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        # grab the frame dimensions and convert it to blob
        (h, w) = frame.shape[:2]
        blob = cv.dnn.blobFromImage(cv.resize(frame, (300, 300)), 1.0,
                                    (300, 300), (104.0, 177.0, 123.0))
        # pass the blob through the network and obtain the detections
        # and predictions
        net.setInput(blob)
        detections = net.forward()
        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence associated with the prediction
            confidence = detections[0, 0, i, 2]
            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence < 0.5:
                continue
            # compute the (x, y)-coordinate of the bounding box
            # for the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # draw the bounding box of the face along with the
            # associated probability
            text = vName + " - {:.2f}%".format(confidence * 100)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            sampleNum = sampleNum + 1
            cv.imwrite("dataset/" + dirname + "/" + str(vId) + "." +
                       str(sampleNum) + ".jpg", frame)
            cv.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
            cv.putText(frame, text, (startX, y), cv.FONT_HERSHEY_SIMPLEX,
                       0.45, (0, 0, 255), 2)
        cv.imshow('Frame', frame)
        cv.waitKey(100)
        # FIX: use >= instead of ==; with several faces in one frame
        # sampleNum can jump past 50, which made the loop run forever
        if sampleNum >= 50:
            vs.stop()
            cv.destroyAllWindows()
            print("\n" + sql + "\n")
            print("Your input samples are: " + str(sampleNum))
            # FIX: db.close was referenced without parentheses, so the
            # connection was never actually closed
            db.close()
            break
if __name__ == '__main__':
    add()
| [
"imutils.video.VideoStream",
"cv2.putText",
"os.makedirs",
"cv2.waitKey",
"cv2.destroyAllWindows",
"os.path.exists",
"cv2.imshow",
"time.sleep",
"cv2.rectangle",
"numpy.array",
"cv2.dnn.readNetFromCaffe",
"imutils.resize",
"pymysql.connect",
"cv2.resize"
] | [((822, 871), 'pymysql.connect', 'psql.connect', (['"""localhost"""', '"""admin"""', '"""12345"""', '"""fr"""'], {}), "('localhost', 'admin', '12345', 'fr')\n", (834, 871), True, 'import pymysql as psql\n'), ((961, 997), 'os.path.exists', 'os.path.exists', (["('dataset/' + dirname)"], {}), "('dataset/' + dirname)\n", (975, 997), False, 'import os\n'), ((1821, 1968), 'cv2.dnn.readNetFromCaffe', 'cv.dnn.readNetFromCaffe', (['"""assets/deploy.prototxt.txt"""', '"""assets/res10_300x300_ssd_iter_14 0000.caffemodel"""'], {}), "('assets/deploy.prototxt.txt',\n 'assets/res10_300x300_ssd_iter_14 0000.caffemodel'\n )\n", (1844, 1968), True, 'import cv2 as cv\n'), ((2161, 2176), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2171, 2176), False, 'import time\n'), ((1113, 1146), 'os.makedirs', 'os.makedirs', (["('dataset/' + dirname)"], {}), "('dataset/' + dirname)\n", (1124, 1146), False, 'import os\n'), ((2421, 2453), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(800)'}), '(frame, width=800)\n', (2435, 2453), False, 'import imutils\n'), ((4062, 4087), 'cv2.imshow', 'cv.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (4071, 4087), True, 'import cv2 as cv\n'), ((4096, 4111), 'cv2.waitKey', 'cv.waitKey', (['(100)'], {}), '(100)\n', (4106, 4111), True, 'import cv2 as cv\n'), ((2107, 2148), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)', 'resolution': '(640, 480)'}), '(src=0, resolution=(640, 480))\n', (2118, 2148), False, 'from imutils.video import VideoStream\n'), ((2583, 2611), 'cv2.resize', 'cv.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (2592, 2611), True, 'import cv2 as cv\n'), ((3866, 3933), 'cv2.rectangle', 'cv.rectangle', (['frame', '(startX, startY)', '(endX, endY)', '(0, 255, 0)', '(2)'], {}), '(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)\n', (3878, 3933), True, 'import cv2 as cv\n'), ((3946, 4034), 'cv2.putText', 'cv.putText', (['frame', 'text', '(startX, y)', 
'cv.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(frame, text, (startX, y), cv.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, \n 255), 2)\n', (3956, 4034), True, 'import cv2 as cv\n'), ((4174, 4196), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (4194, 4196), True, 'import cv2 as cv\n'), ((3368, 3390), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (3376, 3390), True, 'import numpy as np\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy
from sagemaker.mxnet import MXNet
from sagemaker.predictor import csv_serializer
import local_mode_utils
from test.integration import MODEL_SUCCESS_FILES, NUM_MODEL_SERVER_WORKERS, RESOURCE_PATH
# Location of the bundled MNIST test resources: the training script plus
# pre-split train/test data directories.
MNIST_PATH = os.path.join(RESOURCE_PATH, 'mnist')
SCRIPT_PATH = os.path.join(MNIST_PATH, 'mnist.py')
# SageMaker local mode reads training channels from file:// URIs.
TRAIN_INPUT = 'file://{}'.format(os.path.join(MNIST_PATH, 'train'))
TEST_INPUT = 'file://{}'.format(os.path.join(MNIST_PATH, 'test'))
def test_mnist_training_and_serving(docker_image, sagemaker_local_session, local_instance_type,
                                    framework_version, tmpdir):
    """Train MNIST in local mode, then serve the model and check a CSV prediction."""
    estimator = MXNet(entry_point=SCRIPT_PATH,
                      role='SageMakerRole',
                      train_instance_count=1,
                      train_instance_type=local_instance_type,
                      sagemaker_session=sagemaker_local_session,
                      image_name=docker_image,
                      framework_version=framework_version,
                      output_path='file://{}'.format(tmpdir))

    _train_and_assert_success(estimator, str(tmpdir))

    with local_mode_utils.lock():
        try:
            model = estimator.create_model(model_server_workers=NUM_MODEL_SERVER_WORKERS)
            predictor = _csv_predictor(model, local_instance_type)
            prediction = predictor.predict(numpy.zeros(shape=(1, 1, 28, 28)))
        finally:
            estimator.delete_endpoint()

    # The endpoint should emit one probability per digit class.
    class_probabilities = prediction.decode('utf-8').split(',')
    assert len(class_probabilities) == 10
def _csv_predictor(model, instance_type):
    """Deploy *model* on one instance and configure raw text/csv round-trips."""
    endpoint = model.deploy(1, instance_type)
    # Send and receive plain CSV; leave the response undecoded (bytes).
    endpoint.content_type = 'text/csv'
    endpoint.serializer = csv_serializer
    endpoint.accept = 'text/csv'
    endpoint.deserializer = None
    return endpoint
def test_distributed_mnist_training(docker_image, sagemaker_local_session, framework_version,
                                    tmpdir):
    """Run a two-instance local training job with the parameter server enabled."""
    estimator = MXNet(entry_point=SCRIPT_PATH,
                      role='SageMakerRole',
                      train_instance_count=2,
                      train_instance_type='local',
                      sagemaker_session=sagemaker_local_session,
                      image_name=docker_image,
                      framework_version=framework_version,
                      output_path='file://{}'.format(tmpdir),
                      hyperparameters={'sagemaker_parameter_server_enabled': True})

    _train_and_assert_success(estimator, str(tmpdir))
def _train_and_assert_success(estimator, output_path):
    """Fit *estimator* on the MNIST channels and verify the expected artifacts exist."""
    estimator.fit({'train': TRAIN_INPUT, 'test': TEST_INPUT})

    for directory, expected_files in MODEL_SUCCESS_FILES.items():
        local_mode_utils.assert_output_files_exist(output_path, directory, expected_files)
| [
"numpy.zeros",
"local_mode_utils.lock",
"local_mode_utils.assert_output_files_exist",
"test.integration.MODEL_SUCCESS_FILES.items",
"os.path.join"
] | [((858, 894), 'os.path.join', 'os.path.join', (['RESOURCE_PATH', '"""mnist"""'], {}), "(RESOURCE_PATH, 'mnist')\n", (870, 894), False, 'import os\n'), ((909, 945), 'os.path.join', 'os.path.join', (['MNIST_PATH', '"""mnist.py"""'], {}), "(MNIST_PATH, 'mnist.py')\n", (921, 945), False, 'import os\n'), ((980, 1013), 'os.path.join', 'os.path.join', (['MNIST_PATH', '"""train"""'], {}), "(MNIST_PATH, 'train')\n", (992, 1013), False, 'import os\n'), ((1047, 1079), 'os.path.join', 'os.path.join', (['MNIST_PATH', '"""test"""'], {}), "(MNIST_PATH, 'test')\n", (1059, 1079), False, 'import os\n'), ((3124, 3151), 'test.integration.MODEL_SUCCESS_FILES.items', 'MODEL_SUCCESS_FILES.items', ([], {}), '()\n', (3149, 3151), False, 'from test.integration import MODEL_SUCCESS_FILES, NUM_MODEL_SERVER_WORKERS, RESOURCE_PATH\n'), ((1618, 1641), 'local_mode_utils.lock', 'local_mode_utils.lock', ([], {}), '()\n', (1639, 1641), False, 'import local_mode_utils\n'), ((3161, 3234), 'local_mode_utils.assert_output_files_exist', 'local_mode_utils.assert_output_files_exist', (['output_path', 'directory', 'files'], {}), '(output_path, directory, files)\n', (3203, 3234), False, 'import local_mode_utils\n'), ((1825, 1858), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(1, 1, 28, 28)'}), '(shape=(1, 1, 28, 28))\n', (1836, 1858), False, 'import numpy\n')] |
"""General module to help train SBERT for NLI tasks."""
import datetime
import math
import os
import pickle
import shutil
import numpy as np
import torch
import torch.optim as optim
import wget
from sentence_transformers import SentenceTransformer, SentencesDataset
from sentence_transformers import losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer
from .dataloader import ClassifierDataset, NLIDataReader, collate_fn, multi_acc
from .dataloader import format_create
from ..data.make_dataset import remove_tokens_get_sentence_sbert
class SBERTPredictor(SentenceTransformer):
    """Sentence-BERT NLI predictor.

    Wraps a ``SentenceTransformer`` embedding model and classifies a pair of
    sentences from the concatenation of the two sentence embeddings and their
    absolute difference (3 * 768 = 2304 features), using either a scikit-learn
    logistic regression or a ``nn.Linear`` head.
    """
    def __init__(self,
                 word_embedding_model,
                 pooling_model,
                 num_classes: int = 3,
                 logistic_model=True,
                 device: str = None):
        """Initialize the class.

        :param word_embedding_model: the word embedding model
        :param pooling_model: the pooling model
        :param num_classes: number of classes in output, defaults to 3
        :param logistic_model: should a logistic regression model be used for classification
        :param device: device type (cuda/cpu); auto-detected when None
        :type device: str, optional
        """
        super().__init__()
        self.embedding_model = SentenceTransformer(
            modules=[word_embedding_model, pooling_model], device=device)
        # Earlier experiments used larger feature vectors:
        # self.linear = nn.Linear(6912, num_classes)
        # self.linear = nn.Linear(4608, num_classes)
        # 2304 = 3 * 768: two sentence embeddings plus their absolute difference.
        self.linear = nn.Linear(2304, num_classes) # using mean of tokens only
        # self.sigmoid = nn.Sigmoid()
        # self.softmax = nn.Softmax(dim=1)
        if device is None:
            self._target_device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self._target_device = torch.device(device)
        self.to(self._target_device)
        self.logistic_model = logistic_model  # should logistic model be trained or not
        self.logisticregression = LogisticRegression(
            warm_start=False, max_iter=500, class_weight="balanced")
    def forward(self, sentence1, sentence2):
        """Score a batch of (pre-tokenized) sentence pairs with the linear head.

        :param sentence1: batch of sentence1
        :param sentence2: batch of sentence2
        :return: raw class logits (no activation is applied; ``h_out`` is the
            bare linear output)
        :rtype: torch.Tensor
        """
        sentence1_embedding = torch.tensor(self.embedding_model.encode(
            sentence1, is_pretokenized=True), device=self._target_device).reshape(-1, 768)
        sentence2_embedding = torch.tensor(self.embedding_model.encode(
            sentence2, is_pretokenized=True), device=self._target_device).reshape(-1, 768)
        # Feature vector [u, v, |u - v|], matching the 2304-wide linear layer.
        net_vector = torch.cat(
            (sentence1_embedding,
             sentence2_embedding,
             torch.abs(sentence1_embedding - sentence2_embedding)),
            1)
        # net_vector = torch.cat((sentence1_embedding, sentence2_embedding), 1)
        linear = self.linear(net_vector)
        # h_out = self.sigmoid(linear)
        # h_out = self.softmax(linear)
        h_out = linear
        return h_out
    def vector(self, sentence1, sentence2):
        """Build [u, v, |u - v|] feature arrays for the classifier heads.

        Unlike :meth:`forward`, inputs here are raw (untokenized) sentences
        and the result stays a numpy array on the CPU.

        :param sentence1: batch of sentence1
        :param sentence2: batch of sentence2
        :return: concatenated embedding features
        :rtype: numpy.ndarray
        """
        sentence1_embedding = self.embedding_model.encode(
            sentence1, is_pretokenized=False)
        sentence2_embedding = self.embedding_model.encode(
            sentence2, is_pretokenized=False)
        net_vector = np.concatenate(
            (sentence1_embedding,
             sentence2_embedding,
             np.abs(sentence1_embedding - sentence2_embedding)),
            1)
        return net_vector
    def predict(self, sentence1, sentence2):
        """Predict class labels for pairs of raw sentences.

        Dispatches to the logistic-regression head when
        ``self.logistic_model`` is True, otherwise to the linear layer
        followed by log-softmax and argmax.

        :param sentence1: list of input sentence1
        :type sentence1: list(str)
        :param sentence2: list of input sentence2
        :type sentence2: list(str)
        :return: predicted class indices as a numpy array
        """
        if self.logistic_model is True:
            net_vector = self.vector(sentence1, sentence2)
            predictions = self.logisticregression.predict(net_vector)
            return predictions
        else:
            net_vector = self.vector(sentence1, sentence2)
            predictions = self.linear(
                torch.tensor(
                    net_vector,
                    device=self._target_device))
            # log_softmax is monotonic, so argmax picks the same class as on
            # the raw logits; it is kept for numerical-inspection parity.
            predictions = torch.log_softmax(predictions, dim=1)
            predictions = torch.argmax(predictions, dim=1)
            return predictions.cpu().numpy()
def freeze_layer(layer):
    """Exclude every parameter of *layer* from gradient computation.

    :param layer: torch model layer
    """
    for parameter in layer.parameters():
        parameter.requires_grad_(False)
def unfreeze_layer(layer):
    """Re-enable gradient computation for every parameter of *layer*.

    :param layer: torch model layer
    """
    for parameter in layer.parameters():
        parameter.requires_grad_(True)
def trainer(model: SBERTPredictor,
            tokenizer,
            df_train,
            df_val,
            epochs: int = 1,
            learning_rate: float = 1e-5,
            batch_size: int = 16,
            embedding_epochs: int = None,
            enable_class_weights: bool = True,
            ):
    """Train the SBERT model using a training and a validation dataframe.

    The embedding (Transformer) layers are fine-tuned first with a softmax
    loss, then frozen; afterwards the classification head (logistic
    regression or the linear layer) is trained on the frozen embeddings.

    :param model: SBERTPredictor model
    :type model: SBERTPredictor
    :param tokenizer: tokenizer used in SBERT model
    :param df_train: train dataframe
    :type df_train: pd.DataFrame()
    :param df_val: validation dataframe
    :type df_val: pd.DataFrame()
    :param epochs: number of epochs for the classification head
    :type epochs: int
    :param learning_rate: learning rate for the classification head
    :type learning_rate: float
    :param batch_size: batch size to be used for training
    :type batch_size: int
    :param embedding_epochs: epochs for the embedding fine-tuning stage;
        defaults to ``epochs`` when None
    :param enable_class_weights: weight classes in the loss when True
    """
    if embedding_epochs is None:
        embedding_epochs = epochs
    nli_reader = NLIDataReader(df_train.append(df_val))
    train_num_labels = nli_reader.get_num_labels()
    train_data = SentencesDataset(
        nli_reader.get_examples(),
        model=model.embedding_model)
    train_data.label_type = torch.long
    # some bug in sentence_transformer library causes it to be identified as
    # float by default
    train_dataloader_embed = DataLoader(
        train_data, shuffle=True, batch_size=batch_size)
    train_loss_embed = losses.SoftmaxLoss(
        model=model.embedding_model,
        sentence_embedding_dimension=model.embedding_model.get_sentence_embedding_dimension(),
        num_labels=train_num_labels)
    val_nli_reader = NLIDataReader(df_val)
    dev_data = SentencesDataset(
        val_nli_reader.get_examples(),
        model=model.embedding_model)
    dev_data.label_type = torch.long
    evaluator = EmbeddingSimilarityEvaluator(
        sentences1=df_val["sentence1"].values,
        sentences2=df_val["sentence2"].values,
        scores=df_val["label"].values / 2.,
        batch_size=batch_size)
    # 10% of train data for warm-up
    # FIX: computed from embedding_epochs, the epoch count actually used for
    # the embedding fit below (the original used `epochs` here)
    warmup_steps = math.ceil(
        len(train_dataloader_embed) * embedding_epochs / batch_size * 0.1)
    # now to train the final layer
    train_dataset = ClassifierDataset(df_train, tokenizer=tokenizer)
    val_dataset = ClassifierDataset(df_val, tokenizer=tokenizer)
    if enable_class_weights is False:
        class_weights = None
    else:
        class_weights = train_dataset.class_weights()
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  collate_fn=collate_fn,
                                  shuffle=True)
    val_dataloader = DataLoader(dataset=val_dataset,
                                batch_size=1,
                                collate_fn=collate_fn)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # FIX: the original unconditionally called class_weights.to(device),
    # which raised AttributeError whenever enable_class_weights=False
    weight = class_weights.to(device) if class_weights is not None else None
    criterion = nn.CrossEntropyLoss(weight=weight)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    model.to(device)
    print("------TRAINING STARTS----------")  # noqa: T001
    # fine-tune the Transformer (embedding) layers first
    unfreeze_layer(model.embedding_model)
    model.embedding_model.fit(
        train_objectives=[
            (train_dataloader_embed,
             train_loss_embed)],
        evaluator=evaluator,
        # FIX: honour embedding_epochs (the original hard-coded epochs=1,
        # so the embedding_epochs parameter had no effect)
        epochs=embedding_epochs,
        evaluation_steps=1000,
        warmup_steps=warmup_steps,
    )  # train the Transformer layer
    freeze_layer(model.embedding_model)
    x, y = format_create(df=df_train, model=model)
    x_test, y_test = format_create(df=df_val, model=model)
    if model.logistic_model is True:
        model.logisticregression.fit(x, y)
        print(classification_report(y_test, model.logisticregression.predict(x_test)))  # noqa: T001
    else:
        accuracy_stats = {"train": [],
                          "val": [],
                          }
        loss_stats = {"train": [],
                      "val": [],
                      }
        for e in range(epochs):
            train_epoch_loss = 0
            train_epoch_acc = 0
            model.train()
            for sentence1, sentence2, label in tqdm(train_dataloader):
                label = label.to(device)
                optimizer.zero_grad()
                y_train_pred = model(sentence1, sentence2)
                train_loss = criterion(y_train_pred, label)
                train_acc = multi_acc(y_train_pred, label)
                train_loss.backward()
                optimizer.step()
                train_epoch_loss += train_loss.item()
                train_epoch_acc += train_acc.item()
            # VALIDATION
            with torch.no_grad():
                val_epoch_loss = 0
                val_epoch_acc = 0
                model.eval()
                for sentence1, sentence2, label in val_dataloader:
                    label = label.to(device)
                    y_val_pred = model(sentence1, sentence2)
                    val_loss = criterion(y_val_pred, label)
                    val_acc = multi_acc(y_val_pred, label)
                    val_epoch_loss += val_loss.item()
                    val_epoch_acc += val_acc.item()
            loss_stats['train'].append(
                train_epoch_loss / len(train_dataloader))
            loss_stats['val'].append(val_epoch_loss / len(val_dataloader))
            accuracy_stats['train'].append(
                train_epoch_acc / len(train_dataloader))
            accuracy_stats['val'].append(val_epoch_acc / len(val_dataloader))
            print(f"Epoch {e+0:03}: | Train Loss: {train_epoch_loss/len(train_dataloader):.5f} \
                | Val Loss: {val_epoch_loss / len(val_dataloader):.5f} \
                | Train Acc: {train_epoch_acc/len(train_dataloader):.3f} \
                | Val Acc: {val_epoch_acc/len(val_dataloader):.3f}")  # noqa: T001
    print("---------TRAINING ENDED------------")  # noqa: T001
def build_sbert_model(model_name: str, logistic_model: bool = True):
    """Build SBERT model, based on model name provided.

    Downloads vocabulary/merges files from the HuggingFace CDN into a local
    staging directory, loads the pretrained weights, builds the
    SentenceTransformer modules, and deletes the staging directory.
    NOTE(review): requires network access; any other value of ``model_name``
    falls through to the biomed_roberta branch.

    :param model_name: model to be used, currently supported: covidbert or biobert
    :type model_name: str
    :param logistic_model: use logistic regression as classifier
    :type logistic_model: bool
    :return: SBERT model and corresponding tokenizer
    """
    if model_name == "covidbert":
        model_name = "deepset/covid_bert_base"
        covid_bert_path = "covid_bert_path"
        model_save_path = covid_bert_path
        os.makedirs(model_save_path, exist_ok=True)
        wget.download(
            "https://cdn.huggingface.co/deepset/covid_bert_base/vocab.txt",
            out=f"{model_save_path}/")  # download the vocab file
    else:
        model_name = "allenai/biomed_roberta_base"
        model_save_path = "biobert_path"
        os.makedirs(model_save_path, exist_ok=True)
        wget.download(
            "https://cdn.huggingface.co/allenai/biomed_roberta_base/merges.txt",
            out=f"{model_save_path}/")
        wget.download(
            "https://cdn.huggingface.co/allenai/biomed_roberta_base/vocab.json",
            out=f"{model_save_path}/")  # download the vocab file
    # Persist the pretrained weights next to the downloaded vocab so that
    # models.Transformer can load everything from one directory.
    bert_model = AutoModel.from_pretrained(model_name)
    bert_model.save_pretrained(model_save_path)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    del bert_model
    word_embedding_model = models.Transformer(model_save_path)
    # The staging directory is only needed while models.Transformer loads.
    shutil.rmtree(model_save_path)
    pooling_model = models.Pooling(768,
                                   pooling_mode_mean_tokens=True,
                                   pooling_mode_cls_token=False,
                                   pooling_mode_max_tokens=False)
    # generating biobert sentence embeddings (mean pooling of sentence
    # embedding vectors)
    sbert_model = SBERTPredictor(
        word_embedding_model,
        pooling_model,
        logistic_model=logistic_model)
    return sbert_model, tokenizer
def _train_on_split(sbert_model, tokenizer, train_x, train_y, test_x, test_y,
                    batch_size, num_epochs, learning_rate, embedding_epochs,
                    enable_class_weights):
    """Convert one tokenized NLI split into sentence pairs and train on it."""
    df_train = remove_tokens_get_sentence_sbert(train_x, train_y)
    df_val = remove_tokens_get_sentence_sbert(test_x, test_y)
    trainer(
        model=sbert_model,
        tokenizer=tokenizer,
        df_train=df_train,
        df_val=df_val,
        epochs=num_epochs,
        batch_size=batch_size,
        learning_rate=learning_rate,
        embedding_epochs=embedding_epochs,
        enable_class_weights=enable_class_weights)


def train_sbert_model(sbert_model,
                      tokenizer,
                      use_man_con=False,
                      use_med_nli=False,
                      use_multi_nli=False,
                      use_cord=False,
                      multi_nli_train_x: np.ndarray = None,
                      multi_nli_train_y: np.ndarray = None,
                      multi_nli_test_x: np.ndarray = None,
                      multi_nli_test_y: np.ndarray = None,
                      med_nli_train_x: np.ndarray = None,
                      med_nli_train_y: np.ndarray = None,
                      med_nli_test_x: np.ndarray = None,
                      med_nli_test_y: np.ndarray = None,
                      man_con_train_y: np.ndarray = None,
                      man_con_train_x: np.ndarray = None,
                      man_con_test_x: np.ndarray = None,
                      man_con_test_y: np.ndarray = None,
                      cord_train_x: np.ndarray = None,
                      cord_train_y: np.ndarray = None,
                      cord_test_x: np.ndarray = None,
                      cord_test_y: np.ndarray = None,
                      batch_size: int = 2,
                      num_epochs: int = 1,
                      learning_rate: float = 1e-7,
                      embedding_epochs: int = None,
                      enable_class_weights: bool = True,
                      ):
    """Train SBERT on any combination of NLI datasets.

    Each enabled dataset (multi_nli, med_nli, man_con, cord — in that fixed
    order, matching the original branch order) is trained on in turn, and
    only when its training arrays were actually supplied.

    :param sbert_model: SBERTPredictor model to fine-tune
    :param tokenizer: the tokenizer corresponding to the model being used
    :param use_man_con: train on the ManCon split, defaults to False
    :type use_man_con: bool, optional
    :param use_med_nli: train on the MedNLI split, defaults to False
    :type use_med_nli: bool, optional
    :param use_multi_nli: train on the MultiNLI split, defaults to False
    :type use_multi_nli: bool, optional
    :param use_cord: train on the CORD split, defaults to False
    :type use_cord: bool, optional
    :param batch_size: training batch size, defaults to 2
    :type batch_size: int, optional
    :param num_epochs: classifier-head epochs, defaults to 1
    :type num_epochs: int, optional
    :param learning_rate: defaults to 1e-7
    :type learning_rate: float
    :param embedding_epochs: epochs for the embedding fine-tuning stage
    :param enable_class_weights: weight classes in the loss when True
    """
    # (enabled-flag, train_x, train_y, test_x, test_y) per dataset; this
    # replaces four near-identical copy-pasted if-blocks.
    splits = (
        (use_multi_nli, multi_nli_train_x, multi_nli_train_y,
         multi_nli_test_x, multi_nli_test_y),
        (use_med_nli, med_nli_train_x, med_nli_train_y,
         med_nli_test_x, med_nli_test_y),
        (use_man_con, man_con_train_x, man_con_train_y,
         man_con_test_x, man_con_test_y),
        (use_cord, cord_train_x, cord_train_y,
         cord_test_x, cord_test_y),
    )
    for enabled, train_x, train_y, test_x, test_y in splits:
        if enabled and train_x is not None:
            _train_on_split(sbert_model, tokenizer, train_x, train_y,
                            test_x, test_y, batch_size, num_epochs,
                            learning_rate, embedding_epochs,
                            enable_class_weights)
    # return sbert_model
def save_sbert_model(model: SBERTPredictor,
                     timed_dir_name: bool = True,
                     transformer_dir: str = 'output/sbert_model'):
    """Save SBERT trained model to disk as a pickle file.

    :param model: end-to-end SBERT model
    :type model: SBERTPredictor
    :param timed_dir_name: should directory name have time stamp, defaults to True
    :param transformer_dir: directory name, defaults to 'output/sbert_model'
    :type transformer_dir: str, optional
    """
    if timed_dir_name:
        # Append a M-D-Y stamp so repeated runs do not overwrite each other.
        now = datetime.datetime.now()
        transformer_dir = os.path.join(
            transformer_dir, f"{now.month}-{now.day}-{now.year}")
    if not os.path.exists(transformer_dir):
        os.makedirs(transformer_dir)
    # Use a context manager so the file handle is always closed
    # (the original passed a bare open() to pickle.dump and leaked it).
    with open(os.path.join(transformer_dir, 'sigmoid.pickle'), "wb") as out_file:
        pickle.dump(model, out_file)
def load_sbert_model(transformer_dir: str = 'output/sbert_model',
                     file_name: str = 'sigmoid.pickle'):
    """Load the pickle file containing the model weights.

    :param transformer_dir: folder directory, defaults to 'output/sbert_model'
    :param file_name: file name, defaults to 'sigmoid.pickle'
    :return: SBERT model stored at given location
    :rtype: SBERTPredictor
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original passed a bare open() to pickle.load and leaked it).
    with open(os.path.join(transformer_dir, file_name), "rb") as in_file:
        sbert_model = pickle.load(in_file)
    return sbert_model
| [
"numpy.abs",
"sentence_transformers.models.Transformer",
"sentence_transformers.evaluation.EmbeddingSimilarityEvaluator",
"torch.argmax",
"transformers.AutoModel.from_pretrained",
"torch.device",
"shutil.rmtree",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"os.path.exists",
... | [((6675, 6734), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'shuffle': '(True)', 'batch_size': 'batch_size'}), '(train_data, shuffle=True, batch_size=batch_size)\n', (6685, 6734), False, 'from torch.utils.data import DataLoader\n'), ((7162, 7337), 'sentence_transformers.evaluation.EmbeddingSimilarityEvaluator', 'EmbeddingSimilarityEvaluator', ([], {'sentences1': "df_val['sentence1'].values", 'sentences2': "df_val['sentence2'].values", 'scores': "(df_val['label'].values / 2.0)", 'batch_size': 'batch_size'}), "(sentences1=df_val['sentence1'].values,\n sentences2=df_val['sentence2'].values, scores=df_val['label'].values / \n 2.0, batch_size=batch_size)\n", (7190, 7337), False, 'from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\n'), ((7817, 7915), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'collate_fn': 'collate_fn', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=batch_size, collate_fn=\n collate_fn, shuffle=True)\n', (7827, 7915), False, 'from torch.utils.data import DataLoader\n'), ((8034, 8102), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': '(1)', 'collate_fn': 'collate_fn'}), '(dataset=val_dataset, batch_size=1, collate_fn=collate_fn)\n', (8044, 8102), False, 'from torch.utils.data import DataLoader\n'), ((12545, 12582), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['model_name'], {}), '(model_name)\n', (12570, 12582), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((12647, 12688), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (12676, 12688), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((12736, 12771), 'sentence_transformers.models.Transformer', 'models.Transformer', (['model_save_path'], {}), '(model_save_path)\n', (12754, 12771), False, 'from sentence_transformers 
import losses, models\n'), ((12776, 12806), 'shutil.rmtree', 'shutil.rmtree', (['model_save_path'], {}), '(model_save_path)\n', (12789, 12806), False, 'import shutil\n'), ((12827, 12943), 'sentence_transformers.models.Pooling', 'models.Pooling', (['(768)'], {'pooling_mode_mean_tokens': '(True)', 'pooling_mode_cls_token': '(False)', 'pooling_mode_max_tokens': '(False)'}), '(768, pooling_mode_mean_tokens=True, pooling_mode_cls_token=\n False, pooling_mode_max_tokens=False)\n', (12841, 12943), False, 'from sentence_transformers import losses, models\n'), ((1559, 1645), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', ([], {'modules': '[word_embedding_model, pooling_model]', 'device': 'device'}), '(modules=[word_embedding_model, pooling_model], device=\n device)\n', (1578, 1645), False, 'from sentence_transformers import SentenceTransformer, SentencesDataset\n'), ((1798, 1826), 'torch.nn.Linear', 'nn.Linear', (['(2304)', 'num_classes'], {}), '(2304, num_classes)\n', (1807, 1826), False, 'from torch import nn\n'), ((2306, 2381), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'warm_start': '(False)', 'max_iter': '(500)', 'class_weight': '"""balanced"""'}), "(warm_start=False, max_iter=500, class_weight='balanced')\n", (2324, 2381), False, 'from sklearn.linear_model import LogisticRegression\n'), ((11850, 11893), 'os.makedirs', 'os.makedirs', (['model_save_path'], {'exist_ok': '(True)'}), '(model_save_path, exist_ok=True)\n', (11861, 11893), False, 'import os\n'), ((11902, 12010), 'wget.download', 'wget.download', (['"""https://cdn.huggingface.co/deepset/covid_bert_base/vocab.txt"""'], {'out': 'f"""{model_save_path}/"""'}), "('https://cdn.huggingface.co/deepset/covid_bert_base/vocab.txt',\n out=f'{model_save_path}/')\n", (11915, 12010), False, 'import wget\n'), ((12170, 12213), 'os.makedirs', 'os.makedirs', (['model_save_path'], {'exist_ok': '(True)'}), '(model_save_path, exist_ok=True)\n', (12181, 12213), False, 'import os\n'), 
((12222, 12340), 'wget.download', 'wget.download', (['"""https://cdn.huggingface.co/allenai/biomed_roberta_base/merges.txt"""'], {'out': 'f"""{model_save_path}/"""'}), "(\n 'https://cdn.huggingface.co/allenai/biomed_roberta_base/merges.txt',\n out=f'{model_save_path}/')\n", (12235, 12340), False, 'import wget\n'), ((12365, 12483), 'wget.download', 'wget.download', (['"""https://cdn.huggingface.co/allenai/biomed_roberta_base/vocab.json"""'], {'out': 'f"""{model_save_path}/"""'}), "(\n 'https://cdn.huggingface.co/allenai/biomed_roberta_base/vocab.json',\n out=f'{model_save_path}/')\n", (12378, 12483), False, 'import wget\n'), ((19188, 19211), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19209, 19211), False, 'import datetime\n'), ((19238, 19304), 'os.path.join', 'os.path.join', (['transformer_dir', 'f"""{now.month}-{now.day}-{now.year}"""'], {}), "(transformer_dir, f'{now.month}-{now.day}-{now.year}')\n", (19250, 19304), False, 'import os\n'), ((19330, 19361), 'os.path.exists', 'os.path.exists', (['transformer_dir'], {}), '(transformer_dir)\n', (19344, 19361), False, 'import os\n'), ((19371, 19399), 'os.makedirs', 'os.makedirs', (['transformer_dir'], {}), '(transformer_dir)\n', (19382, 19399), False, 'import os\n'), ((2126, 2146), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (2138, 2146), False, 'import torch\n'), ((4822, 4859), 'torch.log_softmax', 'torch.log_softmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (4839, 4859), False, 'import torch\n'), ((4886, 4918), 'torch.argmax', 'torch.argmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (4898, 4918), False, 'import torch\n'), ((8206, 8231), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8229, 8231), False, 'import torch\n'), ((9515, 9537), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (9519, 9537), False, 'from tqdm import tqdm\n'), ((19459, 19506), 'os.path.join', 'os.path.join', 
(['transformer_dir', '"""sigmoid.pickle"""'], {}), "(transformer_dir, 'sigmoid.pickle')\n", (19471, 19506), False, 'import os\n'), ((20027, 20067), 'os.path.join', 'os.path.join', (['transformer_dir', 'file_name'], {}), '(transformer_dir, file_name)\n', (20039, 20067), False, 'import os\n'), ((3073, 3125), 'torch.abs', 'torch.abs', (['(sentence1_embedding - sentence2_embedding)'], {}), '(sentence1_embedding - sentence2_embedding)\n', (3082, 3125), False, 'import torch\n'), ((3999, 4048), 'numpy.abs', 'np.abs', (['(sentence1_embedding - sentence2_embedding)'], {}), '(sentence1_embedding - sentence2_embedding)\n', (4005, 4048), True, 'import numpy as np\n'), ((4701, 4753), 'torch.tensor', 'torch.tensor', (['net_vector'], {'device': 'self._target_device'}), '(net_vector, device=self._target_device)\n', (4713, 4753), False, 'import torch\n'), ((10019, 10034), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10032, 10034), False, 'import torch\n'), ((2040, 2065), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2063, 2065), False, 'import torch\n')] |
# ------------------------------------------------------------------------------
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
nms operation
'''
from __future__ import division
import numpy as np
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
    '''
    Compute Object Keypoint Similarity (OKS) between a ground keypoint set
    and each of several candidate keypoint sets.

    :param g: flat keypoint array [x0, y0, v0, x1, y1, v1, ...]
    :param d: 2D array, one flat keypoint row per candidate
    :param a_g: area associated with g
    :param a_d: per-candidate areas (same length as d)
    :param sigmas: per-keypoint falloff constants; defaults to the 17 COCO values
    :param in_vis_thre: if given, only keypoints visible (> threshold) in
        *both* g and the candidate contribute to the score
    :return: 1D array of OKS values, one per row of d
    '''
    if not isinstance(sigmas, np.ndarray):
        sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72,
                           .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
    var = (sigmas * 2) ** 2
    xg = g[0::3]
    yg = g[1::3]
    vg = g[2::3]
    ious = np.zeros(d.shape[0])
    for n_d in range(0, d.shape[0]):
        xd = d[n_d, 0::3]
        yd = d[n_d, 1::3]
        vd = d[n_d, 2::3]
        dx = xd - xg
        dy = yd - yg
        # np.spacing(1) guards against division by zero for zero areas
        e = (dx ** 2 + dy ** 2) / var / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
        if in_vis_thre is not None:
            # BUG FIX: the original used `list(vg > t) and list(vd > t)`,
            # but `and` between two non-empty lists just returns the second
            # list, so the ground-truth visibility was ignored. Use an
            # elementwise logical AND instead.
            ind = np.logical_and(vg > in_vis_thre, vd > in_vis_thre)
            e = e[ind]
        ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
    return ious
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
    """
    Greedy non-maximum suppression based on OKS overlap.

    Repeatedly keeps the highest-scoring candidate and discards all others
    whose OKS overlap with it exceeds ``thresh``.

    :param kpts_db: list of dicts with 'score', 'keypoints' and 'area' keys
    :param thresh: retain only candidates with overlap <= thresh
    :return: indexes into kpts_db to keep
    """
    if len(kpts_db) == 0:
        return []
    scores = np.array([entry['score'] for entry in kpts_db])
    kpts = np.array([entry['keypoints'].flatten() for entry in kpts_db])
    areas = np.array([entry['area'] for entry in kpts_db])
    # Process candidates from highest to lowest score.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        overlaps = oks_iou(kpts[best], kpts[order[1:]], areas[best],
                           areas[order[1:]], sigmas, in_vis_thre)
        retained = np.where(overlaps <= thresh)[0]
        # +1 because `overlaps` was computed against order[1:].
        order = order[retained + 1]
    return keep
| [
"numpy.zeros",
"numpy.spacing",
"numpy.where",
"numpy.array",
"numpy.exp"
] | [((1192, 1212), 'numpy.zeros', 'np.zeros', (['d.shape[0]'], {}), '(d.shape[0])\n', (1200, 1212), True, 'import numpy as np\n'), ((970, 1086), 'numpy.array', 'np.array', (['[0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, \n 1.07, 0.87, 0.87, 0.89, 0.89]'], {}), '([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62,\n 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])\n', (978, 1086), True, 'import numpy as np\n'), ((2515, 2542), 'numpy.where', 'np.where', (['(oks_ovr <= thresh)'], {}), '(oks_ovr <= thresh)\n', (2523, 2542), True, 'import numpy as np\n'), ((1436, 1449), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (1446, 1449), True, 'import numpy as np\n'), ((1609, 1619), 'numpy.exp', 'np.exp', (['(-e)'], {}), '(-e)\n', (1615, 1619), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import os
import sys
from collections import OrderedDict
from typing import Callable, Tuple
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
# Mapping from CLI dtype name to the actual numpy dtype.
# BUG FIX: the original mapped 'uint16'/'uint32'/'uint64' all to np.uint8,
# so --image-dtype silently produced 8-bit images for every choice.
IMAGE_DTYPES = OrderedDict([
    ('uint8', np.uint8),
    ('uint16', np.uint16),
    ('uint32', np.uint32),
    ('uint64', np.uint64),
])

DEFAULT_IMAGE_WIDTH = 1000
DEFAULT_IMAGE_HEIGHT = 1000
DEFAULT_IMAGE_DTYPE = next(iter(IMAGE_DTYPES))  # first key, i.e. 'uint8'

DEFAULT_KERNEL_RADIUS_MIN = 5
DEFAULT_KERNEL_RADIUS_MAX = 100
DEFAULT_KERNEL_RADIUS_STEP = 5
DEFAULT_KERNEL_SIGMA = 10.0

DEFAULT_RUNS = 10
def random_array(size: Tuple[int, int], dtype: np.dtype) -> np.ndarray:
    """Return a random array of the given shape, spanning the dtype's full range.

    Raises ValueError for dtypes that are neither integer nor floating point.
    """
    if np.issubdtype(dtype, np.integer):
        upper = np.iinfo(dtype).max
        # randint's upper bound is exclusive, hence the +1
        return np.random.randint(upper + 1, size=size, dtype=dtype)
    if np.issubdtype(dtype, np.floating):
        upper = np.finfo(dtype).max
        samples = np.random.uniform(high=upper, size=size)
        return samples.astype(dtype)
    raise ValueError("invalid datatype: {}".format(dtype))
def convolve(img: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Filter ``img`` with ``kernel`` in the spatial domain (same depth as input).

    NOTE(review): cv.filter2D presumably computes correlation rather than true
    convolution; for the symmetric Gaussian kernels used here that is
    equivalent — confirm if asymmetric kernels are ever passed.
    """
    return cv.filter2D(img, -1, kernel)
def fft_pad(mat: np.ndarray, min: Tuple[int, int] = None):
    """Zero-pad ``mat`` (centered) up to an optimal DFT size.

    :param mat: 2D input array
    :param min: optional minimum (rows, cols) the padded result must cover
    :return: tuple of (padded array, [top, bottom, left, right] pad amounts)
    """
    rows, cols = mat.shape
    if min is not None:
        rows = max(min[0], rows)
        cols = max(min[1], cols)
    opt_rows = cv.getOptimalDFTSize(rows)
    opt_cols = cv.getOptimalDFTSize(cols)
    # Split each pad as evenly as possible between the two sides.
    total_row_pad = opt_rows - mat.shape[0]
    row_pad_top = total_row_pad // 2
    row_pad_bottom = total_row_pad - row_pad_top
    total_col_pad = opt_cols - mat.shape[1]
    col_pad_left = total_col_pad // 2
    col_pad_right = total_col_pad - col_pad_left
    padded = cv.copyMakeBorder(mat,
                                row_pad_top, row_pad_bottom,
                                col_pad_left, col_pad_right,
                                cv.BORDER_CONSTANT, value=0)
    return padded, [row_pad_top, row_pad_bottom, col_pad_left, col_pad_right]
def fft_apply(mat: np.ndarray):
    """Forward DFT of ``mat`` as a two-channel (real, imaginary) float32 array."""
    real_part = mat.astype(np.float32)
    imag_part = np.zeros_like(real_part)
    complex_mat = cv.merge([real_part, imag_part])
    # In-place transform: dst is the same buffer.
    cv.dft(complex_mat, dst=complex_mat)
    return complex_mat
def ifft_apply(mat: np.ndarray):
    """Inverse DFT of a two-channel array, returning only the real channel.

    NOTE(review): cv.DFT_SCALE is not passed, so the output is presumably
    scaled by the array size — confirm whether absolute pixel values matter
    to callers.
    """
    return cv.dft(mat, flags=cv.DFT_INVERSE)[:, :, 0]
def fft_filter(img: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Filter ``img`` with ``kernel`` via the frequency domain.

    Both image and kernel are padded to an optimal DFT size, transformed,
    combined, transformed back, and the padding is stripped again.
    """
    # apply FFT to image
    img_padded, img_pads = fft_pad(img)
    img_fft = fft_apply(img_padded)
    # apply FFT to kernel (padded at least to the image size)
    kernel_padded, _ = fft_pad(kernel, min=img.shape)
    kernel_fft = fft_apply(kernel_padded)
    # apply kernel to image: scale both channels by the kernel spectrum magnitude
    kernel_re, kernel_im = cv.split(kernel_fft)
    kernel_magnitude = cv.magnitude(kernel_re, kernel_im)
    img_fft[:, :, 0] *= kernel_magnitude
    img_fft[:, :, 1] *= kernel_magnitude
    # transform back to spatial domain
    img_filtered = ifft_apply(img_fft)
    # remove padding and return result.
    # BUG FIX: the original sliced with `[pads[0]:-pads[1], pads[2]:-pads[3]]`;
    # when a pad amount is 0 (image already at an optimal DFT size) the slice
    # end becomes -0, which yields an EMPTY array. Use explicit end indices.
    top, bottom, left, right = img_pads
    return img_filtered[top:img_filtered.shape[0] - bottom,
                        left:img_filtered.shape[1] - right]
def profile(func: Callable[[np.ndarray, np.ndarray], np.ndarray],
            img: np.ndarray,
            kernel: np.ndarray,
            runs: int) -> Tuple[float, float, float]:
    """Time ``func(img, kernel)`` over ``runs`` executions.

    :return: (mean, median, standard deviation) of the runtimes in milliseconds
    """
    res = []
    for _ in range(runs):
        t0 = cv.getTickCount()
        func(img, kernel)
        # Tick delta divided by tick frequency gives seconds; * 1000 -> ms.
        res.append((cv.getTickCount() - t0) * 1000 / cv.getTickFrequency())
    # BUG FIX: the original took the middle element of the *unsorted* list as
    # the median; sort the timings first so it really is the median.
    return sum(res) / len(res), sorted(res)[len(res) // 2], np.std(res)
if __name__ == '__main__':
    # parse arguments
    def formatter_class(prog):
        # Wider help position keeps the long option names on one line.
        return argparse.RawTextHelpFormatter(prog, max_help_position=80)

    parser = argparse.ArgumentParser(
        usage="%(prog)s [OPTION...]",
        description=str("Compare runtimes of gauss filtering by convolution\n"
                        "in the spatial domain and multiplication in the\n"
                        "frequency domain for different filter kernel sizes.\n"
                        "\n"
                        "Either specify a concrete input image on which\n"
                        "profiling should be performed using --image or\n"
                        "alternatively specify --random-image."),
        formatter_class=formatter_class)

    parser.add_argument('-s', '--silent',
                        action='store_true',
                        help="do not write progress to stdout")

    parser.add_argument('--image',
                        help="input image")

    parser.add_argument('--random-image',
                        action='store_true',
                        help="randomly generate input image")

    image_width_help = "random input image width (default {})"
    parser.add_argument('--image-width',
                        type=int,
                        help=image_width_help.format(DEFAULT_IMAGE_WIDTH))

    image_height_help = "random input image height (default {})"
    parser.add_argument('--image-height',
                        type=int,
                        help=image_height_help.format(DEFAULT_IMAGE_HEIGHT))

    image_dtype_help = "random input image datatype (default {})"
    parser.add_argument('--image-dtype',
                        choices=IMAGE_DTYPES.keys(),
                        help=image_dtype_help.format(DEFAULT_IMAGE_DTYPE))

    kernel_radius_min_help = "minimal filter kernel radius (default {})"
    parser.add_argument('--kernel-radius-min',
                        default=DEFAULT_KERNEL_RADIUS_MIN,
                        type=int,
                        help=kernel_radius_min_help.format(
                            DEFAULT_KERNEL_RADIUS_MIN))

    kernel_radius_max_help = "maximal filter kernel radius (default {})"
    parser.add_argument('--kernel-radius-max',
                        default=DEFAULT_KERNEL_RADIUS_MAX,
                        type=int,
                        help=kernel_radius_max_help.format(
                            DEFAULT_KERNEL_RADIUS_MAX))

    kernel_radius_step_help = "filter kernel radius step size (default {})"
    parser.add_argument('--kernel-radius-step',
                        default=DEFAULT_KERNEL_RADIUS_STEP,
                        type=int,
                        help=kernel_radius_step_help.format(
                            DEFAULT_KERNEL_RADIUS_STEP))

    runs_help = "profiling runs per kernel (default {})"
    parser.add_argument('--runs',
                        default=DEFAULT_RUNS,
                        type=int,
                        help=runs_help.format(DEFAULT_RUNS))

    args = parser.parse_args()

    # --image and --random-image are mutually exclusive; the random-image
    # size/dtype options only make sense without a concrete input image.
    if args.image is not None:
        if args.random_image:
            print("can not simultaneously specify --image and --random-image",
                  file=sys.stderr)

            sys.exit(1)

        if args.image_width is not None:
            print("Warning: --image specified, ignoring --image-width",
                  file=sys.stderr)

        if args.image_height is not None:
            print("Warning: --image specified, ignoring --image-height",
                  file=sys.stderr)

        if args.image_dtype is not None:
            print("Warning: --image specified, ignoring --image-dtype",
                  file=sys.stderr)
    else:
        if not args.random_image:
            parser.print_usage(sys.stderr)
            fmt = "{}: error: either --image or --random-image must be specified"
            print(fmt.format(os.path.basename(__file__)), file=sys.stderr)

            sys.exit(1)

        if args.image_width is None:
            args.image_width = DEFAULT_IMAGE_WIDTH

        if args.image_height is None:
            args.image_height = DEFAULT_IMAGE_HEIGHT

        if args.image_dtype is None:
            args.image_dtype = DEFAULT_IMAGE_DTYPE

    # obtain test-image
    if args.image is not None:
        img = cv.imread(args.image, flags=cv.IMREAD_GRAYSCALE)

        if img is None:
            print("Failed to read image file '{}'".format(args.image),
                  file=sys.stderr)

            sys.exit(1)
    else:
        dtype = IMAGE_DTYPES[args.image_dtype]
        img = random_array((args.image_width, args.image_height), dtype)

    # profile convolution and FFT
    rmin = args.kernel_radius_min
    rmax = args.kernel_radius_max
    rstep = args.kernel_radius_step

    radii = list(range(rmin, rmax + 1, rstep))

    results_convolution = []
    results_fft = []

    for i, r in enumerate(radii):
        # kernel width for radius r (always odd)
        w = 2 * r + 1

        if not args.silent:
            fmt = "({}/{}) profiling {w}x{w} kernel..."
            print(fmt.format(i + 1, len(radii), w=w))

        gaussian1D = cv.getGaussianKernel(w, DEFAULT_KERNEL_SIGMA)
        # outer product of the separable 1D kernel gives the full 2D kernel
        kernel = gaussian1D * gaussian1D.T

        results_convolution.append(
            profile(convolve, img, kernel, args.runs))

        results_fft.append(
            profile(fft_filter, img, kernel, args.runs))

        if i == len(radii) - 1:
            # keep the filtered outputs of the largest kernel for display
            img_convolved = convolve(img, kernel)
            img_fft_filtered = fft_filter(img, kernel)

    # display results
    if args.image:
        fig, axes = plt.subplots(2, 2)
    else:
        fig, axes = plt.subplots(1, 2)

    fig.set_size_inches(12, 7)

    # convolution
    title_conv = "Convolution ({}x{})".format(img.shape[0], img.shape[1])
    t_conv_avg, t_conv_med, t_conv_std = zip(*results_convolution)

    if args.image:
        axes[0, 0].imshow(img_convolved, cmap='gray')
        post_title = ", {w}x{w} kernel".format(w=2 * radii[-1] + 1)
        axes[0, 0].set_title(title_conv + post_title)

    ax_conv_data = axes[0, 1] if args.image else axes[0]
    ax_conv_data.set_title(title_conv)
    ax_conv_data.set_xlabel("Kernel Radius (px)")
    ax_conv_data.set_ylabel("Execution Time (ms)")

    ax_conv_data.plot(radii, t_conv_avg,
                      label="average ({} runs)".format(args.runs))

    ax_conv_data.errorbar(radii, t_conv_med, t_conv_std,
                          capsize=5,
                          elinewidth=1,
                          label="median and stddev ({} runs)".format(args.runs))

    ax_conv_data.legend()

    # FFT
    title_fft = "FFT ({}x{})".format(img.shape[0], img.shape[1])
    t_fft_avg, t_fft_med, t_fft_std = zip(*results_fft)

    if args.image:
        axes[1, 0].imshow(img_fft_filtered, cmap='gray')
        post_title = ", {w}x{w} kernel".format(w=2 * radii[-1] + 1)
        axes[1, 0].set_title(title_fft + post_title)

    ax_fft_data = axes[1, 1] if args.image else axes[1]
    ax_fft_data.set_title(title_fft)
    ax_fft_data.set_xlabel("Kernel Radius (px)")
    ax_fft_data.set_ylabel("Execution Time (ms)")

    ax_fft_data.plot(radii, t_fft_avg,
                     label="average ({} runs)".format(args.runs))

    ax_fft_data.errorbar(radii, t_fft_med, t_fft_std,
                         capsize=5,
                         elinewidth=1,
                         label="median and stddev ({} runs)".format(args.runs))

    ax_fft_data.legend()

    # show plots
    plt.tight_layout()
    plt.show()
| [
"cv2.getTickCount",
"numpy.iinfo",
"numpy.random.randint",
"cv2.dft",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like",
"cv2.magnitude",
"cv2.filter2D",
"cv2.getTickFrequency",
"numpy.std",
"cv2.copyMakeBorder",
"numpy.finfo",
"cv2.split",
"matplotlib.pyplot.subplots",
"matplotlib.py... | [((217, 322), 'collections.OrderedDict', 'OrderedDict', (["[('uint8', np.uint8), ('uint16', np.uint8), ('uint32', np.uint8), ('uint64',\n np.uint8)]"], {}), "([('uint8', np.uint8), ('uint16', np.uint8), ('uint32', np.uint8\n ), ('uint64', np.uint8)])\n", (228, 322), False, 'from collections import OrderedDict\n'), ((662, 694), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (675, 694), True, 'import numpy as np\n'), ((816, 849), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.floating'], {}), '(dtype, np.floating)\n', (829, 849), True, 'import numpy as np\n'), ((1103, 1131), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (1114, 1131), True, 'import cv2 as cv\n'), ((1730, 1843), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['mat', 'row_pad_top', 'row_pad_bottom', 'col_pad_left', 'col_pad_right', 'cv.BORDER_CONSTANT'], {'value': '(0)'}), '(mat, row_pad_top, row_pad_bottom, col_pad_left,\n col_pad_right, cv.BORDER_CONSTANT, value=0)\n', (1747, 1843), True, 'import cv2 as cv\n'), ((2158, 2194), 'cv2.dft', 'cv.dft', (['mat_complex'], {'dst': 'mat_complex'}), '(mat_complex, dst=mat_complex)\n', (2164, 2194), True, 'import cv2 as cv\n'), ((2657, 2677), 'cv2.split', 'cv.split', (['kernel_fft'], {}), '(kernel_fft)\n', (2665, 2677), True, 'import cv2 as cv\n'), ((2701, 2735), 'cv2.magnitude', 'cv.magnitude', (['kernel_re', 'kernel_im'], {}), '(kernel_re, kernel_im)\n', (2713, 2735), True, 'import cv2 as cv\n'), ((10918, 10936), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10934, 10936), True, 'import matplotlib.pyplot as plt\n'), ((10942, 10952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10950, 10952), True, 'import matplotlib.pyplot as plt\n'), ((751, 807), 'numpy.random.randint', 'np.random.randint', (['(dtype_max + 1)'], {'size': 'size', 'dtype': 'dtype'}), '(dtype_max + 1, size=size, dtype=dtype)\n', (768, 807), True, 
'import numpy as np\n'), ((1395, 1430), 'cv2.getOptimalDFTSize', 'cv.getOptimalDFTSize', (['initial_width'], {}), '(initial_width)\n', (1415, 1430), True, 'import cv2 as cv\n'), ((1448, 1484), 'cv2.getOptimalDFTSize', 'cv.getOptimalDFTSize', (['initial_height'], {}), '(initial_height)\n', (1468, 1484), True, 'import cv2 as cv\n'), ((2265, 2298), 'cv2.dft', 'cv.dft', (['mat'], {'flags': 'cv.DFT_INVERSE'}), '(mat, flags=cv.DFT_INVERSE)\n', (2271, 2298), True, 'import cv2 as cv\n'), ((3250, 3267), 'cv2.getTickCount', 'cv.getTickCount', ([], {}), '()\n', (3265, 3267), True, 'import cv2 as cv\n'), ((3425, 3436), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (3431, 3436), True, 'import numpy as np\n'), ((3534, 3591), 'argparse.RawTextHelpFormatter', 'argparse.RawTextHelpFormatter', (['prog'], {'max_help_position': '(80)'}), '(prog, max_help_position=80)\n', (3563, 3591), False, 'import argparse\n'), ((7763, 7811), 'cv2.imread', 'cv.imread', (['args.image'], {'flags': 'cv.IMREAD_GRAYSCALE'}), '(args.image, flags=cv.IMREAD_GRAYSCALE)\n', (7772, 7811), True, 'import cv2 as cv\n'), ((8554, 8599), 'cv2.getGaussianKernel', 'cv.getGaussianKernel', (['w', 'DEFAULT_KERNEL_SIGMA'], {}), '(w, DEFAULT_KERNEL_SIGMA)\n', (8574, 8599), True, 'import cv2 as cv\n'), ((9021, 9039), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (9033, 9039), True, 'import matplotlib.pyplot as plt\n'), ((9070, 9088), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (9082, 9088), True, 'import matplotlib.pyplot as plt\n'), ((716, 731), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (724, 731), True, 'import numpy as np\n'), ((871, 886), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (879, 886), True, 'import numpy as np\n'), ((2114, 2150), 'numpy.zeros_like', 'np.zeros_like', (['mat'], {'dtype': 'np.float32'}), '(mat, dtype=np.float32)\n', (2127, 2150), True, 'import numpy as np\n'), ((6692, 6703), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (6700, 6703), False, 'import sys\n'), ((7411, 7422), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7419, 7422), False, 'import sys\n'), ((7956, 7967), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7964, 7967), False, 'import sys\n'), ((906, 950), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': 'dtype_max', 'size': 'size'}), '(high=dtype_max, size=size)\n', (923, 950), True, 'import numpy as np\n'), ((3349, 3370), 'cv2.getTickFrequency', 'cv.getTickFrequency', ([], {}), '()\n', (3368, 3370), True, 'import cv2 as cv\n'), ((7352, 7378), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (7368, 7378), False, 'import os\n'), ((3316, 3333), 'cv2.getTickCount', 'cv.getTickCount', ([], {}), '()\n', (3331, 3333), True, 'import cv2 as cv\n')] |
"""Test charting functionality"""
import itertools
import platform
import matplotlib.pyplot as plt
import numpy as np
import pytest
import pyvista
from pyvista import examples
from pyvista.plotting import charts, system_supports_plotting
# Skip markers: texture downloads fail on macOS CI runners, and chart
# rendering requires a display capable of plotting.
skip_mac = pytest.mark.skipif(platform.system() == 'Darwin',
                              reason='MacOS CI fails when downloading examples')
skip_no_plotting = pytest.mark.skipif(
    not system_supports_plotting(),
    reason="Test requires system to support plotting"
)

# skip all tests if VTK<9.1.0
if pyvista.vtk_version_info < (9, 1):
    pytestmark = pytest.mark.skip
def vtk_array_to_tuple(arr):
    """Convert a VTK array-like object to a plain tuple of its values."""
    values = []
    for idx in range(arr.GetNumberOfValues()):
        values.append(arr.GetValue(idx))
    return tuple(values)
def to_vtk_scientific(val):
    """Convert a Python scientific-notation string to VTK's flavor.

    Leading zeros are stripped from the exponent; a zero exponent is
    dropped entirely (only the mantissa remains).
    """
    mantissa, suffix = val.split('e')
    sign = suffix[0]
    exp = suffix[1:].lstrip("0")
    if exp == "":
        # exponent was all zeros -> drop the "e..." part altogether
        return mantissa
    return mantissa + "e" + sign + exp
@pytest.fixture
def pl():
    """Plotter fixture with a fixed window size and white background."""
    p = pyvista.Plotter(window_size=(600, 600))
    p.background_color = 'w'
    return p
@pytest.fixture
def chart_2d():
    """Empty 2D chart fixture."""
    return pyvista.Chart2D()
@pytest.fixture
def chart_box():
    """Box chart fixture built from a single three-value dataset."""
    return pyvista.ChartBox([[1, 2, 3]])
@pytest.fixture
def chart_pie():
    """Pie chart fixture with three slices."""
    return pyvista.ChartPie([1, 2, 3])
@pytest.fixture
def chart_mpl():
    """Matplotlib-backed chart fixture wrapping a simple line figure."""
    f, ax = plt.subplots()
    ax.plot([0, 1, 2], [3, 1, 2])
    return pyvista.ChartMPL(f)
@pytest.fixture
def line_plot_2d(chart_2d):
    """Line plot fixture attached to the 2D chart fixture."""
    return chart_2d.line([0, 1, 2], [3, 1, 2])
@pytest.fixture
def scatter_plot_2d(chart_2d):
    """Scatter plot fixture attached to the 2D chart fixture."""
    return chart_2d.scatter([0, 1, 2], [3, 1, 2])
@pytest.fixture
def area_plot(chart_2d):
    """Area plot fixture attached to the 2D chart fixture."""
    return chart_2d.area([0, 1, 2], [2, 1, 3], [0, 2, 0])
@pytest.fixture
def bar_plot(chart_2d):
    """Bar plot fixture with two series attached to the 2D chart fixture."""
    return chart_2d.bar([0, 1, 2], [[2, 1, 3], [1, 2, 0]])
@pytest.fixture
def stack_plot(chart_2d):
    """Stack plot fixture with two series attached to the 2D chart fixture."""
    return chart_2d.stack([0, 1, 2], [[2, 1, 3], [1, 2, 0]])
@pytest.fixture
def box_plot(chart_box):
    """Underlying plot of the box chart fixture."""
    return chart_box.plot
@pytest.fixture
def pie_plot(chart_pie):
    """Underlying plot of the pie chart fixture."""
    return chart_pie.plot
def test_pen():
    """Check Pen construction and the color/width/style property wrappers
    against the underlying VTK getters."""
    c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
    w_thin, w_thick = 2, 10
    s_dash, s_dot, s_inv = "--", ":", "|"
    # s_inv must stay an *invalid* style for the ValueError check below
    assert s_inv not in charts.Pen.LINE_STYLES, "New line styles added? Change this test."

    # Test constructor arguments
    pen = charts.Pen(color=c_red, width=w_thin, style=s_dash)
    assert np.allclose(pen.color, c_red)
    assert np.isclose(pen.width, w_thin)
    assert pen.style == s_dash

    # Test properties
    pen.color = c_blue
    # VTK returns RGB via GetColorF and the alpha separately via GetOpacity
    color = [0.0, 0.0, 0.0]
    pen.GetColorF(color)
    color.append(pen.GetOpacity() / 255)
    assert np.allclose(pen.color, c_blue)
    assert np.allclose(color, c_blue)

    pen.width = w_thick
    assert np.isclose(pen.width, w_thick)
    assert np.isclose(pen.GetWidth(), w_thick)

    pen.style = s_dot
    assert pen.style == s_dot
    assert pen.GetLineType() == charts.Pen.LINE_STYLES[s_dot]["id"]
    with pytest.raises(ValueError):
        pen.style = s_inv
def test_wrapping():
    """Wrapping an existing vtkPen must share state with the VTK object."""
    new_width = 5

    # Test wrapping of VTK Pen object
    vtk_pen = pyvista._vtk.vtkPen()
    wrapped_pen = charts.Pen(_wrap=vtk_pen)
    assert wrapped_pen.__this__ == vtk_pen.__this__
    assert wrapped_pen.width == vtk_pen.GetWidth()
    # Mutating through the wrapper must be visible on the wrapped object.
    wrapped_pen.width = new_width
    assert wrapped_pen.width == vtk_pen.GetWidth()
    assert vtk_pen.GetWidth() == new_width
@skip_mac
def test_brush():
    """Check Brush construction and the color/texture property wrappers
    against the underlying VTK getters."""
    c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
    t_masonry = examples.download_masonry_texture()
    t_puppy = examples.download_puppy_texture()

    # Test constructor arguments
    brush = charts.Brush(color=c_red, texture=t_masonry)
    assert np.allclose(brush.color, c_red)
    assert np.allclose(brush.texture.to_array(), t_masonry.to_array())

    # Test properties
    brush.color = c_blue
    color = [0.0, 0.0, 0.0, 0.0]
    brush.GetColorF(color)
    assert np.allclose(brush.color, c_blue)
    assert np.allclose(color, c_blue)

    brush.texture = t_puppy
    t = pyvista.Texture(brush.GetTexture())
    assert np.allclose(brush.texture.to_array(), t_puppy.to_array())
    assert np.allclose(t.to_array(), t_puppy.to_array())

    # texture property flags map onto VTK's texture-properties bitmask
    brush.texture_interpolate = False
    assert not brush.texture_interpolate
    NEAREST = 0x01
    assert brush.GetTextureProperties() & NEAREST

    brush.texture_repeat = True
    assert brush.texture_repeat
    REPEAT = 0x08
    assert brush.GetTextureProperties() & REPEAT
@skip_no_plotting
def test_axis(chart_2d):
    """Exercise the Axis wrapper: label, range/behavior, margin, log scale,
    grid/visibility toggles and tick configuration, comparing each wrapper
    property against the underlying VTK getter."""
    l = "Y axis"
    r_fix, r_auto = [2, 5], None
    m = 50
    tc = 10
    tlabels = ["Foo", "Blub", "Spam"]
    tlocs, tlocs_large = [1, 5.5, 8], [5.2, 340, 9999.999]
    ts = 5
    tlo = 10

    # Test constructor arguments
    axis = charts.Axis(label=l, range=r_fix, grid=True)
    assert axis.label == l
    assert np.allclose(axis.range, r_fix) and axis.behavior == "fixed"
    assert axis.grid

    # Test properties, using the y axis of a 2D chart
    chart_2d.line([0, 1], [1, 10])
    chart_2d.show()
    axis = chart_2d.y_axis
    axis.label = l
    assert axis.label == l
    assert axis.GetTitle() == l
    axis.label_visible = False
    assert not axis.label_visible
    assert not axis.GetTitleVisible()
    # Setting range to None switches the axis to automatic scaling
    axis.range = r_auto
    assert axis.behavior == "auto"
    axis.range = r_fix
    r = [0.0, 0.0]
    axis.GetRange(r)
    assert np.allclose(axis.range, r_fix)
    assert np.allclose(r, r_fix)
    assert axis.behavior == "fixed"
    assert axis.GetBehavior() == charts.Axis.BEHAVIORS["fixed"]
    axis.behavior = "auto"
    assert axis.behavior == "auto"
    assert axis.GetBehavior() == charts.Axis.BEHAVIORS["auto"]
    with pytest.raises(ValueError):
        axis.behavior = "invalid"
    axis.margin = m
    assert axis.margin == m
    assert axis.GetMargins()[0] == m
    axis.log_scale = True # Log scale can be enabled for the currently drawn plot
    chart_2d.show() # We have to call show to update all chart properties (calls Update and Paint methods of chart/plot objects).
    assert axis.log_scale
    assert axis.GetLogScaleActive()
    axis.log_scale = False
    chart_2d.show()
    assert not axis.log_scale
    assert not axis.GetLogScaleActive()
    # TODO: following lines cause "vtkMath::Jacobi: Error extracting eigenfunctions" warning to be printed.
    # This is a VTK issue that will be fixed once PR (!8618) is merged.
    chart_2d.line([0, 1], [-10, 10]) # Plot for which log scale cannot be enabled
    axis.log_scale = True
    chart_2d.show()
    # Log scale must stay disabled for data containing non-positive values
    assert not axis.log_scale
    assert not axis.GetLogScaleActive()
    axis.grid = False
    assert not axis.grid
    assert not axis.GetGridVisible()
    axis.visible = False
    assert not axis.visible
    assert not axis.GetAxisVisible()
    axis.toggle()
    assert axis.visible
    assert axis.GetAxisVisible()
    # Tick count: None and negative values both restore the default
    tc0 = axis.tick_count
    axis.tick_count = tc
    assert axis.tick_count == tc
    assert axis.GetNumberOfTicks() == tc
    axis.tick_count = None
    assert axis.tick_count == tc0
    assert axis.GetNumberOfTicks() == tc0
    axis.tick_count = -1
    assert axis.tick_count == tc0
    assert axis.GetNumberOfTicks() == tc0
    tlocs0 = axis.tick_locations
    tlabels0 = axis.tick_labels
    axis.tick_locations = tlocs
    axis.tick_labels = tlabels
    assert np.allclose(axis.tick_locations, tlocs)
    assert np.allclose(axis.GetTickPositions(), tlocs)
    assert tuple(axis.tick_labels) == tuple(tlabels)
    assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(tlabels)
    # A format-string spec ("2f") switches labels to fixed notation
    axis.tick_labels = "2f"
    chart_2d.show()
    assert tuple(axis.tick_labels) == tuple(f"{loc:.2f}" for loc in tlocs)
    assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(f"{loc:.2f}" for loc in tlocs)
    assert axis.GetNotation() == charts.Axis.FIXED_NOTATION
    assert axis.GetPrecision() == 2
    # A "4e" spec switches labels to scientific notation (VTK's exponent style)
    axis.tick_labels = "4e"
    axis.tick_locations = tlocs_large # Add some more variety to labels
    chart_2d.show()
    assert tuple(axis.tick_labels) == tuple(to_vtk_scientific(f"{loc:.4e}") for loc in tlocs_large)
    assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(to_vtk_scientific(f"{loc:.4e}") for loc in tlocs_large)
    assert axis.GetNotation() == charts.Axis.SCIENTIFIC_NOTATION
    assert axis.GetPrecision() == 4
    # Resetting locations/labels to None restores the original ticks
    axis.tick_locations = None
    axis.tick_labels = None
    chart_2d.show()
    assert np.allclose(axis.tick_locations, tlocs0)
    assert np.allclose(axis.GetTickPositions(), tlocs0)
    assert tuple(axis.tick_labels) == tuple(tlabels0)
    assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(tlabels0)
    axis.tick_size = ts
    assert axis.tick_size == ts
    assert axis.GetTickLength() == ts
    axis.tick_labels_offset = tlo
    assert axis.tick_labels_offset == tlo
    assert axis.GetLabelOffset() == tlo
    axis.tick_labels_visible = False
    assert not axis.tick_labels_visible
    assert not axis.GetLabelsVisible()
    assert not axis.GetRangeLabelsVisible()
    axis.ticks_visible = False
    assert not axis.ticks_visible
    assert not axis.GetTicksVisible()
def test_axis_label_font_size(chart_2d):
    """Verify that axis title and tick label font sizes are applied to the
    underlying VTK text properties."""
    _ = chart_2d.line([0, 1, 2], [2, 1, 3])
    axis = chart_2d.x_axis
    size = 20
    # Axis title font size
    axis.label_size = size
    assert axis.label_size == size
    assert axis.GetTitleProperties().GetFontSize() == size
    # Tick label font size
    axis.tick_label_size = size
    assert axis.tick_label_size == size
    assert axis.GetLabelProperties().GetFontSize() == size
@skip_no_plotting
@pytest.mark.parametrize("chart_f", ("chart_2d", "chart_box", "chart_pie", "chart_mpl"))
def test_chart_common(pl, chart_f, request):
    # Test the common chart functionalities shared by every chart type:
    # scene/renderer wiring, size/loc validation, pixel geometry, hit
    # testing, border/background styling, visibility, title and legend.
    chart = request.getfixturevalue(chart_f)
    title = "Chart title"
    c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
    bw = 10  # border width
    bs = "--"  # border style (dashed)
    # Check scene and renderer properties: unset before the chart is added
    # to a plotter and shared with the plotter's chart collection afterwards.
    assert chart._scene is None
    assert chart._renderer is None
    pl.add_chart(chart)
    assert chart._scene is pl.renderer._charts._scene
    assert chart._renderer is pl.renderer and chart._renderer is pl.renderer._charts._renderer
    # Negative relative sizes/locations are rejected.
    with pytest.raises((AssertionError, ValueError)):
        chart.size = (-1, 1)
    with pytest.raises((AssertionError, ValueError)):
        chart.loc = (-1, 1)
    try:  # Try block for now as not all charts support a custom size and loc
        chart.size = (0.5, 0.5)
        chart.loc = (0.25, 0.25)
        assert chart.size == (0.5, 0.5)
        assert chart.loc == (0.25, 0.25)
    except ValueError:
        pass
    # Check geometry and resizing: _geometry is the chart's pixel rectangle
    # (x, y, width, height), derived from the relative size/loc and the
    # current window size.
    w, h = pl.window_size
    chart._render_event()
    assert chart._geometry == (chart.loc[0]*w, chart.loc[1]*h, chart.size[0]*w, chart.size[1]*h)
    w, h = pl.window_size = [200, 200]
    chart._render_event()
    assert chart._geometry == (chart.loc[0]*w, chart.loc[1]*h, chart.size[0]*w, chart.size[1]*h)
    # Check is_within: the chart's center lies inside its rectangle, while
    # points just past either edge do not.
    assert chart._is_within(((chart.loc[0]+chart.size[0]/2)*w, (chart.loc[1]+chart.size[1]/2)*h))
    assert not chart._is_within(((chart.loc[0]+chart.size[0]/2)*w, chart.loc[1]*h-5))
    assert not chart._is_within((chart.loc[0]*w-5, (chart.loc[1]+chart.size[1]/2)*h))
    assert not chart._is_within((chart.loc[0]*w-5, chart.loc[1]*h-5))
    # Border and background styling round-trips.
    chart.border_color = c_red
    assert np.allclose(chart.border_color, c_red)
    chart.border_width = bw
    assert chart.border_width == bw
    chart.border_style = bs
    assert chart.border_style == bs
    chart.background_color = c_blue
    assert np.allclose(chart.background_color, c_blue)
    # Check remaining properties and methods: visibility (with toggle),
    # title and legend visibility.
    chart.visible = False
    assert not chart.visible
    assert not chart.GetVisible()
    chart.toggle()
    assert chart.visible
    assert chart.GetVisible()
    chart.title = title
    assert chart.title == title
    chart.legend_visible = False
    assert not chart.legend_visible
@pytest.mark.parametrize("plot_f", ("line_plot_2d", "scatter_plot_2d", "area_plot", "bar_plot", "stack_plot", "box_plot", "pie_plot"))
def test_plot_common(plot_f, request):
# Test the common plot functionalities
plot = request.getfixturevalue(plot_f)
c = (1, 0, 1, 1)
w = 5
s = "-."
l = "Label"
plot.color = c
assert np.allclose(plot.color, c)
assert np.allclose(plot.brush.color, c)
if hasattr(plot, "GetPen"):
assert plot.pen.__this__ == plot.GetPen().__this__
if hasattr(plot, "GetBrush"):
assert plot.brush.__this__ == plot.GetBrush().__this__
plot.line_width = w
assert plot.pen.width == w
plot.line_style = s
assert plot.pen.style == s
plot.label = l
assert plot.label == l
assert plot.GetLabel() == l
plot.visible = False
assert not plot.visible
assert not plot.GetVisible()
plot.toggle()
assert plot.visible
assert plot.GetVisible()
@pytest.mark.parametrize("plot_f", ("bar_plot", "stack_plot", "box_plot", "pie_plot"))
def test_multicomp_plot_common(plot_f, request):
# Test the common multicomp plot functionalities
plot = request.getfixturevalue(plot_f)
cs = "spectrum"
cs_colors = [(0.0, 0.0, 0.0, 1.0),
(0.8941176470588236, 0.10196078431372549, 0.10980392156862745, 1.0),
(0.21568627450980393, 0.49411764705882355, 0.7215686274509804, 1.0),
(0.30196078431372547, 0.6862745098039216, 0.2901960784313726, 1.0),
(0.596078431372549, 0.3058823529411765, 0.6392156862745098, 1.0),
(1.0, 0.4980392156862745, 0.0, 1.0),
(0.6509803921568628, 0.33725490196078434, 0.1568627450980392, 1.0)]
colors = [(1, 0, 1, 1), (0, 1, 1, 1), (1, 1, 0, 1)]
labels = ["Foo", "Spam", "Bla"]
plot.color_scheme = cs
assert plot.color_scheme == cs
assert plot._color_series.GetColorScheme() == plot.COLOR_SCHEMES[cs]["id"]
assert np.allclose(plot.colors, cs_colors)
series_colors = [plot._from_c3ub(plot._color_series.GetColor(i)) for i in range(len(cs_colors))]
assert np.allclose(series_colors, cs_colors)
lookup_colors = [plot._lookup_table.GetTableValue(i) for i in range(len(cs_colors))]
assert np.allclose(lookup_colors, cs_colors)
assert np.allclose(plot.brush.color, cs_colors[0])
plot.colors = None
assert plot.color_scheme == plot.DEFAULT_COLOR_SCHEME
plot.colors = cs
assert plot.color_scheme == cs
plot.colors = colors
assert np.allclose(plot.colors, colors)
series_colors = [plot._from_c3ub(plot._color_series.GetColor(i)) for i in range(len(colors))]
assert np.allclose(series_colors, colors)
lookup_colors = [plot._lookup_table.GetTableValue(i) for i in range(len(colors))]
assert np.allclose(lookup_colors, colors)
assert np.allclose(plot.brush.color, colors[0])
plot.color = colors[1]
assert np.allclose(plot.color, colors[1])
assert np.allclose(plot.colors, [colors[1]])
assert np.allclose(plot.brush.color, colors[1])
plot.labels = labels
assert tuple(plot.labels) == tuple(labels)
assert plot.label == labels[0]
plot.labels = None
assert plot.labels == []
assert plot.label == ""
plot.label = labels[1]
assert tuple(plot.labels) == (labels[1],)
assert plot.label == labels[1]
plot.label = None
assert plot.labels == []
assert plot.label == ""
def test_lineplot2d(line_plot_2d):
    """Check LinePlot2D construction and data updates."""
    xs = [-2, -1, 0, 1, 2]
    ys = [4, 1, 0, -1, -4]
    color = (1, 0, 1, 1)
    width = 5
    style = "-."
    label = "Line"
    # The constructor should forward every argument to its matching property.
    plot = charts.LinePlot2D(xs, ys, color, width, style, label)
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.y, ys)
    assert np.allclose(plot.color, color)
    assert plot.line_width == width
    assert plot.line_style == style
    assert plot.label == label
    # Updating the fixture's data should be reflected by the x/y properties.
    line_plot_2d.update(xs, ys)
    assert np.allclose(line_plot_2d.x, xs)
    assert np.allclose(line_plot_2d.y, ys)
def test_scatterplot2d(scatter_plot_2d):
    """Check ScatterPlot2D construction, data updates and marker properties."""
    xs = [-2, -1, 0, 1, 2]
    ys = [4, 1, 0, -1, -4]
    color = (1, 0, 1, 1)
    size = 5
    style, invalid_style = "o", "^"
    label = "Scatter"
    assert invalid_style not in charts.ScatterPlot2D.MARKER_STYLES, "New marker styles added? Change this test."
    # The constructor should forward every argument to its matching property.
    plot = charts.ScatterPlot2D(xs, ys, color, size, style, label)
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.y, ys)
    assert np.allclose(plot.color, color)
    assert plot.marker_size == size
    assert plot.marker_style == style
    assert plot.label == label
    # Remaining properties, exercised on the fixture instance.
    scatter_plot_2d.update(xs, ys)
    assert np.allclose(scatter_plot_2d.x, xs)
    assert np.allclose(scatter_plot_2d.y, ys)
    scatter_plot_2d.marker_size = size
    assert scatter_plot_2d.marker_size == size
    assert scatter_plot_2d.GetMarkerSize() == size
    # A None marker style maps to the empty style.
    scatter_plot_2d.marker_style = None
    assert scatter_plot_2d.marker_style == ""
    scatter_plot_2d.marker_style = style
    assert scatter_plot_2d.marker_style == style
    assert scatter_plot_2d.GetMarkerStyle() == scatter_plot_2d.MARKER_STYLES[style]["id"]
    # Unknown marker styles are rejected.
    with pytest.raises(ValueError):
        scatter_plot_2d.marker_style = invalid_style
def test_areaplot(area_plot):
    """Check AreaPlot construction and data updates."""
    xs = [-2, -1, 0, 1, 2]
    lower = [4, 1, 0, -1, -4]
    upper = [-4, -2, 0, 2, 4]
    color = (1, 0, 1, 1)
    label = "Line"
    # The constructor should forward every argument to its matching property.
    plot = charts.AreaPlot(xs, lower, upper, color, label)
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.y1, lower)
    assert np.allclose(plot.y2, upper)
    assert np.allclose(plot.color, color)
    assert plot.label == label
    # Updating the fixture's data should be reflected by the x/y1/y2 properties.
    area_plot.update(xs, lower, upper)
    assert np.allclose(area_plot.x, xs)
    assert np.allclose(area_plot.y1, lower)
    assert np.allclose(area_plot.y2, upper)
def test_barplot(bar_plot):
    """Check both BarPlot constructors (single and multi component),
    argument consistency validation, data updates and orientation."""
    xs = [0, 1, 2]
    ys = [[1, 2, 3], [2, 1, 0], [1, 1, 1]]
    colors = [(1, 0, 1, 1), (1, 1, 0, 1), (0, 1, 1, 1)]
    orientation, bad_orientation = "H", "I"
    labels = ["Foo", "Spam", "Bla"]
    assert bad_orientation not in charts.BarPlot.ORIENTATIONS, "New orientations added? Change this test."
    # Multi component constructor
    plot = charts.BarPlot(xs, ys, colors, orientation, labels)
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.y, ys)
    assert np.allclose(plot.colors, colors)
    assert plot.orientation == orientation
    assert plot.labels == labels
    # Single component constructor
    plot = charts.BarPlot(xs, ys[0], colors[0], orientation, labels[0])
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.y, ys[0])
    assert np.allclose(plot.color, colors[0])
    assert plot.orientation == orientation
    assert plot.label == labels[0]
    # Mixing single and multi component arguments should fail...
    with pytest.raises(ValueError):
        charts.BarPlot(xs, ys, colors[0], orientation, labels)
    # ...except for multi component data with a single label, which is valid:
    # charts.BarPlot(xs, ys, colors, orientation, labels[0])
    with pytest.raises(ValueError):
        charts.BarPlot(xs, ys[0], colors, orientation, labels[0])
    with pytest.raises(ValueError):
        charts.BarPlot(xs, ys[0], colors[0], orientation, labels)
    # Remaining properties, exercised on the fixture instance.
    bar_plot.update(xs, ys)
    assert np.allclose(bar_plot.x, xs)
    assert np.allclose(bar_plot.y, ys)
    bar_plot.orientation = orientation
    assert bar_plot.orientation == orientation
    assert bar_plot.GetOrientation() == bar_plot.ORIENTATIONS[orientation]
    # Unknown orientations are rejected.
    with pytest.raises(ValueError):
        bar_plot.orientation = bad_orientation
def test_stackplot(stack_plot):
    """Check both StackPlot constructors (single and multi component),
    argument consistency validation and data updates."""
    xs = [0, 1, 2]
    stacks = [[1, 2, 3], [2, 1, 0], [1, 1, 1]]
    colors = [(1, 0, 1, 1), (1, 1, 0, 1), (0, 1, 1, 1)]
    labels = ["Foo", "Spam", "Bla"]
    # Multi component constructor
    plot = charts.StackPlot(xs, stacks, colors, labels)
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.ys, stacks)
    assert np.allclose(plot.colors, colors)
    assert plot.labels == labels
    # Single component constructor
    plot = charts.StackPlot(xs, stacks[0], colors[0], labels[0])
    assert np.allclose(plot.x, xs)
    assert np.allclose(plot.ys, stacks[0])
    assert np.allclose(plot.color, colors[0])
    assert plot.label == labels[0]
    # Mixing single and multi component arguments should fail...
    with pytest.raises(ValueError):
        charts.StackPlot(xs, stacks, colors[0], labels)
    # ...except for multi component data with a single label, which is valid:
    # charts.StackPlot(xs, stacks, colors, labels[0])
    with pytest.raises(ValueError):
        charts.StackPlot(xs, stacks[0], colors, labels[0])
    with pytest.raises(ValueError):
        charts.StackPlot(xs, stacks[0], colors[0], labels)
    # Remaining properties, exercised on the fixture instance.
    stack_plot.update(xs, stacks)
    assert np.allclose(stack_plot.x, xs)
    assert np.allclose(stack_plot.ys, stacks)
@skip_no_plotting
def test_chart_2d(pl, chart_2d):
    """Exercise Chart2D: constructor, geometry/resizing, format-string
    parsing, every plotting method, plot bookkeeping and axis properties.

    Fix: ``colors`` was an ``itertools.chain`` iterator, which is single
    pass — it was exhausted after the first ``(m, l)`` combination, so the
    nested parse-format loops silently skipped every other marker/line
    style pair. It is now materialized as a list so all combinations are
    actually tested.
    """
    size = (0.5, 0.5)
    loc = (0.25, 0.25)
    lx = "X label"
    ly = "Y label"
    rx = [0, 5]  # x axis range
    ry = [0, 1]  # y axis range
    x = np.arange(11)-5
    y = x**2
    ys = [np.sin(x), np.cos(x), np.tanh(x)]
    col = (1, 0, 1, 1)
    cs = "citrus"  # color scheme
    sz = 5  # marker size
    ms = "d"  # marker style
    w = 10  # line width
    ls = "-."  # line style
    ori = "V"  # bar plot orientation
    # Test constructor
    chart = pyvista.Chart2D(size, loc, lx, ly, False)
    assert chart.size == size
    assert chart.loc == loc
    assert chart.x_label == lx
    assert chart.y_label == ly
    assert not chart.grid
    # Test geometry and resizing: the pixel rectangle follows the relative
    # size/loc and the render window size.
    pl.add_chart(chart)
    r_w, r_h = chart._renderer.GetSize()
    pl.show(auto_close=False)
    assert np.allclose(chart._geometry, (loc[0]*r_w, loc[1]*r_h, size[0]*r_w, size[1]*r_h))
    pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
    pl.show()  # This will also call chart._resize
    assert np.allclose(chart._geometry, (loc[0]*r_w/2, loc[1]*r_h/2, size[0]*r_w/2, size[1]*r_h/2))
    # Test parse_format: every marker style, line style and color should be
    # recognized regardless of the order in which they appear in the format
    # string. NOTE: materialized as a list (not a chain iterator) so the
    # inner loop is re-runnable for every (m, l) combination.
    colors = list(itertools.chain(pyvista.hexcolors, pyvista.color_char_to_word, ["#fa09b6", ""]))
    for m in charts.ScatterPlot2D.MARKER_STYLES:
        for l in charts.Pen.LINE_STYLES:
            for c in colors:
                cp = "b" if c == "" else c  # an empty color string defaults to blue
                assert (m, l, cp) == chart_2d._parse_format(m + l + c)
                assert (m, l, cp) == chart_2d._parse_format(m + c + l)
                assert (m, l, cp) == chart_2d._parse_format(l + m + c)
                assert (m, l, cp) == chart_2d._parse_format(l + c + m)
                assert (m, l, cp) == chart_2d._parse_format(c + m + l)
                assert (m, l, cp) == chart_2d._parse_format(c + l + m)
    # Test plotting methods: plot() returns a (scatter, line) pair whose
    # entries are None when the format string omits that component.
    s, l = chart_2d.plot(x, y, "")
    assert s is None and l is None
    assert len([*chart_2d.plots()]) == 0
    s, l = chart_2d.plot(y, "-")
    assert s is None and l is not None
    assert l in chart_2d.plots("line")
    chart_2d.remove_plot(l)
    assert len([*chart_2d.plots()]) == 0
    s, l = chart_2d.plot(y, "x")
    assert s is not None and l is None
    assert s in chart_2d.plots("scatter")
    chart_2d.clear("scatter")
    assert len([*chart_2d.plots()]) == 0
    s, l = chart_2d.plot(x, y, "x-")
    assert s is not None and l is not None
    assert s in chart_2d.plots("scatter") and l in chart_2d.plots("line")
    chart_2d.plot(x, y, "x-")  # Check clearing of multiple plots (of the same type)
    chart_2d.clear()
    assert len([*chart_2d.plots()]) == 0
    # Each dedicated plotting method forwards its arguments to the plot.
    s = chart_2d.scatter(x, y, col, sz, ms, lx)
    assert np.allclose(s.x, x)
    assert np.allclose(s.y, y)
    assert np.allclose(s.color, col)
    assert s.marker_size == sz
    assert s.marker_style == ms
    assert s.label == lx
    assert s in chart_2d.plots("scatter")
    assert chart_2d.GetPlotIndex(s) >= 0
    l = chart_2d.line(x, y, col, w, ls, lx)
    assert np.allclose(l.x, x)
    assert np.allclose(l.y, y)
    assert np.allclose(l.color, col)
    assert l.line_width == w
    assert l.line_style == ls
    assert l.label == lx
    assert l in chart_2d.plots("line")
    assert chart_2d.GetPlotIndex(l) >= 0
    a = chart_2d.area(x, -y, y, col, lx)
    assert np.allclose(a.x, x)
    assert np.allclose(a.y1, -y)
    assert np.allclose(a.y2, y)
    assert np.allclose(a.color, col)
    assert a.label == lx
    assert a in chart_2d.plots("area")
    assert chart_2d.GetPlotIndex(a) >= 0
    b = chart_2d.bar(x, -y, col, ori, lx)
    assert np.allclose(b.x, x)
    assert np.allclose(b.y, -y)
    assert np.allclose(b.color, col)
    assert b.orientation == ori
    assert b.label == lx
    assert b in chart_2d.plots("bar")
    assert chart_2d.GetPlotIndex(b) >= 0
    s = chart_2d.stack(x, ys, cs, [lx, ly])
    assert np.allclose(s.x, x)
    assert np.allclose(s.ys, ys)
    assert s.color_scheme == cs
    assert tuple(s.labels) == (lx, ly)
    assert s in chart_2d.plots("stack")
    assert chart_2d.GetPlotIndex(s) >= 0
    # Unknown plot types are rejected by plots() and clear().
    inv_type = "blub"
    with pytest.raises(KeyError):
        next(chart_2d.plots(inv_type))
    with pytest.raises(KeyError):
        chart_2d.clear(inv_type)
    assert len([*chart_2d.plots()]) == 5
    chart_2d.clear()
    assert len([*chart_2d.plots()]) == 0
    # Removing a plot that is no longer present fails.
    with pytest.raises(ValueError):
        chart_2d.remove_plot(s)
    # Check remaining properties: axis wrappers are the same VTK objects,
    # labels/ranges propagate to the axes, and hide_axes() hides everything.
    assert chart_2d.x_axis.__this__ == chart_2d.GetAxis(charts.Axis.BOTTOM).__this__
    assert chart_2d.y_axis.__this__ == chart_2d.GetAxis(charts.Axis.LEFT).__this__
    chart_2d.x_label = lx
    assert chart_2d.x_label == lx
    assert chart_2d.x_axis.label == lx
    chart_2d.y_label = ly
    assert chart_2d.y_label == ly
    assert chart_2d.y_axis.label == ly
    chart_2d.x_range = rx
    assert np.allclose(chart_2d.x_range, rx)
    assert np.allclose(chart_2d.x_axis.range, rx)
    chart_2d.y_range = ry
    assert np.allclose(chart_2d.y_range, ry)
    assert np.allclose(chart_2d.y_axis.range, ry)
    chart_2d.grid = True
    assert chart_2d.grid
    assert chart_2d.x_axis.grid and chart_2d.y_axis.grid
    chart_2d.hide_axes()
    for axis in (chart_2d.x_axis, chart_2d.y_axis):
        assert not (axis.visible or axis.label_visible or axis.ticks_visible or axis.tick_labels_visible or axis.grid)
@skip_no_plotting
def test_chart_box(pl, chart_box, box_plot):
    """Check ChartBox construction, full-window geometry and BoxPlot
    data/statistics updates."""
    samples = [[0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 5, 5, 6]]
    quartiles = [np.quantile(d, [0.0, 0.25, 0.5, 0.75, 1.0]) for d in samples]
    scheme = "wild_flower"
    data_labels = ["Datalabel"]
    # The constructor should forward every argument to its matching property.
    chart = pyvista.ChartBox(samples, scheme, data_labels)
    assert np.allclose(chart.plot.data, samples)
    assert chart.plot.color_scheme == scheme
    assert tuple(chart.plot.labels) == tuple(data_labels)
    # A ChartBox always spans the full renderer, so its geometry tracks the
    # render window size.
    pl.add_chart(chart)
    r_w, r_h = chart._renderer.GetSize()
    pl.show(auto_close=False)
    assert np.allclose(chart._geometry, (0, 0, r_w, r_h))
    pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
    pl.show()  # This will also call chart._resize
    assert np.allclose(chart._geometry, (0, 0, r_w/2, r_h/2))
    # Remaining properties, exercised on the fixture instances.
    assert chart_box.loc == (0, 0)
    assert chart_box.size == (1, 1)
    assert chart_box.plot.__this__ == chart_box.GetPlot(0).__this__
    # Updating the plot data also refreshes the derived box statistics.
    box_plot.update(samples)
    assert np.allclose(box_plot.data, samples)
    assert np.allclose(box_plot.stats, quartiles)
@skip_no_plotting
def test_chart_pie(pl, chart_pie, pie_plot):
    """Check ChartPie construction, full-window geometry and PiePlot data
    updates."""
    segments = [3, 4, 5]
    scheme = "wild_flower"
    segment_labels = ["Tic", "Tac", "Toe"]
    # The constructor should forward every argument to its matching property.
    chart = pyvista.ChartPie(segments, scheme, segment_labels)
    assert np.allclose(chart.plot.data, segments)
    assert chart.plot.color_scheme == scheme
    assert tuple(chart.plot.labels) == tuple(segment_labels)
    # A ChartPie always spans the full renderer, so its geometry tracks the
    # render window size.
    pl.add_chart(chart)
    r_w, r_h = chart._renderer.GetSize()
    pl.show(auto_close=False)
    assert np.allclose(chart._geometry, (0, 0, r_w, r_h))
    pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
    pl.show()  # This will also call chart._resize
    assert np.allclose(chart._geometry, (0, 0, r_w/2, r_h/2))
    # Remaining properties, exercised on the fixture instances.
    assert chart_pie.loc == (0, 0)
    assert chart_pie.size == (1, 1)
    assert chart_pie.plot.__this__ == chart_pie.GetPlot(0).__this__
    pie_plot.update(segments)
    assert np.allclose(pie_plot.data, segments)
@skip_no_plotting
def test_chart_mpl(pl, chart_mpl):
    """Check ChartMPL construction, geometry/resizing (chart rectangle,
    position and matplotlib canvas) and position validation."""
    rel_size = (0.5, 0.5)
    rel_loc = (0.25, 0.25)
    # Constructor
    fig, ax = plt.subplots()
    chart = pyvista.ChartMPL(fig, rel_size, rel_loc)
    assert chart.size == rel_size
    assert chart.loc == rel_loc
    # Geometry and resizing: the chart rectangle and the matplotlib canvas
    # both follow the render window size.
    pl.add_chart(chart)
    r_w, r_h = chart._renderer.GetSize()
    pl.show(auto_close=False)
    assert np.allclose(chart._geometry, (rel_loc[0]*r_w, rel_loc[1]*r_h, rel_size[0]*r_w, rel_size[1]*r_h))
    assert np.allclose(chart.position, (rel_loc[0]*r_w, rel_loc[1]*r_h))
    assert np.allclose(chart._canvas.get_width_height(), (rel_size[0]*r_w, rel_size[1]*r_h))
    pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
    pl.show()  # This will also call chart._resize
    assert np.allclose(chart._geometry, (rel_loc[0]*r_w/2, rel_loc[1]*r_h/2, rel_size[0]*r_w/2, rel_size[1]*r_h/2))
    assert np.allclose(chart.position, (rel_loc[0]*r_w/2, rel_loc[1]*r_h/2))
    assert np.allclose(chart._canvas.get_width_height(), (rel_size[0]*r_w/2, rel_size[1]*r_h/2))
    # Setting a position that is not a 2-tuple should fail.
    with pytest.raises(ValueError, match="must be length 2"):
        chart.position = (1, 2, 3)
@skip_no_plotting
def test_charts(pl):
    """Exercise the plotter's chart collection: adding charts, toggling
    per-chart interaction, removing charts (by index and by reference)
    and deep cleaning."""
    win_size = pl.window_size
    top_left = pyvista.Chart2D(size=(0.5, 0.5), loc=(0, 0.5))
    bottom_right = pyvista.Chart2D(size=(0.5, 0.5), loc=(0.5, 0))
    # Test add_chart: the chart shares the plotter's renderer and scene.
    pl.add_chart(top_left)
    assert pl.renderers[0].__this__ == top_left._renderer.__this__
    assert pl.renderers[0]._charts._scene.__this__ == top_left._scene.__this__
    pl.add_chart(bottom_right)
    assert len(pl.renderers[0]._charts) == 2
    # Test toggle_interaction
    pl.show(auto_close=False)  # We need to plot once to let the charts compute their true geometry
    assert not top_left.GetInteractive()
    assert not bottom_right.GetInteractive()
    # A click inside the bottom right chart enables interaction with that
    # chart only and returns its scene.
    assert pl.renderers[0]._charts.toggle_interaction((0.75*win_size[0], 0.25*win_size[1])) is bottom_right._scene
    assert not top_left.GetInteractive()
    assert bottom_right.GetInteractive()
    # A click outside every chart disables all interaction.
    assert pl.renderers[0]._charts.toggle_interaction((0, 0)) is None
    assert not top_left.GetInteractive()
    assert not bottom_right.GetInteractive()
    # Test remove_chart, both by index and by reference.
    pl.remove_chart(1)
    assert len(pl.renderers[0]._charts) == 1
    assert pl.renderers[0]._charts[0] == top_left
    assert top_left in pl.renderers[0]._charts
    pl.remove_chart(top_left)
    assert len(pl.renderers[0]._charts) == 0
    # Test deep_clean: removes all charts and drops the shared scene.
    pl.add_chart(top_left, bottom_right)
    pl.deep_clean()
    assert len(pl.renderers[0]._charts) == 0
    assert pl.renderers[0]._charts._scene is None
@skip_no_plotting
def test_iren_context_style(pl):
    """Check that right clicking a chart switches the interactor to the
    "Context" style (bound to that chart's scene) and that right clicking
    elsewhere restores the previous style."""
    chart = pyvista.Chart2D(size=(0.5, 0.5), loc=(0.5, 0.5))
    win_size = pl.window_size
    pl.add_chart(chart)
    pl.show(auto_close=False)  # We need to plot once to let the charts compute their true geometry
    # Remember the interactor style in effect before any chart interaction.
    style = pl.iren._style
    style_class = pl.iren._style_class
    # Simulate right click on the chart:
    pl.iren._mouse_right_button_press(int(0.75*win_size[0]), int(0.75*win_size[1]))
    assert chart.GetInteractive()
    assert pl.iren._style == "Context"
    assert pl.iren._style_class == pl.iren._context_style
    assert pl.iren._context_style.GetScene().__this__ == chart._scene.__this__
    # Simulate right click outside the chart:
    pl.iren._mouse_right_button_press(0, 0)
    assert not chart.GetInteractive()
    assert pl.iren._style == style
    assert pl.iren._style_class == style_class
    assert pl.iren._context_style.GetScene() is None
def test_get_background_texture(chart_2d):
    """The background_texture property should round-trip the assigned texture."""
    t_puppy = examples.download_puppy_texture()
    # BUG FIX: removed a stray no-op bare `chart_2d` expression statement
    # that was left over between the download and the assignment.
    chart_2d.background_texture = t_puppy
    assert chart_2d.background_texture == t_puppy
| [
"numpy.allclose",
"numpy.isclose",
"numpy.sin",
"numpy.arange",
"pytest.mark.parametrize",
"pyvista.Chart2D",
"pyvista.plotting.charts.BarPlot",
"pyvista.plotting.charts.Axis",
"pyvista.Plotter",
"pyvista._vtk.vtkPen",
"pyvista.examples.download_masonry_texture",
"pyvista.plotting.charts.Stack... | [((9497, 9588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chart_f"""', "('chart_2d', 'chart_box', 'chart_pie', 'chart_mpl')"], {}), "('chart_f', ('chart_2d', 'chart_box', 'chart_pie',\n 'chart_mpl'))\n", (9520, 9588), False, 'import pytest\n'), ((11874, 12011), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""plot_f"""', "('line_plot_2d', 'scatter_plot_2d', 'area_plot', 'bar_plot', 'stack_plot',\n 'box_plot', 'pie_plot')"], {}), "('plot_f', ('line_plot_2d', 'scatter_plot_2d',\n 'area_plot', 'bar_plot', 'stack_plot', 'box_plot', 'pie_plot'))\n", (11897, 12011), False, 'import pytest\n'), ((12835, 12924), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""plot_f"""', "('bar_plot', 'stack_plot', 'box_plot', 'pie_plot')"], {}), "('plot_f', ('bar_plot', 'stack_plot', 'box_plot',\n 'pie_plot'))\n", (12858, 12924), False, 'import pytest\n'), ((1028, 1067), 'pyvista.Plotter', 'pyvista.Plotter', ([], {'window_size': '(600, 600)'}), '(window_size=(600, 600))\n', (1043, 1067), False, 'import pyvista\n'), ((1155, 1172), 'pyvista.Chart2D', 'pyvista.Chart2D', ([], {}), '()\n', (1170, 1172), False, 'import pyvista\n'), ((1219, 1248), 'pyvista.ChartBox', 'pyvista.ChartBox', (['[[1, 2, 3]]'], {}), '([[1, 2, 3]])\n', (1235, 1248), False, 'import pyvista\n'), ((1295, 1322), 'pyvista.ChartPie', 'pyvista.ChartPie', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1311, 1322), False, 'import pyvista\n'), ((1370, 1384), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1382, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1449), 'pyvista.ChartMPL', 'pyvista.ChartMPL', (['f'], {}), '(f)\n', (1446, 1449), False, 'import pyvista\n'), ((2357, 2408), 'pyvista.plotting.charts.Pen', 'charts.Pen', ([], {'color': 'c_red', 'width': 'w_thin', 'style': 's_dash'}), '(color=c_red, width=w_thin, style=s_dash)\n', (2367, 2408), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((2420, 
2449), 'numpy.allclose', 'np.allclose', (['pen.color', 'c_red'], {}), '(pen.color, c_red)\n', (2431, 2449), True, 'import numpy as np\n'), ((2461, 2490), 'numpy.isclose', 'np.isclose', (['pen.width', 'w_thin'], {}), '(pen.width, w_thin)\n', (2471, 2490), True, 'import numpy as np\n'), ((2673, 2703), 'numpy.allclose', 'np.allclose', (['pen.color', 'c_blue'], {}), '(pen.color, c_blue)\n', (2684, 2703), True, 'import numpy as np\n'), ((2715, 2741), 'numpy.allclose', 'np.allclose', (['color', 'c_blue'], {}), '(color, c_blue)\n', (2726, 2741), True, 'import numpy as np\n'), ((2778, 2808), 'numpy.isclose', 'np.isclose', (['pen.width', 'w_thick'], {}), '(pen.width, w_thick)\n', (2788, 2808), True, 'import numpy as np\n'), ((3127, 3148), 'pyvista._vtk.vtkPen', 'pyvista._vtk.vtkPen', ([], {}), '()\n', (3146, 3148), False, 'import pyvista\n'), ((3166, 3190), 'pyvista.plotting.charts.Pen', 'charts.Pen', ([], {'_wrap': 'vtkPen'}), '(_wrap=vtkPen)\n', (3176, 3190), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((3499, 3534), 'pyvista.examples.download_masonry_texture', 'examples.download_masonry_texture', ([], {}), '()\n', (3532, 3534), False, 'from pyvista import examples\n'), ((3549, 3582), 'pyvista.examples.download_puppy_texture', 'examples.download_puppy_texture', ([], {}), '()\n', (3580, 3582), False, 'from pyvista import examples\n'), ((3629, 3673), 'pyvista.plotting.charts.Brush', 'charts.Brush', ([], {'color': 'c_red', 'texture': 't_masonry'}), '(color=c_red, texture=t_masonry)\n', (3641, 3673), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((3685, 3716), 'numpy.allclose', 'np.allclose', (['brush.color', 'c_red'], {}), '(brush.color, c_red)\n', (3696, 3716), True, 'import numpy as np\n'), ((3907, 3939), 'numpy.allclose', 'np.allclose', (['brush.color', 'c_blue'], {}), '(brush.color, c_blue)\n', (3918, 3939), True, 'import numpy as np\n'), ((3951, 3977), 'numpy.allclose', 'np.allclose', (['color', 'c_blue'], {}), 
'(color, c_blue)\n', (3962, 3977), True, 'import numpy as np\n'), ((4743, 4787), 'pyvista.plotting.charts.Axis', 'charts.Axis', ([], {'label': 'l', 'range': 'r_fix', 'grid': '(True)'}), '(label=l, range=r_fix, grid=True)\n', (4754, 4787), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((5361, 5391), 'numpy.allclose', 'np.allclose', (['axis.range', 'r_fix'], {}), '(axis.range, r_fix)\n', (5372, 5391), True, 'import numpy as np\n'), ((5403, 5424), 'numpy.allclose', 'np.allclose', (['r', 'r_fix'], {}), '(r, r_fix)\n', (5414, 5424), True, 'import numpy as np\n'), ((7302, 7341), 'numpy.allclose', 'np.allclose', (['axis.tick_locations', 'tlocs'], {}), '(axis.tick_locations, tlocs)\n', (7313, 7341), True, 'import numpy as np\n'), ((8360, 8400), 'numpy.allclose', 'np.allclose', (['axis.tick_locations', 'tlocs0'], {}), '(axis.tick_locations, tlocs0)\n', (8371, 8400), True, 'import numpy as np\n'), ((11279, 11317), 'numpy.allclose', 'np.allclose', (['chart.border_color', 'c_red'], {}), '(chart.border_color, c_red)\n', (11290, 11317), True, 'import numpy as np\n'), ((11493, 11536), 'numpy.allclose', 'np.allclose', (['chart.background_color', 'c_blue'], {}), '(chart.background_color, c_blue)\n', (11504, 11536), True, 'import numpy as np\n'), ((12224, 12250), 'numpy.allclose', 'np.allclose', (['plot.color', 'c'], {}), '(plot.color, c)\n', (12235, 12250), True, 'import numpy as np\n'), ((12262, 12294), 'numpy.allclose', 'np.allclose', (['plot.brush.color', 'c'], {}), '(plot.brush.color, c)\n', (12273, 12294), True, 'import numpy as np\n'), ((13849, 13884), 'numpy.allclose', 'np.allclose', (['plot.colors', 'cs_colors'], {}), '(plot.colors, cs_colors)\n', (13860, 13884), True, 'import numpy as np\n'), ((13997, 14034), 'numpy.allclose', 'np.allclose', (['series_colors', 'cs_colors'], {}), '(series_colors, cs_colors)\n', (14008, 14034), True, 'import numpy as np\n'), ((14135, 14172), 'numpy.allclose', 'np.allclose', (['lookup_colors', 'cs_colors'], {}), 
'(lookup_colors, cs_colors)\n', (14146, 14172), True, 'import numpy as np\n'), ((14184, 14227), 'numpy.allclose', 'np.allclose', (['plot.brush.color', 'cs_colors[0]'], {}), '(plot.brush.color, cs_colors[0])\n', (14195, 14227), True, 'import numpy as np\n'), ((14402, 14434), 'numpy.allclose', 'np.allclose', (['plot.colors', 'colors'], {}), '(plot.colors, colors)\n', (14413, 14434), True, 'import numpy as np\n'), ((14544, 14578), 'numpy.allclose', 'np.allclose', (['series_colors', 'colors'], {}), '(series_colors, colors)\n', (14555, 14578), True, 'import numpy as np\n'), ((14676, 14710), 'numpy.allclose', 'np.allclose', (['lookup_colors', 'colors'], {}), '(lookup_colors, colors)\n', (14687, 14710), True, 'import numpy as np\n'), ((14722, 14762), 'numpy.allclose', 'np.allclose', (['plot.brush.color', 'colors[0]'], {}), '(plot.brush.color, colors[0])\n', (14733, 14762), True, 'import numpy as np\n'), ((14802, 14836), 'numpy.allclose', 'np.allclose', (['plot.color', 'colors[1]'], {}), '(plot.color, colors[1])\n', (14813, 14836), True, 'import numpy as np\n'), ((14848, 14885), 'numpy.allclose', 'np.allclose', (['plot.colors', '[colors[1]]'], {}), '(plot.colors, [colors[1]])\n', (14859, 14885), True, 'import numpy as np\n'), ((14897, 14937), 'numpy.allclose', 'np.allclose', (['plot.brush.color', 'colors[1]'], {}), '(plot.brush.color, colors[1])\n', (14908, 14937), True, 'import numpy as np\n'), ((15497, 15532), 'pyvista.plotting.charts.LinePlot2D', 'charts.LinePlot2D', (['x', 'y', 'c', 'w', 's', 'l'], {}), '(x, y, c, w, s, l)\n', (15514, 15532), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((15544, 15566), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (15555, 15566), True, 'import numpy as np\n'), ((15578, 15600), 'numpy.allclose', 'np.allclose', (['plot.y', 'y'], {}), '(plot.y, y)\n', (15589, 15600), True, 'import numpy as np\n'), ((15612, 15638), 'numpy.allclose', 'np.allclose', (['plot.color', 'c'], {}), 
'(plot.color, c)\n', (15623, 15638), True, 'import numpy as np\n'), ((15804, 15834), 'numpy.allclose', 'np.allclose', (['line_plot_2d.x', 'x'], {}), '(line_plot_2d.x, x)\n', (15815, 15834), True, 'import numpy as np\n'), ((15846, 15876), 'numpy.allclose', 'np.allclose', (['line_plot_2d.y', 'y'], {}), '(line_plot_2d.y, y)\n', (15857, 15876), True, 'import numpy as np\n'), ((16189, 16229), 'pyvista.plotting.charts.ScatterPlot2D', 'charts.ScatterPlot2D', (['x', 'y', 'c', 'sz', 'st', 'l'], {}), '(x, y, c, sz, st, l)\n', (16209, 16229), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((16241, 16263), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (16252, 16263), True, 'import numpy as np\n'), ((16275, 16297), 'numpy.allclose', 'np.allclose', (['plot.y', 'y'], {}), '(plot.y, y)\n', (16286, 16297), True, 'import numpy as np\n'), ((16309, 16335), 'numpy.allclose', 'np.allclose', (['plot.color', 'c'], {}), '(plot.color, c)\n', (16320, 16335), True, 'import numpy as np\n'), ((16509, 16542), 'numpy.allclose', 'np.allclose', (['scatter_plot_2d.x', 'x'], {}), '(scatter_plot_2d.x, x)\n', (16520, 16542), True, 'import numpy as np\n'), ((16554, 16587), 'numpy.allclose', 'np.allclose', (['scatter_plot_2d.y', 'y'], {}), '(scatter_plot_2d.y, y)\n', (16565, 16587), True, 'import numpy as np\n'), ((17243, 17275), 'pyvista.plotting.charts.AreaPlot', 'charts.AreaPlot', (['x', 'y1', 'y2', 'c', 'l'], {}), '(x, y1, y2, c, l)\n', (17258, 17275), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((17287, 17309), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (17298, 17309), True, 'import numpy as np\n'), ((17321, 17345), 'numpy.allclose', 'np.allclose', (['plot.y1', 'y1'], {}), '(plot.y1, y1)\n', (17332, 17345), True, 'import numpy as np\n'), ((17357, 17381), 'numpy.allclose', 'np.allclose', (['plot.y2', 'y2'], {}), '(plot.y2, y2)\n', (17368, 17381), True, 'import numpy as np\n'), ((17393, 
17419), 'numpy.allclose', 'np.allclose', (['plot.color', 'c'], {}), '(plot.color, c)\n', (17404, 17419), True, 'import numpy as np\n'), ((17523, 17550), 'numpy.allclose', 'np.allclose', (['area_plot.x', 'x'], {}), '(area_plot.x, x)\n', (17534, 17550), True, 'import numpy as np\n'), ((17562, 17591), 'numpy.allclose', 'np.allclose', (['area_plot.y1', 'y1'], {}), '(area_plot.y1, y1)\n', (17573, 17591), True, 'import numpy as np\n'), ((17603, 17632), 'numpy.allclose', 'np.allclose', (['area_plot.y2', 'y2'], {}), '(area_plot.y2, y2)\n', (17614, 17632), True, 'import numpy as np\n'), ((17978, 18009), 'pyvista.plotting.charts.BarPlot', 'charts.BarPlot', (['x', 'y', 'c', 'ori', 'l'], {}), '(x, y, c, ori, l)\n', (17992, 18009), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((18021, 18043), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (18032, 18043), True, 'import numpy as np\n'), ((18055, 18077), 'numpy.allclose', 'np.allclose', (['plot.y', 'y'], {}), '(plot.y, y)\n', (18066, 18077), True, 'import numpy as np\n'), ((18089, 18116), 'numpy.allclose', 'np.allclose', (['plot.colors', 'c'], {}), '(plot.colors, c)\n', (18100, 18116), True, 'import numpy as np\n'), ((18227, 18267), 'pyvista.plotting.charts.BarPlot', 'charts.BarPlot', (['x', 'y[0]', 'c[0]', 'ori', 'l[0]'], {}), '(x, y[0], c[0], ori, l[0])\n', (18241, 18267), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((18279, 18301), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (18290, 18301), True, 'import numpy as np\n'), ((18313, 18338), 'numpy.allclose', 'np.allclose', (['plot.y', 'y[0]'], {}), '(plot.y, y[0])\n', (18324, 18338), True, 'import numpy as np\n'), ((18350, 18379), 'numpy.allclose', 'np.allclose', (['plot.color', 'c[0]'], {}), '(plot.color, c[0])\n', (18361, 18379), True, 'import numpy as np\n'), ((18900, 18926), 'numpy.allclose', 'np.allclose', (['bar_plot.x', 'x'], {}), '(bar_plot.x, x)\n', (18911, 
18926), True, 'import numpy as np\n'), ((18938, 18964), 'numpy.allclose', 'np.allclose', (['bar_plot.y', 'y'], {}), '(bar_plot.y, y)\n', (18949, 18964), True, 'import numpy as np\n'), ((19401, 19430), 'pyvista.plotting.charts.StackPlot', 'charts.StackPlot', (['x', 'ys', 'c', 'l'], {}), '(x, ys, c, l)\n', (19417, 19430), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((19442, 19464), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (19453, 19464), True, 'import numpy as np\n'), ((19476, 19500), 'numpy.allclose', 'np.allclose', (['plot.ys', 'ys'], {}), '(plot.ys, ys)\n', (19487, 19500), True, 'import numpy as np\n'), ((19512, 19539), 'numpy.allclose', 'np.allclose', (['plot.colors', 'c'], {}), '(plot.colors, c)\n', (19523, 19539), True, 'import numpy as np\n'), ((19615, 19653), 'pyvista.plotting.charts.StackPlot', 'charts.StackPlot', (['x', 'ys[0]', 'c[0]', 'l[0]'], {}), '(x, ys[0], c[0], l[0])\n', (19631, 19653), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((19665, 19687), 'numpy.allclose', 'np.allclose', (['plot.x', 'x'], {}), '(plot.x, x)\n', (19676, 19687), True, 'import numpy as np\n'), ((19699, 19726), 'numpy.allclose', 'np.allclose', (['plot.ys', 'ys[0]'], {}), '(plot.ys, ys[0])\n', (19710, 19726), True, 'import numpy as np\n'), ((19738, 19767), 'numpy.allclose', 'np.allclose', (['plot.color', 'c[0]'], {}), '(plot.color, c[0])\n', (19749, 19767), True, 'import numpy as np\n'), ((20243, 20271), 'numpy.allclose', 'np.allclose', (['stack_plot.x', 'x'], {}), '(stack_plot.x, x)\n', (20254, 20271), True, 'import numpy as np\n'), ((20283, 20313), 'numpy.allclose', 'np.allclose', (['stack_plot.ys', 'ys'], {}), '(stack_plot.ys, ys)\n', (20294, 20313), True, 'import numpy as np\n'), ((20703, 20744), 'pyvista.Chart2D', 'pyvista.Chart2D', (['size', 'loc', 'lx', 'ly', '(False)'], {}), '(size, loc, lx, ly, False)\n', (20718, 20744), False, 'import pyvista\n'), ((21031, 21124), 
'numpy.allclose', 'np.allclose', (['chart._geometry', '(loc[0] * r_w, loc[1] * r_h, size[0] * r_w, size[1] * r_h)'], {}), '(chart._geometry, (loc[0] * r_w, loc[1] * r_h, size[0] * r_w, \n size[1] * r_h))\n', (21042, 21124), True, 'import numpy as np\n'), ((21248, 21356), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(loc[0] * r_w / 2, loc[1] * r_h / 2, size[0] * r_w / 2, size[1] * r_h / 2)'], {}), '(chart._geometry, (loc[0] * r_w / 2, loc[1] * r_h / 2, size[0] *\n r_w / 2, size[1] * r_h / 2))\n', (21259, 21356), True, 'import numpy as np\n'), ((21375, 21454), 'itertools.chain', 'itertools.chain', (['pyvista.hexcolors', 'pyvista.color_char_to_word', "['#fa09b6', '']"], {}), "(pyvista.hexcolors, pyvista.color_char_to_word, ['#fa09b6', ''])\n", (21390, 21454), False, 'import itertools\n'), ((22909, 22928), 'numpy.allclose', 'np.allclose', (['s.x', 'x'], {}), '(s.x, x)\n', (22920, 22928), True, 'import numpy as np\n'), ((22940, 22959), 'numpy.allclose', 'np.allclose', (['s.y', 'y'], {}), '(s.y, y)\n', (22951, 22959), True, 'import numpy as np\n'), ((22971, 22996), 'numpy.allclose', 'np.allclose', (['s.color', 'col'], {}), '(s.color, col)\n', (22982, 22996), True, 'import numpy as np\n'), ((23224, 23243), 'numpy.allclose', 'np.allclose', (['l.x', 'x'], {}), '(l.x, x)\n', (23235, 23243), True, 'import numpy as np\n'), ((23255, 23274), 'numpy.allclose', 'np.allclose', (['l.y', 'y'], {}), '(l.y, y)\n', (23266, 23274), True, 'import numpy as np\n'), ((23286, 23311), 'numpy.allclose', 'np.allclose', (['l.color', 'col'], {}), '(l.color, col)\n', (23297, 23311), True, 'import numpy as np\n'), ((23529, 23548), 'numpy.allclose', 'np.allclose', (['a.x', 'x'], {}), '(a.x, x)\n', (23540, 23548), True, 'import numpy as np\n'), ((23560, 23581), 'numpy.allclose', 'np.allclose', (['a.y1', '(-y)'], {}), '(a.y1, -y)\n', (23571, 23581), True, 'import numpy as np\n'), ((23593, 23613), 'numpy.allclose', 'np.allclose', (['a.y2', 'y'], {}), '(a.y2, y)\n', (23604, 23613), True, 
'import numpy as np\n'), ((23625, 23650), 'numpy.allclose', 'np.allclose', (['a.color', 'col'], {}), '(a.color, col)\n', (23636, 23650), True, 'import numpy as np\n'), ((23810, 23829), 'numpy.allclose', 'np.allclose', (['b.x', 'x'], {}), '(b.x, x)\n', (23821, 23829), True, 'import numpy as np\n'), ((23841, 23861), 'numpy.allclose', 'np.allclose', (['b.y', '(-y)'], {}), '(b.y, -y)\n', (23852, 23861), True, 'import numpy as np\n'), ((23873, 23898), 'numpy.allclose', 'np.allclose', (['b.color', 'col'], {}), '(b.color, col)\n', (23884, 23898), True, 'import numpy as np\n'), ((24091, 24110), 'numpy.allclose', 'np.allclose', (['s.x', 'x'], {}), '(s.x, x)\n', (24102, 24110), True, 'import numpy as np\n'), ((24122, 24143), 'numpy.allclose', 'np.allclose', (['s.ys', 'ys'], {}), '(s.ys, ys)\n', (24133, 24143), True, 'import numpy as np\n'), ((25069, 25102), 'numpy.allclose', 'np.allclose', (['chart_2d.x_range', 'rx'], {}), '(chart_2d.x_range, rx)\n', (25080, 25102), True, 'import numpy as np\n'), ((25114, 25152), 'numpy.allclose', 'np.allclose', (['chart_2d.x_axis.range', 'rx'], {}), '(chart_2d.x_axis.range, rx)\n', (25125, 25152), True, 'import numpy as np\n'), ((25190, 25223), 'numpy.allclose', 'np.allclose', (['chart_2d.y_range', 'ry'], {}), '(chart_2d.y_range, ry)\n', (25201, 25223), True, 'import numpy as np\n'), ((25235, 25273), 'numpy.allclose', 'np.allclose', (['chart_2d.y_axis.range', 'ry'], {}), '(chart_2d.y_axis.range, ry)\n', (25246, 25273), True, 'import numpy as np\n'), ((25851, 25881), 'pyvista.ChartBox', 'pyvista.ChartBox', (['data', 'cs', 'ls'], {}), '(data, cs, ls)\n', (25867, 25881), False, 'import pyvista\n'), ((25893, 25927), 'numpy.allclose', 'np.allclose', (['chart.plot.data', 'data'], {}), '(chart.plot.data, data)\n', (25904, 25927), True, 'import numpy as np\n'), ((26158, 26204), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(0, 0, r_w, r_h)'], {}), '(chart._geometry, (0, 0, r_w, r_h))\n', (26169, 26204), True, 'import numpy as np\n'), 
((26341, 26395), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(0, 0, r_w / 2, r_h / 2)'], {}), '(chart._geometry, (0, 0, r_w / 2, r_h / 2))\n', (26352, 26395), True, 'import numpy as np\n'), ((26602, 26634), 'numpy.allclose', 'np.allclose', (['box_plot.data', 'data'], {}), '(box_plot.data, data)\n', (26613, 26634), True, 'import numpy as np\n'), ((26646, 26680), 'numpy.allclose', 'np.allclose', (['box_plot.stats', 'stats'], {}), '(box_plot.stats, stats)\n', (26657, 26680), True, 'import numpy as np\n'), ((26857, 26887), 'pyvista.ChartPie', 'pyvista.ChartPie', (['data', 'cs', 'ls'], {}), '(data, cs, ls)\n', (26873, 26887), False, 'import pyvista\n'), ((26899, 26933), 'numpy.allclose', 'np.allclose', (['chart.plot.data', 'data'], {}), '(chart.plot.data, data)\n', (26910, 26933), True, 'import numpy as np\n'), ((27164, 27210), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(0, 0, r_w, r_h)'], {}), '(chart._geometry, (0, 0, r_w, r_h))\n', (27175, 27210), True, 'import numpy as np\n'), ((27347, 27401), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(0, 0, r_w / 2, r_h / 2)'], {}), '(chart._geometry, (0, 0, r_w / 2, r_h / 2))\n', (27358, 27401), True, 'import numpy as np\n'), ((27608, 27640), 'numpy.allclose', 'np.allclose', (['pie_plot.data', 'data'], {}), '(pie_plot.data, data)\n', (27619, 27640), True, 'import numpy as np\n'), ((27777, 27791), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (27789, 27791), True, 'import matplotlib.pyplot as plt\n'), ((27804, 27834), 'pyvista.ChartMPL', 'pyvista.ChartMPL', (['f', 'size', 'loc'], {}), '(f, size, loc)\n', (27820, 27834), False, 'import pyvista\n'), ((28033, 28126), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(loc[0] * r_w, loc[1] * r_h, size[0] * r_w, size[1] * r_h)'], {}), '(chart._geometry, (loc[0] * r_w, loc[1] * r_h, size[0] * r_w, \n size[1] * r_h))\n', (28044, 28126), True, 'import numpy as np\n'), ((28125, 28182), 'numpy.allclose', 'np.allclose', 
(['chart.position', '(loc[0] * r_w, loc[1] * r_h)'], {}), '(chart.position, (loc[0] * r_w, loc[1] * r_h))\n', (28136, 28182), True, 'import numpy as np\n'), ((28400, 28508), 'numpy.allclose', 'np.allclose', (['chart._geometry', '(loc[0] * r_w / 2, loc[1] * r_h / 2, size[0] * r_w / 2, size[1] * r_h / 2)'], {}), '(chart._geometry, (loc[0] * r_w / 2, loc[1] * r_h / 2, size[0] *\n r_w / 2, size[1] * r_h / 2))\n', (28411, 28508), True, 'import numpy as np\n'), ((28500, 28565), 'numpy.allclose', 'np.allclose', (['chart.position', '(loc[0] * r_w / 2, loc[1] * r_h / 2)'], {}), '(chart.position, (loc[0] * r_w / 2, loc[1] * r_h / 2))\n', (28511, 28565), True, 'import numpy as np\n'), ((28860, 28906), 'pyvista.Chart2D', 'pyvista.Chart2D', ([], {'size': '(0.5, 0.5)', 'loc': '(0, 0.5)'}), '(size=(0.5, 0.5), loc=(0, 0.5))\n', (28875, 28906), False, 'import pyvista\n'), ((28926, 28972), 'pyvista.Chart2D', 'pyvista.Chart2D', ([], {'size': '(0.5, 0.5)', 'loc': '(0.5, 0)'}), '(size=(0.5, 0.5), loc=(0.5, 0))\n', (28941, 28972), False, 'import pyvista\n'), ((30323, 30371), 'pyvista.Chart2D', 'pyvista.Chart2D', ([], {'size': '(0.5, 0.5)', 'loc': '(0.5, 0.5)'}), '(size=(0.5, 0.5), loc=(0.5, 0.5))\n', (30338, 30371), False, 'import pyvista\n'), ((31251, 31284), 'pyvista.examples.download_puppy_texture', 'examples.download_puppy_texture', ([], {}), '()\n', (31282, 31284), False, 'from pyvista import examples\n'), ((272, 289), 'platform.system', 'platform.system', ([], {}), '()\n', (287, 289), False, 'import platform\n'), ((432, 458), 'pyvista.plotting.system_supports_plotting', 'system_supports_plotting', ([], {}), '()\n', (456, 458), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((2986, 3011), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2999, 3011), False, 'import pytest\n'), ((4826, 4856), 'numpy.allclose', 'np.allclose', (['axis.range', 'r_fix'], {}), '(axis.range, r_fix)\n', (4837, 4856), True, 'import numpy as np\n'), 
((5660, 5685), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5673, 5685), False, 'import pytest\n'), ((10111, 10154), 'pytest.raises', 'pytest.raises', (['(AssertionError, ValueError)'], {}), '((AssertionError, ValueError))\n', (10124, 10154), False, 'import pytest\n'), ((10194, 10237), 'pytest.raises', 'pytest.raises', (['(AssertionError, ValueError)'], {}), '((AssertionError, ValueError))\n', (10207, 10237), False, 'import pytest\n'), ((16987, 17012), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17000, 17012), False, 'import pytest\n'), ((18529, 18554), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18542, 18554), False, 'import pytest\n'), ((18564, 18598), 'pyvista.plotting.charts.BarPlot', 'charts.BarPlot', (['x', 'y', 'c[0]', 'ori', 'l'], {}), '(x, y, c[0], ori, l)\n', (18578, 18598), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((18675, 18700), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18688, 18700), False, 'import pytest\n'), ((18710, 18747), 'pyvista.plotting.charts.BarPlot', 'charts.BarPlot', (['x', 'y[0]', 'c', 'ori', 'l[0]'], {}), '(x, y[0], c, ori, l[0])\n', (18724, 18747), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((18757, 18782), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18770, 18782), False, 'import pytest\n'), ((18792, 18829), 'pyvista.plotting.charts.BarPlot', 'charts.BarPlot', (['x', 'y[0]', 'c[0]', 'ori', 'l'], {}), '(x, y[0], c[0], ori, l)\n', (18806, 18829), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((19112, 19137), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19125, 19137), False, 'import pytest\n'), ((19882, 19907), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19895, 19907), False, 'import pytest\n'), ((19917, 19949), 
'pyvista.plotting.charts.StackPlot', 'charts.StackPlot', (['x', 'ys', 'c[0]', 'l'], {}), '(x, ys, c[0], l)\n', (19933, 19949), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((20019, 20044), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20032, 20044), False, 'import pytest\n'), ((20054, 20089), 'pyvista.plotting.charts.StackPlot', 'charts.StackPlot', (['x', 'ys[0]', 'c', 'l[0]'], {}), '(x, ys[0], c, l[0])\n', (20070, 20089), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((20099, 20124), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20112, 20124), False, 'import pytest\n'), ((20134, 20169), 'pyvista.plotting.charts.StackPlot', 'charts.StackPlot', (['x', 'ys[0]', 'c[0]', 'l'], {}), '(x, ys[0], c[0], l)\n', (20150, 20169), False, 'from pyvista.plotting import charts, system_supports_plotting\n'), ((20490, 20503), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (20499, 20503), True, 'import numpy as np\n'), ((20529, 20538), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (20535, 20538), True, 'import numpy as np\n'), ((20540, 20549), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (20546, 20549), True, 'import numpy as np\n'), ((20551, 20561), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (20558, 20561), True, 'import numpy as np\n'), ((24328, 24351), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (24341, 24351), False, 'import pytest\n'), ((24401, 24424), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (24414, 24424), False, 'import pytest\n'), ((24571, 24596), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (24584, 24596), False, 'import pytest\n'), ((25710, 25753), 'numpy.quantile', 'np.quantile', (['d', '[0.0, 0.25, 0.5, 0.75, 1.0]'], {}), '(d, [0.0, 0.25, 0.5, 0.75, 1.0])\n', (25721, 25753), True, 'import numpy as np\n'), ((28687, 28738), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {'match': '"""must be length 2"""'}), "(ValueError, match='must be length 2')\n", (28700, 28738), False, 'import pytest\n')] |
# _ __ _ _
# /\_/\ | '__| | | |
# [===] | | | |_| |
# \./ |_| \__,_|
#
# /***************//***************//***************/
# /* statspack.py *//* <NAME> *//* www.hakkeray.com */
# /***************//***************//***************/
# ________________________
# | hakkeray | Updated: |
# | v3.0.0 | 8.12.2020 |
# ------------------------
#
# * note: USZIPCODE pypi library is required to run zip_stats()
# Using pip in the notebook:
# !pip install -U uszipcode
# fsds tool required
# !pip install -U fsds_100719
# STANDARD libraries
import pandas as pd
from pandas import Series
import numpy as np
from numpy import log
# PLOTTING
import matplotlib as mpl
import matplotlib.pyplot as plt
import IPython.display as dp
# Apply a consistent bright style to both pyplot and the matplotlib rc defaults.
plt.style.use('seaborn-bright')
mpl.style.use('seaborn-bright')
# Global font settings applied to every matplotlib figure in this module.
font = {'family' : 'monospace',
        'weight' : 'bold',
        'size'   : 24}
mpl.rc('font', **font)
import seaborn as sns
sns.set_style('whitegrid')
# Suppress warnings globally (e.g. seaborn/pandas deprecation noise).
import warnings
warnings.filterwarnings('ignore')
# Show all dataframe columns when displaying (0 = no column limit).
pd.set_option('display.max_columns', 0)
# pd.set_option('display.max_rows','')
# import plotly.express as px
# import plotly.graph_objects as go
# STATSMODELS
import statsmodels.api as sm
import statsmodels.stats.api as sms
import statsmodels.formula.api as smf
#import statsmodels.formula.api as ols
import statsmodels.stats.multicomp
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# SCIPY
import scipy.stats as stats
from scipy.stats import normaltest as normtest # D'Agostino and Pearson's omnibus test
from collections import Counter
# SKLEARN
from sklearn.metrics import mean_squared_error as mse
from sklearn.preprocessing import RobustScaler
# ADDITIONAL LIBRARIES
#import researchpy as rp
import uszipcode
from uszipcode import SearchEngine
# HOT_STATS() function: display statistical summaries of a feature column
def hot_stats(data, column, verbose=False, t=None):
    """
    Scan a dataframe column and print its datatype, summary statistics
    (dtype-appropriate), mode, null counts (incl. pct of total), non-null
    value counts, and unique-value count.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe containing the column to inspect.
    column : str
        Name of the column within ``data`` to scan.
    verbose : bool, optional (default=False)
        If True, also print every unique value found.
    t : str, optional
        Name of a target column; when given (and the scanned column is
        numeric) the correlation with the target is printed.

    Returns
    -------
    None -- all results are printed.

    Examples
    --------
    hot_stats(df, 'str_column')
    hot_stats(df, 'numeric_column', t='target')
    """
    # assigns variables to call later as shortcuts
    feature = data[column]
    rdash = "-------->"
    ldash = "<--------"
    # Pick the summary appropriate for the column's dtype.
    if feature.dtype == 'float':
        hot_stats = feature.describe().round(2)
    elif feature.dtype == 'int':
        hot_stats = feature.describe()
    else:
        # BUG FIX: the original condition
        # `feature.dtype == 'object' or 'category' or 'datetime64[ns]'`
        # was always truthy (the non-empty string literals made the `or`
        # chain True), so this branch caught every non-float/int dtype and
        # the trailing `else` was unreachable. Make that behavior explicit.
        hot_stats = feature.agg(['min','median','max'])
        t = None  # correlation is meaningless for non-numeric dtypes
    # display statistics (returns different info depending on datatype)
    print(rdash)
    print("HOT!STATS")
    print(ldash)
    # display column name formatted with underline
    print(f"\n{feature.name.upper()}")
    # display the data type
    print(f"Data Type: {feature.dtype}\n")
    # display the summary and the mode
    print(hot_stats,"\n")
    print(f"à-la-Mode: \n{feature.mode()}\n")
    # find nulls and display total count and percentage
    if feature.isna().sum() > 0:
        print(f"Found\n{feature.isna().sum()} Nulls out of {len(feature)}({round(feature.isna().sum()/len(feature)*100,2)}%)\n")
    else:
        print("\nNo Nulls Found!\n")
    # display value counts (non-nulls)
    print(f"Non-Null Value Counts:\n{feature.value_counts()}\n")
    # display count of unique values
    print(f"# Unique Values: {len(feature.unique())}\n")
    # displays all unique values found if verbose set to true
    if verbose:
        print(f"Unique Values:\n {feature.unique()}\n")
    # display correlation coefficient with target for numeric columns:
    if t is not None:
        corr = feature.corr(data[t]).round(4)
        print(f"Correlation with {t.upper()}: {corr}")
# NULL_HUNTER() function: display Null counts per column/feature
def null_hunter(data):
    """Print, for every column containing nulls, the null count and its
    percentage of the column's total length."""
    print(f"Columns with Null Values")
    print("------------------------")
    for col in data:
        series = data[col]
        null_count = series.isna().sum()
        if null_count > 0:
            total = len(series)
            pct = round(null_count / total * 100, 2)
            print(f"{series.name}: \n{null_count} out of {total} ({pct}%)\n")
# CORRCOEF_DICT() function: calculates correlation coefficients assoc. with features and stores in a dictionary
def corr_dict(data, X, y):
    """Return a dict mapping each feature name in ``X`` to its pandas
    correlation coefficient with column ``y``.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe containing all referenced columns.
    X : iterable of str
        Feature column names to correlate against the target.
    y : str
        Target column name.

    Returns
    -------
    dict[str, float]
    """
    # Idiom fix: the original built two parallel lists, zipped them back
    # together, and shadowed the function's own name with a local dict --
    # a single comprehension expresses the same mapping directly.
    return {x: data[x].corr(data[y]) for x in X}
# SUB_SCATTER() function: pass list of features (x_cols) and compare against target (or another feature)
def sub_scatter(data, x_cols, y, color=None, nrows=None, ncols=None):
    """
    Display a row of scatterplots for multiple columns of a dataframe,
    each plotted against `y` (a target, or another feature for
    multicollinearity analysis).

    args: data (DataFrame), x_cols (list of column names), y (column name)
    kwargs:
        color: marker color (default is magenta '#C839C5')
        nrows/ncols: subplot grid shape (default 1 x 3)
    example:
        x_cols = ['col1', 'col2', 'col3']
        sub_scatter(df, x_cols, 'price')
    example with color kwarg:
        sub_scatter(df, x_cols, y, color=#)
    alternatively you can pass the column list and target directly:
        sub_scatter(df, ['col1', 'col2', 'col3'], 'price')
    """
    # FIX: use identity checks (`is None`) rather than `== None` (PEP 8);
    # `== None` can misbehave with objects overriding __eq__ (e.g. pandas).
    if nrows is None:
        nrows = 1
    if ncols is None:
        ncols = 3
    if color is None:
        color = '#C839C5'
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16, 4))
    for x_col, ax in zip(x_cols, axes):
        data.plot(kind='scatter', x=x_col, y=y, ax=ax, color=color)
        ax.set_title(x_col.capitalize() + " vs. " + y.capitalize())
# SUB_HISTS() function: plot histogram subplots
def sub_hists(data):
    """Plot a density histogram (20 bins) for every numeric column of
    `data`, one figure per column."""
    plt.style.use('seaborn-bright')
    # iterating data.describe() restricts the loop to numeric columns
    for col in data.describe():
        figure = plt.figure(figsize=(12, 5))
        axis = figure.add_subplot(121)
        axis.hist(data[col], density=True, label=col + ' histogram', bins=20)
        axis.set_title(col.capitalize())
        axis.legend()
        figure.tight_layout()
# --------- ZIP_STATS() --------- #
def zip_stats(zipcodes,
              minimum=0, maximum=5000000,
              simple=True):
    """
    Look up median home values for zipcodes and return those falling
    strictly between a min and max median home value.

    *Prerequisites: USZIPCODE() pypi package is a required dependency

    **ARGS
    zipcodes: dataframe column or iterable of strings (zipcodes)
        > Example1: zipcodes=df['zipcode']
        > Example2: zipcodes=['01267','90025']
    minimum: integer for dollar amount min threshold (default is 0)
    maximum: integer for dollar amount max threshold (default is 5000000,
        i.e. no maximum)

    **KWARGS
    simple: default=True
        > set simple=False to use rich info database (will only apply once
          TODOs below are added)

    Returns dict {zipcode: median_home_value} with minimum < value < maximum.

    #TODO: add input options for city state county
    #TODO: add input options for other keywords besides median home val
    """
    # pypi package for retrieving information based on us zipcodes
    from uszipcode import SearchEngine
    # set simple_zipcode=False to use rich info database
    search = SearchEngine(simple_zipcode=bool(simple))
    keyword = 'median_home_value'
    zipvals = {}
    # single pass: look up each zipcode and keep in-range median values
    for code in zipcodes:
        record = search.by_zipcode(code).to_dict()
        value = record[keyword]
        # FIX: skip zipcodes with no reported median home value — the old
        # comparison `v > minimum` raised TypeError when the value was None
        if value is not None and minimum < value < maximum:
            zipvals[record['zipcode']] = value
    return zipvals
"""
>>>>>>>>>>>>>>>>>> TIME SERIES <<<<<<<<<<<<<<<<<<<<<<
* makeTime()
* checkTime()
* mapTime()
"""
def makeTime(data, idx):
    """
    Convert column `idx` of dataframe `data` into a datetime index,
    keeping a copy of the datetimes in a 'DateTime' column.
    Returns a copy of the original dataframe.
    new_df = makeTime(df_original, 'DateTime')
    """
    out = data.copy()
    # unparseable values become NaT rather than raising
    out[idx] = pd.to_datetime(out[idx], errors='coerce')
    out['DateTime'] = out[idx].copy()
    out.set_index(idx, inplace=True, drop=True)
    return out
def melt_data(df):  # from flatiron starter notebook
    """Reshape a wide Zillow frame into long format: one row per
    (region, month), with 'Month' parsed to datetime and rows that are
    missing 'MeanValue' dropped."""
    id_cols = ['RegionID', 'RegionName', 'City', 'State', 'Metro',
               'CountyName', 'SizeRank']
    long_df = pd.melt(df, id_vars=id_cols, var_name='Month',
                      value_name='MeanValue')
    long_df['Month'] = pd.to_datetime(long_df['Month'], format='%Y-%m')
    return long_df.dropna(subset=['MeanValue'])
def cityzip_dicts(df, col1, col2):
    """
    Build 3 lookup dictionaries from a long-format dataframe:
      dc1: zipcode -> that zipcode's dataframe resampled to month-start
      dc2: city name -> that city's dataframe
      city_zip: city name -> list of zipcodes in that city
    col1: zipcode column (e.g. 'RegionName'); col2: city column (e.g. 'City')
    Returns dc1, dc2, city_zip
    Ex:
        NYC, nyc, city_zip = cityzip_dicts(df=NY, col1='RegionName', col2='City')
        NYC[10549]                              --> dataframe for a zipcode
        NYC[10549]['MeanValue']                 --> series for a zipcode
        nyc['New Rochelle']                     --> dataframe for a city
        nyc['New Rochelle']['RegionName'].unique() --> zipcodes in a city
        city_zip['Yonkers']                     --> list of zipcodes in a city
    """
    by_zip = df.groupby(col1)
    dc1 = {z: by_zip.get_group(z).resample('MS').asfreq()
           for z in df[col1].unique()}
    by_city = df.groupby(col2)
    dc2 = {c: by_city.get_group(c) for c in df[col2].unique()}
    # report how many zipcodes and cities were found
    zips = df.RegionName.unique()
    cities = df.City.unique()
    print("# ZIP CODES: ", len(zips))
    print("# CITIES: ", len(cities))
    # map each city name (as a string) to its list of zipcodes
    city_zip = {str(c): list(df.loc[df['City'] == c]['RegionName'].unique())
                for c in cities}
    return dc1, dc2, city_zip
def time_dict(d, xcol='RegionName', ycol='MeanValue'):
    """From a dict of per-zipcode dataframes `d`, return a dict mapping
    each zipcode to its `ycol` series renamed to that zipcode.
    (`xcol` is unused; kept for interface compatibility.)"""
    return {zc: frame[ycol].rename(zc) for zc, frame in d.items()}
def mapTime(d, xcol, ycol='MeanValue', X=None, vlines=None, MEAN=True):
    """
    Draws a timeseries 'map' of zipcodes and their mean values.
    fig,ax = mapTime(d=HUDSON, xcol='RegionName', ycol='MeanValue', MEAN=True, vlines=None)
    **ARGS
    d: takes a dictionary of dataframes OR a single long-format dataframe
    xcol: column in dataframe containing x-axis values (ex: zipcode)
    ycol: column in dataframe containing y-axis values (ex: price)
    X: list of x values to plot (defaults to all x in d if None/empty)
    **kw_args
    MEAN: plots the mean of the plotted series (default=True)
    vlines: if truthy, marks the crash and the min/max around it
    *Ex1: `d` = dataframe
        mapTime(d=NY, xcol='RegionName', ycol='MeanValue', X=list_of_zips)
    *Ex2: `d` = dictionary of dataframes
        mapTime(d=NYC, xcol='RegionName', ycol='MeanValue')
    Returns (fig, ax).
    """
    import matplotlib as mpl
    # BUGFIX: `font` was passed to mpl.rc() one line BEFORE it was defined
    # (UnboundLocalError); define it first.
    font = {'family': 'monospace',
            'weight': 'bold',
            'size': 24}
    mpl.rc('font', **font)
    # create figure for timeseries plot
    fig, ax = plt.subplots(figsize=(21, 13))
    plt.title(label=f'Time Series Plot: {str(ycol)}')
    # BUGFIX: ax.set() has no `font_dict` keyword (raised AttributeError);
    # the global rc font above covers styling.
    ax.set(title='Mean Home Values', xlabel='Year', ylabel='Price($)')
    is_frame = type(d) == pd.core.frame.DataFrame
    # BUGFIX: X defaults to None — guard before calling len() on it
    if X is not None and len(X) > 0:
        zipcodes = X
    elif is_frame:
        zipcodes = list(d[xcol].unique())
    else:
        zipcodes = list(d.keys())
    # solid lines for the first half of the list, dashed for the rest
    # (BUGFIX: the dataframe branch compared the zipcode VALUE to the
    # midpoint index; use the position i as the dict branch did)
    breakpoint = len(zipcodes) // 2
    txd = {}
    ts = None
    for i, zc in enumerate(zipcodes):
        ls = '-' if i < breakpoint else '--'
        if is_frame:
            # BUGFIX: select this zipcode's rows from the long-format frame;
            # the old `d[zc]` / `d[ycol].loc[zc]` lookups were invalid here
            ts = d.loc[d[xcol] == zc, ycol].rename(zc)
        else:
            ts = d[zc][ycol].rename(zc)
        ### PLOT each zipcode as timeseries `ts`
        ts.plot(label=str(zc), ax=ax, ls=ls, lw=2)
        txd[zc] = ts
    ## Calculate and plot the MEAN across all plotted series
    if MEAN and txd:
        mean = pd.DataFrame(txd).mean(axis=1)
        mean.plot(label='Mean', lw=5, color='black')
    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left", ncol=2)
    if vlines and ts is not None:
        ## plot crash, min and max vlines (based on the last-plotted series)
        crash = '01-2009'
        ax.axvline(crash, label='Housing Index Drops', color='red', ls=':', lw=2)
        MIN_ = ts.loc[crash:].idxmin()
        MAX_ = ts.loc['2004':'2010'].idxmax()
        ax.axvline(MIN_, label=f'Min Price Post Crash {MIN_}', color='black', lw=2)
        ax.axvline(MAX_, label='Max Price', color='black', ls=':', lw=2)
    return fig, ax
# # Check Seasonality
def freeze_time(ts, mode='A'):
    """
    Calculates and plots Seasonal Decomposition for a time series.
    ts : time-series
    mode : 'A' for 'additive' or 'M' for 'multiplicative'
    Raises ValueError for any other mode.
    Returns the decomposition plot figure.
    """
    from statsmodels.tsa.seasonal import seasonal_decompose
    if mode == 'A':  # default
        decomp = seasonal_decompose(ts, model='additive')
    elif mode == 'M':
        decomp = seasonal_decompose(ts, model='multiplicative')
    else:
        # BUGFIX: any other mode previously fell through and crashed with
        # NameError on `decomp`; fail loudly and clearly instead.
        raise ValueError(f"mode must be 'A' or 'M', got {mode!r}")
    freeze = decomp.plot()
    ts_seas = decomp.seasonal
    plt.figure()
    plt.tight_layout()
    ax = ts_seas.plot(c='green')
    fig = ax.get_figure()
    fig.set_size_inches(12, 5)
    ## Get min and max idx of the seasonal component
    min_ = ts_seas.idxmin()
    max_ = ts_seas.idxmax()
    min_2 = ts_seas.loc[max_:].idxmin()
    ax.axvline(min_, label=min_, c='red')
    ax.axvline(max_, c='red', ls=':', lw=2)
    ax.axvline(min_2, c='red', lw=2)
    # distance between consecutive seasonal minima = season length
    period = min_2 - min_
    ax.set_title(f'Season Length = {period}')
    return freeze
#### clockTime() --- time-series snapshot statistical summary ###
#
# /\ /\ /\ /\
# / CLOCKTIME STATS /
# \/ \/ \/
#
"""
clockTime()
Dependencies include the following METHODS:
- check_time(data, time) >>> convert to datetimeindex
- test_time(TS, y) >>> dickey-fuller (stationarity) test
- roll_time() >>> rolling mean
- freeze_time() >>> seasonality check
- diff_time() >>> differencing
- autoplot() >>> autocorrelation and partial autocorrelation plots
"""
# class clockTime():
# def __init__(data, time, x1, x2, y, freq=None):
# self.data = data
# self.time = time
# self.x1 = x1
# self.x2 = x2
# self.y = y
# self.freq = freq
def clockTime(ts, lags, d, TS, y):
    """
    /\ /\ /\ /\ ______________/\/\/\__-_-_
    / CLOCKTIME STATS / \/
    \/ \/ \/
    Time-series snapshot statistical summary: plots the series, its
    autocorrelation/lag plots, runs a Dickey-Fuller stationarity test,
    shows orders of differencing, ACF/PACF, rolling statistics, and a
    seasonal decomposition; returns the test results as a dataframe.

    ts   : time series to plot/difference
    lags : number of lags for the ACF/PACF plots
    d    : number of differencing orders to overlay (default usage: 5)
    TS, y: dataframe and column name used for the Dickey-Fuller test

    # clockTime(ts, lags=43, d=5, TS=NY, y='MeanValue',figsize=(13,11))
    #
    # ts = df.loc[df['RegionName']== zc]["MeanValue"].rename(zc).resample('MS').asfreq()
    """
    # import required libraries
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import numpy as np
    from numpy import log
    import pandas as pd
    from pandas import Series
    from pandas.plotting import autocorrelation_plot
    from pandas.plotting import lag_plot
    import statsmodels.api as sm
    from statsmodels.tsa.stattools import adfuller
    from statsmodels.graphics.tsaplots import plot_acf
    from statsmodels.graphics.tsaplots import plot_pacf
    print(' /\\ '*3+' /')
    print('/ CLOCKTIME STATS')
    print(' \/'*3)
    #**************#
    # Plot Time Series
    #original
    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(21,13))
    ts.plot(label='Original', ax=axes[0,0],c='red')
    # autocorrelation
    autocorrelation_plot(ts, ax=axes[0,1], c='magenta')
    # 1-lag
    autocorrelation_plot(ts.diff().dropna(), ax=axes[1,0], c='green')
    lag_plot(ts, lag=1, ax=axes[1,1])
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    # DICKEY-FULLER Stationarity Test
    # TS = NY | y = 'MeanValue'
    dtest = adfuller(TS[y].dropna())
    # The ADF null hypothesis is that a unit root exists (series is
    # NON-stationary). BUGFIX: p < 0.05 REJECTS that null, meaning the
    # series IS stationary — the True/False flags were previously inverted.
    if dtest[1] < 0.05:
        ### stationary: differencing not required before autoplot
        stationary = True
        r = 'rejected'
    else:
        ## non-stationary: difference data before checking autoplot
        stationary = False
        r = 'accepted'
    #**************#
    # ts orders of difference
    ts1 = ts.diff().dropna()
    ts2 = ts.diff().diff().dropna()
    ts3 = ts.diff().diff().diff().dropna()
    ts4 = ts.diff().diff().diff().diff().dropna()
    tdiff = [ts1,ts2,ts3,ts4]
    # Calculate Standard Deviation of Differenced Data
    sd = []
    for td in tdiff:
        sd.append(np.std(td))
    #sd = [np.std(ts1), np.std(ts2),np.std(ts3),np.std(ts4)]
    SD = pd.DataFrame(data=sd,index=['ts1',' ts2', 'ts3', 'ts4'], columns={'sd'})
    #SD['sd'] = [np.std(ts1), np.std(ts2),np.std(ts3),np.std(ts4)]
    SD['D'] = ['d=1','d=2','d=3','d=4']
    # order of differencing with the smallest standard deviation
    MIN = SD.loc[SD['sd'] == np.min(sd)]['sd']
    # Extract and display full test results
    output = dict(zip(['ADF Stat','p-val','# Lags','# Obs'], dtest[:4]))
    for key, value in dtest[4].items():
        output['Crit. Val (%s)'%key] = value
    output['min std dev'] = MIN
    output['NULL HYPOTHESIS'] = r
    output['STATIONARY'] = stationary
    # Finding optimal value for order of differencing
    # from pmdarima.arima.utils import ndiffs
    # adf = ndiffs(x=ts, test='adf')
    # kpss = ndiffs(x=ts, test='kpss')
    # pp = ndiffs(x=ts, test='pp')
    # output['adf,kpss,pp'] = [adf,kpss,pp]
    #**************#
    # show differencing up to `d` on single plot (default = 5)
    fig2 = plt.figure(figsize=(13,5))
    ax = fig2.gca()
    for i in range(d):
        ax = ts.diff(i).plot(label=i)
    ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", ncol=2)
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    #**************#
    # DIFFERENCED SERIES
    fig3 = plt.figure(figsize=(13,5))
    ts1.plot(label='d=1',figsize=(13,5), c='blue',lw=1,alpha=.7)
    ts2.plot(label='d=2',figsize=(13,5), c='red',lw=1.2,alpha=.8)
    ts3.plot(label='d=3',figsize=(13,5), c='magenta',lw=1,alpha=.7)
    ts4.plot(label='d=4',figsize=(13,5), c='green',lw=1,alpha=.7)
    plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=True,
               fancybox=True, facecolor='lightgray')
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    #**************#
    # Plot ACF, PACF
    fig4,axes = plt.subplots(nrows=2, ncols=2, figsize=(21,13))
    plot_acf(ts1,ax=axes[0,0],lags=lags)
    plot_pacf(ts1, ax=axes[0,1],lags=lags)
    plot_acf(ts2,ax=axes[1,0],lags=lags)
    plot_pacf(ts2, ax=axes[1,1],lags=lags)
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    #**************#
    # plot rolling mean and std
    #Determine rolling statistics
    rolmean = ts.rolling(window=12, center=False).mean()
    rolstd = ts.rolling(window=12, center=False).std()
    #Plot rolling statistics
    fig = plt.figure(figsize=(13,5))
    orig = plt.plot(ts, color='red', label='original')
    mean = plt.plot(rolmean, color='cyan', label='rolling mean')
    std = plt.plot(rolstd, color='orange', label='rolling std')
    plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
    plt.title('Rolling mean and standard deviation')
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    #**************#
    # # Check Seasonality
    """
    Calculates and plots Seasonal Decomposition for a time series
    """
    from statsmodels.tsa.seasonal import seasonal_decompose
    decomp = seasonal_decompose(ts, model='additive') # model='multiplicative'
    decomp.plot()
    ts_seas = decomp.seasonal
    ax = ts_seas.plot(c='green')
    fig = ax.get_figure()
    fig.set_size_inches(13,11)
    ## Get min and max idx
    min_ = ts_seas.idxmin()
    max_ = ts_seas.idxmax()
    min_2 = ts_seas.loc[max_:].idxmin()
    ax.axvline(min_,label=min_,c='red')
    ax.axvline(max_,c='red',ls=':', lw=2)
    ax.axvline(min_2,c='red', lw=2)
    # distance between consecutive seasonal minima = season length
    period = min_2 - min_
    ax.set_title(f'Season Length = {period}')
    plt.tight_layout()
    plt.gcf().autofmt_xdate()
    plt.show();
    #*******#
    clock = pd.DataFrame.from_dict(output, orient='index')
    print(' /\\ '*3+' /')
    print('/ CLOCK-TIME STATS')
    print(' \/'*3)
    #display results
    print('---'*9)
    return clock
"""
>>>>>>>>>>>>>>>>>> Machine Learning MODELS <<<<<<<<<<<<<<<<<<<<<<
* ttXsplit()
"""
#### ----> ttXsplit()
def ttXsplit(tx, tSIZE, tMIN):
    """
    Perform a train-test split on timeseries data.
    tSIZE: fraction of observations reserved for the test set
    tMIN: minimum number of observations allowed in the test set
    # train, test = ttXsplit(ts, 0.2, 2)
    """
    import math
    cut = math.floor(len(tx.index) * (1 - tSIZE))
    # guarantee the test set holds at least tMIN observations
    if len(tx.iloc[cut:]) < tMIN:
        cut = len(tx) - tMIN
    train, test = tx.iloc[:cut], tx.iloc[cut:]
    print(f'train: {len(train)} | test: {len(test)}')
    return train, test
def mind_your_PDQs(P=range(0,3), D=range(1,3), Q=range(0,3), s=None):
    """
    Build a dict of (p, d, q[, s]) parameter combinations for SARIMAX
    grid searching. Key is 'pdq' when `s` is None, 'PDQs' otherwise.
    pdqs = mind_your_PDQs()
    pdqs['pdq']
    pdq = pdqs['pdq']
    """
    import itertools
    if s is None:
        return {'pdq': list(itertools.product(P, D, Q))}
    return {'PDQs': list(itertools.product(P, D, Q, s))}
def stopwatch(time='time'):
    """
    Print a timestamp in the local timezone and return the `time` label
    (or the formatted timestamp when time == 'time').
    time: one of 'now', 'start', 'stop', 'time' (default)
    # stopwatch('stop')
    """
    import datetime as dt
    import tzlocal as tz
    now = dt.datetime.now(tz=tz.get_localzone())
    stamp = now.strftime('%m/%d/%Y - %I:%M:%S %p')
    if time == 'now':
        print(now)
    if time == 'start':
        print('start:', stamp)
    elif time == 'stop':
        print('stop:', stamp)
    elif time == 'time':
        time = stamp
        print(time, '|', now)
    return time
# From <NAME> (Bootcamp) https://github.com/jirvingphd/fsds/blob/master/fsds/jmi/jmi.py
def thiels_U(ys_true=None, ys_pred=None,display_equation=True,display_table=True):
    """Calculate's Thiel's U metric for forecasting accuracy.
    Accepts true values and predicted values.
    Returns Thiel's U"""
    from IPython.display import Markdown, Latex, display
    import numpy as np
    display(Markdown(""))
    eqn=" $$U = \\sqrt{\\frac{ \\sum_{t=1 }^{n-1}\\left(\\frac{\\bar{Y}_{t+1} - Y_{t+1}}{Y_t}\\right)^2}{\\sum_{t=1 }^{n-1}\\left(\\frac{Y_{t+1} - Y_{t}}{Y_t}\\right)^2}}$$"
    # url="['Explanation'](https://docs.oracle.com/cd/E57185_01/CBREG/ch06s02s03s04.html)"
    markdown_explanation ="|Thiel's U Value | Interpretation |\n\
| --- | --- |\n\
| <1 | Forecasting is better than guessing| \n\
| 1 | Forecasting is about as good as guessing| \n\
|>1 | Forecasting is worse than guessing| \n"
    # show the formula and/or the interpretation table as requested
    if display_equation and display_table:
        display(Latex(eqn), Markdown(markdown_explanation))
    elif display_equation:
        display(Latex(eqn))
    elif display_table:
        display(Markdown(markdown_explanation))
    if ys_true is None and ys_pred is None:
        return
    # squared relative errors of the forecast (numerator) vs. the naive
    # "no-change" forecast (denominator)
    steps = range(len(ys_true) - 1)
    numerators = [((ys_pred[t + 1] - ys_true[t + 1]) / ys_true[t]) ** 2
                  for t in steps]
    denominators = [((ys_true[t + 1] - ys_true[t]) / ys_true[t]) ** 2
                    for t in steps]
    return np.sqrt(np.sum(numerators) / np.sum(denominators))
# From <NAME>
def model_evaluation(ts_true,ts_pred,show=True,show_u_info=False):
    """
    Build a metrics table (RMSE, R2, Thiel's U) comparing a true series
    against a predicted one; optionally display it.
    show: display the resulting table (default True)
    show_u_info: also display the Thiel's U formula/interpretation table
    Returns the metrics as a dataframe.
    """
    import fsds_100719 as fs
    from sklearn.metrics import mean_squared_error,r2_score
    # BUGFIX: `display` was called below without a visible import in this
    # scope (NameError when show=True); importing locally is harmless even
    # if a module-level import exists elsewhere.
    from IPython.display import display
    res= [['Metric','Value']]
    res.append(['RMSE', np.sqrt(mean_squared_error(ts_true,ts_pred))])
    res.append(['R2',r2_score(ts_true,ts_pred)])
    res.append(["<NAME>", thiels_U(ts_true,ts_pred,
                                    display_equation=show_u_info,
                                    display_table=show_u_info)])
    res = fs.list2df(res)
    if show:
        display(res)
    return res
# Run a grid with pdq and seasonal pdq parameters calculated above and get the best AIC value
def gridMAX(ts, pdq, PDQM=None, verbose=False):
    """
    Runs a gridsearch with pdq (and optional seasonal PDQM) parameters,
    ranking models by AIC.
    Returns grid and best params (lowest AIC)
    Ex:
        gridX, best_params = gridMAX(ts,pdq=pdq)
    """
    import statsmodels.api as sm
    # FIX: hoisted the PDQM default out of the loop (it was re-checked on
    # every pdq iteration); default is a single non-seasonal combination
    if PDQM is None:
        PDQM = [(0, 0, 0, 0)]
    stopwatch('start')
    print(f'[*] STARTING GRID SEARCH')
    # store results as rows of [pdq, PDQM, AIC]
    grid = [['pdq','PDQM','AIC']]
    for comb in pdq:
        for combs in PDQM:
            mod = sm.tsa.statespace.SARIMAX(ts,
                                            order=comb,
                                            seasonal_order=combs,
                                            enforce_stationarity=False,
                                            enforce_invertibility=False)
            output = mod.fit()
            grid.append([comb, combs, output.aic])
            if verbose:
                print('ARIMA {} x {}12 : AIC Calculated ={}'.format(comb,
                                                                    combs,
                                                                    output.aic))
    stopwatch('stop')
    print(f"[**] GRID SEARCH COMPLETE")
    gridX = pd.DataFrame(grid[1:], columns=grid[0])
    gridX = gridX.sort_values('AIC').reset_index()
    # lowest-AIC parameter combination
    best_params = dict(order=gridX.iloc[0].loc['pdq'])
    # (removed an unused `from ... import SARIMAX` and the dead
    # best_pdq/best_pdqm locals)
    return gridX, best_params
def calcROI(investment, final_value):
    """Return the percent return on investment between `investment` and
    `final_value`, rounded to 3 decimal places.
    r = calcROI(investment, final_value)
    """
    gain = final_value - investment
    return np.round((gain / investment) * 100, 3)
#ts = NYC[zc]['MeanValue'].rename(zc)
def forecastX(model_output, train, test, start=None, end=None, get_metrics=True):
    """
    Plot a fitted model's dynamic forecast (with confidence interval)
    against its train/test data and compute the forecast's ROI.

    Uses get_prediction() and conf_int() methods from statsmodels results.

    model_output: fitted statsmodels results object (e.g. SARIMAXResults)
    train, test: train/test series; `train.name` is used as the zipcode label
    start, end: forecast window; default to the test set's first/last index
    get_metrics: if True, also computes/display evaluation metrics
    Returns (r, forecast, fig, ax): ROI %, forecast dataframe, figure, axes.
    """
    # default the forecast window to the span of the test set
    if start is None:
        start = test.index[0]
    if end is None:
        end = test.index[-1]
    # Get predictions starting from 2013 and calculate confidence intervals.
    # dynamic=True: use prior forecasts (not actuals) for multi-step prediction
    prediction = model_output.get_prediction(start=start,end=end, dynamic=True)
    forecast = prediction.conf_int()
    forecast['predicted_mean'] = prediction.predicted_mean
    fc_plot = pd.concat([forecast, train], axis=1)
    ## Get ROI Forecast: percent change from first to last predicted value
    r = calcROI(investment=forecast['predicted_mean'].iloc[0],
                final_value=forecast['predicted_mean'].iloc[-1])
    zc = train.name
    fig, ax = plt.subplots(figsize=(21,13))
    train.plot(ax=ax,label='Training Data',lw=4) # train.index[0] '1996-04-01, train.index[-1] # 2013-11-01
    test.plot(ax=ax,label='Test Data',lw=4) # test.index[0] '2013-12-01 , test.index[-1] '2018-04-01
    forecast['predicted_mean'].plot(ax=ax, label='Forecast', color='magenta',lw=4)
    # shade the confidence band between lower (col 0) and upper (col 1) bounds
    ax.fill_between(forecast.index,
                    forecast.iloc[:,0],
                    forecast.iloc[:,1],
                    color="white",
                    alpha=.5,
                    label = 'conf_int')
    # highlight the test period and the forecast window as vertical bands
    ax.fill_betweenx(ax.get_ylim(), test.index[0], test.index[-1], color='darkslategray',alpha=0.5, zorder=-1)
    ax.fill_betweenx(ax.get_ylim(), start, end, color='darkslategray',zorder=-1)
    ax.legend(loc="upper left",bbox_to_anchor=(1.04,1), ncol=2,fontsize='small',frameon=True, fancybox=True, framealpha=.15, facecolor='k')
    ax.set(title=f"Predictions for {zc}: ROI = {r}%")
    ax.set_xlabel('Year')
    ax.set_ylabel('Mean Home Value $USD')
    fig = ax.get_figure()
    fc_plot['zipcode']= train.name
    plt.show()
    if get_metrics == True:
        # NOTE(review): `metrics` is computed (and displayed by
        # model_evaluation) but not returned — confirm this is intentional
        metrics = model_evaluation(ts_true=test, ts_pred=forecast['predicted_mean'])
    return r, forecast, fig, ax
#
# r,forecast, fig, ax = forecastX(model_output, train, test, get_metrics=True)
# forecast
# r
#
# OR:
# forecast, fig, ax = forecastX(model_output, train, test)
# r,forecast, fig, ax = forecastX(model_output, train, test, get_metrics=True)
# forecast
# r
def gridMAXmeta(KEYS, s=False):
    """
    For each zipcode: grid-search SARIMAX orders on the training split,
    refit the best model on the ENTIRE series, and forecast 24 periods
    past the end of the data.

    Opt1: gridMAXmeta(KEYS=NYC, s=False)
        KEYS: dict of full dataframes
        NOTE: with s=False, gridMAXmeta builds the dict of ts for you
        (docstring previously said s=True for this case — that was wrong)
    Opt2: gridMAXmeta(KEYS=txd, s=True)
        KEYS: dictionary of ts - skip the meta ts creation

    Returns metagrid, ROI, best_params
        (NOTE: best_params is the value from the LAST zipcode processed)
    """
    # BUGFIX: SARIMAX was referenced below but never imported in this
    # scope (only gridMAX imported it, locally) -> NameError at runtime
    from statsmodels.tsa.statespace.sarimax import SARIMAX
    if s is False:
        # build {zipcode: timeseries} from the dict of full dataframes
        txd = {}
        for zc in KEYS:
            txd[zc] = KEYS[zc]['MeanValue'].rename(zc)
    else:
        txd = KEYS
    pdqs = mind_your_PDQs()
    metagrid = {}
    ROI = {}
    for zc, ts in txd.items():
        print('\n')
        print('---'*30)
        print('---'*30)
        print(f'ZIPCODE: {zc}')
        ## Train test split
        train, test = ttXsplit(ts, 0.1, 2)
        ## gridMAX gridsearch on the training data
        gridX, best_params = gridMAX(train, pdq=pdqs['pdq'])
        metagrid[zc] = {}
        metagrid[zc]['gridX'] = gridX.iloc[0]
        metagrid[zc]['pdq'] = best_params
        metagrid[zc]['aic'] = gridX.iloc[0][3]
        ##### SARIMAX: refit the best params USING THE ENTIRE TIME SERIES
        model_output = SARIMAX(ts,
                               **best_params,
                               enforce_invertibility=False,
                               enforce_stationarity=False).fit()
        metagrid[zc]['model'] = model_output
        # forecast 24 periods beyond the last observation
        r, forecast, fig, ax = forecastX(model_output,
                                         train, test,
                                         start=ts.index[-1],
                                         end=ts.index.shift(24)[-1],
                                         get_metrics=False)
        metagrid[zc]['forecast'] = forecast
        metagrid[zc]['ROI'] = r
        # (removed a duplicated ROI[zc] = r assignment)
        ROI[zc] = r
    return metagrid, ROI, best_params
# metagrid, ROI, best_params = gridMAXmeta(KEYS=NYC, s=False) | [
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.sum",
"matplotlib.style.use",
"IPython.display.Markdown",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"statsmodels.graphics.tsaplots.plot_pacf",
"matplotlib.pyplot.tight_layout",
"pandas.set_option",
... | [((749, 780), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (762, 780), True, 'import matplotlib.pyplot as plt\n'), ((781, 812), 'matplotlib.style.use', 'mpl.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (794, 812), True, 'import matplotlib as mpl\n'), ((895, 917), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (901, 917), True, 'import matplotlib as mpl\n'), ((941, 967), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (954, 967), True, 'import seaborn as sns\n'), ((1006, 1039), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1029, 1039), False, 'import warnings\n'), ((1068, 1107), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(0)'], {}), "('display.max_columns', 0)\n", (1081, 1107), True, 'import pandas as pd\n'), ((6700, 6755), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(16, 4)'}), '(nrows=nrows, ncols=ncols, figsize=(16, 4))\n', (6712, 6755), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7037), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (7019, 7037), True, 'import matplotlib.pyplot as plt\n'), ((9692, 9732), 'pandas.to_datetime', 'pd.to_datetime', (['df[idx]'], {'errors': '"""coerce"""'}), "(df[idx], errors='coerce')\n", (9706, 9732), True, 'import pandas as pd\n'), ((9896, 10041), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['RegionID', 'RegionName', 'City', 'State', 'Metro', 'CountyName', 'SizeRank']", 'var_name': '"""Month"""', 'value_name': '"""MeanValue"""'}), "(df, id_vars=['RegionID', 'RegionName', 'City', 'State', 'Metro',\n 'CountyName', 'SizeRank'], var_name='Month', value_name='MeanValue')\n", (9903, 10041), True, 'import pandas as pd\n'), ((10094, 10141), 'pandas.to_datetime', 'pd.to_datetime', 
(["melted['Month']"], {'format': '"""%Y-%m"""'}), "(melted['Month'], format='%Y-%m')\n", (10108, 10141), True, 'import pandas as pd\n'), ((13039, 13061), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (13045, 13061), True, 'import matplotlib as mpl\n'), ((13230, 13260), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(21, 13)'}), '(figsize=(21, 13))\n', (13242, 13260), True, 'import matplotlib.pyplot as plt\n'), ((16175, 16187), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16185, 16187), True, 'import matplotlib.pyplot as plt\n'), ((16192, 16210), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16208, 16210), True, 'import matplotlib.pyplot as plt\n'), ((18339, 18387), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(21, 13)'}), '(nrows=2, ncols=2, figsize=(21, 13))\n', (18351, 18387), True, 'import matplotlib.pyplot as plt\n'), ((18466, 18518), 'pandas.plotting.autocorrelation_plot', 'autocorrelation_plot', (['ts'], {'ax': 'axes[0, 1]', 'c': '"""magenta"""'}), "(ts, ax=axes[0, 1], c='magenta')\n", (18486, 18518), False, 'from pandas.plotting import autocorrelation_plot\n'), ((18605, 18639), 'pandas.plotting.lag_plot', 'lag_plot', (['ts'], {'lag': '(1)', 'ax': 'axes[1, 1]'}), '(ts, lag=1, ax=axes[1, 1])\n', (18613, 18639), False, 'from pandas.plotting import lag_plot\n'), ((18648, 18666), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18664, 18666), True, 'import matplotlib.pyplot as plt\n'), ((18701, 18711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18709, 18711), True, 'import matplotlib.pyplot as plt\n'), ((19493, 19567), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'sd', 'index': "['ts1', ' ts2', 'ts3', 'ts4']", 'columns': "{'sd'}"}), "(data=sd, index=['ts1', ' ts2', 'ts3', 'ts4'], columns={'sd'})\n", (19505, 19567), True, 'import pandas as pd\n'), ((20394, 20421), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 5)'}), '(figsize=(13, 5))\n', (20404, 20421), True, 'import matplotlib.pyplot as plt\n'), ((20571, 20589), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20587, 20589), True, 'import matplotlib.pyplot as plt\n'), ((20624, 20634), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20632, 20634), True, 'import matplotlib.pyplot as plt\n'), ((20698, 20725), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 5)'}), '(figsize=(13, 5))\n', (20708, 20725), True, 'import matplotlib.pyplot as plt\n'), ((20994, 21104), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""', 'frameon': '(True)', 'fancybox': '(True)', 'facecolor': '"""lightgray"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left', frameon=True,\n fancybox=True, facecolor='lightgray')\n", (21004, 21104), True, 'import matplotlib.pyplot as plt\n'), ((21120, 21138), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21136, 21138), True, 'import matplotlib.pyplot as plt\n'), ((21173, 21183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21181, 21183), True, 'import matplotlib.pyplot as plt\n'), ((21248, 21296), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(21, 13)'}), '(nrows=2, ncols=2, figsize=(21, 13))\n', (21260, 21296), True, 'import matplotlib.pyplot as plt\n'), ((21300, 21339), 'statsmodels.graphics.tsaplots.plot_acf', 'plot_acf', (['ts1'], {'ax': 'axes[0, 0]', 'lags': 'lags'}), '(ts1, ax=axes[0, 0], lags=lags)\n', (21308, 21339), False, 'from statsmodels.graphics.tsaplots import plot_acf\n'), ((21341, 21381), 'statsmodels.graphics.tsaplots.plot_pacf', 'plot_pacf', (['ts1'], {'ax': 'axes[0, 1]', 'lags': 'lags'}), '(ts1, ax=axes[0, 1], lags=lags)\n', (21350, 21381), False, 'from statsmodels.graphics.tsaplots import plot_pacf\n'), ((21384, 21423), 
'statsmodels.graphics.tsaplots.plot_acf', 'plot_acf', (['ts2'], {'ax': 'axes[1, 0]', 'lags': 'lags'}), '(ts2, ax=axes[1, 0], lags=lags)\n', (21392, 21423), False, 'from statsmodels.graphics.tsaplots import plot_acf\n'), ((21425, 21465), 'statsmodels.graphics.tsaplots.plot_pacf', 'plot_pacf', (['ts2'], {'ax': 'axes[1, 1]', 'lags': 'lags'}), '(ts2, ax=axes[1, 1], lags=lags)\n', (21434, 21465), False, 'from statsmodels.graphics.tsaplots import plot_pacf\n'), ((21468, 21486), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21484, 21486), True, 'import matplotlib.pyplot as plt\n'), ((21521, 21531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21529, 21531), True, 'import matplotlib.pyplot as plt\n'), ((21785, 21812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 5)'}), '(figsize=(13, 5))\n', (21795, 21812), True, 'import matplotlib.pyplot as plt\n'), ((21823, 21866), 'matplotlib.pyplot.plot', 'plt.plot', (['ts'], {'color': '"""red"""', 'label': '"""original"""'}), "(ts, color='red', label='original')\n", (21831, 21866), True, 'import matplotlib.pyplot as plt\n'), ((21878, 21931), 'matplotlib.pyplot.plot', 'plt.plot', (['rolmean'], {'color': '"""cyan"""', 'label': '"""rolling mean"""'}), "(rolmean, color='cyan', label='rolling mean')\n", (21886, 21931), True, 'import matplotlib.pyplot as plt\n'), ((21942, 21995), 'matplotlib.pyplot.plot', 'plt.plot', (['rolstd'], {'color': '"""orange"""', 'label': '"""rolling std"""'}), "(rolstd, color='orange', label='rolling std')\n", (21950, 21995), True, 'import matplotlib.pyplot as plt\n'), ((22005, 22059), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (22015, 22059), True, 'import matplotlib.pyplot as plt\n'), ((22064, 22112), 'matplotlib.pyplot.title', 'plt.title', (['"""Rolling mean and standard deviation"""'], {}), "('Rolling mean and standard deviation')\n", 
(22073, 22112), True, 'import matplotlib.pyplot as plt\n'), ((22117, 22135), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22133, 22135), True, 'import matplotlib.pyplot as plt\n'), ((22170, 22180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22178, 22180), True, 'import matplotlib.pyplot as plt\n'), ((22391, 22431), 'statsmodels.tsa.seasonal.seasonal_decompose', 'seasonal_decompose', (['ts'], {'model': '"""additive"""'}), "(ts, model='additive')\n", (22409, 22431), False, 'from statsmodels.tsa.seasonal import seasonal_decompose\n'), ((22918, 22936), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22934, 22936), True, 'import matplotlib.pyplot as plt\n'), ((22971, 22981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22979, 22981), True, 'import matplotlib.pyplot as plt\n'), ((23013, 23059), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['output'], {'orient': '"""index"""'}), "(output, orient='index')\n", (23035, 23059), True, 'import pandas as pd\n'), ((26993, 27008), 'fsds_100719.list2df', 'fs.list2df', (['res'], {}), '(res)\n', (27003, 27008), True, 'import fsds_100719 as fs\n'), ((28468, 28507), 'pandas.DataFrame', 'pd.DataFrame', (['grid[1:]'], {'columns': 'grid[0]'}), '(grid[1:], columns=grid[0])\n', (28480, 28507), True, 'import pandas as pd\n'), ((28967, 29025), 'numpy.round', 'np.round', (['((final_value - investment) / investment * 100)', '(3)'], {}), '((final_value - investment) / investment * 100, 3)\n', (28975, 29025), True, 'import numpy as np\n'), ((29704, 29740), 'pandas.concat', 'pd.concat', (['[forecast, train]'], {'axis': '(1)'}), '([forecast, train], axis=1)\n', (29713, 29740), True, 'import pandas as pd\n'), ((29932, 29962), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(21, 13)'}), '(figsize=(21, 13))\n', (29944, 29962), True, 'import matplotlib.pyplot as plt\n'), ((31014, 31024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(31022, 31024), True, 'import matplotlib.pyplot as plt\n'), ((7087, 7114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (7097, 7114), True, 'import matplotlib.pyplot as plt\n'), ((8448, 8481), 'uszipcode.SearchEngine', 'SearchEngine', ([], {'simple_zipcode': '(True)'}), '(simple_zipcode=True)\n', (8460, 8481), False, 'from uszipcode import SearchEngine\n'), ((8509, 8543), 'uszipcode.SearchEngine', 'SearchEngine', ([], {'simple_zipcode': '(False)'}), '(simple_zipcode=False)\n', (8521, 8543), False, 'from uszipcode import SearchEngine\n'), ((15981, 16021), 'statsmodels.tsa.seasonal.seasonal_decompose', 'seasonal_decompose', (['ts'], {'model': '"""additive"""'}), "(ts, model='additive')\n", (15999, 16021), False, 'from statsmodels.tsa.seasonal import seasonal_decompose\n'), ((25265, 25277), 'IPython.display.Markdown', 'Markdown', (['""""""'], {}), "('')\n", (25273, 25277), False, 'from IPython.display import Markdown, Latex, display\n'), ((27035, 27047), 'IPython.display.display', 'display', (['res'], {}), '(res)\n', (27042, 27047), False, 'from IPython.display import Markdown, Latex, display\n'), ((16061, 16107), 'statsmodels.tsa.seasonal.seasonal_decompose', 'seasonal_decompose', (['ts'], {'model': '"""multiplicative"""'}), "(ts, model='multiplicative')\n", (16079, 16107), False, 'from statsmodels.tsa.seasonal import seasonal_decompose\n'), ((18671, 18680), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (18678, 18680), True, 'import matplotlib.pyplot as plt\n'), ((19406, 19416), 'numpy.std', 'np.std', (['td'], {}), '(td)\n', (19412, 19416), True, 'import numpy as np\n'), ((20594, 20603), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20601, 20603), True, 'import matplotlib.pyplot as plt\n'), ((21143, 21152), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21150, 21152), True, 'import matplotlib.pyplot as plt\n'), ((21491, 21500), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21498, 
21500), True, 'import matplotlib.pyplot as plt\n'), ((22140, 22149), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22147, 22149), True, 'import matplotlib.pyplot as plt\n'), ((22941, 22950), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22948, 22950), True, 'import matplotlib.pyplot as plt\n'), ((24019, 24045), 'itertools.product', 'itertools.product', (['P', 'D', 'Q'], {}), '(P, D, Q)\n', (24036, 24045), False, 'import itertools\n'), ((24083, 24112), 'itertools.product', 'itertools.product', (['P', 'D', 'Q', 's'], {}), '(P, D, Q, s)\n', (24100, 24112), False, 'import itertools\n'), ((25851, 25861), 'IPython.display.Latex', 'Latex', (['eqn'], {}), '(eqn)\n', (25856, 25861), False, 'from IPython.display import Markdown, Latex, display\n'), ((25862, 25892), 'IPython.display.Markdown', 'Markdown', (['markdown_explanation'], {}), '(markdown_explanation)\n', (25870, 25892), False, 'from IPython.display import Markdown, Latex, display\n'), ((26401, 26417), 'numpy.sum', 'np.sum', (['num_list'], {}), '(num_list)\n', (26407, 26417), True, 'import numpy as np\n'), ((26420, 26438), 'numpy.sum', 'np.sum', (['denom_list'], {}), '(denom_list)\n', (26426, 26438), True, 'import numpy as np\n'), ((26757, 26783), 'sklearn.metrics.r2_score', 'r2_score', (['ts_true', 'ts_pred'], {}), '(ts_true, ts_pred)\n', (26765, 26783), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((27742, 27866), 'statsmodels.api.tsa.statespace.SARIMAX', 'sm.tsa.statespace.SARIMAX', (['ts'], {'order': 'comb', 'seasonal_order': 'combs', 'enforce_stationarity': '(False)', 'enforce_invertibility': '(False)'}), '(ts, order=comb, seasonal_order=combs,\n enforce_stationarity=False, enforce_invertibility=False)\n', (27767, 27866), True, 'import statsmodels.api as sm\n'), ((19702, 19712), 'numpy.min', 'np.min', (['sd'], {}), '(sd)\n', (19708, 19712), True, 'import numpy as np\n'), ((24305, 24323), 'tzlocal.get_localzone', 'tz.get_localzone', ([], {}), '()\n', (24321, 24323), 
True, 'import tzlocal as tz\n'), ((24399, 24417), 'tzlocal.get_localzone', 'tz.get_localzone', ([], {}), '()\n', (24415, 24417), True, 'import tzlocal as tz\n'), ((25951, 25961), 'IPython.display.Latex', 'Latex', (['eqn'], {}), '(eqn)\n', (25956, 25961), False, 'from IPython.display import Markdown, Latex, display\n'), ((26692, 26728), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['ts_true', 'ts_pred'], {}), '(ts_true, ts_pred)\n', (26710, 26728), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((32860, 32947), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'SARIMAX', (['ts'], {'enforce_invertibility': '(False)', 'enforce_stationarity': '(False)'}), '(ts, **best_params, enforce_invertibility=False,\n enforce_stationarity=False)\n', (32867, 32947), False, 'from statsmodels.tsa.statespace.sarimax import SARIMAX\n'), ((24572, 24590), 'tzlocal.get_localzone', 'tz.get_localzone', ([], {}), '()\n', (24588, 24590), True, 'import tzlocal as tz\n'), ((26003, 26033), 'IPython.display.Markdown', 'Markdown', (['markdown_explanation'], {}), '(markdown_explanation)\n', (26011, 26033), False, 'from IPython.display import Markdown, Latex, display\n'), ((15053, 15070), 'pandas.DataFrame', 'pd.DataFrame', (['txd'], {}), '(txd)\n', (15065, 15070), True, 'import pandas as pd\n'), ((24738, 24756), 'tzlocal.get_localzone', 'tz.get_localzone', ([], {}), '()\n', (24754, 24756), True, 'import tzlocal as tz\n')] |
import os
from imageio import imread
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def get_positive_features(train_path_pos, cell_size, window_size, block_size, nbins):
    '''
    Compute HoG feature vectors for every positive (face) training image.

    'train_path_pos' is a directory containing 36x36 face crops (.jpg).
    'cell_size' is the side length of a HoG cell in pixels; smaller cells
    work better but increase dimensionality and slow down detection.
    'window_size' is the (height, width) of the detection template.
    'block_size' is the side length of a block, in cells.
    'nbins' is the number of orientation-histogram bins.

    Returns 'features_pos', an N x D matrix where N is the number of face
    images and D is the flattened HoG template dimensionality.
    '''
    image_paths = [os.path.join(train_path_pos, name)
                   for name in os.listdir(train_path_pos) if name.endswith('.jpg')]
    block_pixels = block_size * cell_size
    # Number of (50%-overlapping) block positions that fit in the window.
    n_blocks = int((np.floor((window_size[0] - 2) / (block_pixels / 2)) - 1) *
                   (np.floor((window_size[1] - 2) / (block_pixels / 2)) - 1))
    feature_dim = n_blocks * block_size * block_size * nbins
    features_pos = np.zeros((len(image_paths), feature_dim))
    for idx, path in enumerate(image_paths):
        image = imread(path)
        features_pos[idx] = compute_hog_features(image, cell_size, block_size, nbins).reshape(-1)
    return features_pos
def get_random_negative_features(non_face_scn_path, cell_size, window_size, block_size, nbins, num_samples):
    '''
    Mine random negative (non-face) HoG feature vectors.

    'non_face_scn_path' is a directory of .jpg scene images that contain no
    faces. Roughly 'num_samples' window-sized patches are sampled uniformly
    at random across the images (the same number from each image), converted
    to grayscale, and turned into HoG features.

    Returns 'features_neg', an N x D matrix where N is the number of sampled
    patches and D is the flattened HoG template dimensionality.
    '''
    image_paths = [os.path.join(non_face_scn_path, name)
                   for name in os.listdir(non_face_scn_path) if name.endswith('.jpg')]
    n_images = len(image_paths)
    samples_per_image = int(np.ceil(num_samples * 1. / n_images))
    block_pixels = block_size * cell_size
    half_block = block_pixels / 2
    # Block grid dimensions (height-wise, width-wise) for one window.
    n_blocks = [int(np.floor((window_size[0] - 2) / half_block) - 1),
                int(np.floor((window_size[1] - 2) / half_block) - 1)]
    feature_dim = n_blocks[0] * n_blocks[1] * block_size * block_size * nbins
    features_neg = np.zeros((n_images * samples_per_image, feature_dim))
    for img_idx, path in enumerate(image_paths):
        # Average the color channels to get a grayscale image.
        gray = np.mean(imread(path), 2).astype(np.uint8)
        height, width = gray.shape
        for sample in range(samples_per_image):
            # Random top-left corner such that the window fits in the image.
            left = int(np.ceil(np.random.rand() * (width - window_size[1])))
            top = int(np.ceil(np.random.rand() * (height - window_size[0])))
            row = img_idx * samples_per_image + sample
            patch = gray[top:top + window_size[0], left:left + window_size[1]]
            features_neg[row, ...] = compute_hog_features(patch, cell_size, block_size, nbins).reshape(-1)
    return features_neg
'''
COMPUTE_GRADIENT Given an image, computes the pixel gradients
Arguments:
im - a grayscale image, represented as an ndarray of size (H, W) containing
the pixel values
Returns:
angles - ndarray of size (H-2, W-2) containing gradient angles in degrees
magnitudes - ndarray of size (H-2, W-2) containing gradient magnitudes
The way that the angles and magnitude per pixel are computed as follows:
Given the following pixel grid
P1 P2 P3
P4 P5 P6
P7 P8 P9
We compute the angle on P5 as arctan(dy/dx) = arctan(P2-P8 / P4-P6).
Note that we should be using np.arctan2, which is more numerically stable.
However, this puts us in the range [-180, 180] degrees. To be in the range
[0,180], we need to simply add 180 degrees to the negative angles.
The magnitude is simply sqrt((P4-P6)^2 + (P2-P8)^2)
'''
def compute_gradient(im):
    """Compute per-pixel gradient angles (degrees, [0, 180]) and magnitudes.

    Central differences on the interior pixels: dx = P4 - P6, dy = P2 - P8,
    so the output arrays are (H-2, W-2). Negative arctan2 angles are shifted
    by pi before converting to degrees.
    """
    dx = im[1:-1, :-2] - im[1:-1, 2:]
    dy = im[:-2, 1:-1] - im[2:, 1:-1]
    theta = np.arctan2(dy, dx)
    # Fold [-pi, 0) into [0, pi): gradient orientation is direction-agnostic.
    theta[theta < 0] += math.pi
    angles = np.degrees(theta)
    magnitudes = np.sqrt(dx ** 2 + dy ** 2)
    return angles, magnitudes
'''
GENERATE_HISTOGRAM Given matrices of angles and magnitudes of the image
gradient, generate the histogram of angles
Arguments:
angles - ndarray of size (M, N) containing gradient angles in degrees
magnitudes - ndarray of size (M, N) containing gradient magnitudes
nbins - the number of bins that you want to bin the histogram into
Returns:
histogram - an ndarray of size (nbins,) containing the distribution
of gradient angles.
This method should be implemented as follows:
1)Each histogram will bin the angle from 0 to 180 degrees. The number of bins
will dictate what angles fall into what bin (i.e. if nbins=9, then first bin
will contain the votes of angles close to 10, the second bin will contain those
close to 30, etc).
2) To create these histogram, iterate over the gradient grids, putting each
gradient into its respective bins. To do this properly, we interpolate and
weight the voting by both its magnitude and how close it is to the average
angle of the two bins closest to the angle of the gradient. For example, if we
have nbins = 9 and receive angle of 20 degrees with magnitude 1, then we the
vote contribution to the histogram weights equally with the first and second bins
(since its closest to both 10 and 30 degrees). If instead, we recieve angle of
25 degrees with magnitude 2, then it is weighted 25% in the first bin and 75%
in second bin, but with twice the voting power.
Mathematically, if you have an angle, magnitude, the center_angle1 of the lower
bin center_angle2 of the higher bin, then:
histogram[bin1] += magnitude * |angle - center_angle2| / (180 / nbins)
histogram[bin2] += magnitude * |angle - center_angle1| / (180 / nbins)
Notice how that we're weighting by the distance to the opposite center. The
further the angle is from one center means it is closer to the opposite center
(and should be weighted more).
One special case you will have to take care of is when the angle is near
180 degrees or 0 degrees. It may be that the two nearest bins are the first and
last bins respectively.
'''
def generate_histogram(angles, magnitudes, nbins = 9):
    '''
    Bin gradient angles (degrees, [0, 180)) into an orientation histogram.

    Each gradient's vote is split linearly between the two nearest bin
    centers (centers at 90/nbins, 3*90/nbins, ... degrees) and weighted by
    its magnitude, with wrap-around between the last and first bins since
    0 and 180 degrees describe the same orientation.

    Arguments:
        angles - (M, N) ndarray of gradient angles in degrees
        magnitudes - (M, N) ndarray of gradient magnitudes
        nbins - number of histogram bins

    Returns:
        histogram - (nbins,) ndarray of interpolated, magnitude-weighted votes
    '''
    histogram = np.zeros(nbins)
    bin_size = 180.0 / nbins
    half_bin = bin_size / 2  # offset of the first bin center from 0 degrees
    # iterate over the pixels (range, not Python-2 xrange)
    for h in range(angles.shape[0]):
        for w in range(angles.shape[1]):
            ang = angles[h, w]
            mag = magnitudes[h, w]
            if ang >= 180:
                ang = ang - 180
            # Index of the bin whose center lies at or below `ang`.
            # (The previous `int(ang / bin_size) - 1` mis-assigned angles in
            # the upper half of every bin, producing vote weights that
            # summed to more than 1.)
            lower_idx = int(np.floor((ang - half_bin) / bin_size))
            upper_idx = lower_idx + 1
            lower_ang = lower_idx * bin_size + half_bin
            upper_ang = upper_idx * bin_size + half_bin
            # Weight each bin by the distance to the *opposite* center, so
            # the two weights always sum to exactly 1.
            lower_percent = abs(ang - upper_ang) / bin_size
            upper_percent = abs(ang - lower_ang) / bin_size
            # Wrap-around: the neighbor of the last bin is the first bin.
            if upper_idx >= nbins:
                upper_idx = 0
            if lower_idx < 0:
                lower_idx = nbins - 1
            histogram[lower_idx] += lower_percent * mag
            histogram[upper_idx] += upper_percent * mag
    return histogram
'''
COMPUTE_HOG_FEATURES Computes the histogram of gradients features
Arguments:
im - the image matrix
pixels_in_cell - each cell will be of size (pixels_in_cell, pixels_in_cell)
pixels
cells_in_block - each block will be of size (cells_in_block, cells_in_block)
cells
nbins - number of histogram bins
Returns:
features - the hog features of the image represented as an ndarray of size
(H_blocks, W_blocks, cells_in_block * cells_in_block * nbins), where
H_blocks is the number of blocks that fit height-wise
W_blocks is the number of blocks that fit width-wise
Generating the HoG features can be done as follows:
1) Compute the gradient for the image, generating angles and magnitudes
2) Define a cell, which is a grid of (pixels_in_cell, pixels_in_cell) pixels.
Also, define a block, which is a grid of (cells_in_block, cells_in_block) cells.
This means each block is a grid of side length pixels_in_cell * cell_in_block
pixels.
3) Pass a sliding window over the image, with the window size being the size of
a block. The stride of the sliding window should be half the block size, (50%
overlap). Each cell in each block will store a histogram of the gradients in
that cell. Consequently, there will be cells_in_block * cells_in_block
histograms in each block. This means that each block feature will initially
represented as a (cells_in_block, cells_in_block, nbins) ndarray, that can
reshaped into a (cells_in_block * cells_in_block *nbins,) ndarray. Make sure to
normalize such that the norm of this flattened block feature is 1.
4) The overall hog feature that you return will be a grid of all these flattened
block features.
Note: The final overall feature ndarray can be flattened if you want to use to
train a classifier or use it as a feature vector.
'''
def compute_hog_features(im, pixels_in_cell, cells_in_block, nbins):
    '''
    Compute histogram-of-gradients (HoG) features for an image.

    Arguments:
        im - grayscale image as an (H, W) ndarray
        pixels_in_cell - side length of a cell, in pixels
        cells_in_block - side length of a block, in cells
        nbins - number of orientation-histogram bins

    Returns:
        features - ndarray of shape
            (H_blocks, W_blocks, cells_in_block * cells_in_block * nbins),
            where blocks slide with 50% overlap and each flattened block of
            per-cell histograms is (approximately) L2-normalized.
    '''
    # The gradient image loses a 1-pixel border on every side.
    height = im.shape[0] - 2
    width = im.shape[1] - 2
    angles, magnitudes = compute_gradient(im)
    block_pixels = cells_in_block * pixels_in_cell
    # Integer stride: Python 3's range() rejects float steps (the previous
    # `block_pixels / 2` broke here); block sizes are even in practice, so
    # floor division preserves the intended 50% overlap.
    stride = block_pixels // 2
    features = np.zeros((int(math.floor(height / stride)) - 1,
                         int(math.floor(width / stride)) - 1,
                         nbins * cells_in_block * cells_in_block))
    # Slide a block-sized window over the gradient image, 50% overlap.
    for w in range(0, width - block_pixels, stride):
        for h in range(0, height - block_pixels, stride):
            block_features = np.zeros((cells_in_block, cells_in_block, nbins))
            block_magnitude = magnitudes[h:h + block_pixels, w:w + block_pixels]
            block_angle = angles[h:h + block_pixels, w:w + block_pixels]
            # One orientation histogram per cell in the block.
            for i in range(cells_in_block):
                for j in range(cells_in_block):
                    cell_mag = block_magnitude[i * pixels_in_cell:(i + 1) * pixels_in_cell,
                                               j * pixels_in_cell:(j + 1) * pixels_in_cell]
                    cell_ang = block_angle[i * pixels_in_cell:(i + 1) * pixels_in_cell,
                                           j * pixels_in_cell:(j + 1) * pixels_in_cell]
                    block_features[i, j, :] = generate_histogram(cell_ang, cell_mag, nbins)
            block_features = block_features.flatten()
            # L2 normalization; the small constant avoids division by zero.
            block_features = block_features / np.sqrt(np.linalg.norm(block_features) ** 2 + .01)
            # h and w are exact multiples of stride, so indices are exact.
            features[h // stride, w // stride, :] = block_features
    return features
# Displays the HoG features next to the original image
def plot_img_with_bbox(im, bbox, title_text = None):
    """Display a grayscale image with red bounding boxes overlaid.

    `bbox` is an (N, >=4) array whose first four columns are
    (x, y, width, height); `title_text`, if given, becomes the plot title.
    """
    figure = plt.figure()
    axis = figure.add_subplot(111)
    for row in bbox:
        rect = patches.Rectangle(
            (row[0], row[1]),
            row[2],
            row[3],
            fill=False,
            edgecolor='red'
        )
        axis.add_patch(rect)
    plt.imshow(im, 'gray')
    if title_text is not None:
        plt.title(title_text)
| [
"matplotlib.pyplot.title",
"numpy.arctan2",
"numpy.ceil",
"numpy.degrees",
"matplotlib.patches.Rectangle",
"math.ceil",
"matplotlib.pyplot.imshow",
"imageio.imread",
"numpy.floor",
"numpy.zeros",
"math.floor",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.random.rand",
"os.path... | [((1537, 1562), 'numpy.zeros', 'np.zeros', (['(num_images, D)'], {}), '((num_images, D))\n', (1545, 1562), True, 'import numpy as np\n'), ((3575, 3621), 'numpy.zeros', 'np.zeros', (['(num_images * num_sample_per_img, D)'], {}), '((num_images * num_sample_per_img, D))\n', (3583, 3621), True, 'import numpy as np\n'), ((5152, 5176), 'numpy.zeros', 'np.zeros', (['(H - 2, W - 2)'], {}), '((H - 2, W - 2))\n', (5160, 5176), True, 'import numpy as np\n'), ((5185, 5209), 'numpy.zeros', 'np.zeros', (['(H - 2, W - 2)'], {}), '((H - 2, W - 2))\n', (5193, 5209), True, 'import numpy as np\n'), ((5303, 5327), 'numpy.arctan2', 'np.arctan2', (['ygrad', 'xgrad'], {}), '(ygrad, xgrad)\n', (5313, 5327), True, 'import numpy as np\n'), ((5375, 5393), 'numpy.degrees', 'np.degrees', (['angles'], {}), '(angles)\n', (5385, 5393), True, 'import numpy as np\n'), ((5411, 5443), 'numpy.sqrt', 'np.sqrt', (['(xgrad ** 2 + ygrad ** 2)'], {}), '(xgrad ** 2 + ygrad ** 2)\n', (5418, 5443), True, 'import numpy as np\n'), ((7621, 7636), 'numpy.zeros', 'np.zeros', (['nbins'], {}), '(nbins)\n', (7629, 7636), True, 'import numpy as np\n'), ((12482, 12494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12492, 12494), True, 'import matplotlib.pyplot as plt\n'), ((12795, 12817), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im', '"""gray"""'], {}), "(im, 'gray')\n", (12805, 12817), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1171), 'os.path.join', 'os.path.join', (['train_path_pos', 'f'], {}), '(train_path_pos, f)\n', (1152, 1171), False, 'import os\n'), ((1609, 1631), 'imageio.imread', 'imread', (['image_files[i]'], {}), '(image_files[i])\n', (1615, 1631), False, 'from imageio import imread\n'), ((3079, 3113), 'os.path.join', 'os.path.join', (['non_face_scn_path', 'f'], {}), '(non_face_scn_path, f)\n', (3091, 3113), False, 'import os\n'), ((3239, 3278), 'numpy.ceil', 'np.ceil', (['(num_samples * 1.0 / num_images)'], {}), '(num_samples * 1.0 / num_images)\n', (3246, 3278), 
True, 'import numpy as np\n'), ((12857, 12878), 'matplotlib.pyplot.title', 'plt.title', (['title_text'], {}), '(title_text)\n', (12866, 12878), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1207), 'os.listdir', 'os.listdir', (['train_path_pos'], {}), '(train_path_pos)\n', (1191, 1207), False, 'import os\n'), ((3123, 3152), 'os.listdir', 'os.listdir', (['non_face_scn_path'], {}), '(non_face_scn_path)\n', (3133, 3152), False, 'import os\n'), ((11117, 11166), 'numpy.zeros', 'np.zeros', (['(cells_in_block, cells_in_block, nbins)'], {}), '((cells_in_block, cells_in_block, nbins))\n', (11125, 11166), True, 'import numpy as np\n'), ((12594, 12695), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(bbox[i, 0], bbox[i, 1])', 'bbox[i, 2]', 'bbox[i, 3]'], {'fill': '(False)', 'edgecolor': '"""red"""'}), "((bbox[i, 0], bbox[i, 1]), bbox[i, 2], bbox[i, 3], fill=\n False, edgecolor='red')\n", (12611, 12695), True, 'import matplotlib.patches as patches\n'), ((1336, 1391), 'numpy.floor', 'np.floor', (['((window_size[0] - 2) / (total_block_size / 2))'], {}), '((window_size[0] - 2) / (total_block_size / 2))\n', (1344, 1391), True, 'import numpy as np\n'), ((1400, 1455), 'numpy.floor', 'np.floor', (['((window_size[1] - 2) / (total_block_size / 2))'], {}), '((window_size[1] - 2) / (total_block_size / 2))\n', (1408, 1455), True, 'import numpy as np\n'), ((3350, 3405), 'numpy.floor', 'np.floor', (['((window_size[0] - 2) / (total_block_size / 2))'], {}), '((window_size[0] - 2) / (total_block_size / 2))\n', (3358, 3405), True, 'import numpy as np\n'), ((3416, 3471), 'numpy.floor', 'np.floor', (['((window_size[1] - 2) / (total_block_size / 2))'], {}), '((window_size[1] - 2) / (total_block_size / 2))\n', (3424, 3471), True, 'import numpy as np\n'), ((3676, 3698), 'imageio.imread', 'imread', (['image_files[i]'], {}), '(image_files[i])\n', (3682, 3698), False, 'from imageio import imread\n'), ((10784, 10811), 'math.floor', 'math.floor', (['(height / stride)'], {}), '(height / 
stride)\n', (10794, 10811), False, 'import math\n'), ((10831, 10857), 'math.floor', 'math.floor', (['(width / stride)'], {}), '(width / stride)\n', (10841, 10857), False, 'import math\n'), ((3835, 3851), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3849, 3851), True, 'import numpy as np\n'), ((3918, 3934), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3932, 3934), True, 'import numpy as np\n'), ((12254, 12275), 'math.ceil', 'math.ceil', (['(h / stride)'], {}), '(h / stride)\n', (12263, 12275), False, 'import math\n'), ((12298, 12319), 'math.ceil', 'math.ceil', (['(w / stride)'], {}), '(w / stride)\n', (12307, 12319), False, 'import math\n'), ((12186, 12216), 'numpy.linalg.norm', 'np.linalg.norm', (['block_features'], {}), '(block_features)\n', (12200, 12216), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 18:16:51 2021
@author: Enrique
"""
'''
Several tests for the graphicality of a degree sequence.
These are naive implementations. One can use instead the
functions of the NetworkX library.
'''
import numpy as np
def sequence_is_even(deg_seq):
    """Return True iff the degrees in ``deg_seq`` sum to an even number.

    Every graphical degree sequence must have an even degree sum
    (handshaking lemma).
    """
    return np.sum(deg_seq) % 2 == 0
def order_seq_descending(deg_seq):
    """Sort the numpy degree sequence ``deg_seq`` in place, descending.

    Writes the descending-sorted values back through the array's own
    storage; returns None like the stdlib in-place sorts.
    """
    deg_seq[:] = np.sort(deg_seq)[::-1]
    return None
def erdos_gallai(deg_seq):
    """Erdos-Gallai graphicality test (naive loop implementation).

    Returns True iff ``deg_seq`` is the degree sequence of some simple
    undirected graph. Sorts ``deg_seq`` in place (descending) as a side
    effect.
    """
    # A graphical sequence must have an even degree sum.
    if not sequence_is_even(deg_seq):
        return False
    order_seq_descending(deg_seq)
    partial_sums = np.cumsum(deg_seq)
    n = len(deg_seq)
    # Check the Erdos-Gallai inequality for every prefix length k (1-based):
    #   sum_{i<=k} d_i  <=  k(k-1) + sum_{i>k} min(k, d_i)
    for k in range(1, n + 1):
        rhs = k * (k - 1) + np.sum(np.minimum(k, deg_seq[k:]))
        if partial_sums[k - 1] > rhs:
            return False
    return True
def erdos_gallai_vec(deg_seq):
    """Erdos-Gallai graphicality test, fully vectorized (no Python loops).

    Returns True iff ``deg_seq`` is the degree sequence of some simple
    undirected graph. Sorts ``deg_seq`` in place (descending) as a side
    effect.
    """
    # A graphical sequence must have an even degree sum.
    if not sequence_is_even(deg_seq):
        return False
    order_seq_descending(deg_seq)
    n = len(deg_seq)
    k = np.arange(1, n + 1)  # 1-based prefix lengths
    lhs = np.cumsum(deg_seq)
    # Row k of the upper triangle holds min(k, d_i) for the tail i > k.
    clipped_tail = np.triu(np.minimum(deg_seq, k[:, np.newaxis]), k=1)
    rhs = k * (k - 1) + clipped_tail.sum(axis=1)
    # Graphical iff the Erdos-Gallai inequality holds for every k.
    return bool(np.all(lhs <= rhs))
if __name__ == '__main__':
    import networkx as nx

    # Example sequences (the same four cases for every implementation).
    test_sequences = [
        np.array([4, 5, 6, 7, 2], dtype=int),
        np.array([], dtype=int),                 # Empty graph
        np.array([3, 2, 2, 2, 1], dtype=int),    # B and D example
        np.array([7, 8, 5, 1, 1, 2, 8, 10, 4, 2, 4, 5, 3, 6, 7, 3, 2, 7, 6,
                  1, 2, 9, 6, 1, 3, 4, 6, 3, 3, 3, 2, 4, 4], dtype=int),
    ]
    # test Erdos-Gallai
    print('Naive implementation (loops)')
    for seq in test_sequences:
        print(erdos_gallai(seq))
    print('Naive implementation (vectorized)')
    for seq in test_sequences:
        print(erdos_gallai_vec(seq))
    print('Networkx implementation O(n)')
    for seq in test_sequences:
        print(nx.is_graphical(seq))
    # naive vectorized on seq4: 63.5 microsecs
    # networkx vectorized on seq4: 59.8 microsecs
| [
"numpy.minimum",
"numpy.sum",
"numpy.cumsum",
"numpy.array",
"numpy.arange",
"networkx.is_graphical",
"numpy.all"
] | [((429, 444), 'numpy.sum', 'np.sum', (['deg_seq'], {}), '(deg_seq)\n', (435, 444), True, 'import numpy as np\n'), ((1058, 1076), 'numpy.cumsum', 'np.cumsum', (['deg_seq'], {}), '(deg_seq)\n', (1067, 1076), True, 'import numpy as np\n'), ((1110, 1122), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1119, 1122), True, 'import numpy as np\n'), ((1795, 1813), 'numpy.cumsum', 'np.cumsum', (['deg_seq'], {}), '(deg_seq)\n', (1804, 1813), True, 'import numpy as np\n'), ((2011, 2034), 'numpy.sum', 'np.sum', (['min_k_d'], {'axis': '(1)'}), '(min_k_d, axis=1)\n', (2017, 2034), True, 'import numpy as np\n'), ((2155, 2180), 'numpy.all', 'np.all', (['erdos_gallai_cond'], {}), '(erdos_gallai_cond)\n', (2161, 2180), True, 'import numpy as np\n'), ((2313, 2349), 'numpy.array', 'np.array', (['[4, 5, 6, 7, 2]'], {'dtype': 'int'}), '([4, 5, 6, 7, 2], dtype=int)\n', (2321, 2349), True, 'import numpy as np\n'), ((2361, 2384), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (2369, 2384), True, 'import numpy as np\n'), ((2410, 2446), 'numpy.array', 'np.array', (['[3, 2, 2, 2, 1]'], {'dtype': 'int'}), '([3, 2, 2, 2, 1], dtype=int)\n', (2418, 2446), True, 'import numpy as np\n'), ((2477, 2602), 'numpy.array', 'np.array', (['[7, 8, 5, 1, 1, 2, 8, 10, 4, 2, 4, 5, 3, 6, 7, 3, 2, 7, 6, 1, 2, 9, 6, 1, 3,\n 4, 6, 3, 3, 3, 2, 4, 4]'], {'dtype': 'int'}), '([7, 8, 5, 1, 1, 2, 8, 10, 4, 2, 4, 5, 3, 6, 7, 3, 2, 7, 6, 1, 2, 9,\n 6, 1, 3, 4, 6, 3, 3, 3, 2, 4, 4], dtype=int)\n', (2485, 2602), True, 'import numpy as np\n'), ((1847, 1859), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1856, 1859), True, 'import numpy as np\n'), ((1946, 1987), 'numpy.minimum', 'np.minimum', (['deg_seq', 'k_vec[:, np.newaxis]'], {}), '(deg_seq, k_vec[:, np.newaxis])\n', (1956, 1987), True, 'import numpy as np\n'), ((3015, 3036), 'networkx.is_graphical', 'nx.is_graphical', (['seq1'], {}), '(seq1)\n', (3030, 3036), True, 'import networkx as nx\n'), ((3048, 3069), 
'networkx.is_graphical', 'nx.is_graphical', (['seq2'], {}), '(seq2)\n', (3063, 3069), True, 'import networkx as nx\n'), ((3081, 3102), 'networkx.is_graphical', 'nx.is_graphical', (['seq3'], {}), '(seq3)\n', (3096, 3102), True, 'import networkx as nx\n'), ((3114, 3135), 'networkx.is_graphical', 'nx.is_graphical', (['seq4'], {}), '(seq4)\n', (3129, 3135), True, 'import networkx as nx\n'), ((1216, 1250), 'numpy.minimum', 'np.minimum', (['(k + 1)', 'deg_seq[k + 1:]'], {}), '(k + 1, deg_seq[k + 1:])\n', (1226, 1250), True, 'import numpy as np\n')] |
import robotoc
import pinocchio
from pinocchio.robot_wrapper import RobotWrapper
from os.path import abspath, dirname, join
import numpy as np
import math
import time
class TrajectoryViewer:
    def __init__(self, path_to_urdf, path_to_pkg=None,
                 base_joint_type=robotoc.BaseJointType.FixedBase,
                 viewer_type='gepetto'):
        """Build a Pinocchio robot model from a URDF and set up a 3D viewer.

        Args:
            path_to_urdf: Path to the robot's URDF file.
            path_to_pkg: Root directory of the mesh packages. Defaults to
                two levels above the URDF's directory.
            base_joint_type: robotoc.BaseJointType; a floating base adds a
                free-flyer joint to the Pinocchio model.
            viewer_type: Visualization backend, 'gepetto' or 'meshcat'.
        """
        self.path_to_urdf = abspath(path_to_urdf)
        if path_to_pkg is None:
            # Assumes the conventional <pkg>/.../urdf/<file> layout, so the
            # package root is two directories above the URDF.
            path_to_pkg = join(dirname(self.path_to_urdf), '../..')
        if base_joint_type == robotoc.BaseJointType.FloatingBase:
            # Floating base: model the base as an unactuated free-flyer joint.
            self.robot= RobotWrapper.BuildFromURDF(self.path_to_urdf, path_to_pkg,
                                             pinocchio.JointModelFreeFlyer())
        else:
            self.robot= RobotWrapper.BuildFromURDF(self.path_to_urdf, path_to_pkg)
        self.path_to_urdf = path_to_urdf
        self.base_joint_type = base_joint_type
        self.play_speed = 1.0
        # Visual parameters for drawn contact-force arrows (yellow).
        self.force_radius = 0.015
        self.force_length = 0.5
        self.force_scale = 0.75
        self.force_color = [1.0, 1.0, 0.0, 1.0]
        # Contact display stays off until set_contact_info() is called.
        self.display_contact = False
        self.contact_frames = []
        self.mu = 0.
        # Visual parameters for friction cones (translucent blue).
        self.cone_scale = [0.15, 0.15, 0.15]
        self.cone_color = [0.3, 0.3, 0.7, 0.7]
        self.x_axis = np.array([1.0, 0.0, 0.0])
        self.viewer_type = viewer_type
        if viewer_type == 'gepetto':
            import subprocess, os
            # Launch gepetto-gui in the background if it is not running yet.
            launched = subprocess.getstatusoutput("ps aux |grep 'gepetto-gui'|grep -v 'grep'|wc -l")
            if int(launched[1]) == 0:
                os.system('gepetto-gui &')
            time.sleep(2)  # give the GUI time to start before connecting
            from pinocchio.visualize import GepettoVisualizer
            self.viewer = GepettoVisualizer(self.robot.model,
                                          self.robot.collision_model,
                                          self.robot.visual_model)
            self.window_name = 'robotoc.TrajectoryViewer'
            # Default camera: position [x, y, z] plus orientation quaternion.
            self.camera_pos = [2.2, -3.5, 1.13]
            self.camera_angle = [0.60612, 0.166663, 0.19261, 0.753487]
        elif viewer_type == 'meshcat':
            from pinocchio.visualize import MeshcatVisualizer
            import meshcat.transformations
            self.viewer = MeshcatVisualizer(self.robot.model,
                                          self.robot.collision_model,
                                          self.robot.visual_model)
            self.camera_tf = meshcat.transformations.translation_matrix([0.8, -2.5, -0.2])
            self.zoom = 3.0
        else:
            print('Please choose viewer_type from "gepetto" or "meshcat"!')
            # NOTE(review): this *returns* the NotImplementedError instance
            # from __init__ instead of raising it; returning a non-None
            # value from __init__ makes Python raise a TypeError at
            # construction — confirm whether `raise` was intended.
            return NotImplementedError()
def set_contact_info(self, contact_frames, mu):
self.display_contact = True
self.contact_frames = contact_frames
self.mu = mu
def set_camera_transform_gepetto(self, camera_pos=None, camera_angle=None):
if camera_pos is not None:
self.camera_pos = camera_pos
if camera_angle is not None:
self.camera_angle = camera_angle
def set_camera_transform_meshcat(self, camera_tf_vec=None, zoom=None):
import meshcat.transformations
if camera_tf_vec is not None:
self.camera_tf = meshcat.transformations.translation_matrix(camera_tf_vec)
if zoom is not None:
self.zoom = zoom
def display(self, dt, q_traj, f_traj=None):
if self.viewer_type == 'gepetto':
self.display_gepetto(dt, q_traj, f_traj)
elif self.viewer_type == 'meshcat':
self.display_meshcat(dt, q_traj)
    def display_gepetto(self, dt, q_traj, f_traj):
        """Play back a trajectory in the gepetto-gui viewer.

        Args:
            dt: Time step as a float (uniform), or a sequence of per-interval
                time steps of length ``len(q_traj) - 1``.
            q_traj: Sequence of robot configurations to display.
            f_traj: Optional sequence of stacked per-contact 3D forces (each
                element holds 3 values per contact frame, sliced as
                ``f[3*i:3*(i+1)]``). When given and ``display_contact`` is
                set, force arrows and friction cones are drawn.
        """
        # A scalar dt is expanded to one equal step per configuration interval.
        if isinstance(dt, float):
            time_steps = dt * np.ones(len(q_traj)-1)
            dt = time_steps
        assert len(q_traj)-1 == len(dt)
        if f_traj is not None:
            assert len(dt) == len(f_traj)
        self.robot.setVisualizer(self.viewer)
        self.robot.initViewer(windowName=self.window_name, loadModel=False)
        self.robot.loadViewerModel(rootNodeName=self.window_name)
        gui = self.robot.viewer.gui
        # init the window
        window_id = gui.getWindowID(self.window_name)
        gui.setBackgroundColor1(window_id, [1., 1., 1., 1.])
        gui.setBackgroundColor2(window_id, [1., 1., 1., 1.])
        # init the floor
        floor_name = 'world/floor'
        gui.addFloor(floor_name)
        gui.setColor(floor_name, [0.7, 0.7, 0.7, 1.0])
        gui.setLightingMode(floor_name, 'OFF')
        # init contact forces and friction cones
        if f_traj is not None and self.display_contact:
            # create cones
            # The cone's lateral extent per unit normal force is mu in x and y.
            self.robot.viewer.gui.createGroup('world/friction_cones')
            cone = [self.mu, self.mu, 1.0]
            for i in range(len(self.contact_frames)):
                gui.createGroup('world/friction_cones/friction_cone_'+str(i))
                # Approximate the cone by a 4-sided triangle fan plus edge lines.
                gui.addCurve('world/friction_cones/friction_cone_'+str(i)+'/vertex',
                             [[0., 0., 0.],
                              [ cone[0],  cone[1], cone[2]],
                              [-cone[0],  cone[1], cone[2]],
                              [-cone[0], -cone[1], cone[2]],
                              [ cone[0], -cone[1], cone[2]],
                              [ cone[0],  cone[1], cone[2]]], self.cone_color)
                gui.setCurveMode('world/friction_cones/friction_cone_'+str(i)+'/vertex', 'TRIANGLE_FAN')
                gui.addLine('world/friction_cones/friction_cone_'+str(i)+'/line1',
                            [0., 0., 0.], [ cone[0],  cone[1], cone[2]], self.cone_color)
                gui.addLine('world/friction_cones/friction_cone_'+str(i)+'/line2',
                            [0., 0., 0.], [-cone[0],  cone[1], cone[2]], self.cone_color)
                gui.addLine('world/friction_cones/friction_cone_'+str(i)+'/line3',
                            [0., 0., 0.], [-cone[0], -cone[1], cone[2]], self.cone_color)
                gui.addLine('world/friction_cones/friction_cone_'+str(i)+'/line4',
                            [0., 0., 0.], [ cone[0], -cone[1], cone[2]], self.cone_color)
                gui.setScale('world/friction_cones/friction_cone_'+str(i)+'/vertex', self.cone_scale)
                gui.setScale('world/friction_cones/friction_cone_'+str(i)+'/line1', self.cone_scale)
                gui.setScale('world/friction_cones/friction_cone_'+str(i)+'/line2', self.cone_scale)
                gui.setScale('world/friction_cones/friction_cone_'+str(i)+'/line3', self.cone_scale)
                gui.setScale('world/friction_cones/friction_cone_'+str(i)+'/line4', self.cone_scale)
                # Cone surface is fully transparent; only the edge lines show.
                gui.setFloatProperty('world/friction_cones/friction_cone_'+str(i)+'/vertex', 'Alpha', 0.)
                gui.setFloatProperty('world/friction_cones/friction_cone_'+str(i)+'/line1', 'Alpha', 0.2)
                gui.setFloatProperty('world/friction_cones/friction_cone_'+str(i)+'/line2', 'Alpha', 0.2)
                gui.setFloatProperty('world/friction_cones/friction_cone_'+str(i)+'/line3', 'Alpha', 0.2)
                gui.setFloatProperty('world/friction_cones/friction_cone_'+str(i)+'/line4', 'Alpha', 0.2)
            # create forces
            gui.createGroup('world/contact_forces')
            for i in range(len(self.contact_frames)):
                gui.addArrow('world/contact_forces/contact_force_'+str(i),
                             self.force_radius, self.force_length, self.force_color)
                gui.setFloatProperty('world/contact_forces/contact_force_'+str(i), 'Alpha', 1.0)
                gui.setVisibility('world/contact_forces/contact_force_'+str(i), 'ALWAYS_ON_TOP')
        # set camera
        # NOTE(review): extend() mutates self.camera_pos in place, so repeated
        # calls keep appending the angle to the stored list — TODO confirm
        # whether a copy (list(self.camera_pos)) was intended.
        camera = self.camera_pos
        camera.extend(self.camera_angle)
        gui.setCameraTransform(self.robot.viz.windowID, camera)
        # display
        if f_traj is not None:
            # Build a robotoc robot model to evaluate frame placements and
            # total weight for force-arrow scaling.
            contact_types = [robotoc.ContactType.PointContact for frame in self.contact_frames]
            robot = robotoc.Robot(self.path_to_urdf, self.base_joint_type,
                                  self.contact_frames, contact_types, [0, 0])
            for q, f, dts in zip(q_traj, f_traj, dt):
                robot.forward_kinematics(q)
                for i in range(len(self.contact_frames)):
                    # Per-contact 3D force slice.
                    fi = f[3*i:3*(i+1)]
                    # Arrow length scales with sqrt(|f| / total weight).
                    f_scale = [math.sqrt(self.force_scale*np.linalg.norm(fi)/robot.total_weight()), 1.0, 1.0]
                    gui.setVector3Property('world/contact_forces/contact_force_'+str(i), "Scale", f_scale)
                    fpos = robot.frame_position(self.contact_frames[i])
                    # Orient the arrow (modeled along x) toward the force vector.
                    quat = pinocchio.Quaternion(self.x_axis, fi).normalized()
                    pose = np.concatenate((fpos, np.array([quat.x, quat.y, quat.z, quat.w])), axis=None).tolist()
                    gui.applyConfiguration('world/contact_forces/contact_force_'+str(i), pose)
                    if self.mu > 0.0:
                        # Friction cone stays axis-aligned (identity rotation).
                        pose = np.concatenate((fpos, np.array([0., 0., 0., 1.])), axis=None).tolist()
                        gui.applyConfiguration('world/friction_cones/friction_cone_'+str(i), pose)
                    # Show markers only for active contacts with friction.
                    if np.linalg.norm(fi) > 0.0 and self.mu > 0.0:
                        gui.setVisibility('world/friction_cones/friction_cone_'+str(i), 'ON')
                        gui.setVisibility('world/contact_forces/contact_force_'+str(i), 'ON')
                    else:
                        gui.setVisibility('world/friction_cones/friction_cone_'+str(i), 'OFF')
                        gui.setVisibility('world/contact_forces/contact_force_'+str(i), 'OFF')
                gui.refresh()
                self.robot.display(q)
                # Scale wall-clock playback by the configured speed.
                sleep_time = dts / self.play_speed
                time.sleep(sleep_time)
            self.robot.display(q_traj[-1])
        else:
            for q, dts in zip(q_traj, dt):
                self.robot.display(q)
                sleep_time = dts / self.play_speed
                time.sleep(sleep_time)
            self.robot.display(q_traj[-1])
def display_meshcat(self, dt, q_traj, open=True):
if isinstance(dt, float):
time_steps = dt * np.ones(len(q_traj)-1)
dt = time_steps
assert len(q_traj)-1 == len(dt)
self.robot.setVisualizer(self.viewer)
self.robot.initViewer(open=open)
self.robot.loadViewerModel(rootNodeName='robotoc.TrajectoryViewer')
self.viewer.viewer["/Cameras/default"].set_transform(self.camera_tf)
self.viewer.viewer["/Cameras/default/rotated/<object>"].set_property("zoom", self.zoom)
self.viewer.viewer["/Background"].set_property("visible", True)
self.viewer.viewer["/Background"].set_property("top_color", [0.9, 0.9, 0.9])
self.viewer.viewer["/Background"].set_property("bottom_color", [0.9, 0.9, 0.9])
for q, dts in zip(q_traj, dt):
self.robot.display(q)
sleep_time = dts / self.play_speed
time.sleep(sleep_time)
self.robot.display(q_traj[-1]) | [
"os.path.abspath",
"pinocchio.JointModelFreeFlyer",
"robotoc.Robot",
"os.path.dirname",
"os.system",
"time.sleep",
"pinocchio.Quaternion",
"subprocess.getstatusoutput",
"numpy.array",
"pinocchio.visualize.MeshcatVisualizer",
"numpy.linalg.norm",
"pinocchio.visualize.GepettoVisualizer",
"pino... | [((384, 405), 'os.path.abspath', 'abspath', (['path_to_urdf'], {}), '(path_to_urdf)\n', (391, 405), False, 'from os.path import abspath, dirname, join\n'), ((1308, 1333), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1316, 1333), True, 'import numpy as np\n'), ((777, 835), 'pinocchio.robot_wrapper.RobotWrapper.BuildFromURDF', 'RobotWrapper.BuildFromURDF', (['self.path_to_urdf', 'path_to_pkg'], {}), '(self.path_to_urdf, path_to_pkg)\n', (803, 835), False, 'from pinocchio.robot_wrapper import RobotWrapper\n'), ((1468, 1545), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""ps aux |grep \'gepetto-gui\'|grep -v \'grep\'|wc -l"""'], {}), '("ps aux |grep \'gepetto-gui\'|grep -v \'grep\'|wc -l")\n', (1494, 1545), False, 'import subprocess, os\n'), ((1639, 1652), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1649, 1652), False, 'import time\n'), ((1741, 1834), 'pinocchio.visualize.GepettoVisualizer', 'GepettoVisualizer', (['self.robot.model', 'self.robot.collision_model', 'self.robot.visual_model'], {}), '(self.robot.model, self.robot.collision_model, self.robot.\n visual_model)\n', (1758, 1834), False, 'from pinocchio.visualize import GepettoVisualizer\n'), ((8017, 8119), 'robotoc.Robot', 'robotoc.Robot', (['self.path_to_urdf', 'self.base_joint_type', 'self.contact_frames', 'contact_types', '[0, 0]'], {}), '(self.path_to_urdf, self.base_joint_type, self.contact_frames,\n contact_types, [0, 0])\n', (8030, 8119), False, 'import robotoc\n'), ((10990, 11012), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (11000, 11012), False, 'import time\n'), ((469, 495), 'os.path.dirname', 'dirname', (['self.path_to_urdf'], {}), '(self.path_to_urdf)\n', (476, 495), False, 'from os.path import abspath, dirname, join\n'), ((706, 737), 'pinocchio.JointModelFreeFlyer', 'pinocchio.JointModelFreeFlyer', ([], {}), '()\n', (735, 737), False, 'import pinocchio\n'), ((1600, 1626), 'os.system', 'os.system', 
(['"""gepetto-gui &"""'], {}), "('gepetto-gui &')\n", (1609, 1626), False, 'import subprocess, os\n'), ((2269, 2362), 'pinocchio.visualize.MeshcatVisualizer', 'MeshcatVisualizer', (['self.robot.model', 'self.robot.collision_model', 'self.robot.visual_model'], {}), '(self.robot.model, self.robot.collision_model, self.robot.\n visual_model)\n', (2286, 2362), False, 'from pinocchio.visualize import MeshcatVisualizer\n'), ((9772, 9794), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (9782, 9794), False, 'import time\n'), ((10000, 10022), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (10010, 10022), False, 'import time\n'), ((8663, 8700), 'pinocchio.Quaternion', 'pinocchio.Quaternion', (['self.x_axis', 'fi'], {}), '(self.x_axis, fi)\n', (8683, 8700), False, 'import pinocchio\n'), ((9185, 9203), 'numpy.linalg.norm', 'np.linalg.norm', (['fi'], {}), '(fi)\n', (9199, 9203), True, 'import numpy as np\n'), ((8405, 8423), 'numpy.linalg.norm', 'np.linalg.norm', (['fi'], {}), '(fi)\n', (8419, 8423), True, 'import numpy as np\n'), ((8763, 8805), 'numpy.array', 'np.array', (['[quat.x, quat.y, quat.z, quat.w]'], {}), '([quat.x, quat.y, quat.z, quat.w])\n', (8771, 8805), True, 'import numpy as np\n'), ((9014, 9044), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0])\n', (9022, 9044), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.target_assigner."""
import numpy as np
import tensorflow as tf
from box_coders import keypoint_box_coder
from box_coders import mean_stddev_box_coder
from core import box_list
from core import region_similarity_calculator
from core import standard_fields as fields
from core import target_assigner as targetassigner
from matchers import argmax_matcher
from matchers import bipartite_matcher
from utils import test_case
class TargetAssignerTest(test_case.TestCase):
def test_assign_agnostic(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0]], dtype=np.float32)
anchor_stddevs = np.array(3 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_ignored_matches(self):
# Note: test is very similar to above. The third box matched with an IOU
# of 0.35, which is between the matched and unmatched threshold. This means
# That like above the expected classification targets are [1, 1, 0].
# Unlike above, the third target is ignored and therefore expected
# classification weights are [1, 1, 0].
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.3)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
anchor_stddevs = np.array(3 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 0]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_agnostic_with_keypoints(self):
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self):
# Note: test is very similar to above. The third box matched with an IOU
# of 0.35, which is between the matched and unmatched threshold. This means
# That like above the expected classification targets are [1, 1, 0].
# Unlike above, the third target is ignored and therefore expected
# classification weights are [1, 1, 0].
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder, unmatched_cls_target=None)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_multiclass(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
  def test_assign_multiclass_with_groundtruth_weights(self):
    """Per-groundtruth weights propagate to classification/regression weights."""
    def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
                 groundtruth_labels, groundtruth_weights):
      similarity_calc = region_similarity_calculator.IouSimilarity()
      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                            unmatched_threshold=0.5)
      box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
      unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
      target_assigner = targetassigner.TargetAssigner(
          similarity_calc, matcher, box_coder,
          unmatched_cls_target=unmatched_cls_target)
      anchors_boxlist = box_list.BoxList(anchor_means)
      anchors_boxlist.add_field('stddev', anchor_stddevs)
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
                                      groundtruth_labels,
                                      groundtruth_weights)
      # Only the weights are under test here; targets are discarded.
      (_, cls_weights, _, reg_weights, _) = result
      return (cls_weights, reg_weights)
    anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
                             [0.5, 0.5, 1.0, 0.8],
                             [0, 0.5, .5, 1.0],
                             [.75, 0, 1.0, .25]], dtype=np.float32)
    anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
    groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.9, 0.9],
                                        [.75, 0, .95, .27]], dtype=np.float32)
    groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 1, 0],
                                   [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
    groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)
    exp_cls_weights = [0.3, 0., 1, 0.5]  # background class gets weight of 1.
    exp_reg_weights = [0.3, 0., 0., 0.5]  # background class gets weight of 0.
    (cls_weights_out,
     reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
                                               groundtruth_box_corners,
                                               groundtruth_labels,
                                               groundtruth_weights])
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_assign_multidimensional_class_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([[0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, .5]]], np.float32)
exp_cls_targets = [[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, .5]]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_empty_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([0, 0, 0], tf.float32)
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
result = target_assigner.assign(anchors_boxlist, groundtruth_boxlist,
groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]],
dtype=np.float32)
anchor_stddevs = np.array(4 * [4 * [.1]], dtype=np.float32)
exp_cls_targets = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
exp_cls_weights = [1, 1, 1, 1]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_reg_weights = [0, 0, 0, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_box_corners,
groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]])
prior_stddevs = tf.constant(4 * [4 * [.1]])
priors = box_list.BoxList(prior_means)
priors.add_field('stddev', prior_stddevs)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.8],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], tf.float32)
with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
target_assigner.assign(priors, boxes, groundtruth_labels,
num_valid_rows=3)
  def test_raises_error_on_invalid_groundtruth_labels(self):
    """assign() must reject labels whose shape does not match the assigner's
    unmatched class target (here 1 label row for 3 groundtruth boxes)."""
    similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
    matcher = bipartite_matcher.GreedyBipartiteMatcher()
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
    unmatched_cls_target = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
    target_assigner = targetassigner.TargetAssigner(
        similarity_calc, matcher, box_coder,
        unmatched_cls_target=unmatched_cls_target)
    prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
    prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
    priors = box_list.BoxList(prior_means)
    priors.add_field('stddev', prior_stddevs)
    box_corners = [[0.0, 0.0, 0.5, 0.5],
                   [0.5, 0.5, 0.9, 0.9],
                   [.75, 0, .95, .27]]
    boxes = box_list.BoxList(tf.constant(box_corners))
    # Only one label row for three boxes: incompatible by construction.
    groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)
    with self.assertRaises(ValueError):
      target_assigner.assign(priors, boxes, groundtruth_labels,
                             num_valid_rows=3)
class BatchTargetAssignerTest(test_case.TestCase):
def _get_agnostic_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=None)
def _get_multi_class_target_assigner(self, num_classes):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant([1] + num_classes * [0], tf.float32)
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
def _get_multi_dimensional_target_assigner(self, target_dimensions):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_cls_target = tf.constant(np.zeros(target_dimensions),
tf.float32)
return targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder,
unmatched_cls_target=unmatched_cls_target)
  def test_batch_assign_targets(self):
    """Batched class-agnostic assignment over two images' groundtruth."""
    def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
                 groundtruth_boxlist2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      # Agnostic assignment: no class targets per image.
      gt_class_targets = [None, None]
      anchors_boxlist = box_list.BoxList(anchor_means)
      anchors_boxlist.add_field('stddev', anchor_stddevs)
      agnostic_target_assigner = self._get_agnostic_target_assigner()
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           agnostic_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets)
      return (cls_targets, cls_weights, reg_targets, reg_weights)
    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)
    anchor_stddevs = np.array([[.1, .1, .1, .1],
                               [.1, .1, .1, .1],
                               [.1, .1, .1, .1],
                               [.1, .1, .1, .1]], dtype=np.float32)
    # Expectations are per image (outer dim 2) and per anchor (inner dim 4).
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_cls_weights = [[1, 1, 1, 1],
                       [1, 1, 1, 1]]
    exp_cls_targets = [[[1], [0], [0], [0]],
                       [[0], [1], [1], [0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]
    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
                                               groundtruth_boxlist1,
                                               groundtruth_boxlist2])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2,
groundtruth_weights1, groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, gt_weights)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2,
groundtruth_weights1,
groundtruth_weights2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multidimensional_targets(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_boxlist1,
groundtruth_boxlist2, class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_dimensional_target_assigner(
target_dimensions=(2, 3))
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1, 1, 1],
[1, 1, 1, 1]]
exp_cls_targets = [[[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]]],
[[[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 0.]]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means, anchor_stddevs,
groundtruth_boxlist1,
groundtruth_boxlist2,
class_targets1,
class_targets2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, anchor_stddevs, groundtruth_box_corners,
gt_class_targets):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_targets_batch = [gt_class_targets]
anchors_boxlist = box_list.BoxList(anchor_means)
anchors_boxlist.add_field('stddev', anchor_stddevs)
multiclass_target_assigner = self._get_multi_class_target_assigner(
num_classes=3)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist,
gt_box_batch, gt_class_targets_batch)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
anchor_stddevs = np.array([[.1, .1, .1, .1],
[.1, .1, .1, .1]], dtype=np.float32)
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_cls_weights = [[1, 1]]
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
num_classes = 3
pad = 1
gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(
graph_fn, [anchor_means, anchor_stddevs, groundtruth_box_corners,
gt_class_targets])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
class CreateTargetAssignerTest(tf.test.TestCase):
  """Smoke tests for targetassigner.create_target_assigner."""

  def test_create_target_assigner(self):
    """Tests that the named constructor gives working target assigners.

    TODO: Make this test more general.
    """
    corners = [[0.0, 0.0, 1.0, 1.0]]
    groundtruth = box_list.BoxList(tf.constant(corners))

    priors = box_list.BoxList(tf.constant(corners))
    priors.add_field('stddev', tf.constant([[1.0, 1.0, 1.0, 1.0]]))
    multibox_ta = targetassigner.create_target_assigner(
        'Multibox', stage='proposal')
    multibox_ta.assign(priors, groundtruth)
    # No tests on output, as that may vary arbitrarily as new target assigners
    # are added. As long as it is constructed correctly and runs without
    # errors, tests on the individual assigners cover correctness of the
    # assignments.

    anchors = box_list.BoxList(tf.constant(corners))
    proposal_ta = targetassigner.create_target_assigner(
        'FasterRCNN', stage='proposal')
    proposal_ta.assign(anchors, groundtruth)

    fast_rcnn_ta = targetassigner.create_target_assigner('FastRCNN')
    fast_rcnn_ta.assign(anchors, groundtruth)

    detection_ta = targetassigner.create_target_assigner(
        'FasterRCNN', stage='detection')
    detection_ta.assign(anchors, groundtruth)

    # Unknown detector names must be rejected.
    with self.assertRaises(ValueError):
      targetassigner.create_target_assigner('InvalidDetector',
                                            stage='invalid_stage')
if __name__ == '__main__':
  # Discover and run all test cases in this module via the TF test runner.
  tf.test.main()
| [
"tensorflow.test.main",
"core.region_similarity_calculator.IouSimilarity",
"core.box_list.BoxList",
"core.region_similarity_calculator.NegSqDistSimilarity",
"box_coders.keypoint_box_coder.KeypointBoxCoder",
"core.target_assigner.create_target_assigner",
"numpy.zeros",
"tensorflow.constant",
"matcher... | [((43958, 43972), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (43970, 43972), True, 'import tensorflow as tf\n'), ((2114, 2210), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0]],\n dtype=np.float32)\n', (2122, 2210), True, 'import numpy as np\n'), ((2285, 2328), 'numpy.array', 'np.array', (['(3 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(3 * [4 * [0.1]], dtype=np.float32)\n', (2293, 2328), True, 'import numpy as np\n'), ((2358, 2430), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)\n', (2366, 2430), True, 'import numpy as np\n'), ((4686, 4784), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0.0, 0.5, 0.9, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0.0, 0.5, 0.9, 1.0]],\n dtype=np.float32)\n', (4694, 4784), True, 'import numpy as np\n'), ((4859, 4902), 'numpy.array', 'np.array', (['(3 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(3 * [4 * [0.1]], dtype=np.float32)\n', (4867, 4902), True, 'import numpy as np\n'), ((4932, 5004), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)\n', (4940, 5004), True, 'import numpy as np\n'), ((7015, 7113), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.9, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.9, 1.0]],\n dtype=np.float32)\n', (7023, 7113), True, 'import numpy as np\n'), ((7197, 7273), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32)\n', 
(7205, 7273), True, 'import numpy as np\n'), ((7381, 7560), 'numpy.array', 'np.array', (['[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[\n 0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]]'], {'dtype': 'np.float32'}), '([[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9,\n 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4\n ]]], dtype=np.float32)\n', (7389, 7560), True, 'import numpy as np\n'), ((10158, 10256), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.9, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.9, 1.0]],\n dtype=np.float32)\n', (10166, 10256), True, 'import numpy as np\n'), ((10340, 10416), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32)\n', (10348, 10416), True, 'import numpy as np\n'), ((10524, 10703), 'numpy.array', 'np.array', (['[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[\n 0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]]'], {'dtype': 'np.float32'}), '([[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9,\n 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4\n ]]], dtype=np.float32)\n', (10532, 10703), True, 'import numpy as np\n'), ((12963, 13082), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [0.75, 0, \n 1.0, 0.25]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [\n 0.75, 0, 1.0, 0.25]], dtype=np.float32)\n', (12971, 13082), True, 'import numpy as np\n'), ((13183, 13226), 'numpy.array', 'np.array', (['(4 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(4 * [4 * [0.1]], dtype=np.float32)\n', (13191, 13226), True, 'import numpy as np\n'), ((13256, 13356), 'numpy.array', 
'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]\n ], dtype=np.float32)\n', (13264, 13356), True, 'import numpy as np\n'), ((13454, 13556), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, \n 0]], dtype=np.float32)\n', (13462, 13556), True, 'import numpy as np\n'), ((15946, 16065), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [0.75, 0, \n 1.0, 0.25]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [\n 0.75, 0, 1.0, 0.25]], dtype=np.float32)\n', (15954, 16065), True, 'import numpy as np\n'), ((16166, 16209), 'numpy.array', 'np.array', (['(4 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(4 * [4 * [0.1]], dtype=np.float32)\n', (16174, 16209), True, 'import numpy as np\n'), ((16239, 16339), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]\n ], dtype=np.float32)\n', (16247, 16339), True, 'import numpy as np\n'), ((16437, 16539), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, \n 0]], dtype=np.float32)\n', (16445, 16539), True, 'import numpy as np\n'), ((16631, 16674), 'numpy.array', 'np.array', (['[0.3, 0.0, 0.5]'], {'dtype': 'np.float32'}), '([0.3, 0.0, 0.5], dtype=np.float32)\n', (16639, 16674), True, 'import numpy as np\n'), ((18398, 18517), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [0.75, 0, \n 1.0, 0.25]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 
0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [\n 0.75, 0, 1.0, 0.25]], dtype=np.float32)\n', (18406, 18517), True, 'import numpy as np\n'), ((18618, 18661), 'numpy.array', 'np.array', (['(4 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(4 * [4 * [0.1]], dtype=np.float32)\n', (18626, 18661), True, 'import numpy as np\n'), ((18691, 18791), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [0.75, 0, 0.95, 0.27]\n ], dtype=np.float32)\n', (18699, 18791), True, 'import numpy as np\n'), ((18890, 18968), 'numpy.array', 'np.array', (['[[[0, 1], [1, 0]], [[1, 0], [0, 1]], [[0, 1], [1, 0.5]]]', 'np.float32'], {}), '([[[0, 1], [1, 0]], [[1, 0], [0, 1]], [[0, 1], [1, 0.5]]], np.float32)\n', (18898, 18968), True, 'import numpy as np\n'), ((21288, 21322), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (21296, 21322), True, 'import numpy as np\n'), ((21348, 21382), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {'dtype': 'np.float32'}), '((0, 3), dtype=np.float32)\n', (21356, 21382), True, 'import numpy as np\n'), ((21402, 21521), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [0.75, 0, \n 1.0, 0.25]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [\n 0.75, 0, 1.0, 0.25]], dtype=np.float32)\n', (21410, 21521), True, 'import numpy as np\n'), ((21650, 21693), 'numpy.array', 'np.array', (['(4 * [4 * [0.1]])'], {'dtype': 'np.float32'}), '(4 * [4 * [0.1]], dtype=np.float32)\n', (21658, 21693), True, 'import numpy as np\n'), ((22882, 22932), 'core.region_similarity_calculator.NegSqDistSimilarity', 'region_similarity_calculator.NegSqDistSimilarity', ([], {}), '()\n', (22930, 22932), False, 'from core import region_similarity_calculator\n'), ((22947, 22989), 'matchers.bipartite_matcher.GreedyBipartiteMatcher', 
'bipartite_matcher.GreedyBipartiteMatcher', ([], {}), '()\n', (22987, 22989), False, 'from matchers import bipartite_matcher\n'), ((23006, 23048), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (23046, 23048), False, 'from box_coders import mean_stddev_box_coder\n'), ((23076, 23122), 'tensorflow.constant', 'tf.constant', (['[1, 0, 0, 0, 0, 0, 0]', 'tf.float32'], {}), '([1, 0, 0, 0, 0, 0, 0], tf.float32)\n', (23087, 23122), True, 'import tensorflow as tf\n'), ((23145, 23258), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (23174, 23258), True, 'from core import target_assigner as targetassigner\n'), ((23291, 23394), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0], [0.75, 0, \n 1.0, 0.25]]'], {}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, 0.5, 1.0],\n [0.75, 0, 1.0, 0.25]])\n', (23302, 23394), True, 'import tensorflow as tf\n'), ((23501, 23529), 'tensorflow.constant', 'tf.constant', (['(4 * [4 * [0.1]])'], {}), '(4 * [4 * [0.1]])\n', (23512, 23529), True, 'import tensorflow as tf\n'), ((23542, 23571), 'core.box_list.BoxList', 'box_list.BoxList', (['prior_means'], {}), '(prior_means)\n', (23558, 23571), False, 'from core import box_list\n'), ((23862, 23961), 'tensorflow.constant', 'tf.constant', (['[[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]]', 'tf.float32'], {}), '([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, \n 0, 0]], tf.float32)\n', (23873, 23961), True, 'import tensorflow as tf\n'), ((24292, 24342), 'core.region_similarity_calculator.NegSqDistSimilarity', 'region_similarity_calculator.NegSqDistSimilarity', ([], {}), '()\n', (24340, 24342), False, 'from core import 
region_similarity_calculator\n'), ((24357, 24399), 'matchers.bipartite_matcher.GreedyBipartiteMatcher', 'bipartite_matcher.GreedyBipartiteMatcher', ([], {}), '()\n', (24397, 24399), False, 'from matchers import bipartite_matcher\n'), ((24416, 24458), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (24456, 24458), False, 'from box_coders import mean_stddev_box_coder\n'), ((24486, 24535), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [0, 0], [0, 0]]', 'tf.float32'], {}), '([[0, 0], [0, 0], [0, 0]], tf.float32)\n', (24497, 24535), True, 'import tensorflow as tf\n'), ((24558, 24671), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (24587, 24671), True, 'from core import target_assigner as targetassigner\n'), ((24704, 24739), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.5, 0.5]]'], {}), '([[0.0, 0.0, 0.5, 0.5]])\n', (24715, 24739), True, 'import tensorflow as tf\n'), ((24760, 24795), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0]])\n', (24771, 24795), True, 'import tensorflow as tf\n'), ((24809, 24838), 'core.box_list.BoxList', 'box_list.BoxList', (['prior_means'], {}), '(prior_means)\n', (24825, 24838), False, 'from core import box_list\n'), ((25087, 25130), 'tensorflow.constant', 'tf.constant', (['[[[0, 1], [1, 0]]]', 'tf.float32'], {}), '([[[0, 1], [1, 0]]], tf.float32)\n', (25098, 25130), True, 'import tensorflow as tf\n'), ((25402, 25446), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (25444, 25446), False, 'from core import region_similarity_calculator\n'), ((25461, 25537), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], 
{'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (25489, 25537), False, 'from matchers import argmax_matcher\n'), ((25597, 25639), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (25637, 25639), False, 'from box_coders import mean_stddev_box_coder\n'), ((25651, 25748), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'None'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=None)\n', (25680, 25748), True, 'from core import target_assigner as targetassigner\n'), ((25844, 25888), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (25886, 25888), False, 'from core import region_similarity_calculator\n'), ((25903, 25979), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (25931, 25979), False, 'from matchers import argmax_matcher\n'), ((26039, 26081), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (26079, 26081), False, 'from box_coders import mean_stddev_box_coder\n'), ((26109, 26157), 'tensorflow.constant', 'tf.constant', (['([1] + num_classes * [0])', 'tf.float32'], {}), '([1] + num_classes * [0], tf.float32)\n', (26120, 26157), True, 'import tensorflow as tf\n'), ((26169, 26282), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (26198, 26282), True, 'from core import target_assigner as targetassigner\n'), ((26390, 26434), 
'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (26432, 26434), False, 'from core import region_similarity_calculator\n'), ((26449, 26525), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (26477, 26525), False, 'from matchers import argmax_matcher\n'), ((26585, 26627), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (26625, 26627), False, 'from box_coders import mean_stddev_box_coder\n'), ((26758, 26871), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (26787, 26871), True, 'from core import target_assigner as targetassigner\n'), ((27712, 27762), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.2, 0.2]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.2, 0.2]], dtype=np.float32)\n', (27720, 27762), True, 'import numpy as np\n'), ((27788, 27880), 'numpy.array', 'np.array', (['[[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]]'], {'dtype': 'np.float32'}), '([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]],\n dtype=np.float32)\n', (27796, 27880), True, 'import numpy as np\n'), ((27969, 28079), 'numpy.array', 'np.array', (['[[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, 0.75, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, \n 0.75, 1, 1]], dtype=np.float32)\n', (27977, 28079), True, 'import numpy as np\n'), ((28175, 28295), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, \n 0.1, 0.1, 0.1]]'], {'dtype': 'np.float32'}), '([[0.1, 0.1, 0.1, 0.1], 
[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1]], dtype=np.float32)\n', (28183, 28295), True, 'import numpy as np\n'), ((30400, 30450), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.2, 0.2]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.2, 0.2]], dtype=np.float32)\n', (30408, 30450), True, 'import numpy as np\n'), ((30476, 30568), 'numpy.array', 'np.array', (['[[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]]'], {'dtype': 'np.float32'}), '([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]],\n dtype=np.float32)\n', (30484, 30568), True, 'import numpy as np\n'), ((30659, 30701), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 0, 0]], dtype=np.float32)\n', (30667, 30701), True, 'import numpy as np\n'), ((30723, 30779), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.float32)\n', (30731, 30779), True, 'import numpy as np\n'), ((30831, 30941), 'numpy.array', 'np.array', (['[[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, 0.75, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, \n 0.75, 1, 1]], dtype=np.float32)\n', (30839, 30941), True, 'import numpy as np\n'), ((31037, 31157), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, \n 0.1, 0.1, 0.1]]'], {'dtype': 'np.float32'}), '([[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1]], dtype=np.float32)\n', (31045, 31157), True, 'import numpy as np\n'), ((33767, 33839), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.2, 0.2], [0.0, 0.0, 0.0, 0.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.2, 0.2], [0.0, 0.0, 0.0, 0.0]], dtype=np.float32)\n', (33775, 33839), True, 'import numpy as np\n'), ((33898, 33932), 'numpy.array', 'np.array', (['[1, 0]'], {'dtype': 'np.float32'}), '([1, 0], dtype=np.float32)\n', (33906, 33932), 
True, 'import numpy as np\n'), ((33960, 34066), 'numpy.array', 'np.array', (['[[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842], [0, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842], [0, 0,\n 0, 0]], dtype=np.float32)\n', (33968, 34066), True, 'import numpy as np\n'), ((34200, 34237), 'numpy.array', 'np.array', (['[1, 1, 0]'], {'dtype': 'np.float32'}), '([1, 1, 0], dtype=np.float32)\n', (34208, 34237), True, 'import numpy as np\n'), ((34259, 34315), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)\n', (34267, 34315), True, 'import numpy as np\n'), ((34337, 34407), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.float32)\n', (34345, 34407), True, 'import numpy as np\n'), ((34490, 34600), 'numpy.array', 'np.array', (['[[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, 0.75, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, \n 0.75, 1, 1]], dtype=np.float32)\n', (34498, 34600), True, 'import numpy as np\n'), ((34696, 34816), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, \n 0.1, 0.1, 0.1]]'], {'dtype': 'np.float32'}), '([[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1]], dtype=np.float32)\n', (34704, 34816), True, 'import numpy as np\n'), ((37428, 37478), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.2, 0.2]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.2, 0.2]], dtype=np.float32)\n', (37436, 37478), True, 'import numpy as np\n'), ((37504, 37596), 'numpy.array', 'np.array', (['[[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]]'], {'dtype': 'np.float32'}), '([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]],\n dtype=np.float32)\n', 
(37512, 37596), True, 'import numpy as np\n'), ((37687, 37729), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 0, 0]], dtype=np.float32)\n', (37695, 37729), True, 'import numpy as np\n'), ((37751, 37807), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.float32)\n', (37759, 37807), True, 'import numpy as np\n'), ((37860, 37912), 'numpy.array', 'np.array', (['[[[0, 1, 1], [1, 1, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 1], [1, 1, 0]]], dtype=np.float32)\n', (37868, 37912), True, 'import numpy as np\n'), ((37966, 38042), 'numpy.array', 'np.array', (['[[[0, 1, 1], [1, 1, 0]], [[0, 0, 1], [0, 0, 1]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 1], [1, 1, 0]], [[0, 0, 1], [0, 0, 1]]], dtype=np.float32)\n', (37974, 38042), True, 'import numpy as np\n'), ((38158, 38268), 'numpy.array', 'np.array', (['[[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, 0.75, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0.25, 0.25], [0, 0.25, 1, 1], [0, 0.1, 0.5, 0.5], [0.75, \n 0.75, 1, 1]], dtype=np.float32)\n', (38166, 38268), True, 'import numpy as np\n'), ((38364, 38484), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, \n 0.1, 0.1, 0.1]]'], {'dtype': 'np.float32'}), '([[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1]], dtype=np.float32)\n', (38372, 38484), True, 'import numpy as np\n'), ((41186, 41220), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (41194, 41220), True, 'import numpy as np\n'), ((41240, 41305), 'numpy.array', 'np.array', (['[[0, 0, 0.25, 0.25], [0, 0.25, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0.25, 0.25], [0, 0.25, 1, 1]], dtype=np.float32)\n', (41248, 41305), True, 'import numpy as np\n'), ((41353, 41425), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1]]'], 
{'dtype': 'np.float32'}), '([[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1]], dtype=np.float32)\n', (41361, 41425), True, 'import numpy as np\n'), ((41720, 41770), 'numpy.zeros', 'np.zeros', (['(0, num_classes + pad)'], {'dtype': 'np.float32'}), '((0, num_classes + pad), dtype=np.float32)\n', (41728, 41770), True, 'import numpy as np\n'), ((42589, 42624), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0]])\n', (42600, 42624), True, 'import tensorflow as tf\n'), ((42690, 42757), 'core.target_assigner.create_target_assigner', 'targetassigner.create_target_assigner', (['"""Multibox"""'], {'stage': '"""proposal"""'}), "('Multibox', stage='proposal')\n", (42727, 42757), True, 'from core import target_assigner as targetassigner\n'), ((43147, 43216), 'core.target_assigner.create_target_assigner', 'targetassigner.create_target_assigner', (['"""FasterRCNN"""'], {'stage': '"""proposal"""'}), "('FasterRCNN', stage='proposal')\n", (43184, 43216), True, 'from core import target_assigner as targetassigner\n'), ((43386, 43435), 'core.target_assigner.create_target_assigner', 'targetassigner.create_target_assigner', (['"""FastRCNN"""'], {}), "('FastRCNN')\n", (43423, 43435), True, 'from core import target_assigner as targetassigner\n'), ((43537, 43607), 'core.target_assigner.create_target_assigner', 'targetassigner.create_target_assigner', (['"""FasterRCNN"""'], {'stage': '"""detection"""'}), "('FasterRCNN', stage='detection')\n", (43574, 43607), True, 'from core import target_assigner as targetassigner\n'), ((1325, 1369), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (1367, 1369), False, 'from core import region_similarity_calculator\n'), ((1386, 1462), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (1414, 1462), False, 
'from matchers import argmax_matcher\n'), ((1526, 1568), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (1566, 1568), False, 'from box_coders import mean_stddev_box_coder\n'), ((1593, 1690), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'None'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=None)\n', (1622, 1690), True, 'from core import target_assigner as targetassigner\n'), ((1722, 1752), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (1738, 1752), False, 'from core import box_list\n'), ((1839, 1880), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (1855, 1880), False, 'from core import box_list\n'), ((3897, 3941), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (3939, 3941), False, 'from core import region_similarity_calculator\n'), ((3958, 4034), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.3)'}), '(matched_threshold=0.5, unmatched_threshold=0.3)\n', (3986, 4034), False, 'from matchers import argmax_matcher\n'), ((4098, 4140), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (4138, 4140), False, 'from box_coders import mean_stddev_box_coder\n'), ((4165, 4262), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'None'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=None)\n', (4194, 4262), True, 'from core import target_assigner as targetassigner\n'), ((4294, 4324), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), 
'(anchor_means)\n', (4310, 4324), False, 'from core import box_list\n'), ((4411, 4452), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (4427, 4452), False, 'from core import box_list\n'), ((6098, 6142), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (6140, 6142), False, 'from core import region_similarity_calculator\n'), ((6159, 6235), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (6187, 6235), False, 'from matchers import argmax_matcher\n'), ((6299, 6394), 'box_coders.keypoint_box_coder.KeypointBoxCoder', 'keypoint_box_coder.KeypointBoxCoder', ([], {'num_keypoints': '(6)', 'scale_factors': '[10.0, 10.0, 5.0, 5.0]'}), '(num_keypoints=6, scale_factors=[10.0, \n 10.0, 5.0, 5.0])\n', (6334, 6394), False, 'from box_coders import keypoint_box_coder\n'), ((6425, 6522), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'None'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=None)\n', (6454, 6522), True, 'from core import target_assigner as targetassigner\n'), ((6554, 6584), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (6570, 6584), False, 'from core import box_list\n'), ((6613, 6654), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (6629, 6654), False, 'from core import box_list\n'), ((9241, 9285), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (9283, 9285), False, 'from core import region_similarity_calculator\n'), ((9302, 9378), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], 
{'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (9330, 9378), False, 'from matchers import argmax_matcher\n'), ((9442, 9537), 'box_coders.keypoint_box_coder.KeypointBoxCoder', 'keypoint_box_coder.KeypointBoxCoder', ([], {'num_keypoints': '(6)', 'scale_factors': '[10.0, 10.0, 5.0, 5.0]'}), '(num_keypoints=6, scale_factors=[10.0, \n 10.0, 5.0, 5.0])\n', (9477, 9537), False, 'from box_coders import keypoint_box_coder\n'), ((9568, 9665), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'None'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=None)\n', (9597, 9665), True, 'from core import target_assigner as targetassigner\n'), ((9697, 9727), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (9713, 9727), False, 'from core import box_list\n'), ((9756, 9797), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (9772, 9797), False, 'from core import box_list\n'), ((12013, 12057), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (12055, 12057), False, 'from core import region_similarity_calculator\n'), ((12074, 12150), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (12102, 12150), False, 'from matchers import argmax_matcher\n'), ((12214, 12256), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (12254, 12256), False, 'from box_coders import mean_stddev_box_coder\n'), ((12286, 12332), 'tensorflow.constant', 'tf.constant', (['[1, 0, 0, 0, 0, 0, 0]', 'tf.float32'], {}), '([1, 0, 0, 0, 0, 0, 0], tf.float32)\n', (12297, 12332), 
True, 'import tensorflow as tf\n'), ((12357, 12470), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (12386, 12470), True, 'from core import target_assigner as targetassigner\n'), ((12513, 12543), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (12529, 12543), False, 'from core import box_list\n'), ((12630, 12671), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (12646, 12671), False, 'from core import box_list\n'), ((14983, 15027), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (15025, 15027), False, 'from core import region_similarity_calculator\n'), ((15044, 15120), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (15072, 15120), False, 'from matchers import argmax_matcher\n'), ((15184, 15226), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (15224, 15226), False, 'from box_coders import mean_stddev_box_coder\n'), ((15256, 15302), 'tensorflow.constant', 'tf.constant', (['[1, 0, 0, 0, 0, 0, 0]', 'tf.float32'], {}), '([1, 0, 0, 0, 0, 0, 0], tf.float32)\n', (15267, 15302), True, 'import tensorflow as tf\n'), ((15327, 15440), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (15356, 15440), True, 'from core import target_assigner as targetassigner\n'), ((15483, 15513), 
'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (15499, 15513), False, 'from core import box_list\n'), ((15600, 15641), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (15616, 15641), False, 'from core import box_list\n'), ((17452, 17496), 'core.region_similarity_calculator.IouSimilarity', 'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (17494, 17496), False, 'from core import region_similarity_calculator\n'), ((17513, 17589), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (17541, 17589), False, 'from matchers import argmax_matcher\n'), ((17653, 17695), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (17693, 17695), False, 'from box_coders import mean_stddev_box_coder\n'), ((17726, 17767), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [0, 0]]', 'tf.float32'], {}), '([[0, 0], [0, 0]], tf.float32)\n', (17737, 17767), True, 'import tensorflow as tf\n'), ((17792, 17905), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (17821, 17905), True, 'from core import target_assigner as targetassigner\n'), ((17948, 17978), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (17964, 17978), False, 'from core import box_list\n'), ((18065, 18106), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (18081, 18106), False, 'from core import box_list\n'), ((20340, 20384), 'core.region_similarity_calculator.IouSimilarity', 
'region_similarity_calculator.IouSimilarity', ([], {}), '()\n', (20382, 20384), False, 'from core import region_similarity_calculator\n'), ((20401, 20477), 'matchers.argmax_matcher.ArgMaxMatcher', 'argmax_matcher.ArgMaxMatcher', ([], {'matched_threshold': '(0.5)', 'unmatched_threshold': '(0.5)'}), '(matched_threshold=0.5, unmatched_threshold=0.5)\n', (20429, 20477), False, 'from matchers import argmax_matcher\n'), ((20541, 20583), 'box_coders.mean_stddev_box_coder.MeanStddevBoxCoder', 'mean_stddev_box_coder.MeanStddevBoxCoder', ([], {}), '()\n', (20581, 20583), False, 'from box_coders import mean_stddev_box_coder\n'), ((20613, 20647), 'tensorflow.constant', 'tf.constant', (['[0, 0, 0]', 'tf.float32'], {}), '([0, 0, 0], tf.float32)\n', (20624, 20647), True, 'import tensorflow as tf\n'), ((20672, 20702), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (20688, 20702), False, 'from core import box_list\n'), ((20789, 20830), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (20805, 20830), False, 'from core import box_list\n'), ((20855, 20968), 'core.target_assigner.TargetAssigner', 'targetassigner.TargetAssigner', (['similarity_calc', 'matcher', 'box_coder'], {'unmatched_cls_target': 'unmatched_cls_target'}), '(similarity_calc, matcher, box_coder,\n unmatched_cls_target=unmatched_cls_target)\n', (20884, 20968), True, 'from core import target_assigner as targetassigner\n'), ((23810, 23834), 'tensorflow.constant', 'tf.constant', (['box_corners'], {}), '(box_corners)\n', (23821, 23834), True, 'import tensorflow as tf\n'), ((25036, 25060), 'tensorflow.constant', 'tf.constant', (['box_corners'], {}), '(box_corners)\n', (25047, 25060), True, 'import tensorflow as tf\n'), ((26667, 26694), 'numpy.zeros', 'np.zeros', (['target_dimensions'], {}), '(target_dimensions)\n', (26675, 26694), True, 'import numpy as np\n'), ((27052, 27090), 'core.box_list.BoxList', 'box_list.BoxList', 
(['groundtruth_boxlist1'], {}), '(groundtruth_boxlist1)\n', (27068, 27090), False, 'from core import box_list\n'), ((27109, 27147), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist2'], {}), '(groundtruth_boxlist2)\n', (27125, 27147), False, 'from core import box_list\n'), ((27254, 27284), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (27270, 27284), False, 'from core import box_list\n'), ((27484, 27598), 'core.target_assigner.batch_assign_targets', 'targetassigner.batch_assign_targets', (['agnostic_target_assigner', 'anchors_boxlist', 'gt_box_batch', 'gt_class_targets'], {}), '(agnostic_target_assigner,\n anchors_boxlist, gt_box_batch, gt_class_targets)\n', (27519, 27598), True, 'from core import target_assigner as targetassigner\n'), ((29689, 29727), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist1'], {}), '(groundtruth_boxlist1)\n', (29705, 29727), False, 'from core import box_list\n'), ((29746, 29784), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist2'], {}), '(groundtruth_boxlist2)\n', (29762, 29784), False, 'from core import box_list\n'), ((29911, 29941), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (29927, 29941), False, 'from core import box_list\n'), ((30170, 30286), 'core.target_assigner.batch_assign_targets', 'targetassigner.batch_assign_targets', (['multiclass_target_assigner', 'anchors_boxlist', 'gt_box_batch', 'gt_class_targets'], {}), '(multiclass_target_assigner,\n anchors_boxlist, gt_box_batch, gt_class_targets)\n', (30205, 30286), True, 'from core import target_assigner as targetassigner\n'), ((32980, 33018), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist1'], {}), '(groundtruth_boxlist1)\n', (32996, 33018), False, 'from core import box_list\n'), ((33037, 33075), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist2'], {}), '(groundtruth_boxlist2)\n', (33053, 33075), 
False, 'from core import box_list\n'), ((33266, 33296), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (33282, 33296), False, 'from core import box_list\n'), ((33525, 33653), 'core.target_assigner.batch_assign_targets', 'targetassigner.batch_assign_targets', (['multiclass_target_assigner', 'anchors_boxlist', 'gt_box_batch', 'gt_class_targets', 'gt_weights'], {}), '(multiclass_target_assigner,\n anchors_boxlist, gt_box_batch, gt_class_targets, gt_weights)\n', (33560, 33653), True, 'from core import target_assigner as targetassigner\n'), ((36700, 36738), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist1'], {}), '(groundtruth_boxlist1)\n', (36716, 36738), False, 'from core import box_list\n'), ((36757, 36795), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_boxlist2'], {}), '(groundtruth_boxlist2)\n', (36773, 36795), False, 'from core import box_list\n'), ((36922, 36952), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (36938, 36952), False, 'from core import box_list\n'), ((37198, 37314), 'core.target_assigner.batch_assign_targets', 'targetassigner.batch_assign_targets', (['multiclass_target_assigner', 'anchors_boxlist', 'gt_box_batch', 'gt_class_targets'], {}), '(multiclass_target_assigner,\n anchors_boxlist, gt_box_batch, gt_class_targets)\n', (37233, 37314), True, 'from core import target_assigner as targetassigner\n'), ((40527, 40568), 'core.box_list.BoxList', 'box_list.BoxList', (['groundtruth_box_corners'], {}), '(groundtruth_box_corners)\n', (40543, 40568), False, 'from core import box_list\n'), ((40686, 40716), 'core.box_list.BoxList', 'box_list.BoxList', (['anchor_means'], {}), '(anchor_means)\n', (40702, 40716), False, 'from core import box_list\n'), ((40947, 41069), 'core.target_assigner.batch_assign_targets', 'targetassigner.batch_assign_targets', (['multiclass_target_assigner', 'anchors_boxlist', 'gt_box_batch', 'gt_class_targets_batch'], 
{}), '(multiclass_target_assigner,\n anchors_boxlist, gt_box_batch, gt_class_targets_batch)\n', (40982, 41069), True, 'from core import target_assigner as targetassigner\n'), ((42494, 42514), 'tensorflow.constant', 'tf.constant', (['corners'], {}), '(corners)\n', (42505, 42514), True, 'import tensorflow as tf\n'), ((42547, 42567), 'tensorflow.constant', 'tf.constant', (['corners'], {}), '(corners)\n', (42558, 42567), True, 'import tensorflow as tf\n'), ((43093, 43113), 'tensorflow.constant', 'tf.constant', (['corners'], {}), '(corners)\n', (43104, 43113), True, 'import tensorflow as tf\n'), ((43803, 43882), 'core.target_assigner.create_target_assigner', 'targetassigner.create_target_assigner', (['"""InvalidDetector"""'], {'stage': '"""invalid_stage"""'}), "('InvalidDetector', stage='invalid_stage')\n", (43840, 43882), True, 'from core import target_assigner as targetassigner\n')] |
from unittest import TestCase, main
from numpy import diag_indices, dot, finfo, float64
from numpy.random import random
from numpy.testing import assert_allclose
from cogent3.maths.matrix_exponentiation import PadeExponentiator
from cogent3.maths.matrix_logarithm import logm
from cogent3.maths.measure import (
jsd,
jsm,
paralinear_continuous_time,
paralinear_discrete_time,
)
# Standard cogent3 module metadata. The <NAME>/<EMAIL> tokens are
# placeholders scrubbed upstream; restore from project history if needed.
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Alpha"
def gen_q_p(dim=4):
    """Return a random rate matrix Q and its transition matrix P.

    Parameters
    ----------
    dim : int
        dimension of the square matrices; defaults to 4 (nucleotide states),
        so existing callers are unaffected.

    Returns
    -------
    tuple
        (Q, P) where each row of Q sums to zero (a valid continuous-time
        rate matrix) and P is the matrix exponential of Q.
    """
    q = random((dim, dim))
    diag = diag_indices(dim)
    # zero the diagonal first so the row sums exclude it, then set the
    # diagonal so every row of Q sums to zero
    q[diag] = 0
    q[diag] = -q.sum(axis=1)
    p = PadeExponentiator(q)()
    return q, p
def gen_qs_ps():
    """Return three (Q, P) pairs; the third is the composition of the first two.

    The combined transition matrix is the product of the first two P
    matrices, and its rate matrix is recovered via the matrix logarithm.
    """
    first = gen_q_p()
    second = gen_q_p()
    combined_p = dot(first[1], second[1])
    combined_q = logm(combined_p)
    return first, second, (combined_q, combined_p)
def next_pi(pi, p):
    """Return the state frequencies after applying transition matrix p to pi."""
    updated = dot(pi, p)
    return updated
class ParalinearTest(TestCase):
    def test_paralinear_discrete_time(self):
        """paralinear_discrete_time agrees with paralinear_continuous_time"""
        pairs = gen_qs_ps()
        freqs = random(4)
        freqs /= freqs.sum()
        # state frequencies before each process in the chain
        pis = [freqs]
        pis.append(next_pi(pis[0], pairs[0][1]))
        pis.append(next_pi(pis[1], pairs[1][1]))
        for (q, p), pi in zip(pairs, pis):
            continuous = paralinear_continuous_time(p, pi, q)
            discrete = paralinear_discrete_time(p, pi)
            assert_allclose(continuous, discrete)

    def test_paralinear_continuous_time(self):
        """paralinear_continuous_time is additive from random matrices"""
        (q1, p1), (q2, p2), (q3, p3) = gen_qs_ps()
        pi1 = random(4)
        pi1 /= pi1.sum()
        pi2 = next_pi(pi1, p1)
        first_leg = paralinear_continuous_time(p1, pi1, q1)
        second_leg = paralinear_continuous_time(p2, pi2, q2)
        combined = paralinear_continuous_time(p3, pi1, q3)
        assert_allclose(first_leg + second_leg, combined)

    def test_paralinear_continuous_time_validate(self):
        """paralinear_continuous_time validate check consistency"""
        (q1, p1), (q2, p2), _ = gen_qs_ps()
        freqs = random(4)
        # pi with an invalid shape (a matrix, not a vector)
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(p1, q1, q1, validate=True)
        # pi with invalid values (not yet normalised)
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(p1, freqs, q1, validate=True)
        freqs /= freqs.sum()
        # a P matrix supplied where Q is expected
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(p1, freqs, p1, validate=True)
        # a Q matrix supplied where P is expected
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(q1, freqs, q1, validate=True)
        q2[0, 0] = 9  # corrupt the diagonal -> invalid Q
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(p1, freqs, q2, validate=True)
        p2[0, 3] = 9  # rows no longer sum to 1 -> invalid P
        with self.assertRaises(AssertionError):
            paralinear_continuous_time(p2, freqs, q1, validate=True)
class TestJensenShannon(TestCase):
    # 4x machine precision: handles architectures with lower precision whose
    # numerical calcs in jsd/jsm do not produce an exact 0.0
    atol = finfo(float64).eps * 4

    def test_jsd_validation(self):
        """jsd fails with malformed data"""
        raw1 = random(5)
        norm1 = raw1 / raw1.sum()
        stacked1 = [raw1, raw1]
        short1 = raw1[:4]
        raw2 = random(5)
        norm2 = raw2 / raw2.sum()
        stacked2 = [raw2, raw2]
        short2 = raw2[:4]
        # every pairing below violates a precondition checked by validate=True
        invalid_pairs = [
            (raw1, stacked2),  # freqs1/freqs2 mismatched shape
            (stacked1, raw2),  # freqs1/freqs2 mismatched shape
            (raw1, short2),  # freqs1/freqs2 mismatched shape
            (short1, raw2),  # freqs1/freqs2 mismatched shape
            (stacked1, raw2),  # freqs1 has incorrect dimension
            (stacked1, stacked2),  # freqs1 has incorrect dimension
            (raw1, stacked2),  # freqs2 has incorrect dimension
            (raw1, raw2),  # invalid (unnormalised) freqs1
            (raw1, norm2),  # invalid freqs1
            (norm1, raw2),  # invalid freqs2
        ]
        for first, second in invalid_pairs:
            with self.assertRaises(AssertionError):
                jsd(first, second, validate=True)

    def test_jsd(self):
        """evaluate jsd between identical, and non-identical distributions"""
        # case1: jsd between two identical point-mass distributions is 0.0
        for position in range(3):
            lhs = [0.0, 0.0, 0.0]
            rhs = [0.0, 0.0, 0.0]
            lhs[position] = 1.0
            rhs[position] = 1.0
            assert_allclose(
                jsd(lhs, rhs, validate=True),
                0.0,
                err_msg="Testing case1 for jsd failed",
                atol=self.atol,
            )
        # remaining cases pin jsd's numerical output for known distributions
        known = [
            (
                [1 / 10, 9 / 10, 0],
                [0, 1 / 10, 9 / 10],
                0.7655022032053593,
                "Testing case2 for jsd failed",
            ),
            (
                [1.0, 0.0],
                [1 / 2, 1 / 2],
                0.3112781244591328,
                "Testing case3 for jsd failed",
            ),
            (
                [1 / 10] * 10,
                [1 / 10] * 10,
                0.0,
                "Testing case4 for jsd failed",
            ),
        ]
        for lhs, rhs, expect, message in known:
            assert_allclose(
                jsd(lhs, rhs, validate=True),
                expect,
                err_msg=message,
                atol=self.atol,
            )

    def test_jsm(self):
        """evaluate jsm between identical, and non-identical distributions"""
        # case1: jsm between two identical point-mass distributions is 0.0
        for position in range(3):
            lhs = [0.0, 0.0, 0.0]
            rhs = [0.0, 0.0, 0.0]
            lhs[position] = 1.0
            rhs[position] = 1.0
            assert_allclose(
                jsm(lhs, rhs, validate=True),
                0.0,
                err_msg="Testing case1 for jsm failed",
                atol=self.atol,
            )
        # remaining cases pin jsm's numerical output for known distributions
        known = [
            (
                [1 / 10, 9 / 10, 0],
                [0, 1 / 10, 9 / 10],
                0.8749298275892526,
                "Testing case2 for jsm failed",
            ),
            (
                [1.0, 0.0],
                [1 / 2, 1 / 2],
                0.5579230452841438,
                "Testing case3 for jsm failed",
            ),
            (
                [1 / 10] * 10,
                [1 / 10] * 10,
                0.0,
                "Testing case4 for jsm failed",
            ),
        ]
        for lhs, rhs, expect, message in known:
            assert_allclose(
                jsm(lhs, rhs, validate=True),
                expect,
                err_msg=message,
                atol=self.atol,
            )
# Allow running the suite directly: ``python <this file>``.
if __name__ == "__main__":
    main()
| [
"unittest.main",
"cogent3.maths.measure.jsd",
"cogent3.maths.matrix_exponentiation.PadeExponentiator",
"numpy.testing.assert_allclose",
"numpy.diag_indices",
"cogent3.maths.matrix_logarithm.logm",
"cogent3.maths.measure.paralinear_continuous_time",
"numpy.finfo",
"numpy.random.random",
"cogent3.ma... | [((657, 671), 'numpy.random.random', 'random', (['(4, 4)'], {}), '((4, 4))\n', (663, 671), False, 'from numpy.random import random\n'), ((686, 701), 'numpy.diag_indices', 'diag_indices', (['(4)'], {}), '(4)\n', (698, 701), False, 'from numpy import diag_indices, dot, finfo, float64\n'), ((881, 892), 'numpy.dot', 'dot', (['p1', 'p2'], {}), '(p1, p2)\n', (884, 892), False, 'from numpy import diag_indices, dot, finfo, float64\n'), ((902, 910), 'cogent3.maths.matrix_logarithm.logm', 'logm', (['p3'], {}), '(p3)\n', (906, 910), False, 'from cogent3.maths.matrix_logarithm import logm\n'), ((984, 994), 'numpy.dot', 'dot', (['pi', 'p'], {}), '(pi, p)\n', (987, 994), False, 'from numpy import diag_indices, dot, finfo, float64\n'), ((9037, 9043), 'unittest.main', 'main', ([], {}), '()\n', (9041, 9043), False, 'from unittest import TestCase, main\n'), ((765, 786), 'cogent3.maths.matrix_exponentiation.PadeExponentiator', 'PadeExponentiator', (['q1'], {}), '(q1)\n', (782, 786), False, 'from cogent3.maths.matrix_exponentiation import PadeExponentiator\n'), ((1229, 1238), 'numpy.random.random', 'random', (['(4)'], {}), '(4)\n', (1235, 1238), False, 'from numpy.random import random\n'), ((1358, 1405), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'pi1', 'qp1[0]'], {}), '(qp1[1], pi1, qp1[0])\n', (1384, 1405), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1429, 1466), 'cogent3.maths.measure.paralinear_discrete_time', 'paralinear_discrete_time', (['qp1[1]', 'pi1'], {}), '(qp1[1], pi1)\n', (1453, 1466), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1475, 1518), 'numpy.testing.assert_allclose', 'assert_allclose', (['con_time_pl1', 'dis_time_pl1'], {}), '(con_time_pl1, dis_time_pl1)\n', (1490, 1518), False, 'from numpy.testing import assert_allclose\n'), ((1543, 1590), 
'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp2[1]', 'pi2', 'qp2[0]'], {}), '(qp2[1], pi2, qp2[0])\n', (1569, 1590), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1614, 1651), 'cogent3.maths.measure.paralinear_discrete_time', 'paralinear_discrete_time', (['qp2[1]', 'pi2'], {}), '(qp2[1], pi2)\n', (1638, 1651), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1660, 1703), 'numpy.testing.assert_allclose', 'assert_allclose', (['con_time_pl2', 'dis_time_pl2'], {}), '(con_time_pl2, dis_time_pl2)\n', (1675, 1703), False, 'from numpy.testing import assert_allclose\n'), ((1728, 1775), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp3[1]', 'pi3', 'qp3[0]'], {}), '(qp3[1], pi3, qp3[0])\n', (1754, 1775), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1799, 1836), 'cogent3.maths.measure.paralinear_discrete_time', 'paralinear_discrete_time', (['qp3[1]', 'pi3'], {}), '(qp3[1], pi3)\n', (1823, 1836), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((1845, 1888), 'numpy.testing.assert_allclose', 'assert_allclose', (['con_time_pl3', 'dis_time_pl3'], {}), '(con_time_pl3, dis_time_pl3)\n', (1860, 1888), False, 'from numpy.testing import assert_allclose\n'), ((2061, 2070), 'numpy.random.random', 'random', (['(4)'], {}), '(4)\n', (2067, 2070), False, 'from numpy.random import random\n'), ((2146, 2193), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'pi1', 'qp1[0]'], {}), '(qp1[1], pi1, qp1[0])\n', (2172, 2193), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((2208, 2255), 'cogent3.maths.measure.paralinear_continuous_time', 
'paralinear_continuous_time', (['qp2[1]', 'pi2', 'qp2[0]'], {}), '(qp2[1], pi2, qp2[0])\n', (2234, 2255), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((2270, 2317), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp3[1]', 'pi1', 'qp3[0]'], {}), '(qp3[1], pi1, qp3[0])\n', (2296, 2317), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((2327, 2358), 'numpy.testing.assert_allclose', 'assert_allclose', (['(pl1 + pl2)', 'pl3'], {}), '(pl1 + pl2, pl3)\n', (2342, 2358), False, 'from numpy.testing import assert_allclose\n'), ((2534, 2543), 'numpy.random.random', 'random', (['(4)'], {}), '(4)\n', (2540, 2543), False, 'from numpy.random import random\n'), ((3869, 3878), 'numpy.random.random', 'random', (['(5)'], {}), '(5)\n', (3875, 3878), False, 'from numpy.random import random\n'), ((4033, 4042), 'numpy.random.random', 'random', (['(5)'], {}), '(5)\n', (4039, 4042), False, 'from numpy.random import random\n'), ((2605, 2670), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'qp1[0]', 'qp1[0]'], {'validate': '(True)'}), '(qp1[1], qp1[0], qp1[0], validate=True)\n', (2631, 2670), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((2782, 2844), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'pi1', 'qp1[0]'], {'validate': '(True)'}), '(qp1[1], pi1, qp1[0], validate=True)\n', (2808, 2844), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((2982, 3044), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'pi1', 'qp1[1]'], {'validate': '(True)'}), '(qp1[1], pi1, qp1[1], validate=True)\n', (3008, 3044), False, 'from cogent3.maths.measure import jsd, jsm, 
paralinear_continuous_time, paralinear_discrete_time\n'), ((3119, 3181), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[0]', 'pi1', 'qp1[0]'], {'validate': '(True)'}), '(qp1[0], pi1, qp1[0], validate=True)\n', (3145, 3181), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((3281, 3343), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp1[1]', 'pi1', 'qp2[0]'], {'validate': '(True)'}), '(qp1[1], pi1, qp2[0], validate=True)\n', (3307, 3343), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((3443, 3505), 'cogent3.maths.measure.paralinear_continuous_time', 'paralinear_continuous_time', (['qp2[1]', 'pi1', 'qp1[0]'], {'validate': '(True)'}), '(qp2[1], pi1, qp1[0], validate=True)\n', (3469, 3505), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((3753, 3767), 'numpy.finfo', 'finfo', (['float64'], {}), '(float64)\n', (3758, 3767), False, 'from numpy import diag_indices, dot, finfo, float64\n'), ((4240, 4290), 'cogent3.maths.measure.jsd', 'jsd', (['freqs1', 'two_dimensional_freqs2'], {'validate': '(True)'}), '(freqs1, two_dimensional_freqs2, validate=True)\n', (4243, 4290), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((4416, 4466), 'cogent3.maths.measure.jsd', 'jsd', (['two_dimensional_freqs1', 'freqs2'], {'validate': '(True)'}), '(two_dimensional_freqs1, freqs2, validate=True)\n', (4419, 4466), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((4592, 4634), 'cogent3.maths.measure.jsd', 'jsd', (['freqs1', 'shorter_freqs2'], {'validate': '(True)'}), '(freqs1, shorter_freqs2, validate=True)\n', (4595, 4634), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, 
paralinear_discrete_time\n'), ((4730, 4772), 'cogent3.maths.measure.jsd', 'jsd', (['shorter_freqs1', 'freqs2'], {'validate': '(True)'}), '(shorter_freqs1, freqs2, validate=True)\n', (4733, 4772), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((4868, 4918), 'cogent3.maths.measure.jsd', 'jsd', (['two_dimensional_freqs1', 'freqs2'], {'validate': '(True)'}), '(two_dimensional_freqs1, freqs2, validate=True)\n', (4871, 4918), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((5044, 5110), 'cogent3.maths.measure.jsd', 'jsd', (['two_dimensional_freqs1', 'two_dimensional_freqs2'], {'validate': '(True)'}), '(two_dimensional_freqs1, two_dimensional_freqs2, validate=True)\n', (5047, 5110), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((5236, 5286), 'cogent3.maths.measure.jsd', 'jsd', (['freqs1', 'two_dimensional_freqs2'], {'validate': '(True)'}), '(freqs1, two_dimensional_freqs2, validate=True)\n', (5239, 5286), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((5412, 5446), 'cogent3.maths.measure.jsd', 'jsd', (['freqs1', 'freqs2'], {'validate': '(True)'}), '(freqs1, freqs2, validate=True)\n', (5415, 5446), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((5526, 5571), 'cogent3.maths.measure.jsd', 'jsd', (['freqs1', 'normalised_freqs2'], {'validate': '(True)'}), '(freqs1, normalised_freqs2, validate=True)\n', (5529, 5571), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((5651, 5696), 'cogent3.maths.measure.jsd', 'jsd', (['normalised_freqs1', 'freqs2'], {'validate': '(True)'}), '(normalised_freqs1, freqs2, validate=True)\n', (5654, 5696), False, 'from cogent3.maths.measure import jsd, jsm, 
paralinear_continuous_time, paralinear_discrete_time\n'), ((6557, 6595), 'cogent3.maths.measure.jsd', 'jsd', (['case2[0]', 'case2[1]'], {'validate': '(True)'}), '(case2[0], case2[1], validate=True)\n', (6560, 6595), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((6890, 6928), 'cogent3.maths.measure.jsd', 'jsd', (['case3[0]', 'case3[1]'], {'validate': '(True)'}), '(case3[0], case3[1], validate=True)\n', (6893, 6928), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((7248, 7286), 'cogent3.maths.measure.jsd', 'jsd', (['case4[0]', 'case4[1]'], {'validate': '(True)'}), '(case4[0], case4[1], validate=True)\n', (7251, 7286), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((8155, 8193), 'cogent3.maths.measure.jsm', 'jsm', (['case2[0]', 'case2[1]'], {'validate': '(True)'}), '(case2[0], case2[1], validate=True)\n', (8158, 8193), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((8487, 8525), 'cogent3.maths.measure.jsm', 'jsm', (['case3[0]', 'case3[1]'], {'validate': '(True)'}), '(case3[0], case3[1], validate=True)\n', (8490, 8525), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((8857, 8895), 'cogent3.maths.measure.jsm', 'jsm', (['case4[0]', 'case4[1]'], {'validate': '(True)'}), '(case4[0], case4[1], validate=True)\n', (8860, 8895), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((6141, 6179), 'cogent3.maths.measure.jsd', 'jsd', (['case1[0]', 'case1[1]'], {'validate': '(True)'}), '(case1[0], case1[1], validate=True)\n', (6144, 6179), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n'), ((7740, 7778), 'cogent3.maths.measure.jsm', 'jsm', (['case1[0]', 
'case1[1]'], {'validate': '(True)'}), '(case1[0], case1[1], validate=True)\n', (7743, 7778), False, 'from cogent3.maths.measure import jsd, jsm, paralinear_continuous_time, paralinear_discrete_time\n')] |
import numpy as np
from scipy.sparse import coo_matrix
from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator
from .StagHuntModel import StagHuntModel
from .util import *
class MatrixStagHuntModel(StagHuntModel):
    """
    Stag-hunt game formulated as a Markov random field, using the mrftools
    package.

    Attributes such as aPos/hPos/sPos (agent/hare/stag positions), horizon,
    N (number of grid cells), size (grid dimensions), MIN/NEU (log-domain
    "impossible"/"neutral" potential values), TOL, r_h/r_s (hare/stag
    rewards), lmb, time and build come from the parent StagHuntModel and are
    not visible in this file — see StagHuntModel for their exact semantics.

    Variable-key convention (built with util.new_var):
        x_ti : position of agent i at time t (N states, one per grid cell)
        d_ij : indicator that agent i sits on stag j's cell (2 states)
        u_ij : 3-state auxiliary per (agent i >= 2, stag j); state 2 carries
               the stag reward in the unary potential
        z_ij : auxiliary variable coupling d/u pairwise factors
               (12 states for i == 2, 18 states for i > 2)
    """
    def __init__(self):
        """
        MRF formulation of the game, using mrftools package.

        self.mrf holds the MarkovNet once one of the build_* methods has
        run; self.bp holds the BeliefPropagator after infer().
        """
        super().__init__()
        self.mrf = None
        self.bp = None
    def _set_agent_vars(self, mn):
        """
        Sets clamped unary potentials to x11,...,x1M
        Sets uniform unary potentials to x21,...,x2M,...,x(T-1)M
        Sets hare-reward unary potentials to xT1,...,xTM
        All unary factors involving agent variables are defined here.

        Potentials appear to be stored in log space (MIN ~ impossible,
        NEU ~ neutral; note the commented-out np.exp variant below).
        :param mn: MarkovNet object
        :return: Modified MarkovNet object
        """
        for i in range(1, self.horizon + 1):
            for j, agent_pos in enumerate(self.aPos):
                agent_index = j + 1
                var_key = new_var('x', i, agent_index)
                if i == 1: # t = 1 initial state -> clamp
                    factor = np.full(self.N, self.MIN, dtype=np.float64)
                    factor[self.get_index(agent_pos)] = self.NEU
                elif i < self.horizon: # t = 2,...,T-1 -> uniform
                    factor = np.full(self.N, self.NEU, dtype=np.float64)
                else: # t = T -> \prod_{k=1}^{k=H}phi_{h_k}
                    # final step: hare cells get the (log-domain) hare reward
                    factor = np.full(self.N, self.NEU, dtype=np.float64)
                    for hare_pos in self.hPos:
                        # factor[self.get_index(hare_pos)] = np.exp(-self.r_h / self.lmb)
                        factor[self.get_index(hare_pos)] = -self.r_h / self.lmb
                # set factor
                mn.set_unary_factor(var_key, factor)
        return mn
    def build_phi_q(self):
        """
        Efficient way to compute the pairwise factor between agent vars
        (uncontrolled dynamics).

        Builds an N x N matrix that is NEU exactly where cell b is reachable
        from cell a in one step on a grid of width self.size[0] — stay
        (diagonal), move up/down (offset size[0] diagonals) or left/right
        (offset-1 diagonals, with gaps at row boundaries) — and MIN
        elsewhere.
        :return: (N, N) float64 transition potential matrix
        """
        phi_q = np.full((self.N, self.N), self.MIN, dtype=np.float64)
        # fill diagonal
        phi_q[range(self.N), range(self.N)] = self.NEU
        # fill n-diagonals
        phi_q[range(self.N - self.size[0]), range(self.size[0], self.N)] = self.NEU
        phi_q[range(self.size[0], self.N), range(self.N - self.size[0])] = self.NEU
        # 1-diagonal with gaps
        # exclude pairs that would wrap across the right/left grid edge
        index_1 = np.arange(self.N - 1)
        index_1 = index_1[(index_1 + 1) % self.size[0] != 0]
        index_2 = np.arange(1, self.N)
        index_2 = index_2[index_2 % self.size[0] != 0]
        phi_q[index_1, index_2] = self.NEU
        phi_q[index_2, index_1] = self.NEU
        return phi_q
    def build_ground_model(self):
        """
        Builds the mrftools MarkovNet ground truth model based on the game
        definition. Works only if the number of agents is exactly equal to 2:
        the stag reward is expressed directly as a pairwise factor between
        the two agents' final-time position variables, with no auxiliary
        d/u/z variables.
        :return: none - sets the mrf attribute and build = 1
        :raises ValueError: if the number of agents is not 2
        """
        if not len(self.aPos) == 2:
            raise ValueError('Ground truth model can only be built when the number of agents is 2')
        mn = MarkovNet()
        mn = self._set_agent_vars(mn)
        # NOTE(review): _set_uncontrolled_dynamics is defined elsewhere
        # (presumably on StagHuntModel) — not visible in this file.
        mn = self._set_uncontrolled_dynamics(mn)
        # stag control factor -> ones and stag reward when both agents are in the position of a stag
        factor = np.full((self.N, self.N), self.NEU, dtype=np.float64)
        for stag_pos in self.sPos:
            s_ind = self.get_index(stag_pos)
            factor[s_ind, s_ind] = -self.r_s / self.lmb
        # one factor
        var_keys = (new_var('x', self.horizon, 1), new_var('x', self.horizon, 2))
        mn.set_edge_factor(var_keys, factor)
        mn.create_matrices()
        self.mrf = mn
        self.build = 1
    def build_model(self):
        """
        Builds the mrftools library MarkovNet model based on the game
        definition, for any number of agents, using auxiliary variables
        d_ij, u_ij, z_ij to encode the stag-capture reward.
        :return: none - sets the mrf attribute, build = 1 and time = 1
        """
        mn = MarkovNet()
        self.time = 1
        mn = self._set_agent_vars(mn)
        mn = self._set_uncontrolled_dynamics(mn)
        # unary and pairwise factors involving auxiliary variables d_ij, u_ij, z_ij
        for i, agent_pos in enumerate(self.aPos):
            agent_index = i + 1
            for j, stag_pos in enumerate(self.sPos):
                stag_index = j + 1
                # declare d_ij variables and set uniform unary potentials
                var_key_d = new_var('d', agent_index, stag_index)
                mn.set_unary_factor(var_key_d, np.full(2, self.NEU, dtype=np.float64))
                # declare u_{ij} variables and set uniform unary potentials
                if agent_index > 1:
                    var_key_u = new_var('u', agent_index, stag_index)
                    mn.set_unary_factor(var_key_u, np.full(3, self.NEU, dtype=np.float64))
                    var_key_z = new_var('z', agent_index, stag_index)
                    # z_2j couples (d_1j, d_2j, u_2j): 2*2*3 = 12 states;
                    # z_ij for i > 2 couples (u_{i-1}j, d_ij, u_ij): 3*2*3 = 18.
                    # edge_factor (inherited, not visible here) builds the
                    # pairwise table selecting one slot of the given ranges.
                    if agent_index == 2:
                        mn.set_unary_factor(var_key_z, np.full(12, self.NEU, dtype=np.float64))
                        mn.set_edge_factor((var_key_z, new_var('d', agent_index-1, stag_index)),
                                           np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 0),
                                                    dtype=np.float64))
                        mn.set_edge_factor((var_key_z, var_key_d),
                                           np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 1),
                                                    dtype=np.float64))
                        mn.set_edge_factor((var_key_z, var_key_u),
                                           np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 2),
                                                    dtype=np.float64))
                    else:
                        mn.set_unary_factor(var_key_z, np.full(18, self.NEU, dtype=np.float64))
                        mn.set_edge_factor((var_key_z, var_key_d),
                                           np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 1),
                                                    dtype=np.float64))
                        mn.set_edge_factor((var_key_z, var_key_u),
                                           np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 2),
                                                    dtype=np.float64))
                        mn.set_edge_factor((var_key_z, new_var('u', agent_index-1, stag_index)),
                                           np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 0),
                                                    dtype=np.float64))
                # build and set phi_{s_j} potentials
                var_key_x = new_var('x', self.horizon, agent_index)
                # inefficient but obvious way to fill the potential phi_{s_j}:
                # d == 1 iff the agent's final position is stag j's cell
                phi_s = np.full((self.N, 2), self.MIN, dtype=np.float64)
                for x in range(phi_s.shape[0]):
                    for d in range(phi_s.shape[1]):
                        if d == kronecker_delta(x, self.get_index(stag_pos)):
                            phi_s[x, d] = self.NEU
                mn.set_edge_factor((var_key_x, var_key_d), phi_s)
        # stag reward on state 2 of the last agent's u variable, per stag
        factor = np.full(3, self.NEU, dtype=np.float64)
        factor[2] = -self.r_s / self.lmb
        for j in range(len(self.sPos)):
            mn.set_unary_factor(new_var('u', len(self.aPos), j+1), factor)
        mn.create_matrices()
        self.mrf = mn
        self.build = 1
    def _clamp_agents(self):
        """
        Util that clamps the position of the agents to the current time index.

        Directly overwrites the unary_mat columns of the current-time agent
        variables (the model lays out x variables time-major, so the columns
        for time t are (t-1)*M .. t*M-1 for M agents).
        :return: None
        """
        factors = self.MIN * np.ones((self.N, len(self.aPos)))
        for index, agent in enumerate(self.aPos):
            factors[self.get_index(agent), index] = self.NEU
        self.mrf.unary_mat[:, np.arange((self.time - 1)*len(self.aPos), self.time*len(self.aPos))] = factors
    def _fast_util(self, f_start, n_slices, chunk, from_cols, to_cols):
        """
        Write a batch of edge potentials directly into the MarkovNet tensors.

        Slices [f_start, f_start + n_slices) of edge_pot_tensor receive
        `chunk` (forward messages); the mirrored slices offset by num_edges
        receive its transpose (backward messages). The sparse row/column
        lists (f_rows/t_rows/f_cols/t_cols) and message_index are extended
        to register both directions of each edge.
        :param f_start: index of the first forward-message slice to fill
        :param n_slices: number of edges written in this batch
        :param chunk: (s1, s2, n_slices) array of edge potentials
        :param from_cols: variable indices the forward messages come from
        :param to_cols: variable indices the forward messages are sent to
        """
        f_end = f_start + n_slices
        b_start = self.mrf.num_edges + f_start
        b_end = b_start + n_slices
        self.mrf.edge_pot_tensor[0:chunk.shape[0], 0:chunk.shape[1], f_start:f_end] = chunk
        self.mrf.edge_pot_tensor[0:chunk.shape[1], 0:chunk.shape[0], b_start:b_end] = chunk.transpose((1, 0, 2))
        f_messages = list(range(f_start, f_end))
        b_messages = list(range(b_start, b_end))
        # each message index appears in both the from- and to- row lists;
        # forward columns pair (from, to), backward columns pair (to, from)
        self.mrf.f_rows += f_messages
        self.mrf.t_rows += f_messages
        self.mrf.f_rows += b_messages
        self.mrf.t_rows += b_messages
        self.mrf.f_cols += from_cols
        self.mrf.t_cols += to_cols
        self.mrf.f_cols += to_cols
        self.mrf.t_cols += from_cols
        self.mrf.message_index.update({(self.mrf.var_list[from_cols[i]],
                                        self.mrf.var_list[to_cols[i]]): (i + f_start)
                                       for i in range(n_slices)})
    def fast_build_model(self):
        """
        Build the model by directly building the tensors, bypassing the
        MarkovNet factor API used by build_model.

        Requires N >= 18 because unary_mat has N rows and the z variables
        have up to 18 states stored in those rows.
        :return: None - sets the mrf attribute and build = 2
        """
        if self.N < 18:
            print("Fast model building is only available for game sizes N >= 18")
            return
        self.mrf = MarkovNet()
        self.mrf.matrix_mode = True
        self.mrf.max_states = self.N
        n_stag = len(self.sPos)
        n_agnt = len(self.aPos)
        # variables: x (n_agnt per time step), d (one per agent/stag pair),
        # u and z (one per stag for each agent index >= 2)
        n_vars = n_agnt * self.horizon + n_stag * (n_agnt + 2 * (n_agnt - 1))
        n_edges = n_agnt * (self.horizon - 1) + n_agnt * n_stag + 3 * (n_agnt - 1) * n_stag
        self.mrf.degrees = np.zeros(n_vars, dtype=np.float64)
        # VARIABLES OF THE MODEL
        self.mrf.var_list = []
        self.mrf.var_len = {}
        # agent vars
        self.mrf.var_list += [new_var('x', i + 1, j + 1) for i in range(self.horizon) for j in range(n_agnt)]
        self.mrf.var_len.update({new_var('x', i + 1, j + 1): self.N
                                 for i in range(self.horizon) for j in range(n_agnt)})
        # d vars
        self.mrf.var_list += [new_var('d', i + 1, j + 1) for j in range(n_stag) for i in range(n_agnt)]
        self.mrf.var_len.update({new_var('d', i + 1, j + 1): 2 for j in range(n_stag) for i in range(n_agnt)})
        # u vars
        self.mrf.var_list += [new_var('u', i + 2, j + 1) for j in range(n_stag) for i in range(n_agnt - 1)]
        self.mrf.var_len.update({new_var('u', i + 2, j + 1): 3 for j in range(n_stag) for i in range(n_agnt - 1)})
        # z vars
        self.mrf.var_list += [new_var('z', i + 2, j + 1) for i in range(n_agnt - 1) for j in range(n_stag)]
        self.mrf.var_len.update({new_var('z', 2, j + 1): 12 for j in range(n_stag)})
        self.mrf.var_len.update({new_var('z', i + 3, j + 1): 18 for i in range(n_agnt - 2) for j in range(n_stag)})
        # index
        self.mrf.var_index = {self.mrf.var_list[i]: i for i in range(len(self.mrf.var_list))}
        self.mrf.variables = set(self.mrf.var_list)
        # UNARY POTENTIALS MATRIX (log-domain; -inf = impossible state)
        self.mrf.unary_mat = -np.inf * np.ones((self.N, n_vars), dtype=np.float64)
        # clamped agent vars
        self._clamp_agents()
        # non-clamped agent vars
        col_start = n_agnt
        col_end = n_agnt * self.horizon
        self.mrf.unary_mat[:, np.arange(n_agnt, col_end)] = self.NEU * np.ones((self.N, col_end - col_start))
        # hare reward on the final-time agent columns at each hare cell
        self.mrf.unary_mat[[i for s in [n_agnt*[self.get_index(pos)] for pos in self.hPos] for i in s],
                           len(self.hPos)*list(range(col_end-col_start, col_end))] = -self.r_h / self.lmb
        # d vars
        col_start = col_end
        col_end = col_start + n_agnt*n_stag
        self.mrf.unary_mat[0:2, col_start:col_end] = self.NEU * np.ones((2, col_end - col_start))
        # u vars
        col_start = col_end
        col_end = col_start + n_stag*(n_agnt - 1)
        self.mrf.unary_mat[0:3, col_start:col_end] = self.NEU * np.ones((3, col_end - col_start))
        # stag reward on state 2 of the last agent's u variables
        self.mrf.unary_mat[2, self._get_var_indices([new_var('u', n_agnt, i+1) for i in range(n_stag)])] = \
            (-self.r_s / self.lmb) * np.ones(n_stag)
        # z vars
        col_start = col_end
        col_end = col_start + n_stag
        self.mrf.unary_mat[0:12, col_start:col_end] = self.NEU * np.ones((12, col_end - col_start))
        col_start = col_end
        col_end = col_start + n_stag*(n_agnt - 2)
        self.mrf.unary_mat[0:18, col_start:col_end] = self.NEU * np.ones((18, col_end - col_start))
        # EDGE POTENTIALS TENSOR
        self.mrf.num_edges = n_edges
        self.mrf.edge_pot_tensor = -np.inf * np.ones((self.N, self.N, 2 * self.mrf.num_edges), dtype=np.float64)
        self.mrf.message_index = {}
        # set up sparse matrix representation of adjacency
        self.mrf.f_rows, self.mrf.f_cols, self.mrf.t_rows, self.mrf.t_cols = [], [], [], []
        # phi_q potentials between x vars
        start = 0
        n_slices = n_agnt * (self.horizon - 1)
        self._fast_util(f_start=start, n_slices=n_slices,
                        chunk=np.repeat(self.build_phi_q()[:, :, np.newaxis], n_agnt*(self.horizon - 1), axis=2),
                        from_cols=list(range(0, n_slices)), to_cols=list(range(n_agnt, n_agnt + n_slices)))
        # phi_s potentials between x vars and d vars
        start += n_slices
        n_slices = n_agnt * n_stag
        factor = np.repeat(np.stack((self.NEU * np.ones(self.N), self.MIN * np.ones(self.N)))[:, :, np.newaxis],
                           n_agnt*n_stag, axis=2)
        s_index = [i for s in [n_agnt*[self.get_index(pos)] for pos in self.sPos] for i in s]
        # swap NEU/MIN at the stag cells: d = 1 only when on the stag
        factor[((n_agnt * n_stag) * [0], s_index, range(n_agnt * n_stag))] = self.MIN
        factor[((n_agnt * n_stag) * [1], s_index, range(n_agnt * n_stag))] = self.NEU
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=n_stag * list(range(n_agnt * (self.horizon - 1), n_agnt * self.horizon)),
                        to_cols=list(range(n_agnt * self.horizon, n_agnt * (self.horizon + n_stag))))
        # factors between d_1j - z_2j, and d_2j and z_2j
        start += n_slices
        n_slices = 2 * n_stag
        factor = np.tile((np.stack((np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 0)),
                                    np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 1))), axis=2)), n_stag)
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=[i for i in range(n_agnt * self.horizon, n_agnt * (self.horizon + n_stag))
                                   if i % n_agnt in {0, 1}],
                        to_cols=list(np.repeat(range(n_vars - (n_agnt - 1) * n_stag,
                                                        n_vars - (n_agnt - 1) * n_stag + n_stag), 2)))
        # factors between u_2j - z_2j
        start += n_slices
        n_slices = n_stag
        factor = np.repeat(np.array(self.edge_factor(([0, 1], [0, 1], [0, 1, 2]), 2))[:, :, np.newaxis],
                           n_stag, axis=2)
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=self._get_var_indices([new_var('u', 2, j+1) for j in range(n_stag)]),
                        to_cols=list(range(n_vars - (n_agnt - 1) * n_stag, n_vars - (n_agnt - 1) * n_stag + n_stag)))
        # factors between d_ij - z_ij, i>2
        start += n_slices
        n_slices = n_stag * (n_agnt - 2)
        factor = np.repeat(np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 1))[:, :, np.newaxis],
                           n_slices, axis=2)
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=[i for i in range(n_agnt * self.horizon, n_agnt * (self.horizon + n_stag))
                                   if i % n_agnt not in {0, 1}],
                        to_cols=self._get_var_indices([new_var('z', i+1, j+1)
                                                       for j in range(n_stag) for i in range(2, n_agnt)]))
        # factors between u_ij - z_ij, i>2
        start += n_slices
        n_slices = n_stag * (n_agnt - 2)
        factor = np.repeat(np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 2))[:, :, np.newaxis],
                           n_slices, axis=2)
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=self._get_var_indices([new_var('u', i + 1, j + 1)
                                                         for j in range(n_stag) for i in range(2, n_agnt)]),
                        to_cols=self._get_var_indices([new_var('z', i + 1, j + 1)
                                                       for j in range(n_stag) for i in range(2, n_agnt)]))
        # factors between u_{i-1}j - z_ij, i>2
        start += n_slices
        n_slices = n_stag * (n_agnt - 2)
        factor = np.repeat(np.array(self.edge_factor(([0, 1, 2], [0, 1], [0, 1, 2]), 0))[:, :, np.newaxis],
                           n_slices, axis=2)
        self._fast_util(f_start=start, n_slices=n_slices, chunk=factor,
                        from_cols=self._get_var_indices([new_var('u', i, j + 1)
                                                         for j in range(n_stag) for i in range(2, n_agnt)]),
                        to_cols=self._get_var_indices([new_var('z', i + 1, j + 1)
                                                       for j in range(n_stag) for i in range(2, n_agnt)]))
        # generate a sparse matrix representation of the message indices to variables that receive messages
        self.mrf.message_to_map = coo_matrix((np.ones(len(self.mrf.t_rows), dtype=np.float64),
                                               (self.mrf.t_rows, self.mrf.t_cols)),
                                              (2 * self.mrf.num_edges, n_vars))
        # store an array that lists which variable each message is sent to
        self.mrf.message_to = np.zeros(2 * n_edges, dtype=np.intp)
        self.mrf.message_to[self.mrf.t_rows] = self.mrf.t_cols
        # store an array that lists which variable each message is received from
        self.mrf.message_from = np.zeros(2 * n_edges, dtype=np.intp)
        self.mrf.message_from[self.mrf.f_rows] = self.mrf.f_cols
        self.build = 2
    def update_model(self):
        """
        Updates the mrf model by clamping the current position of the agents.

        Uses the factor API for build == 1 models (requires re-creating the
        matrices) and the direct unary_mat overwrite for build == 2 models.
        :return: None
        """
        if self.build == 1:
            for j, agent_pos in enumerate(self.aPos):
                agent_index = j + 1
                var_key = new_var('x', self.time, agent_index)
                factor = np.full(self.N, self.MIN, dtype=np.float64)
                factor[self.get_index(agent_pos)] = self.NEU
                self.mrf.set_unary_factor(var_key, factor)
            self.mrf.create_matrices() # IMPORTANT
        elif self.build == 2:
            self._clamp_agents()
    def infer(self, inference_type=None, max_iter=30000, display='none'):
        """
        Runs matrix inference on the current MRF. Sets the object bp to the resulting BeliefPropagator object.
        :param display: belief propagation verbosity: none, final or iter.
        :param inference_type: Type of inference: slow - python loops BP OR matrix - sparse matrix BP
        :param max_iter: Max number of iterations of BP
        :return: None
        """
        if inference_type == 'matrix':
            bp = MatrixBeliefPropagator(self.mrf)
        else:
            bp = BeliefPropagator(self.mrf) # DEFAULT: slow BP
        bp.set_max_iter(max_iter)
        bp.infer(display=display)
        bp.load_beliefs()
        self.bp = bp
    def compute_probabilities(self):
        """
        If the bp object is loaded, computes the conditional probabilities for every variable pair in pair_beliefs.

        Beliefs are log-domain, so probabilities are obtained via exp;
        values below self.TOL are rounded by round_by_tol (from util).
        P(x2 | x1) is stored under key (x1, x2).
        :return: None
        """
        if self.bp:
            # convert variable beliefs into probabilities
            self.bp.var_probabilities = {}
            for key in self.bp.var_beliefs:
                var_probabilities = np.exp(self.bp.var_beliefs[key])
                # fix zeroes with tolerance
                self.bp.var_probabilities[key] = round_by_tol(var_probabilities, self.TOL)
            # compute pair conditional probabilities from pair (joint) probabilities and var probabilities
            # P(x2 | x1) is stored in key (x1, x2)
            self.bp.conditional_probabilities = {}
            for key in self.bp.pair_beliefs:
                # division by zero-probability states yields nan/inf;
                # silence the warnings since nanargmax handles them later
                with np.errstate(divide='ignore', invalid='ignore'):
                    pair_prob = round_by_tol(np.exp(self.bp.pair_beliefs[key]), self.TOL)
                    cond_prob = np.transpose(np.transpose(pair_prob) / self.bp.var_probabilities[key[0]])
                    self.bp.conditional_probabilities[key] = cond_prob
    def move_next(self, break_ties='random'):
        """
        Look for the states of maximum probability and move the agents accordingly, breaking ties randomly.
        :param break_ties: 'random' (default) or 'first' (deterministic,
            used for cross-implementation testing)
        :return: none - state change (advances aPos and time)
        """
        if not(self.bp.conditional_probabilities and self.bp.var_probabilities) or self.time == self.horizon:
            return
        for i in range(len(self.aPos)):
            var_key = (new_var('x', self.time, i + 1), new_var('x', self.time + 1, i + 1))
            trans_mat = self.bp.conditional_probabilities[var_key]
            # nanargmax skips nan entries from zero-probability conditioning
            i_from, i_to = np.unravel_index(np.nanargmax(trans_mat), trans_mat.shape)
            if break_ties == 'first': # TESTING: need to be able to go always to the same destination (matrix VS torch)
                i_to = np.isclose(trans_mat, trans_mat[i_from, i_to]).nonzero()[1][0]
            elif break_ties == 'random': # NORMALLY: we break ties randomly
                possibilities = np.isclose(trans_mat, trans_mat[i_from, i_to]).nonzero()[1]
                random_index = np.random.choice(possibilities.shape[0], 1, replace=False).item()
                i_to = possibilities[random_index]
            self.aPos[i] = self.get_pos(i_to)
        self.time += 1
    def run_game(self, inference_type='matrix', display='none', verbose=True, break_ties='random', max_iter=30000):
        """
        Run the inference to the horizon clamping the variables at every time step as decisions are taken.
        :param inference_type: Type of inference: slow - python loops BP OR matrix - sparse matrix BP
        :param display: belief propagation iter verbosity: none, final or iter
        :param verbose: Prints info about the agents final positions
        :param break_ties: Way in which ties are broken, either random or first
        :param max_iter: Max number of BP iterations per time step
        :return: None
        :raises Exception: if no build_* method has been run yet
        """
        if not self.build:
            raise Exception("Model must be built before running the game")
        for i in range(self.horizon - 1):
            self.infer(inference_type=inference_type, display=display, max_iter=max_iter)
            self.compute_probabilities()
            self.move_next(break_ties=break_ties)
            self.update_model()
        # Print trajectories and preys in final positions if verbose
        if verbose:
            for agent in range(1, len(self.aPos) + 1):
                trajectory = []
                for i in range(1, self.horizon + 1):
                    # recover each clamped position from the unary potentials
                    if inference_type == 'matrix':
                        var_index = self.mrf.var_index[new_var('x', i, agent)]
                        position_index = np.argmax(self.mrf.unary_mat[:, var_index])
                    else:
                        position_index = np.argmax(self.mrf.unary_potentials[new_var('x', i, agent)])
                    trajectory.append(self.get_pos(position_index))
                if trajectory[-1] in self.hPos:
                    sth = 'hare'
                elif trajectory[-1] in self.sPos:
                    sth = 'stag'
                else:
                    sth = 'nothing'
                print("->".join([str(el) for el in trajectory]), sth)
| [
"numpy.full",
"mrftools.MarkovNet",
"numpy.argmax",
"mrftools.BeliefPropagator",
"numpy.zeros",
"numpy.ones",
"numpy.errstate",
"numpy.transpose",
"numpy.isclose",
"numpy.arange",
"numpy.exp",
"numpy.random.choice",
"numpy.nanargmax",
"mrftools.MatrixBeliefPropagator"
] | [((1958, 2011), 'numpy.full', 'np.full', (['(self.N, self.N)', 'self.MIN'], {'dtype': 'np.float64'}), '((self.N, self.N), self.MIN, dtype=np.float64)\n', (1965, 2011), True, 'import numpy as np\n'), ((2335, 2356), 'numpy.arange', 'np.arange', (['(self.N - 1)'], {}), '(self.N - 1)\n', (2344, 2356), True, 'import numpy as np\n'), ((2436, 2456), 'numpy.arange', 'np.arange', (['(1)', 'self.N'], {}), '(1, self.N)\n', (2445, 2456), True, 'import numpy as np\n'), ((3031, 3042), 'mrftools.MarkovNet', 'MarkovNet', ([], {}), '()\n', (3040, 3042), False, 'from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator\n'), ((3249, 3302), 'numpy.full', 'np.full', (['(self.N, self.N)', 'self.NEU'], {'dtype': 'np.float64'}), '((self.N, self.N), self.NEU, dtype=np.float64)\n', (3256, 3302), True, 'import numpy as np\n'), ((3858, 3869), 'mrftools.MarkovNet', 'MarkovNet', ([], {}), '()\n', (3867, 3869), False, 'from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator\n'), ((7160, 7198), 'numpy.full', 'np.full', (['(3)', 'self.NEU'], {'dtype': 'np.float64'}), '(3, self.NEU, dtype=np.float64)\n', (7167, 7198), True, 'import numpy as np\n'), ((9152, 9163), 'mrftools.MarkovNet', 'MarkovNet', ([], {}), '()\n', (9161, 9163), False, 'from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator\n'), ((9499, 9533), 'numpy.zeros', 'np.zeros', (['n_vars'], {'dtype': 'np.float64'}), '(n_vars, dtype=np.float64)\n', (9507, 9533), True, 'import numpy as np\n'), ((17842, 17878), 'numpy.zeros', 'np.zeros', (['(2 * n_edges)'], {'dtype': 'np.intp'}), '(2 * n_edges, dtype=np.intp)\n', (17850, 17878), True, 'import numpy as np\n'), ((18056, 18092), 'numpy.zeros', 'np.zeros', (['(2 * n_edges)'], {'dtype': 'np.intp'}), '(2 * n_edges, dtype=np.intp)\n', (18064, 18092), True, 'import numpy as np\n'), ((10949, 10992), 'numpy.ones', 'np.ones', (['(self.N, n_vars)'], {'dtype': 'np.float64'}), '((self.N, n_vars), dtype=np.float64)\n', (10956, 10992), True, 'import numpy 
as np\n'), ((11222, 11260), 'numpy.ones', 'np.ones', (['(self.N, col_end - col_start)'], {}), '((self.N, col_end - col_start))\n', (11229, 11260), True, 'import numpy as np\n'), ((11624, 11657), 'numpy.ones', 'np.ones', (['(2, col_end - col_start)'], {}), '((2, col_end - col_start))\n', (11631, 11657), True, 'import numpy as np\n'), ((11817, 11850), 'numpy.ones', 'np.ones', (['(3, col_end - col_start)'], {}), '((3, col_end - col_start))\n', (11824, 11850), True, 'import numpy as np\n'), ((11997, 12012), 'numpy.ones', 'np.ones', (['n_stag'], {}), '(n_stag)\n', (12004, 12012), True, 'import numpy as np\n'), ((12160, 12194), 'numpy.ones', 'np.ones', (['(12, col_end - col_start)'], {}), '((12, col_end - col_start))\n', (12167, 12194), True, 'import numpy as np\n'), ((12338, 12372), 'numpy.ones', 'np.ones', (['(18, col_end - col_start)'], {}), '((18, col_end - col_start))\n', (12345, 12372), True, 'import numpy as np\n'), ((12489, 12556), 'numpy.ones', 'np.ones', (['(self.N, self.N, 2 * self.mrf.num_edges)'], {'dtype': 'np.float64'}), '((self.N, self.N, 2 * self.mrf.num_edges), dtype=np.float64)\n', (12496, 12556), True, 'import numpy as np\n'), ((19341, 19373), 'mrftools.MatrixBeliefPropagator', 'MatrixBeliefPropagator', (['self.mrf'], {}), '(self.mrf)\n', (19363, 19373), False, 'from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator\n'), ((19405, 19431), 'mrftools.BeliefPropagator', 'BeliefPropagator', (['self.mrf'], {}), '(self.mrf)\n', (19421, 19431), False, 'from mrftools import MarkovNet, BeliefPropagator, MatrixBeliefPropagator\n'), ((6797, 6845), 'numpy.full', 'np.full', (['(self.N, 2)', 'self.MIN'], {'dtype': 'np.float64'}), '((self.N, 2), self.MIN, dtype=np.float64)\n', (6804, 6845), True, 'import numpy as np\n'), ((11181, 11207), 'numpy.arange', 'np.arange', (['n_agnt', 'col_end'], {}), '(n_agnt, col_end)\n', (11190, 11207), True, 'import numpy as np\n'), ((18540, 18583), 'numpy.full', 'np.full', (['self.N', 'self.MIN'], {'dtype': 
'np.float64'}), '(self.N, self.MIN, dtype=np.float64)\n', (18547, 18583), True, 'import numpy as np\n'), ((19968, 20000), 'numpy.exp', 'np.exp', (['self.bp.var_beliefs[key]'], {}), '(self.bp.var_beliefs[key])\n', (19974, 20000), True, 'import numpy as np\n'), ((21315, 21338), 'numpy.nanargmax', 'np.nanargmax', (['trans_mat'], {}), '(trans_mat)\n', (21327, 21338), True, 'import numpy as np\n'), ((1077, 1120), 'numpy.full', 'np.full', (['self.N', 'self.MIN'], {'dtype': 'np.float64'}), '(self.N, self.MIN, dtype=np.float64)\n', (1084, 1120), True, 'import numpy as np\n'), ((4422, 4460), 'numpy.full', 'np.full', (['(2)', 'self.NEU'], {'dtype': 'np.float64'}), '(2, self.NEU, dtype=np.float64)\n', (4429, 4460), True, 'import numpy as np\n'), ((20412, 20458), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (20423, 20458), True, 'import numpy as np\n'), ((1282, 1325), 'numpy.full', 'np.full', (['self.N', 'self.NEU'], {'dtype': 'np.float64'}), '(self.N, self.NEU, dtype=np.float64)\n', (1289, 1325), True, 'import numpy as np\n'), ((1416, 1459), 'numpy.full', 'np.full', (['self.N', 'self.NEU'], {'dtype': 'np.float64'}), '(self.N, self.NEU, dtype=np.float64)\n', (1423, 1459), True, 'import numpy as np\n'), ((4695, 4733), 'numpy.full', 'np.full', (['(3)', 'self.NEU'], {'dtype': 'np.float64'}), '(3, self.NEU, dtype=np.float64)\n', (4702, 4733), True, 'import numpy as np\n'), ((20505, 20538), 'numpy.exp', 'np.exp', (['self.bp.pair_beliefs[key]'], {}), '(self.bp.pair_beliefs[key])\n', (20511, 20538), True, 'import numpy as np\n'), ((23308, 23351), 'numpy.argmax', 'np.argmax', (['self.mrf.unary_mat[:, var_index]'], {}), '(self.mrf.unary_mat[:, var_index])\n', (23317, 23351), True, 'import numpy as np\n'), ((4901, 4940), 'numpy.full', 'np.full', (['(12)', 'self.NEU'], {'dtype': 'np.float64'}), '(12, self.NEU, dtype=np.float64)\n', (4908, 4940), True, 'import numpy as np\n'), ((5773, 5812), 
'numpy.full', 'np.full', (['(18)', 'self.NEU'], {'dtype': 'np.float64'}), '(18, self.NEU, dtype=np.float64)\n', (5780, 5812), True, 'import numpy as np\n'), ((13295, 13310), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (13302, 13310), True, 'import numpy as np\n'), ((13323, 13338), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (13330, 13338), True, 'import numpy as np\n'), ((20595, 20618), 'numpy.transpose', 'np.transpose', (['pair_prob'], {}), '(pair_prob)\n', (20607, 20618), True, 'import numpy as np\n'), ((21764, 21822), 'numpy.random.choice', 'np.random.choice', (['possibilities.shape[0]', '(1)'], {'replace': '(False)'}), '(possibilities.shape[0], 1, replace=False)\n', (21780, 21822), True, 'import numpy as np\n'), ((21501, 21547), 'numpy.isclose', 'np.isclose', (['trans_mat', 'trans_mat[i_from, i_to]'], {}), '(trans_mat, trans_mat[i_from, i_to])\n', (21511, 21547), True, 'import numpy as np\n'), ((21673, 21719), 'numpy.isclose', 'np.isclose', (['trans_mat', 'trans_mat[i_from, i_to]'], {}), '(trans_mat, trans_mat[i_from, i_to])\n', (21683, 21719), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as sp
import pytest
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_warns_message
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import silhouette_samples
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import calinski_harabasz_score
from sklearn.metrics.cluster import calinski_harabaz_score
from sklearn.metrics.cluster import davies_bouldin_score
def test_silhouette():
    # Tests the Silhouette Coefficient.
    #
    # The same score must be obtained from a precomputed distance matrix and
    # from the raw data, for dense and several sparse input formats, both
    # with and without subsampling.
    dataset = datasets.load_iris()
    X_dense = dataset.data
    X_csr = csr_matrix(X_dense)
    X_dok = sp.dok_matrix(X_dense)
    X_lil = sp.lil_matrix(X_dense)
    y = dataset.target
    for X in [X_dense, X_csr, X_dok, X_lil]:
        D = pairwise_distances(X, metric='euclidean')
        # Given that the actual labels are used, we can assume that S would be
        # positive.
        score_precomputed = silhouette_score(D, y, metric='precomputed')
        assert score_precomputed > 0
        # Test without calculating D
        score_euclidean = silhouette_score(X, y, metric='euclidean')
        # BUG FIX: a bare ``pytest.approx(a, b)`` only constructs an approx
        # object and asserts nothing; the comparison must be explicit.
        assert score_precomputed == pytest.approx(score_euclidean)
        if X is X_dense:
            score_dense_without_sampling = score_precomputed
        else:
            assert score_euclidean == pytest.approx(
                score_dense_without_sampling)
        # Test with sampling
        score_precomputed = silhouette_score(D, y, metric='precomputed',
                                             sample_size=int(X.shape[0] / 2),
                                             random_state=0)
        score_euclidean = silhouette_score(X, y, metric='euclidean',
                                           sample_size=int(X.shape[0] / 2),
                                           random_state=0)
        assert score_precomputed > 0
        assert score_euclidean > 0
        assert score_euclidean == pytest.approx(score_precomputed)
        if X is X_dense:
            score_dense_with_sampling = score_precomputed
        else:
            assert score_euclidean == pytest.approx(score_dense_with_sampling)
def test_cluster_size_1():
    # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
    # (cluster 0). We also test the case where there are identical samples
    # as the only members of a cluster (cluster 2). To our knowledge, this case
    # is not discussed in reference material, and we choose for it a sample
    # score of 1.
    points = [[0.], [1.], [1.], [2.], [3.], [3.]]
    assignments = np.array([0, 1, 1, 1, 2, 2])
    # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
    # Cluster 1: intra-cluster = [.5, .5, 1]
    #            inter-cluster = [1, 1, 1]
    #            silhouette    = [.5, .5, 0]
    # Cluster 2: intra-cluster = [0, 0]
    #            inter-cluster = [arbitrary, arbitrary]
    #            silhouette    = [1., 1.]
    overall = silhouette_score(points, assignments)
    assert not np.isnan(overall)
    per_sample = silhouette_samples(points, assignments)
    assert_array_equal(per_sample, [0, .5, .5, 0, 1, 1])
def test_silhouette_paper_example():
    # Explicitly check per-sample results against Rousseeuw (1987)
    # Data from Table 1
    lower = [5.58,
             7.00, 6.50,
             7.08, 7.00, 3.83,
             4.83, 5.08, 8.17, 5.83,
             2.17, 5.75, 6.67, 6.92, 4.92,
             6.42, 5.00, 5.58, 6.00, 4.67, 6.42,
             3.42, 5.50, 6.42, 6.42, 5.00, 3.92, 6.17,
             2.50, 4.92, 6.25, 7.33, 4.50, 2.25, 6.33, 2.75,
             6.08, 6.67, 4.25, 2.67, 6.00, 6.17, 6.17, 6.92, 6.17,
             5.25, 6.83, 4.50, 3.75, 5.75, 5.42, 6.08, 5.83, 6.67, 3.67,
             4.75, 3.00, 6.08, 6.67, 5.00, 5.58, 4.83, 6.17, 5.67, 6.50, 6.92]
    D = np.zeros((12, 12))
    D[np.tril_indices(12, -1)] = lower
    D += D.T
    names = ['BEL', 'BRA', 'CHI', 'CUB', 'EGY', 'FRA', 'IND', 'ISR', 'USA',
             'USS', 'YUG', 'ZAI']
    # Data from Figure 2
    labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]
    expected1 = {'USA': .43, 'BEL': .39, 'FRA': .35, 'ISR': .30, 'BRA': .22,
                 'EGY': .20, 'ZAI': .19, 'CUB': .40, 'USS': .34, 'CHI': .33,
                 'YUG': .26, 'IND': -.04}
    score1 = .28
    # Data from Figure 3
    labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]
    expected2 = {'USA': .47, 'FRA': .44, 'BEL': .42, 'ISR': .37, 'EGY': .02,
                 'ZAI': .28, 'BRA': .25, 'IND': .17, 'CUB': .48, 'USS': .44,
                 'YUG': .31, 'CHI': .31}
    score2 = .33
    for labels, expected, score in [(labels1, expected1, score1),
                                    (labels2, expected2, score2)]:
        expected = [expected[name] for name in names]
        # we check to 2dp because that's what's in the paper
        # BUG FIX: ``pytest.approx(expected, computed, abs=...)`` feeds the
        # computed values into the ``rel`` tolerance parameter and is never
        # asserted, so nothing was checked. Compare explicitly instead.
        assert silhouette_samples(D, np.array(labels),
                                  metric='precomputed') == pytest.approx(
                                      expected, abs=1e-2)
        assert silhouette_score(D, np.array(labels),
                                metric='precomputed') == pytest.approx(
                                    score, abs=1e-2)
def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    X = datasets.load_iris().data
    # Both degenerate labelings must be rejected: one label per sample
    # (n_labels == n_samples) and a single shared label (n_labels == 1).
    for y in (np.arange(X.shape[0]), np.zeros(X.shape[0])):
        err_msg = (r'Number of labels is %d\. Valid values are 2 '
                   r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)))
        with pytest.raises(ValueError, match=err_msg):
            silhouette_score(X, y)
def test_non_encoded_labels():
    # Affine relabeling (labels * 2 + 10) must not change silhouette values.
    iris = datasets.load_iris()
    features = iris.data
    raw_labels = iris.target
    shifted_labels = raw_labels * 2 + 10
    assert (
        silhouette_score(features, shifted_labels) ==
        silhouette_score(features, raw_labels))
    assert_array_equal(
        silhouette_samples(features, shifted_labels),
        silhouette_samples(features, raw_labels))
def test_non_numpy_labels():
    # Plain Python lists must yield the same score as ndarray inputs.
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    assert (
        silhouette_score(list(X), list(y)) == silhouette_score(X, y))
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_silhouette_nonzero_diag(dtype):
    # Make sure silhouette_samples requires diagonal to be zero.
    # Non-regression test for #12178
    # Construct a zero-diagonal matrix
    column = np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T
    dists = pairwise_distances(column)
    labels = [0, 0, 0, 1, 1, 1]
    eps = np.finfo(dists.dtype).eps
    # small values on the diagonal are OK
    dists[2][2] = eps * 10
    silhouette_samples(dists, labels, metric='precomputed')
    # values bigger than eps * 100 are not
    dists[2][2] = eps * 1000
    with pytest.raises(ValueError, match='contains non-zero'):
        silhouette_samples(dists, labels, metric='precomputed')
def assert_raises_on_only_one_label(func):
    """Assert message when there is only one label"""
    rng = np.random.RandomState(seed=0)
    single_label = np.zeros(10)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), single_label)
def assert_raises_on_all_points_same_cluster(func):
    """Assert message when all point are in different clusters"""
    rng = np.random.RandomState(seed=0)
    one_point_per_cluster = np.arange(10)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), one_point_per_cluster)
def test_calinski_harabasz_score():
    assert_raises_on_only_one_label(calinski_harabasz_score)
    assert_raises_on_all_points_same_cluster(calinski_harabasz_score)
    # Assert the value is 1. when all samples are equals
    assert 1. == calinski_harabasz_score(np.ones((10, 2)),
                                           [0] * 5 + [1] * 5)
    # Assert the value is 0. when all the mean cluster are equal
    assert 0. == calinski_harabasz_score([[-1, -1], [1, 1]] * 10,
                                           [0] * 10 + [1] * 10)
    # General case (with non numpy arrays)
    X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
         [[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    # BUG FIX: the bare ``pytest.approx(value, expected)`` call built an
    # approx object and asserted nothing; compare explicitly.
    assert calinski_harabasz_score(X, labels) == pytest.approx(
        45 * (40 - 4) / (5 * (4 - 1)))
def test_deprecated_calinski_harabaz_score():
    # The misspelled alias must emit a FutureWarning pointing at the
    # correctly spelled replacement.
    depr_message = ("Function 'calinski_harabaz_score' has been renamed "
                    "to 'calinski_harabasz_score' "
                    "and will be removed in version 0.23.")
    samples = np.ones((10, 2))
    memberships = [0] * 5 + [1] * 5
    assert_warns_message(FutureWarning, depr_message,
                         calinski_harabaz_score,
                         samples, memberships)
def test_davies_bouldin_score():
    assert_raises_on_only_one_label(davies_bouldin_score)
    assert_raises_on_all_points_same_cluster(davies_bouldin_score)
    # Assert the value is 0. when all samples are equals
    assert davies_bouldin_score(np.ones((10, 2)),
                                [0] * 5 + [1] * 5) == pytest.approx(0.0)
    # Assert the value is 0. when all the mean cluster are equal
    assert davies_bouldin_score([[-1, -1], [1, 1]] * 10,
                                [0] * 10 + [1] * 10) == pytest.approx(0.0)
    # General case (with non numpy arrays)
    X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
         [[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    # BUG FIX: the bare ``pytest.approx(value, expected)`` call asserted
    # nothing; compare explicitly.
    assert davies_bouldin_score(X, labels) == pytest.approx(
        2 * np.sqrt(0.5) / 3)
    # Ensure divide by zero warning is not raised in general case
    with pytest.warns(None) as record:
        davies_bouldin_score(X, labels)
    div_zero_warnings = [
        warning for warning in record
        if "divide by zero encountered" in warning.message.args[0]
    ]
    assert len(div_zero_warnings) == 0
    # General case - cluster have one sample
    X = ([[0, 0], [2, 2], [3, 3], [5, 5]])
    labels = [0, 0, 1, 2]
    # BUG FIX: same no-op ``pytest.approx`` pattern as above.
    assert davies_bouldin_score(X, labels) == pytest.approx((5. / 4) / 3)
| [
"sklearn.datasets.load_iris",
"numpy.ones",
"numpy.isnan",
"scipy.sparse.lil_matrix",
"numpy.arange",
"pytest.mark.parametrize",
"numpy.unique",
"sklearn.utils._testing.assert_array_equal",
"pytest.warns",
"sklearn.metrics.cluster.calinski_harabasz_score",
"numpy.random.RandomState",
"numpy.fi... | [((6414, 6472), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '(np.float32, np.float64)'], {}), "('dtype', (np.float32, np.float64))\n", (6437, 6472), False, 'import pytest\n'), ((645, 665), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (663, 665), False, 'from sklearn import datasets\n'), ((705, 724), 'scipy.sparse.csr_matrix', 'csr_matrix', (['X_dense'], {}), '(X_dense)\n', (715, 724), False, 'from scipy.sparse import csr_matrix\n'), ((737, 759), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['X_dense'], {}), '(X_dense)\n', (750, 759), True, 'import scipy.sparse as sp\n'), ((772, 794), 'scipy.sparse.lil_matrix', 'sp.lil_matrix', (['X_dense'], {}), '(X_dense)\n', (785, 794), True, 'import scipy.sparse as sp\n'), ((2648, 2676), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 2, 2]'], {}), '([0, 1, 1, 1, 2, 2])\n', (2656, 2676), True, 'import numpy as np\n'), ((3033, 3060), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'labels'], {}), '(X, labels)\n', (3049, 3060), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((3106, 3135), 'sklearn.metrics.cluster.silhouette_samples', 'silhouette_samples', (['X', 'labels'], {}), '(X, labels)\n', (3124, 3135), False, 'from sklearn.metrics.cluster import silhouette_samples\n'), ((3140, 3186), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['ss', '[0, 0.5, 0.5, 0, 1, 1]'], {}), '(ss, [0, 0.5, 0.5, 0, 1, 1])\n', (3158, 3186), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((3862, 3880), 'numpy.zeros', 'np.zeros', (['(12, 12)'], {}), '((12, 12))\n', (3870, 3880), True, 'import numpy as np\n'), ((5329, 5349), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (5347, 5349), False, 'from sklearn import datasets\n'), ((5407, 5428), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (5416, 5428), True, 'import numpy as np\n'), ((5672, 5692), 'numpy.zeros', 
'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (5680, 5692), True, 'import numpy as np\n'), ((5955, 5975), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (5973, 5975), False, 'from sklearn import datasets\n'), ((6263, 6283), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (6281, 6283), False, 'from sklearn import datasets\n'), ((6886, 6941), 'sklearn.metrics.cluster.silhouette_samples', 'silhouette_samples', (['dists', 'labels'], {'metric': '"""precomputed"""'}), "(dists, labels, metric='precomputed')\n", (6904, 6941), False, 'from sklearn.metrics.cluster import silhouette_samples\n'), ((7273, 7302), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (7294, 7302), True, 'import numpy as np\n'), ((7542, 7571), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (7563, 7571), True, 'import numpy as np\n'), ((876, 917), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {'metric': '"""euclidean"""'}), "(X, metric='euclidean')\n", (894, 917), False, 'from sklearn.metrics import pairwise_distances\n'), ((1045, 1089), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['D', 'y'], {'metric': '"""precomputed"""'}), "(D, y, metric='precomputed')\n", (1061, 1089), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((1190, 1232), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'y'], {'metric': '"""euclidean"""'}), "(X, y, metric='euclidean')\n", (1206, 1232), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((1241, 1290), 'pytest.approx', 'pytest.approx', (['score_precomputed', 'score_euclidean'], {}), '(score_precomputed, score_euclidean)\n', (1254, 1290), False, 'import pytest\n'), ((2017, 2066), 'pytest.approx', 'pytest.approx', (['score_euclidean', 'score_precomputed'], {}), '(score_euclidean, score_precomputed)\n', (2030, 2066), False, 'import pytest\n'), 
((3076, 3096), 'numpy.isnan', 'np.isnan', (['silhouette'], {}), '(silhouette)\n', (3084, 3096), True, 'import numpy as np\n'), ((3887, 3910), 'numpy.tril_indices', 'np.tril_indices', (['(12)', '(-1)'], {}), '(12, -1)\n', (3902, 3910), True, 'import numpy as np\n'), ((5571, 5611), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'err_msg'}), '(ValueError, match=err_msg)\n', (5584, 5611), False, 'import pytest\n'), ((5621, 5643), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'y'], {}), '(X, y)\n', (5637, 5643), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((5835, 5875), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'err_msg'}), '(ValueError, match=err_msg)\n', (5848, 5875), False, 'import pytest\n'), ((5885, 5907), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'y'], {}), '(X, y)\n', (5901, 5907), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((6046, 6082), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', '(labels * 2 + 10)'], {}), '(X, labels * 2 + 10)\n', (6062, 6082), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((6086, 6113), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'labels'], {}), '(X, labels)\n', (6102, 6113), False, 'from sklearn.metrics.cluster import silhouette_score\n'), ((6147, 6185), 'sklearn.metrics.cluster.silhouette_samples', 'silhouette_samples', (['X', '(labels * 2 + 10)'], {}), '(X, labels * 2 + 10)\n', (6165, 6185), False, 'from sklearn.metrics.cluster import silhouette_samples\n'), ((6187, 6216), 'sklearn.metrics.cluster.silhouette_samples', 'silhouette_samples', (['X', 'labels'], {}), '(X, labels)\n', (6205, 6216), False, 'from sklearn.metrics.cluster import silhouette_samples\n'), ((6387, 6409), 'sklearn.metrics.cluster.silhouette_score', 'silhouette_score', (['X', 'y'], {}), '(X, y)\n', (6403, 6409), False, 'from sklearn.metrics.cluster import 
silhouette_score\n'), ((7046, 7098), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""contains non-zero"""'}), "(ValueError, match='contains non-zero')\n", (7059, 7098), False, 'import pytest\n'), ((7108, 7163), 'sklearn.metrics.cluster.silhouette_samples', 'silhouette_samples', (['dists', 'labels'], {'metric': '"""precomputed"""'}), "(dists, labels, metric='precomputed')\n", (7126, 7163), False, 'from sklearn.metrics.cluster import silhouette_samples\n'), ((7312, 7366), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Number of labels is"""'}), "(ValueError, match='Number of labels is')\n", (7325, 7366), False, 'import pytest\n'), ((7581, 7635), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Number of labels is"""'}), "(ValueError, match='Number of labels is')\n", (7594, 7635), False, 'import pytest\n'), ((8112, 8181), 'sklearn.metrics.cluster.calinski_harabasz_score', 'calinski_harabasz_score', (['([[-1, -1], [1, 1]] * 10)', '([0] * 10 + [1] * 10)'], {}), '([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)\n', (8135, 8181), False, 'from sklearn.metrics.cluster import calinski_harabasz_score\n'), ((8449, 8483), 'sklearn.metrics.cluster.calinski_harabasz_score', 'calinski_harabasz_score', (['X', 'labels'], {}), '(X, labels)\n', (8472, 8483), False, 'from sklearn.metrics.cluster import calinski_harabasz_score\n'), ((8896, 8912), 'numpy.ones', 'np.ones', (['(10, 2)'], {}), '((10, 2))\n', (8903, 8912), True, 'import numpy as np\n'), ((9255, 9273), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (9268, 9273), False, 'import pytest\n'), ((9351, 9417), 'sklearn.metrics.cluster.davies_bouldin_score', 'davies_bouldin_score', (['([[-1, -1], [1, 1]] * 10)', '([0] * 10 + [1] * 10)'], {}), '([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)\n', (9371, 9417), False, 'from sklearn.metrics.cluster import davies_bouldin_score\n'), ((9453, 9471), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (9466, 9471), 
False, 'import pytest\n'), ((9698, 9729), 'sklearn.metrics.cluster.davies_bouldin_score', 'davies_bouldin_score', (['X', 'labels'], {}), '(X, labels)\n', (9718, 9729), False, 'from sklearn.metrics.cluster import davies_bouldin_score\n'), ((9829, 9847), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (9841, 9847), False, 'import pytest\n'), ((9867, 9898), 'sklearn.metrics.cluster.davies_bouldin_score', 'davies_bouldin_score', (['X', 'labels'], {}), '(X, labels)\n', (9887, 9898), False, 'from sklearn.metrics.cluster import davies_bouldin_score\n'), ((10208, 10239), 'sklearn.metrics.cluster.davies_bouldin_score', 'davies_bouldin_score', (['X', 'labels'], {}), '(X, labels)\n', (10228, 10239), False, 'from sklearn.metrics.cluster import davies_bouldin_score\n'), ((1404, 1464), 'pytest.approx', 'pytest.approx', (['score_euclidean', 'score_dense_without_sampling'], {}), '(score_euclidean, score_dense_without_sampling)\n', (1417, 1464), False, 'import pytest\n'), ((2177, 2234), 'pytest.approx', 'pytest.approx', (['score_euclidean', 'score_dense_with_sampling'], {}), '(score_euclidean, score_dense_with_sampling)\n', (2190, 2234), False, 'import pytest\n'), ((5547, 5559), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5556, 5559), True, 'import numpy as np\n'), ((5811, 5823), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5820, 5823), True, 'import numpy as np\n'), ((6696, 6754), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]]'], {'dtype': 'dtype'}), '([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype)\n', (6704, 6754), True, 'import numpy as np\n'), ((6851, 6872), 'numpy.finfo', 'np.finfo', (['dists.dtype'], {}), '(dists.dtype)\n', (6859, 6872), True, 'import numpy as np\n'), ((7004, 7025), 'numpy.finfo', 'np.finfo', (['dists.dtype'], {}), '(dists.dtype)\n', (7012, 7025), True, 'import numpy as np\n'), ((7398, 7410), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7406, 7410), True, 'import numpy as np\n'), ((7667, 7680), 
'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7676, 7680), True, 'import numpy as np\n'), ((7951, 7967), 'numpy.ones', 'np.ones', (['(10, 2)'], {}), '((10, 2))\n', (7958, 7967), True, 'import numpy as np\n'), ((9183, 9199), 'numpy.ones', 'np.ones', (['(10, 2)'], {}), '((10, 2))\n', (9190, 9199), True, 'import numpy as np\n'), ((4948, 4964), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4956, 4964), True, 'import numpy as np\n'), ((5133, 5149), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5141, 5149), True, 'import numpy as np\n'), ((9735, 9747), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (9742, 9747), True, 'import numpy as np\n')] |
import uuid
import json
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
from utils import *
#def check_files(prefix, episodefiles):
# pathfiles = {}
# for ep_file in episodefiles:
# pathfile = prefix + str('/') + str(ep_file)
# ep_file_status = False
# try:
# current_file = open(pathfile)
# ep_file_status = True
# #print("Sucess.")
# except IOError:
# print("File not accessible: ", pathfile)
# finally:
# current_file.close()
#
# if ep_file_status:
# ep_file_id = uuid.uuid4()
# pathfiles[ep_file_id] = pathfile
#
# return pathfiles
#
#
#def decode_mean_distortion(mean_distortion_dict):
# mean_distortion_list = []
# for iteration, mean_distortion in mean_distortion_dict.items():
# mean_distortion_list.append(mean_distortion)
# return mean_distortion_list
if __name__ == '__main__':
    # Aggregate per-trial *.json result files and plot, for each
    # initial-alphabet method, a confidence interval of the final mean
    # distortion across sample seeds.
    # Read the run profile; 'results_directory' names the folder holding
    # one JSON result file per trial.
    profile_pathfile = 'profile.json'
    with open(profile_pathfile) as profile:
        data = profile.read()
        d = json.loads(data)
    prefix_pathfiles = d['results_directory']
    result_files = os.listdir(prefix_pathfiles)
    # check_files() (imported from utils) maps a generated id -> readable
    # result path, skipping unreadable files.
    pathfiles = check_files(prefix_pathfiles, result_files)
    print ('# of json files: ', len(pathfiles))
    # From here it is going to open each json file to see each parameters and data from algorithm perform. May you should to implement some decode or transate functions to deal with json data from files to python data format. There are some decode functions on utils library.
    #trial_result = (initial_alphabet_opt, distortion_measure_opt, num_of_levels, variance_of_samples, norm)
    occurences = []
    # seed (int) -> 1; used only to collect the distinct sample seeds.
    samples_random_seeds = {}
    # One dict per initial-alphabet method: seed (as str) -> final mean
    # distortion of the trial's last round.
    katsavounidis_results = {}
    xiaoxiao_results = {}
    unitary_until_num_of_elements_results = {}
    random_from_samples_results = {}
    sa_results = {}
    for pathfile_id, pathfile in pathfiles.items():
        with open(pathfile) as result:
            data = result.read()
            d = json.loads(data)
        # May you edit from right here! Tip: Read *json file in results to decide how to deal from here.
        initial_alphabet_opt = d['initial_alphabet_opt']
        variance_of_samples = d['variance_of_samples']
        distortion_measure_opt = d['distortion_measure_opt']
        # NOTE(review): duplicate read of 'initial_alphabet_opt' (already
        # assigned three lines above) — harmless but redundant.
        initial_alphabet_opt = d['initial_alphabet_opt']
        num_of_elements = d['num_of_elements']
        num_of_levels = num_of_elements
        num_of_samples = d['num_of_samples']
        samples_random_seed = d['samples_random_seed']
        mean_distortion_by_round = d['mean_distortion_by_round']
        #normal_vector = np.ones(num_of_levels) * (num_of_samples/num_of_levels)
        #sets = d['sets']
        #set_vector = []
        #for k, v in sets.items():
        #    set_vector.append(v)
        #set_vector = np.array(set_vector)
        #norm = np.sqrt(np.sum(np.power(np.abs(set_vector - normal_vector), 2)))
        #if norm == 0 and num_of_elements == 9 and variance_of_samples == 1.0 and initial_alphabet_method == 'katsavounidis':
        #if norm == 0 and num_of_elements == 4 and variance_of_samples == 0.1 and initial_alphabet_method == 'katsavounidis'
        #if variance_of_samples == 0.1 and num_of_elements == 4 and initial_alphabet_opt == 'katsavounidis':
        #trial_info = {'norm': norm}
        #occurences.append(trial_info)
        #if num_of_elements == 4:
        samples_random_seeds[int(samples_random_seed)] = 1
        # For each method (mse distortion only), keep the LAST round's final
        # mean distortion. ``last_k`` ends up as the final key of
        # mean_distortion_by_round; this relies on json.loads preserving
        # insertion order (guaranteed on Python 3.7+) — TODO confirm the
        # last key is indeed the last round.
        if initial_alphabet_opt == 'katsavounidis' and distortion_measure_opt == 'mse':
            last_k = ''
            for k in mean_distortion_by_round.keys():
                last_k = k
            mean_distortion_by_round_list = decode_mean_distortion(mean_distortion_by_round[last_k])
            katsavounidis_results[str(int(samples_random_seed))] = mean_distortion_by_round_list[-1]
        if initial_alphabet_opt == 'xiaoxiao' and distortion_measure_opt == 'mse':
            last_k = ''
            for k in mean_distortion_by_round.keys():
                last_k = k
            mean_distortion_by_round_list = decode_mean_distortion(mean_distortion_by_round[last_k])
            xiaoxiao_results[str(int(samples_random_seed))] = mean_distortion_by_round_list[-1]
        if initial_alphabet_opt == 'sa' and distortion_measure_opt == 'mse':
            last_k = ''
            for k in mean_distortion_by_round.keys():
                last_k = k
            mean_distortion_by_round_list = decode_mean_distortion(mean_distortion_by_round[last_k])
            sa_results[str(int(samples_random_seed))] = mean_distortion_by_round_list[-1]
        if initial_alphabet_opt == 'unitary_until_num_of_elements' and distortion_measure_opt == 'mse':
            last_k = ''
            for k in mean_distortion_by_round.keys():
                last_k = k
            mean_distortion_by_round_list = decode_mean_distortion(mean_distortion_by_round[last_k])
            unitary_until_num_of_elements_results[str(int(samples_random_seed))] = mean_distortion_by_round_list[-1]
        if initial_alphabet_opt == 'random_from_samples' and distortion_measure_opt == 'mse':
            last_k = ''
            for k in mean_distortion_by_round.keys():
                last_k = k
            mean_distortion_by_round_list = decode_mean_distortion(mean_distortion_by_round[last_k])
            random_from_samples_results[str(int(samples_random_seed))] = mean_distortion_by_round_list[-1]
        occurences.append(1)
    print('occurences.len: ', len(occurences))
    samples_random_seeds = samples_random_seeds.items()
    samples_random_seeds_k = np.array([str(k[0]) for k in sorted(samples_random_seeds)])
    labels = samples_random_seeds_k
    interval_list = []
    # 't' distribution. Degrees of freedom. For a sample of size n, the t distribution will have n-1 degrees of freedom. As the sample size n increases, the t distribution becomes closer to the normal distribution, since the standard error approaches the true standard deviation for large n.
    t = 1.699 # 95% confidence, 29 degrees of freedom
    # NOTE(review): the five ``sorted(..._results)`` calls below sort
    # (key, value) pairs whose keys are strings, so the order is
    # lexicographic ('10' < '2'); verify this matches the intended seed
    # ordering — it only matters that all methods use the same order.
    # ---------------------------katsavounivis--------------------------------------
    katsavounidis_results = katsavounidis_results.items()
    print ('len(katsavounidis_results): ', len(katsavounidis_results))
    katsavounidis_v = np.array([float(v[1]) for v in sorted(katsavounidis_results)])
    interval_list.append(get_confidence_interval(katsavounidis_v, t))
    #---------------------------xiaoxiao-----------------------------------------------
    xiaoxiao_results = xiaoxiao_results.items()
    print ('len(xiaoxio_results): ', len(xiaoxiao_results))
    xiaoxiao_v = np.array([float(v[1]) for v in sorted(xiaoxiao_results)])
    interval_list.append(get_confidence_interval(xiaoxiao_v, t))
    #---------------------------sa-----------------------------------------------
    sa_results = sa_results.items()
    print ('len(sa_results): ', len(sa_results))
    sa_v = np.array([float(v[1]) for v in sorted(sa_results)])
    interval_list.append(get_confidence_interval(sa_v, t))
    #---------------------------unitary_until_num_of_elements-----------------------------------------------
    unitary_until_num_of_elements_results = unitary_until_num_of_elements_results.items()
    print ('len(unitary_until_num_of_elements_results): ', len(unitary_until_num_of_elements_results))
    unitary_until_num_of_elements_v = np.array([float(v[1]) for v in sorted(unitary_until_num_of_elements_results)])
    interval_list.append(get_confidence_interval(unitary_until_num_of_elements_v, t))
    #---------------------------random_from_samples-----------------------------------------------
    random_from_samples_results = random_from_samples_results.items()
    print ('len(random_from_samples_results): ', len(random_from_samples_results))
    random_from_samples_v = np.array([float(v[1]) for v in sorted(random_from_samples_results)])
    interval_list.append(get_confidence_interval(random_from_samples_v, t))
    #----------------------------- Percentile stuff ---------------------------------------------------------
    #whis_value = 1.57
    #katsavounidis_1st_percentile, katsavounidis_median, katsavounidis_3rd_percentile, katsavounidis_iqr = get_percentiles(katsavounidis_v)
    #xiaoxiao_1st_percentile, xiaoxiao_median, xiaoxiao_3rd_percentile, xiaoxiao_iqr = get_percentiles(xiaoxiao_v)
    #sa_1st_percentile, sa_median, sa_3rd_percentile, sa_iqr = get_percentiles(sa_v)
    #unitary_until_num_of_elements_1st_percentile, unitary_until_num_of_elements_median, unitary_until_num_of_elements_3rd_percentile, unitary_until_num_of_elements_iqr = get_percentiles(unitary_until_num_of_elements_v)
    #random_from_samples_1st_percentile, random_from_samples_median, random_from_samples_3rd_percentile, random_from_samples_iqr = get_percentiles(random_from_samples_v)
    # interval_list rows are (lower, center, upper) — presumably from
    # get_confidence_interval (utils); verify column meaning there.
    fig, ax = plt.subplots()
    interval_list = np.array(interval_list)
    print (interval_list[:,0])
    print (interval_list[:,1])
    print (interval_list[:,2])
    # Error bars span from the lower to the upper confidence bound.
    err = interval_list[:,2] - interval_list[:,0]
    x_labels = ["katsavounidis", "xiaoxiao", "sa", "unitary", "random"]
    plt.errorbar(x_labels, interval_list[:,1], yerr=err, fmt='o')
    plt.show()
#
# #rects1 = ax.bar(x + width, katsavounidis_mean, width, label='katsavounidis', yerr=katsavounidis_stddev)
# #rects2 = ax.bar(x + 2 * width, xiaoxiao_mean, width, label='xiaoxiao', yerr=xiaoxiao_stddev)
# #rects3 = ax.bar(x + 3 * width, sa_mean, width, label='sa', yerr=sa_stddev)
# #rects4 = ax.bar(x + 4 * width, unitary_until_num_of_elements_mean, width, label='unitary', yerr=unitary_until_num_of_elements_stddev)
# #rects5 = ax.bar(x + 5 * width, random_from_samples_mean, width, label='random', yerr=random_from_samples_stddev)
#
# plt.boxplot(interval_list)
#
# ax.set_ylabel('Minimal distortion')
# ax.set_xlabel('Seed samples from trials')
# ax.set_title('Minimal distortion by initial alphabet method - Nt = 16, k = 8000, var = 1.0')
# ax.set_xticks(x)
# #ax.set_xticklabels(labels)
# ax.legend()
#
# plt.show()
#
# #print (sorted(random_from_samples_results))
#
# ##norm_values_array_l1 = np.array(sorted(norm_values_l1, key=lambda k: k['norm'], reverse=True))
# #norm_values_array_l1 = np.array([v['norm'] for v in norm_values_l1])
# #norm_values_array_l1 = norm_values_array_l1/np.sqrt((np.sum(np.power(norm_values_array_l1, 2))))
# #plt.plot(norm_values_array_l1, 'r*', label='variance = 0.1')
#
# ##norm_values_array_l2 = np.array(sorted(norm_values_l2, key=lambda k: k['norm'], reverse=True))
# #norm_values_array_l2 = np.array([v['norm'] for v in norm_values_l2])
# #norm_values_array_l2 = norm_values_array_l2/np.sqrt((np.sum(np.power(norm_values_array_l2, 2))))
# #plt.plot(xiaoxiao_v, '-', label='xiaoxiao_v')
#
#
#
# #plt.savefig('results_graph1.png')
| [
"matplotlib.pyplot.show",
"json.loads",
"numpy.array",
"matplotlib.pyplot.subplots",
"os.listdir",
"matplotlib.pyplot.errorbar"
] | [((1178, 1206), 'os.listdir', 'os.listdir', (['prefix_pathfiles'], {}), '(prefix_pathfiles)\n', (1188, 1206), False, 'import os\n'), ((8979, 8993), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8991, 8993), True, 'import matplotlib.pyplot as plt\n'), ((9014, 9037), 'numpy.array', 'np.array', (['interval_list'], {}), '(interval_list)\n', (9022, 9037), True, 'import numpy as np\n'), ((9261, 9323), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x_labels', 'interval_list[:, 1]'], {'yerr': 'err', 'fmt': '"""o"""'}), "(x_labels, interval_list[:, 1], yerr=err, fmt='o')\n", (9273, 9323), True, 'import matplotlib.pyplot as plt\n'), ((9327, 9337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9335, 9337), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1111), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1105, 1111), False, 'import json\n'), ((2055, 2071), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2065, 2071), False, 'import json\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.engine import training_v2_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TestSequence(data_utils.Sequence):
  """Fixed-length Sequence yielding all-zero features and all-one labels."""

  def __init__(self, batch_size, feature_shape):
    self.batch_size = batch_size
    self.feature_shape = feature_shape

  def __getitem__(self, item):
    # Every batch is identical regardless of the requested index.
    features = np.zeros((self.batch_size, self.feature_shape))
    targets = np.ones((self.batch_size,))
    return (features, targets)

  def __len__(self):
    # The sequence always reports ten batches.
    return 10
class CallbackFallbackTest(test.TestCase):
  # Tests for training_v2_utils.should_fallback_to_v1_for_callback: the
  # fallback decision depends both on the input type (dataset/generator vs
  # in-memory arrays) and on the configured callbacks.

  def setUp(self):
    """Build one fixture of every supported input type plus callbacks
    configured with both fallback-triggering and safe frequencies."""
    super(CallbackFallbackTest, self).setUp()
    self.batch_size = 5
    self.numpy_input = np.zeros((50, 10))
    self.numpy_target = np.ones(50)
    self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
    self.tensor_target = array_ops.ones((50,))
    self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
        (self.numpy_input, self.numpy_target)).shuffle(50).batch(
            self.batch_size)
    def generator():
      yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
    self.generator_input = generator()
    self.sequence_input = TestSequence(batch_size=self.batch_size,
                                       feature_shape=10)
    # NOTE: 'ckeckpoint' is a typo kept for consistency with the tests
    # below, which reference this attribute name.
    # Integer save_freq/update_freq (per-sample counting) triggers the v1
    # fallback; 'epoch'/'batch' values do not.
    self.fallback_ckeckpoint_cb = cbks.ModelCheckpoint(
        self.get_temp_dir(), save_freq=10)
    self.normal_checkpoint_cb = cbks.ModelCheckpoint(
        self.get_temp_dir(), save_freq='epoch')
    self.fallback_tensorboard_cb = cbks.TensorBoard(update_freq=10)
    self.normal_tensorboard_cb = cbks.TensorBoard(update_freq='batch')
    # CSVLogger has no frequency setting, so it never forces a fallback.
    self.unaffected_cb = cbks.CSVLogger(self.get_temp_dir())

  def test_not_fallback_based_on_input(self):
    """With a fallback-triggering callback, only dataset/generator inputs
    should request the v1 code path."""
    callback_list = [self.fallback_ckeckpoint_cb]
    test_cases = [
        [(self.numpy_input, self.numpy_target), False],
        [[self.tensor_input, self.tensor_target], False],
        [self.sequence_input, False],
        [self.dataset_input, True],
        [self.generator_input, True],
    ]
    for case in test_cases:
      inputs, expected_result = case
      self.assertEqual(training_v2_utils.should_fallback_to_v1_for_callback(
          inputs, callback_list), expected_result)

  def test_fallback_based_on_callbacks(self):
    """With dataset input, a single fallback-triggering callback anywhere
    in the list is enough to request the v1 code path."""
    inputs = self.dataset_input
    test_cases = [
        [[self.fallback_ckeckpoint_cb], True],
        [[self.normal_checkpoint_cb], False],
        [[self.fallback_ckeckpoint_cb, self.normal_checkpoint_cb], True],
        [[self.fallback_tensorboard_cb], True],
        [[self.normal_tensorboard_cb], False],
        [[self.unaffected_cb], False],
    ]
    for case in test_cases:
      callbacks, expected_result = case
      self.assertEqual(training_v2_utils.should_fallback_to_v1_for_callback(
          inputs, callbacks), expected_result)
if __name__ == '__main__':
test.main()
| [
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"numpy.zeros",
"numpy.ones",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.keras.callbacks.TensorBoard",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices",
"tensorflow.python.ke... | [((3876, 3887), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (3885, 3887), False, 'from tensorflow.python.platform import test\n'), ((1716, 1734), 'numpy.zeros', 'np.zeros', (['(50, 10)'], {}), '((50, 10))\n', (1724, 1734), True, 'import numpy as np\n'), ((1759, 1770), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (1766, 1770), True, 'import numpy as np\n'), ((1795, 1836), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(2.0)'], {'shape': '(50, 10)'}), '(2.0, shape=(50, 10))\n', (1815, 1836), False, 'from tensorflow.python.framework import constant_op\n'), ((1862, 1883), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(50,)'], {}), '((50,))\n', (1876, 1883), False, 'from tensorflow.python.ops import array_ops\n'), ((2540, 2572), 'tensorflow.python.keras.callbacks.TensorBoard', 'cbks.TensorBoard', ([], {'update_freq': '(10)'}), '(update_freq=10)\n', (2556, 2572), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((2606, 2643), 'tensorflow.python.keras.callbacks.TensorBoard', 'cbks.TensorBoard', ([], {'update_freq': '"""batch"""'}), "(update_freq='batch')\n", (2622, 2643), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((1432, 1479), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.feature_shape)'], {}), '((self.batch_size, self.feature_shape))\n', (1440, 1479), True, 'import numpy as np\n'), ((1493, 1520), 'numpy.ones', 'np.ones', (['(self.batch_size,)'], {}), '((self.batch_size,))\n', (1500, 1520), True, 'import numpy as np\n'), ((3143, 3218), 'tensorflow.python.keras.engine.training_v2_utils.should_fallback_to_v1_for_callback', 'training_v2_utils.should_fallback_to_v1_for_callback', (['inputs', 'callback_list'], {}), '(inputs, callback_list)\n', (3195, 3218), False, 'from tensorflow.python.keras.engine import training_v2_utils\n'), ((3745, 3816), 
'tensorflow.python.keras.engine.training_v2_utils.should_fallback_to_v1_for_callback', 'training_v2_utils.should_fallback_to_v1_for_callback', (['inputs', 'callbacks'], {}), '(inputs, callbacks)\n', (3797, 3816), False, 'from tensorflow.python.keras.engine import training_v2_utils\n'), ((2081, 2112), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 10)'], {}), '((self.batch_size, 10))\n', (2089, 2112), True, 'import numpy as np\n'), ((2114, 2138), 'numpy.ones', 'np.ones', (['self.batch_size'], {}), '(self.batch_size)\n', (2121, 2138), True, 'import numpy as np\n'), ((1909, 1988), 'tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices', 'dataset_ops.DatasetV2.from_tensor_slices', (['(self.numpy_input, self.numpy_target)'], {}), '((self.numpy_input, self.numpy_target))\n', (1949, 1988), False, 'from tensorflow.python.data.ops import dataset_ops\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Fixtures for QMT unit tests."""
import os
import pytest
import qmt
import sys
@pytest.fixture(scope="session")
def fix_exampleDir():
    """Return the example directory path."""
    # Resolve the repository root relative to the installed qmt package,
    # then point at its "examples" subdirectory.
    rootPath = os.path.join(os.path.dirname(qmt.__file__), os.pardir)
    return os.path.abspath(os.path.join(rootPath, "examples"))
@pytest.fixture(scope="function")
def fix_FCDoc():
    """Set up and tear down a FreeCAD document."""
    import FreeCAD

    doc = FreeCAD.newDocument("testDoc")
    # Hand the fresh document to the test; teardown closes it afterwards.
    yield doc
    FreeCAD.closeDocument("testDoc")
################################################################################
# Sketches
@pytest.fixture(scope="function")
def fix_two_cycle_sketch():
    """Return two-cycle sketch function object."""

    def aux_two_cycle_sketch(
        a=(20, 20, 0),
        b=(-30, 20, 0),
        c=(-30, -10, 0),
        d=(20, -10, 0),
        e=(50, 50, 0),
        f=(60, 50, 0),
        g=(55, 60, 0),
    ):
        """Helper function to drop a simple multi-cycle sketch.

        The segments are ordered into one rectangle and one triangle.
        """
        # Note: the z-component is zero, as sketches are plane objects.
        # Adjust orientation with Sketch.Placement(Normal, Rotation)
        import Part
        import FreeCAD

        vec = FreeCAD.Vector
        lseg = Part.LineSegment

        doc = FreeCAD.ActiveDocument
        sketch = doc.addObject("Sketcher::SketchObject", "Sketch")
        # First cycle: rectangle a -> b -> c -> d -> a.
        sketch.addGeometry(lseg(vec(*a), vec(*b)), False)
        sketch.addGeometry(lseg(vec(*b), vec(*c)), False)
        sketch.addGeometry(lseg(vec(*c), vec(*d)), False)
        sketch.addGeometry(lseg(vec(*d), vec(*a)), False)
        # Second cycle: triangle e -> f -> g -> e.
        sketch.addGeometry(lseg(vec(*e), vec(*f)), False)
        sketch.addGeometry(lseg(vec(*f), vec(*g)), False)
        sketch.addGeometry(lseg(vec(*g), vec(*e)), False)
        doc.recompute()
        return sketch

    return aux_two_cycle_sketch
@pytest.fixture(scope="function")
def fix_rectangle_sketch():
    """Return unit square sketch function object."""

    def aux_rectangle_sketch(x_length=1, y_length=1, x_start=0, y_start=0):
        """Helper function to drop a simple unit square sketch.

        The segments are carefully ordered.
        """
        import FreeCAD
        import Part

        vec = FreeCAD.Vector
        lseg = Part.LineSegment

        # Corners of the axis-aligned rectangle, counter-clockwise.
        a = (x_start, y_start, 0)
        b = (x_length, y_start, 0)
        c = (x_length, y_length, 0)
        d = (x_start, y_length, 0)

        doc = FreeCAD.ActiveDocument
        sketch = doc.addObject("Sketcher::SketchObject", "Sketch")
        sketch.addGeometry(lseg(vec(*a), vec(*b)), False)
        sketch.addGeometry(lseg(vec(*b), vec(*c)), False)
        sketch.addGeometry(lseg(vec(*c), vec(*d)), False)
        sketch.addGeometry(lseg(vec(*d), vec(*a)), False)
        doc.recompute()
        return sketch

    return aux_rectangle_sketch
@pytest.fixture(scope="function")
def fix_hexagon_sketch():
    """Return hexagon sketch function object."""

    def aux_hexagon_sketch(r=1):
        """Helper function to drop a hexagonal sketch."""
        import FreeCAD
        import ProfileLib.RegularPolygon

        vec = FreeCAD.Vector
        doc = FreeCAD.ActiveDocument
        sketch = doc.addObject("Sketcher::SketchObject", "HexSketch")
        # Regular hexagon centered at (1, 1) with circumradius r.
        ProfileLib.RegularPolygon.makeRegularPolygon(
            "HexSketch", 6, vec(1, 1, 0), vec(1 + r, 1, 0), False
        )
        doc.recompute()
        return sketch

    return aux_hexagon_sketch
################################################################################
# Tasks environment
@pytest.fixture(scope="function")
def fix_task_env():
    """
    Set up a testing environment for tasks.
    """
    import numpy as np

    def input_task_example(parts_dict):
        """Simple example task. This is the first task in the chain.

        :param dict parts_dict: Dictionary specifying the input parts. It should be of the form
            {"part":list_of_points}.
        """
        for key_val in parts_dict:
            print(str(key_val) + " " + str(parts_dict[key_val]))
        return parts_dict

    def gathered_task_example(input_data_list, num_points):
        """Takes the example task and does some work on it. This is a gathered task, which means
        that all the previous work is gathered up and worked on together.

        :param list input_data_list: List of dictionaries from several input tasks.
        :param list num_points: List of ints specifying the number of grid points for a given
            geometry.
        """
        return_list = []
        for i, geom in enumerate(input_data_list):
            geometry_obj = input_data_list[i]
            mesh = {}
            # One uniform grid on [0, 1] per part, sized by the matching
            # num_points entry for this geometry.
            for part in geometry_obj:
                mesh[part] = np.linspace(0.0, 1.0, num_points[i])
            return_list += [mesh]
        return return_list

    def post_processing_task_example(input_data, gathered_data, prefactor):
        """Takes input from the gathered task and does some more work in parallel.

        :param dict input_data: An input geometry.
        :param dict gathered_data: One of the meshes produced from gathered_task_example.
        :param float prefactor: Prefactor used to scale the output
        """
        result = 0.0
        # Accumulate the scaled product of per-part sums over all parts.
        for part in input_data:
            result += prefactor * np.sum(input_data[part]) * np.sum(gathered_data[part])
        return result

    return input_task_example, gathered_task_example, post_processing_task_example
| [
"FreeCAD.closeDocument",
"numpy.sum",
"os.path.dirname",
"pytest.fixture",
"FreeCAD.newDocument",
"numpy.linspace",
"os.path.join"
] | [((180, 211), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (194, 211), False, 'import pytest\n'), ((415, 447), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (429, 447), False, 'import pytest\n'), ((725, 757), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (739, 757), False, 'import pytest\n'), ((2035, 2067), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2049, 2067), False, 'import pytest\n'), ((3014, 3046), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3028, 3046), False, 'import pytest\n'), ((3728, 3760), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3742, 3760), False, 'import pytest\n'), ((546, 576), 'FreeCAD.newDocument', 'FreeCAD.newDocument', (['"""testDoc"""'], {}), "('testDoc')\n", (565, 576), False, 'import FreeCAD\n'), ((595, 627), 'FreeCAD.closeDocument', 'FreeCAD.closeDocument', (['"""testDoc"""'], {}), "('testDoc')\n", (616, 627), False, 'import FreeCAD\n'), ((307, 336), 'os.path.dirname', 'os.path.dirname', (['qmt.__file__'], {}), '(qmt.__file__)\n', (322, 336), False, 'import os\n'), ((376, 410), 'os.path.join', 'os.path.join', (['rootPath', '"""examples"""'], {}), "(rootPath, 'examples')\n", (388, 410), False, 'import os\n'), ((4894, 4930), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'num_points[i]'], {}), '(0.0, 1.0, num_points[i])\n', (4905, 4930), True, 'import numpy as np\n'), ((5487, 5514), 'numpy.sum', 'np.sum', (['gathered_data[part]'], {}), '(gathered_data[part])\n', (5493, 5514), True, 'import numpy as np\n'), ((5460, 5484), 'numpy.sum', 'np.sum', (['input_data[part]'], {}), '(input_data[part])\n', (5466, 5484), True, 'import numpy as np\n')] |
'''
MIT License
Copyright (c) 2017 <NAME>
'''
import numpy as np
def get_lookup_tables(text):
    """Build integer<->character lookup tables over the characters of `text`."""
    vocab = tuple(set(text))
    int2char = {}
    for index, character in enumerate(vocab):
        int2char[index] = character
    char2int = {character: index for index, character in int2char.items()}
    return int2char, char2int
def get_batches(arr, n_seqs, n_steps):
    '''Create a generator that returns batches of size
       n_seqs x n_steps from arr.
    '''
    batch_size = n_seqs * n_steps
    n_batches = len(arr) // batch_size
    # Trim the tail so every batch is full, then lay out one sequence per row.
    arr = arr[:n_batches * batch_size].reshape((n_seqs, -1))
    for start in range(0, arr.shape[1], n_steps):
        # The features.
        x = arr[:, start:start + n_steps]
        # The targets: inputs shifted left by one; the final column wraps
        # around to column 0 of the full array on the last batch.
        target_cols = np.arange(start + 1, start + n_steps + 1)
        y = np.take(arr, target_cols, axis=1, mode='wrap')
        yield x, y
def one_hot_encode(arr, n_labels):
    """One-hot encode an integer array, appending a label axis of size n_labels."""
    # Row k of the identity matrix is the one-hot vector for label k, so
    # fancy-indexing the identity with `arr` encodes every element at once
    # and yields shape (*arr.shape, n_labels).
    identity = np.eye(n_labels, dtype=np.float32)
    return identity[arr]
| [
"numpy.zeros_like",
"numpy.multiply",
"numpy.arange"
] | [((790, 806), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (803, 806), True, 'import numpy as np\n'), ((1086, 1109), 'numpy.multiply', 'np.multiply', (['*arr.shape'], {}), '(*arr.shape)\n', (1097, 1109), True, 'import numpy as np\n'), ((1203, 1230), 'numpy.arange', 'np.arange', (['one_hot.shape[0]'], {}), '(one_hot.shape[0])\n', (1212, 1230), True, 'import numpy as np\n')] |
#!python3
'''This module provides all of the interaction with scikit-learn and
performs the logistic regressions'''
import numpy
from sklearn import linear_model
from sklearn.metrics import log_loss
def create_school_array(base_table):
    '''Return a numpy array suitable for the regression tool, where
    base_table is a Table with the reduced data-set but all columns.'''
    # Keep only the three columns the regression needs.
    wanted_columns = ['GPA', 'ACT', 'Y']
    rows = list(base_table.get_columns(wanted_columns))
    return numpy.array(rows)
def create_standard_array(base_table, ACTcase='ACT25'):
    '''Return a numpy array suitable for the regression tool, where
    base_table is a Table with the reduced data-set but all columns.
    The ACT column is replaced by its difference from the school's
    ACTcase percentile; rows with a non-float percentile are dropped.'''
    rows = list(base_table.get_columns(['GPA', 'ACT', 'Y', ACTcase]))
    diff_rows = []
    for row in rows:
        # Skip schools with a missing (non-float) percentile value.
        if isinstance(row[3], float):
            diff_rows.append([row[0], row[1] - row[3], row[2]])
    return numpy.array(diff_rows)
def run_lregression(data):
    '''Returns the logistic regression results for the passed numpy array
    where the first columns are the independent variables and the final
    column is the outcome (Y)'''
    # Very large C effectively disables regularization in scikit-learn.
    lr = linear_model.LogisticRegression(C=10000000000, solver='newton-cg')
    X = data[:,:-1]
    Y = data[:,-1]
    lr.fit(X, Y)
    GPAcoef = lr.coef_[0][0]
    ACTcoef = lr.coef_[0][1]
    intercept = lr.intercept_[0]
    score = lr.score(X,Y)
    loss = log_loss(Y, lr.predict_proba(X))
    # now create some sensitivity stats
    # first find the average gpa of points near 50/50
    preds = lr.predict_proba(X)
    gpa_yes = []
    for i in range(len(preds)):
        # Keep samples whose predicted probability is close to 0.5.
        if (preds[i][0] > 0.35) and (preds[i][0] < 0.65):
            gpa_yes.append(X[i,0])
    # then calculate the ACT that corresponds to this average
    # (the GPA/ACT pair on the 50% decision boundary).
    avg_yes_gpa = numpy.mean(gpa_yes)
    avg_act_yes = (-intercept - avg_yes_gpa*GPAcoef)/ACTcoef
    # next, build a sensitivity matrix and check the predictions
    # (boundary point, then small GPA/ACT bumps above it).
    X_check = numpy.array([[avg_yes_gpa, avg_act_yes],
                 [avg_yes_gpa+0.05, avg_act_yes+.5],
                 [avg_yes_gpa+0.1, avg_act_yes+1]])
    pred_check = lr.predict_proba(X_check)
    return [Y.sum(), GPAcoef, ACTcoef, intercept, score, loss,
            avg_yes_gpa, avg_act_yes, pred_check[0][1],pred_check[1][1],
            pred_check[2][1]]
| [
"sklearn.linear_model.LogisticRegression",
"numpy.mean",
"numpy.array"
] | [((529, 555), 'numpy.array', 'numpy.array', (['reduced_table'], {}), '(reduced_table)\n', (540, 555), False, 'import numpy\n'), ((1021, 1044), 'numpy.array', 'numpy.array', (['diff_table'], {}), '(diff_table)\n', (1032, 1044), False, 'import numpy\n'), ((1269, 1335), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(10000000000)', 'solver': '"""newton-cg"""'}), "(C=10000000000, solver='newton-cg')\n", (1300, 1335), False, 'from sklearn import linear_model\n'), ((1920, 1939), 'numpy.mean', 'numpy.mean', (['gpa_yes'], {}), '(gpa_yes)\n', (1930, 1939), False, 'import numpy\n'), ((2085, 2209), 'numpy.array', 'numpy.array', (['[[avg_yes_gpa, avg_act_yes], [avg_yes_gpa + 0.05, avg_act_yes + 0.5], [\n avg_yes_gpa + 0.1, avg_act_yes + 1]]'], {}), '([[avg_yes_gpa, avg_act_yes], [avg_yes_gpa + 0.05, avg_act_yes +\n 0.5], [avg_yes_gpa + 0.1, avg_act_yes + 1]])\n', (2096, 2209), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Jul 1, 2019
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
"""
from math import pi
from typing import Union, Optional, Iterable
import numpy as np
from scipy import sparse
from sknetwork.utils import Bunch
from sknetwork.utils.format import directed2undirected
from sknetwork.utils.parse import edgelist2adjacency
def block_model(sizes: Iterable, p_in: Union[float, list, np.ndarray] = .2, p_out: float = .05,
                random_state: Optional[int] = None, metadata: bool = False) \
        -> Union[sparse.csr_matrix, Bunch]:
    """Stochastic block model.

    Parameters
    ----------
    sizes :
        Block sizes.
    p_in :
        Probability of connection within blocks.
    p_out :
        Probability of connection across blocks.
    random_state :
        Seed of the random generator (optional).
    metadata :
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (labels).

    Example
    -------
    >>> from sknetwork.data import block_model
    >>> sizes = np.array([4, 5])
    >>> adjacency = block_model(sizes)
    >>> adjacency.shape
    (9, 9)

    References
    ----------
    <NAME>., <NAME>., <NAME>., <NAME>. (2007).
    `Mixed membership stochastic blockmodels. <https://arxiv.org/pdf/0705.4485.pdf>`_
    Journal of Machine Learning Research.
    """
    np.random.seed(random_state)
    sizes = np.array(sizes)
    # Broadcast a scalar intra-block probability to one value per block.
    if type(p_in) == float:
        p_in = p_in * np.ones_like(sizes)
    else:
        p_in = np.array(p_in)

    # each edge is considered twice
    p_in = p_in / 2

    # Build only the upper triangle of the block matrix (None below the
    # diagonal); the symmetrization below fills the rest.
    matrix = []
    for i, a in enumerate(sizes):
        row = []
        for j, b in enumerate(sizes):
            if j < i:
                row.append(None)
            elif j > i:
                row.append(sparse.random(a, b, p_out, dtype=bool))
            else:
                row.append(sparse.random(a, a, p_in[i], dtype=bool))
        matrix.append(row)
    adjacency = sparse.bmat(matrix)
    # Drop self-loops, then symmetrize to obtain an undirected graph.
    adjacency.setdiag(0)
    adjacency = directed2undirected(adjacency.tocsr(), weighted=False)

    if metadata:
        graph = Bunch()
        graph.adjacency = adjacency
        # Node labels: block index, repeated once per node of the block.
        labels = np.repeat(np.arange(len(sizes)), sizes)
        graph.labels = labels
        return graph
    else:
        return adjacency
def erdos_renyi(n: int = 20, p: float = .3, random_state: Optional[int] = None) -> sparse.csr_matrix:
    """Erdos-Renyi graph.

    An Erdos-Renyi graph is a single-block stochastic block model with no
    inter-block edges.

    Parameters
    ----------
    n :
        Number of nodes.
    p :
        Probability of connection between nodes.
    random_state :
        Seed of the random generator (optional).

    Returns
    -------
    adjacency : sparse.csr_matrix
        Adjacency matrix.

    Example
    -------
    >>> from sknetwork.data import erdos_renyi
    >>> adjacency = erdos_renyi(7)
    >>> adjacency.shape
    (7, 7)

    References
    ----------
    <NAME>., <NAME>. (1959). `On Random Graphs. <https://www.renyi.hu/~p_erdos/1959-11.pdf>`_
    Publicationes Mathematicae.
    """
    single_block = np.array([n])
    return block_model(single_block, p, 0., random_state, metadata=False)
def linear_digraph(n: int = 3, metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Linear graph (directed).

    Nodes are chained 0 -> 1 -> ... -> n-1.

    Parameters
    ----------
    n : int
        Number of nodes.
    metadata : bool
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import linear_digraph
    >>> adjacency = linear_digraph(5)
    >>> adjacency.shape
    (5, 5)
    """
    sources = np.arange(n - 1)
    targets = sources + 1
    weights = np.ones(n - 1, dtype=int)
    adjacency = sparse.csr_matrix((weights, (sources, targets)), shape=(n, n))
    if not metadata:
        return adjacency
    graph = Bunch()
    graph.adjacency = adjacency
    # Lay the nodes out on a horizontal line.
    graph.position = np.array((np.arange(n), np.zeros(n))).T
    return graph
def linear_graph(n: int = 3, metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Linear graph (undirected).

    Parameters
    ----------
    n : int
        Number of nodes.
    metadata : bool
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import linear_graph
    >>> adjacency = linear_graph(5)
    >>> adjacency.shape
    (5, 5)
    """
    # Build the directed chain (with positions), then symmetrize it.
    graph = linear_digraph(n, True)
    symmetric = graph.adjacency + graph.adjacency.T
    if not metadata:
        return symmetric
    graph.adjacency = symmetric
    return graph
def cyclic_position(n: int) -> np.ndarray:
    """Place n nodes evenly on a circle of unit radius.

    Parameters
    ----------
    n : int
        Number of nodes.

    Returns
    -------
    position : np.ndarray
        Position of nodes, shape (n, 2).
    """
    # Angle of each node, counter-clockwise from the positive x-axis.
    angles = 2 * pi * np.arange(n).astype(float) / n
    return np.array((np.cos(angles), np.sin(angles))).T
def cyclic_digraph(n: int = 3, metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Cyclic graph (directed).

    Nodes are chained 0 -> 1 -> ... -> n-1 -> 0.

    Parameters
    ----------
    n : int
        Number of nodes.
    metadata : bool
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import cyclic_digraph
    >>> adjacency = cyclic_digraph(5)
    >>> adjacency.shape
    (5, 5)
    """
    sources = np.arange(n)
    # Each node points to its successor; the last wraps around to node 0.
    targets = (sources + 1) % n
    adjacency = sparse.csr_matrix((np.ones(n, dtype=int), (sources, targets)), shape=(n, n))
    if not metadata:
        return adjacency
    graph = Bunch()
    graph.adjacency = adjacency
    graph.position = cyclic_position(n)
    return graph
def cyclic_graph(n: int = 3, metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Cyclic graph (undirected).

    Parameters
    ----------
    n : int
        Number of nodes.
    metadata : bool
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import cyclic_graph
    >>> adjacency = cyclic_graph(5)
    >>> adjacency.shape
    (5, 5)
    """
    # Build the directed cycle (with positions), then symmetrize it.
    graph = cyclic_digraph(n, metadata=True)
    graph.adjacency = directed2undirected(graph.adjacency)
    return graph if metadata else graph.adjacency
def grid(n1: int = 10, n2: int = 10, metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Grid (undirected).

    Parameters
    ----------
    n1, n2 : int
        Grid dimension.
    metadata : bool
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import grid
    >>> adjacency = grid(10, 5)
    >>> adjacency.shape
    (50, 50)
    """
    # Grid coordinates in row-major order; their rank gives the node index.
    coordinates = [(i1, i2) for i1 in range(n1) for i2 in range(n2)]
    index = {coordinate: node for node, coordinate in enumerate(coordinates)}
    edges = []
    # Horizontal edges (towards larger i1), then vertical edges (towards
    # larger i2), matching the coordinate order above.
    for i1, i2 in coordinates:
        if i1 + 1 < n1:
            edges.append((index[(i1, i2)], index[(i1 + 1, i2)]))
    for i1, i2 in coordinates:
        if i2 + 1 < n2:
            edges.append((index[(i1, i2)], index[(i1, i2 + 1)]))
    adjacency = edgelist2adjacency(edges, undirected=True)
    if not metadata:
        return adjacency
    graph = Bunch()
    graph.adjacency = adjacency
    graph.position = np.array(coordinates)
    return graph
def albert_barabasi(n: int = 100, degree: int = 3, undirected: bool = True, seed: Optional[int] = None) \
        -> sparse.csr_matrix:
    """Albert-Barabasi model.

    Parameters
    ----------
    n : int
        Number of nodes.
    degree : int
        Degree of incoming nodes (less than **n**).
    undirected : bool
        If ``True``, return an undirected graph.
    seed :
        Seed of the random generator (optional).

    Returns
    -------
    adjacency : sparse.csr_matrix
        Adjacency matrix.

    Example
    -------
    >>> from sknetwork.data import albert_barabasi
    >>> adjacency = albert_barabasi(30, 3)
    >>> adjacency.shape
    (30, 30)

    References
    ----------
    <NAME>., <NAME>. (2002). `Statistical mechanics of complex networks
    <https://journals.aps.org/rmp/abstract/10.1103/RevModPhys.74.47>`_
    Reviews of Modern Physics.
    """
    np.random.seed(seed)
    degrees = np.zeros(n, int)
    # Seed the process with a clique on the first `degree` nodes.
    degrees[:degree] = degree - 1
    edges = [(i, j) for i in range(degree) for j in range(i)]
    for i in range(degree, n):
        # Preferential attachment: each new node picks `degree` distinct
        # targets with probability proportional to their current degree.
        neighbors = np.random.choice(i, p=degrees[:i]/degrees.sum(), size=degree, replace=False)
        degrees[neighbors] += 1
        degrees[i] = degree
        edges += [(i, j) for j in neighbors]
    return edgelist2adjacency(edges, undirected)
def watts_strogatz(n: int = 100, degree: int = 6, prob: float = 0.05, seed: Optional[int] = None,
                   metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:
    """Watts-Strogatz model.

    Parameters
    ----------
    n :
        Number of nodes.
    degree :
        Initial degree of nodes.
    prob :
        Probability of edge modification.
    seed :
        Seed of the random generator (optional).
    metadata :
        If ``True``, return a `Bunch` object with metadata.

    Returns
    -------
    adjacency or graph : Union[sparse.csr_matrix, Bunch]
        Adjacency matrix or graph with metadata (positions).

    Example
    -------
    >>> from sknetwork.data import watts_strogatz
    >>> adjacency = watts_strogatz(30, 4, 0.02)
    >>> adjacency.shape
    (30, 30)

    References
    ----------
    <NAME>., <NAME>. (1998). Collective dynamics of small-world networks, Nature.
    """
    np.random.seed(seed)
    # Ring lattice: every node links to its next `degree // 2` neighbors
    # (modulo n); symmetrization below makes the lattice undirected.
    edges = np.array([(i, (i + j + 1) % n) for i in range(n) for j in range(degree // 2)])
    row, col = edges[:, 0], edges[:, 1]
    adjacency = sparse.coo_matrix((np.ones_like(row, int), (row, col)), shape=(n, n))
    adjacency = sparse.lil_matrix(adjacency + adjacency.T)
    nodes = np.arange(n)
    for i in range(n):
        neighbors = adjacency.rows[i]
        # Possible rewiring targets: any node not already adjacent to i.
        candidates = list(set(nodes) - set(neighbors) - {i})
        for j in neighbors:
            # Rewire each edge with probability `prob` to a random candidate.
            if np.random.random() < prob:
                node = np.random.choice(candidates)
                adjacency[i, node] = 1
                adjacency[node, i] = 1
                adjacency[i, j] = 0
                adjacency[j, i] = 0
    adjacency = sparse.csr_matrix(adjacency, shape=adjacency.shape)
    if metadata:
        graph = Bunch()
        graph.adjacency = adjacency
        graph.position = cyclic_position(n)
        return graph
    else:
        return adjacency
| [
"sknetwork.utils.parse.edgelist2adjacency",
"numpy.random.choice",
"numpy.random.seed",
"numpy.ones_like",
"scipy.sparse.random",
"numpy.zeros",
"scipy.sparse.bmat",
"scipy.sparse.lil_matrix",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"scipy.sparse.csr_matrix",
"sknetwork.ut... | [((1535, 1563), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (1549, 1563), True, 'import numpy as np\n'), ((1576, 1591), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (1584, 1591), True, 'import numpy as np\n'), ((2142, 2161), 'scipy.sparse.bmat', 'sparse.bmat', (['matrix'], {}), '(matrix)\n', (2153, 2161), False, 'from scipy import sparse\n'), ((3848, 3864), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (3857, 3864), True, 'import numpy as np\n'), ((3875, 3890), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (3884, 3890), True, 'import numpy as np\n'), ((5300, 5309), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (5306, 5309), True, 'import numpy as np\n'), ((5318, 5327), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (5324, 5327), True, 'import numpy as np\n'), ((5964, 5976), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5973, 5976), True, 'import numpy as np\n'), ((6921, 6957), 'sknetwork.utils.format.directed2undirected', 'directed2undirected', (['graph.adjacency'], {}), '(graph.adjacency)\n', (6940, 6957), False, 'from sknetwork.utils.format import directed2undirected\n'), ((7975, 8017), 'sknetwork.utils.parse.edgelist2adjacency', 'edgelist2adjacency', (['edges'], {'undirected': '(True)'}), '(edges, undirected=True)\n', (7993, 8017), False, 'from sknetwork.utils.parse import edgelist2adjacency\n'), ((9086, 9106), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9100, 9106), True, 'import numpy as np\n'), ((9121, 9137), 'numpy.zeros', 'np.zeros', (['n', 'int'], {}), '(n, int)\n', (9129, 9137), True, 'import numpy as np\n'), ((9478, 9515), 'sknetwork.utils.parse.edgelist2adjacency', 'edgelist2adjacency', (['edges', 'undirected'], {}), '(edges, undirected)\n', (9496, 9515), False, 'from sknetwork.utils.parse import edgelist2adjacency\n'), ((10449, 10469), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10463, 10469), True, 
'import numpy as np\n'), ((10703, 10745), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(adjacency + adjacency.T)'], {}), '(adjacency + adjacency.T)\n', (10720, 10745), False, 'from scipy import sparse\n'), ((10758, 10770), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (10767, 10770), True, 'import numpy as np\n'), ((11181, 11232), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['adjacency'], {'shape': 'adjacency.shape'}), '(adjacency, shape=adjacency.shape)\n', (11198, 11232), False, 'from scipy import sparse\n'), ((1688, 1702), 'numpy.array', 'np.array', (['p_in'], {}), '(p_in)\n', (1696, 1702), True, 'import numpy as np\n'), ((2292, 2299), 'sknetwork.utils.Bunch', 'Bunch', ([], {}), '()\n', (2297, 2299), False, 'from sknetwork.utils import Bunch\n'), ((3214, 3227), 'numpy.array', 'np.array', (['[n]'], {}), '([n])\n', (3222, 3227), True, 'import numpy as np\n'), ((4013, 4025), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4022, 4025), True, 'import numpy as np\n'), ((4038, 4049), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4046, 4049), True, 'import numpy as np\n'), ((4066, 4073), 'sknetwork.utils.Bunch', 'Bunch', ([], {}), '()\n', (4071, 4073), False, 'from sknetwork.utils import Bunch\n'), ((5343, 5359), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (5351, 5359), True, 'import numpy as np\n'), ((6151, 6158), 'sknetwork.utils.Bunch', 'Bunch', ([], {}), '()\n', (6156, 6158), False, 'from sknetwork.utils import Bunch\n'), ((8051, 8058), 'sknetwork.utils.Bunch', 'Bunch', ([], {}), '()\n', (8056, 8058), False, 'from sknetwork.utils import Bunch\n'), ((8120, 8135), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (8128, 8135), True, 'import numpy as np\n'), ((11266, 11273), 'sknetwork.utils.Bunch', 'Bunch', ([], {}), '()\n', (11271, 11273), False, 'from sknetwork.utils import Bunch\n'), ((1643, 1662), 'numpy.ones_like', 'np.ones_like', (['sizes'], {}), '(sizes)\n', (1655, 1662), True, 'import numpy as np\n'), ((4135, 
4151), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (4143, 4151), True, 'import numpy as np\n'), ((10636, 10658), 'numpy.ones_like', 'np.ones_like', (['row', 'int'], {}), '(row, int)\n', (10648, 10658), True, 'import numpy as np\n'), ((6001, 6016), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (6010, 6016), True, 'import numpy as np\n'), ((10936, 10954), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10952, 10954), True, 'import numpy as np\n'), ((10986, 11014), 'numpy.random.choice', 'np.random.choice', (['candidates'], {}), '(candidates)\n', (11002, 11014), True, 'import numpy as np\n'), ((5261, 5273), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5270, 5273), True, 'import numpy as np\n'), ((1972, 2010), 'scipy.sparse.random', 'sparse.random', (['a', 'b', 'p_out'], {'dtype': 'bool'}), '(a, b, p_out, dtype=bool)\n', (1985, 2010), False, 'from scipy import sparse\n'), ((2057, 2097), 'scipy.sparse.random', 'sparse.random', (['a', 'a', 'p_in[i]'], {'dtype': 'bool'}), '(a, a, p_in[i], dtype=bool)\n', (2070, 2097), False, 'from scipy import sparse\n')] |
# -*- coding: utf-8 -*-
"""grab_worldbank retrieves global average temperatures from the Worldbank
https://data.worldbank.org/topic/climate-change
This python module contains a single function, grab_worldbank, that
retrieves the global average temperature estimates from the Worldbank dataset
(Celsius). This is the climate data that is used to evaluate global climate
change. Global averages are computed using yearly data from all countries on
earth between the years 1901 and 2012.
Usage:
To use, one will need to install the 'wbpy' Python packageself.
https://github.com/mattduck/wbpy
'wbpy' is the recommended directly by the WorldBank as a wrapper library
to access its databases. It supports and ensures clean, correct
data access, as well as legacy ISO country code formats
Install via any standard method, i.e.:
pip install wbpy
function usage is then simple:
>>> df = grab_worldbank()
Dependencies:
wbpy
numpy
pandas
Written by <NAME>
2018
"""
import wbpy
import pandas as pd
import sys
import numpy as np
MIN_YEAR = 1901 # constant defining minimum year value in WorldBank dataset
MAX_YEAR = 2012 # likewise maximum
def grab_worldbank(start_date=1901, end_date=2012):
    """Return a dataframe of (Date, GlobalAverageTemperature) rows built
    from the WorldBank climate-change database.

    https://data.worldbank.org/topic/climate-change

    Args:
        start_date (int): Starting year for data retrieval; minimum 1901.
            Defaults to 1901.
        end_date (int): End year for data retrieval; maximum 2012.
            Defaults to 2012.

    Returns:
        pandas.DataFrame: columns are
            Date (str): 'yyyy-mm-dd'; January 1st is used as a dummy
                month-day for each year.
            Tabsolute_C (float): global average temperature in Celsius,
                averaged over all countries for that year.

    Raises:
        ValueError: if either bound is not an int or lies outside 1901-2012.

    Examples:
        >>> df = grab_worldbank(2011, 2012)
        >>> print(df.head())
              Date    Tabsolute_C
        0    2011-01-01    19.002201
        1    2012-01-01    19.026535
    """
    # Validate arguments up front. A raise already aborts the call, so the
    # original sys.exit(0) lines after each raise were unreachable and have
    # been removed.
    if (start_date is not None and not isinstance(start_date, int) or
            end_date is not None and not isinstance(end_date, int)):
        raise ValueError("Error: Invalid argument type. Must be integer")
    if start_date is not None and (start_date < MIN_YEAR or start_date > MAX_YEAR):
        raise ValueError("Error: Starting date cannot precede 1901")
    if end_date is not None and (end_date > MAX_YEAR or end_date < MIN_YEAR):
        raise ValueError("Error: Ending date cannot exceed 2012")

    # Countries for which the WorldBank has NO instrumental data.
    err_dict = {}

    # API interface from the wbpy package (the WorldBank-recommended wrapper).
    climate_api = wbpy.ClimateAPI()

    # ISO 3166 country codes for ALL countries in the world.
    iso_df = pd.read_csv(
        'https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv',
        delimiter=',')
    codes_list = np.array(iso_df['ISO3166-1-Alpha-3'])    # i.e. [DEU, GHA, GIB, ...]
    country_list = np.array(iso_df['official_name_en'])   # i.e. [Germany, Ghana, ...]
    # Drop NaN entries (NaN != NaN, so `v == v` filters them out).
    codes_list = list(filter(lambda v: v == v, codes_list))
    country_list = list(filter(lambda v: v == v, country_list))

    # Fetch yearly average surface temperature ('tas') for every country,
    # recording the countries the API has no data for.
    code_country_pairs = list(zip(country_list, codes_list))
    dataset = {}
    for _, code in code_country_pairs:
        try:
            dataset[code] = climate_api.get_instrumental(
                data_type='tas', interval='year', locations=[code])
        except Exception:
            print("Warning: Data Does Not Exist for Country Code: {}".format(code))
            # Add erroneous country to error dictionary
            err_dict[code] = 1

    # Build (Country, Year, Temperature) rows. Collecting rows in a list and
    # constructing the DataFrame once avoids the O(n^2) df.loc[count] append
    # pattern of the original implementation.
    rows = []
    for c_name, c_code in code_country_pairs:
        if c_code in err_dict:
            continue
        # as_dict() returns {code: {year: temperature, ...}}.
        country_data = dataset[c_code].as_dict()[c_code]
        for year_key in country_data.keys():
            if start_date <= int(year_key) <= end_date:
                rows.append([c_name, int(year_key), country_data[year_key]])
    df = pd.DataFrame(rows, columns=['Country', 'Date', 'Tabsolute_C'])

    # Average over all countries for each year to get the global mean.
    # NOTE(review): .mean() on a frame with a string 'Country' column relies
    # on pandas silently dropping non-numeric columns (older pandas behavior).
    print(".......computing worldwide averages across all years...hang on!!!")
    df_worldbank = df.groupby(df['Date']).mean()
    df_worldbank = df_worldbank.reset_index()
    # Convert the year to 'yyyy-mm-dd'. BUGFIX: the original indexed the
    # pre-grouped frame `df`, whose rows no longer align with the grouped
    # result, producing wrong/arbitrary year labels.
    df_worldbank['Date'] = df_worldbank['Date'].astype(str) + '-01-01'
    return df_worldbank
if __name__ == '__main__':
    # Smoke test: fetch the last two years of data and preview the result.
    sample_frame = grab_worldbank(2011, 2012)
    print(sample_frame.head())
| [
"pandas.DataFrame",
"wbpy.ClimateAPI",
"pandas.read_csv",
"numpy.array",
"sys.exit"
] | [((3173, 3190), 'wbpy.ClimateAPI', 'wbpy.ClimateAPI', ([], {}), '()\n', (3188, 3190), False, 'import wbpy\n'), ((3271, 3397), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv"""'], {'delimiter': '""","""'}), "(\n 'https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv'\n , delimiter=',')\n", (3282, 3397), True, 'import pandas as pd\n'), ((3430, 3467), 'numpy.array', 'np.array', (["iso_df['ISO3166-1-Alpha-3']"], {}), "(iso_df['ISO3166-1-Alpha-3'])\n", (3438, 3467), True, 'import numpy as np\n'), ((3515, 3551), 'numpy.array', 'np.array', (["iso_df['official_name_en']"], {}), "(iso_df['official_name_en'])\n", (3523, 3551), True, 'import numpy as np\n'), ((4785, 4841), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Country', 'Date', 'Tabsolute_C']"}), "(columns=['Country', 'Date', 'Tabsolute_C'])\n", (4797, 4841), True, 'import pandas as pd\n'), ((2666, 2677), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2674, 2677), False, 'import sys\n'), ((2841, 2852), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2849, 2852), False, 'import sys\n'), ((3007, 3018), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3015, 3018), False, 'import sys\n')] |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.extractor import FrontExtractorOp
from mo.ops.op import Op
from mo.front.onnx.extractors.utils import onnx_attr
class PriorBoxFrontExtractor(FrontExtractorOp):
    """ONNX front extractor for the PriorBox layer."""
    op = 'PriorBox'
    enabled = True

    @staticmethod
    def extract(node):
        # Helper: read a 'floats' ONNX attribute as a float32 numpy array.
        def float_array(name):
            return onnx_attr(node, name, 'floats',
                             dst_type=lambda x: np.array(x, dtype=np.float32))

        variance = onnx_attr(node, 'variance', 'floats', default=[],
                             dst_type=lambda x: np.array(x, dtype=np.float32))
        if len(variance) == 0:
            variance = [0.1]  # fallback when the attribute is absent or empty

        update_attrs = {
            'aspect_ratio': float_array('aspect_ratio'),
            'min_size': float_array('min_size'),
            'max_size': float_array('max_size'),
            'flip': onnx_attr(node, 'flip', 'i', default=0),
            'clip': onnx_attr(node, 'clip', 'i', default=0),
            'variance': list(variance),
            'img_size': onnx_attr(node, 'img_size', 'i', default=0),
            'img_h': onnx_attr(node, 'img_h', 'i', default=0),
            'img_w': onnx_attr(node, 'img_w', 'i', default=0),
            'step': onnx_attr(node, 'step', 'f', default=0.0),
            'step_h': onnx_attr(node, 'step_h', 'f', default=0.0),
            'step_w': onnx_attr(node, 'step_w', 'f', default=0.0),
            'offset': onnx_attr(node, 'offset', 'f', default=0.0),
        }

        # Push the collected attributes onto the node via the op class.
        Op.get_op_class_by_name(PriorBoxFrontExtractor.op).update_node_stat(node, update_attrs)
        return PriorBoxFrontExtractor.enabled
| [
"numpy.array",
"mo.ops.op.Op.get_op_class_by_name",
"mo.front.onnx.extractors.utils.onnx_attr"
] | [((1430, 1469), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""flip"""', '"""i"""'], {'default': '(0)'}), "(node, 'flip', 'i', default=0)\n", (1439, 1469), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1491, 1530), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""clip"""', '"""i"""'], {'default': '(0)'}), "(node, 'clip', 'i', default=0)\n", (1500, 1530), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1596, 1639), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""img_size"""', '"""i"""'], {'default': '(0)'}), "(node, 'img_size', 'i', default=0)\n", (1605, 1639), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1662, 1702), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""img_h"""', '"""i"""'], {'default': '(0)'}), "(node, 'img_h', 'i', default=0)\n", (1671, 1702), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1725, 1765), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""img_w"""', '"""i"""'], {'default': '(0)'}), "(node, 'img_w', 'i', default=0)\n", (1734, 1765), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1787, 1828), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""step"""', '"""f"""'], {'default': '(0.0)'}), "(node, 'step', 'f', default=0.0)\n", (1796, 1828), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1852, 1895), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""step_h"""', '"""f"""'], {'default': '(0.0)'}), "(node, 'step_h', 'f', default=0.0)\n", (1861, 1895), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((1919, 1962), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""step_w"""', '"""f"""'], {'default': '(0.0)'}), "(node, 'step_w', 'f', default=0.0)\n", (1928, 1962), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), 
((1986, 2029), 'mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""offset"""', '"""f"""'], {'default': '(0.0)'}), "(node, 'offset', 'f', default=0.0)\n", (1995, 2029), False, 'from mo.front.onnx.extractors.utils import onnx_attr\n'), ((2094, 2131), 'mo.ops.op.Op.get_op_class_by_name', 'Op.get_op_class_by_name', (['__class__.op'], {}), '(__class__.op)\n', (2117, 2131), False, 'from mo.ops.op import Op\n'), ((946, 975), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (954, 975), True, 'import numpy as np\n'), ((1152, 1181), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1160, 1181), True, 'import numpy as np\n'), ((1265, 1294), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1273, 1294), True, 'import numpy as np\n'), ((1378, 1407), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1386, 1407), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 15:02:07 2019
@author: wangyf

Plots (1) LASSO cross-validation accuracy and number of nonzero
coefficients versus database size, and (2) the distribution of Pd atom
counts in the sampled configurations of each data batch.
"""
import os
import sys
#os.chdir('..')
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
import json
import lattice_functions as lf
import pickle
import os  # NOTE(review): duplicate import kept for byte-compat with callers of this module

font = {'family' : 'normal', 'size' : 16}
matplotlib.rc('font', **font)

# Number of configurations in each batch of pi matrices, and their
# cumulative total (the database size after adding each batch).
datasize_batch = [np.load('pi_iso_{}.npy'.format(i)).shape[0] for i in range(4)]
datasize = np.cumsum(datasize_batch)

model_name = 'lasso'
model_batches = ['0', '01', '012', '0123']
model_names = [model_name + '_' + bi for bi in model_batches]
base_dir = os.path.join(os.getcwd(), 'cv_results')

error_per_atom = []
error_per_site = []
n_clusters = []

for mi in model_names:
    # BUGFIX: the original used pickle.load(open(...)) and never closed the
    # file handle; the with-statement guarantees closure.
    with open(os.path.join(base_dir, mi, mi + '.p'), 'rb') as fh:
        Gcv, J, intercept, RMSE_test_atom, RMSE_test_site = pickle.load(fh)
    n_clusters.append(len(Gcv))
    error_per_atom.append(RMSE_test_atom)
    error_per_site.append(RMSE_test_site * 1000)  # convert eV to meV

#%%
'''
Plot LASSO Accuracy vs number of coefficients selected
'''
fig, ax1 = plt.subplots(figsize=(6, 4))
ax1.plot(datasize, error_per_site, 'bo--')
ax1.set_xlabel('Database Size')
ax1.set_xlim([1000, 6500])
ax1.set_ylim([0, 2])
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('RMSE/site (meV)', color='b')
ax1.tick_params('y', colors='b')
ax1.legend(bbox_to_anchor=(1.05, 1), loc='upper left', frameon=False)

# Second y-axis: number of nonzero coefficients selected by LASSO.
ax2 = ax1.twinx()
ax2.plot(datasize, n_clusters, 'ro--')
ax2.set_ylabel('# nonzero coefficients', color='r')
ax2.set_ylim([0, 100])
ax2.tick_params('y', colors='r')
#ax2.legend(bbox_to_anchor = (1.3, 1),loc= 'lower left', frameon=False)
fig.tight_layout()
plt.show()
#fig.savefig('elastic_net.png')

#%%
'''
Plot the Pd number distributio sampled in each batch of data
'''
from structure_constants import Ec as Ec_init
from structure_constants import config as config_init

n_all_batch = len(config_init)
NPd_list_batch = []
for batch_i in range(n_all_batch):
    # name of the json file
    json_name = 'ES_iso_' + str(batch_i) + '.json'
    # name of the pi file
    pi_name = 'pi_iso_' + str(batch_i) + '.npy'
    with open(json_name) as f:
        ES_data = json.load(f)
    Ec_batch_i = ES_data['E_iso']
    config_batch_i = ES_data['config_iso']
    # the number of Pd atoms in each structure
    NPd_list_batch.append(lf.get_NPd_list(config_batch_i))

fig = plt.figure(figsize=(6, 4))
n_c, bins, patches = plt.hist(NPd_list_batch[3], 10, facecolor='g', alpha=0.5, width=0.9)
plt.ylim([0, 500])
plt.xlabel('Number of Pd atoms')
plt.ylabel('Configuration Counts')
plt.xticks([0, 5, 10, 15, 20])
#fig.savefig('configuration_distribution.png')
| [
"matplotlib.rc",
"json.load",
"matplotlib.pyplot.show",
"numpy.load",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylim",
"os.getcwd",
"lattice_functions.get_NPd_list",
"os.path.join",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matp... | [((328, 357), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (341, 357), False, 'import matplotlib\n'), ((585, 610), 'numpy.cumsum', 'np.cumsum', (['datasize_batch'], {}), '(datasize_batch)\n', (594, 610), True, 'import numpy as np\n'), ((1204, 1232), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (1216, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1840), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1838, 1840), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2569, 2585), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2675), 'matplotlib.pyplot.hist', 'plt.hist', (['NPd_list_batch[3]', '(10)'], {'facecolor': '"""g"""', 'alpha': '(0.5)', 'width': '(0.9)'}), "(NPd_list_batch[3], 10, facecolor='g', alpha=0.5, width=0.9)\n", (2615, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2697), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 500]'], {}), '([0, 500])\n', (2687, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Pd atoms"""'], {}), "('Number of Pd atoms')\n", (2707, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Configuration Counts"""'], {}), "('Configuration Counts')\n", (2742, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2799), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 5, 10, 15, 20]'], {}), '([0, 5, 10, 15, 20])\n', (2779, 2799), True, 'import matplotlib.pyplot as plt\n'), ((762, 773), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (771, 773), False, 'import os\n'), ((2343, 2355), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2352, 2355), False, 'import json\n'), ((2515, 2546), 'lattice_functions.get_NPd_list', 'lf.get_NPd_list', (['config_batch_i'], {}), 
'(config_batch_i)\n', (2530, 2546), True, 'import lattice_functions as lf\n'), ((378, 401), 'numpy.load', 'np.load', (['"""pi_iso_0.npy"""'], {}), "('pi_iso_0.npy')\n", (385, 401), True, 'import numpy as np\n'), ((432, 455), 'numpy.load', 'np.load', (['"""pi_iso_1.npy"""'], {}), "('pi_iso_1.npy')\n", (439, 455), True, 'import numpy as np\n'), ((485, 508), 'numpy.load', 'np.load', (['"""pi_iso_2.npy"""'], {}), "('pi_iso_2.npy')\n", (492, 508), True, 'import numpy as np\n'), ((539, 562), 'numpy.load', 'np.load', (['"""pi_iso_3.npy"""'], {}), "('pi_iso_3.npy')\n", (546, 562), True, 'import numpy as np\n'), ((945, 982), 'os.path.join', 'os.path.join', (['base_dir', 'mi', "(mi + '.p')"], {}), "(base_dir, mi, mi + '.p')\n", (957, 982), False, 'import os\n')] |
import bpy, os, sys, re, platform, subprocess
import numpy as np
class TLM_OIDN_Denoise:
    """Denoises baked lightmap images with Intel Open Image Denoise (OIDN).

    Workflow: load each baked HDR image through Blender, dump its RGB
    channels to a .pfm file, run the external ``oidnDenoise`` binary on it,
    read the denoised .pfm back, and save it as a ``*_denoised.hdr`` image.
    """

    # Images queued for denoising (file names inside image_output_destination).
    image_array = []
    # Folder that holds the baked images and receives the denoised output.
    image_output_destination = ""
    # Images already processed, so repeated denoise() calls skip them.
    denoised_array = []

    def __init__(self, oidnProperties, img_array, dirpath):
        self.oidnProperties = oidnProperties
        self.image_array = img_array
        self.image_output_destination = dirpath

        self.check_binary()

    def check_binary(self):
        """Validate / normalize the configured path to the oidnDenoise binary."""
        oidnPath = self.oidnProperties.tlm_oidn_path

        if oidnPath != "":
            file = os.path.basename(os.path.realpath(oidnPath))
            filename, file_extension = os.path.splitext(file)
            if platform.system() == 'Windows':
                # On Windows the property may point at the install folder;
                # append the executable name if no .exe was given.
                if(file_extension == ".exe"):
                    pass
                else:
                    self.oidnProperties.tlm_oidn_path = os.path.join(self.oidnProperties.tlm_oidn_path,"oidnDenoise.exe")
        else:
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Please provide OIDN path")

    def denoise(self):
        """Run OIDN on every queued image that has not been denoised yet."""
        for image in self.image_array:

            if image not in self.denoised_array:

                image_path = os.path.join(self.image_output_destination, image)

                # Load the baked image and dump its RGB channels to a .pfm
                # file, the input format expected by oidnDenoise.
                loaded_image = bpy.data.images.load(image_path, check_existing=False)

                width = loaded_image.size[0]
                height = loaded_image.size[1]

                # (The original also pre-allocated an unused np.zeros buffer
                # here; it was dead code and has been removed.)
                image_output_array = np.array(loaded_image.pixels)
                image_output_array = image_output_array.reshape(height, width, 4)
                image_output_array = np.float32(image_output_array[:,:,:3])  # drop alpha

                image_output_denoise_destination = image_path[:-4] + ".pfm"
                image_output_denoise_result_destination = image_path[:-4] + "_denoised.pfm"

                with open(image_output_denoise_destination, "wb") as fileWritePFM:
                    self.save_pfm(fileWritePFM, image_output_array)

                # Build the oidnDenoise command line.
                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                    print("Loaded image: " + str(loaded_image))

                verbose = bpy.context.scene.TLM_SceneProperties.tlm_verbose
                affinity = self.oidnProperties.tlm_oidn_affinity

                if verbose:
                    print("Denoiser search: " + bpy.path.abspath(self.oidnProperties.tlm_oidn_path))
                    v = "3"
                else:
                    v = "0"

                if affinity:
                    a = "1"
                else:
                    a = "0"

                threads = str(self.oidnProperties.tlm_oidn_threads)
                maxmem = str(self.oidnProperties.tlm_oidn_maxmem)

                if platform.system() == 'Windows':
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    pipePath = [oidnPath, '-f', 'RTLightmap', '-hdr', image_output_denoise_destination, '-o', image_output_denoise_result_destination, '-verbose', v, '-threads', threads, '-affinity', a, '-maxmem', maxmem]
                elif platform.system() == 'Darwin':
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
                else:
                    # Linux: escape spaces because the command runs through a shell.
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    oidnPath = oidnPath.replace(' ', '\\ ')
                    image_output_denoise_destination = image_output_denoise_destination.replace(' ', '\\ ')
                    image_output_denoise_result_destination = image_output_denoise_result_destination.replace(' ', '\\ ')
                    pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]

                if not verbose:
                    denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)
                else:
                    denoisePipe = subprocess.Popen(pipePath, shell=True)

                denoisePipe.communicate()[0]

                if platform.system() != 'Windows':
                    image_output_denoise_result_destination = image_output_denoise_result_destination.replace('\\', '')

                # Read the denoised .pfm back, restore an opaque alpha channel
                # and overwrite the Blender image with the result.
                with open(image_output_denoise_result_destination, "rb") as f:
                    denoise_data, scale = self.load_pfm(f)

                ndata = np.array(denoise_data)
                # BUGFIX: the alpha plane must match ndata's (height, width)
                # layout; the original used np.ones((width, height)), which
                # fails for non-square lightmaps.
                ndata2 = np.dstack((ndata, np.ones((height, width))))
                img_array = ndata2.ravel()

                loaded_image.pixels = img_array
                loaded_image.filepath_raw = image_output_denoise_result_destination = image_path[:-10] + "_denoised.hdr"
                loaded_image.file_format = "HDR"
                loaded_image.save()

                self.denoised_array.append(image)

                print(image_path)

    def clean(self):
        """Reset the queued and processed image lists.

        BUGFIX(review): the original additionally looped over
        ``self.image_output_destination`` — a *string*, i.e. over its
        characters — appending matches to an undefined ``baked_image_array``.
        That loop could never match a single character against "_baked.hdr"
        and would raise NameError if it did; it has been removed.
        """
        self.denoised_array.clear()
        self.image_array.clear()

        # TODO: clean temporary files (*.pfm, *_denoised.hdr) in
        # self.image_output_destination.

    def load_pfm(self, file, as_flat_list=False):
        """Read a PFM image from an open binary file.

        Returns:
            (data, scale): data is a float32 numpy array of shape
            (height, width, 3) for color files or (height, width) for
            greyscale (flat 1-D array when as_flat_list is True); scale is
            the PFM scale factor (always positive).

        Raises:
            Exception: on a missing/invalid PFM header.
        """
        header = file.readline().decode("utf-8").rstrip()
        if header == "PF":
            color = True
        elif header == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file.")

        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception("Malformed PFM header.")

        # In PFM a negative scale marks little-endian sample data.
        scale = float(file.readline().decode("utf-8").rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian

        data = np.fromfile(file, endian + "f")
        shape = (height, width, 3) if color else (height, width)

        if as_flat_list:
            result = data
        else:
            result = np.reshape(data, shape)
        return result, scale

    def save_pfm(self, file, image, scale=1):
        """Write a float32 numpy image to an open binary file in PFM format.

        Raises:
            Exception: if the dtype is not float32 or the shape is not
            H x W x 3, H x W x 1 or H x W.
        """
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32 (got %s)" % image.dtype.name)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        file.write(b"PF\n" if color else b"Pf\n")
        file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))

        # A negative scale in the header signals little-endian byte order.
        endian = image.dtype.byteorder
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale
        file.write(b"%f\n" % scale)
        image.tofile(file)
| [
"subprocess.Popen",
"bpy.path.abspath",
"numpy.fromfile",
"os.path.realpath",
"numpy.float32",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"os.path.splitext",
"numpy.reshape",
"platform.system",
"bpy.data.images.load",
"os.path.join"
] | [((6400, 6431), 'numpy.fromfile', 'np.fromfile', (['file', "(endian + 'f')"], {}), "(file, endian + 'f')\n", (6411, 6431), True, 'import numpy as np\n'), ((610, 632), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (626, 632), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((6583, 6606), 'numpy.reshape', 'np.reshape', (['data', 'shape'], {}), '(data, shape)\n', (6593, 6606), True, 'import numpy as np\n'), ((543, 569), 'os.path.realpath', 'os.path.realpath', (['oidnPath'], {}), '(oidnPath)\n', (559, 569), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((674, 691), 'platform.system', 'platform.system', ([], {}), '()\n', (689, 691), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((1265, 1315), 'os.path.join', 'os.path.join', (['self.image_output_destination', 'image'], {}), '(self.image_output_destination, image)\n', (1277, 1315), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((1377, 1431), 'bpy.data.images.load', 'bpy.data.images.load', (['image_path'], {'check_existing': '(False)'}), '(image_path, check_existing=False)\n', (1397, 1431), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((1562, 1607), 'numpy.zeros', 'np.zeros', (['[width, height, 3]'], {'dtype': '"""float32"""'}), "([width, height, 3], dtype='float32')\n", (1570, 1607), True, 'import numpy as np\n'), ((1645, 1674), 'numpy.array', 'np.array', (['loaded_image.pixels'], {}), '(loaded_image.pixels)\n', (1653, 1674), True, 'import numpy as np\n'), ((1794, 1834), 'numpy.float32', 'np.float32', (['image_output_array[:, :, :3]'], {}), '(image_output_array[:, :, :3])\n', (1804, 1834), True, 'import numpy as np\n'), ((4803, 4825), 'numpy.array', 'np.array', (['denoise_data'], {}), '(denoise_data)\n', (4811, 4825), True, 'import numpy as np\n'), ((923, 989), 'os.path.join', 'os.path.join', (['self.oidnProperties.tlm_oidn_path', '"""oidnDenoise.exe"""'], {}), "(self.oidnProperties.tlm_oidn_path, 'oidnDenoise.exe')\n", (935, 989), 
False, 'import bpy, os, sys, re, platform, subprocess\n'), ((2928, 2945), 'platform.system', 'platform.system', ([], {}), '()\n', (2943, 2945), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((2991, 3042), 'bpy.path.abspath', 'bpy.path.abspath', (['self.oidnProperties.tlm_oidn_path'], {}), '(self.oidnProperties.tlm_oidn_path)\n', (3007, 3042), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((4250, 4325), 'subprocess.Popen', 'subprocess.Popen', (['pipePath'], {'stdout': 'subprocess.PIPE', 'stderr': 'None', 'shell': '(True)'}), '(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)\n', (4266, 4325), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((4382, 4420), 'subprocess.Popen', 'subprocess.Popen', (['pipePath'], {'shell': '(True)'}), '(pipePath, shell=True)\n', (4398, 4420), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((4487, 4504), 'platform.system', 'platform.system', ([], {}), '()\n', (4502, 4504), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((3286, 3303), 'platform.system', 'platform.system', ([], {}), '()\n', (3301, 3303), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((3348, 3399), 'bpy.path.abspath', 'bpy.path.abspath', (['self.oidnProperties.tlm_oidn_path'], {}), '(self.oidnProperties.tlm_oidn_path)\n', (3364, 3399), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((3637, 3688), 'bpy.path.abspath', 'bpy.path.abspath', (['self.oidnProperties.tlm_oidn_path'], {}), '(self.oidnProperties.tlm_oidn_path)\n', (3653, 3688), False, 'import bpy, os, sys, re, platform, subprocess\n'), ((4869, 4893), 'numpy.ones', 'np.ones', (['(width, height)'], {}), '((width, height))\n', (4876, 4893), True, 'import numpy as np\n'), ((2534, 2585), 'bpy.path.abspath', 'bpy.path.abspath', (['self.oidnProperties.tlm_oidn_path'], {}), '(self.oidnProperties.tlm_oidn_path)\n', (2550, 2585), False, 'import bpy, os, sys, re, platform, subprocess\n')] |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
    """Device and gradient checks for the FunHash operator."""

    @given(n_out=st.integers(min_value=5, max_value=20),
           n_in=st.integers(min_value=10, max_value=20),
           n_data=st.integers(min_value=2, max_value=8),
           n_weight=st.integers(min_value=8, max_value=15),
           n_alpha=st.integers(min_value=3, max_value=8),
           sparsity=st.floats(min_value=0.1, max_value=1.0),
           **hu.gcs)
    def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
                     gc, dc):
        # Random sparse input in COO form: values, column keys, row segments.
        dense = np.random.rand(n_data, n_in)
        dense[dense > sparsity] = 0
        coo = coo_matrix(dense)
        val = coo.data.astype(np.float32)
        key = coo.col.astype(np.int64)
        seg = coo.row.astype(np.int32)

        weight = np.random.rand(n_weight).astype(np.float32)
        alpha = np.random.rand(n_alpha).astype(np.float32)

        op = core.CreateOperator(
            'FunHash',
            ['val', 'key', 'seg', 'weight', 'alpha'],
            ['out'],
            num_outputs=n_out)
        inputs = [val, key, seg, weight, alpha]
        # Check over multiple devices.
        self.assertDeviceChecks(dc, op, inputs, [0])
        # Gradient check wrt weight (input index 3).
        self.assertGradientChecks(gc, op, inputs, 3, [0])
        # Gradient check wrt alpha (input index 4).
        self.assertGradientChecks(gc, op, inputs, 4, [0])

        # Same operator without the optional alpha input.
        op2 = core.CreateOperator(
            'FunHash',
            ['val', 'key', 'seg', 'weight'],
            ['out'],
            num_outputs=n_out)
        inputs2 = [val, key, seg, weight]
        # Check over multiple devices.
        self.assertDeviceChecks(dc, op2, inputs2, [0])
        # Gradient check wrt weight.
        self.assertGradientChecks(gc, op2, inputs2, 3, [0])
| [
"scipy.sparse.coo_matrix",
"caffe2.python.core.CreateOperator",
"numpy.random.rand",
"hypothesis.strategies.integers",
"hypothesis.strategies.floats"
] | [((1555, 1583), 'numpy.random.rand', 'np.random.rand', (['n_data', 'n_in'], {}), '(n_data, n_in)\n', (1569, 1583), True, 'import numpy as np\n'), ((1628, 1641), 'scipy.sparse.coo_matrix', 'coo_matrix', (['A'], {}), '(A)\n', (1638, 1641), False, 'from scipy.sparse import coo_matrix\n'), ((1941, 2046), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""FunHash"""', "['val', 'key', 'seg', 'weight', 'alpha']", "['out']"], {'num_outputs': 'n_out'}), "('FunHash', ['val', 'key', 'seg', 'weight', 'alpha'], [\n 'out'], num_outputs=n_out)\n", (1960, 2046), False, 'from caffe2.python import core\n'), ((2496, 2591), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""FunHash"""', "['val', 'key', 'seg', 'weight']", "['out']"], {'num_outputs': 'n_out'}), "('FunHash', ['val', 'key', 'seg', 'weight'], ['out'],\n num_outputs=n_out)\n", (2515, 2591), False, 'from caffe2.python import core\n'), ((1082, 1120), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(5)', 'max_value': '(20)'}), '(min_value=5, max_value=20)\n', (1093, 1120), True, 'import hypothesis.strategies as st\n'), ((1138, 1177), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(10)', 'max_value': '(20)'}), '(min_value=10, max_value=20)\n', (1149, 1177), True, 'import hypothesis.strategies as st\n'), ((1197, 1234), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(2)', 'max_value': '(8)'}), '(min_value=2, max_value=8)\n', (1208, 1234), True, 'import hypothesis.strategies as st\n'), ((1256, 1294), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(8)', 'max_value': '(15)'}), '(min_value=8, max_value=15)\n', (1267, 1294), True, 'import hypothesis.strategies as st\n'), ((1315, 1352), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(3)', 'max_value': '(8)'}), '(min_value=3, max_value=8)\n', (1326, 1352), True, 'import hypothesis.strategies as st\n'), ((1374, 1413), 
'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.1)', 'max_value': '(1.0)'}), '(min_value=0.1, max_value=1.0)\n', (1383, 1413), True, 'import hypothesis.strategies as st\n'), ((1717, 1741), 'numpy.random.rand', 'np.random.rand', (['n_weight'], {}), '(n_weight)\n', (1731, 1741), True, 'import numpy as np\n'), ((1777, 1800), 'numpy.random.rand', 'np.random.rand', (['n_alpha'], {}), '(n_alpha)\n', (1791, 1800), True, 'import numpy as np\n')] |
import subprocess
import numpy as np
from pathlib import Path
import os, sys
import requests
supported_openvino_version = '2020.1'
def relative_to_abs_path(relative_path):
    """Resolve *relative_path* against this file's directory.

    Returns the absolute path as a string, or None when resolution fails
    (pre-3.6 Path.resolve() raised FileNotFoundError for missing targets).
    """
    base_dir = Path(__file__).parent
    try:
        return str((base_dir / relative_path).resolve())
    except FileNotFoundError:
        return None
model_downloader_path = relative_to_abs_path('downloader/downloader.py')
ir_converter_path = relative_to_abs_path('downloader/converter.py')
download_folder_path = relative_to_abs_path('downloads') + "/"
def download_model(model, model_zoo_folder):
    """Fetch *model* (FP16 precision) from *model_zoo_folder* using the
    downloader helper script.

    :returns: Path of the downloaded model folder.
    :raises RuntimeError: when the downloader exits non-zero or the expected
        output folder is missing afterwards.
    """
    cmd = [
        sys.executable, f'{model_downloader_path}',
        '--precisions', 'FP16',
        '--output_dir', f'{download_folder_path}',
        '--cache_dir', f'{download_folder_path}/.cache',
        '--num_attempts', '5',
        '--name', f'{model}',
        '--model_root', f'{model_zoo_folder}',
    ]
    # print('"{}"'.format('" "'.join(cmd)))
    if subprocess.run(cmd).returncode != 0:
        raise RuntimeError("Model downloader failed!")
    download_location = Path(download_folder_path) / model
    if not download_location.exists():
        raise RuntimeError(f"{download_location} doesn't exist for downloaded model!")
    return download_location
def convert_model_to_ir(model, model_zoo_folder):
    """Convert a previously downloaded *model* into OpenVINO IR (FP16) via
    the converter helper script.

    :returns: Path of the folder that should hold the generated IR files.
    :raises RuntimeError: when the converter exits non-zero.
    """
    cmd = [
        sys.executable, f'{Path(ir_converter_path)}',
        '--precisions', 'FP16',
        '--output_dir', f'{download_folder_path}',
        '--download_dir', f'{download_folder_path}',
        '--name', f'{model}',
        '--model_root', f'{model_zoo_folder}',
    ]
    # print('"{}"'.format('" "'.join(cmd)))
    if subprocess.run(cmd).returncode != 0:
        raise RuntimeError("Model converter failed!")
    return Path(download_folder_path) / model / "FP16"
def myriad_compile_model_local(shaves, cmx_slices, nces, xml_path, output_file):
    """Compile an OpenVINO IR model into a .blob with the locally installed
    myriad_compile tool.

    :param shaves: number of SHAVE cores to compile the blob for
    :param cmx_slices: number of CMX memory slices to compile the blob for
    :param nces: 0 selects the Myriad 2450 platform, anything else 2480
    :param xml_path: path to the IR .xml file
    :param output_file: path the compiled .blob is written to
    :returns: 0 on success
    :raises RuntimeError: when myriad_compile exits non-zero
    """
    try:
        myriad_compile_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/inference_engine/lib/intel64/myriad_compile'
    except KeyError:
        # Error message fixed: this locates myriad_compile, not the Model Optimizer.
        sys.exit('Unable to locate myriad_compile. '
                 + 'Run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')
    PLATFORM = "VPU_MYRIAD_2450" if nces == 0 else "VPU_MYRIAD_2480"
    # BUGFIX: '-ip' and 'U8' must be separate argv tokens; the original passed
    # the single argument '-ip U8', which the tool cannot parse in list mode.
    # Plain list concatenation also replaces the needless np.concatenate call.
    myriad_compile_cmd = [str(myriad_compile_path),
                          '-ip', 'U8',
                          '-VPU_MYRIAD_PLATFORM', f'{PLATFORM}',
                          '-VPU_NUMBER_OF_SHAVES', f'{shaves}',
                          '-VPU_NUMBER_OF_CMX_SLICES', f'{cmx_slices}',
                          '-m', f'{xml_path}',
                          '-o', f'{output_file}']
    # print('"{}"'.format('" "'.join(myriad_compile_cmd)))
    result = subprocess.run(myriad_compile_cmd)
    if result.returncode != 0:
        raise RuntimeError("Myriad compiler failed!")
    return 0
def myriad_compile_model_cloud(xml, bin, shaves, cmx_slices, nces, output_file):
    """Compile an OpenVINO IR model into a .blob with the Luxonis cloud compiler.

    :param xml: path to the IR .xml (network definition) file
    :param bin: path to the IR .bin (weights) file
    :returns: 0 on success, 1 on an unknown/connection error, 2 when the
        server rejects the compilation (details are printed).
    """
    PLATFORM="VPU_MYRIAD_2450" if nces == 0 else "VPU_MYRIAD_2480"
    # use 69.214.171 instead luxonis.com to bypass cloudflare limitation of max file size
    url = "http://192.168.127.12:8083/compile"
    payload = {
        'compile_type': 'myriad',
        'compiler_params': '-ip U8 -VPU_MYRIAD_PLATFORM ' + PLATFORM + ' -VPU_NUMBER_OF_SHAVES ' + str(shaves) +' -VPU_NUMBER_OF_CMX_SLICES ' + str(cmx_slices)
    }
    params = {
        "version": supported_openvino_version
    }
    # BUGFIX: the original opened both model files (and the output blob)
    # without closing them; `with` guarantees the handles are released even
    # when the request raises.
    with open(Path(xml), 'rb') as xml_file, open(Path(bin), 'rb') as bin_file:
        files = {
            'definition': xml_file,
            'weights': bin_file
        }
        try:
            response = requests.post(url, data=payload, files=files, params=params)
            response.raise_for_status()
        except Exception as ex:
            if getattr(ex, 'response', None) is None:
                print(f"Unknown error occured: {ex}")
                return 1
            print("Model compilation failed with error code: " + str(ex.response.status_code))
            print(str(ex.response.text))
            return 2
    with open(output_file, 'wb') as blob_file:
        blob_file.write(response.content)
    return 0
def download_and_compile_NN_model(model, model_zoo_folder, shaves, cmx_slices, nces, output_file, model_compilation_target='auto'):
    """Download *model* from *model_zoo_folder* and compile it into a .blob.

    With ``model_compilation_target='auto'`` a supported local OpenVINO
    installation is preferred, otherwise the cloud compiler is used.
    'local' requires a matching local installation; 'cloud' always uses the
    compile server.

    :returns: 0 on success (local compilation returns myriad_compile's result)
    :raises ValueError: for an unknown *model_compilation_target*
    :raises RuntimeError: when download/conversion/compilation fails
    """
    if model_compilation_target == 'auto' or model_compilation_target == 'local':
        try:
            openvino_dir = os.environ['INTEL_OPENVINO_DIR']
            print(f'Openvino installation detected {openvino_dir}')
            installed_openvino_version_path = Path(openvino_dir) / "deployment_tools/model_optimizer/version.txt"
            installed_openvino_version = "not detected"
            with open(installed_openvino_version_path, "r") as fp:
                installed_openvino_version = fp.read()
            installed_openvino_version = installed_openvino_version.strip()
            print("Installed openvino version: ", installed_openvino_version)
            if supported_openvino_version in installed_openvino_version:
                model_compilation_target = 'local'
                print(f'Supported openvino version installed: {supported_openvino_version}')
            else:
                if model_compilation_target == 'local':
                    raise ValueError
                model_compilation_target = 'cloud'
                print(f'Unsupported openvino version installed at {openvino_dir}, version {installed_openvino_version}, supported version is: {supported_openvino_version}')
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. The try block raises KeyError (env var missing),
        # OSError (version file unreadable) or the deliberate ValueError above.
        except Exception:
            if model_compilation_target == 'local':
                raise SystemExit(f"Local model compilation was requested, but environment variables are not initialized for openvino {supported_openvino_version}. Run setupvars.sh/setupvars.bat from the OpenVINO toolkit.")
            model_compilation_target = 'cloud'
    print(f'model_compilation_target: {model_compilation_target}')
    output_location = Path(model_zoo_folder) / model / output_file
    download_location = download_model(model, model_zoo_folder)
    if model_compilation_target == 'local':
        ir_model_location = convert_model_to_ir(model, model_zoo_folder)
        if not ir_model_location.exists():
            raise RuntimeError(f"{ir_model_location} doesn't exist for downloaded model!")
        xml_path = ir_model_location / (model + ".xml")
        if not xml_path.exists():
            raise RuntimeError(f"{xml_path} doesn't exist for downloaded model!")
        return myriad_compile_model_local(shaves, cmx_slices, nces, xml_path, output_file)
    elif model_compilation_target == 'cloud':
        ir_model_location = Path(download_location) / "FP16"
        if not ir_model_location.exists():
            raise RuntimeError(f"{ir_model_location} doesn't exist for downloaded model!")
        xml_path = ir_model_location / (model + ".xml")
        if not xml_path.exists():
            raise RuntimeError(f"{xml_path} doesn't exist for downloaded model!")
        bin_path = ir_model_location / (model + ".bin")
        if not bin_path.exists():
            raise RuntimeError(f"{bin_path} doesn't exist for downloaded model!")
        result = myriad_compile_model_cloud(xml=xml_path, bin=bin_path, shaves=shaves, cmx_slices=cmx_slices, nces=nces, output_file=output_location)
        if result == 1:
            raise RuntimeError("Model compiler failed! Not connected to the internet?")
        elif result == 2:
            raise RuntimeError("Model compiler failed! Check logs for details")
    else:
        # BUGFIX: the original used `assert '<message>'`, which asserts a
        # truthy string literal and therefore never fires; invalid targets
        # must actually be rejected.
        raise ValueError('model_compilation_target must be either : ["auto", "local", "cloud"]')
    return 0
def main(args):
    """Entry point: unpack the parsed CLI argument dict and run the
    download-and-compile pipeline. Returns its result code."""
    return download_and_compile_NN_model(
        args['model_name'],
        args['model_zoo_folder'],
        args['shaves'],
        args['cmx_slices'],
        args['nces'],
        args['output'],
        args['model_compilation_target'],
    )
if __name__ == '__main__':
    import argparse
    from argparse import ArgumentParser

    def parse_args():
        """Build and parse the command-line arguments for the blob compiler."""
        epilog_text = '''
        Myriad blob compiler.
        '''
        parser = ArgumentParser(epilog=epilog_text,formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument("-model", "--model_name", default=None,
                            type=str, required=True,
                            help="model name")
        parser.add_argument("-sh", "--shaves", default=4, type=int,
                            help="Number of shaves used by NN.")
        parser.add_argument("-cmx", "--cmx_slices", default=4, type=int,
                            help="Number of cmx slices used by NN.")
        parser.add_argument("-nce", "--nces", default=1, type=int,
                            help="Number of NCEs used by NN.")
        parser.add_argument("-o", "--output", default=None,
                            type=Path, required=True,
                            help=".blob output")
        # BUGFIX: help text typo "lcoally" -> "locally"
        parser.add_argument("-mct", "--model-compilation-target", default="auto",
                            type=str, required=False, choices=["auto","local","cloud"],
                            help="Compile model locally or in cloud?")
        parser.add_argument("-mz", "--model-zoo-folder", default=None,
                            type=str, required=True,
                            help="Path to folder with models")
        options = parser.parse_args()
        return options

    args = vars(parse_args())
    ret = main(args)
exit(ret) | [
"subprocess.run",
"argparse.ArgumentParser",
"sys.exit",
"pathlib.Path",
"requests.post",
"numpy.concatenate"
] | [((1013, 1043), 'subprocess.run', 'subprocess.run', (['downloader_cmd'], {}), '(downloader_cmd)\n', (1027, 1043), False, 'import subprocess\n'), ((1427, 1450), 'pathlib.Path', 'Path', (['ir_converter_path'], {}), '(ir_converter_path)\n', (1431, 1450), False, 'from pathlib import Path\n'), ((1846, 1875), 'subprocess.run', 'subprocess.run', (['converter_cmd'], {}), '(converter_cmd)\n', (1860, 1875), False, 'import subprocess\n'), ((2856, 2920), 'numpy.concatenate', 'np.concatenate', (['([myriad_compile_path], myriad_compiler_options)'], {}), '(([myriad_compile_path], myriad_compiler_options))\n', (2870, 2920), True, 'import numpy as np\n'), ((2994, 3028), 'subprocess.run', 'subprocess.run', (['myriad_compile_cmd'], {}), '(myriad_compile_cmd)\n', (3008, 3028), False, 'import subprocess\n'), ((188, 202), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (192, 202), False, 'from pathlib import Path\n'), ((1159, 1185), 'pathlib.Path', 'Path', (['download_folder_path'], {}), '(download_folder_path)\n', (1163, 1185), False, 'from pathlib import Path\n'), ((3838, 3898), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'files': 'files', 'params': 'params'}), '(url, data=payload, files=files, params=params)\n', (3851, 3898), False, 'import requests\n'), ((8464, 8557), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'epilog': 'epilog_text', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(epilog=epilog_text, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (8478, 8557), False, 'from argparse import ArgumentParser\n'), ((1990, 2016), 'pathlib.Path', 'Path', (['download_folder_path'], {}), '(download_folder_path)\n', (1994, 2016), False, 'from pathlib import Path\n'), ((3678, 3687), 'pathlib.Path', 'Path', (['xml'], {}), '(xml)\n', (3682, 3687), False, 'from pathlib import Path\n'), ((3720, 3729), 'pathlib.Path', 'Path', (['bin'], {}), '(bin)\n', (3724, 3729), False, 'from pathlib import Path\n'), ((6135, 6157), 
'pathlib.Path', 'Path', (['model_zoo_folder'], {}), '(model_zoo_folder)\n', (6139, 6157), False, 'from pathlib import Path\n'), ((2264, 2302), 'pathlib.Path', 'Path', (["os.environ['INTEL_OPENVINO_DIR']"], {}), "(os.environ['INTEL_OPENVINO_DIR'])\n", (2268, 2302), False, 'from pathlib import Path\n'), ((2405, 2529), 'sys.exit', 'sys.exit', (["('Unable to locate Model Optimizer. ' +\n 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')"], {}), "('Unable to locate Model Optimizer. ' +\n 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')\n", (2413, 2529), False, 'import os, sys\n'), ((4747, 4765), 'pathlib.Path', 'Path', (['openvino_dir'], {}), '(openvino_dir)\n', (4751, 4765), False, 'from pathlib import Path\n'), ((6858, 6881), 'pathlib.Path', 'Path', (['download_location'], {}), '(download_location)\n', (6862, 6881), False, 'from pathlib import Path\n')] |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from arch.api import federation
from arch.api.utils import log_utils
from federatedml.logistic_regression.base_logistic_regression import BaseLogisticRegression
from federatedml.model_selection import MiniBatch
from federatedml.optim import activation
from federatedml.optim.gradient import HeteroLogisticGradient
from federatedml.statistic import data_overview
from federatedml.util import consts
from federatedml.util.transfer_variable import HeteroLRTransferVariable
LOGGER = log_utils.getLogger()
class HeteroLRGuest(BaseLogisticRegression):
    """Guest-side party of heterogeneous (vertically partitioned) logistic
    regression. Exchanges encrypted intermediate results with the Host and
    Arbiter parties through ``federation`` transfer variables."""
    def __init__(self, logistic_params):
        # LogisticParamChecker.check_param(logistic_params)
        super(HeteroLRGuest, self).__init__(logistic_params)
        self.transfer_variable = HeteroLRTransferVariable()
        self.data_batch_count = []
        # wx: guest-side linear predictor table; guest_forward: cached
        # per-instance tuple (E(wx), E(wx^2), wx) built by compute_forward.
        self.wx = None
        self.guest_forward = None
    def compute_forward(self, data_instances, coef_, intercept_):
        """Compute the guest linear predictor wx and cache, per instance,
        the triple (encrypt(wx), encrypt(wx^2), wx)."""
        self.wx = self.compute_wx(data_instances, coef_, intercept_)
        # bind to a local so the lambda does not capture `self`
        encrypt_operator = self.encrypt_operator
        self.guest_forward = self.wx.mapValues(
            lambda v: (encrypt_operator.encrypt(v), encrypt_operator.encrypt(np.square(v)), v))
    def aggregate_forward(self, host_forward):
        """Join the cached guest forward with the host's to form
        (E(wx_g + wx_h), E((wx_g + wx_h)^2)); the cross term uses the
        plaintext guest wx (g[2]) times the encrypted host wx (h[0])."""
        aggregate_forward_res = self.guest_forward.join(host_forward,
                                                        lambda g, h: (g[0] + h[0], g[1] + h[1] + 2 * g[2] * h[0]))
        return aggregate_forward_res
    @staticmethod
    def load_data(data_instance):
        """Map labels to {-1, 1}: any label other than 1 becomes -1."""
        if data_instance.label != 1:
            data_instance.label = -1
        return data_instance
    def fit(self, data_instances):
        """Train the guest-side model. The send/receive order with Host and
        Arbiter below is protocol-critical and must not be reordered."""
        LOGGER.info("Enter hetero_lr_guest fit")
        self._abnormal_detection(data_instances)
        self.header = data_instances.schema.get("header")
        data_instances = data_instances.mapValues(HeteroLRGuest.load_data)
        # Paillier public key comes from the arbiter; only it holds the private key.
        public_key = federation.get(name=self.transfer_variable.paillier_pubkey.name,
                                    tag=self.transfer_variable.generate_transferid(
                                        self.transfer_variable.paillier_pubkey),
                                    idx=0)
        LOGGER.info("Get public_key from arbiter:{}".format(public_key))
        self.encrypt_operator.set_public_key(public_key)
        LOGGER.info("Generate mini-batch from input data")
        mini_batch_obj = MiniBatch(data_instances, batch_size=self.batch_size)
        batch_num = mini_batch_obj.batch_nums
        if self.batch_size == -1:
            LOGGER.info("batch size is -1, set it to the number of data in data_instances")
            self.batch_size = data_instances.count()
        batch_info = {"batch_size": self.batch_size, "batch_num": batch_num}
        LOGGER.info("batch_info:{}".format(batch_info))
        federation.remote(batch_info,
                          name=self.transfer_variable.batch_info.name,
                          tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),
                          role=consts.HOST,
                          idx=0)
        LOGGER.info("Remote batch_info to Host")
        federation.remote(batch_info,
                          name=self.transfer_variable.batch_info.name,
                          tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),
                          role=consts.ARBITER,
                          idx=0)
        LOGGER.info("Remote batch_info to Arbiter")
        LOGGER.info("Start initialize model.")
        LOGGER.info("fit_intercept:{}".format(self.init_param_obj.fit_intercept))
        model_shape = data_overview.get_features_shape(data_instances)
        # with fit_intercept, the last initialized weight is the intercept term
        weight = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
        if self.init_param_obj.fit_intercept is True:
            self.coef_ = weight[:-1]
            self.intercept_ = weight[-1]
        else:
            self.coef_ = weight
        is_send_all_batch_index = False
        self.n_iter_ = 0
        # cache of batch_index -> joined batch data, filled on first pass
        index_data_inst_map = {}
        while self.n_iter_ < self.max_iter:
            LOGGER.info("iter:{}".format(self.n_iter_))
            # each iter will get the same batach_data_generator
            batch_data_generator = mini_batch_obj.mini_batch_data_generator(result='index')
            batch_index = 0
            for batch_data_index in batch_data_generator:
                LOGGER.info("batch:{}".format(batch_index))
                # batch indices are only sent to Host once; Host reuses them afterwards
                if not is_send_all_batch_index:
                    LOGGER.info("remote mini-batch index to Host")
                    federation.remote(batch_data_index,
                                      name=self.transfer_variable.batch_data_index.name,
                                      tag=self.transfer_variable.generate_transferid(
                                          self.transfer_variable.batch_data_index,
                                          self.n_iter_,
                                          batch_index),
                                      role=consts.HOST,
                                      idx=0)
                    if batch_index >= mini_batch_obj.batch_nums - 1:
                        is_send_all_batch_index = True
                # Get mini-batch train data
                if len(index_data_inst_map) < batch_num:
                    batch_data_inst = data_instances.join(batch_data_index, lambda data_inst, index: data_inst)
                    index_data_inst_map[batch_index] = batch_data_inst
                else:
                    batch_data_inst = index_data_inst_map[batch_index]
                # transforms features of raw input 'batch_data_inst' into more representative features 'batch_feat_inst'
                batch_feat_inst = self.transform(batch_data_inst)
                # guest/host forward
                self.compute_forward(batch_feat_inst, self.coef_, self.intercept_)
                host_forward = federation.get(name=self.transfer_variable.host_forward_dict.name,
                                              tag=self.transfer_variable.generate_transferid(
                                                  self.transfer_variable.host_forward_dict, self.n_iter_, batch_index),
                                              idx=0)
                LOGGER.info("Get host_forward from host")
                aggregate_forward_res = self.aggregate_forward(host_forward)
                en_aggregate_wx = aggregate_forward_res.mapValues(lambda v: v[0])
                en_aggregate_wx_square = aggregate_forward_res.mapValues(lambda v: v[1])
                # compute [[d]]
                if self.gradient_operator is None:
                    self.gradient_operator = HeteroLogisticGradient(self.encrypt_operator)
                fore_gradient = self.gradient_operator.compute_fore_gradient(batch_feat_inst, en_aggregate_wx)
                federation.remote(fore_gradient,
                                  name=self.transfer_variable.fore_gradient.name,
                                  tag=self.transfer_variable.generate_transferid(self.transfer_variable.fore_gradient,
                                                                                 self.n_iter_,
                                                                                 batch_index),
                                  role=consts.HOST,
                                  idx=0)
                LOGGER.info("Remote fore_gradient to Host")
                # compute guest gradient and loss
                guest_gradient, loss = self.gradient_operator.compute_gradient_and_loss(batch_feat_inst,
                                                                                        fore_gradient,
                                                                                        en_aggregate_wx,
                                                                                        en_aggregate_wx_square,
                                                                                        self.fit_intercept)
                # loss regulation if necessary
                if self.updater is not None:
                    guest_loss_regular = self.updater.loss_norm(self.coef_)
                    loss += self.encrypt_operator.encrypt(guest_loss_regular)
                federation.remote(guest_gradient,
                                  name=self.transfer_variable.guest_gradient.name,
                                  tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_gradient,
                                                                                 self.n_iter_,
                                                                                 batch_index),
                                  role=consts.ARBITER,
                                  idx=0)
                LOGGER.info("Remote guest_gradient to arbiter")
                # arbiter decrypts and optimizes the gradient, then sends it back
                optim_guest_gradient = federation.get(name=self.transfer_variable.guest_optim_gradient.name,
                                                      tag=self.transfer_variable.generate_transferid(
                                                          self.transfer_variable.guest_optim_gradient, self.n_iter_,
                                                          batch_index),
                                                      idx=0)
                LOGGER.info("Get optim_guest_gradient from arbiter")
                # update model
                LOGGER.info("update_model")
                self.update_model(optim_guest_gradient)
                # update local model that transforms features of raw input 'batch_data_inst'
                training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
                self.update_local_model(fore_gradient, batch_data_inst, self.coef_, **training_info)
                # Get loss regulation from Host if regulation is set
                if self.updater is not None:
                    en_host_loss_regular = federation.get(name=self.transfer_variable.host_loss_regular.name,
                                                          tag=self.transfer_variable.generate_transferid(
                                                              self.transfer_variable.host_loss_regular, self.n_iter_,
                                                              batch_index),
                                                          idx=0)
                    LOGGER.info("Get host_loss_regular from Host")
                    loss += en_host_loss_regular
                federation.remote(loss,
                                  name=self.transfer_variable.loss.name,
                                  tag=self.transfer_variable.generate_transferid(self.transfer_variable.loss,
                                                                                 self.n_iter_,
                                                                                 batch_index),
                                  role=consts.ARBITER,
                                  idx=0)
                LOGGER.info("Remote loss to arbiter")
                # is converge of loss in arbiter
                batch_index += 1
                # NOTE(review): n_iter_ is incremented and convergence checked
                # inside the batch loop, so n_iter_ counts batches here rather
                # than epochs — this mirrors the original control flow.
                is_stopped = federation.get(name=self.transfer_variable.is_stopped.name,
                                            tag=self.transfer_variable.generate_transferid(
                                                self.transfer_variable.is_stopped, self.n_iter_, batch_index),
                                            idx=0)
                LOGGER.info("Get is_stop flag from arbiter:{}".format(is_stopped))
                self.n_iter_ += 1
                if is_stopped:
                    LOGGER.info("Get stop signal from arbiter, model is converged, iter:{}".format(self.n_iter_))
                    break
        LOGGER.info("Reach max iter {}, train model finish!".format(self.max_iter))
    def predict(self, data_instances, predict_param):
        """Predict labels/probabilities for *data_instances*, combining the
        guest and host partial scores through a sigmoid.

        Returns a table of (label, probability, predicted_label) per instance."""
        LOGGER.info("Start predict ...")
        data_features = self.transform(data_instances)
        prob_guest = self.compute_wx(data_features, self.coef_, self.intercept_)
        prob_host = federation.get(name=self.transfer_variable.host_prob.name,
                                   tag=self.transfer_variable.generate_transferid(
                                       self.transfer_variable.host_prob),
                                   idx=0)
        LOGGER.info("Get probability from Host")
        # guest probability
        pred_prob = prob_guest.join(prob_host, lambda g, h: activation.sigmoid(g + h))
        pred_label = self.classified(pred_prob, predict_param.threshold)
        if predict_param.with_proba:
            labels = data_instances.mapValues(lambda v: v.label)
            predict_result = labels.join(pred_prob, lambda label, prob: (label, prob))
        else:
            predict_result = data_instances.mapValues(lambda v: (v.label, None))
        predict_result = predict_result.join(pred_label, lambda r, p: (r[0], r[1], p))
        return predict_result
| [
"arch.api.utils.log_utils.getLogger",
"federatedml.model_selection.MiniBatch",
"federatedml.util.transfer_variable.HeteroLRTransferVariable",
"numpy.square",
"federatedml.optim.gradient.HeteroLogisticGradient",
"federatedml.statistic.data_overview.get_features_shape",
"federatedml.optim.activation.sigmo... | [((1117, 1138), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1136, 1138), False, 'from arch.api.utils import log_utils\n'), ((1381, 1407), 'federatedml.util.transfer_variable.HeteroLRTransferVariable', 'HeteroLRTransferVariable', ([], {}), '()\n', (1405, 1407), False, 'from federatedml.util.transfer_variable import HeteroLRTransferVariable\n'), ((3034, 3087), 'federatedml.model_selection.MiniBatch', 'MiniBatch', (['data_instances'], {'batch_size': 'self.batch_size'}), '(data_instances, batch_size=self.batch_size)\n', (3043, 3087), False, 'from federatedml.model_selection import MiniBatch\n'), ((4293, 4341), 'federatedml.statistic.data_overview.get_features_shape', 'data_overview.get_features_shape', (['data_instances'], {}), '(data_instances)\n', (4325, 4341), False, 'from federatedml.statistic import data_overview\n'), ((13183, 13208), 'federatedml.optim.activation.sigmoid', 'activation.sigmoid', (['(g + h)'], {}), '(g + h)\n', (13201, 13208), False, 'from federatedml.optim import activation\n'), ((7367, 7412), 'federatedml.optim.gradient.HeteroLogisticGradient', 'HeteroLogisticGradient', (['self.encrypt_operator'], {}), '(self.encrypt_operator)\n', (7389, 7412), False, 'from federatedml.optim.gradient import HeteroLogisticGradient\n'), ((1811, 1823), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (1820, 1823), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates.builtin_frames import ICRS, Galactic, Galactocentric
from astropy.coordinates import builtin_frames as bf
from astropy.units import allclose as quantity_allclose
from astropy.coordinates.errors import ConvertError
from astropy.coordinates import representation as r
def test_api():
    """Smoke-test the velocity-aware frame transformation API."""
    # transform observed Barycentric velocities to full-space Galactocentric
    gc_frame = Galactocentric()
    coord = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc,
                 pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr,
                 radial_velocity=71*u.km/u.s)
    coord.transform_to(gc_frame)
    # transform a set of ICRS proper motions to Galactic
    coord = ICRS(ra=151.*u.deg, dec=-16*u.deg,
                 pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr)
    coord.transform_to(Galactic)
    # transform a Barycentric RV to a GSR RV
    coord = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc,
                 pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
                 radial_velocity=71*u.km/u.s)
    coord.transform_to(Galactocentric)
# Every valid combination of position/velocity keyword arguments accepted by
# ICRS; consumed as parametrize cases by test_all_arg_options below.
all_kwargs = [
    dict(ra=37.4*u.deg, dec=-55.8*u.deg),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
         radial_velocity=105.7*u.km/u.s),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
         radial_velocity=105.7*u.km/u.s),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
         radial_velocity=105.7*u.km/u.s,
         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
         radial_velocity=105.7*u.km/u.s),
    # Now test other representation/differential types:
    dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
         representation_type='cartesian'),
    dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
         representation_type=r.CartesianRepresentation),
    dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
         v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
         representation_type=r.CartesianRepresentation,
         differential_type=r.CartesianDifferential),
    dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
         v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
         representation_type=r.CartesianRepresentation,
         differential_type='cartesian'),
]
@pytest.mark.parametrize('kwargs', all_kwargs)
def test_all_arg_options(kwargs):
    # Above is a list of all possible valid combinations of arguments.
    # Here we do a simple thing and just verify that passing them in, we have
    # access to the relevant attributes from the resulting object
    frame = ICRS(**kwargs)
    gal_repr = repr(frame.transform_to(Galactic))

    for name in kwargs:
        if name != 'differential_type':
            getattr(frame, name)

    has_pm = 'pm_ra_cosdec' in kwargs
    has_rv = 'radial_velocity' in kwargs

    if has_pm:  # should have both proper-motion components
        assert 'pm_l_cosb' in gal_repr
        assert 'pm_b' in gal_repr
        assert 'mas / yr' in gal_repr

        if not has_rv:
            assert 'radial_velocity' not in gal_repr

    if has_rv:
        assert 'radial_velocity' in gal_repr
        assert 'km / s' in gal_repr

        if not has_pm:
            assert 'pm_l_cosb' not in gal_repr
            assert 'pm_b' not in gal_repr
@pytest.mark.parametrize('cls,lon,lat', [
    [bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
    [bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
    [bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
    [bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
    [bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricTrueEcliptic, 'lon', 'lat'],
    [bf.GeocentricTrueEcliptic, 'lon', 'lat'],
    [bf.BarycentricTrueEcliptic, 'lon', 'lat'],
    [bf.PrecessedGeocentric, 'ra', 'dec']
])
def test_expected_arg_names(cls, lon, lat):
    # Each frame must accept its own longitude/latitude component names and
    # the matching proper-motion keyword spellings.
    kwargs = {
        lon: 37.4*u.deg,
        lat: -55.8*u.deg,
        'distance': 150*u.pc,
        'pm_{0}_cos{1}'.format(lon, lat): -21.2*u.mas/u.yr,
        'pm_{0}'.format(lat): 17.1*u.mas/u.yr,
        'radial_velocity': 105.7*u.km/u.s,
    }
    cls(**kwargs)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[1:-1]
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
"""[1:-1]
# in principal we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
                         [[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W):
    icrs = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
                pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
                radial_velocity=rv*u.km/u.s)
    gal = icrs.transform_to(Galactic)

    # precision is limited by 2-deciimal digit string representation of pms
    pm_atol = .01*u.marcsec/u.yr
    assert quantity_allclose(gal.pm_l_cosb, pmglon*u.marcsec/u.yr, atol=pm_atol)
    assert quantity_allclose(gal.pm_b, pmglat*u.marcsec/u.yr, atol=pm_atol)

    # make sure UVW also makes sense
    uvw = gal.cartesian.differentials['s']

    # precision is limited by 1-decimal digit string representation of vels
    v_atol = .1*u.km/u.s
    assert quantity_allclose(uvw.d_x, U*u.km/u.s, atol=v_atol)
    assert quantity_allclose(uvw.d_y, V*u.km/u.s, atol=v_atol)
    assert quantity_allclose(uvw.d_z, W*u.km/u.s, atol=v_atol)
@pytest.mark.parametrize('kwargs,expect_success', [
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg), False],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg,
          pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
          radial_velocity=105.7*u.km/u.s), False],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg,
          radial_velocity=105.7*u.km/u.s,
          pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
    [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
          pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
          radial_velocity=105.7*u.km/u.s), True]
])
def test_frame_affinetransform(kwargs, expect_success):
    """There are already tests in test_transformations.py that check that
    an AffineTransform fails without full-space data, but this just checks that
    things work as expected at the frame level as well.
    """
    frame = ICRS(**kwargs)

    if not expect_success:
        with pytest.raises(ConvertError):
            frame.transform_to(Galactocentric)
    else:
        frame.transform_to(Galactocentric)
def test_differential_type_arg():
    """
    Test passing in an explicit differential class to the initializer or
    changing the differential class via set_representation_cls
    """
    from astropy.coordinates.builtin_frames import ICRS

    # differential class passed directly
    frame = ICRS(ra=1*u.deg, dec=60*u.deg,
                 pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
                 differential_type=r.UnitSphericalDifferential)
    assert frame.pm_ra == 10*u.mas/u.yr

    # ...or as a {key: class} mapping
    frame = ICRS(ra=1*u.deg, dec=60*u.deg,
                 pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
                 differential_type={'s': r.UnitSphericalDifferential})
    assert frame.pm_ra == 10*u.mas/u.yr

    # ...or changed after the fact via set_representation_cls
    frame = ICRS(ra=1*u.deg, dec=60*u.deg,
                 pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr)
    frame.set_representation_cls(s=r.UnitSphericalDifferential)
    assert quantity_allclose(frame.pm_ra, 20*u.mas/u.yr)

    # incompatible representation and differential
    with pytest.raises(TypeError):
        ICRS(ra=1*u.deg, dec=60*u.deg,
             v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s,
             differential_type=r.CartesianDifferential)

    # specify both representation and differential explicitly
    frame = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
                 v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s,
                 representation_type=r.CartesianRepresentation,
                 differential_type=r.CartesianDifferential)
    assert frame.x == 1*u.pc
    assert frame.y == 2*u.pc
    assert frame.z == 3*u.pc
    assert frame.v_x == 1*u.km/u.s
    assert frame.v_y == 2*u.km/u.s
    assert frame.v_z == 3*u.km/u.s
def test_slicing_preserves_differential():
    # Slicing a frame that carries velocity data must keep both the position
    # and the velocity components intact.
    frame = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
                 pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
                 radial_velocity=105.7*u.km/u.s)
    sliced = frame.reshape(1, 1)[:1, 0]

    component_names = list(frame.representation_component_names.keys())
    component_names += list(frame.get_representation_component_names('s').keys())
    for name in component_names:
        assert getattr(frame, name) == getattr(sliced, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(ra=np.random.uniform(0, 360, n)*u.deg,
dec=np.random.uniform(-90, 90, n)*u.deg,
distance=100*u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n)*u.mas/u.yr,
pm_dec=np.random.normal(0, 100, n)*u.mas/u.yr,
radial_velocity=np.random.normal(0, 100, n)*u.km/u.s)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
rv = 105.7*u.km/u.s
icrs3 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=rv)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls('cartesian')
assert hasattr(icrs3, 'radial_velocity')
assert quantity_allclose(icrs3.radial_velocity, rv)
icrs4 = ICRS(x=30*u.pc, y=20*u.pc, z=11*u.pc,
v_x=10*u.km/u.s, v_y=10*u.km/u.s, v_z=10*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
icrs4.radial_velocity
def test_negative_distance():
""" Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
RA = 150 * u.deg
DEC = -11*u.deg
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=10*u.mas/u.yr,
pm_dec=10*u.mas/u.yr)
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()))
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
| [
"numpy.random.uniform",
"astropy.coordinates.builtin_frames.ICRS",
"astropy.units.parallax",
"pytest.raises",
"numpy.random.normal",
"astropy.units.allclose",
"pytest.mark.parametrize",
"astropy.coordinates.builtin_frames.Galactocentric"
] | [((2767, 2812), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kwargs"""', 'all_kwargs'], {}), "('kwargs', all_kwargs)\n", (2790, 2812), False, 'import pytest\n'), ((3769, 4320), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls,lon,lat"""', "[[bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra',\n 'dec'], [bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra',\n 'dec'], [bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic,\n 'l', 'b'], [bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'], [\n bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricTrueEcliptic, 'lon', 'lat'],\n [bf.GeocentricTrueEcliptic, 'lon', 'lat'], [bf.BarycentricTrueEcliptic,\n 'lon', 'lat'], [bf.PrecessedGeocentric, 'ra', 'dec']]"], {}), "('cls,lon,lat', [[bf.ICRS, 'ra', 'dec'], [bf.FK4,\n 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'], [bf.FK5, 'ra', 'dec'], [bf\n .GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'], [bf.LSR, 'ra', 'dec'], [bf\n .CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'], [bf.AltAz, 'az', 'alt'],\n [bf.Supergalactic, 'sgl', 'sgb'], [bf.GalacticLSR, 'l', 'b'], [bf.\n HeliocentricTrueEcliptic, 'lon', 'lat'], [bf.GeocentricTrueEcliptic,\n 'lon', 'lat'], [bf.BarycentricTrueEcliptic, 'lon', 'lat'], [bf.\n PrecessedGeocentric, 'ra', 'dec']])\n", (3792, 4320), False, 'import pytest\n'), ((555, 571), 'astropy.coordinates.builtin_frames.Galactocentric', 'Galactocentric', ([], {}), '()\n', (569, 571), False, 'from astropy.coordinates.builtin_frames import ICRS, Galactic, Galactocentric\n'), ((583, 744), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(151.0 * u.deg)', 'dec': '(-16 * u.deg)', 'distance': '(101 * u.pc)', 'pm_ra_cosdec': '(21 * u.mas / u.yr)', 'pm_dec': '(-71 * u.mas / u.yr)', 'radial_velocity': '(71 * u.km / u.s)'}), '(ra=151.0 * u.deg, dec=-16 * u.deg, distance=101 * u.pc, pm_ra_cosdec=\n 21 * u.mas / u.yr, pm_dec=-71 * u.mas / u.yr, radial_velocity=71 * u.km /\n u.s)\n', (587, 744), False, 'from 
astropy.coordinates.builtin_frames import ICRS\n'), ((850, 952), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(151.0 * u.deg)', 'dec': '(-16 * u.deg)', 'pm_ra_cosdec': '(21 * u.mas / u.yr)', 'pm_dec': '(-71 * u.mas / u.yr)'}), '(ra=151.0 * u.deg, dec=-16 * u.deg, pm_ra_cosdec=21 * u.mas / u.yr,\n pm_dec=-71 * u.mas / u.yr)\n', (854, 952), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((1041, 1194), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(151.0 * u.deg)', 'dec': '(-16 * u.deg)', 'distance': '(1.0 * u.pc)', 'pm_ra_cosdec': '(0 * u.mas / u.yr)', 'pm_dec': '(0 * u.mas / u.yr)', 'radial_velocity': '(71 * u.km / u.s)'}), '(ra=151.0 * u.deg, dec=-16 * u.deg, distance=1.0 * u.pc, pm_ra_cosdec=0 *\n u.mas / u.yr, pm_dec=0 * u.mas / u.yr, radial_velocity=71 * u.km / u.s)\n', (1045, 1194), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((3073, 3087), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {}), '(**kwargs)\n', (3077, 3087), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((6685, 6835), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', (['(ra * u.deg)', '(dec * u.deg)', '(dist * u.pc)'], {'pm_ra_cosdec': '(pmra * u.marcsec / u.yr)', 'pm_dec': '(pmdec * u.marcsec / u.yr)', 'radial_velocity': '(rv * u.km / u.s)'}), '(ra * u.deg, dec * u.deg, dist * u.pc, pm_ra_cosdec=pmra * u.marcsec /\n u.yr, pm_dec=pmdec * u.marcsec / u.yr, radial_velocity=rv * u.km / u.s)\n', (6689, 6835), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((6961, 7053), 'astropy.units.allclose', 'quantity_allclose', (['g.pm_l_cosb', '(pmglon * u.marcsec / u.yr)'], {'atol': '(0.01 * u.marcsec / u.yr)'}), '(g.pm_l_cosb, pmglon * u.marcsec / u.yr, atol=0.01 * u.\n marcsec / u.yr)\n', (6978, 7053), True, 'from astropy.units import allclose as quantity_allclose\n'), ((7051, 7137), 'astropy.units.allclose', 'quantity_allclose', (['g.pm_b', '(pmglat * u.marcsec / u.yr)'], {'atol': '(0.01 
* u.marcsec / u.yr)'}), '(g.pm_b, pmglat * u.marcsec / u.yr, atol=0.01 * u.marcsec /\n u.yr)\n', (7068, 7137), True, 'from astropy.units import allclose as quantity_allclose\n'), ((7292, 7358), 'astropy.units.allclose', 'quantity_allclose', (['uvwg.d_x', '(U * u.km / u.s)'], {'atol': '(0.1 * u.km / u.s)'}), '(uvwg.d_x, U * u.km / u.s, atol=0.1 * u.km / u.s)\n', (7309, 7358), True, 'from astropy.units import allclose as quantity_allclose\n'), ((7361, 7427), 'astropy.units.allclose', 'quantity_allclose', (['uvwg.d_y', '(V * u.km / u.s)'], {'atol': '(0.1 * u.km / u.s)'}), '(uvwg.d_y, V * u.km / u.s, atol=0.1 * u.km / u.s)\n', (7378, 7427), True, 'from astropy.units import allclose as quantity_allclose\n'), ((7430, 7496), 'astropy.units.allclose', 'quantity_allclose', (['uvwg.d_z', '(W * u.km / u.s)'], {'atol': '(0.1 * u.km / u.s)'}), '(uvwg.d_z, W * u.km / u.s, atol=0.1 * u.km / u.s)\n', (7447, 7496), True, 'from astropy.units import allclose as quantity_allclose\n'), ((8595, 8609), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {}), '(**kwargs)\n', (8599, 8609), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((9035, 9173), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(1 * u.deg)', 'dec': '(60 * u.deg)', 'pm_ra': '(10 * u.mas / u.yr)', 'pm_dec': '(-11 * u.mas / u.yr)', 'differential_type': 'r.UnitSphericalDifferential'}), '(ra=1 * u.deg, dec=60 * u.deg, pm_ra=10 * u.mas / u.yr, pm_dec=-11 * u.\n mas / u.yr, differential_type=r.UnitSphericalDifferential)\n', (9039, 9173), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((9240, 9385), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(1 * u.deg)', 'dec': '(60 * u.deg)', 'pm_ra': '(10 * u.mas / u.yr)', 'pm_dec': '(-11 * u.mas / u.yr)', 'differential_type': "{'s': r.UnitSphericalDifferential}"}), "(ra=1 * u.deg, dec=60 * u.deg, pm_ra=10 * u.mas / u.yr, pm_dec=-11 * u.\n mas / u.yr, differential_type={'s': r.UnitSphericalDifferential})\n", (9244, 
9385), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((9452, 9550), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(1 * u.deg)', 'dec': '(60 * u.deg)', 'pm_ra_cosdec': '(10 * u.mas / u.yr)', 'pm_dec': '(-11 * u.mas / u.yr)'}), '(ra=1 * u.deg, dec=60 * u.deg, pm_ra_cosdec=10 * u.mas / u.yr, pm_dec=-\n 11 * u.mas / u.yr)\n', (9456, 9550), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((9624, 9672), 'astropy.units.allclose', 'quantity_allclose', (['icrs.pm_ra', '(20 * u.mas / u.yr)'], {}), '(icrs.pm_ra, 20 * u.mas / u.yr)\n', (9641, 9672), True, 'from astropy.units import allclose as quantity_allclose\n'), ((9945, 10143), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'x': '(1 * u.pc)', 'y': '(2 * u.pc)', 'z': '(3 * u.pc)', 'v_x': '(1 * u.km / u.s)', 'v_y': '(2 * u.km / u.s)', 'v_z': '(3 * u.km / u.s)', 'representation_type': 'r.CartesianRepresentation', 'differential_type': 'r.CartesianDifferential'}), '(x=1 * u.pc, y=2 * u.pc, z=3 * u.pc, v_x=1 * u.km / u.s, v_y=2 * u.km /\n u.s, v_z=3 * u.km / u.s, representation_type=r.CartesianRepresentation,\n differential_type=r.CartesianDifferential)\n', (9949, 10143), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((10408, 10577), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(37.4 * u.deg)', 'dec': '(-55.8 * u.deg)', 'distance': '(150 * u.pc)', 'pm_ra_cosdec': '(-21.2 * u.mas / u.yr)', 'pm_dec': '(17.1 * u.mas / u.yr)', 'radial_velocity': '(105.7 * u.km / u.s)'}), '(ra=37.4 * u.deg, dec=-55.8 * u.deg, distance=150 * u.pc, pm_ra_cosdec=\n -21.2 * u.mas / u.yr, pm_dec=17.1 * u.mas / u.yr, radial_velocity=105.7 *\n u.km / u.s)\n', (10412, 10577), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((11400, 11444), 'astropy.units.allclose', 'quantity_allclose', (['pm[0]', 'icrs1.pm_ra_cosdec'], {}), '(pm[0], icrs1.pm_ra_cosdec)\n', (11417, 11444), True, 'from astropy.units import allclose as quantity_allclose\n'), 
((11456, 11494), 'astropy.units.allclose', 'quantity_allclose', (['pm[1]', 'icrs1.pm_dec'], {}), '(pm[1], icrs1.pm_dec)\n', (11473, 11494), True, 'from astropy.units import allclose as quantity_allclose\n'), ((11531, 11700), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(37.4 * u.deg)', 'dec': '(-55.8 * u.deg)', 'distance': '(150 * u.pc)', 'pm_ra_cosdec': '(-21.2 * u.mas / u.yr)', 'pm_dec': '(17.1 * u.mas / u.yr)', 'radial_velocity': '(105.7 * u.km / u.s)'}), '(ra=37.4 * u.deg, dec=-55.8 * u.deg, distance=150 * u.pc, pm_ra_cosdec=\n -21.2 * u.mas / u.yr, pm_dec=17.1 * u.mas / u.yr, radial_velocity=105.7 *\n u.km / u.s)\n', (11535, 11700), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((11771, 11815), 'astropy.units.allclose', 'quantity_allclose', (['pm[0]', 'icrs2.pm_ra_cosdec'], {}), '(pm[0], icrs2.pm_ra_cosdec)\n', (11788, 11815), True, 'from astropy.units import allclose as quantity_allclose\n'), ((11827, 11865), 'astropy.units.allclose', 'quantity_allclose', (['pm[1]', 'icrs2.pm_dec'], {}), '(pm[1], icrs2.pm_dec)\n', (11844, 11865), True, 'from astropy.units import allclose as quantity_allclose\n'), ((11965, 12092), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(37.4 * u.deg)', 'dec': '(-55.8 * u.deg)', 'pm_ra_cosdec': '(-21.2 * u.mas / u.yr)', 'pm_dec': '(17.1 * u.mas / u.yr)', 'radial_velocity': 'rv'}), '(ra=37.4 * u.deg, dec=-55.8 * u.deg, pm_ra_cosdec=-21.2 * u.mas / u.yr,\n pm_dec=17.1 * u.mas / u.yr, radial_velocity=rv)\n', (11969, 12092), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((12273, 12317), 'astropy.units.allclose', 'quantity_allclose', (['icrs3.radial_velocity', 'rv'], {}), '(icrs3.radial_velocity, rv)\n', (12290, 12317), True, 'from astropy.units import allclose as quantity_allclose\n'), ((12331, 12537), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'x': '(30 * u.pc)', 'y': '(20 * u.pc)', 'z': '(11 * u.pc)', 'v_x': '(10 * u.km / u.s)', 'v_y': '(10 * u.km / 
u.s)', 'v_z': '(10 * u.km / u.s)', 'representation_type': 'r.CartesianRepresentation', 'differential_type': 'r.CartesianDifferential'}), '(x=30 * u.pc, y=20 * u.pc, z=11 * u.pc, v_x=10 * u.km / u.s, v_y=10 * u\n .km / u.s, v_z=10 * u.km / u.s, representation_type=r.\n CartesianRepresentation, differential_type=r.CartesianDifferential)\n', (12335, 12537), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((12953, 12980), 'astropy.units.allclose', 'quantity_allclose', (['c.ra', 'RA'], {}), '(c.ra, RA)\n', (12970, 12980), True, 'from astropy.units import allclose as quantity_allclose\n'), ((12992, 13021), 'astropy.units.allclose', 'quantity_allclose', (['c.dec', 'DEC'], {}), '(c.dec, DEC)\n', (13009, 13021), True, 'from astropy.units import allclose as quantity_allclose\n'), ((13121, 13148), 'astropy.units.allclose', 'quantity_allclose', (['c.ra', 'RA'], {}), '(c.ra, RA)\n', (13138, 13148), True, 'from astropy.units import allclose as quantity_allclose\n'), ((13160, 13189), 'astropy.units.allclose', 'quantity_allclose', (['c.dec', 'DEC'], {}), '(c.dec, DEC)\n', (13177, 13189), True, 'from astropy.units import allclose as quantity_allclose\n'), ((9730, 9754), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9743, 9754), False, 'import pytest\n'), ((9764, 9907), 'astropy.coordinates.builtin_frames.ICRS', 'ICRS', ([], {'ra': '(1 * u.deg)', 'dec': '(60 * u.deg)', 'v_x': '(1 * u.km / u.s)', 'v_y': '(-2 * u.km / u.s)', 'v_z': '(-2 * u.km / u.s)', 'differential_type': 'r.CartesianDifferential'}), '(ra=1 * u.deg, dec=60 * u.deg, v_x=1 * u.km / u.s, v_y=-2 * u.km / u.s,\n v_z=-2 * u.km / u.s, differential_type=r.CartesianDifferential)\n', (9768, 9907), False, 'from astropy.coordinates.builtin_frames import ICRS\n'), ((12120, 12145), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12133, 12145), False, 'import pytest\n'), ((8705, 8732), 'pytest.raises', 'pytest.raises', (['ConvertError'], {}), '(ConvertError)\n', 
(8718, 8732), False, 'import pytest\n'), ((11002, 11030), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(360)', 'n'], {}), '(0, 360, n)\n', (11019, 11030), True, 'import numpy as np\n'), ((11059, 11088), 'numpy.random.uniform', 'np.random.uniform', (['(-90)', '(90)', 'n'], {}), '(-90, 90, n)\n', (11076, 11088), True, 'import numpy as np\n'), ((12851, 12863), 'astropy.units.parallax', 'u.parallax', ([], {}), '()\n', (12861, 12863), True, 'from astropy import units as u\n'), ((13095, 13107), 'astropy.units.parallax', 'u.parallax', ([], {}), '()\n', (13105, 13107), True, 'from astropy import units as u\n'), ((11162, 11189), 'numpy.random.normal', 'np.random.normal', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (11178, 11189), True, 'import numpy as np\n'), ((11226, 11253), 'numpy.random.normal', 'np.random.normal', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (11242, 11253), True, 'import numpy as np\n'), ((11299, 11326), 'numpy.random.normal', 'np.random.normal', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (11315, 11326), True, 'import numpy as np\n')] |
"""Tests for colorspace functions."""
import pytest
import numpy as np
PRECISION = 1e-1
@pytest.fixture
def test_spectrum():
return colorimetry.prepare_illuminant_spectrum()
def test_can_prepare_cmf_1931_2deg():
''' Trusts observer is properly formed.
'''
obs = colorimetry.prepare_cmf('1931_2deg')
assert obs
def test_can_prepare_cmf_1964_10deg():
''' Trusts observer is properly formed.
'''
obs = colorimetry.prepare_cmf('1964_10deg')
assert obs
def test_prepare_cmf_throws_for_bad_choices():
with pytest.raises(ValueError):
colorimetry.prepare_cmf('asdf')
def test_cmf_is_valid():
''' Tests if a cmf returns as valid data.
'''
obs = colorimetry.prepare_cmf()
assert 'X' in obs
assert 'Y' in obs
assert 'Z' in obs
assert 'wvl' in obs
assert len(obs['X']) == len(obs['Y']) == len(obs['Z']) == len(obs['wvl'])
def test_can_get_roberson_cct():
''' Trusts data is properly formed.
'''
cct_data = colorimetry.prepare_robertson_cct_data()
assert cct_data
def test_robertson_cct_is_valid():
''' Tests if the Roberson CCT data is returned properly.
'''
cct = colorimetry.prepare_robertson_cct_data()
assert len(cct['urd']) == 31
assert len(cct['K']) == 31
assert len(cct['u']) == 31
assert len(cct['v']) == 31
assert len(cct['dvdu']) == 31
@pytest.mark.parametrize('illuminant', [
'A', 'B', 'C', 'E',
'D50', 'D55', 'D65', 'D75',
'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'HP1', 'HP2', 'HP3', 'HP4', 'HP5'])
def test_can_get_illuminant(illuminant):
ill_spectrum = colorimetry.prepare_illuminant_spectrum(illuminant)
assert ill_spectrum
@pytest.mark.parametrize('illuminant', ['bb_2000', 'bb_6500', 'bb_6504', 'bb_6500.123'])
def test_can_get_blackbody_illuminants(illuminant):
wvl = np.arange(360, 780, 5)
bb_spectrum = colorimetry.prepare_illuminant_spectrum(illuminant, wvl)
assert bb_spectrum
def test_can_get_blackbody_illuminant_without_defined_wvl():
ill = 'bb_6500'
bb_spectrum = colorimetry.prepare_illuminant_spectrum(ill)
assert bb_spectrum
@pytest.mark.parametrize('boolean', [True, False])
def test_can_get_blackbody_illuminant_with_without_normalization(boolean):
bb_spectrum = colorimetry.prepare_illuminant_spectrum('bb_6500', bb_norm=boolean)
assert bb_spectrum
def test_normalization_correctness_vis(test_spectrum):
spec2 = colorimetry.normalize_spectrum(test_spectrum, to='peak vis')
wvl, vals = spec2.values()
idx1 = np.searchsorted(wvl, 400)
idx2 = np.searchsorted(wvl, 700)
idx_max = np.argmax(vals)
assert idx_max > idx1 and idx_max < idx2
assert vals[idx_max] == 1
assert vals.max() == 1
def test_normalization_correctness_560nm(test_spectrum):
spec2 = colorimetry.normalize_spectrum(test_spectrum, to='560nm')
wvl, vals = spec2.values()
idx1 = np.searchsorted(wvl, 560)
assert vals[idx1] == 1
def test_spectrum_to_xyz_emissive_can_interpolate():
wvl = np.arange(360, 830, 1)
vals = colorimetry.blackbody_spectrum(6500, wvl)
spec = {
'wvl': wvl,
'values': vals,
}
X, Y, Z = colorimetry.spectrum_to_XYZ_emissive(spec)
assert np.isfinite(X)
assert np.isfinite(Y)
assert np.isfinite(Z)
def test_XYZ_to_xyY_zeros_in_refwhite_out():
spec = colorimetry.prepare_illuminant_spectrum('D65')
ref_xyz = colorimetry.spectrum_to_XYZ_emissive(spec)
ref_xyY = colorimetry.XYZ_to_xyY(ref_xyz)
test_X = test_Y = test_Z = np.zeros(2)
test_data = np.dstack((test_X, test_Y, test_Z))
xyY = colorimetry.XYZ_to_xyY(test_data, assume_nozeros=False)
assert np.allclose(ref_xyY, xyY[0, 0, :].T)
assert np.allclose(ref_xyY, xyY[0, 1, :].T)
def test_cct_duv_to_uvprime():
cct = 2900 # values from Ohno 2013
duv = 0.0200
true_u = 0.247629
true_v = 0.367808
up, vp = colorimetry.CCT_Duv_to_uvprime(cct, duv)
v = vp / 1.5
u = up
assert u == pytest.approx(true_u, rel=PRECISION, abs=PRECISION)
assert v == pytest.approx(true_v, rel=PRECISION, abs=PRECISION)
@pytest.mark.parametrize('illuminant', ['D50', 'D65'])
def test_XYZ_to_AdobeRGB_functions_for_allowed_illuminants(illuminant):
XYZ = [1, 1, 1]
assert colorimetry.XYZ_to_AdobeRGB(XYZ, illuminant).all()
assert colorimetry.XYZ_to_AdobeRGB(XYZ, illuminant).all()
@pytest.mark.parametrize('illuminant', ['F3', 'HP1', 'A', 'B', 'C', 'E'])
def test_XYZ_to_AdobeRGB_rejects_bad_illuminant(illuminant):
XYZ = [1, 1, 1]
with pytest.raises(ValueError):
colorimetry.XYZ_to_AdobeRGB(XYZ, illuminant)
@pytest.mark.parametrize('L', [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
def test_sRGB_oetf_and_reverse_oetf_cancel(L):
assert colorimetry.sRGB_reverse_oetf(colorimetry.sRGB_oetf(L)) == pytest.approx(L)
def test_plot_spectrum_functions(test_spectrum):
fig, ax = colorimetry.plot_spectrum(test_spectrum)
assert fig
assert ax
def test_plot_spectrum_works_with_smoothing(test_spectrum):
fig, ax = colorimetry.plot_spectrum(test_spectrum, smoothing=5)
def test_cie_1931_functions():
fig, ax = colorimetry.cie_1931_plot()
assert fig
assert ax
# TODO: somehow inspect that the image is only drawn over the reduced coordinates.
def test_cie_1931_with_negative_axlims_functions():
fig, ax = colorimetry.cie_1931_plot(xlim=(-0.1, 0.9), ylim=(-0.15, 0.9))
assert fig
assert ax
def test_cie_1976_functions():
fig, ax = colorimetry.cie_1976_plot()
assert fig
assert ax
def test_cie_1976_passes_plankian_locust_functions(): # TODO: assert that the locust was drawn
fig, ax = colorimetry.cie_1976_plot(draw_plankian_locust=True)
assert fig
assert ax
def test_cie_1976_plankian_locust_functions():
fig, ax = colorimetry.cie_1976_plankian_locust()
assert fig
assert ax
def test_cie_1976_plankian_locust_takes_no_isotemperature_lines():
fig, ax = colorimetry.cie_1976_plankian_locust(isotemperature_lines_at=False)
assert fig
assert ax
def test_cct_duv_diagram_functions():
fig, ax = colorimetry.cct_duv_diagram()
assert fig
assert ax
| [
"numpy.dstack",
"numpy.argmax",
"numpy.allclose",
"numpy.zeros",
"numpy.isfinite",
"numpy.searchsorted",
"pytest.raises",
"numpy.arange",
"pytest.mark.parametrize",
"pytest.approx"
] | [((1382, 1587), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""illuminant"""', "['A', 'B', 'C', 'E', 'D50', 'D55', 'D65', 'D75', 'F1', 'F2', 'F3', 'F4',\n 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12', 'HP1', 'HP2', 'HP3',\n 'HP4', 'HP5']"], {}), "('illuminant', ['A', 'B', 'C', 'E', 'D50', 'D55',\n 'D65', 'D75', 'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9',\n 'F10', 'F11', 'F12', 'HP1', 'HP2', 'HP3', 'HP4', 'HP5'])\n", (1405, 1587), False, 'import pytest\n'), ((1736, 1827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""illuminant"""', "['bb_2000', 'bb_6500', 'bb_6504', 'bb_6500.123']"], {}), "('illuminant', ['bb_2000', 'bb_6500', 'bb_6504',\n 'bb_6500.123'])\n", (1759, 1827), False, 'import pytest\n'), ((2179, 2228), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""boolean"""', '[True, False]'], {}), "('boolean', [True, False])\n", (2202, 2228), False, 'import pytest\n'), ((4166, 4219), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""illuminant"""', "['D50', 'D65']"], {}), "('illuminant', ['D50', 'D65'])\n", (4189, 4219), False, 'import pytest\n'), ((4439, 4511), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""illuminant"""', "['F3', 'HP1', 'A', 'B', 'C', 'E']"], {}), "('illuminant', ['F3', 'HP1', 'A', 'B', 'C', 'E'])\n", (4462, 4511), False, 'import pytest\n'), ((4685, 4763), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""L"""', '[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]'], {}), "('L', [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])\n", (4708, 4763), False, 'import pytest\n'), ((1886, 1908), 'numpy.arange', 'np.arange', (['(360)', '(780)', '(5)'], {}), '(360, 780, 5)\n', (1895, 1908), True, 'import numpy as np\n'), ((2585, 2610), 'numpy.searchsorted', 'np.searchsorted', (['wvl', '(400)'], {}), '(wvl, 400)\n', (2600, 2610), True, 'import numpy as np\n'), ((2622, 2647), 'numpy.searchsorted', 'np.searchsorted', (['wvl', '(700)'], {}), '(wvl, 700)\n', (2637, 2647), 
True, 'import numpy as np\n'), ((2662, 2677), 'numpy.argmax', 'np.argmax', (['vals'], {}), '(vals)\n', (2671, 2677), True, 'import numpy as np\n'), ((2951, 2976), 'numpy.searchsorted', 'np.searchsorted', (['wvl', '(560)'], {}), '(wvl, 560)\n', (2966, 2976), True, 'import numpy as np\n'), ((3069, 3091), 'numpy.arange', 'np.arange', (['(360)', '(830)', '(1)'], {}), '(360, 830, 1)\n', (3078, 3091), True, 'import numpy as np\n'), ((3276, 3290), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (3287, 3290), True, 'import numpy as np\n'), ((3302, 3316), 'numpy.isfinite', 'np.isfinite', (['Y'], {}), '(Y)\n', (3313, 3316), True, 'import numpy as np\n'), ((3328, 3342), 'numpy.isfinite', 'np.isfinite', (['Z'], {}), '(Z)\n', (3339, 3342), True, 'import numpy as np\n'), ((3583, 3594), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3591, 3594), True, 'import numpy as np\n'), ((3611, 3646), 'numpy.dstack', 'np.dstack', (['(test_X, test_Y, test_Z)'], {}), '((test_X, test_Y, test_Z))\n', (3620, 3646), True, 'import numpy as np\n'), ((3724, 3760), 'numpy.allclose', 'np.allclose', (['ref_xyY', 'xyY[0, 0, :].T'], {}), '(ref_xyY, xyY[0, 0, :].T)\n', (3735, 3760), True, 'import numpy as np\n'), ((3772, 3808), 'numpy.allclose', 'np.allclose', (['ref_xyY', 'xyY[0, 1, :].T'], {}), '(ref_xyY, xyY[0, 1, :].T)\n', (3783, 3808), True, 'import numpy as np\n'), ((551, 576), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (564, 576), False, 'import pytest\n'), ((4043, 4094), 'pytest.approx', 'pytest.approx', (['true_u'], {'rel': 'PRECISION', 'abs': 'PRECISION'}), '(true_u, rel=PRECISION, abs=PRECISION)\n', (4056, 4094), False, 'import pytest\n'), ((4111, 4162), 'pytest.approx', 'pytest.approx', (['true_v'], {'rel': 'PRECISION', 'abs': 'PRECISION'}), '(true_v, rel=PRECISION, abs=PRECISION)\n', (4124, 4162), False, 'import pytest\n'), ((4602, 4627), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4615, 4627), False, 'import 
pytest\n'), ((4881, 4897), 'pytest.approx', 'pytest.approx', (['L'], {}), '(L)\n', (4894, 4897), False, 'import pytest\n')] |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2019 HERE Europe B.V.
#
# SPDX-License-Identifier: MIT
#
###############################################################################
import json
import random
import numpy as np
from test.utils import (
BaseTestAsync,
TestFolder,
format_long_args,
len_of_struct,
len_of_struct_sorted,
flatten,
format_map_fields,
)
from qgis.core import QgsFields, QgsVectorLayer
from qgis.testing import unittest
from XYZHubConnector.xyz_qgis.layer import parser
# import unittest
# class TestParser(BaseTestAsync, unittest.TestCase):
class TestParser(BaseTestAsync):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self.similarity_threshold = 80
# ######## Parse xyz geojson -> QgsFeature
def test_parse_xyzjson(self):
folder = "xyzjson-small"
fnames = ["airport-xyz.geojson", "water-xyz.geojson"]
for fname in fnames:
self.subtest_parse_xyzjson(folder, fname)
def subtest_parse_xyzjson(self, folder, fname):
feat = list()
with self.subTest(folder=folder, fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
obj_feat = obj["features"]
fields = QgsFields()
feat = [parser.xyz_json_to_feature(ft, fields) for ft in obj_feat]
self._assert_parsed_fields(obj_feat, feat, fields)
self._assert_parsed_geom(obj_feat, feat, fields)
return feat
def _assert_parsed_fields_unorder(self, obj_feat, feat, fields):
# self._log_debug(fields.names())
# self._log_debug("debug id, json vs. QgsFeature")
# self._log_debug([o["id"] for o in obj_feat])
# self._log_debug([ft.attribute(parser.QGS_XYZ_ID) for ft in feat])
names = fields.names()
self.assertTrue(parser.QGS_XYZ_ID in names, "%s %s" % (len(names), names))
self.assertEqual(len(obj_feat), len(feat))
def _assert_parsed_fields(self, obj_feat, feat, fields):
self._assert_parsed_fields_unorder(obj_feat, feat, fields)
def msg_fields(obj):
return (
"{sep}{0}{sep}{1}"
"{sep}fields-props {2}"
"{sep}props-fields {3}"
"{sep}json {4}".format(
*tuple(
map(
lambda x: "%s %s" % (len(x), x),
[
obj_props,
fields.names(),
set(fields.names()).difference(obj_props),
set(obj_props).difference(fields.names()),
],
)
),
format_long_args(json.dumps(obj)),
sep="\n>> "
)
)
for o in obj_feat:
obj_props = list(o["properties"].keys())
self.assertLessEqual(len(obj_props), fields.size(), msg_fields(o))
self.assertTrue(set(obj_props) < set(fields.names()), msg_fields(o))
# self.assertEqual( obj_props, fields.names(), msg_fields(o)) # strict assert
def _assert_parsed_geom_unorder(self, obj_feat, feat, fields, geom_str):
for ft in feat:
geom = json.loads(
ft.geometry().asJson()
) # limited to 13 or 14 precison (ogr.CreateGeometryFromJson)
self.assertEqual(geom and geom["type"], geom_str)
def _assert_parsed_geom(self, obj_feat, feat, fields):
# both crs is WGS84
for o, ft in zip(obj_feat, feat):
geom = json.loads(
ft.geometry().asJson()
) # limited to 13 or 14 precison (ogr.CreateGeometryFromJson)
obj_geom = o["geometry"]
self.assertEqual(geom["type"], obj_geom["type"])
id_ = ft.attribute(parser.QGS_XYZ_ID)
obj_id_ = o["id"]
self.assertEqual(id_, obj_id_)
# self._log_debug(geom)
# self._log_debug(obj_geom)
# coords = obj_geom["coordinates"]
# obj_geom["coordinates"] = [round(c, 13) for c in coords]
# obj_geom["coordinates"] = [float("%.13f"%c) for c in coords]
# self.assertDictEqual(geom, obj_geom) # precision
# for c1, c2 in zip(geom["coordinates"], obj_geom["coordinates"]):
# self.assertAlmostEqual(c1, c2, places=13)
c1 = np.array(obj_geom["coordinates"])
c2 = np.array(geom["coordinates"])
if c1.shape != c2.shape:
self._log_debug(
"\nWARNING: Geometry has mismatch shape",
c1.shape,
c2.shape,
"\nOriginal geom has problem. Testing parsed geom..",
)
self.assertEqual(c2.shape[-1], 2, "parsed geom has wrong shape of coord")
continue
else:
self.assertLess(np.max(np.abs(c1 - c2)), 1e-13, "parsed geometry error > 1e-13")
# @unittest.skip("large")
def test_parse_xyzjson_large(self):
folder = "xyzjson-large"
fnames = [
"cmcs-osm-dev-building-xyz.geojson",
"cmcs-osm-dev-building-xyz-30000.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson(folder, fname)
# ######## Parse xyz geojson -> struct of geom: [fields], [[QgsFeature]]
def test_parse_xyzjson_map(self):
folder = "xyzjson-small"
fnames = [
"mixed-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map(folder, fname)
mix_fnames = [
"airport-xyz.geojson",
"water-xyz.geojson",
]
self.subtest_parse_xyzjson_mix(folder, mix_fnames)
def test_parse_xyzjson_map_similarity_0(self):
s = self.similarity_threshold
self.similarity_threshold = 0
try:
folder = "xyzjson-small"
fnames = [
"mixed-xyz.geojson",
]
for fname in fnames:
with self.subTest(
folder=folder, fname=fname, similarity_threshold=self.similarity_threshold
):
map_fields = self._parse_xyzjson_map_simple(folder, fname)
self._assert_map_fields_similarity_0(map_fields)
finally:
self.similarity_threshold = s
def test_parse_xyzjson_map_dupe_case(self):
folder = "xyzjson-small"
fnames = [
"airport-xyz.geojson",
"water-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map_dupe_case(folder, fname)
def _parse_xyzjson_map_simple(self, folder, fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
return self.subtest_parse_xyzjson_map_chunk(obj)
def subtest_parse_xyzjson_map_dupe_case(self, folder, fname):
with self.subTest(folder=folder, fname=fname):
import random
mix_case = lambda txt, idx: "".join(
[
(s.lower() if s.isupper() else s.upper()) if i == idx else s
for i, s in enumerate(txt)
]
)
new_feat = lambda ft, props: dict(ft, properties=dict(props))
n_new_ft = 2
with self.subTest(folder=folder, fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
features = obj["features"]
features[0]["properties"].update(fid=1) # test fid
lst_k = list()
lst_new_k = list()
props_ = dict(obj["features"][0]["properties"])
props_ = sorted(props_.keys())
debug_msg = ""
for k in props_:
lst_k.append(k)
for i in range(n_new_ft):
ft = dict(features[0])
props = dict(ft["properties"])
new_k = k
while new_k == k:
idx = random.randint(0, len(k) - 1)
if k == "fid":
idx = i
new_k = mix_case(k, idx)
if new_k not in lst_new_k:
lst_new_k.append(new_k)
debug_msg += format_long_args("\n", "mix_case", k, new_k, props[k], idx)
props[new_k] = props.pop(k) or ""
new_ft = new_feat(ft, props)
features.append(new_ft)
map_fields = self.subtest_parse_xyzjson_map_chunk(obj, chunk_size=1)
# assert that parser handle dupe of case insensitive prop name, e.g. name vs Name
self.assertEqual(len(map_fields), 1, "not single geom")
lst_fields = list(map_fields.values())[0]
for k in lst_k:
self.assertIn(k, lst_fields[0].names())
for k in lst_new_k:
self.assertIn(k, [parser.normal_field_name(n) for n in lst_fields[0].names()])
return
# debug
debug_msg += format_long_args("\n", lst_fields[0].names())
for k, fields in zip(lst_new_k, lst_fields[1:]):
if k.lower() in {parser.QGS_ID, parser.QGS_XYZ_ID}:
k = "{}_{}".format(
k, "".join(str(i) for i, s in enumerate(k) if s.isupper())
)
debug_msg += format_long_args("\n", k in fields.names(), k, fields.names())
# self.assertEqual(len(lst_fields), len(lst_new_k) + 1)
for k, fields in zip(lst_new_k, lst_fields[1:]):
if k.lower() in {parser.QGS_ID, parser.QGS_XYZ_ID}:
k = "{}_{}".format(
k, "".join(str(i) for i, s in enumerate(k) if s.isupper())
)
self.assertIn(
k,
fields.names(),
"len lst_fields vs. len keys: %s != %s"
% (len(lst_fields), len(lst_new_k) + 1)
+ debug_msg,
)
def subtest_parse_xyzjson_map(self, folder, fname):
with self.subTest(folder=folder, fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
self.subtest_parse_xyzjson_map_shuffle(obj)
self.subtest_parse_xyzjson_map_multi_chunk(obj)
def subtest_parse_xyzjson_mix(self, folder, fnames):
if len(fnames) < 2:
return
with self.subTest(folder=folder, fname="mix:" + ",".join(fnames)):
resource = TestFolder(folder)
lst_obj = [json.loads(resource.load(fname)) for fname in fnames]
obj = lst_obj[0]
for o in lst_obj[1:]:
obj["features"].extend(o["features"])
random.seed(0.1)
random.shuffle(obj["features"])
self.subtest_parse_xyzjson_map_shuffle(obj)
self.subtest_parse_xyzjson_map_multi_chunk(obj)
def subtest_parse_xyzjson_map_multi_chunk(self, obj, lst_chunk_size=None):
if not lst_chunk_size:
p10 = 1 + len(str(len(obj["features"])))
lst_chunk_size = [10 ** i for i in range(p10)]
with self.subTest(lst_chunk_size=lst_chunk_size):
ref_map_feat, ref_map_fields = self.do_test_parse_xyzjson_map(obj)
lst_map_fields = list()
for chunk_size in lst_chunk_size:
map_fields = self.subtest_parse_xyzjson_map_chunk(obj, chunk_size)
if map_fields is None:
continue
lst_map_fields.append(map_fields)
for map_fields, chunk_size in zip(lst_map_fields, lst_chunk_size):
with self.subTest(chunk_size=chunk_size):
self._assert_len_map_fields(map_fields, ref_map_fields)
def subtest_parse_xyzjson_map_shuffle(self, obj, n_shuffle=5, chunk_size=10):
with self.subTest(n_shuffle=n_shuffle):
o = dict(obj)
ref_map_feat, ref_map_fields = self.do_test_parse_xyzjson_map(o)
lst_map_fields = list()
random.seed(0.5)
for i in range(n_shuffle):
random.shuffle(o["features"])
map_fields = self.subtest_parse_xyzjson_map_chunk(o, chunk_size)
if map_fields is None:
continue
lst_map_fields.append(map_fields)
# self._log_debug("parsed fields shuffle", len_of_struct(map_fields))
for i, map_fields in enumerate(lst_map_fields):
with self.subTest(shuffle=i):
self._assert_len_map_fields(map_fields, ref_map_fields)
    def subtest_parse_xyzjson_map_chunk(self, obj, chunk_size=100):
        """Parse *obj* in chunks of *chunk_size* features through one shared fields map.

        Each chunk is fed to ``parser.xyz_json_to_feature_map`` with the same
        ``map_fields`` accumulator (updated in place across chunks); every
        partial parse is asserted, then the total parsed feature count is
        checked against the fixture.  Returns the accumulated ``map_fields``.
        """
        similarity_threshold = self.similarity_threshold
        with self.subTest(chunk_size=chunk_size, similarity_threshold=similarity_threshold):
            o = dict(obj)  # shallow copy so "features" can be swapped per chunk
            obj_feat = obj["features"]
            lst_map_feat = list()
            map_fields = dict()  # shared accumulator, mutated by the parser on every call
            for i0 in range(0, len(obj_feat), chunk_size):
                chunk = obj_feat[i0 : i0 + chunk_size]
                o["features"] = chunk
                map_feat, _ = parser.xyz_json_to_feature_map(o, map_fields, similarity_threshold)
                self._assert_parsed_map(chunk, map_feat, map_fields)
                lst_map_feat.append(map_feat)
                # self._log_debug("len feat", len(chunk))
                # self._log_debug("parsed feat", len_of_struct(map_feat))
                # self._log_debug("parsed fields", len_of_struct(map_fields))
            # features across all chunk parses must add up to the original count
            lst_feat = flatten([x.values() for x in lst_map_feat])
            self.assertEqual(len(lst_feat), len(obj["features"]))
            return map_fields
def do_test_parse_xyzjson_map(self, obj, similarity_threshold=None):
obj_feat = obj["features"]
# map_fields=dict()
if similarity_threshold is None:
similarity_threshold = self.similarity_threshold
map_feat, map_fields = parser.xyz_json_to_feature_map(
obj, similarity_threshold=similarity_threshold
)
self._log_debug("len feat", len(obj_feat))
self._log_debug("parsed feat", len_of_struct(map_feat))
self._log_debug("parsed fields", len_of_struct(map_fields))
self._assert_parsed_map(obj_feat, map_feat, map_fields)
return map_feat, map_fields
def _assert_len_map_fields(self, map_fields, ref, strict=False):
len_ = len_of_struct if strict else len_of_struct_sorted
self.assertEqual(
len_(map_fields),
len_(ref),
"\n".join(
[
"map_fields, ref_map_fields",
format_map_fields(map_fields),
format_map_fields(ref),
]
),
)
    def _assert_parsed_map(self, obj_feat, map_feat, map_fields):
        """Assert the parsed feature/fields maps jointly account for *obj_feat* exactly."""
        self._assert_len_map_feat_fields(map_feat, map_fields)
        self.assertEqual(
            len(obj_feat),
            sum(len(lst) for lst_lst in map_feat.values() for lst in lst_lst),
            "total len of parsed feat incorrect",
        )
        # NOTE: obj_feat order does not corresponds to that of map_feat
        # -> use unorder assert
        for geom_str in map_feat:
            for feat, fields in zip(map_feat[geom_str], map_fields[geom_str]):
                # consume obj_feat front-to-back in slices sized per feature batch
                o = obj_feat[: len(feat)]
                self._assert_parsed_fields_unorder(o, feat, fields)
                self._assert_parsed_geom_unorder(o, feat, fields, geom_str)
                obj_feat = obj_feat[len(feat) :]
def _assert_len_map_feat_fields(self, map_feat, map_fields):
self.assertEqual(map_feat.keys(), map_fields.keys())
for geom_str in map_feat:
self.assertEqual(
len(map_feat[geom_str]),
len(map_fields[geom_str]),
"len mismatch: map_feat, map_fields"
+ "\n %s \n %s" % (len_of_struct(map_feat), len_of_struct(map_fields)),
)
def _assert_map_fields_similarity_0(self, map_fields):
fields_cnt = {k: len(lst_fields) for k, lst_fields in map_fields.items()}
ref = {k: 1 for k in map_fields}
self.assertEqual(
fields_cnt,
ref,
"given similarity_threshold=0, "
+ "map_fields should have exact 1 layer/fields per geom",
)
def test_parse_xyzjson_map_large(self):
folder = "xyzjson-large"
fnames = [
"cmcs-osm-dev-building-xyz.geojson",
"cmcs-osm-dev-road-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map(folder, fname)
# ######## Parse QgsFeature -> json
def test_parse_qgsfeature(self):
# self.subtest_parse_qgsfeature("geojson-small", "airport-qgis.geojson") # no xyz_id
self.subtest_parse_qgsfeature("xyzjson-small", "airport-xyz.geojson")
self.subtest_parse_qgsfeature_2way("xyzjson-small", "airport-xyz.geojson")
self.subtest_parse_qgsfeature_livemap("xyzjson-small", "livemap-xyz.geojson")
    def subtest_parse_qgsfeature(self, folder, fname):
        """Load a fixture via a QGIS OGR layer and compare the re-serialized features to it."""
        # qgs layer load geojson -> qgs feature
        # parse feature to geojson
        # compare geojson and geojson
        with self.subTest(folder=folder, fname=fname):
            resource = TestFolder(folder)
            path = resource.fullpath(fname)
            txt = resource.load(fname)
            obj = json.loads(txt)
            vlayer = QgsVectorLayer(path, "test", "ogr")
            feat = parser.feature_to_xyz_json(
                list(vlayer.getFeatures()), is_new=True
            )  # remove QGS_XYZ_ID if exist
            self._log_debug(feat)
            self.maxDiff = None
            # no need to convert 0.0 to 0
            expected = obj
            for ft in expected["features"]:
                # strip ids and XYZ namespace metadata from the expectation
                ft.pop("id", None)
                ft["properties"].pop("@ns:com:here:xyz", None)
            for ft in feat:
                ft["properties"].pop("id", None)  # cleanup unexpected "id" field in input data
            self.assertListEqual(expected["features"], feat)
            self.assertEqual(len(expected["features"]), len(feat))
    def subtest_parse_qgsfeature_2way(self, folder, fname):
        """Two-way round trip: XYZ json -> QgsFeature -> XYZ json must reproduce the fixture."""
        # parse xyz geojson to qgs feature
        # parse feature to xyz geojson
        # compare geojson and xyz geojson
        with self.subTest(folder=folder, fname=fname, mode="2way", target="QgsFeature"):
            qgs_feat = self.subtest_parse_xyzjson(folder, fname)
        with self.subTest(folder=folder, fname=fname, mode="2way", target="XYZ Geojson"):
            resource = TestFolder(folder)
            txt = resource.load(fname)
            obj = json.loads(txt)
            expected = obj
            feat = parser.feature_to_xyz_json(qgs_feat)
            self._log_debug(feat)
            self.maxDiff = None
            # no need to convert 0.0 to 0
            for ft in expected["features"]:
                ft["properties"].pop("@ns:com:here:xyz", None)
            self.assertListEqual(expected["features"], feat)
            self.assertEqual(len(expected["features"]), len(feat))
            # second pass with is_new=True must also match once ids are stripped
            feat = parser.feature_to_xyz_json(qgs_feat, is_new=True)
            self._log_debug(feat)
            for ft in expected["features"]:
                ft.pop("id", None)
            self.assertListEqual(expected["features"], feat)
            self.assertEqual(len(expected["features"]), len(feat))
    def subtest_parse_qgsfeature_livemap(self, folder, fname):
        """Parse livemap QgsFeatures and compare against a fixture rewritten with mom:delta metadata.

        The expectation is built by mutating the raw fixture the way the livemap
        serializer is expected to: dropping momType, special "@..." and None
        properties, and injecting an ``@ns:com:here:mom:delta`` block.
        Geometry is compared separately with a 1e-13 numeric tolerance.
        """
        # test parse livemap qgsfeature
        with self.subTest(folder=folder, fname=fname, mode="livemap", target="QgsFeature"):
            qgs_feat = self.subtest_parse_xyzjson(folder, fname)
        with self.subTest(folder=folder, fname=fname, mode="livemap", target="XYZ Geojson"):
            resource = TestFolder(folder)
            txt = resource.load(fname)
            obj = json.loads(txt)
            feat = parser.feature_to_xyz_json(qgs_feat, is_livemap=True)
            self.maxDiff = None
            expected = obj
            for ft in expected["features"]:
                ft.pop("momType", None)
                props = ft["properties"]
                # features that already carry a delta block count as updates
                changeState = "UPDATED" if "@ns:com:here:mom:delta" in props else "CREATED"
                delta = {
                    "reviewState": "UNPUBLISHED",
                    "changeState": changeState,
                    "taskGridId": "",
                }
                if ft.get("id"):
                    delta.update({"originId": ft.get("id")})
                ignored_sepcial_keys = [k for k in props.keys() if k.startswith("@")]
                ignored_keys = [k for k, v in props.items() if v is None]
                for k in ignored_sepcial_keys + ignored_keys:
                    props.pop(k, None)
                props.update({"@ns:com:here:mom:delta": delta})
            # pull geometries out of both sides; they are asserted numerically below
            lst_coords_ref = [
                ft.pop("geometry", dict()).get("coordinates", list())
                for ft in expected["features"]
            ]
            lst_coords = [ft.pop("geometry", dict()).get("coordinates", list()) for ft in feat]
            self.assertListEqual(expected["features"], feat)
            self.assertEqual(len(expected["features"]), len(feat))
            # self.assertEqual(flatten(lst_coords_ref), flatten(lst_coords))
            for coords_ref, coords in zip(lst_coords_ref, lst_coords):
                self.assertLess(
                    np.max(np.abs(np.array(coords_ref) - np.array(coords))),
                    1e-13,
                    "parsed geometry error > 1e-13",
                )
def test_parse_qgsfeature_large(self):
pass
if __name__ == "__main__":
    # Run a hand-picked subset of tests; uncomment entries below to broaden the run.
    # unittest.main()
    tests = [
        # "TestParser.test_parse_xyzjson",
        "TestParser.test_parse_xyzjson_map_similarity_0",
        # "TestParser.test_parse_xyzjson_map",
        # "TestParser.test_parse_xyzjson_map_dupe_case",
        # "TestParser.test_parse_xyzjson_large",
        # "TestParser.test_parse_xyzjson_map_large",
    ]
    # unittest.main(defaultTest = tests, failfast=True) # will not run all subtest
    unittest.main(defaultTest=tests)
| [
"qgis.testing.unittest.main",
"qgis.core.QgsVectorLayer",
"numpy.abs",
"json.loads",
"XYZHubConnector.xyz_qgis.layer.parser.normal_field_name",
"random.shuffle",
"test.utils.len_of_struct",
"XYZHubConnector.xyz_qgis.layer.parser.xyz_json_to_feature",
"test.utils.format_map_fields",
"test.utils.Tes... | [((22930, 22962), 'qgis.testing.unittest.main', 'unittest.main', ([], {'defaultTest': 'tests'}), '(defaultTest=tests)\n', (22943, 22962), False, 'from qgis.testing import unittest\n'), ((7007, 7025), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (7017, 7025), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((7075, 7090), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (7085, 7090), False, 'import json\n'), ((14710, 14788), 'XYZHubConnector.xyz_qgis.layer.parser.xyz_json_to_feature_map', 'parser.xyz_json_to_feature_map', (['obj'], {'similarity_threshold': 'similarity_threshold'}), '(obj, similarity_threshold=similarity_threshold)\n', (14740, 14788), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((1232, 1250), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (1242, 1250), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((1309, 1324), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (1319, 1324), False, 'import json\n'), ((1385, 1396), 'qgis.core.QgsFields', 'QgsFields', ([], {}), '()\n', (1394, 1396), False, 'from qgis.core import QgsFields, QgsVectorLayer\n'), ((4638, 4671), 'numpy.array', 'np.array', (["obj_geom['coordinates']"], {}), "(obj_geom['coordinates'])\n", (4646, 4671), True, 'import numpy as np\n'), ((4689, 4718), 'numpy.array', 'np.array', (["geom['coordinates']"], {}), "(geom['coordinates'])\n", (4697, 4718), True, 'import numpy as np\n'), ((10827, 10845), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (10837, 10845), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((10903, 10918), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (10913, 10918), 
False, 'import json\n'), ((11238, 11256), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (11248, 11256), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((11463, 11479), 'random.seed', 'random.seed', (['(0.1)'], {}), '(0.1)\n', (11474, 11479), False, 'import random\n'), ((11492, 11523), 'random.shuffle', 'random.shuffle', (["obj['features']"], {}), "(obj['features'])\n", (11506, 11523), False, 'import random\n'), ((12779, 12795), 'random.seed', 'random.seed', (['(0.5)'], {}), '(0.5)\n', (12790, 12795), False, 'import random\n'), ((14902, 14925), 'test.utils.len_of_struct', 'len_of_struct', (['map_feat'], {}), '(map_feat)\n', (14915, 14925), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((14968, 14993), 'test.utils.len_of_struct', 'len_of_struct', (['map_fields'], {}), '(map_fields)\n', (14981, 14993), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((18082, 18100), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (18092, 18100), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((18202, 18217), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (18212, 18217), False, 'import json\n'), ((18240, 18275), 'qgis.core.QgsVectorLayer', 'QgsVectorLayer', (['path', '"""test"""', '"""ogr"""'], {}), "(path, 'test', 'ogr')\n", (18254, 18275), False, 'from qgis.core import QgsFields, QgsVectorLayer\n'), ((19407, 19425), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (19417, 19425), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), 
((19483, 19498), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (19493, 19498), False, 'import json\n'), ((19546, 19582), 'XYZHubConnector.xyz_qgis.layer.parser.feature_to_xyz_json', 'parser.feature_to_xyz_json', (['qgs_feat'], {}), '(qgs_feat)\n', (19572, 19582), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((19947, 19996), 'XYZHubConnector.xyz_qgis.layer.parser.feature_to_xyz_json', 'parser.feature_to_xyz_json', (['qgs_feat'], {'is_new': '(True)'}), '(qgs_feat, is_new=True)\n', (19973, 19996), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((20617, 20635), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (20627, 20635), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((20693, 20708), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (20703, 20708), False, 'import json\n'), ((20729, 20782), 'XYZHubConnector.xyz_qgis.layer.parser.feature_to_xyz_json', 'parser.feature_to_xyz_json', (['qgs_feat'], {'is_livemap': '(True)'}), '(qgs_feat, is_livemap=True)\n', (20755, 20782), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((1417, 1455), 'XYZHubConnector.xyz_qgis.layer.parser.xyz_json_to_feature', 'parser.xyz_json_to_feature', (['ft', 'fields'], {}), '(ft, fields)\n', (1443, 1455), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((7709, 7727), 'test.utils.TestFolder', 'TestFolder', (['folder'], {}), '(folder)\n', (7719, 7727), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((7793, 7808), 'json.loads', 'json.loads', (['txt'], {}), '(txt)\n', (7803, 7808), False, 'import json\n'), ((12851, 12880), 'random.shuffle', 'random.shuffle', (["o['features']"], {}), "(o['features'])\n", (12865, 12880), False, 'import random\n'), ((13882, 13949), 
'XYZHubConnector.xyz_qgis.layer.parser.xyz_json_to_feature_map', 'parser.xyz_json_to_feature_map', (['o', 'map_fields', 'similarity_threshold'], {}), '(o, map_fields, similarity_threshold)\n', (13912, 13949), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((2930, 2945), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (2940, 2945), False, 'import json\n'), ((15421, 15450), 'test.utils.format_map_fields', 'format_map_fields', (['map_fields'], {}), '(map_fields)\n', (15438, 15450), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((15472, 15494), 'test.utils.format_map_fields', 'format_map_fields', (['ref'], {}), '(ref)\n', (15489, 15494), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((5175, 5190), 'numpy.abs', 'np.abs', (['(c1 - c2)'], {}), '(c1 - c2)\n', (5181, 5190), True, 'import numpy as np\n'), ((8761, 8820), 'test.utils.format_long_args', 'format_long_args', (['"""\n"""', '"""mix_case"""', 'k', 'new_k', 'props[k]', 'idx'], {}), "('\\n', 'mix_case', k, new_k, props[k], idx)\n", (8777, 8820), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((9461, 9488), 'XYZHubConnector.xyz_qgis.layer.parser.normal_field_name', 'parser.normal_field_name', (['n'], {}), '(n)\n', (9485, 9488), False, 'from XYZHubConnector.xyz_qgis.layer import parser\n'), ((16677, 16700), 'test.utils.len_of_struct', 'len_of_struct', (['map_feat'], {}), '(map_feat)\n', (16690, 16700), False, 'from test.utils import BaseTestAsync, TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((16702, 16727), 'test.utils.len_of_struct', 'len_of_struct', (['map_fields'], {}), '(map_fields)\n', (16715, 16727), False, 'from test.utils import BaseTestAsync, 
TestFolder, format_long_args, len_of_struct, len_of_struct_sorted, flatten, format_map_fields\n'), ((22266, 22286), 'numpy.array', 'np.array', (['coords_ref'], {}), '(coords_ref)\n', (22274, 22286), True, 'import numpy as np\n'), ((22289, 22305), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (22297, 22305), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Need to randomize drones and their locations
Need to iterate over different heuristics
Need to log each iteration
Need to keep track of success rate over iteration
"""
import sys
import numpy as np
import math
import random
from scipy import spatial
from queue import PriorityQueue
import matplotlib.pyplot as plt
"""UTM drones"""
#import Drone
import UTMDatabase
import PathFinding
"""multithreading and garbage collection processes"""
import time
import gc
import multiprocessing
"""Python Loggin packages"""
import os
import logging
import csv
import io
#import pandas as pd
"""this will be part of the homebase class as attributes"""
# Outer world bounds for drone positions
MAX_X = 200
MAX_Y = 200
MIN_X = -100
MIN_Y = -100
# Inner bounds — presumably the 50x50 planning grid around the base (see HomeBase.GRID_X/Y); confirm
INNER_MIN_X = 0
INNER_MIN_Y = 0
INNER_MAX_X = 49
INNER_MAX_Y = 49
class QuadCopter():
    """
    A simulated quadcopter tracked by the UTM landing services.

    Attributes
    --------------
    id : str
        the id number or name of the UAV
    home_position : list[int]
        home/base coordinates of the quadcopter
    loiter_position : list[int]
        where the UAV loiters while awaiting service
    current_position : list[int]
        current coordinates; mutated in place as the UAV flies
    want_service : bool
        whether the UAV is requesting the landing service
    zone_index : str or None
        name of the landing zone assigned by PreLandingService
    goal : tuple or None
        coordinates of the assigned landing zone
    path : list[tuple] or None
        waypoint path assigned after a landing zone is chosen
    service_state : int or None
        pipeline stage 0,1,2,3 consumed by the *Service classes
    """
    # NOTE(review): speed_vector appears unused in this file — confirm before removing
    speed_vector = [0.5]
    k = 0.85  # proportional gain applied by apply_pid
    def __init__(self, id_name ,home_position, loiter_position, curr_position, want_service):
        self.id = id_name
        self.home_position = home_position
        self.loiter_position = loiter_position
        self.current_position = curr_position
        self.want_service = want_service
        self.zone_index = None
        self.path = None
        self.goal = None
        self.service_state = None
        self.distance_from_base = None
        self.wp_index = 0
        self.path_home = None
        self.mission_success = None # true or false based on success rate of mission
        self.weighted_heuristics = None #[min, max ] weighted scale
        self.sim_num = None
    def get_uav_state(self):
        """Return the current service state (0-3 or None)."""
        return self.service_state
    def __lt__(self, other):
        # ordering used by priority queues: closer to base sorts first
        return self.distance_from_base < other.distance_from_base
    def apply_pid(self, current_position,wp):
        """Return the proportional step k * (wp - current_position)."""
        error = np.array(wp) - np.array(current_position)
        return error*self.k
    def update_service_state(self, state_num):
        """mutator"""
        self.service_state = state_num
    def set_zone_index(self, zone_index):
        """mutator"""
        self.zone_index = zone_index
    def set_path(self, path):
        """mutator to assign path list"""
        self.path = path
    def set_path_home(self, path_home):
        """mutator to assign the return-home path"""
        self.path_home = path_home
    def set_goal(self, goal):
        """mutator to assign goal[x,y,z] point"""
        self.goal = goal
    def set_mission_success(self, true_or_false):
        """mutator"""
        self.mission_success = true_or_false
    def begin_charging(self):
        # placeholder: only announces that charging started
        print("I'm charging", self.id)
    def set_distance_from_base(self, distance_from_base):
        """mutator"""
        self.distance_from_base = distance_from_base
    def set_heuristics(self, weighted_heuristics):
        """mutator; weighted_heuristics is a [min, max] pair"""
        self.weighted_heuristics = weighted_heuristics
    def get_service_state(self):
        """Return the current service state (duplicate of get_uav_state)."""
        return self.service_state
    def set_sim_num(self, sim_num):
        """mutator"""
        self.sim_num = sim_num
    def to_dict(self):
        """Serialize the UAV's mission record for logging."""
        return {
            'uav_id': self.id,
            'uav_home': self.home_position,
            'loiter_position': self.loiter_position,
            'path_to_target': self.path,
            'path_to_home': self.path_home,
            'zone assigned': self.zone_index,
            'zone location': self.goal,
            'min_max_weighted_heuristics': self.weighted_heuristics,
            'sim_num': self.sim_num,
            'mission success?' : self.mission_success
        }
    def go_to_wp(self, current_position, wp):
        """
        Fly the drone toward waypoint *wp* by repeatedly applying the P-controller.

        NOTE(review): termination relies on tuple(self.current_position)
        comparing *exactly* equal to wp once the error underflows to zero in
        floating point — confirm whether a distance tolerance is needed.
        """
        while self.current_position != wp:
            # first step uses the passed-in position; later steps recompute from self
            gain = self.apply_pid(current_position, wp)
            current_position = np.array(self.current_position) + gain
            self.current_position = list(current_position)
            if tuple(self.current_position) == wp:
                self.update_position(wp)
                #print("arrived to wp", self.current_position)
                break
    def update_position(self, new_position):
        """mutator"""
        self.current_position = new_position
class HomeBase():
    """Ground station: fixed grid dimensions plus the landing-zone database."""
    GRID_Z = 50
    GRID_X = 50
    GRID_Y = 50
    STATIC_OBSTACLE = [(45,15)]
    BASE_LOCATION = [25,25] # base (x, y)
    def __init__(self):
        self.ZONE_DB = self.generate_landing_zones()
    def generate_landing_zones(self):
        """Build the name -> LandingZone mapping for the four fixed pads."""
        pad_coords = [(20, 20, 5), (20,30, 5), (30, 20, 5), (30, 30, 5)]
        return {
            "Zone_" + str(num): LandingZone("Zone_" + str(num), coords, True)
            for num, coords in enumerate(pad_coords)
        }
    def check_open_zones(self):
        """Return True when at least one landing zone is vacant."""
        return any(zone.vacant == True for zone in self.ZONE_DB.values())
    def get_open_zones(self):
        """Return the LandingZone objects that are currently vacant."""
        return [zone for zone in self.ZONE_DB.values() if zone.vacant == True]
    def get_closed_zones(self):
        """Return the LandingZone objects that are currently occupied."""
        return [zone for zone in self.ZONE_DB.values() if zone.vacant == False]
    def get_open_zone_locations(self, open_zone_list):
        """Map a list of zones to their coordinate tuples."""
        return [zone.zone_coordinates for zone in open_zone_list]
    def set_landing_zone_vacancy(self, zone_key, yes_or_no):
        """Flag the named zone vacant (True) or occupied (False)."""
        #print("updated vacancy for", zone_key, yes_or_no)
        self.ZONE_DB[zone_key].set_vacancy(yes_or_no)
class LandingZone():
    """A single landing pad: a name, an (x, y, z) location, and a vacancy flag."""
    def __init__(self, zone_name, zone_coordinates, yes_or_no):
        self.zone_name = zone_name  # identifier such as "Zone_0"
        self.zone_coordinates = zone_coordinates  # (x, y, z) pad location
        self.vacant = yes_or_no  # True while no UAV is assigned here
    def get_zone_name(self):
        """Return the zone's identifier."""
        return self.zone_name
    def get_zone_coordinates(self):
        """Return the zone's (x, y, z) coordinates."""
        return self.zone_coordinates
    def set_vacancy(self, yes_or_no):
        """Mark the zone vacant (True) or occupied (False)."""
        self.vacant = yes_or_no
class PreLandingService():
    """Assigns open landing zones to requesting UAVs and plans A* paths to them.

    Planning happens on a 3-D occupancy grid sized from the HomeBase; other
    UAVs, their already-planned paths, and the unassigned zones are added as
    dynamic no-fly obstacles before each search.
    """
    def __init__(self, homeBase, landing_db, min_h, max_h):
        self.homeBase = homeBase            # HomeBase: grid dims + zone database
        self.landingDb = landing_db         # dict: uav id -> QuadCopter
        self.uav_service_state_num = 0      # state meaning "requests pre-landing service"
        self.min_h = min_h                  # passed straight to PathFinding.Astar (heuristic bound — confirm)
        self.max_h = max_h                  # passed straight to PathFinding.Astar (heuristic bound — confirm)
        self.uav_priority = PriorityQueue() # NOTE(review): never used in this class — confirm
    def generate_grid(self):
        """Return a zero-filled occupancy grid shaped (GRID_Z, GRID_X, GRID_Y)."""
        grid = []
        grid = np.zeros((self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y))
        return grid
    def generate_3d_obstacles(self, obstacle_list, height):
        """Extrude (x, y) obstacle cells into vertical columns of (x, y, z) cells.

        NOTE(review): *height* is ignored — the column height is hard-coded
        to 25 levels; confirm intent.
        """
        obstacle_3d_list = []
        for static_obstacle in obstacle_list:
            x = static_obstacle[0]
            y = static_obstacle[1]
            for z in range(25):
                obstacle_3d_list.append((x,y,z))
        return obstacle_3d_list
    def add_obstacles(self,grid, obstacle_list):
        """Mark each in-bounds (x, y, z) obstacle as 1 in grid[z, x, y]; return the list unchanged."""
        for obstacle in obstacle_list:
            # NOTE(review): guard compares obstacle[1] to GRID_X and obstacle[0] to
            # GRID_Y while the write uses [z, x, y] = [obs[2], obs[0], obs[1]];
            # this only lines up because GRID_X == GRID_Y — confirm intent
            if obstacle[2] >= self.homeBase.GRID_Z or \
                obstacle[1] >= self.homeBase.GRID_X or\
                    obstacle[0] >= self.homeBase.GRID_Y:
                continue
            (grid[int(obstacle[2]),int(obstacle[0]), int(obstacle[1])]) = 1
        return obstacle_list
    def return_unassigned_list(self,some_list, index):
        """Return all other zones or uavs not assigned to this uav (element at *index* removed).

        NOTE(review): despite the local name, this pops from the *same* list
        object it is given (no copy) — callers pass slices to avoid mutation.
        """
        copy = some_list
        copy.pop(index)
        return copy
    def get_dynamic_obstacles(self, idx, uav_path_obs, obstacle_list, zone_locations, \
        zone_idx, path_list, uav_loc_list, grid):
        """Build the obstacle set for UAV #idx and stamp it into a copy of *grid*.

        For the first UAV only the unassigned zones are blocked; afterwards the
        previous UAV's path, the other UAVs, and all prior paths are added too.
        Returns (grid_copy, obstacle_cells).
        """
        #should be a function to make dynamic obstacles
        if idx == 0:
            new_obstacle = obstacle_list + \
                self.return_unassigned_list(zone_locations[:], zone_idx)
        else:
            # accumulate the previously planned path as a no-fly corridor
            uav_path_obs.append(path_list[idx-1])
            flat_list = [item for sublist in uav_path_obs for item in sublist]
            new_obstacle = obstacle_list + \
                self.return_unassigned_list(zone_locations[:], zone_idx) + \
                self.return_unassigned_list(uav_loc_list[:], idx) + flat_list
        grid_copy = grid.copy()
        new_obstacle = self.add_obstacles(grid_copy, new_obstacle)
        return grid_copy, new_obstacle
    def find_closest_zone(self, uav_loc, landing_zones):
        """Return (distance, index into *landing_zones*) of the zone nearest to *uav_loc*."""
        tree = spatial.KDTree(landing_zones)
        dist,zone_index = tree.query(uav_loc)
        return dist, zone_index
    def check_uav_service_state(self, uav_service_num):
        """Return True if the UAV requests this service; otherwise returns None (no explicit False)."""
        if uav_service_num == self.uav_service_state_num:
            return True
    def assign_uav_zone(self,uav, landing_zone):
        """Bind the UAV to *landing_zone*: sets its zone name and goal coordinates."""
        uav.set_zone_index(landing_zone.get_zone_name())
        uav.set_goal(landing_zone.get_zone_coordinates())
    def compute_vectors(self,point_1, point_2, point_3):
        """Return the two segment vectors (p2-p1, p3-p2) for collinearity testing."""
        vector_start = np.array(point_2)- np.array(point_1)
        vector_end = np.array(point_3) - np.array(point_2)
        return vector_start, vector_end
    def compute_cross_product(self,vector_1, vector_2):
        """Cross product of two segment vectors; all-zero means the points are collinear."""
        return np.cross(vector_1, vector_2)
    def reduce_waypoints(self,waypoint_list):
        """Drop collinear intermediate waypoints from *waypoint_list*.

        NOTE(review): returns early (with the second-to-last waypoint appended)
        once i reaches len-2, and appends both endpoints of every non-collinear
        triple, which can duplicate waypoints — confirm this is intended.
        """
        filtered_waypoints = []
        for i, waypoint in enumerate(waypoint_list):
            if i+2 - len(waypoint_list) == 0:
                filtered_waypoints.append(waypoint_list[i+1])
                """might want to append last waypoint value to new list"""
                return filtered_waypoints
            vec_start, vec_end = self.compute_vectors(waypoint, waypoint_list[i+1], waypoint_list[i+2])
            cross_product = self.compute_cross_product(vec_start, vec_end)
            if (cross_product[0] == 0 and cross_product[1] == 0
                    and cross_product[2] == 0):
                continue
            else:
                #print("not collinear")
                filtered_waypoints.append(waypoint)
                filtered_waypoints.append(waypoint_list[i+2])
        return filtered_waypoints
    def find_waypoints(self, grid_copy, new_obstacle, uav):
        """Run A* from the UAV's position to its goal; return (raw_wp, filtered_wp) or (None, None)."""
        uav_loc = uav.current_position
        goal_point = uav.goal
        astar = PathFinding.Astar(grid_copy, new_obstacle, uav_loc, goal_point, \
            self.min_h, self.max_h)
        uav_wp = astar.main()
        if uav_wp:
            uav_filtered_wp = self.reduce_waypoints(uav_wp)
        else:
            uav_wp = None
            uav_filtered_wp = None
        return uav_wp, uav_filtered_wp
    def return_uav_loc_list(self):
        """Return the positions of all UAVs not yet fully serviced (service state != 3)."""
        uav_loc_list = []
        for uav_id, uav in self.landingDb.items():
            """check uav service state"""
            if uav.service_state == 3:
                continue
            else:
                uav_loc_list.append(uav.current_position)
        return uav_loc_list
    def get_uavs_requesting_wps(self):
        """Return UAVs in the request state that do not yet have a path assigned."""
        uav_request_wp = []
        for uav_id, uav in self.landingDb.items():
            if uav.service_state == self.uav_service_state_num and uav.path == None:
                uav_request_wp.append(uav)
        return uav_request_wp
    def main(self):
        """Assign a zone and plan a path for every requesting UAV.

        Returns False as soon as any path search fails; otherwise returns None.
        """
        grid = self.generate_grid()
        obstacle_list = self.generate_3d_obstacles(HomeBase.STATIC_OBSTACLE,15)
        obstacle_list = self.add_obstacles(grid, obstacle_list)
        uav_loc_list = self.return_uav_loc_list()
        idx = 0
        uav_path_obs = []
        path_list = []
        uav_request_wp = self.get_uavs_requesting_wps()
        for uav in uav_request_wp:
            if self.homeBase.check_open_zones() == True:
                #print("path list is", path_list)
                print("controlling uav", uav.id)
                """assign uav to landing zone"""
                open_zones = self.homeBase.get_open_zones()
                open_zones_locs = self.homeBase.get_open_zone_locations(open_zones)
                dist, zone_index = self.find_closest_zone(uav.current_position, open_zones_locs)
                self.assign_uav_zone(uav, open_zones[zone_index])
                self.homeBase.set_landing_zone_vacancy(open_zones[zone_index].zone_name, False)
                """generate dynamic obstacles"""
                grid_copy, new_obstacle = self.get_dynamic_obstacles(idx, uav_path_obs, obstacle_list, open_zones_locs, \
                    zone_index, path_list, uav_loc_list, grid)
                """get waypoints to arrive to landing zone"""
                uav_wp, uav_filtered_wp= self.find_waypoints(grid_copy, new_obstacle, uav)
                if uav_wp:
                    uav.set_path(uav_wp)
                    """set new obstacles"""
                    path_list.append(uav_wp)
                    idx += 1
                else:
                    uav_wp = []
                    print("Failed to find path", uav.id)
                    return False
            else:
                #print("no open zones")
                continue
class PathSenderService():
    """Fly each waiting UAV along its planned waypoint path toward its zone."""
    def __init__(self, homeBase,landing_db):
        self.landingDb = landing_db
        self.search_service_number = 0  # state meaning "has a path, waiting to fly it"
        self.update_service_number = 1  # state assigned once the path has been flown
        self.homeBase = homeBase
    def is_arrived_to_zone(self,waypoint_coords, uav_curr_location):
        """Return True when the UAV is within 1.0 unit (2-D distance) of the waypoint."""
        target = np.array(waypoint_coords)
        position = np.array(uav_curr_location)
        dist = math.sqrt((target[0] - position[0])**2 + (target[1] - position[1])**2)
        return dist <= 1.0
    def get_uavs_with_wps(self):
        """Collect UAVs that are in the search state and already hold a path."""
        return [
            uav
            for uav in self.landingDb.values()
            if uav.service_state == self.search_service_number and uav.path != None
        ]
    def send_wp_commands(self, uavs_with_wp_list, uav):
        """Fly *uav* through every waypoint; on the last one, bump its state and free its zone."""
        waypoint_list = uav.path
        total = len(waypoint_list)
        for step, wp in enumerate(waypoint_list, start=1):
            uav.go_to_wp(uav.current_position, wp)
            if step > total - 1:
                # final waypoint reached: advance the service state, re-issue the
                # last waypoint, and mark the landing zone vacant again
                uav.update_service_state(self.update_service_number)
                uav.go_to_wp(uav.current_position, waypoint_list[-1])
                self.homeBase.set_landing_zone_vacancy(uav.zone_index, True)
                break
    def main(self):
        """Send waypoint commands to every UAV currently waiting with a path."""
        pending = self.get_uavs_with_wps()
        if not pending:
            # nothing to do this cycle
            return
        for uav in pending[:]:
            self.send_wp_commands(pending, uav)
            if not pending:
                print("list is empty")
                break
class LandingStateService():
    """Transition UAVs from "arrived at zone" (state 1) to "landed" (state 2).

    Scans the landing database for UAVs in the search state, appends a
    touchdown waypoint at each UAV's assigned zone, and flies it down.
    """
    search_service_number = 1  # UAVs in this state are ready to land
    update_service_number = 2  # state assigned once the UAV has touched down
    def __init__(self, landing_db):
        # landing_db: dict mapping uav id -> UAV object
        self.landingDb = landing_db
    def get_uavs_requesting_land(self):
        """Return the UAVs whose service state requests landing."""
        return [
            uav
            for uav in self.landingDb.values()
            if uav.service_state == self.search_service_number
        ]
    def allow_land(self, uavs_landing, uav):
        """Append a ground-level waypoint at the UAV's goal and land it there."""
        landing_spot = [uav.goal[0], uav.goal[1], 0]
        uav.path.append(tuple(landing_spot))
        uav.go_to_wp(uav.current_position, landing_spot)
        uav.update_service_state(self.update_service_number)
        #print("uav has landed")
    def main(self):
        """Land every UAV currently requesting it; returns None.

        BUGFIX: the previous version passed the *return value* of an already
        executed ``self.allow_land(...)`` call to ``multiprocessing.Process``
        (landing on its ``group`` argument), so each landing ran synchronously
        anyway and an empty do-nothing process was spawned per UAV.  The
        pointless process machinery is removed; the synchronous landing
        behavior is preserved.
        """
        uavs_landing = self.get_uavs_requesting_land()
        if not uavs_landing:
            return None
        for uav in uavs_landing[:]:
            self.allow_land(uavs_landing, uav)
        return None
class PostFlightService():
    """Plans each departing uav's flight home.

    Builds a 3-D occupancy grid of the airspace, stamps static and dynamic
    obstacles into it, runs Astar from the uav's current position back to
    its loiter point, and stores the resulting path (plus the home
    position) on the uav.  Structurally similar to the pre-flight class,
    but in the reverse direction.
    """
    # uavs in state 2 are awaiting departure; state 3 means a home path is set.
    search_service_number = 2
    update_service_number = 3
    def __init__(self, homeBase, landing_db, min_h, max_h):
        # min_h / max_h are the Astar heuristic weight bounds.
        self.homeBase = homeBase
        self.landingDb = landing_db
        self.uav_service_state_num = 0
        self.min_h = min_h
        self.max_h = max_h
    def generate_grid(self):
        """Return an empty 3-D occupancy grid sized (GRID_Z, GRID_X, GRID_Y)
        from the home base's configuration."""
        grid = []
        grid = np.zeros((self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y))
        return grid
    def generate_3d_obstacles(self, obstacle_list, height):
        """Extrude each 2-D obstacle (x, y) into a vertical column of
        (x, y, z) cells, e.g. trees and buildings.

        NOTE(review): the column height is hard-coded to 25 cells and the
        `height` parameter is ignored — confirm which is intended.
        """
        obstacle_3d_list = []
        for static_obstacle in obstacle_list:
            x = static_obstacle[0]
            y = static_obstacle[1]
            for z in range(25):
                obstacle_3d_list.append((x,y,z))
        return obstacle_3d_list
    def add_obstacles(self,grid, obstacle_list):
        """Mark every obstacle cell as occupied (1) in the grid, in place,
        and return the obstacle list unchanged."""
        for obstacle in obstacle_list:
            # The grid is indexed (z, x, y).
            (grid[int(obstacle[2]),int(obstacle[0]), int(obstacle[1])]) = 1
        return obstacle_list
    def return_unassigned_list(self,some_list, index):
        """Drop entry `index` and return the remaining entries (the other
        uavs' positions, treated as no-fly zones for this uav).

        NOTE(review): `some_list` is mutated in place by pop(); callers
        pass a slice copy (uav_loc_list[:]) to protect the source list.
        """
        #copy = [int(i) for i in some_list]
        copy = some_list
        copy.pop(index)
        return copy
    def get_dynamic_obstacles(self, idx, uav_path_obs, obstacle_list, \
        path_list, uav_loc_list, grid):
        """Combine the static obstacles with the other uavs' positions and
        the flattened waypoints of earlier uavs' planned paths, then stamp
        them into a fresh copy of the grid.

        Returns (grid_copy, new_obstacle) for the idx-th uav.
        """
        #should be a function to make dynamic obstacles
        if idx == 0:
            # The first uav only has to avoid the static obstacles.
            new_obstacle = obstacle_list
        else:
            if len(uav_path_obs) < idx:
                new_obstacle = obstacle_list + \
                    self.return_unassigned_list(uav_loc_list[:], idx)
            else:
                # Also avoid every waypoint of the previously planned paths.
                uav_path_obs.append(path_list[idx-1])
                flat_list = [item for sublist in uav_path_obs for item in sublist]
                new_obstacle = obstacle_list + \
                    self.return_unassigned_list(uav_loc_list[:], idx) + flat_list
        grid_copy = grid.copy()
        new_obstacle = self.add_obstacles(grid_copy, new_obstacle)
        return grid_copy, new_obstacle
    def compute_vectors(self,point_1, point_2, point_3):
        # Direction vectors of the two consecutive path segments
        # point_1 -> point_2 and point_2 -> point_3.
        vector_start = np.array(point_2)- np.array(point_1)
        vector_end = np.array(point_3) - np.array(point_2)
        return vector_start, vector_end
    def compute_cross_product(self,vector_1, vector_2):
        # A zero cross product means the two segments are collinear.
        return np.cross(vector_1, vector_2)
    def reduce_waypoints(self,waypoint_list):
        """Remove collinear intermediate waypoints from a path.

        Walks consecutive waypoint triples; direction changes keep their
        endpoints.  When the second-to-last index is reached the final
        waypoint is appended and the filtered list is returned early.

        NOTE(review): a single-element waypoint_list would raise IndexError
        at waypoint_list[i+1] — confirm Astar always returns >= 2 points.
        """
        filtered_waypoints = []
        for i, waypoint in enumerate(waypoint_list):
            if i+2 - len(waypoint_list) == 0:
                filtered_waypoints.append(waypoint_list[i+1])
                """might want to append last waypoint value to new list"""
                return filtered_waypoints
            vec_start, vec_end = self.compute_vectors(waypoint, waypoint_list[i+1], waypoint_list[i+2])
            cross_product = self.compute_cross_product(vec_start, vec_end)
            if (cross_product[0] == 0 and cross_product[1] == 0
                    and cross_product[2] == 0):
                continue
            else:
                filtered_waypoints.append(waypoint)
                filtered_waypoints.append(waypoint_list[i+2])
        return filtered_waypoints
    def find_waypoints(self, grid_copy, new_obstacle, uav):
        """Run Astar from the uav's current position to its loiter point.

        Returns (raw_waypoints, filtered_waypoints); both are None when no
        path could be found.
        """
        uav_loc = uav.current_position
        goal_point = uav.loiter_position
        astar = PathFinding.Astar(grid_copy, new_obstacle, uav_loc, goal_point,\
            self.min_h, self.max_h)
        uav_wp = astar.main()
        if uav_wp:
            uav_filtered_wp = self.reduce_waypoints(uav_wp)
        else:
            uav_wp = None
            uav_filtered_wp = None
        return uav_wp, uav_filtered_wp
    def return_uav_loc_list(self):
        """Return the current positions of every uav awaiting departure."""
        uav_loc_list = []
        for uav_id, uav in self.landingDb.items():
            if uav.service_state == self.search_service_number:
                uav_loc_list.append(uav.current_position)
        return uav_loc_list
    def get_uavs_requesting_departure(self):
        # uavs whose service state marks them as ready to fly home.
        uavs_departure = []
        for uav_id, uav in self.landingDb.items():
            if uav.service_state == self.search_service_number:
                uavs_departure.append(uav)
        return uavs_departure
    def hover_up(self, uav, height_z):
        """Command the uav to climb vertically to height_z at its current
        x/y position before path planning begins."""
        hover_spot = [uav.current_position[0], uav.current_position[1], height_z]
        uav.go_to_wp(uav.current_position, hover_spot)
    def main(self):
        """Plan a home path for every departing uav.

        Returns False as soon as Astar fails for any uav; otherwise returns
        None after all home paths have been assigned.
        """
        grid = self.generate_grid()
        obstacle_list = self.generate_3d_obstacles(HomeBase.STATIC_OBSTACLE,25)
        obstacle_list = self.add_obstacles(grid, obstacle_list)
        uavs_departure = self.get_uavs_requesting_departure()
        uav_loc_list = self.return_uav_loc_list()
        idx = 0
        uav_path_obs = []
        path_list = []
        for idx, uav in enumerate(uavs_departure):
            # Climb first so planning starts from a safe altitude.
            self.hover_up(uav, 7.0)
            """generate dynamic obstacles"""
            grid_copy, new_obstacle = self.get_dynamic_obstacles(idx, uav_path_obs, obstacle_list, \
                path_list, uav_loc_list, grid)
            """find waypoints with Astar pathfinding"""
            uav_wp, uav_filtered_wp = self.find_waypoints(grid_copy, new_obstacle, uav)
            #print("uav home is", uav_wp)
            if uav_wp:
                home_wp = uav_wp.copy()
                home_wp.append(uav.home_position)
                """add home position for flight home"""
                uav.set_path_home(home_wp)
                #uav.set_path(uav_filtered_wp)
                """set new obstacles"""
                path_list.append(uav_wp)
                idx += 1
                #print("index", idx)
            else:
                print("Failed to find home path")
                return False
class HomeSenderService():
    """Streams the homeward waypoint paths to uavs that have finished
    post-flight planning, then frees the landing zones they occupied."""

    def __init__(self, homeBase, landing_db):
        self.landingDb = landing_db
        self.search_service_number = 2   # uavs in this state are ready to fly home
        self.update_service_number = 3   # state once the uav has been sent home
        self.homeBase = homeBase

    def is_arrived_to_zone(self, waypoint_coords, uav_curr_location):
        """True when the uav is within 1.0 of the waypoint in the XY plane."""
        zone = np.array(waypoint_coords)
        pos = np.array(uav_curr_location)
        dx = zone[0] - pos[0]
        dy = zone[1] - pos[1]
        if math.sqrt(dx ** 2 + dy ** 2) <= 1.0:
            return True
        else:
            return False

    def get_uavs_with_wps(self):
        """Return uavs in the homeward state that have a path home."""
        return [uav for uav in self.landingDb.values()
                if uav.service_state == self.search_service_number
                and uav.path_home != None]

    def send_wp_commands(self, uavs_with_wp_list, uav):
        """Send each homeward waypoint; after the last one, advance the
        uav's service state and mark its landing zone vacant."""
        route = uav.path_home
        for step, waypoint in enumerate(route, start=1):
            uav.go_to_wp(uav.current_position, waypoint)
            if step == len(route):
                print("reached final waypoint")
                uav.update_service_state(self.update_service_number)
                uav.go_to_wp(uav.current_position, route[-1])
                self.homeBase.set_landing_zone_vacancy(uav.zone_index, True)
                break

    def main(self):
        """Send home every eligible uav.

        Returns False when no uav is ready to be sent home.
        """
        ready = self.get_uavs_with_wps()
        time.sleep(1)
        if not ready:
            print("no drones to send home")
            return False
        for uav in list(ready):
            self.send_wp_commands(ready, uav)
            if not ready:
                print("list is empty")
                break
def check_spawn_okay(current_location, spawn_list):
    """True when current_location keeps at least 4.0 units of separation
    from every location already chosen in spawn_list."""
    return all(
        compute_euclidean(current_location, existing) >= 4.0
        for existing in spawn_list
    )
def randomize_drone_outer_locations(n_drones):
    """Randomly place n_drones on the outer boundary of the airspace.

    Each drone gets a spawn point on one of the four outer edges and a
    matching loiter point on the corresponding inner edge, with altitude
    drawn from [min_height, max_height).  Points are resampled until every
    pair of spawn (and loiter) locations passes check_spawn_okay
    (>= 4.0 units apart).

    Returns:
        (spawned_locations, loiter_locations): two parallel lists of
        [x, y, z] coordinates.

    Fixes over the previous implementation:
      * the left/right and up/down branches were near-duplicates — factored
        into _sample helpers;
      * the redundant `if check... == True: break / else: continue` inside
        the retry loops (already implied by the while condition) is gone;
      * a stray debug print of the chosen coordinates was removed.
    """
    min_height = 8
    max_height = 11
    case = ["left", "right", "down", "up"]
    case_edge = {
        "left": MIN_X,
        "right": MAX_X,
        "down": MIN_Y,
        "up": MAX_Y
    }
    inner_case_edge = {
        "left": INNER_MIN_X,
        "right": INNER_MAX_X,
        "down": INNER_MIN_Y,
        "up": INNER_MAX_Y
    }

    def _altitude():
        # Random altitude in [min_height, max_height).
        return np.random.choice(range(min_height, max_height))

    def _sample(decision, edges, x_range, y_range):
        # left/right edges vary along y; up/down edges vary along x.
        if decision in ("left", "right"):
            return [edges[decision], np.random.choice(range(*y_range)), _altitude()]
        return [np.random.choice(range(*x_range)), edges[decision], _altitude()]

    def _sample_until_clear(decision, edges, x_range, y_range, taken, noisy):
        # Resample until the point is far enough from all accepted points.
        point = _sample(decision, edges, x_range, y_range)
        while not check_spawn_okay(point, taken):
            if noisy:
                print("drones too close")
            point = _sample(decision, edges, x_range, y_range)
        return point

    spawned_locations = []
    loiter_locations = []
    for _ in range(n_drones):
        decision = random.choice(case)
        coords = _sample_until_clear(decision, case_edge, (MIN_X, MAX_X),
                                     (MIN_Y, MAX_Y), spawned_locations, True)
        loiter = _sample_until_clear(decision, inner_case_edge,
                                     (INNER_MIN_X, INNER_MAX_X),
                                     (INNER_MIN_Y, INNER_MAX_Y),
                                     loiter_locations, False)
        spawned_locations.append(coords)
        loiter_locations.append(loiter)
    return spawned_locations, loiter_locations
def spawn_uavs(home_position_list, loiter_locaiton_list):
    """Instantiate one QuadCopter per home position.

    Each drone is named "uav<i>" and given the matching loiter location as
    both its loiter point and its initial goal.
    """
    return [
        QuadCopter("uav" + str(i), home, loiter_locaiton_list[i],
                   loiter_locaiton_list[i], True)
        for i, home in enumerate(home_position_list)
    ]
def check_mission_status(global_landing_db):
    """Return the uavs that are still idle — service state 0 with no path
    assigned — i.e. whose mission has not yet been processed."""
    return [uav for uav in global_landing_db.values()
            if uav.service_state == 0 and uav.path == None]
def begin_randomization(n_drones):
    """Create n_drones QuadCopters at randomized home/loiter locations."""
    home_locations, loiter_points = randomize_drone_outer_locations(n_drones)
    return spawn_uavs(home_locations, loiter_points)
def compute_path_length(point_list):
    """Total XY-plane length of the waypoint path (z is ignored)."""
    xy = np.array(point_list)[:, :2]
    segment_deltas = np.diff(xy, axis=0)
    # Euclidean length of each consecutive segment, then summed.
    segment_lengths = np.sqrt(np.sum(segment_deltas ** 2, axis=1))
    return np.sum(segment_lengths)
def compute_euclidean(position, goal):
    """2-D Euclidean distance between position and goal (z is ignored).

    Both arguments are indexable with x at [0] and y at [1].
    """
    dx = position[0] - goal[0]
    dy = position[1] - goal[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def run_utm(n_simulations, min_drones, max_drones, min_h, max_h, start_sim=0):
    """Run the Monte-Carlo UTM simulation campaign.

    Args:
        n_simulations: upper bound (exclusive) of the simulation index.
        min_drones, max_drones: inclusive range for the random fleet size.
        min_h, max_h: Astar heuristic weight bounds, logged with each run.
        start_sim: first simulation index; useful to resume a campaign.
            BUG FIX: this was hard-coded to 735 — a leftover resume offset
            that silently skipped the first 735 simulations.

    Returns:
        (performance, global_landing_db, n_drones, results) from the last run.
    """
    performance = []
    logger = MonteCarloLogger()
    results = []
    for i in range(start_sim, n_simulations):
        print("Simulation number", i)
        # Fresh state per simulation; force garbage collection between runs.
        global_db = {}
        homeBase = HomeBase()
        gc.collect()
        n_drones = random.randint(min_drones, max_drones)
        random_uavs = begin_randomization(n_drones)
        overallDb = UTMDatabase.OverallDatabase()
        overall_db = overallDb.listen_for_incoming_uavs(random_uavs)
        landingDb = UTMDatabase.LandingDatabase(overall_db, homeBase)
        landingDb.main()
        global_landing_db = landingDb.get_landing_db()
        # Instantiate the third-party services that drive the uavs through
        # the landing / departure pipeline.
        preLandingService = PreLandingService(homeBase, global_landing_db, min_h, max_h)
        pathSenderService = PathSenderService(homeBase, global_landing_db)
        landingServiceState = LandingStateService(global_landing_db)
        postFlightService = PostFlightService(homeBase, global_landing_db, min_h, max_h)
        homeSenderService = HomeSenderService(homeBase, global_landing_db)
        uavs_leftover = check_mission_status(global_landing_db)
        while uavs_leftover:
            gc.collect()
            gc.set_debug(gc.DEBUG_LEAK)
            if preLandingService.main() == False:
                mission_status = False
                # Log the failed run before abandoning it.
                logger.write_csv(i, n_drones, global_landing_db, mission_status, [min_h, max_h])
                break
            pathSenderService.main()
            gc.collect()
            landingServiceState.main()
            if postFlightService.main() == False:
                print("post flight was a failure ")
                mission_status = False
                break
            homeSenderService.main()
            uavs_leftover = check_mission_status(global_landing_db)
        # All uavs processed: grade the run by comparing the actual path
        # length to the ideal straight-line round trip.
        if not uavs_leftover:
            for uav_id, uav in global_landing_db.items():
                # multiply by 2 since the uav flies out and back
                ideal_path = 2 * compute_euclidean(uav.loiter_position, uav.goal)
                init_path = compute_path_length(uav.path)
                final_path = compute_path_length(uav.path_home[:-1])
                # A run is a success when the flown distance is no more than
                # twice the ideal round trip.
                if ideal_path / (init_path + final_path) <= 0.50:
                    mission_status = False
                    performance.append(False)
                else:
                    mission_status = True
                    performance.append(True)
                logger.write_csv(i, n_drones, global_landing_db, mission_status, [min_h, max_h])
                break
    return performance, global_landing_db, n_drones, results
class MonteCarloLogger():
    """Records each Monte-Carlo simulation run to a CSV file.

    Files are named simnum_<sim>_drones_<n>.csv and written under
    <cwd>/logs.  Each row is one uav's to_dict() after being stamped with
    the run's outcome, heuristics, and simulation number.
    """
    def __init__(self):
        # BUG FIX: the original built the path as os.getcwd() + "\logs",
        # which is an invalid escape sequence and a non-portable separator;
        # use os.path.join for a correct path on every platform.
        self.save_path = os.path.join(os.getcwd(), "logs")
        self.filename = "monte_carlo_sim"
        self.complete_directory = os.path.join(self.save_path, self.filename + ".csv")

    def convert_info_to_list(self, sim_num, n_drones, dict_db, performance, heuristics):
        """Stamp every uav with the run metadata and return their dict rows.

        n_drones is unused here but kept for interface compatibility with
        write_csv.
        """
        dataframe_list = []
        for uav in dict_db.values():
            uav.set_mission_success(performance)
            uav.set_heuristics(heuristics)
            uav.set_sim_num(sim_num)
            dataframe_list.append(uav.to_dict())
        return dataframe_list

    def write_csv(self, sim_num, n_drones, dict_db, performance, heuristics):
        """Write one CSV file for this simulation run.

        The header is taken from the first uav's dict keys.
        """
        rows = self.convert_info_to_list(sim_num, n_drones, dict_db, performance, heuristics)
        keys = rows[0].keys()
        filename = "simnum_" + str(sim_num) + "_drones_" + str(n_drones)
        complete_directory = os.path.join(self.save_path, filename + ".csv")
        with open(complete_directory, 'w', newline='') as output_file:
            dict_writer = csv.DictWriter(output_file, keys)
            dict_writer.writeheader()
            dict_writer.writerows(rows)
        print("recorded information to ", complete_directory)
if __name__ == '__main__':
    # Astar heuristic weight bounds used for every run.
    min_h = 0.5
    max_h = 1.5
    start = time.time()
    n_simulations = 1000
    min_uavs = 4
    max_uavs = 20
    performance, db, n_drones, results = run_utm(
        n_simulations, min_uavs, max_uavs, min_h, max_h)
| [
"numpy.sum",
"gc.collect",
"os.path.join",
"csv.DictWriter",
"random.randint",
"UTMDatabase.LandingDatabase",
"PathFinding.Astar",
"queue.PriorityQueue",
"math.sqrt",
"numpy.cross",
"time.sleep",
"gc.set_debug",
"UTMDatabase.OverallDatabase",
"os.getcwd",
"numpy.zeros",
"random.choice"... | [((33242, 33262), 'numpy.array', 'np.array', (['point_list'], {}), '(point_list)\n', (33250, 33262), True, 'import numpy as np\n'), ((33392, 33407), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (33398, 33407), True, 'import numpy as np\n'), ((33562, 33632), 'math.sqrt', 'math.sqrt', (['((position[0] - goal[0]) ** 2 + (position[1] - goal[1]) ** 2)'], {}), '((position[0] - goal[0]) ** 2 + (position[1] - goal[1]) ** 2)\n', (33571, 33632), False, 'import math\n'), ((38874, 38885), 'time.time', 'time.time', ([], {}), '()\n', (38883, 38885), False, 'import time\n'), ((7549, 7564), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (7562, 7564), False, 'from queue import PriorityQueue\n'), ((7685, 7761), 'numpy.zeros', 'np.zeros', (['(self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y)'], {}), '((self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y))\n', (7693, 7761), True, 'import numpy as np\n'), ((9913, 9942), 'scipy.spatial.KDTree', 'spatial.KDTree', (['landing_zones'], {}), '(landing_zones)\n', (9927, 9942), False, 'from scipy import spatial\n'), ((10755, 10783), 'numpy.cross', 'np.cross', (['vector_1', 'vector_2'], {}), '(vector_1, vector_2)\n', (10763, 10783), True, 'import numpy as np\n'), ((11871, 11962), 'PathFinding.Astar', 'PathFinding.Astar', (['grid_copy', 'new_obstacle', 'uav_loc', 'goal_point', 'self.min_h', 'self.max_h'], {}), '(grid_copy, new_obstacle, uav_loc, goal_point, self.min_h,\n self.max_h)\n', (11888, 11962), False, 'import PathFinding\n'), ((15240, 15265), 'numpy.array', 'np.array', (['waypoint_coords'], {}), '(waypoint_coords)\n', (15248, 15265), True, 'import numpy as np\n'), ((15287, 15314), 'numpy.array', 'np.array', (['uav_curr_location'], {}), '(uav_curr_location)\n', (15295, 15314), True, 'import numpy as np\n'), ((15330, 15422), 'math.sqrt', 'math.sqrt', (['((zone_coords[0] - uav_coords[0]) ** 2 + (zone_coords[1] - uav_coords[1]) ** 2)'], {}), '((zone_coords[0] - 
uav_coords[0]) ** 2 + (zone_coords[1] -\n uav_coords[1]) ** 2)\n', (15339, 15422), False, 'import math\n'), ((19499, 19575), 'numpy.zeros', 'np.zeros', (['(self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y)'], {}), '((self.homeBase.GRID_Z, self.homeBase.GRID_X, self.homeBase.GRID_Y))\n', (19507, 19575), True, 'import numpy as np\n'), ((21782, 21810), 'numpy.cross', 'np.cross', (['vector_1', 'vector_2'], {}), '(vector_1, vector_2)\n', (21790, 21810), True, 'import numpy as np\n'), ((22838, 22929), 'PathFinding.Astar', 'PathFinding.Astar', (['grid_copy', 'new_obstacle', 'uav_loc', 'goal_point', 'self.min_h', 'self.max_h'], {}), '(grid_copy, new_obstacle, uav_loc, goal_point, self.min_h,\n self.max_h)\n', (22855, 22929), False, 'import PathFinding\n'), ((25823, 25848), 'numpy.array', 'np.array', (['waypoint_coords'], {}), '(waypoint_coords)\n', (25831, 25848), True, 'import numpy as np\n'), ((25870, 25897), 'numpy.array', 'np.array', (['uav_curr_location'], {}), '(uav_curr_location)\n', (25878, 25897), True, 'import numpy as np\n'), ((25913, 26005), 'math.sqrt', 'math.sqrt', (['((zone_coords[0] - uav_coords[0]) ** 2 + (zone_coords[1] - uav_coords[1]) ** 2)'], {}), '((zone_coords[0] - uav_coords[0]) ** 2 + (zone_coords[1] -\n uav_coords[1]) ** 2)\n', (25922, 26005), False, 'import math\n'), ((27665, 27678), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (27675, 27678), False, 'import time\n'), ((29365, 29384), 'random.choice', 'random.choice', (['case'], {}), '(case)\n', (29378, 29384), False, 'import random\n'), ((34024, 34036), 'gc.collect', 'gc.collect', ([], {}), '()\n', (34034, 34036), False, 'import gc\n'), ((34060, 34098), 'random.randint', 'random.randint', (['min_drones', 'max_drones'], {}), '(min_drones, max_drones)\n', (34074, 34098), False, 'import random\n'), ((34171, 34200), 'UTMDatabase.OverallDatabase', 'UTMDatabase.OverallDatabase', ([], {}), '()\n', (34198, 34200), False, 'import UTMDatabase\n'), ((34303, 34352), 
'UTMDatabase.LandingDatabase', 'UTMDatabase.LandingDatabase', (['overall_db', 'homeBase'], {}), '(overall_db, homeBase)\n', (34330, 34352), False, 'import UTMDatabase\n'), ((37552, 37604), 'os.path.join', 'os.path.join', (['self.save_path', "(self.filename + '.csv')"], {}), "(self.save_path, self.filename + '.csv')\n", (37564, 37604), False, 'import os\n'), ((38404, 38451), 'os.path.join', 'os.path.join', (['self.save_path', "(filename + '.csv')"], {}), "(self.save_path, filename + '.csv')\n", (38416, 38451), False, 'import os\n'), ((3255, 3267), 'numpy.array', 'np.array', (['wp'], {}), '(wp)\n', (3263, 3267), True, 'import numpy as np\n'), ((3270, 3296), 'numpy.array', 'np.array', (['current_position'], {}), '(current_position)\n', (3278, 3296), True, 'import numpy as np\n'), ((10530, 10547), 'numpy.array', 'np.array', (['point_2'], {}), '(point_2)\n', (10538, 10547), True, 'import numpy as np\n'), ((10549, 10566), 'numpy.array', 'np.array', (['point_1'], {}), '(point_1)\n', (10557, 10566), True, 'import numpy as np\n'), ((10588, 10605), 'numpy.array', 'np.array', (['point_3'], {}), '(point_3)\n', (10596, 10605), True, 'import numpy as np\n'), ((10608, 10625), 'numpy.array', 'np.array', (['point_2'], {}), '(point_2)\n', (10616, 10625), True, 'import numpy as np\n'), ((21557, 21574), 'numpy.array', 'np.array', (['point_2'], {}), '(point_2)\n', (21565, 21574), True, 'import numpy as np\n'), ((21576, 21593), 'numpy.array', 'np.array', (['point_1'], {}), '(point_1)\n', (21584, 21593), True, 'import numpy as np\n'), ((21615, 21632), 'numpy.array', 'np.array', (['point_3'], {}), '(point_3)\n', (21623, 21632), True, 'import numpy as np\n'), ((21635, 21652), 'numpy.array', 'np.array', (['point_2'], {}), '(point_2)\n', (21643, 21652), True, 'import numpy as np\n'), ((35013, 35025), 'gc.collect', 'gc.collect', ([], {}), '()\n', (35023, 35025), False, 'import gc\n'), ((35042, 35069), 'gc.set_debug', 'gc.set_debug', (['gc.DEBUG_LEAK'], {}), '(gc.DEBUG_LEAK)\n', (35054, 
35069), False, 'import gc\n'), ((35421, 35433), 'gc.collect', 'gc.collect', ([], {}), '()\n', (35431, 35433), False, 'import gc\n'), ((37379, 37390), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37388, 37390), False, 'import os\n'), ((38548, 38581), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (38562, 38581), False, 'import csv\n'), ((2759, 2790), 'numpy.array', 'np.array', (['self.current_position'], {}), '(self.current_position)\n', (2767, 2790), True, 'import numpy as np\n'), ((33314, 33335), 'numpy.diff', 'np.diff', (['apts'], {'axis': '(0)'}), '(apts, axis=0)\n', (33321, 33335), True, 'import numpy as np\n')] |
#
# An attempt to implement multiprocessing
#
# Importing the necessary modules.
import projectq
from projectq.ops import H,X,Y,Z,T,Tdagger,S,Sdagger,CNOT,Measure,All,Rx,Ry,Rz,SqrtX,Swap
import numpy as np
import copy, sys, getopt, os
from deap import creator, base, tools
from candidate import Candidate
from constants import *
from new_evolution import crossoverInd, mutateInd, selectAndEvolve, geneticAlgorithm
from tools import *
from datetime import datetime
from comparison import compare
import time
import multiprocessing
import psutil
import argparse
from qiskit import Aer, execute, QuantumRegister
from qiskit.quantum_info import state_fidelity, DensityMatrix, Statevector, Operator
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo, FakeAthens
from qiskit.circuit.library import Permutation
from qiskit_transpiler.transpiled_initialization_circuits import genCircs, getFidelities
def loadState(numberOfQubits, stateName):
    """Load a pickled target state vector into the module-global
    ``desired_state``.

    Args:
        numberOfQubits: selects the states/<n>_qubits/ directory.
        stateName: file name of the pickled state inside that directory.
    """
    import pickle  # explicit import; previously relied on a star import
    global desired_state
    path = 'states/' + str(numberOfQubits) + '_qubits/' + stateName
    # SECURITY: pickle.load executes arbitrary code while deserializing;
    # only load state files from trusted sources.
    with open(path, 'rb') as f:  # 'with' closes the file even on error
        desired_state = pickle.load(f)
def desiredState():
    """Return the target state vector.

    The i-th element is the i-th amplitude of the state previously loaded
    by loadState().
    """
    return desired_state
def evaluateIndcostt(individual, verbose=False):
    """Objective function returning (infidelity, cost).

    infidelity = 1 - |<wanted|produced>|^2 against the target state from
    desiredState(); the error is cached on the individual via setCMW()
    before its cost function is evaluated.
    """
    target = desiredState()
    produced = individual.simulateCircuit()
    overlap = np.vdot(target, produced)
    error = 1 - np.absolute(overlap) ** 2
    individual.setCMW(error)
    cost = individual.evaluateCost()
    if verbose:
        print("Wanted state is:", target)
        print("Produced state is", produced)
        print("Error is:", error)
    return (error, cost)
def evaluateInd(individual, verbose=False):
    """Objective function returning (infidelity, circuit length).

    infidelity = 1 - |<wanted|produced>|^2 for the candidate's simulated
    output versus the target from desiredState(); the error is also cached
    on the individual via setCMW().
    """
    target = desiredState()
    produced = individual.simulateCircuit()
    overlap = np.vdot(target, produced)
    error = 1 - np.absolute(overlap) ** 2
    individual.setCMW(error)
    if verbose:
        print("Wanted state is:", target)
        print("Produced state is", produced)
        print("Error is:", error)
    return (error, len(individual.circuit))
# ---------------------------------------------------------------------------
# Module-level run configuration (executed on import).
# ---------------------------------------------------------------------------
# NOTE(review): `directory` and `ID` are derived from the numberOfQubits and
# POPSIZE values imported from constants *before* the command-line overrides
# below are parsed, so the -q / -p arguments do not affect where results are
# saved — confirm this ordering is intended.
directory = f"performance_data/{numberOfQubits}QB/{POPSIZE}POP/"
# Default run ID: half the file count, since each prior run saved two files.
ID = int(len(os.listdir(directory)) / 2)
# Initialize parser
parser = argparse.ArgumentParser()
# Adding optional argument
parser.add_argument("-p", "--POPSIZE", help = "Size of the population")
parser.add_argument("-g", "--NGEN", help = "The number of generations")
parser.add_argument("-q", "--NQUBIT", help = "The number of qubits")
parser.add_argument("-i", "--INDEX", help = "Index of desired state")
parser.add_argument("-id", "--ID", help = "ID of the saved file")
# Read arguments from command line
args = parser.parse_args()
# Command-line values override the defaults imported from constants.
if args.POPSIZE:
    POPSIZE = int(args.POPSIZE)
if args.NGEN:
    NGEN = int(args.NGEN)
if args.NQUBIT:
    numberOfQubits = int(args.NQUBIT)
if args.INDEX:
    stateIndex = int(args.INDEX)
if args.ID:
    ID = int(args.ID)
# Load the pickled target state into the module-global desired_state.
stateName = str(numberOfQubits)+"QB_state"+str(stateIndex)
loadState(numberOfQubits, stateName)
now = datetime.now()
timeStr = now.strftime("%d.%m.%y-%H:%M")
problemName = f"{ID}-{NGEN}GEN-{stateName}"
problemDescription = "State initalization for:\n"
problemDescription += "numberOfQubits=" + str(numberOfQubits) + "\n"
problemDescription += "allowedGates=" + str(allowedGates) + "\n"
# trying to minimize error and length !
fitnessWeights = (-1.0, -0.5)
# Create the type of the individual
creator.create("FitnessMin", base.Fitness, weights=fitnessWeights)
creator.create("Individual", Candidate, fitness=creator.FitnessMin)
# Initialize your toolbox and population
toolbox = base.Toolbox()
#from scoop import futures
#if multiProcess:
#    pool = multiprocessing.Pool()
#    toolbox.register("map", pool.map)
# DEAP wiring: how individuals are built, mated, mutated, selected, scored.
toolbox.register("individual", creator.Individual, numberOfQubits, allowedGates, connectivity)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", crossoverInd, toolbox=toolbox)
toolbox.register("mutate", mutateInd)
toolbox.register("select", tools.selSPEA2)
toolbox.register("selectAndEvolve", selectAndEvolve)
toolbox.register("evaluate", evaluateIndcostt)
def main():
    """Run the genetic-algorithm state-preparation search.

    Seeds the population with one Qiskit-transpiled circuit, evolves it,
    reports the best individual's fidelity on an exact statevector
    simulation, compares against baselines, optionally saves the results,
    and returns the GA runtime in seconds.

    Cleanup over the previous version: removed the unused CXPB/MUTPB
    locals, the unused getFidelities/randomDV imports, and a large block
    of commented-out noisy-simulation experiment code.
    """
    pop = toolbox.population(n=POPSIZE)
    # getPermutation / qasm2ls are only needed here, hence the local imports.
    from qiskit_transpiler.transpiled_initialization_circuits import genCircs, getPermutation
    from candidate import qasm2ls
    # Seed individual 0 with a Qiskit-transpiled circuit for the target state.
    qiskit_circs, depths = genCircs(numberOfQubits, fake_machine, desiredState(), n_iter=1)
    perm = getPermutation(qiskit_circs[0])
    qiskit_circ = qasm2ls(qiskit_circs[0].qasm())
    pop[0].circuit = qiskit_circ
    pop[0].permutation = perm
    # Fidelity of the seeded circuit before evolution.
    print(1 - toolbox.evaluate(pop[0])[0])
    start = time.perf_counter()
    pop, logbook = geneticAlgorithm(pop, toolbox, NGEN, problemName, problemDescription, epsilon, verbose=verbose, returnLog=True)
    runtime = round(time.perf_counter() - start, 2)
    print(evaluateInd(pop[0]))
    # Verify the best circuit on an exact statevector simulation.
    backend = Aer.get_backend('statevector_simulator')
    circ = pop[0].toQiskitCircuit()
    statevector = execute(circ, backend).result().get_statevector(circ)
    print(state_fidelity(desiredState(), pop[0].getPermutationMatrix() @ statevector))
    paretoFront(pop)
    compare(pop, numberOfQubits, desired_state)
    # Save the results
    if saveResult:
        save(pop, logbook, directory, problemName)
        print(f"The population and logbook were saved in {directory}{problemName}")
    print(f'Runtime: {runtime}s')
    return runtime
if __name__ == "__main__":
    main()
| [
"comparison.compare",
"new_evolution.geneticAlgorithm",
"argparse.ArgumentParser",
"deap.base.Toolbox",
"numpy.vdot",
"time.perf_counter",
"qiskit_transpiler.transpiled_initialization_circuits.getPermutation",
"deap.creator.create",
"qiskit.execute",
"datetime.datetime.now",
"os.listdir",
"qis... | [((3263, 3288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3286, 3288), False, 'import argparse\n'), ((4058, 4072), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4070, 4072), False, 'from datetime import datetime\n'), ((4451, 4517), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'base.Fitness'], {'weights': 'fitnessWeights'}), "('FitnessMin', base.Fitness, weights=fitnessWeights)\n", (4465, 4517), False, 'from deap import creator, base, tools\n'), ((4518, 4585), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'Candidate'], {'fitness': 'creator.FitnessMin'}), "('Individual', Candidate, fitness=creator.FitnessMin)\n", (4532, 4585), False, 'from deap import creator, base, tools\n'), ((4638, 4652), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (4650, 4652), False, 'from deap import creator, base, tools\n'), ((5958, 5989), 'qiskit_transpiler.transpiled_initialization_circuits.getPermutation', 'getPermutation', (['qiskit_circs[0]'], {}), '(qiskit_circs[0])\n', (5972, 5989), False, 'from qiskit_transpiler.transpiled_initialization_circuits import genCircs, getPermutation, getFidelities, randomDV\n'), ((6160, 6179), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6177, 6179), False, 'import time\n'), ((6199, 6314), 'new_evolution.geneticAlgorithm', 'geneticAlgorithm', (['pop', 'toolbox', 'NGEN', 'problemName', 'problemDescription', 'epsilon'], {'verbose': 'verbose', 'returnLog': '(True)'}), '(pop, toolbox, NGEN, problemName, problemDescription,\n epsilon, verbose=verbose, returnLog=True)\n', (6215, 6314), False, 'from new_evolution import crossoverInd, mutateInd, selectAndEvolve, geneticAlgorithm\n'), ((8993, 9033), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""statevector_simulator"""'], {}), "('statevector_simulator')\n", (9008, 9033), False, 'from qiskit import Aer, execute, QuantumRegister\n'), ((9344, 9387), 'comparison.compare', 'compare', (['pop', 
'numberOfQubits', 'desired_state'], {}), '(pop, numberOfQubits, desired_state)\n', (9351, 9387), False, 'from comparison import compare\n'), ((3205, 3226), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3215, 3226), False, 'import copy, sys, getopt, os\n'), ((6331, 6350), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6348, 6350), False, 'import time\n'), ((1892, 1912), 'numpy.vdot', 'np.vdot', (['wanted', 'got'], {}), '(wanted, got)\n', (1899, 1912), True, 'import numpy as np\n'), ((2699, 2719), 'numpy.vdot', 'np.vdot', (['wanted', 'got'], {}), '(wanted, got)\n', (2706, 2719), True, 'import numpy as np\n'), ((9088, 9110), 'qiskit.execute', 'execute', (['circ', 'backend'], {}), '(circ, backend)\n', (9095, 9110), False, 'from qiskit import Aer, execute, QuantumRegister\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom model that is already retained by data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import tensorflow as tf # TF2
import tensorflow_examples.lite.model_customization.core.model_export_format as mef
class ClassificationModel(abc.ABC):
  """The abstract base class that represents a Tensorflow classification model."""

  def __init__(self, data, model_export_format, model_name, shuffle,
               train_whole_model, validation_ratio, test_ratio):
    """Initialize a instance with data, deploy mode and other related parameters.

    Args:
      data: Raw data that could be splitted for training / validation / testing.
      model_export_format: Model export format such as saved_model / tflite.
      model_name: Model name.
      shuffle: Whether the data should be shuffled.
      train_whole_model: If true, the Hub module is trained together with the
        classification layer on top. Otherwise, only train the top
        classification layer.
      validation_ratio: The ratio of valid data to be splitted.
      test_ratio: The ratio of test data to be splitted.
    """
    # Only TFLite export is implemented below; fail fast on anything else.
    if model_export_format != mef.ModelExportFormat.TFLITE:
      raise ValueError('Model export format %s is not supported currently.' %
                       str(model_export_format))
    self.data = data
    self.model_export_format = model_export_format
    self.model_name = model_name
    self.shuffle = shuffle
    self.train_whole_model = train_whole_model
    self.validation_ratio = validation_ratio
    self.test_ratio = test_ratio
    # Generates training, validation and testing data.
    if validation_ratio + test_ratio >= 1.0:
      raise ValueError(
          'The total ratio for validation and test data should be less than 1.0.'
      )
    self.validation_data, rest_data = data.split(
        validation_ratio, shuffle=shuffle)
    # The test ratio is rescaled because it is applied to what remains
    # after the validation split has already been taken out.
    self.test_data, self.train_data = rest_data.split(
        test_ratio / (1 - validation_ratio), shuffle=shuffle)
    # Checks dataset parameter.
    if self.train_data.size == 0:
      raise ValueError('Training dataset is empty.')
    # Subclasses are expected to populate this in _create_model().
    self.model = None

  @abc.abstractmethod
  def _create_model(self, **kwargs):
    return

  @abc.abstractmethod
  def preprocess(self, sample_data, label):
    return

  @abc.abstractmethod
  def train(self, **kwargs):
    return

  @abc.abstractmethod
  def export(self, **kwargs):
    return

  def summary(self):
    """Prints a summary of the underlying Keras model."""
    self.model.summary()

  def evaluate(self, data=None, batch_size=32):
    """Evaluates the model.

    Args:
      data: Data to be evaluated. If None, then evaluates in self.test_data.
      batch_size: Number of samples per evaluation step.

    Returns:
      The loss value and accuracy.
    """
    if data is None:
      data = self.test_data
    ds = self._gen_validation_dataset(data, batch_size)
    return self.model.evaluate(ds)

  def predict_topk(self, data=None, k=1, batch_size=32):
    """Predicts the top-k predictions.

    Args:
      data: Data to be evaluated. If None, then predicts in self.test_data.
      k: Number of top results to be predicted.
      batch_size: Number of samples per evaluation step.

    Returns:
      top k results. Each one is (label, probability).
    """
    if k < 0:
      raise ValueError('K should be equal or larger than 0.')
    if data is None:
      data = self.test_data
    ds = self._gen_validation_dataset(data, batch_size)
    predicted_prob = self.model.predict(ds)
    topk_prob, topk_id = tf.math.top_k(predicted_prob, k=k)
    # Map predicted class indices back to human-readable labels.
    topk_label = np.array(self.data.index_to_label)[topk_id.numpy()]
    label_prob = []
    for label, prob in zip(topk_label, topk_prob.numpy()):
      label_prob.append(list(zip(label, prob)))
    return label_prob

  def _gen_train_dataset(self, data, batch_size=32):
    """Generates training dataset."""
    ds = data.dataset.map(
        self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if self.shuffle:
      ds = ds.shuffle(buffer_size=data.size)
    ds = ds.repeat()
    ds = ds.batch(batch_size)
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    return ds

  def _gen_validation_dataset(self, data, batch_size=32):
    """Generates validation dataset."""
    # No shuffle/repeat here: evaluation should see each sample exactly once.
    ds = data.dataset.map(
        self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.batch(batch_size)
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    return ds

  def _export_tflite(self, tflite_filename, label_filename, quantized=False):
    """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filename: File name to save tflite model.
      label_filename: File name to save labels.
      quantized: boolean, if True, save quantized model.
    """
    converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
    if quantized:
      converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    with tf.io.gfile.GFile(tflite_filename, 'wb') as f:
      f.write(tflite_model)
    # One label per line, in index order, written next to the model file.
    with tf.io.gfile.GFile(label_filename, 'w') as f:
      f.write('\n'.join(self.data.index_to_label))
    tf.compat.v1.logging.info('Export to tflite model %s, saved labels in %s.',
                              tflite_filename, label_filename)
| [
"tensorflow.compat.v1.logging.info",
"tensorflow.math.top_k",
"numpy.array",
"tensorflow.lite.TFLiteConverter.from_keras_model",
"tensorflow.io.gfile.GFile"
] | [((4185, 4219), 'tensorflow.math.top_k', 'tf.math.top_k', (['predicted_prob'], {'k': 'k'}), '(predicted_prob, k=k)\n', (4198, 4219), True, 'import tensorflow as tf\n'), ((5455, 5507), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['self.model'], {}), '(self.model)\n', (5495, 5507), True, 'import tensorflow as tf\n'), ((5830, 5942), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Export to tflite model %s, saved labels in %s."""', 'tflite_filename', 'label_filename'], {}), "('Export to tflite model %s, saved labels in %s.',\n tflite_filename, label_filename)\n", (5855, 5942), True, 'import tensorflow as tf\n'), ((4237, 4271), 'numpy.array', 'np.array', (['self.data.index_to_label'], {}), '(self.data.index_to_label)\n', (4245, 4271), True, 'import numpy as np\n'), ((5644, 5684), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['tflite_filename', '"""wb"""'], {}), "(tflite_filename, 'wb')\n", (5661, 5684), True, 'import tensorflow as tf\n'), ((5729, 5767), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['label_filename', '"""w"""'], {}), "(label_filename, 'w')\n", (5746, 5767), True, 'import tensorflow as tf\n')] |
import numpy as np
from lagom.transform import geometric_cumsum
from lagom.utils import numpify
def returns(gamma, rewards):
    """Discounted return at every timestep of ``rewards``, as float32."""
    discounted = geometric_cumsum(gamma, rewards)
    return discounted[0, :].astype(np.float32)
def bootstrapped_returns(gamma, rewards, last_V, reach_terminal):
    r"""Return (discounted) accumulated returns with bootstrapping for a
    batch of episodic transitions.

    Formally, suppose we have all rewards :math:`(r_1, \dots, r_T)`, it computes

    .. math::
        Q_t = r_t + \gamma r_{t+1} + \dots + \gamma^{T - t} r_T + \gamma^{T - t + 1} V(s_{T+1})

    .. note::
        The state values for terminal states are masked out as zero !
    """
    tail_value = numpify(last_V, np.float32).item()
    if reach_terminal:
        tail_value = 0.0  # terminal states carry no future value
    padded = np.append(rewards, tail_value)
    cumulative = geometric_cumsum(gamma, padded)
    # Drop the appended bootstrap slot so there is one return per reward.
    return cumulative[0, :-1].astype(np.float32)
| [
"numpy.append",
"lagom.utils.numpify",
"lagom.transform.geometric_cumsum"
] | [((696, 723), 'lagom.utils.numpify', 'numpify', (['last_V', 'np.float32'], {}), '(last_V, np.float32)\n', (703, 723), False, 'from lagom.utils import numpify\n'), ((797, 820), 'numpy.append', 'np.append', (['rewards', '(0.0)'], {}), '(rewards, 0.0)\n', (806, 820), True, 'import numpy as np\n'), ((870, 896), 'numpy.append', 'np.append', (['rewards', 'last_V'], {}), '(rewards, last_V)\n', (879, 896), True, 'import numpy as np\n'), ((139, 171), 'lagom.transform.geometric_cumsum', 'geometric_cumsum', (['gamma', 'rewards'], {}), '(gamma, rewards)\n', (155, 171), False, 'from lagom.transform import geometric_cumsum\n')] |
import collections
import numpy
from .model import monthly_markov as gentrellis
from . import trellis
from . import markov_event
def make_hmm():
    """Build a ``trellis.HMM`` from the monthly_markov (gentrellis) model.

    Re-encodes the legacy gentrellis states, edges, observations and prior
    into the index-based representation the trellis module expects.
    """
    index_of = {state: idx for idx, state in enumerate(gentrellis.STATES)}
    state_indices = numpy.asarray(
        [index_of[state] for state in gentrellis.STATES], dtype=int)
    # Jank: convert from old monthly_markov aka gentrellis encoding into the
    # trellis module's weighted-edge / weighted-observation types.
    edges_by_index = collections.defaultdict(list)
    obs_by_index = collections.defaultdict(list)
    for state in gentrellis.STATES:
        idx = index_of[state]
        edges_by_index[idx] = [
            trellis.WeightedEdge(succ=index_of[edge.succ], weight=edge.weight)
            for edge in gentrellis.OUTGOING_EDGES_BY_STATE[state]
        ]
        obs_by_index[idx] = [
            trellis.WeightedObservation(delta_e=ob.delta_e, weight=ob.weight)
            for ob in gentrellis.WEIGHTED_OBS_BY_STATE[state]
        ]
    prior = numpy.asarray(
        [gentrellis.LOGPROB_PRIOR_BY_STATE[state] for state in gentrellis.STATES],
        dtype=numpy.float64)
    return trellis.HMM(
        state_by_statekey=index_of,
        states=state_indices,
        packed_edges=trellis.pack_edges(state_indices, edges_by_index),
        packed_observations=trellis.pack_observations(state_indices, obs_by_index),
        prior=prior,
    )
class MonthlyMarkovEventAuxiliarySolver(markov_event.MarkovEventAuxiliarySolver):
    """Markov-event auxiliary solver backed by the monthly_markov HMM."""

    def __init__(self):
        super().__init__()
        self.hmm = make_hmm()

    def _searchfunc(self, times, prizes_u):
        """Find the best path through the HMM for the given prizes.

        ``times`` must be an array of time indices 0...T-1 and ``prizes_u``
        a shape (T,) array of prizes (unit: logprob units per event).
        """
        best = trellis.search_best_path(self.hmm, times, prizes_u)
        objective_star, logprob_star, state_path, obs_trajectory = best
        # Translate from state indices back into fancy (pretty) states.
        fancy_states = [
            gentrellis.prettystate(gentrellis.STATES[idx]) for idx in state_path
        ]
        return (objective_star, logprob_star, fancy_states, obs_trajectory)
"collections.defaultdict",
"numpy.asarray"
] | [((230, 302), 'numpy.asarray', 'numpy.asarray', (['[state_to_index[s] for s in gentrellis.STATES]'], {'dtype': 'int'}), '([state_to_index[s] for s in gentrellis.STATES], dtype=int)\n', (243, 302), False, 'import numpy\n'), ((340, 369), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (363, 369), False, 'import collections\n'), ((404, 433), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (427, 433), False, 'import collections\n'), ((1120, 1226), 'numpy.asarray', 'numpy.asarray', (['[gentrellis.LOGPROB_PRIOR_BY_STATE[s] for s in gentrellis.STATES]'], {'dtype': 'numpy.float64'}), '([gentrellis.LOGPROB_PRIOR_BY_STATE[s] for s in gentrellis.\n STATES], dtype=numpy.float64)\n', (1133, 1226), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 18:34:24 2019
@author: philipp
"""
# z-Score plot of fold change
# =======================================================================
# Imports
from __future__ import division # floating point division by default
import sys
import time
import os
import pandas
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy
import yaml
import glob
from matplotlib.ticker import FuncFormatter
def kilos(x, pos):
    """Axis tick formatter: render ``x`` in thousands, e.g. 3000 -> '3k'.

    ``pos`` is required by matplotlib's FuncFormatter signature but unused.
    """
    return '{:1.0f}k'.format(x * 1e-3)
def zScoreFC(sample,GOI='none',Annot='none',NonT='none'):
    """Plot ranked fold-change z-scores for all sgRNAs of one sample.

    Reads settings from configuration.yaml and the sample's *sgRNAList.txt
    ranking table, log10-transforms the fold changes, standardizes them to
    z-scores and saves a ranked scatter plot highlighting significant
    sgRNAs, optionally non-targeting controls and a gene of interest.

    Args:
        sample: Sample name prefix of the ranking file in sgRNARanksDir.
        GOI: Gene of interest to highlight in red, or 'none'.
        Annot: 'True'/'False' to force sgRNA ID annotation on/off, or
            'none' to fall back to the configuration file setting.
        NonT: 'True'/'False' to force display of non-targeting controls,
            or 'none' to fall back to the configuration file setting.

    Side effects: changes the working directory, writes PNG (and optionally
    SVG) figures under zScoreDir_sgRNA, and prints progress to stdout.
    """
    # ------------------------------------------------
    # Print header
    # ------------------------------------------------
    print('++++++++++++++++++++++++++++++++++++++++++++++++')
    start_total = time.time()
    # ------------------------------------------------
    # Get parameters
    # ------------------------------------------------
    configFile = open('configuration.yaml','r')
    config = yaml.safe_load(configFile)
    configFile.close()
    sgRNARanksDir = config['sgRNARanksDir']
    ScriptsDir = config['ScriptsDir']
    outputDir = config['zScoreDir_sgRNA']
    ScreenType = config['ScreenType']
    res = config['dpi']
    svg = config['svg']
    NonTPrefix = config['NonTargetPrefix']
    PrintHighlights = config['PrintHighlights']
    # Show non-targeting controls?
    # Command-line override wins; 'none' means "use the config value".
    if NonT == 'none':
        ShowNonTargets = config['ShowNonTargets']
    elif NonT == 'False':
        ShowNonTargets = False
    elif NonT == 'True':
        ShowNonTargets = True
    # Annotate sgRNAs ?
    if Annot == 'none':
        annotate = config['scatter_annotate']
    elif Annot == 'False':
        annotate = False
    elif Annot == 'True':
        annotate = True
    # ------------------------------------------------
    # Reading fold-change data
    # ------------------------------------------------
    print('Reading sgRNA read counts ...')
    os.chdir(sgRNARanksDir)
    # Assumes exactly one ranking file matches this sample prefix
    # (the first glob hit is used).
    filename = glob.glob(sample+'_*sgRNAList.txt')[0]
    sgRNARanking = pandas.read_table(filename, sep='\t')
    L = len(sgRNARanking)
    # Sort so that the most enriched/depleted sgRNAs end up on the right.
    if ScreenType == 'enrichment':
        sgRNARanking = sgRNARanking.sort_values('fold change',ascending=True)
    elif ScreenType == 'depletion':
        sgRNARanking = sgRNARanking.sort_values('fold change',ascending=False)
    fc = list(sgRNARanking['fold change'])
    sig = list(sgRNARanking['significant'])
    genes = list(sgRNARanking['gene'])
    sgIDs = list(sgRNARanking['sgRNA'])
    # ------------------------------------------------
    # Compute z Scores
    # ------------------------------------------------
    print('Computing fold-change z-scores ...')
    # z-score of the log10 fold change; assumes all fold changes are > 0
    # (log10 of zero/negative would fail) -- TODO confirm upstream guarantees.
    logfc = [numpy.log10(fc[k]) for k in range(L)]
    m = numpy.mean(logfc)
    std = numpy.std(logfc)
    zScores = [(logfc[k]-m)/std for k in range(L)]
    # ------------------------------------------------
    # Creating subsets
    # ------------------------------------------------
    # Partition sgRNA indices: non-targeting controls, significant hits,
    # gene-of-interest guides, and everything else.
    K_nonT = [k for k in range(L) if NonTPrefix in genes[k]]
    K_sig = [k for k in range(L) if sig[k]==True]
    K_goi = [k for k in range(L) if genes[k] == GOI]
    K_rest = list(set(range(L)) - set.union(set(K_nonT),set(K_sig),set(K_goi)))
    z_nonT = [zScores[k] for k in K_nonT]
    z_sig = [zScores[k] for k in K_sig]
    z_goi = [zScores[k] for k in K_goi]
    z_rest = [zScores[k] for k in K_rest]
    sgIDs_goi = [sgIDs[k] for k in K_goi]
    # ------------------------------------------------
    # Plot
    # ------------------------------------------------
    print('Generating z-score plot ...')
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    os.chdir(outputDir)
    fig, ax = plt.subplots(figsize=(3.5,2.9))
    # Dashed baseline at z = 0 across the whole ranking.
    plt.plot((0,L), (0,0), ls="--", color=(51/255,153/255,1))
    plt.scatter(K_rest,z_rest,s=8,lw=0,color='#d3d3d3',rasterized=True)
    plt.scatter(K_sig,z_sig,s=8,lw=0,color='green',label='significant',rasterized=True)
    if len(K_nonT)>0 and ShowNonTargets:
        plt.scatter(K_nonT,z_nonT,s=8,lw=0,color='orange',alpha=0.15,label='non-targeting',rasterized=True)
    if GOI != 'none':
        plt.scatter(K_goi,z_goi,s=8,lw=0,color='red',label=GOI,rasterized=True)
        if annotate:
            # Label each gene-of-interest guide with its sgRNA ID.
            for label, x, y in zip(sgIDs_goi,K_goi,z_goi):
                plt.annotate(label,xy=(x,y),color='red',fontsize=4,fontweight='bold')
    ymax = 1.05*max(zScores); ymin = 1.05*min(zScores)
    plt.ylim([ymin,ymax])
    # x axis shows sgRNA rank in thousands (see kilos()).
    formatter = FuncFormatter(kilos)
    ax.xaxis.set_major_formatter(formatter)
    plt.xlabel('Ranked sgRNAs', fontsize=11)
    plt.ylabel('Fold-Change z-Score', fontsize=11)
    plt.tick_params(labelsize=11)
    PlotTitle = 'sgRNA '+ScreenType.capitalize()
    plt.title(PlotTitle,fontsize=12)
    # Legend placement depends on screen direction; legend markers are made
    # fully opaque regardless of the scatter alpha.
    if ScreenType == 'enrichment':
        leg = plt.legend(loc='upper left', prop={'size':6})
        for lh in leg.legendHandles: lh.set_alpha(1)
    elif ScreenType == 'depletion':
        leg = plt.legend(loc='upper right', prop={'size':6})
        for lh in leg.legendHandles: lh.set_alpha(1)
    plt.tight_layout()
    # Define file name
    # Suffixes encode the plot options used (_<GOI>, _IDs, _nonT).
    figurename = sample+'_'+'sgRNA_zScores.png'
    if GOI != 'none':
        figurename = figurename[:-4]+'_'+GOI+'.png'
        if annotate:
            figurename = figurename[:-4]+'_IDs.png'
    if ShowNonTargets:
        figurename = figurename[:-4]+'_nonT.png'
    # Save figure
    # Gene-of-interest plots go into a per-sample subdirectory.
    if GOI != 'none':
        if not os.path.exists(outputDir+'/'+sample+'_Highlighted_Genes'):
            os.makedirs(outputDir+'/'+sample+'_Highlighted_Genes')
        os.chdir(outputDir+'/'+sample+'_Highlighted_Genes')
    plt.savefig(figurename, dpi=res)
    if svg:
        plt.savefig(figurename[:-4]+'.svg')
    # ------------------------------------------------
    # Printing
    # ------------------------------------------------
    if GOI != 'none' and PrintHighlights:
        print('-----------------------------------------------------------')
        print('sgID\t\tFold-Change\tz-Score\t\tSignificant')
        print('-----------------------------------------------------------')
        if not K_goi:
            print('### ERROR: Gene name not found! ###')
        else:
            for k in K_goi:
                println = str(sgIDs[k])+'\t'+str(fc[k])+'\t'+ \
                        str(zScores[k])+'\t'+str(sig[k])
                print(println)
    # Final time stamp
    os.chdir(ScriptsDir)
    end_total = time.time()
    print('------------------------------------------------')
    print('Script completed.')
    sec_elapsed = end_total - start_total
    if sec_elapsed < 60:
        time_elapsed = sec_elapsed
        print('Time elapsed (Total) [secs]: ' + '%.3f' % time_elapsed +'\n')
    elif sec_elapsed < 3600:
        time_elapsed = sec_elapsed/60
        print('Time elapsed (Total) [mins]: ' + '%.3f' % time_elapsed +'\n')
    else:
        time_elapsed = sec_elapsed/3600
        print('Time elapsed (Total) [hours]: ' + '%.3f' % time_elapsed +'\n')
print('Time elapsed (Total) [hours]: ' + '%.3f' % time_elapsed +'\n')
if __name__ == "__main__":
    # Forward 1-4 positional command-line arguments straight through to
    # zScoreFC: sample [, GOI [, Annot [, NonT]]]. Any other arity is
    # silently ignored, matching the original explicit if/elif dispatch.
    cli_args = sys.argv[1:]
    if 1 <= len(cli_args) <= 4:
        zScoreFC(*cli_args)
"matplotlib.pyplot.title",
"numpy.mean",
"yaml.safe_load",
"glob.glob",
"matplotlib.pyplot.tick_params",
"pandas.read_table",
"matplotlib.pyplot.tight_layout",
"os.chdir",
"numpy.std",
"os.path.exists",
"numpy.log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylim",
"matplotlib.pypl... | [((358, 379), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (372, 379), False, 'import matplotlib\n'), ((852, 863), 'time.time', 'time.time', ([], {}), '()\n', (861, 863), False, 'import time\n'), ((1057, 1083), 'yaml.safe_load', 'yaml.safe_load', (['configFile'], {}), '(configFile)\n', (1071, 1083), False, 'import yaml\n'), ((2032, 2055), 'os.chdir', 'os.chdir', (['sgRNARanksDir'], {}), '(sgRNARanksDir)\n', (2040, 2055), False, 'import os\n'), ((2129, 2166), 'pandas.read_table', 'pandas.read_table', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (2146, 2166), False, 'import pandas\n'), ((2857, 2874), 'numpy.mean', 'numpy.mean', (['logfc'], {}), '(logfc)\n', (2867, 2874), False, 'import numpy\n'), ((2885, 2901), 'numpy.std', 'numpy.std', (['logfc'], {}), '(logfc)\n', (2894, 2901), False, 'import numpy\n'), ((3794, 3813), 'os.chdir', 'os.chdir', (['outputDir'], {}), '(outputDir)\n', (3802, 3813), False, 'import os\n'), ((3830, 3862), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.5, 2.9)'}), '(figsize=(3.5, 2.9))\n', (3842, 3862), True, 'import matplotlib.pyplot as plt\n'), ((3866, 3931), 'matplotlib.pyplot.plot', 'plt.plot', (['(0, L)', '(0, 0)'], {'ls': '"""--"""', 'color': '(51 / 255, 153 / 255, 1)'}), "((0, L), (0, 0), ls='--', color=(51 / 255, 153 / 255, 1))\n", (3874, 3931), True, 'import matplotlib.pyplot as plt\n'), ((3932, 4004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['K_rest', 'z_rest'], {'s': '(8)', 'lw': '(0)', 'color': '"""#d3d3d3"""', 'rasterized': '(True)'}), "(K_rest, z_rest, s=8, lw=0, color='#d3d3d3', rasterized=True)\n", (3943, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4097), 'matplotlib.pyplot.scatter', 'plt.scatter', (['K_sig', 'z_sig'], {'s': '(8)', 'lw': '(0)', 'color': '"""green"""', 'label': '"""significant"""', 'rasterized': '(True)'}), "(K_sig, z_sig, s=8, lw=0, color='green', label='significant',\n rasterized=True)\n", (4015, 4097), 
True, 'import matplotlib.pyplot as plt\n'), ((4574, 4596), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[ymin, ymax]'], {}), '([ymin, ymax])\n', (4582, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4613, 4633), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['kilos'], {}), '(kilos)\n', (4626, 4633), False, 'from matplotlib.ticker import FuncFormatter\n'), ((4690, 4730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked sgRNAs"""'], {'fontsize': '(11)'}), "('Ranked sgRNAs', fontsize=11)\n", (4700, 4730), True, 'import matplotlib.pyplot as plt\n'), ((4735, 4781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fold-Change z-Score"""'], {'fontsize': '(11)'}), "('Fold-Change z-Score', fontsize=11)\n", (4745, 4781), True, 'import matplotlib.pyplot as plt\n'), ((4786, 4815), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(11)'}), '(labelsize=11)\n', (4801, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4873, 4906), 'matplotlib.pyplot.title', 'plt.title', (['PlotTitle'], {'fontsize': '(12)'}), '(PlotTitle, fontsize=12)\n', (4882, 4906), True, 'import matplotlib.pyplot as plt\n'), ((5216, 5234), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5232, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5792, 5824), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figurename'], {'dpi': 'res'}), '(figurename, dpi=res)\n', (5803, 5824), True, 'import matplotlib.pyplot as plt\n'), ((6597, 6617), 'os.chdir', 'os.chdir', (['ScriptsDir'], {}), '(ScriptsDir)\n', (6605, 6617), False, 'import os\n'), ((6634, 6645), 'time.time', 'time.time', ([], {}), '()\n', (6643, 6645), False, 'import time\n'), ((2071, 2108), 'glob.glob', 'glob.glob', (["(sample + '_*sgRNAList.txt')"], {}), "(sample + '_*sgRNAList.txt')\n", (2080, 2108), False, 'import glob\n'), ((2811, 2829), 'numpy.log10', 'numpy.log10', (['fc[k]'], {}), '(fc[k])\n', (2822, 2829), False, 'import numpy\n'), ((3732, 3757), 'os.path.exists', 'os.path.exists', 
(['outputDir'], {}), '(outputDir)\n', (3746, 3757), False, 'import os\n'), ((3767, 3789), 'os.makedirs', 'os.makedirs', (['outputDir'], {}), '(outputDir)\n', (3778, 3789), False, 'import os\n'), ((4137, 4248), 'matplotlib.pyplot.scatter', 'plt.scatter', (['K_nonT', 'z_nonT'], {'s': '(8)', 'lw': '(0)', 'color': '"""orange"""', 'alpha': '(0.15)', 'label': '"""non-targeting"""', 'rasterized': '(True)'}), "(K_nonT, z_nonT, s=8, lw=0, color='orange', alpha=0.15, label=\n 'non-targeting', rasterized=True)\n", (4148, 4248), True, 'import matplotlib.pyplot as plt\n'), ((4267, 4344), 'matplotlib.pyplot.scatter', 'plt.scatter', (['K_goi', 'z_goi'], {'s': '(8)', 'lw': '(0)', 'color': '"""red"""', 'label': 'GOI', 'rasterized': '(True)'}), "(K_goi, z_goi, s=8, lw=0, color='red', label=GOI, rasterized=True)\n", (4278, 4344), True, 'import matplotlib.pyplot as plt\n'), ((4955, 5001), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'prop': "{'size': 6}"}), "(loc='upper left', prop={'size': 6})\n", (4965, 5001), True, 'import matplotlib.pyplot as plt\n'), ((5734, 5791), 'os.chdir', 'os.chdir', (["(outputDir + '/' + sample + '_Highlighted_Genes')"], {}), "(outputDir + '/' + sample + '_Highlighted_Genes')\n", (5742, 5791), False, 'import os\n'), ((5847, 5884), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figurename[:-4] + '.svg')"], {}), "(figurename[:-4] + '.svg')\n", (5858, 5884), True, 'import matplotlib.pyplot as plt\n'), ((4427, 4501), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x, y)', 'color': '"""red"""', 'fontsize': '(4)', 'fontweight': '"""bold"""'}), "(label, xy=(x, y), color='red', fontsize=4, fontweight='bold')\n", (4439, 4501), True, 'import matplotlib.pyplot as plt\n'), ((5104, 5151), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': "{'size': 6}"}), "(loc='upper right', prop={'size': 6})\n", (5114, 5151), True, 'import matplotlib.pyplot as plt\n'), ((5594, 5657), 'os.path.exists', 
'os.path.exists', (["(outputDir + '/' + sample + '_Highlighted_Genes')"], {}), "(outputDir + '/' + sample + '_Highlighted_Genes')\n", (5608, 5657), False, 'import os\n'), ((5662, 5722), 'os.makedirs', 'os.makedirs', (["(outputDir + '/' + sample + '_Highlighted_Genes')"], {}), "(outputDir + '/' + sample + '_Highlighted_Genes')\n", (5673, 5722), False, 'import os\n')] |
import csv
import pdb
from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report, confusion_matrix
import numpy as np
class AverageMeter(object):
    """Keeps track of the latest value plus a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all statistics accumulated so far."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in a new observation ``val`` with multiplicity ``n``."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Logger(object):
    """Tab-separated log writer: one header row, then one row per log() call."""

    def __init__(self, path, header):
        """Open ``path`` for writing and emit the header row.

        Args:
            path: Destination file path (truncated if it exists).
            header: Ordered list of column names; log() requires these keys.
        """
        self.log_file = open(path, 'w')
        self.logger = csv.writer(self.log_file, delimiter='\t')
        self.logger.writerow(header)
        self.header = header

    def __del__(self):
        # Bug fix: this method was named ``__del`` (name-mangled, never
        # invoked by the interpreter), so the file handle leaked. Renamed to
        # the real destructor so the file is closed on garbage collection.
        try:
            self.log_file.close()
        except AttributeError:
            pass  # __init__ failed before the file was opened

    def log(self, values):
        """Write one row; ``values`` must contain every header column.

        Args:
            values: Mapping from column name to value; extra keys are ignored.
        """
        write_values = []
        for col in self.header:
            assert col in values
            write_values.append(values[col])
        self.logger.writerow(write_values)
        self.log_file.flush()  # keep the file readable while training runs
class Queue:
    """Buffer of per-class score vectors with rolling statistics.

    The newest item sits at index 0. ``median``, ``ma`` (moving average) and
    ``ewma`` (exponentially weighted moving average) are recomputed on every
    enqueue over the most recent ``max_size`` entries.
    """

    def __init__(self, max_size, n_classes):
        # Pre-fill with zero vectors so the statistics are defined from the
        # very first enqueue.
        self.queue = [[0.0] * n_classes for _ in range(max_size)]
        self.max_size = max_size
        self.median = None
        self.ma = None
        self.ewma = None

    def enqueue(self, data):
        """Push ``data`` to the front and refresh median / ma / ewma."""
        self.queue.insert(0, data)
        self.median = self._median()
        self.ma = self._ma()
        self.ewma = self._ewma()
        return True

    def dequeue(self):
        """Pop and return the oldest element, or a message if empty."""
        if not self.queue:
            return "Queue Empty!"
        return self.queue.pop()

    def size(self):
        """Current number of stored elements."""
        return len(self.queue)

    def printQueue(self):
        """Return the raw underlying list (newest first)."""
        return self.queue

    def _window(self):
        # Most recent ``max_size`` entries as a 2-D array (rows = entries).
        return np.array(self.queue[:self.max_size])

    def _ma(self):
        # Plain per-class mean over the window.
        return self._window().mean(axis=0)

    def _median(self):
        # Per-class median over the window.
        return np.median(self._window(), axis=0)

    def _ewma(self):
        # Exponential weights: newest entry gets the largest weight.
        w = np.exp(np.linspace(-1.0, 0.0, self.max_size))
        w /= w.sum()
        avg = w.reshape(1, self.max_size).dot(self._window())
        return avg.reshape(avg.shape[1],)
def LevenshteinDistance(a,b):
    # Classic dynamic-programming edit distance. The original author
    # dedicated this implementation to the public domain (CC0 1.0); see
    # <http://creativecommons.org/publicdomain/zero/1.0>.
    "Calculates the Levenshtein distance between a and b."
    # Keep the shorter string in ``a`` so the rolling row only needs
    # O(min(len(a), len(b))) space.
    if len(a) > len(b):
        a, b = b, a
    short_len, long_len = len(a), len(b)
    row = list(range(short_len + 1))
    for i in range(1, long_len + 1):
        prev, row = row, [i] + [0] * short_len
        for j in range(1, short_len + 1):
            insert_cost = prev[j] + 1
            delete_cost = row[j - 1] + 1
            subst_cost = prev[j - 1] + (0 if a[j - 1] == b[i - 1] else 1)
            row[j] = min(insert_cost, delete_cost, subst_cost)
    return row[short_len]
def load_value_file(file_path):
    """Read a file containing a single number and return it as a float."""
    with open(file_path, 'r') as input_file:
        return float(input_file.read().rstrip('\n\r'))
def calculate_accuracy(outputs, targets):
    """Fraction of samples whose top-1 prediction matches the target.

    ``outputs`` is a (batch, num_classes) score tensor; ``targets`` holds
    the ground-truth class index per sample.
    """
    num_samples = targets.size(0)
    top1 = outputs.topk(1, 1, True)[1].t()
    hits = top1.eq(targets.view(1, -1)).float().sum().item()
    return hits / num_samples
def calculate_precision(outputs, targets):
    """Macro-averaged precision of the top-1 predictions.

    Args:
        outputs: (batch, num_classes) score tensor.
        targets: (batch,) ground-truth class indices.

    Returns:
        float: precision averaged uniformly over classes.
    """
    # Fix: removed the unused ``batch_size`` local the original computed.
    _, pred = outputs.topk(1, 1, True)
    pred = pred.t()
    return precision_score(targets.view(-1).cpu(), pred.view(-1).cpu(), average='macro')
def calculate_recall(outputs, targets):
    """Macro-averaged recall of the top-1 predictions.

    Args:
        outputs: (batch, num_classes) score tensor.
        targets: (batch,) ground-truth class indices.

    Returns:
        float: recall averaged uniformly over classes.
    """
    # Fix: removed the unused ``batch_size`` local the original computed.
    _, pred = outputs.topk(1, 1, True)
    pred = pred.t()
    return recall_score(targets.view(-1).cpu(), pred.view(-1).cpu(), average='macro')
| [
"numpy.zeros",
"numpy.array",
"csv.writer",
"numpy.linspace"
] | [((674, 715), 'csv.writer', 'csv.writer', (['self.log_file'], {'delimiter': '"""\t"""'}), "(self.log_file, delimiter='\\t')\n", (684, 715), False, 'import csv\n'), ((2077, 2113), 'numpy.array', 'np.array', (['self.queue[:self.max_size]'], {}), '(self.queue[:self.max_size])\n', (2085, 2113), True, 'import numpy as np\n'), ((2201, 2238), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(0.0)', 'self.max_size'], {}), '(-1.0, 0.0, self.max_size)\n', (2212, 2238), True, 'import numpy as np\n'), ((2327, 2363), 'numpy.array', 'np.array', (['self.queue[:self.max_size]'], {}), '(self.queue[:self.max_size])\n', (2335, 2363), True, 'import numpy as np\n'), ((1964, 2000), 'numpy.array', 'np.array', (['self.queue[:self.max_size]'], {}), '(self.queue[:self.max_size])\n', (1972, 2000), True, 'import numpy as np\n'), ((1190, 1234), 'numpy.zeros', 'np.zeros', (['(max_size, n_classes)'], {'dtype': 'float'}), '((max_size, n_classes), dtype=float)\n', (1198, 1234), True, 'import numpy as np\n')] |
# based on https://github.com/stelzner/monet
# License: MIT
# Author: <NAME>
import argparse
import torch
from torch import nn, optim
import torch.distributions as dists
import numpy as np
from PIL import Image
import os
from utils.os_utils import make_dir
this_file_dir = os.path.dirname(os.path.abspath(__file__)) + '/'
def single_conv(in_channels, out_channels):
    """Build a 3x3 convolution (padding 1) followed by batch-norm and ReLU.

    Spatial dimensions are preserved; only the channel count changes.
    """
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class UNet(nn.Module):
    """A small U-Net: ``num_blocks`` conv stages down, mirrored back up.

    Each stage is a 3x3 conv + batch-norm + ReLU; skip connections
    concatenate the pre-pool activation of each contracting stage onto the
    matching upsampled tensor in the expanding path.
    """

    @staticmethod
    def _conv_block(in_ch, out_ch):
        # Same layout as the module-level ``single_conv`` helper, inlined so
        # the class is self-contained: 3x3 conv (padding 1) + BN + ReLU.
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def __init__(self, num_blocks, in_channels, out_channels, channel_base=64):
        super().__init__()
        self.num_blocks = num_blocks
        # Contracting path: channel count doubles each stage.
        self.down_convs = nn.ModuleList()
        ch_in = in_channels
        for level in range(num_blocks):
            ch_out = channel_base * 2 ** level
            self.down_convs.append(self._conv_block(ch_in, ch_out))
            ch_in = ch_out
        # Expanding path: transposed convs halve channels and double H/W.
        self.tconvs = nn.ModuleList(
            nn.ConvTranspose2d(channel_base * 2 ** level,
                               channel_base * 2 ** (level - 1),
                               2, stride=2)
            for level in range(num_blocks - 1, 0, -1))
        self.up_convs = nn.ModuleList(
            self._conv_block(channel_base * 2 ** (level + 1),
                             channel_base * 2 ** level)
            for level in range(num_blocks - 2, -1, -1))
        self.final_conv = nn.Conv2d(channel_base, out_channels, 1)

    def forward(self, x):
        skips = []
        cur = x
        for down in self.down_convs[:-1]:
            cur = down(cur)
            skips.append(cur)          # saved for the skip connection
            cur = nn.MaxPool2d(2)(cur)
        cur = self.down_convs[-1](cur)  # bottleneck, no pooling
        for i, (tconv, up) in enumerate(zip(self.tconvs, self.up_convs)):
            cur = tconv(cur)
            cur = up(torch.cat((cur, skips[-i - 1]), 1))
        return self.final_conv(cur)
class AttentionNet(nn.Module):
    """Attention network producing one soft scope split per step.

    A U-Net maps the image concatenated with the current scope to a
    two-channel logit map: after a softmax over channels, channel 0 is the
    portion of the scope claimed by this step's mask and channel 1 is what
    remains for later steps.
    """

    def __init__(self, num_blocks, channel_base):
        super().__init__()
        self.unet = UNet(num_blocks=num_blocks,
                         in_channels=4,
                         out_channels=2,
                         channel_base=channel_base)

    def forward(self, x, scope):
        logits = self.unet(torch.cat((x, scope), 1))
        alpha = torch.softmax(logits, 1)
        # alpha[:, 0] is claimed by this mask, alpha[:, 1] stays in scope.
        mask = scope * alpha[:, 0:1]
        remaining_scope = scope * alpha[:, 1:2]
        return mask, remaining_scope
class EncoderNet(nn.Module):
    """Convolutional VAE encoder producing a diagonal-Gaussian latent.

    Three strided convolutions shrink the input, a fully connected layer
    condenses it, and two linear heads emit the latent mean and
    log-variance.
    """

    def __init__(self, width, height, device, latent_size=16, full_connected_size=256, input_channels=4,
                 kernel_size=3, encoder_stride=2, conv_size1=32, conv_size2=64):
        super().__init__()
        self.device = device
        self.conv_size1 = conv_size1
        self.conv_size2 = conv_size2
        # conv -> ReLU stack: input_channels -> conv_size1 -> conv_size2 -> conv_size2.
        channel_plan = [(input_channels, conv_size1),
                        (conv_size1, conv_size2),
                        (conv_size2, conv_size2)]
        layers = []
        for c_in, c_out in channel_plan:
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out,
                                    kernel_size=kernel_size, stride=encoder_stride))
            layers.append(nn.ReLU(inplace=True))
        self.convs = nn.Sequential(*layers)
        # Spatial size after the three convolutions. NOTE(review): this
        # formula assumes kernel_size=3 and encoder_stride=2 (the defaults).
        red_width, red_height = width, height
        for _ in range(3):
            red_width = (red_width - 1) // 2
            red_height = (red_height - 1) // 2
        self.red_width = red_width
        self.red_height = red_height
        self.fc1 = nn.Sequential(
            nn.Linear(conv_size2 * red_width * red_height, full_connected_size),
            nn.ReLU(inplace=True),
        )
        # Gaussian parameter heads: mean and log-variance.
        self.fc21 = nn.Linear(full_connected_size, latent_size)
        self.fc22 = nn.Linear(full_connected_size, latent_size)

    def forward(self, x):
        features = self.convs(x)
        flat = features.reshape(-1, self.red_width * self.red_height * self.conv_size2)
        hidden = self.fc1(flat)
        return self.fc21(hidden), self.fc22(hidden)
class DecoderNet(nn.Module):
    """Spatial-broadcast VAE decoder.

    The latent vector is tiled over a (height+4, width+4) grid, two fixed
    coordinate channels are appended, and three padding-free convolutions
    shrink the grid back to (height, width) while producing the output
    channels. NOTE(review): the sizes only line up for kernel_size=3 and
    decoder_stride=1 (the defaults).
    """

    def __init__(self, width, height, device, latent_size, output_channels=4,
                 kernel_size=3, conv_size1=32, decoder_stride=1):
        super().__init__()
        self.device = device
        self.height = height
        self.width = width
        self.latent_size = latent_size
        self.convs = nn.Sequential(
            nn.Conv2d(in_channels=latent_size + 2, out_channels=conv_size1,
                      kernel_size=kernel_size, stride=decoder_stride),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=conv_size1, out_channels=conv_size1,
                      kernel_size=kernel_size, stride=decoder_stride),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=conv_size1, out_channels=output_channels, kernel_size=1),
        )
        # Constant (y, x) coordinate grid in [-1, 1], kept as a buffer so it
        # follows the module across devices without being trained.
        row_coords, col_coords = torch.meshgrid(
            torch.linspace(-1, 1, height + 4),
            torch.linspace(-1, 1, width + 4))
        grid = torch.stack((row_coords, col_coords)).unsqueeze(0)
        self.register_buffer('coord_map_const', grid)

    def forward(self, z):
        batch = z.shape[0]
        # Broadcast each latent vector over the padded spatial grid.
        tiled = z.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, self.height + 4, self.width + 4)
        coords = self.coord_map_const.repeat(batch, 1, 1, 1)
        return self.convs(torch.cat((tiled, coords), 1))
class Monet_VAE(nn.Module):
    """MONet model: recurrent attention carves the image into slot masks,
    and a shared VAE encodes/decodes each slot independently."""

    def __init__(self, height, width, device, latent_size, num_blocks, channel_base, num_slots,
                 full_connected_size, color_channels, kernel_size, encoder_stride, decoder_stride,
                 conv_size1, conv_size2):
        super().__init__()
        self.device = device
        self.num_slots = num_slots
        self.latent_size = latent_size
        self.height = height
        self.width = width
        self.color_channels = color_channels
        self.attention = AttentionNet(num_blocks=num_blocks, channel_base=channel_base)
        self.encoder = EncoderNet(width=width, height=height, device=device, latent_size=latent_size,
                                  full_connected_size=full_connected_size,
                                  input_channels=color_channels + 1,
                                  kernel_size=kernel_size, encoder_stride=encoder_stride,
                                  conv_size1=conv_size1, conv_size2=conv_size2)
        self.decoder = DecoderNet(width=width, height=height, device=device, latent_size=latent_size,
                                  output_channels=color_channels + 1, kernel_size=kernel_size,
                                  conv_size1=conv_size1, decoder_stride=decoder_stride)

    def _encoder_step(self, x, mask):
        # Encode the image conditioned on one slot mask.
        return self.encoder(torch.cat((x, mask), 1))

    def get_masks(self, x):
        """Return the attention masks only, shaped (B, S, 1, H, W)."""
        scope = torch.ones_like(x[:, 0:1])
        slot_masks = []
        for _ in range(self.num_slots - 1):
            mask, scope = self.attention(x, scope)
            slot_masks.append(mask)
        # The final slot absorbs whatever scope is left.
        slot_masks.append(scope)
        stacked = torch.stack(slot_masks)          # (S, B, 1, H, W)
        return stacked.permute([1, 0, 2, 3, 4])    # (B, S, 1, H, W)

    def encode(self, x):
        """Return per-slot (mu, logvar) lists plus the attention masks."""
        scope = torch.ones_like(x[:, 0:1])
        masks = []
        for _ in range(self.num_slots - 1):
            mask, scope = self.attention(x, scope)
            masks.append(mask)
        masks.append(scope)
        mu_s, logvar_s = [], []
        for mask in masks:
            mu, logvar = self._encoder_step(x, mask)
            mu_s.append(mu)
            logvar_s.append(logvar)
        return mu_s, logvar_s, masks

    def _reparameterize(self, mu, logvar):
        # Standard VAE reparameterization trick.
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def _decoder_step(self, z):
        # First 3 channels: RGB reconstruction; channel 3: mask logits.
        decoded = self.decoder(z)
        return torch.sigmoid(decoded[:, :3]), decoded[:, 3]

    def decode(self, z_s, masks):
        """Decode each slot and blend them with the attention masks."""
        full_reconstruction = torch.zeros(
            (masks[0].shape[0], self.color_channels, self.width, self.height)).to(self.device)
        x_recon_s, mask_pred_s = [], []
        for z, mask in zip(z_s, masks):
            x_recon, mask_pred = self._decoder_step(z)
            x_recon_s.append(x_recon)
            mask_pred_s.append(mask_pred)
            full_reconstruction = full_reconstruction + x_recon * mask
        return full_reconstruction, x_recon_s, mask_pred_s

    def forward(self, x):
        mu_s, logvar_s, masks = self.encode(x)
        z_s = [self._reparameterize(m, lv) for m, lv in zip(mu_s, logvar_s)]
        full_reconstruction, x_recon_s, mask_pred_s = self.decode(z_s, masks)
        return mu_s, logvar_s, masks, full_reconstruction, x_recon_s, mask_pred_s
def loss_function(x, x_recon_s, masks, mask_pred_s, mu_s, logvar_s, beta, gamma, bg_sigma, fg_sigma, device):
    """MONet per-sample loss: gamma * mask-KL + reconstruction NLL + beta * latent-KL.

    x:           input batch (B, 3, H, W)
    x_recon_s:   per-slot reconstructions, each (B, 3, H, W)
    masks:       per-slot attention masks, each (B, 1, H, W)
    mask_pred_s: per-slot decoded mask logits, each (B, H, W)
    mu_s, logvar_s: per-slot latent Gaussian parameters
    Returns a loss tensor of shape (batch,).
    """
    batch_size = x.shape[0]
    neg_log_lik = torch.zeros(batch_size).to(device)
    latent_kl = torch.zeros(batch_size).to(device)
    for slot, mask in enumerate(masks):
        # KL(q(z|x) || N(0, I)) for this slot's latent.
        latent_kl += -0.5 * torch.sum(
            1 + logvar_s[slot] - mu_s[slot].pow(2) - logvar_s[slot].exp(), dim=1)
        # Background slot (index 0) has its own observation noise level.
        sigma = bg_sigma if slot == 0 else fg_sigma
        component = dists.Normal(x_recon_s[slot], sigma)
        # Mask-weighted log-likelihood log p_theta(x | z_k).
        weighted_log_p = component.log_prob(x) * mask
        neg_log_lik += -torch.sum(weighted_log_p, [1, 2, 3])
    all_masks = torch.cat(masks, 1)                      # (B, S, H, W)
    q_masks = dists.Categorical(probs=torch.transpose(all_masks, 1, 3))
    q_masks_recon = dists.Categorical(logits=torch.stack(mask_pred_s, 3))
    # Replace exact zeros in the reconstructed distribution so that
    # kl_divergence cannot blow up to inf.
    smallest_num = torch.finfo(q_masks_recon.probs.dtype).tiny
    q_masks_recon.probs[q_masks_recon.probs == 0.] = smallest_num
    mask_kl = torch.sum(dists.kl_divergence(q_masks, q_masks_recon), [1, 2])
    return gamma * mask_kl + neg_log_lik + beta * latent_kl
def train(epoch, model, optimizer, device, log_interval, train_file, batch_size, beta, gamma, bg_sigma, fg_sigma):
    """Run one training epoch of the MONet VAE.

    Loads the whole .npy dataset at ``train_file``, shuffles it, caps the
    epoch at 12800 samples, and optimizes ``model`` batch by batch with
    ``loss_function``.

    Fix vs. the original: the progress percentage divided the batch index
    by the number of *samples* (``len(data_set)``) instead of the number of
    *batches*, so it always printed ~0%.
    """
    model.train()
    train_loss = 0
    data_set = np.load(train_file)
    data_size = len(data_set)
    # Shuffle sample indices so each epoch visits the data in a new order.
    idx_set = np.arange(data_size)
    np.random.shuffle(idx_set)
    # Cap the epoch at 12800 samples; assumes 12800 % batch_size == 0.
    idx_set = idx_set[:12800]
    batches = np.split(idx_set, len(idx_set) / batch_size)
    for batch_idx, idx_select in enumerate(batches):
        data = torch.from_numpy(data_set[idx_select]).float().to(device)
        data /= 255                        # bytes -> [0, 1]
        data = data.permute([0, 3, 1, 2])  # NHWC -> NCHW
        optimizer.zero_grad()
        mu_s, logvar_s, masks, full_reconstruction, x_recon_s, mask_pred_s = model(data)
        loss_batch = loss_function(data, x_recon_s, masks, mask_pred_s, mu_s, logvar_s,
                                   beta, gamma, bg_sigma, fg_sigma, device=device)
        loss = torch.mean(loss_batch)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), data_size,
                100. * (batch_idx + 1) / len(batches),  # fixed: fraction of batches done
                loss.item() / len(data)))
            print('Loss: ', loss.item() / len(data))
    # NOTE(review): divides the sum of per-batch mean losses by the sample
    # count, not the batch count — kept as-is for comparability of logs.
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / data_size))
def numpify(tensor):
    """Return ``tensor`` as a NumPy array: moved to CPU and detached from autograd."""
    on_cpu = tensor.cpu()
    return on_cpu.detach().numpy()
def visualize_masks(imgs, masks, recons, file_name):
    """Render a diagnostic image strip and save it as a PNG at ``file_name``.

    Rows (top to bottom): input images, segmentation map (argmax slot per
    pixel, colored from a fixed palette), reconstructions, then one row
    per slot mask. All inputs are NumPy arrays in NCHW layout, values in
    [0, 1].

    NOTE: ``imgs`` and ``masks`` are rescaled to [0, 255] in place, so the
    caller's arrays are mutated (unchanged from the original behavior).
    """
    recons = np.clip(recons, 0., 1.)
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255), (255, 0, 255), (255, 255, 0), (0, 0, 0), (0, 127, 255), (0,255, 127)]
    colors.extend([(c[0]//2, c[1]//2, c[2]//2) for c in colors])
    colors.extend([(c[0]//4, c[1]//4, c[2]//4) for c in colors])
    masks_argmax = np.argmax(masks, 1)                        # (B, H, W)
    # Vectorized palette lookup replaces the original per-pixel Python
    # triple loop (same result, orders of magnitude faster).
    palette = np.asarray(colors, dtype=imgs.dtype)            # (36, 3)
    seg_maps = palette[masks_argmax].transpose(0, 3, 1, 2)    # (B, 3, H, W)
    imgs *= 255.0
    recons *= 255.0
    masks *= 255.0
    masks_ims = [np.stack([masks[:, i, :, :]]*3, axis=1) for i in range(masks.shape[1])]
    masks_ims = [np.concatenate(np.transpose(m, (0, 2, 3, 1)), axis=1) for m in masks_ims]
    imgs = np.transpose(imgs, (0, 2, 3, 1))
    imgs = np.concatenate(imgs, axis=1)
    seg_maps = np.transpose(seg_maps, (0, 2, 3, 1))
    seg_maps = np.concatenate(seg_maps, axis=1)
    recons = np.transpose(recons, (0, 2, 3, 1))
    recons = np.concatenate(recons, axis=1)
    all_list = [imgs, seg_maps, recons]+masks_ims
    all_im_array = np.concatenate(all_list, axis=0)
    all_im = Image.fromarray(all_im_array.astype(np.uint8))
    all_im.save(file_name)
def train_Vae(batch_size, img_size, latent_size, train_file, vae_weights_path, beta, gamma, bg_sigma, fg_sigma,
              epochs=100, no_cuda=False, seed=1, log_interval=100, load=False,
              num_blocks=5, channel_base=64, num_slots=6,
              full_connected_size=256, color_channels=3, kernel_size=3, encoder_stride=2, decoder_stride=1,
              conv_size1=32, conv_size2=64):
    """Train a MONet VAE for ``epochs`` epochs, checkpointing to
    ``vae_weights_path`` (plus a per-epoch snapshot) every 5 epochs and at
    the end. When ``load`` is True, training resumes from the checkpoint
    at ``vae_weights_path``.

    Refactor: the model/optimizer construction was duplicated verbatim in
    the fresh-start and resume branches, and the checkpoint dict literal
    appeared three times; both are now built in one place.
    """
    cuda = not no_cuda and torch.cuda.is_available()
    torch.manual_seed(seed)
    device = torch.device("cuda" if cuda else "cpu")

    def _build_model_and_optimizer():
        # Single construction point for the (model, optimizer) pair.
        m = Monet_VAE(height=img_size, width=img_size, device=device, latent_size=latent_size,
                      num_blocks=num_blocks, channel_base=channel_base, num_slots=num_slots,
                      full_connected_size=full_connected_size, color_channels=color_channels,
                      kernel_size=kernel_size, encoder_stride=encoder_stride,
                      decoder_stride=decoder_stride, conv_size1=conv_size1,
                      conv_size2=conv_size2).to(device)
        return m, optim.RMSprop(m.parameters(), lr=1e-4)

    def _save_checkpoint(path, epoch_num):
        torch.save({
            'epoch': epoch_num,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }, path)

    model, optimizer = _build_model_and_optimizer()
    if load:
        # Resume from an existing checkpoint.
        checkpoint = torch.load(vae_weights_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch'] + 1
    else:
        start_epoch = 1
    for epoch in range(start_epoch, epochs + start_epoch):
        train(epoch=epoch, model=model, optimizer=optimizer, device=device, log_interval=log_interval,
              train_file=train_file, batch_size=batch_size, beta=beta,
              gamma=gamma, bg_sigma=bg_sigma, fg_sigma=fg_sigma)
        if not (epoch % 5) or epoch == 1:
            compare_with_data_set(model, device, filename_suffix='epoch_{}'.format(epoch),
                                  latent_size=latent_size, train_file=train_file)
            print('Saving Progress!')
            _save_checkpoint(vae_weights_path, epoch)
            # Additionally keep a per-epoch snapshot for later inspection.
            _save_checkpoint(vae_weights_path + '_epoch_' + str(epoch), epoch)
    _save_checkpoint(vae_weights_path, epoch)
def compare_with_data_set(model, device, filename_suffix, latent_size, train_file):
    """Reconstruct 10 random training images with ``model`` and save the
    mask/reconstruction visualization under ``results/``."""
    data_set = np.load(train_file)
    data_size = len(data_set)
    sample_idx = np.random.randint(0, data_size, size=10)
    batch = data_set[sample_idx]
    print(batch.shape)
    with torch.no_grad():
        batch = torch.from_numpy(batch).float().to(device)
        batch /= 255                          # bytes -> [0, 1]
        batch = batch.permute([0, 3, 1, 2])   # NHWC -> NCHW
        mu_s, logvar_s, masks, full_reconstruction, x_recon_s, mask_pred_s = model(batch)
        visualize_masks(imgs=numpify(batch), masks=numpify(torch.cat(masks, dim=1)),
                        recons=numpify(full_reconstruction),
                        file_name=this_file_dir+'results/reconstruction_{}.png'.format(filename_suffix))
def load_Vae(path, img_size, latent_size, no_cuda=False, seed=1, num_blocks=5, channel_base=64, num_slots=6,
             full_connected_size=256, color_channels=3, kernel_size=3, encoder_stride=2, decoder_stride=1,
             conv_size1=32, conv_size2=64):
    """Rebuild a Monet_VAE with the given architecture and load trained
    weights from the checkpoint at ``path``."""
    use_cuda = not no_cuda and torch.cuda.is_available()
    torch.manual_seed(seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    model = Monet_VAE(height=img_size, width=img_size, device=device, latent_size=latent_size,
                      num_blocks=num_blocks, channel_base=channel_base, num_slots=num_slots,
                      full_connected_size=full_connected_size, color_channels=color_channels,
                      kernel_size=kernel_size, encoder_stride=encoder_stride,
                      decoder_stride=decoder_stride, conv_size1=conv_size1,
                      conv_size2=conv_size2).to(device)
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model_state_dict'])
    return model
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='gym env id', type=str)
    parser.add_argument('--task', help='use monet for training or testing', type=str,
                        choices=['train', 'test'], required=True)
    # Builtin int/float converters replace np.float / np.int32: np.float was
    # removed in NumPy 1.24 (AttributeError at parse time), and batch_size
    # must be an integer for the dataset to split into equal batches.
    parser.add_argument('--batch_size', help='number of batch to train', type=int, default=32)
    parser.add_argument('--train_epochs', help='number of epochs to train vae', type=int, default=40)
    parser.add_argument('--img_size', help='size image in pixels', type=int, default=64)
    parser.add_argument('--latent_size', help='latent size to train the VAE', type=int, default=6)
    parser.add_argument('--num_slots', help='number of slots', type=int, default=6)
    parser.add_argument('--beta', help='beta val for the reconstruction loss', type=float, default=8.)
    parser.add_argument('--gamma', help='gamma val for the mask loss', type=float, default=5.)
    parser.add_argument('--bg_sigma', help='', type=float, default=0.09)
    parser.add_argument('--fg_sigma', help='', type=float, default=0.11)
    args = parser.parse_args()
    # Resolve data folders and checkpoint paths for the chosen environment.
    make_dir(this_file_dir+'results/', clear=False)
    base_data_dir = this_file_dir + '../data/'
    data_dir = base_data_dir + args.env + '/'
    train_file = data_dir + 'all_set.npy'
    weights_path = data_dir + 'all_sb_model'
    if args.task == 'train':
        train_Vae(epochs=args.train_epochs, batch_size=args.batch_size, img_size=args.img_size,
                  latent_size=args.latent_size, train_file=train_file,
                  vae_weights_path=weights_path, beta=args.beta, gamma=args.gamma, bg_sigma=args.bg_sigma,
                  fg_sigma=args.fg_sigma, load=False, num_slots=args.num_slots)
    else:
        device = torch.device("cuda")
        model = load_Vae(path=weights_path, img_size=args.img_size, latent_size=args.latent_size)
        compare_with_data_set(model=model, device=device, latent_size=args.latent_size,
                              filename_suffix="test", train_file=train_file)
| [
"numpy.load",
"torch.distributions.Categorical",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.cat",
"numpy.clip",
"numpy.random.randint",
"numpy.arange",
"torch.device",
"torch.no_grad",
"utils.os_utils.make_dir",
"os.path.abspath",
"numpy.zeros_like",
"torch.load",
"numpy.transpose... | [((10187, 10206), 'torch.cat', 'torch.cat', (['masks', '(1)'], {}), '(masks, 1)\n', (10196, 10206), False, 'import torch\n'), ((10222, 10250), 'torch.transpose', 'torch.transpose', (['masks', '(1)', '(3)'], {}), '(masks, 1, 3)\n', (10237, 10250), False, 'import torch\n'), ((10265, 10298), 'torch.distributions.Categorical', 'dists.Categorical', ([], {'probs': 'tr_masks'}), '(probs=tr_masks)\n', (10282, 10298), True, 'import torch.distributions as dists\n'), ((10324, 10351), 'torch.stack', 'torch.stack', (['mask_pred_s', '(3)'], {}), '(mask_pred_s, 3)\n', (10335, 10351), False, 'import torch\n'), ((10372, 10416), 'torch.distributions.Categorical', 'dists.Categorical', ([], {'logits': 'stacked_mask_preds'}), '(logits=stacked_mask_preds)\n', (10389, 10416), True, 'import torch.distributions as dists\n'), ((10611, 10654), 'torch.distributions.kl_divergence', 'dists.kl_divergence', (['q_masks', 'q_masks_recon'], {}), '(q_masks, q_masks_recon)\n', (10630, 10654), True, 'import torch.distributions as dists\n'), ((10670, 10697), 'torch.sum', 'torch.sum', (['kl_masks', '[1, 2]'], {}), '(kl_masks, [1, 2])\n', (10679, 10697), False, 'import torch\n'), ((11027, 11046), 'numpy.load', 'np.load', (['train_file'], {}), '(train_file)\n', (11034, 11046), True, 'import numpy as np\n'), ((11157, 11177), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (11166, 11177), True, 'import numpy as np\n'), ((11182, 11208), 'numpy.random.shuffle', 'np.random.shuffle', (['idx_set'], {}), '(idx_set)\n', (11199, 11208), True, 'import numpy as np\n'), ((12492, 12517), 'numpy.clip', 'np.clip', (['recons', '(0.0)', '(1.0)'], {}), '(recons, 0.0, 1.0)\n', (12499, 12517), True, 'import numpy as np\n'), ((12799, 12818), 'numpy.zeros_like', 'np.zeros_like', (['imgs'], {}), '(imgs)\n', (12812, 12818), True, 'import numpy as np\n'), ((12838, 12857), 'numpy.argmax', 'np.argmax', (['masks', '(1)'], {}), '(masks, 1)\n', (12847, 12857), True, 'import numpy as np\n'), 
((13294, 13326), 'numpy.transpose', 'np.transpose', (['imgs', '(0, 2, 3, 1)'], {}), '(imgs, (0, 2, 3, 1))\n', (13306, 13326), True, 'import numpy as np\n'), ((13338, 13366), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(1)'}), '(imgs, axis=1)\n', (13352, 13366), True, 'import numpy as np\n'), ((13382, 13418), 'numpy.transpose', 'np.transpose', (['seg_maps', '(0, 2, 3, 1)'], {}), '(seg_maps, (0, 2, 3, 1))\n', (13394, 13418), True, 'import numpy as np\n'), ((13434, 13466), 'numpy.concatenate', 'np.concatenate', (['seg_maps'], {'axis': '(1)'}), '(seg_maps, axis=1)\n', (13448, 13466), True, 'import numpy as np\n'), ((13480, 13514), 'numpy.transpose', 'np.transpose', (['recons', '(0, 2, 3, 1)'], {}), '(recons, (0, 2, 3, 1))\n', (13492, 13514), True, 'import numpy as np\n'), ((13528, 13558), 'numpy.concatenate', 'np.concatenate', (['recons'], {'axis': '(1)'}), '(recons, axis=1)\n', (13542, 13558), True, 'import numpy as np\n'), ((13628, 13660), 'numpy.concatenate', 'np.concatenate', (['all_list'], {'axis': '(0)'}), '(all_list, axis=0)\n', (13642, 13660), True, 'import numpy as np\n'), ((14207, 14230), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (14224, 14230), False, 'import torch\n'), ((14245, 14284), 'torch.device', 'torch.device', (["('cuda' if cuda else 'cpu')"], {}), "('cuda' if cuda else 'cpu')\n", (14257, 14284), False, 'import torch\n'), ((16953, 16972), 'numpy.load', 'np.load', (['train_file'], {}), '(train_file)\n', (16960, 16972), True, 'import numpy as np\n'), ((17013, 17053), 'numpy.random.randint', 'np.random.randint', (['(0)', 'data_size'], {'size': '(10)'}), '(0, data_size, size=10)\n', (17030, 17053), True, 'import numpy as np\n'), ((17885, 17908), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (17902, 17908), False, 'import torch\n'), ((17922, 17961), 'torch.device', 'torch.device', (["('cuda' if cuda else 'cpu')"], {}), "('cuda' if cuda else 'cpu')\n", (17934, 17961), False, 'import 
torch\n'), ((18425, 18441), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (18435, 18441), False, 'import torch\n'), ((18559, 18584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18582, 18584), False, 'import argparse\n'), ((19767, 19816), 'utils.os_utils.make_dir', 'make_dir', (["(this_file_dir + 'results/')"], {'clear': '(False)'}), "(this_file_dir + 'results/', clear=False)\n", (19775, 19816), False, 'from utils.os_utils import make_dir\n'), ((290, 315), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (305, 315), False, 'import os\n'), ((403, 453), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)'], {'padding': '(1)'}), '(in_channels, out_channels, 3, padding=1)\n', (412, 453), False, 'from torch import nn, optim\n'), ((463, 491), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (477, 491), False, 'from torch import nn, optim\n'), ((501, 522), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (508, 522), False, 'from torch import nn, optim\n'), ((723, 738), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (736, 738), False, 'from torch import nn, optim\n'), ((1019, 1034), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1032, 1034), False, 'from torch import nn, optim\n'), ((1315, 1330), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1328, 1330), False, 'from torch import nn, optim\n'), ((1496, 1536), 'torch.nn.Conv2d', 'nn.Conv2d', (['channel_base', 'out_channels', '(1)'], {}), '(channel_base, out_channels, 1)\n', (1505, 1536), False, 'from torch import nn, optim\n'), ((2361, 2385), 'torch.cat', 'torch.cat', (['(x, scope)', '(1)'], {}), '((x, scope), 1)\n', (2370, 2385), False, 'import torch\n'), ((2434, 2458), 'torch.softmax', 'torch.softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (2447, 2458), False, 'import torch\n'), ((4018, 4061), 'torch.nn.Linear', 'nn.Linear', 
(['full_connected_size', 'latent_size'], {}), '(full_connected_size, latent_size)\n', (4027, 4061), False, 'from torch import nn, optim\n'), ((4082, 4125), 'torch.nn.Linear', 'nn.Linear', (['full_connected_size', 'latent_size'], {}), '(full_connected_size, latent_size)\n', (4091, 4125), False, 'from torch import nn, optim\n'), ((5203, 5241), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', '(self.height + 4)'], {}), '(-1, 1, self.height + 4)\n', (5217, 5241), False, 'import torch\n'), ((5255, 5292), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', '(self.width + 4)'], {}), '(-1, 1, self.width + 4)\n', (5269, 5292), False, 'import torch\n'), ((5310, 5332), 'torch.meshgrid', 'torch.meshgrid', (['ys', 'xs'], {}), '(ys, xs)\n', (5324, 5332), False, 'import torch\n'), ((5651, 5685), 'torch.cat', 'torch.cat', (['(z_tiled, coord_map)', '(1)'], {}), '((z_tiled, coord_map), 1)\n', (5660, 5685), False, 'import torch\n'), ((7051, 7074), 'torch.cat', 'torch.cat', (['(x, mask)', '(1)'], {}), '((x, mask), 1)\n', (7060, 7074), False, 'import torch\n'), ((7195, 7221), 'torch.ones_like', 'torch.ones_like', (['x[:, 0:1]'], {}), '(x[:, 0:1])\n', (7210, 7221), False, 'import torch\n'), ((7470, 7488), 'torch.stack', 'torch.stack', (['masks'], {}), '(masks)\n', (7481, 7488), False, 'import torch\n'), ((7625, 7651), 'torch.ones_like', 'torch.ones_like', (['x[:, 0:1]'], {}), '(x[:, 0:1])\n', (7640, 7651), False, 'import torch\n'), ((8117, 8140), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (8126, 8140), False, 'import torch\n'), ((8155, 8176), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (8171, 8176), False, 'import torch\n'), ((8301, 8337), 'torch.sigmoid', 'torch.sigmoid', (['decoder_output[:, :3]'], {}), '(decoder_output[:, :3])\n', (8314, 8337), False, 'import torch\n'), ((9818, 9851), 'torch.distributions.Normal', 'dists.Normal', (['x_recon_s[i]', 'sigma'], {}), '(x_recon_s[i], sigma)\n', (9830, 9851), True, 'import 
torch.distributions as dists\n'), ((9950, 9975), 'torch.sum', 'torch.sum', (['p_x', '[1, 2, 3]'], {}), '(p_x, [1, 2, 3])\n', (9959, 9975), False, 'import torch\n'), ((10485, 10523), 'torch.finfo', 'torch.finfo', (['q_masks_recon.probs.dtype'], {}), '(q_masks_recon.probs.dtype)\n', (10496, 10523), False, 'import torch\n'), ((11811, 11833), 'torch.mean', 'torch.mean', (['loss_batch'], {}), '(loss_batch)\n', (11821, 11833), False, 'import torch\n'), ((13119, 13160), 'numpy.stack', 'np.stack', (['([masks[:, i, :, :]] * 3)'], {'axis': '(1)'}), '([masks[:, i, :, :]] * 3, axis=1)\n', (13127, 13160), True, 'import numpy as np\n'), ((14177, 14202), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14200, 14202), False, 'import torch\n'), ((14872, 14900), 'torch.load', 'torch.load', (['vae_weights_path'], {}), '(vae_weights_path)\n', (14882, 14900), False, 'import torch\n'), ((17110, 17125), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17123, 17125), False, 'import torch\n'), ((17855, 17880), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17878, 17880), False, 'import torch\n'), ((20406, 20426), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20418, 20426), False, 'import torch\n'), ((1904, 1946), 'torch.cat', 'torch.cat', (['(cur, intermediates[-i - 1])', '(1)'], {}), '((cur, intermediates[-i - 1]), 1)\n', (1913, 1946), False, 'import torch\n'), ((3058, 3173), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels', 'out_channels': 'conv_size1', 'kernel_size': 'kernel_size', 'stride': 'encoder_stride'}), '(in_channels=input_channels, out_channels=conv_size1, kernel_size=\n kernel_size, stride=encoder_stride)\n', (3067, 3173), False, 'from torch import nn, optim\n'), ((3204, 3225), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3211, 3225), False, 'from torch import nn, optim\n'), ((3239, 3350), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 
'conv_size1', 'out_channels': 'conv_size2', 'kernel_size': 'kernel_size', 'stride': 'encoder_stride'}), '(in_channels=conv_size1, out_channels=conv_size2, kernel_size=\n kernel_size, stride=encoder_stride)\n', (3248, 3350), False, 'from torch import nn, optim\n'), ((3381, 3402), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3388, 3402), False, 'from torch import nn, optim\n'), ((3416, 3527), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'conv_size2', 'out_channels': 'conv_size2', 'kernel_size': 'kernel_size', 'stride': 'encoder_stride'}), '(in_channels=conv_size2, out_channels=conv_size2, kernel_size=\n kernel_size, stride=encoder_stride)\n', (3425, 3527), False, 'from torch import nn, optim\n'), ((3558, 3579), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3565, 3579), False, 'from torch import nn, optim\n'), ((3884, 3951), 'torch.nn.Linear', 'nn.Linear', (['(conv_size2 * red_width * red_height)', 'full_connected_size'], {}), '(conv_size2 * red_width * red_height, full_connected_size)\n', (3893, 3951), False, 'from torch import nn, optim\n'), ((3965, 3986), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3972, 3986), False, 'from torch import nn, optim\n'), ((4741, 4857), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(latent_size + 2)', 'out_channels': 'conv_size1', 'kernel_size': 'kernel_size', 'stride': 'decoder_stride'}), '(in_channels=latent_size + 2, out_channels=conv_size1, kernel_size\n =kernel_size, stride=decoder_stride)\n', (4750, 4857), False, 'from torch import nn, optim\n'), ((4888, 4909), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4895, 4909), False, 'from torch import nn, optim\n'), ((4923, 5034), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'conv_size1', 'out_channels': 'conv_size1', 'kernel_size': 'kernel_size', 'stride': 'decoder_stride'}), '(in_channels=conv_size1, out_channels=conv_size1, 
kernel_size=\n kernel_size, stride=decoder_stride)\n', (4932, 5034), False, 'from torch import nn, optim\n'), ((5065, 5086), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5072, 5086), False, 'from torch import nn, optim\n'), ((5100, 5178), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'conv_size1', 'out_channels': 'output_channels', 'kernel_size': '(1)'}), '(in_channels=conv_size1, out_channels=output_channels, kernel_size=1)\n', (5109, 5178), False, 'from torch import nn, optim\n'), ((9383, 9406), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (9394, 9406), False, 'import torch\n'), ((9429, 9452), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (9440, 9452), False, 'import torch\n'), ((13223, 13252), 'numpy.transpose', 'np.transpose', (['m', '(0, 2, 3, 1)'], {}), '(m, (0, 2, 3, 1))\n', (13235, 13252), True, 'import numpy as np\n'), ((1111, 1198), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(channel_base * 2 ** i)', '(channel_base * 2 ** (i - 1))', '(2)'], {'stride': '(2)'}), '(channel_base * 2 ** i, channel_base * 2 ** (i - 1), 2,\n stride=2)\n', (1129, 1198), False, 'from torch import nn, optim\n'), ((1743, 1758), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1755, 1758), False, 'from torch import nn, optim\n'), ((5353, 5374), 'torch.stack', 'torch.stack', (['(ys, xs)'], {}), '((ys, xs))\n', (5364, 5374), False, 'import torch\n'), ((8478, 8556), 'torch.zeros', 'torch.zeros', (['(masks[0].shape[0], self.color_channels, self.width, self.height)'], {}), '((masks[0].shape[0], self.color_channels, self.width, self.height))\n', (8489, 8556), False, 'import torch\n'), ((17392, 17415), 'torch.cat', 'torch.cat', (['masks'], {'dim': '(1)'}), '(masks, dim=1)\n', (17401, 17415), False, 'import torch\n'), ((11402, 11424), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (11418, 11424), False, 'import torch\n'), ((17142, 17164), 'torch.from_numpy', 
'torch.from_numpy', (['data'], {}), '(data)\n', (17158, 17164), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 11:49:58 2016
"""
from __future__ import division
import numpy as np
#from sklearn.gaussian_process import GaussianProcess
from scipy.optimize import minimize
from acquisition_functions import AcquisitionFunction, unique_rows
#from visualization import Visualization
from prada_gaussian_process import PradaGaussianProcess
#from prada_gaussian_process import PradaMultipleGaussianProcess
from acquisition_maximization import acq_max_nlopt
from acquisition_maximization import acq_max_direct
from acquisition_maximization import acq_max
from sklearn.metrics.pairwise import euclidean_distances
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
import matplotlib.pyplot as plt
import pickle
import time
import copy
import math
import random
#import nlopt
#@author: Vu
#======================================================================================================
#======================================================================================================
#======================================================================================================
#======================================================================================================
counter = 0
class PradaBayOptFn(object):
    def __init__(self, gp_params, func_params, acq_params, experiment_num, seed, verbose=1):
        """
        Set up a Bayesian-optimization run over a black-box function.

        Input parameters
        ----------
        gp_params: GP parameters
            gp_params.theta: to compute the kernel
            gp_params.delta: to compute the kernel
        func_params: function to optimize
            func_params['init_bounds']: initial bounds for parameters (optional)
            func_params['bounds']: bounds on parameters (dict or array-like)
            func_params['f']: the function to be optimized
        acq_params: acquisition function settings,
            acq_params.acq_func['name']=['ei','ucb','poi','lei']
            , acq['kappa'] for ucb, acq['k'] for lei
            acq_params.opt_toolbox: optimization toolbox 'nlopt','direct','scipy'
        experiment_num: the iteration of the GP method. Used to make sure each
            independent stage of the experiment uses different
            initial conditions
        seed: variable used as part of a seed to generate random initial points
        verbose: unused here; kept for interface compatibility

        Attributes created
        ------------------
        dim: dimension
        bounds: bounds on original scale
        scalebounds: bounds on normalized scale of 0-1
        time_opt: will record the time spent on optimization
        gp: Gaussian Process object
        """
        self.experiment_num=experiment_num
        self.seed=seed
        # Find number of parameters
        bounds=func_params['bounds']
        # Fall back to the search bounds when no separate init bounds given.
        if 'init_bounds' not in func_params:
            init_bounds=bounds
        else:
            init_bounds=func_params['init_bounds']
        self.dim = len(bounds)
        # Create an array with parameters bounds
        if isinstance(bounds,dict):
            # Get the name of the parameters; dict values become bound rows.
            self.keys = list(bounds.keys())
            self.bounds = []
            for key in bounds.keys():
                self.bounds.append(bounds[key])
            self.bounds = np.asarray(self.bounds)
        else:
            self.bounds=np.asarray(bounds)
        # NOTE(review): the next two if-blocks both assign self.init_bounds;
        # the isinstance(dict) branch intentionally overrides the len()==0
        # result — order matters here.
        if len(init_bounds)==0:
            self.init_bounds=self.bounds.copy()
        else:
            self.init_bounds=init_bounds
        if isinstance(init_bounds,dict):
            # Get the name of the parameters
            self.keys = list(init_bounds.keys())
            self.init_bounds = []
            for key in init_bounds.keys():
                self.init_bounds.append(init_bounds[key])
            self.init_bounds = np.asarray(self.init_bounds)
        else:
            self.init_bounds=np.asarray(init_bounds)
        # create a scalebounds 0-1: optimization happens in the unit cube
        scalebounds=np.array([np.zeros(self.dim), np.ones(self.dim)])
        self.scalebounds=scalebounds.T
        # Per-dimension range used to map between original and unit scale.
        self.max_min_gap=self.bounds[:,1]-self.bounds[:,0]
        # Some function to be optimized
        self.f = func_params['f']
        # optimization toolbox (default: scipy)
        if 'opt_toolbox' not in acq_params:
            self.opt_toolbox='scipy'
        else:
            self.opt_toolbox=acq_params['opt_toolbox']
        # acquisition function type
        self.acq=acq_params['acq_func']
        self.acq['scalebounds']=self.scalebounds
        if 'debug' not in self.acq:
            self.acq['debug']=0
        # Optional stopping criterion (0 disables it).
        if 'stopping' not in acq_params:
            self.stopping_criteria=0
        else:
            self.stopping_criteria=acq_params['stopping']
        # Whether to re-optimize GP hyperparameters during the run.
        if 'optimize_gp' not in acq_params:
            self.optimize_gp=0
        else:
            self.optimize_gp=acq_params['optimize_gp']
        # Whether to marginalize over GP hyperparameters.
        if 'marginalize_gp' not in acq_params:
            self.marginalize_gp=0
        else:
            self.marginalize_gp=acq_params['marginalize_gp']
        # store X in original scale
        self.X_original= None
        # store X in 0-1 scale
        self.X = None
        # store y=f(x)
        # (y - mean)/(max-min)
        self.Y = None
        # y original scale
        self.Y_original = None
        # value of the acquisition function at the selected point
        self.alpha_Xt=None
        self.Tau_Xt=None
        # Cumulative time spent in acquisition optimization.
        self.time_opt=0
        self.k_Neighbor=2
        # Lipschitz constant
        self.L=0
        # Gaussian Process class
        self.gp=PradaGaussianProcess(gp_params)
        # acquisition function
        self.acq_func = None
        # stop condition
        self.stop_flag=0
        self.logmarginal=0
        # xt_suggestion, caching for Consensus
        self.xstars=[]
        self.ystars=np.zeros((2,1))
        # theta vector for marginalization GP
        self.theta_vector =[]
# will be later used for visualization
def posterior(self, Xnew):
    """Refit the GP on the stored (X, Y) data and query it at Xnew.

    Returns
    -------
    (mean, std): the GP posterior mean and the predictive standard
    deviation (square root of the MSE returned by the GP).
    """
    self.gp.fit(self.X, self.Y)
    mean, mse = self.gp.predict(Xnew, eval_MSE=True)
    return mean, np.sqrt(mse)
def init(self, gp_params, n_init_points=3):
    """Draw the random initial design and evaluate the objective on it.

    Parameters
    ----------
    gp_params: Gaussian Process structure (unused here; kept for API parity
        with the other initializers).
    n_init_points: number of initial points to sample.
    """
    # Seed the RNG so repeated experiments are reproducible.
    np.random.seed(self.experiment_num * self.seed)
    print(self.experiment_num)

    # One uniform stream per dimension, transposed into points, then
    # concatenated with any points self.explore may have produced.
    samples = [np.random.uniform(x[0], x[1], size=n_init_points)
               for x in self.init_bounds]
    init_X = list(np.asarray(samples).T.reshape((n_init_points, -1)))
    self.X_original = np.asarray(init_X)

    # Evaluate the target function at every initial point.
    y_init = np.reshape(self.f(init_X), (n_init_points, 1))
    self.Y_original = np.asarray(y_init)
    # Standardize observations to zero mean / unit variance.
    self.Y = (self.Y_original - np.mean(self.Y_original)) / np.std(self.Y_original)

    # Map raw-scale points into the internal 0-1 hypercube.
    scaled = np.divide(init_X - self.bounds[:, 0], self.max_min_gap)
    self.X = np.asarray(scaled)
def maximize(self, gp_params):
    """Select, evaluate, and record the next point using self.acq.

    Fits the GP on the observed data, maximizes the acquisition function
    named in ``self.acq['name']``, appends the chosen point and its
    objective value to the stored history, and re-standardizes ``self.Y``.

    Input parameters
    ----------
    gp_params: parameter for Gaussian Process

    Returns
    -------
    None; the recommended point is appended to self.X / self.X_original.
    """
    if self.stop_flag == 1:
        return

    # Pure random search: sample uniformly inside the original bounds.
    if self.acq['name'] == 'random':
        x_max = [np.random.uniform(x[0], x[1], size=1) for x in self.bounds]
        x_max = np.asarray(x_max)
        x_max = x_max.T
        self.X_original = np.vstack((self.X_original, x_max))
        # evaluate Y using original X
        self.Y_original = np.append(self.Y_original, self.f(x_max))
        # update Y after change Y_original (zero mean / unit variance)
        self.Y = (self.Y_original - np.mean(self.Y_original)) / np.std(self.Y_original)
        self.time_opt = np.hstack((self.time_opt, 0))
        return

    # init a new Gaussian Process
    self.gp = PradaGaussianProcess(gp_params)
    if self.gp.KK_x_x_inv == []:
        # Find unique rows of X to avoid GP from breaking
        ur = unique_rows(self.X)
        self.gp.fit(self.X[ur], self.Y[ur])

    acq = self.acq
    if acq['debug'] == 1:
        logmarginal = self.gp.log_marginal_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        print(gp_params['theta'])
        print("log marginal before optimizing ={:.4f}".format(logmarginal))
        self.logmarginal = logmarginal
        if logmarginal < -999999:
            logmarginal = self.gp.log_marginal_lengthscale(gp_params['theta'], gp_params['noise_delta'])

    # Periodically re-optimize the GP length-scale.
    # BUGFIX: was `len(self.Y)%10*self.dim==0`, which parses as
    # `(len(self.Y)%10)*self.dim==0` and therefore fires every 10
    # observations regardless of dimension; parenthesized to match the
    # `% (4*self.dim)` convention used in maximize_vrs_of_ts.
    if self.optimize_gp == 1 and len(self.Y) % (10 * self.dim) == 0:
        print("Initial length scale={}".format(gp_params['theta']))
        newtheta = self.gp.optimize_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        gp_params['theta'] = newtheta
        print("New length scale={}".format(gp_params['theta']))
        # init a new Gaussian Process after optimizing hyper-parameter
        self.gp = PradaGaussianProcess(gp_params)
        # Find unique rows of X to avoid GP from breaking
        ur = unique_rows(self.X)
        self.gp.fit(self.X[ur], self.Y[ur])

    # Set acquisition function
    start_opt = time.time()
    y_max = self.Y.max()

    # BUGFIX: `xstars` is now always bound. The original guarded it with
    # `if 'xstars' not in globals()`, which (a) raised UnboundLocalError for
    # non-consensus acquisitions whenever a *global* `xstars` existed, and
    # (b) cleared the consensus/mes seeds whenever it did not.
    xstars = []
    if acq['name'] in ['consensus', 'mes']:
        # Seed the acquisition optimizer with the UCB, EI and PES maximizers.
        ucb_acq_func = {}
        ucb_acq_func['name'] = 'ucb'
        ucb_acq_func['kappa'] = np.log(len(self.Y))
        ucb_acq_func['dim'] = self.dim
        ucb_acq_func['scalebounds'] = self.scalebounds
        myacq = AcquisitionFunction(ucb_acq_func)
        xt_ucb = acq_max(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)
        xstars.append(xt_ucb)

        ei_acq_func = {}
        ei_acq_func['name'] = 'ei'
        ei_acq_func['dim'] = self.dim
        ei_acq_func['scalebounds'] = self.scalebounds
        myacq = AcquisitionFunction(ei_acq_func)
        xt_ei = acq_max(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)
        xstars.append(xt_ei)

        pes_acq_func = {}
        pes_acq_func['name'] = 'pes'
        pes_acq_func['dim'] = self.dim
        pes_acq_func['scalebounds'] = self.scalebounds
        myacq = AcquisitionFunction(pes_acq_func)
        xt_pes = acq_max(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)
        xstars.append(xt_pes)

    if acq['name'] == 'vrs':
        print("please call the maximize_vrs function")
        return

    self.xstars = xstars
    self.acq['xstars'] = xstars
    self.acq_func = AcquisitionFunction(self.acq)

    if acq['name'] == "ei_mu":
        # find the maximum in the predictive mean
        mu_acq = {}
        mu_acq['name'] = 'mu'
        mu_acq['dim'] = self.dim
        acq_mu = AcquisitionFunction(mu_acq)
        x_mu_max = acq_max(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox=self.opt_toolbox)
        # set y_max = mu_max
        y_max = acq_mu.acq_kind(x_mu_max, gp=self.gp, y_max=y_max)

    x_max = acq_max(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.xstars)

    if acq['name'] == 'consensus' and acq['debug'] == 1:  # plot the x_max and xstars
        fig = plt.figure(figsize=(5, 5))
        plt.scatter(xt_ucb[0], xt_ucb[1], marker='s', color='g', s=200, label='Peak')
        plt.scatter(xt_ei[0], xt_ei[1], marker='s', color='k', s=200, label='Peak')
        plt.scatter(x_max[0], x_max[1], marker='*', color='r', s=300, label='Peak')
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        strFileName = "acquisition_functions_debug.eps"
        fig.savefig(strFileName, bbox_inches='tight')
    # NOTE(review): this branch is unreachable — the 'vrs' case returns above.
    if acq['name'] == 'vrs' and acq['debug'] == 1:  # plot the x_max and xstars
        fig = plt.figure(figsize=(5, 5))
        plt.scatter(xt_ucb[0], xt_ucb[1], marker='s', color='g', s=200, label='Peak')
        plt.scatter(xt_ei[0], xt_ei[1], marker='s', color='k', s=200, label='Peak')
        plt.scatter(x_max[0], x_max[1], marker='*', color='r', s=300, label='Peak')
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        strFileName = "vrs_acquisition_functions_debug.eps"
        # fig.savefig(strFileName, bbox_inches='tight')

    val_acq = self.acq_func.acq_kind(x_max, self.gp, y_max)
    if self.stopping_criteria != 0 and val_acq < self.stopping_criteria:
        val_acq = self.acq_func.acq_kind(x_max, self.gp, y_max)
        self.stop_flag = 1
        print("Stopping Criteria is violated. Stopping Criteria is {:.15f}".format(self.stopping_criteria))

    self.alpha_Xt = np.append(self.alpha_Xt, val_acq)

    mean, var = self.gp.predict(x_max, eval_MSE=True)
    var.flags['WRITEABLE'] = True
    var[var < 1e-20] = 0  # clamp tiny/negative predictive variances

    # record the optimization time
    finished_opt = time.time()
    elapse_opt = finished_opt - start_opt
    self.time_opt = np.hstack((self.time_opt, elapse_opt))

    # store X (0-1 scale) and its original-scale counterpart
    self.X = np.vstack((self.X, x_max.reshape((1, -1))))
    temp_X_new_original = x_max * self.max_min_gap + self.bounds[:, 0]
    self.X_original = np.vstack((self.X_original, temp_X_new_original))
    # evaluate Y using original X
    self.Y_original = np.append(self.Y_original, self.f(temp_X_new_original))
    # update Y after change Y_original
    self.Y = (self.Y_original - np.mean(self.Y_original)) / np.std(self.Y_original)
    if self.gp.flagIncremental == 1:
        self.gp.fit_incremental(x_max, self.Y[-1])
def maximize_ei_dist(self, gp_params):
    """
    Main optimization method: EI seeded with Thompson samples.

    Input parameters
    ----------
    gp_params: parameter for Gaussian Process

    Returns
    -------
    None; the recommended point is appended to self.X / self.X_original.
    """
    if self.stop_flag == 1:
        return
    # init a new Gaussian Process
    self.gp = PradaGaussianProcess(gp_params)
    # Find unique rows of X to avoid GP from breaking
    ur = unique_rows(self.X)
    self.gp.fit(self.X[ur], self.Y[ur])
    # optimize GP lengthscale after 15 iterations if self.optimize_gp==1
    if self.optimize_gp == 1 and len(self.Y) % 15 == 0:
        print("Initial length scale={}".format(gp_params['theta']))
        newtheta = self.gp.optimize_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        gp_params['theta'] = newtheta
        print("New length scale={}".format(newtheta))
        # init a new Gaussian Process using the new theta
        self.gp = PradaGaussianProcess(gp_params)
        # Find unique rows of X to prevent GP from breaking
        ur = unique_rows(self.X)
        self.gp.fit(self.X[ur], self.Y[ur])
    # Set acquisition function
    start_opt = time.time()
    y_max = self.Y.max()
    self.xstars = []
    self.y_stars = []
    y_max = np.max(self.Y)
    ###############################################################################
    # numXtar controls the number of thompson samples, given as M in the paper
    ###############################################################################
    numXtar = 100
    temp = []
    # finding the xt of Thompson Sampling
    ii = 0
    while ii < numXtar:
        mu_acq = {}
        mu_acq['name'] = 'thompson'
        mu_acq['dim'] = self.dim
        mu_acq['scalebounds'] = self.scalebounds
        acq_mu = AcquisitionFunction(mu_acq)
        # Get the location of the Thompson sample maxima
        xt_TS = acq_max(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox='scipy')
        # get the value g*
        y_xt_TS = acq_mu.acq_kind(xt_TS, self.gp, y_max=y_max)
        temp.append(xt_TS)
        self.xstars.append(xt_TS)
        self.y_stars.append(y_xt_TS)
        ii += 1
        if self.acq['debug'] == 1:
            print('mean y*={:.4f}({:.8f}) y+={:.4f}'.format(np.mean(y_xt_TS), np.std(y_xt_TS), y_max))
    # fall back to all Thompson samples if no x* was kept
    if self.xstars == []:
        self.xstars = temp
    # save optimal Thompson sample mean and sdv for later analysis
    y_stars = np.array([np.mean(self.y_stars), np.std(self.y_stars)]).reshape(2, -1)
    self.acq['xstars'] = self.xstars
    self.acq['ystars'] = self.y_stars
    self.ystars = np.hstack((self.ystars, (np.array(y_stars))))
    self.acq_func = AcquisitionFunction(self.acq)
    # NOTE(review): seeds=self.ystars passes objective VALUES, whereas the
    # sibling maximize variants pass candidate LOCATIONS (self.xstars) —
    # confirm this is intended.
    x_max = acq_max(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.ystars)
    val_acq = self.acq_func.acq_kind(x_max, self.gp, y_max)
    if self.stopping_criteria != 0 and val_acq < self.stopping_criteria:
        val_acq = self.acq_func.acq_kind(x_max, self.gp, y_max)
        self.stop_flag = 1
        print("Stopping Criteria is violated. Stopping Criteria is {:.15f}".format(self.stopping_criteria))
    # record the optimization time
    finished_opt = time.time()
    elapse_opt = finished_opt - start_opt
    self.time_opt = np.hstack((self.time_opt, elapse_opt))
    # store X
    self.X = np.vstack((self.X, x_max.reshape((1, -1))))
    # compute X in original scale
    temp_X_new_original = x_max * self.max_min_gap + self.bounds[:, 0]
    self.X_original = np.vstack((self.X_original, temp_X_new_original))
    # evaluate Y using original X
    self.Y_original = np.append(self.Y_original, self.f(temp_X_new_original))
    # update Y after change Y_original
    self.Y = (self.Y_original - np.mean(self.Y_original)) / np.std(self.Y_original)
def maximize_vrs_of_ts(self, gp_params):
    """
    Main optimization method: variance-reduction search over Thompson samples.

    Input parameters
    ----------
    gp_params: parameter for Gaussian Process

    Returns
    -------
    None; the recommended point is appended to self.X / self.X_original.
    """
    if self.stop_flag == 1:
        return
    # init a new Gaussian Process
    self.gp = PradaGaussianProcess(gp_params)
    # Find unique rows of X to avoid GP from breaking
    ur = unique_rows(self.X)
    self.gp.fit(self.X[ur], self.Y[ur])
    acq = self.acq
    if acq['debug'] == 1:
        logmarginal = self.gp.log_marginal_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        print(gp_params['theta'])
        print("log marginal before optimizing ={:.4f}".format(logmarginal))
        self.logmarginal = logmarginal
        if logmarginal < -999999:
            logmarginal = self.gp.log_marginal_lengthscale(gp_params['theta'], gp_params['noise_delta'])
    # optimize GP parameters every 4*dim observations
    if self.optimize_gp == 1 and len(self.Y) % (4 * self.dim) == 0:
        newtheta = self.gp.optimize_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        gp_params['theta'] = newtheta
        logmarginal = self.gp.log_marginal_lengthscale(newtheta, gp_params['noise_delta'])
        if acq['debug'] == 1:
            print("{:s} log marginal={:.4f}".format(newtheta, logmarginal))
    # number of Thompson samples (x*) to draw
    if 'n_xstars' in self.acq:
        numXstar = self.acq['n_xstars']
    else:
        numXstar = 10 * self.dim
    if self.marginalize_gp == 1 and len(self.Y) == (5 * self.dim):
        newtheta = self.gp.optimize_lengthscale(gp_params['theta'], gp_params['noise_delta'])
        gp_params['theta'] = newtheta
    # marginalize the GP hyper-parameter by slice-sampling length-scales
    if self.marginalize_gp == 1 and len(self.Y) % (8 * self.dim) == 0:
        self.theta_vector = self.gp.slice_sampling_lengthscale_SE(gp_params['theta'], gp_params['noise_delta'], nSamples=numXstar)
        gp_params['newtheta_vector'] = self.theta_vector
    # Set acquisition function
    start_opt = time.time()
    y_max = self.Y.max()
    # run the acquisition function for the first time to get xstar
    self.xstars = []
    y_max = np.max(self.Y)
    temp = []
    # finding the xt of Thompson Sampling
    for ii in range(numXstar):
        # NOTE(review): `!= []` on a non-list (e.g. an ndarray) compares
        # elementwise — confirm theta_vector is always a plain list here.
        if self.theta_vector != []:
            gp_params['theta'] = self.theta_vector[ii]
        # init a new Gaussian Process (one per sampled length-scale)
        self.gp = PradaGaussianProcess(gp_params)
        # Find unique rows of X to avoid GP from breaking
        ur = unique_rows(self.X)
        self.gp.fit(self.X[ur], self.Y[ur])
        mu_acq = {}
        mu_acq['name'] = 'thompson'
        mu_acq['dim'] = self.dim
        mu_acq['scalebounds'] = self.scalebounds
        acq_mu = AcquisitionFunction(mu_acq)
        xt_TS = acq_max(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox='thompson')
        # get the value f*
        y_xt_TS = acq_mu.acq_kind(xt_TS, self.gp, y_max=y_max)
        temp.append(xt_TS)
        # check if f* > y^max and ignore xt_TS otherwise (filter disabled)
        # if y_xt_TS>=y_max:
        #     self.xstars.append(xt_TS)
    if self.xstars == []:
        # the f* filter above is disabled, so fall back to all Thompson samples
        self.xstars = temp
    # check predictive variance before adding a new data point
    var_before = self.gp.compute_var(self.X, self.xstars)
    var_before = np.mean(var_before)
    self.gp.lengthscale_vector = self.theta_vector
    self.acq['xstars'] = self.xstars
    self.acq_func = AcquisitionFunction(self.acq)
    x_max = acq_max(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.xstars)
    # NOTE(review): negated here and used for the stopping test below —
    # confirm the intended sign convention.
    val_acq = -self.acq_func.acq_kind(x_max, self.gp, y_max)
    # check predictive variance after hypothetically adding x_max
    temp = np.vstack((self.gp.X, x_max))
    var_after = self.gp.compute_var(temp, self.xstars)
    var_after = np.mean(var_after)
    # NOTE(review): np.asscalar was removed in NumPy 1.23; prefer .item()/float().
    print("predictive variance before={:.12f} after={:.12f} val_acq={:.12f}".format(var_before, var_after, np.asscalar(val_acq)))
    # check maximum variance
    var_acq = {}
    var_acq['name'] = 'pure_exploration'
    var_acq['dim'] = self.dim
    var_acq['scalebounds'] = self.scalebounds
    acq_var = AcquisitionFunction(var_acq)
    temp = acq_max(ac=acq_var.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds, opt_toolbox='scipy')
    # get the value f*
    max_var_after = acq_var.acq_kind(temp, self.gp, y_max=y_max)
    print("max predictive variance ={:.8f}".format(np.asscalar(max_var_after)))
    if self.stopping_criteria != 0 and val_acq < self.stopping_criteria:
        val_acq = self.acq_func.acq_kind(x_max, self.gp, y_max)
        self.stop_flag = 1
        print("Stopping Criteria is violated. Stopping Criteria is {:.15f}".format(self.stopping_criteria))
    mean, var = self.gp.predict(x_max, eval_MSE=True)
    var.flags['WRITEABLE'] = True
    var[var < 1e-20] = 0
    # record the optimization time
    finished_opt = time.time()
    elapse_opt = finished_opt - start_opt
    self.time_opt = np.hstack((self.time_opt, elapse_opt))
    # store X
    self.X = np.vstack((self.X, x_max.reshape((1, -1))))
    # compute X in original scale
    temp_X_new_original = x_max * self.max_min_gap + self.bounds[:, 0]
    self.X_original = np.vstack((self.X_original, temp_X_new_original))
    # evaluate Y using original X
    self.Y_original = np.append(self.Y_original, self.f(temp_X_new_original))
    # update Y after change Y_original
    self.Y = (self.Y_original - np.mean(self.Y_original)) / np.std(self.Y_original)
#======================================================================================
#======================================================================================================
#======================================================================================================
#======================================================================================================
| [
"acquisition_maximization.acq_max",
"numpy.random.seed",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.asscalar",
"prada_gaussian_process.PradaGaussianProcess",
"numpy.std",
"numpy.append",
"numpy.max",
"numpy.reshape",
"numpy.divide",
"matplotlib.pyplot.ylim",
"numpy.asar... | [((6075, 6106), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (6095, 6106), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((6360, 6376), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (6368, 6376), True, 'import numpy as np\n'), ((6992, 7039), 'numpy.random.seed', 'np.random.seed', (['(self.experiment_num * self.seed)'], {}), '(self.experiment_num * self.seed)\n', (7006, 7039), True, 'import numpy as np\n'), ((7321, 7334), 'numpy.asarray', 'np.asarray', (['l'], {}), '(l)\n', (7331, 7334), True, 'import numpy as np\n'), ((7448, 7466), 'numpy.asarray', 'np.asarray', (['init_X'], {}), '(init_X)\n', (7458, 7466), True, 'import numpy as np\n'), ((7583, 7621), 'numpy.reshape', 'np.reshape', (['y_init', '(n_init_points, 1)'], {}), '(y_init, (n_init_points, 1))\n', (7593, 7621), True, 'import numpy as np\n'), ((7649, 7667), 'numpy.asarray', 'np.asarray', (['y_init'], {}), '(y_init)\n', (7659, 7667), True, 'import numpy as np\n'), ((7869, 7924), 'numpy.divide', 'np.divide', (['(init_X - self.bounds[:, 0])', 'self.max_min_gap'], {}), '(init_X - self.bounds[:, 0], self.max_min_gap)\n', (7878, 7924), True, 'import numpy as np\n'), ((7951, 7978), 'numpy.asarray', 'np.asarray', (['temp_init_point'], {}), '(temp_init_point)\n', (7961, 7978), True, 'import numpy as np\n'), ((9103, 9134), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (9123, 9134), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((10770, 10781), 'time.time', 'time.time', ([], {}), '()\n', (10779, 10781), False, 'import time\n'), ((12481, 12510), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['self.acq'], {}), '(self.acq)\n', (12500, 12510), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((13001, 13139), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 
'self.acq_func.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': 'self.opt_toolbox', 'seeds': 'self.xstars'}), '(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.xstars)\n', (13008, 13139), False, 'from acquisition_maximization import acq_max\n'), ((14712, 14745), 'numpy.append', 'np.append', (['self.alpha_Xt', 'val_acq'], {}), '(self.alpha_Xt, val_acq)\n', (14721, 14745), True, 'import numpy as np\n'), ((15003, 15014), 'time.time', 'time.time', ([], {}), '()\n', (15012, 15014), False, 'import time\n'), ((15081, 15119), 'numpy.hstack', 'np.hstack', (['(self.time_opt, elapse_opt)'], {}), '((self.time_opt, elapse_opt))\n', (15090, 15119), True, 'import numpy as np\n'), ((15382, 15431), 'numpy.vstack', 'np.vstack', (['(self.X_original, temp_X_new_original)'], {}), '((self.X_original, temp_X_new_original))\n', (15391, 15431), True, 'import numpy as np\n'), ((16284, 16315), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (16304, 16315), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((16399, 16418), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (16410, 16418), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((17227, 17238), 'time.time', 'time.time', ([], {}), '()\n', (17236, 17238), False, 'import time\n'), ((17368, 17382), 'numpy.max', 'np.max', (['self.Y'], {}), '(self.Y)\n', (17374, 17382), True, 'import numpy as np\n'), ((18951, 18980), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['self.acq'], {}), '(self.acq)\n', (18970, 18980), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((18998, 19136), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'self.acq_func.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 
'opt_toolbox': 'self.opt_toolbox', 'seeds': 'self.ystars'}), '(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.ystars)\n', (19005, 19136), False, 'from acquisition_maximization import acq_max\n'), ((19562, 19573), 'time.time', 'time.time', ([], {}), '()\n', (19571, 19573), False, 'import time\n'), ((19640, 19678), 'numpy.hstack', 'np.hstack', (['(self.time_opt, elapse_opt)'], {}), '((self.time_opt, elapse_opt))\n', (19649, 19678), True, 'import numpy as np\n'), ((19941, 19990), 'numpy.vstack', 'np.vstack', (['(self.X_original, temp_X_new_original)'], {}), '((self.X_original, temp_X_new_original))\n', (19950, 19990), True, 'import numpy as np\n'), ((20662, 20693), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (20682, 20693), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((20777, 20796), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (20788, 20796), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((22847, 22858), 'time.time', 'time.time', ([], {}), '()\n', (22856, 22858), False, 'import time\n'), ((23081, 23095), 'numpy.max', 'np.max', (['self.Y'], {}), '(self.Y)\n', (23087, 23095), True, 'import numpy as np\n'), ((24520, 24539), 'numpy.mean', 'np.mean', (['var_before'], {}), '(var_before)\n', (24527, 24539), True, 'import numpy as np\n'), ((24673, 24702), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['self.acq'], {}), '(self.acq)\n', (24692, 24702), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((24720, 24858), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'self.acq_func.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': 'self.opt_toolbox', 'seeds': 'self.xstars'}), '(ac=self.acq_func.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n 
scalebounds, opt_toolbox=self.opt_toolbox, seeds=self.xstars)\n', (24727, 24858), False, 'from acquisition_maximization import acq_max\n'), ((25057, 25086), 'numpy.vstack', 'np.vstack', (['(self.gp.X, x_max)'], {}), '((self.gp.X, x_max))\n', (25066, 25086), True, 'import numpy as np\n'), ((25163, 25181), 'numpy.mean', 'np.mean', (['var_after'], {}), '(var_after)\n', (25170, 25181), True, 'import numpy as np\n'), ((25547, 25575), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['var_acq'], {}), '(var_acq)\n', (25566, 25575), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((25592, 25696), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'acq_var.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': '"""scipy"""'}), "(ac=acq_var.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox='scipy')\n", (25599, 25696), False, 'from acquisition_maximization import acq_max\n'), ((26447, 26458), 'time.time', 'time.time', ([], {}), '()\n', (26456, 26458), False, 'import time\n'), ((26525, 26563), 'numpy.hstack', 'np.hstack', (['(self.time_opt, elapse_opt)'], {}), '((self.time_opt, elapse_opt))\n', (26534, 26563), True, 'import numpy as np\n'), ((26826, 26875), 'numpy.vstack', 'np.vstack', (['(self.X_original, temp_X_new_original)'], {}), '((self.X_original, temp_X_new_original))\n', (26835, 26875), True, 'import numpy as np\n'), ((3557, 3580), 'numpy.asarray', 'np.asarray', (['self.bounds'], {}), '(self.bounds)\n', (3567, 3580), True, 'import numpy as np\n'), ((3621, 3639), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (3631, 3639), True, 'import numpy as np\n'), ((4113, 4141), 'numpy.asarray', 'np.asarray', (['self.init_bounds'], {}), '(self.init_bounds)\n', (4123, 4141), True, 'import numpy as np\n'), ((4187, 4210), 'numpy.asarray', 'np.asarray', (['init_bounds'], {}), '(init_bounds)\n', (4197, 4210), True, 'import numpy as np\n'), ((6666, 
6681), 'numpy.sqrt', 'np.sqrt', (['sigma2'], {}), '(sigma2)\n', (6673, 6681), True, 'import numpy as np\n'), ((7122, 7171), 'numpy.random.uniform', 'np.random.uniform', (['x[0]', 'x[1]'], {'size': 'n_init_points'}), '(x[0], x[1], size=n_init_points)\n', (7139, 7171), True, 'import numpy as np\n'), ((7735, 7758), 'numpy.std', 'np.std', (['self.Y_original'], {}), '(self.Y_original)\n', (7741, 7758), True, 'import numpy as np\n'), ((8487, 8504), 'numpy.asarray', 'np.asarray', (['x_max'], {}), '(x_max)\n', (8497, 8504), True, 'import numpy as np\n'), ((8561, 8596), 'numpy.vstack', 'np.vstack', (['(self.X_original, x_max)'], {}), '((self.X_original, x_max))\n', (8570, 8596), True, 'import numpy as np\n'), ((8987, 9016), 'numpy.hstack', 'np.hstack', (['(self.time_opt, 0)'], {}), '((self.time_opt, 0))\n', (8996, 9016), True, 'import numpy as np\n'), ((9253, 9272), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (9264, 9272), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((10513, 10544), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (10533, 10544), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((10626, 10645), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (10637, 10645), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((11126, 11159), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['ucb_acq_func'], {}), '(ucb_acq_func)\n', (11145, 11159), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((11182, 11258), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'myacq.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds'}), '(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)\n', (11189, 11258), False, 'from acquisition_maximization import acq_max\n'), ((11535, 
11567), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['ei_acq_func'], {}), '(ei_acq_func)\n', (11554, 11567), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((11589, 11665), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'myacq.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds'}), '(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)\n', (11596, 11665), False, 'from acquisition_maximization import acq_max\n'), ((11928, 11961), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['pes_acq_func'], {}), '(pes_acq_func)\n', (11947, 11961), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((11984, 12060), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'myacq.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds'}), '(ac=myacq.acq_kind, gp=self.gp, y_max=y_max, bounds=self.scalebounds)\n', (11991, 12060), False, 'from acquisition_maximization import acq_max\n'), ((12713, 12740), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['mu_acq'], {}), '(mu_acq)\n', (12732, 12740), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((12765, 12877), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'acq_mu.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': 'self.opt_toolbox'}), '(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox=self.opt_toolbox)\n', (12772, 12877), False, 'from acquisition_maximization import acq_max\n'), ((13235, 13261), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (13245, 13261), True, 'import matplotlib.pyplot as plt\n'), ((13277, 13354), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xt_ucb[0]', 'xt_ucb[1]'], {'marker': '"""s"""', 'color': '"""g"""', 's': '(200)', 'label': '"""Peak"""'}), 
"(xt_ucb[0], xt_ucb[1], marker='s', color='g', s=200, label='Peak')\n", (13288, 13354), True, 'import matplotlib.pyplot as plt\n'), ((13363, 13438), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xt_ei[0]', 'xt_ei[1]'], {'marker': '"""s"""', 'color': '"""k"""', 's': '(200)', 'label': '"""Peak"""'}), "(xt_ei[0], xt_ei[1], marker='s', color='k', s=200, label='Peak')\n", (13374, 13438), True, 'import matplotlib.pyplot as plt\n'), ((13447, 13522), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_max[0]', 'x_max[1]'], {'marker': '"""*"""', 'color': '"""r"""', 's': '(300)', 'label': '"""Peak"""'}), "(x_max[0], x_max[1], marker='*', color='r', s=300, label='Peak')\n", (13458, 13522), True, 'import matplotlib.pyplot as plt\n'), ((13531, 13545), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (13539, 13545), True, 'import matplotlib.pyplot as plt\n'), ((13558, 13572), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (13566, 13572), True, 'import matplotlib.pyplot as plt\n'), ((13789, 13815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (13799, 13815), True, 'import matplotlib.pyplot as plt\n'), ((13831, 13908), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xt_ucb[0]', 'xt_ucb[1]'], {'marker': '"""s"""', 'color': '"""g"""', 's': '(200)', 'label': '"""Peak"""'}), "(xt_ucb[0], xt_ucb[1], marker='s', color='g', s=200, label='Peak')\n", (13842, 13908), True, 'import matplotlib.pyplot as plt\n'), ((13917, 13992), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xt_ei[0]', 'xt_ei[1]'], {'marker': '"""s"""', 'color': '"""k"""', 's': '(200)', 'label': '"""Peak"""'}), "(xt_ei[0], xt_ei[1], marker='s', color='k', s=200, label='Peak')\n", (13928, 13992), True, 'import matplotlib.pyplot as plt\n'), ((14001, 14076), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_max[0]', 'x_max[1]'], {'marker': '"""*"""', 'color': '"""r"""', 's': '(300)', 'label': '"""Peak"""'}), "(x_max[0], x_max[1], 
marker='*', color='r', s=300, label='Peak')\n", (14012, 14076), True, 'import matplotlib.pyplot as plt\n'), ((14085, 14099), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (14093, 14099), True, 'import matplotlib.pyplot as plt\n'), ((14112, 14126), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (14120, 14126), True, 'import matplotlib.pyplot as plt\n'), ((15743, 15766), 'numpy.std', 'np.std', (['self.Y_original'], {}), '(self.Y_original)\n', (15749, 15766), True, 'import numpy as np\n'), ((16978, 17009), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (16998, 17009), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((17093, 17112), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (17104, 17112), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((17940, 17967), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['mu_acq'], {}), '(mu_acq)\n', (17959, 17967), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((18050, 18153), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'acq_mu.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': '"""scipy"""'}), "(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox='scipy')\n", (18057, 18153), False, 'from acquisition_maximization import acq_max\n'), ((20216, 20239), 'numpy.std', 'np.std', (['self.Y_original'], {}), '(self.Y_original)\n', (20222, 20239), True, 'import numpy as np\n'), ((23373, 23404), 'prada_gaussian_process.PradaGaussianProcess', 'PradaGaussianProcess', (['gp_params'], {}), '(gp_params)\n', (23393, 23404), False, 'from prada_gaussian_process import PradaGaussianProcess\n'), ((23486, 23505), 'acquisition_functions.unique_rows', 'unique_rows', (['self.X'], {}), '(self.X)\n', (23497, 23505), 
False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((23747, 23774), 'acquisition_functions.AcquisitionFunction', 'AcquisitionFunction', (['mu_acq'], {}), '(mu_acq)\n', (23766, 23774), False, 'from acquisition_functions import AcquisitionFunction, unique_rows\n'), ((23796, 23902), 'acquisition_maximization.acq_max', 'acq_max', ([], {'ac': 'acq_mu.acq_kind', 'gp': 'self.gp', 'y_max': 'y_max', 'bounds': 'self.scalebounds', 'opt_toolbox': '"""thompson"""'}), "(ac=acq_mu.acq_kind, gp=self.gp, y_max=y_max, bounds=self.\n scalebounds, opt_toolbox='thompson')\n", (23803, 23902), False, 'from acquisition_maximization import acq_max\n'), ((27187, 27210), 'numpy.std', 'np.std', (['self.Y_original'], {}), '(self.Y_original)\n', (27193, 27210), True, 'import numpy as np\n'), ((4304, 4322), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (4312, 4322), True, 'import numpy as np\n'), ((4324, 4341), 'numpy.ones', 'np.ones', (['self.dim'], {}), '(self.dim)\n', (4331, 4341), True, 'import numpy as np\n'), ((7709, 7733), 'numpy.mean', 'np.mean', (['self.Y_original'], {}), '(self.Y_original)\n', (7716, 7733), True, 'import numpy as np\n'), ((8408, 8445), 'numpy.random.uniform', 'np.random.uniform', (['x[0]', 'x[1]'], {'size': '(1)'}), '(x[0], x[1], size=1)\n', (8425, 8445), True, 'import numpy as np\n'), ((8922, 8945), 'numpy.std', 'np.std', (['self.Y_original'], {}), '(self.Y_original)\n', (8928, 8945), True, 'import numpy as np\n'), ((15717, 15741), 'numpy.mean', 'np.mean', (['self.Y_original'], {}), '(self.Y_original)\n', (15724, 15741), True, 'import numpy as np\n'), ((18905, 18922), 'numpy.array', 'np.array', (['y_stars'], {}), '(y_stars)\n', (18913, 18922), True, 'import numpy as np\n'), ((20190, 20214), 'numpy.mean', 'np.mean', (['self.Y_original'], {}), '(self.Y_original)\n', (20197, 20214), True, 'import numpy as np\n'), ((25292, 25312), 'numpy.asscalar', 'np.asscalar', (['val_acq'], {}), '(val_acq)\n', (25303, 25312), True, 'import 
numpy as np\n'), ((25848, 25874), 'numpy.asscalar', 'np.asscalar', (['max_var_after'], {}), '(max_var_after)\n', (25859, 25874), True, 'import numpy as np\n'), ((27161, 27185), 'numpy.mean', 'np.mean', (['self.Y_original'], {}), '(self.Y_original)\n', (27168, 27185), True, 'import numpy as np\n'), ((8896, 8920), 'numpy.mean', 'np.mean', (['self.Y_original'], {}), '(self.Y_original)\n', (8903, 8920), True, 'import numpy as np\n'), ((18502, 18518), 'numpy.mean', 'np.mean', (['y_xt_TS'], {}), '(y_xt_TS)\n', (18509, 18518), True, 'import numpy as np\n'), ((18519, 18534), 'numpy.std', 'np.std', (['y_xt_TS'], {}), '(y_xt_TS)\n', (18525, 18534), True, 'import numpy as np\n'), ((18714, 18735), 'numpy.mean', 'np.mean', (['self.y_stars'], {}), '(self.y_stars)\n', (18721, 18735), True, 'import numpy as np\n'), ((18736, 18756), 'numpy.std', 'np.std', (['self.y_stars'], {}), '(self.y_stars)\n', (18742, 18756), True, 'import numpy as np\n')] |
import torch
import numpy as np
from logger import logger
from envs.babyai.utils.buffer import Buffer
import time
import psutil
import os
class Trainer(object):
    """Top-level training loop.

    Coordinates experience collection (``sampler`` + ``collect_policy``),
    RL optimization (``rl_policy``), imitation/distillation (``il_policy``),
    optional relabeling of collected batches (``relabel_policy``), replay
    buffering, logging, checkpointing, and early stopping.
    """
    def __init__(
        self,
        args=None,
        collect_policy=None,
        rl_policy=None,
        il_policy=None,
        relabel_policy=None,
        sampler=None,
        env=None,
        obs_preprocessor=None,
        log_dict=None,
        log_fn=lambda w, x: None,
    ):
        """
        :param args: experiment configuration namespace
        :param collect_policy: policy used to gather experience
        :param rl_policy: policy trained with RL (may be None)
        :param il_policy: policy trained by distillation (may be None)
        :param relabel_policy: policy used to relabel collected actions (may be None)
        :param sampler: object providing ``collect_experiences``
        :param env: environment (stored only for snapshotting)
        :param obs_preprocessor: observation preprocessing function
        :param log_dict: counters from a previous run when resuming (optional)
        :param log_fn: callback ``(itr, num_feedback)`` invoked at log intervals
        """
        # Avoid the shared-mutable-default pitfall: a dict default would be
        # shared across all Trainer instances.
        if log_dict is None:
            log_dict = {}
        self.args = args
        self.collect_policy = collect_policy
        self.rl_policy = rl_policy
        self.il_policy = il_policy
        self.relabel_policy = relabel_policy
        self.sampler = sampler
        self.env = env
        self.itr = args.start_itr
        self.obs_preprocessor = obs_preprocessor
        self.log_fn = log_fn
        self.buffer = None
        # Set run counters, or reinitialize if log_dict isn't empty (i.e. we're continuing a run).
        self.num_feedback_advice = log_dict.get('num_feedback_advice', 0)
        self.num_feedback_reward = log_dict.get('num_feedback_reward', 0)
        self.total_distillation_frames = log_dict.get('total_distillation_frames', 0)
        # Dict saying which teacher types the agent has access to for training.
        self.gave_feedback = log_dict.get('gave_feedback', {k: 0 for k in self.args.feedback_list})
        self.followed_feedback = log_dict.get('followed_feedback', {k: 0 for k in self.args.feedback_list})
        self.num_train_skip_itrs = log_dict.get('num_train_skip_itrs', 5)
        # Counters to determine early stopping
        self.best_val_loss = float('inf')
        self.best_success = 0
        self.itrs_since_best = 0
        # Most recent evaluation metrics.  These are populated by _log() /
        # the distillation branch of train(); initialize them to None so
        # save_model() can be called before any rollout or distillation
        # without raising AttributeError (e.g. with args.save_untrained).
        self.current_success = None
        self.current_val_loss = None

    def save_model(self):
        """Save training args and current policies; also track best-so-far models."""
        params = self.get_itr_snapshot(self.itr)
        if (self.rl_policy is not None) and (self.il_policy is not None):
            assert not self.rl_policy.teacher == self.il_policy.teacher, "will overwrite if policies have same teacher"
        if self.rl_policy is not None:
            self.rl_policy.save(self.args.exp_dir)
        if self.il_policy is not None:
            self.il_policy.save(self.args.exp_dir)
        # Only write "best" checkpoints once a metric has actually been measured.
        if (self.rl_policy is not None and self.current_success is not None
                and self.current_success >= self.best_success):
            self.rl_policy.save(self.args.exp_dir, save_name=f"best_{self.rl_policy.teacher}_model.pt")
        if (self.il_policy is not None and self.current_val_loss is not None
                and self.current_val_loss <= self.best_val_loss):
            self.il_policy.save(self.args.exp_dir, save_name=f"best_{self.il_policy.teacher}_model.pt")
        logger.save_itr_params(self.itr, self.args.level, params)

    def log_rollouts(self):
        """Invoke the external log callback with the current feedback count."""
        if self.args.feedback_from_buffer:
            num_feedback = self.buffer.num_feedback
        else:
            num_feedback = self.num_feedback_advice + self.num_feedback_reward
        self.log_fn(self.itr, num_feedback)

    def init_logs(self):
        """Reset all timing accumulators used by update_logs()."""
        self.all_time_training = 0
        self.all_time_collection = 0
        self.all_run_policy_time = 0
        self.all_distill_time = 0
        self.all_rollout_time = 0
        self.all_saving_time = 0
        self.all_unaccounted_time = 0
        self.start_time = time.time()
        self.rollout_time = 0
        self.saving_time = 0
        self.itr_start_time = time.time()
        self.last_success = 0
        self.last_accuracy = 0

    def update_logs(self, time_training, time_collection, distill_time, saving_time):
        """Dump per-iteration timing/memory statistics to the logger.

        :param time_training: seconds spent in RL training this iteration
        :param time_collection: seconds spent collecting experience
        :param distill_time: seconds spent distilling
        :param saving_time: seconds spent saving checkpoints
        """
        logger.dumpkvs()
        time_itr = time.time() - self.itr_start_time
        time_unaccounted = time_itr - time_training - time_collection - distill_time - saving_time
        self.all_time_training += time_training
        self.all_time_collection += time_collection
        self.all_distill_time += distill_time
        self.all_saving_time += saving_time
        self.all_unaccounted_time += time_unaccounted
        logger.logkv('Itr', self.itr)
        time_total = time.time() - self.start_time
        self.itr_start_time = time.time()
        logger.logkv('Time/Total', time_total)
        logger.logkv('Time/Itr', time_itr)
        process = psutil.Process(os.getpid())
        memory_use = process.memory_info().rss / float(2 ** 20)
        logger.logkv('Memory MiB', memory_use)
        logger.logkv('Time/Training', time_training)
        logger.logkv('Time/Collection', time_collection)
        logger.logkv('Time/Distillation', distill_time)
        logger.logkv('Time/Saving', saving_time)
        logger.logkv('Time/Unaccounted', time_unaccounted)
        logger.logkv('Time/All_Training', self.all_time_training / time_total)
        logger.logkv('Time/All_Collection', self.all_time_collection / time_total)
        logger.logkv('Time/All_RunwTeacher', self.all_run_policy_time / time_total)
        logger.logkv('Time/All_Distillation', self.all_distill_time / time_total)
        logger.logkv('Time/All_Saving', self.all_saving_time / time_total)
        logger.logkv('Time/All_Unaccounted', self.all_unaccounted_time / time_total)

    def make_buffer(self):
        """Create the replay buffer unless disabled by args.no_buffer."""
        if not self.args.no_buffer:
            self.buffer = Buffer(self.args.buffer_name, self.args.buffer_capacity, val_prob=.1,
                                 successful_only=self.args.distill_successful_only)

    def relabel(self, batch):
        """Replace a batch's actions/log-probs with ones from the relabel policy.

        Keeps dtypes and shapes identical to the originals (asserted), and
        detaches all relabeled tensors.

        :param batch: collected experience batch (modified in place and returned)
        """
        action, agent_dict = self.relabel_policy.act(batch.obs, sample=True)
        action = action.to(batch.action.dtype)
        assert type(action) == type(batch.action)
        assert action.dtype == batch.action.dtype, (action.dtype, batch.action.dtype)
        assert action.shape == batch.action.shape
        log_prob = agent_dict['dist'].log_prob(action).sum(-1).to(batch.log_prob.dtype)
        assert type(log_prob) == type(batch.log_prob)
        assert log_prob.dtype == batch.log_prob.dtype
        assert log_prob.shape == batch.log_prob.shape
        if 'argmax_action' in agent_dict:
            argmax_action = agent_dict['argmax_action'].to(batch.argmax_action.dtype)
            assert type(argmax_action) == type(batch.argmax_action)
            assert argmax_action.dtype == batch.argmax_action.dtype
            assert argmax_action.shape == batch.argmax_action.shape
            batch.argmax_action = agent_dict['argmax_action'].to(batch.argmax_action.dtype).detach()
        batch.action = action.to(batch.action.dtype).detach()
        batch.log_prob = agent_dict['dist'].log_prob(action).sum(-1).to(batch.log_prob.dtype).detach()
        return batch

    def train(self):
        """Run the main loop: collect, (optionally) RL-train, distill, log, save."""
        self.init_logs()
        self.make_buffer()
        for itr in range(self.itr, self.args.n_itr):
            self.itr = itr
            # Stop once the advice budget is exhausted.
            if self.num_feedback_advice + self.num_feedback_reward >= self.args.n_advice:
                self.log_rollouts()
                self.save_model()
                return
            if self.args.save_untrained:
                self.save_model()
                return
            if itr % self.args.log_interval == 0:
                self.log_rollouts()
            logger.log("\n ---------------- Iteration %d ----------------" % itr)
            """ -------------------- Sampling --------------------------"""
            logger.log("Obtaining samples...")
            time_env_sampling_start = time.time()
            self.should_collect = self.args.collect_teacher is not None
            self.should_train_rl = self.args.rl_teacher is not None
            if self.should_collect:
                # Collect if we are distilling OR if we're not skipping
                samples_data, episode_logs = self.sampler.collect_experiences(
                    collect_with_oracle=self.args.collect_with_oracle,
                    collect_reward=self.should_train_rl,
                    train=self.should_train_rl)
                if self.relabel_policy is not None:
                    samples_data = self.relabel(samples_data)
                buffer_start = time.time()
                # Persist the buffer to disk only every 200 iterations.
                self.buffer.add_batch(samples_data, save=self.itr % 200 == 0)
                buffer_time = time.time() - buffer_start
                logger.logkv('Time/Buffer', buffer_time)
            else:
                episode_logs = None
                samples_data = None
            """ -------------------- Training --------------------------"""
            time_collection = time.time() - time_env_sampling_start
            time_training_start = time.time()
            if self.should_train_rl and itr > self.args.min_itr_steps:
                logger.log("RL Training...")
                for _ in range(self.args.epochs):
                    if self.args.on_policy:
                        sampled_batch = samples_data
                    else:
                        sampled_batch = self.buffer.sample(total_num_samples=self.args.batch_size, split='train')
                    summary_logs = self.rl_policy.optimize_policy(sampled_batch, itr)
                if not self.args.on_policy:
                    val_batch = self.buffer.sample(total_num_samples=self.args.batch_size, split='val')
                    self.rl_policy.log_rl(val_batch)
            else:
                summary_logs = None
            time_training = time.time() - time_training_start
            self._log(episode_logs, summary_logs, samples_data, tag="Train")
            """ ------------------ Distillation ---------------------"""
            self.should_distill = self.args.distill_teacher is not None and self.itr >= self.args.min_itr_steps_distill
            if self.args.no_distill or (self.buffer is not None and self.buffer.counts_train == 0):
                self.should_distill = False
            if self.should_distill:
                logger.log("Distilling ...")
                time_distill_start = time.time()
                for dist_i in range(self.args.distillation_steps):
                    sampled_batch = self.buffer.sample(total_num_samples=self.args.batch_size, split='train')
                    self.total_distillation_frames += len(sampled_batch)
                    self.distill(sampled_batch, is_training=True)
                sampled_val_batch = self.buffer.sample(total_num_samples=self.args.batch_size, split='val')
                distill_log_val = self.distill(sampled_val_batch, is_training=False)
                val_loss = distill_log_val['Loss']
                self.current_val_loss = val_loss
                # Early-stopping bookkeeping on validation loss.
                self.itrs_since_best = 0 if val_loss < self.best_val_loss else self.itrs_since_best + 1
                self.best_val_loss = min(self.best_val_loss, val_loss)
                distill_time = time.time() - time_distill_start
            else:
                distill_time = 0
            """ ------------------- Logging and Saving --------------------------"""
            logger.log(self.args.exp_dir)
            self.update_logs(time_training, time_collection, distill_time, self.saving_time)
            should_terminate = self.save_and_maybe_early_stop()
            if should_terminate:
                break
        # All done!
        self.log_rollouts()
        logger.log("Training finished")

    def save_and_maybe_early_stop(self):
        """Save on eval intervals; return True if early stopping triggered."""
        early_stopping = self.itrs_since_best > self.args.early_stop
        logger.logkv('Train/BestLoss', self.best_val_loss)
        logger.logkv('Train/ItrsSinceBest', self.itrs_since_best)
        if early_stopping or (self.itr % self.args.eval_interval == 0) or (self.itr == self.args.n_itr - 1):
            saving_time_start = time.time()
            logger.log("Saving snapshot...")
            self.save_model()
            logger.log("Saved")
            self.saving_time = time.time() - saving_time_start
        return early_stopping

    def _log(self, episode_logs, summary_logs, data, tag=""):
        """Log episode statistics, feedback counters, and training summaries.

        :param episode_logs: dict of per-episode stats from the sampler (or None)
        :param summary_logs: dict of RL optimizer stats (or None)
        :param data: collected batch (used for accuracy metrics when discrete)
        :param tag: logging namespace prefix, e.g. "Train"
        """
        logger.logkv('Level', self.args.level)
        counts_train = 0 if self.buffer is None else self.buffer.counts_train
        logger.logkv("BufferSize", counts_train)
        if episode_logs is not None:
            avg_return = np.mean(episode_logs['return_per_episode'])
            avg_path_length = np.mean(episode_logs['num_frames_per_episode'])
            avg_success = np.mean(episode_logs['success_per_episode'])
            avg_dist_to_goal = np.mean(episode_logs['dist_to_goal_per_episode'])
            avg_reward = np.mean(episode_logs["return_per_episode"])
            self.current_success = avg_success
            self.best_success = max(self.best_success, avg_success)
            logger.logkv(f"{tag}/Success", avg_success)
            logger.logkv(f"{tag}/DistToGoal", avg_dist_to_goal)
            logger.logkv(f"{tag}/Reward", avg_reward)
            logger.logkv(f"{tag}/Return", avg_return)
            logger.logkv(f"{tag}/PathLength", avg_path_length)
            if self.args.discrete:
                logger.logkv(f"{tag}/Accuracy", torch.eq(data.action, data.teacher_action).float().mean().item())
                logger.logkv(f"{tag}/Argmax_Accuracy", torch.eq(data.action_probs.argmax(dim=1).unsqueeze(1),
                                                                data.teacher_action).float().mean().item())
            self.num_feedback_advice += episode_logs['num_feedback_advice']
            self.num_feedback_reward += episode_logs['num_feedback_reward']
            for k in self.args.feedback_list:
                k_gave = f'gave_{k}'
                if k_gave in episode_logs:
                    self.gave_feedback[k] += episode_logs[k_gave]
                    logger.logkv(f"Feedback/Total_{k_gave}", self.gave_feedback[k])
                k_followed = f'followed_{k}'
                if k_followed in episode_logs:
                    self.followed_feedback[k] += episode_logs[k_followed]
                    logger.logkv(f"Feedback/Total_{k_followed}", self.followed_feedback[k])
                    logger.logkv(f"Feedback/Ratio_{k_followed}", episode_logs[k_followed] / episode_logs[k_gave])
            logger.logkv(f"{tag}/NumFeedbackAdvice", self.num_feedback_advice)
            logger.logkv(f"{tag}/NumFeedbackReward", self.num_feedback_reward)
            logger.logkv(f"{tag}/NumFeedbackTotal", self.num_feedback_advice + self.num_feedback_reward)
            logger.logkv(f"{tag}/num_feedback_reward", episode_logs['num_feedback_reward'])
            logger.logkv(f"{tag}/num_feedback_advice", episode_logs['num_feedback_advice'])
            for key in episode_logs:
                if 'followed_' in key or 'gave_' in key:
                    logger.logkv(f"Feedback/{key}", episode_logs[key])
        if summary_logs is not None:
            for k, v in summary_logs.items():
                if not k == 'Accuracy':
                    logger.logkv(f"{tag}/{k}", v)

    def distill(self, samples, is_training=False, source=None):
        """Run one distillation step on the IL policy; return its log dict."""
        if source is None:
            source = self.args.source
        log = self.il_policy.distill(samples, source=source, is_training=is_training)
        return log

    def get_itr_snapshot(self, itr):
        """ Saves training args (models are saved elsewhere) """
        d = dict(itr=itr,
                 env=self.env,
                 args=self.args,
                 log_dict={
                     'num_feedback_advice': self.num_feedback_advice,
                     'num_feedback_reward': self.num_feedback_reward,
                     'total_distillation_frames': self.total_distillation_frames,
                     'num_train_skip_itrs': self.num_train_skip_itrs,
                     'gave_feedback': self.gave_feedback,
                     'followed_feedback': self.followed_feedback,
                 })
        return d
| [
"logger.logger.dumpkvs",
"torch.eq",
"os.getpid",
"logger.logger.save_itr_params",
"logger.logger.log",
"time.time",
"envs.babyai.utils.buffer.Buffer",
"numpy.mean",
"logger.logger.logkv"
] | [((2518, 2575), 'logger.logger.save_itr_params', 'logger.save_itr_params', (['self.itr', 'self.args.level', 'params'], {}), '(self.itr, self.args.level, params)\n', (2540, 2575), False, 'from logger import logger\n'), ((3137, 3148), 'time.time', 'time.time', ([], {}), '()\n', (3146, 3148), False, 'import time\n'), ((3238, 3249), 'time.time', 'time.time', ([], {}), '()\n', (3247, 3249), False, 'import time\n'), ((3406, 3422), 'logger.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (3420, 3422), False, 'from logger import logger\n'), ((3829, 3858), 'logger.logger.logkv', 'logger.logkv', (['"""Itr"""', 'self.itr'], {}), "('Itr', self.itr)\n", (3841, 3858), False, 'from logger import logger\n'), ((3941, 3952), 'time.time', 'time.time', ([], {}), '()\n', (3950, 3952), False, 'import time\n'), ((3961, 3999), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Total"""', 'time_total'], {}), "('Time/Total', time_total)\n", (3973, 3999), False, 'from logger import logger\n'), ((4008, 4042), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Itr"""', 'time_itr'], {}), "('Time/Itr', time_itr)\n", (4020, 4042), False, 'from logger import logger\n'), ((4162, 4200), 'logger.logger.logkv', 'logger.logkv', (['"""Memory MiB"""', 'memory_use'], {}), "('Memory MiB', memory_use)\n", (4174, 4200), False, 'from logger import logger\n'), ((4210, 4254), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Training"""', 'time_training'], {}), "('Time/Training', time_training)\n", (4222, 4254), False, 'from logger import logger\n'), ((4263, 4311), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Collection"""', 'time_collection'], {}), "('Time/Collection', time_collection)\n", (4275, 4311), False, 'from logger import logger\n'), ((4320, 4367), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Distillation"""', 'distill_time'], {}), "('Time/Distillation', distill_time)\n", (4332, 4367), False, 'from logger import logger\n'), ((4376, 4416), 'logger.logger.logkv', 'logger.logkv', 
(['"""Time/Saving"""', 'saving_time'], {}), "('Time/Saving', saving_time)\n", (4388, 4416), False, 'from logger import logger\n'), ((4425, 4475), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Unaccounted"""', 'time_unaccounted'], {}), "('Time/Unaccounted', time_unaccounted)\n", (4437, 4475), False, 'from logger import logger\n'), ((4484, 4554), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_Training"""', '(self.all_time_training / time_total)'], {}), "('Time/All_Training', self.all_time_training / time_total)\n", (4496, 4554), False, 'from logger import logger\n'), ((4563, 4637), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_Collection"""', '(self.all_time_collection / time_total)'], {}), "('Time/All_Collection', self.all_time_collection / time_total)\n", (4575, 4637), False, 'from logger import logger\n'), ((4646, 4721), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_RunwTeacher"""', '(self.all_run_policy_time / time_total)'], {}), "('Time/All_RunwTeacher', self.all_run_policy_time / time_total)\n", (4658, 4721), False, 'from logger import logger\n'), ((4730, 4803), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_Distillation"""', '(self.all_distill_time / time_total)'], {}), "('Time/All_Distillation', self.all_distill_time / time_total)\n", (4742, 4803), False, 'from logger import logger\n'), ((4812, 4878), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_Saving"""', '(self.all_saving_time / time_total)'], {}), "('Time/All_Saving', self.all_saving_time / time_total)\n", (4824, 4878), False, 'from logger import logger\n'), ((4887, 4963), 'logger.logger.logkv', 'logger.logkv', (['"""Time/All_Unaccounted"""', '(self.all_unaccounted_time / time_total)'], {}), "('Time/All_Unaccounted', self.all_unaccounted_time / time_total)\n", (4899, 4963), False, 'from logger import logger\n'), ((11153, 11184), 'logger.logger.log', 'logger.log', (['"""Training finished"""'], {}), "('Training finished')\n", (11163, 11184), False, 'from logger 
import logger\n'), ((11304, 11354), 'logger.logger.logkv', 'logger.logkv', (['"""Train/BestLoss"""', 'self.best_val_loss'], {}), "('Train/BestLoss', self.best_val_loss)\n", (11316, 11354), False, 'from logger import logger\n'), ((11363, 11420), 'logger.logger.logkv', 'logger.logkv', (['"""Train/ItrsSinceBest"""', 'self.itrs_since_best'], {}), "('Train/ItrsSinceBest', self.itrs_since_best)\n", (11375, 11420), False, 'from logger import logger\n'), ((11845, 11883), 'logger.logger.logkv', 'logger.logkv', (['"""Level"""', 'self.args.level'], {}), "('Level', self.args.level)\n", (11857, 11883), False, 'from logger import logger\n'), ((11970, 12010), 'logger.logger.logkv', 'logger.logkv', (['"""BufferSize"""', 'counts_train'], {}), "('BufferSize', counts_train)\n", (11982, 12010), False, 'from logger import logger\n'), ((3443, 3454), 'time.time', 'time.time', ([], {}), '()\n', (3452, 3454), False, 'import time\n'), ((3881, 3892), 'time.time', 'time.time', ([], {}), '()\n', (3890, 3892), False, 'import time\n'), ((4077, 4088), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4086, 4088), False, 'import os\n'), ((5054, 5179), 'envs.babyai.utils.buffer.Buffer', 'Buffer', (['self.args.buffer_name', 'self.args.buffer_capacity'], {'val_prob': '(0.1)', 'successful_only': 'self.args.distill_successful_only'}), '(self.args.buffer_name, self.args.buffer_capacity, val_prob=0.1,\n successful_only=self.args.distill_successful_only)\n', (5060, 5179), False, 'from envs.babyai.utils.buffer import Buffer\n'), ((6960, 7032), 'logger.logger.log', 'logger.log', (['("""\n ---------------- Iteration %d ----------------""" % itr)'], {}), '("""\n ---------------- Iteration %d ----------------""" % itr)\n', (6970, 7032), False, 'from logger import logger\n'), ((7120, 7154), 'logger.logger.log', 'logger.log', (['"""Obtaining samples..."""'], {}), "('Obtaining samples...')\n", (7130, 7154), False, 'from logger import logger\n'), ((7193, 7204), 'time.time', 'time.time', ([], {}), '()\n', (7202, 7204), 
False, 'import time\n'), ((8493, 8504), 'time.time', 'time.time', ([], {}), '()\n', (8502, 8504), False, 'import time\n'), ((10854, 10883), 'logger.logger.log', 'logger.log', (['self.args.exp_dir'], {}), '(self.args.exp_dir)\n', (10864, 10883), False, 'from logger import logger\n'), ((11562, 11573), 'time.time', 'time.time', ([], {}), '()\n', (11571, 11573), False, 'import time\n'), ((11586, 11618), 'logger.logger.log', 'logger.log', (['"""Saving snapshot..."""'], {}), "('Saving snapshot...')\n", (11596, 11618), False, 'from logger import logger\n'), ((11661, 11680), 'logger.logger.log', 'logger.log', (['"""Saved"""'], {}), "('Saved')\n", (11671, 11680), False, 'from logger import logger\n'), ((12073, 12116), 'numpy.mean', 'np.mean', (["episode_logs['return_per_episode']"], {}), "(episode_logs['return_per_episode'])\n", (12080, 12116), True, 'import numpy as np\n'), ((12147, 12194), 'numpy.mean', 'np.mean', (["episode_logs['num_frames_per_episode']"], {}), "(episode_logs['num_frames_per_episode'])\n", (12154, 12194), True, 'import numpy as np\n'), ((12221, 12265), 'numpy.mean', 'np.mean', (["episode_logs['success_per_episode']"], {}), "(episode_logs['success_per_episode'])\n", (12228, 12265), True, 'import numpy as np\n'), ((12297, 12346), 'numpy.mean', 'np.mean', (["episode_logs['dist_to_goal_per_episode']"], {}), "(episode_logs['dist_to_goal_per_episode'])\n", (12304, 12346), True, 'import numpy as np\n'), ((12372, 12415), 'numpy.mean', 'np.mean', (["episode_logs['return_per_episode']"], {}), "(episode_logs['return_per_episode'])\n", (12379, 12415), True, 'import numpy as np\n'), ((12543, 12586), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/Success"""', 'avg_success'], {}), "(f'{tag}/Success', avg_success)\n", (12555, 12586), False, 'from logger import logger\n'), ((12599, 12650), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/DistToGoal"""', 'avg_dist_to_goal'], {}), "(f'{tag}/DistToGoal', avg_dist_to_goal)\n", (12611, 12650), False, 'from logger 
import logger\n'), ((12663, 12704), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/Reward"""', 'avg_reward'], {}), "(f'{tag}/Reward', avg_reward)\n", (12675, 12704), False, 'from logger import logger\n'), ((12717, 12758), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/Return"""', 'avg_return'], {}), "(f'{tag}/Return', avg_return)\n", (12729, 12758), False, 'from logger import logger\n'), ((12771, 12821), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/PathLength"""', 'avg_path_length'], {}), "(f'{tag}/PathLength', avg_path_length)\n", (12783, 12821), False, 'from logger import logger\n'), ((14024, 14090), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/NumFeedbackAdvice"""', 'self.num_feedback_advice'], {}), "(f'{tag}/NumFeedbackAdvice', self.num_feedback_advice)\n", (14036, 14090), False, 'from logger import logger\n'), ((14103, 14169), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/NumFeedbackReward"""', 'self.num_feedback_reward'], {}), "(f'{tag}/NumFeedbackReward', self.num_feedback_reward)\n", (14115, 14169), False, 'from logger import logger\n'), ((14182, 14279), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/NumFeedbackTotal"""', '(self.num_feedback_advice + self.num_feedback_reward)'], {}), "(f'{tag}/NumFeedbackTotal', self.num_feedback_advice + self.\n num_feedback_reward)\n", (14194, 14279), False, 'from logger import logger\n'), ((14287, 14366), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/num_feedback_reward"""', "episode_logs['num_feedback_reward']"], {}), "(f'{tag}/num_feedback_reward', episode_logs['num_feedback_reward'])\n", (14299, 14366), False, 'from logger import logger\n'), ((14379, 14458), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/num_feedback_advice"""', "episode_logs['num_feedback_advice']"], {}), "(f'{tag}/num_feedback_advice', episode_logs['num_feedback_advice'])\n", (14391, 14458), False, 'from logger import logger\n'), ((8018, 8029), 'time.time', 'time.time', ([], {}), '()\n', (8027, 8029), 
False, 'import time\n'), ((8182, 8222), 'logger.logger.logkv', 'logger.logkv', (['"""Time/Buffer"""', 'buffer_time'], {}), "('Time/Buffer', buffer_time)\n", (8194, 8222), False, 'from logger import logger\n'), ((8421, 8432), 'time.time', 'time.time', ([], {}), '()\n', (8430, 8432), False, 'import time\n'), ((8592, 8620), 'logger.logger.log', 'logger.log', (['"""RL Training..."""'], {}), "('RL Training...')\n", (8602, 8620), False, 'from logger import logger\n'), ((9277, 9288), 'time.time', 'time.time', ([], {}), '()\n', (9286, 9288), False, 'import time\n'), ((9778, 9806), 'logger.logger.log', 'logger.log', (['"""Distilling ..."""'], {}), "('Distilling ...')\n", (9788, 9806), False, 'from logger import logger\n'), ((9844, 9855), 'time.time', 'time.time', ([], {}), '()\n', (9853, 9855), False, 'import time\n'), ((11712, 11723), 'time.time', 'time.time', ([], {}), '()\n', (11721, 11723), False, 'import time\n'), ((8139, 8150), 'time.time', 'time.time', ([], {}), '()\n', (8148, 8150), False, 'import time\n'), ((10672, 10683), 'time.time', 'time.time', ([], {}), '()\n', (10681, 10683), False, 'import time\n'), ((13555, 13618), 'logger.logger.logkv', 'logger.logkv', (['f"""Feedback/Total_{k_gave}"""', 'self.gave_feedback[k]'], {}), "(f'Feedback/Total_{k_gave}', self.gave_feedback[k])\n", (13567, 13618), False, 'from logger import logger\n'), ((14573, 14623), 'logger.logger.logkv', 'logger.logkv', (['f"""Feedback/{key}"""', 'episode_logs[key]'], {}), "(f'Feedback/{key}', episode_logs[key])\n", (14585, 14623), False, 'from logger import logger\n'), ((14767, 14796), 'logger.logger.logkv', 'logger.logkv', (['f"""{tag}/{k}"""', 'v'], {}), "(f'{tag}/{k}', v)\n", (14779, 14796), False, 'from logger import logger\n'), ((13821, 13892), 'logger.logger.logkv', 'logger.logkv', (['f"""Feedback/Total_{k_followed}"""', 'self.followed_feedback[k]'], {}), "(f'Feedback/Total_{k_followed}', self.followed_feedback[k])\n", (13833, 13892), False, 'from logger import logger\n'), ((13917, 
14014), 'logger.logger.logkv', 'logger.logkv', (['f"""Feedback/Ratio_{k_followed}"""', '(episode_logs[k_followed] / episode_logs[k_gave])'], {}), "(f'Feedback/Ratio_{k_followed}', episode_logs[k_followed] /\n episode_logs[k_gave])\n", (13929, 14014), False, 'from logger import logger\n'), ((12906, 12948), 'torch.eq', 'torch.eq', (['data.action', 'data.teacher_action'], {}), '(data.action, data.teacher_action)\n', (12914, 12948), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, asarray
from ..common import safe_import
with safe_import():
from scipy.special import factorial
class Benchmark:

    """
    Defines a global optimization benchmark problem.

    This abstract class defines the basic structure of a global
    optimization problem. Subclasses should implement the ``fun`` method
    for a particular optimization problem.

    Attributes
    ----------
    N : int
        The dimensionality of the problem.
    bounds : sequence
        The lower/upper bounds to be used for minimizing the problem.
        This a list of (lower, upper) tuples that contain the lower and upper
        bounds for the problem.  The problem should not be asked for evaluation
        outside these bounds. ``len(bounds) == N``.
    xmin : sequence
        The lower bounds for the problem
    xmax : sequence
        The upper bounds for the problem
    fglob : float
        The global minimum of the evaluated function.
    global_optimum : sequence
        A list of vectors that provide the locations of the global minimum.
        Note that some problems have multiple global minima, not all of which
        may be listed.
    nfev : int
        the number of function evaluations that the object has been asked to
        calculate.
    change_dimensionality : bool
        Whether we can change the benchmark function `x` variable length (i.e.,
        the dimensionality of the problem)
    custom_bounds : sequence
        a list of tuples that contain lower/upper bounds for use in plotting.
    """

    def __init__(self, dimensions):
        """
        Initialises the problem

        Parameters
        ----------
        dimensions : int
            The dimensionality of the problem
        """
        self._dimensions = dimensions
        self.nfev = 0
        self.fglob = np.nan
        self.global_optimum = None
        self.change_dimensionality = False
        self.custom_bounds = None

    def __str__(self):
        return '{0} ({1} dimensions)'.format(self.__class__.__name__, self.N)

    def __repr__(self):
        return self.__class__.__name__

    def initial_vector(self):
        """
        Random initialisation for the benchmark problem.

        Returns
        -------
        x : sequence
            a vector of length ``N`` that contains random floating point
            numbers that lie between the lower and upper bounds for a given
            parameter.
        """
        return asarray([np.random.uniform(l, u) for l, u in self.bounds])

    def success(self, x, tol=1.e-5):
        """
        Tests if a candidate solution at the global minimum.
        The default test is

        Parameters
        ----------
        x : sequence
            The candidate vector for testing if the global minimum has been
            reached. Must have ``len(x) == self.N``
        tol : float
            The evaluated function and known global minimum must differ by less
            than this amount to be at a global minimum.

        Returns
        -------
        bool : is the candidate vector at the global minimum?
        """
        val = self.fun(asarray(x))
        if abs(val - self.fglob) < tol:
            return True

        # the solution should still be in bounds, otherwise immediate fail.
        # np.asfarray was deprecated and removed in NumPy 2.0; build the float
        # bounds array once instead of twice.
        bounds = np.asarray(self.bounds, dtype=float)
        if np.any(x > bounds[:, 1]):
            return False
        if np.any(x < bounds[:, 0]):
            return False

        # you found a lower global minimum.  This shouldn't happen.
        if val < self.fglob:
            raise ValueError("Found a lower global minimum",
                             x,
                             val,
                             self.fglob)

        return False

    def fun(self, x):
        """
        Evaluation of the benchmark function.

        Parameters
        ----------
        x : sequence
            The candidate vector for evaluating the benchmark problem. Must
            have ``len(x) == self.N``.

        Returns
        -------
        val : float
              the evaluated benchmark function
        """

        raise NotImplementedError

    def change_dimensions(self, ndim):
        """
        Changes the dimensionality of the benchmark problem

        The dimensionality will only be changed if the problem is suitable

        Parameters
        ----------
        ndim : int
               The new dimensionality for the problem.
        """
        if self.change_dimensionality:
            self._dimensions = ndim
        else:
            raise ValueError('dimensionality cannot be changed for this'
                             'problem')

    @property
    def bounds(self):
        """
        The lower/upper bounds to be used for minimizing the problem.
        This a list of (lower, upper) tuples that contain the lower and upper
        bounds for the problem.  The problem should not be asked for evaluation
        outside these bounds. ``len(bounds) == N``.
        """
        if self.change_dimensionality:
            return [self._bounds[0]] * self.N
        else:
            return self._bounds

    @property
    def N(self):
        """
        The dimensionality of the problem.

        Returns
        -------
        N : int
            The dimensionality of the problem
        """
        return self._dimensions

    @property
    def xmin(self):
        """
        The lower bounds for the problem

        Returns
        -------
        xmin : sequence
            The lower bounds for the problem
        """
        return asarray([b[0] for b in self.bounds])

    @property
    def xmax(self):
        """
        The upper bounds for the problem

        Returns
        -------
        xmax : sequence
            The upper bounds for the problem
        """
        return asarray([b[1] for b in self.bounds])
| [
"numpy.asfarray",
"numpy.random.uniform",
"numpy.asarray",
"numpy.abs"
] | [((5634, 5670), 'numpy.asarray', 'asarray', (['[b[0] for b in self.bounds]'], {}), '([b[0] for b in self.bounds])\n', (5641, 5670), False, 'from numpy import abs, asarray\n'), ((5888, 5924), 'numpy.asarray', 'asarray', (['[b[1] for b in self.bounds]'], {}), '([b[1] for b in self.bounds])\n', (5895, 5924), False, 'from numpy import abs, asarray\n'), ((3199, 3209), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (3206, 3209), False, 'from numpy import abs, asarray\n'), ((3222, 3243), 'numpy.abs', 'abs', (['(val - self.fglob)'], {}), '(val - self.fglob)\n', (3225, 3243), False, 'from numpy import abs, asarray\n'), ((2536, 2559), 'numpy.random.uniform', 'np.random.uniform', (['l', 'u'], {}), '(l, u)\n', (2553, 2559), True, 'import numpy as np\n'), ((3374, 3398), 'numpy.asfarray', 'np.asfarray', (['self.bounds'], {}), '(self.bounds)\n', (3385, 3398), True, 'import numpy as np\n'), ((3454, 3478), 'numpy.asfarray', 'np.asfarray', (['self.bounds'], {}), '(self.bounds)\n', (3465, 3478), True, 'import numpy as np\n')] |
from lenstronomy.SimulationAPI.observation_api import SingleBand
from lenstronomy.Data.imaging_data import ImageData
import lenstronomy.Util.util as util
import numpy as np
__all__ = ['DataAPI']
class DataAPI(SingleBand):
    """
    Wrapper around the general data description in SingleBand() that
    translates those observation quantities into the configuration expected
    by the core lenstronomy Data modules, so images can be simulated
    accordingly.

    This class is meant as an example wrapper; more options for PSF and data
    types exist in the specific modules if you are interested in them.
    """

    def __init__(self, numpix, **kwargs_single_band):
        """
        :param numpix: number of pixels per axis in the simulation to be modelled
        :param kwargs_single_band: keyword arguments used to create instance of SingleBand class
        """
        self.numpix = numpix
        SingleBand.__init__(self, **kwargs_single_band)

    @property
    def data_class(self):
        """
        Instance of the lenstronomy Data() class built from the knowledge of
        the observation.

        :return: instance of Data() class
        """
        return ImageData(**self.kwargs_data)

    @property
    def kwargs_data(self):
        """
        :return: keyword arguments for ImageData class instance
        """
        grid_info = util.make_grid_with_coordtransform(
            numPix=self.numpix, deltapix=self.pixel_scale, subgrid_res=1,
            left_lower=False, inverse=False)
        # Only the coordinate origin and the pixel-to-angle matrix are needed here.
        _, _, ra_at_xy_0, dec_at_xy_0, _, _, transform_matrix, _ = grid_info

        # CCD gain corrected exposure time to allow a direct Poisson estimates based on IID counts
        exposure_time_eff = self.flux_iid(1)
        return {
            'image_data': np.zeros((self.numpix, self.numpix)),
            'ra_at_xy_0': ra_at_xy_0,
            'dec_at_xy_0': dec_at_xy_0,
            'transform_pix2angle': transform_matrix,
            'background_rms': self.background_noise,
            'exposure_time': exposure_time_eff,
        }
| [
"lenstronomy.SimulationAPI.observation_api.SingleBand.__init__",
"numpy.zeros",
"lenstronomy.Util.util.make_grid_with_coordtransform",
"lenstronomy.Data.imaging_data.ImageData"
] | [((950, 997), 'lenstronomy.SimulationAPI.observation_api.SingleBand.__init__', 'SingleBand.__init__', (['self'], {}), '(self, **kwargs_single_band)\n', (969, 997), False, 'from lenstronomy.SimulationAPI.observation_api import SingleBand\n'), ((1214, 1243), 'lenstronomy.Data.imaging_data.ImageData', 'ImageData', ([], {}), '(**self.kwargs_data)\n', (1223, 1243), False, 'from lenstronomy.Data.imaging_data import ImageData\n'), ((1503, 1637), 'lenstronomy.Util.util.make_grid_with_coordtransform', 'util.make_grid_with_coordtransform', ([], {'numPix': 'self.numpix', 'deltapix': 'self.pixel_scale', 'subgrid_res': '(1)', 'left_lower': '(False)', 'inverse': '(False)'}), '(numPix=self.numpix, deltapix=self.\n pixel_scale, subgrid_res=1, left_lower=False, inverse=False)\n', (1537, 1637), True, 'import lenstronomy.Util.util as util\n'), ((1830, 1866), 'numpy.zeros', 'np.zeros', (['(self.numpix, self.numpix)'], {}), '((self.numpix, self.numpix))\n', (1838, 1866), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import os
import warnings
import numpy as np
import scipy.sparse as sparse
from .default_constants import file_names_dict
from .exceptions import DirectoryDoesNotExistsError, FileDoesNotExistError
from .helpers import check_and_make_folder
# hf_data is HighFidelityData, not imported due to circular import
# rb_data is ReducedOrderData, not imported due to circular import
# from ._hf_data_class import HighFidelityData
# from ._rb_data_class import ReducedOrderData
def hf_save(hf_data, directory_path, has_neumann, has_non_homo_dirichlet, has_non_homo_neumann,
            default_file_names_dict=file_names_dict):
    """
    Save the high-fidelity data

    Parameters
    ----------
    hf_data :
        high-fidelity data.
    directory_path : str
        path to directory to save in.
    has_neumann : bool
        Does the problem have Neumann boundary conditions.
    has_non_homo_dirichlet : bool
        Does the problem have non homogeneous Dirichlet boundary conditions.
    has_non_homo_neumann : bool
        Does the problem have non homogeneous Neumann boundary conditions.
    default_file_names_dict : dict, optional
        Dictionary of names for the files, see e.g. file_names_dict in default_constants.
        The default is file_names_dict.

    Returns
    -------
    None.
    """
    hf_folder_path = os.path.join(directory_path, "high_fidelity")
    hf_folder_path = check_and_make_folder(hf_data.n, hf_folder_path, n_counts_nodes=True)
    print(f"Saving in directory {hf_folder_path}")
    # matrices a1 and a2, stored in sparse CSR format
    a1_file_path = os.path.join(hf_folder_path, default_file_names_dict["a1"])
    a2_file_path = os.path.join(hf_folder_path, default_file_names_dict["a2"])
    sparse.save_npz(a1_file_path, hf_data.a1_full.tocsr())
    sparse.save_npz(a2_file_path, hf_data.a2_full.tocsr())
    # body force load vector
    f_load_lv_file_path = os.path.join(hf_folder_path, default_file_names_dict["f_load_lv"])
    np.save(f_load_lv_file_path, hf_data.f_load_lv_full, allow_pickle=False)
    # Dirichlet edge
    dirichlet_edge_file_path = os.path.join(hf_folder_path, default_file_names_dict["dirichlet_edge"])
    np.save(dirichlet_edge_file_path, hf_data.dirichlet_edge, allow_pickle=False)
    # p, tri and edge, stored together in one .npz archive.
    # BUG FIX: np.savez has no ``allow_pickle`` parameter; passing it as a keyword
    # silently stored an extra array named "allow_pickle" in the archive.
    p_tri_edge_file_path = os.path.join(hf_folder_path, default_file_names_dict["p_tri_edge"])
    np.savez(p_tri_edge_file_path, p=hf_data.p, tri=hf_data.tri, edge=hf_data.edge)
    # Neumann load vector; when the Neumann conditions are homogeneous a marker
    # array is saved instead, so the loader can tell the two cases apart
    if has_neumann:
        f_load_neumann_file_path = os.path.join(hf_folder_path, default_file_names_dict["f_load_neumann"])
        if has_non_homo_neumann:
            np.save(f_load_neumann_file_path, hf_data.f_load_neumann_full, allow_pickle=False)
        else:
            np.save(f_load_neumann_file_path, np.array(["has homo neumann"]), allow_pickle=False)
    # lifting function vector for non homogeneous Dirichlet conditions
    if has_non_homo_dirichlet:
        rg_lifting_func_file_path = os.path.join(hf_folder_path, default_file_names_dict["rg"])
        np.save(rg_lifting_func_file_path, hf_data.rg, allow_pickle=False)
    print(f"Saved the high fidelity data in {hf_folder_path}")
def rb_save(n, rb_data, directory_path, has_neumann, has_non_homo_dirichlet, has_non_homo_neumann,
            default_file_names_dict=file_names_dict):
    """
    Save the reduced-order data

    Parameters
    ----------
    n : int
        The number of nodes along the axes.
    rb_data :
        reduced-order data.
    directory_path : str
        path to directory to save in.
    has_neumann : bool
        Does the problem have Neumann boundary conditions.
    has_non_homo_dirichlet : bool
        Does the problem have non homogeneous Dirichlet boundary conditions.
    has_non_homo_neumann : bool
        Does the problem have non homogeneous Neumann boundary conditions.
    default_file_names_dict : dict, optional
        Dictionary of names for the files, see e.g. file_names_dict in default_constants.
        The default is file_names_dict.

    Returns
    -------
    None.
    """
    rb_folder_path = os.path.join(directory_path, "reduced_order")
    rb_folder_path = check_and_make_folder(n, rb_folder_path, n_counts_nodes=True)
    print(f"Saving in directory {rb_folder_path}")

    def file_path(key):
        # resolve the save path for a given file-name key inside the folder
        return os.path.join(rb_folder_path, default_file_names_dict[key])

    # reduced matrices a1 and a2
    np.save(file_path("a1_rom"), rb_data.a1_free_rom, allow_pickle=False)
    np.save(file_path("a2_rom"), rb_data.a2_free_rom, allow_pickle=False)
    # reduced body force load vector
    np.save(file_path("f_load_lv_rom"), rb_data.f_load_lv_free_rom, allow_pickle=False)
    # reduced Neumann load vector, only present for non homogeneous Neumann conditions
    if has_neumann and has_non_homo_neumann:
        np.save(file_path("f_load_neumann_rom"), rb_data.f_load_neumann_free_rom, allow_pickle=False)
    # reduced lifting Dirichlet load vectors
    if has_non_homo_dirichlet:
        np.save(file_path("f1_dir_rom"), rb_data.f1_dirichlet_rom, allow_pickle=False)
        np.save(file_path("f2_dir_rom"), rb_data.f2_dirichlet_rom, allow_pickle=False)
    # the projection matrix v
    np.save(file_path("v"), rb_data.v, allow_pickle=False)
    # the singular values squared
    np.save(file_path("sigma2"), rb_data.sigma2_vec, allow_pickle=False)
    # pod parameters, gathered row-wise as pairs
    pod_parameters = np.array([rb_data.e_young_range,
                               rb_data.nu_poisson_range,
                               rb_data.rb_grid,
                               (rb_data.eps_pod, rb_data.pod_sampling_mode),
                               (rb_data.n_rom_max, rb_data.n_rom_cut)])
    np.save(file_path("pod_parameters"), pod_parameters, allow_pickle=False)
    print(f"Saved the reduced order data in {rb_folder_path}")
def hf_from_files(hf_data, directory_path, default_file_names_dict=file_names_dict):
    """
    Get the high-fidelity data from saved files

    Parameters
    ----------
    hf_data :
        high-fidelity data.
    directory_path : str
        path to directory to save in.
    default_file_names_dict : dict, optional
        Dictionary of names for the files, see e.g. file_names_dict in default_constants.
        The default is file_names_dict.

    Raises
    ------
    DirectoryDoesNotExistsError
        If directory does not exit.

    Returns
    -------
    has_neumann : bool
        Does the problem have Neumann boundary conditions.
    has_non_homo_dirichlet : bool
        Does the problem have non homogeneous Dirichlet boundary conditions.
    has_non_homo_neumann : bool
        Does the problem have non homogeneous Neumann boundary conditions.
    """
    hf_folder_path = os.path.join(directory_path, "high_fidelity", f"n{hf_data.n - 1}")
    if not os.path.isdir(hf_folder_path):
        raise DirectoryDoesNotExistsError(
            f"Directory {hf_folder_path} does not exist, can not load the high_fidelity data.")

    def file_path(key):
        # resolve the load path for a given file-name key inside the folder
        return os.path.join(hf_folder_path, default_file_names_dict[key])

    # matrices a1 and a2
    hf_data.a1_full = sparse.load_npz(file_path("a1"))
    hf_data.a2_full = sparse.load_npz(file_path("a2"))
    # body force load vector
    hf_data.f_load_lv_full = np.load(file_path("f_load_lv"), allow_pickle=False)
    # Dirichlet edge
    hf_data.dirichlet_edge = np.load(file_path("dirichlet_edge"), allow_pickle=False)
    # p, tri and edge, stored together in one .npz archive
    mesh_data = np.load(file_path("p_tri_edge"), allow_pickle=False)
    hf_data.p = mesh_data['p']
    hf_data.tri = mesh_data['tri']
    hf_data.edge = mesh_data['edge']
    # Neumann load vector; a saved marker array means homogeneous Neumann conditions
    f_load_neumann_file_path = file_path("f_load_neumann")
    has_neumann = os.path.isfile(f_load_neumann_file_path)
    has_non_homo_neumann = True
    if has_neumann:
        hf_data.f_load_neumann_full = np.load(f_load_neumann_file_path, allow_pickle=False)
        if hf_data.f_load_neumann_full[0] == "has homo neumann":
            hf_data.f_load_neumann_full = None
            has_non_homo_neumann = False
    # derive the edge data from the freshly loaded mesh
    hf_data.get_neumann_edge()
    hf_data.compute_free_and_expanded_edges(has_neumann, has_non_homo_neumann)
    hf_data.plate_limits = (np.min(hf_data.p), np.max(hf_data.p))
    # lifting function vector, only present for non homogeneous Dirichlet conditions
    rg_lifting_func_file_path = file_path("rg")
    has_non_homo_dirichlet = os.path.isfile(rg_lifting_func_file_path)
    if has_non_homo_dirichlet:
        hf_data.rg = np.load(rg_lifting_func_file_path, allow_pickle=False)
    return has_neumann, has_non_homo_dirichlet, has_non_homo_neumann
def rb_from_files(n, rb_data, directory_path, warn=True, default_file_names_dict=file_names_dict):
    """
    Load the reduced-order data if it exists.

    Parameters
    ----------
    n : int
        The number of nodes along the axes.
    rb_data :
        reduced-order data.
    directory_path : str
        path to directory to save in.
    warn : bool, optional
        Do warn the user. The default is True.
    default_file_names_dict : dict, optional
        Dictionary of names for the files, see e.g. file_names_dict in default_constants.
        The default is file_names_dict.

    Raises
    ------
    FileDoesNotExistError
        if one lifting Dirichlet load vector save file exists and the other does not exist.

    Returns
    -------
    is_pod_computed : bool
        True if the reduced-order data exists.
    """
    rb_folder_path = os.path.join(directory_path, "reduced_order", f"n{n - 1}")
    is_pod_computed = True
    if not os.path.isdir(rb_folder_path):
        # assume the data does not exist.
        if warn:
            warning_text = f"Directory {rb_folder_path}" \
                           + " does not exist, can not load the reduced order data." \
                           + "\nBuild it with build_rb_model of the class."
            warnings.warn(warning_text)
        is_pod_computed = False
    else:
        # matrices a1 and a2
        a1_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["a1_rom"])
        a2_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["a2_rom"])
        rb_data.a1_free_rom = np.load(a1_rom_file_path, allow_pickle=False)
        rb_data.a2_free_rom = np.load(a2_rom_file_path, allow_pickle=False)
        # body force load vector
        f_load_lv_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["f_load_lv_rom"])
        rb_data.f_load_lv_free_rom = np.load(f_load_lv_rom_file_path, allow_pickle=False)
        # neumann load vector, only saved for problems with Neumann conditions
        f_load_neumann_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["f_load_neumann_rom"])
        if os.path.isfile(f_load_neumann_rom_file_path):
            rb_data.f_load_neumann_free_rom = np.load(f_load_neumann_rom_file_path, allow_pickle=False)
            # NOTE(review): rb_save never writes this marker for the reduced data,
            # so this branch only matters for archives written by other code paths.
            if rb_data.f_load_neumann_free_rom[0] == "has homo neumann":
                rb_data.f_load_neumann_free_rom = None
        # Lifting Dirichlet load vectors; either both files exist or neither does
        f1_dir_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["f1_dir_rom"])
        f2_dir_rom_file_path = os.path.join(rb_folder_path, default_file_names_dict["f2_dir_rom"])
        f1_dir_rom_isfile = os.path.isfile(f1_dir_rom_file_path)
        f2_dir_rom_isfile = os.path.isfile(f2_dir_rom_file_path)
        if f1_dir_rom_isfile != f2_dir_rom_isfile:
            text = "One of the files {}".format(default_file_names_dict["f1_dir_rom"]) \
                   + " and {} does not exist.".format(default_file_names_dict["f2_dir_rom"])
            raise FileDoesNotExistError(text)
        elif f1_dir_rom_isfile and f2_dir_rom_isfile:
            # BUG FIX: this condition previously tested ``f2_dir_rom_isfile`` twice.
            rb_data.f1_dirichlet_rom = np.load(f1_dir_rom_file_path, allow_pickle=False)
            rb_data.f2_dirichlet_rom = np.load(f2_dir_rom_file_path, allow_pickle=False)
        # matrix v
        v_mat_file_path = os.path.join(rb_folder_path, default_file_names_dict["v"])
        rb_data.v = np.load(v_mat_file_path, allow_pickle=False)
        # number of reduced-order degrees of freedom, given by the columns of v
        rb_data.n_rom = rb_data.v.shape[1]
        # singular values squared
        sigma2_file_path = os.path.join(rb_folder_path, default_file_names_dict["sigma2"])
        rb_data.sigma2_vec = np.load(sigma2_file_path, allow_pickle=False)
        # pod parameters; each row is a pair, cast back from the saved representation
        pod_parameters_file_path = os.path.join(rb_folder_path, default_file_names_dict["pod_parameters"])
        pod_parameters = np.load(pod_parameters_file_path, allow_pickle=False)
        rb_data.e_young_range = tuple(pod_parameters[0].astype(float))
        rb_data.nu_poisson_range = tuple(pod_parameters[1].astype(float))
        rb_data.rb_grid = tuple(pod_parameters[2].astype(int))
        rb_data.eps_pod = pod_parameters[3, 0].astype(float)
        rb_data.pod_sampling_mode = pod_parameters[3, 1].astype(str)
        rb_data.n_rom_max = pod_parameters[4, 0].astype(int)
        rb_data.n_rom_cut = pod_parameters[4, 1]
        # n_rom_cut is either the string "rank" or a numeric tolerance.
        # BUG FIX: ``astype`` returns a copy, so the previous bare call
        # ``rb_data.n_rom_cut.astype(float)`` discarded its result.
        if rb_data.n_rom_cut != "rank":
            rb_data.n_rom_cut = rb_data.n_rom_cut.astype(float)
    return is_pod_computed
| [
"numpy.load",
"numpy.save",
"scipy.sparse.load_npz",
"os.path.isdir",
"os.path.isfile",
"numpy.min",
"numpy.array",
"numpy.max",
"warnings.warn",
"numpy.savez",
"os.path.join"
] | [((1389, 1434), 'os.path.join', 'os.path.join', (['directory_path', '"""high_fidelity"""'], {}), "(directory_path, 'high_fidelity')\n", (1401, 1434), False, 'import os\n'), ((1621, 1680), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['a1']"], {}), "(hf_folder_path, default_file_names_dict['a1'])\n", (1633, 1680), False, 'import os\n'), ((1700, 1759), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['a2']"], {}), "(hf_folder_path, default_file_names_dict['a2'])\n", (1712, 1759), False, 'import os\n'), ((1933, 1999), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['f_load_lv']"], {}), "(hf_folder_path, default_file_names_dict['f_load_lv'])\n", (1945, 1999), False, 'import os\n'), ((2004, 2076), 'numpy.save', 'np.save', (['f_load_lv_file_path', 'hf_data.f_load_lv_full'], {'allow_pickle': '(False)'}), '(f_load_lv_file_path, hf_data.f_load_lv_full, allow_pickle=False)\n', (2011, 2076), True, 'import numpy as np\n'), ((2129, 2200), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['dirichlet_edge']"], {}), "(hf_folder_path, default_file_names_dict['dirichlet_edge'])\n", (2141, 2200), False, 'import os\n'), ((2205, 2282), 'numpy.save', 'np.save', (['dirichlet_edge_file_path', 'hf_data.dirichlet_edge'], {'allow_pickle': '(False)'}), '(dirichlet_edge_file_path, hf_data.dirichlet_edge, allow_pickle=False)\n', (2212, 2282), True, 'import numpy as np\n'), ((2332, 2399), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['p_tri_edge']"], {}), "(hf_folder_path, default_file_names_dict['p_tri_edge'])\n", (2344, 2399), False, 'import os\n'), ((2404, 2508), 'numpy.savez', 'np.savez', (['p_tri_edge_file_path'], {'p': 'hf_data.p', 'tri': 'hf_data.tri', 'edge': 'hf_data.edge', 'allow_pickle': '(False)'}), '(p_tri_edge_file_path, p=hf_data.p, tri=hf_data.tri, edge=hf_data.\n edge, allow_pickle=False)\n', (2412, 2508), True, 'import numpy as 
np\n'), ((4139, 4184), 'os.path.join', 'os.path.join', (['directory_path', '"""reduced_order"""'], {}), "(directory_path, 'reduced_order')\n", (4151, 4184), False, 'import os\n'), ((4367, 4430), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['a1_rom']"], {}), "(rb_folder_path, default_file_names_dict['a1_rom'])\n", (4379, 4430), False, 'import os\n'), ((4454, 4517), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['a2_rom']"], {}), "(rb_folder_path, default_file_names_dict['a2_rom'])\n", (4466, 4517), False, 'import os\n'), ((4522, 4588), 'numpy.save', 'np.save', (['a1_rom_file_path', 'rb_data.a1_free_rom'], {'allow_pickle': '(False)'}), '(a1_rom_file_path, rb_data.a1_free_rom, allow_pickle=False)\n', (4529, 4588), True, 'import numpy as np\n'), ((4593, 4659), 'numpy.save', 'np.save', (['a2_rom_file_path', 'rb_data.a2_free_rom'], {'allow_pickle': '(False)'}), '(a2_rom_file_path, rb_data.a2_free_rom, allow_pickle=False)\n', (4600, 4659), True, 'import numpy as np\n'), ((4719, 4789), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f_load_lv_rom']"], {}), "(rb_folder_path, default_file_names_dict['f_load_lv_rom'])\n", (4731, 4789), False, 'import os\n'), ((4794, 4879), 'numpy.save', 'np.save', (['f_load_lv_rom_file_path', 'rb_data.f_load_lv_free_rom'], {'allow_pickle': '(False)'}), '(f_load_lv_rom_file_path, rb_data.f_load_lv_free_rom, allow_pickle=False\n )\n', (4801, 4879), True, 'import numpy as np\n'), ((5646, 5704), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['v']"], {}), "(rb_folder_path, default_file_names_dict['v'])\n", (5658, 5704), False, 'import os\n'), ((5709, 5764), 'numpy.save', 'np.save', (['v_mat_file_path', 'rb_data.v'], {'allow_pickle': '(False)'}), '(v_mat_file_path, rb_data.v, allow_pickle=False)\n', (5716, 5764), True, 'import numpy as np\n'), ((5822, 5885), 'os.path.join', 'os.path.join', (['rb_folder_path', 
"default_file_names_dict['sigma2']"], {}), "(rb_folder_path, default_file_names_dict['sigma2'])\n", (5834, 5885), False, 'import os\n'), ((5890, 5955), 'numpy.save', 'np.save', (['sigma2_file_path', 'rb_data.sigma2_vec'], {'allow_pickle': '(False)'}), '(sigma2_file_path, rb_data.sigma2_vec, allow_pickle=False)\n', (5897, 5955), True, 'import numpy as np\n'), ((5998, 6168), 'numpy.array', 'np.array', (['[rb_data.e_young_range, rb_data.nu_poisson_range, rb_data.rb_grid, (rb_data\n .eps_pod, rb_data.pod_sampling_mode), (rb_data.n_rom_max, rb_data.\n n_rom_cut)]'], {}), '([rb_data.e_young_range, rb_data.nu_poisson_range, rb_data.rb_grid,\n (rb_data.eps_pod, rb_data.pod_sampling_mode), (rb_data.n_rom_max,\n rb_data.n_rom_cut)])\n', (6006, 6168), True, 'import numpy as np\n'), ((6316, 6387), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['pod_parameters']"], {}), "(rb_folder_path, default_file_names_dict['pod_parameters'])\n", (6328, 6387), False, 'import os\n'), ((6392, 6461), 'numpy.save', 'np.save', (['pod_parameters_file_path', 'pod_parameters'], {'allow_pickle': '(False)'}), '(pod_parameters_file_path, pod_parameters, allow_pickle=False)\n', (6399, 6461), True, 'import numpy as np\n'), ((7431, 7497), 'os.path.join', 'os.path.join', (['directory_path', '"""high_fidelity"""', 'f"""n{hf_data.n - 1}"""'], {}), "(directory_path, 'high_fidelity', f'n{hf_data.n - 1}')\n", (7443, 7497), False, 'import os\n'), ((7730, 7789), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['a1']"], {}), "(hf_folder_path, default_file_names_dict['a1'])\n", (7742, 7789), False, 'import os\n'), ((7809, 7868), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['a2']"], {}), "(hf_folder_path, default_file_names_dict['a2'])\n", (7821, 7868), False, 'import os\n'), ((7891, 7920), 'scipy.sparse.load_npz', 'sparse.load_npz', (['a1_file_path'], {}), '(a1_file_path)\n', (7906, 7920), True, 'import scipy.sparse as 
sparse\n'), ((7943, 7972), 'scipy.sparse.load_npz', 'sparse.load_npz', (['a2_file_path'], {}), '(a2_file_path)\n', (7958, 7972), True, 'import scipy.sparse as sparse\n'), ((8028, 8094), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['f_load_lv']"], {}), "(hf_folder_path, default_file_names_dict['f_load_lv'])\n", (8040, 8094), False, 'import os\n'), ((8124, 8172), 'numpy.load', 'np.load', (['f_load_lv_file_path'], {'allow_pickle': '(False)'}), '(f_load_lv_file_path, allow_pickle=False)\n', (8131, 8172), True, 'import numpy as np\n'), ((8225, 8296), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['dirichlet_edge']"], {}), "(hf_folder_path, default_file_names_dict['dirichlet_edge'])\n", (8237, 8296), False, 'import os\n'), ((8326, 8379), 'numpy.load', 'np.load', (['dirichlet_edge_file_path'], {'allow_pickle': '(False)'}), '(dirichlet_edge_file_path, allow_pickle=False)\n', (8333, 8379), True, 'import numpy as np\n'), ((8429, 8496), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['p_tri_edge']"], {}), "(hf_folder_path, default_file_names_dict['p_tri_edge'])\n", (8441, 8496), False, 'import os\n'), ((8514, 8563), 'numpy.load', 'np.load', (['p_tri_edge_file_path'], {'allow_pickle': '(False)'}), '(p_tri_edge_file_path, allow_pickle=False)\n', (8521, 8563), True, 'import numpy as np\n'), ((8727, 8798), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['f_load_neumann']"], {}), "(hf_folder_path, default_file_names_dict['f_load_neumann'])\n", (8739, 8798), False, 'import os\n'), ((8817, 8857), 'os.path.isfile', 'os.path.isfile', (['f_load_neumann_file_path'], {}), '(f_load_neumann_file_path)\n', (8831, 8857), False, 'import os\n'), ((9419, 9478), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['rg']"], {}), "(hf_folder_path, default_file_names_dict['rg'])\n", (9431, 9478), False, 'import os\n'), ((9508, 9549), 'os.path.isfile', 
'os.path.isfile', (['rg_lifting_func_file_path'], {}), '(rg_lifting_func_file_path)\n', (9522, 9549), False, 'import os\n'), ((10593, 10651), 'os.path.join', 'os.path.join', (['directory_path', '"""reduced_order"""', 'f"""n{n - 1}"""'], {}), "(directory_path, 'reduced_order', f'n{n - 1}')\n", (10605, 10651), False, 'import os\n'), ((2598, 2669), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['f_load_neumann']"], {}), "(hf_folder_path, default_file_names_dict['f_load_neumann'])\n", (2610, 2669), False, 'import os\n'), ((3013, 3072), 'os.path.join', 'os.path.join', (['hf_folder_path', "default_file_names_dict['rg']"], {}), "(hf_folder_path, default_file_names_dict['rg'])\n", (3025, 3072), False, 'import os\n'), ((3081, 3147), 'numpy.save', 'np.save', (['rg_lifting_func_file_path', 'hf_data.rg'], {'allow_pickle': '(False)'}), '(rg_lifting_func_file_path, hf_data.rg, allow_pickle=False)\n', (3088, 3147), True, 'import numpy as np\n'), ((4960, 5035), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f_load_neumann_rom']"], {}), "(rb_folder_path, default_file_names_dict['f_load_neumann_rom'])\n", (4972, 5035), False, 'import os\n'), ((5270, 5337), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f1_dir_rom']"], {}), "(rb_folder_path, default_file_names_dict['f1_dir_rom'])\n", (5282, 5337), False, 'import os\n'), ((5369, 5436), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f2_dir_rom']"], {}), "(rb_folder_path, default_file_names_dict['f2_dir_rom'])\n", (5381, 5436), False, 'import os\n'), ((5445, 5520), 'numpy.save', 'np.save', (['f1_dir_rom_file_path', 'rb_data.f1_dirichlet_rom'], {'allow_pickle': '(False)'}), '(f1_dir_rom_file_path, rb_data.f1_dirichlet_rom, allow_pickle=False)\n', (5452, 5520), True, 'import numpy as np\n'), ((5529, 5604), 'numpy.save', 'np.save', (['f2_dir_rom_file_path', 'rb_data.f2_dirichlet_rom'], {'allow_pickle': '(False)'}), 
'(f2_dir_rom_file_path, rb_data.f2_dirichlet_rom, allow_pickle=False)\n', (5536, 5604), True, 'import numpy as np\n'), ((7509, 7538), 'os.path.isdir', 'os.path.isdir', (['hf_folder_path'], {}), '(hf_folder_path)\n', (7522, 7538), False, 'import os\n'), ((8948, 9001), 'numpy.load', 'np.load', (['f_load_neumann_file_path'], {'allow_pickle': '(False)'}), '(f_load_neumann_file_path, allow_pickle=False)\n', (8955, 9001), True, 'import numpy as np\n'), ((9313, 9330), 'numpy.min', 'np.min', (['hf_data.p'], {}), '(hf_data.p)\n', (9319, 9330), True, 'import numpy as np\n'), ((9332, 9349), 'numpy.max', 'np.max', (['hf_data.p'], {}), '(hf_data.p)\n', (9338, 9349), True, 'import numpy as np\n'), ((9602, 9656), 'numpy.load', 'np.load', (['rg_lifting_func_file_path'], {'allow_pickle': '(False)'}), '(rg_lifting_func_file_path, allow_pickle=False)\n', (9609, 9656), True, 'import numpy as np\n'), ((10690, 10719), 'os.path.isdir', 'os.path.isdir', (['rb_folder_path'], {}), '(rb_folder_path)\n', (10703, 10719), False, 'import os\n'), ((11140, 11203), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['a1_rom']"], {}), "(rb_folder_path, default_file_names_dict['a1_rom'])\n", (11152, 11203), False, 'import os\n'), ((11231, 11294), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['a2_rom']"], {}), "(rb_folder_path, default_file_names_dict['a2_rom'])\n", (11243, 11294), False, 'import os\n'), ((11325, 11370), 'numpy.load', 'np.load', (['a1_rom_file_path'], {'allow_pickle': '(False)'}), '(a1_rom_file_path, allow_pickle=False)\n', (11332, 11370), True, 'import numpy as np\n'), ((11401, 11446), 'numpy.load', 'np.load', (['a2_rom_file_path'], {'allow_pickle': '(False)'}), '(a2_rom_file_path, allow_pickle=False)\n', (11408, 11446), True, 'import numpy as np\n'), ((11514, 11584), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f_load_lv_rom']"], {}), "(rb_folder_path, 
default_file_names_dict['f_load_lv_rom'])\n", (11526, 11584), False, 'import os\n'), ((11622, 11674), 'numpy.load', 'np.load', (['f_load_lv_rom_file_path'], {'allow_pickle': '(False)'}), '(f_load_lv_rom_file_path, allow_pickle=False)\n', (11629, 11674), True, 'import numpy as np\n'), ((11744, 11819), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f_load_neumann_rom']"], {}), "(rb_folder_path, default_file_names_dict['f_load_neumann_rom'])\n", (11756, 11819), False, 'import os\n'), ((11831, 11875), 'os.path.isfile', 'os.path.isfile', (['f_load_neumann_rom_file_path'], {}), '(f_load_neumann_rom_file_path)\n', (11845, 11875), False, 'import os\n'), ((12180, 12247), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f1_dir_rom']"], {}), "(rb_folder_path, default_file_names_dict['f1_dir_rom'])\n", (12192, 12247), False, 'import os\n'), ((12279, 12346), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['f2_dir_rom']"], {}), "(rb_folder_path, default_file_names_dict['f2_dir_rom'])\n", (12291, 12346), False, 'import os\n'), ((12375, 12411), 'os.path.isfile', 'os.path.isfile', (['f1_dir_rom_file_path'], {}), '(f1_dir_rom_file_path)\n', (12389, 12411), False, 'import os\n'), ((12440, 12476), 'os.path.isfile', 'os.path.isfile', (['f2_dir_rom_file_path'], {}), '(f2_dir_rom_file_path)\n', (12454, 12476), False, 'import os\n'), ((13134, 13192), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['v']"], {}), "(rb_folder_path, default_file_names_dict['v'])\n", (13146, 13192), False, 'import os\n'), ((13213, 13257), 'numpy.load', 'np.load', (['v_mat_file_path'], {'allow_pickle': '(False)'}), '(v_mat_file_path, allow_pickle=False)\n', (13220, 13257), True, 'import numpy as np\n'), ((13362, 13425), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['sigma2']"], {}), "(rb_folder_path, default_file_names_dict['sigma2'])\n", (13374, 13425), False, 
'import os\n'), ((13455, 13500), 'numpy.load', 'np.load', (['sigma2_file_path'], {'allow_pickle': '(False)'}), '(sigma2_file_path, allow_pickle=False)\n', (13462, 13500), True, 'import numpy as np\n'), ((13561, 13632), 'os.path.join', 'os.path.join', (['rb_folder_path', "default_file_names_dict['pod_parameters']"], {}), "(rb_folder_path, default_file_names_dict['pod_parameters'])\n", (13573, 13632), False, 'import os\n'), ((13658, 13711), 'numpy.load', 'np.load', (['pod_parameters_file_path'], {'allow_pickle': '(False)'}), '(pod_parameters_file_path, allow_pickle=False)\n', (13665, 13711), True, 'import numpy as np\n'), ((2715, 2802), 'numpy.save', 'np.save', (['f_load_neumann_file_path', 'hf_data.f_load_neumann_full'], {'allow_pickle': '(False)'}), '(f_load_neumann_file_path, hf_data.f_load_neumann_full, allow_pickle\n =False)\n', (2722, 2802), True, 'import numpy as np\n'), ((5081, 5175), 'numpy.save', 'np.save', (['f_load_neumann_rom_file_path', 'rb_data.f_load_neumann_free_rom'], {'allow_pickle': '(False)'}), '(f_load_neumann_rom_file_path, rb_data.f_load_neumann_free_rom,\n allow_pickle=False)\n', (5088, 5175), True, 'import numpy as np\n'), ((11014, 11041), 'warnings.warn', 'warnings.warn', (['warning_text'], {}), '(warning_text)\n', (11027, 11041), False, 'import warnings\n'), ((11923, 11980), 'numpy.load', 'np.load', (['f_load_neumann_rom_file_path'], {'allow_pickle': '(False)'}), '(f_load_neumann_rom_file_path, allow_pickle=False)\n', (11930, 11980), True, 'import numpy as np\n'), ((2858, 2888), 'numpy.array', 'np.array', (["['has homo neumann']"], {}), "(['has homo neumann'])\n", (2866, 2888), True, 'import numpy as np\n'), ((12905, 12954), 'numpy.load', 'np.load', (['f1_dir_rom_file_path'], {'allow_pickle': '(False)'}), '(f1_dir_rom_file_path, allow_pickle=False)\n', (12912, 12954), True, 'import numpy as np\n'), ((12994, 13043), 'numpy.load', 'np.load', (['f2_dir_rom_file_path'], {'allow_pickle': '(False)'}), '(f2_dir_rom_file_path, 
allow_pickle=False)\n', (13001, 13043), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.igpgtg` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestXYZ_to_IgPgTg', 'TestIgPgTg_to_XYZ']
class TestXYZ_to_IgPgTg(unittest.TestCase):
    """
    Defines :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition unit tests
    methods.
    """

    def test_XYZ_to_IgPgTg(self):
        """
        Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition.
        """

        # table-driven check of reference (XYZ, IgPgTg) pairs
        reference_pairs = (
            ([0.20654008, 0.12197225, 0.05136952],
             [0.42421258, 0.18632491, 0.10689223]),
            ([0.14222010, 0.23042768, 0.10495772],
             [0.50912820, -0.14804331, 0.11921472]),
            ([0.07818780, 0.06157201, 0.28099326],
             [0.29095152, -0.04057508, -0.18220795]),
        )
        for XYZ, IgPgTg in reference_pairs:
            np.testing.assert_almost_equal(
                XYZ_to_IgPgTg(np.array(XYZ)), np.array(IgPgTg), decimal=7)

    def test_n_dimensional_XYZ_to_IgPgTg(self):
        """
        Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition
        n-dimensional support.
        """

        XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
        IgPgTg = XYZ_to_IgPgTg(XYZ)

        XYZ_tiled = np.tile(XYZ, (6, 1))
        IgPgTg_tiled = np.tile(IgPgTg, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_IgPgTg(XYZ_tiled), IgPgTg_tiled, decimal=7)

        XYZ_cube = np.reshape(XYZ_tiled, (2, 3, 3))
        IgPgTg_cube = np.reshape(IgPgTg_tiled, (2, 3, 3))
        np.testing.assert_almost_equal(
            XYZ_to_IgPgTg(XYZ_cube), IgPgTg_cube, decimal=7)

    def test_domain_range_scale_XYZ_to_IgPgTg(self):
        """
        Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition domain and
        range scale support.
        """

        XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
        IgPgTg = XYZ_to_IgPgTg(XYZ)

        for scale, factor in (('reference', 1), (1, 1), (100, 100)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    XYZ_to_IgPgTg(XYZ * factor), IgPgTg * factor, decimal=7)

    @ignore_numpy_errors
    def test_nan_XYZ_to_IgPgTg(self):
        """
        Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition nan
        support.
        """

        singular_values = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(singular_values * 3, r=3)):
            XYZ_to_IgPgTg(np.array(case))
class TestIgPgTg_to_XYZ(unittest.TestCase):
    """
    Defines :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition unit tests
    methods.
    """

    def test_IgPgTg_to_XYZ(self):
        """
        Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition.
        """

        # Known IgPgTg -> XYZ correspondences, checked to 7 decimal places.
        known_pairs = (
            ((0.42421258, 0.18632491, 0.10689223),
             (0.20654008, 0.12197225, 0.05136952)),
            ((0.50912820, -0.14804331, 0.11921472),
             (0.14222010, 0.23042768, 0.10495772)),
            ((0.29095152, -0.04057508, -0.18220795),
             (0.07818780, 0.06157201, 0.28099326)),
        )
        for IgPgTg, XYZ in known_pairs:
            np.testing.assert_almost_equal(
                IgPgTg_to_XYZ(np.array(IgPgTg)), np.array(XYZ), decimal=7)

    def test_n_dimensional_IgPgTg_to_XYZ(self):
        """
        Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition
        n-dimensional support.
        """

        IgPgTg = np.array([0.42421258, 0.18632491, 0.10689223])
        XYZ = IgPgTg_to_XYZ(IgPgTg)

        # Tiled 2-D input must yield the correspondingly tiled result.
        IgPgTg_tiled = np.tile(IgPgTg, (6, 1))
        XYZ_tiled = np.tile(XYZ, (6, 1))
        np.testing.assert_almost_equal(
            IgPgTg_to_XYZ(IgPgTg_tiled), XYZ_tiled, decimal=7)

        # Reshaped 3-D input must yield the correspondingly reshaped result.
        IgPgTg_cube = np.reshape(IgPgTg_tiled, (2, 3, 3))
        XYZ_cube = np.reshape(XYZ_tiled, (2, 3, 3))
        np.testing.assert_almost_equal(
            IgPgTg_to_XYZ(IgPgTg_cube), XYZ_cube, decimal=7)

    def test_domain_range_scale_IgPgTg_to_XYZ(self):
        """
        Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition domain and
        range scale support.
        """

        IgPgTg = np.array([0.42421258, 0.18632491, 0.10689223])
        XYZ = IgPgTg_to_XYZ(IgPgTg)

        # Output must scale with the active domain-range scale.
        for scale, factor in (('reference', 1), (1, 1), (100, 100)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    IgPgTg_to_XYZ(IgPgTg * factor), XYZ * factor, decimal=7)

    @ignore_numpy_errors
    def test_nan_IgPgTg_to_XYZ(self):
        """
        Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition nan
        support.
        """

        # Exercise every 3-permutation of extreme/NaN values; the call must
        # simply not raise.
        extremes = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(extremes * 3, r=3)):
            IgPgTg_to_XYZ(np.array(case))
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"colour.utilities.domain_range_scale",
"itertools.permutations",
"colour.models.IgPgTg_to_XYZ",
"numpy.array",
"numpy.tile",
"numpy.reshape",
"colour.models.XYZ_to_IgPgTg"
] | [((5434, 5449), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5447, 5449), False, 'import unittest\n'), ((1676, 1722), 'numpy.array', 'np.array', (['[0.20654008, 0.12197225, 0.05136952]'], {}), '([0.20654008, 0.12197225, 0.05136952])\n', (1684, 1722), True, 'import numpy as np\n'), ((1740, 1758), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['XYZ'], {}), '(XYZ)\n', (1753, 1758), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((1774, 1794), 'numpy.tile', 'np.tile', (['XYZ', '(6, 1)'], {}), '(XYZ, (6, 1))\n', (1781, 1794), True, 'import numpy as np\n'), ((1812, 1835), 'numpy.tile', 'np.tile', (['IgPgTg', '(6, 1)'], {}), '(IgPgTg, (6, 1))\n', (1819, 1835), True, 'import numpy as np\n'), ((1929, 1955), 'numpy.reshape', 'np.reshape', (['XYZ', '(2, 3, 3)'], {}), '(XYZ, (2, 3, 3))\n', (1939, 1955), True, 'import numpy as np\n'), ((1973, 2002), 'numpy.reshape', 'np.reshape', (['IgPgTg', '(2, 3, 3)'], {}), '(IgPgTg, (2, 3, 3))\n', (1983, 2002), True, 'import numpy as np\n'), ((2282, 2328), 'numpy.array', 'np.array', (['[0.20654008, 0.12197225, 0.05136952]'], {}), '([0.20654008, 0.12197225, 0.05136952])\n', (2290, 2328), True, 'import numpy as np\n'), ((2346, 2364), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['XYZ'], {}), '(XYZ)\n', (2359, 2364), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((4069, 4115), 'numpy.array', 'np.array', (['[0.42421258, 0.18632491, 0.10689223]'], {}), '([0.42421258, 0.18632491, 0.10689223])\n', (4077, 4115), True, 'import numpy as np\n'), ((4130, 4151), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['IgPgTg'], {}), '(IgPgTg)\n', (4143, 4151), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((4170, 4193), 'numpy.tile', 'np.tile', (['IgPgTg', '(6, 1)'], {}), '(IgPgTg, (6, 1))\n', (4177, 4193), True, 'import numpy as np\n'), ((4208, 4228), 'numpy.tile', 'np.tile', (['XYZ', '(6, 1)'], {}), '(XYZ, (6, 1))\n', (4215, 4228), True, 'import numpy as np\n'), ((4325, 
4354), 'numpy.reshape', 'np.reshape', (['IgPgTg', '(2, 3, 3)'], {}), '(IgPgTg, (2, 3, 3))\n', (4335, 4354), True, 'import numpy as np\n'), ((4369, 4395), 'numpy.reshape', 'np.reshape', (['XYZ', '(2, 3, 3)'], {}), '(XYZ, (2, 3, 3))\n', (4379, 4395), True, 'import numpy as np\n'), ((4678, 4724), 'numpy.array', 'np.array', (['[0.42421258, 0.18632491, 0.10689223]'], {}), '([0.42421258, 0.18632491, 0.10689223])\n', (4686, 4724), True, 'import numpy as np\n'), ((4739, 4760), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['IgPgTg'], {}), '(IgPgTg)\n', (4752, 4760), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((1017, 1063), 'numpy.array', 'np.array', (['[0.42421258, 0.18632491, 0.10689223]'], {}), '([0.42421258, 0.18632491, 0.10689223])\n', (1025, 1063), True, 'import numpy as np\n'), ((1216, 1262), 'numpy.array', 'np.array', (['[0.5091282, -0.14804331, 0.11921472]'], {}), '([0.5091282, -0.14804331, 0.11921472])\n', (1224, 1262), True, 'import numpy as np\n'), ((1416, 1464), 'numpy.array', 'np.array', (['[0.29095152, -0.04057508, -0.18220795]'], {}), '([0.29095152, -0.04057508, -0.18220795])\n', (1424, 1464), True, 'import numpy as np\n'), ((1875, 1893), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['XYZ'], {}), '(XYZ)\n', (1888, 1893), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((2042, 2060), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['XYZ'], {}), '(XYZ)\n', (2055, 2060), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((2878, 2906), 'itertools.permutations', 'permutations', (['(cases * 3)'], {'r': '(3)'}), '(cases * 3, r=3)\n', (2890, 2906), False, 'from itertools import permutations\n'), ((2953, 2967), 'numpy.array', 'np.array', (['case'], {}), '(case)\n', (2961, 2967), True, 'import numpy as np\n'), ((2980, 2998), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['XYZ'], {}), '(XYZ)\n', (2993, 2998), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((3407, 3453), 
'numpy.array', 'np.array', (['[0.20654008, 0.12197225, 0.05136952]'], {}), '([0.20654008, 0.12197225, 0.05136952])\n', (3415, 3453), True, 'import numpy as np\n'), ((3607, 3652), 'numpy.array', 'np.array', (['[0.1422201, 0.23042768, 0.10495772]'], {}), '([0.1422201, 0.23042768, 0.10495772])\n', (3615, 3652), True, 'import numpy as np\n'), ((3808, 3853), 'numpy.array', 'np.array', (['[0.0781878, 0.06157201, 0.28099326]'], {}), '([0.0781878, 0.06157201, 0.28099326])\n', (3816, 3853), True, 'import numpy as np\n'), ((4268, 4289), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['IgPgTg'], {}), '(IgPgTg)\n', (4281, 4289), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((4435, 4456), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['IgPgTg'], {}), '(IgPgTg)\n', (4448, 4456), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((5274, 5302), 'itertools.permutations', 'permutations', (['(cases * 3)'], {'r': '(3)'}), '(cases * 3, r=3)\n', (5286, 5302), False, 'from itertools import permutations\n'), ((5352, 5366), 'numpy.array', 'np.array', (['case'], {}), '(case)\n', (5360, 5366), True, 'import numpy as np\n'), ((5379, 5400), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['IgPgTg'], {}), '(IgPgTg)\n', (5392, 5400), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((956, 1002), 'numpy.array', 'np.array', (['[0.20654008, 0.12197225, 0.05136952]'], {}), '([0.20654008, 0.12197225, 0.05136952])\n', (964, 1002), True, 'import numpy as np\n'), ((1155, 1200), 'numpy.array', 'np.array', (['[0.1422201, 0.23042768, 0.10495772]'], {}), '([0.1422201, 0.23042768, 0.10495772])\n', (1163, 1200), True, 'import numpy as np\n'), ((1355, 1400), 'numpy.array', 'np.array', (['[0.0781878, 0.06157201, 0.28099326]'], {}), '([0.0781878, 0.06157201, 0.28099326])\n', (1363, 1400), True, 'import numpy as np\n'), ((2470, 2495), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (2488, 2495), False, 
'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((3346, 3392), 'numpy.array', 'np.array', (['[0.42421258, 0.18632491, 0.10689223]'], {}), '([0.42421258, 0.18632491, 0.10689223])\n', (3354, 3392), True, 'import numpy as np\n'), ((3545, 3591), 'numpy.array', 'np.array', (['[0.5091282, -0.14804331, 0.11921472]'], {}), '([0.5091282, -0.14804331, 0.11921472])\n', (3553, 3591), True, 'import numpy as np\n'), ((3745, 3793), 'numpy.array', 'np.array', (['[0.29095152, -0.04057508, -0.18220795]'], {}), '([0.29095152, -0.04057508, -0.18220795])\n', (3753, 3793), True, 'import numpy as np\n'), ((4866, 4891), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (4884, 4891), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((2565, 2592), 'colour.models.XYZ_to_IgPgTg', 'XYZ_to_IgPgTg', (['(XYZ * factor)'], {}), '(XYZ * factor)\n', (2578, 2592), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n'), ((4961, 4991), 'colour.models.IgPgTg_to_XYZ', 'IgPgTg_to_XYZ', (['(IgPgTg * factor)'], {}), '(IgPgTg * factor)\n', (4974, 4991), False, 'from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ\n')] |
import numpy as np
from typing import Dict, Any
from dataset.dataset import Dataset
from utils.constants import INPUT_SHAPE, INPUTS, OUTPUT, SAMPLE_ID, INPUT_NOISE, SMALL_NUMBER
from utils.constants import INPUT_SCALER, NUM_OUTPUT_FEATURES, NUM_CLASSES, LABEL_MAP
class SingleDataset(Dataset):
    """Dataset whose samples are single, flat feature vectors."""

    def tensorize(self, sample: Dict[str, Any], metadata: Dict[str, Any], is_train: bool) -> Dict[str, np.ndarray]:
        """Turn a raw sample into normalized model-ready arrays.

        Scales the inputs with the fitted scaler from ``metadata``, optionally
        perturbs them with Gaussian noise during training, and re-maps class
        labels for classification problems.
        """
        # Reshape the raw features to (-1, D) and normalize them.
        features = np.array(sample[INPUTS]).reshape((-1, metadata[INPUT_SHAPE]))
        scaled = metadata[INPUT_SCALER].transform(features)  # [1, L * D]

        # Training-time augmentation: add Gaussian input noise when enabled.
        noise_scale = metadata.get(INPUT_NOISE, 0.0)
        if is_train and noise_scale > SMALL_NUMBER:
            scaled = scaled + np.random.normal(loc=0.0, scale=metadata[INPUT_NOISE], size=scaled.shape)

        # Classification labels go through the label map; regression targets
        # pass through untouched.
        label = sample[OUTPUT]
        if metadata[NUM_CLASSES] > 0:
            label = metadata[LABEL_MAP][label]

        return {
            INPUTS: scaled,
            OUTPUT: label,
            SAMPLE_ID: sample[SAMPLE_ID],
        }
| [
"numpy.array",
"numpy.random.normal"
] | [((827, 915), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'metadata[INPUT_NOISE]', 'size': 'normalized_input.shape'}), '(loc=0.0, scale=metadata[INPUT_NOISE], size=\n normalized_input.shape)\n', (843, 915), True, 'import numpy as np\n'), ((509, 533), 'numpy.array', 'np.array', (['sample[INPUTS]'], {}), '(sample[INPUTS])\n', (517, 533), True, 'import numpy as np\n')] |
# Step 0: Import the dependencies
import numpy as np
import gym
import random
def train_q(environment_var,
            agent_var,
            gamma_var,
            lr_var,
            total_episodes_var,
            q_max_steps_var,
            q_epsilon_var,
            q_max_epsilon_var,
            q_min_epsilon_var,
            q_decay_rate_var):
    """Train a tabular Q-learning agent on a discrete gym environment.

    Runs epsilon-greedy Q-learning with exponentially decaying exploration
    and returns the learned Q-table.  ``agent_var`` is accepted for API
    compatibility but is not used by this trainer.
    """
    # Step 1: build the environment.
    env = gym.make(environment_var)

    # Step 2: Q-table of shape (num_states, num_actions), initialised to 0.
    qtable = np.zeros((env.observation_space.n, env.action_space.n))

    # Step 3: hyperparameters / exploration schedule.
    epsilon = q_epsilon_var
    rewards = []

    # Step 4: the Q-learning loop.
    for episode in range(total_episodes_var):
        state = env.reset()
        episode_reward = 0

        for _ in range(q_max_steps_var):
            # Epsilon-greedy action selection: exploit the Q-table when the
            # random draw exceeds epsilon, otherwise explore randomly.
            if random.uniform(0, 1) > epsilon:
                action = np.argmax(qtable[state, :])
            else:
                action = env.action_space.sample()

            new_state, reward, done, info = env.step(action)

            # Bellman update:
            # Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
            td_target = reward + gamma_var * np.max(qtable[new_state, :])
            qtable[state, action] = qtable[state, action] + lr_var * (td_target - qtable[state, action])

            episode_reward += reward
            state = new_state

            # Episode finished (terminal state reached).
            if done:
                break

        # Decay exploration: less and less random action over time.
        epsilon = q_min_epsilon_var + (q_max_epsilon_var - q_min_epsilon_var) * np.exp(-q_decay_rate_var * episode)
        rewards.append(episode_reward)

    # Return the learned Q-table.
    return qtable
| [
"gym.make",
"numpy.argmax",
"random.uniform",
"numpy.zeros",
"numpy.max",
"numpy.exp"
] | [((504, 529), 'gym.make', 'gym.make', (['environment_var'], {}), '(environment_var)\n', (512, 529), False, 'import gym\n'), ((673, 708), 'numpy.zeros', 'np.zeros', (['(state_size, action_size)'], {}), '((state_size, action_size))\n', (681, 708), True, 'import numpy as np\n'), ((1805, 1825), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1819, 1825), False, 'import random\n'), ((2017, 2044), 'numpy.argmax', 'np.argmax', (['qtable[state, :]'], {}), '(qtable[state, :])\n', (2026, 2044), True, 'import numpy as np\n'), ((2927, 2956), 'numpy.exp', 'np.exp', (['(-decay_rate * episode)'], {}), '(-decay_rate * episode)\n', (2933, 2956), True, 'import numpy as np\n'), ((2502, 2530), 'numpy.max', 'np.max', (['qtable[new_state, :]'], {}), '(qtable[new_state, :])\n', (2508, 2530), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8

# # V3_P_RMSP

# In[ ]:

# Experiment configuration.
# Set the path to the root folder containing the training data.
# If you want to have access to the data please contact ...
basePath = ''
imgDir = basePath + 'images/Trainingsdatensatz_cropped_scaled/'
trainTsv = basePath + 'tsvDatein/final_dataset_splitting/train.tsv'
validTsv = basePath + 'tsvDatein/final_dataset_splitting/val.tsv'
testTsv = basePath + 'tsvDatein/final_dataset_splitting/test.tsv'
whitelist = basePath + 'whitelist/whitelist1.txt'
saveDir = basePath + 'experiments/InceptionV3-preTrained-rmsprops/'

imgShape = (1000,1000)  # target (height, width) for the centre-crop
num_classes = 11        # overwritten later from the whitelist file
batch_size = 4
max_epochs = 110
preTrained = True       # True: start from ImageNet weights and freeze the backbone
#Imports
import csv
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.utils import to_categorical, Sequence
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model
import numpy as np
from skimage import io
from keras.models import model_from_json
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from pandas import DataFrame
from contextlib import redirect_stdout
import keras
import pydot
import pydotplus
from keras.utils.vis_utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.models import load_model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import CSVLogger
import time
import random
# In[ ]:
# From mgLearn library.
def heatmap(values, xlabel, ylabel, xticklabels, yticklabels, cmap=None,
            vmin=None, vmax=None, ax=None, fmt="%0.2f"):
    """Draw an annotated heatmap of ``values`` on ``ax`` (default: current axes).

    Each cell is labelled with its value using ``fmt``; the text colour is
    chosen black or white for contrast against the cell colour.
    Returns the matplotlib mesh so callers can attach a colorbar.
    """
    ax = plt.gca() if ax is None else ax
    # plot the mean cross-validation scores
    mesh = ax.pcolor(values, cmap=cmap, vmin=vmin, vmax=vmax)
    mesh.update_scalarmappable()

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xticks(np.arange(len(xticklabels)) + 0.5)
    ax.set_yticks(np.arange(len(yticklabels)) + 0.5)
    ax.set_xticklabels(xticklabels, rotation='vertical')
    ax.set_yticklabels(yticklabels)
    ax.set_aspect(1)

    # Annotate every cell; bright cells get black text, dark cells white.
    for cell_path, face_rgba, value in zip(mesh.get_paths(),
                                           mesh.get_facecolors(),
                                           mesh.get_array()):
        x, y = cell_path.vertices[:-2, :].mean(0)
        c = 'k' if np.mean(face_rgba[:3]) > 0.5 else 'w'
        ax.text(x, y, fmt % value, color=c, ha="center", va="center", fontsize=12)
    return mesh
# In[ ]:

# https://stackoverflow.com/a/43186440
class TimeHistory(keras.callbacks.Callback):
    """Keras callback recording the wall-clock duration of every epoch.

    Durations (seconds) are kept in ``self.times`` and appended, one per
    line, to ``<saveDir>/history/timeHistory.csv``.
    """

    def on_train_begin(self, logs={}):
        self.times = []

    def on_epoch_begin(self, batch, logs={}):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, batch, logs={}):
        # Fix: measure the elapsed time once so the in-memory list and the
        # CSV agree exactly (the original called time.time() twice, giving
        # two slightly different durations for the same epoch).
        elapsed = time.time() - self.epoch_time_start
        self.times.append(elapsed)
        path = saveDir + "history/timeHistory.csv"
        with open(path, 'a') as fd:
            fd.write(str(elapsed) + "\n")
# In[ ]:
def readTargetList(tsv, target_map):
    """Read sample ids and integer class targets from a TSV annotation file.

    The file is tab-separated with a header row; column 0 holds the sample
    id and column 14 the class name.  Rows with an empty class name are
    printed and skipped.

    :param tsv: path to the TSV file.
    :param target_map: mapping of class-name substrings to integer labels;
        a label is assigned when its key is a substring of the class name.
    :return: tuple ``(target, imgId)`` of parallel lists — per-row label
        lists and the matching sample ids.
    """
    target = []
    imgId = []
    with open(tsv) as f:
        reader = csv.reader(f, delimiter='\t')
        next(reader)  # skip header row
        for row in reader:
            if row[14] == "":
                # Fix: the original printed the undefined name `className`
                # here, raising NameError for rows without a class label;
                # log the offending row and skip it instead.
                print(row)
                continue
            # Substring match so e.g. "Copepoda_adult" still maps through a
            # "Copepoda" whitelist entry.
            target.append([value for key, value in target_map.items()
                           if key in row[14]])
            imgId.append(row[0])
    return target, imgId
# In[ ]:
def getImgPaths(imgId):
    """Map every sample id to its full '<imgDir><id>.jpg' image path."""
    return [imgDir + str(idx) + '.jpg' for idx in imgId]
# In[ ]:
def bounds(old_size, new_size):
    """Return (low, high) slice bounds that centre an axis of ``old_size``
    down to ``new_size``.

    If the axis already fits, the full range ``(0, old_size)`` is returned.
    An odd surplus is trimmed from the low side first.
    """
    if new_size >= old_size:
        return (0, old_size)
    surplus = old_size - new_size
    # low = ceil(surplus / 2): the extra pixel (if any) comes off the front.
    low = surplus // 2 + surplus % 2
    return (low, low + new_size)
# In[ ]:
def crop_image(img, shape):
    """Centre-crop ``img`` to ``shape`` and append a channel axis.

    Assumes ``img`` is a 2-D (single-channel) array — TODO confirm against
    the image loader.  Returns an array of shape (H, W, 1).
    """
    row_lo, row_hi = bounds(img.shape[0], shape[0])
    col_lo, col_hi = bounds(img.shape[1], shape[1])
    cropped = img[row_lo:row_hi, col_lo:col_hi]
    # (H, W) -> (H, W, 1): add a trailing single-channel axis.
    return cropped[:, :, np.newaxis]
# In[ ]:
# Sequence class for training using lazy batches of images.
# See example at https://keras.io/utils/#sequence
#
# `X_set` is list of path to the images, and `y_set` are the associated classes.
#
class LokiImageSequence(Sequence):
    """Keras Sequence serving lazy batches of images.

    ``X_set`` is a list of image file paths and ``y_set`` the associated
    (one-hot) classes.  Images are read on demand, centre-cropped to
    ``image_shape``, replicated to three channels and passed through the
    InceptionV3 ``preprocess_input``.
    """

    def __init__(self, X_set, y_set, batch_size, image_shape):
        self._X = list(X_set)
        self._y = list(y_set)
        self._batch_size = batch_size
        self._image_shape = image_shape

    def __len__(self):
        # Number of batches, counting a final partial batch.
        return int(np.ceil(len(self._X) / float(self._batch_size)))

    def __getitem__(self, idx):
        lo = idx * self._batch_size
        hi = (idx + 1) * self._batch_size
        batch_paths = self._X[lo:hi]
        batch_labels = self._y[lo:hi]

        images = []
        for path in batch_paths:
            raw = io.imread(path)
            cropped = crop_image(raw, self._image_shape)
            # Grayscale plane replicated to a fake 3-channel RGB image.
            gray = cropped[:, :, 0]
            rgb = np.repeat(gray[..., np.newaxis], 3, -1)
            images.append(rgb)

        batch = preprocess_input(np.array(images))
        return (np.array(batch), np.array(batch_labels, dtype=np.int8))
# In[ ]:
# Data preparation
# Read tsv and initialize generator

# The whitelist file maps line number -> class name; keep both directions.
with open(whitelist) as f:
    inverse_target_map = dict(enumerate(f))
# [:-1] strips the trailing newline of each whitelist line.
target_map = {v[:-1]: k for (k, v) in inverse_target_map.items()}
num_classes=(1 + max(inverse_target_map))

trainTarget, trainImgId = readTargetList(trainTsv, target_map)
validTarget, validImgId = readTargetList(validTsv, target_map)
testTarget, testImgId = readTargetList(testTsv, target_map)

# shuffle targets and ids together so the pairs stay aligned
combined = list(zip(trainTarget, trainImgId))
random.shuffle(combined)
trainTarget[:], trainImgId[:] = zip(*combined)

# shuffle
combined = list(zip(validTarget, validImgId))
random.shuffle(combined)
validTarget[:], validImgId[:] = zip(*combined)

# shuffle
combined = list(zip(testTarget, testImgId))
random.shuffle(combined)
testTarget[:], testImgId[:] = zip(*combined)

# image file paths
X_trainImgPath = getImgPaths(trainImgId)
X_validImgPath = getImgPaths(validImgId)
X_testImgPath = getImgPaths(testImgId)

# Convert class vectors to binary class matrices (format required by Keras).
y_train = to_categorical(trainTarget, num_classes)
y_valid = to_categorical(validTarget, num_classes)
y_test = to_categorical(testTarget, num_classes)

# Constructing sequences
train_seq = LokiImageSequence(X_trainImgPath, y_train, batch_size, imgShape)
valid_seq = LokiImageSequence(X_validImgPath, y_valid, batch_size, imgShape)
test_seq = LokiImageSequence(X_testImgPath, y_test, batch_size, imgShape)

# In[ ]:

print("Length trainingsset: " + str(len(y_train)))
print("Length validationset: " + str(len(y_valid)))
print("Length testset: " + str(len(y_test)))
print("Number of classes: " + str(num_classes))
# In[ ]:

# Model customization: InceptionV3 backbone plus a fresh classification head
# (global average pooling -> dropout -> softmax over num_classes).
if preTrained:
    base_model = InceptionV3(weights='imagenet', include_top=False)
else:
    base_model = InceptionV3(weights=None, include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dropout(0.4)(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

# Freeze all layers (only the new head is trained when using pretrained weights)
if preTrained:
    for layer in base_model.layers:
        layer.trainable = False

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Create callbacks
csv_logger_callback = CSVLogger(saveDir + "history/model_history_log.csv", append=True)
# Snapshot of every epoch, named with epoch number and validation accuracy.
checkpointEveryEpoch_callback = ModelCheckpoint(saveDir + "modelFiles/saved-model-{epoch:02d}-{val_acc:.2f}.hdf5", monitor='val_acc', verbose=1, save_best_only=False, mode='max')
time_callback = TimeHistory()
# Stop when val_loss has not improved by at least 0.01 for 20 epochs.
earlyStopping_callback = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20, min_delta = 0.01)
modelCheckpoint_callback = ModelCheckpoint(saveDir + 'modelFiles/best_model.h5', monitor='val_loss', verbose=1)
callback_list = [time_callback, earlyStopping_callback, modelCheckpoint_callback, checkpointEveryEpoch_callback,csv_logger_callback]

# In[ ]:

# Transfer learning
history = model.fit_generator(train_seq,
                              epochs = max_epochs,
                              validation_data = valid_seq,
                              callbacks = callback_list)
# In[ ]:

# Save model: architecture as JSON, weights separately as HDF5.
model_json = model.to_json()
with open(saveDir + "modelFiles/model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights(saveDir + "modelFiles/weights.h5")

# Load model
# NOTE(review): this ModelCheckpoint was created without save_best_only, so
# 'best_model.h5' holds the most recent epoch's weights, not necessarily the
# best val_loss — confirm intent.
model = load_model(saveDir + 'modelFiles/best_model.h5')

# ## History

# In[ ]:

# convert the history.history dict to a pandas DataFrame:
hist_df = DataFrame(history.history)

# save to json:
hist_json_file = saveDir + 'history/history.json'
with open(hist_json_file, mode='w') as f:
    hist_df.to_json(f)
# In[ ]:

# summarize history for accuracy (train vs. validation per epoch)
plt.figure(figsize=(10,5))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(saveDir + 'history/accuracy.svg', transparent = True, bbox_inches='tight')
plt.show()

# summarize history for loss
plt.figure(figsize=(10,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(saveDir + 'history/loss.svg', transparent = True, bbox_inches='tight')
plt.show()

# In[ ]:

# plot_model needs pydot; point keras' vis utils at the imported module.
keras.utils.vis_utils.pydot = pydot
plot_model(model, to_file=saveDir+'model_architecture_charts/model_small.png')
def plot_keras_model_verbose(model, show_shapes=True, show_layer_names=True):
    """Render the model architecture as an SVG graph via graphviz/dot."""
    dot_graph = model_to_dot(model, show_shapes=show_shapes,
                             show_layer_names=show_layer_names)
    return SVG(dot_graph.create(prog='dot', format='svg'))
# Render and persist a detailed SVG of the architecture.
svg = plot_keras_model_verbose(model, show_shapes=True, show_layer_names=False)
with open(saveDir + "model_architecture_charts/model_verbose.svg", "w") as txt:
    txt.write(svg.data)
svg

# In[ ]:

# Save mode summary (text form of model.summary())
with open(saveDir + 'model_architecture_charts/model_summary.txt', 'w') as f:
    with redirect_stdout(f):
        model.summary()

# ## Training duration

# In[ ]:

times = time_callback.times

# In[ ]:

df = DataFrame(times)
df.to_csv (saveDir + r'trainingsDuration/durationPerEpoch.csv')

# In[ ]:

# NOTE(review): `sum` shadows the builtin from here on.
sum = df.sum()
sum.to_csv(saveDir + r'trainingsDuration/durationSum.csv')
print(sum)

# In[ ]:

avg = df.mean()
avg.to_csv(saveDir + r'trainingsDuration/durationAvgPerEpoch.csv')
print(avg)

# In[ ]:

# Predictions for the evaluation below; loss/accuracy on the test set.
predValid = model.predict_generator(valid_seq)
predTest = model.predict_generator(test_seq)
loss, acc = model.evaluate_generator(test_seq)
# ## Validationset

# In[ ]:

# Recover integer class ids from the one-hot validation labels.
trueClassNum=[]
for x in y_valid:
    ind = np.array(x).argmax()
    y = ind
    trueClassNum.append(y)

# Map ids back to class names ([:-1] strips the whitelist newline).
trueClassName = []
for f in trueClassNum:
    trueClassName.append(inverse_target_map[f][:-1])

# In[ ]:

# One-hot encode each prediction and keep its top softmax probability.
predMultilabelAll=[]
predProbabilityAll = []
counter = 0
for x in predValid:
    maxProb = x.max()
    predProbabilityAll.append(maxProb)
    ind = x.argmax()
    y = [0]*len(x)
    y[ind]=1
    predMultilabelAll.append(y)
    counter +=1

# In[ ]:

# Convert to int
predClassNum=[]
for x in predMultilabelAll:
    ind = np.array(x).argmax()
    y = ind
    predClassNum.append(y)

# In[ ]:

# Convert to name
predClassName = []
for f in predClassNum:
    predClassName.append(inverse_target_map[f][:-1])

# In[ ]:

# Per-class precision/recall/F1 on the validation split.
cl = classification_report(trueClassName, predClassName, output_dict=True)
df = DataFrame(cl).transpose()
df.to_csv (saveDir + r'classification_reports/valid.csv', index = True, header=True)
df

# In[ ]:

# Confusion matrix with absolute counts.
plt.figure(figsize=(15,15))
cm = confusion_matrix(trueClassName, predClassName)
df = DataFrame(cm)
df.to_csv (saveDir + r'confusion_matrix/valid_total.csv', index = True, header=True)
hm = heatmap(
    cm, xlabel='Predicted label',
    ylabel='True label', xticklabels=np.unique(trueClassName),
    yticklabels=np.unique(trueClassName), cmap=plt.cm.gray_r, fmt="%d")
plt.title("Total values \n")
plt.colorbar(hm)
plt.gca().invert_yaxis()
plt.savefig(saveDir + 'confusion_matrix/valid_total.svg', transparent = True, bbox_inches='tight')

# In[ ]:

# Confusion matrix normalised per true class (each row sums to 1).
plt.figure(figsize=(15,15))
cm = confusion_matrix(trueClassName, predClassName)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
df = DataFrame(cm)
df.to_csv (saveDir + r'confusion_matrix/valid_normalised.csv', index = True, header=True)
plt.figure(figsize=(20,20))
cm = heatmap(
    cm, xlabel='Predicted label',
    ylabel='True label', xticklabels=np.unique(trueClassName),
    yticklabels=np.unique(trueClassName), cmap=plt.cm.gray_r)
plt.title("Normalised values\n")
plt.colorbar(cm)
plt.gca().invert_yaxis()
plt.savefig(saveDir + 'confusion_matrix/valid_normalised.svg', transparent = True, bbox_inches='tight')

# In[ ]:

# Save pred and prob to tsv
df = DataFrame(list(zip(validImgId,trueClassName, predClassName,predProbabilityAll )),
               columns =['ImgId', 'True', 'Predicted', 'Probability'])
df = df.set_index('ImgId')
df.to_csv (saveDir + r'predictions/valid.csv', index = True, header=True)
# ## Testset

# In[ ]:

# Recover integer class ids from the one-hot test labels.
trueClassNum=[]
for x in y_test:
    ind = np.array(x).argmax()
    y = ind
    trueClassNum.append(y)

# Map ids back to class names ([:-1] strips the whitelist newline).
trueClassName = []
for f in trueClassNum:
    trueClassName.append(inverse_target_map[f][:-1])

# In[ ]:

# One-hot encode each prediction and keep its top softmax probability.
predMultilabelAll=[]
predProbabilityAll = []
counter = 0
for x in predTest:
    maxProb = x.max()
    predProbabilityAll.append(maxProb)
    ind = x.argmax()
    y = [0]*len(x)
    y[ind]=1
    predMultilabelAll.append(y)
    counter +=1

# In[ ]:

# Convert to int
predClassNum=[]
for x in predMultilabelAll:
    ind = np.array(x).argmax()
    y = ind
    predClassNum.append(y)

# In[ ]:

# Convert to name
predClassName = []
for f in predClassNum:
    predClassName.append(inverse_target_map[f][:-1])

# In[ ]:

# Per-class precision/recall/F1 on the test split.
cl = classification_report(trueClassName, predClassName,output_dict=True)
df = DataFrame(cl).transpose()
df.to_csv (saveDir + r'classification_reports/test.csv', index = True, header=True)
df

# In[ ]:

# Confusion matrix with absolute counts.
plt.figure(figsize=(15,15))
cm = confusion_matrix(trueClassName, predClassName)
df = DataFrame(cm)
df.to_csv (saveDir + r'confusion_matrix/test_total.csv', index = True, header=True)
hm = heatmap(
    cm, xlabel='Predicted label',
    ylabel='True label', xticklabels=np.unique(trueClassName),
    yticklabels=np.unique(trueClassName), cmap=plt.cm.gray_r, fmt="%d")
plt.title("Total values \n")
plt.colorbar(hm)
plt.gca().invert_yaxis()
plt.savefig(saveDir + 'confusion_matrix/test_total.svg', transparent = True, bbox_inches='tight')

# In[ ]:

# Confusion matrix normalised per true class (each row sums to 1).
plt.figure(figsize=(15,15))
cm = confusion_matrix(trueClassName, predClassName)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
df = DataFrame(cm)
df.to_csv (saveDir + r'confusion_matrix/test_normalised.csv', index = True, header=True)
plt.figure(figsize=(20,20))
cm = heatmap(
    cm, xlabel='Predicted label',
    ylabel='True label', xticklabels=np.unique(trueClassName),
    yticklabels=np.unique(trueClassName), cmap=plt.cm.gray_r)
plt.title("Normalised values\n")
plt.colorbar(cm)
plt.gca().invert_yaxis()
plt.savefig(saveDir + 'confusion_matrix/test_normalised.svg', transparent = True, bbox_inches='tight')
# In[ ]:

# Save pred and prob to tsv for the TEST split.
# Fix: the original zipped `validImgId` here (copy-paste from the validation
# section), pairing test-set predictions with validation image ids; the rows
# must be keyed by `testImgId`.
df = DataFrame(list(zip(testImgId, trueClassName, predClassName, predProbabilityAll)),
               columns =['ImgId', 'True', 'Predicted', 'Probability'])
df = df.set_index('ImgId')
df.to_csv (saveDir + r'predictions/test.csv', index = True, header=True)
# In[ ]:

# Collect run metadata lines for the experiment README.
modelName = "Modelname: " + base_model.name
trained = "Pre-Trained: " + str(preTrained)
overallRuntime = "Overall runtime: " + str(sum.get_values()[0]) + "s"
runtimePerEpoch = "Avg. runtime per Epoch: " + str(avg.get_values()[0]) + "s"
dsImg = "Dataset image: " + imgDir
dsTrain = "Dataset train: " + trainTsv
dsValid = "Dataset validation: " + validTsv
dsTest = "Dataset test: " + testTsv
testAcc = "Accuracy testset: " + str(acc)
testLoss = "Loss testset: " + str(loss)
numEpochs = "Num. Epochs: " + str(len(history.epoch))
earlyStop = "Early stop (0 if it didn't stop early): " + str(earlyStopping_callback.stopped_epoch)

# In[ ]:

# Write the README summarising this training run.
with open(saveDir + 'README.txt','w') as out:
    out.write('{}\n{}\n\n{}\n{}\n\n{}\n{}\n{}\n\n{}\n{}\n{}\n{}\n{}\n'.format(modelName,
                                                                              trained,
                                                                              testAcc,
                                                                              testLoss,
                                                                              numEpochs,
                                                                              overallRuntime,
                                                                              runtimePerEpoch,
                                                                              dsImg,
                                                                              dsTrain,
                                                                              dsValid,
                                                                              dsTest,
                                                                              earlyStop
                                                                              ))
| [
"keras.models.load_model",
"matplotlib.pyplot.title",
"csv.reader",
"keras.callbacks.CSVLogger",
"random.shuffle",
"keras.models.Model",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.gca",
"numpy.unique",
"pandas.DataFrame",
"keras.util... | [((5887, 5911), 'random.shuffle', 'random.shuffle', (['combined'], {}), '(combined)\n', (5901, 5911), False, 'import random\n'), ((6016, 6040), 'random.shuffle', 'random.shuffle', (['combined'], {}), '(combined)\n', (6030, 6040), False, 'import random\n'), ((6143, 6167), 'random.shuffle', 'random.shuffle', (['combined'], {}), '(combined)\n', (6157, 6167), False, 'import random\n'), ((6443, 6483), 'keras.utils.to_categorical', 'to_categorical', (['trainTarget', 'num_classes'], {}), '(trainTarget, num_classes)\n', (6457, 6483), False, 'from keras.utils import to_categorical, Sequence\n'), ((6494, 6534), 'keras.utils.to_categorical', 'to_categorical', (['validTarget', 'num_classes'], {}), '(validTarget, num_classes)\n', (6508, 6534), False, 'from keras.utils import to_categorical, Sequence\n'), ((6545, 6584), 'keras.utils.to_categorical', 'to_categorical', (['testTarget', 'num_classes'], {}), '(testTarget, num_classes)\n', (6559, 6584), False, 'from keras.utils import to_categorical, Sequence\n'), ((7405, 7456), 'keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (7410, 7456), False, 'from keras.models import Model\n'), ((7722, 7787), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(saveDir + 'history/model_history_log.csv')"], {'append': '(True)'}), "(saveDir + 'history/model_history_log.csv', append=True)\n", (7731, 7787), False, 'from keras.callbacks import CSVLogger\n'), ((7820, 7975), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(saveDir + 'modelFiles/saved-model-{epoch:02d}-{val_acc:.2f}.hdf5')"], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(False)', 'mode': '"""max"""'}), "(saveDir +\n 'modelFiles/saved-model-{epoch:02d}-{val_acc:.2f}.hdf5', monitor=\n 'val_acc', verbose=1, save_best_only=False, mode='max')\n", (7835, 7975), False, 'from keras.callbacks import ModelCheckpoint\n'), ((8035, 8124), 
'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(20)', 'min_delta': '(0.01)'}), "(monitor='val_loss', mode='min', verbose=1, patience=20,\n min_delta=0.01)\n", (8048, 8124), False, 'from keras.callbacks import EarlyStopping\n'), ((8150, 8238), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(saveDir + 'modelFiles/best_model.h5')"], {'monitor': '"""val_loss"""', 'verbose': '(1)'}), "(saveDir + 'modelFiles/best_model.h5', monitor='val_loss',\n verbose=1)\n", (8165, 8238), False, 'from keras.callbacks import ModelCheckpoint\n'), ((8800, 8848), 'keras.models.load_model', 'load_model', (["(saveDir + 'modelFiles/best_model.h5')"], {}), "(saveDir + 'modelFiles/best_model.h5')\n", (8810, 8848), False, 'from keras.models import load_model\n'), ((8949, 8975), 'pandas.DataFrame', 'DataFrame', (['history.history'], {}), '(history.history)\n', (8958, 8975), False, 'from pandas import DataFrame\n'), ((9159, 9186), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9169, 9186), True, 'import matplotlib.pyplot as plt\n'), ((9186, 9218), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (9194, 9218), True, 'import matplotlib.pyplot as plt\n'), ((9219, 9255), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (9227, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9256, 9283), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (9265, 9283), True, 'import matplotlib.pyplot as plt\n'), ((9284, 9306), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (9294, 9306), True, 'import matplotlib.pyplot as plt\n'), ((9307, 9326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9317, 9326), True, 'import matplotlib.pyplot as 
plt\n'), ((9327, 9380), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (9337, 9380), True, 'import matplotlib.pyplot as plt\n'), ((9381, 9470), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'history/accuracy.svg')"], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'history/accuracy.svg', transparent=True, bbox_inches\n ='tight')\n", (9392, 9470), True, 'import matplotlib.pyplot as plt\n'), ((9468, 9478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9476, 9478), True, 'import matplotlib.pyplot as plt\n'), ((9510, 9537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9520, 9537), True, 'import matplotlib.pyplot as plt\n'), ((9537, 9570), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (9545, 9570), True, 'import matplotlib.pyplot as plt\n'), ((9571, 9608), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (9579, 9608), True, 'import matplotlib.pyplot as plt\n'), ((9609, 9632), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (9618, 9632), True, 'import matplotlib.pyplot as plt\n'), ((9633, 9651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (9643, 9651), True, 'import matplotlib.pyplot as plt\n'), ((9652, 9671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9662, 9671), True, 'import matplotlib.pyplot as plt\n'), ((9672, 9725), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (9682, 9725), True, 'import matplotlib.pyplot as plt\n'), ((9726, 9811), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'history/loss.svg')"], {'transparent': 
'(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'history/loss.svg', transparent=True, bbox_inches='tight'\n )\n", (9737, 9811), True, 'import matplotlib.pyplot as plt\n'), ((9809, 9819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9817, 9819), True, 'import matplotlib.pyplot as plt\n'), ((9869, 9954), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': "(saveDir + 'model_architecture_charts/model_small.png')"}), "(model, to_file=saveDir + 'model_architecture_charts/model_small.png'\n )\n", (9879, 9954), False, 'from keras.utils.vis_utils import plot_model\n'), ((10627, 10643), 'pandas.DataFrame', 'DataFrame', (['times'], {}), '(times)\n', (10636, 10643), False, 'from pandas import DataFrame\n'), ((11871, 11940), 'sklearn.metrics.classification_report', 'classification_report', (['trueClassName', 'predClassName'], {'output_dict': '(True)'}), '(trueClassName, predClassName, output_dict=True)\n', (11892, 11940), False, 'from sklearn.metrics import classification_report\n'), ((12074, 12102), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (12084, 12102), True, 'import matplotlib.pyplot as plt\n'), ((12108, 12154), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['trueClassName', 'predClassName'], {}), '(trueClassName, predClassName)\n', (12124, 12154), False, 'from sklearn.metrics import confusion_matrix\n'), ((12160, 12173), 'pandas.DataFrame', 'DataFrame', (['cm'], {}), '(cm)\n', (12169, 12173), False, 'from pandas import DataFrame\n'), ((12435, 12463), 'matplotlib.pyplot.title', 'plt.title', (['"""Total values \n"""'], {}), "('Total values \\n')\n", (12444, 12463), True, 'import matplotlib.pyplot as plt\n'), ((12465, 12481), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['hm'], {}), '(hm)\n', (12477, 12481), True, 'import matplotlib.pyplot as plt\n'), ((12508, 12608), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'confusion_matrix/valid_total.svg')"], 
{'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'confusion_matrix/valid_total.svg', transparent=True,\n bbox_inches='tight')\n", (12519, 12608), True, 'import matplotlib.pyplot as plt\n'), ((12620, 12648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (12630, 12648), True, 'import matplotlib.pyplot as plt\n'), ((12654, 12700), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['trueClassName', 'predClassName'], {}), '(trueClassName, predClassName)\n', (12670, 12700), False, 'from sklearn.metrics import confusion_matrix\n'), ((12763, 12776), 'pandas.DataFrame', 'DataFrame', (['cm'], {}), '(cm)\n', (12772, 12776), False, 'from pandas import DataFrame\n'), ((12869, 12897), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (12879, 12897), True, 'import matplotlib.pyplot as plt\n'), ((13061, 13093), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalised values\n"""'], {}), "('Normalised values\\n')\n", (13070, 13093), True, 'import matplotlib.pyplot as plt\n'), ((13095, 13111), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cm'], {}), '(cm)\n', (13107, 13111), True, 'import matplotlib.pyplot as plt\n'), ((13138, 13244), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'confusion_matrix/valid_normalised.svg')"], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'confusion_matrix/valid_normalised.svg', transparent=\n True, bbox_inches='tight')\n", (13149, 13244), True, 'import matplotlib.pyplot as plt\n'), ((14344, 14413), 'sklearn.metrics.classification_report', 'classification_report', (['trueClassName', 'predClassName'], {'output_dict': '(True)'}), '(trueClassName, predClassName, output_dict=True)\n', (14365, 14413), False, 'from sklearn.metrics import classification_report\n'), ((14545, 14573), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (14555, 14573), True, 
'import matplotlib.pyplot as plt\n'), ((14579, 14625), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['trueClassName', 'predClassName'], {}), '(trueClassName, predClassName)\n', (14595, 14625), False, 'from sklearn.metrics import confusion_matrix\n'), ((14631, 14644), 'pandas.DataFrame', 'DataFrame', (['cm'], {}), '(cm)\n', (14640, 14644), False, 'from pandas import DataFrame\n'), ((14905, 14933), 'matplotlib.pyplot.title', 'plt.title', (['"""Total values \n"""'], {}), "('Total values \\n')\n", (14914, 14933), True, 'import matplotlib.pyplot as plt\n'), ((14935, 14951), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['hm'], {}), '(hm)\n', (14947, 14951), True, 'import matplotlib.pyplot as plt\n'), ((14978, 15077), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'confusion_matrix/test_total.svg')"], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'confusion_matrix/test_total.svg', transparent=True,\n bbox_inches='tight')\n", (14989, 15077), True, 'import matplotlib.pyplot as plt\n'), ((15089, 15117), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (15099, 15117), True, 'import matplotlib.pyplot as plt\n'), ((15123, 15169), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['trueClassName', 'predClassName'], {}), '(trueClassName, predClassName)\n', (15139, 15169), False, 'from sklearn.metrics import confusion_matrix\n'), ((15232, 15245), 'pandas.DataFrame', 'DataFrame', (['cm'], {}), '(cm)\n', (15241, 15245), False, 'from pandas import DataFrame\n'), ((15337, 15365), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (15347, 15365), True, 'import matplotlib.pyplot as plt\n'), ((15529, 15561), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalised values\n"""'], {}), "('Normalised values\\n')\n", (15538, 15561), True, 'import matplotlib.pyplot as plt\n'), ((15563, 15579), 'matplotlib.pyplot.colorbar', 'plt.colorbar', 
(['cm'], {}), '(cm)\n', (15575, 15579), True, 'import matplotlib.pyplot as plt\n'), ((15606, 15711), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'confusion_matrix/test_normalised.svg')"], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(saveDir + 'confusion_matrix/test_normalised.svg', transparent=\n True, bbox_inches='tight')\n", (15617, 15711), True, 'import matplotlib.pyplot as plt\n'), ((7130, 7180), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (7141, 7180), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((7204, 7248), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': 'None', 'include_top': '(False)'}), '(weights=None, include_top=False)\n', (7215, 7248), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((7276, 7315), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'name': '"""avg_pool"""'}), "(name='avg_pool')\n", (7298, 7315), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((7323, 7335), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (7330, 7335), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((7353, 7393), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (7358, 7393), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((1778, 1787), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1785, 1787), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2829), 'time.time', 'time.time', ([], {}), '()\n', (2827, 2829), False, 'import time\n'), ((3209, 3238), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (3219, 3238), False, 'import csv\n'), ((10500, 10518), 
'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (10515, 10518), False, 'from contextlib import redirect_stdout\n'), ((11946, 11959), 'pandas.DataFrame', 'DataFrame', (['cl'], {}), '(cl)\n', (11955, 11959), False, 'from pandas import DataFrame\n'), ((12340, 12364), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (12349, 12364), True, 'import numpy as np\n'), ((12379, 12403), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (12388, 12403), True, 'import numpy as np\n'), ((12482, 12491), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12489, 12491), True, 'import matplotlib.pyplot as plt\n'), ((12976, 13000), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (12985, 13000), True, 'import numpy as np\n'), ((13015, 13039), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (13024, 13039), True, 'import numpy as np\n'), ((13112, 13121), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13119, 13121), True, 'import matplotlib.pyplot as plt\n'), ((14418, 14431), 'pandas.DataFrame', 'DataFrame', (['cl'], {}), '(cl)\n', (14427, 14431), False, 'from pandas import DataFrame\n'), ((14810, 14834), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (14819, 14834), True, 'import numpy as np\n'), ((14849, 14873), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (14858, 14873), True, 'import numpy as np\n'), ((14952, 14961), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14959, 14961), True, 'import matplotlib.pyplot as plt\n'), ((15444, 15468), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (15453, 15468), True, 'import numpy as np\n'), ((15483, 15507), 'numpy.unique', 'np.unique', (['trueClassName'], {}), '(trueClassName)\n', (15492, 15507), True, 'import numpy as np\n'), ((15580, 15589), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15587, 15589), 
True, 'import matplotlib.pyplot as plt\n'), ((2390, 2408), 'numpy.mean', 'np.mean', (['color[:3]'], {}), '(color[:3])\n', (2397, 2408), True, 'import numpy as np\n'), ((5082, 5102), 'skimage.io.imread', 'io.imread', (['file_name'], {}), '(file_name)\n', (5091, 5102), False, 'from skimage import io\n'), ((5192, 5228), 'numpy.repeat', 'np.repeat', (['d[..., np.newaxis]', '(3)', '(-1)'], {}), '(d[..., np.newaxis], 3, -1)\n', (5201, 5228), True, 'import numpy as np\n'), ((5295, 5306), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5303, 5306), True, 'import numpy as np\n'), ((5332, 5343), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5340, 5343), True, 'import numpy as np\n'), ((5345, 5377), 'numpy.array', 'np.array', (['batch_y'], {'dtype': 'np.int8'}), '(batch_y, dtype=np.int8)\n', (5353, 5377), True, 'import numpy as np\n'), ((11147, 11158), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (11155, 11158), True, 'import numpy as np\n'), ((11666, 11677), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (11674, 11677), True, 'import numpy as np\n'), ((13616, 13627), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (13624, 13627), True, 'import numpy as np\n'), ((14139, 14150), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (14147, 14150), True, 'import numpy as np\n'), ((2901, 2912), 'time.time', 'time.time', ([], {}), '()\n', (2910, 2912), False, 'import time\n'), ((10054, 10133), 'keras.utils.vis_utils.model_to_dot', 'model_to_dot', (['model'], {'show_shapes': 'show_shapes', 'show_layer_names': 'show_layer_names'}), '(model, show_shapes=show_shapes, show_layer_names=show_layer_names)\n', (10066, 10133), False, 'from keras.utils.vis_utils import model_to_dot\n'), ((3049, 3060), 'time.time', 'time.time', ([], {}), '()\n', (3058, 3060), False, 'import time\n')] |
import numpy as np
# Upper Triangular Matrix (translated from "Matriz Triangular Superior").
# NOTE(review): A has nonzero entries below the diagonal ([2,..],[5,4,..]),
# so it is NOT actually upper triangular; back substitution will ignore
# those entries and disagree with np.linalg.solve below — confirm intent.
A = np.array([[4,2,5],[2,5,8],[5,4,3]])
b = np.array([[60.7],[92.9],[56.3]])
def f(A, b):
    """Solve the upper-triangular system ``A @ x = b`` by back substitution.

    Only the upper triangle of ``A`` (diagonal included) is read; entries
    below the diagonal are silently ignored, so the result matches
    ``np.linalg.solve`` only when ``A`` really is upper triangular.

    Parameters
    ----------
    A : (N, N) array_like
        Coefficient matrix.
    b : (N, 1) array_like
        Right-hand side column vector.

    Returns
    -------
    numpy.ndarray
        Solution as an (N, 1) column vector.
    """
    Ab = np.concatenate((A, b), axis=1)  # augmented matrix [A | b]
    # BUG FIX: N was never defined (NameError on first call); derive it
    # from the augmented matrix instead of relying on a global.
    N = Ab.shape[0]
    x = np.zeros(N)
    # Back substitution: solve from the last row upward.
    for i in range(N - 1, -1, -1):
        xsum = 0
        for j in range(i + 1, N, 1):
            xsum += Ab[i, j] * x[j]
        x[i] = (Ab[i, N] - xsum) / Ab[i, i]  # Ab[i, N] is b[i]
    # Return as a column vector, matching the original's np.transpose([x]).
    x = np.transpose([x])
    return x
# Run the custom back-substitution solver, then NumPy's general solver
# on the same system for comparison.
custom_solution = f(A, b)
print(custom_solution)
xsol = np.linalg.solve(A, b)
print(xsol)
| [
"numpy.zeros",
"numpy.transpose",
"numpy.array",
"numpy.linalg.solve",
"numpy.concatenate"
] | [((57, 100), 'numpy.array', 'np.array', (['[[4, 2, 5], [2, 5, 8], [5, 4, 3]]'], {}), '([[4, 2, 5], [2, 5, 8], [5, 4, 3]])\n', (65, 100), True, 'import numpy as np\n'), ((98, 132), 'numpy.array', 'np.array', (['[[60.7], [92.9], [56.3]]'], {}), '([[60.7], [92.9], [56.3]])\n', (106, 132), True, 'import numpy as np\n'), ((470, 491), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (485, 491), True, 'import numpy as np\n'), ((176, 206), 'numpy.concatenate', 'np.concatenate', (['(A, b)'], {'axis': '(1)'}), '((A, b), axis=1)\n', (190, 206), True, 'import numpy as np\n'), ((214, 225), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (222, 225), True, 'import numpy as np\n'), ((408, 425), 'numpy.transpose', 'np.transpose', (['[x]'], {}), '([x])\n', (420, 425), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.