code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
from future.utils import iteritems
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
    """
    Decorator that marks an Algo as always-run.

    An AlgoStack checks this flag so that the decorated Algo executes on
    every pass, even when an earlier Algo in the stack has failed.
    """
    f.run_always = True
    return f
class PrintDate(Algo):
    """
    Debugging helper that writes the target's current date to stdout.
    """

    def __call__(self, target):
        # Show where the backtest clock currently stands.
        print(target.now)
        return True
class PrintTempData(Algo):
    """
    Debugging helper that prints the contents of target.temp.

    Args:
        * fmt_string (str): Optional format string applied against the
          target's temp dict; reference the temp keys you want to examine
          inside curly braces ( { } ). When omitted, the raw temp dict is
          printed.
    """

    def __init__(self, fmt_string=None):
        super(PrintTempData, self).__init__()
        self.fmt_string = fmt_string

    def __call__(self, target):
        # Without a (non-empty) format string, just dump the dict.
        if not self.fmt_string:
            print(target.temp)
        else:
            print(self.fmt_string.format(**target.temp))
        return True
class PrintInfo(Algo):
    """
    Prints information about the target strategy; handy while debugging.

    Args:
        * fmt_string (str): A format string evaluated against the target
          object's __dict__, so wrap the attributes you want to inspect
          in curly braces ( { } ).

    Ex:
        PrintInfo('Strategy {name} : {now}')

    The example above emits the strategy's name and current date on each
    call, since the string is formatted with target.__dict__.
    """

    def __init__(self, fmt_string="{name} {now}"):
        super(PrintInfo, self).__init__()
        self.fmt_string = fmt_string

    def __call__(self, target):
        message = self.fmt_string.format(**target.__dict__)
        print(message)
        return True
class Debug(Algo):
    """
    Utility Algo that drops into a pdb session when triggered.

    Inside the debugger, 'target' is in scope and can be inspected via the
    StrategyBase interface.
    """

    def __call__(self, target):
        # Imported lazily so pdb is only pulled in when actually debugging.
        import pdb

        pdb.set_trace()
        return True
class RunOnce(Algo):
    """
    Returns True on the first run, then False forever after.

    As the name says, the algo only runs once. Useful in situations
    where we want to run the logic once (buy and hold for example).

    Note: the previous docstring advertised a ``run_on_first_call``
    argument that never existed; the constructor takes no arguments.
    """

    def __init__(self):
        super(RunOnce, self).__init__()
        # Flips to True after the first call.
        self.has_run = False

    def __call__(self, target):
        if self.has_run:
            # Already fired once - block all future executions.
            return False
        self.has_run = True
        return True
class RunPeriod(Algo):
    # Base class for calendar-driven triggers (daily/weekly/monthly/...).
    # Subclasses implement compare_dates to define the period boundary.

    def __init__(
        self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
    ):
        super(RunPeriod, self).__init__()
        self._run_on_first_date = run_on_first_date
        self._run_on_end_of_period = run_on_end_of_period
        self._run_on_last_date = run_on_last_date

    def __call__(self, target):
        now = target.now
        # Nothing to evaluate without a current date.
        if now is None:
            return False
        # Skip dates that are not part of the data timeline.
        if now not in target.data.index:
            return False
        index = target.data.index.get_loc(target.now)
        # Index 0 is a synthetic date added by the Backtest constructor.
        if index == 0:
            return False
        if index == 1:
            # First real date of the backtest.
            return bool(self._run_on_first_date)
        if index == (len(target.data.index) - 1):
            # Final date of the backtest.
            return bool(self._run_on_last_date)
        # pandas.Timestamp exposes the .week/.quarter style properties
        # that the compare_dates implementations rely on.
        offset = 1 if self._run_on_end_of_period else -1
        neighbour = pd.Timestamp(target.data.index[index + offset])
        return self.compare_dates(pd.Timestamp(now), neighbour)

    @abc.abstractmethod
    def compare_dates(self, now, date_to_compare):
        raise (NotImplementedError("RunPeriod Algo is an abstract class!"))
class RunDaily(RunPeriod):
    """
    Triggers whenever the calendar day changes.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True when target.now falls on a different day than the
    previous (or next, when run_on_end_of_period) date, False otherwise.
    Useful for daily rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # A period boundary is crossed when the dates differ at day level.
        return now.date() != date_to_compare.date()
class RunWeekly(RunPeriod):
    """
    Triggers whenever the week changes.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True when target.now is in a different week than the
    previous (or next) date, False otherwise. Useful for weekly
    rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # Compare the year too, since week numbers repeat each year.
        return now.year != date_to_compare.year or now.week != date_to_compare.week
class RunMonthly(RunPeriod):
    """
    Triggers whenever the month changes.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True when target.now is in a different month than the
    previous (or next) date, False otherwise. Useful for monthly
    rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # Compare the year too, since month numbers repeat each year.
        return now.year != date_to_compare.year or now.month != date_to_compare.month
class RunQuarterly(RunPeriod):
    """
    Triggers whenever the quarter changes.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True when target.now is in a different quarter than the
    previous (or next) date, False otherwise. Useful for quarterly
    rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # Compare the year too, since quarter numbers repeat each year.
        return now.year != date_to_compare.year or now.quarter != date_to_compare.quarter
class RunYearly(RunPeriod):
    """
    Triggers whenever the year changes.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True when target.now is in a different year than the
    previous (or next) date, False otherwise. Useful for yearly
    rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        return now.year != date_to_compare.year
class RunOnDate(Algo):
    """
    Triggers only on an explicit set of dates.

    Args:
        * dates (list): Dates on which the Algo should run.
    """

    def __init__(self, *dates):
        """
        Args:
            * dates (*args): Anything pandas.to_datetime can parse; most
              commonly 'yyyy-mm-dd' strings.
        """
        super(RunOnDate, self).__init__()
        # Normalize everything to Timestamps up front so membership
        # checks against target.now are consistent.
        self.dates = [pd.to_datetime(d) for d in dates]

    def __call__(self, target):
        return target.now in self.dates
class RunAfterDate(Algo):
    """
    Returns True only once a given date has passed.

    Args:
        * date: Date after which to start trading

    Note:
        This is useful for algos that rely on trailing averages where you
        don't want to start trading until some amount of data has been built up
    """

    def __init__(self, date):
        """
        Args:
            * date: Date after which to start trading
        """
        super(RunAfterDate, self).__init__()
        # Normalize to a Timestamp so the > comparison is well-defined.
        self.date = pd.to_datetime(date)

    def __call__(self, target):
        return target.now > self.date
class RunAfterDays(Algo):
    """
    Returns True once a number of 'warmup' trading days have elapsed.

    Args:
        * days (int): Number of trading days to wait before starting

    Note:
        This is useful for algos that rely on trailing averages where you
        don't want to start trading until some amount of data has been built up
    """

    def __init__(self, days):
        """
        Args:
            * days (int): Number of trading days to wait before starting
        """
        super(RunAfterDays, self).__init__()
        # Remaining warmup days; decremented on each call until zero.
        self.days = days

    def __call__(self, target):
        if self.days <= 0:
            return True
        self.days -= 1
        return False
class RunIfOutOfBounds(Algo):
    """
    This algo returns true if any of the target weights deviate by an amount greater
    than tolerance. For example, it will be run if the tolerance is set to 0.5 and
    a security grows from a target weight of 0.2 to greater than 0.3.
    A strategy where rebalancing is performed quarterly or whenever any
    security's weight deviates by more than 20% could be implemented by:
    Or([runQuarterlyAlgo,runIfOutOfBoundsAlgo(0.2)])
    Args:
        * tolerance (float): Allowed deviation of each security weight.
    Requires:
        * Weights
    """
    def __init__(self, tolerance):
        # Relative deviation allowed before this algo fires.
        self.tolerance = float(tolerance)
        super(RunIfOutOfBounds, self).__init__()
    def __call__(self, target):
        # No stored target weights yet -> always run (forces a rebalance).
        if "weights" not in target.temp:
            return True
        targets = target.temp["weights"]
        for cname in target.children:
            if cname in targets:
                c = target.children[cname]
                # Relative drift of the child's actual weight vs its target.
                deviation = abs((c.weight - targets[cname]) / targets[cname])
                if deviation > self.tolerance:
                    return True
        if "cash" in target.temp:
            # NOTE(review): `targets.value` assumes the weights object stored
            # in temp exposes a `.value` attribute (i.e. is not a plain dict).
            # TODO confirm against whatever WeighX algo feeds temp['weights'].
            cash_deviation = abs(
                (target.capital - targets.value) / targets.value - target.temp["cash"]
            )
            if cash_deviation > self.tolerance:
                return True
        return False
class RunEveryNPeriods(Algo):
    """
    Runs once every n periods.

    Args:
        * n (int): Run each n periods
        * offset (int): Applies to the first run. If 0, this algo will run the
          first time it is called.

    Handy for staggered strategies. For example: each month select the top
    5 performers and hold them for 3 months. Build 3 copies of the strategy
    with offsets 0/1/2 and have a master strategy allocate equal capital to
    each.
    """

    def __init__(self, n, offset=0):
        super(RunEveryNPeriods, self).__init__()
        self.n = n
        self.offset = offset
        # Counter positioned so the first trigger honours the offset.
        self.idx = n - offset - 1
        # Timestamp of the most recent call; used to dedupe calls that
        # arrive within the same period.
        self.lcall = 0

    def __call__(self, target):
        # Guard against being invoked more than once per period.
        if self.lcall == target.now:
            return False
        self.lcall = target.now
        if self.idx == (self.n - 1):
            # Cycle complete - fire and restart the counter.
            self.idx = 0
            return True
        self.idx += 1
        return False
class SelectAll(Algo):
    """
    Stores every security from the universe in temp['selected'].

    By default securities with missing data (nan) on the current date, or
    with a zero/negative price, are excluded.

    Args:
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, include_no_data=False, include_negative=False):
        super(SelectAll, self).__init__()
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        if self.include_no_data:
            # Keep everything, data or not.
            target.temp["selected"] = target.universe.columns
            return True
        # Drop names without a price on the current date.
        universe = target.universe.loc[target.now].dropna()
        if self.include_negative:
            target.temp["selected"] = list(universe.index)
        else:
            # Also drop zero/negative prices.
            target.temp["selected"] = list(universe[universe > 0].index)
        return True
class SelectThese(Algo):
    """
    Stores a fixed list of tickers in temp['selected'].

    Args:
        * ticker (list): List of tickers to select.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, tickers, include_no_data=False, include_negative=False):
        super(SelectThese, self).__init__()
        self.tickers = tickers
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        if self.include_no_data:
            # Pass the list through untouched.
            target.temp["selected"] = self.tickers
            return True
        # Restrict to tickers that have a price on the current date.
        universe = target.universe.loc[target.now, self.tickers].dropna()
        if self.include_negative:
            target.temp["selected"] = list(universe.index)
        else:
            # Also drop zero/negative prices.
            target.temp["selected"] = list(universe[universe > 0].index)
        return True
class SelectHasData(Algo):
    """
    Sets temp['selected'] to the universe items meeting data requirements.

    A more advanced SelectAll: keeps only tickers with enough history for
    downstream algos to work with. For example, require at least 3 months
    of data by passing a 3 month lookback.

    Along with the lookback you can supply min_count - the number of data
    points that must exist inside the lookback window for a series to be
    considered valid. For a 3 month lookback (a trading month has roughly
    20 days, less holidays) something like 57 or 58 is typical. When
    min_count is omitted, ffn's get_num_days_required provides an estimate.

    Args:
        * lookback (DateOffset): A DateOffset that determines the lookback
          period.
        * min_count (int): Minimum number of days required for a series to be
          considered valid. If not provided, ffn's get_num_days_required is
          used to estimate the number of points required.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        min_count=None,
        include_no_data=False,
        include_negative=False,
    ):
        super(SelectHasData, self).__init__()
        self.lookback = lookback
        # Fall back to ffn's estimate when no explicit count is given.
        if min_count is None:
            min_count = bt.ffn.get_num_days_required(lookback)
        self.min_count = min_count
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        # Start from a previous selection when present, else the full universe.
        if "selected" in target.temp:
            selected = target.temp["selected"]
        else:
            selected = target.universe.columns
        window = target.universe.loc[target.now - self.lookback :, selected]
        counts = window.count()
        # Keep only series with enough data points in the window.
        counts = counts[counts >= self.min_count]
        if not self.include_no_data:
            # Drop names missing a value on the current date.
            counts = counts[~target.universe.loc[target.now, selected].isnull()]
        if not self.include_negative:
            # Drop names whose current price is zero or negative.
            counts = counts[target.universe.loc[target.now, selected] > 0]
        target.temp["selected"] = list(counts.index)
        return True
class SelectN(Algo):
    """
    Sets temp['selected'] by ranking temp['stat'].

    Keeps the top (or bottom) N items according to temp['stat'], a metric
    usually computed by a preceding Algo. An n below 1 is treated as a
    fraction of the available items.

    Args:
        * n (int): select top n items.
        * sort_descending (bool): Should the stat be sorted in descending order
          before selecting the first n items?
        * all_or_none (bool): If true, only populates temp['selected'] if we
          have n items. If we have less than n, then temp['selected'] = [].
        * filter_selected (bool): If True, will only select from the existing
          'selected' list.

    Sets:
        * selected

    Requires:
        * stat
    """

    def __init__(
        self, n, sort_descending=True, all_or_none=False, filter_selected=False
    ):
        super(SelectN, self).__init__()
        if n < 0:
            raise ValueError("n cannot be negative")
        self.n = n
        # Internally stored as an 'ascending' flag for sort_values.
        self.ascending = not sort_descending
        self.all_or_none = all_or_none
        self.filter_selected = filter_selected

    def __call__(self, target):
        stat = target.temp["stat"].dropna()
        if self.filter_selected and "selected" in target.temp:
            # Only rank names already selected upstream.
            stat = stat.loc[stat.index.intersection(target.temp["selected"])]
        stat.sort_values(ascending=self.ascending, inplace=True)
        # n < 1 means 'keep this fraction of the ranked items'.
        keep_n = self.n if self.n >= 1 else int(self.n * len(stat))
        chosen = list(stat[:keep_n].index)
        if self.all_or_none and len(chosen) < keep_n:
            # Not enough items to satisfy the request - select nothing.
            chosen = []
        target.temp["selected"] = chosen
        return True
class SelectMomentum(AlgoStack):
    """
    Sets temp['selected'] using a plain momentum filter.

    Keeps the top n securities ranked by total return over the lookback
    window. Internally this is just an AlgoStack of StatTotalReturn
    followed by SelectN. Run SelectAll() (or similar) beforehand, since
    StatTotalReturn reads temp['selected'].

    Args:
        * n (int): select first N elements
        * lookback (DateOffset): lookback period for total return
          calculation
        * lag (DateOffset): Lag interval for total return calculation
        * sort_descending (bool): Sort descending (highest return is best)
        * all_or_none (bool): If true, only populates temp['selected'] if we
          have n items. If we have less than n, then temp['selected'] = [].

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(
        self,
        n,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        sort_descending=True,
        all_or_none=False,
    ):
        # Delegate the work to a two-algo stack: compute the stat, rank it.
        super(SelectMomentum, self).__init__(
            StatTotalReturn(lookback=lookback, lag=lag),
            SelectN(n=n, sort_descending=sort_descending, all_or_none=all_or_none),
        )
class SelectWhere(Algo):
    """
    Selects securities from a boolean indicator DataFrame.

    Picks the names whose value is True on the current date (target.now),
    but only when that date appears in the signal's index. The signal is
    typically the result of a pandas comparison such as data > 100.

    Args:
        * signal (str|DataFrame): Boolean DataFrame containing selection logic.
          If a string is passed, frame is accessed using target.get_data
          This is the preferred way of using the algo.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, signal, include_no_data=False, include_negative=False):
        super(SelectWhere, self).__init__()
        # Accept either an inline frame or a named data lookup.
        if isinstance(signal, pd.DataFrame):
            self.signal_name = None
            self.signal = signal
        else:
            self.signal_name = signal
            self.signal = None
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        # Resolve the signal frame (inline or via target.get_data).
        if self.signal_name is None:
            signal = self.signal
        else:
            signal = target.get_data(self.signal_name)
        if target.now in signal.index:
            row = signal.loc[target.now]
            # Tickers whose signal is True on this date.
            selected = row[row == True].index  # noqa: E712
            if not self.include_no_data:
                universe = target.universe.loc[target.now, list(selected)].dropna()
                if self.include_negative:
                    selected = list(universe.index)
                else:
                    selected = list(universe[universe > 0].index)
            target.temp["selected"] = list(selected)
        return True
class SelectRandomly(AlgoStack):
    """
    Replaces temp['selected'] with a random subset of itself.

    Draws n random elements from the current selection. Useful for
    benchmarking: if a momentum (or any other) selection algorithm is
    believed to add value, compare it against a strategy that selects
    securities at random via this Algo.

    Note:
        Another selection algorithm (typically SelectAll) should run
        beforehand to populate temp['selected'].

    Args:
        * n (int): Select N elements randomly.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(self, n=None, include_no_data=False, include_negative=False):
        super(SelectRandomly, self).__init__()
        self.n = n
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        if "selected" in target.temp:
            candidates = target.temp["selected"]
        else:
            candidates = list(target.universe.columns)
        if not self.include_no_data:
            universe = target.universe.loc[target.now, candidates].dropna()
            if self.include_negative:
                candidates = list(universe.index)
            else:
                candidates = list(universe[universe > 0].index)
        if self.n is not None:
            # Never ask for more elements than are available.
            count = self.n if self.n < len(candidates) else len(candidates)
            candidates = random.sample(candidates, int(count))
        target.temp["selected"] = candidates
        return True
class SelectRegex(Algo):
    """
    Filters temp['selected'] by matching names against a regex.

    Useful when working with a large universe of different kinds of securities

    Args:
        * regex (str): regular expression on the name

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(self, regex):
        super(SelectRegex, self).__init__()
        # Compile once; reused on every call.
        self.regex = re.compile(regex)

    def __call__(self, target):
        pattern = self.regex
        target.temp["selected"] = [
            name for name in target.temp["selected"] if pattern.search(name)
        ]
        return True
class ResolveOnTheRun(Algo):
    """
    Swaps on-the-run aliases in temp['selected'] for real security names.

    Names in temp['selected'] that match columns of the provided data
    frame are treated as aliases and replaced by the underlying security
    appropriate for the current date; all other names pass through.

    Args:
        * on_the_run (str): Name of a Data frame with
          - columns set to "on the run" ticker names
          - index set to the timeline for the backtest
          - values are the actual security name to use for the given date
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Requires:
        * selected

    Sets:
        * selected
    """

    def __init__(self, on_the_run, include_no_data=False, include_negative=False):
        super(ResolveOnTheRun, self).__init__()
        self.on_the_run = on_the_run
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        otr = target.get_data(self.on_the_run)
        selected = target.temp["selected"]
        # Split the selection into aliases (OTR columns) and real tickers.
        aliases = [s for s in selected if s in otr.columns]
        resolved = otr.loc[target.now, aliases].tolist()
        if not self.include_no_data:
            universe = target.universe.loc[target.now, resolved].dropna()
            if self.include_negative:
                resolved = list(universe.index)
            else:
                resolved = list(universe[universe > 0].index)
        passthrough = [s for s in selected if s not in otr.columns]
        target.temp["selected"] = resolved + passthrough
        return True
class SetStat(Algo):
    """
    Sets temp['stat'] for use by downstream algos (such as SelectN).

    Args:
        * stat (str|DataFrame): A dataframe of the same dimension as target.universe
          If a string is passed, frame is accessed using target.get_data
          This is the preferred way of using the algo.

    Sets:
        * stat
    """

    def __init__(self, stat):
        # Initialize the Algo base class - this call was missing; every
        # other Algo subclass in this module performs it.
        super(SetStat, self).__init__()
        # Accept either an inline frame or a named data lookup.
        if isinstance(stat, pd.DataFrame):
            self.stat_name = None
            self.stat = stat
        else:
            self.stat_name = stat
            self.stat = None

    def __call__(self, target):
        if self.stat_name is None:
            stat = self.stat
        else:
            stat = target.get_data(self.stat_name)
        # Only the current date's cross-section is exposed downstream.
        target.temp["stat"] = stat.loc[target.now]
        return True
class StatTotalReturn(Algo):
    """
    Stores total returns over a lookback window in temp['stat'].

    Computes the total return (via ffn's calc_total_return) for each
    element of temp['selected'] over the given lookback period.

    Args:
        * lookback (DateOffset): lookback period.
        * lag (DateOffset): Lag interval. Total return is calculated in
          the inteval [now - lookback - lag, now - lag]

    Sets:
        * stat

    Requires:
        * selected
    """

    def __init__(self, lookback=pd.DateOffset(months=3), lag=pd.DateOffset(days=0)):
        super(StatTotalReturn, self).__init__()
        self.lookback = lookback
        self.lag = lag

    def __call__(self, target):
        selected = target.temp["selected"]
        # Window end is lagged behind the current date.
        end = target.now - self.lag
        prices = target.universe.loc[end - self.lookback : end, selected]
        target.temp["stat"] = prices.calc_total_return()
        return True
class WeighEqually(Algo):
    """
    Assigns equal weights (1/n) to every item in temp['selected'].

    Sets:
        * weights

    Requires:
        * selected
    """

    def __init__(self):
        super(WeighEqually, self).__init__()

    def __call__(self, target):
        selected = target.temp["selected"]
        count = len(selected)
        if count == 0:
            # Nothing selected -> empty weight dict.
            target.temp["weights"] = {}
        else:
            each = 1.0 / count
            target.temp["weights"] = {ticker: each for ticker in selected}
        return True
class WeighSpecified(Algo):
    """
    Sets temp['weights'] from a pre-specified ticker:weight mapping.

    Args:
        * weights (dict): target weights -> ticker: weight

    Sets:
        * weights
    """

    def __init__(self, **weights):
        super(WeighSpecified, self).__init__()
        self.weights = weights

    def __call__(self, target):
        # Copy so downstream mutation cannot corrupt the stored targets.
        target.temp["weights"] = self.weights.copy()
        return True
class ScaleWeights(Algo):
    """
    Multiplies every entry of temp['weights'] by a constant factor.

    Useful for going short, or scaling up/down when using
    :class:`FixedIncomeStrategy <bt.core.FixedIncomeStrategy>`.

    Args:
        * scale (float): the scaling factor

    Sets:
        * weights

    Requires:
        * weights
    """

    def __init__(self, scale):
        super(ScaleWeights, self).__init__()
        self.scale = scale

    def __call__(self, target):
        scaled = {}
        for ticker, weight in iteritems(target.temp["weights"]):
            scaled[ticker] = self.scale * weight
        target.temp["weights"] = scaled
        return True
class WeighTarget(Algo):
    """
    Sets target weights from a target-weight DataFrame.

    When the weight frame shares the dimensions of target.universe, the
    portfolio is effectively rebalanced every period (daily data, daily
    rebalance). A frame containing only month-end rows gives monthly
    rebalancing instead: on dates present in the frame the weights are set
    and the stack proceeds (usually to a Rebalance algo); on other dates
    no weights are set and the stack halts.

    Args:
        * weights (str|DataFrame): DataFrame containing the target weights
          If a string is passed, frame is accessed using target.get_data
          This is the preferred way of using the algo.

    Sets:
        * weights
    """

    def __init__(self, weights):
        super(WeighTarget, self).__init__()
        # Accept either an inline frame or a named data lookup.
        if isinstance(weights, pd.DataFrame):
            self.weights_name = None
            self.weights = weights
        else:
            self.weights_name = weights
            self.weights = None

    def __call__(self, target):
        # Resolve the weight frame (inline or via target.get_data).
        if self.weights_name is None:
            weights = self.weights
        else:
            weights = target.get_data(self.weights_name)
        if target.now not in weights.index:
            # No targets for this date - stop the stack here.
            return False
        target.temp["weights"] = weights.loc[target.now].dropna()
        return True
class WeighInvVol(Algo):
    """
    Sets temp['weights'] via inverse-volatility weighting.

    Delegates to ffn's calc_inv_vol_weights, a common risk-parity scheme:
    weights are proportional to the inverse of each asset's volatility, so
    the least volatile names receive the largest allocation.

    Args:
        * lookback (DateOffset): lookback period for estimating volatility

    Sets:
        * weights

    Requires:
        * selected
    """

    def __init__(self, lookback=pd.DateOffset(months=3), lag=pd.DateOffset(days=0)):
        super(WeighInvVol, self).__init__()
        self.lookback = lookback
        self.lag = lag

    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing or a single asset selected.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        end = target.now - self.lag
        prices = target.universe.loc[end - self.lookback : end, selected]
        weights = bt.ffn.calc_inv_vol_weights(prices.to_returns().dropna())
        target.temp["weights"] = weights.dropna()
        return True
class WeighERC(Algo):
    """
    Sets temp['weights'] using the equal-risk-contribution algorithm.

    Delegates to ffn's calc_erc_weights. ERC extends inverse-volatility
    risk parity by folding return correlations into each asset's risk
    contribution. The result resembles a minimum-variance portfolio under
    a diversification constraint, with volatility between the minimum
    variance and equally-weighted portfolios (Maillard 2008).

    See:
        https://en.wikipedia.org/wiki/Risk_parity

    Args:
        * lookback (DateOffset): lookback period for estimating covariance
        * initial_weights (list): Starting asset weights [default inverse vol].
        * risk_weights (list): Risk target weights [default equal weight].
        * covar_method (str): method used to estimate the covariance. See ffn's
          calc_erc_weights for more details. (default ledoit-wolf).
        * risk_parity_method (str): Risk parity estimation method. see ffn's
          calc_erc_weights for more details. (default ccd).
        * maximum_iterations (int): Maximum iterations in iterative solutions
          (default 100).
        * tolerance (float): Tolerance level in iterative solutions (default 1E-8).

    Sets:
        * weights

    Requires:
        * selected
    """

    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        initial_weights=None,
        risk_weights=None,
        covar_method="ledoit-wolf",
        risk_parity_method="ccd",
        maximum_iterations=100,
        tolerance=1e-8,
        lag=pd.DateOffset(days=0),
    ):
        super(WeighERC, self).__init__()
        self.lookback = lookback
        self.initial_weights = initial_weights
        self.risk_weights = risk_weights
        self.covar_method = covar_method
        self.risk_parity_method = risk_parity_method
        self.maximum_iterations = maximum_iterations
        self.tolerance = tolerance
        self.lag = lag

    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing or a single asset selected.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        end = target.now - self.lag
        prices = target.universe.loc[end - self.lookback : end, selected]
        weights = bt.ffn.calc_erc_weights(
            prices.to_returns().dropna(),
            initial_weights=self.initial_weights,
            risk_weights=self.risk_weights,
            covar_method=self.covar_method,
            risk_parity_method=self.risk_parity_method,
            maximum_iterations=self.maximum_iterations,
            tolerance=self.tolerance,
        )
        target.temp["weights"] = weights.dropna()
        return True
class WeighMeanVar(Algo):
    """
    Sets temp['weights'] using mean-variance optimization.

    Delegates to ffn's calc_mean_var_weights, a Python implementation of
    Markowitz's mean-variance optimization.

    See:
        http://en.wikipedia.org/wiki/Modern_portfolio_theory#The_efficient_frontier_with_no_risk-free_asset

    Args:
        * lookback (DateOffset): lookback period for estimating volatility
        * bounds ((min, max)): tuple specifying the min and max weights for
          each asset in the optimization.
        * covar_method (str): method used to estimate the covariance. See ffn's
          calc_mean_var_weights for more details.
        * rf (float): risk-free rate used in optimization.

    Sets:
        * weights

    Requires:
        * selected
    """

    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        bounds=(0.0, 1.0),
        covar_method="ledoit-wolf",
        rf=0.0,
        lag=pd.DateOffset(days=0),
    ):
        super(WeighMeanVar, self).__init__()
        self.lookback = lookback
        self.lag = lag
        self.bounds = bounds
        self.covar_method = covar_method
        self.rf = rf

    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing or a single asset selected.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        end = target.now - self.lag
        prices = target.universe.loc[end - self.lookback : end, selected]
        weights = bt.ffn.calc_mean_var_weights(
            prices.to_returns().dropna(),
            weight_bounds=self.bounds,
            covar_method=self.covar_method,
            rf=self.rf,
        )
        target.temp["weights"] = weights.dropna()
        return True
class WeighRandomly(Algo):
    """
    Sets temp['weights'] to a random weight vector over 'selected'.

    Useful for benchmarking: if a weighting scheme cannot beat a random
    allocation over the same universe (e.g. for a low-vol strategy), it is
    probably not adding value. This is an Algo wrapper around ffn's
    random_weights function.

    Args:
        * bounds ((low, high)): Tuple including low and high bounds for each
          security
        * weight_sum (float): What should the weights sum up to?

    Sets:
        * weights

    Requires:
        * selected
    """

    def __init__(self, bounds=(0.0, 1.0), weight_sum=1):
        super(WeighRandomly, self).__init__()
        self.bounds = bounds
        self.weight_sum = weight_sum

    def __call__(self, target):
        selected = target.temp["selected"]
        weights = {}
        try:
            rands = bt.ffn.random_weights(
                len(selected), self.bounds, self.weight_sum
            )
            weights = dict(zip(selected, rands))
        except ValueError:
            # infeasible bounds / weight_sum combination - leave weights empty
            pass
        target.temp["weights"] = weights
        return True
class LimitDeltas(Algo):
    """
    Modifies temp['weights'] so no target weight moves more than a given
    delta away from the current weight.

    Useful for conservative rebalancing assumptions - restricting how much
    could actually be traded on a given day without affecting the market.
    For example, with limit=0.1 a position currently at 100% whose new
    target is 0% will instead be set to 90%.

    Args:
        * limit (float, dict): Weight delta limit. If float, this will be a
          global limit for all securities. If dict, you may specify by-ticker
          limit.

    Sets:
        * weights

    Requires:
        * weights
    """

    def __init__(self, limit=0.1):
        super(LimitDeltas, self).__init__()
        self.limit = limit
        # a dict means per-ticker limits; anything else is one global cap
        self.global_limit = not isinstance(limit, dict)

    def __call__(self, target):
        tw = target.temp["weights"]
        # consider current holdings as well as newly targeted names
        for key in set(target.children.keys()) | set(tw.keys()):
            tgt = tw[key] if key in tw else 0.0
            cur = target.children[key].weight if key in target.children else 0.0
            delta = tgt - cur

            if self.global_limit:
                cap = self.limit
            elif key in self.limit:
                cap = self.limit[key]
            else:
                # no per-ticker limit defined for this name - leave untouched
                continue

            if abs(delta) > cap:
                tw[key] = cur + cap * np.sign(delta)
        return True
class LimitWeights(Algo):
    """
    Caps each entry of temp['weights'] at a maximum weight.

    This is an Algo wrapper around ffn's limit_weights: excess weight
    above the cap is redistributed to the remaining assets proportionally
    to their current weights. If the cap is below the equal weight (1/n),
    no feasible allocation exists and the weights are cleared.

    Args:
        * limit (float): Weight limit.

    Sets:
        * weights

    Requires:
        * weights
    """

    def __init__(self, limit=0.1):
        super(LimitWeights, self).__init__()
        self.limit = limit

    def __call__(self, target):
        if "weights" not in target.temp:
            return True

        weights = target.temp["weights"]
        if len(weights) == 0:
            return True

        if self.limit < 1.0 / len(weights):
            # cap is infeasible: weights could not sum to 1 under it
            weights = {}
        else:
            weights = bt.ffn.limit_weights(weights, self.limit)
        target.temp["weights"] = weights
        return True
class TargetVol(Algo):
    """
    Scales temp['weights'] so the portfolio targets a given annualized
    volatility.

    Estimates the covariance of the weighted securities over a trailing
    window, computes the annualized portfolio volatility implied by the
    current weights, and multiplies every weight by
    (target_volatility / realized_volatility).

    Args:
        * target_volatility: annualized volatility to target
        * lookback (DateOffset): lookback period for estimating volatility
        * lag (DateOffset): amount of time to wait to calculate the covariance
        * covar_method: method of calculating volatility, either
          'standard' (sample covariance) or 'ledoit-wolf'
        * annualization_factor: number of periods to annualize by.
          It is assumed that target volatility is already annualized by this factor.

    Updates:
        * weights

    Requires:
        * temp['weights']
    """

    def __init__(
        self,
        target_volatility,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        covar_method="standard",
        annualization_factor=252,
    ):
        super(TargetVol, self).__init__()
        self.target_volatility = target_volatility
        self.lookback = lookback
        self.lag = lag
        self.covar_method = covar_method
        self.annualization_factor = annualization_factor

    def __call__(self, target):
        current_weights = target.temp["weights"]
        selected = current_weights.keys()

        # if there were no weights already set then skip
        if len(selected) == 0:
            return True

        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        returns = bt.ffn.to_returns(prc)

        # calc covariance matrix
        if self.covar_method == "ledoit-wolf":
            # NOTE(review): sklearn's ledoit_wolf returns a (covariance,
            # shrinkage) tuple, so `covar.columns` below would fail on this
            # branch - confirm intended usage before relying on it.
            covar = sklearn.covariance.ledoit_wolf(returns)
        elif self.covar_method == "standard":
            covar = returns.cov()
        else:
            raise NotImplementedError("covar_method not implemented")

        weights = pd.Series(
            [current_weights[x] for x in covar.columns], index=covar.columns
        )

        # annualized portfolio vol: sqrt(w' C w * annualization_factor)
        vol = np.sqrt(
            np.matmul(weights.values.T, np.matmul(covar.values, weights.values))
            * self.annualization_factor
        )

        # Scale all weights by target/realized vol. The original code
        # special-cased vol above, below and equal to the target, but the
        # "above" and "below" branches computed the identical ratio; only
        # the exact-match case (mult == 1) is kept explicit to avoid a
        # division when vol equals the target.
        if vol == self.target_volatility:
            mult = 1
        else:
            mult = self.target_volatility / vol

        for k in target.temp["weights"].keys():
            target.temp["weights"][k] = target.temp["weights"][k] * mult
        return True
class PTE_Rebalance(Algo):
    """
    Triggers a rebalance when the predicted tracking error (PTE) versus a
    set of static target weights exceeds a cap.

    Returns True (letting the rest of the stack run) only when the
    annualized volatility of the active weights (current minus target)
    is above PTE_volatility_cap; otherwise returns False.

    Args:
        * PTE_volatility_cap: annualized tracking-error volatility cap
        * target_weights: dataframe of weights that needs to have the same index as the price dataframe
        * lookback (DateOffset): lookback period for estimating volatility
        * lag (DateOffset): amount of time to wait to calculate the covariance
        * covar_method: method of calculating volatility
        * annualization_factor: number of periods to annualize by.
          It is assumed that target volatility is already annualized by this factor.
    """

    def __init__(
        self,
        PTE_volatility_cap,
        target_weights,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        covar_method="standard",
        annualization_factor=252,
    ):
        super(PTE_Rebalance, self).__init__()
        self.PTE_volatility_cap = PTE_volatility_cap
        self.target_weights = target_weights
        self.lookback = lookback
        self.lag = lag
        self.covar_method = covar_method
        self.annualization_factor = annualization_factor

    def __call__(self, target):
        # nothing can be computed before the backtest has started
        if target.now is None:
            return False
        if target.positions.shape == (0, 0):
            return True
        positions = target.positions.loc[target.now]
        if positions is None:
            return True
        prices = target.universe.loc[target.now, positions.index]
        if prices is None:
            return True

        # current weights of existing holdings vs the static targets
        current_weights = positions * prices / target.value
        target_weights = self.target_weights.loc[target.now, :]

        # union of currently-held and targeted securities
        cols = list(current_weights.index.copy())
        for c in target_weights.keys():
            if c not in cols:
                cols.append(c)

        # active weights: current minus target
        weights = pd.Series(np.zeros(len(cols)), index=cols)
        for c in cols:
            if c in current_weights:
                weights[c] = current_weights[c]
            if c in target_weights:
                weights[c] -= target_weights[c]

        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, cols]
        returns = bt.ffn.to_returns(prc)

        # calc covariance matrix
        if self.covar_method == "ledoit-wolf":
            # NOTE(review): sklearn's ledoit_wolf returns a (covariance,
            # shrinkage) tuple - confirm `covar.values` below is intended.
            covar = sklearn.covariance.ledoit_wolf(returns)
        elif self.covar_method == "standard":
            covar = returns.cov()
        else:
            raise NotImplementedError("covar_method not implemented")

        # annualized tracking error: sqrt(w' C w * annualization_factor)
        PTE_vol = np.sqrt(
            np.matmul(weights.values.T, np.matmul(covar.values, weights.values))
            * self.annualization_factor
        )
        if pd.isnull(PTE_vol):
            return False

        # rebalance only when the tracking error breaches the cap.
        # (The trailing unreachable `return True` after this exhaustive
        # if/else in the original was removed.)
        if PTE_vol > self.PTE_volatility_cap:
            return True
        else:
            return False
class CapitalFlow(Algo):
    """
    Models capital flows (inflows or outflows) on the target node.

    For example, a pension fund might have inflows every month or year due
    to contributions. The adjustment changes the capital of the target
    node without affecting its returns, and the capital remains in the
    strategy until a re-allocation/rebalance is made.

    Args:
        * amount (float): Amount of adjustment
    """

    def __init__(self, amount):
        """
        CapitalFlow constructor.

        Args:
            * amount (float): Amount to adjust by
        """
        super(CapitalFlow, self).__init__()
        # coerce once at construction so __call__ is a plain adjustment
        self.amount = float(amount)

    def __call__(self, target):
        target.adjust(self.amount)
        return True
class CloseDead(Algo):
    """
    Closes out positions in "dead" securities (price <= 0) and removes
    them from temp['weights'] if they entered it by any chance.

    To be called before Rebalance(). In a normal workflow this is not
    needed: dead securities are not selected by
    SelectAll(include_no_data=False) or similar, and Rebalance() closes
    positions missing from temp['weights'] anyway. Use it when that
    workflow cannot be applied or a modified weighting method is used.

    Requires:
        * weights
    """

    def __init__(self):
        super(CloseDead, self).__init__()

    def __call__(self, target):
        if "weights" not in target.temp:
            return True

        weights = target.temp["weights"]
        for cname in target.children:
            # a non-positive price marks a dead security
            if target.universe[cname].loc[target.now] <= 0:
                target.close(cname)
                if cname in weights:
                    del weights[cname]
        return True
class SetNotional(Algo):
    """
    Sets temp['notional_value'], the base used for rebalancing
    :class:`FixedIncomeStrategy <bt.core.FixedIncomeStrategy>` targets.

    Returns False (halting the stack) on dates with no notional value.

    Args:
        * notional_value (str): Name of a pd.Series object containing the
          target notional values of the strategy over time.

    Sets:
        * notional_value
    """

    def __init__(self, notional_value):
        self.notional_value = notional_value
        super(SetNotional, self).__init__()

    def __call__(self, target):
        series = target.get_data(self.notional_value)
        if target.now not in series.index:
            return False
        target.temp["notional_value"] = series.loc[target.now]
        return True
class Rebalance(Algo):
    """
    Rebalances capital based on temp['weights']

    Rebalances capital based on temp['weights']. Also closes
    positions if open but not in target_weights. This is typically
    the last Algo called once the target weights have been set.

    Requires:
        * weights
        * cash (optional): You can set a 'cash' value on temp. This should be a
          number between 0-1 and determines the amount of cash to set aside.
          For example, if cash=0.3, the strategy will allocate 70% of its
          value to the provided weights, and the remaining 30% will be kept
          in cash. If this value is not provided (default), the full value
          of the strategy is allocated to securities.
        * notional_value (optional): Required only for fixed_income targets. This is the base
          value of total notional that will apply to the weights.
    """
    def __init__(self):
        super(Rebalance, self).__init__()
    def __call__(self, target):
        # nothing to do if no weights were set earlier in the stack
        if "weights" not in target.temp:
            return True
        targets = target.temp["weights"]
        # save value because it will change after each call to allocate
        # use it as base in rebalance calls
        # call it before de-allocation so that notional_value is correct
        if target.fixed_income:
            # explicit per-date notional (from SetNotional) wins over the
            # strategy's own notional value
            if "notional_value" in target.temp:
                base = target.temp["notional_value"]
            else:
                base = target.notional_value
        else:
            base = target.value
        # de-allocate children that are not in targets and have non-zero value
        # (open positions)
        for cname in target.children:
            # if this child is in our targets, we don't want to close it out
            if cname in targets:
                continue
            # get child and value
            c = target.children[cname]
            if target.fixed_income:
                v = c.notional_value
            else:
                v = c.value
            # if non-zero and non-null, we need to close it out
            if v != 0.0 and not np.isnan(v):
                target.close(cname, update=False)
        # If cash is set (it should be a value between 0-1 representing the
        # proportion of cash to keep), calculate the new 'base'
        if "cash" in target.temp and not target.fixed_income:
            base = base * (1 - target.temp["cash"])
        # Turn off updating while we rebalance each child
        for item in iteritems(targets):
            target.rebalance(item[1], child=item[0], base=base, update=False)
        # Now update the whole tree once, after all allocations
        target.root.update(target.now)
        return True
class RebalanceOverTime(Algo):
    """
    Similar to Rebalance but rebalances to target
    weight over n periods.

    Rebalances towards a target weight over a n periods. Splits up the weight
    delta over n periods.

    This can be useful if we want to make more conservative rebalacing
    assumptions. Some strategies can produce large swings in allocations. It
    might not be reasonable to assume that this rebalancing can occur at the
    end of one specific period. Therefore, this algo can be used to simulate
    rebalancing over n periods.

    This has typically been used in monthly strategies where we want to spread
    out the rebalancing over 5 or 10 days.

    Note:
        This Algo will require the run_always wrapper in the above case. For
        example, the RunMonthly will return True on the first day, and
        RebalanceOverTime will be 'armed'. However, RunMonthly will return
        False the rest days of the month. Therefore, we must specify that we
        want to always run this algo.

    Args:
        * n (int): number of periods over which rebalancing takes place.

    Requires:
        * weights
    """
    def __init__(self, n=10):
        super(RebalanceOverTime, self).__init__()
        # stored as float so the per-period delta below is a true division
        self.n = float(n)
        self._rb = Rebalance()
        # pending target weights and periods remaining (None when idle)
        self._weights = None
        self._days_left = None
    def __call__(self, target):
        # new weights specified - update rebalance data
        if "weights" in target.temp:
            self._weights = target.temp["weights"]
            self._days_left = self.n
        # if _weights are not None, we have some work to do
        if self._weights:
            tgt = {}
            # scale delta relative to # of periods left and set that as the new
            # target
            for t in self._weights:
                curr = target.children[t].weight if t in target.children else 0.0
                dlt = (self._weights[t] - curr) / self._days_left
                tgt[t] = curr + dlt
            # mock weights and call real Rebalance
            target.temp["weights"] = tgt
            self._rb(target)
            # dec _days_left. If 0, set to None & set _weights to None
            self._days_left -= 1
            if self._days_left == 0:
                self._days_left = None
                self._weights = None
        return True
class Require(Algo):
    """
    Flow control Algo: returns the value of a predicate applied to an
    entry of temp.

    For example, to make sure some items were selected, pass a lambda
    that checks the length of 'selected':

        pred=lambda x: len(x) == 0
        item='selected'

    Args:
        * pred (Algo): Function that returns a Bool given the strategy. This
          is the definition of an Algo, but typically a simple lambda is used.
        * item (str): An item within temp.
        * if_none (bool): Result if the item required is not in temp or if its
          value is None
    """

    def __init__(self, pred, item, if_none=False):
        super(Require, self).__init__()
        self.item = item
        self.pred = pred
        self.if_none = if_none

    def __call__(self, target):
        # a missing key and an explicit None value are treated identically
        item = target.temp.get(self.item, None)
        if item is None:
            return self.if_none
        return self.pred(item)
class Not(Algo):
    """
    Flow control Algo that inverts the result of another algo.

    Useful for "inverting" other flow control algos,
    for example Not( RunAfterDate(...) ), Not( RunAfterDays(...) ), etc.

    Args:
        * algo (Algo): The algo to run and invert the return value of
    """

    def __init__(self, algo):
        super(Not, self).__init__()
        self._algo = algo

    def __call__(self, target):
        result = self._algo(target)
        return not result
class Or(Algo):
    """
    Flow control Algo that combines multiple signals into one.

    For example, two different rebalance signals can work together:

        runOnDateAlgo = bt.algos.RunOnDate(pdf.index[0])  # first date of the series
        runMonthlyAlgo = bt.algos.RunMonthly()
        orAlgo = Or([runMonthlyAlgo, runOnDateAlgo])

    orAlgo returns True if it is the first date or the 1st of the month.

    Args:
        * list_of_algos: Iterable list of algos.
          Runs each algo and returns true if any algo returns true.
    """

    def __init__(self, list_of_algos):
        super(Or, self).__init__()
        self._list_of_algos = list_of_algos

    def __call__(self, target):
        combined = False
        # every algo is invoked (no short-circuiting), matching the
        # original behavior of running each one before combining
        for algo in self._list_of_algos:
            combined = combined | algo(target)
        return combined
class SelectTypes(Algo):
    """
    Sets temp['selected'] based on node type.

    If temp['selected'] is already set, the existing selection is
    filtered rather than replaced.

    Args:
        * include_types (list): Types of nodes to include
        * exclude_types (list): Types of nodes to exclude

    Sets:
        * selected
    """

    def __init__(self, include_types=(bt.core.Node,), exclude_types=()):
        super(SelectTypes, self).__init__()
        self.include_types = include_types
        # an empty exclusion collection is replaced by a type no child
        # node matches, mirroring the original behavior
        self.exclude_types = exclude_types or (type(None),)

    def __call__(self, target):
        chosen = []
        for sec_name, sec in target.children.items():
            if isinstance(sec, self.include_types) and not isinstance(
                sec, self.exclude_types
            ):
                chosen.append(sec_name)
        if "selected" in target.temp:
            prior = target.temp["selected"]
            chosen = [name for name in chosen if name in prior]
        target.temp["selected"] = chosen
        return True
class ClosePositionsAfterDates(Algo):
    """
    Close positions on securities after a given date.

    This can be used to make sure positions on matured/redeemed securities are
    closed. It can also be used as part of a strategy to, i.e. make sure
    the strategy doesn't hold any securities with time to maturity less than a year

    Note that if placed after a RunPeriod algo in the stack, that the actual
    closing of positions will occur after the provided date. For this to work,
    the "price" of the security (even if matured) must exist up until that date.
    Alternatively, run this with the @run_always decorator to close the positions
    immediately.

    Also note that this algo does not operate using temp['weights'] and Rebalance.
    This is so that hedges (which are excluded from that workflow) will also be
    closed as necessary.

    Args:
        * close_dates (str): the name of a dataframe indexed by security name, with columns
          "date": the date after which we want to close the position ASAP

    Sets:
        * target.perm['closed'] : to keep track of which securities have already closed
    """
    def __init__(self, close_dates):
        super(ClosePositionsAfterDates, self).__init__()
        self.close_dates = close_dates
    def __call__(self, target):
        # lazily initialize the permanent record of already-closed names
        if "closed" not in target.perm:
            target.perm["closed"] = set()
        close_dates = target.get_data(self.close_dates)["date"]
        # Find securities that are candidate for closing: actual security
        # nodes that have a close date and have not been closed yet
        sec_names = [
            sec_name
            for sec_name, sec in iteritems(target.children)
            if isinstance(sec, SecurityBase)
            and sec_name in close_dates.index
            and sec_name not in target.perm["closed"]
        ]
        # Check whether closed (close date on or before the current date)
        is_closed = close_dates.loc[sec_names] <= target.now
        # Close position without updating, and record it as closed
        for sec_name in is_closed[is_closed].index:
            target.close(sec_name, update=False)
            target.perm["closed"].add(sec_name)
        # Now update the whole tree once, after all closes
        target.root.update(target.now)
        return True
class RollPositionsAfterDates(Algo):
    """
    Roll securities based on the provided map.

    This can be used for any securities which have "On-The-Run" and "Off-The-Run"
    versions (treasury bonds, index swaps, etc).

    Also note that this algo does not operate using temp['weights'] and Rebalance.
    This is so that hedges (which are excluded from that workflow) will also be
    rolled as necessary.

    Args:
        * roll_data (str): the name of a dataframe indexed by security name, with columns
          - "date": the first date at which the roll can occur
          - "target": the security name we are rolling into
          - "factor": the conversion factor. One unit of the original security
            rolls into "factor" units of the new one.

    Sets:
        * target.perm['rolled'] : to keep track of which securities have already rolled
    """
    def __init__(self, roll_data):
        super(RollPositionsAfterDates, self).__init__()
        self.roll_data = roll_data
    def __call__(self, target):
        # lazily initialize the permanent record of already-rolled names
        if "rolled" not in target.perm:
            target.perm["rolled"] = set()
        roll_data = target.get_data(self.roll_data)
        # quantities are aggregated per destination security before trading
        transactions = {}
        # Find securities that are candidate for roll: actual security
        # nodes with roll data that have not been rolled yet
        sec_names = [
            sec_name
            for sec_name, sec in iteritems(target.children)
            if isinstance(sec, SecurityBase)
            and sec_name in roll_data.index
            and sec_name not in target.perm["rolled"]
        ]
        # Calculate new transaction and close old position
        for sec_name, sec_fields in roll_data.loc[sec_names].iterrows():
            # only roll once the roll date has been reached
            if sec_fields["date"] <= target.now:
                target.perm["rolled"].add(sec_name)
                # one unit of the old security becomes "factor" units of the new
                new_quantity = sec_fields["factor"] * target[sec_name].position
                new_sec = sec_fields["target"]
                if new_sec in transactions:
                    transactions[new_sec] += new_quantity
                else:
                    transactions[new_sec] = new_quantity
                target.close(sec_name, update=False)
        # Do all the new transactions at the end, to do any necessary aggregations first
        for new_sec, quantity in iteritems(transactions):
            target.transact(quantity, new_sec, update=False)
        # Now update the whole tree once, after all transactions
        target.root.update(target.now)
        return True
class SelectActive(Algo):
    """
    Filters temp['selected'] to exclude securities that were closed or
    rolled (after a certain date) by ClosePositionsAfterDates or
    RollPositionsAfterDates, so they are not selected again for weighting
    (even if they still have prices).

    Requires:
        * selected
        * perm['closed'] or perm['rolled']

    Sets:
        * selected
    """

    def __call__(self, target):
        inactive = target.perm.get("rolled", set()) | target.perm.get(
            "closed", set()
        )
        target.temp["selected"] = [
            s for s in target.temp["selected"] if s not in inactive
        ]
        return True
class ReplayTransactions(Algo):
    """
    Replay a list of transactions that were executed.

    This is useful for taking a blotter of actual trades that occurred,
    and measuring performance against hypothetical strategies.
    In particular, one can replay the outputs of backtest.Result.get_transactions

    Note that this allows the timestamps and prices of the reported transactions
    to be completely arbitrary, so while the strategy may track performance
    on a daily basis, it will accurately account for the actual PNL of
    the trades based on where they actually traded, and the bidofferpaid
    attribute on the strategy will capture the "slippage" as measured
    against the daily prices.

    Args:
        * transactions (str): name of a MultiIndex dataframe with format
          Date, Security | quantity, price.
          Note this schema follows the output of backtest.Result.get_transactions
    """
    def __init__(self, transactions):
        super(ReplayTransactions, self).__init__()
        self.transactions = transactions
    def __call__(self, target):
        timeline = target.data.index
        index = timeline.get_loc(target.now)
        end = target.now
        # trades in the half-open window (previous timestamp, now] are
        # replayed; on the very first bar, include everything up to now
        if index == 0:
            start = pd.Timestamp.min
        else:
            start = timeline[index - 1]
        # Get the transactions since the last update
        all_transactions = target.get_data(self.transactions)
        timestamps = all_transactions.index.get_level_values("Date")
        transactions = all_transactions[(timestamps > start) & (timestamps <= end)]
        # execute each trade at its reported price, deferring tree updates
        for (_, security), transaction in transactions.iterrows():
            c = target[security]
            c.transact(
                transaction["quantity"], price=transaction["price"], update=False
            )
        # Now update the whole tree once, after all transactions
        target.root.update(target.now)
        return True
class SimulateRFQTransactions(Algo):
    """
    An algo that simulates the outcomes from RFQs (Request for Quote)
    using a "model" that determines which ones becomes transactions and at what price
    those transactions happen. This can be used from the perspective of the sender of the
    RFQ or the receiver.

    Args:
        * rfqs (str): name of a dataframe with columns
          Date, Security | quantity, *additional columns as required by model
        * model (object): a function/callable object with arguments
          - rfqs : data frame of rfqs to respond to
          - target : the strategy object, for access to position and value data
          and which returns a set of transactions, a MultiIndex DataFrame with:
          Date, Security | quantity, price
    """
    def __init__(self, rfqs, model):
        super(SimulateRFQTransactions, self).__init__()
        self.rfqs = rfqs
        self.model = model
    def __call__(self, target):
        timeline = target.data.index
        index = timeline.get_loc(target.now)
        end = target.now
        # RFQs in the half-open window (previous timestamp, now] are
        # considered; on the very first bar, include everything up to now
        if index == 0:
            start = pd.Timestamp.min
        else:
            start = timeline[index - 1]
        # Get the RFQs since the last update
        all_rfqs = target.get_data(self.rfqs)
        timestamps = all_rfqs.index.get_level_values("Date")
        rfqs = all_rfqs[(timestamps > start) & (timestamps <= end)]
        # Turn the RFQs into transactions via the user-supplied model
        transactions = self.model(rfqs, target)
        # execute each resulting trade, deferring tree updates
        for (_, security), transaction in transactions.iterrows():
            c = target[security]
            c.transact(
                transaction["quantity"], price=transaction["price"], update=False
            )
        # Now update the whole tree once, after all transactions
        target.root.update(target.now)
        return True
def _get_unit_risk(security, data, index=None):
try:
unit_risks = data[security]
unit_risk = unit_risks.values[index]
except Exception:
# No risk data, assume zero
unit_risk = 0.0
return unit_risk
class UpdateRisk(Algo):
    """
    Tracks a risk measure on all nodes of the strategy. To use this node, the
    ``additional_data`` argument on :class:`Backtest <bt.backtest.Backtest>` must
    have a "unit_risk" key. The value should be a dictionary, keyed
    by risk measure, of DataFrames with a column per security that is sensitive to that measure.

    Args:
        * measure (str): the name of the risk measure (IR01, PVBP, IsIndustials, etc).
          The name must coincide with the keys of the dictionary passed to additional_data as the
          "unit_risk" argument.
        * history (int): The level of depth in the tree at which to track the time series of risk numbers.
          i.e. 0=no tracking, 1=first level only, etc. More levels is more expensive.

    Modifies:
        * The "risk" attribute on the target and all its children
        * If history==True, the "risks" attribute on the target and all its children
    """

    def __init__(self, measure, history=0):
        super(UpdateRisk, self).__init__(name="UpdateRisk>%s" % measure)
        self.measure = measure
        self.history = history

    def _setup_risk(self, target, set_history):
        """ Setup risk attributes on the node in question """
        target.risk = {}
        if set_history:
            target.risks = pd.DataFrame(index=target.data.index)

    def _setup_measure(self, target, set_history):
        """ Setup a risk measure within the risk attributes on the node in question """
        # use np.nan: the np.NaN alias was removed in NumPy 2.0
        target.risk[self.measure] = np.nan
        if set_history:
            target.risks[self.measure] = np.nan

    def _set_risk_recursive(self, target, depth, unit_risk_frame):
        """ Depth-first computation: a security's risk is
        unit_risk * position * multiplier; a non-leaf node's risk is the
        sum of its children's risks. """
        set_history = depth < self.history

        # General setup of risk on nodes
        if not hasattr(target, "risk"):
            self._setup_risk(target, set_history)
        if self.measure not in target.risk:
            self._setup_measure(target, set_history)

        if isinstance(target, bt.core.SecurityBase):
            # Use target.root.now as non-traded securities may not have been updated yet
            # and there is no need to update them here as we only use position
            index = unit_risk_frame.index.get_loc(target.root.now)
            unit_risk = _get_unit_risk(target.name, unit_risk_frame, index)
            if is_zero(target.position):
                risk = 0.0
            else:
                risk = unit_risk * target.position * target.multiplier
        else:
            risk = 0.0
            for child in target.children.values():
                self._set_risk_recursive(child, depth + 1, unit_risk_frame)
                risk += child.risk[self.measure]

        target.risk[self.measure] = risk
        if depth < self.history:
            target.risks.loc[target.now, self.measure] = risk

    def __call__(self, target):
        unit_risk_frame = target.get_data("unit_risk")[self.measure]
        self._set_risk_recursive(target, 0, unit_risk_frame)
        return True
class PrintRisk(Algo):
    """
    Prints the target's risk data, if any has been set.

    Args:
        * fmt_string (str): A string that will later be formatted with the
          target object's risk attributes. Therefore, you should provide
          what you want to examine within curly braces ( { } )
          If not provided, will print the entire dictionary with no formatting.
    """

    def __init__(self, fmt_string=""):
        super(PrintRisk, self).__init__()
        self.fmt_string = fmt_string

    def __call__(self, target):
        # nodes without an UpdateRisk pass have no "risk" attribute
        if hasattr(target, "risk"):
            if self.fmt_string:
                output = self.fmt_string.format(**target.risk)
            else:
                output = target.risk
            print(output)
        return True
class HedgeRisks(Algo):
    """
    Hedges risk measures with selected instruments.

    Make sure that the UpdateRisk algo has been called beforehand.

    Args:
        * measures (list): the names of the risk measures to hedge
        * pseudo (bool): whether to use the pseudo-inverse to compute
          the inverse Jacobian. If False, will fail if the number
          of selected instruments is not equal to the number of
          measures, or if the Jacobian is singular
        * strategy (StrategyBase): If provided, will hedge the risk
          from this strategy in addition to the risk from target.
          This is to allow separate tracking of hedged and unhedged
          performance. Note that risk_strategy must occur earlier than
          'target' in a depth-first traversal of the children of the root,
          otherwise hedging will occur before positions of risk_strategy are
          updated.
        * throw_nan (bool): Whether to throw on nan hedge notionals, rather
          than simply not hedging.

    Requires:
        * selected
    """

    def __init__(self, measures, pseudo=False, strategy=None, throw_nan=True):
        super(HedgeRisks, self).__init__()
        if len(measures) == 0:
            raise ValueError("Must pass in at least one measure to hedge")
        self.measures = measures
        self.pseudo = pseudo
        self.strategy = strategy
        self.throw_nan = throw_nan

    def _get_target_risk(self, target, measure):
        """ Fetch a single risk number from the target, raising clear
        errors when UpdateRisk has not populated it. """
        if not hasattr(target, "risk"):
            raise ValueError("risk not set up on target %s" % target.name)
        if measure not in target.risk:
            raise ValueError("measure %s not set on target %s" % (measure, target.name))
        return target.risk[measure]

    def __call__(self, target):
        securities = target.temp["selected"]
        # Get target risk
        target_risk = np.array(
            [self._get_target_risk(target, m) for m in self.measures]
        )
        if self.strategy is not None:
            # Add the target risk of the strategy to the risk of the target
            # (which contains existing hedges)
            target_risk += np.array(
                [self._get_target_risk(self.strategy, m) for m in self.measures]
            )
        # Turn target_risk into a column array
        target_risk = target_risk.reshape(len(self.measures), 1)

        # Get hedge risk as a Jacobian matrix
        data = []
        for m in self.measures:
            d = target.get_data("unit_risk").get(m)
            if d is None:
                # Fix: previously formatted with `self.measure`, which does
                # not exist on HedgeRisks (the attribute is `measures`) and
                # raised AttributeError instead of the intended ValueError.
                raise ValueError(
                    "unit_risk for %s not present in temp on %s"
                    % (m, target.name)
                )
            i = d.index.get_loc(target.now)
            data.append((i, d))
        hedge_risk = np.array(
            [[_get_unit_risk(s, d, i) for (i, d) in data] for s in securities]
        )

        # Get hedge ratios: solve for notionals that offset target_risk
        if self.pseudo:
            inv = np.linalg.pinv(hedge_risk).T
        else:
            inv = np.linalg.inv(hedge_risk).T
        notionals = np.matmul(inv, -target_risk).flatten()

        # Hedge
        for notional, security in zip(notionals, securities):
            if np.isnan(notional) and self.throw_nan:
                raise ValueError("%s has nan hedge notional" % security)
            target.transact(notional, security)
        return True
| [
"bt.ffn.limit_weights",
"bt.ffn.to_returns",
"numpy.isnan",
"numpy.linalg.pinv",
"pandas.DataFrame",
"pandas.DateOffset",
"future.utils.iteritems",
"bt.ffn.random_weights",
"pandas.to_datetime",
"pandas.Series",
"bt.core.is_zero",
"numpy.linalg.inv",
"bt.ffn.get_num_days_required",
"re.com... | [((2425, 2440), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2438, 2440), False, 'import pdb\n'), ((9709, 9729), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (9723, 9729), True, 'import pandas as pd\n'), ((16807, 16830), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (16820, 16830), True, 'import pandas as pd\n'), ((20639, 20662), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (20652, 20662), True, 'import pandas as pd\n'), ((20676, 20697), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (20689, 20697), True, 'import pandas as pd\n'), ((25313, 25330), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (25323, 25330), False, 'import re\n'), ((28740, 28763), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (28753, 28763), True, 'import pandas as pd\n'), ((28769, 28790), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (28782, 28790), True, 'import pandas as pd\n'), ((33165, 33188), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (33178, 33188), True, 'import pandas as pd\n'), ((33194, 33215), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (33207, 33215), True, 'import pandas as pd\n'), ((35370, 35393), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (35383, 35393), True, 'import pandas as pd\n'), ((35590, 35611), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (35603, 35611), True, 'import pandas as pd\n'), ((37696, 37719), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (37709, 37719), True, 'import pandas as pd\n'), ((37812, 37833), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (37825, 37833), True, 'import pandas as pd\n'), ((43817, 43840), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': 
'(3)'}), '(months=3)\n', (43830, 43840), True, 'import pandas as pd\n'), ((43854, 43875), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (43867, 43875), True, 'import pandas as pd\n'), ((44560, 44582), 'bt.ffn.to_returns', 'bt.ffn.to_returns', (['prc'], {}), '(prc)\n', (44577, 44582), False, 'import bt\n'), ((44907, 44982), 'pandas.Series', 'pd.Series', (['[current_weights[x] for x in covar.columns]'], {'index': 'covar.columns'}), '([current_weights[x] for x in covar.columns], index=covar.columns)\n', (44916, 44982), True, 'import pandas as pd\n'), ((46329, 46352), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (46342, 46352), True, 'import pandas as pd\n'), ((46366, 46387), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(0)'}), '(days=0)\n', (46379, 46387), True, 'import pandas as pd\n'), ((47798, 47820), 'bt.ffn.to_returns', 'bt.ffn.to_returns', (['prc'], {}), '(prc)\n', (47815, 47820), False, 'import bt\n'), ((48297, 48315), 'pandas.isnull', 'pd.isnull', (['PTE_vol'], {}), '(PTE_vol)\n', (48306, 48315), True, 'import pandas as pd\n'), ((53771, 53789), 'future.utils.iteritems', 'iteritems', (['targets'], {}), '(targets)\n', (53780, 53789), False, 'from future.utils import iteritems\n'), ((64170, 64193), 'future.utils.iteritems', 'iteritems', (['transactions'], {}), '(transactions)\n', (64179, 64193), False, 'from future.utils import iteritems\n'), ((9070, 9087), 'pandas.to_datetime', 'pd.to_datetime', (['d'], {}), '(d)\n', (9084, 9087), True, 'import pandas as pd\n'), ((17059, 17097), 'bt.ffn.get_num_days_required', 'bt.ffn.get_num_days_required', (['lookback'], {}), '(lookback)\n', (17087, 17097), False, 'import bt\n'), ((39826, 39880), 'bt.ffn.random_weights', 'bt.ffn.random_weights', (['n', 'self.bounds', 'self.weight_sum'], {}), '(n, self.bounds, self.weight_sum)\n', (39847, 39880), False, 'import bt\n'), ((43016, 43052), 'bt.ffn.limit_weights', 'bt.ffn.limit_weights', (['tw', 'self.limit'], 
{}), '(tw, self.limit)\n', (43036, 43052), False, 'import bt\n'), ((70384, 70421), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'target.data.index'}), '(index=target.data.index)\n', (70396, 70421), True, 'import pandas as pd\n'), ((71396, 71420), 'bt.core.is_zero', 'is_zero', (['target.position'], {}), '(target.position)\n', (71403, 71420), False, 'from bt.core import Algo, AlgoStack, SecurityBase, is_zero\n'), ((4371, 4388), 'pandas.Timestamp', 'pd.Timestamp', (['now'], {}), '(now)\n', (4383, 4388), True, 'import pandas as pd\n'), ((4597, 4626), 'pandas.Timestamp', 'pd.Timestamp', (['date_to_compare'], {}), '(date_to_compare)\n', (4609, 4626), True, 'import pandas as pd\n'), ((30888, 30921), 'future.utils.iteritems', 'iteritems', (["target.temp['weights']"], {}), "(target.temp['weights'])\n", (30897, 30921), False, 'from future.utils import iteritems\n'), ((61403, 61429), 'future.utils.iteritems', 'iteritems', (['target.children'], {}), '(target.children)\n', (61412, 61429), False, 'from future.utils import iteritems\n'), ((63272, 63298), 'future.utils.iteritems', 'iteritems', (['target.children'], {}), '(target.children)\n', (63281, 63298), False, 'from future.utils import iteritems\n'), ((75804, 75830), 'numpy.linalg.pinv', 'np.linalg.pinv', (['hedge_risk'], {}), '(hedge_risk)\n', (75818, 75830), True, 'import numpy as np\n'), ((75865, 75890), 'numpy.linalg.inv', 'np.linalg.inv', (['hedge_risk'], {}), '(hedge_risk)\n', (75878, 75890), True, 'import numpy as np\n'), ((75913, 75941), 'numpy.matmul', 'np.matmul', (['inv', '(-target_risk)'], {}), '(inv, -target_risk)\n', (75922, 75941), True, 'import numpy as np\n'), ((76046, 76064), 'numpy.isnan', 'np.isnan', (['notional'], {}), '(notional)\n', (76054, 76064), True, 'import numpy as np\n'), ((45069, 45108), 'numpy.matmul', 'np.matmul', (['covar.values', 'weights.values'], {}), '(covar.values, weights.values)\n', (45078, 45108), True, 'import numpy as np\n'), ((48194, 48233), 'numpy.matmul', 'np.matmul', 
(['covar.values', 'weights.values'], {}), '(covar.values, weights.values)\n', (48203, 48233), True, 'import numpy as np\n'), ((53374, 53385), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (53382, 53385), True, 'import numpy as np\n'), ((41600, 41614), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (41607, 41614), True, 'import numpy as np\n'), ((41870, 41884), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (41877, 41884), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from path_explain.utils import set_up_environment
from path_explain.path_explainer_tf import PathExplainerTF
from preprocess import higgs_dataset
from train import build_model
from absl import app
from absl import flags
FLAGS = flags.FLAGS
# CLI flags: how many test inputs to run attributions on, and how many
# interpolation samples the path explainer draws per attribution estimate.
flags.DEFINE_integer('num_examples', 10000, 'Number of inputs to run attributions on')
flags.DEFINE_integer('num_samples', 300, 'Number of samples to use when computing attributions')
def interpret(argv=None):
    """End-to-end attribution pipeline for the trained HIGGS model.

    Loads the dataset splits and the trained weights, keeps only test
    examples the model classifies correctly, then computes per-feature
    attributions and pairwise interactions with PathExplainerTF and saves
    everything as .npy files in the working directory.

    Args:
        argv: unused; present because absl's app.run passes it in.
    """
    # FLAGS.visible_devices / batch_size / weight_decay / etc. are assumed
    # to be defined by the imported train module -- TODO confirm.
    set_up_environment(visible_devices=FLAGS.visible_devices)
    # Train/test/validation splits; feature scaling happens in the loader.
    train_set, test_set, vald_set = higgs_dataset(batch_size=FLAGS.batch_size,
                                                  num_parallel_calls=8,
                                                  buffer_size=10000,
                                                  seed=0,
                                                  scale=True,
                                                  include_vald=True)
    print('Loading model...')
    # for_interpretation=True builds an explanation-friendly variant of the
    # network; by_name=True lets the saved weights map onto that variant.
    model = build_model(weight_decay=FLAGS.weight_decay,
                        num_layers=FLAGS.num_layers,
                        hidden_units=FLAGS.hidden_units,
                        for_interpretation=True)
    model.load_weights('model.h5', by_name=True)
    print('Gathering inputs...')
    # Accumulate roughly 10000 training rows into a single tensor.
    # NOTE(review): training_samples is never used after this loop -- it was
    # presumably intended as the baseline distribution for
    # use_expectation=True, which is not exercised below.
    training_iters = int(10000 / FLAGS.batch_size)
    training_samples = []
    for i, (x_batch, _) in enumerate(train_set):
        training_samples.append(x_batch)
        if i >= training_iters:
            break
    training_samples = tf.concat(training_samples, axis=0)
    input_samples = []
    true_labels = []
    pred_output = []
    num_accumulated = 0
    # Keep only test examples whose thresholded prediction (>0.5) matches
    # the label, until FLAGS.num_examples of them have been collected.
    for x_batch, label_batch in test_set:
        pred_labels = model(x_batch)
        correct_mask = (pred_labels[:, 0].numpy() > 0.5).astype(int) == label_batch
        input_samples.append(x_batch.numpy()[correct_mask])
        pred_output.append(pred_labels.numpy()[correct_mask, 0])
        true_labels.append(label_batch.numpy()[correct_mask])
        num_accumulated += np.sum(correct_mask)
        if num_accumulated >= FLAGS.num_examples:
            break
    input_samples = np.concatenate(input_samples, axis=0).astype(np.float32)
    true_labels = np.concatenate(true_labels, axis=0)
    pred_output = np.concatenate(pred_output, axis=0)
    np.save('input_samples.npy', input_samples)
    np.save('pred_output.npy', pred_output)
    np.save('true_labels.npy', true_labels)
    explainer = PathExplainerTF(model)
    print('Computing attributions...')
    # use_expectation=False with a single all-zero baseline row: plain
    # Integrated-Gradients-style attributions of output index 0.
    attributions = explainer.attributions(inputs=input_samples,
                                          baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=False,
                                          output_indices=0,
                                          verbose=True)
    np.save('attributions.npy', attributions)
    print('Computing interactions...')
    # Pairwise feature interactions with the same baseline and settings.
    interactions = explainer.interactions(inputs=input_samples,
                                          baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=False,
                                          output_indices=0,
                                          verbose=True)
    np.save('interactions.npy', interactions)
if __name__ == '__main__':
app.run(interpret) | [
"numpy.save",
"path_explain.utils.set_up_environment",
"numpy.sum",
"preprocess.higgs_dataset",
"numpy.zeros",
"tensorflow.concat",
"train.build_model",
"absl.app.run",
"absl.flags.DEFINE_integer",
"numpy.concatenate",
"path_explain.path_explainer_tf.PathExplainerTF"
] | [((286, 376), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_examples"""', '(10000)', '"""Number of inputs to run attributions on"""'], {}), "('num_examples', 10000,\n 'Number of inputs to run attributions on')\n", (306, 376), False, 'from absl import flags\n'), ((373, 473), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_samples"""', '(300)', '"""Number of samples to use when computing attributions"""'], {}), "('num_samples', 300,\n 'Number of samples to use when computing attributions')\n", (393, 473), False, 'from absl import flags\n'), ((501, 558), 'path_explain.utils.set_up_environment', 'set_up_environment', ([], {'visible_devices': 'FLAGS.visible_devices'}), '(visible_devices=FLAGS.visible_devices)\n', (519, 558), False, 'from path_explain.utils import set_up_environment\n'), ((596, 722), 'preprocess.higgs_dataset', 'higgs_dataset', ([], {'batch_size': 'FLAGS.batch_size', 'num_parallel_calls': '(8)', 'buffer_size': '(10000)', 'seed': '(0)', 'scale': '(True)', 'include_vald': '(True)'}), '(batch_size=FLAGS.batch_size, num_parallel_calls=8,\n buffer_size=10000, seed=0, scale=True, include_vald=True)\n', (609, 722), False, 'from preprocess import higgs_dataset\n'), ((1012, 1147), 'train.build_model', 'build_model', ([], {'weight_decay': 'FLAGS.weight_decay', 'num_layers': 'FLAGS.num_layers', 'hidden_units': 'FLAGS.hidden_units', 'for_interpretation': '(True)'}), '(weight_decay=FLAGS.weight_decay, num_layers=FLAGS.num_layers,\n hidden_units=FLAGS.hidden_units, for_interpretation=True)\n', (1023, 1147), False, 'from train import build_model\n'), ((1539, 1574), 'tensorflow.concat', 'tf.concat', (['training_samples'], {'axis': '(0)'}), '(training_samples, axis=0)\n', (1548, 1574), True, 'import tensorflow as tf\n'), ((2229, 2264), 'numpy.concatenate', 'np.concatenate', (['true_labels'], {'axis': '(0)'}), '(true_labels, axis=0)\n', (2243, 2264), True, 'import numpy as np\n'), ((2283, 2318), 'numpy.concatenate', 'np.concatenate', 
(['pred_output'], {'axis': '(0)'}), '(pred_output, axis=0)\n', (2297, 2318), True, 'import numpy as np\n'), ((2324, 2367), 'numpy.save', 'np.save', (['"""input_samples.npy"""', 'input_samples'], {}), "('input_samples.npy', input_samples)\n", (2331, 2367), True, 'import numpy as np\n'), ((2372, 2411), 'numpy.save', 'np.save', (['"""pred_output.npy"""', 'pred_output'], {}), "('pred_output.npy', pred_output)\n", (2379, 2411), True, 'import numpy as np\n'), ((2416, 2455), 'numpy.save', 'np.save', (['"""true_labels.npy"""', 'true_labels'], {}), "('true_labels.npy', true_labels)\n", (2423, 2455), True, 'import numpy as np\n'), ((2473, 2495), 'path_explain.path_explainer_tf.PathExplainerTF', 'PathExplainerTF', (['model'], {}), '(model)\n', (2488, 2495), False, 'from path_explain.path_explainer_tf import PathExplainerTF\n'), ((3036, 3077), 'numpy.save', 'np.save', (['"""attributions.npy"""', 'attributions'], {}), "('attributions.npy', attributions)\n", (3043, 3077), True, 'import numpy as np\n'), ((3619, 3660), 'numpy.save', 'np.save', (['"""interactions.npy"""', 'interactions'], {}), "('interactions.npy', interactions)\n", (3626, 3660), True, 'import numpy as np\n'), ((3693, 3711), 'absl.app.run', 'app.run', (['interpret'], {}), '(interpret)\n', (3700, 3711), False, 'from absl import app\n'), ((2043, 2063), 'numpy.sum', 'np.sum', (['correct_mask'], {}), '(correct_mask)\n', (2049, 2063), True, 'import numpy as np\n'), ((2154, 2191), 'numpy.concatenate', 'np.concatenate', (['input_samples'], {'axis': '(0)'}), '(input_samples, axis=0)\n', (2168, 2191), True, 'import numpy as np\n'), ((2650, 2705), 'numpy.zeros', 'np.zeros', (['(1, input_samples.shape[1])'], {'dtype': 'np.float32'}), '((1, input_samples.shape[1]), dtype=np.float32)\n', (2658, 2705), True, 'import numpy as np\n'), ((3233, 3288), 'numpy.zeros', 'np.zeros', (['(1, input_samples.shape[1])'], {'dtype': 'np.float32'}), '((1, input_samples.shape[1]), dtype=np.float32)\n', (3241, 3288), True, 'import numpy as np\n')] 
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import torch
import torch.nn.functional as F
import torch.nn as nn
def encode(item):
    """One-hot encode the categorical fields of a single raw record.

    Fields 0, 3, 4 and 5 hold 1-based category indices (with 3, 3, 11 and
    3 categories respectively).  Each is expanded into a one-hot vector,
    and the remaining fields are appended unchanged, in order.

    Returns:
        1-D float ndarray: the concatenated one-hot vectors followed by
        the untouched numeric fields.
    """
    def one_hot(size, label):
        # `label` is 1-based, hence the -1 shift.
        vec = np.zeros(size)
        vec[label - 1] = 1
        return vec

    categorical = [(0, 3), (3, 3), (4, 11), (5, 3)]
    pieces = [one_hot(size, item[pos]) for pos, size in categorical]
    # Drop the categorical columns; whatever is left passes through as-is.
    pieces.append(np.delete(item, [pos for pos, _ in categorical]))
    return np.concatenate(pieces, axis=0)
def encode_data(data):
    """Encode every record in *data* with :func:`encode`.

    Returns:
        list of 1-D ndarrays, one per input record.
    """
    return [encode(record) for record in data]
def read_data(file):
    """Load a CSV file and return its data rows as a 2-D NumPy array.

    The first line is treated as a header (pandas default) and is not
    included in the returned array.
    """
    frame = pd.read_csv(file)
    rows = [np.array(frame.iloc[idx]) for idx in range(len(frame))]
    return np.array(rows)
def split_data(data):
    """Split an encoded dataset into 60/20/20 train/dev/test partitions.

    Column 22 is the regression target; columns 0-21 are the features.
    Both splits use fixed random seeds, so the partitioning is
    reproducible.

    Returns:
        tuple: (x_train, x_dev, x_test, y_train, y_dev, y_test)
    """
    features, target = data[:, :22], data[:, 22]
    # Hold out 20% as the test set first ...
    x_rest, x_test, y_rest, y_test = train_test_split(
        features, target, test_size=0.2, random_state=0)
    # ... then carve 25% of the remainder (20% overall) off as the dev set.
    x_train, x_dev, y_train, y_dev = train_test_split(
        x_rest, y_rest, test_size=0.25, random_state=0)
    return x_train, x_dev, x_test, y_train, y_dev, y_test
def get_lr_mse(train_x, dev_x, test_x, train_y, dev_y, test_y):
    """Fit a Lasso regression on the training split and report test MSE.

    Parameter order mirrors :func:`split_data`'s output.  ``dev_x`` and
    ``dev_y`` are accepted for interface compatibility but are not used
    (no hyperparameter tuning is performed; Lasso defaults are kept).

    Returns:
        float: mean squared error of the predictions on the test set
        (also printed to stdout).
    """
    # NOTE(review): the original code also drew an unused 100-row sample
    # from the dev split here (dead train_test_split call); it was removed.
    # Local renamed from the misleading `logreg` -- this is a Lasso
    # regressor, not logistic regression.
    model = Lasso()
    model.fit(train_x, train_y)
    prediction = model.predict(test_x)
    result = mean_squared_error(test_y, prediction)
    print(result)
    return result
class MyClassifier(nn.Module):
    """Two-layer fully connected regression head: 22 -> hidden -> 1.

    No nonlinearity is applied between the two layers, so the network is
    one affine map overall (this mirrors the original design).
    """

    def __init__(self, hidden_layer):
        super().__init__()
        self.fc1 = nn.Linear(22, hidden_layer)
        self.fc2 = nn.Linear(hidden_layer, 1)

    def forward(self, x):
        """Map a (batch, 22) input to a (batch, 1) prediction."""
        return self.fc2(self.fc1(x))
def get_mse_net(train_x, dev_x, test_x, train_y, dev_y, test_y):
    """Train the two-layer network for 200 Adam steps and report test MSE.

    Inputs are NumPy arrays as produced by :func:`split_data`; they are
    converted to float32 tensors internally.  ``dev_x``/``dev_y`` are kept
    in the signature for interface compatibility but are not used (no
    model selection is performed; the original also drew an unused 100-row
    dev sample, which has been removed as dead code).

    Returns:
        float: mean squared error on the test set (also printed to stdout).
    """
    train_x = torch.from_numpy(train_x).type(torch.FloatTensor)
    test_x = torch.from_numpy(test_x).type(torch.FloatTensor)
    train_y = torch.from_numpy(train_y).type(torch.FloatTensor)
    test_y = torch.from_numpy(test_y).type(torch.FloatTensor)
    model = MyClassifier(20)
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    for _ in range(200):
        # BUG FIX: the model outputs shape (N, 1) while the targets are
        # (N,).  Passing them to MSELoss unsqueezed made it broadcast to an
        # (N, N) difference matrix -- a silently wrong objective.  Squeeze
        # the prediction so the loss compares element-wise.
        y_pred = model(train_x).squeeze(1)
        loss = loss_fn(y_pred, train_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    prediction = model(test_x).squeeze(1)
    result = mean_squared_error(test_y, prediction.detach().numpy())
    print(result)
    return result
# --- Load and one-hot-encode the three cohorts (CSVs expected in CWD) ---
femaleData = np.array(encode_data(read_data("FEMALE.csv")))
maleData = np.array(encode_data(read_data("MALE.csv")))
mixedData = np.array(encode_data(read_data("MIXED.csv")))
# --- 60/20/20 train/dev/test split of each cohort ---
male_train_x, male_dev_x, male_test_x, male_train_y, male_dev_y, male_test_y = split_data(maleData)
female_train_x, female_dev_x, female_test_x, female_train_y, female_dev_y, female_test_y = split_data(femaleData)
mix_train_x, mix_dev_x, mix_test_x, mix_train_y, mix_dev_y, mix_test_y = split_data(mixedData)
# Draw a fixed 100-row sample from each cohort's training split; these act
# as the small "target-domain" samples below (seeds fixed for repeatability).
_, t_male_train_x, _, t_male_train_y = train_test_split(male_train_x, male_train_y, test_size=100, random_state=0)
_, t_female_train_x, _, t_female_train_y = train_test_split(female_train_x, female_train_y, test_size=100, random_state=0)
_, t_mix_train_x, _, t_mix_train_y = train_test_split(mix_train_x, mix_train_y, test_size=100, random_state=0)
# Oversample each 100-row sample by tiling (70x / 65x / 48x), presumably to
# roughly balance it against the other two cohorts' full training sets --
# TODO confirm the intended ratios.  The y vectors are reshaped to (100, 1)
# before tiling because np.tile on a 1-D array with (k, 1) would otherwise
# produce a (k, 100) matrix instead of k stacked copies.
t_male_train_x = np.tile(t_male_train_x, (70, 1))
t_male_train_y = np.reshape(t_male_train_y, (100,1))
t_male_train_y = np.tile(t_male_train_y, (70, 1))
t_male_train_y = np.reshape(t_male_train_y, (7000,))
t_female_train_x = np.tile(t_female_train_x, (65, 1))
t_female_train_y = np.reshape(t_female_train_y, (100,1))
t_female_train_y = np.tile(t_female_train_y, (65, 1))
t_female_train_y = np.reshape(t_female_train_y, (6500,))
t_mix_train_x = np.tile(t_mix_train_x, (48, 1))
t_mix_train_y = np.reshape(t_mix_train_y, (100,1))
t_mix_train_y = np.tile(t_mix_train_y, (48, 1))
t_mix_train_y = np.reshape(t_mix_train_y, (4800,))
# Build each "target" training set: the other two cohorts' full training
# data plus the tiled 100-row sample of the cohort itself.
mix_target_x = np.append(male_train_x, female_train_x, axis=0)
mix_target_x = np.append(mix_target_x, t_mix_train_x, axis=0)
mix_target_y = np.append(male_train_y, female_train_y, axis=0)
mix_target_y = np.append(mix_target_y, t_mix_train_y, axis=0)
male_target_x = np.append(mix_train_x, female_train_x, axis=0)
male_target_x = np.append(male_target_x, t_male_train_x, axis=0)
male_target_y = np.append(mix_train_y, female_train_y, axis=0)
male_target_y = np.append(male_target_y, t_male_train_y, axis=0)
female_target_x = np.append(mix_train_x, male_train_x, axis=0)
female_target_x = np.append(female_target_x, t_female_train_x, axis=0)
female_target_y = np.append(mix_train_y, male_train_y, axis=0)
female_target_y = np.append(female_target_y, t_female_train_y, axis=0)
# Lasso regression baseline, averaged over the three cohorts.  (The
# original comment said "LogisticRegression", but get_lr_mse fits a
# sklearn Lasso regressor.)
reg_result = 0
reg_result = get_lr_mse(mix_target_x, mix_dev_x, mix_test_x, mix_target_y, mix_dev_y, mix_test_y)
reg_result = reg_result + get_lr_mse(male_target_x, male_dev_x, male_test_x, male_target_y, male_dev_y, male_test_y)
reg_result = reg_result + get_lr_mse(female_target_x, female_dev_x, female_test_x, female_target_y, female_dev_y, female_test_y)
print(reg_result/3)
#
# Neural-network result, averaged over the same three cohorts.
net_result = 0
net_result = get_mse_net(mix_target_x, mix_dev_x, mix_test_x, mix_target_y, mix_dev_y, mix_test_y)
net_result = net_result + get_mse_net(male_target_x, male_dev_x, male_test_x, male_target_y, male_dev_y, male_test_y)
net_result = net_result + get_mse_net(female_target_x, female_dev_x, female_test_x, female_target_y, female_dev_y, female_test_y)
print(net_result/3) | [
"torch.nn.MSELoss",
"torch.from_numpy",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"sklearn.linear_model.Lasso",
"numpy.append",
"numpy.array",
"numpy.tile",
"numpy.reshape",
"torch.nn.Linear",
"numpy.delete",
"numpy.concatenate",
"sklearn.metrics.mean_sq... | [((3545, 3620), 'sklearn.model_selection.train_test_split', 'train_test_split', (['male_train_x', 'male_train_y'], {'test_size': '(100)', 'random_state': '(0)'}), '(male_train_x, male_train_y, test_size=100, random_state=0)\n', (3561, 3620), False, 'from sklearn.model_selection import train_test_split\n'), ((3664, 3743), 'sklearn.model_selection.train_test_split', 'train_test_split', (['female_train_x', 'female_train_y'], {'test_size': '(100)', 'random_state': '(0)'}), '(female_train_x, female_train_y, test_size=100, random_state=0)\n', (3680, 3743), False, 'from sklearn.model_selection import train_test_split\n'), ((3781, 3854), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mix_train_x', 'mix_train_y'], {'test_size': '(100)', 'random_state': '(0)'}), '(mix_train_x, mix_train_y, test_size=100, random_state=0)\n', (3797, 3854), False, 'from sklearn.model_selection import train_test_split\n'), ((3873, 3905), 'numpy.tile', 'np.tile', (['t_male_train_x', '(70, 1)'], {}), '(t_male_train_x, (70, 1))\n', (3880, 3905), True, 'import numpy as np\n'), ((3923, 3959), 'numpy.reshape', 'np.reshape', (['t_male_train_y', '(100, 1)'], {}), '(t_male_train_y, (100, 1))\n', (3933, 3959), True, 'import numpy as np\n'), ((3976, 4008), 'numpy.tile', 'np.tile', (['t_male_train_y', '(70, 1)'], {}), '(t_male_train_y, (70, 1))\n', (3983, 4008), True, 'import numpy as np\n'), ((4026, 4061), 'numpy.reshape', 'np.reshape', (['t_male_train_y', '(7000,)'], {}), '(t_male_train_y, (7000,))\n', (4036, 4061), True, 'import numpy as np\n'), ((4082, 4116), 'numpy.tile', 'np.tile', (['t_female_train_x', '(65, 1)'], {}), '(t_female_train_x, (65, 1))\n', (4089, 4116), True, 'import numpy as np\n'), ((4136, 4174), 'numpy.reshape', 'np.reshape', (['t_female_train_y', '(100, 1)'], {}), '(t_female_train_y, (100, 1))\n', (4146, 4174), True, 'import numpy as np\n'), ((4193, 4227), 'numpy.tile', 'np.tile', (['t_female_train_y', '(65, 1)'], {}), 
'(t_female_train_y, (65, 1))\n', (4200, 4227), True, 'import numpy as np\n'), ((4247, 4284), 'numpy.reshape', 'np.reshape', (['t_female_train_y', '(6500,)'], {}), '(t_female_train_y, (6500,))\n', (4257, 4284), True, 'import numpy as np\n'), ((4302, 4333), 'numpy.tile', 'np.tile', (['t_mix_train_x', '(48, 1)'], {}), '(t_mix_train_x, (48, 1))\n', (4309, 4333), True, 'import numpy as np\n'), ((4350, 4385), 'numpy.reshape', 'np.reshape', (['t_mix_train_y', '(100, 1)'], {}), '(t_mix_train_y, (100, 1))\n', (4360, 4385), True, 'import numpy as np\n'), ((4401, 4432), 'numpy.tile', 'np.tile', (['t_mix_train_y', '(48, 1)'], {}), '(t_mix_train_y, (48, 1))\n', (4408, 4432), True, 'import numpy as np\n'), ((4449, 4483), 'numpy.reshape', 'np.reshape', (['t_mix_train_y', '(4800,)'], {}), '(t_mix_train_y, (4800,))\n', (4459, 4483), True, 'import numpy as np\n'), ((4500, 4547), 'numpy.append', 'np.append', (['male_train_x', 'female_train_x'], {'axis': '(0)'}), '(male_train_x, female_train_x, axis=0)\n', (4509, 4547), True, 'import numpy as np\n'), ((4563, 4609), 'numpy.append', 'np.append', (['mix_target_x', 't_mix_train_x'], {'axis': '(0)'}), '(mix_target_x, t_mix_train_x, axis=0)\n', (4572, 4609), True, 'import numpy as np\n'), ((4625, 4672), 'numpy.append', 'np.append', (['male_train_y', 'female_train_y'], {'axis': '(0)'}), '(male_train_y, female_train_y, axis=0)\n', (4634, 4672), True, 'import numpy as np\n'), ((4688, 4734), 'numpy.append', 'np.append', (['mix_target_y', 't_mix_train_y'], {'axis': '(0)'}), '(mix_target_y, t_mix_train_y, axis=0)\n', (4697, 4734), True, 'import numpy as np\n'), ((4752, 4798), 'numpy.append', 'np.append', (['mix_train_x', 'female_train_x'], {'axis': '(0)'}), '(mix_train_x, female_train_x, axis=0)\n', (4761, 4798), True, 'import numpy as np\n'), ((4815, 4863), 'numpy.append', 'np.append', (['male_target_x', 't_male_train_x'], {'axis': '(0)'}), '(male_target_x, t_male_train_x, axis=0)\n', (4824, 4863), True, 'import numpy as np\n'), ((4880, 4926), 
'numpy.append', 'np.append', (['mix_train_y', 'female_train_y'], {'axis': '(0)'}), '(mix_train_y, female_train_y, axis=0)\n', (4889, 4926), True, 'import numpy as np\n'), ((4943, 4991), 'numpy.append', 'np.append', (['male_target_y', 't_male_train_y'], {'axis': '(0)'}), '(male_target_y, t_male_train_y, axis=0)\n', (4952, 4991), True, 'import numpy as np\n'), ((5011, 5055), 'numpy.append', 'np.append', (['mix_train_x', 'male_train_x'], {'axis': '(0)'}), '(mix_train_x, male_train_x, axis=0)\n', (5020, 5055), True, 'import numpy as np\n'), ((5074, 5126), 'numpy.append', 'np.append', (['female_target_x', 't_female_train_x'], {'axis': '(0)'}), '(female_target_x, t_female_train_x, axis=0)\n', (5083, 5126), True, 'import numpy as np\n'), ((5145, 5189), 'numpy.append', 'np.append', (['mix_train_y', 'male_train_y'], {'axis': '(0)'}), '(mix_train_y, male_train_y, axis=0)\n', (5154, 5189), True, 'import numpy as np\n'), ((5208, 5260), 'numpy.append', 'np.append', (['female_target_y', 't_female_train_y'], {'axis': '(0)'}), '(female_target_y, t_female_train_y, axis=0)\n', (5217, 5260), True, 'import numpy as np\n'), ((301, 312), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (309, 312), True, 'import numpy as np\n'), ((352, 363), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (360, 363), True, 'import numpy as np\n'), ((404, 416), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (412, 416), True, 'import numpy as np\n'), ((465, 476), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (473, 476), True, 'import numpy as np\n'), ((528, 550), 'numpy.delete', 'np.delete', (['item', 'index'], {}), '(item, index)\n', (537, 550), True, 'import numpy as np\n'), ((565, 632), 'numpy.concatenate', 'np.concatenate', (['(year, band, group, denomination, new_item)'], {'axis': '(0)'}), '((year, band, group, denomination, new_item), axis=0)\n', (579, 632), True, 'import numpy as np\n'), ((793, 810), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (804, 810), True, 
'import pandas as pd\n'), ((964, 978), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (972, 978), True, 'import numpy as np\n'), ((1114, 1174), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'label'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(data, label, test_size=0.2, random_state=0)\n', (1130, 1174), False, 'from sklearn.model_selection import train_test_split\n'), ((1212, 1286), 'sklearn.model_selection.train_test_split', 'train_test_split', (['pre_x_train', 'pre_y_train'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(pre_x_train, pre_y_train, test_size=0.25, random_state=0)\n', (1228, 1286), False, 'from sklearn.model_selection import train_test_split\n'), ((1444, 1505), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dev_x', 'dev_y'], {'test_size': '(100)', 'random_state': '(0)'}), '(dev_x, dev_y, test_size=100, random_state=0)\n', (1460, 1505), False, 'from sklearn.model_selection import train_test_split\n'), ((1519, 1526), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (1524, 1526), False, 'from sklearn.linear_model import Lasso\n'), ((1613, 1651), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_y', 'prediction'], {}), '(test_y, prediction)\n', (1631, 1651), False, 'from sklearn.metrics import mean_squared_error\n'), ((2461, 2522), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dev_x', 'dev_y'], {'test_size': '(100)', 'random_state': '(0)'}), '(dev_x, dev_y, test_size=100, random_state=0)\n', (2477, 2522), False, 'from sklearn.model_selection import train_test_split\n'), ((2566, 2584), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (2582, 2584), False, 'import torch\n'), ((901, 927), 'numpy.array', 'np.array', (['raw_data.iloc[i]'], {}), '(raw_data.iloc[i])\n', (909, 927), True, 'import numpy as np\n'), ((1823, 1850), 'torch.nn.Linear', 'nn.Linear', (['(22)', 'hidden_layer'], {}), '(22, hidden_layer)\n', (1832, 1850), True, 
'import torch.nn as nn\n'), ((1870, 1896), 'torch.nn.Linear', 'nn.Linear', (['hidden_layer', '(1)'], {}), '(hidden_layer, 1)\n', (1879, 1896), True, 'import torch.nn as nn\n'), ((2070, 2095), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (2086, 2095), False, 'import torch\n'), ((2132, 2155), 'torch.from_numpy', 'torch.from_numpy', (['dev_x'], {}), '(dev_x)\n', (2148, 2155), False, 'import torch\n'), ((2193, 2217), 'torch.from_numpy', 'torch.from_numpy', (['test_x'], {}), '(test_x)\n', (2209, 2217), False, 'import torch\n'), ((2256, 2281), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (2272, 2281), False, 'import torch\n'), ((2318, 2341), 'torch.from_numpy', 'torch.from_numpy', (['dev_y'], {}), '(dev_y)\n', (2334, 2341), False, 'import torch\n'), ((2379, 2403), 'torch.from_numpy', 'torch.from_numpy', (['test_y'], {}), '(test_y)\n', (2395, 2403), False, 'import torch\n')] |
from matplotlib import pyplot as plt
import numpy as np
from fractions import Fraction
# Exponents of the analytic He3(a,g)Be7 reaction-rate fit, written as exact
# Fractions (as in the original code).  Unused f12, f32 and fm1 removed.
f13 = Fraction('1/3')
f23 = Fraction('2/3')
f43 = Fraction('4/3')
f53 = Fraction('5/3')
f56 = Fraction('5/6')
fm23 = Fraction('-2/3')
fm32 = Fraction('-3/2')
# Powers of t9 from original code
t9 = np.arange(0.01, 2, 0.01)
t9a = t9/(1+0.1071*t9)
# float() is required inside np.exp: a Fraction exponent there would yield
# an object-dtype array, and np.exp cannot handle that (AttributeError: exp).
rate = (4.817e+6)*(t9**fm23)*np.exp(-14.964/(t9**float(f13)))*(1+0.0325*(t9**f13)-(1.04e-3)*(t9**f23)-(2.37e-4)*t9-(8.11e-5)*(t9**f43)-(4.69e-5)*(t9**f53))+(5.938e+6)*(t9a**f56)*(t9**fm32)*np.exp(-12.859/(t9a**float(f13)))
plt.plot(t9, rate, label='Old Data')
# Tabulated reference rates at selected T9 values, overlaid as red dots.
t9_new = [.01, .011, .012, .013, .014, .015, .016, .018, .02, .025, .03, .04, .05, .06, .07, .08, .09, .1, .11, .12, .13, .14, .15, .16, .18, .2, .25, .3, .35, .4, .45, .5, .6, .7, .8, .9, 1, 1.25, 1.5, 1.75, 2]
rate_new = [1.715e-18, 1.035e-17, 5.079e-17, 2.104e-16, 7.578e-16, 2.426e-15, 7.028e-15, 4.609e-14, 2.325e-13, 5.918e-12, 6.951e-11, 2.493e-9, 3.151e-8, 2.168e-7, 1.007e-6, 3.56e-6, 1.033e-5, 2.578e-5, 5.726e-5, .0001159, .0002173, .0003826, .0006392, .001021, .002333, .004739, .01945, .05655, .1317, .2632, .4705, .7731, 1.739, 3.296, 5.55, 8.582, 12.45, 25.92, 44.9, 69.19, 98.47]
plt.plot(t9_new, rate_new, 'ro', label='New Data')
plt.xlabel('T9')
plt.ylabel('Reaction Rates (cm3/s/mol)')  # fixed: closing parenthesis was missing
plt.legend()
plt.title('He3 (a,g) Be7')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"fractions.Fraction",
"matplotlib.pyplot.xlabel"
] | [((91, 106), 'fractions.Fraction', 'Fraction', (['"""1/3"""'], {}), "('1/3')\n", (99, 106), False, 'from fractions import Fraction\n'), ((111, 126), 'fractions.Fraction', 'Fraction', (['"""2/3"""'], {}), "('2/3')\n", (119, 126), False, 'from fractions import Fraction\n'), ((131, 146), 'fractions.Fraction', 'Fraction', (['"""4/3"""'], {}), "('4/3')\n", (139, 146), False, 'from fractions import Fraction\n'), ((151, 166), 'fractions.Fraction', 'Fraction', (['"""5/3"""'], {}), "('5/3')\n", (159, 166), False, 'from fractions import Fraction\n'), ((171, 186), 'fractions.Fraction', 'Fraction', (['"""1/2"""'], {}), "('1/2')\n", (179, 186), False, 'from fractions import Fraction\n'), ((191, 206), 'fractions.Fraction', 'Fraction', (['"""3/2"""'], {}), "('3/2')\n", (199, 206), False, 'from fractions import Fraction\n'), ((211, 226), 'fractions.Fraction', 'Fraction', (['"""5/6"""'], {}), "('5/6')\n", (219, 226), False, 'from fractions import Fraction\n'), ((231, 245), 'fractions.Fraction', 'Fraction', (['"""-1"""'], {}), "('-1')\n", (239, 245), False, 'from fractions import Fraction\n'), ((251, 267), 'fractions.Fraction', 'Fraction', (['"""-2/3"""'], {}), "('-2/3')\n", (259, 267), False, 'from fractions import Fraction\n'), ((273, 289), 'fractions.Fraction', 'Fraction', (['"""-3/2"""'], {}), "('-3/2')\n", (281, 289), False, 'from fractions import Fraction\n'), ((327, 351), 'numpy.arange', 'np.arange', (['(0.01)', '(2)', '(0.01)'], {}), '(0.01, 2, 0.01)\n', (336, 351), True, 'import numpy as np\n'), ((635, 671), 'matplotlib.pyplot.plot', 'plt.plot', (['t9', 'rate'], {'label': '"""Old Data"""'}), "(t9, rate, label='Old Data')\n", (643, 671), True, 'from matplotlib import pyplot as plt\n'), ((672, 1381), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 0.011, 0.012, 0.013, 0.014, 0.015, 0.016, 0.018, 0.02, 0.025, 0.03, \n 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, \n 0.16, 0.18, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1,\n 1.25, 
1.5, 1.75, 2]', '[1.715e-18, 1.035e-17, 5.079e-17, 2.104e-16, 7.578e-16, 2.426e-15, \n 7.028e-15, 4.609e-14, 2.325e-13, 5.918e-12, 6.951e-11, 2.493e-09, \n 3.151e-08, 2.168e-07, 1.007e-06, 3.56e-06, 1.033e-05, 2.578e-05, \n 5.726e-05, 0.0001159, 0.0002173, 0.0003826, 0.0006392, 0.001021, \n 0.002333, 0.004739, 0.01945, 0.05655, 0.1317, 0.2632, 0.4705, 0.7731, \n 1.739, 3.296, 5.55, 8.582, 12.45, 25.92, 44.9, 69.19, 98.47]', '"""ro"""'], {'label': '"""New Data"""'}), "([0.01, 0.011, 0.012, 0.013, 0.014, 0.015, 0.016, 0.018, 0.02, \n 0.025, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13,\n 0.14, 0.15, 0.16, 0.18, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7,\n 0.8, 0.9, 1, 1.25, 1.5, 1.75, 2], [1.715e-18, 1.035e-17, 5.079e-17, \n 2.104e-16, 7.578e-16, 2.426e-15, 7.028e-15, 4.609e-14, 2.325e-13, \n 5.918e-12, 6.951e-11, 2.493e-09, 3.151e-08, 2.168e-07, 1.007e-06, \n 3.56e-06, 1.033e-05, 2.578e-05, 5.726e-05, 0.0001159, 0.0002173, \n 0.0003826, 0.0006392, 0.001021, 0.002333, 0.004739, 0.01945, 0.05655, \n 0.1317, 0.2632, 0.4705, 0.7731, 1.739, 3.296, 5.55, 8.582, 12.45, 25.92,\n 44.9, 69.19, 98.47], 'ro', label='New Data')\n", (680, 1381), True, 'from matplotlib import pyplot as plt\n'), ((1283, 1299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T9"""'], {}), "('T9')\n", (1293, 1299), True, 'from matplotlib import pyplot as plt\n'), ((1300, 1339), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reaction Rates (cm3/s/mol"""'], {}), "('Reaction Rates (cm3/s/mol')\n", (1310, 1339), True, 'from matplotlib import pyplot as plt\n'), ((1340, 1352), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1350, 1352), True, 'from matplotlib import pyplot as plt\n'), ((1353, 1379), 'matplotlib.pyplot.title', 'plt.title', (['"""He3 (a,g) Be7"""'], {}), "('He3 (a,g) Be7')\n", (1362, 1379), True, 'from matplotlib import pyplot as plt\n'), ((1380, 1390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1388, 1390), True, 'from matplotlib import 
pyplot as plt\n')] |
# PyPore
#
# Idea: GUI for porE
#
# Author: <NAME>
# Dates:
# 06.01.2020 -- init, pore functions
# 09.01.2020 -- use new pore.f90 files
# build user input
# pore and get_psd are working
# 10.01.2020 -- include all Fortran porE (original,subgrid,window)
# include grid_density input
# include some help information
#    TODO: add visualization of structure and pores; may use pygui
# June 1st, 2020 -> restructuring due to new fortran routines
#from porE import pore
#from porE_subgrid import pore as pore_subgrid
##from porE_window import pore as pore_window
#from get_PSD import *
import pore
# module pore_options
# - subroutines: osa, gpa_fullgrid, gpa_gridpera, do_gpa (this executes the GPA evaluation), get_psd
import os
import math
import numpy as np
from tkinter import *
from tkinter import scrolledtext as st
from tkinter.filedialog import askopenfilename
from ase.io import read
from ase.visualize import view
from tkinter.ttk import Frame
# define some abbreviations
osa = pore.porosity.osa                    # overlapping spheres approach (method '1' in the GUI)
gpa = pore.porosity.do_gpa                 # "executes the GPA evaluation" per the module comment above
gpa_FullGrid = pore.porosity.gpa_fullgrid  # grid point approach with explicit grid points per axis (used by cmd_pore)
gpa_GridPerA = pore.porosity.gpa_gridpera  # GPA variant taking a grid density instead of explicit points -- TODO confirm
get_PSD = pore.psd.get_psd                 # pore size distribution calculation (used by cmd_psd)
# the actual class
class PyPore(Frame):
    """Tkinter GUI front end for the porE Fortran porosity/PSD routines."""
    # Predefined MOFs: maps a two-letter key to a human-readable description
    # and an xyz file name.  'ud' (user defined) resolves to the locally
    # written pypore.xyz; the others live under self.dirname (see cmd_pore).
    settings = {'ud' : {'description' :'user defined','file':'pypore.xyz'},
                'do' : {'description' :'DUT-8(Ni) open','file':'dut_8_open.xyz'},
                'vo' : {'description' :'DUT-8(Ni) open vcrelax','file':'dut_8_open_vcrelax.xyz'},
                'dc' : {'description' :'DUT-8(Ni) closed','file':'dut_8_closed.xyz'},
                'vc' : {'description' :'DUT-8(Ni) closed vcrelax','file':'dut_8_closed_vcrelax.xyz'},
                'u6' : {'description' :'UiO-66','file':'uio66.xyz'},
                'u7' : {'description' :'UiO-67','file':'uio67.xyz'},
                'u8' : {'description' :'UiO-68','file':'uio68.xyz'},
                'm5' : {'description' :'MOF-5','file':'mof5.xyz'},
                'ir' : {'description' :'IRMOF-10','file':'irmof10.xyz'},
                'm2' : {'description' :'MOF210','file':'mof210.xyz'},
                'h1' : {'description' :'HKUST-1, open Cu sites','file':'hkust1.xyz'},
                'ho' : {'description' :'HKUST-1, O-Cu-Cu-O','file':'hkust1_with_O.xyz'},
                'c6' : {'description' :'C60@MOF','file':'c60_MOF.xyz'},
                'be' : {'description' :'Benzene, opt','file':'benzene.xyz'},
                'b2' : {'description' :'Benzene, exp','file':'benzene_exp.xyz'},
                'bc' : {'description' :'Benzene, C only','file':'benzene_Conly.xyz'},
                'ha' : {'description' :'H atom','file':'h_atom.xyz'}}
    def __init__(self):
        """Build the main window (Porosity / PSD / Quit buttons) and block in the Tk mainloop."""
        # Generate the general gui setup
        master = Tk()
        self.master = master
        self.master.title("PyPorE")
        # Predefined porE xyz files
        self.dirname = 'structures/xyz/'
        # Action buttons
        #b1 = Button(self.master, text='Open', command=self.open_file).grid(column=2,row=3, sticky=W, pady=4)
        Button(self.master, text='Porosity', command=self.get_pore).grid(column=3,row=3, sticky=W, pady=4)
        Button(self.master, text='PSD', command=self.get_psd).grid(column=4,row=3, sticky=W, pady=4)
        Button(self.master, text='Quit', command=self.master.quit).grid(column=6,row=3, sticky=W, pady=4)
        # Initial values for the grid approach (may be overwritten in cmd_pore)
        self.probe_r = 1.2
        self.grid_a = 5
        self.grid_b = 5
        self.grid_c = 5
        self.init_newwin()
        self.l1 = None
        self.cell = None
        # Gui -- mainloop() blocks here until the window is closed
        mainloop()
    def get_pore(self):
        """Rebuild the result window with the porosity input form (structure + method)."""
        # main function for porosity calculation
        self.newwin.destroy()
        self.init_newwin()
        # Initial help text shown in the scrollable text widget
        s = ''' PyPorE: porosity \n
        \t structure \n
        \t\t\t ud -- user defined; use "open" to select the structure \n
        \t\t\t do -- predefined DUT-8(Ni) open, see others as well \n
        \t methods \n
        \t\t\t 1 -- overlaping spheres approach (OSA) \n
        \t\t\t 2 -- grid approach \n
        \t\t\t\t enter value and click in the field to update the value\n
        \t executable: \n
        \t\t\t GPA subgrid -- speedup for grid approach, can also calculate pore windows (using output_PSD file) \n
        \t Authors: \n
        \t\t\t <NAME> (Fortran, core routines) \n
        \t\t\t <NAME> (Python, GUI)'''
        self.text.insert(END,s)
        self.text.pack()
        # Search input fields
        # What property to search for: when, where or ...
        Label(self.controls, text="Select structure").grid(row=1,column=1)
        Label(self.controls, text="Select method").grid(row=3,column=1)
        Label(self.controls, text="e.g. do").grid(row=1,column=3)
        #Label(self.controls, text="e.g. 1").grid(row=3,column=3)
        # Entry fields: e1 = structure key, e2 = method ('1' OSA / '2' grid), e3 = description
        e1 = StringVar(self.newwin)
        self.e1 = e1
        self.e1.set("ud") # default value
        w1= OptionMenu(self.controls, self.e1, 'ud','do','vo','dc','vc','u6','u7','m5','ir','m2','h1','ho','c6','be','b2','bc','ha',command=self.refresh_pore)
        self.w1 = w1
        e2 = Entry(self.controls)
        e2.insert(END,'1') # default value
        e3 = Entry(self.controls)
        e3.insert(END,self.settings[self.e1.get()]['description'])
        self.e1 = e1
        self.e2 = e2
        self.e3 = e3
        exe_pore = StringVar()
        exe_pore.set('subgrid')
        self.exe_pore = exe_pore
        #w2= OptionMenu(self.controls, self.exe_pore, 'subgrid',command=self.refresh_pore)
        #self.w2 = w2
        self.w1.grid(row=1, column=2)
        self.e2.grid(row=3, column=2)
        self.e3.grid(row=2, column=2)
        #self.w2.grid(row=3, column=3)
        # Action buttons ('Open' only for user-defined structures)
        if self.e1.get() == 'ud':
            b3 = Button(self.controls, text='Open', command=self.user_input)
            b3.grid(row=4,column=1)
            self.b3 = b3
        b1 = Button(self.controls, text='OK', command=self.check_pore)
        b1.grid(row=4,column=2)
        self.b1 = b1
        self.newwin.update()
        self.newwin.deiconify()
def refresh_pore(self,event):
# refresh the gui
# the user can select the 1st method then the 2nd one
# if method 1 we need not the additional input fields
# if method 2 we need the additional input fields
if self.e2.get() == '1':
try:
# we ever have these elements
self.b3.destroy()
self.b1.destroy()
# only for method 2 we have these elements
self.l4.destroy()
self.l5.destroy()
self.l6.destroy()
self.l7.destroy()
self.l8.destroy()
self.e4.destroy()
self.e5.destroy()
self.e6.destroy()
self.e7.destroy()
self.e8.destroy()
except: 'Nothing'
if self.e2.get() == '2':
try:
self.b3.destroy()
self.b1.destroy()
except: 'Nothing'
self.e3.delete(0,END)
self.e3.insert(END,self.settings[self.e1.get()]['description'])
if self.e2.get() == '1':
b1 = Button(self.controls, text='OK', command=self.check_pore)
b1.grid(row=4,column=2)
self.b1 = b1
if self.e1.get() == 'ud':
b3 = Button(self.controls, text='Open', command=self.user_input)
b3.grid(row=4,column=1)
self.b3 = b3
if self.e2.get() == '2':
b1 = Button(self.controls, text='OK', command=self.cmd_pore)
b1.grid(row=9,column=2)
self.b1 = b1
if self.e1.get() == 'ud':
b3 = Button(self.controls, text='Open', command=self.user_input)
b3.grid(row=9,column=1)
self.b3 = b3
if self.e1.get() != 'ud':
self.b3.destroy()
def refresh_psd(self,event):
# refresh the gui
# if ud we need a open button
# if not ud we need no open button
if self.e1.get() == 'ud':
b3 = Button(self.controls, text='Open', command=self.user_input)
b3.grid(row=4,column=1)
self.b3 =b3
if self.e1.get() != 'ud':
self.b3.destroy()
    def check_pore(self):
        """Dispatch on the chosen method: run OSA directly, or build the grid-parameter form.

        Method '1' runs cmd_pore immediately; method '2' first adds entry
        fields for probe radius, grid points per axis and grid density.
        """
        # check the input
        # if method 1 is chosen we can run porE
        # if method 2 is chosen we need additional input
        self.refresh_pore(self)
        self.get_target()
        if self.target[1] == '1':
            self.cmd_pore()
        if self.target[1] == '2':
            self.b1.destroy()
            if self.e1.get() == 'ud':
                self.b3.destroy()
            # labels
            l4 = Label(self.controls, text="probe_r")
            l4.grid(row=4,column=1)
            self.l4 = l4
            l5 = Label(self.controls, text="grid_a")
            l5.grid(row=5,column=1)
            self.l5 = l5
            l6 = Label(self.controls, text="grid_b")
            l6.grid(row=6,column=1)
            self.l6 = l6
            l7 = Label(self.controls, text="grid_c")
            l7.grid(row=7,column=1)
            self.l7 = l7
            l8 = Label(self.controls, text="grid_density")
            l8.grid(row=8,column=1)
            self.l8 = l8
            # entries: e4 = probe radius, e5/e6/e7 = grid points per cell axis, e8 = grid density
            e4 = Entry(self.controls)
            e4.insert(END,'1.2') # default value
            self.e4 = e4
            grid_a = StringVar()
            self.grid_a = grid_a
            self.grid_a.set('5')
            e5 = Entry(self.controls)
            e5.insert(END,'5') # default value
            self.e5 = e5
            # Enter value and clicking in the field updates the density field (e8)
            self.e5.bind("<Button-1>", self.grid2grid_density)
            grid_b = StringVar()
            self.grid_b = grid_b
            self.grid_b.set('5')
            e6 = Entry(self.controls)
            e6.insert(END,'5') # default value
            self.e6 = e6
            # Enter value and clicking in the field updates the density field (e8)
            self.e6.bind("<Button-1>", self.grid2grid_density)
            grid_c = StringVar()
            self.grid_c = grid_c
            self.grid_c.set('5')
            e7 = Entry(self.controls)
            e7.insert(END,'5') # default value
            self.e7 = e7
            # Enter value and clicking in the field updates the density field (e8)
            self.e7.bind("<Button-1>", self.grid2grid_density)
            g = StringVar()
            self.g = g
            self.g.set('5')
            g.trace("w", self.grid_density2grid)
            e8 = Entry(self.controls)
            e8.insert(END,self.g.get()) # default value
            self.e8 = e8
            # Enter value and clicking in the field updates the grid fields (e5/e6/e7)
            self.e8.bind("<Button-1>", self.grid_density2grid)
            self.e4.grid(row=4, column=2)
            self.e5.grid(row=5, column=2)
            self.e6.grid(row=6, column=2)
            self.e7.grid(row=7, column=2)
            self.e8.grid(row=8, column=2)
            if self.e1.get() == 'ud':
                b3 = Button(self.controls, text='Open', command=self.user_input)
                b3.grid(row=9,column=1)
                self.b3 =b3
            b1 = Button(self.controls, text='OK', command=self.cmd_pore)
            b1.grid(row=9,column=2)
            self.b1 = b1
            self.newwin.update()
            self.newwin.deiconify()
            self.controls.grid_rowconfigure(10, weight=1)
            self.controls.grid_columnconfigure(1, weight=1)
    def grid_density2grid(self,event):
        """Convert the grid density (e8) into grid point counts (e5/e6/e7).

        Reads the cell vectors from line 2 of the structure's xyz file and
        sets grid points per axis = ceil(density * |cell vector|).
        """
        # grid_density -> grid_a, grid_b, grid_c
        # (the original comment stated the opposite direction; this method
        #  reads e8 and writes e5/e6/e7)
        if self.e1.get() != 'ud':
            f = open(self.dirname+self.settings[self.e1.get()]['file'],'r')
        if self.e1.get() == 'ud':
            f = open(self.settings[self.e1.get()]['file'],'r')
        ll = f.readlines()
        f.close()
        # 3x3 cell matrix: row i = lattice vector i, taken from the 9 numbers
        # on the second line of the xyz file
        cell = np.zeros([3,3])
        cell[0,0] = ll[1].split()[0]
        cell[0,1] = ll[1].split()[1]
        cell[0,2] = ll[1].split()[2]
        cell[1,0] = ll[1].split()[3]
        cell[1,1] = ll[1].split()[4]
        cell[1,2] = ll[1].split()[5]
        cell[2,0] = ll[1].split()[6]
        cell[2,1] = ll[1].split()[7]
        cell[2,2] = ll[1].split()[8]
        self.cell = cell
        g = float(self.e8.get())
        grid_a = math.ceil(g*np.sqrt(self.cell[0,0]**2+self.cell[0,1]**2+self.cell[0,2]**2)) # +1 ?
        grid_b = math.ceil(g*np.sqrt(self.cell[1,0]**2+self.cell[1,1]**2+self.cell[1,2]**2))
        grid_c = math.ceil(g*np.sqrt(self.cell[2,0]**2+self.cell[2,1]**2+self.cell[2,2]**2))
        self.e5.delete(0, END)
        self.e5.insert(END,grid_a)
        self.e6.delete(0, END)
        self.e6.insert(END,grid_b)
        self.e7.delete(0, END)
        self.e7.insert(END,grid_c)
    def grid2grid_density(self,event):
        """Convert the grid point counts (e5/e6/e7) back into a grid density (e8).

        Reads the cell vectors from line 2 of the structure's xyz file and
        computes density = grid points / ceil(|cell vector|) per axis; only
        the a-axis value is written back (see note below).
        """
        # grid_a, grid_b, grid_c -> grid_density
        # (the original comment stated the opposite direction; this method
        #  reads e5/e6/e7 and writes e8)
        if self.e1.get() != 'ud':
            f = open(self.dirname+self.settings[self.e1.get()]['file'],'r')
        if self.e1.get() == 'ud':
            f = open(self.settings[self.e1.get()]['file'],'r')
        ll = f.readlines()
        f.close()
        cell = np.zeros([3,3])
        cell[0,0] = ll[1].split()[0]
        cell[0,1] = ll[1].split()[1]
        cell[0,2] = ll[1].split()[2]
        cell[1,0] = ll[1].split()[3]
        cell[1,1] = ll[1].split()[4]
        cell[1,2] = ll[1].split()[5]
        cell[2,0] = ll[1].split()[6]
        cell[2,1] = ll[1].split()[7]
        cell[2,2] = ll[1].split()[8]
        self.cell = cell
        g_a = float(self.e5.get())/math.ceil(np.sqrt(self.cell[0,0]**2+self.cell[0,1]**2+self.cell[0,2]**2))
        g_b = float(self.e6.get())/math.ceil(np.sqrt(self.cell[1,0]**2+self.cell[1,1]**2+self.cell[1,2]**2))
        g_c = float(self.e7.get())/math.ceil(np.sqrt(self.cell[2,0]**2+self.cell[2,1]**2+self.cell[2,2]**2))
        # debug output
        #print(g_a)
        #print(g_b)
        #print(g_c)
        # Approximation: only the a-axis density is displayed; g_b and g_c
        # are computed but unused
        new_g = g_a
        self.e8.delete(0, END)
        self.e8.insert(END,new_g)
    def cmd_pore(self):
        """Run the selected Fortran porosity routine and show its captured stdout.

        Method '1' calls osa(); method '2' calls gpa_FullGrid() with the
        user-entered probe radius and grid points.  Fortran writes directly
        to fd 1, so stdout is captured by temporarily dup2()-ing a file onto
        file descriptor 1.
        """
        self.refresh_pore(self)
        self.get_target()
        # KT: get correct structure inputs
        structs = {'ud' : 'pypore.xyz',
                   'do' : self.dirname+'dut_8_open.xyz',
                   'vo' : self.dirname+'dut_8_open_vcrelax.xyz',
                   'dc' : self.dirname+'dut_8_closed.xyz',
                   'vc' : self.dirname+'dut_8_closed_vcrelax.xyz',
                   'u6' : self.dirname+'uio66.xyz',
                   'u7' : self.dirname+'uio67.xyz',
                   'u8' : self.dirname+'uio68.xyz',
                   'm5' : self.dirname+'mof5.xyz',
                   'ir' : self.dirname+'irmof10.xyz',
                   'm2' : self.dirname+'mof210.xyz',
                   'h1' : self.dirname+'hkust1.xyz',
                   'ho' : self.dirname+'hkust1_with_O.xyz',
                   'c6' : self.dirname+'c60_MOF.xyz',
                   'be' : self.dirname+'benzene.xyz',
                   'b2' : self.dirname+'benzene_exp.xyz',
                   'bc' : self.dirname+'benzene_Conly.xyz',
                   'ha' : self.dirname+'h_atom.xyz'}
        if self.target[1] == '1':
            # NOTE(review): osa() is invoked twice for method '1' -- once here
            # (output to normal stdout) and once inside the try with stdout
            # redirected.  Confirm whether the first call is intentional.
            osa(structs[self.target[0]])
            try:
                # Catch the screen output of the Fortran call
                #
                # magic to capture that output:
                # from http://stackoverflow.com/questions/977840/redirecting-fortran-called-via-f2py-output-in-python
                # http://websrv.cs.umt.edu/isis/index.php/F2py_example
                output_file = 'pore.out'
                if os.path.exists(output_file):
                    os.remove(output_file)
                # open outputfile
                outfile = os.open(output_file, os.O_RDWR|os.O_CREAT)
                # save the current file descriptor
                save = os.dup(1)
                # put outfile on 1
                os.dup2(outfile, 1)
                # end magic
                # Fortran call
                osa(structs[self.target[0]])
                # restore the standard output file descriptor
                os.dup2(save, 1)
                # close the output file
                os.close(outfile)
                f = open(output_file,'r')
                output = f.read()
                f.close()
            except: output = 'You have not provided the path to pore.so!'
        if self.target[1] == '2':
            self.probe_r = self.e4.get()
            self.grid_a = self.e5.get()
            self.grid_b = self.e6.get()
            self.grid_c = self.e7.get()
            try:
                # Catch the screen output of the Fortran call
                #
                # magic to capture that output:
                # from http://stackoverflow.com/questions/977840/redirecting-fortran-called-via-f2py-output-in-python
                # http://websrv.cs.umt.edu/isis/index.php/F2py_example
                output_file = 'pore.out'
                if os.path.exists(output_file):
                    os.remove(output_file)
                # open outputfile
                outfile = os.open(output_file, os.O_RDWR|os.O_CREAT)
                # save the current file descriptor
                save = os.dup(1)
                # put outfile on 1
                os.dup2(outfile, 1)
                # end magic
                # Fortran call
                if self.exe_pore.get() == 'subgrid':
                    gpa_FullGrid(structs[self.target[0]],self.probe_r,self.grid_a,self.grid_b,self.grid_c)
                # restore the standard output file descriptor
                os.dup2(save, 1)
                # close the output file
                os.close(outfile)
                f = open(output_file,'r')
                output = f.read()
                f.close()
            except: output = 'You have not provided the path to pore.so!'
        # NOTE(review): if target[1] is neither '1' nor '2', `output` is
        # unbound here and the insert below raises NameError -- confirm the
        # method field can only hold '1' or '2'.
        self.text.delete("1.0", "end")
        self.text.insert(END,output)
        self.text.pack()
        self.text.update()
        self.newwin.update()
        self.newwin.deiconify()
    def get_psd(self):
        """Rebuild the result window with the pore-size-distribution input form."""
        self.newwin.destroy()
        self.init_newwin()
        # Initial help text shown in the scrollable text widget
        s = ''' PyPorE: pore size distribution (PSD) \n
        \t structure \n
        \t\t\t ud -- user defined; use "open" to select the structure \n
        \t\t\t do -- predefined DUT-8(Ni) open, see others as well \n
        \t Starting points -- number of starting points \n
        \t Monte-Carlo cycles -- number of Monte-Carlo cycles \n
        \t Authors: \n
        \t\t\t <NAME> (Fortran, core routines) \n
        \t\t\t <NAME> (Python, GUI)
        '''
        self.text.insert(END,s)
        self.text.pack()
        # Search input fields
        # What property to search for: when, where or ...
        Label(self.controls, text="Select structure").grid(row=1,column=1)
        Label(self.controls, text="Starting points").grid(row=2,column=1)
        Label(self.controls, text="Monte-Carlo cycles").grid(row=3,column=1)
        Label(self.controls, text="e.g. do").grid(row=1,column=3)
        Label(self.controls, text="e.g. 200").grid(row=2,column=3)
        Label(self.controls, text="e.g. 2000").grid(row=3,column=3)
        # Entry fields: e1 = structure key, e2 = starting points, e3 = MC cycles
        e1 = StringVar(self.newwin)
        self.e1 = e1
        self.e1.set("ud") # default value
        w1= OptionMenu(self.controls, self.e1, 'ud','do','vo','dc','vc','u6','u7','m5','ir','m2','h1','ho','c6','be','b2','bc','ha',command=self.refresh_psd)
        self.w1 = w1
        e2 = Entry(self.controls)
        e2.insert(END,'200') # default value
        e3 = Entry(self.controls)
        e3.insert(END,'2000') # default value
        self.e1 = e1
        self.e2 = e2
        self.e3 = e3
        self.w1.grid(row=1, column=2)
        self.e2.grid(row=2, column=2)
        self.e3.grid(row=3, column=2)
        # Action buttons ('Open' only for user-defined structures)
        if self.e1.get() == 'ud':
            b3 = Button(self.controls, text='Open', command=self.user_input)
            b3.grid(row=4,column=1)
            self.b3 =b3
        b1 = Button(self.controls, text='OK', command=self.cmd_psd)
        b1.grid(row=4,column=2)
        self.b1 = b1
        self.text.update()
        #self.text.deiconify()
        self.newwin.update()
        self.newwin.deiconify()
    def cmd_psd(self):
        """Run the Fortran PSD routine and show its captured stdout in the text widget.

        Fortran writes directly to fd 1, so stdout is captured by
        temporarily dup2()-ing a file onto file descriptor 1.
        """
        self.get_target()
        # KT: get correct structure inputs
        structs = {'ud' : 'pypore.xyz',
                   'do' : self.dirname+'dut_8_open.xyz',
                   'vo' : self.dirname+'dut_8_open_vcrelax.xyz',
                   'dc' : self.dirname+'dut_8_closed.xyz',
                   'vc' : self.dirname+'dut_8_closed_vcrelax.xyz',
                   'u6' : self.dirname+'uio66.xyz',
                   'u7' : self.dirname+'uio67.xyz',
                   'u8' : self.dirname+'uio68.xyz',
                   'm5' : self.dirname+'mof5.xyz',
                   'ir' : self.dirname+'irmof10.xyz',
                   'm2' : self.dirname+'mof210.xyz',
                   'h1' : self.dirname+'hkust1.xyz',
                   'ho' : self.dirname+'hkust1_with_O.xyz',
                   'c6' : self.dirname+'c60_MOF.xyz',
                   'be' : self.dirname+'benzene.xyz',
                   'b2' : self.dirname+'benzene_exp.xyz',
                   'bc' : self.dirname+'benzene_Conly.xyz',
                   'ha' : self.dirname+'h_atom.xyz'}
        try:
            # magic to capture that output:
            # from http://stackoverflow.com/questions/977840/redirecting-fortran-called-via-f2py-output-in-python
            # http://websrv.cs.umt.edu/isis/index.php/F2py_example
            output_file = 'psd.out'
            if os.path.exists(output_file):
                os.remove(output_file)
            # open outputfile
            outfile = os.open(output_file, os.O_RDWR|os.O_CREAT)
            # save the current file descriptor
            save = os.dup(1)
            # put outfile on 1
            os.dup2(outfile, 1)
            # end magic
            # Fortran call: structure file, starting points (e2), MC cycles (e3)
            #pore(self.target[0],self.target[1],self.probe_r,self.grid_a,self.grid_b,self.grid_c)
            get_PSD(structs[self.target[0]],self.e2.get(),self.e3.get())
            #
            # restore the standard output file descriptor
            os.dup2(save, 1)
            # close the output file
            os.close(outfile)
            f = open(output_file,'r')
            output = f.read()
            f.close()
        except: output = 'You have not provided the path to get_psd.so!'
        self.text.delete("1.0", "end")
        self.text.insert(END,output)
        self.text.pack()
        #self.display.pack()
        #self.text.grid(row=1, columnspan=4, rowspan=4,padx=5, sticky=E+W+S+N)
        #self.display.grid(row=1, columnspan=4, rowspan=4,padx=5, sticky=E+W+S+N)
        self.text.update()
        self.newwin.update()
        self.newwin.deiconify()
    def init_newwin(self):
        """Create the (initially hidden) result window: text area on the left, controls on the right."""
        # The search check boxes
        check_box_list = []
        self.check_box_list = check_box_list
        # Output/Result window
        newwin = Toplevel(self.master)
        area = Canvas(newwin)
        controls = Frame(newwin)
        area.pack(side="left", fill="both", expand=True)
        controls.pack(side="right", fill="both", expand=False)
        self.area = area
        self.controls = controls
        scroll = Scrollbar(newwin)
        # Label of the new window
        #display = Label(newwin, text='Info')
        # scrollable text in new window
        text = Text(self.area, height=40, width=120,yscrollcommand=scroll.set)
        self.newwin = newwin
        #self.display = display
        self.text = text
        # keep the window hidden until a caller fills it and deiconify()s it
        self.newwin.withdraw()
        self.b = None
def get_target(self):
# get structure and method
target = [self.e1.get(),self.e2.get()]
self.target = target
    def ase2pore(self):
        """Convert the structure in ``self.name`` (read via ASE) into the pypore.xyz input format.

        Output format: atom count, then the 9 cell components on one line,
        then one 'symbol x y z' line per atom.
        """
        # convert ase structural information into pore input
        struct = read(self.name)
        pos = struct.get_positions()
        sym = struct.get_chemical_symbols()
        cell = struct.get_cell()[:]
        self.cell = cell
        f = open('pypore.xyz','w')
        f.write('%i \n' %(len(pos)))
        f.write('%0.9f %0.9f %0.9f %0.9f %0.9f %0.9f %0.9f %0.9f %0.9f \n' %(cell[0,0],cell[0,1],cell[0,2],cell[1,0],cell[1,1],cell[1,2],cell[2,0],cell[2,1],cell[2,2]))
        for s in range(len(sym)):
            f.write('%s %0.9f %0.9f %0.9f\n' %(sym[s],pos[s][0],pos[s][1],pos[s][2]))
        f.close()
    def user_input(self):
        """Ask for a structure file and normalize it into pypore.xyz.

        POSCAR/cif files are converted via ase2pore(); xyz files are copied
        verbatim.
        """
        name= askopenfilename(initialdir=self.dirname,filetypes=[("xyz pypore files",".xyz"),("cif files",".cif"),("POSCAR","POSCAR")])
        try:
            datatype = name.split('.')[-1]
        except: datatype = 'error'
        # material structural formats using ase
        # NOTE(review): a POSCAR selected via the dialog comes back as a full
        # path, which never equals the bare string 'POSCAR' -- confirm this
        # branch is reachable.
        if name =='POSCAR' or datatype =='cif':
            self.name = name
            self.ase2pore()
        # KT: If xyz -> predefined porE xyz file -> write pypore file here
        if datatype == 'xyz':
            org_file = open(name,'r')
            new_file = open('pypore.xyz','w')
            lines_org = org_file.readlines()
            org_file.close()
            for s in range(len(lines_org)):
                new_file.write(lines_org[s])
            new_file.close()
    def open_file(self):
        """Open a file: text-like files are shown in the text widget, structures are viewed with ASE."""
        # select a file name in the selected folder :
        name= askopenfilename(initialdir=self.dirname)
        try:
            datatype = name.split('.')[-1]
        except: datatype = 'error'
        # text files
        if datatype =='txt' or datatype =='dat' or datatype =='out' or name.find('README') != -1:
            with open(name,'r') as UseFile:
                s = UseFile.read()
            self.text.delete("1.0", "end")
            self.text.insert(END,s)
            self.text.pack()
            #self.display.pack()
            #self.text.grid(column=0,row=0)
            #self.columnconfigure(1, weight=1)
            #self.columnconfigure(3, pad=7)
            #self.rowconfigure(3, weight=1)
            #self.rowconfigure(5, pad=7)
            #self.text.grid(row=0, columnspan=2, rowspan=4,padx=5, sticky=E+W+S+N)
            #self.display.grid(row=0, columnspan=2, rowspan=4,padx=5, sticky=E+W+S+N)
            self.text.update()
            self.newwin.update()
            self.newwin.deiconify()
        # material structural formats using ase
        if datatype =='xyz' or datatype =='cif':
            struct = read(name)
            view(struct)
    def go(self):
        """Change into the directory of each checked entry and expose a File-Open button.

        NOTE(review): self.allstates, self.r and self.keys are not defined
        anywhere in this class -- this method looks like a leftover from
        another GUI and would raise AttributeError if called; confirm before
        relying on it.
        """
        # find and structure and watch with ase
        # e.g. analyze folder and viewing txt files
        t = self.allstates()
        self.t = t
        self.init_newwin()
        self.text.delete("1.0", "end")
        if self.b != None:
            self.b.pack_forget()
        self.b = Button(self.newwin,text='File Open', command=self.open_file)
        self.b.pack(fill=X)
        for i in range(len(t)):
            if self.t[i] == 1:
                os.chdir(self.r[self.keys[i]]['where'])
                ls = os.listdir('./')
                ls_str = '\n'.join(map(str, ls))
                self.dirname = self.r[self.keys[i]]['where']
        self.newwin.update()
        self.newwin.deiconify()
if __name__ == '__main__':
    # Launch the GUI; the constructor enters the Tk mainloop and blocks.
    pe = PyPore()
| [
"os.listdir",
"os.open",
"os.remove",
"os.dup2",
"ase.visualize.view",
"os.dup",
"os.path.exists",
"numpy.zeros",
"tkinter.filedialog.askopenfilename",
"tkinter.ttk.Frame",
"os.close",
"ase.io.read",
"os.chdir",
"numpy.sqrt"
] | [((12380, 12396), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (12388, 12396), True, 'import numpy as np\n'), ((13644, 13660), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (13652, 13660), True, 'import numpy as np\n'), ((23857, 23870), 'tkinter.ttk.Frame', 'Frame', (['newwin'], {}), '(newwin)\n', (23862, 23870), False, 'from tkinter.ttk import Frame\n'), ((24668, 24683), 'ase.io.read', 'read', (['self.name'], {}), '(self.name)\n', (24672, 24683), False, 'from ase.io import read\n'), ((25253, 25384), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {'initialdir': 'self.dirname', 'filetypes': "[('xyz pypore files', '.xyz'), ('cif files', '.cif'), ('POSCAR', 'POSCAR')]"}), "(initialdir=self.dirname, filetypes=[('xyz pypore files',\n '.xyz'), ('cif files', '.cif'), ('POSCAR', 'POSCAR')])\n", (25268, 25384), False, 'from tkinter.filedialog import askopenfilename\n'), ((26097, 26137), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {'initialdir': 'self.dirname'}), '(initialdir=self.dirname)\n', (26112, 26137), False, 'from tkinter.filedialog import askopenfilename\n'), ((22349, 22376), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (22363, 22376), False, 'import os\n'), ((22469, 22513), 'os.open', 'os.open', (['output_file', '(os.O_RDWR | os.O_CREAT)'], {}), '(output_file, os.O_RDWR | os.O_CREAT)\n', (22476, 22513), False, 'import os\n'), ((22578, 22587), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (22584, 22587), False, 'import os\n'), ((22631, 22650), 'os.dup2', 'os.dup2', (['outfile', '(1)'], {}), '(outfile, 1)\n', (22638, 22650), False, 'import os\n'), ((22959, 22975), 'os.dup2', 'os.dup2', (['save', '(1)'], {}), '(save, 1)\n', (22966, 22975), False, 'import os\n'), ((23024, 23041), 'os.close', 'os.close', (['outfile'], {}), '(outfile)\n', (23032, 23041), False, 'import os\n'), ((27233, 27243), 'ase.io.read', 'read', (['name'], {}), '(name)\n', (27237, 27243), False, 'from 
ase.io import read\n'), ((27256, 27268), 'ase.visualize.view', 'view', (['struct'], {}), '(struct)\n', (27260, 27268), False, 'from ase.visualize import view\n'), ((12817, 12892), 'numpy.sqrt', 'np.sqrt', (['(self.cell[0, 0] ** 2 + self.cell[0, 1] ** 2 + self.cell[0, 2] ** 2)'], {}), '(self.cell[0, 0] ** 2 + self.cell[0, 1] ** 2 + self.cell[0, 2] ** 2)\n', (12824, 12892), True, 'import numpy as np\n'), ((12918, 12993), 'numpy.sqrt', 'np.sqrt', (['(self.cell[1, 0] ** 2 + self.cell[1, 1] ** 2 + self.cell[1, 2] ** 2)'], {}), '(self.cell[1, 0] ** 2 + self.cell[1, 1] ** 2 + self.cell[1, 2] ** 2)\n', (12925, 12993), True, 'import numpy as np\n'), ((13011, 13086), 'numpy.sqrt', 'np.sqrt', (['(self.cell[2, 0] ** 2 + self.cell[2, 1] ** 2 + self.cell[2, 2] ** 2)'], {}), '(self.cell[2, 0] ** 2 + self.cell[2, 1] ** 2 + self.cell[2, 2] ** 2)\n', (13018, 13086), True, 'import numpy as np\n'), ((14063, 14138), 'numpy.sqrt', 'np.sqrt', (['(self.cell[0, 0] ** 2 + self.cell[0, 1] ** 2 + self.cell[0, 2] ** 2)'], {}), '(self.cell[0, 0] ** 2 + self.cell[0, 1] ** 2 + self.cell[0, 2] ** 2)\n', (14070, 14138), True, 'import numpy as np\n'), ((14173, 14248), 'numpy.sqrt', 'np.sqrt', (['(self.cell[1, 0] ** 2 + self.cell[1, 1] ** 2 + self.cell[1, 2] ** 2)'], {}), '(self.cell[1, 0] ** 2 + self.cell[1, 1] ** 2 + self.cell[1, 2] ** 2)\n', (14180, 14248), True, 'import numpy as np\n'), ((14282, 14357), 'numpy.sqrt', 'np.sqrt', (['(self.cell[2, 0] ** 2 + self.cell[2, 1] ** 2 + self.cell[2, 2] ** 2)'], {}), '(self.cell[2, 0] ** 2 + self.cell[2, 1] ** 2 + self.cell[2, 2] ** 2)\n', (14289, 14357), True, 'import numpy as np\n'), ((16186, 16213), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (16200, 16213), False, 'import os\n'), ((16318, 16362), 'os.open', 'os.open', (['output_file', '(os.O_RDWR | os.O_CREAT)'], {}), '(output_file, os.O_RDWR | os.O_CREAT)\n', (16325, 16362), False, 'import os\n'), ((16435, 16444), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (16441, 
16444), False, 'import os\n'), ((16496, 16515), 'os.dup2', 'os.dup2', (['outfile', '(1)'], {}), '(outfile, 1)\n', (16503, 16515), False, 'import os\n'), ((16699, 16715), 'os.dup2', 'os.dup2', (['save', '(1)'], {}), '(save, 1)\n', (16706, 16715), False, 'import os\n'), ((16772, 16789), 'os.close', 'os.close', (['outfile'], {}), '(outfile)\n', (16780, 16789), False, 'import os\n'), ((17569, 17596), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (17583, 17596), False, 'import os\n'), ((17701, 17745), 'os.open', 'os.open', (['output_file', '(os.O_RDWR | os.O_CREAT)'], {}), '(output_file, os.O_RDWR | os.O_CREAT)\n', (17708, 17745), False, 'import os\n'), ((17818, 17827), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (17824, 17827), False, 'import os\n'), ((17879, 17898), 'os.dup2', 'os.dup2', (['outfile', '(1)'], {}), '(outfile, 1)\n', (17886, 17898), False, 'import os\n'), ((18197, 18213), 'os.dup2', 'os.dup2', (['save', '(1)'], {}), '(save, 1)\n', (18204, 18213), False, 'import os\n'), ((18270, 18287), 'os.close', 'os.close', (['outfile'], {}), '(outfile)\n', (18278, 18287), False, 'import os\n'), ((22394, 22416), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (22403, 22416), False, 'import os\n'), ((27753, 27792), 'os.chdir', 'os.chdir', (["self.r[self.keys[i]]['where']"], {}), "(self.r[self.keys[i]]['where'])\n", (27761, 27792), False, 'import os\n'), ((27814, 27830), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (27824, 27830), False, 'import os\n'), ((16235, 16257), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (16244, 16257), False, 'import os\n'), ((17618, 17640), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (17627, 17640), False, 'import os\n')] |
import sys
from PIL import Image
import numpy as np
import argparse
import datetime
def create_namespace():
    """Parse the command line (-i/--image, -r/--result) into an argparse Namespace."""
    parser = argparse.ArgumentParser()
    # (short, long, default, help text) for every supported option
    option_table = (
        ('-i', '--image', None,
         'set image to transform into a mosaic in grayscale'),
        ('-r', '--result', 'res.jpg',
         'set name with which the result will be saved'),
    )
    for short_flag, long_flag, default, help_text in option_table:
        parser.add_argument(short_flag, long_flag, default=default,
                            metavar='', help=help_text)
    return parser.parse_args(sys.argv[1:])
def open_image(image_name):
    """Load *image_name* as a numpy array, re-prompting on failure.

    Any load error is logged; the user may type a new path or 'exit'
    to terminate the program.
    """
    while True:
        try:
            return np.array(Image.open(image_name))
        except Exception as err:
            register_an_error(err)
            print('Incorrect input.')
            print('Enter the image name in the current directory '
                  'or specify the full path to your file. Write "exit" if you want to exit.')
            image_name = input()
            if image_name == 'exit':
                exit()
def register_an_error(error):
    """Append *error* with a timestamp to ./log.txt.

    Fix: open(..., 'a') already creates the file when it does not exist,
    so the original except-branch that reopened the log with 'w+' (and
    duplicated the timestamp/format logic) was dead code; it has been
    removed.
    """
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open('./log.txt', 'a') as file:
        file.write("[{}] - {}\n".format(now, error))
def set_grayscale():
    """Prompt until the user supplies an integer grayscale step and return it."""
    value = None
    while value is None:
        try:
            # input() stays inside the try so EOF/interrupt behaves like
            # the original (logged and re-prompted)
            value = int(input('Set grayscale.\n'))
        except Exception as err:
            register_an_error(err)
            print('Incorrect input. Please enter grayscale in correct format.')
    return value
def set_mosaic_dimensions():
    """Ask the user for the mosaic cell height and width and return (height, width)."""
    def _ask_int(prompt, error_message):
        # loop until int() accepts what the user typed; failures are logged
        while True:
            try:
                return int(input(prompt))
            except Exception as err:
                register_an_error(err)
                print(error_message)
    height = _ask_int('Set the mosaic height.\n',
                      'Incorrect input. Please enter mosaic height in correct format.')
    width = _ask_int('Set the mosaic width.\n',
                     'Incorrect input. Please enter mosaic width in correct format.')
    return height, width
def replace_with_gray(dimensions, array, step):
    """Turn an RGB image array into a gray mosaic, modifying *array* in place.

    Args:
        dimensions: (cell_height, cell_width) of one mosaic tile.
        array: H x W x 3 numpy image array (as produced by open_image).
        step: quantization step for the gray level; each tile's average
            brightness is rounded down to a multiple of *step*.

    Returns:
        A PIL Image built from the modified array.

    Fix: the original divided every tile's brightness sum by the fixed
    count height*width*3, so tiles at the right/bottom edge (which are
    smaller than a full tile) were averaged too low and rendered darker
    than they should be.  We now divide by the tile's actual element
    count (cell.size).
    """
    cell_height, cell_width = dimensions
    for x in range(0, array.shape[0], cell_height):
        for y in range(0, array.shape[1], cell_width):
            cell = array[x: x + cell_height, y: y + cell_width]
            # average over the elements the slice actually contains
            average_brightness = np.sum(cell) // cell.size
            # quantize the average down to a multiple of *step*
            color = int(average_brightness // step) * step
            # paint the whole tile in the resulting gray
            array[x: x + cell_height, y: y + cell_width] = np.full(3, color)
    return Image.fromarray(array)
def save_image(result_name, array):
    """Save the PIL image *array* under *result_name*, re-prompting the user on failure."""
    target = result_name
    while True:
        try:
            return array.save(target)
        except Exception as err:
            register_an_error(err)
            print('Enter a different name for the output image or specify the correct file format.')
            target = input()
if __name__ == '__main__':
    # CLI entry point: parse arguments, load the image, mosaic it, save it.
    namespace = create_namespace()
    arr = open_image(namespace.image)
    image = replace_with_gray(set_mosaic_dimensions(), arr, set_grayscale())
    save_image(namespace.result, image)
| [
"numpy.full",
"numpy.sum",
"argparse.ArgumentParser",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"datetime.datetime.now"
] | [((135, 160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (158, 160), False, 'import argparse\n'), ((2991, 3013), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (3006, 3013), False, 'from PIL import Image\n'), ((611, 633), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (621, 633), False, 'from PIL import Image\n'), ((654, 667), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (662, 667), True, 'import numpy as np\n'), ((2961, 2978), 'numpy.full', 'np.full', (['(3)', 'color'], {}), '(3, color)\n', (2968, 2978), True, 'import numpy as np\n'), ((2633, 2673), 'numpy.sum', 'np.sum', (['array[x:x + height, y:y + width]'], {}), '(array[x:x + height, y:y + width])\n', (2639, 2673), True, 'import numpy as np\n'), ((1142, 1165), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1163, 1165), False, 'import datetime\n'), ((1348, 1371), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1369, 1371), False, 'import datetime\n')] |
# def test(a):
# return a
import numpy as np
import textblob
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
def test(input_: str) -> str:
    """Classify the dominant emotion of a (possibly Korean) input text.

    The text is translated to English when necessary, sentence-tokenized,
    scored against hand-built emotion lexicons with VADER-style negation and
    booster handling, and the index of the highest-scoring emotion is
    returned as a string: "0" joy, "1" sadness, "2" anger, "3" fear,
    "4" disgust, "5" restless.  Any failure is mapped to "6".
    """
    # NLTK corpora bundled with the Android app (device-specific path).
    nltk.data.path.append("/data/data/cau.injiyong.slight/files/nltk_data")
    # Booster increment/decrement magnitudes (same values as the VADER lexicon).
    B_INCR = 0.293
    B_DECR = -0.293
    # Negation cue words: a hit dampens the counts of the following word(s).
    NEGATE = \
        ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
        "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
        "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
        "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
        "neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
        "oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
        "oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
        "without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite", "n't", "no"]
    # Intensity modifiers: scalar added to the count of the word that follows.
    BOOSTER_DICT = \
        {"absolutely": B_INCR, "amazingly": B_INCR, "awfully": B_INCR,
         "completely": B_INCR, "considerable": B_INCR, "considerably": B_INCR,
         "decidedly": B_INCR, "deeply": B_INCR, "effing": B_INCR, "enormous": B_INCR, "enormously": B_INCR,
         "entirely": B_INCR, "especially": B_INCR, "exceptional": B_INCR, "exceptionally": B_INCR,
         "extreme": B_INCR, "extremely": B_INCR,
         "fabulously": B_INCR, "flipping": B_INCR, "flippin": B_INCR, "frackin": B_INCR, "fracking": B_INCR,
         "fricking": B_INCR, "frickin": B_INCR, "frigging": B_INCR, "friggin": B_INCR, "fully": B_INCR,
         "fuckin": B_INCR, "fucking": B_INCR, "fuggin": B_INCR, "fugging": B_INCR,
         "greatly": B_INCR, "hella": B_INCR, "highly": B_INCR, "hugely": B_INCR,
         "incredible": B_INCR, "incredibly": B_INCR, "intensely": B_INCR,
         "major": B_INCR, "majorly": B_INCR, "more": B_INCR, "most": B_INCR, "particularly": B_INCR,
         "purely": B_INCR, "quite": B_INCR, "really": B_INCR, "remarkably": B_INCR,
         "so": B_INCR, "substantially": B_INCR,
         "thoroughly": B_INCR, "total": B_INCR, "totally": B_INCR, "tremendous": B_INCR, "tremendously": B_INCR,
         "uber": B_INCR, "unbelievably": B_INCR, "unusually": B_INCR, "utter": B_INCR, "utterly": B_INCR,
         "very": B_INCR,
         "almost": B_DECR, "barely": B_DECR, "hardly": B_DECR, "just enough": B_DECR,
         "kind of": B_DECR, "kinda": B_DECR, "kindof": B_DECR, "kind-of": B_DECR,
         "less": B_DECR, "little": B_DECR, "marginal": B_DECR, "marginally": B_DECR,
         "occasional": B_DECR, "occasionally": B_DECR, "partly": B_DECR,
         "scarce": B_DECR, "scarcely": B_DECR, "slight": B_DECR, "slightly": B_DECR, "somewhat": B_DECR,
         "sort of": B_DECR, "sorta": B_DECR, "sortof": B_DECR, "sort-of": B_DECR}
    # Per-emotion lexicons (matched against verb-lemmatized vocabulary terms).
    JOY = \
        ['cute', 'smile', 'good', 'nice', 'cheerful', 'commanding', 'loving', 'hilarious', 'flutter', 'fun','funny', 'amusement', 'interest','interesting','interested', 'excite', 'exciting','excited', 'pleasant','pleasantly',
        'pleasure', 'enjoyable','enjoy', 'joyful','joy', 'cheer','cheery','cheerful', 'happy', 'happily','merry', 'delightful','delight',
        'exhilarating','exhilarate', 'boon','exhilarated', 'simpatico', 'mirthful', 'riant','joy', 'joyous', 'rollicking','rollick','wonderful',
        'glad','gladly', 'desire','desired', 'desirable','wish','wishes','aspire', 'aspiration', 'avid', 'relieve','relieving','relieved',
        'unburdened', 'unburden' ,'gratifying','gratify', 'gratified', 'satisfy','satisfaction','satisfied','satisfying', 'dramatic', 'moving',
        'touching', 'overwhelming','overwhelmed','overwhelm' ,'wonder', 'awe', 'marvel','marvelous', 'impressive',
        'impressed','impressing','impress' ,'ecstasy','ecstatic', 'rapture','raptured', 'blissful','bliss', 'entrancing','entranced', 'charming','charm',
        'nympholepsy', 'jolly', 'rosy', 'hopeful','hope','hopes','hopefully', 'bright','brightly', 'wishful', 'content', 'heartwarming', 'sufficient',
        'ample', 'enough', 'pride', 'proud','achieve', 'achievement','accomplish', 'accomplishment', 'fulfillment','fulfill', 'worthwhile', 'fruitful',
        'rewarding', 'boast','boastful', 'honor','honored' ,'glory','glorious', 'kudos', 'honour','honoured', 'priviledge', 'triumph', 'jubilance',
        'arouse','aroused','lucky', 'luck', 'fortune','fortunate', 'relief', 'comfortable','comfort','relax','relaxing', 'relaxed', 'easy', 'comfy',
        'peaceful','peace', 'calm', 'restful','rest', 'quiet', 'informal', 'homey', 'love','loved', 'cherish', 'beloved', 'lurve', 'romance', 'caritas',
        'thankful','thanks','thank', 'grateful', 'obliged', 'thankworthy', 'welcome','welcoming','welcomes', 'inspiring', 'inspire', 'inspired','flutter']
    SADNESS = \
        ['alone', 'coldhearted', 'unkind','bereft', 'bereavement', 'apathetic', 'apathy', 'crying', 'cry', 'cries', 'cried', 'disastrous', 'moldy', 'futility', 'vain','vanity', 'futlie', 'nihilism', 'fugacious', 'idle', 'dejected','deject','dejects','dejecting', 'dispirited','dispirit',
        'dispiriting','dispirits' 'despondent','despond','desponding','despondency','hollowed', 'hollow',  # NOTE(review): missing comma — 'dispirits' and 'despondent' concatenate into one bogus entry
        'resign','resignation','resigning','resigned', 'empty', 'boredom', 'bored' ,'boring', 'tiresome', 'wearisome', 'irksome', 'longwinded', 'dull', 'tedious',
        'monotonous', 'lengthy', 'stodgy',
        'regret','regrets', 'repent','repents','repenting', 'rue','rues','rueing', 'remorse', 'lonely', 'solitary','solitarily', 'lonesome', 'melancholy',
        'forlorn', 'gloomy', 'desolate','desolating','desolated',
        'reclusive','reclusively', 'moody', 'desert','deserted', 'alienation','alienate','alienated','alienating', 'isolate', 'isolating', 'isolated',
        'depressed','depress','depressing','losses', 'loss','deject','dejected', 'dejection', 'gessepany', 'sad', 'sadly','sadness','plaintive',
        'disconsolate', 'grief','grieve','grieving','grieved', 'plead','plea', 'sob', 'morn', 'doleful', 'unhappy', 'hurts','hurt', 'unfair','unfairness',
        'cruelty', 'cruel', 'unfeeling', 'heartless', 'harsh', 'coldly', 'bitter','bitterness', 'upset']
    ANGER = \
        ['frown', 'frowning','annoy','annoying', 'annoyed', 'calamity', 'angry','anger', 'fucking', 'dratted','drat', 'offenseful','offense','hate', 'hateful', 'detest', 'detestable', 'naughty', 'dislike', 'hostility',
        'antaginism', 'animosity','hatred',
        'antipathy', 'disapproval','disapprove', 'animus', 'enmity', 'rage', 'fury', 'resentment','resent', 'indignation', 'wrath', 'enrage', 'ire',
        'bile', 'mortify', 'mortified', 'mortifying', 'affront', 'affronting', 'affronted', 'dodgasted', 'outraged', 'outrage', 'exasperation','exasperate',
        'exasperating', 'exasperated', 'displease','displeasing', 'displeased', 'miffy', 'pained', 'irritate','irritates', 'irritating',
        'raspingly', 'betray', 'treachery', 'vex', 'vexation', 'grudge','reproach', 'reproachful', 'dissatisfaction', 'discontent', 'complaint',
        'oppression','oppress', 'oppressed', 'abaissement', 'mortification', 'disappoint','disappointed','disappointment','disappointing', 'envy', 'jealous',
        'aggravate','aggravating', 'aggravated', 'peeve','peeved',
        'nasty', 'saucy', 'cheeky', 'pert', 'spiteful', 'impudent', 'mad']
    FEAR = \
        ['strange', 'retaliation', 'threaten','threatening','threat', 'menacing', 'menace', 'thrilled','thrill','thrilling', 'bloodcurdling','eerie','uncanny', 'agony', 'agonize', 'breakup', 'frightening','frightened','frighten','fright', 'terrifying','terrify','terrified', 'horrify','horror', 'horrifying', 'ghastly', 'gruesome', 'macabre',
        'eldritch', 'unearthly', 'gooseflesh', 'hideous', 'terrible', 'grisly', 'creepy','fear', 'fearful','feared', 'afraid','scare', 'scared',
        'dreaded','dread', 'bogey', 'horrific', 'shock','shocked', 'stun', 'astonished','astonish','astonished','astonishing', 'impatience', 'astounded',
        'astound','astounding', 'startled','startle', 'anxious','anxiousness', 'apprehension', 'uneasiness','uneasy', 'nervous', 'edgy', 'impatient',
        'jittery', 'clutched', 'fretted', 'scary','painful']
    DISGUST = \
        ['stinc','stinky', 'stinken', 'vomit', 'venomous', 'nausea','abominate', 'abomination', 'loath', 'abhorrence','abhorre', 'revulsion', 'aversion', 'repugnance', 'disrelish', 'contempt',
        'scorn', 'despise', 'contemn', 'disgusting','disgust', 'nauseating', 'yucky', 'sickening', 'repellent', 'repulsive', 'disillusion','disillusionment',
        'unpleasant', 'discomfort', 'unpleasure', 'disamenity', 'umbrage', 'queerness']
    RESTLESS = \
        ['ill', 'ache', 'diseased', 'disease', 'sick', 'weary', 'depleted', 'tired','sleepy', 'helpless', 'restless', 'pathetic','pathetically', 'wailful', 'ardent', 'homesick', 'miss','misses','missed', 'yearn','yearning','yearns', 'longing', 'sympathy','sympathetic',
        'sympathize', 'pity','pitiable', 'compassion','compassionate', 'miserable',
        'lacerant', 'woeful', 'poor', 'wretch', 'commiserable', 'sorry', 'worry','worries','worried', 'concern', 'care','cares', 'enbarrassed','enbarrass',
        'enbarrassing', 'disconcerted','disconcert','disconcerting',
        'confuse','confused','confusing','confusement', 'puzzling', 'puzzled', 'perplex' , 'perplexing', 'perplexed', 'dilemma', 'baffled', 'absurd',
        'ridiculous', 'nonsensial', 'preposterous', 'sublime',
        'shame', 'ashamed', 'shameful','shame', 'disgrace', 'disgraceful', 'reprehensible', 'inglorious', 'bashful', 'shy', 'wusted', 'cringeworthy', 'unmentionable',
        'humiliated','humiliate','humiliation', 'humiliating', 'ignominious', 'opprobrious', 'guilt','guilty', 'compunction', 'disturbed','disturbing','disturbes',
        'complicated', 'intricacy', 'involution', 'stuffy', 'stifling', 'stifle', 'stifles', 'suffocates', 'suffocate', 'suffocated', 'suffocating','disheartened',
        'airless', 'poky', 'afflicting','affliction','afflict', 'afflicts', 'dismal', 'direful', 'crushing', 'despair', 'hopelessness','hopeless','frustrate',
        'frustration','frustrating','frustrated', 'discourageed', 'reversal', 'backset', 'flameout', 'unhappiness', 'misfortune', 'unfortunate', 'unlucky',
        'distressed', 'stern', 'urgent', 'desperate', 'stringent','stringently', 'clamant','clamantly', 'impend','impending', 'impendly']
    try:
        # Accumulated score per emotion.
        JoySum = 0
        SadnessSum = 0
        AngerSum = 0
        FearSum = 0
        DisgustSum = 0
        RestlessSum = 0
        # " (s)" is appended so even single-word input survives tokenization
        # and translation — presumably; confirm with the original author.
        ko_blob = textblob.TextBlob(input_ + " (s)")
        if ko_blob.detect_language() == 'en':
            input_text = ko_blob
        else:
            # Translate Korean (or any non-English) input to English.
            input_text = ko_blob.translate(from_lang='ko', to='en')
        input_text = str(input_text.lower())
        input_strings = nltk.tokenize.sent_tokenize(input_text)
        # Bag-of-words counts: one row per sentence, one column per vocab term.
        vector = CountVectorizer()
        countVec = vector.fit_transform(input_strings).toarray()
        input_words = []
        for i in range(len(countVec)):
            tokenizer=nltk.tokenize.TreebankWordTokenizer()
            input_words.append(tokenizer.tokenize(input_strings[i]))
        countVec = np.asfarray(countVec)
        neg_words = []
        neg_words.extend(NEGATE)
        # Adjust raw counts: dampen the 1-2 words after a negation cue and
        # boost/attenuate the word after an intensity modifier.
        for i in range(len(countVec)):
            for k, word in enumerate(input_words[i]):
                if (word in neg_words) and (word in vector.vocabulary_):
                    if (countVec[i, vector.vocabulary_[word]] > 0):
                        if (k + 2 < len(input_words[i])):
                            word1 = input_words[i][k + 1]
                            word2 = input_words[i][k + 2]
                            if word1 in vector.vocabulary_:
                                countVec[i, vector.vocabulary_[word1]] -= 0.59
                            if word2 in vector.vocabulary_:
                                if word1 in neg_words:
                                    # Double negation: leave the second word alone.
                                    pass
                                else:
                                    countVec[i, vector.vocabulary_[word2]] -= 0.59
                        elif (k + 1 < len(input_words[i])):
                            word1 = input_words[i][k + 1]
                            if word1 in vector.vocabulary_:
                                countVec[i, vector.vocabulary_[word1]] -= 0.59
                        else:
                            pass
                scalar = 0.0
                if word in BOOSTER_DICT:
                    scalar = BOOSTER_DICT[word]
                if scalar != 0 and (word in vector.vocabulary_):
                    if (k + 1 < len(input_words[i])):
                        word1 = input_words[i][k + 1]
                        if word1 in vector.vocabulary_:
                            countVec[i, vector.vocabulary_[word1]] += scalar
        # Clamp negatives introduced by negation handling, then apply a
        # sublinear (1 + log) transform; delta guards against log(0).
        countVec = np.maximum(0, countVec)
        delta = 10**(-9)
        resVec = np.maximum(0, 1 + np.log(countVec + delta))
        # Blend in an inverse tf-idf term with a small weight.
        lambda_ = 0.05
        tfidfv = TfidfVectorizer().fit(input_strings)
        resVec = resVec + lambda_ * (1 - tfidfv.transform(input_strings).toarray())
        # Total weight per vocabulary term across all sentences.
        sumVec = np.sum(resVec, axis=0)
        for k in vector.vocabulary_:
            # Verb-lemmatize before lexicon lookup (e.g. "cried" -> "cry").
            rk = textblob.Word(k).lemmatize('v')
            if (rk in JOY) == True:
                JoySum += sumVec[vector.vocabulary_[k]]
            if (rk in SADNESS) == True:
                SadnessSum += sumVec[vector.vocabulary_[k]]
            if (rk in ANGER) == True:
                AngerSum += sumVec[vector.vocabulary_[k]]
            if (rk in FEAR) == True:
                FearSum += sumVec[vector.vocabulary_[k]]
            if (rk in DISGUST) == True:
                DisgustSum += sumVec[vector.vocabulary_[k]]
            if (rk in RESTLESS) == True:
                RestlessSum += sumVec[vector.vocabulary_[k]]
        # Winning emotion = index of the largest accumulated score.
        resArr = [JoySum, SadnessSum, AngerSum, FearSum, DisgustSum, RestlessSum]
        res = resArr.index(max(resArr))
        res = str(res)
        return res
    except Exception as ex:
        # NOTE(review): every failure (network, translation, tokenization) is
        # silently mapped to the sentinel class "6" — consider logging `ex`.
        return "6"
"sklearn.feature_extraction.text.CountVectorizer",
"nltk.tokenize.TreebankWordTokenizer",
"numpy.maximum",
"numpy.sum",
"numpy.log",
"sklearn.feature_extraction.text.TfidfVectorizer",
"textblob.Word",
"numpy.asfarray",
"nltk.data.path.append",
"nltk.tokenize.sent_tokenize",
"textblob.TextBlob"
] | [((224, 295), 'nltk.data.path.append', 'nltk.data.path.append', (['"""/data/data/cau.injiyong.slight/files/nltk_data"""'], {}), "('/data/data/cau.injiyong.slight/files/nltk_data')\n", (245, 295), False, 'import nltk\n'), ((10718, 10752), 'textblob.TextBlob', 'textblob.TextBlob', (["(input_ + ' (s)')"], {}), "(input_ + ' (s)')\n", (10735, 10752), False, 'import textblob\n'), ((10985, 11024), 'nltk.tokenize.sent_tokenize', 'nltk.tokenize.sent_tokenize', (['input_text'], {}), '(input_text)\n', (11012, 11024), False, 'import nltk\n'), ((11043, 11060), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (11058, 11060), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((11340, 11361), 'numpy.asfarray', 'np.asfarray', (['countVec'], {}), '(countVec)\n', (11351, 11361), True, 'import numpy as np\n'), ((13012, 13035), 'numpy.maximum', 'np.maximum', (['(0)', 'countVec'], {}), '(0, countVec)\n', (13022, 13035), True, 'import numpy as np\n'), ((13303, 13325), 'numpy.sum', 'np.sum', (['resVec'], {'axis': '(0)'}), '(resVec, axis=0)\n', (13309, 13325), True, 'import numpy as np\n'), ((11213, 11250), 'nltk.tokenize.TreebankWordTokenizer', 'nltk.tokenize.TreebankWordTokenizer', ([], {}), '()\n', (11248, 11250), False, 'import nltk\n'), ((13097, 13121), 'numpy.log', 'np.log', (['(countVec + delta)'], {}), '(countVec + delta)\n', (13103, 13121), True, 'import numpy as np\n'), ((13164, 13181), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (13179, 13181), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((13381, 13397), 'textblob.Word', 'textblob.Word', (['k'], {}), '(k)\n', (13394, 13397), False, 'import textblob\n')] |
"""
@author sanjeethr, oligoglot
Thanks to <NAME> for this step by step guide: https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys, os
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import Sequential
from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from libindic.soundex import Soundex
from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'indic_nlp_library'))
from indicnlp.normalize.indic_normalize import BaseNormalizer
try:
from indictrans import Transliterator
except ImportError:
print('Please install indic-trans from git: https://github.com/libindic/indic-trans')
# Roman-script -> native-script transliterators for Tamil and Malayalam.
ta_trans = Transliterator(source='eng', target='tam', build_lookup=True)
ml_trans = Transliterator(source='eng', target='mal', build_lookup=True)
# The maximum number of words kept by the tokenizer (most frequent first).
MAX_NB_WORDS = 50000
# Maximum sequence length; longer documents are truncated, shorter padded.
MAX_SEQUENCE_LENGTH = 150
# Dimensionality of the (trainable) embedding layer.
EMBEDDING_DIM = 100
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
# Phonetic encoder used to add soundex codes as extra features.
soundexer = Soundex()
def load_language_maps(mapfile):
    """Load per-text language annotations from a tab-separated file.

    Each line of *mapfile* must contain three tab-separated fields:
    the text, its detected language tag, and a confidence score.

    Parameters
    ----------
    mapfile : str
        Path to the TSV annotation file.

    Returns
    -------
    dict
        Maps each text to a ``(language, confidence)`` tuple, with the
        confidence parsed as ``float``.
    """
    lmap = {}
    # The corpus is Tamil/Malayalam code-mixed text: force UTF-8 instead of
    # relying on the platform's locale-dependent default encoding.
    with open(mapfile, 'r', encoding='utf-8') as mapf:
        for line in mapf:
            # Strip only the newline so a trailing tab-delimited field is
            # never silently swallowed by a bare rstrip().
            text, lang_tag, conf = line.rstrip('\n').split('\t')
            lmap[text] = (lang_tag, float(conf))
    return lmap
def get_language_tag(text):
    """Return the ``(language, confidence)`` pair recorded for *text*.

    Falls back to ``('unknown', 0.0)`` when *text* has no annotation in the
    module-level ``lmap``.
    """
    fallback = ('unknown', 0.0)
    return lmap.get(text, fallback)
def append_language_tag(text):
    """Append language-agreement features to *text*.

    Compares the externally detected language of *text* (looked up via
    ``get_language_tag``) with the target language ``lang`` and encodes the
    agreement as 1 (match), 0.5 (weak disagreement, confidence < 0.5) or
    0 (confident disagreement).
    """
    predicted, confidence = get_language_tag(text)
    if predicted in (lang, lang + 'en'):
        # The external detector agrees (plain or code-mixed tag).
        agreement = 1
    else:
        # Disagreement: weak when the detector itself is unsure.
        agreement = 0.5 if confidence < 0.5 else 0
    return ' '.join((' ', text, predicted, lang, str(agreement), ' '))
def append_emoji_sentiment(text):
    """Append the emojis found in *text* and their sentiment label as features."""
    emojis, sentiment = get_emojis_from_text(text)
    parts = (' ', text, str(emojis), sentiment, ' ')
    return ' '.join(parts)
def append_soundex(text):
    """Transliterate romanised text into the native script and append soundex codes.

    For Tamil or Malayalam the text is first transliterated with the
    corresponding module-level transliterator; a soundex code is then
    appended for every whitespace-separated token.
    """
    if lang == 'ta':
        text = ta_trans.transform(text)
    elif lang == 'ml':
        text = ml_trans.transform(text)
    codes = ' '.join(soundexer.soundex(token) for token in text.split())
    return f' {text} {codes} '
def append_doc_len_range(text):
    """Append a bucketed document-length tag (from ``get_doc_len_range``) to *text*."""
    return ' {} '.format(get_doc_len_range(text))
def load_data(df, mode, lb = None):
    """Featurize a dataframe and return padded sequences plus labels.

    Appends emoji-sentiment, language-tag, soundex and length features to
    each text, tokenizes, and pads to ``MAX_SEQUENCE_LENGTH``.

    Parameters: ``df`` must have a ``text`` column, plus ``category``
    (train/test modes) or ``id`` (pred mode).  ``lb`` is an optional
    already-fitted ``LabelBinarizer``; when ``None`` a new one is fitted.
    Returns ``(X, Y, lb)`` where ``Y`` holds binarized labels, or the raw
    ids when ``mode == 'pred'``.

    NOTE(review): the module-level ``tokenizer`` is re-fitted on every call,
    so test/prediction vocabulary leaks into the tokenizer and sequence ids
    can shift between calls — consider fitting only in 'train' mode.
    """
    df.info()
    df = df.reset_index(drop=True)
    # Feature augmentation: each helper appends extra tokens to the text.
    df['text'] = df['text'].apply(append_emoji_sentiment)
    df['text'] = df['text'].apply(append_language_tag)
    df['text'] = df['text'].apply(append_soundex)
    df['text'] = df['text'].apply(append_doc_len_range)
    # Unicode-normalize before fitting the shared tokenizer.
    tokenizer.fit_on_texts([normalizer.normalize (text) for text in df.text.values])
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    X = tokenizer.texts_to_sequences(df.text.values)
    X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
    print('Shape of data tensor:', X.shape)
    if mode == 'pred':
        # Prediction mode: no labels available, return row ids instead.
        Y = df.id.values
    else:
        print(df.category.value_counts())
        if lb is None:
            lb = LabelBinarizer()
            Y = lb.fit_transform(df.category.values.reshape(-1, 1))
        else:
            # Reuse the binarizer fitted on the training labels.
            Y = lb.transform(df.category.values.reshape(-1, 1))
        print('Shape of label tensor:', Y.shape)
    return (X, Y, lb)
# CLI: <lang> <train.tsv> <test.tsv> <predict.tsv> <output.tsv>
lang, train_file, test_file, predict_file, outfile = sys.argv[1:6]
normalizer = BaseNormalizer(lang)
# Pre-computed per-text language detections (text \t lang \t confidence).
lmap = load_language_maps('../../resources/data/alltextslang.txt')
#train_file = '../../resources/data/tamil_train.tsv'
train_df = pd.read_csv(train_file, sep='\t')
X_train, Y_train, lb = load_data(train_df, 'train')
#test_file = '../../resources/data/tamil_dev.tsv'
test_df = pd.read_csv(test_file, sep='\t')
# Reuse the LabelBinarizer fitted on the training labels.
X_test, Y_test, lb = load_data(test_df, 'test', lb)
# X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)
# Per-language architectures: identical layout, language-tuned dropout/epochs.
if lang == 'ta':
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
    model.add(SpatialDropout1D(0.8))
    model.add(LSTM(100, dropout=0.7, recurrent_dropout=0.5))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
    epochs = 15
    batch_size = 64
if lang == 'ml':
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
    model.add(SpatialDropout1D(0.5))
    #model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3, return_sequences=True))
    model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
    epochs = 10
    batch_size = 64
# Train with early stopping on validation loss (10% validation split).
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
# accr = model.evaluate(X_test,Y_test)
# print('Test set\n  Loss: {:0.3f}\n  Accuracy: {:0.3f}'.format(accr[0],accr[1]))
Y_test_idx = np.argmax(Y_test, axis=1) # Convert one-hot to index
Y_pred = model.predict_classes(X_test)
print(classification_report(Y_test_idx, Y_pred))
# Sanity-check prediction on a single hand-written example.
new_review = ['Thalaiva superstar Rajinikanth number one mass Hero']
seq = tokenizer.texts_to_sequences(new_review)
padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
pred = model.predict(padded)
print(pred, lb.inverse_transform(pred))
# Batch-predict the unlabeled file and write id/text/label rows as TSV.
with open(outfile, 'w') as outf:
    test_df = pd.read_csv(predict_file, sep='\t')
    X_pred, ID_pred, lb = load_data(test_df, 'pred', lb)
    Y_pred = lb.inverse_transform(model.predict(X_pred)).flatten()
    outf.write('id\ttext\tlabel\n')
    for idx, text, pred_category in zip(ID_pred, test_df.text.values, Y_pred):
        #print(idx, text, pred_category)
        outf.write('\t'.join((idx, text, pred_category)) + '\n')
"sklearn.preprocessing.LabelBinarizer",
"numpy.argmax",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"keras.Sequential",
"sklearn.metrics.classification_report",
"indicnlp.normalize.indic_normalize.BaseNormalizer",
"lib.feature_utils.get_doc_len_range",
"os.path.dirname",
"kera... | [((1125, 1186), 'indictrans.Transliterator', 'Transliterator', ([], {'source': '"""eng"""', 'target': '"""tam"""', 'build_lookup': '(True)'}), "(source='eng', target='tam', build_lookup=True)\n", (1139, 1186), False, 'from indictrans import Transliterator\n'), ((1198, 1259), 'indictrans.Transliterator', 'Transliterator', ([], {'source': '"""eng"""', 'target': '"""mal"""', 'build_lookup': '(True)'}), "(source='eng', target='mal', build_lookup=True)\n", (1212, 1259), False, 'from indictrans import Transliterator\n'), ((1452, 1546), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'MAX_NB_WORDS', 'filters': '"""!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~"""', 'lower': '(True)'}), '(num_words=MAX_NB_WORDS, filters=\n \'!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\', lower=True)\n', (1461, 1546), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1553, 1562), 'libindic.soundex.Soundex', 'Soundex', ([], {}), '()\n', (1560, 1562), False, 'from libindic.soundex import Soundex\n'), ((3934, 3954), 'indicnlp.normalize.indic_normalize.BaseNormalizer', 'BaseNormalizer', (['lang'], {}), '(lang)\n', (3948, 3954), False, 'from indicnlp.normalize.indic_normalize import BaseNormalizer\n'), ((4086, 4119), 'pandas.read_csv', 'pd.read_csv', (['train_file'], {'sep': '"""\t"""'}), "(train_file, sep='\\t')\n", (4097, 4119), True, 'import pandas as pd\n'), ((4232, 4264), 'pandas.read_csv', 'pd.read_csv', (['test_file'], {'sep': '"""\t"""'}), "(test_file, sep='\\t')\n", (4243, 4264), True, 'import pandas as pd\n'), ((5714, 5739), 'numpy.argmax', 'np.argmax', (['Y_test'], {'axis': '(1)'}), '(Y_test, axis=1)\n', (5723, 5739), True, 'import numpy as np\n'), ((5982, 6028), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['seq'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(seq, maxlen=MAX_SEQUENCE_LENGTH)\n', (5995, 6028), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2387, 2413), 'lib.feature_utils.get_emojis_from_text', 
'get_emojis_from_text', (['text'], {}), '(text)\n', (2407, 2413), False, 'from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range\n'), ((3381, 3425), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(X, maxlen=MAX_SEQUENCE_LENGTH)\n', (3394, 3425), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4512, 4524), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (4522, 4524), False, 'from keras import Sequential\n'), ((4930, 4942), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (4940, 4942), False, 'from keras import Sequential\n'), ((5812, 5853), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test_idx', 'Y_pred'], {}), '(Y_test_idx, Y_pred)\n', (5833, 5853), False, 'from sklearn.metrics import classification_report\n'), ((6146, 6181), 'pandas.read_csv', 'pd.read_csv', (['predict_file'], {'sep': '"""\t"""'}), "(predict_file, sep='\\t')\n", (6157, 6181), True, 'import pandas as pd\n'), ((833, 861), 'os.path.dirname', 'os.path.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (848, 861), False, 'import sys, os\n'), ((4539, 4608), 'keras.layers.Embedding', 'Embedding', (['MAX_NB_WORDS', 'EMBEDDING_DIM'], {'input_length': 'X_train.shape[1]'}), '(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1])\n', (4548, 4608), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((4624, 4645), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.8)'], {}), '(0.8)\n', (4640, 4645), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((4661, 4706), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'dropout': '(0.7)', 'recurrent_dropout': '(0.5)'}), '(100, dropout=0.7, recurrent_dropout=0.5)\n', (4665, 4706), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((4722, 4752), 'keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""softmax"""'}), "(5, 
activation='softmax')\n", (4727, 4752), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((4957, 5026), 'keras.layers.Embedding', 'Embedding', (['MAX_NB_WORDS', 'EMBEDDING_DIM'], {'input_length': 'X_train.shape[1]'}), '(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1])\n', (4966, 5026), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((5042, 5063), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.5)'], {}), '(0.5)\n', (5058, 5063), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((5164, 5209), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'dropout': '(0.3)', 'recurrent_dropout': '(0.3)'}), '(100, dropout=0.3, recurrent_dropout=0.3)\n', (5168, 5209), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((5225, 5255), 'keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""softmax"""'}), "(5, activation='softmax')\n", (5230, 5255), False, 'from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense\n'), ((2806, 2829), 'lib.feature_utils.get_doc_len_range', 'get_doc_len_range', (['text'], {}), '(text)\n', (2823, 2829), False, 'from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range\n'), ((3619, 3635), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (3633, 3635), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((4815, 4841), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (4819, 4841), False, 'from keras.optimizers import Adam\n'), ((5318, 5344), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (5322, 5344), False, 'from keras.optimizers import Adam\n'), ((5512, 5575), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(3)', 'min_delta': '(0.0001)'}), "(monitor='val_loss', patience=3, min_delta=0.0001)\n", (5525, 
5575), False, 'from keras.callbacks import EarlyStopping\n')] |
"""Filters module with a class to manage filters/algorithms for polydata datasets."""
import collections.abc
import logging
import numpy as np
import pyvista
from pyvista import (
abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array
)
from pyvista.core.errors import NotAllTrianglesError
from pyvista.core.filters import _get_output, _update_alg
from pyvista.core.filters.data_set import DataSetFilters
@abstract_class
class PolyDataFilters(DataSetFilters):
"""An internal class to manage filters/algorithms for polydata datasets."""
def edge_mask(poly_data, angle):
"""Return a mask of the points of a surface mesh that has a surface angle greater than angle.
Parameters
----------
angle : float
Angle to consider an edge.
"""
if not isinstance(poly_data, pyvista.PolyData): # pragma: no cover
poly_data = pyvista.PolyData(poly_data)
poly_data.point_arrays['point_ind'] = np.arange(poly_data.n_points)
featureEdges = _vtk.vtkFeatureEdges()
featureEdges.SetInputData(poly_data)
featureEdges.FeatureEdgesOn()
featureEdges.BoundaryEdgesOff()
featureEdges.NonManifoldEdgesOff()
featureEdges.ManifoldEdgesOff()
featureEdges.SetFeatureAngle(angle)
featureEdges.Update()
edges = _get_output(featureEdges)
orig_id = pyvista.point_array(edges, 'point_ind')
return np.in1d(poly_data.point_arrays['point_ind'], orig_id,
assume_unique=True)
def boolean_cut(poly_data, cut, tolerance=1E-5, inplace=False):
"""Perform a Boolean cut using another mesh.
Parameters
----------
cut : pyvista.PolyData
Mesh making the cut
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
The cut mesh.
"""
if not isinstance(cut, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
if not poly_data.is_all_triangles() or not cut.is_all_triangles():
raise NotAllTrianglesError("Make sure both the input and output are triangulated.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToIntersection()
# bfilter.SetOperationToDifference()
bfilter.SetInputData(1, cut)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.SetTolerance(tolerance)
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def boolean_add(poly_data, mesh, inplace=False):
"""Add a mesh to the current mesh.
Does not attempt to "join" the meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to add.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
joinedmesh : pyvista.PolyData
The joined mesh.
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
vtkappend = _vtk.vtkAppendPolyData()
vtkappend.AddInputData(poly_data)
vtkappend.AddInputData(mesh)
vtkappend.Update()
mesh = _get_output(vtkappend)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def __add__(poly_data, mesh):
"""Merge these two meshes."""
if not isinstance(mesh, _vtk.vtkPolyData):
return DataSetFilters.__add__(poly_data, mesh)
return PolyDataFilters.boolean_add(poly_data, mesh)
def boolean_union(poly_data, mesh, inplace=False):
"""Combine two meshes and attempts to create a manifold mesh.
Parameters
----------
mesh : pyvista.PolyData
The mesh to perform a union against.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
union : pyvista.PolyData
The union mesh.
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToUnion()
bfilter.SetInputData(1, mesh)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def boolean_difference(poly_data, mesh, inplace=False):
"""Combine two meshes and retains only the volume in common between the meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to perform a union against.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
union : pyvista.PolyData
The union mesh.
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToDifference()
bfilter.SetInputData(1, mesh)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def intersection(poly_data, mesh, split_first=True, split_second=True):
"""Compute the intersection between two meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to intersect with.
split_first : bool, optional
If `True`, return the first input mesh split by the intersection with the
second input mesh.
split_second : bool, optional
If `True`, return the second input mesh split by the intersection with the
first input mesh.
Returns
-------
intersection: pyvista.PolyData
The intersection line.
first_split: pyvista.PolyData
The first mesh split along the intersection. Returns the original first mesh
if `split_first` is False.
second_split: pyvista.PolyData
The second mesh split along the intersection. Returns the original second mesh
if `split_second` is False.
Examples
--------
Intersect two spheres, returning the intersection and both spheres
which have new points/cells along the intersection line.
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> intersection, s1_split, s2_split = s1.intersection(s2)
The mesh splitting takes additional time and can be turned
off for either mesh individually.
>>> intersection, _, s2_split = s1.intersection(s2, \
split_first=False, \
split_second=True)
"""
intfilter = _vtk.vtkIntersectionPolyDataFilter()
intfilter.SetInputDataObject(0, poly_data)
intfilter.SetInputDataObject(1, mesh)
intfilter.SetComputeIntersectionPointArray(True)
intfilter.SetSplitFirstOutput(split_first)
intfilter.SetSplitSecondOutput(split_second)
intfilter.Update()
intersection = _get_output(intfilter, oport=0)
first = _get_output(intfilter, oport=1)
second = _get_output(intfilter, oport=2)
return intersection, first, second
def curvature(poly_data, curv_type='mean'):
    """Return the pointwise curvature of a mesh.

    Parameters
    ----------
    mesh : vtk.polydata
        vtk polydata mesh
    curv_type : str, optional
        Curvature measure to compute; one of ``'Mean'``,
        ``'Gaussian'``, ``'Maximum'`` or ``'Minimum'``
        (case-insensitive).

    Returns
    -------
    curvature : np.ndarray
        Curvature value per point.
    """
    curv_type = curv_type.lower()
    alg = _vtk.vtkCurvatures()
    alg.SetInputData(poly_data)
    # Map the requested curvature type onto the filter's mode setters.
    mode_setters = {
        'mean': alg.SetCurvatureTypeToMean,
        'gaussian': alg.SetCurvatureTypeToGaussian,
        'maximum': alg.SetCurvatureTypeToMaximum,
        'minimum': alg.SetCurvatureTypeToMinimum,
    }
    if curv_type not in mode_setters:
        raise ValueError('Curv_Type must be either "Mean", '
                         '"Gaussian", "Maximum", or "Minimum"')
    mode_setters[curv_type]()
    alg.Update()
    # Extract the computed scalars as a numpy array.
    output = _get_output(alg)
    return _vtk.vtk_to_numpy(output.GetPointData().GetScalars())
def plot_curvature(poly_data, curv_type='mean', **kwargs):
    """Plot the pointwise curvature of the mesh.

    Parameters
    ----------
    curv_type : str, optional
        Curvature measure to plot; one of ``'Mean'``, ``'Gaussian'``,
        ``'Maximum'`` or ``'Minimum'``.
    **kwargs : optional
        See :func:`pyvista.plot`

    Returns
    -------
    cpos : list
        List of camera position, focal point, and view up.

    Examples
    --------
    Plot the mean curvature of an example mesh.

    >>> from pyvista import examples
    >>> hills = examples.load_random_hills()
    >>> cpos = hills.plot_curvature(smooth_shading=True)
    """
    # Label the scalar bar after the curvature type unless the caller
    # supplied their own scalar bar arguments.
    kwargs.setdefault('scalar_bar_args',
                      {'title': f'{curv_type.capitalize()} Curvature'})
    values = poly_data.curvature(curv_type)
    return poly_data.plot(scalars=values, **kwargs)
def triangulate(poly_data, inplace=False):
    """Return a mesh containing only triangles.

    More complex polygons are broken down into triangles.

    Parameters
    ----------
    inplace : bool, optional
        Updates mesh in-place.

    Returns
    -------
    mesh : pyvista.PolyData
        Mesh containing only triangles.
    """
    alg = _vtk.vtkTriangleFilter()
    alg.SetInputData(poly_data)
    # Drop degenerate vertex and line cells from the output.
    alg.PassVertsOff()
    alg.PassLinesOff()
    alg.Update()
    result = _get_output(alg)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def smooth(poly_data, n_iter=20, relaxation_factor=0.01, convergence=0.0,
           edge_angle=15, feature_angle=45,
           boundary_smoothing=True, feature_smoothing=False, inplace=False):
    """Apply Laplacian smoothing to the mesh points.

    Smoothing "relaxes" the mesh so cells are better shaped and points
    are more evenly distributed.

    Parameters
    ----------
    n_iter : int
        Number of smoothing iterations.
    relaxation_factor : float, optional
        Displacement per iteration.  A lower relaxation factor with
        more iterations is generally more numerically stable.
    convergence : float, optional
        Convergence criterion, in (0, 1).  Smaller values result in
        more smoothing iterations.
    edge_angle : float, optional
        Angle controlling smoothing along (interior or boundary) edges.
    feature_angle : float, optional
        Angle used to identify sharp feature edges.
    boundary_smoothing : bool, optional
        Whether boundary edges are smoothed.
    feature_smoothing : bool, optional
        Whether feature edges are smoothed.
    inplace : bool, optional
        Updates mesh in-place.

    Returns
    -------
    mesh : pyvista.PolyData
        Smoothed mesh.

    Examples
    --------
    Smooth the edges of an all triangular cube

    >>> import pyvista as pv
    >>> cube = pv.Cube().triangulate().subdivide(5).clean()
    >>> smooth_cube = cube.smooth(1000, feature_smoothing=False)
    >>> n_edge_cells = cube.extract_feature_edges().n_cells
    >>> n_smooth_cells = smooth_cube.extract_feature_edges().n_cells
    >>> print(f'Sharp Edges on Cube: {n_edge_cells}')
    Sharp Edges on Cube: 384
    >>> print(f'Sharp Edges on Smooth Cube: {n_smooth_cells}')
    Sharp Edges on Smooth Cube: 12
    """
    smoother = _vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputData(poly_data)
    smoother.SetNumberOfIterations(n_iter)
    smoother.SetConvergence(convergence)
    smoother.SetFeatureEdgeSmoothing(feature_smoothing)
    smoother.SetFeatureAngle(feature_angle)
    smoother.SetEdgeAngle(edge_angle)
    smoother.SetBoundarySmoothing(boundary_smoothing)
    smoother.SetRelaxationFactor(relaxation_factor)
    smoother.Update()
    result = _get_output(smoother)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def decimate_pro(poly_data, reduction, feature_angle=45.0, split_angle=75.0, splitting=True,
                 pre_split_mesh=False, preserve_topology=False, inplace=False):
    """Reduce the number of triangles in a triangular mesh.

    Produces a good approximation of the original geometry.  Based on
    the algorithm described in "Decimation of Triangle Meshes",
    Proc Siggraph 92.

    Parameters
    ----------
    reduction : float
        Reduction factor.  A value of 0.9 keeps only 10 % of the
        original number of vertices.
    feature_angle : float, optional
        Angle defining an edge: an edge exists where the surface
        normal between two adjacent triangles is >= ``feature_angle``.
    split_angle : float, optional
        Angle controlling mesh splitting: a split line exists where
        the surface normals between two edge-connected triangles
        are >= ``split_angle``.
    splitting : bool, optional
        Allow splitting the mesh at corners, along edges, at
        non-manifold points, or wherever else a split is required.
        Turning splitting off better preserves the original topology
        but may not achieve the exact requested decimation.
    pre_split_mesh : bool, optional
        Separate the mesh into disconnected semi-planar patches before
        decimating, which can give superior results in some cases.
        When ``True`` the mesh is split using ``split_angle``;
        otherwise splitting is deferred as long as possible.
    preserve_topology : bool, optional
        If on, mesh splitting and hole elimination are disabled,
        which may limit the maximum achievable reduction.
    inplace : bool, optional
        Updates mesh in-place.

    Returns
    -------
    mesh : pyvista.PolyData
        Decimated mesh.
    """
    decimator = _vtk.vtkDecimatePro()
    decimator.SetInputData(poly_data)
    decimator.SetTargetReduction(reduction)
    decimator.SetPreserveTopology(preserve_topology)
    decimator.SetFeatureAngle(feature_angle)
    decimator.SetSplitting(splitting)
    decimator.SetSplitAngle(split_angle)
    decimator.SetPreSplitMesh(pre_split_mesh)
    decimator.Update()
    result = _get_output(decimator)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def tube(poly_data, radius=None, scalars=None, capping=True, n_sides=20,
         radius_factor=10, preference='point', inplace=False):
    """Generate a tube around each input line.

    The tube radius can be made to vary linearly with a scalar value.

    Parameters
    ----------
    radius : float
        Minimum tube radius (minimum because the radius may vary).
    scalars : str, optional
        Name of the scalars array by which the radius varies.
    capping : bool, optional
        Cap the tube ends with polygons.  Default ``True``.
    n_sides : int, optional
        Number of sides for the tube; clamped to a minimum of 3.
    radius_factor : float, optional
        Maximum tube radius as a multiple of the minimum radius.
    preference : str, optional
        Field preference when looking up the scalars array by name.
    inplace : bool, optional
        Updates mesh in-place.

    Returns
    -------
    mesh : pyvista.PolyData
        Tube-filtered mesh.

    Examples
    --------
    Convert a single line to a tube

    >>> import pyvista as pv
    >>> line = pv.Line()
    >>> tube = line.tube(radius=0.02)
    >>> print('Line Cells:', line.n_cells)
    Line Cells: 1
    >>> print('Tube Cells:', tube.n_cells)
    Tube Cells: 22
    """
    if not isinstance(poly_data, pyvista.PolyData):
        poly_data = pyvista.PolyData(poly_data)
    # The tube filter requires at least 3 sides.
    n_sides = max(n_sides, 3)
    alg = _vtk.vtkTubeFilter()
    alg.SetInputDataObject(poly_data)
    # User-defined parameters
    alg.SetCapping(capping)
    if radius is not None:
        alg.SetRadius(radius)
    alg.SetNumberOfSides(n_sides)
    alg.SetRadiusFactor(radius_factor)
    # Vary the radius by the named scalars array, if requested.
    if scalars is not None:
        if not isinstance(scalars, str):
            raise TypeError('scalars array must be given as a string name')
        _, field = poly_data.get_array(scalars, preference=preference, info=True)
        # args: (idx, port, connection, field, name)
        alg.SetInputArrayToProcess(0, 0, 0, field.value, scalars)
        alg.SetVaryRadiusToVaryRadiusByScalar()
    # Apply the filter
    alg.Update()
    result = _get_output(alg)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def subdivide(poly_data, nsub, subfilter='linear', inplace=False):
    """Increase the number of triangles in a single, connected triangular mesh.

    Uses one of the following vtk subdivision filters:

    * ``vtkLinearSubdivisionFilter``
    * ``vtkButterflySubdivisionFilter``
    * ``vtkLoopSubdivisionFilter``

    Linear subdivision is the fastest: it splits each triangle into 4
    smaller triangles without smoothing edges.  Butterfly and loop
    subdivision smooth while dividing and may introduce artifacts.

    The subdivision filter appears to fail for multiple part meshes;
    the input should be one single mesh.

    Parameters
    ----------
    nsub : int
        Number of subdivisions.  Each subdivision creates 4 new
        triangles, giving ``nface*4**nsub`` resulting triangles where
        ``nface`` is the current number of faces.
    subfilter : string, optional
        One of ``'butterfly'``, ``'loop'`` or ``'linear'``.
    inplace : bool, optional
        Updates mesh in-place. Default ``False``.

    Returns
    -------
    mesh : Polydata object
        ``pyvista`` polydata object.

    Examples
    --------
    >>> from pyvista import examples
    >>> import pyvista
    >>> mesh = pyvista.PolyData(examples.planefile)
    >>> submesh = mesh.subdivide(1, 'loop')

    Alternatively, update the mesh in-place.

    >>> submesh = mesh.subdivide(1, 'loop', inplace=True)
    """
    subfilter = subfilter.lower()
    # Pick the VTK subdivision class matching the requested scheme.
    filter_classes = {
        'linear': _vtk.vtkLinearSubdivisionFilter,
        'butterfly': _vtk.vtkButterflySubdivisionFilter,
        'loop': _vtk.vtkLoopSubdivisionFilter,
    }
    if subfilter not in filter_classes:
        raise ValueError("Subdivision filter must be one of the following: "
                         "'butterfly', 'loop', or 'linear'")
    alg = filter_classes[subfilter]()
    # Subdivide
    alg.SetNumberOfSubdivisions(nsub)
    alg.SetInputData(poly_data)
    alg.Update()
    result = _get_output(alg)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def subdivide_adaptive(poly_data, max_edge_len=None, max_tri_area=None,
                       max_n_tris=None, max_n_passes=None, inplace=False):
    """Increase the number of triangles based on edge and/or area metrics.

    A simple case-based, multi-pass approach repeatedly subdivides the
    input triangle mesh until the area and/or edge-length criteria are
    met.  New points are inserted only on edges; two to four triangles
    replace each subdivided one depending on how many of its edges are
    split.

    Cell data from a parent triangle is assigned to its subdivided
    children; point data is interpolated along subdivided edges.

    Watertightness of the input mesh is retained as long as the area
    and max-triangles criteria are not used.

    Parameters
    ----------
    max_edge_len : float, optional
        Maximum allowed triangle edge length.  Longer edges are split
        in half and the adjacent triangles modified accordingly.
    max_tri_area : float, optional
        Maximum allowed triangle area.  Larger triangles are
        subdivided to meet this threshold.  Using this criterion may
        produce non-watertight meshes.
    max_n_tris : int, optional
        Maximum number of triangles that can be created.  Hitting the
        limit may terminate the algorithm prematurely with
        less-than-satisfactory (e.g. non-watertight) results.  The
        default limit is effectively unbounded.
    max_n_passes : int, optional
        Maximum number of passes (levels of subdivision).  Hitting the
        limit stops subdivision and aborts any remaining passes.  The
        default limit is effectively unbounded.
    inplace : bool, optional
        Updates mesh in-place.

    Returns
    -------
    :class:`pyvista.PolyData`
        Subdivided mesh

    Examples
    --------
    >>> from pyvista import examples
    >>> import pyvista
    >>> mesh = pyvista.PolyData(examples.planefile)
    >>> submesh = mesh.subdivide_adaptive(max_n_passes=2)

    Alternatively, update the mesh in-place.

    >>> submesh = mesh.subdivide_adaptive(max_n_passes=2, inplace=True)
    """
    alg = _vtk.vtkAdaptiveSubdivisionFilter()
    # Only override the filter's (effectively unlimited) defaults for
    # criteria the caller actually supplied.
    if max_edge_len:
        alg.SetMaximumEdgeLength(max_edge_len)
    if max_tri_area:
        alg.SetMaximumTriangleArea(max_tri_area)
    if max_n_tris:
        alg.SetMaximumNumberOfTriangles(max_n_tris)
    if max_n_passes:
        alg.SetMaximumNumberOfPasses(max_n_passes)
    alg.SetInputData(poly_data)
    alg.Update()
    result = _get_output(alg)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def decimate(poly_data, target_reduction, volume_preservation=False,
             attribute_error=False, scalars=True, vectors=True,
             normals=False, tcoords=True, tensors=True, scalars_weight=0.1,
             vectors_weight=0.1, normals_weight=0.1, tcoords_weight=0.1,
             tensors_weight=0.1, inplace=False, progress_bar=False):
    """Reduce the number of triangles in a triangular mesh using vtkQuadricDecimation.

    Parameters
    ----------
    mesh : vtk.PolyData
        Mesh to decimate
    target_reduction : float
        Fraction of the original mesh to remove.  With a value of 0.9
        the filter tries to reduce the data set to 10% of its original
        size, removing 90% of the input triangles.
    volume_preservation : bool, optional
        Activate volume preservation, which greatly reduces errors in
        triangle normal direction.  If off, and AttributeErrorMetric
        is active, these errors can be large.  Defaults to False.
    attribute_error : bool, optional
        Include data attributes in the error metric.  If off, only
        geometric error controls the decimation.  Defaults to False.
    scalars : bool, optional
        When attribute errors are included in the metric (i.e.
        AttributeErrorMetric is on), controls whether scalar
        attributes enter the error calculation.  Defaults to True.
    vectors : bool, optional
        See scalars parameter. Defaults to True.
    normals : bool, optional
        See scalars parameter. Defaults to False.
    tcoords : bool, optional
        See scalars parameter. Defaults to True.
    tensors : bool, optional
        See scalars parameter. Defaults to True.
    scalars_weight : float, optional
        Scaling weight of the scalar attribute's contribution towards
        the error metric.  Defaults to 0.1.
    vectors_weight : float, optional
        See scalars weight parameter. Defaults to 0.1.
    normals_weight : float, optional
        See scalars weight parameter. Defaults to 0.1.
    tcoords_weight : float, optional
        See scalars weight parameter. Defaults to 0.1.
    tensors_weight : float, optional
        See scalars weight parameter. Defaults to 0.1.
    inplace : bool, optional
        Updates mesh in-place.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Returns
    -------
    outmesh : pyvista.PolyData
        Decimated mesh.

    Examples
    --------
    Decimate a sphere while preserving its volume

    >>> import pyvista as pv
    >>> sphere = pv.Sphere(theta_resolution=90, phi_resolution=90)
    >>> print(sphere.n_cells)
    15840
    >>> dec_sphere = sphere.decimate(0.9, volume_preservation=True)
    >>> print(dec_sphere.n_cells)
    1584

    Notes
    -----
    If you encounter a segmentation fault or other error, consider
    using ``clean`` to remove any invalid cells before using this
    filter.
    """
    # Create the decimation filter (vtkDecimatePro is an alternative).
    decimator = _vtk.vtkQuadricDecimation()
    decimator.SetVolumePreservation(volume_preservation)
    decimator.SetAttributeErrorMetric(attribute_error)
    decimator.SetScalarsAttribute(scalars)
    decimator.SetVectorsAttribute(vectors)
    decimator.SetNormalsAttribute(normals)
    decimator.SetTCoordsAttribute(tcoords)
    decimator.SetTensorsAttribute(tensors)
    decimator.SetScalarsWeight(scalars_weight)
    decimator.SetVectorsWeight(vectors_weight)
    decimator.SetNormalsWeight(normals_weight)
    decimator.SetTCoordsWeight(tcoords_weight)
    decimator.SetTensorsWeight(tensors_weight)
    decimator.SetTargetReduction(target_reduction)
    decimator.SetInputData(poly_data)
    _update_alg(decimator, progress_bar, 'Decimating')
    result = _get_output(decimator)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def compute_normals(poly_data, cell_normals=True,
                    point_normals=True, split_vertices=False,
                    flip_normals=False, consistent_normals=True,
                    auto_orient_normals=False,
                    non_manifold_traversal=True,
                    feature_angle=30.0, inplace=False):
    """Compute point and/or cell normals for a mesh.

    The filter can reorder polygons to ensure consistent orientation
    across polygon neighbors.  Sharp edges can be split and points
    duplicated with separate normals to give crisp (rendered) surface
    definition.  The normal orientation can also be flipped globally.

    The algorithm determines a normal for each polygon and averages
    them at shared points.  When sharp edges are present, the edges
    are split and new points generated to prevent blurry edges (due
    to Gouraud shading).

    Parameters
    ----------
    cell_normals : bool, optional
        Calculation of cell normals. Defaults to ``True``.
    point_normals : bool, optional
        Calculation of point normals. Defaults to ``True``.
    split_vertices : bool, optional
        Splitting of sharp edges. Defaults to ``False``.
    flip_normals : bool, optional
        Global flipping of normal orientation.  Flipping modifies both
        the normal direction and the order of a cell's points.
        Defaults to ``False``.
    consistent_normals : bool, optional
        Enforcement of consistent polygon ordering. Defaults to ``True``.
    auto_orient_normals : bool, optional
        Automatic determination of correct normal orientation.
        NOTE: this assumes a completely closed surface (no boundary
        edges) and no non-manifold edges; if these constraints do not
        hold, all bets are off.  Adds some computational complexity,
        and is useful if you do not want to inspect the rendered image
        to decide whether to set ``flip_normals``.  If both this and
        ``flip_normals`` are set, all output normals point "inward".
        Defaults to ``False``.
    non_manifold_traversal : bool, optional
        Traversal across non-manifold edges.  Changing this may
        prevent problems where the consistency of polygonal ordering
        is corrupted due to topological loops. Defaults to ``True``.
    feature_angle : float, optional
        Angle defining a sharp edge: a shared edge is considered
        "sharp" when the angle difference across neighboring polygons
        exceeds this value.  Defaults to 30.0.
    inplace : bool, optional
        Updates mesh in-place. Defaults to ``False``.

    Returns
    -------
    mesh : pyvista.PolyData
        Updated mesh with cell and point normals.

    Examples
    --------
    Compute the point normals of the surface of a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> sphere = sphere.compute_normals(cell_normals=False)
    >>> normals = sphere['Normals']
    >>> normals.shape
    (842, 3)

    Alternatively, create a new mesh when computing the normals
    and compute both cell and point normals.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> sphere_with_norm = sphere.compute_normals()
    >>> sphere_with_norm.point_arrays['Normals'].shape
    (842, 3)
    >>> sphere_with_norm.cell_arrays['Normals'].shape
    (1680, 3)

    Notes
    -----
    Previous arrays named "Normals" will be overwritten.

    Normals are computed only for polygons and triangle strips, not
    for lines or vertices.  Triangle strips are broken up into
    triangle polygons; you may want to restrip the triangles.

    May be easier to run ``mesh.point_normals`` or ``mesh.cell_normals``.
    """
    alg = _vtk.vtkPolyDataNormals()
    alg.SetComputeCellNormals(cell_normals)
    alg.SetComputePointNormals(point_normals)
    alg.SetSplitting(split_vertices)
    alg.SetFlipNormals(flip_normals)
    alg.SetConsistency(consistent_normals)
    alg.SetAutoOrientNormals(auto_orient_normals)
    alg.SetNonManifoldTraversal(non_manifold_traversal)
    alg.SetFeatureAngle(feature_angle)
    alg.SetInputData(poly_data)
    alg.Update()
    result = _get_output(alg)
    # Make the freshly computed arrays the active normals.
    if point_normals:
        result.GetPointData().SetActiveNormals('Normals')
    if cell_normals:
        result.GetCellData().SetActiveNormals('Normals')
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def clip_closed_surface(poly_data, normal='x', origin=None,
                        tolerance=1e-06, inplace=False):
    """Clip a closed polydata surface with a plane.

    Currently supports a single plane, though it could be extended to
    a plane collection.  A new closed surface is produced by creating
    polygonal faces where the input data was clipped.

    Non-manifold surfaces should not be used as input.  The input
    surface must have no open edges, no edges shared by more than two
    faces, and must not self-intersect (faces may only touch at their
    edges).

    Parameters
    ----------
    normal : str, list, optional
        Plane normal to clip with.  The plane is centered at
        ``origin``.  Either a 3-member list (e.g. ``[0, 0, 1]``) or
        one of ``'x'``, ``'y'``, ``'z'``, ``'-x'``, ``'-y'``, ``'-z'``.
    origin : list, optional
        Coordinate of the origin (e.g. ``[1, 0, 0]``).  Defaults to
        the center of the mesh.
    tolerance : float, optional
        Tolerance for creating new points while clipping.  Too small a
        tolerance may produce degenerate triangles.
    inplace : bool, optional
        Updates mesh in-place. Defaults to ``False``.

    Returns
    -------
    clipped_mesh : pyvista.PolyData
        The clipped mesh.

    Examples
    --------
    Clip a sphere in the X direction centered at the origin.  This
    leaves behind half a sphere in the positive X direction.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> clipped_mesh = sphere.clip_closed_surface()

    Clip the sphere at the xy plane, leaving half the sphere in the
    positive Z direction.  Shift the clip upwards to leave a smaller
    mesh behind.

    >>> clipped_mesh = sphere.clip_closed_surface('z', origin=[0, 0, 0.3])
    """
    # The filter requires a manifold input.
    if poly_data.n_open_edges > 0:
        raise ValueError("This surface appears to be non-manifold.")
    # Accept axis names as well as explicit normal vectors.
    if isinstance(normal, str):
        normal = NORMALS[normal.lower()]
    # Default the clipping plane to the center of the mesh.
    if origin is None:
        origin = poly_data.center
    # Build the single-plane collection used for clipping.
    plane = generate_plane(normal, origin)
    planes = _vtk.vtkPlaneCollection()
    planes.AddItem(plane)
    alg = _vtk.vtkClipClosedSurface()
    alg.SetGenerateFaces(True)
    alg.SetInputDataObject(poly_data)
    alg.SetTolerance(tolerance)
    alg.SetClippingPlanes(planes)
    alg.Update()  # Perform the Cut
    clipped = _get_output(alg)
    if not inplace:
        return clipped
    poly_data.overwrite(clipped)
    return poly_data
def fill_holes(poly_data, hole_size, inplace=False, progress_bar=False):  # pragma: no cover
    """Fill holes in a pyvista.PolyData or vtk.vtkPolyData object.

    Holes are identified by locating boundary edges, linking them into
    loops, and triangulating the resulting loops.  An approximate
    limit on the size of fillable holes can be specified.

    Parameters
    ----------
    hole_size : float
        Maximum hole size to fill, expressed as the radius of the
        bounding circumsphere containing the hole.  This is an
        approximate measure; the actual area cannot be computed
        without first triangulating the hole.
    inplace : bool, optional
        Return new mesh or overwrite input.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Returns
    -------
    mesh : pyvista.PolyData
        Mesh with holes filled.

    Examples
    --------
    Create a partial sphere with a hole and then fill it

    >>> import pyvista as pv
    >>> sphere_with_hole = pv.Sphere(end_theta=330)
    >>> sphere = sphere_with_hole.fill_holes(1000)
    >>> edges = sphere.extract_feature_edges(feature_edges=False,
    ...                                      manifold_edges=False)
    >>> assert edges.n_cells == 0
    """
    # Warn loudly: the underlying VTK filter is known to crash.
    logging.warning('pyvista.PolyData.fill_holes is known to segfault. '
                    'Use at your own risk')
    filler = _vtk.vtkFillHolesFilter()
    filler.SetHoleSize(hole_size)
    filler.SetInputData(poly_data)
    _update_alg(filler, progress_bar, 'Filling Holes')
    result = _get_output(filler)
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def clean(poly_data, point_merging=True, tolerance=None, lines_to_points=True,
          polys_to_lines=True, strips_to_polys=True, inplace=False,
          absolute=True, progress_bar=False, **kwargs):
    """Clean the mesh.

    Merges duplicate points, removes unused points, and/or removes
    degenerate cells.

    Parameters
    ----------
    point_merging : bool, optional
        Enables point merging. On by default.
    tolerance : float, optional
        Merging tolerance.  When ``absolute`` is ``True`` the
        tolerance is an absolute distance; otherwise it is a fraction
        of the bounding box length.  The alias ``merge_tol`` is also
        accepted.
    lines_to_points : bool, optional
        Conversion of degenerate lines to points.  Enabled by default.
    polys_to_lines : bool, optional
        Conversion of degenerate polys to lines.  Enabled by default.
    strips_to_polys : bool, optional
        Conversion of degenerate strips to polys.
    inplace : bool, optional
        Updates mesh in-place. Default ``False``.
    absolute : bool, optional
        Whether ``tolerance`` is an absolute distance or a fraction.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Returns
    -------
    mesh : pyvista.PolyData
        Cleaned mesh.

    Examples
    --------
    Create a mesh with a degenerate face and then clean it,
    removing the degenerate face

    >>> import pyvista as pv
    >>> points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    >>> faces = np.array([3, 0, 1, 2, 3, 0, 3, 3])
    >>> mesh = pv.PolyData(points, faces)
    >>> mout = mesh.clean()
    >>> print(mout.faces)  # doctest:+SKIP
    [3 0 1 2]
    """
    # Support the legacy 'merge_tol' keyword as an alias for tolerance.
    if tolerance is None:
        tolerance = kwargs.pop('merge_tol', None)
    assert_empty_kwargs(**kwargs)
    cleaner = _vtk.vtkCleanPolyData()
    cleaner.SetPointMerging(point_merging)
    cleaner.SetConvertLinesToPoints(lines_to_points)
    cleaner.SetConvertPolysToLines(polys_to_lines)
    cleaner.SetConvertStripsToPolys(strips_to_polys)
    if isinstance(tolerance, (int, float)):
        if absolute:
            cleaner.ToleranceIsAbsoluteOn()
            cleaner.SetAbsoluteTolerance(tolerance)
        else:
            cleaner.SetTolerance(tolerance)
    cleaner.SetInputData(poly_data)
    _update_alg(cleaner, progress_bar, 'Cleaning')
    output = _get_output(cleaner)
    # Guard against an empty result, which can segfault downstream.
    if output.n_points < 1:
        raise ValueError('Clean tolerance is too high. Empty mesh returned.')
    if not inplace:
        return output
    poly_data.overwrite(output)
    return poly_data
def geodesic(poly_data, start_vertex, end_vertex, inplace=False,
             keep_order=True):
    """Calculate the geodesic path between two vertices using Dijkstra's algorithm.

    Adds an array titled ``'vtkOriginalPointIds'`` with the input
    mesh's point ids to the output mesh.  The underlying
    ``vtkDijkstraGraphGeodesicPath`` filter returns the geodesic path
    reversed; PyVista overrides this by default.

    Parameters
    ----------
    start_vertex : int
        Vertex index indicating the start point of the geodesic segment.
    end_vertex : int
        Vertex index indicating the end point of the geodesic segment.
    inplace : bool, optional
        Whether the input mesh should be replaced with the path.  The
        geodesic path is always returned.
    keep_order : bool, optional
        If ``True``, the points of the returned path are guaranteed
        to start with the start vertex (as opposed to the end vertex).

        .. versionadded:: 0.32.0

    Returns
    -------
    output : pyvista.PolyData
        ``PolyData`` object consisting of the line segment between the
        two given vertices.  If ``inplace`` is ``True`` this is the
        same object as the input mesh.

    Examples
    --------
    Plot the path between two points on a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> path = sphere.geodesic(0, 100)
    >>> pl = pv.Plotter()
    >>> actor = pl.add_mesh(sphere)
    >>> actor = pl.add_mesh(path, line_width=5, color='k')
    >>> cpos = pl.show()
    """
    # Both endpoints must be valid point indices of this mesh.
    if not (0 <= start_vertex < poly_data.n_points and
            0 <= end_vertex < poly_data.n_points):
        raise IndexError('Invalid point indices.')
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError("Input mesh for geodesic path must be all triangles.")

    alg = _vtk.vtkDijkstraGraphGeodesicPath()
    alg.SetInputData(poly_data)
    alg.SetStartVertex(start_vertex)
    alg.SetEndVertex(end_vertex)
    alg.Update()

    original_ids = vtk_id_list_to_array(alg.GetIdList())
    output = _get_output(alg)
    output["vtkOriginalPointIds"] = original_ids

    # Do not copy textures from input
    output.clear_textures()

    # The VTK filter emits the path end-to-start; reverse it when the
    # caller wants the path to begin at start_vertex.
    if keep_order and original_ids[0] == end_vertex:
        output.points[...] = output.points[::-1, :]
        output["vtkOriginalPointIds"] = output["vtkOriginalPointIds"][::-1]

    if not inplace:
        return output
    poly_data.overwrite(output)
    return poly_data
def geodesic_distance(poly_data, start_vertex, end_vertex):
    """Calculate the geodesic distance between two vertices using Dijkstra's algorithm.

    Parameters
    ----------
    start_vertex : int
        Vertex index indicating the start point of the geodesic segment.
    end_vertex : int
        Vertex index indicating the end point of the geodesic segment.

    Returns
    -------
    length : float
        Length of the geodesic segment.

    Examples
    --------
    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> length = sphere.geodesic_distance(0, 100)
    >>> print(f'Length is {length:.3f}')
    Length is 0.812
    """
    # Sum the lengths of the individual cells along the geodesic path.
    path = poly_data.geodesic(start_vertex, end_vertex)
    cell_sizes = path.compute_cell_sizes(length=True, area=False, volume=False)
    total = np.sum(cell_sizes['Length'])
    # Release the intermediate meshes before returning.
    del path
    del cell_sizes
    return total
def ray_trace(poly_data, origin, end_point, first_point=False, plot=False,
              off_screen=False):
    """Perform a single ray trace calculation.

    Intersects the mesh with the line segment defined by ``origin``
    and ``end_point``.

    Parameters
    ----------
    origin : np.ndarray or list
        Start of the line segment.
    end_point : np.ndarray or list
        End of the line segment.
    first_point : bool, optional
        Returns intersection of first point only.
    plot : bool, optional
        Plots ray trace results
    off_screen : bool, optional
        Plots off screen when ``plot=True``. Used for unit testing.

    Returns
    -------
    intersection_points : np.ndarray
        Location of the intersection points. Empty array if no
        intersections.
    intersection_cells : np.ndarray
        Indices of the intersection cells. Empty array if no
        intersections.

    Examples
    --------
    Compute the intersection between a ray from the origin and
    [1, 0, 0] and a sphere with radius 0.5 centered at the origin

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> point, cell = sphere.ray_trace([0, 0, 0], [1, 0, 0], first_point=True)
    >>> print(f'Intersected at {point[0]:.3f} {point[1]:.3f} {point[2]:.3f}')
    Intersected at 0.499 0.000 0.000
    """
    # Collect hits from the mesh's OBB tree.
    hit_points = _vtk.vtkPoints()
    hit_cell_ids = _vtk.vtkIdList()
    poly_data.obbTree.IntersectWithLine(np.array(origin),
                                        np.array(end_point),
                                        hit_points, hit_cell_ids)
    intersection_points = _vtk.vtk_to_numpy(hit_points.GetData())
    # Collapse to a single point when only the first hit is wanted.
    if first_point and intersection_points.shape[0] >= 1:
        intersection_points = intersection_points[0]

    if intersection_points.any():
        n_hits = 1 if first_point else hit_cell_ids.GetNumberOfIds()
        intersection_cells = np.array(
            [hit_cell_ids.GetId(i) for i in range(n_hits)])
    else:
        intersection_cells = np.array([])

    if plot:
        plotter = pyvista.Plotter(off_screen=off_screen)
        plotter.add_mesh(poly_data, label='Test Mesh')
        segment = np.array([origin, end_point])
        plotter.add_lines(segment, 'b', label='Ray Segment')
        plotter.add_mesh(intersection_points, 'r', point_size=10,
                         label='Intersection Points')
        plotter.add_legend()
        plotter.add_axes()
        plotter.show()

    return intersection_points, intersection_cells
def multi_ray_trace(poly_data, origins, directions, first_point=False, retry=False):
    """Perform multiple ray trace calculations.

    This requires a mesh with only triangular faces, an array of
    origin points and an equal sized array of direction vectors to
    trace along.

    The embree library used for vectorisation of the ray traces is known to occasionally
    return no intersections where the VTK implementation would return an intersection.
    If the result appears to be missing some intersection points, set retry=True to run a second pass over rays
    that returned no intersections, using the VTK ray_trace implementation.

    Parameters
    ----------
    origins : np.ndarray or list
        Starting point for each trace.
    directions : np.ndarray or list
        Direction vector for each trace.
    first_point : bool, optional
        Returns intersection of first point only.
    retry : bool, optional
        Will retry rays that return no intersections using the ray_trace

    Returns
    -------
    intersection_points : np.ndarray
        Location of the intersection points.  Empty array if no
        intersections.
    intersection_rays : np.ndarray
        Indices of the ray for each intersection point.  Empty array if no
        intersections.
    intersection_cells : np.ndarray
        Indices of the intersection cells.  Empty array if no
        intersections.

    Examples
    --------
    Compute the intersection between rays from the origin in
    directions ``[1, 0, 0]``, ``[0, 1, 0]`` and ``[0, 0, 1]``, and
    a sphere with radius 0.5 centered at the origin

    >>> import pyvista as pv  # doctest: +SKIP
    ... sphere = pv.Sphere()
    ... points, rays, cells = sphere.multi_ray_trace([[0, 0, 0]]*3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]], first_point=True)
    ... string = ", ".join([f"({point[0]:.3f}, {point[1]:.3f}, {point[2]:.3f})" for point in points])
    ... print(f'Rays intersected at {string}')
    Rays intersected at (0.499, 0.000, 0.000), (0.000, 0.497, 0.000), (0.000, 0.000, 0.500)
    """
    # embree (via trimesh) only supports triangular faces
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError
    # optional third-party dependencies are imported lazily so the rest of
    # the module works without them
    try:
        import trimesh, rtree, pyembree
    except (ModuleNotFoundError, ImportError):
        raise ImportError(
            "To use multi_ray_trace please install trimesh, rtree and pyembree with:\n"
            "\tconda install trimesh rtree pyembree"
        )
    origins = np.asarray(origins)
    directions = np.asarray(directions)
    # faces are stored flat as [3, i0, i1, i2, 3, ...]; drop the leading
    # per-face vertex counts to get an (n_faces, 3) connectivity array
    faces_as_array = poly_data.faces.reshape((poly_data.n_faces, 4))[:, 1:]
    tmesh = trimesh.Trimesh(poly_data.points, faces_as_array)
    # multiple_hits=False returns at most one hit per ray (the first)
    locations, index_ray, index_tri = tmesh.ray.intersects_location(
        origins, directions, multiple_hits=not first_point
    )
    if retry:
        # gather intersecting rays in lists
        loc_lst, ray_lst, tri_lst = [arr.tolist() for arr in [locations, index_ray, index_tri]]

        # find indices that trimesh failed on
        all_ray_indices = np.arange(len(origins))
        retry_ray_indices = np.setdiff1d(all_ray_indices, index_ray, assume_unique=True)

        # compute ray points for all failed rays at once
        origins_retry = origins[retry_ray_indices, :]  # shape (n_retry, 3)
        directions_retry = directions[retry_ray_indices, :]
        unit_directions = directions_retry / np.linalg.norm(directions_retry,
                                                       axis=1, keepdims=True)
        # ray_trace needs a finite segment; the mesh length bounds any hit
        second_points = origins_retry + unit_directions * poly_data.length  # shape (n_retry, 3)

        # fall back to the (slower, per-ray) VTK implementation
        for id_r, origin, second_point in zip(retry_ray_indices, origins_retry, second_points):
            locs, indices = poly_data.ray_trace(origin, second_point, first_point=first_point)
            if locs.any():
                if first_point:
                    # ray_trace returns a flat (3,) point here; keep 2D shape
                    locs = locs.reshape([1, 3])
                ray_lst.extend([id_r] * indices.size)
                tri_lst.extend(indices)
                loc_lst.extend(locs)

        # sort result arrays by ray index
        index_ray = np.array(ray_lst)
        sorting_inds = index_ray.argsort()
        index_ray = index_ray[sorting_inds]
        index_tri = np.array(tri_lst)[sorting_inds]
        locations = np.array(loc_lst)[sorting_inds]

    return locations, index_ray, index_tri
def plot_boundaries(poly_data, edge_color="red", **kwargs):
    """Plot boundaries of a mesh.

    Parameters
    ----------
    edge_color : str, optional
        The color of the edges when they are added to the plotter.
    kwargs : optional
        All additional keyword arguments will be passed to
        :func:`pyvista.BasePlotter.add_mesh`
    """
    # pop plotter-level options so they are not forwarded to add_mesh
    off_screen = kwargs.pop('off_screen', None)
    notebook = kwargs.pop('notebook', None)
    boundary_edges = DataSetFilters.extract_feature_edges(poly_data)

    plotter = pyvista.Plotter(off_screen=off_screen, notebook=notebook)
    plotter.add_mesh(boundary_edges, color=edge_color, style='wireframe',
                     label='Edges')
    plotter.add_mesh(poly_data, label='Mesh', **kwargs)
    plotter.add_legend()
    return plotter.show()
def plot_normals(poly_data, show_mesh=True, mag=1.0, flip=False,
                 use_every=1, **kwargs):
    """Plot the point normals of a mesh.

    Parameters
    ----------
    show_mesh : bool, optional
        Plot the mesh itself.  Defaults to ``True``.
    mag : float, optional
        Size magnitude of the normal arrows.  Defaults to 1.0.
    flip : bool, optional
        Flip the normal direction when ``True``.  Default ``False``.
    use_every : int, optional
        Display every nth normal.  By default every normal is
        displayed.  Display every 10th normal by setting this
        parameter to 10.

    Examples
    --------
    Plot the normals of a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> cpos = sphere.plot_normals(mag=0.1)
    """
    # plotter-level options are stripped from kwargs before add_mesh sees them
    off_screen = kwargs.pop('off_screen', None)
    notebook = kwargs.pop('notebook', None)
    plotter = pyvista.Plotter(off_screen=off_screen, notebook=notebook)
    if show_mesh:
        plotter.add_mesh(poly_data, **kwargs)

    normals = poly_data.point_normals
    if flip:
        normals *= -1  # negate direction of every normal
    # subsample both points and normals with the same stride
    plotter.add_arrows(poly_data.points[::use_every], normals[::use_every],
                       mag=mag, show_scalar_bar=False)
    return plotter.show()
def remove_points(poly_data, remove, mode='any', keep_scalars=True, inplace=False):
    """Rebuild a mesh by removing points.

    Only valid for all-triangle meshes.

    Parameters
    ----------
    remove : np.ndarray
        If remove is a bool array, points that are ``True`` will
        be removed.  Otherwise, it is treated as a list of
        indices.
    mode : str, optional
        When ``'all'``, only faces containing all points flagged
        for removal will be removed.  Default ``'any'``.
    keep_scalars : bool, optional
        When ``True``, point and cell scalars will be passed on to
        the new mesh.
    inplace : bool, optional
        Updates mesh in-place.  Defaults to ``False``.

    Returns
    -------
    mesh : pyvista.PolyData
        Mesh without the points flagged for removal.
    ridx : np.ndarray
        Indices of new points relative to the original mesh.

    Examples
    --------
    Remove the first 100 points from a sphere.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> reduced_sphere, ridx = sphere.remove_points(range(100))
    """
    remove = np.asarray(remove)
    # np.asarray will eat anything, so we have to weed out bogus inputs
    if not issubclass(remove.dtype.type, (np.bool_, np.integer)):
        raise TypeError('Remove must be either a mask or an integer array-like')
    if remove.dtype == np.bool_:
        if remove.size != poly_data.n_points:
            raise ValueError('Mask different size than n_points')
        remove_mask = remove
    else:
        # convert the index array into a boolean mask over all points
        remove_mask = np.zeros(poly_data.n_points, np.bool_)
        remove_mask[remove] = True

    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError

    # faces stored flat as [3, i0, i1, i2, ...]; drop the per-face counts
    f = poly_data.faces.reshape(-1, 4)[:, 1:]
    vmask = remove_mask.take(f)
    if mode == 'all':
        # keep a face unless *all* of its vertices are flagged
        fmask = ~(vmask).all(1)
    else:
        # keep a face only if *none* of its vertices are flagged
        fmask = ~(vmask).any(1)

    # Regenerate face and point arrays
    uni = np.unique(f.compress(fmask, 0), return_inverse=True)
    new_points = poly_data.points.take(uni[0], 0)

    nfaces = fmask.sum()
    faces = np.empty((nfaces, 4), dtype=pyvista.ID_TYPE)
    faces[:, 0] = 3
    faces[:, 1:] = np.reshape(uni[1], (nfaces, 3))

    newmesh = pyvista.PolyData(new_points, faces, deep=True)
    ridx = uni[0]

    # Add scalars back to mesh if requested
    if keep_scalars:
        for key in poly_data.point_arrays:
            newmesh.point_arrays[key] = poly_data.point_arrays[key][ridx]

        for key in poly_data.cell_arrays:
            try:
                newmesh.cell_arrays[key] = poly_data.cell_arrays[key][fmask]
            # FIX: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort copy but catch narrowly.
            except Exception:
                logging.warning(f'Unable to pass cell key {key} onto reduced mesh')

    # Return vtk surface and reverse indexing array
    if inplace:
        poly_data.overwrite(newmesh)
        return poly_data, ridx
    else:
        return newmesh, ridx
def flip_normals(poly_data):
    """Flip normals of a triangular mesh by reversing the point ordering.

    Raises
    ------
    NotAllTrianglesError
        If the mesh contains non-triangular faces.

    Examples
    --------
    Flip the normals of a sphere and plot the normals before and
    after the flip.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> cpos = sphere.plot_normals(mag=0.1)
    >>> sphere.flip_normals()
    >>> cpos = sphere.plot_normals(mag=0.1)
    """
    # BUG FIX: ``is_all_triangles`` is a method in this module (called with
    # parentheses everywhere else, e.g. in remove_points); without the call
    # the bound-method object is always truthy, so the guard never fired.
    if not poly_data.is_all_triangles():
        raise NotAllTrianglesError('Can only flip normals on an all triangle mesh')

    f = poly_data.faces.reshape((-1, 4))
    # reverse the winding order of each triangle; column 0 (the vertex
    # count, always 3) is left untouched
    f[:, 1:] = f[:, 1:][:, ::-1]
    poly_data.faces = f
def delaunay_2d(poly_data, tol=1e-05, alpha=0.0, offset=1.0, bound=False,
                inplace=False, edge_source=None, progress_bar=False):
    """Apply a delaunay 2D filter along the best fitting plane.

    Parameters
    ----------
    tol : float, optional
        Specify a tolerance to control discarding of closely
        spaced points.  This tolerance is specified as a fraction
        of the diagonal length of the bounding box of the points.
        Defaults to ``1e-05``.
    alpha : float, optional
        Specify alpha (or distance) value to control output of
        this filter.  For a non-zero alpha value, only edges or
        triangles contained within a sphere centered at mesh
        vertices will be output.  Otherwise, only triangles will be
        output.  Defaults to ``0.0``.
    offset : float, optional
        Specify a multiplier to control the size of the initial,
        bounding Delaunay triangulation.  Defaults to ``1.0``.
    bound : bool, optional
        Boolean controls whether bounding triangulation points
        and associated triangles are included in the
        output.  These are introduced as an initial triangulation
        to begin the triangulation process.  This feature is nice
        for debugging output.  Default ``False``.
    inplace : bool, optional
        If ``True``, overwrite this mesh with the triangulated
        mesh.  Default ``False``.
    edge_source : pyvista.PolyData, optional
        Specify the source object used to specify constrained
        edges and loops.  If set, and lines/polygons are defined, a
        constrained triangulation is created.  The lines/polygons
        are assumed to reference points in the input point set
        (i.e. point ids are identical in the input and
        source).
    progress_bar : bool, optional
        Display a progress bar to indicate progress.  Default
        ``False``.

    Examples
    --------
    Extract the points of a sphere and then convert the point
    cloud to a surface mesh.  Note that only the bottom half is
    converted to a mesh.

    >>> import pyvista as pv
    >>> points = pv.PolyData(pv.Sphere().points)
    >>> mesh = points.delaunay_2d()
    >>> mesh.is_all_triangles()
    True
    """
    delny = _vtk.vtkDelaunay2D()
    delny.SetProjectionPlaneMode(_vtk.VTK_BEST_FITTING_PLANE)
    delny.SetInputDataObject(poly_data)
    delny.SetTolerance(tol)
    delny.SetAlpha(alpha)
    delny.SetOffset(offset)
    delny.SetBoundingTriangulation(bound)
    if edge_source is not None:
        delny.SetSourceData(edge_source)
    _update_alg(delny, progress_bar, 'Computing 2D Triangulation')

    # The filter sometimes emits line cells; `.triangulate()` cleans those.
    mesh = _get_output(delny).triangulate()
    if not inplace:
        return mesh
    poly_data.overwrite(mesh)
    return poly_data
def compute_arc_length(poly_data):
    """Compute the arc length over the length of the probed line.

    It adds a new point-data array named ``"arc_length"`` with the
    computed arc length for each of the polylines in the
    input.  For all other cell types, the arc length is set to 0.

    Returns
    -------
    arc_length : float
        Arc length of the length of the probed line.

    Examples
    --------
    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> path = sphere.geodesic(0, 100)
    >>> length = path.compute_arc_length()['arc_length'][-1]
    >>> print(f'Length is {length:.3f}')
    Length is 0.812

    This is identical to the geodesic_distance.

    >>> length = sphere.geodesic_distance(0, 100)
    >>> print(f'Length is {length:.3f}')
    Length is 0.812

    You can also plot the arc_length

    >>> arc = path.compute_arc_length()
    >>> cpos = arc.plot(scalars="arc_length")
    """
    arc_filter = _vtk.vtkAppendArcLength()
    arc_filter.SetInputData(poly_data)
    arc_filter.Update()
    return _get_output(arc_filter)
def project_points_to_plane(poly_data, origin=None, normal=(0, 0, 1),
                            inplace=False):
    """Project points of this mesh to a plane.

    Parameters
    ----------
    origin : np.ndarray or collections.abc.Sequence, optional
        Plane origin.  Defaults the approximate center of the
        input mesh minus half the length of the input mesh in the
        direction of the normal.
    normal : np.ndarray or collections.abc.Sequence, optional
        Plane normal.  Defaults to +Z ``[0, 0, 1]``
    inplace : bool, optional
        Overwrite the original mesh with the projected points

    Examples
    --------
    Flatten a sphere to the XY plane

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> projected = sphere.project_points_to_plane([0, 0, 0])
    """
    if not isinstance(normal, (np.ndarray, collections.abc.Sequence)) or len(normal) != 3:
        raise TypeError('Normal must be a length three vector')
    if origin is None:
        # center of the mesh shifted back along the normal by half its length
        origin = np.array(poly_data.center) - np.array(normal)*poly_data.length/2.

    # work on a copy unless the caller asked for in-place modification
    mesh = poly_data if inplace else poly_data.copy()

    # Make plane
    plane = generate_plane(normal, origin)

    # Perform projection in place on the (possibly copied) mesh points
    def _project(point):
        return plane.ProjectPoint(point, point)

    np.apply_along_axis(_project, 1, mesh.points)
    return mesh
def ribbon(poly_data, width=None, scalars=None, angle=0.0, factor=2.0,
           normal=None, tcoords=False, preference='points'):
    """Create a ribbon of the lines in this dataset.

    Parameters
    ----------
    width : float, optional
        Set the "half" width of the ribbon.  If the width is
        allowed to vary, this is the minimum width.  The default is
        10% the length.
    scalars : str, optional
        String name of the scalars array to use to vary the ribbon
        width.  This is only used if a scalars array is specified.
    angle : float, optional
        Angle in degrees of the offset angle of the ribbon from
        the line normal.  The default is 0.0.
    factor : float, optional
        Set the maximum ribbon width in terms of a multiple of the
        minimum width.  The default is 2.0
    normal : tuple(float), optional
        Normal to use as default.
    tcoords : bool, str, optional
        If ``True``, generate texture coordinates along the
        ribbon.  This can also be specified to generate the texture
        coordinates with either ``'length'`` or ``'normalized'``.

    Examples
    --------
    Convert a line to a ribbon and plot it.

    >>> import pyvista as pv
    >>> sphere = pv.Sphere()
    >>> path = sphere.geodesic(0, 100)
    >>> ribbon = path.ribbon()
    >>> cpos = pv.plot([sphere, ribbon])

    Notes
    -----
    If there are no lines in the input dataset, then the output
    will be an empty ``pyvista.PolyData`` mesh.
    """
    field = None
    if scalars is not None:
        # look up the array only to learn which field (point/cell) it lives on
        _, field = get_array(poly_data, scalars, preference=preference, info=True)
    if width is None:
        width = poly_data.length * 0.1

    rfilter = _vtk.vtkRibbonFilter()
    rfilter.SetInputDataObject(poly_data)
    rfilter.SetWidth(width)
    if normal is not None:
        rfilter.SetUseDefaultNormal(True)
        rfilter.SetDefaultNormal(normal)
    rfilter.SetAngle(angle)

    if scalars is None:
        rfilter.SetVaryWidth(False)
    else:
        rfilter.SetVaryWidth(True)
        # args: (idx, port, connection, field, name)
        rfilter.SetInputArrayToProcess(0, 0, 0, field.value, scalars)
        rfilter.SetWidthFactor(factor)

    if not tcoords:
        rfilter.SetGenerateTCoordsToOff()
    else:
        rfilter.SetGenerateTCoords(True)
        if isinstance(tcoords, str) and tcoords.lower() == 'normalized':
            rfilter.SetGenerateTCoordsToNormalizedLength()
        else:
            # 'length', any other string, or a plain truthy value
            rfilter.SetGenerateTCoordsToUseLength()

    rfilter.Update()
    return _get_output(rfilter)
def extrude(poly_data, vector, inplace=False, progress_bar=False):
    """Sweep polygonal data creating a "skirt" from free edges.

    This will create a line from vertices.

    This takes polygonal data as input and generates polygonal
    data on output.  The input dataset is swept according to some
    extrusion function and creates new polygonal primitives.  These
    primitives form a "skirt" or swept surface.  For example,
    sweeping a line results in a quadrilateral, and sweeping a
    triangle creates a "wedge".

    There are a number of control parameters for this filter.  You
    can control whether the sweep of a 2D object (i.e., polygon or
    triangle strip) is capped with the generating geometry via the
    "Capping" parameter.

    The skirt is generated by locating certain topological
    features.  Free edges (edges of polygons or triangle strips
    only used by one polygon or triangle strips) generate
    surfaces.  This is true also of lines or polylines.  Vertices
    generate lines.

    This filter can be used to create 3D fonts, 3D irregular bar
    charts, or to model 2 1/2D objects like punched plates.  It
    also can be used to create solid objects from 2D polygonal
    meshes.

    Parameters
    ----------
    mesh : pyvista.PolyData
        Mesh to extrude.
    vector : np.ndarray or list
        Direction and length to extrude the mesh in.
    inplace : bool, optional
        Overwrites the original mesh in-place.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Examples
    --------
    Extrude a half arc circle

    >>> import pyvista
    >>> arc = pyvista.CircularArc([-1, 0, 0], [1, 0, 0], [0, 0, 0])
    >>> mesh = arc.extrude([0, 0, 1])
    >>> cpos = mesh.plot()
    """
    extruder = _vtk.vtkLinearExtrusionFilter()
    extruder.SetExtrusionTypeToVectorExtrusion()
    extruder.SetVector(*vector)
    extruder.SetInputData(poly_data)
    _update_alg(extruder, progress_bar, 'Extruding')
    result = pyvista.wrap(extruder.GetOutput())
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def extrude_rotate(poly_data, resolution=30, inplace=False,
                   translation=0.0, dradius=0.0, angle=360.0, progress_bar=False):
    """Sweep polygonal data creating "skirt" from free edges and lines, and lines from vertices.

    This is a modeling filter.

    This takes polygonal data as input and generates polygonal
    data on output.  The input dataset is swept around the z-axis
    to create new polygonal primitives.  These primitives form a
    "skirt" or swept surface.  For example, sweeping a line
    results in a cylindrical shell, and sweeping a circle
    creates a torus.

    There are a number of control parameters for this filter.
    You can control whether the sweep of a 2D object (i.e.,
    polygon or triangle strip) is capped with the generating
    geometry via the "Capping" instance variable.  Also, you can
    control the angle of rotation, and whether translation along
    the z-axis is performed along with the rotation.
    (Translation is useful for creating "springs".)  You also can
    adjust the radius of the generating geometry using the
    "DeltaRotation" instance variable.

    The skirt is generated by locating certain topological
    features.  Free edges (edges of polygons or triangle strips
    only used by one polygon or triangle strips) generate
    surfaces.  This is true also of lines or polylines.  Vertices
    generate lines.

    This filter can be used to model axisymmetric objects like
    cylinders, bottles, and wine glasses; or translational/
    rotational symmetric objects like springs or corkscrews.

    Parameters
    ----------
    resolution : int, optional
        Number of pieces to divide line into.
    inplace : bool, optional
        Overwrites the original mesh inplace.
    translation : float, optional
        Total amount of translation along the z-axis.
    dradius : float, optional
        Change in radius during sweep process.
    angle : float, optional
        The angle of rotation.
    progress_bar : bool, optional
        Display a progress bar to indicate progress.

    Examples
    --------
    >>> import pyvista
    >>> line = pyvista.Line(pointa=(0, 0, 0), pointb=(1, 0, 0))
    >>> mesh = line.extrude_rotate(resolution = 4)
    >>> cpos = mesh.plot()
    """
    # guard against a degenerate sweep before touching VTK
    if resolution <= 0:
        raise ValueError('`resolution` should be positive')

    extruder = _vtk.vtkRotationalExtrusionFilter()
    extruder.SetInputData(poly_data)
    extruder.SetResolution(resolution)
    extruder.SetTranslation(translation)
    extruder.SetDeltaRadius(dradius)
    extruder.SetAngle(angle)
    _update_alg(extruder, progress_bar, 'Extruding')

    result = pyvista.wrap(extruder.GetOutput())
    if not inplace:
        return result
    poly_data.overwrite(result)
    return poly_data
def strip(poly_data, join=False, max_length=1000, pass_cell_data=False,
          pass_cell_ids=False, pass_point_ids=False):
    """Strip poly data cells.

    Generates triangle strips and/or poly-lines from input
    polygons, triangle strips, and lines.

    Polygons are assembled into triangle strips only if they are
    triangles; other types of polygons are passed through to the
    output and not stripped.  (Use ``triangulate`` filter to
    triangulate non-triangular polygons prior to running this
    filter if you need to strip all the data.)  The filter will
    pass through (to the output) vertices if they are present in
    the input polydata.

    Also note that if triangle strips or polylines are defined in
    the input they are passed through and not joined nor
    extended.  (If you wish to strip these use ``triangulate``
    filter to fragment the input into triangles and lines prior to
    running this filter.)

    Parameters
    ----------
    join : bool, optional
        If ``True``, the output polygonal segments will be joined
        if they are contiguous.  This is useful after slicing a
        surface.  The default is ``False``.
    max_length : int, optional
        Specify the maximum number of triangles in a triangle
        strip, and/or the maximum number of lines in a poly-line.
    pass_cell_data : bool, optional
        Enable/Disable passing of the CellData in the input to the
        output as FieldData.  Note the field data is transformed.
        Default is ``False``.
    pass_cell_ids : bool, optional
        If ``True``, the output polygonal dataset will have a
        celldata array that holds the cell index of the original
        3D cell that produced each output cell.  This is useful for
        picking.  The default is ``False`` to conserve memory.
    pass_point_ids : bool, optional
        If ``True``, the output polygonal dataset will have a
        pointdata array that holds the point index of the original
        vertex that produced each output vertex.  This is useful
        for picking.  The default is ``False`` to conserve memory.

    Examples
    --------
    >>> from pyvista import examples
    >>> mesh = examples.load_airplane()
    >>> slc = mesh.slice(normal='z', origin=(0,0,-10))
    >>> stripped = slc.strip()
    >>> stripped.n_cells
    1
    """
    stripper = _vtk.vtkStripper()
    stripper.SetInputDataObject(poly_data)
    stripper.SetJoinContiguousSegments(join)
    stripper.SetMaximumLength(max_length)
    stripper.SetPassCellDataAsFieldData(pass_cell_data)
    stripper.SetPassThroughCellIds(pass_cell_ids)
    stripper.SetPassThroughPointIds(pass_point_ids)
    stripper.Update()
    return _get_output(stripper)
| [
"pyvista.core.filters._update_alg",
"numpy.sum",
"pyvista.core.filters._get_output",
"pyvista._vtk.vtkRotationalExtrusionFilter",
"numpy.empty",
"pyvista.assert_empty_kwargs",
"pyvista._vtk.vtkPlaneCollection",
"pyvista._vtk.vtkPolyDataNormals",
"numpy.arange",
"numpy.linalg.norm",
"pyvista._vtk... | [((1020, 1049), 'numpy.arange', 'np.arange', (['poly_data.n_points'], {}), '(poly_data.n_points)\n', (1029, 1049), True, 'import numpy as np\n'), ((1073, 1095), 'pyvista._vtk.vtkFeatureEdges', '_vtk.vtkFeatureEdges', ([], {}), '()\n', (1093, 1095), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((1392, 1417), 'pyvista.core.filters._get_output', '_get_output', (['featureEdges'], {}), '(featureEdges)\n', (1403, 1417), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((1436, 1475), 'pyvista.point_array', 'pyvista.point_array', (['edges', '"""point_ind"""'], {}), "(edges, 'point_ind')\n", (1455, 1475), False, 'import pyvista\n'), ((1492, 1565), 'numpy.in1d', 'np.in1d', (["poly_data.point_arrays['point_ind']", 'orig_id'], {'assume_unique': '(True)'}), "(poly_data.point_arrays['point_ind'], orig_id, assume_unique=True)\n", (1499, 1565), True, 'import numpy as np\n'), ((2286, 2326), 'pyvista._vtk.vtkBooleanOperationPolyDataFilter', '_vtk.vtkBooleanOperationPolyDataFilter', ([], {}), '()\n', (2324, 2326), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((2624, 2644), 'pyvista.core.filters._get_output', '_get_output', (['bfilter'], {}), '(bfilter)\n', (2635, 2644), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((3329, 3353), 'pyvista._vtk.vtkAppendPolyData', '_vtk.vtkAppendPolyData', ([], {}), '()\n', (3351, 3353), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((3476, 3498), 'pyvista.core.filters._get_output', '_get_output', (['vtkappend'], {}), '(vtkappend)\n', (3487, 3498), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((4419, 4459), 'pyvista._vtk.vtkBooleanOperationPolyDataFilter', '_vtk.vtkBooleanOperationPolyDataFilter', ([], 
{}), '()\n', (4457, 4459), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((4665, 4685), 'pyvista.core.filters._get_output', '_get_output', (['bfilter'], {}), '(bfilter)\n', (4676, 4685), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((5386, 5426), 'pyvista._vtk.vtkBooleanOperationPolyDataFilter', '_vtk.vtkBooleanOperationPolyDataFilter', ([], {}), '()\n', (5424, 5426), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((5637, 5657), 'pyvista.core.filters._get_output', '_get_output', (['bfilter'], {}), '(bfilter)\n', (5648, 5657), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((7494, 7530), 'pyvista._vtk.vtkIntersectionPolyDataFilter', '_vtk.vtkIntersectionPolyDataFilter', ([], {}), '()\n', (7528, 7530), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((7840, 7871), 'pyvista.core.filters._get_output', '_get_output', (['intfilter'], {'oport': '(0)'}), '(intfilter, oport=0)\n', (7851, 7871), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((7888, 7919), 'pyvista.core.filters._get_output', '_get_output', (['intfilter'], {'oport': '(1)'}), '(intfilter, oport=1)\n', (7899, 7919), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((7937, 7968), 'pyvista.core.filters._get_output', '_get_output', (['intfilter'], {'oport': '(2)'}), '(intfilter, oport=2)\n', (7948, 7968), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((8586, 8606), 'pyvista._vtk.vtkCurvatures', '_vtk.vtkCurvatures', ([], {}), '()\n', (8604, 8606), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((9232, 9256), 'pyvista.core.filters._get_output', '_get_output', 
(['curvefilter'], {}), '(curvefilter)\n', (9243, 9256), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((10742, 10766), 'pyvista._vtk.vtkTriangleFilter', '_vtk.vtkTriangleFilter', ([], {}), '()\n', (10764, 10766), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((10918, 10940), 'pyvista.core.filters._get_output', '_get_output', (['trifilter'], {}), '(trifilter)\n', (10929, 10940), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((13250, 13280), 'pyvista._vtk.vtkSmoothPolyDataFilter', '_vtk.vtkSmoothPolyDataFilter', ([], {}), '()\n', (13278, 13280), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((13675, 13691), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (13686, 13691), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((15936, 15957), 'pyvista._vtk.vtkDecimatePro', '_vtk.vtkDecimatePro', ([], {}), '()\n', (15955, 15957), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((16286, 16302), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (16297, 16302), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((18044, 18064), 'pyvista._vtk.vtkTubeFilter', '_vtk.vtkTubeFilter', ([], {}), '()\n', (18062, 18064), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((18852, 18869), 'pyvista.core.filters._get_output', '_get_output', (['tube'], {}), '(tube)\n', (18863, 18869), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((21306, 21326), 'pyvista.core.filters._get_output', '_get_output', (['sfilter'], {}), '(sfilter)\n', (21317, 21326), False, 'from pyvista.core.filters import 
_get_output, _update_alg\n'), ((24375, 24410), 'pyvista._vtk.vtkAdaptiveSubdivisionFilter', '_vtk.vtkAdaptiveSubdivisionFilter', ([], {}), '()\n', (24408, 24410), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((24824, 24844), 'pyvista.core.filters._get_output', '_get_output', (['sfilter'], {}), '(sfilter)\n', (24835, 24844), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((28516, 28543), 'pyvista._vtk.vtkQuadricDecimation', '_vtk.vtkQuadricDecimation', ([], {}), '()\n', (28541, 28543), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((29203, 29247), 'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Decimating"""'], {}), "(alg, progress_bar, 'Decimating')\n", (29214, 29247), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((29264, 29280), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (29275, 29280), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((33707, 33732), 'pyvista._vtk.vtkPolyDataNormals', '_vtk.vtkPolyDataNormals', ([], {}), '()\n', (33730, 33732), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((34220, 34239), 'pyvista.core.filters._get_output', '_get_output', (['normal'], {}), '(normal)\n', (34231, 34239), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((37165, 37195), 'pyvista.generate_plane', 'generate_plane', (['normal', 'origin'], {}), '(normal, origin)\n', (37179, 37195), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((37217, 37242), 'pyvista._vtk.vtkPlaneCollection', '_vtk.vtkPlaneCollection', ([], {}), '()\n', (37240, 37242), False, 'from pyvista import 
abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((37292, 37319), 'pyvista._vtk.vtkClipClosedSurface', '_vtk.vtkClipClosedSurface', ([], {}), '()\n', (37317, 37319), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((37532, 37548), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (37543, 37548), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((39176, 39270), 'logging.warning', 'logging.warning', (['"""pyvista.PolyData.fill_holes is known to segfault. Use at your own risk"""'], {}), "(\n 'pyvista.PolyData.fill_holes is known to segfault. Use at your own risk')\n", (39191, 39270), False, 'import logging\n'), ((39307, 39332), 'pyvista._vtk.vtkFillHolesFilter', '_vtk.vtkFillHolesFilter', ([], {}), '()\n', (39330, 39332), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((39412, 39459), 'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Filling Holes"""'], {}), "(alg, progress_bar, 'Filling Holes')\n", (39423, 39459), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((39476, 39492), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (39487, 39492), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((41715, 41744), 'pyvista.assert_empty_kwargs', 'assert_empty_kwargs', ([], {}), '(**kwargs)\n', (41734, 41744), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((41759, 41782), 'pyvista._vtk.vtkCleanPolyData', '_vtk.vtkCleanPolyData', ([], {}), '()\n', (41780, 41782), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((42258, 42300), 
'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Cleaning"""'], {}), "(alg, progress_bar, 'Cleaning')\n", (42269, 42300), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((42318, 42334), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (42329, 42334), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((44724, 44759), 'pyvista._vtk.vtkDijkstraGraphGeodesicPath', '_vtk.vtkDijkstraGraphGeodesicPath', ([], {}), '()\n', (44757, 44759), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((44999, 45020), 'pyvista.core.filters._get_output', '_get_output', (['dijkstra'], {}), '(dijkstra)\n', (45010, 45020), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((46412, 46435), 'numpy.sum', 'np.sum', (["sizes['Length']"], {}), "(sizes['Length'])\n", (46418, 46435), True, 'import numpy as np\n'), ((47994, 48010), 'pyvista._vtk.vtkPoints', '_vtk.vtkPoints', ([], {}), '()\n', (48008, 48010), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((48030, 48046), 'pyvista._vtk.vtkIdList', '_vtk.vtkIdList', ([], {}), '()\n', (48044, 48046), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((48743, 48771), 'numpy.array', 'np.array', (['intersection_cells'], {}), '(intersection_cells)\n', (48751, 48771), True, 'import numpy as np\n'), ((51950, 51969), 'numpy.asarray', 'np.asarray', (['origins'], {}), '(origins)\n', (51960, 51969), True, 'import numpy as np\n'), ((51991, 52013), 'numpy.asarray', 'np.asarray', (['directions'], {}), '(directions)\n', (52001, 52013), True, 'import numpy as np\n'), ((52110, 52159), 'trimesh.Trimesh', 'trimesh.Trimesh', (['poly_data.points', 'faces_as_array'], {}), '(poly_data.points, 
faces_as_array)\n', (52125, 52159), False, 'import trimesh, rtree, pyembree\n'), ((54368, 54415), 'pyvista.core.filters.data_set.DataSetFilters.extract_feature_edges', 'DataSetFilters.extract_feature_edges', (['poly_data'], {}), '(poly_data)\n', (54404, 54415), False, 'from pyvista.core.filters.data_set import DataSetFilters\n'), ((57403, 57421), 'numpy.asarray', 'np.asarray', (['remove'], {}), '(remove)\n', (57413, 57421), True, 'import numpy as np\n'), ((58458, 58502), 'numpy.empty', 'np.empty', (['(nfaces, 4)'], {'dtype': 'pyvista.ID_TYPE'}), '((nfaces, 4), dtype=pyvista.ID_TYPE)\n', (58466, 58502), True, 'import numpy as np\n'), ((58550, 58581), 'numpy.reshape', 'np.reshape', (['uni[1]', '(nfaces, 3)'], {}), '(uni[1], (nfaces, 3))\n', (58560, 58581), True, 'import numpy as np\n'), ((58601, 58647), 'pyvista.PolyData', 'pyvista.PolyData', (['new_points', 'faces'], {'deep': '(True)'}), '(new_points, faces, deep=True)\n', (58617, 58647), False, 'import pyvista\n'), ((62480, 62500), 'pyvista._vtk.vtkDelaunay2D', '_vtk.vtkDelaunay2D', ([], {}), '()\n', (62498, 62500), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((62826, 62886), 'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Computing 2D Triangulation"""'], {}), "(alg, progress_bar, 'Computing 2D Triangulation')\n", (62837, 62886), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((64209, 64234), 'pyvista._vtk.vtkAppendArcLength', '_vtk.vtkAppendArcLength', ([], {}), '()\n', (64232, 64234), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((64307, 64323), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (64318, 64323), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((65672, 65702), 'pyvista.generate_plane', 'generate_plane', (['normal', 
'origin'], {}), '(normal, origin)\n', (65686, 65702), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((65815, 65853), 'numpy.apply_along_axis', 'np.apply_along_axis', (['f', '(1)', 'mesh.points'], {}), '(f, 1, mesh.points)\n', (65834, 65853), True, 'import numpy as np\n'), ((67744, 67766), 'pyvista._vtk.vtkRibbonFilter', '_vtk.vtkRibbonFilter', ([], {}), '()\n', (67764, 67766), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((68735, 68751), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (68746, 68751), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((70707, 70738), 'pyvista._vtk.vtkLinearExtrusionFilter', '_vtk.vtkLinearExtrusionFilter', ([], {}), '()\n', (70736, 70738), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((70862, 70905), 'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Extruding"""'], {}), "(alg, progress_bar, 'Extruding')\n", (70873, 70905), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((73664, 73699), 'pyvista._vtk.vtkRotationalExtrusionFilter', '_vtk.vtkRotationalExtrusionFilter', ([], {}), '()\n', (73697, 73699), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((73886, 73929), 'pyvista.core.filters._update_alg', '_update_alg', (['alg', 'progress_bar', '"""Extruding"""'], {}), "(alg, progress_bar, 'Extruding')\n", (73897, 73929), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((76665, 76683), 'pyvista._vtk.vtkStripper', '_vtk.vtkStripper', ([], {}), '()\n', (76681, 76683), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, 
vtk_id_list_to_array, get_array\n'), ((77002, 77018), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (77013, 77018), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((946, 973), 'pyvista.PolyData', 'pyvista.PolyData', (['poly_data'], {}), '(poly_data)\n', (962, 973), False, 'import pyvista\n'), ((2189, 2266), 'pyvista.core.errors.NotAllTrianglesError', 'NotAllTrianglesError', (['"""Make sure both the input and output are triangulated."""'], {}), "('Make sure both the input and output are triangulated.')\n", (2209, 2266), False, 'from pyvista.core.errors import NotAllTrianglesError\n'), ((3767, 3806), 'pyvista.core.filters.data_set.DataSetFilters.__add__', 'DataSetFilters.__add__', (['poly_data', 'mesh'], {}), '(poly_data, mesh)\n', (3789, 3806), False, 'from pyvista.core.filters.data_set import DataSetFilters\n'), ((17953, 17980), 'pyvista.PolyData', 'pyvista.PolyData', (['poly_data'], {}), '(poly_data)\n', (17969, 17980), False, 'import pyvista\n'), ((20775, 20808), 'pyvista._vtk.vtkLinearSubdivisionFilter', '_vtk.vtkLinearSubdivisionFilter', ([], {}), '()\n', (20806, 20808), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((44628, 44703), 'pyvista.core.errors.NotAllTrianglesError', 'NotAllTrianglesError', (['"""Input mesh for geodesic path must be all triangles."""'], {}), "('Input mesh for geodesic path must be all triangles.')\n", (44648, 44703), False, 'from pyvista.core.errors import NotAllTrianglesError\n'), ((48091, 48107), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (48099, 48107), True, 'import numpy as np\n'), ((48153, 48172), 'numpy.array', 'np.array', (['end_point'], {}), '(end_point)\n', (48161, 48172), True, 'import numpy as np\n'), ((48812, 48850), 'pyvista.Plotter', 'pyvista.Plotter', ([], {'off_screen': 'off_screen'}), '(off_screen=off_screen)\n', (48827, 48850), False, 'import pyvista\n'), 
((48932, 48961), 'numpy.array', 'np.array', (['[origin, end_point]'], {}), '([origin, end_point])\n', (48940, 48961), True, 'import numpy as np\n'), ((52609, 52669), 'numpy.setdiff1d', 'np.setdiff1d', (['all_ray_indices', 'index_ray'], {'assume_unique': '(True)'}), '(all_ray_indices, index_ray, assume_unique=True)\n', (52621, 52669), True, 'import numpy as np\n'), ((53679, 53696), 'numpy.array', 'np.array', (['ray_lst'], {}), '(ray_lst)\n', (53687, 53696), True, 'import numpy as np\n'), ((57885, 57923), 'numpy.zeros', 'np.zeros', (['poly_data.n_points', 'np.bool_'], {}), '(poly_data.n_points, np.bool_)\n', (57893, 57923), True, 'import numpy as np\n'), ((59841, 59910), 'pyvista.core.errors.NotAllTrianglesError', 'NotAllTrianglesError', (['"""Can only flip normals on an all triangle mesh"""'], {}), "('Can only flip normals on an all triangle mesh')\n", (59861, 59910), False, 'from pyvista.core.errors import NotAllTrianglesError\n'), ((67597, 67660), 'pyvista.get_array', 'get_array', (['poly_data', 'scalars'], {'preference': 'preference', 'info': '(True)'}), '(poly_data, scalars, preference=preference, info=True)\n', (67606, 67660), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((20870, 20906), 'pyvista._vtk.vtkButterflySubdivisionFilter', '_vtk.vtkButterflySubdivisionFilter', ([], {}), '()\n', (20904, 20906), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((52925, 52980), 'numpy.linalg.norm', 'np.linalg.norm', (['directions_retry'], {'axis': '(1)', 'keepdims': '(True)'}), '(directions_retry, axis=1, keepdims=True)\n', (52939, 52980), True, 'import numpy as np\n'), ((53816, 53833), 'numpy.array', 'np.array', (['tri_lst'], {}), '(tri_lst)\n', (53824, 53833), True, 'import numpy as np\n'), ((53872, 53889), 'numpy.array', 'np.array', (['loc_lst'], {}), '(loc_lst)\n', (53880, 53889), True, 'import 
numpy as np\n'), ((63005, 63021), 'pyvista.core.filters._get_output', '_get_output', (['alg'], {}), '(alg)\n', (63016, 63021), False, 'from pyvista.core.filters import _get_output, _update_alg\n'), ((65432, 65458), 'numpy.array', 'np.array', (['poly_data.center'], {}), '(poly_data.center)\n', (65440, 65458), True, 'import numpy as np\n'), ((20963, 20994), 'pyvista._vtk.vtkLoopSubdivisionFilter', '_vtk.vtkLoopSubdivisionFilter', ([], {}), '()\n', (20992, 20994), False, 'from pyvista import abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array\n'), ((59062, 59129), 'logging.warning', 'logging.warning', (['f"""Unable to pass cell key {key} onto reduced mesh"""'], {}), "(f'Unable to pass cell key {key} onto reduced mesh')\n", (59077, 59129), False, 'import logging\n'), ((65461, 65477), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (65469, 65477), True, 'import numpy as np\n')] |
# OS libraries
import os
import copy
import queue
import argparse
import scipy.misc
import numpy as np
from tqdm import tqdm
# Pytorch
import torch
import torch.nn as nn
# Customized libraries
from libs.test_utils import *
from libs.model import transform
from libs.utils import norm_mask
from libs.model import Model_switchGTfixdot_swCC_Res as Model
import pdb
############################## helper functions ##############################
def parse_args():
    """Build and parse the command-line options for the testing script.

    Returns:
        argparse.Namespace with the parsed flags plus three derived fields:
        ``is_train`` (always False), ``multiGPU`` (device == 5), and the
        DAVIS paths ``val_txt`` / ``davis_dir`` joined onto the dataset root.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--batch_size", type=int, default=1, help="batch size")
    cli.add_argument("-o", "--out_dir", type=str,
                     default="results/trackingnet_contrastive/",
                     help="output saving path")
    cli.add_argument("--device", type=int, default=5,
                     help="0~4 for single GPU, 5 for dataparallel.")
    cli.add_argument("-c", "--checkpoint_dir", type=str,
                     default="trackingnet_contrastive/checkpoint_latest.pth.tar",
                     help="checkpoints path")
    cli.add_argument("-s", "--scale_size", type=int, nargs="+",
                     help="scale size, a single number for shorter edge, or a pair for height and width")
    cli.add_argument("--pre_num", type=int, default=7,
                     help="preceding frame numbers")
    cli.add_argument("--temp", type=float, default=1,
                     help="softmax temperature")
    cli.add_argument("--topk", type=int, default=5,
                     help="accumulate label from top k neighbors")
    cli.add_argument("-d", "--davis_dir", type=str, default="",
                     help="davis dataset path")
    opts = cli.parse_args()
    # Derived settings: inference only; device id 5 means DataParallel.
    opts.is_train = False
    opts.multiGPU = (opts.device == 5)
    if not opts.multiGPU:
        torch.cuda.set_device(opts.device)
    opts.val_txt = os.path.join(opts.davis_dir, "ImageSets/2017/val.txt")
    opts.davis_dir = os.path.join(opts.davis_dir, "JPEGImages/480p/")
    return opts
############################## testing functions ##############################
def forward(frame1, frame2, model, seg):
    """
    Propagate the segmentation of frame1 onto frame2.

    Args:
        frame1: source frame tensor.
        frame2: target frame tensor.
        model: correspondence network; the third element of its output is
            the affinity matrix between the two frames.
        seg: label map aligned with frame1 (moved to GPU here).

    Returns:
        The estimated label map for frame2.
    """
    # The original also unpacked frame1.size() into n, c, h, w, but the
    # values were never used — dead code removed.
    output = model(frame1, frame2, frame1, frame2)
    aff = output[2]
    # Accumulate labels from the top-k most similar source pixels
    # (k taken from the global CLI options).
    frame2_seg = transform_topk(aff, seg.cuda(), k=args.topk)
    return frame2_seg
def test(model, frame_list, video_dir, first_seg, seg_ori):
    """
    Segment a whole video given its first frame's ground-truth mask.

    Labels are propagated frame by frame: each target frame receives a
    prediction from the first frame plus predictions from up to
    ``args.pre_num`` preceding frames (kept in a FIFO queue), and the
    results are averaged before upsampling/argmax. Per-frame indexed PNGs
    are written under ``args.out_dir/<video name>/``.

    Args:
        model: correspondence network used by forward().
        frame_list: ordered list of frame file paths for this video.
        video_dir: directory of the video (used only for its name).
        first_seg: first-frame segmentation at network resolution.
        seg_ori: first-frame segmentation at original resolution (saved as-is).
    """
    # os.path.join with a single argument is a no-op; kept for byte-identity.
    video_dir = os.path.join(video_dir)
    video_nm = video_dir.split('/')[-1]
    video_folder = os.path.join(args.out_dir, video_nm)
    os.makedirs(video_folder, exist_ok = True)
    transforms = create_transforms()
    # The queue stores up to args.pre_num preceding (frame, seg) pairs.
    que = queue.Queue(args.pre_num)
    # First (reference) frame; read_frame also returns the original size.
    frame1, ori_h, ori_w = read_frame(frame_list[0], transforms, args.scale_size)
    n, c, h, w = frame1.size()
    # Save the ground-truth first segmentation unchanged.
    out_path = os.path.join(video_folder, "00000.png")
    imwrite_indexed(out_path, seg_ori)
    for cnt in tqdm(range(1,len(frame_list))):
        frame_tar, ori_h, ori_w = read_frame(frame_list[cnt], transforms, args.scale_size)
        with torch.no_grad():
            # Propagate frame 1 -> frame cnt.
            frame_tar_acc = forward(frame1, frame_tar, model, first_seg)
            # Propagate frame (cnt - i) -> frame cnt, for i = 1, ..., pre_num.
            tmp_queue = list(que.queue)
            for pair in tmp_queue:
                framei = pair[0]
                segi = pair[1]
                frame_tar_est_i = forward(framei, frame_tar, model, segi)
                frame_tar_acc += frame_tar_est_i
            # Average the first-frame prediction with the queued ones.
            frame_tar_avg = frame_tar_acc / (1 + len(tmp_queue))
        frame_nm = frame_list[cnt].split('/')[-1].replace(".jpg",".png")
        out_path = os.path.join(video_folder, frame_nm)
        # Pop out the oldest frame if the queue is full.
        if(que.qsize() == args.pre_num):
            que.get()
        # Push the current frame and its (soft) prediction into the queue.
        seg = copy.deepcopy(frame_tar_avg)
        frame, ori_h, ori_w = read_frame(frame_list[cnt], transforms, args.scale_size)
        que.put([frame,seg])
        # Upsample back to image resolution, normalize, then argmax to labels.
        frame_tar_avg = torch.nn.functional.interpolate(frame_tar_avg, scale_factor=8, mode='bilinear')
        frame_tar_avg = frame_tar_avg.squeeze()
        frame_tar_avg = norm_mask(frame_tar_avg.squeeze())
        _, frame_tar_seg = torch.max(frame_tar_avg, dim=0)
        # Save the per-frame prediction to disk as an indexed PNG.
        frame_tar_seg = frame_tar_seg.squeeze().cpu().numpy()
        frame_tar_seg = np.array(frame_tar_seg, dtype=np.uint8)
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3.0;
        # this requires an old SciPy — consider PIL/cv2 resize instead.
        frame_tar_seg = scipy.misc.imresize(frame_tar_seg, (ori_h, ori_w), "nearest")
        imwrite_indexed(out_path, frame_tar_seg)
############################## main function ##############################
if(__name__ == '__main__'):
    args = parse_args()
    # Read the list of validation video names (one per line).
    with open(args.val_txt) as f:
        lines = f.readlines()
    # NOTE: the redundant f.close() was removed — the `with` block already
    # closes the file on exit.
    # Load the pretrained correspondence model.
    model = Model(pretrainRes=False, temp = args.temp, uselayer=4)
    if(args.multiGPU):
        model = nn.DataParallel(model)
    checkpoint = torch.load(args.checkpoint_dir)
    best_loss = checkpoint['best_loss']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{} ({})' (epoch {})"
          .format(args.checkpoint_dir, best_loss, checkpoint['epoch']))
    model.cuda()
    model.eval()
    # Segment each validation video, seeding from its first-frame annotation.
    for cnt,line in enumerate(lines):
        video_nm = line.strip()
        print('[{:n}/{:n}] Begin to segmentate video {}.'.format(cnt,len(lines),video_nm))
        video_dir = os.path.join(args.davis_dir, video_nm)
        frame_list = read_frame_list(video_dir)
        # The first-frame annotation lives next to the JPEGs, as a PNG.
        seg_dir = frame_list[0].replace("JPEGImages","Annotations")
        seg_dir = seg_dir.replace("jpg","png")
        _, first_seg, seg_ori = read_seg(seg_dir, args.scale_size)
        test(model, frame_list, video_dir, first_seg, seg_ori)
| [
"libs.model.Model_switchGTfixdot_swCC_Res",
"copy.deepcopy",
"os.makedirs",
"argparse.ArgumentParser",
"torch.load",
"torch.nn.DataParallel",
"torch.max",
"numpy.array",
"torch.cuda.set_device",
"torch.nn.functional.interpolate",
"torch.no_grad",
"os.path.join",
"queue.Queue"
] | [((476, 501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (499, 501), False, 'import argparse\n'), ((1983, 2037), 'os.path.join', 'os.path.join', (['args.davis_dir', '"""ImageSets/2017/val.txt"""'], {}), "(args.davis_dir, 'ImageSets/2017/val.txt')\n", (1995, 2037), False, 'import os\n'), ((2059, 2107), 'os.path.join', 'os.path.join', (['args.davis_dir', '"""JPEGImages/480p/"""'], {}), "(args.davis_dir, 'JPEGImages/480p/')\n", (2071, 2107), False, 'import os\n'), ((2635, 2658), 'os.path.join', 'os.path.join', (['video_dir'], {}), '(video_dir)\n', (2647, 2658), False, 'import os\n'), ((2718, 2754), 'os.path.join', 'os.path.join', (['args.out_dir', 'video_nm'], {}), '(args.out_dir, video_nm)\n', (2730, 2754), False, 'import os\n'), ((2759, 2799), 'os.makedirs', 'os.makedirs', (['video_folder'], {'exist_ok': '(True)'}), '(video_folder, exist_ok=True)\n', (2770, 2799), False, 'import os\n'), ((2904, 2929), 'queue.Queue', 'queue.Queue', (['args.pre_num'], {}), '(args.pre_num)\n', (2915, 2929), False, 'import queue\n'), ((3110, 3149), 'os.path.join', 'os.path.join', (['video_folder', '"""00000.png"""'], {}), "(video_folder, '00000.png')\n", (3122, 3149), False, 'import os\n'), ((5142, 5194), 'libs.model.Model_switchGTfixdot_swCC_Res', 'Model', ([], {'pretrainRes': '(False)', 'temp': 'args.temp', 'uselayer': '(4)'}), '(pretrainRes=False, temp=args.temp, uselayer=4)\n', (5147, 5194), True, 'from libs.model import Model_switchGTfixdot_swCC_Res as Model\n'), ((5276, 5307), 'torch.load', 'torch.load', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (5286, 5307), False, 'import torch\n'), ((1928, 1962), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.device'], {}), '(args.device)\n', (1949, 1962), False, 'import torch\n'), ((3952, 3988), 'os.path.join', 'os.path.join', (['video_folder', 'frame_nm'], {}), '(video_folder, frame_nm)\n', (3964, 3988), False, 'import os\n'), ((4154, 4182), 'copy.deepcopy', 'copy.deepcopy', 
(['frame_tar_avg'], {}), '(frame_tar_avg)\n', (4167, 4182), False, 'import copy\n'), ((4354, 4433), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['frame_tar_avg'], {'scale_factor': '(8)', 'mode': '"""bilinear"""'}), "(frame_tar_avg, scale_factor=8, mode='bilinear')\n", (4385, 4433), False, 'import torch\n'), ((4570, 4601), 'torch.max', 'torch.max', (['frame_tar_avg'], {'dim': '(0)'}), '(frame_tar_avg, dim=0)\n', (4579, 4601), False, 'import torch\n'), ((4714, 4753), 'numpy.array', 'np.array', (['frame_tar_seg'], {'dtype': 'np.uint8'}), '(frame_tar_seg, dtype=np.uint8)\n', (4722, 4753), True, 'import numpy as np\n'), ((5236, 5258), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (5251, 5258), True, 'import torch.nn as nn\n'), ((5764, 5802), 'os.path.join', 'os.path.join', (['args.davis_dir', 'video_nm'], {}), '(args.davis_dir, video_nm)\n', (5776, 5802), False, 'import os\n'), ((3342, 3357), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3355, 3357), False, 'import torch\n')] |
from holoviews.element import RGB, Tiles, Points, Bounds
from holoviews.element.tiles import StamenTerrain, _ATTRIBUTIONS
from .test_plot import TestPlotlyPlot, plotly_renderer
import numpy as np
class TestMapboxTilesPlot(TestPlotlyPlot):
    """Tests for rendering holoviews Tiles elements to Plotly mapbox figures."""

    def setUp(self):
        """Precompute Web-Mercator ranges and their lon/lat equivalents."""
        super().setUp()
        # Precompute coordinates (easting/northing in meters).
        self.xs = [3000000, 2000000, 1000000]
        self.ys = [-3000000, -2000000, -1000000]
        self.x_range = (-5000000, 4000000)
        self.x_center = sum(self.x_range) / 2.0
        self.y_range = (-3000000, 2000000)
        self.y_center = sum(self.y_range) / 2.0
        # Convert ranges/centers to lon/lat, which is what mapbox expects.
        self.lon_range, self.lat_range = Tiles.easting_northing_to_lon_lat(self.x_range, self.y_range)
        self.lon_centers, self.lat_centers = Tiles.easting_northing_to_lon_lat(
            [self.x_center], [self.y_center]
        )
        self.lon_center, self.lat_center = self.lon_centers[0], self.lat_centers[0]
        self.lons, self.lats = Tiles.easting_northing_to_lon_lat(self.xs, self.ys)
    def test_mapbox_tiles_defaults(self):
        """An empty Tiles element produces a bare white-bg mapbox subplot."""
        tiles = Tiles("").redim.range(
            x=self.x_range, y=self.y_range
        )
        fig_dict = plotly_renderer.get_plot_state(tiles)
        # Check dummy trace
        self.assertEqual(len(fig_dict["data"]), 1)
        dummy_trace = fig_dict["data"][0]
        self.assertEqual(dummy_trace["type"], "scattermapbox")
        self.assertEqual(dummy_trace["lon"], [])
        self.assertEqual(dummy_trace["lat"], [])
        self.assertEqual(dummy_trace["showlegend"], False)
        # Check mapbox subplot
        subplot = fig_dict["layout"]["mapbox"]
        self.assertEqual(subplot["style"], "white-bg")
        self.assertEqual(
            subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
        )
        # Check that xaxis and yaxis entries are not created
        self.assertNotIn("xaxis", fig_dict["layout"])
        self.assertNotIn("yaxis", fig_dict["layout"])
        # Check no layers are introduced when an empty tile server string is
        # passed
        layers = fig_dict["layout"]["mapbox"].get("layers", [])
        self.assertEqual(len(layers), 0)
    def test_styled_mapbox_tiles(self):
        """mapboxstyle/accesstoken opts are forwarded to the mapbox subplot."""
        tiles = Tiles().opts(mapboxstyle="dark", accesstoken="token-str").redim.range(
            x=self.x_range, y=self.y_range
        )
        fig_dict = plotly_renderer.get_plot_state(tiles)
        # Check mapbox subplot
        subplot = fig_dict["layout"]["mapbox"]
        self.assertEqual(subplot["style"], "dark")
        self.assertEqual(subplot["accesstoken"], "token-str")
        self.assertEqual(
            subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
        )
    def test_raster_layer(self):
        """A tile-server Tiles element becomes a raster layer with its opts."""
        tiles = StamenTerrain().redim.range(
            x=self.x_range, y=self.y_range
        ).opts(alpha=0.7, min_zoom=3, max_zoom=7)
        fig_dict = plotly_renderer.get_plot_state(tiles)
        # Check dummy trace
        self.assertEqual(len(fig_dict["data"]), 1)
        dummy_trace = fig_dict["data"][0]
        self.assertEqual(dummy_trace["type"], "scattermapbox")
        self.assertEqual(dummy_trace["lon"], [])
        self.assertEqual(dummy_trace["lat"], [])
        self.assertEqual(dummy_trace["showlegend"], False)
        # Check mapbox subplot
        subplot = fig_dict["layout"]["mapbox"]
        self.assertEqual(subplot["style"], "white-bg")
        self.assertEqual(
            subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
        )
        # Check for raster layer: URL, opacity, zoom limits and attribution.
        layers = fig_dict["layout"]["mapbox"].get("layers", [])
        self.assertEqual(len(layers), 1)
        layer = layers[0]
        self.assertEqual(layer["source"][0].lower(), tiles.data.lower())
        self.assertEqual(layer["opacity"], 0.7)
        self.assertEqual(layer["sourcetype"], "raster")
        self.assertEqual(layer["minzoom"], 3)
        self.assertEqual(layer["maxzoom"], 7)
        self.assertEqual(layer["sourceattribution"], _ATTRIBUTIONS[('stamen', 'net/t')])
    def test_overlay(self):
        """Vector tiles, raster tiles, RGB, Points and Bounds compose into
        one mapbox figure with the expected traces and layers."""
        # Base layer is mapbox vector layer
        tiles = Tiles("").opts(mapboxstyle="dark", accesstoken="token-str")
        # Raster tile layer
        stamen_raster = StamenTerrain().opts(alpha=0.7)
        # RGB layer
        rgb_data = np.random.rand(10, 10, 3)
        rgb = RGB(
            rgb_data,
            bounds=(self.x_range[0], self.y_range[0], self.x_range[1], self.y_range[1])
        ).opts(
            opacity=0.5
        )
        # Points layer
        points = Points([(0, 0), (self.x_range[1], self.y_range[1])]).opts(
            show_legend=True
        )
        # Bounds
        bounds = Bounds((self.x_range[0], self.y_range[0], 0, 0))
        # Overlay
        overlay = (tiles * stamen_raster * rgb * points * bounds).redim.range(
            x=self.x_range, y=self.y_range
        )
        # Render to plotly figure dictionary
        fig_dict = plotly_renderer.get_plot_state(overlay)
        # Check number of traces and layers
        traces = fig_dict["data"]
        subplot = fig_dict["layout"]["mapbox"]
        layers = subplot["layers"]
        self.assertEqual(len(traces), 5)
        self.assertEqual(len(layers), 2)
        # Check vector layer
        dummy_trace = traces[0]
        self.assertEqual(dummy_trace["type"], "scattermapbox")
        self.assertEqual(dummy_trace["lon"], [])
        self.assertEqual(dummy_trace["lat"], [])
        self.assertFalse(dummy_trace["showlegend"])
        self.assertEqual(subplot["style"], "dark")
        self.assertEqual(subplot["accesstoken"], "token-str")
        self.assertEqual(
            subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
        )
        # Check raster layer
        dummy_trace = traces[1]
        raster_layer = layers[0]
        self.assertEqual(dummy_trace["type"], "scattermapbox")
        self.assertEqual(dummy_trace["lon"], [])
        self.assertEqual(dummy_trace["lat"], [])
        self.assertFalse(dummy_trace["showlegend"])
        # Check raster_layer
        self.assertEqual(raster_layer["below"], "traces")
        self.assertEqual(raster_layer["opacity"], 0.7)
        self.assertEqual(raster_layer["sourcetype"], "raster")
        self.assertEqual(raster_layer["source"][0].lower(), stamen_raster.data.lower())
        # Check RGB layer
        dummy_trace = traces[2]
        rgb_layer = layers[1]
        self.assertEqual(dummy_trace["type"], "scattermapbox")
        self.assertEqual(dummy_trace["lon"], [None])
        self.assertEqual(dummy_trace["lat"], [None])
        self.assertFalse(dummy_trace["showlegend"])
        # Check rgb_layer: the image is embedded as a base64 PNG whose
        # corners are given counter-clockwise from the top-left in lon/lat.
        self.assertEqual(rgb_layer["below"], "traces")
        self.assertEqual(rgb_layer["opacity"], 0.5)
        self.assertEqual(rgb_layer["sourcetype"], "image")
        self.assertTrue(rgb_layer["source"].startswith("data:image/png;base64,iVBOR"))
        self.assertEqual(rgb_layer["coordinates"], [
            [self.lon_range[0], self.lat_range[1]],
            [self.lon_range[1], self.lat_range[1]],
            [self.lon_range[1], self.lat_range[0]],
            [self.lon_range[0], self.lat_range[0]]
        ])
        # Check Points layer
        points_trace = traces[3]
        self.assertEqual(points_trace["type"], "scattermapbox")
        self.assertEqual(points_trace["lon"], np.array([0, self.lon_range[1]]))
        self.assertEqual(points_trace["lat"], np.array([0, self.lat_range[1]]))
        self.assertEqual(points_trace["mode"], "markers")
        self.assertTrue(points_trace.get("showlegend", True))
        # Check Bounds layer (closed rectangle drawn as a line loop)
        bounds_trace = traces[4]
        self.assertEqual(bounds_trace["type"], "scattermapbox")
        self.assertEqual(bounds_trace["lon"], np.array([
            self.lon_range[0], self.lon_range[0], 0, 0, self.lon_range[0]
        ]))
        self.assertEqual(bounds_trace["lat"], np.array([
            self.lat_range[0], 0, 0, self.lat_range[0], self.lat_range[0]
        ]))
        self.assertEqual(bounds_trace["mode"], "lines")
        self.assertTrue(points_trace["showlegend"], False)
        # No xaxis/yaxis
        self.assertNotIn("xaxis", fig_dict["layout"])
        self.assertNotIn("yaxis", fig_dict["layout"])
| [
"holoviews.element.tiles.StamenTerrain",
"holoviews.element.RGB",
"holoviews.element.Points",
"numpy.array",
"holoviews.element.Tiles.easting_northing_to_lon_lat",
"numpy.random.rand",
"holoviews.element.Tiles",
"holoviews.element.Bounds"
] | [((638, 699), 'holoviews.element.Tiles.easting_northing_to_lon_lat', 'Tiles.easting_northing_to_lon_lat', (['self.x_range', 'self.y_range'], {}), '(self.x_range, self.y_range)\n', (671, 699), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((745, 812), 'holoviews.element.Tiles.easting_northing_to_lon_lat', 'Tiles.easting_northing_to_lon_lat', (['[self.x_center]', '[self.y_center]'], {}), '([self.x_center], [self.y_center])\n', (778, 812), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((950, 1001), 'holoviews.element.Tiles.easting_northing_to_lon_lat', 'Tiles.easting_northing_to_lon_lat', (['self.xs', 'self.ys'], {}), '(self.xs, self.ys)\n', (983, 1001), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((4323, 4348), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(3)'], {}), '(10, 10, 3)\n', (4337, 4348), True, 'import numpy as np\n'), ((4702, 4750), 'holoviews.element.Bounds', 'Bounds', (['(self.x_range[0], self.y_range[0], 0, 0)'], {}), '((self.x_range[0], self.y_range[0], 0, 0))\n', (4708, 4750), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((7392, 7424), 'numpy.array', 'np.array', (['[0, self.lon_range[1]]'], {}), '([0, self.lon_range[1]])\n', (7400, 7424), True, 'import numpy as np\n'), ((7472, 7504), 'numpy.array', 'np.array', (['[0, self.lat_range[1]]'], {}), '([0, self.lat_range[1]])\n', (7480, 7504), True, 'import numpy as np\n'), ((7799, 7872), 'numpy.array', 'np.array', (['[self.lon_range[0], self.lon_range[0], 0, 0, self.lon_range[0]]'], {}), '([self.lon_range[0], self.lon_range[0], 0, 0, self.lon_range[0]])\n', (7807, 7872), True, 'import numpy as np\n'), ((7942, 8015), 'numpy.array', 'np.array', (['[self.lat_range[0], 0, 0, self.lat_range[0], self.lat_range[0]]'], {}), '([self.lat_range[0], 0, 0, self.lat_range[0], self.lat_range[0]])\n', (7950, 8015), True, 'import numpy as np\n'), ((4138, 4147), 'holoviews.element.Tiles', 'Tiles', (['""""""'], 
{}), "('')\n", (4143, 4147), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((4251, 4266), 'holoviews.element.tiles.StamenTerrain', 'StamenTerrain', ([], {}), '()\n', (4264, 4266), False, 'from holoviews.element.tiles import StamenTerrain, _ATTRIBUTIONS\n'), ((4363, 4457), 'holoviews.element.RGB', 'RGB', (['rgb_data'], {'bounds': '(self.x_range[0], self.y_range[0], self.x_range[1], self.y_range[1])'}), '(rgb_data, bounds=(self.x_range[0], self.y_range[0], self.x_range[1],\n self.y_range[1]))\n', (4366, 4457), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((4569, 4621), 'holoviews.element.Points', 'Points', (['[(0, 0), (self.x_range[1], self.y_range[1])]'], {}), '([(0, 0), (self.x_range[1], self.y_range[1])])\n', (4575, 4621), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((1061, 1070), 'holoviews.element.Tiles', 'Tiles', (['""""""'], {}), "('')\n", (1066, 1070), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((2214, 2221), 'holoviews.element.Tiles', 'Tiles', ([], {}), '()\n', (2219, 2221), False, 'from holoviews.element import RGB, Tiles, Points, Bounds\n'), ((2754, 2769), 'holoviews.element.tiles.StamenTerrain', 'StamenTerrain', ([], {}), '()\n', (2767, 2769), False, 'from holoviews.element.tiles import StamenTerrain, _ATTRIBUTIONS\n')] |
"""A collection of shared utilities for all encoders, not intended for external use."""
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
__author__ = 'willmcginnis'
def convert_cols_to_list(cols):
    """Normalize a column specification into a plain Python list.

    Series/ndarray are converted via tolist(), a scalar is wrapped in a
    one-element list, sets and tuples become lists, and a categorical is
    converted to its object values. Anything else (e.g. an existing list)
    is returned unchanged.
    """
    if isinstance(cols, pd.Series):
        return cols.tolist()
    if isinstance(cols, np.ndarray):
        return cols.tolist()
    if np.isscalar(cols):
        return [cols]
    if isinstance(cols, (set, tuple)):
        return list(cols)
    if pd.api.types.is_categorical_dtype(cols):
        return cols.astype(object).tolist()
    return cols
def get_obj_cols(df):
    """
    Returns names of 'object' (or categorical) columns in the DataFrame.
    """
    # is_category() simply wraps pd.api.types.is_categorical_dtype, so the
    # check is inlined here.
    return [
        df.columns.values[idx]
        for idx, dt in enumerate(df.dtypes)
        if dt == 'object' or pd.api.types.is_categorical_dtype(dt)
    ]
def is_category(dtype):
    """Return True if *dtype* is a pandas categorical dtype."""
    return pd.api.types.is_categorical_dtype(dtype)
def convert_inputs(X, y, columns=None, index=None, deep=False):
    """
    Unite arraylike `X` and vectorlike `y` into a DataFrame and Series.

    If both are pandas types already, raises an error if their indexes do not match.
    If one is pandas, the returns will share that index.
    If neither is pandas, a default index will be used, unless `index` is passed.

    Parameters
    ----------
    X: arraylike
    y: listlike
    columns: listlike
        Specifies column names to use for `X`.
        Ignored if `X` is already a dataframe.
        If `None`, use the default pandas column names.
    index: listlike
        The index to use, if neither `X` nor `y` is a pandas type.
        (If one has an index, then this has no effect.)
    deep: bool
        Whether to deep-copy `X`.

    Raises
    ------
    ValueError
        If `X` and `y` differ in length or have mismatching indexes.
    """
    X_alt_index = y.index if isinstance(y, pd.Series) else index
    X = convert_input(X, columns=columns, deep=deep, index=X_alt_index)
    if y is not None:
        y = convert_input_vector(y, index=X.index)

        # Check lengths first: the old elementwise `X.index != y.index`
        # comparison raised a cryptic pandas broadcasting error for unequal
        # lengths before this explicit message could be reached.
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        # N.B.: If either was already pandas, it keeps its index.
        # Index.equals avoids building an O(n) boolean temporary.
        if not X.index.equals(y.index):
            raise ValueError("`X` and `y` both have indexes, but they do not match.")
    return X, y
def convert_input(X, columns=None, deep=False, index=None):
    """
    Unite data into a DataFrame.

    Objects that do not contain column names take the names from the argument.
    Optionally perform deep copy of the data.
    """
    # Already a DataFrame: return it (optionally as a deep copy).
    if isinstance(X, pd.DataFrame):
        return X.copy(deep=True) if deep else X
    # A Series keeps its own name/index; the `columns` argument is ignored.
    if isinstance(X, pd.Series):
        return pd.DataFrame(X, copy=deep)
    # Validate the supplied column names against the data width.
    if columns is not None and np.size(X, 1) != len(columns):
        raise ValueError('The count of the column names does not correspond to the count of the columns')
    if isinstance(X, list):
        # lists are always copied, but for consistency, we still pass the argument
        return pd.DataFrame(X, columns=columns, copy=deep, index=index)
    if isinstance(X, (np.generic, np.ndarray)):
        return pd.DataFrame(X, columns=columns, copy=deep, index=index)
    if isinstance(X, csr_matrix):
        return pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index)
    raise ValueError('Unexpected input type: %s' % (str(type(X))))
def convert_input_vector(y, index):
    """
    Unite target data type into a Series.
    If the target is a Series or a DataFrame, we preserve its index.
    But if the target does not contain index attribute, we use the index from the argument.

    Raises ValueError for a None target or for shapes that cannot be
    interpreted as a single vector.
    """
    if y is None:
        raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None')
    if isinstance(y, pd.Series):
        return y
    elif isinstance(y, np.ndarray):
        if len(np.shape(y))==1:  # vector
            return pd.Series(y, name='target', index=index)
        elif len(np.shape(y))==2 and np.shape(y)[0]==1:  # single row in a matrix
            return pd.Series(y[0, :], name='target', index=index)
        elif len(np.shape(y))==2 and np.shape(y)[1]==1:  # single column in a matrix
            return pd.Series(y[:, 0], name='target', index=index)
        else:
            raise ValueError('Unexpected input shape: %s' % (str(np.shape(y))))
    elif np.isscalar(y):
        return pd.Series([y], name='target', index=index)
    elif isinstance(y, list):
        # NOTE: the order of these branches matters. The isinstance(y[0], list)
        # guards must fire before len(y[0]) is evaluated on scalar elements,
        # and the empty-column case must be tested before the generic
        # single-column case.
        if len(y)==0:  # empty list
            return pd.Series(y, name='target', index=index, dtype=float)
        elif len(y)>0 and not isinstance(y[0], list):  # vector
            return pd.Series(y, name='target', index=index)
        elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1:  # single row in a matrix
            flatten = lambda y: [item for sublist in y for item in sublist]
            return pd.Series(flatten(y), name='target', index=index)
        elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list):  # single empty column in a matrix
            return pd.Series(y[0], name='target', index=index, dtype=float)
        elif len(y)==1 and isinstance(y[0], list):  # single column in a matrix
            return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0]))
        else:
            raise ValueError('Unexpected input shape')
    elif isinstance(y, pd.DataFrame):
        if len(list(y))==0:  # empty DataFrame
            return pd.Series(name='target', index=index, dtype=float)
        if len(list(y))==1:  # a single column
            return y.iloc[:, 0]
        else:
            raise ValueError('Unexpected input shape: %s' % (str(y.shape)))
    else:
        # this covers tuples and other directly convertible types
        return pd.Series(y, name='target', index=index)
def get_generated_cols(X_original, X_transformed, to_transform):
    """
    Returns a list of the generated/transformed columns.

    Arguments:
        X_original: df
            the original (input) DataFrame.
        X_transformed: df
            the transformed (current) DataFrame.
        to_transform: [str]
            a list of columns that were transformed (as in the original DataFrame), commonly self.cols.
    Output:
        a list of columns that were transformed (as in the current DataFrame).
    """
    # Drop the transformed columns from the original column list.
    # (Plain loops instead of comprehensions used for their side effects;
    # .remove keeps the original "first occurrence only / raise if missing"
    # semantics.)
    original_cols = list(X_original.columns)
    for col in to_transform:
        original_cols.remove(col)
    # Whatever remains in the transformed frame after removing the untouched
    # original columns must have been generated by the transformation.
    current_cols = list(X_transformed.columns)
    for col in original_cols:
        current_cols.remove(col)
    return current_cols
class TransformerWithTargetMixin:
    """Mixin for encoders whose transform step requires the target."""

    def fit_transform(self, X, y=None, **fit_params):
        """
        Encoders that utilize the target must make sure that the training data are transformed with:
             transform(X, y)
        and not with:
            transform(X)
        """
        if y is None:
            raise TypeError('fit_transform() missing argument: y')
        fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X, y)
| [
"pandas.DataFrame",
"numpy.size",
"numpy.isscalar",
"pandas.api.types.is_categorical_dtype",
"numpy.shape",
"pandas.Series"
] | [((954, 994), 'pandas.api.types.is_categorical_dtype', 'pd.api.types.is_categorical_dtype', (['dtype'], {}), '(dtype)\n', (987, 994), True, 'import pandas as pd\n'), ((373, 390), 'numpy.isscalar', 'np.isscalar', (['cols'], {}), '(cols)\n', (384, 390), True, 'import numpy as np\n'), ((2751, 2777), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'copy': 'deep'}), '(X, copy=deep)\n', (2763, 2777), True, 'import pandas as pd\n'), ((4556, 4570), 'numpy.isscalar', 'np.isscalar', (['y'], {}), '(y)\n', (4567, 4570), True, 'import numpy as np\n'), ((3031, 3087), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'columns', 'copy': 'deep', 'index': 'index'}), '(X, columns=columns, copy=deep, index=index)\n', (3043, 3087), True, 'import pandas as pd\n'), ((4113, 4153), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""target"""', 'index': 'index'}), "(y, name='target', index=index)\n", (4122, 4153), True, 'import pandas as pd\n'), ((4587, 4629), 'pandas.Series', 'pd.Series', (['[y]'], {'name': '"""target"""', 'index': 'index'}), "([y], name='target', index=index)\n", (4596, 4629), True, 'import pandas as pd\n'), ((2831, 2844), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (2838, 2844), True, 'import numpy as np\n'), ((3242, 3298), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'columns', 'copy': 'deep', 'index': 'index'}), '(X, columns=columns, copy=deep, index=index)\n', (3254, 3298), True, 'import pandas as pd\n'), ((4067, 4078), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4075, 4078), True, 'import numpy as np\n'), ((4255, 4301), 'pandas.Series', 'pd.Series', (['y[0, :]'], {'name': '"""target"""', 'index': 'index'}), "(y[0, :], name='target', index=index)\n", (4264, 4301), True, 'import pandas as pd\n'), ((541, 580), 'pandas.api.types.is_categorical_dtype', 'pd.api.types.is_categorical_dtype', (['cols'], {}), '(cols)\n', (574, 580), True, 'import pandas as pd\n'), ((4406, 4452), 'pandas.Series', 'pd.Series', (['y[:, 0]'], {'name': 
'"""target"""', 'index': 'index'}), "(y[:, 0], name='target', index=index)\n", (4415, 4452), True, 'import pandas as pd\n'), ((4715, 4768), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""target"""', 'index': 'index', 'dtype': 'float'}), "(y, name='target', index=index, dtype=float)\n", (4724, 4768), True, 'import pandas as pd\n'), ((5887, 5927), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""target"""', 'index': 'index'}), "(y, name='target', index=index)\n", (5896, 5927), True, 'import pandas as pd\n'), ((4171, 4182), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4179, 4182), True, 'import numpy as np\n'), ((4191, 4202), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4199, 4202), True, 'import numpy as np\n'), ((4852, 4892), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""target"""', 'index': 'index'}), "(y, name='target', index=index)\n", (4861, 4892), True, 'import pandas as pd\n'), ((5643, 5693), 'pandas.Series', 'pd.Series', ([], {'name': '"""target"""', 'index': 'index', 'dtype': 'float'}), "(name='target', index=index, dtype=float)\n", (5652, 5693), True, 'import pandas as pd\n'), ((4319, 4330), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4327, 4330), True, 'import numpy as np\n'), ((4339, 4350), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4347, 4350), True, 'import numpy as np\n'), ((4532, 4543), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4540, 4543), True, 'import numpy as np\n'), ((5251, 5307), 'pandas.Series', 'pd.Series', (['y[0]'], {'name': '"""target"""', 'index': 'index', 'dtype': 'float'}), "(y[0], name='target', index=index, dtype=float)\n", (5260, 5307), True, 'import pandas as pd\n')] |
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_lcmv, apply_lcmv
from scipy.stats import pearsonr
import config
from config import fname, lcmv_settings
from time_series import simulate_raw, create_epochs
# Don't be verbose
mne.set_log_level(False)
# Output filenames for this simulated vertex (kept for reference; only the
# in-memory objects are used below).
fn_stc_signal = fname.stc_signal(vertex=config.vertex)
fn_simulated_raw = fname.simulated_raw(vertex=config.vertex)
fn_simulated_epochs = fname.simulated_epochs(vertex=config.vertex)
# fn_report_h5 = fname.report(vertex=config.vertex)
fn_report_h5 = None  # Don't produce a report
###############################################################################
# Simulate raw data and create epochs
###############################################################################
print('simulate data')
# Sensor info restricted to MEG channels only (EEG dropped)
info = mne.io.read_info(fname.sample_raw)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False))
fwd_disc_true = mne.read_forward_solution(fname.fwd_discrete_true)
fwd_disc_true = mne.pick_types_forward(fwd_disc_true, meg=True, eeg=False)
# Empty-room recording: realistic noise source for the simulation
er_raw = mne.io.read_raw_fif(fname.ernoise, preload=True)
raw, stc_signal = simulate_raw(info=info, fwd_disc_true=fwd_disc_true, signal_vertex=config.vertex,
                              signal_freq=config.signal_freq, n_trials=config.n_trials,
                              noise_multiplier=config.noise, random_state=config.random,
                              n_noise_dipoles=config.n_noise_dipoles_vol, er_raw=er_raw)
# Ground-truth source orientation (normal) at the simulated vertex
true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]
# del info, fwd_disc_true, er_raw
epochs = create_epochs(raw)
###############################################################################
# Sensor-level analysis
###############################################################################
# One epochs object per sensor selection; each copy keeps only those channels
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make cov matrices: data covariance from the post-stimulus window [0, 1] s,
# noise covariance from the pre-stimulus window [-1, 0] s
cov = mne.compute_covariance(epochs, tmin=0, tmax=1, method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=-1, tmax=0, method='empirical')
# Compute evokeds
evoked_grad = epochs_grad.average()
evoked_mag = epochs_mag.average()
evoked_joint = epochs_joint.average()
###############################################################################
# Compute LCMV beamformer results
###############################################################################
# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)
# One entry per setting: localization error, focality, timecourse
# correlation, orientation error
dists = []
focs = []
corrs = []
ori_errors = []
for setting in lcmv_settings:
    reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, use_noise_cov, reduce_rank = setting
    try:
        # Select the evoked response matching the requested sensor type
        if sensor_type == 'grad':
            evoked = evoked_grad
        elif sensor_type == 'mag':
            evoked = evoked_mag
        elif sensor_type == 'joint':
            evoked = evoked_joint
        else:
            # Fix: interpolate the value into the message (it was previously
            # passed as a stray second argument to ValueError).
            raise ValueError('Invalid sensor type: %s' % sensor_type)
        filters = make_lcmv(evoked.info, fwd_disc_true, cov, reg=reg,
                           pick_ori=pick_ori, weight_norm=weight_norm,
                           inversion=inversion,
                           depth=1. if normalize_fwd else None,
                           noise_cov=noise_cov if use_noise_cov else None,
                           reduce_rank=reduce_rank)
        stc_est = apply_lcmv(evoked, filters).crop(0.001, 1)
        # Estimated source location is at peak power
        if pick_ori == 'vector':
            stc_est_power = (stc_est.magnitude() ** 2).sum().sqrt()
        else:
            stc_est_power = (stc_est ** 2).sum().sqrt()
        peak_vertex, _ = stc_est_power.get_peak(vert_as_index=True)
        # Compute distance between true and estimated source locations
        pos_est = fwd_disc_man['source_rr'][peak_vertex]
        pos_true = fwd_disc_man['source_rr'][config.vertex]
        dist = np.linalg.norm(pos_est - pos_true)
        # Ratio between estimated peak activity and all estimated activity.
        focality_score = stc_est_power.data[peak_vertex, 0] / stc_est_power.data.sum()
        # Correlation between true and reconstructed timecourse
        true_time_course = stc_signal.copy().crop(0, 1).data[0]
        if pick_ori == 'vector':
            estimated_time_course = np.abs(stc_est.magnitude().data[peak_vertex])
        else:
            estimated_time_course = np.abs(stc_est.data[peak_vertex])
        corr = pearsonr(np.abs(true_time_course), estimated_time_course)[0]
        # Angle between estimated and true source orientation; fold into
        # [0, 90] degrees since orientation sign is ambiguous
        if pick_ori == 'max-power':
            estimated_ori = filters['max_power_ori'][config.vertex]
            ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
            if ori_error > 90:
                ori_error = 180 - ori_error
        elif pick_ori == 'vector':
            _, peak_time = stc_est.magnitude().get_peak(time_as_index=True)
            estimated_ori = stc_est.data[peak_vertex, :, peak_time]
            estimated_ori /= np.linalg.norm(estimated_ori)
            ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
            if ori_error > 90:
                ori_error = 180 - ori_error
        else:
            ori_error = np.nan
    except Exception as e:
        # Best-effort sweep: a failing beamformer configuration should not
        # abort the whole grid search; record NaNs for this setting instead.
        print(e)
        dist = np.nan
        focality_score = np.nan
        corr = np.nan
        ori_error = np.nan
    print(setting, dist, focality_score, corr, ori_error)
    dists.append(dist)
    focs.append(focality_score)
    corrs.append(corr)
    ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
# One row per LCMV setting, plus the four metric columns computed above
df = pd.DataFrame(lcmv_settings,
                 columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
                          'weight_norm', 'normalize_fwd', 'use_noise_cov', 'reduce_rank'])
df['dist'] = dists
df['focality'] = focs
df['corr'] = corrs
df['ori_error'] = ori_errors
#df.to_csv(fname.lcmv_results(vertex=config.vertex, noise=config.noise))
print('OK!')
| [
"pandas.DataFrame",
"config.fname.stc_signal",
"mne.io.read_raw_fif",
"time_series.simulate_raw",
"numpy.abs",
"mne.pick_types",
"mne.beamformer.apply_lcmv",
"time_series.create_epochs",
"mne.set_log_level",
"mne.pick_types_forward",
"mne.beamformer.make_lcmv",
"mne.compute_covariance",
"mne... | [((259, 283), 'mne.set_log_level', 'mne.set_log_level', (['(False)'], {}), '(False)\n', (276, 283), False, 'import mne\n'), ((301, 339), 'config.fname.stc_signal', 'fname.stc_signal', ([], {'vertex': 'config.vertex'}), '(vertex=config.vertex)\n', (317, 339), False, 'from config import fname, lcmv_settings\n'), ((359, 400), 'config.fname.simulated_raw', 'fname.simulated_raw', ([], {'vertex': 'config.vertex'}), '(vertex=config.vertex)\n', (378, 400), False, 'from config import fname, lcmv_settings\n'), ((423, 467), 'config.fname.simulated_epochs', 'fname.simulated_epochs', ([], {'vertex': 'config.vertex'}), '(vertex=config.vertex)\n', (445, 467), False, 'from config import fname, lcmv_settings\n'), ((797, 831), 'mne.io.read_info', 'mne.io.read_info', (['fname.sample_raw'], {}), '(fname.sample_raw)\n', (813, 831), False, 'import mne\n'), ((918, 968), 'mne.read_forward_solution', 'mne.read_forward_solution', (['fname.fwd_discrete_true'], {}), '(fname.fwd_discrete_true)\n', (943, 968), False, 'import mne\n'), ((985, 1043), 'mne.pick_types_forward', 'mne.pick_types_forward', (['fwd_disc_true'], {'meg': '(True)', 'eeg': '(False)'}), '(fwd_disc_true, meg=True, eeg=False)\n', (1007, 1043), False, 'import mne\n'), ((1053, 1101), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['fname.ernoise'], {'preload': '(True)'}), '(fname.ernoise, preload=True)\n', (1072, 1101), False, 'import mne\n'), ((1121, 1391), 'time_series.simulate_raw', 'simulate_raw', ([], {'info': 'info', 'fwd_disc_true': 'fwd_disc_true', 'signal_vertex': 'config.vertex', 'signal_freq': 'config.signal_freq', 'n_trials': 'config.n_trials', 'noise_multiplier': 'config.noise', 'random_state': 'config.random', 'n_noise_dipoles': 'config.n_noise_dipoles_vol', 'er_raw': 'er_raw'}), '(info=info, fwd_disc_true=fwd_disc_true, signal_vertex=config.\n vertex, signal_freq=config.signal_freq, n_trials=config.n_trials,\n noise_multiplier=config.noise, random_state=config.random,\n 
n_noise_dipoles=config.n_noise_dipoles_vol, er_raw=er_raw)\n', (1133, 1391), False, 'from time_series import simulate_raw, create_epochs\n'), ((1574, 1592), 'time_series.create_epochs', 'create_epochs', (['raw'], {}), '(raw)\n', (1587, 1592), False, 'from time_series import simulate_raw, create_epochs\n'), ((1956, 2022), 'mne.compute_covariance', 'mne.compute_covariance', (['epochs'], {'tmin': '(0)', 'tmax': '(1)', 'method': '"""empirical"""'}), "(epochs, tmin=0, tmax=1, method='empirical')\n", (1978, 2022), False, 'import mne\n'), ((2035, 2102), 'mne.compute_covariance', 'mne.compute_covariance', (['epochs'], {'tmin': '(-1)', 'tmax': '(0)', 'method': '"""empirical"""'}), "(epochs, tmin=-1, tmax=0, method='empirical')\n", (2057, 2102), False, 'import mne\n'), ((2468, 2517), 'mne.read_forward_solution', 'mne.read_forward_solution', (['fname.fwd_discrete_man'], {}), '(fname.fwd_discrete_man)\n', (2493, 2517), False, 'import mne\n'), ((5818, 5974), 'pandas.DataFrame', 'pd.DataFrame', (['lcmv_settings'], {'columns': "['reg', 'sensor_type', 'pick_ori', 'inversion', 'weight_norm',\n 'normalize_fwd', 'use_noise_cov', 'reduce_rank']"}), "(lcmv_settings, columns=['reg', 'sensor_type', 'pick_ori',\n 'inversion', 'weight_norm', 'normalize_fwd', 'use_noise_cov',\n 'reduce_rank'])\n", (5830, 5974), True, 'import pandas as pd\n'), ((859, 900), 'mne.pick_types', 'mne.pick_types', (['info'], {'meg': '(True)', 'eeg': '(False)'}), '(info, meg=True, eeg=False)\n', (873, 900), False, 'import mne\n'), ((3021, 3260), 'mne.beamformer.make_lcmv', 'make_lcmv', (['evoked.info', 'fwd_disc_true', 'cov'], {'reg': 'reg', 'pick_ori': 'pick_ori', 'weight_norm': 'weight_norm', 'inversion': 'inversion', 'depth': '(1.0 if normalize_fwd else None)', 'noise_cov': '(noise_cov if use_noise_cov else None)', 'reduce_rank': 'reduce_rank'}), '(evoked.info, fwd_disc_true, cov, reg=reg, pick_ori=pick_ori,\n weight_norm=weight_norm, inversion=inversion, depth=1.0 if\n normalize_fwd else None, 
noise_cov=noise_cov if use_noise_cov else None,\n reduce_rank=reduce_rank)\n', (3030, 3260), False, 'from mne.beamformer import make_lcmv, apply_lcmv\n'), ((3947, 3981), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos_est - pos_true)'], {}), '(pos_est - pos_true)\n', (3961, 3981), True, 'import numpy as np\n'), ((4440, 4473), 'numpy.abs', 'np.abs', (['stc_est.data[peak_vertex]'], {}), '(stc_est.data[peak_vertex])\n', (4446, 4473), True, 'import numpy as np\n'), ((3407, 3434), 'mne.beamformer.apply_lcmv', 'apply_lcmv', (['evoked', 'filters'], {}), '(evoked, filters)\n', (3417, 3434), False, 'from mne.beamformer import make_lcmv, apply_lcmv\n'), ((4498, 4522), 'numpy.abs', 'np.abs', (['true_time_course'], {}), '(true_time_course)\n', (4504, 4522), True, 'import numpy as np\n'), ((4752, 4787), 'numpy.arccos', 'np.arccos', (['(estimated_ori @ true_ori)'], {}), '(estimated_ori @ true_ori)\n', (4761, 4787), True, 'import numpy as np\n'), ((5072, 5101), 'numpy.linalg.norm', 'np.linalg.norm', (['estimated_ori'], {}), '(estimated_ori)\n', (5086, 5101), True, 'import numpy as np\n'), ((5137, 5172), 'numpy.arccos', 'np.arccos', (['(estimated_ori @ true_ori)'], {}), '(estimated_ori @ true_ori)\n', (5146, 5172), True, 'import numpy as np\n')] |
"""Test training of sensor classification k-fold validation."""
import os
import torch
from iotai_sensor_classification.dataset import read_dataset, read_recordings
from iotai_sensor_classification.model_handler import ModelCall
from iotai_sensor_classification.evaluation import evaluate_prediction
from data.gestures import accelerometer_gyroscope
from iotai_sensor_classification.trainer import sensor_classification
from iotai_sensor_classification.plot_util import plot_columns, plot_confusion_matrix, plot_lines
from iotai_sensor_classification.preprocess import check_windows
import numpy as np
from sklearn import model_selection
# Directory where plots and model checkpoints produced by these tests land
TEST_OUTPUT = os.path.join("test_output", "gestures", "trainer", "kfold")
# Number of samples in each gesture recording window
SAMPLES_PER_RECORDING = 120
# Number of cross-validation folds
K_FOLDS = 6
def test_window_size():
    """Determine window size for linear accelerometer, gyroscope measurements of motion gestures."""
    data_dir = os.path.dirname(accelerometer_gyroscope.__file__)
    windowed = check_windows(read_recordings(data_dir), window_size=SAMPLES_PER_RECORDING)
    os.makedirs(TEST_OUTPUT, exist_ok=True)
    # One plot per gesture label, saved under TEST_OUTPUT
    for label_name, label_data in windowed.items():
        out_file = os.path.join(TEST_OUTPUT, f"{label_name}-filtered-windows.png")
        plot_lines(
            label_data,
            name=f"{label_name} gesture filtered {SAMPLES_PER_RECORDING} windows",
            filepath=out_file,
            vertical_tick_spacing=SAMPLES_PER_RECORDING,
        )
def test_train_gesture_class_kfold_linear():
    """Test trainer gesture classification model from sensor data.

    Splits off a held-out test set, runs K_FOLDS cross-validation on the
    remainder, retrains on all non-test data, round-trips the weights through
    a checkpoint file and plots a confusion matrix on the test set.
    :return:
    """
    X, y, label_coder = read_dataset(os.path.dirname(accelerometer_gyroscope.__file__), SAMPLES_PER_RECORDING)
    X_train_val, X_test, y_train_val, y_test = model_selection.train_test_split(X, y, test_size=0.15, shuffle=True)
    kf = model_selection.KFold(n_splits=K_FOLDS)
    k_validations = []
    models = []
    max_val_accs = []
    for train, val in kf.split(X_train_val):
        X_train = X_train_val[train]
        X_val = X_train_val[val]
        y_train = y_train_val[train]
        y_val = y_train_val[val]
        # Input dimension is the flattened (samples, channels) window
        model = sensor_classification.LinearModel(input_dim=X_train.shape[1] * X_train.shape[2],
                                                  output_dim=len(np.unique(y_train)))
        val_df = sensor_classification.train_gesture_classification(model, X_train, y_train, X_val, y_val)
        k_validations.append(val_df)
        models.append(model)
        max_val_acc = val_df['val_acc'].max()
        max_val_accs.append(max_val_acc)
    # train with all non test data
    model = sensor_classification.LinearModel(input_dim=X_train_val.shape[1] * X_train_val.shape[2],
                                              output_dim=len(np.unique(y_train_val)))
    train_val_loss_df = sensor_classification.train_gesture_classification(model, X_train_val, y_train_val)
    state_path = os.path.join(TEST_OUTPUT, "gesture_class_gyro_kval_linear_state_dict.zip")
    torch.save(model.state_dict(), state_path)
    # NOTE(review): max_val_acc is the accuracy of the *last* fold only --
    # confirm whether the mean of max_val_accs was intended for the title.
    plot_columns(train_val_loss_df,
                 name=f"Gesture classification K-validation linear model, max acc={max_val_acc:.2} [accel, gyro]",
                 filepath=os.path.join(TEST_OUTPUT, "gesture_class_gyro_kval_linear.png"),
                 title_mean=False)
    # Round-trip the checkpoint and verify the loaded weights match
    load_model = sensor_classification.LinearModel(input_dim=X_train.shape[1] * X_train.shape[2],
                                                   output_dim=len(np.unique(y_train)))
    load_model.load_state_dict(torch.load(state_path))
    assert all(load_model.state_dict()['layer3.bias'] == model.state_dict()['layer3.bias'])
    model_call = ModelCall(model=load_model, decode=label_coder.decode)
    test_accuracy_, test_matrix = evaluate_prediction(model_call, label_coder.decode, X_test, y_test)
    # Class labels in a stable order for the confusion-matrix axes
    # (removed a leftover no-op `unique_y_labels` expression statement)
    unique_y = np.unique(y)
    unique_y.sort()
    unique_y_labels = label_coder.decode(unique_y)
    plot_confusion_matrix(test_matrix, classes=unique_y_labels,
                          title=f"Gesture classification k-validation linear acc={test_accuracy_:.2} [accel, gyro]",
                          output_path=os.path.join(TEST_OUTPUT, "gesture_class_gyro_kval_linear_confusion.png"))
| [
"os.makedirs",
"iotai_sensor_classification.evaluation.evaluate_prediction",
"sklearn.model_selection.train_test_split",
"os.path.dirname",
"torch.load",
"iotai_sensor_classification.model_handler.ModelCall",
"sklearn.model_selection.KFold",
"iotai_sensor_classification.preprocess.check_windows",
"o... | [((657, 716), 'os.path.join', 'os.path.join', (['"""test_output"""', '"""gestures"""', '"""trainer"""', '"""kfold"""'], {}), "('test_output', 'gestures', 'trainer', 'kfold')\n", (669, 716), False, 'import os\n'), ((989, 1049), 'iotai_sensor_classification.preprocess.check_windows', 'check_windows', (['recordings'], {'window_size': 'SAMPLES_PER_RECORDING'}), '(recordings, window_size=SAMPLES_PER_RECORDING)\n', (1002, 1049), False, 'from iotai_sensor_classification.preprocess import check_windows\n'), ((1054, 1093), 'os.makedirs', 'os.makedirs', (['TEST_OUTPUT'], {'exist_ok': '(True)'}), '(TEST_OUTPUT, exist_ok=True)\n', (1065, 1093), False, 'import os\n'), ((1739, 1807), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['X', 'y'], {'test_size': '(0.15)', 'shuffle': '(True)'}), '(X, y, test_size=0.15, shuffle=True)\n', (1771, 1807), False, 'from sklearn import model_selection\n'), ((1817, 1856), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': 'K_FOLDS'}), '(n_splits=K_FOLDS)\n', (1838, 1856), False, 'from sklearn import model_selection\n'), ((2793, 2880), 'iotai_sensor_classification.trainer.sensor_classification.train_gesture_classification', 'sensor_classification.train_gesture_classification', (['model', 'X_train_val', 'y_train_val'], {}), '(model, X_train_val,\n y_train_val)\n', (2843, 2880), False, 'from iotai_sensor_classification.trainer import sensor_classification\n'), ((2894, 2968), 'os.path.join', 'os.path.join', (['TEST_OUTPUT', '"""gesture_class_gyro_kval_linear_state_dict.zip"""'], {}), "(TEST_OUTPUT, 'gesture_class_gyro_kval_linear_state_dict.zip')\n", (2906, 2968), False, 'import os\n'), ((3643, 3697), 'iotai_sensor_classification.model_handler.ModelCall', 'ModelCall', ([], {'model': 'load_model', 'decode': 'label_coder.decode'}), '(model=load_model, decode=label_coder.decode)\n', (3652, 3697), False, 'from iotai_sensor_classification.model_handler import ModelCall\n'), ((3732, 3799), 
'iotai_sensor_classification.evaluation.evaluate_prediction', 'evaluate_prediction', (['model_call', 'label_coder.decode', 'X_test', 'y_test'], {}), '(model_call, label_coder.decode, X_test, y_test)\n', (3751, 3799), False, 'from iotai_sensor_classification.evaluation import evaluate_prediction\n'), ((3815, 3827), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3824, 3827), True, 'import numpy as np\n'), ((917, 966), 'os.path.dirname', 'os.path.dirname', (['accelerometer_gyroscope.__file__'], {}), '(accelerometer_gyroscope.__file__)\n', (932, 966), False, 'import os\n'), ((1618, 1667), 'os.path.dirname', 'os.path.dirname', (['accelerometer_gyroscope.__file__'], {}), '(accelerometer_gyroscope.__file__)\n', (1633, 1667), False, 'import os\n'), ((2303, 2396), 'iotai_sensor_classification.trainer.sensor_classification.train_gesture_classification', 'sensor_classification.train_gesture_classification', (['model', 'X_train', 'y_train', 'X_val', 'y_val'], {}), '(model, X_train, y_train,\n X_val, y_val)\n', (2353, 2396), False, 'from iotai_sensor_classification.trainer import sensor_classification\n'), ((3510, 3532), 'torch.load', 'torch.load', (['state_path'], {}), '(state_path)\n', (3520, 3532), False, 'import torch\n'), ((3194, 3257), 'os.path.join', 'os.path.join', (['TEST_OUTPUT', '"""gesture_class_gyro_kval_linear.png"""'], {}), "(TEST_OUTPUT, 'gesture_class_gyro_kval_linear.png')\n", (3206, 3257), False, 'import os\n'), ((4138, 4211), 'os.path.join', 'os.path.join', (['TEST_OUTPUT', '"""gesture_class_gyro_kval_linear_confusion.png"""'], {}), "(TEST_OUTPUT, 'gesture_class_gyro_kval_linear_confusion.png')\n", (4150, 4211), False, 'import os\n'), ((1317, 1380), 'os.path.join', 'os.path.join', (['TEST_OUTPUT', 'f"""{label_name}-filtered-windows.png"""'], {}), "(TEST_OUTPUT, f'{label_name}-filtered-windows.png')\n", (1329, 1380), False, 'import os\n'), ((2744, 2766), 'numpy.unique', 'np.unique', (['y_train_val'], {}), '(y_train_val)\n', (2753, 2766), True, 'import 
numpy as np\n'), ((3458, 3476), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3467, 3476), True, 'import numpy as np\n'), ((2265, 2283), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2274, 2283), True, 'import numpy as np\n')] |
# Scatter plot of a gaussian distribution
# with varying color and point sizes
from vedo import *
from vedo.pyplot import plot
import numpy as np
# 1000 (x, y) samples drawn from a standard 2D gaussian
n = 1000
x = np.random.randn(n)
y = np.random.randn(n)
# define what size must have each marker:
marker_sizes = np.sin(2*x)/8
# define a (r,g,b) list of colors for each marker:
marker_cols = np.c_[np.cos(2*x), np.zeros(n), np.zeros(n)]
# Panel 1: uniform markers
txt0 = Text2D("A scatter plot of a\n2D gaussian distribution")
plt0 = plot(x, y, ma=0.3, lw=0,    # ma = marker alpha
            marker="*",             # marker style
            xtitle="variable A",
            ytitle="variable B",
)
# Panel 2: per-point marker sizes, single color
txt1 = Text2D("marker size proportional to sin(2x) ")
plt1 = plot(x, y, ma=0.3, lw=0,
            marker="*",             # marker style
            ms=marker_sizes,        # VARIABLE marker sizes
            mc='red',               # same fixed color for markers
)
# Panel 3: per-point marker sizes and colors
txt2 = Text2D("marker size proportional to sin(2x)\nred level proportional to cos(2x)")
plt2 = plot(x, y, ma=0.3, lw=0,
            marker=">",             # marker style
            ms=marker_sizes,        # VARIABLE marker sizes
            mc=marker_cols,         # VARIABLE marker colors
)
# Render the three panels side by side; the last call blocks until closed
show(plt0, txt0, at=0, N=3, size=(1800,500))
show(plt1, txt1, at=1)
show(plt2, txt2, at=2, interactive=True).close()
| [
"numpy.random.randn",
"numpy.zeros",
"vedo.pyplot.plot",
"numpy.sin",
"numpy.cos"
] | [((160, 178), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (175, 178), True, 'import numpy as np\n'), ((183, 201), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (198, 201), True, 'import numpy as np\n'), ((457, 535), 'vedo.pyplot.plot', 'plot', (['x', 'y'], {'ma': '(0.3)', 'lw': '(0)', 'marker': '"""*"""', 'xtitle': '"""variable A"""', 'ytitle': '"""variable B"""'}), "(x, y, ma=0.3, lw=0, marker='*', xtitle='variable A', ytitle='variable B')\n", (461, 535), False, 'from vedo.pyplot import plot\n'), ((704, 767), 'vedo.pyplot.plot', 'plot', (['x', 'y'], {'ma': '(0.3)', 'lw': '(0)', 'marker': '"""*"""', 'ms': 'marker_sizes', 'mc': '"""red"""'}), "(x, y, ma=0.3, lw=0, marker='*', ms=marker_sizes, mc='red')\n", (708, 767), False, 'from vedo.pyplot import plot\n'), ((1027, 1096), 'vedo.pyplot.plot', 'plot', (['x', 'y'], {'ma': '(0.3)', 'lw': '(0)', 'marker': '""">"""', 'ms': 'marker_sizes', 'mc': 'marker_cols'}), "(x, y, ma=0.3, lw=0, marker='>', ms=marker_sizes, mc=marker_cols)\n", (1031, 1096), False, 'from vedo.pyplot import plot\n'), ((260, 273), 'numpy.sin', 'np.sin', (['(2 * x)'], {}), '(2 * x)\n', (266, 273), True, 'import numpy as np\n'), ((346, 359), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (352, 359), True, 'import numpy as np\n'), ((359, 370), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (367, 370), True, 'import numpy as np\n'), ((372, 383), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (380, 383), True, 'import numpy as np\n')] |
import torch
import math
import random
from pycm import ConfusionMatrix
import time
import os
import cv2
import sys
from pytorch_metric_learning import losses, testers
from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator
from pytorch_grad_cam import (
GradCAM,
ScoreCAM,
GradCAMPlusPlus,
AblationCAM,
XGradCAM,
EigenCAM,
EigenGradCAM,
LayerCAM,
FullGrad,
)
from pytorch_grad_cam.utils.image import show_cam_on_image
import numpy as np
from torch.utils.tensorboard import SummaryWriter
cur_path = os.path.abspath(os.path.dirname(__file__))  # directory containing this module
def get_category(path, mode="list"):
    """Read label.txt and return the category names.

    Args:
        path: path to the label file, one class name per line.
        mode: output format; "list" -> ['dog', 'cat'],
              "dict" -> {0: 'dog', 1: 'cat'}.
    Returns:
        The class names, as a list or an index->name dict.
    """
    assert mode in ["list", "dict"]
    assert os.path.exists(path), "Warn: %s does not exist" % path
    # Use a context manager so the file handle is always closed
    # (previously `open(path).readlines()` leaked the handle).
    with open(path, "r") as f:
        labels = f.readlines()
    labels = [label.strip() for label in labels if label != "\n"]
    if mode == "dict":
        index = list(range(0, len(labels)))
        return dict(zip(index, labels))
    else:
        return labels
def init_env(cfg):
    """
    Initialize the training environment.

    Fixes random seeds, configures cuDNN, creates a timestamped experiment
    directory and a TensorBoard writer.

    Args:
        cfg (dict): training config; must contain cfg["Models"]["backbone"].
    Returns:
        (SummaryWriter, str): TensorBoard writer and the checkpoint path
        prefix (directory + backbone name).
    """
    # Fix random seeds for reproducibility
    seed = 227
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Configure cuDNN (benchmark=True favors speed over full determinism)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    # Create the experiment log directory, named by the current timestamp.
    # NOTE(review): the timestamp contains ':' which is invalid in Windows
    # paths -- presumably this only runs on POSIX systems; confirm.
    exp_path = (
        os.path.dirname(cur_path)
        + "/ExpLog/"
        + time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
        + "/"
    )
    tb_path, checkpoint_path = [exp_path + "tb_log/", exp_path + "checkpoint/"]
    os.makedirs(tb_path)
    os.makedirs(checkpoint_path)
    # Initialize TensorBoard and record the config for this run
    tb_writer = SummaryWriter(tb_path)
    tb_writer.add_text("Config", str(cfg))
    print("*" * 28)
    print("TensorBoard | Checkpoint save to ", exp_path, "\n")
    return tb_writer, checkpoint_path + cfg["Models"]["backbone"]
@torch.no_grad()
def eval_model(model, data_loader):
    """
    Standard classification: evaluation metrics.

    Runs the model over ``data_loader`` and returns a pycm ConfusionMatrix
    built from predicted vs. ground-truth labels.
    """
    device = next(model.parameters()).device
    # (removed unused `scores_list` accumulator and unused enumerate index)
    preds_list, labels_list = [], []
    for imgs, labels in data_loader:
        imgs, labels = imgs.to(device), labels.to(device)
        scores = model(imgs)
        # softmax is monotonic, so argmax is unchanged; kept for clarity
        scores = torch.nn.functional.softmax(scores, dim=1)
        preds = torch.argmax(scores, dim=1)
        preds_list.append(preds)
        labels_list.append(labels)
    preds_list = torch.cat(preds_list, dim=0).cpu().numpy()
    labels_list = torch.cat(labels_list, dim=0).cpu().numpy()
    # Aggregate into a confusion matrix
    return ConfusionMatrix(labels_list, preds_list)
@torch.no_grad()
def eval_metric_model(model, train_set, val_set):
    """Metric-learning evaluation: precision@1 of val embeddings queried
    against the train embeddings (pytorch-metric-learning utilities).
    """
    tester = testers.BaseTester(batch_size=64, dataloader_num_workers=4)
    train_embeddings, train_labels = tester.get_all_embeddings(train_set, model)
    test_embeddings, test_labels = tester.get_all_embeddings(val_set, model)
    # Labels come back shaped (N, 1); flatten to (N,) for the calculator.
    train_labels, test_labels = train_labels.squeeze(1), test_labels.squeeze(1)
    accuracy_calculator = AccuracyCalculator(include=("precision_at_1",), k=1)
    accuracies = accuracy_calculator.get_accuracy(
        test_embeddings, train_embeddings, test_labels, train_labels, False
    )
    return accuracies["precision_at_1"]
def tensor2img(tensor, BCHW2BHWC=False):
    """Undo ImageNet normalisation and swap RGB -> BGR for visualisation.

    Args:
        tensor: float tensor of shape [B, C, H, W] (C == 3).
        BCHW2BHWC: when True, additionally permute the result to [B, H, W, C].

    Returns:
        De-normalised BGR tensor (shape depends on BCHW2BHWC).
    """
    _, C, H, W = tensor.shape
    # Per-channel ImageNet statistics, broadcast over the spatial dims.
    mean = torch.FloatTensor((0.485, 0.456, 0.406)).view(C, 1, 1).expand(3, H, W)
    std = torch.FloatTensor((0.229, 0.224, 0.225)).view(C, 1, 1).expand(3, H, W)
    # De-normalise on the input's dtype/device, then flip the channel order.
    out = tensor * std.to(tensor) + mean.to(tensor)
    out = out[:, [2, 1, 0], :, :]
    if BCHW2BHWC:
        out = out.permute(0, 2, 3, 1)
    return out
def vis_cam(model, img_tensor, pool_name="global_pool", cam_algorithm=GradCAM):
    """Visualise a class-activation map for img_tensor.

    Args:
        model: classification network.
        img_tensor (tensor): shape [B, C, H, W].
        pool_name (str): name of the global-pooling module; the layer just
            before it is used as the CAM target layer. timm models name it
            "global_pool"; custom models must supply their own name.
        cam_algorithm: one of the pytorch_grad_cam algorithms (GradCAM,
            ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM,
            EigenGradCAM, LayerCAM, FullGrad).

    Returns:
        BGR image (numpy) with the CAM heat map blended in; exits the
        process when the pooling layer cannot be located.
    """
    modules_list = []
    for name, module in model.named_modules():
        if pool_name in name:  # stop once the global-pooling layer is reached
            break
        modules_list.append(module)
    target_layers = [modules_list[-1]]  # the layer right before global pooling
    # de-normalise, RGB->BGR, [B,C,H,W] -> [B,H,W,C]
    bgr_img = tensor2img(img_tensor.cpu(), BCHW2BHWC=True)
    bgr_img = bgr_img.squeeze(0).numpy()
    try:
        with cam_algorithm(model=model, target_layers=target_layers) as cam:
            cam.batch_size = 32
            grayscale_cam = cam(
                input_tensor=img_tensor,
                targets=None,  # default: visualise the top-scoring class
                aug_smooth=True,  # smoothing strategy 1
                eigen_smooth=True,  # smoothing strategy 2
            )
            grayscale_cam = grayscale_cam[0, :]
            cam_image = show_cam_on_image(bgr_img, grayscale_cam, use_rgb=False)
            return cam_image
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        print("错误: 请尝试确认 当前模型的全局池化层名称,并赋值pool_name")
        sys.exit()
| [
"numpy.random.seed",
"torch.argmax",
"pytorch_grad_cam.utils.image.show_cam_on_image",
"torch.cat",
"torch.no_grad",
"os.path.dirname",
"os.path.exists",
"torch.FloatTensor",
"pytorch_metric_learning.testers.BaseTester",
"random.seed",
"torch.utils.tensorboard.SummaryWriter",
"pycm.ConfusionMa... | [((2015, 2030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2028, 2030), False, 'import torch\n'), ((2706, 2721), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2719, 2721), False, 'import torch\n'), ((577, 602), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (592, 602), False, 'import os\n'), ((788, 808), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (802, 808), False, 'import os\n'), ((1169, 1186), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1180, 1186), False, 'import random\n'), ((1191, 1211), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1205, 1211), True, 'import numpy as np\n'), ((1216, 1239), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1233, 1239), False, 'import torch\n'), ((1244, 1272), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1266, 1272), False, 'import torch\n'), ((1277, 1309), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1303, 1309), False, 'import torch\n'), ((1705, 1725), 'os.makedirs', 'os.makedirs', (['tb_path'], {}), '(tb_path)\n', (1716, 1725), False, 'import os\n'), ((1730, 1758), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1741, 1758), False, 'import os\n'), ((1797, 1819), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['tb_path'], {}), '(tb_path)\n', (1810, 1819), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2662, 2702), 'pycm.ConfusionMatrix', 'ConfusionMatrix', (['labels_list', 'preds_list'], {}), '(labels_list, preds_list)\n', (2677, 2702), False, 'from pycm import ConfusionMatrix\n'), ((2815, 2874), 'pytorch_metric_learning.testers.BaseTester', 'testers.BaseTester', ([], {'batch_size': '(64)', 'dataloader_num_workers': '(4)'}), '(batch_size=64, dataloader_num_workers=4)\n', (2833, 2874), False, 'from pytorch_metric_learning import losses, 
testers\n'), ((3140, 3192), 'pytorch_metric_learning.utils.accuracy_calculator.AccuracyCalculator', 'AccuracyCalculator', ([], {'include': "('precision_at_1',)", 'k': '(1)'}), "(include=('precision_at_1',), k=1)\n", (3158, 3192), False, 'from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator\n'), ((2362, 2404), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (2389, 2404), False, 'import torch\n'), ((2421, 2448), 'torch.argmax', 'torch.argmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (2433, 2448), False, 'import torch\n'), ((5249, 5305), 'pytorch_grad_cam.utils.image.show_cam_on_image', 'show_cam_on_image', (['bgr_img', 'grayscale_cam'], {'use_rgb': '(False)'}), '(bgr_img, grayscale_cam, use_rgb=False)\n', (5266, 5305), False, 'from pytorch_grad_cam.utils.image import show_cam_on_image\n'), ((5404, 5414), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5412, 5414), False, 'import sys\n'), ((1491, 1516), 'os.path.dirname', 'os.path.dirname', (['cur_path'], {}), '(cur_path)\n', (1506, 1516), False, 'import os\n'), ((1583, 1599), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1597, 1599), False, 'import time\n'), ((2536, 2564), 'torch.cat', 'torch.cat', (['preds_list'], {'dim': '(0)'}), '(preds_list, dim=0)\n', (2545, 2564), False, 'import torch\n'), ((2597, 2626), 'torch.cat', 'torch.cat', (['labels_list'], {'dim': '(0)'}), '(labels_list, dim=0)\n', (2606, 2626), False, 'import torch\n'), ((3628, 3668), 'torch.FloatTensor', 'torch.FloatTensor', (['(0.485, 0.456, 0.406)'], {}), '((0.485, 0.456, 0.406))\n', (3645, 3668), False, 'import torch\n'), ((3711, 3751), 'torch.FloatTensor', 'torch.FloatTensor', (['(0.229, 0.224, 0.225)'], {}), '((0.229, 0.224, 0.225))\n', (3728, 3751), False, 'import torch\n')] |
import os
import numpy as np
from dtcwt.compat import dtwavexfm2, dtwaveifm2
from dtcwt.coeffs import biort, qshift
import tests.datasets as datasets
# Maximum permitted absolute reconstruction error in the round-trip tests.
TOLERANCE = 1e-12
def setup():
    """Load the mandrill test image (float64) and a non-square crop of it."""
    global mandrill, mandrill_crop
    full = datasets.mandrill().astype(np.float64)
    mandrill = full
    mandrill_crop = full[:233, :301]
def test_mandrill_loaded():
    """The fixture must be a 512x512 float64 image with values in [0, 1]."""
    assert mandrill.shape == (512, 512)
    assert 0 <= mandrill.min()
    assert mandrill.max() <= 1
    assert mandrill.dtype == np.float64
def test_reconstruct():
    """Forward then inverse DT-CWT reproduces the input to tolerance."""
    Yl, Yh = dtwavexfm2(mandrill)
    recon = dtwaveifm2(Yl, Yh)
    assert np.abs(recon - mandrill).max() < TOLERANCE
def test_reconstruct_crop():
    """Round-trip on a non-square crop, trimming any transform padding."""
    Yl_crop, Yh_crop = dtwavexfm2(mandrill_crop)
    rows, cols = mandrill_crop.shape
    recon = dtwaveifm2(Yl_crop, Yh_crop)[:rows, :cols]
    assert np.abs(recon - mandrill_crop).max() < TOLERANCE
def test_reconstruct_custom_filter():
    """Round-trip with explicitly chosen biorthogonal / q-shift filters."""
    b, q = biort('legall'), qshift('qshift_06')
    Yl, Yh = dtwavexfm2(mandrill, 4, b, q)
    recon = dtwaveifm2(Yl, Yh, b, q)
    assert np.abs(recon - mandrill).max() < TOLERANCE
def test_float32_input():
    """A float32 input must stay float32 through forward and inverse."""
    Yl, Yh = dtwavexfm2(mandrill.astype(np.float32))
    # np.issubsctype was deprecated and removed in NumPy 2.0;
    # np.issubdtype is the supported equivalent for dtype checks.
    assert np.issubdtype(Yl.dtype, np.float32)
    assert all(np.issubdtype(x.dtype, np.complex64) for x in Yh)
    mandrill_recon = dtwaveifm2(Yl, Yh)
    assert np.issubdtype(mandrill_recon.dtype, np.float32)
# vim:sw=4:sts=4:et
| [
"dtcwt.compat.dtwaveifm2",
"numpy.abs",
"dtcwt.compat.dtwavexfm2",
"numpy.issubsctype",
"dtcwt.coeffs.qshift",
"tests.datasets.mandrill",
"dtcwt.coeffs.biort"
] | [((560, 580), 'dtcwt.compat.dtwavexfm2', 'dtwavexfm2', (['mandrill'], {}), '(mandrill)\n', (570, 580), False, 'from dtcwt.compat import dtwavexfm2, dtwaveifm2\n'), ((602, 620), 'dtcwt.compat.dtwaveifm2', 'dtwaveifm2', (['Yl', 'Yh'], {}), '(Yl, Yh)\n', (612, 620), False, 'from dtcwt.compat import dtwavexfm2, dtwaveifm2\n'), ((776, 801), 'dtcwt.compat.dtwavexfm2', 'dtwavexfm2', (['mandrill_crop'], {}), '(mandrill_crop)\n', (786, 801), False, 'from dtcwt.compat import dtwavexfm2, dtwaveifm2\n'), ((1422, 1458), 'numpy.issubsctype', 'np.issubsctype', (['Yl.dtype', 'np.float32'], {}), '(Yl.dtype, np.float32)\n', (1436, 1458), True, 'import numpy as np\n'), ((1556, 1574), 'dtcwt.compat.dtwaveifm2', 'dtwaveifm2', (['Yl', 'Yh'], {}), '(Yl, Yh)\n', (1566, 1574), False, 'from dtcwt.compat import dtwavexfm2, dtwaveifm2\n'), ((1586, 1634), 'numpy.issubsctype', 'np.issubsctype', (['mandrill_recon.dtype', 'np.float32'], {}), '(mandrill_recon.dtype, np.float32)\n', (1600, 1634), True, 'import numpy as np\n'), ((823, 851), 'dtcwt.compat.dtwaveifm2', 'dtwaveifm2', (['Yl_crop', 'Yh_crop'], {}), '(Yl_crop, Yh_crop)\n', (833, 851), False, 'from dtcwt.compat import dtwavexfm2, dtwaveifm2\n'), ((1085, 1100), 'dtcwt.coeffs.biort', 'biort', (['"""legall"""'], {}), "('legall')\n", (1090, 1100), False, 'from dtcwt.coeffs import biort, qshift\n'), ((1102, 1121), 'dtcwt.coeffs.qshift', 'qshift', (['"""qshift_06"""'], {}), "('qshift_06')\n", (1108, 1121), False, 'from dtcwt.coeffs import biort, qshift\n'), ((1163, 1178), 'dtcwt.coeffs.biort', 'biort', (['"""legall"""'], {}), "('legall')\n", (1168, 1178), False, 'from dtcwt.coeffs import biort, qshift\n'), ((1180, 1199), 'dtcwt.coeffs.qshift', 'qshift', (['"""qshift_06"""'], {}), "('qshift_06')\n", (1186, 1199), False, 'from dtcwt.coeffs import biort, qshift\n'), ((234, 253), 'tests.datasets.mandrill', 'datasets.mandrill', ([], {}), '()\n', (251, 253), True, 'import tests.datasets as datasets\n'), ((639, 672), 'numpy.abs', 'np.abs', 
(['(mandrill_recon - mandrill)'], {}), '(mandrill_recon - mandrill)\n', (645, 672), True, 'import numpy as np\n'), ((920, 958), 'numpy.abs', 'np.abs', (['(mandrill_recon - mandrill_crop)'], {}), '(mandrill_recon - mandrill_crop)\n', (926, 958), True, 'import numpy as np\n'), ((1219, 1252), 'numpy.abs', 'np.abs', (['(mandrill_recon - mandrill)'], {}), '(mandrill_recon - mandrill)\n', (1225, 1252), True, 'import numpy as np\n'), ((1482, 1519), 'numpy.issubsctype', 'np.issubsctype', (['x.dtype', 'np.complex64'], {}), '(x.dtype, np.complex64)\n', (1496, 1519), True, 'import numpy as np\n')] |
"""
Please see
https://computationalmindset.com/en/neural-networks/ordinary-differential-equation-solvers.html#sys1
for details
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from neurodiffeq import diff
from neurodiffeq.ode import solve_system
from neurodiffeq.ode import IVP
from neurodiffeq.ode import Monitor
import neurodiffeq.networks as ndenw
ode_sys = lambda x, y, t: [diff(x, t, order=1) + x - y, diff(y, t, order=1) - 4. * x + y ]
an_sol_x = lambda t : np.exp(t) + np.exp(-3. * t)
an_sol_y = lambda t : 2. * np.exp(t) - 2. * np.exp(-3. * t)
t_begin=0.
t_end=2.
t_nsamples=100
t_space = np.linspace(t_begin, t_end, t_nsamples)
x_init = IVP(t_0=t_begin, x_0=2.0)
y_init = IVP(t_0=t_begin, x_0=0.0)
x_an_sol = an_sol_x(t_space)
y_an_sol = an_sol_y(t_space)
batch_size=200
net = ndenw.FCNN(
n_input_units=1,
n_output_units=2,
n_hidden_layers=3,
n_hidden_units=50,
actv=ndenw.SinActv)
optimizer = torch.optim.Adam(net.parameters(), lr=0.003)
num_sol, history = solve_system(
ode_system=ode_sys,
conditions=[x_init, y_init],
t_min=t_begin,
t_max=t_end,
batch_size=batch_size,
max_epochs=1200,
return_best=True,
single_net = net,
optimizer=optimizer,
monitor=Monitor(t_min=t_begin, t_max=t_end, check_every=10))
num_sol = num_sol(t_space, as_type='np')
plt.figure()
plt.plot(t_space, x_an_sol, '--', linewidth=2, label='analytical x')
plt.plot(t_space, y_an_sol, '--', linewidth=2, label='analytical y')
plt.plot(t_space, num_sol[0], linewidth=1, label='numerical x')
plt.plot(t_space, num_sol[1], linewidth=1, label='numerical y')
plt.title('System of two ODEs 1st order IVP solved by NeuroDiffEq')
plt.xlabel('t')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"neurodiffeq.diff",
"neurodiffeq.ode.IVP",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.linspace",
"neurodiffeq.ode.Monitor",
"matplotlib.pyplot.xlabel",
"neurodiffeq.networks.FCNN"
] | [((622, 661), 'numpy.linspace', 'np.linspace', (['t_begin', 't_end', 't_nsamples'], {}), '(t_begin, t_end, t_nsamples)\n', (633, 661), True, 'import numpy as np\n'), ((671, 696), 'neurodiffeq.ode.IVP', 'IVP', ([], {'t_0': 't_begin', 'x_0': '(2.0)'}), '(t_0=t_begin, x_0=2.0)\n', (674, 696), False, 'from neurodiffeq.ode import IVP\n'), ((706, 731), 'neurodiffeq.ode.IVP', 'IVP', ([], {'t_0': 't_begin', 'x_0': '(0.0)'}), '(t_0=t_begin, x_0=0.0)\n', (709, 731), False, 'from neurodiffeq.ode import IVP\n'), ((814, 921), 'neurodiffeq.networks.FCNN', 'ndenw.FCNN', ([], {'n_input_units': '(1)', 'n_output_units': '(2)', 'n_hidden_layers': '(3)', 'n_hidden_units': '(50)', 'actv': 'ndenw.SinActv'}), '(n_input_units=1, n_output_units=2, n_hidden_layers=3,\n n_hidden_units=50, actv=ndenw.SinActv)\n', (824, 921), True, 'import neurodiffeq.networks as ndenw\n'), ((1315, 1327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1325, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1396), 'matplotlib.pyplot.plot', 'plt.plot', (['t_space', 'x_an_sol', '"""--"""'], {'linewidth': '(2)', 'label': '"""analytical x"""'}), "(t_space, x_an_sol, '--', linewidth=2, label='analytical x')\n", (1336, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1465), 'matplotlib.pyplot.plot', 'plt.plot', (['t_space', 'y_an_sol', '"""--"""'], {'linewidth': '(2)', 'label': '"""analytical y"""'}), "(t_space, y_an_sol, '--', linewidth=2, label='analytical y')\n", (1405, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1529), 'matplotlib.pyplot.plot', 'plt.plot', (['t_space', 'num_sol[0]'], {'linewidth': '(1)', 'label': '"""numerical x"""'}), "(t_space, num_sol[0], linewidth=1, label='numerical x')\n", (1474, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1593), 'matplotlib.pyplot.plot', 'plt.plot', (['t_space', 'num_sol[1]'], {'linewidth': '(1)', 'label': '"""numerical y"""'}), "(t_space, num_sol[1], linewidth=1, label='numerical y')\n", (1538, 1593), 
True, 'import matplotlib.pyplot as plt\n'), ((1594, 1661), 'matplotlib.pyplot.title', 'plt.title', (['"""System of two ODEs 1st order IVP solved by NeuroDiffEq"""'], {}), "('System of two ODEs 1st order IVP solved by NeuroDiffEq')\n", (1603, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1677), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (1672, 1677), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1690), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1688, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1691, 1701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1699, 1701), True, 'import matplotlib.pyplot as plt\n'), ((488, 497), 'numpy.exp', 'np.exp', (['t'], {}), '(t)\n', (494, 497), True, 'import numpy as np\n'), ((500, 516), 'numpy.exp', 'np.exp', (['(-3.0 * t)'], {}), '(-3.0 * t)\n', (506, 516), True, 'import numpy as np\n'), ((1220, 1271), 'neurodiffeq.ode.Monitor', 'Monitor', ([], {'t_min': 't_begin', 't_max': 't_end', 'check_every': '(10)'}), '(t_min=t_begin, t_max=t_end, check_every=10)\n', (1227, 1271), False, 'from neurodiffeq.ode import Monitor\n'), ((543, 552), 'numpy.exp', 'np.exp', (['t'], {}), '(t)\n', (549, 552), True, 'import numpy as np\n'), ((560, 576), 'numpy.exp', 'np.exp', (['(-3.0 * t)'], {}), '(-3.0 * t)\n', (566, 576), True, 'import numpy as np\n'), ((401, 420), 'neurodiffeq.diff', 'diff', (['x', 't'], {'order': '(1)'}), '(x, t, order=1)\n', (405, 420), False, 'from neurodiffeq import diff\n'), ((430, 449), 'neurodiffeq.diff', 'diff', (['y', 't'], {'order': '(1)'}), '(y, t, order=1)\n', (434, 449), False, 'from neurodiffeq import diff\n')] |
import pdb
import os
import argparse
import matplotlib
import numpy as np
import sys # NOQA
sys.path.insert(0, '..') # NOQA: E402
from envs.gridworld_drone import GridWorldDrone as GridWorld
import utils
from logger.logger import Logger
parser = argparse.ArgumentParser()
parser.add_argument('--policy-path', type=str, nargs='?', default=None)
parser.add_argument('--play', action='store_true',
help='play given or latest stored policy.')
parser.add_argument('--dont-save', action='store_true',
help="don't save the policy network weights.")
parser.add_argument('--render', action='store_true', help="show the env.")
parser.add_argument('--on-server', action='store_true',
help="True if the code is being run on a server.")
parser.add_argument('--store-train-results', action='store_true',
help='True if you want to store intermediate results')
parser.add_argument('--store-interval', action='store_true',
help='Interval of storing the results.')
parser.add_argument('--rl-episodes', type=int, default=50)
parser.add_argument('--rl-ep-length', type=int, default=30)
parser.add_argument('--irl-iterations', type=int, default=100)
parser.add_argument('--rl-log-intervals', type=int, default=10)
parser.add_argument('--regularizer', type=float, default=0, help='The regularizer to use.')
parser.add_argument('--seed', type=int, default=7, help='The seed for the run')
parser.add_argument('--save-folder', type=str, default=None,
help='The name of the directory to store the results in. The name will be used to \
save the plots, the policy and the reward networks.(Relative path)')
parser.add_argument('--exp-trajectory-path', type=str, default=None, help='The name of the directory in which \
the expert trajectories are stored.(Relative path)')
parser.add_argument('--feat-extractor', type=str, default=None, help='The name of the \
feature extractor to be used in the experiment.')
parser.add_argument('--reward-net-hidden-dims', nargs="*", type=int , default=[128], help='The dimensions of the \
hidden layers of the reward network.')
parser.add_argument('--annotation-file', type=str, default=None, help='The location of the annotation file to \
be used to run the environment.')
parser.add_argument('--lr', type=float, default=1e-3, help='The learning rate for the reward network.')
#IMPORTANT*** search for 'CHANGE HERE' to find that most probably need changing
#before running on different settings
def main():
    """Entry point: build the feature extractor, drone-gridworld environment,
    actor-critic RL learner and deep max-ent IRL learner from the CLI args,
    then run IRL training.
    """
    args = parser.parse_args()
    if args.on_server:
        # matplotlib without a monitor
        matplotlib.use('Agg')
        # pygame without a monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'
    # Experiment logger: one folder per (save-folder, regularizer, seed, lr).
    base_folder = './results/'+str(args.save_folder)+'-reg-'+str(args.regularizer)+'-seed-'+str(args.seed)+'-lr-'+str(args.lr)
    log_file = 'Experiment_info.txt'
    experiment_logger = Logger(base_folder, log_file)
    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))
    # Deferred imports: must run after the headless matplotlib/pygame setup above.
    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import OneHot,LocalGlobal,SocialNav,FrontBackSideSimple
    # Geometry used by both the feature extractors and the environment.
    agent_width = 10
    step_size = 10
    obs_width = 10
    grid_size = 10
    if args.feat_extractor is None:
        print('Feature extractor missing.')
        exit()
    # Initialize the requested feature extractor.
    # NOTE(review): if --feat-extractor matches none of the names below,
    # feat_ext stays undefined and the log_info call crashes — confirm.
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows = 10 , grid_cols = 10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav()
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(thresh1 = 1,
                            thresh2 = 2,
                            thresh3 = 3,
                            thresh4=4,
                            step_size=step_size,
                            agent_width=agent_width,
                            obs_width=obs_width,
                            )
    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(window_size=5, grid_size=grid_size,
                           agent_width=agent_width,
                           obs_width=obs_width,
                           step_size=step_size,
                           )
    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)
    # Validate required CLI arguments before building the environment.
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()
    if args.annotation_file is None:
        print('Specify annotation file for the environment.')
        exit()
    if args.exp_trajectory_path is None:
        print('Specify expert trajectory folder.')
        exit()
    # is_onehot must stay False for the continuous feature extractors above.
    goal_state = np.asarray([1,5])
    '''
    env = GridWorld(display=args.render, is_onehot= False,is_random=False,
                    rows =10,
                    cols =10,
                    seed = 7,
                    obstacles = [np.asarray([5,5])],
                    goal_state = np.asarray([1,5]))
    '''
    env = GridWorld(display=args.render, is_random = True,
                    rows = 576, cols = 720,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    annotation_file=args.annotation_file,
                    goal_state=goal_state,
                    step_wrapper=utils.step_wrapper,
                    seed = args.seed,
                    reset_wrapper=utils.reset_wrapper,
                    is_onehot = False)
    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)
    # Initialize the RL method (loss-based termination is currently disabled).
    rlMethod = ActorCritic(env, gamma=0.99,
                            log_interval = args.rl_log_intervals,
                            max_episodes=args.rl_episodes,
                            max_ep_length=args.rl_ep_length,
                            termination = None,
                            hidden_dims=args.reward_net_hidden_dims,
                            feat_extractor = feat_ext)
    print("RL method initialized.")
    print(rlMethod.policy)
    if args.policy_path is not None:
        rlMethod.policy.load(args.policy_path)
    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rlMethod.__dict__)
    # Initialize the IRL method around the RL learner.
    trajectory_path = args.exp_trajectory_path
    # NOTE(review): this is an absolute path '/results/...' while the logger
    # above uses './results/...' — the leading '/' looks unintended; confirm.
    folder_to_save = '/results/'+args.save_folder
    irlMethod = DeepMaxEnt(trajectory_path, rlmethod=rlMethod, env=env,
                           iterations=args.irl_iterations, log_intervals=5,
                           on_server=args.on_server,
                           regularizer=args.regularizer,
                           learning_rate=args.lr,
                           graft=True,
                           hidden_dims = args.reward_net_hidden_dims,
                           save_folder=folder_to_save)
    print("IRL method intialized.")
    print(irlMethod.reward)
    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irlMethod.__dict__)
    rewardNetwork = irlMethod.train()
    if not args.dont_save:
        # Placeholder: saving of the trained reward network is not implemented.
        pass
if __name__ == '__main__':
main()
| [
"featureExtractor.gridworld_featureExtractor.LocalGlobal",
"irlmethods.deep_maxent.DeepMaxEnt",
"argparse.ArgumentParser",
"numpy.asarray",
"logger.logger.Logger",
"sys.path.insert",
"featureExtractor.gridworld_featureExtractor.FrontBackSideSimple",
"featureExtractor.gridworld_featureExtractor.OneHot"... | [((94, 118), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (109, 118), False, 'import sys\n'), ((251, 276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (274, 276), False, 'import argparse\n'), ((3070, 3099), 'logger.logger.Logger', 'Logger', (['base_folder', 'log_file'], {}), '(base_folder, log_file)\n', (3076, 3099), False, 'from logger.logger import Logger\n'), ((5273, 5291), 'numpy.asarray', 'np.asarray', (['[1, 5]'], {}), '([1, 5])\n', (5283, 5291), True, 'import numpy as np\n'), ((5622, 5951), 'envs.gridworld_drone.GridWorldDrone', 'GridWorld', ([], {'display': 'args.render', 'is_random': '(True)', 'rows': '(576)', 'cols': '(720)', 'agent_width': 'agent_width', 'step_size': 'step_size', 'obs_width': 'obs_width', 'width': 'grid_size', 'annotation_file': 'args.annotation_file', 'goal_state': 'goal_state', 'step_wrapper': 'utils.step_wrapper', 'seed': 'args.seed', 'reset_wrapper': 'utils.reset_wrapper', 'is_onehot': '(False)'}), '(display=args.render, is_random=True, rows=576, cols=720,\n agent_width=agent_width, step_size=step_size, obs_width=obs_width,\n width=grid_size, annotation_file=args.annotation_file, goal_state=\n goal_state, step_wrapper=utils.step_wrapper, seed=args.seed,\n reset_wrapper=utils.reset_wrapper, is_onehot=False)\n', (5631, 5951), True, 'from envs.gridworld_drone import GridWorldDrone as GridWorld\n'), ((6411, 6635), 'rlmethods.b_actor_critic.ActorCritic', 'ActorCritic', (['env'], {'gamma': '(0.99)', 'log_interval': 'args.rl_log_intervals', 'max_episodes': 'args.rl_episodes', 'max_ep_length': 'args.rl_ep_length', 'termination': 'None', 'hidden_dims': 'args.reward_net_hidden_dims', 'feat_extractor': 'feat_ext'}), '(env, gamma=0.99, log_interval=args.rl_log_intervals,\n max_episodes=args.rl_episodes, max_ep_length=args.rl_ep_length,\n termination=None, hidden_dims=args.reward_net_hidden_dims,\n feat_extractor=feat_ext)\n', 
(6422, 6635), False, 'from rlmethods.b_actor_critic import ActorCritic\n'), ((7226, 7505), 'irlmethods.deep_maxent.DeepMaxEnt', 'DeepMaxEnt', (['trajectory_path'], {'rlmethod': 'rlMethod', 'env': 'env', 'iterations': 'args.irl_iterations', 'log_intervals': '(5)', 'on_server': 'args.on_server', 'regularizer': 'args.regularizer', 'learning_rate': 'args.lr', 'graft': '(True)', 'hidden_dims': 'args.reward_net_hidden_dims', 'save_folder': 'folder_to_save'}), '(trajectory_path, rlmethod=rlMethod, env=env, iterations=args.\n irl_iterations, log_intervals=5, on_server=args.on_server, regularizer=\n args.regularizer, learning_rate=args.lr, graft=True, hidden_dims=args.\n reward_net_hidden_dims, save_folder=folder_to_save)\n', (7236, 7505), False, 'from irlmethods.deep_maxent import DeepMaxEnt\n'), ((2753, 2774), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (2767, 2774), False, 'import matplotlib\n'), ((3846, 3880), 'featureExtractor.gridworld_featureExtractor.OneHot', 'OneHot', ([], {'grid_rows': '(10)', 'grid_cols': '(10)'}), '(grid_rows=10, grid_cols=10)\n', (3852, 3880), False, 'from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple\n'), ((3948, 3959), 'featureExtractor.gridworld_featureExtractor.SocialNav', 'SocialNav', ([], {}), '()\n', (3957, 3959), False, 'from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple\n'), ((4032, 4167), 'featureExtractor.gridworld_featureExtractor.FrontBackSideSimple', 'FrontBackSideSimple', ([], {'thresh1': '(1)', 'thresh2': '(2)', 'thresh3': '(3)', 'thresh4': '(4)', 'step_size': 'step_size', 'agent_width': 'agent_width', 'obs_width': 'obs_width'}), '(thresh1=1, thresh2=2, thresh3=3, thresh4=4, step_size=\n step_size, agent_width=agent_width, obs_width=obs_width)\n', (4051, 4167), False, 'from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple\n'), ((4488, 
4606), 'featureExtractor.gridworld_featureExtractor.LocalGlobal', 'LocalGlobal', ([], {'window_size': '(5)', 'grid_size': 'grid_size', 'agent_width': 'agent_width', 'obs_width': 'obs_width', 'step_size': 'step_size'}), '(window_size=5, grid_size=grid_size, agent_width=agent_width,\n obs_width=obs_width, step_size=step_size)\n', (4499, 4606), False, 'from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple\n')] |
# global
from typing import List, Optional, Union
import ivy
_round = round
import mxnet as mx
import numpy as np
from numbers import Number
import multiprocessing as _multiprocessing
# local
from ivy.functional.ivy.device import default_device
from ivy.functional.backends.mxnet.device import dev
from ivy.functional.backends.mxnet import (
_handle_flat_arrays_in_out,
_mxnet_init_context,
)
def is_native_array(x, exclusive=False):
    """Return True when x is an mxnet NDArray; with exclusive=True, arrays
    that carry a gradient do not qualify."""
    if not isinstance(x, mx.nd.NDArray):
        return False
    return not (exclusive and x.grad is not None)
def copy_array(x: mx.nd.NDArray) -> mx.nd.NDArray:
    """Return a new NDArray with the same contents as x."""
    return x.copy()
def array_equal(x0: mx.nd.NDArray, x1: mx.nd.NDArray) -> bool:
    """True when x0 and x1 are element-wise equal.

    Bool arrays are first cast to int32, since broadcast_equal does not
    operate on bool dtype directly.
    """
    x0 = x0.astype("int32") if ivy.dtype(x0) == "bool" else x0
    x1 = x1.astype("int32") if ivy.dtype(x1) == "bool" else x1
    return mx.nd.min(mx.nd.broadcast_equal(x0, x1)) == 1
def to_numpy(x: mx.nd.NDArray) -> mx.nd.NDArray:
    """Convert x to a numpy value.

    numpy arrays pass through, Python scalars become 0-d arrays, and
    NDArrays are converted via asnumpy().
    """
    if isinstance(x, np.ndarray):
        return x
    if isinstance(x, (int, float)):
        return np.array(x)
    return x.asnumpy()
def to_scalar(x: mx.nd.NDArray) -> Number:
    """Extract a Python scalar from x; plain numbers pass through unchanged.

    Bug fix: the NDArray branch computed x.asscalar().item() but never
    returned it, so the function always returned None for NDArray input.
    """
    if isinstance(x, Number):
        return x
    return x.asscalar().item()
def to_list(x: mx.nd.NDArray) -> list:
    """Convert x to a (possibly nested) Python list via numpy."""
    return to_numpy(x).tolist()
@_handle_flat_arrays_in_out
def floormod(
    x: mx.nd.NDArray,
    y: mx.nd.NDArray,
    out: Optional[mx.nd.NDArray] = None,
) -> mx.nd.NDArray:
    """Element-wise x mod y, optionally written into out."""
    remainder = x % y
    if not ivy.exists(out):
        return remainder
    return ivy.inplace_update(out, remainder)
# No backend-specific container types for MXNet.
container_types = lambda: []
def unstack(x, axis, keepdims=False):
    """Split x into its x.shape[axis] sub-arrays along axis.

    A 0-d input is returned as a single-element list. With keepdims the
    split axis is retained (length 1) in each piece.
    """
    if x.shape == ():
        return [x]
    pieces = mx.nd.split(x, x.shape[axis], axis, squeeze_axis=not keepdims)
    return pieces if isinstance(pieces, list) else [pieces]
def inplace_update(
    x: Union[ivy.Array, mx.nd.NDArray],
    val: Union[ivy.Array, mx.nd.NDArray],
    ensure_in_backend: bool = False,
) -> ivy.Array:
    """Overwrite the data of x with val in place and return the updated array."""
    (x_native, val_native), _ = ivy.args_to_native(x, val)
    x_native[:] = val_native
    if not ivy.is_ivy_array(x):
        return ivy.Array(x_native)
    x.data = x_native
    return x
# MXNet NDArrays support in-place writes for both arrays and variables.
inplace_arrays_supported = lambda: True
inplace_variables_supported = lambda: True
def inplace_decrement(x, val):
    """Subtract val from x in place and return the updated array."""
    (x_native, val_native), _ = ivy.args_to_native(x, val)
    x_native[:] -= val_native
    if not ivy.is_ivy_array(x):
        return ivy.Array(x_native)
    x.data = x_native
    return x
def inplace_increment(x, val):
    """Add val to x in place and return the updated array."""
    (x_native, val_native), _ = ivy.args_to_native(x, val)
    x_native[:] += val_native
    if not ivy.is_ivy_array(x):
        return ivy.Array(x_native)
    x.data = x_native
    return x
def cumsum(
    x: mx.nd.NDArray,
    axis: int = 0,
    out: Optional[mx.nd.NDArray] = None,
) -> mx.nd.NDArray:
    """Cumulative sum of x along axis (negative axes are normalised).

    Bug fix: the out-is-None branch computed the result but never returned
    it, so the function returned None whenever `out` was not supplied.
    """
    ret = mx.nd.cumsum(x, axis if axis >= 0 else axis % len(x.shape))
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def cumprod(
    x: mx.nd.NDArray,
    axis: int = 0,
    exclusive: Optional[bool] = False,
    out: Optional[mx.nd.NDArray] = None,
) -> mx.nd.NDArray:
    """Cumulative product along axis.

    With exclusive=True the products are shifted right by one slice and the
    sequence starts from ones.
    """
    slices = [mx.nd.expand_dims(s, axis) for s in unstack(x, axis)]
    if exclusive:
        # drop the last slice and lead with ones
        slices = [mx.nd.ones_like(slices[0])] + slices[:-1]
    running = [slices[0]]
    for s in slices[1:]:
        running.append(running[-1] * s)
    result = mx.nd.concat(*running, dim=axis)
    if ivy.exists(out):
        return ivy.inplace_update(out, result)
    return result
# noinspection PyShadowingNames
def scatter_flat(
    indices, updates, size=None, tensor=None, reduction="sum", device=None
):
    """Scatter `updates` into a new flat array of length `size` at `indices`.

    Only reduction="replace" is implemented; scattering into a pre-existing
    `tensor` is unsupported. The result is copied onto `device` (or the
    default device when None).
    """
    if ivy.exists(tensor):
        raise Exception(
            "MXNet scatter_flat does not support scattering into "
            "an pre-existing tensor."
        )
    if reduction == "replace":
        # mx.nd.scatter_nd expects a leading index dimension on `indices`
        return mx.nd.scatter_nd(updates, mx.nd.expand_dims(indices, 0), [size]).copyto(
            _mxnet_init_context(default_device(device))
        )
    else:
        raise Exception(
            "MXNet scatter_flat currently only supports reduction mode 'replace', "
            "but {} selected.".format(reduction)
        )
# noinspection PyShadowingNames
def scatter_nd(indices, updates, shape=None, tensor=None, reduction="sum", device=None):
    """Scatter `updates` into a new array of `shape` at n-d `indices`.

    Only reduction="replace" is implemented; scattering into a pre-existing
    `tensor` is unsupported.
    """
    if ivy.exists(tensor):
        raise Exception(
            "MXNet scatter_flat does not support scattering into "
            "an pre-existing tensor."
        )
    if device is None:
        device = dev(indices)
    shape = list(shape)
    # Move the index dimension to the front, as mx.nd.scatter_nd expects.
    num_idx_dims = len(indices.shape)
    transpose_order = [num_idx_dims - 1] + list(range(num_idx_dims - 1))
    indices = mx.nd.transpose(indices, transpose_order)
    # NOTE(review): `shape` was already list()-ed above, so the NDArray
    # conversion branch here is dead code — confirm before removing.
    shape = shape if type(shape) is list else shape.asnumpy().astype(np.int32).tolist()
    if reduction == "replace":
        return mx.nd.scatter_nd(updates, indices, shape).copyto(
            _mxnet_init_context(device)
        )
    else:
        raise Exception(
            "MXNet scatter_nd currently only supports reduction mode 'replace', "
            "but {} selected.".format(reduction)
        )
def gather(
    params: mx.nd.NDArray,
    indices: mx.nd.NDArray,
    axis: Optional[int] = -1,
    device: Optional[str] = None,
    out: mx.nd.NDArray = None,
) -> mx.nd.NDArray:
    """Gather slices of ``params`` at ``indices`` along ``axis``.

    If ``out`` is given, the result is written into it in-place. Previously
    the caller-supplied ``out`` array was discarded and overwritten with a
    device context, so the in-place contract was silently broken; this now
    matches the ``ivy.inplace_update`` handling used by ``cumprod``.
    """
    if device is None:
        device = dev(params)
    index_slices = unstack(indices, -1)
    res = mx.nd.concat(
        *[
            mx.nd.expand_dims(mx.nd.pick(params, idx_slice, axis), -1)
            for idx_slice in index_slices
        ],
        dim=-1
    )
    res = mx.nd.reshape(res, indices.shape).copyto(_mxnet_init_context(device))
    if ivy.exists(out):
        return ivy.inplace_update(out, res)
    return res
def gather_nd(params, indices, device=None):
    """Gather values of ``params`` at multi-dimensional ``indices``."""
    if device is None:
        device = dev(params)
    # Move the last axis of `indices` (the index-tuple axis) to the front
    # before calling mx.nd.gather_nd, mirroring scatter_nd above.
    n_dims = len(indices.shape)
    order = [n_dims - 1] + list(range(n_dims - 1))
    rotated = mx.nd.transpose(indices, order)
    gathered = mx.nd.gather_nd(params, rotated)
    return gathered.copyto(_mxnet_init_context(device))
def multiprocessing(context=None):
    """Return the multiprocessing module, or a named start-method context."""
    if context is None:
        return _multiprocessing
    return _multiprocessing.get_context(context)
def one_hot(indices, depth, device=None):
    """One-hot encode ``indices`` with ``depth`` classes.

    NOTE(review): the ``device`` argument is accepted for API parity but
    unused here — confirm whether placement matters for callers.
    """
    encoded = mx.nd.one_hot(indices, depth)
    return encoded
def shape(x: mx.nd.NDArray, as_tensor: bool = False) -> Union[mx.nd.NDArray, List[int]]:
    """Return the shape of ``x``, as a tensor when ``as_tensor`` is True."""
    return mx.nd.shape_array(x) if as_tensor else x.shape
def get_num_dims(x, as_tensor=False):
    """Return the rank of ``x``, as a 0-d tensor when ``as_tensor`` is True."""
    if as_tensor:
        return mx.nd.shape_array(mx.nd.shape_array(x)).reshape([])
    return len(x.shape)
def indices_where(x):
    """Return the indices (one row per hit) where ``x`` is non-zero."""
    original_shape = x.shape
    flattened = x.reshape(
        (
            1,
            -1,
        )
    )
    # CSR storage exposes the non-zero positions of the flattened array.
    nonzero_flat = flattened.astype("int32").tostype("csr").indices
    if nonzero_flat.shape == (0,):
        # No hits: return an empty (0, ndim) result.
        return nonzero_flat.reshape((0, len(original_shape)))
    # Unravel flat positions back to per-axis coordinates, rows = hits.
    return mx.nd.swapaxes(mx.nd.unravel_index(nonzero_flat, original_shape), 0, 1)
def current_backend_str():
    """Return the short name of this backend ("mxnet").

    Written as a ``def`` rather than a named lambda (PEP 8 E731); this also
    makes the manual ``__name__`` patch unnecessary.
    """
    return "mxnet"
| [
"ivy.inplace_update",
"multiprocessing.get_context",
"mxnet.nd.split",
"ivy.is_ivy_array",
"mxnet.nd.concat",
"mxnet.nd.scatter_nd",
"ivy.exists",
"ivy.functional.backends.mxnet.device.dev",
"mxnet.nd.transpose",
"mxnet.nd.shape_array",
"ivy.dtype",
"mxnet.nd.ones_like",
"mxnet.nd.reshape",
... | [((1516, 1531), 'ivy.exists', 'ivy.exists', (['out'], {}), '(out)\n', (1526, 1531), False, 'import ivy\n'), ((1746, 1806), 'mxnet.nd.split', 'mx.nd.split', (['x', 'num_outputs', 'axis'], {'squeeze_axis': '(not keepdims)'}), '(x, num_outputs, axis, squeeze_axis=not keepdims)\n', (1757, 1806), True, 'import mxnet as mx\n'), ((2047, 2073), 'ivy.args_to_native', 'ivy.args_to_native', (['x', 'val'], {}), '(x, val)\n', (2065, 2073), False, 'import ivy\n'), ((2110, 2129), 'ivy.is_ivy_array', 'ivy.is_ivy_array', (['x'], {}), '(x)\n', (2126, 2129), False, 'import ivy\n'), ((2362, 2388), 'ivy.args_to_native', 'ivy.args_to_native', (['x', 'val'], {}), '(x, val)\n', (2380, 2388), False, 'import ivy\n'), ((2426, 2445), 'ivy.is_ivy_array', 'ivy.is_ivy_array', (['x'], {}), '(x)\n', (2442, 2445), False, 'import ivy\n'), ((2593, 2619), 'ivy.args_to_native', 'ivy.args_to_native', (['x', 'val'], {}), '(x, val)\n', (2611, 2619), False, 'import ivy\n'), ((2657, 2676), 'ivy.is_ivy_array', 'ivy.is_ivy_array', (['x'], {}), '(x)\n', (2673, 2676), False, 'import ivy\n'), ((2882, 2897), 'ivy.exists', 'ivy.exists', (['out'], {}), '(out)\n', (2892, 2897), False, 'import ivy\n'), ((3578, 3593), 'ivy.exists', 'ivy.exists', (['out'], {}), '(out)\n', (3588, 3593), False, 'import ivy\n'), ((3686, 3725), 'mxnet.nd.concat', 'mx.nd.concat', (['*new_array_list'], {'dim': 'axis'}), '(*new_array_list, dim=axis)\n', (3698, 3725), True, 'import mxnet as mx\n'), ((3863, 3881), 'ivy.exists', 'ivy.exists', (['tensor'], {}), '(tensor)\n', (3873, 3881), False, 'import ivy\n'), ((4516, 4534), 'ivy.exists', 'ivy.exists', (['tensor'], {}), '(tensor)\n', (4526, 4534), False, 'import ivy\n'), ((4878, 4919), 'mxnet.nd.transpose', 'mx.nd.transpose', (['indices', 'transpose_order'], {}), '(indices, transpose_order)\n', (4893, 4919), True, 'import mxnet as mx\n'), ((5796, 5829), 'mxnet.nd.reshape', 'mx.nd.reshape', (['res', 'indices.shape'], {}), '(res, indices.shape)\n', (5809, 5829), True, 'import mxnet as 
mx\n'), ((5837, 5852), 'ivy.exists', 'ivy.exists', (['out'], {}), '(out)\n', (5847, 5852), False, 'import ivy\n'), ((6250, 6291), 'mxnet.nd.transpose', 'mx.nd.transpose', (['indices', 'transpose_order'], {}), '(indices, transpose_order)\n', (6265, 6291), True, 'import mxnet as mx\n'), ((6570, 6599), 'mxnet.nd.one_hot', 'mx.nd.one_hot', (['indices', 'depth'], {}), '(indices, depth)\n', (6583, 6599), True, 'import mxnet as mx\n'), ((735, 748), 'ivy.dtype', 'ivy.dtype', (['x0'], {}), '(x0)\n', (744, 748), False, 'import ivy\n'), ((799, 812), 'ivy.dtype', 'ivy.dtype', (['x1'], {}), '(x1)\n', (808, 812), False, 'import ivy\n'), ((1548, 1576), 'ivy.inplace_update', 'ivy.inplace_update', (['out', 'ret'], {}), '(out, ret)\n', (1566, 1576), False, 'import ivy\n'), ((2179, 2198), 'ivy.Array', 'ivy.Array', (['x_native'], {}), '(x_native)\n', (2188, 2198), False, 'import ivy\n'), ((2495, 2514), 'ivy.Array', 'ivy.Array', (['x_native'], {}), '(x_native)\n', (2504, 2514), False, 'import ivy\n'), ((2726, 2745), 'ivy.Array', 'ivy.Array', (['x_native'], {}), '(x_native)\n', (2735, 2745), False, 'import ivy\n'), ((3274, 3304), 'mxnet.nd.expand_dims', 'mx.nd.expand_dims', (['chunk', 'axis'], {}), '(chunk, axis)\n', (3291, 3304), True, 'import mxnet as mx\n'), ((4716, 4728), 'ivy.functional.backends.mxnet.device.dev', 'dev', (['indices'], {}), '(indices)\n', (4719, 4728), False, 'from ivy.functional.backends.mxnet.device import dev\n'), ((5554, 5565), 'ivy.functional.backends.mxnet.device.dev', 'dev', (['params'], {}), '(params)\n', (5557, 5565), False, 'from ivy.functional.backends.mxnet.device import dev\n'), ((5868, 5895), 'ivy.functional.backends.mxnet._mxnet_init_context', '_mxnet_init_context', (['device'], {}), '(device)\n', (5887, 5895), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context\n'), ((6079, 6090), 'ivy.functional.backends.mxnet.device.dev', 'dev', (['params'], {}), '(params)\n', (6082, 6090), False, 'from 
ivy.functional.backends.mxnet.device import dev\n'), ((6343, 6370), 'ivy.functional.backends.mxnet._mxnet_init_context', '_mxnet_init_context', (['device'], {}), '(device)\n', (6362, 6370), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context\n'), ((6471, 6508), 'multiprocessing.get_context', '_multiprocessing.get_context', (['context'], {}), '(context)\n', (6499, 6508), True, 'import multiprocessing as _multiprocessing\n'), ((6724, 6744), 'mxnet.nd.shape_array', 'mx.nd.shape_array', (['x'], {}), '(x)\n', (6741, 6744), True, 'import mxnet as mx\n'), ((7269, 7311), 'mxnet.nd.unravel_index', 'mx.nd.unravel_index', (['flat_indices', 'x_shape'], {}), '(flat_indices, x_shape)\n', (7288, 7311), True, 'import mxnet as mx\n'), ((877, 906), 'mxnet.nd.broadcast_equal', 'mx.nd.broadcast_equal', (['x0', 'x1'], {}), '(x0, x1)\n', (898, 906), True, 'import mxnet as mx\n'), ((1084, 1095), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1092, 1095), True, 'import numpy as np\n'), ((3634, 3673), 'mxnet.nd.concat', 'mx.nd.concat', (['*new_array_list'], {'dim': 'axis'}), '(*new_array_list, dim=axis)\n', (3646, 3673), True, 'import mxnet as mx\n'), ((5116, 5143), 'ivy.functional.backends.mxnet._mxnet_init_context', '_mxnet_init_context', (['device'], {}), '(device)\n', (5135, 5143), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context\n'), ((5963, 5990), 'ivy.functional.backends.mxnet._mxnet_init_context', '_mxnet_init_context', (['device'], {}), '(device)\n', (5982, 5990), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context\n'), ((6303, 6335), 'mxnet.nd.gather_nd', 'mx.nd.gather_nd', (['params', 'indices'], {}), '(params, indices)\n', (6318, 6335), True, 'import mxnet as mx\n'), ((3377, 3408), 'mxnet.nd.ones_like', 'mx.nd.ones_like', (['array_stack[0]'], {}), '(array_stack[0])\n', (3392, 3408), True, 'import mxnet as mx\n'), ((4174, 4196), 
'ivy.functional.ivy.device.default_device', 'default_device', (['device'], {}), '(device)\n', (4188, 4196), False, 'from ivy.functional.ivy.device import default_device\n'), ((5054, 5095), 'mxnet.nd.scatter_nd', 'mx.nd.scatter_nd', (['updates', 'indices', 'shape'], {}), '(updates, indices, shape)\n', (5070, 5095), True, 'import mxnet as mx\n'), ((4095, 4124), 'mxnet.nd.expand_dims', 'mx.nd.expand_dims', (['indices', '(0)'], {}), '(indices, 0)\n', (4112, 4124), True, 'import mxnet as mx\n'), ((5671, 5706), 'mxnet.nd.pick', 'mx.nd.pick', (['params', 'idx_slice', 'axis'], {}), '(params, idx_slice, axis)\n', (5681, 5706), True, 'import mxnet as mx\n'), ((6857, 6877), 'mxnet.nd.shape_array', 'mx.nd.shape_array', (['x'], {}), '(x)\n', (6874, 6877), True, 'import mxnet as mx\n')] |
# License: Apache-2.0
import glob
import os
import platform
import numpy as np
import pytest
from lightgbm import LGBMClassifier
from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper
def test():
    """Smoke test: dump a tiny LightGBM model with the toolchain matching the host OS."""
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.array([0, 1, 1, 0])
    model = LGBMClassifier(max_depth=1, n_estimators=1).fit(X_train, y_train)
    # Look up the platform once instead of calling platform.system() per branch.
    system = platform.system()
    if system == 'Linux':
        LGBMTreeliteDumper.dump(
            model=model,
            toolchain="gcc",
            parallel_comp=1,
            model_path=".",
            model_name="dummy",
        )
        print(glob.glob("*"))
    elif system == 'Darwin':
        LGBMTreeliteDumper.dump(
            model=model,
            toolchain="clang",
            parallel_comp=1,
            model_path=".",
            model_name="dummy",
        )
    elif system == 'Windows':
        LGBMTreeliteDumper.dump(
            model=model,
            toolchain="msvc",
            parallel_comp=1,
            model_path=".",
            model_name="dummy",
        )
    # Clean up generated artifacts. A plain loop replaces the original list
    # comprehension that was used only for its os.remove side effect.
    for artifact in glob.glob("*"):
        if artifact.startswith('dummy'):
            os.remove(artifact)
def test_input():
    """Each invalid argument to LGBMTreeliteDumper.dump raises the expected error."""
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.array([0, 1, 1, 0])
    model = LGBMClassifier(max_depth=1, n_estimators=1).fit(X_train, y_train)
    # (expected exception, keyword arguments) — one bad argument per case,
    # in the same order as the original assertions.
    bad_calls = [
        (TypeError, dict(model=0, toolchain="gcc", parallel_comp=1,
                         model_path=".", model_name="dummy")),
        (TypeError, dict(model=model, toolchain=0, parallel_comp=1,
                         model_path=".", model_name="dummy")),
        (TypeError, dict(model=model, toolchain="gcc", parallel_comp="a",
                         model_path=".", model_name="dummy")),
        (TypeError, dict(model=model, toolchain="gcc", parallel_comp=1,
                         model_path=0, model_name="dummy")),
        (TypeError, dict(model=model, toolchain="gcc", parallel_comp=1,
                         model_path=".", model_name=0)),
        (ValueError, dict(model=model, toolchain="a", parallel_comp=1,
                          model_path=".", model_name="dummy")),
    ]
    for expected_error, kwargs in bad_calls:
        with pytest.raises(expected_error):
            LGBMTreeliteDumper.dump(**kwargs)
| [
"os.remove",
"lightgbm.LGBMClassifier",
"gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump",
"pytest.raises",
"numpy.array",
"glob.glob",
"platform.system"
] | [((233, 275), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (241, 275), True, 'import numpy as np\n'), ((290, 312), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (298, 312), True, 'import numpy as np\n'), ((1216, 1258), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (1224, 1258), True, 'import numpy as np\n'), ((1273, 1295), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (1281, 1295), True, 'import numpy as np\n'), ((398, 415), 'platform.system', 'platform.system', ([], {}), '()\n', (413, 415), False, 'import platform\n'), ((446, 556), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""gcc"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain='gcc', parallel_comp=1,\n model_path='.', model_name='dummy')\n", (469, 556), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((1119, 1131), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1128, 1131), False, 'import os\n'), ((1383, 1407), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1396, 1407), False, 'import pytest\n'), ((1417, 1523), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': '(0)', 'toolchain': '"""gcc"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=0, toolchain='gcc', parallel_comp=1,\n model_path='.', model_name='dummy')\n", (1440, 1523), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((1600, 1624), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1613, 1624), False, 'import pytest\n'), ((1634, 1740), 
'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '(0)', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain=0, parallel_comp=1,\n model_path='.', model_name='dummy')\n", (1657, 1740), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((1817, 1841), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1830, 1841), False, 'import pytest\n'), ((1851, 1963), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""gcc"""', 'parallel_comp': '"""a"""', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain='gcc', parallel_comp='a',\n model_path='.', model_name='dummy')\n", (1874, 1963), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((2040, 2064), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2053, 2064), False, 'import pytest\n'), ((2074, 2182), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""gcc"""', 'parallel_comp': '(1)', 'model_path': '(0)', 'model_name': '"""dummy"""'}), "(model=model, toolchain='gcc', parallel_comp=1,\n model_path=0, model_name='dummy')\n", (2097, 2182), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((2259, 2283), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2272, 2283), False, 'import pytest\n'), ((2293, 2397), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""gcc"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '(0)'}), "(model=model, toolchain='gcc', parallel_comp=1,\n model_path='.', model_name=0)\n", (2316, 2397), 
False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((2425, 2450), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2438, 2450), False, 'import pytest\n'), ((2460, 2568), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""a"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain='a', parallel_comp=1,\n model_path='.', model_name='dummy')\n", (2483, 2568), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((325, 368), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {'max_depth': '(1)', 'n_estimators': '(1)'}), '(max_depth=1, n_estimators=1)\n', (339, 368), False, 'from lightgbm import LGBMClassifier\n'), ((638, 652), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (647, 652), False, 'import glob\n'), ((663, 680), 'platform.system', 'platform.system', ([], {}), '()\n', (678, 680), False, 'import platform\n'), ((702, 814), 'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""clang"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain='clang', parallel_comp=1,\n model_path='.', model_name='dummy')\n", (725, 814), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n'), ((1141, 1155), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (1150, 1155), False, 'import glob\n'), ((1308, 1351), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {'max_depth': '(1)', 'n_estimators': '(1)'}), '(max_depth=1, n_estimators=1)\n', (1322, 1351), False, 'from lightgbm import LGBMClassifier\n'), ((891, 908), 'platform.system', 'platform.system', ([], {}), '()\n', (906, 908), False, 'import platform\n'), ((931, 1042), 
'gators.model_building.lgbm_treelite_dumper.LGBMTreeliteDumper.dump', 'LGBMTreeliteDumper.dump', ([], {'model': 'model', 'toolchain': '"""msvc"""', 'parallel_comp': '(1)', 'model_path': '"""."""', 'model_name': '"""dummy"""'}), "(model=model, toolchain='msvc', parallel_comp=1,\n model_path='.', model_name='dummy')\n", (954, 1042), False, 'from gators.model_building.lgbm_treelite_dumper import LGBMTreeliteDumper\n')] |
"""
Convert ACE data to our json format.
"""
import xml.etree.ElementTree as ET
import json
from os import path
import os
import re
import argparse
from dataclasses import dataclass
from typing import List
import spacy
from spacy.symbols import ORTH
import numpy as np
class AceException(Exception):
    """Base class for errors raised while converting ACE annotations."""
class CrossSentenceException(AceException):
    """Raised when an annotation crosses a sentence boundary."""
class MultiTokenTrigerException(AceException):
    """Raised for event triggers spanning more than one token.

    (The class name's spelling of "Triger" is kept: callers catch it by name.)
    """
def in_between(ix, pair):
    """True iff ``ix`` lies strictly inside the open interval ``(pair[0], pair[1])``."""
    lo, hi = pair[0], pair[1]
    assert ix != lo and ix != hi
    return lo < ix < hi
@dataclass
class TokSpan:
    """A contiguous text span addressed by (inclusive) character offsets.

    ``align`` derives token-index spans from the character offsets; the
    ``adjust*`` methods then shift those spans as whitespace tokens are
    removed from the sentence, so they stay valid afterwards.
    """
    # Note that end chars are inclusive.
    start_char: int
    end_char: int
    text_string: str
    def align(self, sent):
        """Compute token spans of this mention w.r.t. the doc and the sentence."""
        self.span_doc = get_token_indices(self, sent)
        self.span_sentence = get_token_indices(self, sent.as_doc())
        # Starts identical to span_sentence; mutated by `adjust` below.
        self.adjusted_span_sentence = get_token_indices(self, sent.as_doc())
        self.adjusted_text_string = str(self.text_string)
    def adjust(self, tok):
        """Account for the whitespace token ``tok`` being removed from the sentence."""
        if in_between(tok.i, self.span_sentence):
            # A whitespace token strictly inside the span: the span shrinks by one.
            assert tok.text == "\n" or tok.text == " "  # Either a newline or an occasional whitespace.
            self.adjusted_text_string = self.adjusted_text_string.replace("\n", " ")
            self.adjusted_span_sentence = (self.adjusted_span_sentence[0],
                                           self.adjusted_span_sentence[1] - 1)
        elif tok.i < self.span_sentence[0]:
            # A token removed before the span: the whole span shifts left by one.
            self.adjusted_span_sentence = tuple([x - 1 for x in self.adjusted_span_sentence])
    def adjust_spans_doc(self, entry_start):
        """Offset the sentence-level span by the sentence's token start in the doc."""
        self.adjusted_span_doc = tuple([x + entry_start for x in self.adjusted_span_sentence])
@dataclass
class Entity(TokSpan):
    """An entity / value / timex mention; serializes to [start, end, type]."""
    mention_id: str
    mention_type: str
    flavor: str
    def to_json(self):
        start, end = self.adjusted_span_doc
        return [start, end, self.mention_type]
@dataclass
class RelationArgument(TokSpan):
    """One endpoint (argument mention) of a binary relation."""
    # ACE reference ID of the underlying entity mention.
    argument_id: str
    # Role label of this argument within the relation.
    relation_role: str
@dataclass
class Relation:
    """A binary relation between two argument mentions."""
    relation_type: str
    arg1: RelationArgument
    arg2: RelationArgument
    def align(self, sent):
        for argument in (self.arg1, self.arg2):
            argument.align(sent)
    def adjust(self, tok):
        for argument in (self.arg1, self.arg2):
            argument.adjust(tok)
    def adjust_spans_doc(self, entry_start):
        for argument in (self.arg1, self.arg2):
            argument.adjust_spans_doc(entry_start)
    def to_json(self):
        """Serialize as [start1, end1, start2, end2, type]."""
        span1 = self.arg1.adjusted_span_doc
        span2 = self.arg2.adjusted_span_doc
        return [*span1, *span2, self.relation_type]
@dataclass
class EventTrigger(TokSpan):
    """The anchor (trigger) word of an event mention."""
    # ID of the event mention this trigger anchors.
    trigger_id: str
    # "TYPE.SUBTYPE" event label.
    trigger_type: str
@dataclass
class EventArgument(TokSpan):
    """An argument (participant) of an event mention."""
    # REFID of the mention filling this argument slot.
    argument_id: str
    # Role of the argument within the event.
    argument_role: str
@dataclass
class Event:
    """An event mention: a single-token trigger plus its arguments."""
    trigger: EventTrigger
    arguments: List[EventArgument]
    def align(self, sent):
        self.trigger.align(sent)
        for argument in self.arguments:
            argument.align(sent)
    def adjust(self, tok):
        self.trigger.adjust(tok)
        for argument in self.arguments:
            argument.adjust(tok)
    def adjust_spans_doc(self, entry_start):
        self.trigger.adjust_spans_doc(entry_start)
        for argument in self.arguments:
            argument.adjust_spans_doc(entry_start)
    def to_json(self):
        """Serialize as [[trigger_tok, type], [arg_start, arg_end, role], ...]."""
        start, end = self.trigger.adjusted_span_doc
        assert start == end  # Multi-token triggers are rejected upstream.
        serialized = [[start, self.trigger.trigger_type]]
        arg_entries = []
        for argument in self.arguments:
            # Collapse time argument roles following Bishan.
            role = "Time" if "Time" in argument.argument_role else argument.argument_role
            arg_entries.append([*argument.adjusted_span_doc, role])
        return serialized + sorted(arg_entries)
@dataclass
class Entry:
    """All annotations (entities, relations, events) for one sentence."""
    sent: spacy.tokens.span.Span
    entities: List[Entity]
    relations: List[Relation]
    events: List[Event]
    def _annotations(self):
        # Every annotation object attached to this sentence, in a fixed order.
        return self.entities + self.relations + self.events
    def align(self):
        for annotation in self._annotations():
            annotation.align(self.sent)
    def remove_whitespace(self):
        """Drop whitespace tokens, shifting all annotation spans to match."""
        self.align()
        kept = []
        for tok in self.sent.as_doc():
            if tok.is_space:
                self.adjust(tok)
            else:
                kept.append(tok)
        self.final_toks = kept
    def adjust(self, tok):
        for annotation in self._annotations():
            annotation.adjust(tok)
    def adjust_spans_doc(self, entry_start):
        self.adjusted_start = entry_start
        for annotation in self._annotations():
            annotation.adjust_spans_doc(entry_start)
    def to_json(self):
        self.entities = sorted(self.entities, key=lambda entity: entity.span_sentence)
        ner = [entity.to_json() for entity in self.entities]
        ner_flavors = [entity.flavor for entity in self.entities]
        relations = sorted(relation.to_json() for relation in self.relations)
        events = sorted(event.to_json() for event in self.events)
        sentences = [tok.text for tok in self.final_toks]
        return dict(sentences=sentences, ner=ner, relations=relations, events=events,
                    sentence_start=self.adjusted_start, ner_flavor=ner_flavors)
    def is_real(self):
        """False for sentences that became empty after whitespace removal."""
        if self.final_toks:
            return True
        # An empty sentence must not carry any annotations.
        assert len(self.entities) == len(self.relations) == len(self.events) == 0
        return False
class Doc:
    """A document: a list of sentence-level Entry objects plus a doc key."""
    def __init__(self, entries, doc_key):
        self.entries = entries
        self.doc_key = doc_key
    def remove_whitespace(self):
        """Drop whitespace tokens in each entry, then discard empty sentences."""
        for entry in self.entries:
            entry.remove_whitespace()
        self.entries = [entry for entry in self.entries if entry.is_real()]
    def adjust_spans_doc(self):
        """Give each entry its running token offset within the document.

        Uses a plain Python running total instead of np.cumsum / np.roll: the
        numpy version leaked np.int64 values into the JSON-bound output
        (``sentence_start`` and the doc-level spans), which json.dump cannot
        serialize.
        """
        start = 0
        for entry in self.entries:
            entry.adjust_spans_doc(start)
            start += len(entry.final_toks)
    def to_json(self):
        """Serialize the whole document, field-by-field across sentences."""
        self.remove_whitespace()
        self.adjust_spans_doc()
        by_entry = [entry.to_json() for entry in self.entries]
        res = {
            field: [entry[field] for entry in by_entry]
            for field in ["sentences", "ner", "relations", "events", "sentence_start"]
        }
        res["doc_key"] = self.doc_key
        return res
def debug_if(cond):
    """Drop into an ipdb shell when ``cond`` holds (debugging aid)."""
    if not cond:
        return
    import ipdb
    ipdb.set_trace()
def get_token_indices(entity, sent):
    """Map an entity's character offsets onto (start, end) token indices in ``sent``.

    Drops into the debugger when the offsets do not line up with exactly one
    token boundary each.
    """
    matches_start = [tok for tok in sent if tok.idx == entity.start_char]
    debug_if(len(matches_start) != 1)
    start_ix = matches_start[0].i
    matches_end = [tok for tok in sent if tok.idx + len(tok) - 1 == entity.end_char]
    debug_if(len(matches_end) != 1)
    end_ix = matches_end[0].i
    return start_ix, end_ix
def get_token_of(doc, char):
    """Return the token of ``doc`` whose character range contains ``char``."""
    for token in doc:
        if token.idx <= char < token.idx + len(token):
            return doc[token.i]
    raise Exception('Should not get here.')
# Copied over from Heng Ji's student's code.
class Document:
    def __init__(self, annotation_path, text_path, doc_key, fold, heads_only=True,
                 real_entities_only=True, include_pronouns=False):
        '''
        Parse one ACE document: its apf.xml annotation file plus the source text.

        :param annotation_path: path to the ACE apf.xml annotation file.
        :param text_path: path to the corresponding source text file.
        :param doc_key: document identifier; also keys document-specific fixes.
        :param fold: dataset fold label; stored on the instance.
        :param heads_only: if True, use entity head spans rather than full extents.
        :param real_entities_only: if True, keep only "entity" flavored mentions
            (optionally pronouns too), dropping values / timex from the list.
        :param include_pronouns: if True (with real_entities_only), keep
            pronominal mentions as well.
        '''
        self._heads_only = heads_only
        self._real_entities_only = real_entities_only
        self._doc_key = doc_key
        self._annotation_path = annotation_path
        self._annotation_xml = ET.parse(self._annotation_path)
        self._text_path = text_path
        self._text = self._load_text(text_path)
        # Tokenize / sentence-split the cleaned text; it must round-trip exactly.
        self.doc = self._make_nlp(self._text)
        assert self.doc.text == self._text
        # Entities are collected first: events (and presumably relations)
        # reference them by mention ID via entity_lookup.
        self.entity_list, self.entity_ids = self._populate_entity_list()
        self.entity_lookup = self._populate_entity_lookup()
        if self._real_entities_only:
            # Optionally keep pronominal mentions alongside other entity mentions.
            self._allowed_flavors = ["entity", "pronoun"] if include_pronouns else ["entity"]
            self.entity_list = [x for x in self.entity_list if x.flavor in self._allowed_flavors]
        else:
            self._allowed_flavors = None
        self.event_list = self._populate_event_list()
        self.relation_list = self._populate_relation_list()
        self._fold = fold
    def _make_nlp(self, text):
        '''
        Build a spacy pipeline with tokenizer / sentence-splitter patches for
        known ACE data quirks, run it over `text`, and return the parsed doc.
        '''
        # Prevent edge case where there are sentence breaks in bad places
        def custom_seg(doc):
            # Hand-tuned sentence-boundary overrides; several are specific to
            # individual ACE documents or phrases observed in the corpus.
            for index, token in enumerate(doc):
                if self._doc_key == "AFP_ENG_20030417.0307":
                    if token.text == "Ivanov":
                        token.sent_start = False
                # Double dashes never start a sentence (nor does what follows).
                if '--' in token.text:
                    doc[index].sent_start = False
                    doc[index + 1].sent_start = False
                if token.text == "things" and doc[index + 1].text == "their":
                    doc[index + 1].sent_start = False
                if (token.text == "Explosions" and
                    token.i < len(doc) and
                    doc[index - 1].text == "." and
                    doc[index - 2].text == "Baghdad"):
                    token.sent_start = True
                # Comma followed by whitespace doesn't end a sentence.
                if token.text == "," and doc[index + 1].is_space:
                    doc[index + 2].sent_start = False
                # "And" only starts a sentence if preceded by period or question mark.
                if token.text in ["and", "but"] and doc[index - 1].text not in [".", "?", "!"]:
                    doc[index].sent_start = False
                # Only sentence-final punctuation (or whitespace) may precede
                # a sentence boundary.
                if (not ((token.is_punct and token.text not in [",", "_", ";", "...", ":", "(", ")", '"']) or token.is_space)
                        and index < len(doc) - 1):
                    doc[index + 1].sent_start = False
                # Multi-char whitespace (e.g. blank line) forces a new sentence;
                # a single newline suppresses one.
                if "\n" in token.text:
                    if index + 1 < len(doc):
                        next_token = doc[index + 1]
                        if len(token) > 1:
                            next_token.sent_start = True
                        else:
                            next_token.sent_start = False
                # A hyphen joining two word tokens never starts a sentence.
                if token.text == "-":
                    before = doc[index - 1]
                    after = doc[index + 1]
                    if not (before.is_space or before.is_punct or after.is_space or after.is_punct):
                        after.sent_start = False
            return doc
        nlp = spacy.load('en')
        # Run the overrides before the parser so its sentence splits respect them.
        nlp.add_pipe(custom_seg, before='parser')
        # Abbreviations that must stay single tokens (period included).
        single_tokens = ['sgt.',
                         'sen.',
                         'col.',
                         'brig.',
                         'gen.',
                         'maj.',
                         'sr.',
                         'lt.',
                         'cmdr.',
                         'u.s.',
                         'mr.',
                         'p.o.w.',
                         'u.k.',
                         'u.n.',
                         'ft.',
                         'dr.',
                         'd.c.',
                         'mt.',
                         'st.',
                         'snr.',
                         'rep.',
                         'ms.',
                         'capt.',
                         'sq.',
                         'jr.',
                         'ave.']
        # Register lowercase, UPPERCASE, and Capitalized variants of each.
        for special_case in single_tokens:
            nlp.tokenizer.add_special_case(special_case, [dict(ORTH=special_case)])
            upped = special_case.upper()
            nlp.tokenizer.add_special_case(upped, [dict(ORTH=upped)])
            capped = special_case.capitalize()
            nlp.tokenizer.add_special_case(capped, [dict(ORTH=capped)])
        doc = nlp(text)
        # Tokenization must not alter the underlying text.
        assert doc.text == text
        return doc
def _load_text(self, text_path):
'''
Load in text and strip out tags.
'''
with open(text_path, "r") as f:
text_data = f.read()
# Get rid of XML tags.
remove_tags = re.compile('<.*?>', re.DOTALL) # Also match expressions with a newline in the middle.
text_data = remove_tags.sub("", text_data)
# Fix errors in ACE.
text_data = text_data.replace("dr. germ. the", "dr. germ, the")
text_data = text_data.replace("arms inspectors. 300 miles west",
"arms inspectors, 300 miles west")
if self._doc_key in["APW_ENG_20030327.0376", "APW_ENG_20030519.0367"]:
text_data = text_data.replace("_", "-")
return text_data
    def _get_chars(self, start_char, end_char, trigger=False):
        """Snap an inclusive character span onto token boundaries.

        Returns (start_char, end_char, span) where `span` is the spacy span
        covering the (possibly widened) character range.

        :raises MultiTokenTrigerException: if `trigger` is True and the span
            covers more than one token; callers skip such event mentions.
        """
        # char_span returns None when the range does not align with tokens.
        the_text = self.doc.char_span(start_char, end_char + 1)
        start_tok = get_token_of(self.doc, start_char)
        end_tok = get_token_of(self.doc, end_char)
        if trigger and start_tok != end_tok:
            raise MultiTokenTrigerException()
        # # If the trigger is multiple words, get the highest token in the dependency parse.
        # the_root = self.doc[start_tok.i:end_tok.i + 1].root
        # start_char = the_root.idx
        # end_char = start_char + len(the_root) - 1
        # the_text = the_root.text
        elif the_text is None:
            # Otherwise, just take all spans containing the entity: widen the
            # range to the enclosing tokens' boundaries and retry.
            start_char = start_tok.idx
            end_char = end_tok.idx + len(end_tok) - 1
            the_text = self.doc.char_span(start_char, end_char + 1)
        return start_char, end_char, the_text
    def _populate_entity_list(self):
        """Collect entity, value, and timex2 mentions from the apf.xml.

        Returns (mentions, entity_ids): `mentions` is a flat list of Entity
        objects tagged with a `flavor`; `entity_ids` holds the IDs of the
        top-level entity/value/timex2 elements, used later to validate event
        argument references.
        """
        entity_ids = []
        res = []
        xml_root = self._annotation_xml.getroot()
        # Entities: use the head span unless the full extent was requested.
        field_to_find = "head" if self._heads_only else "extent"
        for one_entity in xml_root[0].findall('entity'):
            entity_id = one_entity.attrib["ID"]
            entity_ids.append(entity_id)
            for one_entity_mention in one_entity.findall('entity_mention'):
                mention_id = one_entity_mention.attrib['ID']
                mention_type = one_entity.attrib['TYPE']
                # Others have only looked at the head.
                tentative_start = int(one_entity_mention.find(field_to_find)[0].attrib['START'])
                tentative_end = int(one_entity_mention.find(field_to_find)[0].attrib['END'])
                start_char, end_char, text_string = self._get_chars(tentative_start, tentative_end)
                # Parser chokes on the space.
                if (self._doc_key == "soc.history.war.world-war-ii_20050127.2403" and
                        text_string.text == "<EMAIL>"):
                    continue
                # Keep option to ignore pronouns.
                flavor = "pronoun" if one_entity_mention.attrib["TYPE"] == "PRO" else "entity"
                entry = Entity(start_char, end_char, text_string, mention_id=mention_id,
                               mention_type=mention_type, flavor=flavor)
                res.append(entry)
        # Values. Values don't have heads.
        field_to_find = "extent"
        for one_value in xml_root[0].findall('value'):
            value_id = one_value.attrib["ID"]
            entity_ids.append(value_id)
            for one_value_mention in one_value.findall('value_mention'):
                mention_id = one_value_mention.attrib['ID']
                # In the AAAI 2019 paper, they lump all the values together into one label.
                mention_type = 'VALUE'
                tentative_start = int(one_value_mention.find(field_to_find)[0].attrib['START'])
                tentative_end = int(one_value_mention.find(field_to_find)[0].attrib['END'])
                start_char, end_char, text_string = self._get_chars(tentative_start, tentative_end)
                # Parser chokes on the space.
                if (self._doc_key == "soc.history.war.world-war-ii_20050127.2403" and
                        text_string.text == "<EMAIL>"):
                    continue
                entry = Entity(start_char, end_char, text_string, mention_id=mention_id,
                               mention_type=mention_type, flavor="value")
                res.append(entry)
        # Also timex2. These also don't have heads.
        field_to_find = "extent"
        for one_timex2 in xml_root[0].findall('timex2'):
            timex2_id = one_timex2.attrib["ID"]
            entity_ids.append(timex2_id)
            for one_timex2_mention in one_timex2.findall('timex2_mention'):
                mention_id = one_timex2_mention.attrib['ID']
                mention_type = 'TIMEX2'
                # Others have only looked at the head.
                tentative_start = int(one_timex2_mention.find(field_to_find)[0].attrib['START'])
                tentative_end = int(one_timex2_mention.find(field_to_find)[0].attrib['END'])
                start_char, end_char, text_string = self._get_chars(tentative_start, tentative_end)
                # Crosses a sentence boundary.
                if self._doc_key == "CNN_ENG_20030508_210555.5" and start_char == 1316 and end_char == 1335:
                    continue
                # This is just ridiculous.
                weird_times = set(["BACONSREBELLION_20050127.1017", "MARKBACKER_20041103.1300"])
                if self._doc_key in weird_times and "????" in text_string.text:
                    continue
                entry = Entity(start_char, end_char, text_string, mention_id=mention_id,
                               mention_type=mention_type, flavor="timex2")
                res.append(entry)
        return res, entity_ids
def _populate_entity_lookup(self):
return {entry.mention_id: entry for entry in self.entity_list}
def _populate_event_list(self):
res = []
xml_root = self._annotation_xml.getroot()
for one_event in xml_root[0].findall('event'):
for one_event_mention in one_event.findall('event_mention'):
include = True
trigger_id = one_event_mention.attrib['ID']
trigger_type = '%s.%s' % (one_event.attrib['TYPE'], one_event.attrib['SUBTYPE'])
trigger_tag = one_event_mention.find('anchor')
try:
start_char, end_char, text_string = self._get_chars(
int(trigger_tag[0].attrib['START']),
int(trigger_tag[0].attrib['END']),
trigger=True)
# If we hit a multi-token trigger, skip the event mention.
except MultiTokenTrigerException:
continue
# Buggy event. Crosses sentence. Skip it.
if self._doc_key == "APW_ENG_20030308.0314" and start_char == 3263 and end_char == 3270:
continue
if self._doc_key == "soc.history.what-if_20050129.1404" and start_char == 554 and end_char == 556:
continue
event_trigger = EventTrigger(start_char, end_char, text_string, trigger_id,
trigger_type)
argument_list = []
for one_event_mention_argument in one_event_mention.findall('event_mention_argument'):
argument_id = one_event_mention_argument.attrib['REFID']
if self._heads_only:
assert argument_id in self.entity_lookup
this_entity = self.entity_lookup[argument_id]
# If we're only doing real entities and this isn't one, don't append.
if self._real_entities_only and this_entity.flavor not in self._allowed_flavors:
continue
start_char, end_char, text_string = (this_entity.start_char,
this_entity.end_char,
this_entity.text_string)
else:
event_mention_argument_tag = one_event_mention_argument.find('extent')
relation_mention_argument_tag = one_event_mention_argument.find('extent')
start_char, end_char, text_string = self._get_chars(
int(event_mention_argument_tag[0].attrib['START']),
int(event_mention_argument_tag[0].attrib['END']))
# Check that we've seen the entity. If it's a value or timex, just skip it as an
# argument.
entity_id = "-".join(argument_id.split("-")[:-1])
assert entity_id in self.entity_ids
argument_role = one_event_mention_argument.attrib['ROLE']
to_append = EventArgument(start_char, end_char, text_string, argument_id,
argument_role)
argument_list.append(to_append)
if include:
res.append(Event(event_trigger, argument_list))
return res
    def _populate_relation_list(self):
        """Collect one Relation per relation mention in the .apf.xml file.

        Mentions with mis-annotated or filtered-out arguments (e.g. non-entity
        flavors when keeping real entities only, or the hard-coded buggy spans
        below) are skipped via the `include` flag.

        Returns:
            list of Relation objects (exact duplicates removed).
        """
        res = []
        xml_root = self._annotation_xml.getroot()
        for one_relation in xml_root[0].findall('relation'):
            for one_relation_mention in one_relation.findall('relation_mention'):
                include = True
                relation_type = '%s.%s' % (one_relation.attrib['TYPE'], one_relation.attrib['SUBTYPE'])
                argument_dict = {}
                for one_relation_mention_argument in one_relation_mention.findall("relation_mention_argument"):
                    argument_id = one_relation_mention_argument.attrib['REFID']
                    # If doing heads only, get the span by looking up the entity and getting its span.
                    if self._heads_only:
                        assert argument_id in self.entity_lookup
                        this_entity = self.entity_lookup[argument_id]
                        start_char, end_char, text_string = (this_entity.start_char,
                                                             this_entity.end_char,
                                                             this_entity.text_string)
                    else:
                        relation_mention_argument_tag = one_relation_mention_argument.find('extent')
                        start_char, end_char, text_string = self._get_chars(
                            int(relation_mention_argument_tag[0].attrib['START']),
                            int(relation_mention_argument_tag[0].attrib['END']))
                    # Check that we've seen the entity. If it's a value or timex, skip the event.
                    entity_id = "-".join(argument_id.split("-")[:-1])
                    assert entity_id in self.entity_ids
                    relation_role = one_relation_mention_argument.attrib['ROLE']
                    this_argument = RelationArgument(
                        start_char, end_char, text_string, argument_id, relation_role)
                    # Skip if not a real entity and we're only keeping real entities.
                    if self._heads_only and self._real_entities_only:
                        this_entity = self.entity_lookup[this_argument.argument_id]
                        if this_entity.flavor not in self._allowed_flavors:
                            include = False
                    if this_argument.relation_role == "Arg-1":
                        argument_dict["arg1"] = this_argument
                    elif this_argument.relation_role == "Arg-2":
                        # This is a mis-annotated relation. Ignore it.
                        if (self._doc_key == 'CNN_ENG_20030430_093016.0' and
                                text_string.text == "the school in an\nunderprivileged rural area"):
                            include = False
                        if (self._doc_key == "CNN_ENG_20030430_093016.0" and
                                start_char == 3091 and end_char == 3096):
                            include = False
                        # Crosses a sentence boundary.
                        if (self._doc_key == "rec.travel.cruises_20050222.0313" and
                                start_char == 1435 and end_char == 1442):
                            include = False
                        if (self._doc_key == "rec.travel.cruises_20050222.0313" and
                                start_char == 1456 and end_char == 1458):
                            include = False
                        argument_dict["arg2"] = this_argument
                    else:
                        # Any role other than Arg-1 / Arg-2: drop the mention.
                        include = False
                if include:
                    relation = Relation(relation_type, argument_dict["arg1"], argument_dict["arg2"])
                    # There are some examples where the identical relation mention shows up twice,
                    # for instance "young men and women in this country" in
                    # CNN_CF_20030304.1900.04.apf.xml. When this occurs, ignore it.
                    if relation in res:
                        continue
                    else:
                        res.append(relation)
        return res
@staticmethod
def _check_in_range(span, sent):
# The end character inequality must be string. since end character for spans are inclusive
# and end characters for sentences are exclusive.
# Raise an exception if the span crosses a sentence boundary.
if span.start_char >= sent.start_char and span.end_char < sent.end_char:
return True
if span.end_char <= sent.start_char:
return False
if span.start_char >= sent.end_char:
return False
else:
raise CrossSentenceException
    def _sentence_get_ner(self, sent):
        """Return the entities from self.entity_list that fall inside `sent`.

        With full extents, entities crossing a sentence boundary are reported
        and removed from self.entity_list; with heads-only spans that should
        never happen, so the exception is re-raised. Matched entities are also
        recorded in self._seen_so_far['entity'] for the final sanity check.
        """
        entities = []
        to_remove = []  # Only relevant for full extents.
        for entity in self.entity_list:
            try:
                in_range = self._check_in_range(entity, sent)
            # If the entity crosses a sentence boundary
            except CrossSentenceException as e:
                # This shouldn't happen if we're only using entity heads; raise an exception.
                if self._heads_only:
                    raise e
                # With full extents this may happen; notify user and skip this example.
                else:
                    # Add to list of entities that will be removed.
                    to_remove.append(entity)
                    msg = f'Entity "{entity.text_string}" crosses sentence boundary. Skipping.'
                    print(msg)
                    continue
            if in_range:
                # Each entity must be assigned to at most one sentence.
                debug_if(entity in self._seen_so_far['entity'])
                self._seen_so_far["entity"].append(entity)
                entities.append(entity)
        # If doing full entity extents, remove entities that crossed sentence boundaries.
        for failure in to_remove:
            self.entity_list.remove(failure)
        return entities
def _sentence_get_relations(self, sent):
def in_range(candidate):
each_one = [self._check_in_range(entry, sent) for entry in [candidate.arg1, candidate.arg2]]
if all(each_one):
debug_if(candidate in self._seen_so_far['relation'])
return True
if all([not entry for entry in each_one]):
return False
else:
import ipdb; ipdb.set_trace()
relations = []
for relation in self.relation_list:
# This is an annotation mistake and crosses sentence boundaries. Just ignore it.
if in_range(relation):
self._seen_so_far["relation"].append(relation)
relations.append(relation)
return relations
def _sentence_get_events(self, sent):
def in_range(candidate):
each_one = ([self._check_in_range(candidate.trigger, sent)] +
[self._check_in_range(entry, sent) for entry in candidate.arguments])
if all(each_one):
debug_if(candidate in self._seen_so_far['event'])
return True
if all([not entry for entry in each_one]):
return False
else:
import ipdb; ipdb.set_trace()
events = []
for event in self.event_list:
# Event that crosses sentence.
if in_range(event):
self._seen_so_far["event"].append(event)
trigger_span = get_token_indices(event.trigger, sent)
debug_if(trigger_span[0] != trigger_span[1])
events.append(event)
return events
def _get_entry(self, sent):
toks = [tok for tok in sent]
ner = self._sentence_get_ner(sent)
rel = self._sentence_get_relations(sent)
events = self._sentence_get_events(sent)
return Entry(sent=sent, entities=ner, relations=rel, events=events)
def _check_all_seen(self):
assert len(self._seen_so_far["entity"]) == len(self.entity_list)
assert len(self._seen_so_far["relation"]) == len(self.relation_list)
assert len(self._seen_so_far["event"]) == len(self.event_list)
def to_json(self):
self._seen_so_far = dict(entity=[], relation=[], event=[])
entries = [self._get_entry(sent) for sent in self.doc.sents]
doc = Doc(entries, self._doc_key)
self._check_all_seen()
js = doc.to_json()
return js
####################
# Main function.
def one_fold(fold, output_dir, heads_only=True, real_entities_only=True, include_pronouns=False):
    """Convert every document of one ACE split and dump the result as JSON.

    Args:
        fold (str): split name ("train", "dev" or "test").
        output_dir (str): directory that receives `<fold>.json`.
        heads_only (bool): use entity heads instead of full extents.
        real_entities_only (bool): drop values/timex from event arguments.
        include_pronouns (bool): keep pronouns as entities.
    """
    doc_path = "./data/ace-event/raw-data"
    split_path = "./scripts/data/ace-event/event-split"
    # The filelist enumerates the documents belonging to this fold.
    with open(path.join(split_path, fold + ".filelist")) as f:
        doc_keys = [line.strip() for line in f]
    converted = []
    with open(path.join(output_dir, fold + ".json"), "w") as g:
        for doc_key in doc_keys:
            annotation_path = path.join(doc_path, doc_key + ".apf.xml")
            text_path = path.join(doc_path, doc_key + ".sgm")
            document = Document(annotation_path, text_path, doc_key, fold, heads_only,
                                real_entities_only, include_pronouns)
            converted.append(document.to_json())
        g.write(json.dumps(converted, default=int, indent=4))
def main():
    """Parse command-line flags and preprocess the train/dev/test folds."""
    parser = argparse.ArgumentParser(description="Preprocess ACE event data.")
    parser.add_argument("output_name", help="Name for output directory.")
    parser.add_argument("--use_span_extent", action="store_true",
                        help="Use full extent of entity mentions instead of just heads.")
    parser.add_argument("--include_times_and_values", action="store_true",
                        help="Treat times and values as entities and include them as event arguments.")
    parser.add_argument("--include_pronouns", action="store_true",
                        help="Include pronouns as entities and include them as event arguments.")
    args = parser.parse_args()
    output_dir = f"./data/ace-event/processed-data/{args.output_name}/json"
    os.makedirs(output_dir, exist_ok=True)
    for split in ("train", "dev", "test"):
        print(f"Parsing {split} set.")
        one_fold(split,
                 output_dir,
                 heads_only=not args.use_span_extent,
                 real_entities_only=not args.include_times_and_values,
                 include_pronouns=args.include_pronouns)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"xml.etree.ElementTree.parse",
"os.makedirs",
"argparse.ArgumentParser",
"ipdb.set_trace",
"numpy.roll",
"json.dumps",
"numpy.cumsum",
"spacy.load",
"os.path.join",
"re.compile"
] | [((31298, 31363), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess ACE event data."""'}), "(description='Preprocess ACE event data.')\n", (31321, 31363), False, 'import argparse\n'), ((32050, 32088), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (32061, 32088), False, 'import os\n'), ((6399, 6423), 'numpy.cumsum', 'np.cumsum', (['entry_lengths'], {}), '(entry_lengths)\n', (6408, 6423), True, 'import numpy as np\n'), ((6447, 6471), 'numpy.roll', 'np.roll', (['entry_starts', '(1)'], {}), '(entry_starts, 1)\n', (6454, 6471), True, 'import numpy as np\n'), ((7031, 7047), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7045, 7047), False, 'import ipdb\n'), ((8276, 8307), 'xml.etree.ElementTree.parse', 'ET.parse', (['self._annotation_path'], {}), '(self._annotation_path)\n', (8284, 8307), True, 'import xml.etree.ElementTree as ET\n'), ((11291, 11307), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (11301, 11307), False, 'import spacy\n'), ((12875, 12905), 're.compile', 're.compile', (['"""<.*?>"""', 're.DOTALL'], {}), "('<.*?>', re.DOTALL)\n", (12885, 12905), False, 'import re\n'), ((30616, 30657), 'os.path.join', 'path.join', (['split_path', "(fold + '.filelist')"], {}), "(split_path, fold + '.filelist')\n", (30625, 30657), False, 'from os import path\n'), ((30761, 30798), 'os.path.join', 'path.join', (['output_dir', "(fold + '.json')"], {}), "(output_dir, fold + '.json')\n", (30770, 30798), False, 'from os import path\n'), ((30874, 30915), 'os.path.join', 'path.join', (['doc_path', "(doc_key + '.apf.xml')"], {}), "(doc_path, doc_key + '.apf.xml')\n", (30883, 30915), False, 'from os import path\n'), ((30940, 30977), 'os.path.join', 'path.join', (['doc_path', "(doc_key + '.sgm')"], {}), "(doc_path, doc_key + '.sgm')\n", (30949, 30977), False, 'from os import path\n'), ((31218, 31260), 'json.dumps', 'json.dumps', (['to_file'], {'default': 'int', 
'indent': '(4)'}), '(to_file, default=int, indent=4)\n', (31228, 31260), False, 'import json\n'), ((28284, 28300), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (28298, 28300), False, 'import ipdb\n'), ((29127, 29143), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (29141, 29143), False, 'import ipdb\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" The io.files module provides basic functions for working with file-based
images in nipy.
* load : load an image from a file
* save : save an image to a file
Examples
--------
See documentation for load and save functions for worked examples.
"""
import os
import numpy as np
import nibabel as nib
from nibabel.spatialimages import HeaderDataError
from ..core.image.image import is_image
from .nifti_ref import (nipy2nifti, nifti2nipy)
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
       Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    Image : image object

    Examples
    --------
    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    base_img = nib.load(filename)
    # NOTE(review): this relies on the private `_data` attribute and the
    # deprecated `get_affine`/`get_header` accessors of older nibabel —
    # confirm the pinned nibabel version before modernizing.
    as_nifti = nib.Nifti1Image(base_img._data, base_img.get_affine(), base_img.get_header())
    return nifti2nipy(as_nifti)
def save(img, filename, dtype_from='data'):
    """Write the image to a file.

    Parameters
    ----------
    img : An `Image` object
    filename : string
        Should be a valid filename.
    dtype_from : {'data', 'header'} or dtype specifier, optional
        Method of setting dtype to save data to disk. Value of 'data' (default),
        means use data dtype to save. 'header' means use data dtype specified
        in header, if available, otherwise use data dtype. Can also be any
        valid specifier for a numpy dtype, e.g. 'i4', ``np.float32``. Not every
        format supports every dtype, so some values of this parameter or data
        dtypes will raise errors.

    Returns
    -------
    image : An `Image` object
        Possibly modified by saving.

    See Also
    --------
    load_image : function for loading images
    Image : image object

    Examples
    --------
    Make a temporary directory to store files

    >>> import os
    >>> from tempfile import mkdtemp
    >>> tmpdir = mkdtemp()

    Make some some files and save them

    >>> import numpy as np
    >>> from nipy.core.api import Image, AffineTransform
    >>> from nipy.io.api import save_image
    >>> data = np.zeros((91,109,91), dtype=np.uint8)
    >>> cmap = AffineTransform('kji', 'zxy', np.eye(4))
    >>> img = Image(data, cmap)
    >>> fname1 = os.path.join(tmpdir, 'img1.nii.gz')
    >>> saved_img1 = save_image(img, fname1)
    >>> saved_img1.shape
    (91, 109, 91)
    >>> fname2 = os.path.join(tmpdir, 'img2.img.gz')
    >>> saved_img2 = save_image(img, fname2)
    >>> saved_img2.shape
    (91, 109, 91)
    >>> fname = 'test.mnc'
    >>> saved_image3 = save_image(img, fname)
    Traceback (most recent call last):
       ...
    ValueError: Sorry, we cannot yet save as format "minc"

    Finally, we clear up our temporary files:

    >>> import shutil
    >>> shutil.rmtree(tmpdir)

    Notes
    -----
    Filetype is determined by the file extension in 'filename'.  Currently the
    following filetypes are supported:

    * Nifti single file : ['.nii', '.nii.gz']
    * Nifti file pair : ['.hdr', '.hdr.gz']
    * SPM Analyze : ['.img', '.img.gz']
    """
    # Work out the dtype to write to disk.
    if isinstance(dtype_from, basestring):
        if dtype_from == 'header':
            io_dtype = None  # defer to the dtype recorded in the header
        elif dtype_from == 'data':
            io_dtype = img.get_data().dtype
        else:
            io_dtype = np.dtype(dtype_from)
    else:
        io_dtype = np.dtype(dtype_from)
    # Convert to NIfTI and dispatch on the filename extension.
    ni_img = nipy2nifti(img, data_dtype=io_dtype)
    ftype = _type_from_filename(filename)
    if ftype.startswith('nifti1'):
        ni_img.to_filename(filename)
        return img
    if ftype == 'analyze':
        try:
            ana_img = nib.Spm2AnalyzeImage.from_image(ni_img)
        except HeaderDataError:
            raise HeaderDataError('SPM analyze does not support datatype %s' %
                                  ni_img.get_header().get_data_dtype())
        ana_img.to_filename(filename)
        return img
    raise ValueError('Sorry, we cannot yet save as format "%s"' % ftype)
def _type_from_filename(filename):
    ''' Return image type determined from filename

    Filetype is determined by the file extension in 'filename'.
    Currently the following filetypes are supported:

    * Nifti single file : ['.nii', '.nii.gz']
    * Nifti file pair : ['.hdr', '.hdr.gz']
    * Analyze file pair : ['.img', '.img.gz']

    >>> _type_from_filename('test.nii')
    'nifti1single'
    >>> _type_from_filename('test')
    'nifti1single'
    >>> _type_from_filename('test.hdr')
    'nifti1pair'
    >>> _type_from_filename('test.hdr.gz')
    'nifti1pair'
    >>> _type_from_filename('test.img.gz')
    'analyze'
    >>> _type_from_filename('test.mnc')
    'minc'
    '''
    # Strip a single compression suffix before looking at the extension.
    for compressed_ext in ('.gz', '.bz2'):
        if filename.endswith(compressed_ext):
            filename = filename[:-len(compressed_ext)]
            break
    _, ext = os.path.splitext(filename)
    ext_to_type = {'': 'nifti1single',
                   '.nii': 'nifti1single',
                   '.hdr': 'nifti1pair',
                   '.img': 'analyze',
                   '.mnc': 'minc'}
    if ext in ext_to_type:
        return ext_to_type[ext]
    raise ValueError('Strange file extension "%s"' % ext)
def as_image(image_input):
    ''' Load image from filename or pass through image instance

    Parameters
    ----------
    image_input : str or Image instance
       image or string filename of image.  If a string, load image and
       return.  If an image, pass through without modification

    Returns
    -------
    img : Image or Image-like instance
       Input object if `image_input` seemed to be an image, loaded Image
       object if `image_input` was a string.

    Raises
    ------
    TypeError : if neither string nor image-like passed

    Examples
    --------
    >>> from nipy.testing import anatfile
    >>> from nipy.io.api import load_image
    >>> img = as_image(anatfile)
    >>> img2 = as_image(img)
    >>> img2 is img
    True
    '''
    # Images pass straight through; only string filenames need loading.
    if is_image(image_input):
        return image_input
    elif isinstance(image_input, basestring):
        return load(image_input)
    else:
        raise TypeError('Expecting an image-like object or filename string')
| [
"nibabel.Spm2AnalyzeImage.from_image",
"numpy.dtype",
"os.path.splitext",
"nibabel.load"
] | [((1141, 1159), 'nibabel.load', 'nib.load', (['filename'], {}), '(filename)\n', (1149, 1159), True, 'import nibabel as nib\n'), ((5219, 5245), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5235, 5245), False, 'import os\n'), ((3745, 3765), 'numpy.dtype', 'np.dtype', (['dtype_from'], {}), '(dtype_from)\n', (3753, 3765), True, 'import numpy as np\n'), ((4017, 4056), 'nibabel.Spm2AnalyzeImage.from_image', 'nib.Spm2AnalyzeImage.from_image', (['ni_img'], {}), '(ni_img)\n', (4048, 4056), True, 'import nibabel as nib\n')] |
import random
from .operators import prod
from numpy import array, float64, ndarray
import numba
# Upper bound on tensor dimensionality.
MAX_DIMS = 32
class IndexingError(RuntimeError):
    """Raised when a tensor index or broadcast operation is invalid."""
def index_to_position(index, strides):
    """
    Converts a multidimensional tensor `index` into a single-dimensional position in
    storage based on strides.

    Args:
        index (array-like): index tuple of ints
        strides (array-like): tensor strides

    Return:
        int : position in storage
    """
    # Dot product of the index with the strides.
    pos = 0
    for ind, stride in zip(index, strides):
        pos = pos + ind * stride
    return pos
def count(position, shape, out_index):
    """
    Convert a `position` to an index in the `shape`.

    Should ensure that enumerating position 0 ... size of a
    tensor produces every index exactly once. It
    may not be the inverse of `index_to_position`.

    Args:
        position (int): current position
        shape (tuple): tensor shape
        out_index (array): the index corresponding to position

    Returns:
        None : Fills in `index`.
    """
    # Peel off one dimension at a time, innermost first.
    remaining = position
    for dim in range(len(shape) - 1, -1, -1):
        dim_size = shape[dim]
        out_index[dim] = int(remaining % dim_size)
        remaining = remaining // dim_size
def broadcast_index(big_index, big_shape, shape, out_index):
    """
    Convert an index into a position (see `index_to_position`),
    when the index is from a broadcasted shape. In this case
    it may be larger or with more dimensions than the `shape`
    given. Additional dimensions may need to be mapped to 0 or
    removed.

    Args:
        big_index (array-like): multidimensional index of bigger tensor
        big_shape (array-like): tensor shape of bigger tensor
        shape (array-like): tensor shape of smaller tensor
        out_index (array-like): multidimensional index of smaller tensor
    """
    # Extra leading dimensions of the big shape are simply dropped.
    offset = len(big_shape) - len(shape)
    for dim, dim_size in enumerate(shape):
        if dim_size > 1:
            out_index[dim] = big_index[dim + offset]
        else:
            # Broadcast (size-1) dimensions always map to index 0.
            out_index[dim] = 0
def shape_broadcast(shape1, shape2):
    """
    Broadcast two shapes to create a new union shape.

    Args:
        shape1 (tuple): first shape
        shape2 (tuple): second shape

    Returns:
        tuple: broadcasted shape

    Raises:
        IndexingError: if the shapes cannot be broadcast together.
    """
    a, b = shape1, shape2
    m = max(len(a), len(b))
    c_rev = [0] * m
    a_rev = list(reversed(a))
    b_rev = list(reversed(b))
    for i in range(m):
        if i >= len(a):
            c_rev[i] = b_rev[i]
        elif i >= len(b):
            c_rev[i] = a_rev[i]
        else:
            c_rev[i] = max(a_rev[i], b_rev[i])
            # A dimension broadcasts only if it matches the union or is 1.
            # Bug fix: these messages were plain strings missing the f-prefix,
            # so {a} and {b} were never interpolated into the error text.
            if a_rev[i] != c_rev[i] and a_rev[i] != 1:
                raise IndexingError(f"Broadcast failure {a} {b}")
            if b_rev[i] != c_rev[i] and b_rev[i] != 1:
                raise IndexingError(f"Broadcast failure {a} {b}")
    return tuple(reversed(c_rev))
def strides_from_shape(shape):
    """Return contiguous (row-major) strides for `shape`."""
    strides_rev = []
    running = 1
    # Walk from the innermost dimension outward, accumulating the stride.
    for dim_size in reversed(shape):
        strides_rev.append(running)
        running *= dim_size
    return tuple(reversed(strides_rev))
class TensorData:
    """Flat float storage plus the shape/stride metadata that maps
    multidimensional tensor indices onto positions in that storage."""
    def __init__(self, storage, shape, strides=None):
        """Wrap `storage` (sequence or ndarray) as a tensor of `shape`.

        If `strides` is omitted, contiguous (row-major) strides are
        derived from `shape`.
        """
        if isinstance(storage, ndarray):
            self._storage = storage
        else:
            self._storage = array(storage, dtype=float64)
        if strides is None:
            strides = strides_from_shape(shape)
        assert isinstance(strides, tuple), "Strides must be tuple"
        assert isinstance(shape, tuple), "Shape must be tuple"
        if len(strides) != len(shape):
            raise IndexingError(f"Len of strides {strides} must match {shape}.")
        # Numpy copies of the metadata for fast array-based indexing.
        self._strides = array(strides)
        self._shape = array(shape)
        self.strides = strides
        self.dims = len(strides)
        # Total number of elements.
        self.size = int(prod(shape))
        self.shape = shape
        assert len(self._storage) == self.size
    def to_cuda_(self):
        """Move the underlying storage to the GPU (in place) if needed."""
        if not numba.cuda.is_cuda_array(self._storage):
            self._storage = numba.cuda.to_device(self._storage)
    def is_contiguous(self):
        "Check that the layout is contiguous, i.e. outer dimensions have bigger strides than inner dimensions. "
        last = 1e9
        for stride in self._strides:
            if stride > last:
                return False
            last = stride
        return True
    @staticmethod
    def shape_broadcast(shape_a, shape_b):
        """Return the broadcast union of two shapes (see module-level helper)."""
        return shape_broadcast(shape_a, shape_b)
    def index(self, index):
        """Validate `index` (int or tuple/array) and return its storage position.

        Raises IndexingError for wrong arity, out-of-range, or negative indices.
        """
        if isinstance(index, int):
            index = array([index])
        if isinstance(index, tuple):
            index = array(index)
        # Check for errors
        if index.shape[0] != len(self.shape):
            raise IndexingError(f"Index {index} must be size of {self.shape}.")
        for i, ind in enumerate(index):
            if ind >= self.shape[i]:
                raise IndexingError(f"Index {index} out of range {self.shape}.")
            if ind < 0:
                raise IndexingError(f"Negative indexing for {index} not supported.")
        # Call fast indexing.
        return index_to_position(array(index), self._strides)
    def indices(self):
        """Yield every valid index of the tensor, one tuple at a time."""
        lshape = array(self.shape)
        out_index = array(self.shape)  # reusable buffer filled in by `count`
        for i in range(self.size):
            count(i, lshape, out_index)
            yield tuple(out_index)
    def sample(self):
        """Return one uniformly random valid index."""
        return tuple((random.randint(0, s - 1) for s in self.shape))
    def get(self, key):
        """Return the stored value at index `key`."""
        return self._storage[self.index(key)]
    def set(self, key, val):
        """Store `val` at index `key`."""
        self._storage[self.index(key)] = val
    def tuple(self):
        """Return the raw (storage, shape, strides) arrays."""
        return (self._storage, self._shape, self._strides)
    def permute(self, *order):
        """
        Permute the dimensions of the tensor.

        Args:
            order (list): a permutation of the dimensions

        Returns:
            :class:`TensorData`: a new TensorData with the same storage and a new dimension order.
        """
        assert list(sorted(order)) == list(
            range(len(self.shape))
        ), f"Must give a position to each dimension. Shape: {self.shape} Order: {order}"
        return TensorData(
            self._storage,
            tuple([self.shape[o] for o in order]),
            tuple([self._strides[o] for o in order]),
        )
    def to_string(self):
        """Render the tensor as nested, bracketed rows of formatted floats."""
        s = ""
        for index in self.indices():
            l = ""
            # Open a bracket for every dimension that just rolled over to 0.
            for i in range(len(index) - 1, -1, -1):
                if index[i] == 0:
                    l = "\n%s[" % ("\t" * i) + l
                else:
                    break
            s += l
            v = self.get(index)
            s += f"{v:3.2f}"
            l = ""
            # Close brackets for dimensions that just hit their last element.
            for i in range(len(index) - 1, -1, -1):
                if index[i] == self.shape[i] - 1:
                    l += "]"
                else:
                    break
            if l:
                s += l
            else:
                s += " "
        return s
| [
"random.randint",
"numpy.array",
"numba.cuda.to_device",
"numba.cuda.is_cuda_array"
] | [((3646, 3660), 'numpy.array', 'array', (['strides'], {}), '(strides)\n', (3651, 3660), False, 'from numpy import array, float64, ndarray\n'), ((3683, 3695), 'numpy.array', 'array', (['shape'], {}), '(shape)\n', (3688, 3695), False, 'from numpy import array, float64, ndarray\n'), ((5155, 5172), 'numpy.array', 'array', (['self.shape'], {}), '(self.shape)\n', (5160, 5172), False, 'from numpy import array, float64, ndarray\n'), ((5193, 5210), 'numpy.array', 'array', (['self.shape'], {}), '(self.shape)\n', (5198, 5210), False, 'from numpy import array, float64, ndarray\n'), ((3264, 3293), 'numpy.array', 'array', (['storage'], {'dtype': 'float64'}), '(storage, dtype=float64)\n', (3269, 3293), False, 'from numpy import array, float64, ndarray\n'), ((3911, 3950), 'numba.cuda.is_cuda_array', 'numba.cuda.is_cuda_array', (['self._storage'], {}), '(self._storage)\n', (3935, 3950), False, 'import numba\n'), ((3980, 4015), 'numba.cuda.to_device', 'numba.cuda.to_device', (['self._storage'], {}), '(self._storage)\n', (4000, 4015), False, 'import numba\n'), ((4515, 4529), 'numpy.array', 'array', (['[index]'], {}), '([index])\n', (4520, 4529), False, 'from numpy import array, float64, ndarray\n'), ((4587, 4599), 'numpy.array', 'array', (['index'], {}), '(index)\n', (4592, 4599), False, 'from numpy import array, float64, ndarray\n'), ((5085, 5097), 'numpy.array', 'array', (['index'], {}), '(index)\n', (5090, 5097), False, 'from numpy import array, float64, ndarray\n'), ((5366, 5390), 'random.randint', 'random.randint', (['(0)', '(s - 1)'], {}), '(0, s - 1)\n', (5380, 5390), False, 'import random\n')] |
#!/usr/bin/env python
# coding: utf-8
# Author:
# <NAME>
# Emotional Sentiment on Twitter
# A coronavirus vaccine online firestorm
# In this python script you will find examples of some of the most common
# NLP (Natural Language Processing) techniques used to uncover patterns of
# sentiment and emotion on social media microblogging platforms like Twitter.
# It is organized as follows:
# - Step 1: Exploratory analysis
# - Step 2: Text processing
# - Step 3: Sentiment analysis
# - Step 4: Word frequency
# - Step 5: LDA topics extraction
# - Step 6: Emotion analysis
#
# ## Step 1: EXPLORATORY ANALYSIS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import date
import re # for regular expressions
import string
# Importing the data
tweets = pd.read_csv('input/tweets.csv')
# getting the date column ready for datetime operations
tweets['datetime']= pd.to_datetime(tweets['datetime'])
# A plot of the tweets with the word "CureVac" over the past 6 years,
# resampled to yearly counts.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets.set_index("datetime").groupby(pd.Grouper(freq='Y')).count())
plt.title('Tweets with "CureVac" from 2014 to 2020', fontsize=20)
plt.xlabel('Years', fontsize=15)
plt.ylabel('Tweets', fontsize=15)
fig.savefig("images/All_Tweets_2014-2020.png")
# creating a column to filter the online storm period (from 15 and 18 March)
def make_onlinestorm_field():
    """Flag tweets posted during the online storm (after 15 March 2020).

    Adds a boolean 'onlinestorm' column to the global `tweets` frame.
    """
    # Vectorized comparison instead of the original per-row iterrows() loop,
    # which re-parsed each timestamp individually.
    cutoff = pd.Timestamp(date(2020, 3, 15))
    tweets['onlinestorm'] = pd.to_datetime(tweets['datetime']) > cutoff
make_onlinestorm_field()
# counting tweets during the three days online storm
print('In three days, tweets went over {}, all around the world.'.format(tweets[tweets['onlinestorm']]['onlinestorm'].count()))
# NOTE(review): the bare expression below only renders in a notebook; it has
# no effect when run as a script.
tweets[tweets['onlinestorm']]
# Let's now have a look at the distribution of the tweets, by the hour, during the online storm.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets[tweets['onlinestorm'] == True].set_index("datetime").groupby(pd.Grouper(freq='H')).onlinestorm.count())
plt.title('Tweets per hour from 15 to 18 March 2020', fontsize=20)
plt.xlabel('Time (hours)', fontsize=15)
plt.ylabel('No. Tweets', fontsize=15)
fig.savefig("images/All_Tweets_Onlinestorm.png")
# It is time to have a first look at the content of the tweets and do some descriptive statistics.
# For now, I will focus only on features like hastags, mentions, urls, capital words and words in general.
# A function to count tweets based on regular expressions
def count_tweets(reg_expression, tweet):
    """Return how many matches of `reg_expression` occur in `tweet`."""
    return len(re.findall(reg_expression, tweet))
# Creating a dictionary to hold these counts
content_count = {
    'words' : tweets['text'].apply(lambda x: count_tweets(r'\w+', x)),
    'mentions' : tweets['text'].apply(lambda x: count_tweets(r'@\w+', x)),
    'hashtags' : tweets['text'].apply(lambda x: count_tweets(r'#\w+', x)),
    'urls' : tweets['text'].apply(lambda x: count_tweets(r'http.?://[^\s]+[\s]?', x)),
}
df = pd.concat([tweets, pd.DataFrame(content_count)], axis=1)
# Tweets descriptive statistics
# Display descriptive statistics for words, mentions,
# hashtags and urls
for key in content_count.keys():
    print()
    print('Descriptive statistics for {}'.format(key))
    print(df.groupby('onlinestorm')[key].describe())
# Now plot each count's distribution, split by the online-storm flag
for key in content_count.keys():
    bins = np.arange(df[key].min(), df[key].max() + 1)
    g = sns.FacetGrid(df, col='onlinestorm', height=5, hue='onlinestorm', palette="RdYlGn")
    g = g.map(sns.distplot, key, kde=False, norm_hist=True, bins=bins)
    plt.savefig('images/Descriptive_stats_for_' + key + '.png')
# Step 2: TEXT PROCESSING
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import pos_tag
# I am adding my own stopwords list to the NLTK list.
# This way we can drop words that are irrelevant for text processing
MY_STOPWORDS = ['curevac','vaccine','german','mrna','biotech','cancer', 'lilly','eli','ag','etherna_immuno', 'translatebio', 'mooreorless62','boehringer', 'ingelheim','biopharmaceutical', 'company']
STOPLIST = set(stopwords.words('english') + list(MY_STOPWORDS))
# Punctuation plus extra tokenizer artifacts to filter out of tweets.
SYMBOLS = " ".join(string.punctuation).split(" ") + ["-", "...", "”", "``", ",", ".", ":", "''","#","@"]
# The NLTK lemmatizer and stemmer classes
# NOTE(review): `stemmer` is never used in the visible pipeline — confirm
# before removing.
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer('english')
# read english selected tweets, no duplicates
tweets = pd.read_csv('input/tweets_en.csv')
# I use the POS tagging from NLTK to retain only adjectives, verbs, adverbs
# and nouns as a base for for lemmatization.
def get_lemmas(tweet):
    """Lemmatize `tweet`, keeping only adjectives, adverbs, verbs and nouns.

    Uses NLTK POS tags to pick the right WordNet part of speech for the
    module-level `lemmatizer`; unknown tags fall back to noun.

    Args:
        tweet (str): raw tweet text.

    Returns:
        list of str: lower-cased lemmas.
    """
    # A dictionary to help convert Treebank tags to WordNet
    treebank2wordnet = {'NN': 'n', 'JJ': 'a', 'VB': 'v', 'RB': 'r'}
    lemmas_list = []
    for word, tag in pos_tag(word_tokenize(tweet)):
        # str.startswith accepts a tuple of prefixes.
        if tag.startswith(("JJ", "RB", "VB", "NN")):
            # dict.get replaces the original bare `except:` fallback, which
            # could mask unrelated errors.
            postag = treebank2wordnet.get(tag[:2], 'n')
            lemmas_list.append(lemmatizer.lemmatize(word.lower(), postag))
    return lemmas_list
# We will now pre-process the tweets, following a pipeline of tokenization,
# filtering, case normalization and lemma extraction.
# This is the function to clean and filter the tokens in each tweet
def clean_tweet(tokens):
    """Filter tweet tokens, dropping stopwords, symbols, urls and
    tokens containing slashes or hyphens."""
    filtered = []
    for token in tokens:
        # Guard clauses replace the original nested-if pyramid.
        if not re.search('[a-zA-Z]', token):
            continue
        if token in STOPLIST:
            continue
        if token[0] in SYMBOLS:
            continue
        if token.startswith('http'):
            continue
        if '/' in token or '-' in token:
            continue
        filtered.append(token)
    return filtered
# Prior to lemmatization, I apply POS (part-of-speech) tagging to make sure that only the
# adjectives, verbs, adverbs and nouns are retained.
# Starts the lemmatization process
def get_lemmatized(tweet):
    """Lemmatize, filter and re-join *tweet* into one cleaned, space-separated string."""
    # get_lemmas already returns a list, so the original copy via a
    # pass-through comprehension (and the dead pre-initializations of
    # tokens/filtered/all_tokens_string) added nothing and is removed.
    tokens = get_lemmas(tweet)
    # Drop stopwords, URLs, symbols, slashed/hyphenated tokens.
    filtered = clean_tweet(tokens)
    # Re-join the surviving tokens into a single string.
    return ' '.join(filtered)
# Build the "edited" column holding each tweet's lemmatized, filtered text.
# Tweets that end up empty after cleaning are set to None so they can be
# dropped just below.
edited = ''
for i, row in tweets.iterrows():
    edited = get_lemmatized(tweets.loc[i]['text'])
    if len(edited) > 0:
        tweets.at[i,'edited'] = edited
    else:
        tweets.at[i,'edited'] = None
# After lemmatization, some tweets may end up with the same words.
# Drop those duplicates and the tweets whose edited text is None.
tweets.drop_duplicates(subset=['edited'], inplace=True)
tweets.dropna(inplace=True)
# With these text processing steps, and the removal of duplicates,
# the final sample counts 5,508 English-language tweets,
# with an average of 30 words (SD 12.5, ranging from 4 to 61 words).
# Using apply/lambda to create a new column with the number of words in each tweet
tweets['word_count'] = tweets.apply(lambda x: len(x['text'].split()),axis=1)
t = pd.DataFrame(tweets['word_count'].describe()).T
tweets.head()
# Step 3: SENTIMENT ANALYSIS
# Let us import the VADER analyser.
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# The time-series analysis below requires chronologically ordered tweets.
tweets['datetime']=pd.to_datetime(tweets['datetime'])
tweets.sort_values('datetime', inplace=True, ascending=True)
tweets = tweets.reset_index(drop=True)
# Adds the boolean "onlinestorm" column that marks the storm period
# (helper defined earlier in this script).
make_onlinestorm_field()
# To avoid repetitions in our code, here are some plotting functions
# that will be called often ...
def plot_sentiment_period(df, info):
    """Plot mean sentiment per calendar period and save the figure.

    *info* supplies the resampling period ('period'), the columns to plot
    ('cols'), the title, axis labels, and the output file name ('fname').
    """
    # Average each sentiment column over the requested calendar period.
    grouped = df.groupby(df['datetime'].dt.to_period(info['period'])).mean()
    grouped.reset_index(inplace=True)
    grouped['datetime'] = pd.PeriodIndex(grouped['datetime']).to_timestamp()
    series = pd.DataFrame(grouped, grouped.index, info['cols'])
    plt.figure(figsize=(15, 10))
    sns.lineplot(data=series, linewidth=3, dashes=False)
    plt.legend(loc='best', fontsize=15)
    plt.title(info['title'], fontsize=20)
    plt.xlabel(info['xlab'], fontsize=15)
    plt.ylabel(info['ylab'], fontsize=15)
    plt.tight_layout()
    plt.savefig('images/' + info['fname'])
    return
def plot_fractions(props, title, fname):
    """Bar-plot the given proportions and save to images/<fname>.png."""
    axes = props.plot(kind='bar', stacked=False, figsize=(16,5), colormap='Spectral')
    plt.legend(bbox_to_anchor=(1.005, 1), loc=2, borderaxespad=0.)
    plt.xlabel('Online storm', fontweight='bold', fontsize=18)
    plt.xticks(rotation=0, fontsize=14)
    plt.ylabel('Fraction of Tweets', fontweight='bold', fontsize=18)
    axes.set_title(label=title, fontweight='bold', size=20)
    plt.tight_layout()
    plt.savefig('images/' + fname + '.png')
    return
def plot_frequency_chart(info):
    """Draw a bar chart described by *info* (keys: data, x, y, pal, title,
    xlab, ylab, angle, fname) and save it under images/."""
    fig, axis = plt.subplots(figsize=(14, 8))
    sns.set_context("notebook", font_scale=1)
    axis = sns.barplot(x=info['x'], y=info['y'], data=info['data'], palette=(info['pal']))
    axis.set_title(label=info['title'], fontweight='bold', size=18)
    plt.ylabel(info['ylab'], fontsize=16)
    plt.xlabel(info['xlab'], fontsize=16)
    plt.xticks(rotation=info['angle'], fontsize=14)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    plt.savefig('images/' + info['fname'])
    return
# VADER analyzer (lexicon/rule based, designed for social-media text).
analyzer = SentimentIntensityAnalyzer()
# One compound sentiment score per cleaned tweet.
tweets['sentiment_intensity'] = [analyzer.polarity_scores(v)['compound'] for v in tweets['edited']]
# This function returns the sentiment category
def get_sentiment(intensity):
    """Map a VADER compound score to a sentiment label.

    Scores >= 0.05 are 'Positive', scores below -0.05 are 'Negative',
    and everything in between is 'Neutral' (the standard VADER cut-offs).
    """
    if intensity >= 0.05:
        return 'Positive'
    if intensity >= -0.05:
        return 'Neutral'
    return 'Negative'
# Label every tweet from its compound score via apply/lambda.
tweets['sentiment'] = tweets.apply(lambda x: get_sentiment(x['sentiment_intensity']),axis=1)
# The next plot gives us a clear image of the “explosion” of contradictory sentiments in this period:
df=tweets.loc[:,['datetime','sentiment_intensity']]
# Restrict to the online-storm window (12-18 March 2020).
df.set_index('datetime',inplace=True)
df=df[(df.index>='2020-03-12') & (df.index<'2020-03-18')]
df.plot(figsize=(12,6));
plt.ylabel('Compoud score', fontsize=15)
plt.xlabel('Tweets', fontsize=15)
plt.legend().set_visible(False)
plt.title('Sentiment on tweets with CureVac (12 March to 18 March)', fontsize=20)
plt.tight_layout()
sns.despine(top=True)
plt.savefig('images/Sentiment_during_onlinestorm.png')
plt.show()
# Compare sentiment before vs during the online storm; value_counts is
# normalized so the two periods are comparable despite different tweet counts.
props = tweets.groupby('onlinestorm')['sentiment'].value_counts(normalize=True).unstack()
plot_fractions(props,'Percentage of sentiments before and during the online storm',
               'Fraction_sentiments_before_and_during_onlinestorm')
# Step 4: Word frequency
# We need these imports for the wordcloud representation:
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from matplotlib.colors import makeMappingArray
from palettable.colorbrewer.diverging import Spectral_4
from collections import Counter # Counts the most common items in a list
def display_wordcloud(tokens, title, fname):
    """Render a word cloud of *tokens* (upper-cased) inside the cloud-mask image
    and save it as images/<fname>.png."""
    upper_tokens = [t.upper() for t in tokens]
    mask = np.array(Image.open("images/cloud_mask.png"))
    cloud = WordCloud(max_font_size=100,
                      max_words=50, width=2500,
                      height=1750, mask=mask,
                      background_color="white").generate(" ".join(upper_tokens))
    plt.figure()
    fig, ax = plt.subplots(figsize=(14, 8))
    plt.title(title, fontsize=20)
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis("off")
    plt.savefig('images/'+ fname + '.png')
    plt.show()
    return
def join_edited_string(edited_tweets):
    """Concatenate the edited tweets into one string, each prefixed by a space.

    Replaces the original quadratic `s = s + ' ' + row` loop with a single
    O(n) join. The leading space of the original output is preserved (every
    tweet, including the first, is prefixed with ' '), so downstream
    `.split(' ')` behaves exactly as before.
    """
    return ''.join(' ' + tweet for tweet in edited_tweets)
def get_trigrams(trigrams, top_grams):
    """Return a DataFrame of the *top_grams* most common trigrams and counts.

    Args:
        trigrams: iterable of 3-tuples of tokens (e.g. from nltk.trigrams).
        top_grams: how many of the most common trigrams to keep. (The
            original ignored this parameter in favour of a hard-coded 10;
            all existing callers pass 10, so honouring it is compatible.)

    Returns:
        pd.DataFrame with columns ['Grams', 'Count'].
    """
    data = []
    gram_counter = Counter(trigrams)
    for gram_tuple, count in gram_counter.most_common(top_grams):
        # Join the three tokens into one display string; the trailing
        # space matches the original accumulation format.
        gram = ''.join(token + ' ' for token in gram_tuple)
        row = [gram, count]
        print(row)
        # BUG FIX: the row was previously appended once per token inside
        # the inner loop, producing 4-element rows that made
        # pd.DataFrame(..., columns=['Grams', 'Count']) raise ValueError.
        data.append(row)
    return pd.DataFrame(data, columns=['Grams', 'Count'])
# Let’s have a look at the 20 most frequent words in tweets before the online storm.
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# Word frequency BEFORE the online storm (top 20 tokens).
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
        'xlab': 'Count', 'ylab': 'Words', 'pal':'viridis',
        'title': 'Most frequent words before online storm',
        'fname':'word_frequency_before_onlinestorm.png',
        'angle': 90}
plot_frequency_chart(info)
# plot trigram frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
        'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'viridis',
        'title': 'Most frequent trigrams before online storm',
        'fname':'trigrams_frequency_before_onlinestorm.png',
        'angle': 40}
plot_frequency_chart(info)
# And the wordcloud ...
display_wordcloud(tokens, 'Wordcloud of most frequent words before online storm',
                  'WordCloud_before_onlinestorm')
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# Word frequency DURING the online storm (top 20 tokens).
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
        'xlab': 'Count', 'ylab': 'Words', 'pal':'inferno',
        'title': 'Most frequent words during online storm',
        'fname':'word_frequency_during_onlinestorm.png',
        'angle': 90}
plot_frequency_chart(info)
# In[139]:
# plot trigrams frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
        'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'inferno',
        'title': 'Most frequent trigrams during online storm',
        'fname':'trigrams_frequency_during_onlinestorm.png',
        'angle': 40}
plot_frequency_chart(info)
# In[140]:
display_wordcloud(tokens, 'Wordcloud of most frequent words during online storm',
                  'WordCloud_during_onlinestorm')
# Step 5: LDA topics extraction
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
# I am using here Susan Li's functions to get the top words from a topic:
def get_keys(topic_matrix):
    """Return, for each row of *topic_matrix*, the index of its dominant topic
    (the column with the largest value), as a plain Python list."""
    return topic_matrix.argmax(axis=1).tolist()
def keys_to_counts(keys):
    """Return ([topic categories], [their frequencies]) for a list of topic keys.

    Categories appear in first-seen order (Counter preserves insertion order).
    """
    count_pairs = Counter(keys).items()
    categories = [topic for topic, _ in count_pairs]
    counts = [freq for _, freq in count_pairs]
    return (categories, counts)
def get_top_n_words(n, n_topics, keys, document_term_matrix, tfidf_vectorizer):
    '''
    Return a list of n_topics strings; string i holds the n most frequent
    words (comma-separated, heaviest first) among the documents whose
    predicted topic is i.

    Args:
        n: number of top words to extract per topic.
        n_topics: total number of topics.
        keys: predicted topic index per document (see get_keys).
        document_term_matrix: sparse document-term matrix, rows aligned with keys.
        tfidf_vectorizer: fitted vectorizer used to map term indices back to words.
    '''
    top_word_indices = []
    for topic in range(n_topics):
        # Sum the term vectors of every document assigned to this topic.
        # NOTE(review): if a topic has no documents, temp_vector_sum stays
        # the int 0 and .toarray() below raises — confirm callers always
        # produce non-empty topics.
        temp_vector_sum = 0
        for i in range(len(keys)):
            if keys[i] == topic:
                temp_vector_sum += document_term_matrix[i]
        temp_vector_sum = temp_vector_sum.toarray()
        # Indices of the n largest summed term weights, in descending order.
        top_n_word_indices = np.flip(np.argsort(temp_vector_sum)[0][-n:],0)
        top_word_indices.append(top_n_word_indices)
    top_words = []
    for topic in top_word_indices:
        topic_words = []
        for index in topic:
            # Build a one-hot vector for this term; inverse_transform maps
            # it back to the vocabulary word.
            temp_word_vector = np.zeros((1,document_term_matrix.shape[1]))
            temp_word_vector[:, index] = 1
            the_word = tfidf_vectorizer.inverse_transform(temp_word_vector)[0][0]
            try:
                # Silently skip words that are not pure ASCII
                # (encode('ascii') raises for them).
                topic_words.append(the_word.encode('ascii').decode('utf-8'))
            except:
                pass
        top_words.append(", ".join(topic_words))
    return top_words
# And here is a function for topics extraction using LDA, in which I produce a dataframe
# with the topics and their top words to facilitate the plotting that follows.
# LDA topics
def get_topics(edited, n_topics, n_words):
    """Fit an LDA model on the edited tweets and return a topics DataFrame.

    Each row holds a topic label (including its *n_words* top words) and the
    number of tweets whose dominant topic it is.
    """
    documents = edited.values
    vectorizer = TfidfVectorizer(use_idf=True, smooth_idf=True)
    term_matrix = vectorizer.fit_transform(documents)
    lda = LatentDirichletAllocation(n_components=n_topics)
    topic_matrix = lda.fit_transform(term_matrix)
    keys = get_keys(topic_matrix)
    categories, counts = keys_to_counts(keys)
    top_n_words = get_top_n_words(n_words, n_topics, keys, term_matrix, vectorizer)
    topics = ['Topic {}: \n'.format(i + 1) + top_n_words[i] for i in categories]
    # One [label, count] row per topic, in category order.
    data = [[topic, counts[i]] for i, topic in enumerate(topics)]
    return pd.DataFrame(data, columns=['Topics', 'Count'])
# Topics before the online storm
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# Extract 5 LDA topics with 5 top words each.
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
        'xlab': 'Topics', 'ylab': 'Count', 'pal':'viridis',
        'title': 'LDA Topics before Online Storm',
        'fname':'LDA_Topics_before_onlinestorm.png',
        'angle': 40}
plot_frequency_chart(info)
# Topics during the online storm
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# Same LDA extraction for the storm period.
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
        'xlab': 'Topics', 'ylab': 'Count', 'pal':'inferno',
        'title': 'Main Topics during Online Storm',
        'fname':'LDA_Topics_during_onlinestorm.png',
        'angle': 40}
plot_frequency_chart(info)
# Step 6: Emotion analysis
import termcolor
import sys
from termcolor import colored, cprint
plt.style.use('fivethirtyeight')
# NRC (NCR) emotion lexicon: one row per English word with emotion columns.
ncr = pd.read_csv('input/NCR-lexicon.csv', sep =';')
# The eight emotion categories scored by the lexicon.
emotions = ['Anger', 'Anticipation','Disgust','Fear', 'Joy','Sadness', 'Surprise', 'Trust']
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# Index <-> word lookup tables used when counting emotion-bearing words.
unique_words = set(tokens)
word_to_ind = dict((word, i) for i, word in enumerate(unique_words))
ind_to_word = dict((i, word) for i, word in enumerate(unique_words))
def plot_emotions_period(df, cols, title, fname, period = 'h' ):
    """Plot the mean of the emotion columns *cols* per *period* (default hourly)
    and save the figure as images/<fname>.png."""
    means = df.groupby(df['datetime'].dt.to_period(period)).mean()
    means.reset_index(inplace=True)
    means['datetime'] = pd.PeriodIndex(means['datetime']).to_timestamp()
    series = pd.DataFrame(means, means.index, cols)
    plt.figure(figsize=(15, 10))
    sns.lineplot(data=series, linewidth=3, dashes=False)
    plt.legend(loc='best', fontsize=15)
    plt.title(title, fontsize=20)
    plt.xlabel('Time (hours)', fontsize=15)
    plt.ylabel('Z-scored Emotions', fontsize=15)
    plt.savefig('images/'+ fname + '.png')
    return
def get_tweet_emotions(df, emotions, col):
    """Score every tweet in *df* against the NRC emotion lexicon.

    Args:
        df: tweets DataFrame; must contain *col* plus 'sentiment' and
            'sentiment_intensity' (both are dropped from the working copy).
        emotions: list of NRC emotion column names to score.
        col: name of the text column to analyse.

    Returns:
        (df_tweets, list_emotion_counts): the tweets with one added count
        column per emotion, and a list (one entry per emotion) of dicts
        mapping word index -> how often that word triggered the emotion.

    Relies on module-level globals: ncr (the lexicon) and word_to_ind.
    """
    df_tweets = df.copy()
    df_tweets.drop(['sentiment','sentiment_intensity'], axis=1, inplace=True)
    emo_info = {'emotion':'' , 'emo_frq': defaultdict(int) }
    list_emotion_counts = []
    # One frequency dict per emotion, recording which words contributed.
    for emotion in emotions:
        emo_info = {}
        emo_info['emotion'] = emotion
        emo_info['emo_frq'] = defaultdict(int)
        list_emotion_counts.append(emo_info)
    # Zero matrix (tweets x emotions) accumulating per-tweet emotion counts.
    df_emotions = pd.DataFrame(0, index=df.index, columns=emotions)
    # The NRC lexicon is keyed on stems, so stem each word before lookup.
    stemmer = SnowballStemmer("english")
    # iterating in the tweets data set
    for i, row in df_tweets.iterrows(): # for each tweet ...
        tweet = word_tokenize(df_tweets.loc[i][col])
        for word in tweet: # for each word ...
            word_stemmed = stemmer.stem(word.lower())
            # check if the word is in NRC
            result = ncr[ncr.English == word_stemmed]
            # we have a match
            if not result.empty:
                # NOTE(review): result[emotion] is a Series; += into a
                # .at scalar cell relies on pandas extracting its single
                # value — confirm the lexicon has one row per stem.
                for idx, emotion in enumerate(emotions):
                    df_emotions.at[i, emotion] += result[emotion]
                    # update the frequencies dictionary list
                    if result[emotion].any():
                        try:
                            list_emotion_counts[idx]['emo_frq'][word_to_ind[word]] += 1
                        except:
                            # word not present in word_to_ind — skip it
                            continue
    # append the emotions matrix to the tweets data set
    df_tweets = pd.concat([df_tweets, df_emotions], axis=1)
    return df_tweets, list_emotion_counts
# Create a list of words to highlight
def get_words(word_list, emotions):
    """Return the positions in *word_list* whose stemmed form is associated in
    the NRC lexicon with any of *emotions* (a position is appended once per
    matching emotion, as in the original)."""
    matched_positions = []
    for position, raw_word in enumerate(word_list):
        stemmed = stemmer.stem(raw_word.lower())
        nrc_rows = ncr[ncr.English == stemmed]
        if nrc_rows.empty:
            continue
        for emotion in emotions:
            if nrc_rows[emotion].any() > 0:
                matched_positions.append(position)
    return matched_positions
def get_top_emotion_words(word_counts, n = 5):
    """Return (words, top_words, top_words_idx) for a word-frequency dict.

    *word_counts* maps word index -> frequency. *words* is a (len, 2) int
    array of [word_index, frequency] rows; *top_words* lists the word
    indices of the *n* highest frequencies, most frequent first;
    *top_words_idx* holds their row positions in *words*.
    """
    words = np.zeros((len(word_counts), 2), dtype=int)
    for row, word_idx in enumerate(word_counts):
        words[row][0] = word_idx
        words[row][1] = word_counts[word_idx]
    # Row positions of the n largest frequencies, in descending order.
    top_words_idx = np.flip(np.argsort(words[:, 1])[-n:], 0)
    top_words = [words[pos][0] for pos in top_words_idx]
    return words, top_words, top_words_idx
# This is now the function to display and highlight
# the words associated to specific emotions
def print_colored_emotions(tweets, emotions, color, on_color):
    """Print each tweet word by word, highlighting (via termcolor) the words
    that the NRC lexicon associates with any of *emotions*."""
    for tweet in tweets:
        tokens = word_tokenize(tweet)
        highlight_idx = get_words(tokens, emotions)
        for position, token in enumerate(tokens):
            if position in highlight_idx:
                token = colored(token, color=color, on_color=on_color)
            print(token, end='')
            print(' ', end='')
        print('\n')
    return
# Connecting words to emotions
# We are using the NCR lexicon to associate words to emotions
# Be patient, this will take some time ...
df_emo, list_emotion_counts = get_tweet_emotions(tweets, emotions, 'edited')
# Preparing for time series
df_emo['datetime']= pd.to_datetime(df_emo['datetime'])
# For a better understanding of the word-emotions associations,
# I produce here the plots showing what are the 10 words
# that contributed the most for each of the 8 emotions.
# Plotting the 10 words that contribute the most for each of the 8 emotions
fig, axs = plt.subplots(figsize=(15, 25), frameon=False)
plt.box(False)
plt.axis('off')
plt.subplots_adjust(hspace = 1.6)
counter = 0
for i, emotion in enumerate(emotions): # for each emotion
    # Top 10 contributing words for this emotion (indices + frequencies).
    words, top_words, top_words_indices = get_top_emotion_words(list_emotion_counts[i]['emo_frq'], 10)
    info = {'values' : [words[ind][1] for ind in top_words_indices],
            'labels' : [ind_to_word[word] for word in top_words]}
    sns.set(style="whitegrid")
    sns.set_context("notebook", font_scale=1.25)
    ax = fig.add_subplot(4, 2, counter+1) # plot 2 charts in each of the 4 rows
    sns.despine()
    ax = sns.barplot(x='labels', y='values', data=info, palette=("cividis"))
    plt.ylabel('Top words', fontsize=12)
    ax.set_title(label=str('Emotion: ') + emotion, fontweight='bold', size=13)
    plt.xticks(rotation=45, fontsize=14)
    counter += 1
axs.set_title(label='\nTop 10 words for each emotion\n',
              fontweight='bold', size=20, pad=40)
plt.tight_layout()
plt.savefig('images/Top10_words_per_emotion.png')
# Aggregate the four negative and four positive emotions per tweet.
df_emo['neg_emotions'] = df_emo['Sadness'] + df_emo['Fear'] + df_emo['Disgust'] + df_emo['Anger']
df_emo['pos_emotions'] = df_emo['Joy'] + df_emo['Anticipation'] + df_emo['Trust'] + df_emo['Surprise']
# Boolean flags: does the tweet carry any negative / positive emotion at all?
df_emo['total_neg_emotions'] = df_emo['neg_emotions'].apply(lambda x: x > 0)
df_emo['total_pos_emotions'] = df_emo['pos_emotions'].apply(lambda x: x > 0)
# Normalized value_counts so the proportions account for the different
# number of tweets before vs during the online storm.
props = df_emo.groupby('onlinestorm')['total_neg_emotions'].value_counts(normalize=True).unstack()
props
# plot it
plot_fractions(props,'Percentage of tweets with negative emotions','Percentage_of_Tweets_with_negative_emotions')
props = df_emo.groupby('onlinestorm')['total_pos_emotions'].value_counts(normalize=True).unstack()
props
plot_fractions(props,'Percentage of tweets with positive emotions','Percentage_of_Tweets_with_positive_emotions')
# Print example tweets with strongly negative emotion words highlighted ...
df = df_emo[df_emo['Sadness'] > 3]
print_colored_emotions(df['text'], ['Disgust','Sadness','Anger','Fear'], 'white', 'on_red')
# ... and some strongly positive ones.
df = df_emo[df_emo['Anticipation'] > 4]
print_colored_emotions(df['text'], ['Joy','Trust','Anticipation'], 'white', 'on_green')
# Proportion of emotions in relation to number of tweets, before and during the online storm
df1 = df_emo.groupby(df_emo['onlinestorm'])[emotions].apply(lambda x:( x.sum()/x.count())*100)
df1.index = ['before_onlinestorm', 'during_onlinestorm']
df1.head()
# Transpose so emotions become rows for the grouped bar chart.
df_ =df1.T
df_.reset_index()
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.set_title(label='Comparing percentage of emotion-related words before and during online storm\n',
             fontweight='bold', size=18)
df_.reset_index().plot(
    x="index", y=["before_onlinestorm", "during_onlinestorm"], kind="bar", ax=ax
)
plt.xlabel("Emotions",fontsize = 16)
plt.ylabel("Percentage of emotion-related words",fontsize = 16)
plt.xticks(rotation=45,fontsize=14)
plt.tight_layout()
plt.savefig('images/Percentage_emotions_before_and_during_onlinestorm.png')
# Z-score the emotion columns within each period so they are comparable.
df_zscore = df_emo.groupby(df_emo['onlinestorm'])[emotions].apply(lambda x:(x - x.mean()) / x.std())
df_emo = pd.concat([df_emo[['datetime','text','edited', 'onlinestorm']], df_zscore], axis=1)
df_emo.head()
plot_emotions_period(df_emo[df_emo['onlinestorm']], emotions,
                     'Emotions time series during online storm','Timeseries_Emotions_OnlineStorm')
# Plotting emotions during online storm
fig, axs = plt.subplots(figsize=(15, 25), frameon=False)
plt.box(False)
plt.axis('off')
plt.subplots_adjust(hspace = 1.6)
counter = 0
df = df_emo[df_emo['onlinestorm']]
# Hourly means of the (z-scored) emotion columns during the storm.
df1 = df.groupby(df['datetime'].dt.to_period('h')).mean()
df1.reset_index(inplace=True)
df1['datetime'] = pd.PeriodIndex(df1['datetime']).to_timestamp()
for i, emotion in enumerate(emotions): # for each emotion
    emo = []
    emo.append(emotion)
    plot_df = pd.DataFrame(df1, df1.index, emo)
    sns.set(style="whitegrid")
    sns.set_context("notebook", font_scale=1.25)
    ax = fig.add_subplot(4, 2, counter+1) # plot 2 charts in each of the 4 rows
    sns.despine()
    ax = sns.lineplot(data=plot_df, linewidth = 3,dashes = False)
    plt.ylabel('Time by the hour', fontsize=12)
    ax.set_title(label=str('Emotion: ') + emotion, fontweight='bold', size=13)
    counter += 1
axs.set_title(label='\nPlot for each emotion during online storm\n',
              fontweight='bold', size=20, pad=40)
plt.tight_layout()
plt.savefig('images/Emotions_during_onlinestorm.png')
# Another way of looking at it is by plotting contrasts of emotions, like joy and sadness ...
plot_emotions_period(df_emo[df_emo['onlinestorm']], ['Joy', 'Sadness'],
                     'Joy and Sadness time series during online storm','Joy_Sadness_Emotions_OnlineStorm')
# And now trust and fear ...
plot_emotions_period(df_emo[df_emo['onlinestorm']], ['Trust', 'Fear'],
                     'Trust and Fear time series during online storm','Trust_Fear_Emotions_OnlineStorm')
| [
"matplotlib.pyplot.title",
"seaborn.lineplot",
"pandas.read_csv",
"sklearn.feature_extraction.text.TfidfVectorizer",
"wordcloud.WordCloud",
"matplotlib.pyplot.box",
"collections.defaultdict",
"numpy.argsort",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.use",
"sklearn.decomposition.Laten... | [((861, 892), 'pandas.read_csv', 'pd.read_csv', (['"""input/tweets.csv"""'], {}), "('input/tweets.csv')\n", (872, 892), True, 'import pandas as pd\n'), ((970, 1004), 'pandas.to_datetime', 'pd.to_datetime', (["tweets['datetime']"], {}), "(tweets['datetime'])\n", (984, 1004), True, 'import pandas as pd\n'), ((1082, 1110), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (1092, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1267), 'matplotlib.pyplot.title', 'plt.title', (['"""Tweets with "CureVac" from 2014 to 2020"""'], {'fontsize': '(20)'}), '(\'Tweets with "CureVac" from 2014 to 2020\', fontsize=20)\n', (1211, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years"""'], {'fontsize': '(15)'}), "('Years', fontsize=15)\n", (1278, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1334), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tweets"""'], {'fontsize': '(15)'}), "('Tweets', fontsize=15)\n", (1311, 1334), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2107), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2089, 2107), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2308), 'matplotlib.pyplot.title', 'plt.title', (['"""Tweets per hour from 15 to 18 March 2020"""'], {'fontsize': '(20)'}), "('Tweets per hour from 15 to 18 March 2020', fontsize=20)\n", (2251, 2308), True, 'import matplotlib.pyplot as plt\n'), ((2309, 2348), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (hours)"""'], {'fontsize': '(15)'}), "('Time (hours)', fontsize=15)\n", (2319, 2348), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2386), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""No. Tweets"""'], {'fontsize': '(15)'}), "('No. 
Tweets', fontsize=15)\n", (2359, 2386), True, 'import matplotlib.pyplot as plt\n'), ((4667, 4686), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (4684, 4686), False, 'from nltk.stem import WordNetLemmatizer\n'), ((4697, 4723), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (4712, 4723), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((4780, 4814), 'pandas.read_csv', 'pd.read_csv', (['"""input/tweets_en.csv"""'], {}), "('input/tweets_en.csv')\n", (4791, 4814), True, 'import pandas as pd\n'), ((7964, 7998), 'pandas.to_datetime', 'pd.to_datetime', (["tweets['datetime']"], {}), "(tweets['datetime'])\n", (7978, 7998), True, 'import pandas as pd\n'), ((10067, 10095), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (10093, 10095), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n'), ((10942, 10982), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Compoud score"""'], {'fontsize': '(15)'}), "('Compoud score', fontsize=15)\n", (10952, 10982), True, 'import matplotlib.pyplot as plt\n'), ((10983, 11016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tweets"""'], {'fontsize': '(15)'}), "('Tweets', fontsize=15)\n", (10993, 11016), True, 'import matplotlib.pyplot as plt\n'), ((11049, 11134), 'matplotlib.pyplot.title', 'plt.title', (['"""Sentiment on tweets with CureVac (12 March to 18 March)"""'], {'fontsize': '(20)'}), "('Sentiment on tweets with CureVac (12 March to 18 March)',\n fontsize=20)\n", (11058, 11134), True, 'import matplotlib.pyplot as plt\n'), ((11131, 11149), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11147, 11149), True, 'import matplotlib.pyplot as plt\n'), ((11150, 11171), 'seaborn.despine', 'sns.despine', ([], {'top': '(True)'}), '(top=True)\n', (11161, 11171), True, 'import seaborn as sns\n'), ((11172, 11226), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""images/Sentiment_during_onlinestorm.png"""'], {}), "('images/Sentiment_during_onlinestorm.png')\n", (11183, 11226), True, 'import matplotlib.pyplot as plt\n'), ((11230, 11240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11238, 11240), True, 'import matplotlib.pyplot as plt\n'), ((13785, 13806), 'nltk.trigrams', 'nltk.trigrams', (['tokens'], {}), '(tokens)\n', (13798, 13806), False, 'import nltk\n'), ((13866, 13881), 'collections.Counter', 'Counter', (['tokens'], {}), '(tokens)\n', (13873, 13881), False, 'from collections import Counter\n'), ((15019, 15040), 'nltk.trigrams', 'nltk.trigrams', (['tokens'], {}), '(tokens)\n', (15032, 15040), False, 'import nltk\n'), ((15099, 15114), 'collections.Counter', 'Counter', (['tokens'], {}), '(tokens)\n', (15106, 15114), False, 'from collections import Counter\n'), ((19872, 19904), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (19885, 19904), True, 'import matplotlib.pyplot as plt\n'), ((19954, 19999), 'pandas.read_csv', 'pd.read_csv', (['"""input/NCR-lexicon.csv"""'], {'sep': '""";"""'}), "('input/NCR-lexicon.csv', sep=';')\n", (19965, 19999), True, 'import pandas as pd\n'), ((24875, 24909), 'pandas.to_datetime', 'pd.to_datetime', (["df_emo['datetime']"], {}), "(df_emo['datetime'])\n", (24889, 24909), True, 'import pandas as pd\n'), ((25180, 25225), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 25)', 'frameon': '(False)'}), '(figsize=(15, 25), frameon=False)\n', (25192, 25225), True, 'import matplotlib.pyplot as plt\n'), ((25227, 25241), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (25234, 25241), True, 'import matplotlib.pyplot as plt\n'), ((25242, 25257), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (25250, 25257), True, 'import matplotlib.pyplot as plt\n'), ((25258, 25289), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1.6)'}), 
'(hspace=1.6)\n', (25277, 25289), True, 'import matplotlib.pyplot as plt\n'), ((26212, 26230), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26228, 26230), True, 'import matplotlib.pyplot as plt\n'), ((26231, 26280), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/Top10_words_per_emotion.png"""'], {}), "('images/Top10_words_per_emotion.png')\n", (26242, 26280), True, 'import matplotlib.pyplot as plt\n'), ((27996, 28031), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (28008, 28031), True, 'import matplotlib.pyplot as plt\n'), ((28274, 28309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Emotions"""'], {'fontsize': '(16)'}), "('Emotions', fontsize=16)\n", (28284, 28309), True, 'import matplotlib.pyplot as plt\n'), ((28311, 28373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of emotion-related words"""'], {'fontsize': '(16)'}), "('Percentage of emotion-related words', fontsize=16)\n", (28321, 28373), True, 'import matplotlib.pyplot as plt\n'), ((28375, 28411), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'fontsize': '(14)'}), '(rotation=45, fontsize=14)\n', (28385, 28411), True, 'import matplotlib.pyplot as plt\n'), ((28411, 28429), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28427, 28429), True, 'import matplotlib.pyplot as plt\n'), ((28430, 28505), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/Percentage_emotions_before_and_during_onlinestorm.png"""'], {}), "('images/Percentage_emotions_before_and_during_onlinestorm.png')\n", (28441, 28505), True, 'import matplotlib.pyplot as plt\n'), ((28654, 28744), 'pandas.concat', 'pd.concat', (["[df_emo[['datetime', 'text', 'edited', 'onlinestorm']], df_zscore]"], {'axis': '(1)'}), "([df_emo[['datetime', 'text', 'edited', 'onlinestorm']], df_zscore\n ], axis=1)\n", (28663, 28744), True, 'import pandas as pd\n'), ((28966, 29011), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 25)', 'frameon': '(False)'}), '(figsize=(15, 25), frameon=False)\n', (28978, 29011), True, 'import matplotlib.pyplot as plt\n'), ((29013, 29027), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (29020, 29027), True, 'import matplotlib.pyplot as plt\n'), ((29028, 29043), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (29036, 29043), True, 'import matplotlib.pyplot as plt\n'), ((29044, 29075), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1.6)'}), '(hspace=1.6)\n', (29063, 29075), True, 'import matplotlib.pyplot as plt\n'), ((29943, 29961), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29959, 29961), True, 'import matplotlib.pyplot as plt\n'), ((29962, 30015), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/Emotions_during_onlinestorm.png"""'], {}), "('images/Emotions_during_onlinestorm.png')\n", (29973, 30015), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2796), 're.findall', 're.findall', (['reg_expression', 'tweet'], {}), '(reg_expression, tweet)\n', (2773, 2796), False, 'import re\n'), ((3641, 3729), 'seaborn.FacetGrid', 'sns.FacetGrid', (['df'], {'col': '"""onlinestorm"""', 'height': '(5)', 'hue': '"""onlinestorm"""', 'palette': '"""RdYlGn"""'}), "(df, col='onlinestorm', height=5, hue='onlinestorm', palette=\n 'RdYlGn')\n", (3654, 3729), True, 'import seaborn as sns\n'), ((3800, 3859), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/Descriptive_stats_for_' + key + '.png')"], {}), "('images/Descriptive_stats_for_' + key + '.png')\n", (3811, 3859), True, 'import matplotlib.pyplot as plt\n'), ((8577, 8619), 'pandas.DataFrame', 'pd.DataFrame', (['df1', 'df1.index', "info['cols']"], {}), "(df1, df1.index, info['cols'])\n", (8589, 8619), True, 'import pandas as pd\n'), ((8625, 8653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 
10))\n', (8635, 8653), True, 'import matplotlib.pyplot as plt\n'), ((8663, 8716), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'plot_df', 'linewidth': '(3)', 'dashes': '(False)'}), '(data=plot_df, linewidth=3, dashes=False)\n', (8675, 8716), True, 'import seaborn as sns\n'), ((8725, 8760), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(15)'}), "(loc='best', fontsize=15)\n", (8735, 8760), True, 'import matplotlib.pyplot as plt\n'), ((8765, 8802), 'matplotlib.pyplot.title', 'plt.title', (["info['title']"], {'fontsize': '(20)'}), "(info['title'], fontsize=20)\n", (8774, 8802), True, 'import matplotlib.pyplot as plt\n'), ((8807, 8844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["info['xlab']"], {'fontsize': '(15)'}), "(info['xlab'], fontsize=15)\n", (8817, 8844), True, 'import matplotlib.pyplot as plt\n'), ((8849, 8886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["info['ylab']"], {'fontsize': '(15)'}), "(info['ylab'], fontsize=15)\n", (8859, 8886), True, 'import matplotlib.pyplot as plt\n'), ((8891, 8909), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8907, 8909), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8952), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/' + info['fname'])"], {}), "('images/' + info['fname'])\n", (8925, 8952), True, 'import matplotlib.pyplot as plt\n'), ((9107, 9170), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.005, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.005, 1), loc=2, borderaxespad=0.0)\n', (9117, 9170), True, 'import matplotlib.pyplot as plt\n'), ((9174, 9232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Online storm"""'], {'fontweight': '"""bold"""', 'fontsize': '(18)'}), "('Online storm', fontweight='bold', fontsize=18)\n", (9184, 9232), True, 'import matplotlib.pyplot as plt\n'), ((9237, 9272), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)', 'fontsize': '(14)'}), '(rotation=0, 
fontsize=14)\n', (9247, 9272), True, 'import matplotlib.pyplot as plt\n'), ((9298, 9362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction of Tweets"""'], {'fontweight': '"""bold"""', 'fontsize': '(18)'}), "('Fraction of Tweets', fontweight='bold', fontsize=18)\n", (9308, 9362), True, 'import matplotlib.pyplot as plt\n'), ((9427, 9445), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9443, 9445), True, 'import matplotlib.pyplot as plt\n'), ((9450, 9489), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/' + fname + '.png')"], {}), "('images/' + fname + '.png')\n", (9461, 9489), True, 'import matplotlib.pyplot as plt\n'), ((9559, 9588), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (9571, 9588), True, 'import matplotlib.pyplot as plt\n'), ((9593, 9634), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1)'}), "('notebook', font_scale=1)\n", (9608, 9634), True, 'import seaborn as sns\n'), ((9648, 9725), 'seaborn.barplot', 'sns.barplot', ([], {'x': "info['x']", 'y': "info['y']", 'data': "info['data']", 'palette': "info['pal']"}), "(x=info['x'], y=info['y'], data=info['data'], palette=info['pal'])\n", (9659, 9725), True, 'import seaborn as sns\n'), ((9798, 9835), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["info['ylab']"], {'fontsize': '(16)'}), "(info['ylab'], fontsize=16)\n", (9808, 9835), True, 'import matplotlib.pyplot as plt\n'), ((9840, 9877), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["info['xlab']"], {'fontsize': '(16)'}), "(info['xlab'], fontsize=16)\n", (9850, 9877), True, 'import matplotlib.pyplot as plt\n'), ((9882, 9929), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': "info['angle']", 'fontsize': '(14)'}), "(rotation=info['angle'], fontsize=14)\n", (9892, 9929), True, 'import matplotlib.pyplot as plt\n'), ((9933, 9956), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (9943, 9956), 
True, 'import matplotlib.pyplot as plt\n'), ((9961, 9979), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9977, 9979), True, 'import matplotlib.pyplot as plt\n'), ((9984, 10022), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/' + info['fname'])"], {}), "('images/' + info['fname'])\n", (9995, 10022), True, 'import matplotlib.pyplot as plt\n'), ((12453, 12465), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12463, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12480, 12509), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (12492, 12509), True, 'import matplotlib.pyplot as plt\n'), ((12514, 12543), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (12523, 12543), True, 'import matplotlib.pyplot as plt\n'), ((12548, 12595), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (12558, 12595), True, 'import matplotlib.pyplot as plt\n'), ((12600, 12615), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12608, 12615), True, 'import matplotlib.pyplot as plt\n'), ((12620, 12659), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/' + fname + '.png')"], {}), "('images/' + fname + '.png')\n", (12631, 12659), True, 'import matplotlib.pyplot as plt\n'), ((12667, 12677), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12675, 12677), True, 'import matplotlib.pyplot as plt\n'), ((12976, 12993), 'collections.Counter', 'Counter', (['trigrams'], {}), '(trigrams)\n', (12983, 12993), False, 'from collections import Counter\n'), ((13352, 13398), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Grams', 'Count']"}), "(data, columns=['Grams', 'Count'])\n", (13364, 13398), True, 'import pandas as pd\n'), ((18146, 18192), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], 
{'use_idf': '(True)', 'smooth_idf': '(True)'}), '(use_idf=True, smooth_idf=True)\n', (18161, 18192), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((18260, 18308), 'sklearn.decomposition.LatentDirichletAllocation', 'LatentDirichletAllocation', ([], {'n_components': 'n_topics'}), '(n_components=n_topics)\n', (18285, 18308), False, 'from sklearn.decomposition import LatentDirichletAllocation\n'), ((18788, 18835), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Topics', 'Count']"}), "(data, columns=['Topics', 'Count'])\n", (18800, 18835), True, 'import pandas as pd\n'), ((20782, 20816), 'pandas.DataFrame', 'pd.DataFrame', (['df1', 'df1.index', 'cols'], {}), '(df1, df1.index, cols)\n', (20794, 20816), True, 'import pandas as pd\n'), ((20822, 20850), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (20832, 20850), True, 'import matplotlib.pyplot as plt\n'), ((20860, 20913), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'plot_df', 'linewidth': '(3)', 'dashes': '(False)'}), '(data=plot_df, linewidth=3, dashes=False)\n', (20872, 20913), True, 'import seaborn as sns\n'), ((20921, 20956), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(15)'}), "(loc='best', fontsize=15)\n", (20931, 20956), True, 'import matplotlib.pyplot as plt\n'), ((20961, 20990), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (20970, 20990), True, 'import matplotlib.pyplot as plt\n'), ((20995, 21034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (hours)"""'], {'fontsize': '(15)'}), "('Time (hours)', fontsize=15)\n", (21005, 21034), True, 'import matplotlib.pyplot as plt\n'), ((21039, 21083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z-scored Emotions"""'], {'fontsize': '(15)'}), "('Z-scored Emotions', fontsize=15)\n", (21049, 21083), True, 'import matplotlib.pyplot as plt\n'), ((21088, 21127), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/' + fname + '.png')"], {}), "('images/' + fname + '.png')\n", (21099, 21127), True, 'import matplotlib.pyplot as plt\n'), ((21764, 21813), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'df.index', 'columns': 'emotions'}), '(0, index=df.index, columns=emotions)\n', (21776, 21813), True, 'import pandas as pd\n'), ((21890, 21916), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (21905, 21916), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((22900, 22943), 'pandas.concat', 'pd.concat', (['[df_tweets, df_emotions]'], {'axis': '(1)'}), '([df_tweets, df_emotions], axis=1)\n', (22909, 22943), True, 'import pandas as pd\n'), ((25675, 25701), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (25682, 25701), True, 'import seaborn as sns\n'), ((25706, 25750), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.25)'}), "('notebook', font_scale=1.25)\n", (25721, 25750), True, 'import seaborn as sns\n'), ((25835, 25848), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (25846, 25848), True, 'import seaborn as sns\n'), ((25858, 25923), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""labels"""', 'y': '"""values"""', 'data': 'info', 'palette': '"""cividis"""'}), "(x='labels', y='values', data=info, palette='cividis')\n", (25869, 25923), True, 'import seaborn as sns\n'), ((25930, 25966), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Top words"""'], {'fontsize': '(12)'}), "('Top words', fontsize=12)\n", (25940, 25966), True, 'import matplotlib.pyplot as plt\n'), ((26050, 26086), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'fontsize': '(14)'}), '(rotation=45, fontsize=14)\n', (26060, 26086), True, 'import matplotlib.pyplot as plt\n'), ((29396, 29429), 'pandas.DataFrame', 'pd.DataFrame', (['df1', 'df1.index', 'emo'], {}), '(df1, df1.index, emo)\n', (29408, 
29429), True, 'import pandas as pd\n'), ((29439, 29465), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (29446, 29465), True, 'import seaborn as sns\n'), ((29470, 29514), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.25)'}), "('notebook', font_scale=1.25)\n", (29485, 29514), True, 'import seaborn as sns\n'), ((29599, 29612), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (29610, 29612), True, 'import seaborn as sns\n'), ((29622, 29675), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'plot_df', 'linewidth': '(3)', 'dashes': '(False)'}), '(data=plot_df, linewidth=3, dashes=False)\n', (29634, 29675), True, 'import seaborn as sns\n'), ((29683, 29726), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time by the hour"""'], {'fontsize': '(12)'}), "('Time by the hour', fontsize=12)\n", (29693, 29726), True, 'import matplotlib.pyplot as plt\n'), ((3226, 3253), 'pandas.DataFrame', 'pd.DataFrame', (['content_count'], {}), '(content_count)\n', (3238, 3253), True, 'import pandas as pd\n'), ((4457, 4483), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4472, 4483), False, 'from nltk.corpus import stopwords\n'), ((5167, 5187), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['tweet'], {}), '(tweet)\n', (5180, 5187), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((5867, 5895), 're.search', 're.search', (['"""[a-zA-Z]"""', 'token'], {}), "('[a-zA-Z]', token)\n", (5876, 5895), False, 'import re\n'), ((11017, 11029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11027, 11029), True, 'import matplotlib.pyplot as plt\n'), ((12172, 12207), 'PIL.Image.open', 'Image.open', (['"""images/cloud_mask.png"""'], {}), "('images/cloud_mask.png')\n", (12182, 12207), False, 'from PIL import Image\n'), ((21348, 21364), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (21359, 21364), False, 'from collections import 
defaultdict\n'), ((21624, 21640), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (21635, 21640), False, 'from collections import defaultdict\n'), ((22038, 22074), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['df_tweets.loc[i][col]'], {}), '(df_tweets.loc[i][col])\n', (22051, 22074), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((24281, 24301), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['tweet'], {}), '(tweet)\n', (24294, 24301), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((29232, 29263), 'pandas.PeriodIndex', 'pd.PeriodIndex', (["df1['datetime']"], {}), "(df1['datetime'])\n", (29246, 29263), True, 'import pandas as pd\n'), ((1539, 1579), 'pandas.to_datetime', 'pd.to_datetime', (["tweets.at[i, 'datetime']"], {}), "(tweets.at[i, 'datetime'])\n", (1553, 1579), True, 'import pandas as pd\n'), ((8516, 8547), 'pandas.PeriodIndex', 'pd.PeriodIndex', (["df1['datetime']"], {}), "(df1['datetime'])\n", (8530, 8547), True, 'import pandas as pd\n'), ((12225, 12340), 'wordcloud.WordCloud', 'WordCloud', ([], {'max_font_size': '(100)', 'max_words': '(50)', 'width': '(2500)', 'height': '(1750)', 'mask': 'cloud_mask', 'background_color': '"""white"""'}), "(max_font_size=100, max_words=50, width=2500, height=1750, mask=\n cloud_mask, background_color='white')\n", (12234, 12340), False, 'from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n'), ((16581, 16594), 'collections.Counter', 'Counter', (['keys'], {}), '(keys)\n', (16588, 16594), False, 'from collections import Counter\n'), ((17495, 17539), 'numpy.zeros', 'np.zeros', (['(1, document_term_matrix.shape[1])'], {}), '((1, document_term_matrix.shape[1]))\n', (17503, 17539), True, 'import numpy as np\n'), ((20721, 20752), 'pandas.PeriodIndex', 'pd.PeriodIndex', (["df1['datetime']"], {}), "(df1['datetime'])\n", (20735, 20752), True, 'import pandas as pd\n'), ((23854, 23877), 'numpy.argsort', 'np.argsort', (['words[:, 1]'], {}), '(words[:, 
1])\n', (23864, 23877), True, 'import numpy as np\n'), ((1595, 1612), 'datetime.date', 'date', (['(2020)', '(3)', '(15)'], {}), '(2020, 3, 15)\n', (1599, 1612), False, 'from datetime import date\n'), ((24460, 24502), 'termcolor.colored', 'colored', (['w'], {'color': 'color', 'on_color': 'on_color'}), '(w, color=color, on_color=on_color)\n', (24467, 24502), False, 'from termcolor import colored, cprint\n'), ((1171, 1191), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""Y"""'}), "(freq='Y')\n", (1181, 1191), True, 'import pandas as pd\n'), ((17263, 17290), 'numpy.argsort', 'np.argsort', (['temp_vector_sum'], {}), '(temp_vector_sum)\n', (17273, 17290), True, 'import numpy as np\n'), ((2199, 2219), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""H"""'}), "(freq='H')\n", (2209, 2219), True, 'import pandas as pd\n')] |
import numpy as np
def hmc(U, grad_U, eps, L, current_q, p_sampler):
q = current_q
p = p_sampler()
current_p = p
p -= eps * grad_U(q)/2.0
for i in range(L):
q = q + eps *p
if i != L-1:
p -= eps * grad_U(q)
p -= eps * grad_U(q)/2.0
p = -p
current_U = U(current_q)
current_K = current_p.T.dot(current_p)*0.5
proposed_U = U(q)
proposed_K = p.T.dot(p)*0.5
prob = np.exp(current_U - proposed_U + current_K - proposed_K)
if np.random.uniform(size=1)< prob:
return q, prob, 1
else:
return current_q, prob, 0
| [
"numpy.random.uniform",
"numpy.exp"
] | [((454, 509), 'numpy.exp', 'np.exp', (['(current_U - proposed_U + current_K - proposed_K)'], {}), '(current_U - proposed_U + current_K - proposed_K)\n', (460, 509), True, 'import numpy as np\n'), ((517, 542), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1)'}), '(size=1)\n', (534, 542), True, 'import numpy as np\n')] |
import vplanet
import vplot
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pathlib
import sys
import glob
import subprocess
from tqdm import tqdm
from scipy.interpolate import interp2d
import os
# Path hacks
path = pathlib.Path(__file__).parents[0].absolute()
sys.path.insert(1, str(path.parents[0]))
from get_args import get_args
MSUN = 1.988416e30
LSUN = 3.846e26
# If necessary, build directories and run them
if not ((path / "data").exists()):
sys.stdout.write("Buliding directories.")
sys.stdout.flush()
subprocess.run(["vspace", "vspace.in"], cwd=str(path))
sys.stdout.write("\nRunning trials.")
sys.stdout.flush()
subprocess.run(["multiplanet", "vspace.in"], cwd=str(path))
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("Making plot.")
sys.stdout.flush()
dirs = []
for file in os.listdir("data/"):
d = os.path.join("data/", file)
if os.path.isdir(d):
dirs.append(d)
sorted_dirs = sorted(dirs)
nmass = len(sorted_dirs)
mass = np.zeros(nmass)
recv = np.zeros(nmass)
runaway = np.zeros(nmass)
maxg = np.zeros(nmass)
earlym = np.zeros(nmass)
lum = np.zeros(nmass)
#dirs_list = enumerate(dirs.sort())
#for dir in dirs.sort():
# print(dir)
#exit()
# Run vplanet
i=0
for dir in sorted_dirs:
# Run vplanet
output = vplanet.get_output(path / dir, units=False)
mass[i] = output.log.initial.star.Mass / MSUN
recv[i] = output.log.initial.star.HZLimRecVenus
runaway[i] = output.log.initial.star.HZLimRunaway
maxg[i] = output.log.initial.star.HZLimMaxGreenhouse
earlym[i] = output.log.initial.star.HZLimEarlyMars
lum[i] = output.log.initial.star.Luminosity / LSUN
#print(dir,mass[i],lum[i],recv[i],runaway[i],maxg[i],earlym[i])
i = i+1
fig = plt.figure(figsize=(6.5, 5))
# Arrays ecc,obl,heat now contain the data to make the figure
plt.xlabel("Semi-major Axis (au)", fontsize=20)
plt.ylabel("Stellar Mass (M$_\odot$)", fontsize=20)
plt.tick_params(axis="both", labelsize=20)
plt.xlim(0.01, 2.5)
plt.ylim(0.08,1.1)
plt.plot(recv,mass,color=vplot.colors.orange)
plt.plot(runaway,mass,color=vplot.colors.dark_blue)
plt.plot(maxg,mass,color=vplot.colors.dark_blue)
plt.plot(earlym,mass,color=vplot.colors.pale_blue)
fbk = {'lw':0.0, 'edgecolor':None}
plt.fill_betweenx(mass,runaway,maxg,facecolor=vplot.colors.dark_blue,**fbk)
plt.fill_betweenx(mass,recv,runaway,facecolor=vplot.colors.orange,**fbk)
plt.fill_betweenx(mass,maxg,earlym,facecolor=vplot.colors.pale_blue,**fbk)
plt.fill([1.5, 1.5,1.7,1.7],[0.6,0.65,0.65,0.6],vplot.colors.orange)
plt.annotate("Too Hot?",[1.73,0.601],color=vplot.colors.orange,fontsize=20)
plt.fill([1.5, 1.5,1.7,1.7],[0.5,0.55,0.55,0.5],vplot.colors.dark_blue)
plt.annotate("Hab. Zone",[1.73,0.501],color=vplot.colors.dark_blue,fontsize=20)
plt.fill([1.5, 1.5,1.7,1.7],[0.4,0.45,0.45,0.4],vplot.colors.pale_blue)
plt.annotate("Too Cold?",[1.73,0.401],color=vplot.colors.pale_blue,fontsize=20)
ext = get_args().ext
fig.savefig(path / f"HabitableZone.{ext}", bbox_inches="tight", dpi=600)
print("") | [
"sys.stdout.write",
"matplotlib.pyplot.figure",
"pathlib.Path",
"sys.stdout.flush",
"matplotlib.pyplot.tick_params",
"os.path.join",
"get_args.get_args",
"vplanet.get_output",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.fill_betweenx",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.p... | [((795, 827), 'sys.stdout.write', 'sys.stdout.write', (['"""Making plot."""'], {}), "('Making plot.')\n", (811, 827), False, 'import sys\n'), ((828, 846), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (844, 846), False, 'import sys\n'), ((869, 888), 'os.listdir', 'os.listdir', (['"""data/"""'], {}), "('data/')\n", (879, 888), False, 'import os\n'), ((1035, 1050), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1043, 1050), True, 'import numpy as np\n'), ((1058, 1073), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1066, 1073), True, 'import numpy as np\n'), ((1084, 1099), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1092, 1099), True, 'import numpy as np\n'), ((1107, 1122), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1115, 1122), True, 'import numpy as np\n'), ((1132, 1147), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1140, 1147), True, 'import numpy as np\n'), ((1154, 1169), 'numpy.zeros', 'np.zeros', (['nmass'], {}), '(nmass)\n', (1162, 1169), True, 'import numpy as np\n'), ((1785, 1813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.5, 5)'}), '(figsize=(6.5, 5))\n', (1795, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Semi-major Axis (au)"""'], {'fontsize': '(20)'}), "('Semi-major Axis (au)', fontsize=20)\n", (1887, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1977), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stellar Mass (M$_\\\\odot$)"""'], {'fontsize': '(20)'}), "('Stellar Mass (M$_\\\\odot$)', fontsize=20)\n", (1935, 1977), True, 'import matplotlib.pyplot as plt\n'), ((1977, 2019), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': '(20)'}), "(axis='both', labelsize=20)\n", (1992, 2019), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2040), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.01)', '(2.5)'], {}), '(0.01, 
2.5)\n', (2029, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2060), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.08)', '(1.1)'], {}), '(0.08, 1.1)\n', (2049, 2060), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2108), 'matplotlib.pyplot.plot', 'plt.plot', (['recv', 'mass'], {'color': 'vplot.colors.orange'}), '(recv, mass, color=vplot.colors.orange)\n', (2069, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2160), 'matplotlib.pyplot.plot', 'plt.plot', (['runaway', 'mass'], {'color': 'vplot.colors.dark_blue'}), '(runaway, mass, color=vplot.colors.dark_blue)\n', (2115, 2160), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2209), 'matplotlib.pyplot.plot', 'plt.plot', (['maxg', 'mass'], {'color': 'vplot.colors.dark_blue'}), '(maxg, mass, color=vplot.colors.dark_blue)\n', (2167, 2209), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2260), 'matplotlib.pyplot.plot', 'plt.plot', (['earlym', 'mass'], {'color': 'vplot.colors.pale_blue'}), '(earlym, mass, color=vplot.colors.pale_blue)\n', (2216, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2374), 'matplotlib.pyplot.fill_betweenx', 'plt.fill_betweenx', (['mass', 'runaway', 'maxg'], {'facecolor': 'vplot.colors.dark_blue'}), '(mass, runaway, maxg, facecolor=vplot.colors.dark_blue, **fbk)\n', (2312, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2371, 2447), 'matplotlib.pyplot.fill_betweenx', 'plt.fill_betweenx', (['mass', 'recv', 'runaway'], {'facecolor': 'vplot.colors.orange'}), '(mass, recv, runaway, facecolor=vplot.colors.orange, **fbk)\n', (2388, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2522), 'matplotlib.pyplot.fill_betweenx', 'plt.fill_betweenx', (['mass', 'maxg', 'earlym'], {'facecolor': 'vplot.colors.pale_blue'}), '(mass, maxg, earlym, facecolor=vplot.colors.pale_blue, **fbk)\n', (2461, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2595), 'matplotlib.pyplot.fill', 'plt.fill', (['[1.5, 1.5, 1.7, 1.7]', '[0.6, 0.65, 0.65, 0.6]', 
'vplot.colors.orange'], {}), '([1.5, 1.5, 1.7, 1.7], [0.6, 0.65, 0.65, 0.6], vplot.colors.orange)\n', (2528, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2589, 2668), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Too Hot?"""', '[1.73, 0.601]'], {'color': 'vplot.colors.orange', 'fontsize': '(20)'}), "('Too Hot?', [1.73, 0.601], color=vplot.colors.orange, fontsize=20)\n", (2601, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2744), 'matplotlib.pyplot.fill', 'plt.fill', (['[1.5, 1.5, 1.7, 1.7]', '[0.5, 0.55, 0.55, 0.5]', 'vplot.colors.dark_blue'], {}), '([1.5, 1.5, 1.7, 1.7], [0.5, 0.55, 0.55, 0.5], vplot.colors.dark_blue)\n', (2674, 2744), True, 'import matplotlib.pyplot as plt\n'), ((2738, 2825), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Hab. Zone"""', '[1.73, 0.501]'], {'color': 'vplot.colors.dark_blue', 'fontsize': '(20)'}), "('Hab. Zone', [1.73, 0.501], color=vplot.colors.dark_blue,\n fontsize=20)\n", (2750, 2825), True, 'import matplotlib.pyplot as plt\n'), ((2819, 2897), 'matplotlib.pyplot.fill', 'plt.fill', (['[1.5, 1.5, 1.7, 1.7]', '[0.4, 0.45, 0.45, 0.4]', 'vplot.colors.pale_blue'], {}), '([1.5, 1.5, 1.7, 1.7], [0.4, 0.45, 0.45, 0.4], vplot.colors.pale_blue)\n', (2827, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2978), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Too Cold?"""', '[1.73, 0.401]'], {'color': 'vplot.colors.pale_blue', 'fontsize': '(20)'}), "('Too Cold?', [1.73, 0.401], color=vplot.colors.pale_blue,\n fontsize=20)\n", (2903, 2978), True, 'import matplotlib.pyplot as plt\n'), ((491, 532), 'sys.stdout.write', 'sys.stdout.write', (['"""Buliding directories."""'], {}), "('Buliding directories.')\n", (507, 532), False, 'import sys\n'), ((537, 555), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (553, 555), False, 'import sys\n'), ((619, 656), 'sys.stdout.write', 'sys.stdout.write', (['"""\nRunning trials."""'], {}), "('\\nRunning trials.')\n", (635, 656), False, 'import sys\n'), 
((661, 679), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (677, 679), False, 'import sys\n'), ((748, 770), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (764, 770), False, 'import sys\n'), ((775, 793), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (791, 793), False, 'import sys\n'), ((898, 925), 'os.path.join', 'os.path.join', (['"""data/"""', 'file'], {}), "('data/', file)\n", (910, 925), False, 'import os\n'), ((933, 949), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (946, 949), False, 'import os\n'), ((1331, 1374), 'vplanet.get_output', 'vplanet.get_output', (['(path / dir)'], {'units': '(False)'}), '(path / dir, units=False)\n', (1349, 1374), False, 'import vplanet\n'), ((2978, 2988), 'get_args.get_args', 'get_args', ([], {}), '()\n', (2986, 2988), False, 'from get_args import get_args\n'), ((252, 274), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (264, 274), False, 'import pathlib\n')] |
# Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
import numpy as np
np.set_printoptions(linewidth=180)
rg = nk.utils.RandomEngine(seed=1234)
# 1D Lattice
L = 9
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=False)
# Hilbert space of spins on the graph
hi = nk.hilbert.Spin(s=0.5, graph=g)
# Defining the Ising hamiltonian (with sign problem here)
# Using local operators
sx = [[0, 1], [1, 0]]
sy = [[0, -1j], [1j, 0]]
sz = [[1, 0], [0, -1]]
s0 = [[0, 0], [0, 0]]
sigmam = [[0, 0], [1, 0]]
ha = nk.operator.LocalOperator(hi)
j_ops = []
for i in range(L):
ha += nk.operator.LocalOperator(hi, sx, [i])
ha += nk.operator.LocalOperator(hi, np.kron(sz, sz), [i, (i + 1) % L])
j_ops.append(nk.operator.LocalOperator(hi, sigmam, [i]))
# Create the lindbladian with no jump operators
lind = nk.operator.LocalLiouvillian(ha)
# add the jump operators
for j_op in j_ops:
lind.add_jump_op(j_op)
rho = nk.exact.steady_state(lind, method='iterative', sparse=True, maxiter=1000, tol=1e-5)
| [
"numpy.set_printoptions",
"netket.operator.LocalOperator",
"netket.exact.steady_state",
"netket.graph.Hypercube",
"netket.hilbert.Spin",
"numpy.kron",
"netket.utils.RandomEngine",
"netket.operator.LocalLiouvillian"
] | [((651, 685), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(180)'}), '(linewidth=180)\n', (670, 685), True, 'import numpy as np\n'), ((691, 723), 'netket.utils.RandomEngine', 'nk.utils.RandomEngine', ([], {'seed': '(1234)'}), '(seed=1234)\n', (712, 723), True, 'import netket as nk\n'), ((748, 796), 'netket.graph.Hypercube', 'nk.graph.Hypercube', ([], {'length': 'L', 'n_dim': '(1)', 'pbc': '(False)'}), '(length=L, n_dim=1, pbc=False)\n', (766, 796), True, 'import netket as nk\n'), ((841, 872), 'netket.hilbert.Spin', 'nk.hilbert.Spin', ([], {'s': '(0.5)', 'graph': 'g'}), '(s=0.5, graph=g)\n', (856, 872), True, 'import netket as nk\n'), ((1082, 1111), 'netket.operator.LocalOperator', 'nk.operator.LocalOperator', (['hi'], {}), '(hi)\n', (1107, 1111), True, 'import netket as nk\n'), ((1385, 1417), 'netket.operator.LocalLiouvillian', 'nk.operator.LocalLiouvillian', (['ha'], {}), '(ha)\n', (1413, 1417), True, 'import netket as nk\n'), ((1498, 1587), 'netket.exact.steady_state', 'nk.exact.steady_state', (['lind'], {'method': '"""iterative"""', 'sparse': '(True)', 'maxiter': '(1000)', 'tol': '(1e-05)'}), "(lind, method='iterative', sparse=True, maxiter=1000,\n tol=1e-05)\n", (1519, 1587), True, 'import netket as nk\n'), ((1153, 1191), 'netket.operator.LocalOperator', 'nk.operator.LocalOperator', (['hi', 'sx', '[i]'], {}), '(hi, sx, [i])\n', (1178, 1191), True, 'import netket as nk\n'), ((1232, 1247), 'numpy.kron', 'np.kron', (['sz', 'sz'], {}), '(sz, sz)\n', (1239, 1247), True, 'import numpy as np\n'), ((1284, 1326), 'netket.operator.LocalOperator', 'nk.operator.LocalOperator', (['hi', 'sigmam', '[i]'], {}), '(hi, sigmam, [i])\n', (1309, 1326), True, 'import netket as nk\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 22:21:32 2017
@author: yxl
"""
import wx
from imagepy.core.engine import Tool
import numpy as np
import pandas as pd
from numpy.linalg import norm
from .setting import Setting
from imagepy import IPy
class Distance:
"""Define the distance class"""
dtype = 'distance'
def __init__(self, body=None, unit=None):
self.body = body if body!=None else []
self.buf, self.unit = [], unit
def addline(self):
line = self.buf
if len(line)!=2 or line[0] !=line[-1]:
self.body.append(line)
self.buf = []
def snap(self, x, y, lim):
minl, idx = 1000, None
for i in self.body:
for j in i:
d = (j[0]-x)**2+(j[1]-y)**2
if d < minl:minl,idx = d,(i, i.index(j))
return idx if minl**0.5<lim else None
def pick(self, x, y, lim):
return self.snap(x, y, lim)
def draged(self, ox, oy, nx, ny, i):
i[0][i[1]] = (nx, ny)
def draw(self, dc, f, **key):
dc.SetPen(wx.Pen(Setting['color'], width=1, style=wx.SOLID))
dc.SetTextForeground(Setting['tcolor'])
font = wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
dc.SetFont(font)
dc.DrawLines([f(*i) for i in self.buf])
for i in self.buf:dc.DrawCircle(f(*i),2)
for line in self.body:
dc.DrawLines([f(*i) for i in line])
for i in line:dc.DrawCircle(f(*i),2)
pts = np.array(line)
mid = (pts[:-1]+pts[1:])/2
dis = norm((pts[:-1]-pts[1:]), axis=1)
unit = 1 if self.unit is None else self.unit[0]
for i,j in zip(dis, mid):
dc.DrawText('%.2f'%(i*unit), f(*j))
def report(self, title):
rst = []
for line in self.body:
pts = np.array(line)
dis = norm((pts[:-1]-pts[1:]), axis=1)
dis *= 1 if self.unit is None else self.unit[0]
rst.append(list(dis.round(2)))
lens = [len(i) for i in rst]
maxlen = max(lens)
fill = [[0]*(maxlen-i) for i in lens]
rst = [i+j for i,j in zip(rst, fill)]
titles = ['L{}'.format(i+1) for i in range(maxlen)]
IPy.show_table(pd.DataFrame(rst, columns=titles), title)
class Plugin(Tool):
"""Define the diatance class plugin with the event callback functions"""
title = 'Distance'
def __init__(self):
self.curobj = None
self.doing = False
self.odx,self.ody = 0, 0
def mouse_down(self, ips, x, y, btn, **key):
if key['ctrl'] and key['alt']:
if isinstance(ips.mark, Distance):
ips.mark.report(ips.title)
return
lim = 5.0/key['canvas'].get_scale()
if btn==1:
if not self.doing:
if isinstance(ips.mark, Distance):
self.curobj = ips.mark.pick(x, y, lim)
if self.curobj!=None:return
if not isinstance(ips.mark, Distance):
ips.mark = Distance(unit=ips.unit)
self.doing = True
elif key['shift']:
self.doing = True
else: ips.mark = None
if self.doing:
ips.mark.buf.append((x,y))
self.curobj = (ips.mark.buf, -1)
self.odx, self.ody = x,y
elif btn==3:
if self.doing:
ips.mark.buf.append((x,y))
self.doing = False
ips.mark.addline()
ips.update()
def mouse_up(self, ips, x, y, btn, **key):
self.curobj = None
def mouse_move(self, ips, x, y, btn, **key):
if not isinstance(ips.mark, Distance):return
lim = 5.0/key['canvas'].get_scale()
if btn==None:
self.cursor = wx.CURSOR_CROSS
if ips.mark.snap(x, y, lim)!=None:
self.cursor = wx.CURSOR_HAND
elif btn==1:
ips.mark.draged(self.odx, self.ody, x, y, self.curobj)
ips.update()
self.odx, self.ody = x, y
def mouse_wheel(self, ips, x, y, d, **key):
pass | [
"pandas.DataFrame",
"numpy.array",
"wx.Pen",
"numpy.linalg.norm",
"wx.Font"
] | [((1209, 1298), 'wx.Font', 'wx.Font', (['(10)', 'wx.FONTFAMILY_DEFAULT', 'wx.FONTSTYLE_NORMAL', 'wx.FONTWEIGHT_NORMAL', '(False)'], {}), '(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.\n FONTWEIGHT_NORMAL, False)\n', (1216, 1298), False, 'import wx\n'), ((1095, 1144), 'wx.Pen', 'wx.Pen', (["Setting['color']"], {'width': '(1)', 'style': 'wx.SOLID'}), "(Setting['color'], width=1, style=wx.SOLID)\n", (1101, 1144), False, 'import wx\n'), ((1562, 1576), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (1570, 1576), True, 'import numpy as np\n'), ((1635, 1667), 'numpy.linalg.norm', 'norm', (['(pts[:-1] - pts[1:])'], {'axis': '(1)'}), '(pts[:-1] - pts[1:], axis=1)\n', (1639, 1667), False, 'from numpy.linalg import norm\n'), ((1914, 1928), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (1922, 1928), True, 'import numpy as np\n'), ((1947, 1979), 'numpy.linalg.norm', 'norm', (['(pts[:-1] - pts[1:])'], {'axis': '(1)'}), '(pts[:-1] - pts[1:], axis=1)\n', (1951, 1979), False, 'from numpy.linalg import norm\n'), ((2322, 2355), 'pandas.DataFrame', 'pd.DataFrame', (['rst'], {'columns': 'titles'}), '(rst, columns=titles)\n', (2334, 2355), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
@author: hkaneko

Ensemble (bagging-style) classification of chemical activity/toxicity data:
trains ``number_of_submodels`` k-NN / SVM / RF sub-models on random descriptor
subsets and combines their test-set predictions by majority vote.
"""
import math
import sys
import numpy as np
import pandas as pd
import sample_functions
from sklearn import metrics, model_selection, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# Set y_name below to 'pIC50_class' or 'pIGC50_class'.
# The script reads descriptors_with_[y_name].csv as its dataset.
# If you prepare your own descriptors_with_[y_name].csv in the same format
# (e.g. via sample_program_6_8_0_csv.py or sample_program_6_8_0_sdf.py),
# any other y_name works the same way.
y_name = 'pIC50_class'
# 'pIC50_class' : pharmacological-activity dataset for classification
# 'pIGC50_class' : environmental-toxicity dataset for classification
rate_of_test_samples = 0.25  # fraction of samples held out as the test set (0 < x < 1)
method_name = 'rf'  # 'knn' or 'svm' or 'rf'
number_of_submodels = 50  # number of sub-models in the ensemble
rate_of_selected_x_variables = 0.7  # fraction of descriptors drawn per sub-dataset (0 < x < 1)
add_nonlinear_terms_flag = False  # True (add squared/cross terms) or False (do not)
fold_number = 5  # N in N-fold cross-validation
max_number_of_k = 20  # largest k considered for k-NN
svm_cs = 2 ** np.arange(-5, 11, dtype=float)  # candidate SVM C values
svm_gammas = 2 ** np.arange(-20, 11, dtype=float)  # candidate SVM gamma values
rf_number_of_trees = 300  # number of trees per random forest
rf_x_variables_rates = np.arange(1, 11, dtype=float) / 10  # candidate per-tree descriptor fractions
# Abort early on an unknown method name (error message intentionally in Japanese).
if method_name != 'knn' and method_name != 'svm' and method_name != 'rf':
    sys.exit('\'{0}\' というクラス分類手法はありません。method_name を見直してください。'.format(method_name))
# Load the dataset: first column is the class label y, the rest are descriptors.
dataset = pd.read_csv('descriptors_with_{0}.csv'.format(y_name), index_col=0)
y = dataset.iloc[:, 0].copy()
x = dataset.iloc[:, 1:]
x = x.replace(np.inf, np.nan).fillna(np.nan)  # replace inf with NaN
nan_variable_flags = x.isnull().any()  # variables containing NaN
x = x.drop(x.columns[nan_variable_flags], axis=1)  # drop variables containing NaN
number_of_test_samples = round(dataset.shape[0] * rate_of_test_samples)
# Random train/test split.
# Fixing random_state makes the (otherwise random) split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, shuffle=True,
                                                    random_state=0)
class_types = list(set(y_train))  # distinct class labels
class_types.sort(reverse=True)  # sort for a stable ordering
# Drop descriptors with zero standard deviation (constant columns).
std_0_variable_flags = x_train.std() == 0
x_train = x_train.drop(x_train.columns[std_0_variable_flags], axis=1)
x_test = x_test.drop(x_test.columns[std_0_variable_flags], axis=1)
if add_nonlinear_terms_flag:
    # Load precomputed nonlinear (squared/cross) terms instead of recomputing.
    x_train = pd.read_csv('x_train_{0}.csv'.format(y_name), index_col=0)
    x_test = pd.read_csv('x_test_{0}.csv'.format(y_name), index_col=0)
    # x_train = sample_functions.add_nonlinear_terms(x_train)  # add squared/cross terms
    # x_test = sample_functions.add_nonlinear_terms(x_test)  # add squared/cross terms
    # Drop zero-variance descriptors again after adding nonlinear terms.
    std_0_nonlinear_variable_flags = x_train.std() == 0
    x_train = x_train.drop(x_train.columns[std_0_nonlinear_variable_flags], axis=1)
    x_test = x_test.drop(x_test.columns[std_0_nonlinear_variable_flags], axis=1)
# Autoscaling (standardisation with the training mean and std).
autoscaled_x_train = (x_train - x_train.mean()) / x_train.std()
autoscaled_x_test = (x_test - x_train.mean()) / x_train.std()
if method_name == 'svm':
    # To save time, optimise gamma once up-front by maximising the variance
    # of the Gram matrix (see sample_functions).
    optimal_svm_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x_train, svm_gammas)
number_of_x_variables = int(np.ceil(x_train.shape[1] * rate_of_selected_x_variables))
print('各サブデータセットの説明変数の数 :', number_of_x_variables)
# NOTE(review): estimated_y_train_all is created but never filled anywhere in
# this script — it appears to be a leftover from a variant that also collected
# training-set predictions.
estimated_y_train_all = pd.DataFrame()
selected_x_variable_numbers = []  # descriptor indices chosen for each sub-dataset
submodels = []  # fitted sub-models
for submodel_number in range(number_of_submodels):
    print(submodel_number + 1, '/', number_of_submodels)  # progress display
    # Descriptor selection: draw a uniform random value per descriptor and
    # keep the ones with the smallest values (i.e. a uniform random subset).
    random_x_variables = np.random.rand(x_train.shape[1])
    selected_x_variable_numbers_tmp = random_x_variables.argsort()[:number_of_x_variables]
    selected_autoscaled_x_train = autoscaled_x_train.iloc[:, selected_x_variable_numbers_tmp]
    selected_x_variable_numbers.append(selected_x_variable_numbers_tmp)
    if method_name == 'knn':
        # Optimise k via cross-validation.
        accuracy_in_cv_all = []  # CV accuracy for each candidate k
        ks = []  # candidate k values
        for k in range(1, max_number_of_k + 1):
            model = KNeighborsClassifier(n_neighbors=k, metric='euclidean')  # declare k-NN model
            # Cross-validated predictions, converted to a DataFrame.
            estimated_y_in_cv = pd.DataFrame(
                model_selection.cross_val_predict(model, selected_autoscaled_x_train, y_train,
                                                  cv=fold_number))
            accuracy_in_cv = metrics.accuracy_score(y_train, estimated_y_in_cv)  # CV accuracy
            accuracy_in_cv_all.append(accuracy_in_cv)
            ks.append(k)
        optimal_k = ks[accuracy_in_cv_all.index(max(accuracy_in_cv_all))]
        submodel = KNeighborsClassifier(n_neighbors=optimal_k, metric='euclidean')  # declare k-NN model
    elif method_name == 'svm':
        # Optimise C via cross-validation (gamma fixed to the up-front value).
        model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', gamma=optimal_svm_gamma),
                                   {'C': svm_cs}, cv=fold_number)
        model_in_cv.fit(selected_autoscaled_x_train, y_train)
        optimal_svm_c = model_in_cv.best_params_['C']
        # Then optimise gamma via cross-validation with the chosen C.
        model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', C=optimal_svm_c),
                                   {'gamma': svm_gammas}, cv=fold_number)
        model_in_cv.fit(selected_autoscaled_x_train, y_train)
        optimal_svm_gamma = model_in_cv.best_params_['gamma']
        submodel = svm.SVC(kernel='rbf', C=optimal_svm_c, gamma=optimal_svm_gamma)  # declare SVM model
    elif method_name == 'rf':
        # Optimise the per-tree descriptor fraction via the OOB (out-of-bag) score.
        accuracy_oob = []
        for index, x_variables_rate in enumerate(rf_x_variables_rates):
            model_in_validation = RandomForestClassifier(n_estimators=rf_number_of_trees, max_features=int(
                max(math.ceil(selected_autoscaled_x_train.shape[1] * x_variables_rate), 1)), oob_score=True)
            model_in_validation.fit(selected_autoscaled_x_train, y_train)
            accuracy_oob.append(model_in_validation.oob_score_)
        optimal_x_variables_rate = rf_x_variables_rates[accuracy_oob.index(max(accuracy_oob))]
        submodel = RandomForestClassifier(n_estimators=rf_number_of_trees,
                                           max_features=int(max(math.ceil(
                                               selected_autoscaled_x_train.shape[1] * optimal_x_variables_rate), 1)),
                                           oob_score=True)  # declare RF model
    submodel.fit(selected_autoscaled_x_train, y_train)  # fit the sub-model
    submodels.append(submodel)
# Persist the per-sub-dataset descriptor indices and the fitted sub-models.
# Files with the same names are overwritten.
pd.to_pickle(selected_x_variable_numbers, 'selected_x_variable_numbers.bin')
pd.to_pickle(submodels, 'submodels.bin')
# Reload the descriptor indices and sub-models.
# Reloading right after saving is redundant here, but demonstrates that a
# saved ensemble can later predict new samples without re-training.
selected_x_variable_numbers = pd.read_pickle('selected_x_variable_numbers.bin')
submodels = pd.read_pickle('submodels.bin')
# Predict y for the test data.
# estimated_y_test_all = pd.DataFrame()  # (optional) per-sub-model test predictions
estimated_y_test_count = np.zeros([x_test.shape[0], len(class_types)])  # per-class vote counts
for submodel_number in range(number_of_submodels):
    # Select the descriptors this sub-model was trained on.
    selected_autoscaled_x_test = autoscaled_x_test.iloc[:, selected_x_variable_numbers[submodel_number]]
    # Predict the test-set classes with this sub-model.
    estimated_y_test = pd.DataFrame(
        submodels[submodel_number].predict(selected_autoscaled_x_test))  # predictions as a DataFrame
    # estimated_y_test_all = pd.concat([estimated_y_test_all, estimated_y_test], axis=1)
    for sample_number in range(estimated_y_test.shape[0]):
        estimated_y_test_count[sample_number, class_types.index(estimated_y_test.iloc[sample_number, 0])] += 1
# Per-class vote counts on the test data.
estimated_y_test_count = pd.DataFrame(estimated_y_test_count, index=x_test.index, columns=class_types)
estimated_y_test_count.to_csv('estimated_y_test_count.csv')  # saved to csv; overwrites an existing file
# Per-class probabilities on the test data (vote fraction).
estimated_y_test_probability = estimated_y_test_count / number_of_submodels
estimated_y_test_probability.to_csv('estimated_y_test_probability.csv')  # saved to csv; overwrites an existing file
# Majority-vote prediction on the test data.
estimated_y_test = pd.DataFrame(estimated_y_test_count.idxmax(axis=1), columns=['estimated_class'])
estimated_y_test.to_csv('estimated_y_test.csv')  # saved to csv; overwrites an existing file
# Combine probabilities, actual classes and predictions for inspection.
y_test = pd.DataFrame(y_test)  # y_test is a Series, so set row/column labels separately
y_test.columns = ['actual_class']
estimated_y_test_for_check = pd.concat([estimated_y_test_probability, y_test, estimated_y_test], axis=1)  # concatenate
estimated_y_test_for_check.to_csv('estimated_y_test_for_check.csv')  # saved to csv; overwrites an existing file
| [
"pandas.DataFrame",
"numpy.ceil",
"math.ceil",
"numpy.random.rand",
"sklearn.model_selection.train_test_split",
"pandas.to_pickle",
"sample_functions.gamma_optimization_with_variance",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.cross_val_predict",
"sklearn.neighbors.KNeighborsClass... | [((2093, 2183), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'number_of_test_samples', 'shuffle': '(True)', 'random_state': '(0)'}), '(x, y, test_size=number_of_test_samples, shuffle=True,\n random_state=0)\n', (2109, 2183), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3657, 3671), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3669, 3671), True, 'import pandas as pd\n'), ((7187, 7263), 'pandas.to_pickle', 'pd.to_pickle', (['selected_x_variable_numbers', '"""selected_x_variable_numbers.bin"""'], {}), "(selected_x_variable_numbers, 'selected_x_variable_numbers.bin')\n", (7199, 7263), True, 'import pandas as pd\n'), ((7265, 7305), 'pandas.to_pickle', 'pd.to_pickle', (['submodels', '"""submodels.bin"""'], {}), "(submodels, 'submodels.bin')\n", (7277, 7305), True, 'import pandas as pd\n'), ((7479, 7528), 'pandas.read_pickle', 'pd.read_pickle', (['"""selected_x_variable_numbers.bin"""'], {}), "('selected_x_variable_numbers.bin')\n", (7493, 7528), True, 'import pandas as pd\n'), ((7542, 7573), 'pandas.read_pickle', 'pd.read_pickle', (['"""submodels.bin"""'], {}), "('submodels.bin')\n", (7556, 7573), True, 'import pandas as pd\n'), ((8470, 8547), 'pandas.DataFrame', 'pd.DataFrame', (['estimated_y_test_count'], {'index': 'x_test.index', 'columns': 'class_types'}), '(estimated_y_test_count, index=x_test.index, columns=class_types)\n', (8482, 8547), True, 'import pandas as pd\n'), ((9135, 9155), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (9147, 9155), True, 'import pandas as pd\n'), ((9248, 9323), 'pandas.concat', 'pd.concat', (['[estimated_y_test_probability, y_test, estimated_y_test]'], {'axis': '(1)'}), '([estimated_y_test_probability, y_test, estimated_y_test], axis=1)\n', (9257, 9323), True, 'import pandas as pd\n'), ((1167, 1197), 'numpy.arange', 'np.arange', (['(-5)', '(11)'], {'dtype': 'float'}), '(-5, 11, 
dtype=float)\n', (1176, 1197), True, 'import numpy as np\n'), ((1217, 1248), 'numpy.arange', 'np.arange', (['(-20)', '(11)'], {'dtype': 'float'}), '(-20, 11, dtype=float)\n', (1226, 1248), True, 'import numpy as np\n'), ((1315, 1344), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {'dtype': 'float'}), '(1, 11, dtype=float)\n', (1324, 1344), True, 'import numpy as np\n'), ((3409, 3494), 'sample_functions.gamma_optimization_with_variance', 'sample_functions.gamma_optimization_with_variance', (['autoscaled_x_train', 'svm_gammas'], {}), '(autoscaled_x_train,\n svm_gammas)\n', (3458, 3494), False, 'import sample_functions\n'), ((3522, 3578), 'numpy.ceil', 'np.ceil', (['(x_train.shape[1] * rate_of_selected_x_variables)'], {}), '(x_train.shape[1] * rate_of_selected_x_variables)\n', (3529, 3578), True, 'import numpy as np\n'), ((4082, 4114), 'numpy.random.rand', 'np.random.rand', (['x_train.shape[1]'], {}), '(x_train.shape[1])\n', (4096, 4114), True, 'import numpy as np\n'), ((5253, 5316), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'optimal_k', 'metric': '"""euclidean"""'}), "(n_neighbors=optimal_k, metric='euclidean')\n", (5273, 5316), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4636, 4691), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k', 'metric': '"""euclidean"""'}), "(n_neighbors=k, metric='euclidean')\n", (4656, 4691), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4995, 5045), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_train', 'estimated_y_in_cv'], {}), '(y_train, estimated_y_in_cv)\n', (5017, 5045), False, 'from sklearn import metrics, model_selection, svm\n'), ((5980, 6043), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'C': 'optimal_svm_c', 'gamma': 'optimal_svm_gamma'}), "(kernel='rbf', C=optimal_svm_c, gamma=optimal_svm_gamma)\n", (5987, 6043), False, 'from sklearn import metrics, model_selection, 
svm\n'), ((4818, 4916), 'sklearn.model_selection.cross_val_predict', 'model_selection.cross_val_predict', (['model', 'selected_autoscaled_x_train', 'y_train'], {'cv': 'fold_number'}), '(model, selected_autoscaled_x_train,\n y_train, cv=fold_number)\n', (4851, 4916), False, 'from sklearn import metrics, model_selection, svm\n'), ((5425, 5471), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': 'optimal_svm_gamma'}), "(kernel='rbf', gamma=optimal_svm_gamma)\n", (5432, 5471), False, 'from sklearn import metrics, model_selection, svm\n'), ((5719, 5757), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'C': 'optimal_svm_c'}), "(kernel='rbf', C=optimal_svm_c)\n", (5726, 5757), False, 'from sklearn import metrics, model_selection, svm\n'), ((6830, 6904), 'math.ceil', 'math.ceil', (['(selected_autoscaled_x_train.shape[1] * optimal_x_variables_rate)'], {}), '(selected_autoscaled_x_train.shape[1] * optimal_x_variables_rate)\n', (6839, 6904), False, 'import math\n'), ((6365, 6431), 'math.ceil', 'math.ceil', (['(selected_autoscaled_x_train.shape[1] * x_variables_rate)'], {}), '(selected_autoscaled_x_train.shape[1] * x_variables_rate)\n', (6374, 6431), False, 'import math\n')] |
#-*- coding:utf-8 -*-
import os
import os.path
import sys
import cv2
import random
import torch
import torch.utils.data as data
import numpy as np
from utils import matrix_iof
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
# Detection class labels: index 0 is background, index 1 is 'face'.
WIDER_CLASSES = ('__background__', 'face')
def _crop(image, boxes, labels, img_dim):
    """Randomly crop a square region that still contains at least one face.

    Up to 250 random square ROIs are tried; a candidate is accepted only if
    some box lies fully inside it (matrix_iof >= 1), its centre is strictly
    inside the ROI, and the box is still larger than 8 px when rescaled to
    the training resolution *img_dim*.

    Returns (image, boxes, labels, pad_image_flag); pad_image_flag is True
    when no crop was taken, signalling the caller must still pad the image
    to a square.
    """
    height, width, _ = image.shape
    pad_image_flag = True
    for _ in range(250):
        # 20% of attempts keep the full short side (scale 1); otherwise the
        # square side is a random fraction in [0.3, 1) of the short side.
        if random.uniform(0, 1) <= 0.2:
            scale = 1
        else:
            scale = random.uniform(0.3, 1.)
        short_side = min(width, height)
        w = int(scale * short_side)
        h = w
        # Random top-left corner (l, t) such that the ROI fits in the image.
        if width == w:
            l = 0
        else:
            l = random.randrange(width - w)
        if height == h:
            t = 0
        else:
            t = random.randrange(height - h)
        roi = np.array((l, t, l + w, t + h))
        # Accept only if at least one box is fully covered by the ROI
        # (value >= 1 from utils.matrix_iof).
        value = matrix_iof(boxes, roi[np.newaxis])
        flag = (value >= 1)
        if not flag.any():
            continue
        # Keep boxes whose centre falls strictly inside the ROI.
        centers = (boxes[:, 0:2] + boxes[:, 2:4]) / 2
        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask_a].copy()
        labels_t = labels[mask_a].copy()
        if boxes_t.shape[0] == 0:
            continue
        #the cropped image
        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
        #to avoid the TL corner being out of the roi boundary
        boxes_t[:, 0:2] = np.maximum(boxes_t[:, :2], roi[:2])
        #to avoid the BR corner being out of the roi boundary
        boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], roi[2:4])
        #shift all points (x,y) according to the TL of the roi
        boxes_t[:, 0::2] -= roi[0]
        boxes_t[:, 1::2] -= roi[1]
        # make sure that the cropped image contains at least one face > 8 pixel at training image scale
        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
        mask_b = np.minimum(b_w_t, b_h_t) > 8.0
        boxes_t = boxes_t[mask_b]
        labels_t = labels_t[mask_b]
        if boxes_t.shape[0] == 0:
            continue
        pad_image_flag = False
        return image_t, boxes_t, labels_t, pad_image_flag
    return image, boxes, labels, pad_image_flag
def _distort(image):
    """Apply random photometric distortions to a copy of a BGR uint8 image.

    Brightness, contrast, saturation and hue are each perturbed with
    probability 1/2. Two orderings are chosen at random: contrast is adjusted
    either before or after the HSV-space (saturation/hue) distortions.
    """

    def _convert(image, alpha=1, beta=0):
        # In-place linear pixel transform image*alpha + beta, clipped to [0, 255].
        tmp = image.astype(float) * alpha + beta
        tmp[tmp < 0] = 0
        tmp[tmp > 255] = 255
        image[:] = tmp

    image = image.copy()
    if random.randrange(2):
        #brightness distortion
        if random.randrange(2):
            _convert(image, beta=random.uniform(-32, 32))
        #contrast distortion
        if random.randrange(2):
            _convert(image, alpha=random.uniform(0.5, 1.5))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        #saturation distortion
        if random.randrange(2):
            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
        #hue distortion
        if random.randrange(2):
            # OpenCV hue channel range is [0, 180); wrap around after shifting.
            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
            tmp %= 180
            image[:, :, 0] = tmp
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    else:
        #brightness distortion
        if random.randrange(2):
            _convert(image, beta=random.uniform(-32, 32))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        #saturation distortion
        if random.randrange(2):
            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
        #hue distortion
        if random.randrange(2):
            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
            tmp %= 180
            image[:, :, 0] = tmp
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        #contrast distortion (applied after the HSV distortions in this branch)
        if random.randrange(2):
            _convert(image, alpha=random.uniform(0.5, 1.5))
    return image
def _expand(image, boxes, fill, p):
if random.randrange(2):
return image, boxes
height, width, depth = image.shape
scale = random.uniform(1, p)
w = int(scale * width)
h = int(scale * height)
left = random.randint(0, w - width)
top = random.randint(0, h - height)
boxes_t = boxes.copy()
boxes_t[:, :2] += (left, top)
boxes_t[:, 2:] += (left, top)
expand_image = np.empty(
(h, w, depth),
dtype=image.dtype)
expand_image[:, :] = fill
expand_image[top:top + height, left:left + width] = image
image = expand_image
return image, boxes_t
def _mirror(image, boxes):
_, width, _ = image.shape
if random.randrange(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return image, boxes
def _pad_to_square(image, rgb_mean, pad_image_flag):
    """Pad *image* at the bottom/right into a square canvas filled with
    *rgb_mean*. When *pad_image_flag* is False the image is returned untouched.
    """
    if not pad_image_flag:
        return image
    side = max(image.shape[0], image.shape[1])
    canvas = np.empty((side, side, 3), dtype=image.dtype)
    canvas[:, :] = rgb_mean
    # Original content goes to the top-left corner of the square canvas.
    canvas[:image.shape[0], :image.shape[1]] = image
    return canvas
def _resize_subtract_mean(image, insize, rgb_mean):
    """Resize to (insize, insize) with a randomly chosen interpolation method,
    then subtract the per-channel mean; result dtype is float32."""
    methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA,
               cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
    resized = cv2.resize(image, (insize, insize),
                         interpolation=methods[random.randrange(5)])
    out = resized.astype(np.float32)
    out -= rgb_mean  # in-place subtraction keeps the float32 dtype
    return out
class PreProc(object):
    """Training-time preprocessing pipeline: random crop, photometric
    distortion, padding to square, and resize with mean subtraction.

    Mirroring is intentionally disabled: flipping would also have to swap
    left/right landmarks, which this pipeline does not handle.
    """

    def __init__(self, img_dim, rgb_means):
        self.img_dim = img_dim
        self.rgb_means = rgb_means

    def __call__(self, image, targets):
        assert targets.shape[0] > 0, "this image does not have gt"
        rects = targets[:, :-1].copy()
        classes = targets[:, -1].copy()
        img, rects, classes, need_pad = _crop(image, rects, classes, self.img_dim)
        img = _distort(img)
        img = _pad_to_square(img, self.rgb_means, need_pad)
        # _mirror is deliberately not applied here (see class docstring).
        # Normalise box coordinates to [0, 1] relative to the padded image.
        img_h, img_w, _ = img.shape
        rects[:, 0::2] /= img_w
        rects[:, 1::2] /= img_h
        img = _resize_subtract_mean(img, self.img_dim, self.rgb_means)
        labels = np.expand_dims(classes, 1)
        return img, np.hstack((rects, labels))
class AnnotationTransform(object):
    """Convert a VOC-style XML annotation into an ndarray of box rows.

    Each row is [xmin, ymin, xmax, ymax, label_index] (float). Objects
    flagged as difficult are dropped unless ``keep_difficult`` is True.

    Arguments:
        class_to_ind (dict, optional): mapping from class name to index;
            defaults to the positional index over WIDER_CLASSES.
        keep_difficult (bool, optional): keep difficult instances
            (default: True).
    """

    def __init__(self, class_to_ind=None, keep_difficult=True):
        self.class_to_ind = class_to_ind or dict(
            zip(WIDER_CLASSES, range(len(WIDER_CLASSES))))
        self.keep_difficult = keep_difficult

    def __call__(self, target):
        """Parse *target* (an ET.Element) into an (N, 5) float ndarray."""
        rows = []
        for obj in target.iter('object'):
            is_difficult = int(obj.find('difficult').text) == 1
            if not self.keep_difficult and is_difficult:
                continue
            label = obj.find('name').text.lower().strip()
            box_node = obj.find('bndbox')
            record = [int(box_node.find(tag).text)
                      for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
            # (Landmark parsing existed here historically; rectangles only now.)
            record.append(self.class_to_ind[label])
            rows.append(record)
        if not rows:
            return np.empty((0, 5))
        return np.array(rows, dtype=float)
class FaceRectLMDataset(data.Dataset):
    """Face data set with rectangles and/or landmarks
    If there is landmark data for that face, the landmarks will be loaded
    Otherwise, the landmark values will be zeros
    input is image, target is annotation

    Arguments:
        root (string): filepath to WIDER folder; expected to contain
            'annotations/', 'images/' and an 'img_list.txt' index file with
            one "<image-id> <annotation-file>" pair per line
        target_transform (callable, optional): transformation to perform on the
            target `annotation`
            (eg: take in caption string, return tensor of word indices)
    """

    def __init__(self, root, img_dim, rgb_mean):
        self.root = root
        self.preproc = PreProc(img_dim, rgb_mean)
        self.target_transform = AnnotationTransform()
        self._annopath = os.path.join(self.root, 'annotations', '{}')
        self._imgpath = os.path.join(self.root, 'images', '{}', '{}')
        self.ids = list()
        with open(os.path.join(self.root, 'img_list.txt'), 'r') as f:
            self.ids = [tuple(line.split()) for line in f]

    def __getitem__(self, index):
        # img_id is (image identifier, annotation file name).
        img_id = self.ids[index]
        # Split the WIDER-style identifier "<event>--<rest>" into the event
        # directory name and the image file name.
        # NOTE(review): this string surgery assumes the event id appears at
        # the start of the identifier and is separated by '--'; verify against
        # the actual img_list.txt format.
        event_id = img_id[0].split('--')[0]
        event_name = event_id + img_id[0].split(event_id)[1][:-1]
        img_name = event_id + event_id.join(img_id[0].split(event_id)[2:])
        target = ET.parse(self._annopath.format(img_id[1])).getroot()
        img = cv2.imread(self._imgpath.format(event_name, img_name), cv2.IMREAD_COLOR)
        height, width, _ = img.shape
        if self.target_transform is not None:
            # XML -> rows of [xmin, ymin, xmax, ymax, label].
            target = self.target_transform(target)
        if self.preproc is not None:
            # Crop / distort / pad / resize augmentation.
            img, target = self.preproc(img, target)
        return torch.from_numpy(img), target

    def __len__(self):
        return len(self.ids)
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a
    different number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    # Fix: iterate directly (the enumerate indices were unused) and test for
    # ndarrays with isinstance(..., np.ndarray) instead of the obscure
    # type(np.empty(0)) construction.
    for sample in batch:
        for tup in sample:
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):
                # Annotations arrive as float64 numpy arrays; convert to
                # float32 tensors to match the network's input dtype.
                targets.append(torch.from_numpy(tup).float())
    return (torch.stack(imgs, 0), targets)
| [
"numpy.minimum",
"numpy.maximum",
"random.randint",
"torch.stack",
"random.uniform",
"cv2.cvtColor",
"numpy.empty",
"numpy.logical_and",
"numpy.expand_dims",
"numpy.hstack",
"random.randrange",
"numpy.array",
"torch.is_tensor",
"numpy.vstack",
"utils.matrix_iof",
"os.path.join",
"cv2... | [((2564, 2583), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (2580, 2583), False, 'import random\n'), ((4005, 4024), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (4021, 4024), False, 'import random\n'), ((4107, 4127), 'random.uniform', 'random.uniform', (['(1)', 'p'], {}), '(1, p)\n', (4121, 4127), False, 'import random\n'), ((4195, 4223), 'random.randint', 'random.randint', (['(0)', '(w - width)'], {}), '(0, w - width)\n', (4209, 4223), False, 'import random\n'), ((4234, 4263), 'random.randint', 'random.randint', (['(0)', '(h - height)'], {}), '(0, h - height)\n', (4248, 4263), False, 'import random\n'), ((4379, 4421), 'numpy.empty', 'np.empty', (['(h, w, depth)'], {'dtype': 'image.dtype'}), '((h, w, depth), dtype=image.dtype)\n', (4387, 4421), True, 'import numpy as np\n'), ((4649, 4668), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (4665, 4668), False, 'import random\n'), ((4990, 5044), 'numpy.empty', 'np.empty', (['(long_side, long_side, 3)'], {'dtype': 'image.dtype'}), '((long_side, long_side, 3), dtype=image.dtype)\n', (4998, 5044), True, 'import numpy as np\n'), ((5374, 5438), 'cv2.resize', 'cv2.resize', (['image', '(insize, insize)'], {'interpolation': 'interp_method'}), '(image, (insize, insize), interpolation=interp_method)\n', (5384, 5438), False, 'import cv2\n'), ((891, 921), 'numpy.array', 'np.array', (['(l, t, l + w, t + h)'], {}), '((l, t, l + w, t + h))\n', (899, 921), True, 'import numpy as np\n'), ((939, 973), 'utils.matrix_iof', 'matrix_iof', (['boxes', 'roi[np.newaxis]'], {}), '(boxes, roi[np.newaxis])\n', (949, 973), False, 'from utils import matrix_iof\n'), ((1493, 1528), 'numpy.maximum', 'np.maximum', (['boxes_t[:, :2]', 'roi[:2]'], {}), '(boxes_t[:, :2], roi[:2])\n', (1503, 1528), True, 'import numpy as np\n'), ((1617, 1654), 'numpy.minimum', 'np.minimum', (['boxes_t[:, 2:4]', 'roi[2:4]'], {}), '(boxes_t[:, 2:4], roi[2:4])\n', (1627, 1654), True, 'import numpy as np\n'), 
((2628, 2647), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (2644, 2647), False, 'import random\n'), ((2748, 2767), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (2764, 2767), False, 'import random\n'), ((2846, 2884), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (2858, 2884), False, 'import cv2\n'), ((2928, 2947), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (2944, 2947), False, 'import random\n'), ((3054, 3073), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (3070, 3073), False, 'import random\n'), ((3219, 3257), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (3231, 3257), False, 'import cv2\n'), ((3312, 3331), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (3328, 3331), False, 'import random\n'), ((3408, 3446), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (3420, 3446), False, 'import cv2\n'), ((3490, 3509), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (3506, 3509), False, 'import random\n'), ((3616, 3635), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (3632, 3635), False, 'import random\n'), ((3781, 3819), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (3793, 3819), False, 'import cv2\n'), ((3861, 3880), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (3877, 3880), False, 'import random\n'), ((5341, 5360), 'random.randrange', 'random.randrange', (['(5)'], {}), '(5)\n', (5357, 5360), False, 'import random\n'), ((6486, 6513), 'numpy.expand_dims', 'np.expand_dims', (['labels_t', '(1)'], {}), '(labels_t, 1)\n', (6500, 6513), True, 'import numpy as np\n'), ((6534, 6564), 'numpy.hstack', 'np.hstack', (['(boxes_t, labels_t)'], {}), '((boxes_t, labels_t))\n', (6543, 6564), True, 'import numpy as np\n'), ((7628, 
7644), 'numpy.empty', 'np.empty', (['(0, 5)'], {}), '((0, 5))\n', (7636, 7644), True, 'import numpy as np\n'), ((9693, 9737), 'os.path.join', 'os.path.join', (['self.root', '"""annotations"""', '"""{}"""'], {}), "(self.root, 'annotations', '{}')\n", (9705, 9737), False, 'import os\n'), ((9762, 9807), 'os.path.join', 'os.path.join', (['self.root', '"""images"""', '"""{}"""', '"""{}"""'], {}), "(self.root, 'images', '{}', '{}')\n", (9774, 9807), False, 'import os\n'), ((11497, 11517), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (11508, 11517), False, 'import torch\n'), ((477, 497), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (491, 497), False, 'import random\n'), ((562, 586), 'random.uniform', 'random.uniform', (['(0.3)', '(1.0)'], {}), '(0.3, 1.0)\n', (576, 586), False, 'import random\n'), ((748, 775), 'random.randrange', 'random.randrange', (['(width - w)'], {}), '(width - w)\n', (764, 775), False, 'import random\n'), ((848, 876), 'random.randrange', 'random.randrange', (['(height - h)'], {}), '(height - h)\n', (864, 876), False, 'import random\n'), ((2042, 2066), 'numpy.minimum', 'np.minimum', (['b_w_t', 'b_h_t'], {}), '(b_w_t, b_h_t)\n', (2052, 2066), True, 'import numpy as np\n'), ((8833, 8857), 'numpy.vstack', 'np.vstack', (['(res, bndbox)'], {}), '((res, bndbox))\n', (8842, 8857), True, 'import numpy as np\n'), ((10614, 10635), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (10630, 10635), False, 'import torch\n'), ((11284, 11304), 'torch.is_tensor', 'torch.is_tensor', (['tup'], {}), '(tup)\n', (11299, 11304), False, 'import torch\n'), ((1122, 1174), 'numpy.logical_and', 'np.logical_and', (['(roi[:2] < centers)', '(centers < roi[2:])'], {}), '(roi[:2] < centers, centers < roi[2:])\n', (1136, 1174), True, 'import numpy as np\n'), ((3122, 3145), 'random.randint', 'random.randint', (['(-18)', '(18)'], {}), '(-18, 18)\n', (3136, 3145), False, 'import random\n'), ((3684, 3707), 'random.randint', 
'random.randint', (['(-18)', '(18)'], {}), '(-18, 18)\n', (3698, 3707), False, 'import random\n'), ((9852, 9891), 'os.path.join', 'os.path.join', (['self.root', '"""img_list.txt"""'], {}), "(self.root, 'img_list.txt')\n", (9864, 9891), False, 'import os\n'), ((2682, 2705), 'random.uniform', 'random.uniform', (['(-32)', '(32)'], {}), '(-32, 32)\n', (2696, 2705), False, 'import random\n'), ((2803, 2827), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (2817, 2827), False, 'import random\n'), ((2992, 3016), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (3006, 3016), False, 'import random\n'), ((3366, 3389), 'random.uniform', 'random.uniform', (['(-32)', '(32)'], {}), '(-32, 32)\n', (3380, 3389), False, 'import random\n'), ((3554, 3578), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (3568, 3578), False, 'import random\n'), ((3916, 3940), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (3930, 3940), False, 'import random\n'), ((11377, 11388), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11385, 11388), True, 'import numpy as np\n'), ((11416, 11437), 'torch.from_numpy', 'torch.from_numpy', (['tup'], {}), '(tup)\n', (11432, 11437), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl import app
import os
import torch
import argparse
import nfsp_arm
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from utils.exper_logger import Logger
# Run on the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class NFSPPolicies(policy.Policy):
    """Joint policy over the two players' NFSP agents, evaluated in *mode*."""

    def __init__(self, env, nfsp_policies, mode):
        num_players = 2
        super(NFSPPolicies, self).__init__(env.game, list(range(num_players)))
        self._policies = nfsp_policies
        self._mode = mode
        # Shared observation buffer reused across action_probabilities calls.
        self._obs = {
            "info_state": [None] * num_players,
            "legal_actions": [None] * num_players,
        }

    def action_probabilities(self, state, player_id=None):
        """Return {action: probability} for the player to act at *state*."""
        acting = state.current_player()
        legal = state.legal_actions(acting)
        # Refresh the observation buffer for the acting player only.
        self._obs["current_player"] = acting
        self._obs["info_state"][acting] = state.information_state_tensor(acting)
        self._obs["legal_actions"][acting] = legal
        step = rl_environment.TimeStep(
            observations=self._obs, rewards=None, discounts=None, step_type=None)
        # Query the agent in the requested evaluation mode.
        with self._policies[acting].temp_mode_as(self._mode):
            probs = self._policies[acting].step(step, is_evaluation=True).probs
        return dict((a, probs[a]) for a in legal)
def main():
parser = argparse.ArgumentParser(description="NFSP LONR in kuhn args.")
parser = argparse.ArgumentParser("NFSP LONR in kuhn args.")
parser.add_argument('--seed', type=int, default=int(0), help="random seed")
parser.add_argument('--results_dir', type=str, default="learn_every_128", help="log direction of nfsp-lonr experiments")
parser.add_argument('--num_train_episodes', type=int, default=int(1e7), help="Number of training episodes.")
parser.add_argument('--eval_every', type=int, default=int(10000), help="Episode frequency at which agents are evaluated.")
parser.add_argument('--hidden_layers_sizes', type=list, default=[128, ], help= "Number of hidden units in the avg-net and Q-net.")
parser.add_argument('--anticipatory_param',type=float, default=0.1, help= "Prob of using the rl best response as episode policy.")
parser.add_argument('--batch_size', type=int,default=int(128), help= "Number of transitions to sample at each learning step." )
parser.add_argument('--learn_every', type=int,default=int(128), help="Number of steps between learning updates.")
parser.add_argument('--replay_buffer_capacity', type=int, default=int(2e5), help="replay_buffer_capacity")
parser.add_argument('--reservoir_buffer_capacity', type=int, default=int(2e6), help= "Size of the reservoir buffer.")
parser.add_argument('--sl_learning_rate', type=float,default=0.001, help="Learning rate for avg-policy sl network.")
parser.add_argument('--rl_q_learning_rate', type=float,default=1e-3, help="Learning rate for inner rl q network learning rate.")
parser.add_argument('--rl_v_learning_rate', type=float,default=1e-3, help="Learning rate for inner rl pi network learning rate.")
parser.add_argument('--discount_factor', type=float, default=1.0, help="Discount factor for future rewards.")
parser.add_argument('--arm_target_step_size',type=float, default=0.01, help= "Target value function parameters are updated via moving average with this rate.")
parser.add_argument('--critic_update_num', default=int(2), help="Number of every collected data being trained")
parser.add_argument('--min_buffer_size_to_learn', default=int(128), help="Number of samples in buffer before learning begins.")
parser.add_argument('--train_batch_size', type=int,default=int(64), help="Number of steps between learning updates.")
parser.add_argument('--optimizer_str', default="adam", help="choose from 'adam' and 'sgd'.")
parser.add_argument('--use_checkpoints', default=True, help="Save/load neural network weights.")
parser.add_argument('--loss_str', default="mse", help="choose from 'mse' and 'huber'.")
args = parser.parse_args()
game = "kuhn_poker"
num_players = 2
env_configs = {"players": num_players}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
absolute_dir = "./kuhn_nfsp_arm"
# final_dir = os.path.join(absolute_dir, args.optimizer_str, args.loss_str) # 正常实验的保存路径
final_dir = os.path.join(absolute_dir, args.results_dir) # 只有arm的保存路径
logger = Logger(final_dir)
checkpoint_dir=os.path.join(absolute_dir, args.results_dir, "tmp")
env.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(args.seed)
hidden_layers_sizes = [int(l) for l in args.hidden_layers_sizes]
agents = [
nfsp_arm.NFSP_ARM(device, idx, info_state_size, num_actions, hidden_layers_sizes, checkpoint_dir, args)
for idx in range(num_players)
]
expl_policies_avg = NFSPPolicies(env, agents, nfsp_arm.MODE.average_policy)
for ep in range(args.num_train_episodes):
if (ep + 1) % args.eval_every == 0:
losses = [agent.loss for agent in agents]
# print("Losses: " , losses)
expl = exploitability.exploitability(env.game, expl_policies_avg)
print("Episode:", ep + 1, "Exploitability AVG", expl, "losses:", losses)
print("_____________________________________")
# logging.info("Losses: %s", losses)
# expl = exploitability.exploitability(env.game, expl_policies_avg)
# logging.info("[%s] Exploitability AVG %s", ep + 1, expl)
# logging.info("_____________________________________________")
logger.log_performance(ep + 1, expl)
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
for agent in agents:
agent.step(time_step)
logger.close_files()
logger.plot('kuhn_nfsp_arm')
if __name__ == "__main__":
main() | [
"open_spiel.python.rl_environment.TimeStep",
"open_spiel.python.algorithms.exploitability.exploitability",
"numpy.random.seed",
"argparse.ArgumentParser",
"nfsp_arm.NFSP_ARM",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"open_spiel.python.rl_environment.Environment"... | [((1565, 1627), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""NFSP LONR in kuhn args."""'}), "(description='NFSP LONR in kuhn args.')\n", (1588, 1627), False, 'import argparse\n'), ((1641, 1691), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""NFSP LONR in kuhn args."""'], {}), "('NFSP LONR in kuhn args.')\n", (1664, 1691), False, 'import argparse\n'), ((4359, 4406), 'open_spiel.python.rl_environment.Environment', 'rl_environment.Environment', (['game'], {}), '(game, **env_configs)\n', (4385, 4406), False, 'from open_spiel.python import rl_environment\n'), ((4667, 4711), 'os.path.join', 'os.path.join', (['absolute_dir', 'args.results_dir'], {}), '(absolute_dir, args.results_dir)\n', (4679, 4711), False, 'import os\n'), ((4740, 4757), 'utils.exper_logger.Logger', 'Logger', (['final_dir'], {}), '(final_dir)\n', (4746, 4757), False, 'from utils.exper_logger import Logger\n'), ((4778, 4829), 'os.path.join', 'os.path.join', (['absolute_dir', 'args.results_dir', '"""tmp"""'], {}), "(absolute_dir, args.results_dir, 'tmp')\n", (4790, 4829), False, 'import os\n'), ((4859, 4887), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4876, 4887), False, 'import torch\n'), ((4892, 4929), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (4918, 4929), False, 'import torch\n'), ((4980, 5005), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4994, 5005), True, 'import numpy as np\n'), ((444, 469), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (467, 469), False, 'import torch\n'), ((1208, 1306), 'open_spiel.python.rl_environment.TimeStep', 'rl_environment.TimeStep', ([], {'observations': 'self._obs', 'rewards': 'None', 'discounts': 'None', 'step_type': 'None'}), '(observations=self._obs, rewards=None, discounts=\n None, step_type=None)\n', (1231, 1306), 
False, 'from open_spiel.python import rl_environment\n'), ((5100, 5207), 'nfsp_arm.NFSP_ARM', 'nfsp_arm.NFSP_ARM', (['device', 'idx', 'info_state_size', 'num_actions', 'hidden_layers_sizes', 'checkpoint_dir', 'args'], {}), '(device, idx, info_state_size, num_actions,\n hidden_layers_sizes, checkpoint_dir, args)\n', (5117, 5207), False, 'import nfsp_arm\n'), ((5537, 5595), 'open_spiel.python.algorithms.exploitability.exploitability', 'exploitability.exploitability', (['env.game', 'expl_policies_avg'], {}), '(env.game, expl_policies_avg)\n', (5566, 5595), False, 'from open_spiel.python.algorithms import exploitability\n')] |
#
# Script is based on this gist
# https://gist.github.com/StanislawAntol/656e3afe2d43864bb410d71e1c5789c1#file-freeze_mobilenet-py
# and ARM's conversion script
# https://github.com/ARM-software/ComputeLibrary/blob/master/scripts/tensorflow_data_extractor.py
#
# We can't directly use ARM's conversion script because of the open-source TensorFlow can't
# load metagraphs for models listed in
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md
# See this issue for details: https://github.com/tensorflow/models/issues/1564
# So we need to build a MobileNet model using module mobilenet_v1.py and restore checkpoints into it.
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
#
import os
import shutil
import tensorflow as tf
import numpy as np
import mobilenet_v1
from tensorflow.contrib import slim
SOURCE_PATH = ''
TARGET_PATH = os.path.join('.', 'npy')
MULTIPLIER = os.getenv('MOBILENET_MULTIPLIER')
RESOLUTION = os.getenv('MOBILENET_RESOLUTION')
if os.path.isdir(TARGET_PATH):
shutil.rmtree(TARGET_PATH)
os.mkdir(TARGET_PATH)
with tf.Session() as sess:
input_shape = (None, int(RESOLUTION), int(RESOLUTION), 3)
input_node = tf.placeholder(tf.float32, shape=input_shape, name="input")
with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training = False)):
mobilenet_v1.mobilenet_v1(input_node,
num_classes = 1001,
is_training = False,
depth_multiplier = float(MULTIPLIER))
saver = tf.train.Saver()
ckpt_file_prefix = 'mobilenet_v1_{}_{}.ckpt'.format(MULTIPLIER, RESOLUTION)
saver.restore(sess, os.path.join(SOURCE_PATH, ckpt_file_prefix))
for t in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
varname = t.name
if os.path.sep in t.name:
varname = varname.replace(os.path.sep, '_')
if varname.startswith('MobilenetV1_'):
varname = varname[12:]
if varname.endswith(':0'):
varname = varname[:-2]
target_file = os.path.join(TARGET_PATH, varname)
print("Saving variable {0} with shape {1} ...".format(varname, t.shape))
v = sess.run(t)
if len(v.shape) > 1:
v = v.transpose(3, 2, 0, 1)
v = np.ascontiguousarray(v)
np.save(target_file, v)
| [
"os.mkdir",
"numpy.save",
"tensorflow.train.Saver",
"mobilenet_v1.mobilenet_v1_arg_scope",
"os.path.isdir",
"tensorflow.get_collection",
"numpy.ascontiguousarray",
"tensorflow.Session",
"tensorflow.placeholder",
"shutil.rmtree",
"os.path.join",
"os.getenv"
] | [((905, 929), 'os.path.join', 'os.path.join', (['"""."""', '"""npy"""'], {}), "('.', 'npy')\n", (917, 929), False, 'import os\n'), ((943, 976), 'os.getenv', 'os.getenv', (['"""MOBILENET_MULTIPLIER"""'], {}), "('MOBILENET_MULTIPLIER')\n", (952, 976), False, 'import os\n'), ((990, 1023), 'os.getenv', 'os.getenv', (['"""MOBILENET_RESOLUTION"""'], {}), "('MOBILENET_RESOLUTION')\n", (999, 1023), False, 'import os\n'), ((1028, 1054), 'os.path.isdir', 'os.path.isdir', (['TARGET_PATH'], {}), '(TARGET_PATH)\n', (1041, 1054), False, 'import os\n'), ((1085, 1106), 'os.mkdir', 'os.mkdir', (['TARGET_PATH'], {}), '(TARGET_PATH)\n', (1093, 1106), False, 'import os\n'), ((1058, 1084), 'shutil.rmtree', 'shutil.rmtree', (['TARGET_PATH'], {}), '(TARGET_PATH)\n', (1071, 1084), False, 'import shutil\n'), ((1113, 1125), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1123, 1125), True, 'import tensorflow as tf\n'), ((1210, 1269), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'input_shape', 'name': '"""input"""'}), "(tf.float32, shape=input_shape, name='input')\n", (1224, 1269), True, 'import tensorflow as tf\n'), ((1576, 1592), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1590, 1592), True, 'import tensorflow as tf\n'), ((1750, 1798), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (1767, 1798), True, 'import tensorflow as tf\n'), ((1693, 1736), 'os.path.join', 'os.path.join', (['SOURCE_PATH', 'ckpt_file_prefix'], {}), '(SOURCE_PATH, ckpt_file_prefix)\n', (1705, 1736), False, 'import os\n'), ((2051, 2085), 'os.path.join', 'os.path.join', (['TARGET_PATH', 'varname'], {}), '(TARGET_PATH, varname)\n', (2063, 2085), False, 'import os\n'), ((2280, 2303), 'numpy.save', 'np.save', (['target_file', 'v'], {}), '(target_file, v)\n', (2287, 2303), True, 'import numpy as np\n'), ((1292, 1346), 'mobilenet_v1.mobilenet_v1_arg_scope', 
'mobilenet_v1.mobilenet_v1_arg_scope', ([], {'is_training': '(False)'}), '(is_training=False)\n', (1327, 1346), False, 'import mobilenet_v1\n'), ((2252, 2275), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['v'], {}), '(v)\n', (2272, 2275), True, 'import numpy as np\n')] |
import numpy as np
import pickle
from astropy.io import fits
import sunpy.map as mp
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
from reproject import reproject_interp
import matplotlib.colors as clr
from matplotlib import cm
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 14
import seaborn as sns
palette = sns.set_palette("colorblind")
clrs = sns.color_palette(palette)
from matplotlib import colors
from matplotlib import gridspec
from matplotlib.ticker import MultipleLocator
# load a crisp fits file and alter header for the isolated wavelength point
crisp_file = "crisp_l2_20140906_152724_8542_r00470.fits"
crisp_fits = fits.open(crisp_file)
header = crisp_fits[0].header
bl_x = header['CRVAL1'] + (0 - header['CRPIX1'])*header['CDELT1']
bl_y = header['CRVAL2'] + (0 - header['CRPIX2'])*header['CDELT2']
header['NAXIS']=2
header['NAXIS1']=147
header['NAXIS2']=193
header['CDELT1']=0.57
header['CDELT2']=0.57
header['CRVAL1']=bl_x
header['CRVAL2']=bl_y
header['CRPIX1']=0
header['CRPIX2']=0
header['PC1_2']=0.0
header['PC2_1']=0.0
header['DATE-OBS']=header['DATE-AVG']
del header['NAXIS3']
del header['CDELT3']
del header['CRPIX3']
del header['CRVAL3']
del header['CUNIT3']
del header['WSTART1']
del header['WWIDTH1']
del header['WDESC1']
del header['TWAVE1']
del header['CTYPE3']
del header['PC3_3']
#load preferred models results and turn into a map object
pref_data1 = pickle.load(open("pref_ca8542_10_post.p","rb"))
pref_data2 = pickle.load(open("pref_ca8542_10_pre.p","rb"))
pref_data1[pref_data1!=2]=0
pref_data2[pref_data2!=2]=0
pref_map1 = mp.Map(pref_data1,header)
pref_map2 = mp.Map(pref_data2,header)
# similar as above but for an intensity image for the footpoints
foot_data = np.load("ca8542_12_post.npy")
foot_data = foot_data[0,:,:]
foot_data = foot_data/np.max(foot_data)
footheader = header
# make it a map
foot_map = mp.Map(foot_data,footheader)
#load in wing data for umbra lines
um_data = np.load("cube_ca8542_00_post_0.1res.npy")
um_data = um_data[0,:,:]
um_data = um_data/np.max(um_data)
umheader = header
um_map = mp.Map(um_data,umheader)
# load in aia submap
aia_map1 = mp.Map("pre_171x.fits")
aia_map2 = mp.Map("post_171x.fits")
# reproject pref, foot and hmi onto aia plane
#pref
y1, footprint = reproject_interp((pref_data1, header), aia_map1.wcs, aia_map1.data.shape)
#clean up pref result
y1 = np.nan_to_num(y1,0)
y1[y1>0.9]=1
y1[y1!=1]=np.nan
y2, footprint = reproject_interp((pref_data2, header), aia_map1.wcs, aia_map1.data.shape)
#clean up pref result
y2 = np.nan_to_num(y2,0)
y2[y2>0.9]=1
y2[y2!=1]=np.nan
#wing
outputum, footprintum = reproject_interp((um_data, umheader), aia_map1.wcs, aia_map1.data.shape)
#foot
footout, footfootprint = reproject_interp((foot_data,footheader), aia_map1.wcs, aia_map1.data.shape)
#make into maps
out_pref1 = mp.Map(y1,aia_map1.wcs)
out_pref2 = mp.Map(y2,aia_map1.wcs)
out_um = mp.Map(outputum,aia_map1.wcs)
out_foot = mp.Map(footout, aia_map1.wcs)
#----------------------
# --- make combined plot!
#----------------------
aiacmap = pickle.load(open("aia171cmap.p","rb"))
contourcolors = colors.ListedColormap('w')
footcolors = colors.ListedColormap(clrs[2])
footplt = out_foot.data
footplt = np.nan_to_num(footplt,0)
# umbra contours
cont = out_um.data
cont=np.nan_to_num(cont,0)
conts = cont/np.max(cont)
m = np.zeros_like(conts)
m[60:-70,60:-70]=1
conts=m*conts
conts[conts==0]=np.nan
im1711 = aia_map1.data
im1712 = aia_map2.data
clip = 15 #no. of clip pixels off the edges
scale = 0.599489 # arcsec per pix
reduction = scale*clip
# this extent lines all features up with previous plots
extent = [-786.7816 + reduction, -667.9816 - reduction, -370.2548 + reduction, -250.8548 - reduction]
new_pref1 = out_pref1.data
new_pref2 = out_pref2.data
prefcolors = colors.ListedColormap([clrs[4],clrs[4]])
# start plotting
f, [ax1, ax2] = plt.subplots(1,2,figsize=(20,15))
im1711 = im1711[clip:-clip,clip:-clip]
im1712 = im1712[clip:-clip,clip:-clip]
new_pref1 = new_pref1[clip:-clip,clip:-clip]
new_pref2 = new_pref2[clip:-clip,clip:-clip]
conts = conts[clip:-clip,clip:-clip]
footplt = footplt[clip:-clip,clip:-clip]
ax1.imshow(np.log2(im1711),origin='lower',extent=extent,cmap=aiacmap,vmax=12)
ax1.imshow(new_pref2,origin='lower',cmap=prefcolors, extent=extent)
ax1.contour(conts, origin='lower', levels=[0.4], extent=extent, cmap=contourcolors)
ax1.contour(footplt/np.max(footplt),extent=extent,origin='lower',levels=[0.55],cmap=contourcolors,linestyles='--')
ax1.set_xlabel("Solar X [arcsec]")
ax1.set_ylabel("Solar Y [arcsec]")
ax1.xaxis.set_major_locator(MultipleLocator(20))
ax1.xaxis.set_minor_locator(MultipleLocator(10))
ax1.yaxis.set_major_locator(MultipleLocator(20))
ax1.yaxis.set_minor_locator(MultipleLocator(10))
ax1.set_title("AIA 171 \u212B, 16:36:35")
ax2.imshow(np.log2(im1712),origin='lower',extent=extent,cmap=aiacmap,vmax=12)
ax2.imshow(new_pref1,origin='lower',cmap=prefcolors, extent=extent)
ax2.contour(conts, origin='lower', levels=[0.4], extent=extent, cmap=contourcolors)
ax2.contour(footplt/np.max(footplt),extent=extent,origin='lower',levels=[0.55],cmap=contourcolors,linestyles="--")
ax2.set_xlabel("Solar X [arcsec]")
ax2.xaxis.set_major_locator(MultipleLocator(20))
ax2.xaxis.set_minor_locator(MultipleLocator(10))
ax2.yaxis.set_major_locator(MultipleLocator(20))
ax2.yaxis.set_minor_locator(MultipleLocator(10))
ax2.set_title("AIA 171 \u212B, 17:26:36")
plt.show()
| [
"numpy.load",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"sunpy.map.Map",
"numpy.nan_to_num",
"numpy.log2",
"matplotlib.pyplot.subplots",
"numpy.max",
"reproject.reproject_interp",
"astropy.io.fits.open",
"seaborn.color_palette",
"matplotlib.ticker.MultipleLocator",
"seaborn.set_palette",... | [((381, 410), 'seaborn.set_palette', 'sns.set_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (396, 410), True, 'import seaborn as sns\n'), ((418, 444), 'seaborn.color_palette', 'sns.color_palette', (['palette'], {}), '(palette)\n', (435, 444), True, 'import seaborn as sns\n'), ((700, 721), 'astropy.io.fits.open', 'fits.open', (['crisp_file'], {}), '(crisp_file)\n', (709, 721), False, 'from astropy.io import fits\n'), ((1631, 1657), 'sunpy.map.Map', 'mp.Map', (['pref_data1', 'header'], {}), '(pref_data1, header)\n', (1637, 1657), True, 'import sunpy.map as mp\n'), ((1669, 1695), 'sunpy.map.Map', 'mp.Map', (['pref_data2', 'header'], {}), '(pref_data2, header)\n', (1675, 1695), True, 'import sunpy.map as mp\n'), ((1773, 1802), 'numpy.load', 'np.load', (['"""ca8542_12_post.npy"""'], {}), "('ca8542_12_post.npy')\n", (1780, 1802), True, 'import numpy as np\n'), ((1921, 1950), 'sunpy.map.Map', 'mp.Map', (['foot_data', 'footheader'], {}), '(foot_data, footheader)\n', (1927, 1950), True, 'import sunpy.map as mp\n'), ((1997, 2038), 'numpy.load', 'np.load', (['"""cube_ca8542_00_post_0.1res.npy"""'], {}), "('cube_ca8542_00_post_0.1res.npy')\n", (2004, 2038), True, 'import numpy as np\n'), ((2126, 2151), 'sunpy.map.Map', 'mp.Map', (['um_data', 'umheader'], {}), '(um_data, umheader)\n', (2132, 2151), True, 'import sunpy.map as mp\n'), ((2185, 2208), 'sunpy.map.Map', 'mp.Map', (['"""pre_171x.fits"""'], {}), "('pre_171x.fits')\n", (2191, 2208), True, 'import sunpy.map as mp\n'), ((2220, 2244), 'sunpy.map.Map', 'mp.Map', (['"""post_171x.fits"""'], {}), "('post_171x.fits')\n", (2226, 2244), True, 'import sunpy.map as mp\n'), ((2314, 2387), 'reproject.reproject_interp', 'reproject_interp', (['(pref_data1, header)', 'aia_map1.wcs', 'aia_map1.data.shape'], {}), '((pref_data1, header), aia_map1.wcs, aia_map1.data.shape)\n', (2330, 2387), False, 'from reproject import reproject_interp\n'), ((2415, 2435), 'numpy.nan_to_num', 'np.nan_to_num', (['y1', 
'(0)'], {}), '(y1, 0)\n', (2428, 2435), True, 'import numpy as np\n'), ((2482, 2555), 'reproject.reproject_interp', 'reproject_interp', (['(pref_data2, header)', 'aia_map1.wcs', 'aia_map1.data.shape'], {}), '((pref_data2, header), aia_map1.wcs, aia_map1.data.shape)\n', (2498, 2555), False, 'from reproject import reproject_interp\n'), ((2583, 2603), 'numpy.nan_to_num', 'np.nan_to_num', (['y2', '(0)'], {}), '(y2, 0)\n', (2596, 2603), True, 'import numpy as np\n'), ((2664, 2736), 'reproject.reproject_interp', 'reproject_interp', (['(um_data, umheader)', 'aia_map1.wcs', 'aia_map1.data.shape'], {}), '((um_data, umheader), aia_map1.wcs, aia_map1.data.shape)\n', (2680, 2736), False, 'from reproject import reproject_interp\n'), ((2768, 2844), 'reproject.reproject_interp', 'reproject_interp', (['(foot_data, footheader)', 'aia_map1.wcs', 'aia_map1.data.shape'], {}), '((foot_data, footheader), aia_map1.wcs, aia_map1.data.shape)\n', (2784, 2844), False, 'from reproject import reproject_interp\n'), ((2872, 2896), 'sunpy.map.Map', 'mp.Map', (['y1', 'aia_map1.wcs'], {}), '(y1, aia_map1.wcs)\n', (2878, 2896), True, 'import sunpy.map as mp\n'), ((2908, 2932), 'sunpy.map.Map', 'mp.Map', (['y2', 'aia_map1.wcs'], {}), '(y2, aia_map1.wcs)\n', (2914, 2932), True, 'import sunpy.map as mp\n'), ((2941, 2971), 'sunpy.map.Map', 'mp.Map', (['outputum', 'aia_map1.wcs'], {}), '(outputum, aia_map1.wcs)\n', (2947, 2971), True, 'import sunpy.map as mp\n'), ((2982, 3011), 'sunpy.map.Map', 'mp.Map', (['footout', 'aia_map1.wcs'], {}), '(footout, aia_map1.wcs)\n', (2988, 3011), True, 'import sunpy.map as mp\n'), ((3153, 3179), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (['"""w"""'], {}), "('w')\n", (3174, 3179), False, 'from matplotlib import colors\n'), ((3194, 3224), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (['clrs[2]'], {}), '(clrs[2])\n', (3215, 3224), False, 'from matplotlib import colors\n'), ((3259, 3284), 'numpy.nan_to_num', 'np.nan_to_num', (['footplt', 
'(0)'], {}), '(footplt, 0)\n', (3272, 3284), True, 'import numpy as np\n'), ((3326, 3348), 'numpy.nan_to_num', 'np.nan_to_num', (['cont', '(0)'], {}), '(cont, 0)\n', (3339, 3348), True, 'import numpy as np\n'), ((3378, 3398), 'numpy.zeros_like', 'np.zeros_like', (['conts'], {}), '(conts)\n', (3391, 3398), True, 'import numpy as np\n'), ((3830, 3871), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (['[clrs[4], clrs[4]]'], {}), '([clrs[4], clrs[4]])\n', (3851, 3871), False, 'from matplotlib import colors\n'), ((3906, 3942), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 15)'}), '(1, 2, figsize=(20, 15))\n', (3918, 3942), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5467, 5469), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1871), 'numpy.max', 'np.max', (['foot_data'], {}), '(foot_data)\n', (1860, 1871), True, 'import numpy as np\n'), ((2082, 2097), 'numpy.max', 'np.max', (['um_data'], {}), '(um_data)\n', (2088, 2097), True, 'import numpy as np\n'), ((3361, 3373), 'numpy.max', 'np.max', (['cont'], {}), '(cont)\n', (3367, 3373), True, 'import numpy as np\n'), ((4198, 4213), 'numpy.log2', 'np.log2', (['im1711'], {}), '(im1711)\n', (4205, 4213), True, 'import numpy as np\n'), ((4630, 4649), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (4645, 4649), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4679, 4698), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (4694, 4698), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4728, 4747), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (4743, 4747), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4777, 4796), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (4792, 4796), False, 'from matplotlib.ticker import MultipleLocator\n'), 
((4852, 4867), 'numpy.log2', 'np.log2', (['im1712'], {}), '(im1712)\n', (4859, 4867), True, 'import numpy as np\n'), ((5249, 5268), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (5264, 5268), False, 'from matplotlib.ticker import MultipleLocator\n'), ((5298, 5317), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (5313, 5317), False, 'from matplotlib.ticker import MultipleLocator\n'), ((5347, 5366), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (5362, 5366), False, 'from matplotlib.ticker import MultipleLocator\n'), ((5396, 5415), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (5411, 5415), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4437, 4452), 'numpy.max', 'np.max', (['footplt'], {}), '(footplt)\n', (4443, 4452), True, 'import numpy as np\n'), ((5091, 5106), 'numpy.max', 'np.max', (['footplt'], {}), '(footplt)\n', (5097, 5106), True, 'import numpy as np\n')] |
import html
import random
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from common.client import AnimeApiClient
random.seed(12345)
class DatasetGenerator:
def __init__(self, api_client: AnimeApiClient):
self.api_client = api_client
self.anime_lists = {
'training': [],
'validation': [],
}
self.vectorizer = None
self.inverse_vectorizer = None
self.selector = None
self.dataset = {
'training': {
'ids': [],
'data': np.array([]),
'labels': [],
},
'validation': {
'ids': [],
'data': np.array([]),
'labels': [],
}
}
self.metadata = {}
def get_vectorized_dataset(self, set_name, max_df=0.4, min_df=4):
self.dataset[set_name]['data'] = self._vectorize_synopses(
set_name=set_name,
synopses=[anime['sanitized_synopsis'] for anime in self.anime_lists[set_name]],
max_df=max_df,
min_df=min_df)
return self.dataset[set_name]
def load_dataset(self, begin, end, validation_split=0.2):
imported_anime = self.api_client.get_anime_range(begin, end)
random.shuffle(imported_anime)
self.metadata['total_num_media_queried'] = len(imported_anime)
pruned_imported_anime = [
sanitized_anime
for sanitized_anime
in (
self._sanitize_synopsis(anime)
for anime in imported_anime
)
if sanitized_anime['sanitized_synopsis_length'] > 10
]
total_synopsis_length = sum(anime['sanitized_synopsis_length'] for anime in pruned_imported_anime)
self.anime_lists['training'], self.anime_lists['validation'] = self._train_val_split(data=pruned_imported_anime, validation_split=validation_split)
for set_name in ['training', 'validation']:
self.dataset[set_name]['ids'] = [anime['id'] for anime in self.anime_lists[set_name]]
self.dataset[set_name]['labels'] = [self._is_lewd(anime) for anime in self.anime_lists[set_name]]
self.metadata['num_media_in_{}_set'.format(set_name)] = len(self.dataset[set_name]['ids'])
self.metadata['num_lewd_media_in_{}_set'.format(set_name)] = sum(self.dataset[set_name]['labels'])
self.metadata['total_num_media_kept'] = len(pruned_imported_anime)
self.metadata['total_num_media_discarded'] = self.metadata['total_num_media_queried'] - self.metadata['total_num_media_kept']
self.metadata['average_synopsis_length'] = total_synopsis_length / self.metadata['total_num_media_kept']
self.metadata['num_media_in_validation_set'] = len(self.dataset['validation']['ids'])
return self.metadata
def _vectorize_synopses(self, synopses, set_name, max_df, min_df):
if not self.vectorizer:
self.vectorizer = TfidfVectorizer(**{
'ngram_range': (1, 2),
'strip_accents': 'unicode',
'decode_error': 'replace',
'analyzer': 'word',
'max_df': max_df,
'min_df': min_df,
})
if set_name == 'training':
self.vectorizer.fit(synopses)
self.dataset[set_name]['data'] = self.vectorizer.transform(synopses).astype('float32')
self.metadata['num_tokens'] = self.dataset[set_name]['data'].shape[1]
self.metadata['{}_set_data_vector_shape'.format(set_name)] = self.dataset[set_name]['data'].shape
return self.dataset[set_name]['data']
@staticmethod
def _train_val_split(data, validation_split):
return data[int(round(len(data) * validation_split)):], data[:int(round(len(data) * validation_split))]
def vector_to_ngram(self, index):
if not self.inverse_vectorizer:
self.inverse_vectorizer = {v: k for k, v in self.vectorizer.vocabulary_.items()}
return self.inverse_vectorizer[index]
@staticmethod
def _is_lewd(anime):
return bool(anime['is_nsfw'] or "Ecchi" in anime['tags'])
@staticmethod
def _sanitize_synopsis(anime):
if not anime['synopsis']:
anime['sanitized_synopsis'] = ''
anime['sanitized_synopsis_length'] = 0
return anime
anime['sanitized_synopsis'] = anime['synopsis'].strip()
anime['sanitized_synopsis'] = html.unescape(anime['sanitized_synopsis'])
# Remove URLs
anime['sanitized_synopsis'] = re.sub(r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)', '', anime['sanitized_synopsis'])
# Remove html elements
anime['sanitized_synopsis'] = re.sub(r'<[\w\/="!\s]+?>', '', anime['sanitized_synopsis'])
# If the line contains source and a colon, delete source and everything after, as well as parentheses if they exist
anime['sanitized_synopsis'] = re.sub(r'[\[\(]?\s*Source?\s*:.{0,40}\s*$', '', anime['sanitized_synopsis'], flags=re.IGNORECASE | re.MULTILINE)
# If the line contains source and a parentheses, delete it and everything after
anime['sanitized_synopsis'] = re.sub(r'[\[\(]\s*Source?.{0,40}\s*$', '', anime['sanitized_synopsis'], flags=re.IGNORECASE | re.MULTILINE)
# If the line contains from and a weird character in front of it, delete from and everything after
anime['sanitized_synopsis'] = re.sub(r'[~\[\(]\s*from.{0,40}\s*$', '', anime['sanitized_synopsis'], flags=re.IGNORECASE | re.MULTILINE)
anime['sanitized_synopsis'] = re.sub(r'\'’', '', anime['sanitized_synopsis'])
anime['sanitized_synopsis'] = re.sub(r'[^a-zA-Z]', ' ', anime['sanitized_synopsis']).lower().strip()
anime['sanitized_synopsis_length'] = len(anime['sanitized_synopsis'].split())
return anime
| [
"html.unescape",
"sklearn.feature_extraction.text.TfidfVectorizer",
"random.shuffle",
"random.seed",
"numpy.array",
"re.sub"
] | [((159, 177), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (170, 177), False, 'import random\n'), ((1317, 1347), 'random.shuffle', 'random.shuffle', (['imported_anime'], {}), '(imported_anime)\n', (1331, 1347), False, 'import random\n'), ((4513, 4555), 'html.unescape', 'html.unescape', (["anime['sanitized_synopsis']"], {}), "(anime['sanitized_synopsis'])\n", (4526, 4555), False, 'import html\n'), ((4616, 4777), 're.sub', 're.sub', (['"""https?:\\\\/\\\\/(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{1,256}\\\\.[a-zA-Z0-9()]{1,6}\\\\b([-a-zA-Z0-9()@:%_\\\\+.~#?&//=]*)"""', '""""""', "anime['sanitized_synopsis']"], {}), "(\n 'https?:\\\\/\\\\/(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{1,256}\\\\.[a-zA-Z0-9()]{1,6}\\\\b([-a-zA-Z0-9()@:%_\\\\+.~#?&//=]*)'\n , '', anime['sanitized_synopsis'])\n", (4622, 4777), False, 'import re\n'), ((4831, 4892), 're.sub', 're.sub', (['"""<[\\\\w\\\\/="!\\\\s]+?>"""', '""""""', "anime['sanitized_synopsis']"], {}), '(\'<[\\\\w\\\\/="!\\\\s]+?>\', \'\', anime[\'sanitized_synopsis\'])\n', (4837, 4892), False, 'import re\n'), ((5053, 5174), 're.sub', 're.sub', (['"""[\\\\[\\\\(]?\\\\s*Source?\\\\s*:.{0,40}\\\\s*$"""', '""""""', "anime['sanitized_synopsis']"], {'flags': '(re.IGNORECASE | re.MULTILINE)'}), "('[\\\\[\\\\(]?\\\\s*Source?\\\\s*:.{0,40}\\\\s*$', '', anime[\n 'sanitized_synopsis'], flags=re.IGNORECASE | re.MULTILINE)\n", (5059, 5174), False, 'import re\n'), ((5292, 5406), 're.sub', 're.sub', (['"""[\\\\[\\\\(]\\\\s*Source?.{0,40}\\\\s*$"""', '""""""', "anime['sanitized_synopsis']"], {'flags': '(re.IGNORECASE | re.MULTILINE)'}), "('[\\\\[\\\\(]\\\\s*Source?.{0,40}\\\\s*$', '', anime['sanitized_synopsis'],\n flags=re.IGNORECASE | re.MULTILINE)\n", (5298, 5406), False, 'import re\n'), ((5545, 5657), 're.sub', 're.sub', (['"""[~\\\\[\\\\(]\\\\s*from.{0,40}\\\\s*$"""', '""""""', "anime['sanitized_synopsis']"], {'flags': '(re.IGNORECASE | re.MULTILINE)'}), "('[~\\\\[\\\\(]\\\\s*from.{0,40}\\\\s*$', '', 
anime['sanitized_synopsis'],\n flags=re.IGNORECASE | re.MULTILINE)\n", (5551, 5657), False, 'import re\n'), ((5689, 5736), 're.sub', 're.sub', (['"""\\\\\'’"""', '""""""', "anime['sanitized_synopsis']"], {}), '("\\\\\'’", \'\', anime[\'sanitized_synopsis\'])\n', (5695, 5736), False, 'import re\n'), ((3026, 3187), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), "(**{'ngram_range': (1, 2), 'strip_accents': 'unicode',\n 'decode_error': 'replace', 'analyzer': 'word', 'max_df': max_df,\n 'min_df': min_df})\n", (3041, 3187), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((591, 603), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (599, 603), True, 'import numpy as np\n'), ((729, 741), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (737, 741), True, 'import numpy as np\n'), ((5775, 5828), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', "anime['sanitized_synopsis']"], {}), "('[^a-zA-Z]', ' ', anime['sanitized_synopsis'])\n", (5781, 5828), False, 'import re\n')] |
# -*- coding: utf-8 -*-
#
# network_params.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
pynest microcircuit parameters
------------------------------
Network parameters for the microcircuit.
<NAME>, <NAME>, <NAME>; May 2016
'''
import numpy as np
def get_mean_delays(mean_delay_exc, mean_delay_inh, number_of_pop):
    """Build the matrix of mean connection delays.

    Populations alternate excitatory/inhibitory, so even-indexed source
    columns get the excitatory delay and odd-indexed ones the inhibitory
    delay.

    Arguments
    ---------
    mean_delay_exc
        Delay of the excitatory connections.
    mean_delay_inh
        Delay of the inhibitory connections.
    number_of_pop
        Number of populations.

    Returns
    -------
    Matrix specifying the mean delay of all connections.
    """
    delays = np.zeros((number_of_pop, number_of_pop))
    delays[:, 0::2] = mean_delay_exc
    delays[:, 1::2] = mean_delay_inh
    return delays
def get_std_delays(std_delay_exc, std_delay_inh, number_of_pop):
    """Build the matrix of delay standard deviations.

    Even-indexed source columns (excitatory populations) get the
    excitatory value, odd-indexed ones the inhibitory value.

    Arguments
    ---------
    std_delay_exc
        Standard deviation of excitatory delays.
    std_delay_inh
        Standard deviation of inhibitory delays.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    Matrix specifying the standard deviation of all delays.
    """
    stds = np.zeros((number_of_pop, number_of_pop))
    stds[:, 0::2] = std_delay_exc
    stds[:, 1::2] = std_delay_inh
    return stds
def get_mean_PSP_matrix(PSP_e, g, number_of_pop):
    """Build the matrix of mean evoked postsynaptic potentials.

    Excitatory source columns (even index) get ``PSP_e``, inhibitory
    ones (odd index) get ``PSP_e * g``. The single connection from L4E
    (column 2) to L23E (row 0) is doubled.

    Arguments
    ---------
    PSP_e
        Mean evoked potential.
    g
        Relative strength of the inhibitory to excitatory connection.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    Matrix of the weights for the recurrent connections.
    """
    psp_matrix = np.zeros((number_of_pop, number_of_pop))
    psp_matrix[:, 0::2] = PSP_e
    psp_matrix[:, 1::2] = PSP_e * g
    # Doubled weight for the L4E -> L23E projection.
    psp_matrix[0, 2] = PSP_e * 2
    return psp_matrix
def get_std_PSP_matrix(PSP_rel, number_of_pop):
    """Build the relative-standard-deviation matrix of the PSPs.

    Every recurrent connection shares the same relative standard
    deviation, so the matrix is constant.

    Arguments
    ---------
    PSP_rel
        Relative standard deviation of the evoked postsynaptic potential.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    Matrix of the standard deviation of postsynaptic potentials.
    """
    return np.full((number_of_pop, number_of_pop), float(PSP_rel))
# Central parameter dictionary for the microcircuit network model.
net_dict = {
    # Neuron model.
    'neuron_model': 'iaf_psc_exp',
    # The default recording device is the spike_detector. If you also
    # want to record the membrane potentials of the neurons, add
    # 'voltmeter' to the list.
    'rec_dev': ['spike_detector'],
    # Names of the simulated populations.
    'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'],
    # Number of neurons in the different populations. The order of the
    # elements corresponds to the names of the variable 'populations'.
    'N_full': np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]),
    # Mean rates of the different populations in the non-scaled version
    # of the microcircuit. Necessary for the scaling of the network.
    # The order corresponds to the order in 'populations'.
    'full_mean_rates':
        np.array([0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523]),
    # Connection probabilities. The first index corresponds to the targets
    # and the second to the sources.
    'conn_probs':
        np.array(
            [[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.],
             [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0., 0.0042, 0.],
             [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.],
             [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0., 0.1057, 0.],
             [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.],
             [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.],
             [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252],
             [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]
            ),
    # Number of external connections to the different populations.
    # The order corresponds to the order in 'populations'.
    'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]),
    # Factor to scale the indegrees.
    'K_scaling': 0.1,
    # Factor to scale the number of neurons.
    'N_scaling': 0.1,
    # Mean amplitude of excitatory postsynaptic potential (in mV).
    'PSP_e': 0.15,
    # Relative standard deviation of the postsynaptic potential.
    'PSP_sd': 0.1,
    # Relative inhibitory synaptic strength (in relative units).
    'g': -4,
    # Rate of the Poissonian spike generator (in Hz).
    'bg_rate': 8.,
    # Turn Poisson input on or off (True or False).
    'poisson_input': True,
    # Delay of the Poisson generator (in ms).
    'poisson_delay': 1.5,
    # Mean delay of excitatory connections (in ms).
    'mean_delay_exc': 1.5,
    # Mean delay of inhibitory connections (in ms).
    'mean_delay_inh': 0.75,
    # Relative standard deviation of the delay of excitatory and
    # inhibitory connections (in relative units).
    'rel_std_delay': 0.5,
    # Parameters of the neurons.
    'neuron_params': {
        # Membrane potential average for the neurons (in mV).
        'V0_mean': -58.0,
        # Standard deviation of the average membrane potential (in mV).
        'V0_sd': 10.0,
        # Resting (leak reversal) membrane potential of the neurons (in mV).
        'E_L': -65.0,
        # Threshold potential of the neurons (in mV).
        'V_th': -50.0,
        # Membrane potential after a spike (in mV).
        'V_reset': -65.0,
        # Membrane capacitance (in pF).
        'C_m': 250.0,
        # Membrane time constant (in ms).
        'tau_m': 10.0,
        # Time constant of postsynaptic excitatory currents (in ms).
        'tau_syn_ex': 0.5,
        # Time constant of postsynaptic inhibitory currents (in ms).
        'tau_syn_in': 0.5,
        # Time constant of external postsynaptic excitatory current (in ms).
        # NOTE(review): 'tau_syn_E' duplicates the value of 'tau_syn_ex';
        # confirm which key the simulation script actually reads.
        'tau_syn_E': 0.5,
        # Refractory period of the neurons after a spike (in ms).
        't_ref': 2.0}
    }
# Derived connectivity matrices (PSPs and delays), computed from the
# scalar parameters in ``net_dict`` and merged back into it below.
updated_dict = {
    # PSP mean matrix.
    'PSP_mean_matrix': get_mean_PSP_matrix(
        net_dict['PSP_e'], net_dict['g'], len(net_dict['populations'])
        ),
    # PSP std matrix.
    'PSP_std_matrix': get_std_PSP_matrix(
        net_dict['PSP_sd'], len(net_dict['populations'])
        ),
    # mean delay matrix.
    'mean_delay_matrix': get_mean_delays(
        net_dict['mean_delay_exc'], net_dict['mean_delay_inh'],
        len(net_dict['populations'])
        ),
    # std delay matrix (scaled by the relative standard deviation).
    'std_delay_matrix': get_std_delays(
        net_dict['mean_delay_exc'] * net_dict['rel_std_delay'],
        net_dict['mean_delay_inh'] * net_dict['rel_std_delay'],
        len(net_dict['populations'])
        ),
    }
net_dict.update(updated_dict)
| [
"numpy.array",
"numpy.zeros"
] | [((1393, 1413), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1401, 1413), True, 'import numpy as np\n'), ((2054, 2074), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (2062, 2074), True, 'import numpy as np\n'), ((2862, 2882), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (2870, 2882), True, 'import numpy as np\n'), ((3638, 3658), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (3646, 3658), True, 'import numpy as np\n'), ((4253, 4314), 'numpy.array', 'np.array', (['[20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]'], {}), '([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948])\n', (4261, 4314), True, 'import numpy as np\n'), ((4547, 4613), 'numpy.array', 'np.array', (['[0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523]'], {}), '([0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523])\n', (4555, 4613), True, 'import numpy as np\n'), ((4753, 5293), 'numpy.array', 'np.array', (['[[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0], [0.1346, \n 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0], [0.0077, 0.0059, \n 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0], [0.0691, 0.0029, 0.0794, \n 0.1597, 0.0033, 0.0, 0.1057, 0.0], [0.1004, 0.0622, 0.0505, 0.0057, \n 0.0831, 0.3726, 0.0204, 0.0], [0.0548, 0.0269, 0.0257, 0.0022, 0.06, \n 0.3158, 0.0086, 0.0], [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, \n 0.0396, 0.2252], [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658,\n 0.1443]]'], {}), '([[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0], [\n 0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0], [0.0077, \n 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0], [0.0691, 0.0029, \n 0.0794, 0.1597, 0.0033, 0.0, 0.1057, 0.0], [0.1004, 0.0622, 0.0505, \n 0.0057, 0.0831, 0.3726, 0.0204, 0.0], [0.0548, 0.0269, 0.0257, 0.0022, \n 0.06, 0.3158, 0.0086, 0.0], [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, \n 0.0197, 0.0396, 0.2252], [0.0364, 0.001, 0.0034, 0.0005, 
0.0277, 0.008,\n 0.0658, 0.1443]])\n', (4761, 5293), True, 'import numpy as np\n'), ((5508, 5566), 'numpy.array', 'np.array', (['[1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]'], {}), '([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100])\n', (5516, 5566), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import figure
## Quick curve fitting of BSA adsorption data from the NIST paper.
x=[10.0 , 30.0 , 60.0] # Particle diameters (nm): 10, 30, 60
y=[0.023, 0.017, 0.014] # BSA molecules per square nm (spheres assumed; x converted to area)
cov=[60.0, 44.0, 36.0] # Coverage percentage corresponding to BSA per square nm (y)
def bsa_count(diams, style='single'):
    '''Return BSA molecules per unit surface area for particle diameter(s).

    Evaluates a linear fit (``style='single'``) or a two-segment piecewise
    fit (``style='dual'``) of the module-level NIST data (x, y) at the
    requested diameters.

    Arguments
    ---------
    diams : scalar or sequence
        Particle diameter(s) in nm.
    style : str
        'single' fits one line through all three data points; 'dual' fits
        separate lines below and above 30 nm.

    Returns
    -------
    The fitted BSA surface density: a poly1d evaluation for 'single',
    a list (one value per diameter) for 'dual'.

    Raises
    ------
    AttributeError
        If *style* is neither 'single' nor 'dual'.
    '''
    if style == 'single':
        fit = np.poly1d(np.polyfit(x, y, 1))
        return fit(diams)
    elif style == 'dual':
        # Two segments: 10-30 nm and 30-60 nm.
        low = np.poly1d(np.polyfit(x[0:2], y[0:2], 1))
        high = np.poly1d(np.polyfit(x[1:3], y[1:3], 1))
        # Accept scalars as well as sequences (the original crashed on
        # a bare float because it iterated over ``diams`` directly).
        return [low(d) if d < x[1] else high(d) for d in np.atleast_1d(diams)]
    # Fixed typo in the original message ('syle').
    raise AttributeError('style must be "single" or "dual", not %s' % style)
# IS THIS ACTUALLY IN USE
def _map_cov(bsa_area):
''' Given bsa surface area, map this to percent coverage using the fact
that 0.0386nm-2 is 100% coverage'''
return 100.0* ( bsa_area / 0.0386)
| [
"numpy.poly1d",
"numpy.polyfit"
] | [((601, 620), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (611, 620), True, 'import numpy as np\n'), ((633, 645), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (642, 645), True, 'import numpy as np\n'), ((811, 832), 'numpy.polyfit', 'np.polyfit', (['x1', 'y1', '(1)'], {}), '(x1, y1, 1)\n', (821, 832), True, 'import numpy as np\n'), ((846, 859), 'numpy.poly1d', 'np.poly1d', (['z1'], {}), '(z1)\n', (855, 859), True, 'import numpy as np\n'), ((952, 973), 'numpy.polyfit', 'np.polyfit', (['x2', 'y2', '(1)'], {}), '(x2, y2, 1)\n', (962, 973), True, 'import numpy as np\n'), ((987, 1000), 'numpy.poly1d', 'np.poly1d', (['z2'], {}), '(z2)\n', (996, 1000), True, 'import numpy as np\n')] |
import os
from abc import abstractmethod
import cv2
import numpy as np
from src.loaders.BaseLoader import BaseLoader
from src.loaders.depth_image.CameraConfig import CameraConfig
from src.model.SegmentedPointCloud import SegmentedPointCloud
from src.utils.point_cloud import depth_to_pcd_custom
class ImageLoader(BaseLoader):
    """Loads paired RGB/depth frames from disk and converts them to point clouds.

    Concrete subclasses describe the on-disk layout (directories, filename
    matching) and the camera parameters through the abstract hooks below.
    """

    def __init__(self, path):
        super().__init__(path)
        self.config = self._provide_config()
        rgb_dir, depth_dir = self._provide_rgb_and_depth_path(path)
        rgb_names, depth_names = self._provide_filenames(rgb_dir, depth_dir)
        # Absolute paths of every depth and RGB frame, in dataset order.
        self.depth_images = [os.path.join(depth_dir, name) for name in depth_names]
        self.rgb_images = [os.path.join(rgb_dir, name) for name in rgb_names]
        self.depth_to_rgb_index = self._match_rgb_with_depth(rgb_names, depth_names)

    def get_frame_count(self) -> int:
        """Number of depth frames available in the dataset."""
        return len(self.depth_images)

    @abstractmethod
    def _provide_config(self) -> CameraConfig:
        pass

    @abstractmethod
    def _provide_rgb_and_depth_path(self, path: str) -> (str, str):
        pass

    @abstractmethod
    def _match_rgb_with_depth(self, rgb_filenames, depth_filenames) -> list:
        pass

    @abstractmethod
    def _provide_filenames(self, rgb_path, depth_path) -> (list, list):
        pass

    def read_depth_image(self, frame_num) -> np.array:
        """Read the depth frame at *frame_num* with its original bit depth."""
        return cv2.imread(self.depth_images[frame_num], cv2.IMREAD_ANYDEPTH)

    def read_pcd(self, frame_num) -> SegmentedPointCloud:
        """Convert the depth frame at *frame_num* into a SegmentedPointCloud."""
        depth = self.read_depth_image(frame_num)
        intrinsics = self.config.get_cam_intrinsic(depth.shape)
        pcd, zero_depth = depth_to_pcd_custom(
            depth, intrinsics, self.config.get_initial_pcd_transform())
        return SegmentedPointCloud(
            pcd=pcd,
            unsegmented_cloud_indices=np.arange(depth.size),
            zero_depth_cloud_indices=zero_depth,
            structured_shape=(depth.shape[0], depth.shape[1])
        )
| [
"src.utils.point_cloud.depth_to_pcd_custom",
"cv2.imread",
"os.path.join",
"numpy.arange"
] | [((1482, 1531), 'cv2.imread', 'cv2.imread', (['depth_frame_path', 'cv2.IMREAD_ANYDEPTH'], {}), '(depth_frame_path, cv2.IMREAD_ANYDEPTH)\n', (1492, 1531), False, 'import cv2\n'), ((1827, 1898), 'src.utils.point_cloud.depth_to_pcd_custom', 'depth_to_pcd_custom', (['depth_image', 'cam_intrinsics', 'initial_pcd_transform'], {}), '(depth_image, cam_intrinsics, initial_pcd_transform)\n', (1846, 1898), False, 'from src.utils.point_cloud import depth_to_pcd_custom\n'), ((626, 660), 'os.path.join', 'os.path.join', (['depth_path', 'filename'], {}), '(depth_path, filename)\n', (638, 660), False, 'import os\n'), ((721, 753), 'os.path.join', 'os.path.join', (['rgb_path', 'filename'], {}), '(rgb_path, filename)\n', (733, 753), False, 'import os\n'), ((1995, 2022), 'numpy.arange', 'np.arange', (['depth_image.size'], {}), '(depth_image.size)\n', (2004, 2022), True, 'import numpy as np\n')] |
""" Simple plot
In this section, we want to draw the cosine and sine functions
on the same plot. Starting from the default settings, we'll
enrich the figure step by step to make it nicer.
First step is to get the data for the sine and cosine functions:
:lesson goal file: goal01.py
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
c, s = np.cos(x), np.sin(x)
# x is now a numpy array with 256 values ranging from -pi to +pi
# (included). c is the cosine (256 values) and s is the sine
# (256 values).
# To see the plot in PyCharm, first run this file normally.
# That should show the plot in a new window. If it shows up in
# the tool window inside PyCharm, you should probably disable
# the Python Scientific mode under File: Settings.
# Next, choose Run: Start Live Turtle. That should show you two
# plots: the current plot and the goal plot.
# Can you add the sine data to make the first plot match the
# second one?
plt.plot(x, c) # Copy this line and change it.
# Once they match exactly, the goal plot should disappear.
# Then you can open lesson 2.
# show() blocks until the plot window is closed.
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((344, 390), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(256)'], {'endpoint': '(True)'}), '(-np.pi, np.pi, 256, endpoint=True)\n', (355, 390), True, 'import numpy as np\n'), ((983, 997), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'c'], {}), '(x, c)\n', (991, 997), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1130, 1132), True, 'import matplotlib.pyplot as plt\n'), ((398, 407), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (404, 407), True, 'import numpy as np\n'), ((409, 418), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (415, 418), True, 'import numpy as np\n')] |
from sklearn.linear_model import LinearRegression
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import base64
from io import BytesIO
from sourceCode.func import create_t_figure
from sourceCode.func import create_p_figure
from sourceCode.func import create_b_figure
import platform
def getAns(dependent: str, independent: list, session: dict) -> dict:
    """Fit a log-linear OLS model (ln(y) = Xb) and collect its statistics.

    Arguments
    ---------
    dependent : str
        Column name of the dependent variable. Must be strictly positive
        everywhere, since its natural log is taken.
    independent : list
        Column names of the regressors.
    session : dict
        Holds "filename" (CSV path); on failure an "error" message is
        stored under "error".

    Returns
    -------
    dict with "flag" == 1 and the fit statistics (coefficients, standard
    errors, t values, p values, R-squared, F statistic, ...) on success,
    or "flag" == 0 on failure.
    """
    reg = LinearRegression()
    filename = session["filename"]
    data = pd.read_csv(filename)
    y = data[dependent].values
    xs = data[independent].values
    ans = {}
    try:
        # ln(y) is undefined for y <= 0, so reject such data up front.
        for i in y:
            if i <= 0:
                ans["flag"] = 0
                session[
                    "error"] = "dependent variable has 0. exp_reg_model not suit"
                return ans
        ys = np.log(y)
        reg.fit(xs, ys)
        ans["var"] = [dependent, "Constant"] + independent
        n = len(ys)
        k = len(independent) + 1  # number of regressors incl. intercept
        tmp = np.append(reg.intercept_, reg.coef_).tolist()
        ans["coefficient"] = [np.round(i, 4) for i in tmp]
        ans["R-squared"] = np.round(reg.score(xs, ys), 4)
        ans["Adjusted R-squared"] = np.round(
            1 - (1 - ans["R-squared"]) * (n - 1) / (n - k), 4)
        try:
            ans["F-value"] = np.round(
                ans["R-squared"] / (1 - ans["R-squared"]) * (n - k) / (k - 1),
                4)
        except ZeroDivisionError:
            # NOTE(review): with numpy scalars a zero denominator yields
            # inf/nan instead of raising, so this guard may never fire.
            ans["F-value"] = 999999
        ans["SS"] = {}
        tmp = sum((ys - ys.mean())**2)  # total sum of squares
        ans["observation"] = n
        ans["df"] = k
        ans["SS"]["ESS"] = np.round(tmp * ans["R-squared"], 4)
        ans["SS"]["RSS"] = np.round(tmp - ans["SS"]["ESS"], 4)
        ans["Prob>F"] = np.round(
            scipy.stats.f.sf(ans["F-value"], k - 1, n - k), 4)
        ans["Root MSE"] = np.round((ans["SS"]["RSS"] / (n - k))**0.5, 4)
        sigma_square = ans["SS"]["RSS"] / (n - k)
        one = np.ones(n)
        xs = np.insert(xs, 0, values=one, axis=1)  # prepend intercept column
        tmp = np.dot(xs.T, xs)
        # Covariance of the estimator: sigma^2 * (X'X)^-1
        var_cov_beta = sigma_square * np.linalg.inv(tmp)
        tmp = np.sqrt(var_cov_beta.diagonal()).tolist()
        ans["stderr"] = [np.round(i, 4) for i in tmp]
        try:
            ans["t"] = [
                np.round(ans["coefficient"][i] / ans["stderr"][i], 4)
                for i in range(k)
            ]
        except ZeroDivisionError:
            ans["t"] = 9999999
        # NOTE(review): this is the one-sided upper-tail probability; a
        # conventional two-sided p-value would be 2 * sf(|t|). Confirm the
        # intended statistic before changing the displayed values.
        ans["P>|t|"] = [
            np.round(scipy.stats.t.sf(i, n - k), 4) for i in ans["t"]
        ]
        ans["flag"] = 1
        return ans
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any data problem is reported via session.
        ans["flag"] = 0
        session["error"] = "please check your data"
        return ans
def format_(x):
    """Right-align *x* in a 10-character field (for plain-text table layout)."""
    return str(x).rjust(10)
def showAns(dependent: str, ans: dict, session: dict) -> str:
    """Render the regression statistics from getAns as HTML tables.

    Also writes the coefficient table to
    static/<username>/downloads/ans.csv so the user can download it.

    Arguments
    ---------
    dependent : str
        Name of the dependent variable (shown in the CSV header).
    ans : dict
        Statistics dict produced by getAns.
    session : dict
        Holds "username", used to build the download path.

    Returns
    -------
    The HTML markup on success, or an error heading when ans["flag"] != 1.
    """
    if ans["flag"] != 1:
        return """<h1>log_lin_model not suit</h1>"""

    def _fmt(value):
        # Right-align in a 10-character field so table columns line up.
        return str(value).rjust(10, ' ')

    username = session["username"]
    # BUG FIX: platform.system() returns the capitalised string "Windows",
    # so the original comparison with "windows" could never match and the
    # Windows-style path was never taken. Compare case-insensitively.
    if platform.system().lower() == "windows":
        csv_path = ".\\static\\{}\\downloads\\ans.csv".format(username)
    else:
        csv_path = "./static/{}/downloads/ans.csv".format(username)
    # ``with`` guarantees the handle is closed even if a write fails.
    with open(csv_path, "w", encoding="utf-8") as csvfile:
        print("dependent variable: ", dependent, file=csvfile)
        print("variables,Coefficients,Standard Errors,t values,Probabilities",
              file=csvfile)
        for i in range(ans["df"]):
            print(ans["var"][i + 1],
                  ans["coefficient"][i],
                  ans["stderr"][i],
                  ans["t"][i],
                  ans["P>|t|"][i],
                  sep=',',
                  file=csvfile)
    # ANOVA table: explained / residual / total sums of squares.
    html = """
        <table border="1" style="width: 600px;">
        <tr align="middle">
            <td>
                Source
            </td>
            <td>SS</td>
            <td>df</td>
            <td>MS</td>
        </tr>
        <tr align="right">
            <td>Model</td>
            <td>{}</td>
            <td>{}</td>
            <td>{}</td>
        </tr>
        <tr align="right">
            <td>Residual</td>
            <td>{}</td>
            <td>{}</td>
            <td>{}</td>
        </tr>
        <tr align="right">
            <td>Total</td>
            <td>{}</td>
            <td>{}</td>
            <td>{}</td>
        </tr>
        </table>
        """.format(
        _fmt(ans["SS"]["ESS"]), _fmt(ans["df"] - 1),
        _fmt(np.round(ans["SS"]["ESS"] / (ans["df"] - 1), 4)),
        _fmt(ans["SS"]["RSS"]), _fmt(ans["observation"] - ans["df"]),
        _fmt(
            np.round(ans["SS"]["RSS"] / (ans["observation"] - ans["df"]),
                     4)), _fmt(ans["SS"]["ESS"] + ans["SS"]["RSS"]),
        _fmt(ans["observation"] - 1),
        _fmt(
            np.round((ans["SS"]["ESS"] + ans["SS"]["RSS"]) /
                     (ans["observation"] - 1), 4)))
    # Goodness-of-fit summary (obs count, F statistic, R-squared, ...).
    html += """
        <table style="width: 600px;">
        <tr>
            <td align="left">
                Number of obs
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        <tr>
            <td align="left">
                F({},{})
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        <tr>
            <td align="left">
                Prob>F
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        <tr>
            <td align="left">
                R-square
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        <tr>
            <td align="left">
                Adj R-square
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        <tr>
            <td align="left">
                Root MSE
            </td>
            <td align="middle">=</td>
            <td align="right">{}</td>
        </tr>
        </table><table style="width:600px;" border="1">
        """.format(str(ans["observation"]), str(ans["df"] - 1),
                   str(ans["observation"] - ans["df"]), str(ans["F-value"]),
                   str(ans["Prob>F"]), str(ans["R-squared"]),
                   str(ans["Adjusted R-squared"]), str(ans["Root MSE"]))
    # One header row, then one row per estimated coefficient.
    for i in range(len(ans["var"])):
        if i == 0:
            html += """<tr><td>{}</td><td>Coef.</td><td>Std. Err.</td><td>t</td><td> P>|t| </td></tr>""".format(
                ans["var"][0])
        else:
            html += """<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>""".format(
                ans["var"][i], ans["coefficient"][i - 1],
                ans["stderr"][i - 1], ans["t"][i - 1], ans["P>|t|"][i - 1])
    return html + "</table>"
def showFigure(ans: dict) -> dict:
    """Build the t-value, coefficient and p-value figures for a successful fit.

    Returns a dict with keys "tvalue", "bvalue" and "pvalue", or an empty
    dict when the regression failed (ans["flag"] != 1).
    """
    if ans["flag"] != 1:
        return {}
    return {
        "tvalue": create_t_figure(ans),
        "bvalue": create_b_figure(ans),
        "pvalue": create_p_figure(ans),
    }
if __name__ == "__main__":
    # Smoke test. NOTE(review): getAns expects a session *dict* with a
    # "filename" key; passing the string "dcy" makes the lookup fail and
    # then crashes on the session["error"] assignment inside the handler.
    # Confirm the intended invocation.
    print(getAns("年龄", ["是否使用互联网", "log年收入"], "dcy"))
| [
"numpy.log",
"pandas.read_csv",
"numpy.ones",
"scipy.stats.f.sf",
"numpy.insert",
"sklearn.linear_model.LinearRegression",
"sourceCode.func.create_p_figure",
"numpy.append",
"numpy.linalg.inv",
"scipy.stats.t.sf",
"platform.system",
"numpy.dot",
"numpy.round",
"sourceCode.func.create_b_fig... | [((446, 464), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (462, 464), False, 'from sklearn.linear_model import LinearRegression\n'), ((511, 532), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (522, 532), True, 'import pandas as pd\n'), ((842, 851), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (848, 851), True, 'import numpy as np\n'), ((1201, 1260), 'numpy.round', 'np.round', (["(1 - (1 - ans['R-squared']) * (n - 1) / (n - k))", '(4)'], {}), "(1 - (1 - ans['R-squared']) * (n - 1) / (n - k), 4)\n", (1209, 1260), True, 'import numpy as np\n'), ((1636, 1671), 'numpy.round', 'np.round', (["(tmp * ans['R-squared'])", '(4)'], {}), "(tmp * ans['R-squared'], 4)\n", (1644, 1671), True, 'import numpy as np\n'), ((1699, 1734), 'numpy.round', 'np.round', (["(tmp - ans['SS']['ESS'])", '(4)'], {}), "(tmp - ans['SS']['ESS'], 4)\n", (1707, 1734), True, 'import numpy as np\n'), ((1858, 1906), 'numpy.round', 'np.round', (["((ans['SS']['RSS'] / (n - k)) ** 0.5)", '(4)'], {}), "((ans['SS']['RSS'] / (n - k)) ** 0.5, 4)\n", (1866, 1906), True, 'import numpy as np\n'), ((1969, 1979), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1976, 1979), True, 'import numpy as np\n'), ((1993, 2029), 'numpy.insert', 'np.insert', (['xs', '(0)'], {'values': 'one', 'axis': '(1)'}), '(xs, 0, values=one, axis=1)\n', (2002, 2029), True, 'import numpy as np\n'), ((2044, 2060), 'numpy.dot', 'np.dot', (['xs.T', 'xs'], {}), '(xs.T, xs)\n', (2050, 2060), True, 'import numpy as np\n'), ((7171, 7191), 'sourceCode.func.create_t_figure', 'create_t_figure', (['ans'], {}), '(ans)\n', (7186, 7191), False, 'from sourceCode.func import create_t_figure\n'), ((7216, 7236), 'sourceCode.func.create_b_figure', 'create_b_figure', (['ans'], {}), '(ans)\n', (7231, 7236), False, 'from sourceCode.func import create_b_figure\n'), ((7261, 7281), 'sourceCode.func.create_p_figure', 'create_p_figure', (['ans'], {}), '(ans)\n', (7276, 7281), 
False, 'from sourceCode.func import create_p_figure\n'), ((1078, 1092), 'numpy.round', 'np.round', (['i', '(4)'], {}), '(i, 4)\n', (1086, 1092), True, 'import numpy as np\n'), ((1316, 1390), 'numpy.round', 'np.round', (["(ans['R-squared'] / (1 - ans['R-squared']) * (n - k) / (k - 1))", '(4)'], {}), "(ans['R-squared'] / (1 - ans['R-squared']) * (n - k) / (k - 1), 4)\n", (1324, 1390), True, 'import numpy as np\n'), ((1781, 1827), 'scipy.stats.f.sf', 'scipy.stats.f.sf', (["ans['F-value']", '(k - 1)', '(n - k)'], {}), "(ans['F-value'], k - 1, n - k)\n", (1797, 1827), False, 'import scipy\n'), ((2099, 2117), 'numpy.linalg.inv', 'np.linalg.inv', (['tmp'], {}), '(tmp)\n', (2112, 2117), True, 'import numpy as np\n'), ((2199, 2213), 'numpy.round', 'np.round', (['i', '(4)'], {}), '(i, 4)\n', (2207, 2213), True, 'import numpy as np\n'), ((2923, 2940), 'platform.system', 'platform.system', ([], {}), '()\n', (2938, 2940), False, 'import platform\n'), ((1002, 1038), 'numpy.append', 'np.append', (['reg.intercept_', 'reg.coef_'], {}), '(reg.intercept_, reg.coef_)\n', (1011, 1038), True, 'import numpy as np\n'), ((2282, 2335), 'numpy.round', 'np.round', (["(ans['coefficient'][i] / ans['stderr'][i])", '(4)'], {}), "(ans['coefficient'][i] / ans['stderr'][i], 4)\n", (2290, 2335), True, 'import numpy as np\n'), ((2495, 2521), 'scipy.stats.t.sf', 'scipy.stats.t.sf', (['i', '(n - k)'], {}), '(i, n - k)\n', (2511, 2521), False, 'import scipy\n'), ((4550, 4597), 'numpy.round', 'np.round', (["(ans['SS']['ESS'] / (ans['df'] - 1))", '(4)'], {}), "(ans['SS']['ESS'] / (ans['df'] - 1), 4)\n", (4558, 4597), True, 'import numpy as np\n'), ((4717, 4781), 'numpy.round', 'np.round', (["(ans['SS']['RSS'] / (ans['observation'] - ans['df']))", '(4)'], {}), "(ans['SS']['RSS'] / (ans['observation'] - ans['df']), 4)\n", (4725, 4781), True, 'import numpy as np\n'), ((4937, 5014), 'numpy.round', 'np.round', (["((ans['SS']['ESS'] + ans['SS']['RSS']) / (ans['observation'] - 1))", '(4)'], {}), 
"((ans['SS']['ESS'] + ans['SS']['RSS']) / (ans['observation'] - 1), 4)\n", (4945, 5014), True, 'import numpy as np\n')] |
"""
Compile Darknet Models
=====================
This article is a test script to test darknet models with NNVM.
All the required models and libraries will be downloaded from the internet
by the script.
"""
import os
import requests
import sys
import urllib
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from nnvm import frontend
from nnvm.testing.darknet import __darknetffi__
import nnvm.compiler
if sys.version_info >= (3,):
import urllib.request as urllib2
else:
import urllib2
def _download(url, path, overwrite=False, sizecompare=False):
''' Download from internet'''
if os.path.isfile(path) and not overwrite:
if sizecompare:
file_size = os.path.getsize(path)
res_head = requests.head(url)
res_get = requests.get(url, stream=True)
if 'Content-Length' not in res_head.headers:
res_get = urllib2.urlopen(url)
urlfile_size = int(res_get.headers['Content-Length'])
if urlfile_size != file_size:
print("exist file got corrupted, downloading", path, " file freshly")
_download(url, path, True, False)
return
print('File {} exists, skip.'.format(path))
return
print('Downloading from url {} to {}'.format(url, path))
try:
urllib.request.urlretrieve(url, path)
print('')
except:
urllib.urlretrieve(url, path)
# Shared library containing the darknet C implementation; fetched once
# and loaded via cffi so the tests can run the reference network.
DARKNET_LIB = 'libdarknet.so'
DARKNETLIB_URL = 'https://github.com/siju-samuel/darknet/blob/master/lib/' \
                + DARKNET_LIB + '?raw=true'
_download(DARKNETLIB_URL, DARKNET_LIB)
LIB = __darknetffi__.dlopen('./' + DARKNET_LIB)
def _get_tvm_output(net, data):
    '''Build the darknet model with NNVM and execute it on the TVM CPU runtime.

    Returns the flat output vector (net.outputs values) as a numpy array.
    '''
    dtype = 'float32'
    sym, params = frontend.darknet.from_darknet(net, dtype)
    graph, library, params = nnvm.compiler.build(
        sym, 'llvm', {'data': data.shape}, dtype, params=params)
    module = graph_runtime.create(graph, library, tvm.cpu(0))
    # Feed the input tensor and the converted weights, then execute.
    module.set_input('data', tvm.nd.array(data.astype(dtype)))
    module.set_input(**params)
    module.run()
    out_shape = (net.outputs,)
    return module.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
def test_forward(net):
    '''Run the test image through darknet and TVM and compare the outputs.'''
    dtype = 'float32'
    test_image = 'dog.jpg'
    img_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + test_image +'?raw=true'
    _download(img_url, test_image)
    img = LIB.letterbox_image(LIB.load_image_color(test_image.encode('utf-8'), 0, 0), net.w, net.h)
    # Reference result from the darknet C implementation.
    raw = LIB.network_predict_image(net, img)
    darknet_out = np.zeros(net.outputs, dtype='float32')
    for idx in range(net.outputs):
        darknet_out[idx] = raw[idx]
    # Copy the letterboxed image into an NCHW array (batch of 1, C order).
    data = np.empty([1, img.c, img.h, img.w], dtype)
    flat = data.reshape(-1)
    for idx in range(img.c * img.h * img.w):
        flat[idx] = img.data[idx]
    tvm_out = _get_tvm_output(net, data)
    np.testing.assert_allclose(darknet_out, tvm_out, rtol=1e-3, atol=1e-3)
def test_rnn_forward(net):
    '''Feed a one-hot vector to an RNN in both darknet and TVM and compare.'''
    from cffi import FFI
    ffi = FFI()
    one_hot = np.zeros([1, net.inputs], dtype='float32')
    one_hot[0, 84] = 1
    c_buffer = ffi.cast('float*', one_hot.ctypes.data)
    # TVM first, then darknet -- same order as the original, since the
    # network may carry internal state between predictions.
    tvm_out = _get_tvm_output(net, one_hot)
    raw = LIB.network_predict(net, c_buffer)
    darknet_out = np.zeros(net.outputs, dtype='float32')
    for idx in range(net.outputs):
        darknet_out[idx] = raw[idx]
    np.testing.assert_allclose(darknet_out, tvm_out, rtol=1e-4, atol=1e-4)
def test_forward_extraction():
    '''test extraction model'''
    model = 'extraction'
    cfg_name = model + '.cfg'
    weights_name = model + '.weights'
    _download('https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true', cfg_name)
    _download('http://pjreddie.com/media/files/' + weights_name + '?raw=true', weights_name)
    net = LIB.load_network(cfg_name.encode('utf-8'), weights_name.encode('utf-8'), 0)
    test_forward(net)
    LIB.free_network(net)
def test_forward_alexnet():
    '''test alexnet model'''
    model = 'alexnet'
    cfg_name = model + '.cfg'
    weights_name = model + '.weights'
    _download('https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true', cfg_name)
    _download('http://pjreddie.com/media/files/' + weights_name + '?raw=true', weights_name)
    net = LIB.load_network(cfg_name.encode('utf-8'), weights_name.encode('utf-8'), 0)
    test_forward(net)
    LIB.free_network(net)
def test_forward_resnet50():
    '''test resnet50 model'''
    model = 'resnet50'
    cfg_name = model + '.cfg'
    weights_name = model + '.weights'
    _download('https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true', cfg_name)
    _download('http://pjreddie.com/media/files/' + weights_name + '?raw=true', weights_name)
    net = LIB.load_network(cfg_name.encode('utf-8'), weights_name.encode('utf-8'), 0)
    test_forward(net)
    LIB.free_network(net)
def test_forward_yolo():
    '''test yolo model'''
    model = 'yolov2'
    cfg_name = model + '.cfg'
    weights_name = model + '.weights'
    _download('https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true', cfg_name)
    _download('http://pjreddie.com/media/files/' + weights_name + '?raw=true', weights_name)
    net = LIB.load_network(cfg_name.encode('utf-8'), weights_name.encode('utf-8'), 0)
    test_forward(net)
    LIB.free_network(net)
def test_forward_convolutional():
    '''test convolutional layer'''
    net = LIB.make_network(1)
    net.layers[0] = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
    net.w = 224
    net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_dense():
    '''test fully connected layer'''
    net = LIB.make_network(1)
    net.layers[0] = LIB.make_connected_layer(1, 75, 20, 1, 0, 0)
    net.w = 5
    net.h = 5
    LIB.resize_network(net, 5, 5)
    test_forward(net)
    LIB.free_network(net)
def test_forward_dense_batchnorm():
    '''test fully connected layer with batchnorm'''
    net = LIB.make_network(1)
    fc = LIB.make_connected_layer(1, 12, 2, 1, 1, 0)
    # Seed the batch-norm statistics and scales with random values.
    for idx in range(5):
        fc.rolling_mean[idx] = np.random.rand(1)
        fc.rolling_variance[idx] = np.random.rand(1)
        fc.scales[idx] = np.random.rand(1)
    net.layers[0] = fc
    net.w = 2
    net.h = 2
    LIB.resize_network(net, 2, 2)
    test_forward(net)
    LIB.free_network(net)
def test_forward_maxpooling():
    '''test maxpooling layer'''
    net = LIB.make_network(1)
    net.layers[0] = LIB.make_maxpool_layer(1, 224, 224, 3, 2, 2, 0)
    net.w = 224
    net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_avgpooling():
    '''test avgerage pooling layer'''
    net = LIB.make_network(1)
    net.layers[0] = LIB.make_avgpool_layer(1, 224, 224, 3)
    net.w = 224
    net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_batch_norm():
    '''test batch normalization layer'''
    net = LIB.make_network(1)
    # Same conv layer as test_forward_convolutional but with batch_normalize=1.
    layer = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 1, 0, 0, 0)
    # Seed the 32 per-filter batchnorm statistics with random values.
    for i in range(32):
        layer.rolling_mean[i] = np.random.rand(1)
        layer.rolling_variance[i] = np.random.rand(1)
    net.layers[0] = layer
    net.w = net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_shortcut():
    '''test shortcut layer'''
    net = LIB.make_network(3)
    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
    layer_2 = LIB.make_convolutional_layer(1, 111, 111, 32, 32, 1, 1, 1, 0, 1, 0, 0, 0, 0)
    # Shortcut adds layer 0's output (111x111x32) into layer 2's input.
    layer_3 = LIB.make_shortcut_layer(1, 0, 111, 111, 32, 111, 111, 32)
    layer_3.activation = 1
    net.layers[0] = layer_1
    net.layers[1] = layer_2
    net.layers[2] = layer_3
    net.w = net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_reorg():
    '''test reorg layer'''
    net = LIB.make_network(2)
    layer_1 = LIB.make_convolutional_layer(1, 222, 222, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
    # Reorg with stride 2 on the 110x110x32 conv output.
    layer_2 = LIB.make_reorg_layer(1, 110, 110, 32, 2, 0, 0, 0)
    net.layers[0] = layer_1
    net.layers[1] = layer_2
    net.w = net.h = 222
    LIB.resize_network(net, 222, 222)
    test_forward(net)
    LIB.free_network(net)
def test_forward_region():
    '''test region layer'''
    net = LIB.make_network(2)
    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 8, 1, 3, 2, 0, 1, 0, 0, 0, 0)
    layer_2 = LIB.make_region_layer(1, 111, 111, 2, 2, 1)
    # Enable the softmax over class probabilities inside the region layer.
    layer_2.softmax = 1
    net.layers[0] = layer_1
    net.layers[1] = layer_2
    net.w = net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_elu():
    '''test elu activation layer'''
    net = LIB.make_network(1)
    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
    # activation id 8 selects ELU in darknet's activation enum.
    layer_1.activation = 8
    net.layers[0] = layer_1
    net.w = net.h = 224
    LIB.resize_network(net, 224, 224)
    test_forward(net)
    LIB.free_network(net)
def test_forward_softmax():
    '''test softmax layer'''
    net = LIB.make_network(1)
    layer_1 = LIB.make_softmax_layer(1, 75, 1)
    # Default temperature (1) = plain softmax.
    layer_1.temperature=1
    net.layers[0] = layer_1
    net.w = net.h = 5
    LIB.resize_network(net, net.w, net.h)
    test_forward(net)
    LIB.free_network(net)
def test_forward_softmax_temperature():
    '''test softmax layer with a non-default temperature'''
    net = LIB.make_network(1)
    layer_1 = LIB.make_softmax_layer(1, 75, 1)
    # Temperature < 1 sharpens the output distribution.
    layer_1.temperature=0.8
    net.layers[0] = layer_1
    net.w = net.h = 5
    LIB.resize_network(net, net.w, net.h)
    test_forward(net)
    LIB.free_network(net)
def test_forward_rnn():
    '''test RNN layer'''
    net = LIB.make_network(1)
    batch = 1
    inputs = 256
    outputs = 256
    steps = 1
    activation = 1
    batch_normalize = 0
    adam = 0
    layer_1 = LIB.make_rnn_layer(batch, inputs, outputs, steps, activation, batch_normalize, adam)
    net.layers[0] = layer_1
    net.inputs = inputs
    net.outputs = outputs
    # RNN input is a flat vector, so the spatial dims are zeroed out.
    net.w = net.h = 0
    LIB.resize_network(net, net.w, net.h)
    test_rnn_forward(net)
    LIB.free_network(net)
def test_forward_crnn():
    '''test convolutional RNN (CRNN) layer'''
    net = LIB.make_network(1)
    batch = 1
    c = 3
    h = 224
    w = 224
    hidden_filters = c
    output_filters = c
    steps = 1
    activation = 0
    batch_normalize = 0
    inputs = 256
    outputs = 256
    layer_1 = LIB.make_crnn_layer(batch, h, w, c, hidden_filters, output_filters,
                                  steps, activation, batch_normalize)
    net.layers[0] = layer_1
    net.inputs = inputs
    # Output is a feature map, so outputs = filters * spatial size.
    net.outputs = output_filters * h * w
    net.w = w
    net.h = h
    LIB.resize_network(net, net.w, net.h)
    test_forward(net)
    LIB.free_network(net)
def test_forward_activation_logistic():
    '''test logistic activation layer'''
    net = LIB.make_network(1)
    # Named locals document the conv-layer constructor's positional args.
    batch = 1
    h = 224
    w = 224
    c = 3
    n = 32
    groups = 1
    size = 3
    stride = 2
    padding = 0
    activation = 0  # 0 selects the logistic activation in darknet's enum
    batch_normalize = 0
    binary = 0
    xnor = 0
    adam = 0
    layer_1 = LIB.make_convolutional_layer(batch, h, w, c, n, groups, size, stride, padding,
                                           activation, batch_normalize, binary, xnor, adam)
    net.layers[0] = layer_1
    net.w = w
    net.h = h
    LIB.resize_network(net, net.w, net.h)
    test_forward(net)
    LIB.free_network(net)
if __name__ == '__main__':
    # Run the whole darknet frontend test suite when executed as a script.
    test_forward_resnet50()
    test_forward_alexnet()
    test_forward_extraction()
    test_forward_yolo()
    test_forward_convolutional()
    test_forward_maxpooling()
    test_forward_avgpooling()
    test_forward_batch_norm()
    test_forward_shortcut()
    test_forward_dense()
    test_forward_dense_batchnorm()
    test_forward_softmax()
    test_forward_softmax_temperature()
    test_forward_rnn()  # was invoked twice; duplicate call removed
    test_forward_reorg()
    test_forward_region()
    test_forward_elu()
    test_forward_crnn()
    test_forward_activation_logistic()
"requests.head",
"cffi.FFI",
"tvm.nd.empty",
"tvm.cpu",
"numpy.empty",
"numpy.testing.assert_allclose",
"numpy.zeros",
"os.path.getsize",
"urllib.request.urlretrieve",
"os.path.isfile",
"requests.get",
"urllib.urlretrieve",
"numpy.random.rand",
"nnvm.frontend.darknet.from_darknet",
"tvm.... | [((1661, 1702), 'nnvm.testing.darknet.__darknetffi__.dlopen', '__darknetffi__.dlopen', (["('./' + DARKNET_LIB)"], {}), "('./' + DARKNET_LIB)\n", (1682, 1702), False, 'from nnvm.testing.darknet import __darknetffi__\n'), ((1805, 1846), 'nnvm.frontend.darknet.from_darknet', 'frontend.darknet.from_darknet', (['net', 'dtype'], {}), '(net, dtype)\n', (1834, 1846), False, 'from nnvm import frontend\n'), ((2033, 2043), 'tvm.cpu', 'tvm.cpu', (['(0)'], {}), '(0)\n', (2040, 2043), False, 'import tvm\n'), ((2052, 2093), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'library', 'ctx'], {}), '(graph, library, ctx)\n', (2072, 2093), False, 'from tvm.contrib import graph_runtime\n'), ((2884, 2922), 'numpy.zeros', 'np.zeros', (['net.outputs'], {'dtype': '"""float32"""'}), "(net.outputs, dtype='float32')\n", (2892, 2922), True, 'import numpy as np\n'), ((3030, 3080), 'numpy.empty', 'np.empty', (['[batch_size, img.c, img.h, img.w]', 'dtype'], {}), '([batch_size, img.c, img.h, img.w], dtype)\n', (3038, 3080), True, 'import numpy as np\n'), ((3303, 3375), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['darknet_out', 'tvm_out'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(darknet_out, tvm_out, rtol=0.001, atol=0.001)\n', (3329, 3375), True, 'import numpy as np\n'), ((3600, 3605), 'cffi.FFI', 'FFI', ([], {}), '()\n', (3603, 3605), False, 'from cffi import FFI\n'), ((3619, 3661), 'numpy.zeros', 'np.zeros', (['[1, net.inputs]'], {'dtype': '"""float32"""'}), "([1, net.inputs], dtype='float32')\n", (3627, 3661), True, 'import numpy as np\n'), ((3863, 3901), 'numpy.zeros', 'np.zeros', (['net.outputs'], {'dtype': '"""float32"""'}), "(net.outputs, dtype='float32')\n", (3871, 3901), True, 'import numpy as np\n'), ((3982, 4056), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['darknet_out', 'tvm_out'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(darknet_out, tvm_out, rtol=0.0001, atol=0.0001)\n', (4008, 4056), True, 'import 
numpy as np\n'), ((617, 637), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (631, 637), False, 'import os\n'), ((1338, 1375), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'path'], {}), '(url, path)\n', (1364, 1375), False, 'import urllib\n'), ((7081, 7098), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7095, 7098), True, 'import numpy as np\n'), ((7135, 7152), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7149, 7152), True, 'import numpy as np\n'), ((7179, 7196), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7193, 7196), True, 'import numpy as np\n'), ((8151, 8168), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (8165, 8168), True, 'import numpy as np\n'), ((8205, 8222), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (8219, 8222), True, 'import numpy as np\n'), ((705, 726), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (720, 726), False, 'import os\n'), ((750, 768), 'requests.head', 'requests.head', (['url'], {}), '(url)\n', (763, 768), False, 'import requests\n'), ((791, 821), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (803, 821), False, 'import requests\n'), ((1414, 1443), 'urllib.urlretrieve', 'urllib.urlretrieve', (['url', 'path'], {}), '(url, path)\n', (1432, 1443), False, 'import urllib\n'), ((905, 925), 'urllib2.urlopen', 'urllib2.urlopen', (['url'], {}), '(url)\n', (920, 925), False, 'import urllib2\n'), ((2286, 2316), 'tvm.nd.empty', 'tvm.nd.empty', (['out_shape', 'dtype'], {}), '(out_shape, dtype)\n', (2298, 2316), False, 'import tvm\n')] |
# losses.py
import numpy as np
def to_numpy(inputs):
    """Coerce *inputs* to a ``np.ndarray``.

    BUGFIX: the original check was inverted -- it called ``np.array`` only
    when the value was *already* an ndarray, and returned lists/scalars
    untouched, defeating the function's purpose.
    """
    if not isinstance(inputs, np.ndarray):
        inputs = np.array(inputs)
    return inputs
class Losses(object):
    """Base class and factory for loss functions."""

    def __init__(self):
        pass

    def gradient(self):
        # Overridden by subclasses with a (model, inputs, labels) signature.
        return None

    def __call__(self):
        return None

    @staticmethod
    def get_loss_fn(name):
        """Return a loss instance for *name* (case-insensitive).

        Raises:
            ValueError: if *name* does not identify a known loss.
                (Was a bare ``Exception``; ValueError is more precise and
                still caught by existing ``except Exception`` handlers.)
        """
        key = name.lower()
        if key == 'mse':
            return MSE()
        if key == 'crossentropy':
            return CrossEntropy()
        raise ValueError('{} is not a valid loss function.'.format(name))
class MSE(Losses):
    """Mean-squared-error loss with an analytic gradient for linear models."""

    def __init__(self):
        super(MSE, self).__init__()

    def gradient(self, model, inputs, labels):
        """Return d(MSE)/d[weight; bias], stacked along axis 0."""
        predictions = to_numpy(model.predict(inputs))
        targets = to_numpy(labels)
        error = predictions - targets
        n_samples = error.shape[0]
        # Weight gradient first, then the (1, ...) bias gradient row.
        weight_grad = np.dot(inputs.T, error) * 2 / n_samples
        bias_grad = np.expand_dims(np.sum(error) * 2 / n_samples, axis=0)
        return np.concatenate([weight_grad, bias_grad], axis=0)

    def __call__(self, model, inputs, labels):
        """Return the mean squared error between predictions and labels."""
        predictions = to_numpy(model.predict(inputs))
        targets = to_numpy(labels)
        return np.average(np.square(predictions - targets))
class CrossEntropy(Losses):
    """Cross-entropy loss for (binary) classification models."""

    def __init__(self):
        self.epsilon = 1e-7  # to avoid zero to log
        super(CrossEntropy, self).__init__()

    def gradient(self, model, inputs, labels):
        """Return the stacked [weight; bias] gradient of the loss.

        NOTE(review): the body (including the ``* 2`` factor) is identical to
        ``MSE.gradient`` apart from using ``binary_predict`` -- the factor of 2
        looks copied from the MSE derivation; verify it is intended here.
        """
        # make predictions
        preds = model.binary_predict(inputs)
        # convert preds and labels to np.array
        preds, labels = to_numpy(preds), to_numpy(labels)
        # compute gradients
        gradients = preds - labels
        N = labels.shape[0]
        gradients = {
            'weight': np.dot(inputs.T, gradients) * 2 / N,
            'bias': np.expand_dims(np.sum(gradients) * 2 / N, axis=0)
        }
        return np.concatenate(list(gradients.values()), axis=0)

    def __call__(self, model, inputs, labels):
        # Args:
        # - preds: np.array
        #     Prediction matrix of shape [batch, n_class]
        # - labels: np.array
        #     One-hot encoding label matrix of shape [batch, n_class]
        # make predictions
        preds = model.predict(inputs)
        # convert preds and labels to np.array
        preds, labels = to_numpy(preds), to_numpy(labels)
        N = labels.shape[0]
        # cross entropy: -(y*log(p) + (1-y)*log(1-p)) averaged over the batch;
        # epsilon keeps log() away from zero.
        cross_entropy = np.dot(np.log(self.epsilon + preds).T, labels) + \
                        np.dot(np.log(1 - preds + self.epsilon).T, 1 - labels)
        return -1 * cross_entropy / N
| [
"numpy.sum",
"numpy.log",
"numpy.square",
"numpy.array",
"numpy.dot"
] | [((104, 120), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (112, 120), True, 'import numpy as np\n'), ((1459, 1484), 'numpy.square', 'np.square', (['(preds - labels)'], {}), '(preds - labels)\n', (1468, 1484), True, 'import numpy as np\n'), ((1032, 1059), 'numpy.dot', 'np.dot', (['inputs.T', 'gradients'], {}), '(inputs.T, gradients)\n', (1038, 1059), True, 'import numpy as np\n'), ((1999, 2026), 'numpy.dot', 'np.dot', (['inputs.T', 'gradients'], {}), '(inputs.T, gradients)\n', (2005, 2026), True, 'import numpy as np\n'), ((2693, 2721), 'numpy.log', 'np.log', (['(self.epsilon + preds)'], {}), '(self.epsilon + preds)\n', (2699, 2721), True, 'import numpy as np\n'), ((2768, 2800), 'numpy.log', 'np.log', (['(1 - preds + self.epsilon)'], {}), '(1 - preds + self.epsilon)\n', (2774, 2800), True, 'import numpy as np\n'), ((1104, 1121), 'numpy.sum', 'np.sum', (['gradients'], {}), '(gradients)\n', (1110, 1121), True, 'import numpy as np\n'), ((2071, 2088), 'numpy.sum', 'np.sum', (['gradients'], {}), '(gradients)\n', (2077, 2088), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
__MatricNo__ = "KIE160111"
__Title__ = "Assignment 2"
__GitHub__ = "https://github.com/khvmaths"
import sys
from PyQt5 import QtCore,QtGui,QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QLabel, QPushButton
import cv2
import numpy as np
import math
import pygame
import random
import os
import time
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (-300,-300)
from collections import deque
import argparse
#import gamegui
"""GUI"""
class Ui_Dialog(object):
    """Qt Designer layout for the main window (pyuic-style generated code).

    Widgets of note: label_2 = game area, label_5 = detected CV action,
    label_6 = score, label_7 = camera preview, label_10 = high score,
    pushButton_3/4/5 = normal start / start with CV / stop CV.
    """

    def setupUi(self, Dialog):
        """Build and place all widgets on *Dialog*."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(637, 317)
        # Right-hand "OpenCV Processing" panel (outside the 637px default width;
        # only visible after the window is resized for CV mode).
        self.groupBox = QtWidgets.QGroupBox(Dialog)
        self.groupBox.setGeometry(QtCore.QRect(640, 10, 251, 301))
        self.groupBox.setObjectName("groupBox")
        self.label = QtWidgets.QLabel(self.groupBox)
        self.label.setGeometry(QtCore.QRect(20, 20, 101, 16))
        self.label.setObjectName("label")
        self.label_4 = QtWidgets.QLabel(self.groupBox)
        self.label_4.setGeometry(QtCore.QRect(20, 200, 141, 16))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(self.groupBox)
        self.label_5.setGeometry(QtCore.QRect(20, 220, 211, 31))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setText("")
        self.label_5.setObjectName("label_5")
        self.label_7 = QtWidgets.QLabel(self.groupBox)
        self.label_7.setGeometry(QtCore.QRect(20, 40, 221, 151))
        self.label_7.setText("")
        self.label_7.setObjectName("label_7")
        self.pushButton_5 = QtWidgets.QPushButton(self.groupBox)
        self.pushButton_5.setGeometry(QtCore.QRect(60, 260, 131, 31))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.pushButton_5.setFont(font)
        self.pushButton_5.setAutoDefault(False)
        self.pushButton_5.setObjectName("pushButton_5")
        # Score board group.
        self.groupBox_3 = QtWidgets.QGroupBox(Dialog)
        self.groupBox_3.setGeometry(QtCore.QRect(10, 210, 141, 71))
        self.groupBox_3.setObjectName("groupBox_3")
        self.label_6 = QtWidgets.QLabel(self.groupBox_3)
        self.label_6.setGeometry(QtCore.QRect(10, 20, 121, 51))
        font = QtGui.QFont()
        font.setFamily("MS Serif")
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setText("")
        self.label_6.setObjectName("label_6")
        self.pushButton_3 = QtWidgets.QPushButton(Dialog)
        self.pushButton_3.setGeometry(QtCore.QRect(350, 220, 131, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setAutoDefault(True)
        self.pushButton_3.setDefault(True)
        self.pushButton_3.setFlat(False)
        self.pushButton_3.setObjectName("pushButton_3")
        # Game area group: label_2 receives the rendered pygame frames.
        self.groupBox_2 = QtWidgets.QGroupBox(Dialog)
        self.groupBox_2.setGeometry(QtCore.QRect(10, 10, 621, 181))
        self.groupBox_2.setObjectName("groupBox_2")
        self.label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.label_2.setGeometry(QtCore.QRect(10, 20, 601, 151))
        self.label_2.setObjectName("label_2")
        self.pushButton_4 = QtWidgets.QPushButton(Dialog)
        self.pushButton_4.setGeometry(QtCore.QRect(490, 220, 131, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.pushButton_4.setFont(font)
        self.pushButton_4.setAutoDefault(False)
        self.pushButton_4.setObjectName("pushButton_4")
        self.label_9 = QtWidgets.QLabel(Dialog)
        self.label_9.setGeometry(QtCore.QRect(20, 290, 171, 16))
        self.label_9.setObjectName("label_9")
        # High score group.
        self.groupBox_4 = QtWidgets.QGroupBox(Dialog)
        self.groupBox_4.setGeometry(QtCore.QRect(160, 210, 141, 71))
        self.groupBox_4.setObjectName("groupBox_4")
        self.label_10 = QtWidgets.QLabel(self.groupBox_4)
        self.label_10.setGeometry(QtCore.QRect(10, 20, 121, 51))
        font = QtGui.QFont()
        font.setFamily("MS Serif")
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.label_10.setFont(font)
        self.label_10.setText("")
        self.label_10.setObjectName("label_10")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(340, 290, 291, 20))
        self.label_3.setTextFormat(QtCore.Qt.RichText)
        self.label_3.setObjectName("label_3")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible widget strings."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dino Run"))
        self.groupBox.setTitle(_translate("Dialog", "OpenCV Processing"))
        self.label.setText(_translate("Dialog", "Original Image"))
        self.label_4.setText(_translate("Dialog", "Processed Information"))
        self.pushButton_5.setText(_translate("Dialog", "Stop CV"))
        self.groupBox_3.setTitle(_translate("Dialog", "Score Board"))
        self.pushButton_3.setText(_translate("Dialog", "Normal Start"))
        self.groupBox_2.setTitle(_translate("Dialog", "Game Area"))
        self.label_2.setText(_translate("Dialog", "TextLabel"))
        self.pushButton_4.setText(_translate("Dialog", "Start with CV"))
        self.label_9.setText(_translate("Dialog", "TextLabel"))
        self.groupBox_4.setTitle(_translate("Dialog", "High Score"))
        self.label_3.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:7pt;\">Developed by </span><span style=\" font-size:9pt; font-weight:600;\"><NAME> (KIE160111)</span></p></body></html>"))
"""THE CV IS HIGHLY DEPENDENT ON ENVIRONMENT/NOISE"""
threshold = 60 # BINARY threshold
blurValue = 41 # GaussianBlur parameter
Lower_bound=np.array([110,50,50])
Upper_bound=np.array([130,255,255])
pts=deque(maxlen=64)
"""OPENCV PART == TODO: Change the whole idea of detecting movement"""
class FrameThread(QThread,Ui_Dialog):
    """Webcam worker thread.

    Tracks a coloured marker (HSV range Lower_bound..Upper_bound) in each
    frame, shows an annotated preview in ``imgLab`` and publishes the detected
    gesture ('Up' / 'Down' / 'No action') through the ``action`` label, which
    the game loop polls.
    """
    imgLab = None   # QLabel that receives the annotated preview frames
    device = None   # cv2.VideoCapture handle

    def __init__(self, deviceIndex, imgLab, action):
        QThread.__init__(self)
        self.imgLab = imgLab
        self.action = action
        self.deviceIndex = deviceIndex
        self.device = cv2.VideoCapture(self.deviceIndex)
        # Request a high-resolution stream; the driver may clamp these values.
        self.device.set(cv2.CAP_PROP_FRAME_WIDTH, 1600)
        self.device.set(cv2.CAP_PROP_FRAME_HEIGHT, 1200)

    def run(self):
        """Capture loop: runs until the device fails or is released."""
        if not self.device.isOpened():
            return
        last_center = (0, 0)
        try:
            while True:
                # BUGFIX: 'act' used to be left unbound when the first detected
                # move was purely horizontal, raising NameError that the
                # blanket except silently turned into thread death.
                act = 'No action'
                ret, frame = self.device.read()
                height, width, bytesPerComponent = frame.shape
                bytesPerLine = bytesPerComponent * width
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                # Segment the marker colour and clean up the mask.
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.inRange(hsv, Lower_bound, Upper_bound)
                mask = cv2.erode(mask, kernel, iterations=2)
                mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
                mask = cv2.dilate(mask, kernel, iterations=1)
                res = cv2.bitwise_and(frame, frame, mask=mask)
                # [-2:] keeps this compatible with OpenCV 3 (3-tuple) and 4 (2-tuple).
                cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)[-2:]
                center = None
                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    print(center)
                    if radius > 5:
                        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                        cv2.circle(frame, center, 5, (0, 0, 255), -1)
                    pts.appendleft(center)
                    # Draw the fading trail of recent marker centres.
                    for i in range(1, len(pts)):
                        if pts[i - 1] is None or pts[i] is None:
                            continue
                        thick = int(np.sqrt(len(pts) / float(i + 1)) * 2.5)
                        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thick)
                flipped = cv2.flip(frame, 1)
                image = QImage(flipped, width, height, bytesPerLine, QImage.Format_RGB888)
                pixmap = QPixmap.fromImage(image)
                pixmap = pixmap.scaled(221, 191, QtCore.Qt.KeepAspectRatio)
                if center is None:
                    continue
                dx = abs(center[0] - last_center[0])
                dy = abs(center[1] - last_center[1])
                # BUGFIX: the vertical guard used to read abs(diff < 20) --
                # abs() of a boolean -- so large upward moves were
                # misclassified as horizontal ones.
                if dx > 5 and dy < 20:
                    pass  # horizontal move: no game action is mapped to it
                elif dy > 10:
                    act = 'Down' if center[1] > last_center[1] else 'Up'
                last_center = center
                self.imgLab.setPixmap(pixmap)
                self.action.setText(act)
        except Exception:
            # Best-effort loop: never let a frame error crash the GUI thread.
            pass

    def destoryed(self, QObject=None):  # NOTE: typo kept -- callers use this name
        self.device.release()
"""MAIN GAME TRESHOLD"""
pygame.init()
scr_size = (width,height) = (600,150)
FPS = 60
gravity = 0.6
black = (0,0,0)
white = (255,255,255)
background_col = (235,235,235)
high_score = 0
screen = pygame.display.set_mode(scr_size)
clock = pygame.time.Clock()
pygame.display.set_caption("Dino Run ")
jump_sound = pygame.mixer.Sound('sprites/jump.wav')
die_sound = pygame.mixer.Sound('sprites/die.wav')
checkPoint_sound = pygame.mixer.Sound('sprites/checkPoint.wav')
def load_image(
    name,
    sizex=-1,
    sizey=-1,
    colorkey=None,
):
    """Load a sprite image from the 'sprites' folder.

    Args:
        name: file name inside 'sprites'.
        sizex, sizey: target size; -1 keeps the original dimension.
        colorkey: transparency colour; -1 means "sample the top-left pixel".

    Returns:
        (Surface, Rect) tuple.
    """
    fullname = os.path.join('sprites', name)
    image = pygame.image.load(fullname)
    image = image.convert()
    if colorkey is not None:
        # BUGFIX: was ``colorkey is -1`` -- identity comparison with an int
        # literal only works via CPython's small-int cache and emits
        # SyntaxWarning on Python 3.8+.
        if colorkey == -1:
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey, pygame.RLEACCEL)
    if sizex != -1 or sizey != -1:
        image = pygame.transform.scale(image, (sizex, sizey))
    return (image, image.get_rect())
def load_sprite_sheet(
    sheetname,
    nx,
    ny,
    scalex = -1,
    scaley = -1,
    colorkey = None,
):
    """Slice a sprite sheet into nx * ny equally sized frames.

    Args:
        sheetname: file name inside 'sprites'.
        nx, ny: number of columns / rows on the sheet.
        scalex, scaley: per-frame target size; -1 keeps the cut size.
        colorkey: transparency colour; -1 samples each frame's top-left pixel.

    Returns:
        (list of Surfaces in row-major order, Rect of the first frame).
    """
    fullname = os.path.join('sprites', sheetname)
    sheet = pygame.image.load(fullname)
    sheet = sheet.convert()
    sheet_rect = sheet.get_rect()
    sprites = []
    sizex = sheet_rect.width / nx
    sizey = sheet_rect.height / ny
    for i in range(0, ny):
        for j in range(0, nx):
            rect = pygame.Rect((j * sizex, i * sizey, sizex, sizey))
            image = pygame.Surface(rect.size)
            image = image.convert()
            image.blit(sheet, (0, 0), rect)
            if colorkey is not None:
                # BUGFIX: was ``colorkey is -1`` -- identity comparison with an
                # int literal; relies on the small-int cache and warns on 3.8+.
                if colorkey == -1:
                    colorkey = image.get_at((0, 0))
                image.set_colorkey(colorkey, pygame.RLEACCEL)
            if scalex != -1 or scaley != -1:
                image = pygame.transform.scale(image, (scalex, scaley))
            sprites.append(image)
    sprite_rect = sprites[0].get_rect()
    return sprites, sprite_rect
def disp_gameOver_msg(retbutton_image, gameover_image):
    """Blit the game-over banner and the replay button onto the screen."""
    # Replay button: centred horizontally, just below mid-height.
    button_rect = retbutton_image.get_rect()
    button_rect.centerx = width / 2
    button_rect.top = height * 0.52
    # Game-over banner: centred horizontally, in the upper third.
    banner_rect = gameover_image.get_rect()
    banner_rect.centerx = width / 2
    banner_rect.centery = height * 0.35
    for surface, rect in ((retbutton_image, button_rect),
                          (gameover_image, banner_rect)):
        screen.blit(surface, rect)
def extractDigits(number):
    """Split a non-negative score into decimal digits, most significant
    first, zero-padded on the left to at least five entries.

    BUGFIX: the original loop used true division (``number/10 != 0``), which
    never terminates early and appended one spurious digit; for 5-digit
    scores that produced a 6-element list with a bogus leading zero.

    Returns None for negative input (original behaviour preserved).
    """
    if number > -1:
        digits = []
        # Collect digits least-significant first using integer arithmetic.
        while number >= 10:
            digits.append(number % 10)
            number //= 10
        digits.append(number)
        # Left-pad to the 5-digit scoreboard width.
        while len(digits) < 5:
            digits.append(0)
        digits.reverse()
        return digits
class Dino():
    """The player character: handles jumping, ducking, blinking (intro
    screen), sprite animation and scoring."""

    def __init__(self,sizex=-1,sizey=-1):
        # Frames 0-1: running, 2-3: (unused here), 4: dead; separate sheet for ducking.
        self.images,self.rect = load_sprite_sheet('dino.png',5,1,sizex,sizey,-1)
        self.images1,self.rect1 = load_sprite_sheet('dino_ducking.png',2,1,59,sizey,-1)
        self.rect.bottom = int(0.98*height)   # ground level
        self.rect.left = width/15
        self.image = self.images[0]
        self.index = 0       # current animation frame
        self.counter = 0     # frame counter driving animation & score
        self.score = 0
        self.isJumping = False
        self.isDead = False
        self.isDucking = False
        self.isBlinking = False
        self.movement = [0,0]          # per-frame (dx, dy)
        self.jumpSpeed = 11.5
        # Cache both hitbox widths so duck/stand transitions can swap them.
        self.stand_pos_width = self.rect.width
        self.duck_pos_width = self.rect1.width

    def draw(self):
        screen.blit(self.image,self.rect)

    def checkbounds(self):
        # Clamp to ground level and end the jump on landing.
        if self.rect.bottom > int(0.98*height):
            self.rect.bottom = int(0.98*height)
            self.isJumping = False

    def update(self):
        # Apply gravity while airborne.
        if self.isJumping:
            self.movement[1] = self.movement[1] + gravity
        # Pick the animation frame for the current state.
        if self.isJumping:
            self.index = 0
        elif self.isBlinking:
            # Eyes open for a long stretch, closed briefly.
            if self.index == 0:
                if self.counter % 400 == 399:
                    self.index = (self.index + 1)%2
            else:
                if self.counter % 20 == 19:
                    self.index = (self.index + 1)%2
        elif self.isDucking:
            if self.counter % 5 == 0:
                self.index = (self.index + 1)%2
        else:
            # Running frames are 2 and 3.
            if self.counter % 5 == 0:
                self.index = (self.index + 1)%2 + 2
        if self.isDead:
            self.index = 4
        # Swap sprite sheet and hitbox width when ducking.
        if not self.isDucking:
            self.image = self.images[self.index]
            self.rect.width = self.stand_pos_width
        else:
            self.image = self.images1[(self.index)%2]
            self.rect.width = self.duck_pos_width
        self.rect = self.rect.move(self.movement)
        self.checkbounds()
        # Score ticks roughly every 7 frames; checkpoint chime every 100 points.
        if not self.isDead and self.counter % 7 == 6 and self.isBlinking == False:
            self.score += 1
            if self.score % 100 == 0 and self.score != 0:
                if pygame.mixer.get_init() != None:
                    checkPoint_sound.play()
        self.counter = (self.counter + 1)
class Cactus(pygame.sprite.Sprite):
    """A cactus obstacle that scrolls right-to-left and kills itself
    once off-screen."""

    def __init__(self,speed=5,sizex=-1,sizey=-1):
        # self.containers is assigned by the game loop before instantiation.
        pygame.sprite.Sprite.__init__(self,self.containers)
        self.images,self.rect = load_sprite_sheet('cacti-small.png',3,1,sizex,sizey,-1)
        self.rect.bottom = int(0.98*height)
        self.rect.left = width + self.rect.width   # spawn just off the right edge
        # Pick one of the three cactus variants at random.
        self.image = self.images[random.randrange(0,3)]
        self.movement = [-1*speed,0]

    def draw(self):
        screen.blit(self.image,self.rect)

    def update(self):
        self.rect = self.rect.move(self.movement)
        if self.rect.right < 0:
            self.kill()
class Ptera(pygame.sprite.Sprite):
    """A flying pterodactyl obstacle at one of three fixed heights,
    with a two-frame wing-flap animation."""

    def __init__(self,speed=5,sizex=-1,sizey=-1):
        # self.containers is assigned by the game loop before instantiation.
        pygame.sprite.Sprite.__init__(self,self.containers)
        self.images,self.rect = load_sprite_sheet('ptera.png',2,1,sizex,sizey,-1)
        # Three possible flight heights; one is chosen at random per spawn.
        self.ptera_height = [height*0.82,height*0.75,height*0.60]
        self.rect.centery = self.ptera_height[random.randrange(0,3)]
        self.rect.left = width + self.rect.width   # spawn just off the right edge
        self.image = self.images[0]
        self.movement = [-1*speed,0]
        self.index = 0
        self.counter = 0

    def draw(self):
        screen.blit(self.image,self.rect)

    def update(self):
        # Flap wings every 10 frames.
        if self.counter % 10 == 0:
            self.index = (self.index+1)%2
            self.image = self.images[self.index]
        self.rect = self.rect.move(self.movement)
        self.counter = (self.counter + 1)
        if self.rect.right < 0:
            self.kill()
class Ground():
    """Infinitely scrolling ground, implemented as two copies of the same
    image leap-frogging each other."""

    def __init__(self,speed=-5):
        self.image,self.rect = load_image('ground.png',-1,-1,-1)
        self.image1,self.rect1 = load_image('ground.png',-1,-1,-1)
        self.rect.bottom = height
        self.rect1.bottom = height
        self.rect1.left = self.rect.right   # second copy starts right after the first
        self.speed = speed

    def draw(self):
        screen.blit(self.image,self.rect)
        screen.blit(self.image1,self.rect1)

    def update(self):
        self.rect.left += self.speed
        self.rect1.left += self.speed
        # When a copy scrolls fully off-screen, move it behind the other one.
        if self.rect.right < 0:
            self.rect.left = self.rect1.right
        if self.rect1.right < 0:
            self.rect1.left = self.rect.right
class Cloud(pygame.sprite.Sprite):
    """Decorative cloud drifting slowly right-to-left."""

    def __init__(self,x,y):
        # self.containers is assigned by the game loop before instantiation.
        pygame.sprite.Sprite.__init__(self,self.containers)
        self.image,self.rect = load_image('cloud.png',int(90*30/42),30,-1)
        self.speed = 1
        self.rect.left = x
        self.rect.top = y
        self.movement = [-1*self.speed,0]

    def draw(self):
        screen.blit(self.image,self.rect)

    def update(self):
        self.rect = self.rect.move(self.movement)
        if self.rect.right < 0:
            self.kill()
class Scoreboard():
    """Five-digit numeric display rendered from the 'numbers.png' sheet;
    used for both the current score and the high score."""

    def __init__(self,x=-1,y=-1):
        self.score = 0
        # Digit glyphs 0-9 plus the 'H' and 'I' frames at indices 10/11.
        self.tempimages,self.temprect = load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)
        self.image = pygame.Surface((55,int(11*6/5)))
        self.rect = self.image.get_rect()
        # Default placement: top-right corner of the play field.
        if x == -1:
            self.rect.left = width*0.89
        else:
            self.rect.left = x
        if y == -1:
            self.rect.top = height*0.1
        else:
            self.rect.top = y

    def draw(self):
        screen.blit(self.image,self.rect)

    def update(self,score):
        score_digits = extractDigits(score)
        self.image.fill(background_col)
        # temprect doubles as the blit cursor: advance per digit, then reset.
        for s in score_digits:
            self.image.blit(self.tempimages[s],self.temprect)
            self.temprect.left += self.temprect.width
        self.temprect.left = 0
class App(QtWidgets.QMainWindow, Ui_Dialog):
def __init__(self):
    """Build the main window, wire up the buttons and start the clock label."""
    #pygame.display.iconify()
    self.act = ''   # last keyboard/CV action ('space' / 'down' / '')
    super(self.__class__,self).__init__()
    self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    self.setupUi(self)
    # CV worker publishes the preview to label_7 and the gesture to label_5.
    self.frameThread = FrameThread(0,self.label_7,self.label_5)
    self.pushButton_4.clicked.connect(self.on_click)      # Start with CV
    self.pushButton_3.clicked.connect(self.start_game)    # Normal start
    self.pushButton_5.clicked.connect(self.stop_cv)       # Stop CV
    self.pushButton_5.setEnabled(False)
    # 1 Hz timer refreshing the date/time label.
    self.timer = QtCore.QTimer(self)
    self.timer.setInterval(1000)
    self.timer.timeout.connect(self.displayTime)
    self.timer.start()
    self.label_2.setPixmap(QtGui.QPixmap("sprites/logo.png"))
def on_click(self):
    """Start the game with CV control: widen the window to reveal the
    OpenCV panel, launch the capture thread and run the game."""
    self.resize(905,317)
    self.pushButton_3.setEnabled(False)
    self.pushButton_4.setEnabled(False)
    self.pushButton_5.setEnabled(True)
    self.frameThread.start()
    isGameQuit = self.introscreen(self.label_2)
    if not isGameQuit:
        self.gameplay(self.label_2,self.label_6,self.label_10)
    pygame.display.flip()
def stop_cv(self):
    """Release the camera, hide the OpenCV panel and re-enable the
    start buttons."""
    self.resize(637,317)
    self.frameThread.destoryed()   # releases the VideoCapture (name is a typo in FrameThread)
    self.label_5.setText('')
    self.pushButton_3.setEnabled(True)
    self.pushButton_4.setEnabled(True)
    self.pushButton_5.setEnabled(False)
def start_game(self):
    """Start the game with keyboard control only (no CV thread)."""
    self.resize(637,317)
    self.pushButton_3.setEnabled(False)
    self.pushButton_4.setEnabled(False)
    self.pushButton_5.setEnabled(False)
    isGameQuit = self.introscreen(self.label_2)
    if not isGameQuit:
        self.gameplay(self.label_2,self.label_6,self.label_10)
    pygame.display.flip()
def keyPressEvent(self,event):
    """Translate Qt key presses into the game's action flag; the game
    loop polls and clears ``self.act``."""
    key=event.key()
    if key==QtCore.Qt.Key_Up or (event.type()==QtCore.QEvent.KeyPress and key==QtCore.Qt.Key_Space):
        self.act='space'
    if key==QtCore.Qt.Key_Down:
        self.act='down'
def displayTime(self):
    """Refresh label_9 with the current date/time (called once per second)."""
    self.label_9.setText(QtCore.QDateTime.currentDateTime().toString())
def introscreen(self,win):
    """Show the intro screen (blinking dino + logo) until the player jumps.

    Renders into the pygame surface and mirrors each frame into the Qt
    label *win*. Returns True if the player quit, None once the game
    should start.
    """
    temp_dino = Dino(44,47)
    temp_dino.isBlinking = True
    gameStart = False
    temp_ground,temp_ground_rect = load_sprite_sheet('ground.png',15,1,-1,-1,-1)
    temp_ground_rect.left = width/20
    temp_ground_rect.bottom = height
    logo,logo_rect = load_image('logoa.png',300,140,-1)
    logo_rect.centerx = width*0.6
    logo_rect.centery = height*0.6
    while not gameStart:
        if pygame.display.get_surface() == None:
            print("Couldn't load display surface")
            return True
        else:
            # Jump can come from the Qt key handler (self.act) ...
            if self.act=='space':
                temp_dino.isJumping=True
                temp_dino.isBlinking=False
                temp_dino.movement[1]=-1*temp_dino.jumpSpeed
                self.act=''
            # ... or directly from pygame's own event queue.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE or event.key == pygame.K_UP:
                        temp_dino.isJumping = True
                        temp_dino.isBlinking = False
                        temp_dino.movement[1] = -1*temp_dino.jumpSpeed
        temp_dino.update()
        if pygame.display.get_surface() != None:
            screen.fill(background_col)
            screen.blit(temp_ground[0],temp_ground_rect)
            if temp_dino.isBlinking:
                screen.blit(logo,logo_rect)
            temp_dino.draw()
            pygame.display.update()
            # Mirror the pygame frame buffer into the Qt label.
            data=screen.get_buffer().raw
            image=QtGui.QImage(data,width,height,QtGui.QImage.Format_RGB32)
            pixmap = QPixmap.fromImage(image)
            win.setPixmap(pixmap)
        clock.tick(FPS)
        # The intro ends once the opening jump has landed.
        if temp_dino.isJumping == False and temp_dino.isBlinking == False:
            gameStart = True
    def gameplay(self,win,score,hscore):
        """Run one session of the dino game, rendering every frame into the Qt label *win*.

        Args:
            win: QLabel that receives each rendered pygame frame as a QPixmap.
            score: QLabel updated with the current score each frame.
            hscore: QLabel updated with the high score on death.

        Commands arrive from two sources: gesture text in self.label_5
        ('Up'/'Down', routed through self.act) and pygame keyboard events.
        On death a game-over screen is shown; replay restarts the game via a
        recursive self.gameplay call.
        """
        self.pushButton_3.setEnabled(False)
        self.pushButton_4.setEnabled(False)
        global high_score
        lastduck=0
        currentscr=0
        gamespeed = 4
        startMenu = False
        gameOver = False
        gameQuit = False
        playerDino = Dino(44,47)
        new_ground = Ground(-1*gamespeed)
        scb = Scoreboard()
        highsc = Scoreboard(width*0.78)
        counter = 0
        cacti = pygame.sprite.Group()
        pteras = pygame.sprite.Group()
        clouds = pygame.sprite.Group()
        last_obstacle = pygame.sprite.Group()
        Cactus.containers = cacti
        Ptera.containers = pteras
        Cloud.containers = clouds
        retbutton_image,retbutton_rect = load_image('replay_button.png',35,31,-1)
        gameover_image,gameover_rect = load_image('game_over.png',190,11,-1)
        temp_images,temp_rect = load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)
        # build the "HI" (high score) label image from sprites 10 and 11 of the digit sheet
        HI_image = pygame.Surface((22,int(11*6/5)))
        HI_rect = HI_image.get_rect()
        HI_image.fill(background_col)
        HI_image.blit(temp_images[10],temp_rect)
        temp_rect.left += temp_rect.width
        HI_image.blit(temp_images[11],temp_rect)
        HI_rect.top = height*0.1
        HI_rect.left = width*0.73
        while not gameQuit:
            while startMenu:
                pass
            # --- main gameplay loop: one iteration per frame ---
            while not gameOver:
                if pygame.display.get_surface() == None:
                    print("Couldn't load display surface")
                    gameQuit = True
                    gameOver = True
                else:
                    if(self.label_5.text()!=''):
                        if(self.label_5.text()=='Up'):
                            self.act='space'
                        if(self.label_5.text()=='Down'):
                            self.act='down'
                    #KEYBOARD COMMAND FROM PYQT
                    currentscr=playerDino.score
                    if self.act=='space':
                        if playerDino.rect.bottom==int(0.98*height):
                            playerDino.isJumping=True
                            if pygame.mixer.get_init()!=None:
                                jump_sound.play()
                            playerDino.movement[1]=-1*playerDino.jumpSpeed
                            self.act=''
                    if self.act=='down':
                        if not (playerDino.isJumping and playerDino.isDead):
                            playerDino.isDucking=True
                            self.act=''
                            lastduck=playerDino.score
                    # hold the duck while the score is within 1 point of the last duck command
                    if not currentscr==0 and not lastduck ==0 and currentscr-lastduck>=0 and currentscr-lastduck<=1:
                        playerDino.isDucking=True
                    else:
                        playerDino.isDucking=False
                    #KEYBOARD COMMAND FROM PYGAME
                    for event in pygame.event.get():
                        if event.type == pygame.QUIT:
                            gameQuit = True
                            gameOver = True
                        if event.type == pygame.KEYDOWN:
                            if event.key == pygame.K_SPACE:
                                if playerDino.rect.bottom == int(0.98*height):
                                    playerDino.isJumping = True
                                    if pygame.mixer.get_init() != None:
                                        jump_sound.play()
                                    playerDino.movement[1] = -1*playerDino.jumpSpeed
                            if event.key == pygame.K_DOWN:
                                if not (playerDino.isJumping and playerDino.isDead):
                                    playerDino.isDucking = True
                        if event.type == pygame.KEYUP:
                            if event.key == pygame.K_DOWN:
                                playerDino.isDucking = False
                    # pixel-mask collision of the dino against every obstacle
                    for c in cacti:
                        c.movement[0] = -1*gamespeed
                        if pygame.sprite.collide_mask(playerDino,c):
                            playerDino.isDead = True
                            if pygame.mixer.get_init() != None:
                                die_sound.play()
                    for p in pteras:
                        p.movement[0] = -1*gamespeed
                        if pygame.sprite.collide_mask(playerDino,p):
                            playerDino.isDead = True
                            if pygame.mixer.get_init() != None:
                                die_sound.play()
                    # randomized obstacle spawning (cacti, pterodactyls, clouds)
                    if len(cacti) < 2:
                        if len(cacti) == 0:
                            last_obstacle.empty()
                            last_obstacle.add(Cactus(gamespeed,40,40))
                        else:
                            for l in last_obstacle:
                                if l.rect.right < width*0.7 and random.randrange(0,50) == 10:
                                    last_obstacle.empty()
                                    last_obstacle.add(Cactus(gamespeed, 40, 40))
                    if len(pteras) == 0 and random.randrange(0,200) == 10 and counter > 500:
                        for l in last_obstacle:
                            if l.rect.right < width*0.8:
                                last_obstacle.empty()
                                last_obstacle.add(Ptera(gamespeed, 46, 40))
                    if len(clouds) < 5 and random.randrange(0,300) == 10:
                        Cloud(width,random.randrange(height/5,height/2))
                    playerDino.update()
                    cacti.update()
                    pteras.update()
                    clouds.update()
                    new_ground.update()
                    scb.update(playerDino.score)
                    score.setText(str(playerDino.score))
                    highsc.update(high_score)
                    if pygame.display.get_surface() != None:
                        screen.fill(background_col)
                        new_ground.draw()
                        clouds.draw(screen)
                        scb.draw()
                        if high_score != 0:
                            highsc.draw()
                            screen.blit(HI_image,HI_rect)
                        cacti.draw(screen)
                        pteras.draw(screen)
                        playerDino.draw()
                        pygame.display.update()
                        #Add to the Qt
                        # copy the raw pygame framebuffer into a QPixmap shown on *win*
                        data=screen.get_buffer().raw
                        image=QtGui.QImage(data,width,height,QtGui.QImage.Format_RGB32)
                        pixmap = QPixmap.fromImage(image)
                        win.setPixmap(pixmap)
                    clock.tick(FPS)
                    if playerDino.isDead:
                        gameOver = True
                        if playerDino.score > high_score:
                            high_score = playerDino.score
                            hscore.setText(str(high_score))
                    # every 700 frames: speed up the ground scroll and the game
                    if counter%700 == 699:
                        new_ground.speed -= 1
                        gamespeed += 1
                    counter = (counter + 1)
            if gameQuit:
                break
            # --- game-over screen loop: wait for replay or quit ---
            while gameOver:
                if pygame.display.get_surface() == None:
                    print("Couldn't load display surface")
                    gameQuit = True
                    gameOver = False
                else:
                    self.pushButton_3.setEnabled(True)
                    self.pushButton_4.setEnabled(True)
                    if self.act=='space':
                        gameOver=False
                        self.gameplay(win,score,hscore)
                        act=''
                    for event in pygame.event.get():
                        if event.type == pygame.QUIT:
                            gameQuit = True
                            gameOver = False
                        if event.type == pygame.KEYDOWN:
                            if event.key == pygame.K_ESCAPE:
                                gameQuit = True
                                gameOver = False
                            if event.key == pygame.K_RETURN or event.key == pygame.K_SPACE:
                                gameOver = False
                                self.gameplay(win,score,hscore)
                    highsc.update(high_score)
                    if pygame.display.get_surface() != None:
                        disp_gameOver_msg(retbutton_image,gameover_image)
                        if high_score != 0:
                            highsc.draw()
                            screen.blit(HI_image,HI_rect)
                        pygame.display.update()
                        data=screen.get_buffer().raw
                        image=QtGui.QImage(data,width,height,QtGui.QImage.Format_RGB32)
                        pixmap = QPixmap.fromImage(image)
                        win.setPixmap(pixmap)
                clock.tick(FPS)
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = App()
    main_window.show()
    qt_app.exec_()
if __name__ == '__main__':
    main()
"""GAME REFERENCE by <NAME>""" | [
"cv2.bitwise_and",
"pygame.event.get",
"PyQt5.QtWidgets.QPushButton",
"pygame.Rect",
"numpy.ones",
"pygame.display.update",
"PyQt5.QtWidgets.QApplication",
"cv2.erode",
"os.path.join",
"cv2.inRange",
"collections.deque",
"pygame.mixer.get_init",
"cv2.line",
"PyQt5.QtWidgets.QLabel",
"PyQ... | [((6332, 6355), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (6340, 6355), True, 'import numpy as np\n'), ((6367, 6392), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (6375, 6392), True, 'import numpy as np\n'), ((6396, 6412), 'collections.deque', 'deque', ([], {'maxlen': '(64)'}), '(maxlen=64)\n', (6401, 6412), False, 'from collections import deque\n'), ((10090, 10103), 'pygame.init', 'pygame.init', ([], {}), '()\n', (10101, 10103), False, 'import pygame\n'), ((10274, 10307), 'pygame.display.set_mode', 'pygame.display.set_mode', (['scr_size'], {}), '(scr_size)\n', (10297, 10307), False, 'import pygame\n'), ((10317, 10336), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (10334, 10336), False, 'import pygame\n'), ((10338, 10377), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Dino Run """'], {}), "('Dino Run ')\n", (10364, 10377), False, 'import pygame\n'), ((10394, 10432), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""sprites/jump.wav"""'], {}), "('sprites/jump.wav')\n", (10412, 10432), False, 'import pygame\n'), ((10446, 10483), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""sprites/die.wav"""'], {}), "('sprites/die.wav')\n", (10464, 10483), False, 'import pygame\n'), ((10504, 10548), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""sprites/checkPoint.wav"""'], {}), "('sprites/checkPoint.wav')\n", (10522, 10548), False, 'import pygame\n'), ((10655, 10684), 'os.path.join', 'os.path.join', (['"""sprites"""', 'name'], {}), "('sprites', name)\n", (10667, 10684), False, 'import os\n'), ((10698, 10725), 'pygame.image.load', 'pygame.image.load', (['fullname'], {}), '(fullname)\n', (10715, 10725), False, 'import pygame\n'), ((11224, 11258), 'os.path.join', 'os.path.join', (['"""sprites"""', 'sheetname'], {}), "('sprites', sheetname)\n", (11236, 11258), False, 'import os\n'), ((11271, 11298), 'pygame.image.load', 'pygame.image.load', (['fullname'], {}), 
'(fullname)\n', (11288, 11298), False, 'import pygame\n'), ((32241, 32273), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (32263, 32273), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((745, 772), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Dialog'], {}), '(Dialog)\n', (764, 772), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((912, 943), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (928, 943), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1074, 1105), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (1090, 1105), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1243, 1274), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (1259, 1274), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1357, 1370), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1368, 1370), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1599, 1630), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (1615, 1630), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1807, 1843), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (1828, 1843), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1931, 1944), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1942, 1944), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2206, 2233), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Dialog'], {}), '(Dialog)\n', (2225, 2233), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2380, 2413), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_3'], {}), '(self.groupBox_3)\n', (2396, 2413), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2495, 2508), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', 
(2506, 2508), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2778, 2807), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (2799, 2807), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2896, 2909), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2907, 2909), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3256, 3283), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Dialog'], {}), '(Dialog)\n', (3275, 3283), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3430, 3463), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (3446, 3463), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3606, 3635), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (3627, 3635), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3724, 3737), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3735, 3737), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3996, 4020), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Dialog'], {}), '(Dialog)\n', (4012, 4020), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4161, 4188), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Dialog'], {}), '(Dialog)\n', (4180, 4188), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4337, 4370), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_4'], {}), '(self.groupBox_4)\n', (4353, 4370), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4453, 4466), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4464, 4466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4735, 4759), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Dialog'], {}), '(Dialog)\n', (4751, 4759), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4977, 5022), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', 
(5014, 5022), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6626, 6648), 'PyQt5.QtCore.QThread.__init__', 'QThread.__init__', (['self'], {}), '(self)\n', (6642, 6648), False, 'from PyQt5.QtCore import QThread\n'), ((6772, 6806), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.deviceIndex'], {}), '(self.deviceIndex)\n', (6788, 6806), False, 'import cv2\n'), ((10968, 11013), 'pygame.transform.scale', 'pygame.transform.scale', (['image', '(sizex, sizey)'], {}), '(image, (sizex, sizey))\n', (10990, 11013), False, 'import pygame\n'), ((15313, 15365), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (15342, 15365), False, 'import pygame\n'), ((15944, 15996), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (15973, 15996), False, 'import pygame\n'), ((17542, 17594), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (17571, 17594), False, 'import pygame\n'), ((19438, 19457), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', (['self'], {}), '(self)\n', (19451, 19457), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((23675, 23696), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (23694, 23696), False, 'import pygame\n'), ((23715, 23736), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (23734, 23736), False, 'import pygame\n'), ((23755, 23776), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (23774, 23776), False, 'import pygame\n'), ((23802, 23823), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (23821, 23823), False, 'import pygame\n'), ((808, 839), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(640)', '(10)', '(251)', '(301)'], {}), '(640, 10, 251, 301)\n', (820, 839), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((976, 1005), 'PyQt5.QtCore.QRect', 
'QtCore.QRect', (['(20)', '(20)', '(101)', '(16)'], {}), '(20, 20, 101, 16)\n', (988, 1005), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1140, 1170), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(200)', '(141)', '(16)'], {}), '(20, 200, 141, 16)\n', (1152, 1170), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1309, 1339), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(220)', '(211)', '(31)'], {}), '(20, 220, 211, 31)\n', (1321, 1339), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1665, 1695), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(40)', '(221)', '(151)'], {}), '(20, 40, 221, 151)\n', (1677, 1695), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1883, 1913), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(260)', '(131)', '(31)'], {}), '(60, 260, 131, 31)\n', (1895, 1913), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2271, 2301), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(210)', '(141)', '(71)'], {}), '(10, 210, 141, 71)\n', (2283, 2301), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2448, 2477), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(121)', '(51)'], {}), '(10, 20, 121, 51)\n', (2460, 2477), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2847, 2878), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(350)', '(220)', '(131)', '(61)'], {}), '(350, 220, 131, 61)\n', (2859, 2878), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3321, 3351), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(621)', '(181)'], {}), '(10, 10, 621, 181)\n', (3333, 3351), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3498, 3528), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(601)', '(151)'], {}), '(10, 20, 601, 151)\n', (3510, 3528), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3675, 3706), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(220)', '(131)', '(61)'], {}), '(490, 220, 131, 61)\n', (3687, 3706), False, 'from 
PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4055, 4085), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(290)', '(171)', '(16)'], {}), '(20, 290, 171, 16)\n', (4067, 4085), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4226, 4257), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(210)', '(141)', '(71)'], {}), '(160, 210, 141, 71)\n', (4238, 4257), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4406, 4435), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(121)', '(51)'], {}), '(10, 20, 121, 51)\n', (4418, 4435), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4794, 4825), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(340)', '(290)', '(291)', '(20)'], {}), '(340, 290, 291, 20)\n', (4806, 4825), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11534, 11583), 'pygame.Rect', 'pygame.Rect', (['(j * sizex, i * sizey, sizex, sizey)'], {}), '((j * sizex, i * sizey, sizex, sizey))\n', (11545, 11583), False, 'import pygame\n'), ((11598, 11623), 'pygame.Surface', 'pygame.Surface', (['rect.size'], {}), '(rect.size)\n', (11612, 11623), False, 'import pygame\n'), ((15583, 15605), 'random.randrange', 'random.randrange', (['(0)', '(3)'], {}), '(0, 3)\n', (15599, 15605), False, 'import random\n'), ((16193, 16215), 'random.randrange', 'random.randrange', (['(0)', '(3)'], {}), '(0, 3)\n', (16209, 16215), False, 'import random\n'), ((19620, 19653), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""sprites/logo.png"""'], {}), "('sprites/logo.png')\n", (19633, 19653), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((20050, 20071), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (20069, 20071), False, 'import pygame\n'), ((20693, 20714), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (20712, 20714), False, 'import pygame\n'), ((11966, 12013), 'pygame.transform.scale', 'pygame.transform.scale', (['image', '(scalex, scaley)'], {}), '(image, (scalex, scaley))\n', (11988, 12013), False, 'import 
pygame\n'), ((21592, 21620), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (21618, 21620), False, 'import pygame\n'), ((22014, 22032), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (22030, 22032), False, 'import pygame\n'), ((22503, 22531), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (22529, 22531), False, 'import pygame\n'), ((22792, 22815), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (22813, 22815), False, 'import pygame\n'), ((22887, 22947), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['data', 'width', 'height', 'QtGui.QImage.Format_RGB32'], {}), '(data, width, height, QtGui.QImage.Format_RGB32)\n', (22899, 22947), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((22971, 22995), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (22988, 22995), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((7286, 7331), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB', 'frame'], {}), '(frame, cv2.COLOR_BGR2RGB, frame)\n', (7298, 7331), False, 'import cv2\n'), ((7385, 7423), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (7397, 7423), False, 'import cv2\n'), ((7451, 7476), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (7458, 7476), True, 'import numpy as np\n'), ((7501, 7543), 'cv2.inRange', 'cv2.inRange', (['hsv', 'Lower_bound', 'Upper_bound'], {}), '(hsv, Lower_bound, Upper_bound)\n', (7512, 7543), False, 'import cv2\n'), ((7568, 7605), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (7577, 7605), False, 'import cv2\n'), ((7630, 7676), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (7646, 7676), False, 'import cv2\n'), ((7701, 7739), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(1)'}), 
'(mask, kernel, iterations=1)\n', (7711, 7739), False, 'import cv2\n'), ((7763, 7803), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (7778, 7803), False, 'import cv2\n'), ((8854, 8872), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (8862, 8872), False, 'import cv2\n'), ((8901, 8967), 'PyQt5.QtGui.QImage', 'QImage', (['flipped', 'width', 'height', 'bytesPerLine', 'QImage.Format_RGB888'], {}), '(flipped, width, height, bytesPerLine, QImage.Format_RGB888)\n', (8907, 8967), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((8998, 9022), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (9015, 9022), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((15091, 15114), 'pygame.mixer.get_init', 'pygame.mixer.get_init', ([], {}), '()\n', (15112, 15114), False, 'import pygame\n'), ((21050, 21084), 'PyQt5.QtCore.QDateTime.currentDateTime', 'QtCore.QDateTime.currentDateTime', ([], {}), '()\n', (21082, 21084), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((24664, 24692), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (24690, 24692), False, 'import pygame\n'), ((26217, 26235), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (26233, 26235), False, 'import pygame\n'), ((27366, 27407), 'pygame.sprite.collide_mask', 'pygame.sprite.collide_mask', (['playerDino', 'c'], {}), '(playerDino, c)\n', (27392, 27407), False, 'import pygame\n'), ((27675, 27716), 'pygame.sprite.collide_mask', 'pygame.sprite.collide_mask', (['playerDino', 'p'], {}), '(playerDino, p)\n', (27701, 27716), False, 'import pygame\n'), ((29165, 29193), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (29191, 29193), False, 'import pygame\n'), ((29642, 29665), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (29663, 29665), False, 'import pygame\n'), ((29781, 29841), 'PyQt5.QtGui.QImage', 
'QtGui.QImage', (['data', 'width', 'height', 'QtGui.QImage.Format_RGB32'], {}), '(data, width, height, QtGui.QImage.Format_RGB32)\n', (29793, 29841), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29869, 29893), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (29886, 29893), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((30483, 30511), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (30509, 30511), False, 'import pygame\n'), ((30999, 31017), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (31015, 31017), False, 'import pygame\n'), ((31658, 31686), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (31684, 31686), False, 'import pygame\n'), ((31923, 31946), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (31944, 31946), False, 'import pygame\n'), ((32024, 32084), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['data', 'width', 'height', 'QtGui.QImage.Format_RGB32'], {}), '(data, width, height, QtGui.QImage.Format_RGB32)\n', (32036, 32084), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((32112, 32136), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (32129, 32136), False, 'from PyQt5.QtGui import QImage, QPixmap\n'), ((8079, 8104), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (8101, 8104), False, 'import cv2\n'), ((8132, 8146), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (8143, 8146), False, 'import cv2\n'), ((8755, 8810), 'cv2.line', 'cv2.line', (['frame', 'pts[i - 1]', 'pts[i]', '(0, 0, 255)', 'thick'], {}), '(frame, pts[i - 1], pts[i], (0, 0, 255), thick)\n', (8763, 8810), False, 'import cv2\n'), ((28411, 28435), 'random.randrange', 'random.randrange', (['(0)', '(200)'], {}), '(0, 200)\n', (28427, 28435), False, 'import random\n'), ((28725, 28749), 'random.randrange', 'random.randrange', (['(0)', '(300)'], {}), '(0, 300)\n', (28741, 28749), False, 
'import random\n'), ((28789, 28829), 'random.randrange', 'random.randrange', (['(height / 5)', '(height / 2)'], {}), '(height / 5, height / 2)\n', (28805, 28829), False, 'import random\n'), ((8424, 8469), 'cv2.circle', 'cv2.circle', (['frame', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, center, 5, (0, 0, 255), -1)\n', (8434, 8469), False, 'import cv2\n'), ((27486, 27509), 'pygame.mixer.get_init', 'pygame.mixer.get_init', ([], {}), '()\n', (27507, 27509), False, 'import pygame\n'), ((27795, 27818), 'pygame.mixer.get_init', 'pygame.mixer.get_init', ([], {}), '()\n', (27816, 27818), False, 'import pygame\n'), ((25412, 25435), 'pygame.mixer.get_init', 'pygame.mixer.get_init', ([], {}), '()\n', (25433, 25435), False, 'import pygame\n'), ((28205, 28228), 'random.randrange', 'random.randrange', (['(0)', '(50)'], {}), '(0, 50)\n', (28221, 28228), False, 'import random\n'), ((26688, 26711), 'pygame.mixer.get_init', 'pygame.mixer.get_init', ([], {}), '()\n', (26709, 26711), False, 'import pygame\n')] |
"""
Copyright (C) 2021 Adobe. All rights reserved.
"""
import cv2
import os
import numpy as np
import pickle
import torch
import pydiffvg
import torch.nn.functional as F
def txt2list(the_txt_fn):
    """Read the text file *the_txt_fn* and return its lines with surrounding whitespace stripped."""
    with open(the_txt_fn, 'r') as fh:
        raw_lines = fh.readlines()
    return [line.strip() for line in raw_lines]
def list2txt(ofn, str_list):
    """Append every string in *str_list* to file *ofn*, one per line.

    The file is opened in append mode, so repeated calls accumulate lines.
    """
    with open(ofn, 'a') as fh:
        fh.writelines(s + '\n' for s in str_list)
def show_img(cv2_array, ofn=None, title='image'):
    """Save *cv2_array* to *ofn* when a filename is given; otherwise show it
    in a blocking OpenCV window titled *title*."""
    if ofn is not None:
        cv2.imwrite(ofn, cv2_array)
        return
    cv2.imshow(title, cv2_array)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def create_folder(folder_name):
    """Create *folder_name* (including missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an ``os.path.exists`` pre-check, which
    avoids the check-then-create race where another process creates the
    directory between the existence test and ``makedirs``.

    Returns:
        The folder name unchanged, for call chaining.
    """
    os.makedirs(folder_name, exist_ok=True)
    return folder_name
def tensor2img(input_tensor):
    """Convert a float tensor with values in [0, 1] to a uint8 array in [0, 255]."""
    arr = input_tensor.detach().cpu().numpy()
    scaled = 255.0 * np.clip(arr, 0.0, 1.0)
    return np.uint8(scaled)
def tensor2npy(input_tensor):
    """Detach *input_tensor* from the graph, move it to CPU, and return a NumPy array."""
    return input_tensor.detach().cpu().numpy()
def mask_stroke_texture(SG_tensor, ST_tensor, iteration):
    """Composite a textured stroke drawing over a white background using a
    binarized stroke mask, optionally dilating the mask first.

    Args:
        SG_tensor: stroke-geometry tensor; after squeeze() it must be a 2D
            CPU tensor. The mask is 1.0 - SG, so presumably stroke pixels
            have SG values below 1.0 -- confirm with the caller.
        ST_tensor: stroke-texture tensor of shape (1, C, H, W); converted to
            channel-last (H, W, C) before compositing.
        iteration: -1 for no masking at all, 0 to composite with the raw
            binary mask, > 0 to first dilate the mask with a 5x5 kernel for
            this many iterations (uses cv2).

    Returns:
        uint8 image array in [0, 255]; shape (H, W) when C == 1, else (H, W, C).
    """
    mask = 1.0 - SG_tensor.squeeze().numpy()
    mask[mask > 0] = 1.0  # binarize: any nonzero coverage becomes a full mask pixel
    textured_drawing = ST_tensor[0, :, :, :].permute(1, 2, 0).cpu().numpy()
    assert iteration >= -1
    if iteration == -1: # No masking
        pass
    elif iteration == 0:
        mask = mask[:, :, np.newaxis]
        # keep the texture where the mask is on; white (1.0) everywhere else
        textured_drawing = mask * textured_drawing + (1.0 - mask) * 1.0
    else:
        # grow the mask by `iteration` 5x5 dilation passes before compositing
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(np.uint8(255.0 * mask), kernel, iterations=iteration) / 255.0
        mask = mask[:, :, np.newaxis]
        textured_drawing = mask * textured_drawing + (1.0 - mask) * 1.0
    if textured_drawing.shape[2] == 1:
        textured_drawing = textured_drawing[:, :, 0]
    return np.uint8(np.clip(textured_drawing, 0.0, 1.0) * 255.0)
def calculate_local_frame_polyline(vertices_np): # nv, 2
    """Compute unit tangent and normal vectors at every vertex of a 2D polyline.

    Central differences are used for interior vertices; one-sided differences
    at the two endpoints. Zero-length tangents are rejected by assertion.

    Args:
        vertices_np: (nv, 2) array of vertex positions, nv >= 2.

    Returns:
        (tangents, normals): two (nv, 2) float64 arrays of unit vectors; each
        normal is its tangent rotated by -90 degrees.
    """
    nv = vertices_np.shape[0]
    assert nv >= 2
    tangents = np.zeros((nv, 2), dtype=np.float64)
    tangents[0, :] = vertices_np[1, :] - vertices_np[0, :]
    tangents[-1, :] = vertices_np[-1, :] - vertices_np[-2, :]
    if nv > 2:
        tangents[1:-1, :] = vertices_np[2:, :] - vertices_np[:-2, :]
    lengths = np.sqrt(np.sum(tangents ** 2, axis=1))  # tangent vector length
    assert np.sum(lengths == 0.0) == 0
    tangents = tangents / lengths[:, np.newaxis]  # n, 2
    normals = np.zeros((nv, 2), dtype=np.float64)
    normals[:, 0] = tangents[:, 1]
    normals[:, 1] = -tangents[:, 0]
    return tangents, normals
def ss2vg(list_fn, hw=768): # read planar map
    """Load a pickled planar-map stroke list and convert it to tensor form.

    Args:
        list_fn: path to a pickle file holding a list of strokes; each stroke
            is a list of per-vertex values indexed below as [x, y] (cols 0:2),
            four frame components (cols 2:6), and depth (last col).
        hw: raster resolution; xy features are normalized by hw and the
            planar map is rasterized onto a 7-channel (hw, hw) grid.

    Returns:
        dict with per-stroke tensor lists ('ans_list', 'norm_list',
        'tangent_list', ...), zero-padded feature and mask tensors, the
        rasterized planar map 'pm_tensor' with channel views ('mask',
        'normal', 'depth'), padded points 'ans_padded', and the
        per-stroke 'normOD'/'tangentOD'/'arcnormOD' curve features.
    """
    ss = os.path.split(list_fn)[1].split('.')[0]
    with open(list_fn, 'rb') as f:
        input_list = pickle.load(f) # a list of strokes
    num_strokes = len(input_list)
    assert num_strokes >= 1
    # longest stroke determines the padded feature width
    max_v = -1
    for a_idx, a_stroke in enumerate(input_list):
        assert len(a_stroke) >= 2
        if len(a_stroke) > max_v:
            max_v = len(a_stroke)
    ans_list = [] # a list of (variant, 2) tensor
    num_vertices_list = [] # a list of int the mask number of vertices
    features_np = np.zeros((num_strokes, 8, max_v), dtype=np.float32) # zero padding padded
    mask_np = np.zeros((num_strokes, max_v), dtype=np.float32) # for loss computation
    norm_list = []
    tangent_list = []
    # running depth extrema across all strokes, used for normalization below
    min_depth = 2e10 + 1.0
    max_depth = -2e10 + 1.0
    for a_idx, a_stroke in enumerate(input_list):
        temp = np.array(a_stroke).astype(np.float32)
        all_tensor = torch.from_numpy(temp)
        ans_list.append(all_tensor[:, 0:2].contiguous())
        NS_tangent, NS_normal = calculate_local_frame_polyline(temp[:, 0:2]) # n, 2; n, 2
        norm_list.append(torch.from_numpy(NS_normal.astype(np.float32)))
        tangent_list.append(torch.from_numpy(NS_tangent.astype(np.float32)))
        this_num_vertices = temp.shape[0]
        num_vertices_list.append(this_num_vertices)
        mask_np[a_idx, 0:this_num_vertices] = 1.0
        # curve parameter in [0, 1); divides by nv, unlike arcnormOD below which reaches 1
        vertices_feature = np.arange(temp.shape[0]) / float(temp.shape[0])
        features_np[a_idx, 0, 0:this_num_vertices] = vertices_feature # curve parameter 0
        features_np[a_idx, 1:3, 0:this_num_vertices] = temp[:, 0:2].T / hw # x, y 1, 2
        features_np[a_idx, 3:7, 0:this_num_vertices] = temp[:, 2:6].T # normal 3, 4, tangent 5, 6
        if np.amax(temp[:, -1]) > max_depth:
            max_depth = np.amax(temp[:, -1])
        if np.amin(temp[:, -1]) < min_depth:
            min_depth = np.amin(temp[:, -1])
    rasterized_pm = np.zeros((7, hw, hw), dtype=np.float32) # zero background
    indices_list = [] # store num_path tensors
    for a_idx, a_stroke in enumerate(input_list):
        temp = np.array(a_stroke).astype(np.float32)[:, -1] # depth np
        this_num_vertices = temp.shape[0]
        if max_depth == min_depth: # no depth variation
            temp = temp * 0.0 + 1.0 # all 1.0
        else: # normalize depth
            temp = (temp - min_depth) / (max_depth - min_depth)
        features_np[a_idx, -1, 0:this_num_vertices] = temp # normalized depth, 7
        # rounded pixel coordinates, flipped to (row, col) order for indexing
        pos_int = np.flip(np.around(np.array(a_stroke)[:, 0:2].astype(np.float32)).astype(np.int64), axis=1)
        indices_tensor = torch.from_numpy(pos_int.copy()) # nv, 2
        indices_list.append(indices_tensor)
        rasterized_pm[0, pos_int[:, 0], pos_int[:, 1]] = 1.0 # mask channel
        rasterized_pm[1:6, pos_int[:, 0], pos_int[:, 1]] = features_np[a_idx, 0:5, 0:num_vertices_list[a_idx]]
        rasterized_pm[6, pos_int[:, 0], pos_int[:, 1]] = temp
    pm_tensor = torch.from_numpy(rasterized_pm) # 7, hw, hw :: dots_mask, arc, xy, normal, depth (white mask)
    features_tensor = torch.from_numpy(features_np) # num_strokes, 8, max_v ::arc, xy, normal, tangent, depth
    mask_tensor = torch.from_numpy(mask_np) # num_strokes, max_v
    ans_padded = torch.zeros(num_strokes, max_v, 2, dtype=torch.float32)
    for path_id, a_tensor in enumerate(ans_list):
        ans_padded[path_id, :a_tensor.shape[0], :] = a_tensor
    ans_dict = {'ans_list': ans_list, 'norm_list': norm_list, 'tangent_list': tangent_list, 'num_vertices_list': num_vertices_list,
                'features_tensor': features_tensor, 'ss': ss, 'pm_tensor': pm_tensor, 'indices_list': indices_list,
                'mask_tensor': mask_tensor, 'mask': pm_tensor[0:1, :, :], 'depth': pm_tensor[6:7, :, :],
                'normal': pm_tensor[4:6, :, :], 'fake': torch.zeros(1, pm_tensor.shape[1], pm_tensor.shape[2], dtype=torch.float32),
                'ans_padded': ans_padded}
    # --- get curve features
    ans_dict['normOD'] = norm_list
    ans_dict['tangentOD'] = tangent_list
    arcnorm_list = []
    for _, a_tensor in enumerate(ans_list):
        this_nv = a_tensor.shape[0]
        arcnorm_feature = np.arange(this_nv) / float(this_nv - 1) # 0 -> 1
        arcnorm_feature = arcnorm_feature.astype(np.float32)
        arcnorm_list.append(torch.from_numpy(arcnorm_feature[:, np.newaxis])) # n, 1
    ans_dict['arcnormOD'] = arcnorm_list
    return ans_dict
def collect_con_feat(vg_dict, feat_str):
    """Gather the features named in *feat_str* ('_'-separated keys of *vg_dict*).

    A single key returns the stored tensor unchanged; multiple keys are
    concatenated along dimension 0.
    """
    keys = feat_str.split('_')
    if len(keys) == 1:
        return vg_dict[keys[0]]
    return torch.cat([vg_dict[k] for k in keys], 0)
def get_svg_shapes(vg_dict, thickness_list=None, hw=768, dpxy_list=None):
    """Serialize the strokes in *vg_dict* into a pydiffvg scene of open paths.

    Args:
        vg_dict: dict with key 'ans_list' -- a list of (nv, 2) point tensors,
            one per stroke.
        thickness_list: optional per-path stroke-width tensors; when None,
            every vertex gets width 1.0.
        hw: canvas width and height in pixels for the serialized scene.
        dpxy_list: optional per-path displacement tensors; each is permuted
            from (2, nv) to (nv, 2) and added to the stored points.

    Returns:
        The pydiffvg serialized scene args (requires CUDA: several tensors
        are moved with .cuda()).
    """
    shapes = []
    shape_groups = []
    new_id = 0
    for path_id, points_tensor in enumerate(vg_dict['ans_list']): # creat svg
        # zero control points between vertices => straight polyline segments
        ncp_tensor = torch.zeros(points_tensor.shape[0] - 1, dtype=torch.int32).cuda()
        if dpxy_list is not None:
            dp_v = dpxy_list[path_id].permute(1, 0) # nv, 2 # absolute dp
            new_pos_tensor = points_tensor + dp_v
        else:
            new_pos_tensor = points_tensor
        if thickness_list is None:
            new_t_tensor = torch.ones(points_tensor.shape[0], dtype=torch.float32).to(points_tensor.device)
        else:
            new_t_tensor = thickness_list[new_id]
        path = pydiffvg.Path(num_control_points=ncp_tensor,
                              points=new_pos_tensor,
                              is_closed=False,
                              stroke_width=new_t_tensor,
                              id='path_%d' % new_id)
        shapes.append(path)
        # each path is stroked opaque black, with no fill
        path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([new_id]).cuda(),
                                        fill_color=None,
                                        stroke_color=torch.tensor([0, 0, 0, 1]).cuda(),
                                        use_even_odd_rule=False)
        shape_groups.append(path_group)
        new_id += 1
    scene_args = pydiffvg.RenderFunction.serialize_scene(hw, hw, shapes, shape_groups)
    return scene_args
def get_pm_attributes_from_1D(pm_dict, feat_1d, min_thickness=0.5): # no activation 1/2, 64, 64 dp then thickness
    """Split per-path 1D feature codes into displacement and thickness lists.

    Args:
        pm_dict: dict with 'indices_list' -- per-path (nv, 2) index tensors,
            used only to read each path's vertex count.
        feat_1d: (num_paths, 3, max_v) feature tensor; rows 0:2 are the raw
            xy displacement (no activation), row 2 is the pre-activation
            thickness.
        min_thickness: lower bound added after the leaky-ReLU activation.

    Returns:
        (dp_list, th_list): per-path (2, nv) displacement tensors and
        per-path (nv,) thickness tensors.
    """
    dp_list = []
    th_list = []
    for path_id, path_indices in enumerate(pm_dict['indices_list']):
        nv = path_indices.shape[0]
        code = feat_1d[path_id, :, 0:nv]  # 3, nv
        dp_list.append(code[0:2, :])  # absolute dp; no activation; 2, nv
        # thickness: leaky-ReLU then offset so it never drops below min_thickness
        th_list.append(F.leaky_relu(code[-1, :]) + min_thickness)
    return dp_list, th_list
def remove_alpha(input_tensor, output_color=False):
    """Composite an RGBA image tensor over a white background (alpha removal).

    Args:
        input_tensor: (H, W, 4) float tensor with color in channels 0-2 and
            alpha in channel 3; values presumably in [0, 1] -- confirm with
            the caller. Must live on a CUDA device: the white background is
            allocated with .cuda().
        output_color: when False, composites only channel 0 and returns an
            (H, W) grayscale tensor; when True, composites all three color
            channels and returns (H, W, 3).

    Returns:
        The alpha-composited image tensor: alpha * color + (1 - alpha) * white.
    """
    if not output_color:
        img_tensor = input_tensor[:, :, 3] * input_tensor[:, :, 0] + torch.ones(input_tensor.shape[0], input_tensor.shape[1], dtype=torch.float32).cuda() * (1 - input_tensor[:, :, 3])
    else:
        img_tensor = input_tensor[:, :, 3:4] * input_tensor[:, :, 0:3] + torch.ones(input_tensor.shape[0], input_tensor.shape[1], 3, dtype=torch.float32).cuda() * (1 - input_tensor[:, :, 3:4])
    return img_tensor
def resample_curves(ifn, ofn):
    """Resample every curve in pickle file *ifn* to uniform arc-length spacing
    and write the result to pickle file *ofn*.

    Input format: a list of curves, each a list of vertices whose first two
    entries are (x, y); any extra per-vertex attributes are discarded.
    Output vertices are written as [x, y, 0, 0, 0, 0, 0], padding the five
    extra attribute slots with zeros.

    NOTE(review): pickle.load can execute arbitrary code while unpickling --
    only feed this function trusted files.
    """
    with open(ifn, 'rb') as f:
        temp_list = pickle.load(f) # a list of curves
    # keep only the (x, y) coordinates of every vertex
    input_list = []
    for a_curve in temp_list:
        new_curve = []
        for a_vertex in a_curve:
            new_curve.append(a_vertex[0:2])
        input_list.append(new_curve)
    # resample each curve at uniform arc length via PMPolyLine
    output_list = []
    for a_curve in input_list:
        temp_instance = PMPolyLine(a_curve) # a curve instance
        temp_curve = temp_instance.resample_curve()
        output_list.append(temp_curve)
    # re-pad each vertex with five zero attribute slots
    ans = []
    for cid, curve_instance in enumerate(output_list):
        ans_stroke = []
        for id_vertex, a_vertex in enumerate(curve_instance):
            ans_stroke.append([float(a_vertex[0]), float(a_vertex[1])] + [0.0, 0.0, 0.0, 0.0, 0.0])
        ans.append(ans_stroke)
    with open(ofn, 'wb') as f:
        pickle.dump(ans, f, protocol=0)
class PMPolyLine(object):
    """A 2D polyline with arc-length parameterization and uniform resampling."""
    def __init__(self, a_stroke):
        """Build the polyline from *a_stroke*, a sequence of (x, y, ...) vertices.

        Consecutive vertices that round to the same integer pixel are
        collapsed into one; at least two distinct vertices must remain.
        """
        self.vertices_list = []
        for a_id, a_vertice in enumerate(a_stroke):
            # skip any vertex that rounds to the same pixel as its predecessor
            if len(self.vertices_list) > 0 and round(self.vertices_list[-1][0]) == round(a_vertice[0]) and round(self.vertices_list[-1][1]) == round(a_vertice[1]): # duplicate
                continue
            self.vertices_list.append(a_vertice[:2])
        assert len(self.vertices_list) >= 2
        self.vertices_np = np.array(self.vertices_list)
        # total length and cumulative per-vertex arc-length parameter
        self.arc_length = calculate_arc_length(self.vertices_np)
        self.arc_t = calculate_arc_t(self.vertices_np)
    def resample_curve(self, threshold=3.0): # arc length threshold
        """Return vertices sampled every *threshold* units of arc length.

        The first vertex is always kept; subsequent points are linearly
        interpolated along the polyline. The original final vertex is not
        necessarily included.
        """
        ans_list = [[float(self.vertices_np[0, 0]), float(self.vertices_np[0, 1])]]
        current_length = 0.0
        current_length += threshold
        while True:
            if current_length > self.arc_length:
                break
            interpolated_x = np.interp(current_length, self.arc_t, self.vertices_np[:, 0])
            interpolated_y = np.interp(current_length, self.arc_t, self.vertices_np[:, 1])
            ans_list.append([float(interpolated_x), float(interpolated_y)])
            current_length += threshold
        return ans_list
def calculate_arc_t(vertices_np):
    """Cumulative arc-length parameter of a polyline.

    Args:
        vertices_np: (nv, 2) array of vertices, nv >= 2.

    Returns:
        float64 array of shape (nv,); entry i is the path length from
        vertex 0 to vertex i (entry 0 is 0.0).
    """
    num_vertices = vertices_np.shape[0]
    assert num_vertices >= 2
    deltas = vertices_np[1:] - vertices_np[:-1]
    segment_lengths = np.sqrt(np.sum(deltas ** 2, axis=1))
    arc_t = np.zeros((num_vertices,), dtype=np.float64)
    arc_t[1:] = np.cumsum(segment_lengths)
    return arc_t
def calculate_arc_length(vertices_np):
    """Total arc length of a polyline.

    Args:
        vertices_np: (nv, 2) array of vertices, nv >= 2.

    Returns:
        Sum of the Euclidean lengths of all consecutive segments.
    """
    num_vertices = vertices_np.shape[0]
    assert num_vertices >= 2
    deltas = vertices_np[1:] - vertices_np[:-1]
    return np.sum(np.sqrt(np.sum(deltas ** 2, axis=1)))
| [
"pickle.dump",
"numpy.sum",
"numpy.amin",
"torch.cat",
"numpy.clip",
"numpy.ones",
"pickle.load",
"numpy.arange",
"torch.nn.functional.leaky_relu",
"numpy.interp",
"cv2.imshow",
"torch.ones",
"cv2.imwrite",
"os.path.exists",
"pydiffvg.Path",
"torch.zeros",
"cv2.destroyAllWindows",
... | [((959, 972), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (967, 972), True, 'import numpy as np\n'), ((2029, 2064), 'numpy.zeros', 'np.zeros', (['(nv, 2)'], {'dtype': 'np.float64'}), '((nv, 2), dtype=np.float64)\n', (2037, 2064), True, 'import numpy as np\n'), ((2454, 2489), 'numpy.zeros', 'np.zeros', (['(nv, 2)'], {'dtype': 'np.float64'}), '((nv, 2), dtype=np.float64)\n', (2462, 2489), True, 'import numpy as np\n'), ((3150, 3201), 'numpy.zeros', 'np.zeros', (['(num_strokes, 8, max_v)'], {'dtype': 'np.float32'}), '((num_strokes, 8, max_v), dtype=np.float32)\n', (3158, 3201), True, 'import numpy as np\n'), ((3240, 3288), 'numpy.zeros', 'np.zeros', (['(num_strokes, max_v)'], {'dtype': 'np.float32'}), '((num_strokes, max_v), dtype=np.float32)\n', (3248, 3288), True, 'import numpy as np\n'), ((4558, 4597), 'numpy.zeros', 'np.zeros', (['(7, hw, hw)'], {'dtype': 'np.float32'}), '((7, hw, hw), dtype=np.float32)\n', (4566, 4597), True, 'import numpy as np\n'), ((5603, 5634), 'torch.from_numpy', 'torch.from_numpy', (['rasterized_pm'], {}), '(rasterized_pm)\n', (5619, 5634), False, 'import torch\n'), ((5723, 5752), 'torch.from_numpy', 'torch.from_numpy', (['features_np'], {}), '(features_np)\n', (5739, 5752), False, 'import torch\n'), ((5832, 5857), 'torch.from_numpy', 'torch.from_numpy', (['mask_np'], {}), '(mask_np)\n', (5848, 5857), False, 'import torch\n'), ((5898, 5953), 'torch.zeros', 'torch.zeros', (['num_strokes', 'max_v', '(2)'], {'dtype': 'torch.float32'}), '(num_strokes, max_v, 2, dtype=torch.float32)\n', (5909, 5953), False, 'import torch\n'), ((8777, 8846), 'pydiffvg.RenderFunction.serialize_scene', 'pydiffvg.RenderFunction.serialize_scene', (['hw', 'hw', 'shapes', 'shape_groups'], {}), '(hw, hw, shapes, shape_groups)\n', (8816, 8846), False, 'import pydiffvg\n'), ((12158, 12191), 'numpy.zeros', 'np.zeros', (['(nv,)'], {'dtype': 'np.float64'}), '((nv,), dtype=np.float64)\n', (12166, 12191), True, 'import numpy as np\n'), ((570, 598), 'cv2.imshow', 
'cv2.imshow', (['title', 'cv2_array'], {}), '(title, cv2_array)\n', (580, 598), False, 'import cv2\n'), ((607, 621), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (618, 621), False, 'import cv2\n'), ((630, 653), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (651, 653), False, 'import cv2\n'), ((672, 699), 'cv2.imwrite', 'cv2.imwrite', (['ofn', 'cv2_array'], {}), '(ofn, cv2_array)\n', (683, 699), False, 'import cv2\n'), ((745, 772), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (759, 772), False, 'import os\n'), ((782, 806), 'os.makedirs', 'os.makedirs', (['folder_name'], {}), '(folder_name)\n', (793, 806), False, 'import os\n'), ((918, 940), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (925, 940), True, 'import numpy as np\n'), ((2277, 2301), 'numpy.sum', 'np.sum', (['(ans ** 2)'], {'axis': '(1)'}), '(ans ** 2, axis=1)\n', (2283, 2301), True, 'import numpy as np\n'), ((2339, 2361), 'numpy.sum', 'np.sum', (['(l_array == 0.0)'], {}), '(l_array == 0.0)\n', (2345, 2361), True, 'import numpy as np\n'), ((2738, 2752), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2749, 2752), False, 'import pickle\n'), ((3534, 3556), 'torch.from_numpy', 'torch.from_numpy', (['temp'], {}), '(temp)\n', (3550, 3556), False, 'import torch\n'), ((6477, 6552), 'torch.zeros', 'torch.zeros', (['(1)', 'pm_tensor.shape[1]', 'pm_tensor.shape[2]'], {'dtype': 'torch.float32'}), '(1, pm_tensor.shape[1], pm_tensor.shape[2], dtype=torch.float32)\n', (6488, 6552), False, 'import torch\n'), ((7362, 7384), 'torch.cat', 'torch.cat', (['ans_list', '(0)'], {}), '(ans_list, 0)\n', (7371, 7384), False, 'import torch\n'), ((8124, 8262), 'pydiffvg.Path', 'pydiffvg.Path', ([], {'num_control_points': 'ncp_tensor', 'points': 'new_pos_tensor', 'is_closed': '(False)', 'stroke_width': 'new_t_tensor', 'id': "('path_%d' % new_id)"}), "(num_control_points=ncp_tensor, points=new_pos_tensor,\n is_closed=False, 
stroke_width=new_t_tensor, id='path_%d' % new_id)\n", (8137, 8262), False, 'import pydiffvg\n'), ((10018, 10032), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10029, 10032), False, 'import pickle\n'), ((10775, 10806), 'pickle.dump', 'pickle.dump', (['ans', 'f'], {'protocol': '(0)'}), '(ans, f, protocol=0)\n', (10786, 10806), False, 'import pickle\n'), ((11282, 11310), 'numpy.array', 'np.array', (['self.vertices_list'], {}), '(self.vertices_list)\n', (11290, 11310), True, 'import numpy as np\n'), ((1528, 1553), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1535, 1553), True, 'import numpy as np\n'), ((1865, 1900), 'numpy.clip', 'np.clip', (['textured_drawing', '(0.0)', '(1.0)'], {}), '(textured_drawing, 0.0, 1.0)\n', (1872, 1900), True, 'import numpy as np\n'), ((4030, 4054), 'numpy.arange', 'np.arange', (['temp.shape[0]'], {}), '(temp.shape[0])\n', (4039, 4054), True, 'import numpy as np\n'), ((4368, 4388), 'numpy.amax', 'np.amax', (['temp[:, -1]'], {}), '(temp[:, -1])\n', (4375, 4388), True, 'import numpy as np\n'), ((4426, 4446), 'numpy.amax', 'np.amax', (['temp[:, -1]'], {}), '(temp[:, -1])\n', (4433, 4446), True, 'import numpy as np\n'), ((4458, 4478), 'numpy.amin', 'np.amin', (['temp[:, -1]'], {}), '(temp[:, -1])\n', (4465, 4478), True, 'import numpy as np\n'), ((4516, 4536), 'numpy.amin', 'np.amin', (['temp[:, -1]'], {}), '(temp[:, -1])\n', (4523, 4536), True, 'import numpy as np\n'), ((6830, 6848), 'numpy.arange', 'np.arange', (['this_nv'], {}), '(this_nv)\n', (6839, 6848), True, 'import numpy as np\n'), ((6969, 7017), 'torch.from_numpy', 'torch.from_numpy', (['arcnorm_feature[:, np.newaxis]'], {}), '(arcnorm_feature[:, np.newaxis])\n', (6985, 7017), False, 'import torch\n'), ((11770, 11831), 'numpy.interp', 'np.interp', (['current_length', 'self.arc_t', 'self.vertices_np[:, 0]'], {}), '(current_length, self.arc_t, self.vertices_np[:, 0])\n', (11779, 11831), True, 'import numpy as np\n'), ((11861, 11922), 
'numpy.interp', 'np.interp', (['current_length', 'self.arc_t', 'self.vertices_np[:, 1]'], {}), '(current_length, self.arc_t, self.vertices_np[:, 1])\n', (11870, 11922), True, 'import numpy as np\n'), ((12225, 12282), 'numpy.sum', 'np.sum', (['((vertices_np[:-1] - vertices_np[1:]) ** 2)'], {'axis': '(1)'}), '((vertices_np[:-1] - vertices_np[1:]) ** 2, axis=1)\n', (12231, 12282), True, 'import numpy as np\n'), ((12415, 12472), 'numpy.sum', 'np.sum', (['((vertices_np[:-1] - vertices_np[1:]) ** 2)'], {'axis': '(1)'}), '((vertices_np[:-1] - vertices_np[1:]) ** 2, axis=1)\n', (12421, 12472), True, 'import numpy as np\n'), ((3475, 3493), 'numpy.array', 'np.array', (['a_stroke'], {}), '(a_stroke)\n', (3483, 3493), True, 'import numpy as np\n'), ((7616, 7674), 'torch.zeros', 'torch.zeros', (['(points_tensor.shape[0] - 1)'], {'dtype': 'torch.int32'}), '(points_tensor.shape[0] - 1, dtype=torch.int32)\n', (7627, 7674), False, 'import torch\n'), ((9235, 9271), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['propagated_code[-1, :]'], {}), '(propagated_code[-1, :])\n', (9247, 9271), True, 'import torch.nn.functional as F\n'), ((1580, 1602), 'numpy.uint8', 'np.uint8', (['(255.0 * mask)'], {}), '(255.0 * mask)\n', (1588, 1602), True, 'import numpy as np\n'), ((2642, 2664), 'os.path.split', 'os.path.split', (['list_fn'], {}), '(list_fn)\n', (2655, 2664), False, 'import os\n'), ((4730, 4748), 'numpy.array', 'np.array', (['a_stroke'], {}), '(a_stroke)\n', (4738, 4748), True, 'import numpy as np\n'), ((7963, 8018), 'torch.ones', 'torch.ones', (['points_tensor.shape[0]'], {'dtype': 'torch.float32'}), '(points_tensor.shape[0], dtype=torch.float32)\n', (7973, 8018), False, 'import torch\n'), ((8455, 8477), 'torch.tensor', 'torch.tensor', (['[new_id]'], {}), '([new_id])\n', (8467, 8477), False, 'import torch\n'), ((8598, 8624), 'torch.tensor', 'torch.tensor', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (8610, 8624), False, 'import torch\n'), ((9593, 9670), 'torch.ones', 'torch.ones', 
(['input_tensor.shape[0]', 'input_tensor.shape[1]'], {'dtype': 'torch.float32'}), '(input_tensor.shape[0], input_tensor.shape[1], dtype=torch.float32)\n', (9603, 9670), False, 'import torch\n'), ((9791, 9876), 'torch.ones', 'torch.ones', (['input_tensor.shape[0]', 'input_tensor.shape[1]', '(3)'], {'dtype': 'torch.float32'}), '(input_tensor.shape[0], input_tensor.shape[1], 3, dtype=torch.float32\n )\n', (9801, 9876), False, 'import torch\n'), ((5151, 5169), 'numpy.array', 'np.array', (['a_stroke'], {}), '(a_stroke)\n', (5159, 5169), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import model_helper, workspace, core, rnn_cell
from caffe2.python.attention import AttentionType
import numpy as np
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from hypothesis import given
class TestRNNExecutor(unittest.TestCase):
    """Checks that Caffe2's RNN executor produces results identical to
    running the recurrent step nets as a plain sequence of simple nets.
    """

    def setUp(self):
        # Fixed model dimensions shared by every test in this case.
        self.batch_size = 8
        self.input_dim = 20
        self.hidden_dim = 30
        self.encoder_dim = 40

    @given(
        T=st.integers(10, 100),
        forward_only=st.booleans(),
        **hu.gcs)
    def test_lstm_with_attention_equal_simplenet(self, T, forward_only, gc, dc):
        """Executor vs. non-executor equivalence for LSTMWithAttention.

        Builds an attention LSTM decoder over random encoder outputs, then
        delegates the actual executor/simplenet comparison to _compare().
        """
        # Mix of sequence lengths so the executor is exercised with
        # several different unroll depths across runs.
        self.Tseq = [T, T // 2, T // 2 + T // 4, T, T // 2 + 1]
        workspace.ResetWorkspace()
        with core.DeviceScope(gc):
            print("Run with device: {}, forward only: {}".format(
                gc, forward_only))
            # Feed input blobs: per-item sequence lengths, regression
            # target, and zeroed initial hidden/cell states.
            workspace.FeedBlob(
                "seq_lengths",
                np.array([T] * self.batch_size, dtype=np.int32)
            )
            workspace.FeedBlob("target", np.random.rand(
                T, self.batch_size, self.hidden_dim).astype(np.float32))
            workspace.FeedBlob("hidden_init", np.zeros(
                [1, self.batch_size, self.hidden_dim], dtype=np.float32
            ))
            workspace.FeedBlob("cell_init", np.zeros(
                [1, self.batch_size, self.hidden_dim], dtype=np.float32
            ))
            model = model_helper.ModelHelper(name="lstm")
            model.net.AddExternalInputs(["input"])
            init_blobs = []
            hidden_init, cell_init, encoder_outputs = model.net.AddExternalInputs(
                "hidden_init",
                "cell_init",
                "encoder_outputs"
            )
            awec_init = model.net.AddExternalInputs([
                'initial_attention_weighted_encoder_context',
            ])
            init_blobs.extend([hidden_init, cell_init])
            # Random attention context and encoder outputs.
            workspace.FeedBlob(
                awec_init,
                np.random.rand(1, self.batch_size, self.encoder_dim).astype(
                    np.float32),
            )
            workspace.FeedBlob(
                encoder_outputs,
                np.random.rand(1, self.batch_size, self.encoder_dim).astype(
                    np.float32),
            )
            outputs = rnn_cell.LSTMWithAttention(
                model=model,
                decoder_inputs="input",
                decoder_input_lengths="seq_lengths",
                initial_decoder_hidden_state=hidden_init,
                initial_decoder_cell_state=cell_init,
                initial_attention_weighted_encoder_context=awec_init,
                encoder_output_dim=self.encoder_dim,
                encoder_outputs=encoder_outputs,
                encoder_lengths=None,
                decoder_input_dim=self.input_dim,
                decoder_state_dim=self.hidden_dim,
                scope="",
                attention_type=AttentionType.Recurrent,
                forward_only=forward_only,
                outputs_with_grads=[0],
            )
            output = outputs[0]
            print(outputs)
            # L2 regression loss against the random target.
            loss = model.AveragedLoss(
                model.SquaredL2Distance([output, "target"], "dist"),
                "loss"
            )
            # Add gradient ops
            if not forward_only:
                model.AddGradientOperators([loss])
            # init: zero the initial recurrent states before running.
            for init_blob in init_blobs:
                workspace.FeedBlob(init_blob, np.zeros(
                    [1, self.batch_size, self.hidden_dim], dtype=np.float32
                ))
            self._compare(model, forward_only)

    def init_lstm_model(self, T, num_layers, forward_only, use_loss=True):
        """Build a (possibly stacked) LSTM model with random target data.

        Feeds the input/target/state blobs into the workspace and returns
        (model, output_blob). When `use_loss` is set, an averaged L2 loss
        (and, unless `forward_only`, its gradient operators) is added.
        """
        workspace.FeedBlob(
            "seq_lengths",
            np.array([T] * self.batch_size, dtype=np.int32)
        )
        workspace.FeedBlob("target", np.random.rand(
            T, self.batch_size, self.hidden_dim).astype(np.float32))
        workspace.FeedBlob("hidden_init", np.zeros(
            [1, self.batch_size, self.hidden_dim], dtype=np.float32
        ))
        workspace.FeedBlob("cell_init", np.zeros(
            [1, self.batch_size, self.hidden_dim], dtype=np.float32
        ))
        model = model_helper.ModelHelper(name="lstm")
        model.net.AddExternalInputs(["input"])
        init_blobs = []
        # One (hidden, cell) initial-state pair per stacked layer.
        for i in range(num_layers):
            hidden_init, cell_init = model.net.AddExternalInputs(
                "hidden_init_{}".format(i),
                "cell_init_{}".format(i)
            )
            init_blobs.extend([hidden_init, cell_init])
        output, last_hidden, _, last_state = rnn_cell.LSTM(
            model=model,
            input_blob="input",
            seq_lengths="seq_lengths",
            initial_states=init_blobs,
            dim_in=self.input_dim,
            dim_out=[self.hidden_dim] * num_layers,
            scope="",
            drop_states=True,
            forward_only=forward_only,
            return_last_layer_only=True,
        )
        if use_loss:
            loss = model.AveragedLoss(
                model.SquaredL2Distance([output, "target"], "dist"),
                "loss"
            )
            # Add gradient ops
            if not forward_only:
                model.AddGradientOperators([loss])
        # init: zero the initial recurrent states before running.
        for init_blob in init_blobs:
            workspace.FeedBlob(init_blob, np.zeros(
                [1, self.batch_size, self.hidden_dim], dtype=np.float32
            ))
        return model, output

    def test_empty_sequence(self):
        '''
        Test the RNN executor's handling of empty (zero-length) input
        sequences: the output's time dimension must track the input length,
        including length 0.
        '''
        # Mix of zero and non-zero sequence lengths fed to one created net.
        Tseq = [0, 1, 2, 3, 0, 1]
        workspace.ResetWorkspace()
        with core.DeviceScope(caffe2_pb2.DeviceOption()):
            model, output = self.init_lstm_model(
                T=4, num_layers=1, forward_only=True, use_loss=False)
            workspace.RunNetOnce(model.param_init_net)
            self.enable_rnn_executor(model.net, 1, True)
            np.random.seed(10022015)
            first_call = True
            for seq_len in Tseq:
                input_shape = [seq_len, self.batch_size, self.input_dim]
                workspace.FeedBlob(
                    "input", np.random.rand(*input_shape).astype(np.float32))
                workspace.FeedBlob(
                    "target",
                    np.random.rand(
                        seq_len, self.batch_size, self.hidden_dim
                    ).astype(np.float32))
                if first_call:
                    # Create the net once; later iterations just re-run it
                    # with differently-shaped inputs.
                    workspace.CreateNet(model.net, overwrite=True)
                    first_call = False
                workspace.RunNet(model.net.Proto().name)
                val = workspace.FetchBlob(output)
                self.assertEqual(val.shape[0], seq_len)

    @given(
        num_layers=st.integers(1, 8),
        T=st.integers(4, 100),
        forward_only=st.booleans(),
        **hu.gcs)
    def test_lstm_equal_simplenet(self, num_layers, T, forward_only, gc, dc):
        '''
        Test that the RNN executor produces same results as
        the non-executor (i.e running step nets as sequence of simple nets).
        '''
        self.Tseq = [T, T // 2, T // 2 + T // 4, T, T // 2 + 1]
        workspace.ResetWorkspace()
        with core.DeviceScope(gc):
            print("Run with device: {}, forward only: {}".format(
                gc, forward_only))
            model, _ = self.init_lstm_model(T, num_layers, forward_only)
            self._compare(model, forward_only)

    def _compare(self, model, forward_only):
        """Run `model` twice -- executor off, then on -- from the same
        initial workspace state and random seed, and assert every numpy
        blob after every iteration is numerically identical.
        """
        # Store list of blobs that exist in the beginning
        workspace.RunNetOnce(model.param_init_net)
        init_ws = {k: workspace.FetchBlob(k) for k in workspace.Blobs()}
        # Run with executor disabled (0) and enabled (1)
        for enable_executor in [0, 1]:
            self.enable_rnn_executor(model.net, enable_executor, forward_only)
            workspace.ResetWorkspace()
            # Reset original state
            for k, v in init_ws.items():
                workspace.FeedBlob(k, v)
            # Same seed for both runs so both see identical random inputs.
            np.random.seed(10022015)
            ws = {}
            for j in range(len(self.Tseq)):
                input_shape = [self.Tseq[j], self.batch_size, self.input_dim]
                workspace.FeedBlob(
                    "input", np.random.rand(*input_shape).astype(np.float32))
                workspace.FeedBlob(
                    "target",
                    np.random.rand(
                        self.Tseq[j], self.batch_size, self.hidden_dim
                    ).astype(np.float32))
                if j == 0:
                    workspace.CreateNet(model.net, overwrite=True)
                workspace.RunNet(model.net.Proto().name)
                # Store results for each iteration, keyed "<blob>.<iter>"
                for k in workspace.Blobs():
                    ws[k + "." + str(j)] = workspace.FetchBlob(k)
            if enable_executor:
                rnn_exec_ws = ws
            else:
                non_exec_ws = ws
        # Test that all blobs are equal after running with executor
        # or without.
        self.assertEqual(list(non_exec_ws.keys()), list(rnn_exec_ws.keys()))
        mismatch = False
        for k in rnn_exec_ws.keys():
            non_exec_v = non_exec_ws[k]
            rnn_exec_v = rnn_exec_ws[k]
            if type(non_exec_v) is np.ndarray:
                if not np.allclose(non_exec_v, rnn_exec_v):
                    print("Mismatch: {}".format(k))
                    # Print up to 10 differing elements for debugging.
                    nv = non_exec_v.flatten()
                    rv = rnn_exec_v.flatten()
                    c = 0
                    for j in range(len(nv)):
                        if rv[j] != nv[j]:
                            print(j, rv[j], nv[j])
                            c += 1
                            if c == 10:
                                break
                    mismatch = True
        self.assertFalse(mismatch)

    def enable_rnn_executor(self, net, value, forward_only):
        """Set the `enable_rnn_executor` argument to `value` on every
        RecurrentNetwork* operator in `net`, and sanity-check how many
        operators were patched (1 forward-only, 2 with backward pass).
        """
        num_found = 0
        for op in net.Proto().op:
            if op.type.startswith("RecurrentNetwork"):
                for arg in op.arg:
                    if arg.name == 'enable_rnn_executor':
                        arg.i = value
                        num_found += 1
        # This sanity check is so that if someone changes the
        # enable_rnn_executor parameter name, the test will
        # start failing as this function will become defective.
        self.assertEqual(1 if forward_only else 2, num_found)
if __name__ == "__main__":
    import unittest
    import random
    # Fixed seed for reproducible runs.
    random.seed(2603)
    # Initialize Caffe2 global state with the RNN executor turned on.
    workspace.GlobalInit([
        'caffe2',
        '--caffe2_log_level=0',
        '--caffe2_rnn_executor=1'])
    unittest.main()
| [
"caffe2.python.workspace.GlobalInit",
"caffe2.python.rnn_cell.LSTMWithAttention",
"numpy.random.seed",
"caffe2.proto.caffe2_pb2.DeviceOption",
"numpy.allclose",
"caffe2.python.core.DeviceScope",
"unittest.main",
"caffe2.python.workspace.FeedBlob",
"caffe2.python.workspace.RunNetOnce",
"hypothesis.... | [((915, 941), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (939, 941), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((4574, 4611), 'caffe2.python.model_helper.ModelHelper', 'model_helper.ModelHelper', ([], {'name': '"""lstm"""'}), "(name='lstm')\n", (4598, 4611), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((4998, 5259), 'caffe2.python.rnn_cell.LSTM', 'rnn_cell.LSTM', ([], {'model': 'model', 'input_blob': '"""input"""', 'seq_lengths': '"""seq_lengths"""', 'initial_states': 'init_blobs', 'dim_in': 'self.input_dim', 'dim_out': '([self.hidden_dim] * num_layers)', 'scope': '""""""', 'drop_states': '(True)', 'forward_only': 'forward_only', 'return_last_layer_only': '(True)'}), "(model=model, input_blob='input', seq_lengths='seq_lengths',\n initial_states=init_blobs, dim_in=self.input_dim, dim_out=[self.\n hidden_dim] * num_layers, scope='', drop_states=True, forward_only=\n forward_only, return_last_layer_only=True)\n", (5011, 5259), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((6084, 6110), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (6108, 6110), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((7690, 7716), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (7714, 7716), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((8096, 8138), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.param_init_net'], {}), '(model.param_init_net)\n', (8116, 8138), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((11124, 11141), 'random.seed', 'random.seed', (['(2603)'], {}), '(2603)\n', (11135, 11141), False, 'import random\n'), ((11151, 11238), 'caffe2.python.workspace.GlobalInit', 'workspace.GlobalInit', (["['caffe2', 
'--caffe2_log_level=0', '--caffe2_rnn_executor=1']"], {}), "(['caffe2', '--caffe2_log_level=0',\n '--caffe2_rnn_executor=1'])\n", (11171, 11238), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((11284, 11299), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11297, 11299), False, 'import unittest\n'), ((956, 976), 'caffe2.python.core.DeviceScope', 'core.DeviceScope', (['gc'], {}), '(gc)\n', (972, 976), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((1673, 1710), 'caffe2.python.model_helper.ModelHelper', 'model_helper.ModelHelper', ([], {'name': '"""lstm"""'}), "(name='lstm')\n", (1697, 1710), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((2592, 3118), 'caffe2.python.rnn_cell.LSTMWithAttention', 'rnn_cell.LSTMWithAttention', ([], {'model': 'model', 'decoder_inputs': '"""input"""', 'decoder_input_lengths': '"""seq_lengths"""', 'initial_decoder_hidden_state': 'hidden_init', 'initial_decoder_cell_state': 'cell_init', 'initial_attention_weighted_encoder_context': 'awec_init', 'encoder_output_dim': 'self.encoder_dim', 'encoder_outputs': 'encoder_outputs', 'encoder_lengths': 'None', 'decoder_input_dim': 'self.input_dim', 'decoder_state_dim': 'self.hidden_dim', 'scope': '""""""', 'attention_type': 'AttentionType.Recurrent', 'forward_only': 'forward_only', 'outputs_with_grads': '[0]'}), "(model=model, decoder_inputs='input',\n decoder_input_lengths='seq_lengths', initial_decoder_hidden_state=\n hidden_init, initial_decoder_cell_state=cell_init,\n initial_attention_weighted_encoder_context=awec_init,\n encoder_output_dim=self.encoder_dim, encoder_outputs=encoder_outputs,\n encoder_lengths=None, decoder_input_dim=self.input_dim,\n decoder_state_dim=self.hidden_dim, scope='', attention_type=\n AttentionType.Recurrent, forward_only=forward_only, outputs_with_grads=[0])\n", (2618, 3118), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((681, 701), 
'hypothesis.strategies.integers', 'st.integers', (['(10)', '(100)'], {}), '(10, 100)\n', (692, 701), True, 'import hypothesis.strategies as st\n'), ((725, 738), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (736, 738), True, 'import hypothesis.strategies as st\n'), ((4106, 4153), 'numpy.array', 'np.array', (['([T] * self.batch_size)'], {'dtype': 'np.int32'}), '([T] * self.batch_size, dtype=np.int32)\n', (4114, 4153), True, 'import numpy as np\n'), ((4332, 4397), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (4340, 4397), True, 'import numpy as np\n'), ((4464, 4529), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (4472, 4529), True, 'import numpy as np\n'), ((6307, 6349), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.param_init_net'], {}), '(model.param_init_net)\n', (6327, 6349), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((6425, 6449), 'numpy.random.seed', 'np.random.seed', (['(10022015)'], {}), '(10022015)\n', (6439, 6449), True, 'import numpy as np\n'), ((7731, 7751), 'caffe2.python.core.DeviceScope', 'core.DeviceScope', (['gc'], {}), '(gc)\n', (7747, 7751), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((7263, 7280), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(8)'], {}), '(1, 8)\n', (7274, 7280), True, 'import hypothesis.strategies as st\n'), ((7293, 7312), 'hypothesis.strategies.integers', 'st.integers', (['(4)', '(100)'], {}), '(4, 100)\n', (7304, 7312), True, 'import hypothesis.strategies as st\n'), ((7336, 7349), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (7347, 7349), True, 'import hypothesis.strategies as st\n'), ((8162, 8184), 'caffe2.python.workspace.FetchBlob', 
'workspace.FetchBlob', (['k'], {}), '(k)\n', (8181, 8184), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((8377, 8403), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (8401, 8403), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((8541, 8565), 'numpy.random.seed', 'np.random.seed', (['(10022015)'], {}), '(10022015)\n', (8555, 8565), True, 'import numpy as np\n'), ((1165, 1212), 'numpy.array', 'np.array', (['([T] * self.batch_size)'], {'dtype': 'np.int32'}), '([T] * self.batch_size, dtype=np.int32)\n', (1173, 1212), True, 'import numpy as np\n'), ((1407, 1472), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (1415, 1472), True, 'import numpy as np\n'), ((1551, 1616), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (1559, 1616), True, 'import numpy as np\n'), ((5778, 5843), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (5786, 5843), True, 'import numpy as np\n'), ((6142, 6167), 'caffe2.proto.caffe2_pb2.DeviceOption', 'caffe2_pb2.DeviceOption', ([], {}), '()\n', (6165, 6167), False, 'from caffe2.proto import caffe2_pb2\n'), ((7143, 7170), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['output'], {}), '(output)\n', (7162, 7170), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((8194, 8211), 'caffe2.python.workspace.Blobs', 'workspace.Blobs', ([], {}), '()\n', (8209, 8211), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((8501, 8525), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['k', 'v'], {}), '(k, v)\n', (8519, 8525), False, 'from 
caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((9283, 9300), 'caffe2.python.workspace.Blobs', 'workspace.Blobs', ([], {}), '()\n', (9298, 9300), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((3801, 3866), 'numpy.zeros', 'np.zeros', (['[1, self.batch_size, self.hidden_dim]'], {'dtype': 'np.float32'}), '([1, self.batch_size, self.hidden_dim], dtype=np.float32)\n', (3809, 3866), True, 'import numpy as np\n'), ((4203, 4254), 'numpy.random.rand', 'np.random.rand', (['T', 'self.batch_size', 'self.hidden_dim'], {}), '(T, self.batch_size, self.hidden_dim)\n', (4217, 4254), True, 'import numpy as np\n'), ((6973, 7019), 'caffe2.python.workspace.CreateNet', 'workspace.CreateNet', (['model.net'], {'overwrite': '(True)'}), '(model.net, overwrite=True)\n', (6992, 7019), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((9096, 9142), 'caffe2.python.workspace.CreateNet', 'workspace.CreateNet', (['model.net'], {'overwrite': '(True)'}), '(model.net, overwrite=True)\n', (9115, 9142), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((9346, 9368), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['k'], {}), '(k)\n', (9365, 9368), False, 'from caffe2.python import model_helper, workspace, core, rnn_cell\n'), ((9883, 9918), 'numpy.allclose', 'np.allclose', (['non_exec_v', 'rnn_exec_v'], {}), '(non_exec_v, rnn_exec_v)\n', (9894, 9918), True, 'import numpy as np\n'), ((1270, 1321), 'numpy.random.rand', 'np.random.rand', (['T', 'self.batch_size', 'self.hidden_dim'], {}), '(T, self.batch_size, self.hidden_dim)\n', (1284, 1321), True, 'import numpy as np\n'), ((2263, 2315), 'numpy.random.rand', 'np.random.rand', (['(1)', 'self.batch_size', 'self.encoder_dim'], {}), '(1, self.batch_size, self.encoder_dim)\n', (2277, 2315), True, 'import numpy as np\n'), ((2457, 2509), 'numpy.random.rand', 'np.random.rand', (['(1)', 'self.batch_size', 'self.encoder_dim'], {}), '(1, 
self.batch_size, self.encoder_dim)\n', (2471, 2509), True, 'import numpy as np\n'), ((6656, 6684), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (6670, 6684), True, 'import numpy as np\n'), ((6794, 6851), 'numpy.random.rand', 'np.random.rand', (['seq_len', 'self.batch_size', 'self.hidden_dim'], {}), '(seq_len, self.batch_size, self.hidden_dim)\n', (6808, 6851), True, 'import numpy as np\n'), ((8778, 8806), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (8792, 8806), True, 'import numpy as np\n'), ((8916, 8978), 'numpy.random.rand', 'np.random.rand', (['self.Tseq[j]', 'self.batch_size', 'self.hidden_dim'], {}), '(self.Tseq[j], self.batch_size, self.hidden_dim)\n', (8930, 8978), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from keras.utils import to_categorical
import numpy as np
import tensorflow as tf
import datetime
import scipy.io as sio
import multiprocessing
import math
from matplotlib.pyplot import pause
import os
import glob
# Parameters for learning rate optimization and batch size ##################
learning_rate = 0.025  # optimizer learning rate
learning_rate2 = 0.2  # mu_t \times beta (from paper)
training_epochs = 120  # number of training epochs
batch_size = 5  # mini-batch size
display_step = 10  # presumably controls logging frequency -- confirm usage
#############################################################################
# sets neighbor indexes for k-regular networks (number of neighbors is 'neighbors'
def get_connectivity(ii_saved_local, neighbors, devices):
    """Neighbor indices of device `ii_saved_local` on a line of `devices` nodes.

    Interior devices get a symmetric window of indices around themselves
    (self excluded); devices at or near either end fall back to a one-sided
    window so they still have `neighbors` neighbors.

    Args:
        ii_saved_local: 0-based index of the local device.
        neighbors: number of neighbors per device.
        devices: total number of devices.

    Returns:
        np.ndarray of neighbor indices (the local index is never included).
    """
    half_window = math.ceil(neighbors / 2)
    if ii_saved_local == 0:
        # First device: all neighbors lie to the right.
        return np.arange(1, neighbors + 1)
    if ii_saved_local == devices - 1:
        # Last device: all neighbors lie to the left.
        return np.arange(devices - 1 - neighbors, devices - 1)
    if half_window <= ii_saved_local <= devices - half_window - 1:
        # Interior device: symmetric window (self removed below).
        lo = ii_saved_local - math.floor(neighbors / 2)
        hi = ii_saved_local + math.floor(neighbors / 2) + 1
        window = np.arange(lo, hi)
    elif ii_saved_local - half_window < 0:
        # Near the left edge: clamp the window to start at device 0.
        window = np.arange(0, neighbors + 1)
    else:
        # Near the right edge: clamp the window to end at the last device.
        window = np.arange(devices - neighbors - 1, devices)
    return np.delete(window, np.where(window == ii_saved_local))
# compute weights for CFA
def federated_weights_computing2(filename, filename2, ii, ii2, epoch, devices, neighbors):
    """Consensus-based federated averaging (CFA) weight update.

    Blends the local model checkpoint `filename2` (device `ii`) with the
    neighbor checkpoint `filename` (device `ii2`, previous epoch) using the
    consensus step from the paper (section 3), then re-saves the blended
    parameters as device `ii`'s temp checkpoint for this epoch.

    Both loads poll the filesystem until the file exists, and retry once on
    a read error (the writer may still be mid-write). Fix vs. original:
    the bare ``except:`` clauses were narrowed to ``except Exception`` so
    KeyboardInterrupt / SystemExit are no longer swallowed.

    Args:
        filename: .mat checkpoint of neighbor device `ii2` ('weights'/'biases').
        filename2: temp .mat checkpoint of the local device `ii`.
        ii: local device index.
        ii2: neighbor device index.
        epoch: current epoch (used to name the saved checkpoint).
        devices: total number of devices.
        neighbors: number of neighbors per device.

    Returns:
        (weights, biases): the blended parameter arrays.
    """
    saved_epoch = epoch
    b_v = 1 / devices  # uniform per-device balancing weight
    eps_t_control = 1  # consensus step size (from paper)
    # Block until the local temp checkpoint appears on disk.
    while not os.path.isfile(filename2):
        print('Waiting..')
        pause(1)
    try:
        mathcontent = sio.loadmat(filename2)
    except Exception:
        print('Detected problem while loading file')
        pause(3)
        mathcontent = sio.loadmat(filename2)
    weights_current = mathcontent['weights']
    biases_current = mathcontent['biases']
    # Block until the neighbor checkpoint appears on disk.
    while not os.path.isfile(filename):
        print('Waiting..')
        pause(1)
    try:
        mathcontent = sio.loadmat(filename)
    except Exception:
        print('Detected problem while loading file')
        pause(3)
        mathcontent = sio.loadmat(filename)
    balancing_vect = np.ones(devices) * b_v
    weight_factor = (balancing_vect[ii2] / (balancing_vect[ii2] + (neighbors - 1) * balancing_vect[ii]))
    # Consensus step (paper, section 3): pull local params toward neighbor's.
    updated_weights = weights_current + eps_t_control * weight_factor * (mathcontent['weights'] - weights_current)
    updated_biases = biases_current + eps_t_control * weight_factor * (mathcontent['biases'] - biases_current)
    weights = updated_weights
    biases = updated_biases
    try:
        sio.savemat('temp_datamat{}_{}.mat'.format(ii, saved_epoch), {
            "weights": weights, "biases": biases})
        mathcontent = sio.loadmat('temp_datamat{}_{}.mat'.format(ii, saved_epoch))
    except Exception:
        # Best-effort retry of the save, preserving the original behavior.
        print('Unable to save file .. retrying')
        pause(3)
        print(biases)
        sio.savemat('temp_datamat{}_{}.mat'.format(ii, saved_epoch), {
            "weights": weights, "biases": biases})
    return weights, biases
# CFA-GE 4 stage implementation
def getFederatedWeight_gradients(n_W, n_b, federated, devices, ii_saved_local, epoch, v_loss,eng, x_train2, y_train2, neighbors):
    """CFA-GE, 4-stage implementation: consensus averaging plus gradient exchange.

    Publishes the local model via .mat files, averages it with the neighbors'
    previous-epoch models, computes the gradient of the local loss at each
    neighbor's current model, publishes those gradients, and finally applies
    the neighbors' gradients to the local model.

    Args:
        n_W, n_b: locally trained weights ([512, 8]) and biases ([8]).
        federated: if False, (n_W, n_b) is returned unchanged.
        devices: total number of devices (1 disables the exchange).
        ii_saved_local: index of this device.
        epoch: current epoch; epoch 0 only publishes the local model.
        v_loss: per-epoch validation loss vector, saved with the model.
        eng: unused here (kept for signature compatibility with callers).
        x_train2, y_train2: local training data used to evaluate gradients
            at the neighbors' models.
        neighbors: number of neighbors per device.

    Returns:
        (W_up, n_up): updated weights and biases.
    """
    x_c = tf.placeholder(tf.float32, [None, 512]) # 512 point FFT range measurements
    y_c = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances => 8 classes
    W_ext_c = tf.placeholder(tf.float32, [512, 8])
    b_ext_c = tf.placeholder(tf.float32, [8])
    # Set model weights
    # W_c = tf.Variable(tf.zeros([512, 8]))
    # b_c = tf.Variable(tf.zeros([8]))
    # Construct model
    pred_c = tf.nn.softmax(tf.matmul(x_c, W_ext_c) + b_ext_c) # Softmax use a single layer (other options can be useD)
    # Minimize error using cross entropy
    cost_c = tf.reduce_mean(-tf.reduce_sum(y_c * tf.log(pred_c), reduction_indices=1))
    grad_W_c, grad_b_c = tf.gradients(xs=[W_ext_c, b_ext_c], ys=cost_c)
    # Initialize the variables (i.e. assign their default value)
    init_c = tf.global_variables_initializer()
    if (federated):
        if devices > 1:
            if epoch == 0:
                # First epoch: nothing to average yet; just publish the
                # local model for the neighbors and return it unchanged.
                sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                    "weights": n_W, "biases": n_b, "epoch": epoch, "loss_sample": v_loss})
                W_up = n_W
                n_up = n_b
            else:
                sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                    "weights": n_W, "biases": n_b, "epoch": epoch, "loss_sample": v_loss})
                neighbor_vec = get_connectivity(ii_saved_local, neighbors, devices)
                # Stage 1: plain CFA averaging against each neighbor's
                # model from the previous epoch.
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile(
                            'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)):
                        # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch - 1))
                        pause(1)
                    [W_up, n_up] = federated_weights_computing2('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1),
                                        'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local,
                                        neighbor_vec[neighbor_index],
                                        epoch, devices, neighbors)
                pause(5)
                # Stage 2: publish the averaged model for this epoch.
                try:
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                except:
                    print('Unable to save file .. retrying')
                    pause(3)
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                while not os.path.isfile('datamat{}_{}.mat'.format(ii_saved_local, epoch)):
                    # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local, epoch))
                    pause(1)
                # waiting for other updates
                # expanded for gradient exchange
                pause(3)
                # Stage 3: evaluate the gradient of the LOCAL loss at each
                # neighbor's epoch model, then publish all gradients.
                g_W_c_vect = np.zeros([512, 8, devices])
                g_b_c_vect = np.zeros([8, devices])
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)):
                        # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch))
                        pause(1)
                    try:
                        mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                        W_up_neigh = np.asarray(mathcontent['weights'])
                        n_up_neigh = np.squeeze(np.asarray(mathcontent['biases']))
                    except:
                        pause(5)
                        mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                        W_up_neigh = np.asarray(mathcontent['weights'])
                        n_up_neigh = np.squeeze(np.asarray(mathcontent['biases']))
                    with tf.Session() as sess3:
                        sess3.run(init_c)
                        g_W_c, g_b_c = sess3.run([grad_W_c, grad_b_c],
                                                 feed_dict={x_c: x_train2, y_c: y_train2, W_ext_c: W_up_neigh,
                                                            b_ext_c: n_up_neigh})
                    g_W_c_vect[:, :, neighbor_vec[neighbor_index]] = g_W_c
                    g_b_c_vect[:, neighbor_vec[neighbor_index]] = g_b_c
                # save gradients and upload
                try:
                    sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), {
                        "grad_weights": g_W_c_vect, "grad_biases": g_b_c_vect, "epoch": epoch})
                    # waiting for other gradient updates
                    pause(5)
                    # Read-back check that the gradient file was written.
                    mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(ii_saved_local, epoch))
                    test_var = mathcontent['grad_biases']
                    del mathcontent
                except:
                    print('Unable to save file .. retrying')
                    pause(3)
                    sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), {
                        "grad_weights": g_W_c_vect, "grad_biases": g_b_c_vect, "epoch": epoch})
                # waiting for other gradient updates
                pause(5)
                try:
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                    W_up = np.asarray(mathcontent['weights'])
                    n_up = np.squeeze(np.asarray(mathcontent['biases']))
                except:
                    pause(5)
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                    W_up = np.asarray(mathcontent['weights'])
                    n_up = np.squeeze(np.asarray(mathcontent['biases']))
                # update local model with neighbor gradients
                # Stage 4: apply each neighbor's gradient w.r.t. this
                # device's model slot. NOTE: learning_rate2 is a module-level
                # constant defined elsewhere in this file.
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch)):
                        pause(1)
                    try:
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                    except:
                        pause(3)
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                    gradW_up_neigh = np.asarray(mathcontent['grad_weights'])
                    try:
                        gradn_up_neigh = np.squeeze(np.asarray(mathcontent['grad_biases']))
                    except:
                        pause(5)
                        print('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                        del mathcontent
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch))
                        gradW_up_neigh = np.asarray(mathcontent['grad_weights'])
                        gradn_up_neigh = np.squeeze(np.asarray(mathcontent['grad_biases']))
                    W_up = W_up - learning_rate2 * np.squeeze(gradW_up_neigh[:, :, ii_saved_local])
                    n_up = n_up - learning_rate2 * np.squeeze(gradn_up_neigh[:, ii_saved_local])
            else:
                W_up = n_W
                n_up = n_b
    else:
        W_up = n_W
        n_up = n_b
    return W_up, n_up
# CFA - GE: 2 stage (or fast) negotiation
def getFederatedWeight_gradients_fast(n_W, n_b, federated, devices, ii_saved_local, epoch, v_loss,eng, x_train2, y_train2, neighbors):
    """CFA-GE, 2-stage (fast) negotiation.

    Like getFederatedWeight_gradients, but uses the neighbors' models and
    gradients from the PREVIOUS epoch, collapsing the 4-stage handshake
    into two stages. Requires epoch >= 1 (exits otherwise).

    Args:
        n_W, n_b: locally trained weights ([512, 8]) and biases ([8]).
        federated: if False, (n_W, n_b) is returned unchanged.
        devices: total number of devices (1 disables the exchange).
        ii_saved_local: index of this device.
        epoch: current epoch; must be > 0 for the fast scheme.
        v_loss: per-epoch validation loss vector, saved with the model.
        eng: unused here (kept for signature compatibility with callers).
        x_train2, y_train2: local training data used to evaluate gradients
            at the neighbors' models.
        neighbors: number of neighbors per device.

    Returns:
        (W_up, n_up): updated weights and biases.
    """
    x_c = tf.placeholder(tf.float32, [None, 512]) # 512 point FFT range measurements
    y_c = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances => 8 classes
    W_ext_c = tf.placeholder(tf.float32, [512, 8])
    b_ext_c = tf.placeholder(tf.float32, [8])
    # Set model weights
    # W_c = tf.Variable(tf.zeros([512, 8]))
    # b_c = tf.Variable(tf.zeros([8]))
    # Construct model
    pred_c = tf.nn.softmax(tf.matmul(x_c, W_ext_c) + b_ext_c) # Softmax
    # Minimize error using cross entropy
    cost_c = tf.reduce_mean(-tf.reduce_sum(y_c * tf.log(pred_c), reduction_indices=1))
    grad_W_c, grad_b_c = tf.gradients(xs=[W_ext_c, b_ext_c], ys=cost_c)
    # Initialize the variables (i.e. assign their default value)
    init_c = tf.global_variables_initializer()
    if (federated):
        if devices > 1:
            if epoch == 0:
                # The fast scheme consumes epoch-1 artifacts, so it cannot
                # run on the first epoch.
                print("Error - exiting")
                exit(1)
            else:
                sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                    "weights": n_W, "biases": n_b, "epoch": epoch, "loss_sample": v_loss})
                # neighbor_vec = [ii_saved_local - 1, ii_saved_local + 1]
                neighbor_vec = get_connectivity(ii_saved_local, neighbors, devices)
                # Stage 1: CFA averaging against the neighbors' epoch-1 models.
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile(
                            'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)):
                        # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch - 1))
                        pause(1)
                    [W_up,n_up] = federated_weights_computing2('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1),
                                        'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local,
                                        neighbor_vec[neighbor_index],
                                        epoch, devices,neighbors)
                pause(5)
                W_up = np.asarray(W_up)
                n_up = np.squeeze(np.asarray(n_up))
                pause(3)
                # Publish the averaged model for this epoch.
                try:
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                except:
                    print('Unable to save file .. retrying')
                    pause(3)
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                # Stage 2: gradients of the local loss at the neighbors'
                # epoch-1 models (not the current epoch, unlike 4-stage).
                g_W_c_vect = np.zeros([512, 8, devices])
                g_b_c_vect = np.zeros([8, devices])
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch-1)):
                        # print('Waiting for datamat{}_{}.mat'.format(ii_saved_local - 1, epoch))
                        pause(1)
                    try:
                        mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch-1))
                        W_up_neigh = np.asarray(mathcontent['weights'])
                        n_up_neigh = np.squeeze(np.asarray(mathcontent['biases']))
                    except:
                        pause(5)
                        mathcontent = sio.loadmat('datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch-1))
                        W_up_neigh = np.asarray(mathcontent['weights'])
                        n_up_neigh = np.squeeze(np.asarray(mathcontent['biases']))
                    with tf.Session() as sess3:
                        sess3.run(init_c)
                        g_W_c, g_b_c = sess3.run([grad_W_c, grad_b_c],
                                                 feed_dict={x_c: x_train2, y_c: y_train2, W_ext_c: W_up_neigh,
                                                            b_ext_c: n_up_neigh})
                    g_W_c_vect[:, :, neighbor_vec[neighbor_index]] = g_W_c
                    g_b_c_vect[:, neighbor_vec[neighbor_index]] = g_b_c
                # save gradients and upload
                try:
                    sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), {
                        "grad_weights": g_W_c_vect, "grad_biases": g_b_c_vect, "epoch": epoch})
                    # waiting for other gradient updates
                    pause(5)
                    # Read-back check that the gradient file was written.
                    mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(ii_saved_local, epoch))
                    test_var = mathcontent['grad_biases']
                    del mathcontent
                except:
                    print('Unable to save file .. retrying')
                    pause(3)
                    sio.savemat('datagrad{}_{}.mat'.format(ii_saved_local, epoch), {
                        "grad_weights": g_W_c_vect, "grad_biases": g_b_c_vect, "epoch": epoch})
                pause(5)
                # update local model with neighbor gradients (epoch - 1)
                # NOTE: learning_rate2 is a module-level constant defined
                # elsewhere in this file.
                for neighbor_index in range(neighbor_vec.size):
                    while not os.path.isfile(
                            'datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)):
                        pause(1)
                    try:
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1))
                    except:
                        pause(3)
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1))
                    gradW_up_neigh = np.asarray(mathcontent['grad_weights'])
                    try:
                        gradn_up_neigh = np.squeeze(np.asarray(mathcontent['grad_biases']))
                    except:
                        pause(5)
                        print('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1))
                        del mathcontent
                        mathcontent = sio.loadmat('datagrad{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1))
                        gradW_up_neigh = np.asarray(mathcontent['grad_weights'])
                        gradn_up_neigh = np.squeeze(np.asarray(mathcontent['grad_biases']))
                    W_up = W_up - learning_rate2 * np.squeeze(gradW_up_neigh[:, :, ii_saved_local])
                    n_up = n_up - learning_rate2 * np.squeeze(gradn_up_neigh[:, ii_saved_local])
            else:
                W_up = n_W
                n_up = n_b
    else:
        W_up = n_W
        n_up = n_b
    return W_up, n_up
# CFA
def getFederatedWeight(n_W, n_b, federated, devices, ii_saved_local, epoch, v_loss,eng, neighbors):
    """CFA: consensus-based federated averaging (weights only, no gradients).

    On epoch 0 the local model is published for the neighbors; on later
    epochs it is averaged with every neighbor's epoch-1 model through
    federated_weights_computing2 and the result is re-published.

    Args:
        n_W, n_b: locally trained weights and biases.
        federated: if False, (n_W, n_b) is returned unchanged.
        devices: total number of devices (1 disables the exchange).
        ii_saved_local: index of this device.
        epoch: current training epoch.
        v_loss: per-epoch validation loss vector, saved with the model.
        eng: unused here (kept for signature compatibility with callers).
        neighbors: number of neighbors per device.

    Returns:
        (W_up, n_up): the updated weights and biases.
    """
    if (federated):
        if devices > 1: # multihop topology
            if epoch == 0:
                # First epoch: just publish the local model.
                sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                    "weights": n_W, "biases": n_b, "epoch": epoch, "loss_sample": v_loss})
                W_up = n_W
                n_up = n_b
            else:
                sio.savemat('temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                    "weights": n_W, "biases": n_b, "epoch": epoch, "loss_sample": v_loss})
                # neighbor_vec = [ii_saved_local - 1, ii_saved_local + 1]
                neighbor_vec = get_connectivity(ii_saved_local, neighbors, devices)
                print(neighbor_vec)
                for neighbor_index in range(neighbor_vec.size):
                    # Wait for both the neighbor's epoch-1 model and the
                    # local snapshot to exist on disk.
                    while not os.path.isfile(
                            'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1)) or not os.path.isfile(
                            'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch)):
                        pause(1)
                    [W_up, n_up] = federated_weights_computing2(
                        'datamat{}_{}.mat'.format(neighbor_vec[neighbor_index], epoch - 1),
                        'temp_datamat{}_{}.mat'.format(ii_saved_local, epoch), ii_saved_local,
                        neighbor_vec[neighbor_index],
                        epoch, devices, neighbors)
                pause(5)
                try:
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                except Exception:
                    print('Unable to save file .. retrying')
                    pause(3)
                    sio.savemat('datamat{}_{}.mat'.format(ii_saved_local, epoch), {
                        "weights": W_up, "biases": n_up})
                    # Bug fix: reload here too, otherwise `mathcontent` is
                    # unbound below when the first savemat failed.
                    mathcontent = sio.loadmat('datamat{}_{}.mat'.format(ii_saved_local, epoch))
                W_up = np.asarray(mathcontent['weights'])
                n_up = np.squeeze(np.asarray(mathcontent['biases']))
        else:
            W_up = n_W
            n_up = n_b
    else:
        # Bug fix: this branch was missing, so a non-federated run raised
        # NameError on W_up/n_up (both CFA-GE sibling functions have it).
        W_up = n_W
        n_up = n_b
    return W_up, n_up
def processData(samples, iii, federated, tot_devices,fraction_training, neighbors_number,EPOCH_THRESHOLD):
    """Per-device training worker: softmax classifier + federated exchange.

    Loads the radar FFT dataset, trains a single-layer softmax model on this
    device's data partition with manual SGD (TF 1.x graph mode), validates
    after every epoch, and runs a CFA-GE weight/gradient exchange with the
    neighbors (4-stage before EPOCH_THRESHOLD, fast 2-stage after).

    Args:
        samples: number of training samples assigned to this device.
        iii: index of this device (selects its data partition).
        federated: enable the federated exchange.
        tot_devices: total number of devices in the simulation.
        fraction_training: size of the validation sweep (in samples).
        neighbors_number: neighbors per device (K-degree network).
        EPOCH_THRESHOLD: epoch at which CFA-GE switches to fast negotiation.

    Side effects: reads the dataset .mat file, exchanges datamat/datagrad
    .mat files with other processes, writes a results/dump_loss_*.mat file.
    NOTE: learning_rate, learning_rate2, batch_size and training_epochs are
    module-level globals defined elsewhere in this file.
    """
    # eng = matlab.engine.start_matlab()
    eng = 0
    global learning_rate
    learning_rate_local = learning_rate  # NOTE(review): assigned but unused below
    np.random.seed(1)
    tf.set_random_seed(1)  # common initialization across all devices
    # mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # MNIST DATABASE USED AS AN ALTERNATIVE
    # mnist2 = input_data.read_data_sets("/tmp/data/", one_hot=True)
    database = sio.loadmat('dati_radar_05-07-2019/data_base_all_sequences_random.mat')
    x_train = database['Data_train_2']
    y_train = database['label_train_2']
    y_train_t = to_categorical(y_train)
    x_train = (x_train.astype('float32') + 140) / 140 # DATA PREPARATION (NORMALIZATION AND SCALING OF FFT MEASUREMENTS)
    x_train2 = x_train[iii * samples:((iii + 1) * samples - 1), :] # DATA PARTITION
    y_train2 = y_train_t[iii * samples:((iii + 1) * samples - 1),:]
    x_test = database['Data_test_2']
    y_test = database['label_test_2']
    x_test = (x_test.astype('float32') + 140) / 140
    y_test_t = to_categorical(y_test)
    total_batch2 = int(fraction_training / batch_size)
    # tf Graph Input
    x = tf.placeholder(tf.float32, [None, 512]) # 512 POINT FFT RANGE MEASUREMENTS
    y = tf.placeholder(tf.float32, [None, 8]) # 0-7 HR distances (safe - unsafe)
    W_ext = tf.placeholder(tf.float32, [512, 8])
    b_ext = tf.placeholder(tf.float32, [8])
    W2_ext = tf.placeholder(tf.float32, [512, 8])
    b2_ext = tf.placeholder(tf.float32, [8])
    # Set model weights
    W = tf.Variable(tf.zeros([512, 8]))
    b = tf.Variable(tf.zeros([8]))
    # Construct model: training network (pred) and validation copy (pred2)
    # fed with externally supplied weights.
    pred = tf.nn.softmax(tf.matmul(x, W_ext) + b_ext) # Softmax
    pred2 = tf.nn.softmax(tf.matmul(x, W2_ext) + b2_ext) # Softmax
    # Minimize error using cross entropy
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
    cost2 = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred2), reduction_indices=1))
    grad_W, grad_b = tf.gradients(xs=[W_ext, b_ext], ys=cost)
    # Manual SGD step applied to the externally supplied weights.
    new_W = W.assign(W_ext - learning_rate * grad_W)
    new_b = b.assign(b_ext - learning_rate * grad_b)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # Start training
    with tf.Session() as sess:
        sess.run(init)
        total_batch = int(samples / batch_size)
        # PRINTS THE TOTAL NUMBER OF MINI BATCHES
        print(total_batch)
        # Training cycle
        val_loss = np.zeros(training_epochs)
        for epoch in range(training_epochs):
            avg_cost = 0.
            avg_cost_test = 0.
            for i in range(total_batch):
                batch_xs = x_train2[i * batch_size:((i + 1) * batch_size - 1), :]
                batch_ys = y_train2[i * batch_size:((i + 1) * batch_size - 1), :]
                if (i == 0) and (epoch == 0): # initialization
                    W_val = np.zeros([512, 8])
                    b_val = np.zeros([8])
                elif (i > 0):
                    W_val = n_W # modify for minibatch updates
                    b_val = n_b
                # Fit training using batch data
                n_W, n_b, c, g_W, g_b = sess.run([new_W, new_b, cost, grad_W, grad_b], feed_dict={x: batch_xs,
                                                                                                  y: batch_ys, W_ext: W_val, b_ext: b_val})
                avg_cost += c / total_batch # Training loss
            # validation
            with tf.Session() as sess2:
                sess2.run(init)
                for i in range(total_batch2):
                    # Construct model
                    batch_xs = x_test[i * batch_size:((i + 1) * batch_size - 1), :]
                    batch_ys = y_test_t[i * batch_size:((i + 1) * batch_size - 1), :]
                    c = sess2.run(cost2, feed_dict={x: batch_xs,
                                                    y: batch_ys, W2_ext: n_W, b2_ext: n_b})
                    avg_cost_test += c / total_batch2
            val_loss[epoch] = avg_cost_test
            print('Test Device: ' + str(iii) + " Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost_test))
            ###########################################################
            # CFA: weights exchange (no gradients)
            # COMMENT BELOW IF CFA-GE IS SELECTED
            # W_val, b_val = getFederatedWeight(n_W, n_b, federated, tot_devices, iii, epoch, val_loss, eng, neighbors_number)
            ##################################################
            ###################################################
            # CFA - GE: 2-stage negotiation after epoch EPOCH_THRESHOLD
            # COMMENT BELOW IF CFA IS SELECTED
            if epoch < EPOCH_THRESHOLD:
                W_val, b_val = getFederatedWeight_gradients(n_W, n_b, federated, tot_devices, iii, epoch, val_loss, eng, x_train2, y_train2, neighbors_number) # method with gradients exchange
            else:
                W_val, b_val = getFederatedWeight_gradients_fast(n_W, n_b, federated, tot_devices, iii, epoch, val_loss, eng, x_train2, y_train2, neighbors_number) # method with gradients exchange
            ###########################################################
        print("Optimization Finished!")
        # DUMP RESULTS
        sio.savemat(
            'results/dump_loss_{}_{date:%Y-%m-%d-%H-%M-%S}.mat'.format(iii, date=datetime.datetime.now().time()), {
                "val_acc": val_loss, "device": iii})
    # Test model
    # correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy for 3000 examples
    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if __name__ == "__main__":
    # DELETE TEMPORARY CACHE FILES left over from a previous run, so the
    # .mat-file synchronisation between devices starts from a clean state.
    fileList = glob.glob('*.mat', recursive=False)
    print(fileList)
    for filePath in fileList:
        try:
            os.remove(filePath)
        except OSError:
            print("Error while deleting file")
    ##################### SETS SIMULATION PARAMETERS ###############################
    devices = 15  # NUMBER OF DEVICES
    neighbors_number = 2  # NUMBER OF NEIGHBORS PER DEVICE (K-DEGREE NETWORK)
    ii_saved = 0
    EPOCH_THRESHOLD = 4  # STARTING EPOCH FOR CFA-GE (2-STAGE NEGOTIATION)
    federated = True  # ENABLE FEDERATED LEARNING
    training_set_per_device = 25  # NUMBER OF TRAINING SAMPLES PER DEVICE
    fraction_training = int(devices * training_set_per_device)  # total training samples
    b_v = 1 / devices
    balancing_vect = np.ones(devices) * b_v
    samples = np.zeros(devices)  # training samples per device
    validation_train = 16000  # VALIDATION DATASET
    ###################################################################################
    # START MULTIPROCESSING
    # Loop variable renamed from 'id' to avoid shadowing the builtin.
    for dev in range(devices):
        samples[dev] = math.floor(balancing_vect[dev] * fraction_training)
    # samples = int(fraction_training/devices) # training samples per device
    print(samples)
    t = []
    iii = 0
    # One worker process per simulated device; each runs the full training
    # loop (processData) and synchronises with its neighbors via .mat files.
    for ii in range(devices):
        t.append(multiprocessing.Process(target=processData, args=(int(samples[ii]), ii, federated, devices, validation_train, neighbors_number, EPOCH_THRESHOLD)))
        t[ii].start()
    exit(0)  # main exits; non-daemon workers keep running until they finish
| [
"os.remove",
"numpy.random.seed",
"scipy.io.loadmat",
"numpy.ones",
"tensorflow.matmul",
"os.path.isfile",
"numpy.arange",
"glob.glob",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"tensorflow.gradients",
"matplotlib.pyplot.pause",
"datetime.datetime.now",
"keras.utils.to_catego... | [((3623, 3662), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 512]'], {}), '(tf.float32, [None, 512])\n', (3637, 3662), True, 'import tensorflow as tf\n'), ((3709, 3746), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 8]'], {}), '(tf.float32, [None, 8])\n', (3723, 3746), True, 'import tensorflow as tf\n'), ((3795, 3831), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[512, 8]'], {}), '(tf.float32, [512, 8])\n', (3809, 3831), True, 'import tensorflow as tf\n'), ((3846, 3877), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[8]'], {}), '(tf.float32, [8])\n', (3860, 3877), True, 'import tensorflow as tf\n'), ((4284, 4330), 'tensorflow.gradients', 'tf.gradients', ([], {'xs': '[W_ext_c, b_ext_c]', 'ys': 'cost_c'}), '(xs=[W_ext_c, b_ext_c], ys=cost_c)\n', (4296, 4330), True, 'import tensorflow as tf\n'), ((4410, 4443), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4441, 4443), True, 'import tensorflow as tf\n'), ((11570, 11609), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 512]'], {}), '(tf.float32, [None, 512])\n', (11584, 11609), True, 'import tensorflow as tf\n'), ((11656, 11693), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 8]'], {}), '(tf.float32, [None, 8])\n', (11670, 11693), True, 'import tensorflow as tf\n'), ((11742, 11778), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[512, 8]'], {}), '(tf.float32, [512, 8])\n', (11756, 11778), True, 'import tensorflow as tf\n'), ((11793, 11824), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[8]'], {}), '(tf.float32, [8])\n', (11807, 11824), True, 'import tensorflow as tf\n'), ((12184, 12230), 'tensorflow.gradients', 'tf.gradients', ([], {'xs': '[W_ext_c, b_ext_c]', 'ys': 'cost_c'}), '(xs=[W_ext_c, b_ext_c], ys=cost_c)\n', (12196, 12230), True, 'import tensorflow as tf\n'), ((12310, 12343), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12341, 12343), True, 'import tensorflow as tf\n'), ((21028, 21045), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (21042, 21045), True, 'import numpy as np\n'), ((21050, 21071), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (21068, 21071), True, 'import tensorflow as tf\n'), ((21290, 21361), 'scipy.io.loadmat', 'sio.loadmat', (['"""dati_radar_05-07-2019/data_base_all_sequences_random.mat"""'], {}), "('dati_radar_05-07-2019/data_base_all_sequences_random.mat')\n", (21301, 21361), True, 'import scipy.io as sio\n'), ((21458, 21481), 'keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (21472, 21481), False, 'from keras.utils import to_categorical\n'), ((21898, 21920), 'keras.utils.to_categorical', 'to_categorical', (['y_test'], {}), '(y_test)\n', (21912, 21920), False, 'from keras.utils import to_categorical\n'), ((22006, 22045), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 512]'], {}), '(tf.float32, [None, 512])\n', (22020, 22045), True, 'import tensorflow as tf\n'), ((22090, 22127), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 8]'], {}), '(tf.float32, [None, 8])\n', (22104, 22127), True, 'import tensorflow as tf\n'), ((22177, 22213), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[512, 8]'], {}), '(tf.float32, [512, 8])\n', (22191, 22213), True, 'import tensorflow as tf\n'), ((22226, 22257), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[8]'], {}), '(tf.float32, [8])\n', (22240, 22257), True, 'import tensorflow as tf\n'), ((22272, 22308), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[512, 8]'], {}), '(tf.float32, [512, 8])\n', (22286, 22308), True, 'import tensorflow as tf\n'), ((22322, 22353), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[8]'], {}), '(tf.float32, [8])\n', (22336, 22353), 
True, 'import tensorflow as tf\n'), ((22838, 22878), 'tensorflow.gradients', 'tf.gradients', ([], {'xs': '[W_ext, b_ext]', 'ys': 'cost'}), '(xs=[W_ext, b_ext], ys=cost)\n', (22850, 22878), True, 'import tensorflow as tf\n'), ((23063, 23096), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (23094, 23096), True, 'import tensorflow as tf\n'), ((26638, 26673), 'glob.glob', 'glob.glob', (['"""*.mat"""'], {'recursive': '(False)'}), "('*.mat', recursive=False)\n", (26647, 26673), False, 'import glob\n'), ((27410, 27427), 'numpy.zeros', 'np.zeros', (['devices'], {}), '(devices)\n', (27418, 27427), True, 'import numpy as np\n'), ((790, 851), 'numpy.arange', 'np.arange', (['(ii_saved_local + 1)', '(ii_saved_local + neighbors + 1)'], {}), '(ii_saved_local + 1, ii_saved_local + neighbors + 1)\n', (799, 851), True, 'import numpy as np\n'), ((1938, 1963), 'os.path.isfile', 'os.path.isfile', (['filename2'], {}), '(filename2)\n', (1952, 1963), False, 'import os\n'), ((2000, 2008), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (2005, 2008), False, 'from matplotlib.pyplot import pause\n'), ((2041, 2063), 'scipy.io.loadmat', 'sio.loadmat', (['filename2'], {}), '(filename2)\n', (2052, 2063), True, 'import scipy.io as sio\n'), ((2295, 2319), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2309, 2319), False, 'import os\n'), ((2356, 2364), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (2361, 2364), False, 'from matplotlib.pyplot import pause\n'), ((2397, 2418), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (2408, 2418), True, 'import scipy.io as sio\n'), ((2567, 2583), 'numpy.ones', 'np.ones', (['devices'], {}), '(devices)\n', (2574, 2583), True, 'import numpy as np\n'), ((22399, 22417), 'tensorflow.zeros', 'tf.zeros', (['[512, 8]'], {}), '([512, 8])\n', (22407, 22417), True, 'import tensorflow as tf\n'), ((22439, 22452), 'tensorflow.zeros', 'tf.zeros', (['[8]'], 
{}), '([8])\n', (22447, 22452), True, 'import tensorflow as tf\n'), ((23127, 23139), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (23137, 23139), True, 'import tensorflow as tf\n'), ((23342, 23367), 'numpy.zeros', 'np.zeros', (['training_epochs'], {}), '(training_epochs)\n', (23350, 23367), True, 'import numpy as np\n'), ((27375, 27391), 'numpy.ones', 'np.ones', (['devices'], {}), '(devices)\n', (27382, 27391), True, 'import numpy as np\n'), ((27677, 27727), 'math.floor', 'math.floor', (['(balancing_vect[id] * fraction_training)'], {}), '(balancing_vect[id] * fraction_training)\n', (27687, 27727), False, 'import math\n'), ((925, 978), 'numpy.arange', 'np.arange', (['(ii_saved_local - neighbors)', 'ii_saved_local'], {}), '(ii_saved_local - neighbors, ii_saved_local)\n', (934, 978), True, 'import numpy as np\n'), ((2137, 2145), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (2142, 2145), False, 'from matplotlib.pyplot import pause\n'), ((2168, 2190), 'scipy.io.loadmat', 'sio.loadmat', (['filename2'], {}), '(filename2)\n', (2179, 2190), True, 'import scipy.io as sio\n'), ((2492, 2500), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (2497, 2500), False, 'from matplotlib.pyplot import pause\n'), ((2523, 2544), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (2534, 2544), True, 'import scipy.io as sio\n'), ((3270, 3278), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (3275, 3278), False, 'from matplotlib.pyplot import pause\n'), ((4036, 4059), 'tensorflow.matmul', 'tf.matmul', (['x_c', 'W_ext_c'], {}), '(x_c, W_ext_c)\n', (4045, 4059), True, 'import tensorflow as tf\n'), ((11983, 12006), 'tensorflow.matmul', 'tf.matmul', (['x_c', 'W_ext_c'], {}), '(x_c, W_ext_c)\n', (11992, 12006), True, 'import tensorflow as tf\n'), ((22502, 22521), 'tensorflow.matmul', 'tf.matmul', (['x', 'W_ext'], {}), '(x, W_ext)\n', (22511, 22521), True, 'import tensorflow as tf\n'), ((22568, 22588), 'tensorflow.matmul', 
'tf.matmul', (['x', 'W2_ext'], {}), '(x, W2_ext)\n', (22577, 22588), True, 'import tensorflow as tf\n'), ((26749, 26768), 'os.remove', 'os.remove', (['filePath'], {}), '(filePath)\n', (26758, 26768), False, 'import os\n'), ((1243, 1285), 'numpy.where', 'np.where', (['(sets_neighbors == ii_saved_local)'], {}), '(sets_neighbors == ii_saved_local)\n', (1251, 1285), True, 'import numpy as np\n'), ((1317, 1352), 'numpy.delete', 'np.delete', (['sets_neighbors', 'index_ii'], {}), '(sets_neighbors, index_ii)\n', (1326, 1352), True, 'import numpy as np\n'), ((1586, 1628), 'numpy.where', 'np.where', (['(sets_neighbors == ii_saved_local)'], {}), '(sets_neighbors == ii_saved_local)\n', (1594, 1628), True, 'import numpy as np\n'), ((1660, 1695), 'numpy.delete', 'np.delete', (['sets_neighbors', 'index_ii'], {}), '(sets_neighbors, index_ii)\n', (1669, 1695), True, 'import numpy as np\n'), ((6739, 6747), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (6744, 6747), False, 'from matplotlib.pyplot import pause\n'), ((6778, 6805), 'numpy.zeros', 'np.zeros', (['[512, 8, devices]'], {}), '([512, 8, devices])\n', (6786, 6805), True, 'import numpy as np\n'), ((6835, 6857), 'numpy.zeros', 'np.zeros', (['[8, devices]'], {}), '([8, devices])\n', (6843, 6857), True, 'import numpy as np\n'), ((9215, 9223), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (9220, 9223), False, 'from matplotlib.pyplot import pause\n'), ((13738, 13754), 'numpy.asarray', 'np.asarray', (['W_up'], {}), '(W_up)\n', (13748, 13754), True, 'import numpy as np\n'), ((13824, 13832), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (13829, 13832), False, 'from matplotlib.pyplot import pause\n'), ((14387, 14414), 'numpy.zeros', 'np.zeros', (['[512, 8, devices]'], {}), '([512, 8, devices])\n', (14395, 14414), True, 'import numpy as np\n'), ((14444, 14466), 'numpy.zeros', 'np.zeros', (['[8, devices]'], {}), '([8, devices])\n', (14452, 14466), True, 'import numpy as np\n'), ((16777, 16785), 
'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (16782, 16785), False, 'from matplotlib.pyplot import pause\n'), ((20623, 20657), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (20633, 20657), True, 'import numpy as np\n'), ((24322, 24334), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (24332, 24334), True, 'import tensorflow as tf\n'), ((1007, 1031), 'math.ceil', 'math.ceil', (['(neighbors / 2)'], {}), '(neighbors / 2)\n', (1016, 1031), False, 'import math\n'), ((1452, 1479), 'numpy.arange', 'np.arange', (['(0)', '(neighbors + 1)'], {}), '(0, neighbors + 1)\n', (1461, 1479), True, 'import numpy as np\n'), ((1523, 1566), 'numpy.arange', 'np.arange', (['(devices - neighbors - 1)', 'devices'], {}), '(devices - neighbors - 1, devices)\n', (1532, 1566), True, 'import numpy as np\n'), ((4220, 4234), 'tensorflow.log', 'tf.log', (['pred_c'], {}), '(pred_c)\n', (4226, 4234), True, 'import tensorflow as tf\n'), ((5893, 5901), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (5898, 5901), False, 'from matplotlib.pyplot import pause\n'), ((6620, 6628), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (6625, 6628), False, 'from matplotlib.pyplot import pause\n'), ((8650, 8658), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (8655, 8658), False, 'from matplotlib.pyplot import pause\n'), ((9368, 9402), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (9378, 9402), True, 'import numpy as np\n'), ((10412, 10451), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_weights']"], {}), "(mathcontent['grad_weights'])\n", (10422, 10451), True, 'import numpy as np\n'), ((12120, 12134), 'tensorflow.log', 'tf.log', (['pred_c'], {}), '(pred_c)\n', (12126, 12134), True, 'import tensorflow as tf\n'), ((13705, 13713), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (13710, 13713), False, 'from matplotlib.pyplot import pause\n'), 
((13789, 13805), 'numpy.asarray', 'np.asarray', (['n_up'], {}), '(n_up)\n', (13799, 13805), True, 'import numpy as np\n'), ((16265, 16273), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (16270, 16273), False, 'from matplotlib.pyplot import pause\n'), ((17462, 17501), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_weights']"], {}), "(mathcontent['grad_weights'])\n", (17472, 17501), True, 'import numpy as np\n'), ((20076, 20084), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (20081, 20084), False, 'from matplotlib.pyplot import pause\n'), ((20692, 20725), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (20702, 20725), True, 'import numpy as np\n'), ((22697, 22709), 'tensorflow.log', 'tf.log', (['pred'], {}), '(pred)\n', (22703, 22709), True, 'import tensorflow as tf\n'), ((22779, 22792), 'tensorflow.log', 'tf.log', (['pred2'], {}), '(pred2)\n', (22785, 22792), True, 'import tensorflow as tf\n'), ((23767, 23785), 'numpy.zeros', 'np.zeros', (['[512, 8]'], {}), '([512, 8])\n', (23775, 23785), True, 'import numpy as np\n'), ((23814, 23827), 'numpy.zeros', 'np.zeros', (['[8]'], {}), '([8])\n', (23822, 23827), True, 'import numpy as np\n'), ((1149, 1174), 'math.floor', 'math.floor', (['(neighbors / 2)'], {}), '(neighbors / 2)\n', (1159, 1174), False, 'import math\n'), ((1392, 1416), 'math.ceil', 'math.ceil', (['(neighbors / 2)'], {}), '(neighbors / 2)\n', (1401, 1416), False, 'import math\n'), ((5457, 5465), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (5462, 5465), False, 'from matplotlib.pyplot import pause\n'), ((6266, 6274), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (6271, 6274), False, 'from matplotlib.pyplot import pause\n'), ((7184, 7192), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (7189, 7192), False, 'from matplotlib.pyplot import pause\n'), ((7369, 7403), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), 
"(mathcontent['weights'])\n", (7379, 7403), True, 'import numpy as np\n'), ((7842, 7854), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7852, 7854), True, 'import tensorflow as tf\n'), ((8955, 8963), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (8960, 8963), False, 'from matplotlib.pyplot import pause\n'), ((9441, 9474), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (9451, 9474), True, 'import numpy as np\n'), ((9520, 9528), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (9525, 9528), False, 'from matplotlib.pyplot import pause\n'), ((9652, 9686), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (9662, 9686), True, 'import numpy as np\n'), ((10050, 10058), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (10055, 10058), False, 'from matplotlib.pyplot import pause\n'), ((13271, 13279), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (13276, 13279), False, 'from matplotlib.pyplot import pause\n'), ((14202, 14210), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (14207, 14210), False, 'from matplotlib.pyplot import pause\n'), ((14795, 14803), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (14800, 14803), False, 'from matplotlib.pyplot import pause\n'), ((14982, 15016), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (14992, 15016), True, 'import numpy as np\n'), ((15457, 15469), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (15467, 15469), True, 'import tensorflow as tf\n'), ((16570, 16578), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (16575, 16578), False, 'from matplotlib.pyplot import pause\n'), ((17092, 17100), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (17097, 17100), False, 'from matplotlib.pyplot import pause\n'), ((19690, 19698), 'matplotlib.pyplot.pause', 'pause', (['(1)'], {}), '(1)\n', (19695, 
19698), False, 'from matplotlib.pyplot import pause\n'), ((20449, 20457), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (20454, 20457), False, 'from matplotlib.pyplot import pause\n'), ((1066, 1090), 'math.ceil', 'math.ceil', (['(neighbors / 2)'], {}), '(neighbors / 2)\n', (1075, 1090), False, 'import math\n'), ((1193, 1218), 'math.floor', 'math.floor', (['(neighbors / 2)'], {}), '(neighbors / 2)\n', (1203, 1218), False, 'import math\n'), ((7452, 7485), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (7462, 7485), True, 'import numpy as np\n'), ((7539, 7547), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (7544, 7547), False, 'from matplotlib.pyplot import pause\n'), ((7699, 7733), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (7709, 7733), True, 'import numpy as np\n'), ((9725, 9758), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (9735, 9758), True, 'import numpy as np\n'), ((10251, 10259), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (10256, 10259), False, 'from matplotlib.pyplot import pause\n'), ((10529, 10567), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_biases']"], {}), "(mathcontent['grad_biases'])\n", (10539, 10567), True, 'import numpy as np\n'), ((10621, 10629), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (10626, 10629), False, 'from matplotlib.pyplot import pause\n'), ((10921, 10960), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_weights']"], {}), "(mathcontent['grad_weights'])\n", (10931, 10960), True, 'import numpy as np\n'), ((11104, 11152), 'numpy.squeeze', 'np.squeeze', (['gradW_up_neigh[:, :, ii_saved_local]'], {}), '(gradW_up_neigh[:, :, ii_saved_local])\n', (11114, 11152), True, 'import numpy as np\n'), ((11204, 11249), 'numpy.squeeze', 'np.squeeze', (['gradn_up_neigh[:, ii_saved_local]'], {}), '(gradn_up_neigh[:, ii_saved_local])\n', 
(11214, 11249), True, 'import numpy as np\n'), ((15065, 15098), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (15075, 15098), True, 'import numpy as np\n'), ((15152, 15160), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (15157, 15160), False, 'from matplotlib.pyplot import pause\n'), ((15314, 15348), 'numpy.asarray', 'np.asarray', (["mathcontent['weights']"], {}), "(mathcontent['weights'])\n", (15324, 15348), True, 'import numpy as np\n'), ((17297, 17305), 'matplotlib.pyplot.pause', 'pause', (['(3)'], {}), '(3)\n', (17302, 17305), False, 'from matplotlib.pyplot import pause\n'), ((17579, 17617), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_biases']"], {}), "(mathcontent['grad_biases'])\n", (17589, 17617), True, 'import numpy as np\n'), ((17671, 17679), 'matplotlib.pyplot.pause', 'pause', (['(5)'], {}), '(5)\n', (17676, 17679), False, 'from matplotlib.pyplot import pause\n'), ((17979, 18018), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_weights']"], {}), "(mathcontent['grad_weights'])\n", (17989, 18018), True, 'import numpy as np\n'), ((18162, 18210), 'numpy.squeeze', 'np.squeeze', (['gradW_up_neigh[:, :, ii_saved_local]'], {}), '(gradW_up_neigh[:, :, ii_saved_local])\n', (18172, 18210), True, 'import numpy as np\n'), ((18262, 18307), 'numpy.squeeze', 'np.squeeze', (['gradn_up_neigh[:, ii_saved_local]'], {}), '(gradn_up_neigh[:, ii_saved_local])\n', (18272, 18307), True, 'import numpy as np\n'), ((26248, 26271), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26269, 26271), False, 'import datetime\n'), ((7782, 7815), 'numpy.asarray', 'np.asarray', (["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (7792, 7815), True, 'import numpy as np\n'), ((11013, 11051), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_biases']"], {}), "(mathcontent['grad_biases'])\n", (11023, 11051), True, 'import numpy as np\n'), ((15397, 15430), 'numpy.asarray', 'np.asarray', 
(["mathcontent['biases']"], {}), "(mathcontent['biases'])\n", (15407, 15430), True, 'import numpy as np\n'), ((18071, 18109), 'numpy.asarray', 'np.asarray', (["mathcontent['grad_biases']"], {}), "(mathcontent['grad_biases'])\n", (18081, 18109), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#%% imports
import os
# Repository root, three levels above this file.
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
# use local cython installs
import sys
#sys.path.append(f"{BASE_PATH}/gtsam/install/cython")
import gtsam
import inspect
import numpy as np
import math
import ics
import matplotlib.pyplot as plt
#object_methods = [method_name for method_name in dir(gtsam)
#                  if callable(getattr(gtsam, method_name))]
#print(object_methods)
# Debug aid: list every callable exported by the custom ics extension module.
object_methods = [method_name for method_name in dir(ics)
                  if callable(getattr(ics, method_name))]
print(object_methods)
# NOTE(review): the noise models and prior below are not referenced by main()
# in this script -- presumably leftovers from experimentation; confirm before
# removing.
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
def classlookup(cls):
    """Return all (transitive) base classes of *cls*, depth-first.

    BUG FIX: the original version iterated over the list while extending it,
    so the appended bases were visited again and their own bases were added a
    second time, producing duplicate entries (e.g. ``object`` repeated for a
    three-level hierarchy). Iterating over the immutable ``cls.__bases__``
    snapshot visits each direct base exactly once.
    """
    bases = list(cls.__bases__)
    for base in cls.__bases__:
        # Recursion already collects the full ancestor chain of each base.
        bases.extend(classlookup(base))
    return bases
# Debug: print the transitive base classes of the custom unary factor type.
d = classlookup(ics.QuadraticUnaryFactor1D)
print(d)
def main():
    """Build a small 1-D factor graph from custom ics factors and solve it with iSAM2."""
    print("calling main")
    graph = gtsam.NonlinearFactorGraph()
    optimizer = gtsam.ISAM2(gtsam.ISAM2Params())
    pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([1.]))
    # NOTE: the older gtsam.noiseModel_Gaussian.Covariance(...) calls appear
    # to have been deprecated, so Diagonal.Sigmas is used instead.
    # Exercise the custom factor's introspection helpers once (debug output).
    factor = ics.QuadraticUnaryFactor1D(gtsam.symbol('x', 0), np.array([0.]), 0, pose_noise)
    factor.printfromnonlinearfactor()
    print(factor.isConstraintFactor())
    help(factor)
    # Unary (prior-like) measurements on x0..x3.
    unary_measurements = [0., 1., 1., 3.]
    for idx, measurement in enumerate(unary_measurements):
        graph.add(ics.QuadraticUnaryFactor1D(
            gtsam.symbol('x', idx), np.array([measurement]), 0, pose_noise))
    # Binary (odometry-like) constraints between consecutive variables.
    for idx in range(3):
        graph.add(ics.QuadraticBinaryFactor1D(
            gtsam.symbol('x', idx), gtsam.symbol('x', idx + 1),
            np.array([1.]), pose_noise))
    # Every variable starts at zero.
    initial_estimate = gtsam.Values()
    for idx in range(4):
        initial_estimate.insert(gtsam.symbol('x', idx), np.array([0.]))
    optimizer.update(graph, initial_estimate)
    result = optimizer.calculateEstimate()
    print(result)
# Script entry point.
if __name__=='__main__':
    main()
| [
"gtsam.Values",
"gtsam.Pose2",
"gtsam.symbol",
"os.path.dirname",
"gtsam.ISAM2Params",
"numpy.array",
"gtsam.NonlinearFactorGraph",
"gtsam.ISAM2"
] | [((773, 799), 'gtsam.Pose2', 'gtsam.Pose2', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (784, 799), False, 'import gtsam\n'), ((660, 685), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.1]'], {}), '([0.2, 0.2, 0.1])\n', (668, 685), True, 'import numpy as np\n'), ((734, 759), 'numpy.array', 'np.array', (['[0.3, 0.3, 0.1]'], {}), '([0.3, 0.3, 0.1])\n', (742, 759), True, 'import numpy as np\n'), ((1047, 1075), 'gtsam.NonlinearFactorGraph', 'gtsam.NonlinearFactorGraph', ([], {}), '()\n', (1073, 1075), False, 'import gtsam\n'), ((1095, 1114), 'gtsam.ISAM2Params', 'gtsam.ISAM2Params', ([], {}), '()\n', (1112, 1114), False, 'import gtsam\n'), ((1131, 1156), 'gtsam.ISAM2', 'gtsam.ISAM2', (['params_isam2'], {}), '(params_isam2)\n', (1142, 1156), False, 'import gtsam\n'), ((2503, 2517), 'gtsam.Values', 'gtsam.Values', ([], {}), '()\n', (2515, 2517), False, 'import gtsam\n'), ((86, 111), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n'), ((1208, 1223), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1216, 1223), True, 'import numpy as np\n'), ((1485, 1505), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(0)'], {}), "('x', 0)\n", (1497, 1505), False, 'import gtsam\n'), ((1507, 1522), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1515, 1522), True, 'import numpy as np\n'), ((2546, 2566), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(0)'], {}), "('x', 0)\n", (2558, 2566), False, 'import gtsam\n'), ((2568, 2583), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2576, 2583), True, 'import numpy as np\n'), ((2612, 2632), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(1)'], {}), "('x', 1)\n", (2624, 2632), False, 'import gtsam\n'), ((2634, 2649), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2642, 2649), True, 'import numpy as np\n'), ((2678, 2698), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(2)'], {}), "('x', 2)\n", (2690, 2698), False, 'import gtsam\n'), ((2700, 
2715), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2708, 2715), True, 'import numpy as np\n'), ((2744, 2764), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(3)'], {}), "('x', 3)\n", (2756, 2764), False, 'import gtsam\n'), ((2766, 2781), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (2774, 2781), True, 'import numpy as np\n'), ((1741, 1761), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(0)'], {}), "('x', 0)\n", (1753, 1761), False, 'import gtsam\n'), ((1763, 1778), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1771, 1778), True, 'import numpy as np\n'), ((1836, 1856), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(1)'], {}), "('x', 1)\n", (1848, 1856), False, 'import gtsam\n'), ((1858, 1873), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1866, 1873), True, 'import numpy as np\n'), ((1931, 1951), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(2)'], {}), "('x', 2)\n", (1943, 1951), False, 'import gtsam\n'), ((1953, 1968), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1961, 1968), True, 'import numpy as np\n'), ((2026, 2046), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(3)'], {}), "('x', 3)\n", (2038, 2046), False, 'import gtsam\n'), ((2048, 2063), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (2056, 2063), True, 'import numpy as np\n'), ((2158, 2178), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(0)'], {}), "('x', 0)\n", (2170, 2178), False, 'import gtsam\n'), ((2180, 2200), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(1)'], {}), "('x', 1)\n", (2192, 2200), False, 'import gtsam\n'), ((2202, 2217), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2210, 2217), True, 'import numpy as np\n'), ((2273, 2293), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(1)'], {}), "('x', 1)\n", (2285, 2293), False, 'import gtsam\n'), ((2295, 2315), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(2)'], {}), "('x', 2)\n", (2307, 2315), False, 'import gtsam\n'), ((2317, 2332), 'numpy.array', 'np.array', 
(['[1.0]'], {}), '([1.0])\n', (2325, 2332), True, 'import numpy as np\n'), ((2388, 2408), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(2)'], {}), "('x', 2)\n", (2400, 2408), False, 'import gtsam\n'), ((2410, 2430), 'gtsam.symbol', 'gtsam.symbol', (['"""x"""', '(3)'], {}), "('x', 3)\n", (2422, 2430), False, 'import gtsam\n'), ((2432, 2447), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2440, 2447), True, 'import numpy as np\n')] |
"""LTI Control for SBML models. Base handles initialization, get, set."""
"""
This module creates an LTI system for an SBML model.
States are chemical species. Inputs are unnormalized enzyme reaction elasticities.
Outputs are chemical species.
Notes:
1. Reaction enzymes are identified by the SBML reaction ID.
2. The Jacobian is only recalculated if there is a change in time
"""
from controlSBML.make_roadrunner import makeRoadrunner
import controlSBML as ctl
from controlSBML import util
from controlSBML import msgs
import control
import numpy as np
import pandas as pd
ATOL = 1e-8 # Absolute tolerance for comparisons
TIME = "time"
START_TIME = 0 # Default start time
END_TIME = 5 # Default endtime
POINTS_PER_TIME = 10
TIME = "time"
IS_DEBUG = False
TIMEPOINT_NULL = 1
cleanIt = lambda n: n if n[0] != "[" else n[1:-1]
class ControlBase(object):

    def __init__(self, model_reference, input_names=None, output_names=None,
          is_reduced=False):
        """
        Initializes instance variables
        model_reference: str
            string, SBML file or Roadrunner object
        input_names: name (id) of reaction whose flux is being controlled
                     or the name of a chemical species
        output_names: list-str
            output species
        is_reduced: bool
            construct a reduced model so that the A matrix is nonsingular
        """
        # First initializations
        self.model_reference = model_reference
        self.roadrunner = makeRoadrunner(self.model_reference)
        # Species are the rows of the full stoichiometry matrix.
        self.species_names = list(
              self.roadrunner.getFullStoichiometryMatrix().rownames)
        if output_names is None:
            output_names = self.species_names
        self.output_names = output_names
        # Initializations with error checks
        self.is_reduced = is_reduced
        # Flux outputs are incompatible with model reduction; silently fall
        # back to the full model with a warning.
        if len(set(self.roadrunner.getReactionIds()).intersection(self.output_names)) > 0:
            if self.is_reduced:
                self.is_reduced = False
                text = "Cannot have a flux output and is_reduced=True."
                text += "  Setting is_reduced=False."
                msgs.warn(text)
        # Initial model calculations
        self._jacobian_time = TIMEPOINT_NULL
        self._jacobian_df = None
        # Set defaults.
        # NOTE(review): attribute name is misspelled ("depeendent"); kept
        # as-is because external code may reference it.
        self.depeendent_names = list(
              set(self.species_names).symmetric_difference(self.state_names))
        if not set(self.state_names) <= set(self.species_names):
            raise RuntimeError("State name is not a species name.")
        self.full_stoichiometry_df, self.reduced_stoichiometry_df  \
              = self._makeStoichiometryDF()
        # Check for consistency on the state specification
        # NOTE(review): both branches below are identical; the else-branch
        # presumably should use full_stoichiometry_df -- confirm intent.
        if self.is_reduced:
            self.reaction_names = list(self.reduced_stoichiometry_df.columns)
        else:
            self.reaction_names = list(self.reduced_stoichiometry_df.columns)
        # Handle defaults
        if input_names is None:
            self.input_names = []
        else:
            self.input_names = input_names
        #self.input_names = self._sortList(self.reaction_names, self.input_names)
        self.num_input = len(self.input_names)
        self.num_output = len(self.output_names)
        # Other calculations
        self.antimony = self.roadrunner.getAntimony()
        # Do the initializations
        self.roadrunner.reset()
        # Validation checks
        if not set(self.state_names) <= set(self.species_names):
            text = "State does not include some spaces.\n"
            text += "  Species are: %s" % str(self.species_names)
            text += "  States are: %s" % str(self.state_names)
            raise RuntimeError(text)
        possible_names = set(self.species_names).union(self.reaction_names)
        if not set(self.output_names) <= set(possible_names):
            diff = list(set(self.output_names).difference(self.species_names))
            text = "Outputs must be species or fluxes."
            text += "The following outputs are invalid: %s" % str(diff)
            raise ValueError(text)
        possible_names = set(self.species_names).union(self.reaction_names)
        if not set(self.input_names) <= set(possible_names):
            diff = list(set(self.input_names).difference(possible_names))
            text = "Inputs must be a species or a reaction."
            text += "  Invalid names are: %s" % str(diff)
            raise ValueError(text)
    @property
    def B_df(self):
        """Input (B) matrix as a DataFrame, built from the current input_names."""
        return self._makeBDF()
    @property
    def C_df(self):
        """Output (C) matrix as a DataFrame; see _makeCDF for construction."""
        return self._makeCDF()
def _makeStoichiometryDF(self):
"""
Creates the reduced stoichiometry matrix and the auxiliary matrix
Returns
-------
DataFrame - full stoichiometry matrix
DataFrame - reduced stoichiometry matrix
"""
#
reduced_stoichiometry_df = util.mat2DF(
self.roadrunner.getReducedStoichiometryMatrix())
full_stoichiometry_df = util.mat2DF(
self.roadrunner.getFullStoichiometryMatrix())
return full_stoichiometry_df, reduced_stoichiometry_df
    def _makeCDF(self):
        """
        Creates the output C dataframe based on the requested output_names.
        Columns should be the states. Rows are the outputs.
        There are 3 cases for outputs:
          1) The output is a state
          2) The output is a floating species whose concentration
             is a linear function of the other state variables
          3) The output is a flux and this
             is a linear function of the other state variables

        Returns
        -------
        pd.DataFrame
        """
        # FIXME: This may fail if is_reduced = True because
        #        and input_names includes state since these states are dropped
        is_state_input = len(set(self.input_names).intersection(
              self.state_names)) > 0
        if self.is_reduced and is_state_input:
            raise ValueError("Cannot handle input states for is_reduced=True")
        # Initializations
        state_names = self.state_names
        # Delete states that are inputs
        state_names = list(set(self.state_names).difference(self.input_names))
        # Keep the remaining states in their original relative order.
        state_names = sorted(state_names,
              key=lambda n: self.state_names.index(n))
        num_state = len(state_names)
        # NOTE(review): num_output is computed but not used below.
        num_output = len(self.output_names)
        # Only compute the flux Jacobian if some output is a reaction flux.
        if len(set(self.reaction_names).intersection(self.output_names)) > 0:
            flux_jacobian_df = self.makeFluxJacobian()
        else:
            flux_jacobian_df = None
        # L0 expresses dependent species as linear functions of the states.
        L0 = self.roadrunner.getL0Matrix()
        if len(L0) > 0:
            L0_df = pd.DataFrame(L0, columns=L0.colnames, index=L0.rownames)
        else:
            L0_df = None
        # Iterate across each output to construct the transpose of the C matrix
        C_T_dct = {}
        for name in self.output_names:
            if name in state_names:
                # Case 1: a unit row selecting the state directly.
                values = np.repeat(0, num_state)
                idx = state_names.index(name)
                values[idx] = 1
                C_T_dct[name] = values
            elif name in self.species_names:
                # Case 2: dependent species -- its row of L0 gives the
                # linear combination of states.
                if L0_df is None:
                    raise RuntimeError("Species missing from L0: %s" % name)
                if not name in L0.rownames:
                    raise RuntimeError("Species missing from L0: %s" % name)
                C_T_dct[name] = L0_df.loc[name, :]
            elif name in self.reaction_names:
                # Case 3: flux output -- row of the flux Jacobian.
                C_T_dct[name] = flux_jacobian_df.loc[name, :]
        C_df_T = pd.DataFrame(C_T_dct, index=state_names)
        C_df = C_df_T.transpose()
        values = C_df.values.flatten()
        if any([np.isnan(v) for v in values]):
            raise RuntimeError("Nan value encountered.")
        return C_df
@property
def roadrunner_namespace(self):
"""
Constructs the roadrunner namespace and associated values.
Parameters
----------
:
Returns
-------
dict
"""
dct = {}
for id_lst in [self.roadrunner.getCompartmentIds(),
self.roadrunner.getBoundarySpeciesIds(),
self.roadrunner.getFloatingSpeciesIds(),
self.roadrunner.getBoundarySpeciesIds(),
self.roadrunner.getGlobalParameterIds(),
]:
for idx in id_lst:
dct[idx] = self.get(idx)
return dct
    @property
    def state_ser(self):
        """
        Constructs the current state values, keyed by state name.

        Returns
        -------
        values for self.state_names, as produced by util.makeRoadrunnerSer
        """
        ser = util.makeRoadrunnerSer(self.roadrunner, self.state_names)
        return ser
    @property
    def output_ser(self):
        """
        Constructs the current output values, keyed by output name.

        Returns
        -------
        values for self.output_names, as produced by util.makeSer
        """
        return util.makeSer(self.roadrunner, self.output_names)
    @property
    def jacobian_df(self):
        """
        Calculates the Jacobian, or a reduced Jacobian if the option is selected.
        Improves efficiency by using a previous calculation if the time has not changed.

        Returns
        -------
        pd.DataFrame, species_names
        """
        # Cache hit: same simulation time and a Jacobian already computed.
        if np.isclose(self.getTime(), self._jacobian_time):
            if self._jacobian_df is not None:
                return self._jacobian_df
        if self.is_reduced:
            # Conserved moiety analysis must be on for the reduced Jacobian;
            # restore the caller's setting afterwards.
            current_bool = self.roadrunner.conservedMoietyAnalysis
            self.roadrunner.conservedMoietyAnalysis = True
            jacobian_mat = self.roadrunner.getReducedJacobian()
            self.roadrunner.conservedMoietyAnalysis = current_bool
        else:
            jacobian_mat = self.roadrunner.getFullJacobian()
        if len(jacobian_mat.rownames) != len(jacobian_mat.colnames):
            raise RuntimeError("Jacobian is not square!")
        names = list(jacobian_mat.colnames)
        self._jacobian_df = pd.DataFrame(jacobian_mat, columns=names, index=names)
        # Record the time of this calculation for the cache check above.
        self._jacobian_time = self.getTime()
        return self._jacobian_df
    @property
    def state_names(self):
        """State names (Jacobian columns), ordered as in species_names."""
        state_names = list(self.jacobian_df.columns)
        return self._sortList(self.species_names, state_names)
    @property
    def num_state(self):
        """Number of state variables."""
        return len(self.state_names)
    @property
    def A_df(self):
        """System (A) matrix: the Jacobian at the current time."""
        return self.jacobian_df
@staticmethod
def isRoadrunnerKey(key):
return not ((key[0] == "_") or ("(" in key) or (key[-1] == "'"))
    def getJacobian(self, time=None):
        """
        Calculates the Jacobian at the specified time.

        Parameters
        ----------
        time: float
            if None, use the current simulation time

        Returns
        -------
        pd.DataFrame
        """
        # Calculate the Jacobian
        # Temporarily move the simulation to the requested time, then restore.
        if time is not None:
            current_time = self.getTime()
            self.setTime(time)
        jacobian_df = self.jacobian_df.copy()
        if time is not None:
            self.setTime(current_time)
        return jacobian_df
    def setTime(self, time):
        """Resets the model, simulates up to *time*, and invalidates the
        cached Jacobian."""
        self.roadrunner.reset()
        self._jacobian_time = TIMEPOINT_NULL
        if time > 0.01:
            _ = self.roadrunner.simulate(0.0, time)

    # FIXME: Doesn't update "sets" done to roadrunner. Can resolve by
    #        by assigning sets to new instance.
def copy(self):
"""
Creates a copy of the object.
Returns
-------
controlSBML
"""
ctlsb = self.__class__(self.model_reference,
input_names=self.input_names,
output_names=self.output_names)
ctlsb.setTime(self.getTime())
return ctlsb
    def getTime(self):
        """
        Gets current simulation time.

        Returns
        -------
        float
        """
        return self.roadrunner.model.getTime()
# TODO: More complete check of attributes?
def equals(self, other):
"""
Checks that they have the same information
Parameters
----------
other: ControlSBML
Returns
-------
bool
"""
bValue = self.antimony == other.antimony
if IS_DEBUG:
print("1: %d" % bValue)
bValue = bValue and np.isclose(self.getTime(), \
other.getTime())
if IS_DEBUG:
print("2: %d" % bValue)
bValue = bValue and all([s1 == s2 for s1, s2
in zip(self.state_names, other.state_names)])
if IS_DEBUG:
print("3: %d" % bValue)
diff = set(self.roadrunner.keys()).symmetric_difference(
other.roadrunner.keys())
bValue = bValue and (len(diff) == 0)
if IS_DEBUG:
print("4: %d" % bValue)
for attr in ["state_names", "input_names", "output_names"]:
expr1 = "self.%s" % attr
expr2 = "other.%s" % attr
try:
np.array(eval(expr1)) == np.array(eval(expr2))
except Exception:
bValue = False
break
if IS_DEBUG:
print("5: %d" % bValue)
# Check the roadrunner state
if bValue:
for key, value in self.roadrunner.items():
if self.isRoadrunnerKey(key):
bValue = bValue and (other.roadrunner[key] == value)
if IS_DEBUG:
print("6: %d" % bValue)
return bValue
def get(self, names=None):
"""
Provides the roadrunner values for a name. If no name,
then all values are given.
Parameters
----------
name: str/list-str
Returns
-------
object/dict
"""
if names is None:
names = self.roadrunner.keys()
return util.getRoadrunnerValue(self.roadrunner, names)
    def set(self, name_dct):
        """
        Sets the values of names and values in the roadrunner instance.

        Parameters
        ----------
        name_dct: dict
            key: str (roadrunner name)
            value: value
        """
        util.setRoadrunnerValue(self.roadrunner, name_dct)
def add(self, name_dct):
"""
Adds the indicated value to the current value of the variable.
Parameters
----------
name_dct: dict
key: str
value: value
"""
cur_dct = util.getRoadrunnerValue(self.roadrunner, name_dct.keys())
new_dct = {n: cur_dct[n] + name_dct[n] for n in name_dct.keys()}
util.setRoadrunnerValue(self.roadrunner, new_dct)
@staticmethod
def _sortList(super_lst, sub_lst):
"""
Sorts the sub_lst in the same order as the super_lst.
Parameters
----------
super_lst: list
sub_lst: list
Returns
-------
list
"""
new_super_lst = list(super_lst)
return sorted(sub_lst, key=lambda v: new_super_lst.index(v))
def separateSpeciesReactionInputs(self):
species_inputs = [n for n in self.input_names if n in self.species_names]
reaction_inputs = [n for n in self.input_names if n in self.reaction_names]
return species_inputs, reaction_inputs
    def _makeBDF(self, time=None):
        """
        Constructs a dataframe for the B matrix.
        The columns must be in the same order as the input_names.

        Parameters
        ---------
        time: float

        Returns
        -------
        np.ndarray (n X p), where p = len(input_names)
        """
        if len(self.input_names) > 0:
            # Determine which inputs are reactions and which inputs are species
            species_inputs, reaction_inputs = self.separateSpeciesReactionInputs()
            # Construct the matrix for species inputs
            if len(species_inputs) > 0:
                jacobian_df = self.getJacobian(time=time)
                # Don't include states that are input species
                df = jacobian_df.drop(species_inputs, axis=0)
                B_species_df = df[species_inputs]
                B_df = B_species_df
            if len(reaction_inputs) > 0:
                # Reaction inputs: their stoichiometry columns.
                B_reaction_df = self.full_stoichiometry_df[reaction_inputs]
                B_df = B_reaction_df
            # Select the columns needed from the stoichiometry matrix
            # Merge the two
            if (len(species_inputs) > 0) and (len(reaction_inputs) > 0):
                # NOTE(review): B_species_df (rows exclude the input species)
                # and B_reaction_df (all species rows) are concatenated;
                # confirm the row indexes align as intended.
                B_df = pd.concat([B_species_df, B_reaction_df], axis=1)
                # Restore the caller-specified column order.
                B_df = B_df[self.input_names]
        else:
            # No inputs: a single zero column so downstream code has a B.
            ncol = 1
            B_mat = np.repeat(0, self.num_state)
            B_mat = np.reshape(B_mat, (self.num_state, ncol))
            B_df = pd.DataFrame(B_mat, index=self.state_names)
        #
        return B_df
    def makeStateSpace(self, time=None, A_mat=None, B_mat=None,
          C_mat=None, D_mat=None):
        """
        Creates a state space control object for
        the n X n jacobian. By default, the D matrix is always 0.

        Parameters
        ----------
        The default values of the matrices are calculated in the constructor.
        These can be overridden.
        time: float (time at which Jacobian is obtained)
        A_mat: np.array(n X n) or DataFrame
        B_mat: np.array(n X p) or DataFrame
        C_mat: np.array(q X n) or DataFrame
        D_mat: np.array(q X p) or DataFrame

        Returns
        -------
        control.StateSpace
        """
        def df2Mat(df):
            # Accept either a DataFrame or a raw array (or None).
            if isinstance(df, pd.DataFrame):
                return df.values
            else:
                return df
        #
        if time is None:
            time = self.getTime()
        # Construct the matrices
        A_mat = df2Mat(A_mat)
        B_mat = df2Mat(B_mat)
        C_mat = df2Mat(C_mat)
        D_mat = df2Mat(D_mat)
        # Calculate the Jacobian
        #
        if A_mat is None:
            A_df = self.getJacobian(time)
            columns = A_df.columns
            # Remove any state that's an input
            # Allow state to be generated internally
            for name in self.input_names:
                if name in columns:
                    A_df = A_df.drop(name, axis=0)
                    A_df = A_df.drop(name, axis=1)
            A_mat = A_df.values
        #
        if B_mat is None:
            B_df = self._makeBDF(time=time)
            if B_df is None:
                B_mat = None
            else:
                B_mat = B_df.values
        if C_mat is None:
            # Construct the output matrix
            C_mat = self.C_df.values
        if D_mat is None:
            # Zero feed-through matrix shaped to match B and the outputs.
            nrow = len(self.output_names)
            if B_mat is None:
                ncol = 1
            else:
                ncol = np.shape(B_mat)[1]
            D_mat = np.repeat(0, nrow*ncol)
            D_mat = np.reshape(D_mat, (nrow, ncol))
        ss = control.StateSpace(A_mat, B_mat, C_mat, D_mat)
        return ss
    def makeNonlinearIOSystem(self, name, effector_dct=None):
        """
        Creates an object that can be used in connections with the
        control package.

        Parameters
        ----------
        name: str (name of the system)
        effector_dct: dict (maps reaction inputs to roadrunner muteables)
            key: str (input name)
            value: str (name of roadrunner muteable)

        Returns
        -------
        controlSBML.NonlinearIOSystem
        """
        return ctl.NonlinearIOSystem(name, self, effector_dct=effector_dct)
    @staticmethod
    def reduceTransferFunction(tf, atol=ATOL):
        """
        Reduces the order of a transfer function if trailing zeroes.

        Parameters
        ----------
        tf: control.TransferFunction

        Returns
        -------
        tf: control.TransferFunction
        """
        def findOrderOfFirstNonzeroDigit(polynomial):
            # Counts trailing near-zero coefficients (coefficients are
            # highest-order first, so these are the lowest-order terms).
            for idx in range(len(polynomial)):
                pos = len(polynomial) - idx - 1
                if not np.isclose(polynomial[pos], 0, atol=atol):
                    return idx
            return len(polynomial) - 1
        def reduceOrder(polynomial, new_order):
            # Drops the trailing new_order coefficients.
            pos = len(polynomial) - new_order
            return polynomial[0:pos]
        #
        numerator = tf.num[0][0]
        denominator = tf.den[0][0]
        # Common trailing zeros in numerator and denominator amount to a
        # shared factor of s that can be cancelled.
        lowest_order = min(findOrderOfFirstNonzeroDigit(numerator),
              findOrderOfFirstNonzeroDigit(denominator))
        #
        new_numerator = reduceOrder(numerator, lowest_order)
        new_denominator = reduceOrder(denominator, lowest_order)
        new_tf = control.TransferFunction(new_numerator, new_denominator)
        return new_tf
def makeTransferFunction(self, time=None, atol=ATOL):
"""
Creates a transfer function for the system. Verifies that there
is a single input and a single output. Reduces the order of the
transfer function as needed.
Parameters
----------
time: float (time at which Jacobian is obtained)
atol: absolute tolerance for comparison
Returns
-------
control.TransferFunction
"""
# Validity checks
if len(self.input_names) != 1:
raise ValueError("Must have exactly one input.")
if len(self.output_names) != 1:
raise ValueError("Must have exactly one output.")
# Get initial transfer function
state_space = self.makeStateSpace(time=time)
is_nan = any([np.isnan(v) for v in state_space.A.flatten()])
if is_nan:
tf = control.TransferFunction([0], [1])
else:
tf = control.ss2tf(state_space)
return tf
#
return self.reduceTransferFunction(tf, atol=atol)
def makeFluxJacobian(self, time=None):
"""
Constructs the Jacobian of the reaction flux vector.
Parameters
----------
time: float
Time at which this is calculated
Returns
-------
pd.DataFrame
index: reaction id
column: species
"""
# FIXME : Handle case where state_names < species_names
# This calculation only works if state_names == species_names
diff = set(self.state_names).symmetric_difference(self.species_names)
if len(diff) > 0:
raise RuntimeError(
"Code doesn't work if species_names != state_names")
# Adjust time if necessary
cur_time = self.getTime()
if time is None:
time = cur_time
if not np.isclose(time, cur_time):
self.setTime(time)
#
dct = {}
for state_name in self.state_names:
dct[state_name] = []
for reaction_name in self.reaction_names:
dct[state_name].append(self.roadrunner.getEE(
reaction_name, state_name))
#
df = pd.DataFrame(dct, index=self.reaction_names)
if time != cur_time:
self.setTime(cur_time)
return df
| [
"pandas.DataFrame",
"control.ss2tf",
"controlSBML.util.makeRoadrunnerSer",
"controlSBML.util.setRoadrunnerValue",
"controlSBML.make_roadrunner.makeRoadrunner",
"numpy.isnan",
"numpy.shape",
"numpy.isclose",
"numpy.repeat",
"numpy.reshape",
"controlSBML.NonlinearIOSystem",
"controlSBML.util.mak... | [((1507, 1543), 'controlSBML.make_roadrunner.makeRoadrunner', 'makeRoadrunner', (['self.model_reference'], {}), '(self.model_reference)\n', (1521, 1543), False, 'from controlSBML.make_roadrunner import makeRoadrunner\n'), ((7586, 7626), 'pandas.DataFrame', 'pd.DataFrame', (['C_T_dct'], {'index': 'state_names'}), '(C_T_dct, index=state_names)\n', (7598, 7626), True, 'import pandas as pd\n'), ((8651, 8708), 'controlSBML.util.makeRoadrunnerSer', 'util.makeRoadrunnerSer', (['self.roadrunner', 'self.state_names'], {}), '(self.roadrunner, self.state_names)\n', (8673, 8708), False, 'from controlSBML import util\n'), ((8917, 8965), 'controlSBML.util.makeSer', 'util.makeSer', (['self.roadrunner', 'self.output_names'], {}), '(self.roadrunner, self.output_names)\n', (8929, 8965), False, 'from controlSBML import util\n'), ((9978, 10032), 'pandas.DataFrame', 'pd.DataFrame', (['jacobian_mat'], {'columns': 'names', 'index': 'names'}), '(jacobian_mat, columns=names, index=names)\n', (9990, 10032), True, 'import pandas as pd\n'), ((13796, 13843), 'controlSBML.util.getRoadrunnerValue', 'util.getRoadrunnerValue', (['self.roadrunner', 'names'], {}), '(self.roadrunner, names)\n', (13819, 13843), False, 'from controlSBML import util\n'), ((14059, 14109), 'controlSBML.util.setRoadrunnerValue', 'util.setRoadrunnerValue', (['self.roadrunner', 'name_dct'], {}), '(self.roadrunner, name_dct)\n', (14082, 14109), False, 'from controlSBML import util\n'), ((14500, 14549), 'controlSBML.util.setRoadrunnerValue', 'util.setRoadrunnerValue', (['self.roadrunner', 'new_dct'], {}), '(self.roadrunner, new_dct)\n', (14523, 14549), False, 'from controlSBML import util\n'), ((18852, 18898), 'control.StateSpace', 'control.StateSpace', (['A_mat', 'B_mat', 'C_mat', 'D_mat'], {}), '(A_mat, B_mat, C_mat, D_mat)\n', (18870, 18898), False, 'import control\n'), ((19422, 19482), 'controlSBML.NonlinearIOSystem', 'ctl.NonlinearIOSystem', (['name', 'self'], {'effector_dct': 'effector_dct'}), 
'(name, self, effector_dct=effector_dct)\n', (19443, 19482), True, 'import controlSBML as ctl\n'), ((20568, 20624), 'control.TransferFunction', 'control.TransferFunction', (['new_numerator', 'new_denominator'], {}), '(new_numerator, new_denominator)\n', (20592, 20624), False, 'import control\n'), ((22926, 22970), 'pandas.DataFrame', 'pd.DataFrame', (['dct'], {'index': 'self.reaction_names'}), '(dct, index=self.reaction_names)\n', (22938, 22970), True, 'import pandas as pd\n'), ((6695, 6751), 'pandas.DataFrame', 'pd.DataFrame', (['L0'], {'columns': 'L0.colnames', 'index': 'L0.rownames'}), '(L0, columns=L0.colnames, index=L0.rownames)\n', (6707, 6751), True, 'import pandas as pd\n'), ((16571, 16599), 'numpy.repeat', 'np.repeat', (['(0)', 'self.num_state'], {}), '(0, self.num_state)\n', (16580, 16599), True, 'import numpy as np\n'), ((16620, 16661), 'numpy.reshape', 'np.reshape', (['B_mat', '(self.num_state, ncol)'], {}), '(B_mat, (self.num_state, ncol))\n', (16630, 16661), True, 'import numpy as np\n'), ((16681, 16724), 'pandas.DataFrame', 'pd.DataFrame', (['B_mat'], {'index': 'self.state_names'}), '(B_mat, index=self.state_names)\n', (16693, 16724), True, 'import pandas as pd\n'), ((18763, 18788), 'numpy.repeat', 'np.repeat', (['(0)', '(nrow * ncol)'], {}), '(0, nrow * ncol)\n', (18772, 18788), True, 'import numpy as np\n'), ((18807, 18838), 'numpy.reshape', 'np.reshape', (['D_mat', '(nrow, ncol)'], {}), '(D_mat, (nrow, ncol))\n', (18817, 18838), True, 'import numpy as np\n'), ((21563, 21597), 'control.TransferFunction', 'control.TransferFunction', (['[0]', '[1]'], {}), '([0], [1])\n', (21587, 21597), False, 'import control\n'), ((21629, 21655), 'control.ss2tf', 'control.ss2tf', (['state_space'], {}), '(state_space)\n', (21642, 21655), False, 'import control\n'), ((22574, 22600), 'numpy.isclose', 'np.isclose', (['time', 'cur_time'], {}), '(time, cur_time)\n', (22584, 22600), True, 'import numpy as np\n'), ((2154, 2169), 'controlSBML.msgs.warn', 'msgs.warn', 
(['text'], {}), '(text)\n', (2163, 2169), False, 'from controlSBML import msgs\n'), ((6992, 7015), 'numpy.repeat', 'np.repeat', (['(0)', 'num_state'], {}), '(0, num_state)\n', (7001, 7015), True, 'import numpy as np\n'), ((7716, 7727), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (7724, 7727), True, 'import numpy as np\n'), ((16425, 16473), 'pandas.concat', 'pd.concat', (['[B_species_df, B_reaction_df]'], {'axis': '(1)'}), '([B_species_df, B_reaction_df], axis=1)\n', (16434, 16473), True, 'import pandas as pd\n'), ((21480, 21491), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (21488, 21491), True, 'import numpy as np\n'), ((18724, 18739), 'numpy.shape', 'np.shape', (['B_mat'], {}), '(B_mat)\n', (18732, 18739), True, 'import numpy as np\n'), ((19968, 20009), 'numpy.isclose', 'np.isclose', (['polynomial[pos]', '(0)'], {'atol': 'atol'}), '(polynomial[pos], 0, atol=atol)\n', (19978, 20009), True, 'import numpy as np\n')] |
"""This module contains functionality for generating scenarios.
Specifically, it generates network configurations and action space
configurations based on number of hosts and services in network using standard
formula.
"""
import numpy as np
import nasim.scenarios.utils as u
from nasim.scenarios import Scenario
from nasim.scenarios.host import Host
# Constants for generating network
USER_SUBNET_SIZE = 5
DMZ = 1
SENSITIVE = 2
USER = 3
class ScenarioGenerator:
"""Generates a scenario based on standard formula
For explanation of the details of how scenarios are generated see
:ref:`scenario_generation_explanation`.
Notes
-----
**Exploit Probabilities**:
Success probabilities of each exploit are determined based on the value of
the ``exploit_probs`` argument, as follows:
- ``exploit_probs=None`` - probabilities generated randomly from uniform
distribution
- ``exploit_probs="mixed"`` - probabilities are chosen from [0.3, 0.6, 0.9]
with probability [0.2, 0.4, 0.4] (see :ref:`generated_exploit_probs` for
explanation).
- ``exploit_probs=float`` - probability of each exploit is set to value
- ``exploit_probs=list[float]`` - probability of each exploit is set to
corresponding value in list
For deterministic exploits set ``exploit_probs=1.0``.
**Host Configuration distribution**:
1. if ``uniform=True`` then host configurations are chosen uniformly at
random from set of all valid possible configurations
2. if ``uniform=False`` host configurations are chosen to be correlated
(see :ref:`correlated_configurations` for explanation)
"""
def generate(self,
num_hosts,
num_services,
num_os=2,
num_exploits=None,
r_sensitive=10,
r_user=10,
exploit_cost=1,
exploit_probs=1.0,
service_scan_cost=1,
os_scan_cost=1,
subnet_scan_cost=1,
uniform=False,
alpha_H=2.0,
alpha_V=2.0,
lambda_V=1.0,
restrictiveness=5,
random_goal=False,
base_host_value=1,
host_discovery_value=1,
seed=None,
name=None,
step_limit=None,
**kwargs):
"""Generate the network configuration based on standard formula.
Parameters
----------
num_hosts : int
number of hosts to include in network (minimum is 3)
num_services : int
number of services running on network (minimum is 1)
num_os : int, optional
number of OS running on network (minimum is 1) (default=2)
num_exploits : int, optional
number of exploits to use. minimum is 1. If None will use
num_services (default=None)
r_sensitive : float, optional
reward for sensitive subnet documents (default=10)
r_user : float, optional
reward for user subnet documents (default=10)
exploit_cost : int or float, optional
cost for an exploit (default=1)
exploit_probs : None, float, list of floats or "mixed", optional
success probability of exploits (default=1.0)
service_scan_cost : int or float, optional
cost for a service scan (default=1)
os_scan_cost : int or float, optional
cost for an os scan (default=1)
subnet_scan_cost : int or float, optional
cost for an subnet scan (default=1)
uniform : bool, optional
whether to use uniform distribution or correlated host configs
(default=False)
alpha_H : float, optional
(only used when uniform=False) Scaling/concentration parameter for
controlling corelation between host configurations (must be > 0)
(default=2.0)
alpha_V : float, optional
(only used when uniform=False) scaling/concentration parameter for
controlling corelation between services across host configurations
(must be > 0) (default=2.0)
lambda_V : float, optional
(only used when uniform=False) parameter for controlling average
number of services running per host configuration (must be > 0)
(default=1.0)
restrictiveness : int, optional
max number of services allowed to pass through firewalls between
zones (default=5)
random_goal : bool, optional
whether to randomly assign the goal user host or not
(default=False)
base_host_value : int, optional,
value of non sensitive hosts (default=1)
host_discovery_value : int, optional
value of discovering a host for the first time (default=1)
seed : int, optional
random number generator seed (default=None)
name : str, optional
name of the scenario, if None one will be generated (default=None)
step_limit : int, optional
max number of steps permitted in a single episode, if None there is
no limit (default=None)
Returns
-------
Scenario
scenario description
"""
assert 0 < num_services
assert 2 < num_hosts
assert num_exploits is None or 0 < num_exploits
assert 0 < num_os
assert 0 < r_sensitive and 0 < r_user
assert 0 < alpha_H and 0 < alpha_V and 0 < lambda_V
assert 0 < restrictiveness
if seed is not None:
np.random.seed(seed)
if num_exploits is None:
num_exploits = num_services
self._generate_subnets(num_hosts)
self._generate_topology()
self._generate_services(num_services)
self._generate_os(num_os)
self._generate_exploits(num_exploits, exploit_cost, exploit_probs)
self._generate_sensitive_hosts(r_sensitive, r_user, random_goal)
self.base_host_value = base_host_value
self.host_discovery_value = host_discovery_value
if uniform:
self._generate_uniform_hosts()
else:
self._generate_correlated_hosts(alpha_H, alpha_V, lambda_V)
self._ensure_host_vulnerability()
self._generate_firewall(restrictiveness)
self.service_scan_cost = service_scan_cost
self.os_scan_cost = os_scan_cost
self.subnet_scan_cost = subnet_scan_cost
if name is None:
name = f"gen_H{num_hosts}_E{num_exploits}_S{num_services}"
self.name = name
self.step_limit = step_limit
return self._construct_scenario()
def _construct_scenario(self):
scenario_dict = dict()
scenario_dict[u.SUBNETS] = self.subnets
scenario_dict[u.TOPOLOGY] = self.topology
scenario_dict[u.SERVICES] = self.services
scenario_dict[u.OS] = self.os
scenario_dict[u.SENSITIVE_HOSTS] = self.sensitive_hosts
scenario_dict[u.EXPLOITS] = self.exploits
scenario_dict[u.SERVICE_SCAN_COST] = self.service_scan_cost
scenario_dict[u.OS_SCAN_COST] = self.os_scan_cost
scenario_dict[u.SUBNET_SCAN_COST] = self.subnet_scan_cost
scenario_dict[u.FIREWALL] = self.firewall
scenario_dict[u.HOSTS] = self.hosts
scenario_dict[u.STEP_LIMIT] = self.step_limit
scenario = Scenario(scenario_dict, name=self.name)
return scenario
def _generate_subnets(self, num_hosts):
# Internet (0), DMZ (1) and sensitive (2) subnets both contain 1 host
subnets = [1, 1, 1]
# remainder of hosts go into user subnet tree
num_full_user_subnets = ((num_hosts - 2) // USER_SUBNET_SIZE)
subnets += [USER_SUBNET_SIZE] * num_full_user_subnets
if ((num_hosts - 2) % USER_SUBNET_SIZE) != 0:
subnets.append((num_hosts - 2) % USER_SUBNET_SIZE)
self.subnets = subnets
    def _generate_topology(self):
        """Build the subnet adjacency matrix and store it in self.topology.

        Row/column 0 is the internet. The internet, DMZ, sensitive and first
        user subnet are wired together (except internet <-> internal
        shortcuts are blocked); any additional user subnets are connected as
        an array-backed binary tree rooted at the first user subnet.
        """
        # including internet subnet
        num_subnets = len(self.subnets)
        topology = np.zeros((num_subnets, num_subnets))
        # DMZ subnet is connected to sensitive and first user subnet and also
        # to internet
        for row in range(USER + 1):
            for col in range(USER + 1):
                if row == u.INTERNET and col > DMZ:
                    continue
                if row > DMZ and col == u.INTERNET:
                    continue
                topology[row][col] = 1
        if num_subnets == USER + 1:
            # no extra user subnets, so there is no tree to build
            self.topology = topology
            return
        # all other subnets are part of user binary tree
        for row in range(USER, num_subnets):
            # subnet connected to itself
            topology[row][row] = 1
            # position in tree
            pos = row - USER
            if pos > 0:
                # parent/child indices follow the standard array-backed
                # binary-tree layout, offset by 3 for the non-user subnets
                parent = ((pos - 1) // 2) + 3
                topology[row][parent] = 1
            child_left = ((2 * pos) + 1) + 3
            child_right = ((2 * pos) + 2) + 3
            if child_left < num_subnets:
                topology[row][child_left] = 1
            if child_right < num_subnets:
                topology[row][child_right] = 1
        self.topology = topology
def _generate_services(self, num_services):
self.services = [f"srv_{s}" for s in range(num_services)]
def _generate_os(self, num_os):
self.os = [f"os_{i}" for i in range(num_os)]
def _generate_sensitive_hosts(self, r_sensitive, r_user, random_goal):
sensitive_hosts = {}
# first sensitive host is first host in SENSITIVE network
sensitive_hosts[(SENSITIVE, 0)] = r_sensitive
# second sensitive host in USER network
if random_goal and len(self.subnets) > SENSITIVE:
# randomly choose user host to be goal
subnet_id = np.random.randint(USER, len(self.subnets))
host_id = np.random.randint(0, self.subnets[subnet_id])
sensitive_hosts[(subnet_id, host_id)] = r_user
else:
# second last host in USER network is goal
sensitive_hosts[(len(self.subnets)-1, self.subnets[-1]-1)] = r_user
self.sensitive_hosts = sensitive_hosts
    def _generate_uniform_hosts(self):
        """Populate self.hosts, drawing each host's service configuration
        uniformly at random from the set of all valid configurations and
        its OS uniformly from self.os.
        """
        hosts = dict()
        host_config_set = self._possible_host_configs()
        num_configs = len(host_config_set)
        for subnet, size in enumerate(self.subnets):
            if subnet == u.INTERNET:
                # no hosts live on the internet subnet
                continue
            for h in range(size):
                # uniform draw over all valid service configurations
                service_cfg = host_config_set[np.random.choice(num_configs)]
                service_cfg = self._convert_to_service_map(service_cfg)
                os = np.random.choice(self.os)
                os_cfg = self._convert_to_os_map(os)
                address = (subnet, h)
                value = self._get_host_value(address)
                host = Host(address, os_cfg.copy(), service_cfg.copy(), value,
                            self.host_discovery_value)
                hosts[address] = host
        self.hosts = hosts
def _possible_host_configs(self):
"""Generate set of all possible host service configurations based
on number of exploits/services in environment.
Note: Each host is vulnerable to at least one exploit, so there is
no configuration where all services are absent.
Returns
-------
configs : ndarray
all possible configurations, where each configuration is a list of
bools corresponding to the presence or absence of a service
"""
# remove last permutation which is all False
configs = self._permutations(len(self.services))[:-1]
return configs
def _permutations(self, n):
"""Generate list of all possible permutations of n bools
N.B First permutation in list is always the all True permutation
and final permutation in list is always the all False permutationself.
perms[1] = [True, ..., True]
perms[-1] = [False, ..., False]
Parameters
----------
n : int
bool list length
Returns
-------
perms : list[list]
all possible permutations of n bools
"""
# base cases
if n <= 0:
return []
if n == 1:
return [[True], [False]]
perms = []
for p in self._permutations(n - 1):
perms.append([True] + p)
perms.append([False] + p)
return perms
    def _generate_correlated_hosts(self, alpha_H, alpha_V, lambda_V):
        """Populate self.hosts with correlated configurations.

        Configurations are drawn host by host via a Nested Dirichlet
        Process (see _get_host_config), so hosts tend to share
        configurations; the prev_* lists carry the process state across
        draws.
        """
        hosts = dict()
        # shared state for the Dirichlet Process draws
        prev_configs = []
        prev_vuls = []
        prev_os = []
        host_num = 0
        for subnet, size in enumerate(self.subnets):
            if subnet == u.INTERNET:
                # no hosts live on the internet subnet
                continue
            for m in range(size):
                services, os = self._get_host_config(host_num,
                                                     alpha_H,
                                                     prev_configs,
                                                     alpha_V,
                                                     prev_vuls,
                                                     lambda_V,
                                                     prev_os)
                service_cfg = self._convert_to_service_map(services)
                os_cfg = self._convert_to_os_map(os)
                host_num += 1
                address = (subnet, m)
                value = self._get_host_value(address)
                host = Host(address, os_cfg.copy(), service_cfg.copy(), value,
                            self.host_discovery_value)
                hosts[address] = host
        self.hosts = hosts
def _get_host_config(self, host_num, alpha_H, prev_configs, alpha_V,
prev_vuls, lambda_V, prev_os):
"""Select a host configuration from all possible configurations based
using a Nested Dirichlet Process
"""
if host_num == 0 \
or np.random.rand() < (alpha_H / (alpha_H + host_num - 1)):
# if first host or with prob proportional to alpha_H
# choose new config
new_config = self._sample_config(
alpha_V, prev_vuls, lambda_V, prev_os
)
else:
# sample uniformly from previous sampled configs
new_config = prev_configs[np.random.choice(len(prev_configs))]
prev_configs.append(new_config)
return new_config
    def _sample_config(self, alpha_V, prev_vuls, lambda_V, prev_os):
        """Sample a host configuration from all possible configurations based
        using a Dirichlet Process

        Returns a (service bool list, os name) tuple. Drawn service indices
        and the drawn OS are appended to *prev_vuls* / *prev_os* so later
        draws can reuse them.
        """
        num_services = len(self.services)
        # no services present by default
        new_services_cfg = [False for i in range(num_services)]
        # randomly get number of times to sample using poission dist in range
        # (0, num_services) minimum 1 service running
        n = max(np.random.poisson(lambda_V), 1)
        # draw n samples from Dirichlet Process
        # (alpha_V, uniform dist of services)
        for i in range(n):
            if i == 0 or np.random.rand() < (alpha_V / (alpha_V + i - 1)):
                # draw randomly from uniform dist over services
                x = np.random.randint(0, num_services)
            else:
                # draw uniformly at random from previous choices
                x = np.random.choice(prev_vuls)
            new_services_cfg[x] = True
            prev_vuls.append(x)
        # sample an os from Dirichlet Process (alpha_V, uniform dist of OSs)
        # NOTE(review): the condition below reuses loop variable `i` left
        # over from the service loop (i == n - 1 here, and n >= 1 guarantees
        # it is defined); it looks like len(prev_os) may have been intended
        # instead -- confirm before changing.
        if len(prev_os) == 0 \
                or np.random.rand() < (alpha_V / (alpha_V + i - 1)):
            # draw randomly from uniform dist over OSs
            os = np.random.choice(self.os)
        else:
            # draw uniformly at random from previous choices
            os = np.random.choice(prev_os)
        prev_os.append(os)
        return (new_services_cfg, os)
def _is_sensitive_host(self, addr):
return addr in self.sensitive_hosts
def _convert_to_service_map(self, config):
"""Converts list of bools to a map from service name -> bool """
service_map = {}
for srv, val in zip(self.services, config):
service_map[srv] = val
return service_map
def _convert_to_os_map(self, os):
"""Converts an OS string to a map from os name -> bool
N.B. also adds an entry for None os, which makes it easier for
vectorizing and checking if an exploit will work (since exploits can
have os=None)
"""
os_map = {}
for os_name in self.os:
os_map[os_name] = os_name == os
return os_map
    def _ensure_host_vulnerability(self):
        """Ensures each subnet has atleast one vulnerable host and all sensitive hosts
        are vulnerable
        """
        vulnerable_subnets = set()
        for host_addr, host in self.hosts.items():
            if not self._is_sensitive_host(host_addr) \
                    and host_addr[0] in vulnerable_subnets:
                # subnet already covered and host is not a goal, skip check
                continue
            if self._host_is_vulnerable(host):
                vulnerable_subnets.add(host_addr[0])
            elif self._is_sensitive_host(host_addr):
                # goal hosts must always be exploitable
                self._update_host_to_vulnerable(host)
                vulnerable_subnets.add(host_addr[0])
        for subnet, size in enumerate(self.subnets):
            if subnet in vulnerable_subnets or subnet == u.INTERNET:
                continue
            # subnet has no vulnerable host yet: patch a randomly chosen one
            host_num = np.random.randint(size)
            host = self.hosts[(subnet, host_num)]
            self._update_host_to_vulnerable(host)
            vulnerable_subnets.add(subnet)
def _host_is_vulnerable(self, host):
for e_def in self.exploits.values():
if self._host_is_vulnerable_to_exploit(host, e_def):
return True
return False
def _host_is_vulnerable_to_exploit(self, host, exploit_def):
e_srv = exploit_def[u.EXPLOIT_SERVICE]
e_os = exploit_def[u.EXPLOIT_OS]
if not host.services[e_srv]:
return False
return e_os is None or host.os[e_os]
    def _update_host_to_vulnerable(self, host):
        """Update host config so it's vulnerable to at least one exploit """
        # choose an exploit randomly and make host vulnerable to it
        e_def = np.random.choice(list(self.exploits.values()))
        host.services[e_def[u.EXPLOIT_SERVICE]] = True
        if e_def[u.EXPLOIT_OS] is not None:
            # must set all to false first, so only one host OS is true
            # (keeps the one-hot OS map invariant intact)
            for os_name in host.os.keys():
                host.os[os_name] = False
            host.os[e_def[u.EXPLOIT_OS]] = True
def _get_host_value(self, address):
return float(self.sensitive_hosts.get(address, self.base_host_value))
    def _generate_firewall(self, restrictiveness):
        """Generate the firewall rules and store them in ``self.firewall``.

        Parameters
        ----------
        restrictiveness : int
            parameter that controls how many services are blocked by
            firewall between zones (i.e. between internet, DMZ, sensitive
            and user zones).

        Notes
        -----
        Sets ``self.firewall`` (this method returns None) to a dict mapping
        each (src, dest) subnet connection to the set of services whose
        traffic is allowed between that pair of subnets.

        Traffic from at least one service running on each subnet will be
        allowed between each zone. This may mean more services will be allowed
        than restrictiveness parameter.

        NOTE(review): services are drawn from ``list(dest_avail)`` where
        dest_avail is a set of strings, so draw order depends on
        PYTHONHASHSEED -- rules are not reproducible across processes even
        with a fixed numpy seed. Confirm whether that matters to callers.
        """
        num_subnets = len(self.subnets)
        firewall = {}
        # find services running on each subnet that are vulnerable
        subnet_services = {}
        subnet_services[u.INTERNET] = set()
        for host_addr, host in self.hosts.items():
            subnet = host_addr[0]
            if subnet not in subnet_services:
                subnet_services[subnet] = set()
            for e_def in self.exploits.values():
                if self._host_is_vulnerable_to_exploit(host, e_def):
                    subnet_services[subnet].add(e_def[u.EXPLOIT_SERVICE])
        for src in range(num_subnets):
            for dest in range(num_subnets):
                if src == dest or not self.topology[src][dest]:
                    # no inter subnet connection so no firewall
                    continue
                elif src > SENSITIVE and dest > SENSITIVE:
                    # all services allowed between user subnets
                    allowed = set(self.services)
                    firewall[(src, dest)] = allowed
                    continue
                # else src and dest in different zones => block services based
                # on restrictiveness
                dest_avail = subnet_services[dest].copy()
                if len(dest_avail) < restrictiveness:
                    # restrictiveness not limiting allowed traffic, all
                    # services allowed
                    firewall[(src, dest)] = dest_avail.copy()
                    continue
                # add at least one service to allowed service
                dest_allowed = np.random.choice(list(dest_avail))
                # for dest subnet choose available services upto
                # restrictiveness limit or all services
                dest_avail.remove(dest_allowed)
                allowed = set()
                allowed.add(dest_allowed)
                while len(allowed) < restrictiveness:
                    dest_allowed = np.random.choice(list(dest_avail))
                    if dest_allowed not in allowed:
                        allowed.add(dest_allowed)
                        dest_avail.remove(dest_allowed)
                firewall[(src, dest)] = allowed
        self.firewall = firewall
    def _generate_exploits(self, num_exploits, exploit_cost, exploit_probs):
        """Create ``self.exploits``: a map from exploit name to definition.

        Each exploit targets one (service, os) pair, where os may be None
        for an OS-agnostic exploit; pairs are drawn at random until
        num_exploits distinct exploits exist.
        """
        exploits = {}
        exploit_probs = self._get_exploit_probs(num_exploits, exploit_probs)
        # add None since some exploits might work for all OS
        possible_os = self.os + [None]
        # we create one exploit per service
        exploits_added = 0
        # NOTE(review): this loop only terminates once num_exploits distinct
        # (service, os) pairs have been drawn, so num_exploits must not
        # exceed num_services * (num_os + 1) -- confirm callers enforce this.
        while exploits_added < num_exploits:
            srv = np.random.choice(self.services)
            os = np.random.choice(possible_os)
            e_name = f"e_{srv}"
            if os is not None:
                e_name += f"_{os}"
            if e_name not in exploits:
                exploits[e_name] = {
                    u.EXPLOIT_SERVICE: srv,
                    u.EXPLOIT_OS: os,
                    u.EXPLOIT_PROB: exploit_probs[exploits_added],
                    u.EXPLOIT_COST: exploit_cost}
                exploits_added += 1
        self.exploits = exploits
def _get_exploit_probs(self, num_exploits, exploit_probs):
if exploit_probs is None:
exploit_probs = np.random.random_sample(num_exploits)
elif exploit_probs == 'mixed':
# success probability of low, med, high attack complexity
if num_exploits == 1:
# for case where only 1 service ignore low probability actions
# since could lead to unnecessarily long attack paths
levels = [0.6, 0.9]
probs = [0.5, 0.5]
else:
levels = [0.3, 0.6, 0.9]
probs = [0.2, 0.4, 0.4]
exploit_probs = np.random.choice(levels, num_exploits, p=probs)
elif type(exploit_probs) is list:
if len(exploit_probs) == num_exploits:
raise ValueError("Length of exploit probability list must "
"equal number of exploits")
for e in exploit_probs:
if e <= 0.0 or e > 1.0:
raise ValueError("Exploit probs must be in (0.0, 1.0]")
else:
if exploit_probs <= 0.0 or exploit_probs > 1.0:
raise ValueError("Exploit probs must be in (0.0, 1.0]")
exploit_probs = [exploit_probs] * num_exploits
return exploit_probs
| [
"numpy.random.seed",
"numpy.random.random_sample",
"numpy.zeros",
"nasim.scenarios.Scenario",
"numpy.random.randint",
"numpy.random.poisson",
"numpy.random.choice",
"numpy.random.rand"
] | [((7546, 7585), 'nasim.scenarios.Scenario', 'Scenario', (['scenario_dict'], {'name': 'self.name'}), '(scenario_dict, name=self.name)\n', (7554, 7585), False, 'from nasim.scenarios import Scenario\n'), ((8225, 8261), 'numpy.zeros', 'np.zeros', (['(num_subnets, num_subnets)'], {}), '((num_subnets, num_subnets))\n', (8233, 8261), True, 'import numpy as np\n'), ((5732, 5752), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5746, 5752), True, 'import numpy as np\n'), ((10058, 10103), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.subnets[subnet_id]'], {}), '(0, self.subnets[subnet_id])\n', (10075, 10103), True, 'import numpy as np\n'), ((15178, 15205), 'numpy.random.poisson', 'np.random.poisson', (['lambda_V'], {}), '(lambda_V)\n', (15195, 15205), True, 'import numpy as np\n'), ((15976, 16001), 'numpy.random.choice', 'np.random.choice', (['self.os'], {}), '(self.os)\n', (15992, 16001), True, 'import numpy as np\n'), ((16094, 16119), 'numpy.random.choice', 'np.random.choice', (['prev_os'], {}), '(prev_os)\n', (16110, 16119), True, 'import numpy as np\n'), ((17755, 17778), 'numpy.random.randint', 'np.random.randint', (['size'], {}), '(size)\n', (17772, 17778), True, 'import numpy as np\n'), ((22546, 22577), 'numpy.random.choice', 'np.random.choice', (['self.services'], {}), '(self.services)\n', (22562, 22577), True, 'import numpy as np\n'), ((22595, 22624), 'numpy.random.choice', 'np.random.choice', (['possible_os'], {}), '(possible_os)\n', (22611, 22624), True, 'import numpy as np\n'), ((23193, 23230), 'numpy.random.random_sample', 'np.random.random_sample', (['num_exploits'], {}), '(num_exploits)\n', (23216, 23230), True, 'import numpy as np\n'), ((10841, 10866), 'numpy.random.choice', 'np.random.choice', (['self.os'], {}), '(self.os)\n', (10857, 10866), True, 'import numpy as np\n'), ((14205, 14221), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14219, 14221), True, 'import numpy as np\n'), ((15490, 15524), 
'numpy.random.randint', 'np.random.randint', (['(0)', 'num_services'], {}), '(0, num_services)\n', (15507, 15524), True, 'import numpy as np\n'), ((15628, 15655), 'numpy.random.choice', 'np.random.choice', (['prev_vuls'], {}), '(prev_vuls)\n', (15644, 15655), True, 'import numpy as np\n'), ((15849, 15865), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15863, 15865), True, 'import numpy as np\n'), ((23722, 23769), 'numpy.random.choice', 'np.random.choice', (['levels', 'num_exploits'], {'p': 'probs'}), '(levels, num_exploits, p=probs)\n', (23738, 23769), True, 'import numpy as np\n'), ((10717, 10746), 'numpy.random.choice', 'np.random.choice', (['num_configs'], {}), '(num_configs)\n', (10733, 10746), True, 'import numpy as np\n'), ((15356, 15372), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15370, 15372), True, 'import numpy as np\n')] |
import numpy as np
def cosine8(rampparams, t, etc = []):
    """Superposition of up to eight cosines plus a constant offset.

    Evaluates sum_k a_k * cos(2*pi*(t - t_k) / p_k) + c for k = 1..8.

    Parameters
    ----------
    rampparams : sequence of 25 floats
        Eight (amplitude, period, phase/time offset) triples followed by
        the vertical offset c: [a1, p1, t1, ..., a8, p8, t8, c].
    t : scalar or array of time/phase points
        Points at which the model is evaluated.
    etc : list, optional
        Unused; retained for interface compatibility.

    Returns
    -------
    Model values, same shape as t.

    Revisions
    ---------
    2014-05-14 <NAME>, UChicago
        <EMAIL>
        Modified from sinnp.cos.py
    """
    two_pi = 2 * np.pi
    offset = rampparams[24]
    model = 0.0
    # accumulate the eight cosine terms in order, then add the offset last
    for k in range(8):
        amp, period, shift = rampparams[3 * k:3 * k + 3]
        model = model + amp * np.cos(two_pi * (t - shift) / period)
    return model + offset
| [
"numpy.cos"
] | [((1020, 1050), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t8) / p8)'], {}), '(2 * pi * (t - t8) / p8)\n', (1026, 1050), True, 'import numpy as np\n'), ((992, 1022), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t7) / p7)'], {}), '(2 * pi * (t - t7) / p7)\n', (998, 1022), True, 'import numpy as np\n'), ((964, 994), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t6) / p6)'], {}), '(2 * pi * (t - t6) / p6)\n', (970, 994), True, 'import numpy as np\n'), ((936, 966), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t5) / p5)'], {}), '(2 * pi * (t - t5) / p5)\n', (942, 966), True, 'import numpy as np\n'), ((908, 938), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t4) / p4)'], {}), '(2 * pi * (t - t4) / p4)\n', (914, 938), True, 'import numpy as np\n'), ((880, 910), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t3) / p3)'], {}), '(2 * pi * (t - t3) / p3)\n', (886, 910), True, 'import numpy as np\n'), ((824, 854), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t1) / p1)'], {}), '(2 * pi * (t - t1) / p1)\n', (830, 854), True, 'import numpy as np\n'), ((852, 882), 'numpy.cos', 'np.cos', (['(2 * pi * (t - t2) / p2)'], {}), '(2 * pi * (t - t2) / p2)\n', (858, 882), True, 'import numpy as np\n')] |
"""Advanced 3D Polylidar Example
This example shows the limitations of Polylidar in extracting planes from 3D point clouds.
The main limitations are:
1. Only planes that have normals at roughly [0,0,1] are extracted.
Rotate point cloud prior to sending to Polylidar to resolve issue.
2. Planes can not be on top of eachother. More precisely ponint cloud DATA can not be on top of eachother.
Resolve by paritioning the planes into seperate point clouds.
An example of two planes on top of eachtother where the DATA is NOT on top of eachother is on basic3d.py
Polylidar was primarily designed for finding flat surfaces on rooftops from downward facing sensors
where these issues are not as prevalent as seen in this advanced example.
"""
import time
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from polylidar import extractPlanesAndPolygons
from polylidarutil import (plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d,
scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE)
# arguments to polylidar used throughout this example
# zThresh/normThresh control the plane-segmentation tolerance; lmax limits
# triangle edge length in the extracted mesh
polylidar_kwargs = dict(alpha=0.0, lmax=1.0,
                        minTriangles=20, zThresh=0.1, normThresh=0.98)
# fixed seed so the synthetic point cloud is reproducible
np.random.seed(1)
# generate random plane
plane = generate_3d_plane(bounds_x=[0, 10, 0.5], bounds_y=[
    0, 10, 0.5], holes=[], height_noise=0.02, planar_noise=0.02)
# Generate 2 walls
# both walls reuse the same flat patch, rotated -90 deg about the y axis
box_side = generate_3d_plane(bounds_x=[0, 10, 0.5], bounds_y=[
    0, 10, 0.5], holes=[], height_noise=0.02, planar_noise=0.02)
rm = rotation_matrix([0, 1, 0], -math.pi/2.0)
box_side_left = apply_rotation(rm, box_side) + [0, 0, 0] # first wall
box_side_right = apply_rotation(rm, box_side) + [10, 0, 0] # second wall
# All points joined together
points = np.concatenate((plane, box_side_left, box_side_right))
# create figure and axes
fig, ax = plt.subplots(figsize=(10, 10), nrows=1, ncols=1, num='Base Frame',
                       subplot_kw=dict(projection='3d'))
# plot points
ax.scatter(*scale_points(points), c='k', s=0.4)
set_up_axes(fig, ax)
plt.show(block=False)
print("Raw Point Cloud of ground floor and two walls (Base Frame). Don't close figure.")
print("This example will show how to extract all three planes.")
print("During this process you will learn the limitations of Polylidar for 3D point clouds and how to work around them.")
input("Press Enter to Continue: ")
print("")
# extract ground plane
# only the floor is found here: polylidar extracts planes whose normal is
# roughly [0, 0, 1], so the vertical walls are missed in this frame
print("Extracting ground plane from raw point cloud. Bottom Plane Extracted")
delaunay, planes, polygons = extractPlanesAndPolygons(
    points, **polylidar_kwargs)
# delaunay.triangles is a flat index list; reshape to (n_triangles, 3)
triangles = np.asarray(delaunay.triangles).reshape(
    int(len(delaunay.triangles) / 3), 3)
plot_planes_3d(points, triangles, planes, ax)
plot_polygons_3d(points, polygons, ax, color=COLOR_PALETTE[0])
plt.show(block=False)
input("Press Enter to Continue: ")
print("")
# Rotate Point Cloud for walls
print("The walls must be rotated to be extracted such that their normal is (0,0,1). Rotated Frame.")
fig2, ax2 = plt.subplots(figsize=(10, 10), nrows=1, ncols=1, num='Rotated Frame',
                         subplot_kw=dict(projection='3d'))
# Transpose provides reverse rotation
pc_rot = apply_rotation(rm.T, points)
scatter = ax2.scatter(*scale_points(pc_rot), c='k', s=0.4)
set_up_axes(fig, ax2)
plt.show(block=False)
input("Press Enter to Continue: ")
print("")
# Seperate point clouds
print("Unfortunately these 2 walls will interefere with eachother when projected to the z=0 xy-Plane.")
print("They must be seperated into two different point clouds (orange/green)")
ax2.clear()
# split at z = -5.0, which in the rotated frame lies between the two walls
# (NOTE: threshold is specific to this synthetic scene geometry)
pc_top = pc_rot[pc_rot[:, 2] > -5.0, :]
pc_bottom = pc_rot[pc_rot[:, 2] < -5.0, :]
scatter1 = ax2.scatter(*scale_points(pc_top), c=[COLOR_PALETTE[1]], s=0.4)
scatter2 = ax2.scatter(*scale_points(pc_bottom), c=[COLOR_PALETTE[2]], s=0.4)
plt.show(block=False)
input("Press Enter to Continue: ")
print("")
# Extract planes from top and bottom
# each partition now projects cleanly onto the xy-plane, so extraction works
delaunay_top, planes_top, polygons_top = extractPlanesAndPolygons(
    pc_top, **polylidar_kwargs)
triangles_top = np.asarray(delaunay_top.triangles).reshape(
    int(len(delaunay_top.triangles) / 3), 3)
delaunay_bot, planes_bot, polygons_bot = extractPlanesAndPolygons(
    pc_bottom, **polylidar_kwargs)
triangles_bot = np.asarray(delaunay_bot.triangles).reshape(
    int(len(delaunay_bot.triangles) / 3), 3)
plot_planes_3d(pc_top, triangles_top, planes_top, ax2, color=COLOR_PALETTE[1])
plot_planes_3d(pc_bottom, triangles_bot, planes_bot,
               ax2, color=COLOR_PALETTE[2])
plt.show(block=False)
print("Showing newly extracted top and bottom planes in Rotated Frame.")
input("Press Enter to Continue: ")
print("")
print("Putting the extracted planes all together back in the Base Frame.")
print("Also plotting the polygon outline of these planes")
# just need to rotate the point cloud back to Base Frame
pc_top = apply_rotation(rm, pc_top)
pc_bottom = apply_rotation(rm, pc_bottom)
plot_planes_3d(pc_top, triangles_top, planes_top, ax, color=COLOR_PALETTE[1])
plot_planes_3d(pc_bottom, triangles_bot, planes_bot,
               ax, color=COLOR_PALETTE[2])
plot_polygons_3d(pc_top, polygons_top, ax, color=COLOR_PALETTE[1])
plot_polygons_3d(pc_bottom, polygons_bot, ax, color=COLOR_PALETTE[2])
ax.legend()
fig.show()
input("Press Enter to Exit: ")
| [
"polylidarutil.set_up_axes",
"polylidarutil.apply_rotation",
"numpy.random.seed",
"matplotlib.pyplot.show",
"polylidarutil.plot_polygons_3d",
"numpy.asarray",
"polylidarutil.rotation_matrix",
"polylidarutil.scale_points",
"polylidarutil.generate_3d_plane",
"polylidarutil.plot_planes_3d",
"polyli... | [((1301, 1318), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1315, 1318), True, 'import numpy as np\n'), ((1351, 1466), 'polylidarutil.generate_3d_plane', 'generate_3d_plane', ([], {'bounds_x': '[0, 10, 0.5]', 'bounds_y': '[0, 10, 0.5]', 'holes': '[]', 'height_noise': '(0.02)', 'planar_noise': '(0.02)'}), '(bounds_x=[0, 10, 0.5], bounds_y=[0, 10, 0.5], holes=[],\n height_noise=0.02, planar_noise=0.02)\n', (1368, 1466), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((1520, 1635), 'polylidarutil.generate_3d_plane', 'generate_3d_plane', ([], {'bounds_x': '[0, 10, 0.5]', 'bounds_y': '[0, 10, 0.5]', 'holes': '[]', 'height_noise': '(0.02)', 'planar_noise': '(0.02)'}), '(bounds_x=[0, 10, 0.5], bounds_y=[0, 10, 0.5], holes=[],\n height_noise=0.02, planar_noise=0.02)\n', (1537, 1635), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((1667, 1709), 'polylidarutil.rotation_matrix', 'rotation_matrix', (['[0, 1, 0]', '(-math.pi / 2.0)'], {}), '([0, 1, 0], -math.pi / 2.0)\n', (1682, 1709), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((1891, 1945), 'numpy.concatenate', 'np.concatenate', (['(plane, box_side_left, box_side_right)'], {}), '((plane, box_side_left, box_side_right))\n', (1905, 1945), True, 'import numpy as np\n'), ((2168, 2188), 'polylidarutil.set_up_axes', 'set_up_axes', (['fig', 'ax'], {}), '(fig, ax)\n', (2179, 2188), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((2189, 2210), 
'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2197, 2210), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2715), 'polylidar.extractPlanesAndPolygons', 'extractPlanesAndPolygons', (['points'], {}), '(points, **polylidar_kwargs)\n', (2687, 2715), False, 'from polylidar import extractPlanesAndPolygons\n'), ((2815, 2860), 'polylidarutil.plot_planes_3d', 'plot_planes_3d', (['points', 'triangles', 'planes', 'ax'], {}), '(points, triangles, planes, ax)\n', (2829, 2860), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((2861, 2923), 'polylidarutil.plot_polygons_3d', 'plot_polygons_3d', (['points', 'polygons', 'ax'], {'color': 'COLOR_PALETTE[0]'}), '(points, polygons, ax, color=COLOR_PALETTE[0])\n', (2877, 2923), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((2924, 2945), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2932, 2945), True, 'import matplotlib.pyplot as plt\n'), ((3314, 3342), 'polylidarutil.apply_rotation', 'apply_rotation', (['rm.T', 'points'], {}), '(rm.T, points)\n', (3328, 3342), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((3402, 3423), 'polylidarutil.set_up_axes', 'set_up_axes', (['fig', 'ax2'], {}), '(fig, ax2)\n', (3413, 3423), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((3424, 3445), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3432, 3445), True, 'import matplotlib.pyplot as plt\n'), ((3947, 
3968), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3955, 3968), True, 'import matplotlib.pyplot as plt\n'), ((4093, 4145), 'polylidar.extractPlanesAndPolygons', 'extractPlanesAndPolygons', (['pc_top'], {}), '(pc_top, **polylidar_kwargs)\n', (4117, 4145), False, 'from polylidar import extractPlanesAndPolygons\n'), ((4298, 4353), 'polylidar.extractPlanesAndPolygons', 'extractPlanesAndPolygons', (['pc_bottom'], {}), '(pc_bottom, **polylidar_kwargs)\n', (4322, 4353), False, 'from polylidar import extractPlanesAndPolygons\n'), ((4466, 4544), 'polylidarutil.plot_planes_3d', 'plot_planes_3d', (['pc_top', 'triangles_top', 'planes_top', 'ax2'], {'color': 'COLOR_PALETTE[1]'}), '(pc_top, triangles_top, planes_top, ax2, color=COLOR_PALETTE[1])\n', (4480, 4544), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((4545, 4631), 'polylidarutil.plot_planes_3d', 'plot_planes_3d', (['pc_bottom', 'triangles_bot', 'planes_bot', 'ax2'], {'color': 'COLOR_PALETTE[2]'}), '(pc_bottom, triangles_bot, planes_bot, ax2, color=\n COLOR_PALETTE[2])\n', (4559, 4631), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((4642, 4663), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4650, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4984, 5010), 'polylidarutil.apply_rotation', 'apply_rotation', (['rm', 'pc_top'], {}), '(rm, pc_top)\n', (4998, 5010), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((5023, 5052), 'polylidarutil.apply_rotation', 'apply_rotation', (['rm', 'pc_bottom'], {}), '(rm, pc_bottom)\n', (5037, 5052), 
False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((5053, 5130), 'polylidarutil.plot_planes_3d', 'plot_planes_3d', (['pc_top', 'triangles_top', 'planes_top', 'ax'], {'color': 'COLOR_PALETTE[1]'}), '(pc_top, triangles_top, planes_top, ax, color=COLOR_PALETTE[1])\n', (5067, 5130), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((5131, 5216), 'polylidarutil.plot_planes_3d', 'plot_planes_3d', (['pc_bottom', 'triangles_bot', 'planes_bot', 'ax'], {'color': 'COLOR_PALETTE[2]'}), '(pc_bottom, triangles_bot, planes_bot, ax, color=COLOR_PALETTE[2]\n )\n', (5145, 5216), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((5227, 5293), 'polylidarutil.plot_polygons_3d', 'plot_polygons_3d', (['pc_top', 'polygons_top', 'ax'], {'color': 'COLOR_PALETTE[1]'}), '(pc_top, polygons_top, ax, color=COLOR_PALETTE[1])\n', (5243, 5293), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((5294, 5363), 'polylidarutil.plot_polygons_3d', 'plot_polygons_3d', (['pc_bottom', 'polygons_bot', 'ax'], {'color': 'COLOR_PALETTE[2]'}), '(pc_bottom, polygons_bot, ax, color=COLOR_PALETTE[2])\n', (5310, 5363), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((1724, 1752), 'polylidarutil.apply_rotation', 'apply_rotation', (['rm', 'box_side'], {}), '(rm, box_side)\n', (1738, 1752), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, 
set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((1796, 1824), 'polylidarutil.apply_rotation', 'apply_rotation', (['rm', 'box_side'], {}), '(rm, box_side)\n', (1810, 1824), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((2132, 2152), 'polylidarutil.scale_points', 'scale_points', (['points'], {}), '(points)\n', (2144, 2152), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((2734, 2764), 'numpy.asarray', 'np.asarray', (['delaunay.triangles'], {}), '(delaunay.triangles)\n', (2744, 2764), True, 'import numpy as np\n'), ((3366, 3386), 'polylidarutil.scale_points', 'scale_points', (['pc_rot'], {}), '(pc_rot)\n', (3378, 3386), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((3818, 3838), 'polylidarutil.scale_points', 'scale_points', (['pc_top'], {}), '(pc_top)\n', (3830, 3838), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((3893, 3916), 'polylidarutil.scale_points', 'scale_points', (['pc_bottom'], {}), '(pc_bottom)\n', (3905, 3916), False, 'from polylidarutil import plot_polygons_3d, generate_3d_plane, set_axes_equal, plot_planes_3d, scale_points, rotation_matrix, apply_rotation, set_up_axes, COLOR_PALETTE\n'), ((4168, 4202), 'numpy.asarray', 'np.asarray', (['delaunay_top.triangles'], {}), '(delaunay_top.triangles)\n', (4178, 4202), True, 'import numpy as np\n'), ((4376, 4410), 'numpy.asarray', 'np.asarray', (['delaunay_bot.triangles'], {}), '(delaunay_bot.triangles)\n', (4386, 
4410), True, 'import numpy as np\n')] |
# coding: utf-8
import logging
import numpy as np
from ppyt.indicators import IndicatorBase
from ppyt.indicators.closerecenthighlow_indicators import (
CloseGtRecentHighIndicator, CloseLtRecentLowIndicator
)
logger = logging.getLogger(__name__)
class UpperBreakoutIndicator(IndicatorBase):
    """Indicator that flags an upward breakout.

    True on the first day the close rises above the recent high.
    """

    _findkey = 'UpperBreakout'

    def _build_indicator(self, span, **kwds):
        """Build the indicator data.

        Args:
            span: lookback window (in days) whose high must be exceeded
        """
        # Per-day flag: does the close exceed the recent high of the
        # previous `span` days?
        breakout = CloseGtRecentHighIndicator(stock=self.stock, span=span)
        today = breakout.data
        # The same series shifted one day into the past.
        previous_day = breakout.shifted(-1)
        # Keep only the crossing day: above the recent high today,
        # but not the day before.
        return np.logical_and(today, np.logical_not(previous_day))
class LowerBreakoutIndicator(IndicatorBase):
    """Indicator that flags a downward breakout.

    True on the first day the close falls below the recent low.
    """

    _findkey = 'LowerBreakout'

    def _build_indicator(self, span, **kwds):
        """Build the indicator data.

        Args:
            span: lookback window (in days) whose low must be undercut
        """
        # Per-day flag: does the close fall below the recent low of the
        # previous `span` days?
        breakdown = CloseLtRecentLowIndicator(stock=self.stock, span=span)
        today = breakdown.data
        # The same series shifted one day into the past.
        previous_day = breakdown.shifted(-1)
        # Keep only the crossing day: below the recent low today,
        # but not the day before.
        return np.logical_and(today, np.logical_not(previous_day))
| [
"ppyt.indicators.closerecenthighlow_indicators.CloseLtRecentLowIndicator",
"ppyt.indicators.closerecenthighlow_indicators.CloseGtRecentHighIndicator",
"logging.getLogger",
"numpy.logical_not"
] | [((222, 249), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (239, 249), False, 'import logging\n'), ((557, 612), 'ppyt.indicators.closerecenthighlow_indicators.CloseGtRecentHighIndicator', 'CloseGtRecentHighIndicator', ([], {'stock': 'self.stock', 'span': 'span'}), '(stock=self.stock, span=span)\n', (583, 612), False, 'from ppyt.indicators.closerecenthighlow_indicators import CloseGtRecentHighIndicator, CloseLtRecentLowIndicator\n'), ((1108, 1162), 'ppyt.indicators.closerecenthighlow_indicators.CloseLtRecentLowIndicator', 'CloseLtRecentLowIndicator', ([], {'stock': 'self.stock', 'span': 'span'}), '(stock=self.stock, span=span)\n', (1133, 1162), False, 'from ppyt.indicators.closerecenthighlow_indicators import CloseGtRecentHighIndicator, CloseLtRecentLowIndicator\n'), ((780, 800), 'numpy.logical_not', 'np.logical_not', (['arr2'], {}), '(arr2)\n', (794, 800), True, 'import numpy as np\n'), ((1326, 1346), 'numpy.logical_not', 'np.logical_not', (['arr2'], {}), '(arr2)\n', (1340, 1346), True, 'import numpy as np\n')] |
from mrr import mrr

import numpy as np

# Two queries, each made up of 3 documents (encoded in `groups`).
# `label` holds the relevance label of every document, and
# `prediction` holds the score the algorithm assigned to it.
label = np.array([0, 1, 0, 1, 0, 0], dtype=np.int32)
prediction = np.array([0.1, 0.2, 0.3, 1, 0.5, 0], dtype=np.float32)
groups = np.array([3, 3], dtype=np.int32)

# The mean reciprocal rank for this data is expected to be 0.75.
result = mrr(memoryview(label),
             memoryview(prediction),
             memoryview(groups),
             len(groups))
print(result)
| [
"numpy.array"
] | [((314, 358), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 0, 0]'], {'dtype': 'np.int32'}), '([0, 1, 0, 1, 0, 0], dtype=np.int32)\n', (322, 358), True, 'import numpy as np\n'), ((372, 426), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 1, 0.5, 0]'], {'dtype': 'np.float32'}), '([0.1, 0.2, 0.3, 1, 0.5, 0], dtype=np.float32)\n', (380, 426), True, 'import numpy as np\n'), ((436, 468), 'numpy.array', 'np.array', (['[3, 3]'], {'dtype': 'np.int32'}), '([3, 3], dtype=np.int32)\n', (444, 468), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""The Mosaic Python API
.. moduleauthor:: <NAME>
This module provides abstract base classes that define the Mosaic
Python API and implement validation code. They also provide a few
convenience functions implemented in terms of the raw API.
Concrete implementations subclass the abstract base classes and
implement all the abstract properties.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
from abc import ABCMeta, abstractproperty
import collections
import itertools as IT
import re
import numpy as N
import numpy.linalg as LA
import mosaic.utility
# Mosaic version number (major, minor)
# An increase in the minor number indicates a superset of the preceding
# versions. An increase in the major number indicated an incompatibility
# with preceding versions.
MOSAIC_VERSION = (1, 0)
# Base class for all classes that represent top-level data items,
# i.e. items that can be stored in files, retrieved, etc.
class MosaicDataItem(object):
    """Base class for top-level data items

    Instances of subclasses of MosaicDataItem can be stored in files.
    """
    # NOTE(review): Python-2 style metaclass declaration; under Python 3
    # this is a plain class attribute with no metaclass effect -- confirm
    # which interpreter versions this module must support.
    __metaclass__ = ABCMeta
# An atom is defined by the following characteristics:
#
# - a type, which is
# - 'element' for a standard atom that has a chemical element.
# The element symbol is the value of the name attribute.
# - 'cgparticle' , for particles in coarse-grained models
# representing multiple atoms.
# - 'dummy' for pseudo-atoms that don't physically exist,
# such as virtual interaction sites.
# - '' for anything else
#
# - a name, which is the chemical element symbol for atoms of type
# 'element', and any suitable identifier for the other types
#
# - a label, which identifies the atom uniquely inside its fragment
#
# - the number of sites, equal to the number of Cartesian coordinate
# sets required by the atom. It is > 1 for path integral, wave
# functions, atoms with alternate positions in crystal structures, etc.
class MosaicAtom(object):
    """Atom inside a :py:class:`MosaicUniverse`

    See the :ref:`data model documentation<mosaic-atom>` for atoms.
    """
    # NOTE(review): Python-2 style metaclass declaration; has no effect
    # under Python 3.
    __metaclass__ = ABCMeta

    # API properties

    @abstractproperty
    def label(self):
        """An ASCII string not containing dots and identifying
        the atom uniquely inside its parent fragment.

        See the :ref:`data model documentation<mosaic-atom-label>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def name(self):
        """An ASCII string describing the type of the atom. For 'real'
        atoms in the chemical sense, this must be the chemical element
        symbol.

        See the :ref:`data model documentation<mosaic-atom-name>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def type(self):
        """A string identifying the type of the atom.

        See the :ref:`data model documentation<mosaic-atom-type>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def number_of_sites(self):
        """The number of sites associated with the atom.

        See the :ref:`data model documentation<mosaic-atom-nsites>`.
        """
        raise NotImplementedError()

    # Property shared by all atoms

    @property
    def number_of_atoms(self):
        """The number of atoms associated with the atom (always 1)
        """
        return 1

    # Equivalence test

    def validate_equivalence(self, other):
        """Verify the equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :raises TypeError: if other is not a :py:class:`MosaicAtom`
        :raises ValueError: if other is not equivalent to self
        """
        if not isinstance(other, MosaicAtom):
            raise TypeError("%s is not an atom" % str(type(other)))
        if self.label != other.label:
            raise ValueError("labels differ: %s != %s"
                             % (repr(self.label), repr(other.label)))
        if self.name != other.name:
            raise ValueError("names differ: %s != %s"
                             % (repr(self.name), repr(other.name)))
        if self.type != other.type:
            raise ValueError("types differ: %s != %s"
                             % (repr(self.type), repr(other.type)))
        if self.number_of_sites != other.number_of_sites:
            raise ValueError("site numbers differ: %d != %d"
                             % (self.number_of_sites, other.number_of_sites))

    def is_equivalent(self, other):
        """Check for equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :returns: True if other is equivalent to self
        :rtype: bool
        """
        try:
            self.validate_equivalence(other)
            return True
        except Exception:
            # Was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; restrict to real errors.
            return False

    # Validation

    _allowed_types = ('element', 'cgparticle', 'dummy', '')

    # Chemical element symbols (plus 'D' for deuterium) accepted for
    # atoms of type 'element'.
    _elements = ('Ac', 'Ag', 'Al', 'Am', 'Ar', 'As', 'At', 'Au',
                 'B', 'Ba', 'Be', 'Bh', 'Bi', 'Bk', 'Br',
                 'C', 'Ca', 'Cd', 'Ce', 'Cf', 'Cl', 'Cm',
                 'Co', 'Cn', 'Cr', 'Cs', 'Cu',
                 'D', 'Db', 'Ds', 'Dy',
                 'Er', 'Es', 'Eu',
                 'F', 'Fe', 'Fm', 'Fr',
                 'Ga', 'Gd', 'Ge',
                 'H', 'He', 'Hf', 'Hg', 'Ho', 'Hs',
                 'I', 'In', 'Ir',
                 'K', 'Kr',
                 'La', 'Li', 'Lr', 'Lu',
                 'Md', 'Mg', 'Mn', 'Mo', 'Mt',
                 'N', 'Na', 'Nb', 'Nd', 'Ne', 'Ni', 'No', 'Np',
                 'O', 'Os',
                 'P', 'Pa', 'Pb', 'Pd', 'Pm', 'Po', 'Pr', 'Pt', 'Pu',
                 'Ra', 'Rb', 'Re', 'Rf', 'Rg', 'Rh', 'Rn', 'Ru',
                 'S', 'Sb', 'Sc', 'Se', 'Sg', 'Si', 'Sm', 'Sn', 'Sr',
                 'Ta', 'Tb', 'Tc', 'Te', 'Th', 'Ti', 'Tl', 'Tm',
                 'U', 'V', 'W', 'Xe', 'Y', 'Yb', 'Zn', 'Zr')

    @classmethod
    def validate_element_name(cls, name):
        """Verify that name is a known chemical element symbol.

        :parameter name: a candidate element symbol
        :raises ValueError: if name is not a known element symbol
        """
        # First parameter renamed from 'self' to 'cls': this is a
        # classmethod, so it receives the class, not an instance.
        validate_value(name, cls._elements, "name")

    def validate(self):
        """Verify that the object satisfies the constraints of the
        Mosaic data model.

        :raises ValueError: if the object is not valid Mosaic data
        """
        validate_label(self.label, "Atom.label")
        validate_value(self.type, self._allowed_types, "Atom type")
        validate_label(self.name, "Atom.name")
        if self.type == 'element':
            validate_value(self.name, self._elements, "Atom element")
        if not isinstance(self.number_of_sites, int):
            raise ValueError("Atom.number_of_sites must be an integer")
        if self.number_of_sites <= 0:
            raise ValueError("Atom.number_of_sites must be positive")
        if self.number_of_atoms != 1:
            raise ValueError("Atom.number_of_atoms must be 1")
# A fragment describes a node in the tree defining the chemical
# structure of the molecules. It can represent a molecule,
# a supermolecule, or part of a molecule. A fragment is defined
# by
#
# - a species, which is a text string describing the chemical
# nature of the fragment
#
# - a label, which describes the role of the fragment inside its
# parent structure, and must be unique within that parent structure
#
# - a list of sub-fragments
#
# - a list of atoms
#
# - a list of bonds
#
# - the boolean flag is_polymer. Polymer fragments have an emtpy
# atom list and an additional attribute 'polymer_type' whose
# allowed values are
# - 'polypeptide', for a peptide chain
# - 'polyribonucleotide', for an RNA chain
# - 'polydeoxyribonucleotide', for a DNA chain
# - 'polynucleotide', for a chain that can contain
# nucleotides with either type of sugar
# - the empty string, for any other polymer
class MosaicFragment(collections.Mapping):
"""Fragment inside a :py:class:`MosaicUniverse`
See the :ref:`data model documentation<mosaic-fragment>` for fragments.
Fragments implement the Mapping interface. Valid keys are strings
identifying atoms or sub-fragments, using dots to separate subsequent
labels. For polymer fragments, integer keys are valid as well to refer
to a specific sub-fragment in the chain.
"""
# API properties
@abstractproperty
def label(self):
"""An ASCII string not containing dots and identifying
the fragment uniquely inside its parent fragment.
See the :ref:`data model documentation<mosaic-fragment-label>`.
"""
raise NotImplementedError()
@abstractproperty
def species(self):
"""An ASCII string describing the species of the fragment
(e.g. what kind of molecule or moiety it represents).
See the :ref:`data model documentation<mosaic-fragment-species>`.
"""
raise NotImplementedError()
@abstractproperty
def fragments(self):
"""Sequence of sub-fragments, may be empty.
See the :ref:`data model documentation<mosaic-fragment-fragments>`.
"""
raise NotImplementedError()
@abstractproperty
def atoms(self):
"""Sequence of atoms, may be empty.
See the :ref:`data model documentation<mosaic-fragment-atoms>`.
"""
raise NotImplementedError()
@abstractproperty
def bonds(self):
"""Sequence of bonds, may be empty. Each bond is
repreented by a tuple (atom_ref_1, atom_ref_2, bond_order).
See the :ref:`data model documentation<mosaic-fragment-bonds>`
and the :ref:`bond reference documentation<mosaic-bonds>`.
"""
raise NotImplementedError()
@abstractproperty
def is_polymer(self):
"""True if the fragment is a polymer.
See the :ref:`data model documentation<mosaic-fragment-polymer>`.
"""
raise NotImplementedError()
@abstractproperty
def polymer_type(self):
"""String identifying the polymer type if is_polymer is true.
See the :ref:`data model documentation<mosaic-fragment-polymer>`.
"""
raise NotImplementedError()
# Properties that can be computed in terms of the API properties
@property
def number_of_atoms(self):
"""The number of atoms in the fragment (including the atoms
in sub-fragments).
"""
return sum(f.number_of_atoms for f in self.fragments) \
+ len(self.atoms)
@property
def number_of_sites(self):
"""The number of sites associated with the fragment, i.e.
the sum of the numbers of sites of all the fragment's atoms,
including those of sub-fragments.
"""
return sum(f.number_of_sites for f in self.fragments) \
+ sum(a.number_of_sites for a in self.atoms)
@property
def number_of_bonds(self):
"""The number of bonds associated with the fragment,
including the bonds inside sub-fragments.
"""
return sum(ff.number_of_bonds for ff in self.fragments) \
+ len(self.bonds)
# Equivalence test
def validate_equivalence(self, other):
"""Verify the equivalence of Python objects representing
Mosaic data. The two objects can belong to different models;
only the common API functionality is used for the test.
:parameter other: an arbitrary Python object
:raises ValueError: if other is not equivalent to self
"""
if not isinstance(other, MosaicFragment):
raise TypeError("%s is not a fragment" % str(type(other)))
if self.label != other.label:
raise ValueError("labels differ: %s != %s"
% (repr(self.label), repr(other.label)))
if self.species != other.species:
raise ValueError("species differ: %s != %s"
% (repr(self.species), repr(other.species)))
if self.is_polymer:
if not other.is_polymer:
raise ValueError("%s is not a polymer" % str(other))
if self.polymer_type != other.polymer_type:
raise ValueError("polymer types differ: %s != %s"
% (repr(self.polymer_type),
repr(other.polymer_type)))
for s, o in zip(self.fragments, other.fragments):
s.validate_equivalence(o)
for s, o in zip(self.atoms, other.atoms):
s.validate_equivalence(o)
if self._bond_set() != other._bond_set():
raise ValueError("bonds differ: %s != %s"
% (repr(self._bond_set), repr(other._bond_set)))
def _bond_set(self):
return frozenset((frozenset((a1, a2)), order)
for a1, a2, order in self.bonds)
def is_equivalent(self, other):
"""Check for equivalence of Python objects representing
Mosaic data. The two objects can belong to different models;
only the common API functionality is used for the test.
:parameter other: an arbitrary Python object
:returns: True if other is equivalent to self
:rtype: bool
"""
try:
self.validate_equivalence(other)
return True
except:
return False
# Validation
_polymer_types = ['',
'polypeptide',
'polyribonucleotide',
'polydeoxyribonucleotide',
'polynucleotide']
_bond_orders = ['', 'single', 'double', 'triple', 'quadruple', 'aromatic']
def validate(self):
"""Verify that the object satisfies the constraints of the
Mosaic data model.
:raises ValueError: if the object is not valid Mosaic data
"""
validate_label(self.label, "Fragment.label")
validate_label(self.species, "Fragment.species")
validate_value(self.is_polymer, [True, False], "Fragment.is_polymer")
if self.is_polymer:
validate_value(self.polymer_type, self._polymer_types,
"Fragment.polymer_type")
validate_sequence(self.fragments, MosaicFragment, "Fragment.fragments")
labels = set()
for f in self.fragments:
f.validate()
if f.label in labels:
raise ValueError("Label %s occurs more than once" % f.label)
labels.add(f.label)
validate_sequence(self.atoms, MosaicAtom, "Fragment.atoms")
for a in self.atoms:
a.validate()
if a.label in labels:
raise ValueError("Label %s occurs more than once" % a.label)
labels.add(a.label)
for property in ['number_of_atoms',
'number_of_sites',
'number_of_bonds']:
value = getattr(self, property)
reference = getattr(MosaicFragment, property).fget(self)
if value != reference:
raise ValueError("Fragment.%s is %s, should be %s"
% (property, str(value), str(reference)))
# Methods based on API properties
def recursive_atom_iterator(self):
"""An iterator over the atoms in the fragment, including the
atoms in sub-fragments.
"""
return IT.chain(IT.chain.from_iterable(f.recursive_atom_iterator()
for f in self.fragments),
iter(self.atoms))
def recursive_atom_path_iterator(self):
"""An iterator over the atom paths in the fragment, including the
atoms in sub-fragments.
"""
for f in self.fragments:
l = f.label
for ap in f.recursive_atom_path_iterator():
yield l + '.' + ap
for a in self.atoms:
yield a.label
def recursive_bond_iterator(self):
"""An iterator over the bonds in the fragment, including the
bonds in sub-fragments.
"""
for f in self.fragments:
l = f.label
for a1, a2, order in f.recursive_bond_iterator():
yield (l + '.' + a1, l + '.' + a2, order)
for b in self.bonds:
yield b
def site_to_atom_index_mapping(self):
"""
:returns: an array whose element [s] is the atom index
corresponding to site index s.
:rtype: numpy.ndarray
"""
ns = [a.number_of_sites for a in self.recursive_atom_iterator()]
return N.repeat(N.arange(len(ns)), ns)
def _atom_to_site_index_mapping(self):
ns = [a.number_of_sites for a in self.recursive_atom_iterator()]
return N.add.accumulate(ns)
def atom_to_site_index_mapping(self):
"""
:returns: an array whose element [s] is the site index
corresponding to the first site of the atom with
atom index s.
:rtype: numpy.ndarray
"""
m = self._atom_to_site_index_mapping()
m[1:] = m[:-1]
m[0] = 0
return m
# Mapping interface
def __getitem__(self, item):
if isinstance(item, int) and self.is_polymer:
return self.fragments[item]
# A rather inefficient implementation of substructure selection
# using only the public API elements. Concrete implementations
# can do better.
assert isinstance(item, str)
path = item.split('.')
item = None
for f in self.fragments:
if f.label == path[0]:
item = f
break
if item is None:
for a in self.atoms:
if a.label == path[0]:
item = a
break
if item is None:
raise KeyError(path[0])
if len(path) > 1:
return item['.'.join(path[1:])]
else:
return item
def __len__(self):
"""
:returns: the number of sub-elements, i.e. atoms and sub-fragments
:rtype: int
"""
return len(self.fragments) + len(self.atoms)
def __iter__(self):
"""Iterate over sub-fragments first, then over the fragment's atoms.
"""
for f in self.fragments:
yield f.label
for a in self.atoms:
yield a.label
# Override some methods from collections.Mapping for efficiency
def values(self):
return self.fragments + self.atoms
def itervalues(self):
return IT.chain(iter(self.fragments), iter(self.atoms))
# A universe description consists of
#
# - the cell shape
#
# - a list of symmetry transformations
#
# - the chemical structure of its contents
#
# - a label indicating additional conventions that this universe
# conforms to, in particular naming conventions for atoms and molecules
class MosaicUniverse(MosaicDataItem):
"""Universe
See the :ref:`data model documentation<mosaic-universe>` for universes.
"""
# API properties
@abstractproperty
def cell_shape(self):
"""A string identifying the cell shape.
See the :ref:`data model documentation<mosaic-universe-cell-shape>`.
"""
raise NotImplementedError()
@abstractproperty
def symmetry_transformations(self):
"""A sequence of symmetry transformations, possibly empty.
Each symmetry tranformation is defined by a two-element tuple,
whose first item is a rotation matrix and whose second item
is a translation vector.
See the :ref:`data model documentation<mosaic-universe-symmetry>`.
"""
raise NotImplementedError()
@abstractproperty
def convention(self):
"""An ASCII string naming the conventions used inside the
definitions of fragements and atoms.
See the :ref:`data model documentation<mosaic-universe-convention>`.
"""
raise NotImplementedError()
@abstractproperty
def molecules(self):
"""A sequence of molecule specifications. Each element is a
two-element tuple, whose first element is a :py:class:`MosaicFragment`
and whose second element is an integer specifying the number of
copies of the molecule.
See the :ref:`data model documentation<mosaic-universe-molecules>`.
"""
raise NotImplementedError()
# Properties that can be computed in terms of the API properties
_cell_parameter_array_shapes = {
'infinite': (0,),
'cube': (),
'cuboid': (3,),
'parallelepiped': (3, 3)}
@property
def cell_parameter_array_shape(self):
"""The shape of a valid cell_parameters array
in a :py:class:`MosaicConfiguration`.
"""
return self._cell_parameter_array_shapes[self.cell_shape]
@property
def number_of_molecules(self):
"The number of molecules in the universe."
return sum(n for f, n in self.molecules)
@property
def number_of_atoms(self):
"The number of atoms in the universe."
return sum(n * f.number_of_atoms for f, n in self.molecules)
@property
def number_of_sites(self):
"The number of sites in the universe."
return sum(n * f.number_of_sites for f, n in self.molecules)
@property
def number_of_bonds(self):
"The number of bonds in the universe."
return sum(n * f.number_of_bonds for f, n in self.molecules)
@property
def number_of_template_atoms(self):
"""The number of template atoms in the universe, i.e. the
total number of atoms in all fragment definitions. It is
equal to the number of atoms iff all molecule repetition
counts are 1.
"""
return sum(f.number_of_atoms for f, n in self.molecules)
@property
def number_of_template_sites(self):
"""The number of template sites in the universe, i.e. the
total number of sites in all fragment definitions. It is
equal to the number of sites iff all molecule repetition
counts are 1.
"""
return sum(f.number_of_sites for f, n in self.molecules)
# Equivalence test
def validate_equivalence(self, other):
"""Verify the equivalence of Python objects representing
Mosaic data. The two objects can belong to different models;
only the common API functionality is used for the test.
:parameter other: an arbitrary Python object
:raises ValueError: if other is not equivalent to self
"""
if not isinstance(other, MosaicUniverse):
raise TypeError("%s is not a universe" % str(type(other)))
if self.cell_shape != other.cell_shape:
raise ValueError("cell shapes differ: %s != %s"
% (repr(self.cell_shape), repr(other.cell_shape)))
if self.convention != other.convention:
raise ValueError("naming conventions differ: %s != %s"
% (repr(self.convention),
repr(other.convention)))
for (r1, t1), (r2, t2) in zip(self.symmetry_transformations,
other.symmetry_transformations):
if (r1 != r2).any():
raise ValueError("rotation matrices differ: %s != %s"
% (str(r1), str(r2)))
if (t1 != t2).any():
raise ValueError("translation vectors differ: %s != %s"
% (str(t1), str(t2)))
for (sf, sc), (of, oc) in zip(self.molecules, other.molecules):
sf.validate_equivalence(of)
if sc != oc:
raise ValueError("molecule counts differ: %d != %d" % (sc, oc))
def is_equivalent(self, other):
"""Check for equivalence of Python objects representing
Mosaic data. The two objects can belong to different models;
only the common API functionality is used for the test.
:parameter other: an arbitrary Python object
:returns: True if other is equivalent to self
:rtype: bool
"""
try:
self.validate_equivalence(other)
return True
except:
return False
# Validation
def validate(self):
    """Verify that the object satisfies the constraints of the
    Mosaic data model.

    :raises ValueError: if the object is not valid Mosaic data
    """
    validate_value(self.cell_shape,
                   self._cell_parameter_array_shapes.keys(),
                   "Universe.cell_shape")
    validate_label(self.convention, "Universe.convention")
    # Each additional test callable receives one element of the
    # sequence, i.e. a whole (rotation, translation) tuple — this
    # matches validate_sequence(), which calls test_fn(el) with a
    # single argument.  The previous two-argument lambdas
    # (lambda rot, trans: ...) could never be invoked successfully.
    # collections.abc.Sequence: the bare collections.Sequence alias
    # was removed in Python 3.10.
    validate_sequence(self.symmetry_transformations,
                      collections.abc.Sequence,
                      "Universe.symmetry_transformations",
                      ((lambda t: len(t) == 2,
                        "must have length 2"),
                       (lambda t: getattr(t[0], "shape", None) == (3, 3),
                        "rotation matrix shape is not (3, 3)"),
                       (lambda t: getattr(t[1], "shape", None) == (3,),
                        "translation vector shape is not (3,)"),
                       (lambda t: t[0].dtype == N.float64
                                  and t[1].dtype == N.float64,
                        "rotation and translation must be float64")))
    if self.cell_shape == "infinite" \
       and len(self.symmetry_transformations) > 0:
        raise ValueError("Symmetry transformations are allowed "
                         "only in periodic universes")
    for f, n in self.molecules:
        f.validate()
        if not isinstance(n, int):
            raise ValueError("Molecule count must be an integer")
        if n <= 0:
            raise ValueError("Molecule count must be positive")
    # Cross-check the derived count properties against the reference
    # implementations on the abstract base class.
    for property in ['cell_parameter_array_shape',
                     'number_of_molecules', 'number_of_atoms',
                     'number_of_sites', 'number_of_bonds',
                     'number_of_template_atoms',
                     'number_of_template_sites']:
        value = getattr(self, property)
        reference = getattr(MosaicUniverse, property).fget(self)
        if value != reference:
            raise ValueError("Universe.%s is %s, should be %s"
                             % (property, str(value), str(reference)))
# Methods based on API properties
def recursive_atom_iterator(self):
    """Iterate over all atoms in the universe.

    Each fragment's atoms are yielded once per repetition of the
    corresponding molecule.
    """
    for fragment, count in self.molecules:
        for _ in range(count):
            yield from fragment.recursive_atom_iterator()
def bond_index_array(self):
    """Returns an integer array of shape (N, 2), where N
    is the total number of bonds in the universe. The entries
    [i, 0] and [i, 1] refer to the two atoms that are connected
    by bond i. The entry [i, 0] is smaller than the entry [i, 1].

    :returns: the bond index array
    :rtype: numpy.ndarray
    """
    natoms = 0
    bonds = []
    for fragment, count in self.molecules:
        # Atom paths enumerate the fragment's atoms in index order, so
        # a path's position in f_paths is the atom's index within one
        # copy of the fragment.
        f_paths = list(fragment.recursive_atom_path_iterator())
        # Per-template bond pairs, sorted so the smaller index is first.
        f_bonds = [sorted((f_paths.index(a1), f_paths.index(a2)))
                   for a1, a2, order in fragment.recursive_bond_iterator()]
        # Replicate the template bonds for each copy of the molecule,
        # shifting by the number of atoms preceding the copy.
        for _ in range(count):
            bonds.extend([(natoms+a1, natoms+a2) for a1, a2 in f_bonds])
            natoms += len(f_paths)
    return N.array(bonds)
def site_to_atom_index_mapping(self):
    """
    :returns: an array whose element [s] is the atom index
              corresponding to site index s.
    :rtype: numpy.ndarray
    """
    pieces = []
    atom_offset = 0
    for fragment, count in self.molecules:
        template_map = fragment.site_to_atom_index_mapping()
        atoms_per_copy = fragment.number_of_atoms
        # Every copy of the molecule has its own atoms, so the offset
        # advances once per copy.
        for _ in range(count):
            pieces.append(template_map + atom_offset)
            atom_offset += atoms_per_copy
    return N.concatenate(pieces)
def atom_to_site_index_mapping(self):
    """
    :returns: an array whose element [s] is the site index
              corresponding to the first site of the atom with
              atom index s.
    :rtype: numpy.ndarray
    """
    nsites = 0
    total = []
    for fragment, count in self.molecules:
        # Private per-template helper; presumably returns cumulative
        # per-atom site counts whose last element is the fragment's
        # total site count (hence the per_fragment[-1] increment) —
        # confirm against the fragment implementation.
        per_fragment = fragment._atom_to_site_index_mapping()
        for _ in range(count):
            total.append(per_fragment + nsites)
            nsites += per_fragment[-1]
    m = N.concatenate(total)
    # Shift right by one and anchor at zero so that entry [s] becomes
    # the index of the FIRST site of atom s rather than the cumulative
    # site count including atom s.
    m[1:] = m[:-1]
    m[0] = 0
    return m
def template_site_to_template_atom_index_mapping(self):
    """
    :returns: an array whose element [s] is the template atom index
              corresponding to template site index s.
    :rtype: numpy.ndarray
    """
    # Unlike the per-instance mapping, every fragment contributes its
    # mapping exactly once, regardless of the repetition count.
    chunks = []
    atom_offset = 0
    for fragment, _count in self.molecules:
        chunks.append(fragment.site_to_atom_index_mapping() + atom_offset)
        atom_offset += fragment.number_of_atoms
    return N.concatenate(chunks)
def site_to_template_index_mapping(self):
    """
    :returns: an array whose element [s] is the template site index
              corresponding to site index s.
    :rtype: numpy.ndarray
    """
    chunks = []
    template_offset = 0
    for fragment, count in self.molecules:
        fragment_sites = fragment.number_of_sites
        template_indices = N.arange(fragment_sites) + template_offset
        # All copies of a molecule map onto the same template sites,
        # so the offset advances once per fragment type.
        chunks.extend([template_indices] * count)
        template_offset += fragment_sites
    return N.concatenate(chunks)
def atom_to_template_index_mapping(self):
    """
    :returns: an array whose element [s] is the template atom index
              corresponding to atom index s.
    :rtype: numpy.ndarray
    """
    chunks = []
    template_offset = 0
    for fragment, count in self.molecules:
        fragment_atoms = fragment.number_of_atoms
        template_indices = N.arange(fragment_atoms) + template_offset
        # All copies of a molecule share the same template atoms, so
        # the offset advances once per fragment type.
        chunks.extend([template_indices] * count)
        template_offset += fragment_atoms
    return N.concatenate(chunks)
# Properties associate a value with each atom or site in a
# universe. The value can be an array of any shape and any element
# type, but shape and element type are the same for all atoms.
# Properties also have a name that describes the quantity they
# store, and the units of this quantity.
#
# Properties whose type starts with "template" are defined only for
# each atom or site in the molecule templates, not for each individual
# molecular instance. They are used for properties that are the same
# for all molecules of the same type (e.g. atomic mass).

class MosaicProperty(MosaicDataItem):

    """Property

    See the :ref:`data model documentation<mosaic-property>` for properties.
    """

    # API properties

    @abstractproperty
    def type(self):
        """A string identifying the type of the property.
        See the :ref:`data model documentation<mosaic-property-type>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def name(self):
        """An ASCII string describing the property.
        See the :ref:`data model documentation<mosaic-property-name>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def units(self):
        """A string identifying the physical units of the property.
        See the :ref:`data model documentation<mosaic-property-units>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def universe(self):
        "The :py:class:`MosaicUniverse` for which the property is defined."
        raise NotImplementedError()

    @abstractproperty
    def data(self):
        """An array containing the property's values.
        See the :ref:`data model documentation<mosaic-property-data>`.
        """
        raise NotImplementedError()

    # Properties that can be computed in terms of the API properties

    @property
    def element_shape(self):
        """The shape of the sub-array containing the property for one
        atom or site.
        """
        return self.data.shape[1:]

    # Equivalence test

    def validate_equivalence(self, other):
        """Verify the equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :raises ValueError: if other is not equivalent to self
        """
        if not isinstance(other, MosaicProperty):
            raise TypeError("%s is not a property" % str(type(other)))
        self.universe.validate_equivalence(other.universe)
        if self.type != other.type:
            raise ValueError("types differ: %s != %s"
                             % (repr(self.type), repr(other.type)))
        if self.name != other.name:
            raise ValueError("names differ: %s != %s"
                             % (repr(self.name), repr(other.name)))
        if self.units != other.units:
            raise ValueError("units differ: %s != %s"
                             % (repr(self.units), repr(other.units)))
        if self.element_shape != other.element_shape:
            raise ValueError("element shapes differ: %s != %s"
                             % (repr(self.element_shape),
                                repr(other.element_shape)))
        if self.data.dtype != other.data.dtype:
            raise ValueError("data dtypes differ: %s != %s"
                             % (repr(self.data.dtype),
                                repr(other.data.dtype)))
        if (self.data != other.data).any():
            raise ValueError("data arrays differ")

    def is_equivalent(self, other):
        """Check for equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :returns: True if other is equivalent to self
        :rtype: bool
        """
        try:
            self.validate_equivalence(other)
            return True
        except Exception:
            # Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            return False

    # Validation

    # N.bool_ replaces the N.bool alias, which was deprecated and
    # finally removed in NumPy 1.24; dtype comparison treats bool and
    # N.bool_ identically, so behavior is unchanged on older NumPy.
    _allowed_dtypes = [N.int8, N.int16, N.int32, N.int64,
                       N.uint8, N.uint16, N.uint32, N.uint64,
                       N.float32, N.float64,
                       N.bool_]

    _allowed_types = ["atom", "site", "template_atom", "template_site"]

    def validate(self):
        """Verify that the object satisfies the constraints of the
        Mosaic data model.

        :raises ValueError: if the object is not valid Mosaic data
        """
        validate_value(self.type, self._allowed_types, "Property.type")
        validate_label(self.name, "Property.name")
        validate_units(self.units, "Property.units")
        validate_type(self.universe, MosaicUniverse, "Property.universe")
        self.universe.validate()
        el_shape = self.element_shape
        # Expected first dimension depends on the property type:
        # one entry per (template) atom or site.
        data_shape = ({"atom": self.universe.number_of_atoms,
                       "site": self.universe.number_of_sites,
                       "template_atom": self.universe.number_of_template_atoms,
                       "template_site": self.universe.number_of_template_sites,
                       }[self.type],) + el_shape
        validate_array(self.data,
                       data_shape,
                       self._allowed_dtypes,
                       "Property.data")
# Labels associate a text string with each atom or site in a
# universe. They work much like properties, except for having a string
# value. Labels are a separate data item because the differences
# compared to numerical properties (no element shape, no unit) would
# make validation of a common data item type too complicated.
#
# Labels whose type starts with "template" are defined only for
# each atom or site in the molecule templates, not for each individual
# molecular instance.

class MosaicLabel(MosaicDataItem):

    """Label

    See the :ref:`data model documentation<mosaic-label>` for labels.
    """

    # API properties

    @abstractproperty
    def type(self):
        """A string identifying the type of the label.
        See the :ref:`data model documentation<mosaic-label-type>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def name(self):
        """An ASCII string describing the label.
        See the :ref:`data model documentation<mosaic-label-name>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def universe(self):
        "The :py:class:`MosaicUniverse` for which the property is defined."
        raise NotImplementedError()

    @abstractproperty
    def strings(self):
        """A sequence of strings representing a label for each atom or site.
        See the :ref:`data model documentation<mosaic-label-strings>`.
        """
        raise NotImplementedError()

    # Equivalence test

    def validate_equivalence(self, other):
        """Verify the equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :raises ValueError: if other is not equivalent to self
        """
        if not isinstance(other, MosaicLabel):
            raise TypeError("%s is not a label" % str(type(other)))
        self.universe.validate_equivalence(other.universe)
        if self.type != other.type:
            raise ValueError("types differ: %s != %s"
                             % (repr(self.type), repr(other.type)))
        if self.name != other.name:
            raise ValueError("names differ: %s != %s"
                             % (repr(self.name), repr(other.name)))
        for s1, s2 in zip(self.strings, other.strings):
            if s1 != s2:
                raise ValueError("labels differ: %s != %s"
                                 % (repr(s1), repr(s2)))

    def is_equivalent(self, other):
        """Check for equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :returns: True if other is equivalent to self
        :rtype: bool
        """
        try:
            self.validate_equivalence(other)
            return True
        except Exception:
            # Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            return False

    # Validation

    _allowed_types = ["atom", "site", "template_atom", "template_site"]

    def validate(self):
        """Verify that the object satisfies the constraints of the
        Mosaic data model.

        :raises ValueError: if the object is not valid Mosaic data
        """
        validate_value(self.type, self._allowed_types, "Label.type")
        validate_label(self.name, "Label.name")
        validate_type(self.universe, MosaicUniverse, "Label.universe")
        self.universe.validate()
        validate_sequence(self.strings, str, "Label.strings")
        # Exactly one string per (template) atom or site.
        nstrings = {"atom": self.universe.number_of_atoms,
                    "site": self.universe.number_of_sites,
                    "template_atom": self.universe.number_of_template_atoms,
                    "template_site": self.universe.number_of_template_sites,
                    }[self.type]
        if len(self.strings) != nstrings:
            raise ValueError("incorrect number of strings")
        for s in self.strings:
            validate_ascii_string(s, "label")
# A configuration specifies a coordinate for each site and
# the shape and size of the cell.

class MosaicConfiguration(MosaicDataItem):

    """Configuration

    See the :ref:`data model documentation<mosaic-configuration>`
    for configurations.
    """

    # API properties

    @abstractproperty
    def universe(self):
        "The :py:class:`MosaicUniverse` for which the property is defined."
        raise NotImplementedError()

    @abstractproperty
    def cell_parameters(self):
        """An array containing the parameters defining the shape and size
        of the cell.
        See the :ref:`data model documentation<mosaic-configuration-cp>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def positions(self):
        """An array containing an (x, y, z) position for each site.
        See the :ref:`data model documentation<mosaic-configuration-pos>`.
        """
        raise NotImplementedError()

    # Methods based on API properties

    def lattice_vectors(self):
        """
        :returns: a sequence of arrays of shape (3,) containing the lattice
                  vectors for the simulation cell.
        :rtype: tuple
        """
        # Dispatch on the universe's cell shape; an infinite universe
        # has no lattice vectors.
        return {'infinite': lambda p: (),
                'cube': lambda p: (N.array([p, 0., 0.], dtype=p.dtype),
                                   N.array([0., p, 0.], dtype=p.dtype),
                                   N.array([0., 0., p], dtype=p.dtype)),
                'cuboid': lambda p: (N.array([p[0], 0., 0.], dtype=p.dtype),
                                     N.array([0., p[1], 0.], dtype=p.dtype),
                                     N.array([0., 0., p[2]], dtype=p.dtype)),
                'parallelepiped': lambda p: tuple(N.array(v) for v in p),
                }[self.universe.cell_shape](self.cell_parameters)

    def cell_volume(self):
        """
        :returns: the volume the simulation cell, or None for
                  infinite universes
        :rtype: float
        """
        return {'infinite': lambda p: None,
                'cube': lambda p: float(p*p*p),
                'cuboid': lambda p: p[0]*p[1]*p[2],
                # Parallelepiped volume is |det| of the matrix of
                # lattice vectors.
                'parallelepiped': lambda p: abs(LA.det(p)),
                }[self.universe.cell_shape](self.cell_parameters)

    # Equivalence test

    def validate_equivalence(self, other):
        """Verify the equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :raises ValueError: if other is not equivalent to self
        """
        if not isinstance(other, MosaicConfiguration):
            raise TypeError("%s is not a configuration" % str(type(other)))
        self.universe.validate_equivalence(other.universe)
        if (self.cell_parameters != other.cell_parameters).any():
            raise ValueError("cell parameters differ: %s != %s"
                             % (repr(self.cell_parameters),
                                repr(other.cell_parameters)))
        if self.cell_parameters.dtype != other.cell_parameters.dtype:
            raise ValueError("cell parameter dtypes differ: %s != %s"
                             % (repr(self.cell_parameters.dtype),
                                repr(other.cell_parameters.dtype)))
        if self.positions.dtype != other.positions.dtype:
            raise ValueError("position dtypes differ: %s != %s"
                             % (repr(self.positions.dtype),
                                repr(other.positions.dtype)))
        if (self.positions != other.positions).any():
            raise ValueError("position arrays differ")

    def is_equivalent(self, other):
        """Check for equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :returns: True if other is equivalent to self
        :rtype: bool
        """
        try:
            self.validate_equivalence(other)
            return True
        except Exception:
            # Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            return False

    # Validation

    _allowed_dtypes = [N.float32, N.float64]

    def validate(self):
        """Verify that the object satisfies the constraints of the
        Mosaic data model.

        :raises ValueError: if the object is not valid Mosaic data
        """
        validate_type(self.universe, MosaicUniverse, "Configuration.universe")
        self.universe.validate()
        validate_array(self.cell_parameters,
                       self.universe.cell_parameter_array_shape,
                       self._allowed_dtypes,
                       "Configuration.cell_parameters")
        validate_array(self.positions,
                       (self.universe.number_of_sites, 3),
                       self._allowed_dtypes,
                       "Configuration.positions")
        if self.cell_parameters.dtype != self.positions.dtype:
            raise ValueError("Configuration.cell_parameters and "
                             "Configuration.positions must have same dtypes")
# Selections specify a subset of atoms or sites, either in the templates
# or in the molecules of a universe.

class MosaicSelection(MosaicDataItem):

    """Selection

    See the :ref:`data model documentation<mosaic-selection>` for selections.
    """

    # API properties

    @abstractproperty
    def type(self):
        """A string identifying the type of the selection.
        See the :ref:`data model documentation<mosaic-selection-type>`.
        """
        raise NotImplementedError()

    @abstractproperty
    def universe(self):
        "The :py:class:`MosaicUniverse` for which the selection is defined."
        raise NotImplementedError()

    @abstractproperty
    def indices(self):
        """An array containing the indices of the contained atoms or sites.
        See the :ref:`data model documentation<mosaic-selection-indices>`.
        """
        raise NotImplementedError()

    # Properties that can be computed in terms of the API properties

    @property
    def number_of_atoms(self):
        """The number of atoms in the selection.
        """
        indices = self.indices
        if self.type == "atom":
            return len(indices)
        elif self.type == "template_atom":
            # A selected template atom occurs once per copy of the
            # molecule it belongs to.
            natoms = 0
            ntatoms = 0
            for fragment, count in self.universe.molecules:
                nta = fragment.number_of_atoms
                natoms += count * N.sum((indices >= ntatoms)
                                        & (indices < ntatoms+nta))
                ntatoms += nta
            return natoms
        else:
            raise TypeError("number of atoms undefined in site selection")

    @property
    def number_of_sites(self):
        """The number of sites in the selection.
        """
        indices = self.indices
        if self.type == "atom":
            sites_per_atom = [a.number_of_sites
                              for a in self.universe.recursive_atom_iterator()]
            # If 'indices' is an immutable array, N.take crashes due to
            # a numpy bug. Conversion to a plain array prevents this.
            # See Github issue #3758 for numpy/numpy.
            return N.sum(N.take(sites_per_atom, N.array(indices)))
        elif self.type == "template_atom":
            sites_per_atom = [a.number_of_sites
                              for a in self.universe.recursive_atom_iterator()]
            nsites = 0
            ntatoms = 0
            for fragment, count in self.universe.molecules:
                nta = fragment.number_of_atoms
                mask = (indices >= ntatoms) & (indices < ntatoms+nta)
                ntatoms += nta
                # N.repeat with the 0/1 mask as repeat counts keeps only
                # the masked indices. Conversion to plain arrays is
                # required for the same reason as above.
                nsites += count * N.sum(N.take(sites_per_atom,
                                               N.repeat(N.array(indices),
                                                        N.array(mask))))
            return nsites
        elif self.type == "site":
            return len(indices)
        elif self.type == "template_site":
            nsites = 0
            ntsites = 0
            for fragment, count in self.universe.molecules:
                nts = fragment.number_of_sites
                nsites += count * N.sum((indices >= ntsites)
                                        & (indices < ntsites+nts))
                ntsites += nts
            return nsites
        else:
            # Previously fell through and silently returned None for an
            # invalid selection type; fail loudly instead.
            raise ValueError("invalid selection type %s" % repr(self.type))

    # Equivalence test

    def validate_equivalence(self, other):
        """Verify the equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :raises ValueError: if other is not equivalent to self
        """
        if not isinstance(other, MosaicSelection):
            raise TypeError("%s is not a selection" % str(type(other)))
        self.universe.validate_equivalence(other.universe)
        if self.type != other.type:
            raise ValueError("types differ: %s != %s"
                             % (repr(self.type), repr(other.type)))
        if (self.indices != other.indices).any():
            raise ValueError("indices differ")

    def is_equivalent(self, other):
        """Check for equivalence of Python objects representing
        Mosaic data. The two objects can belong to different models;
        only the common API functionality is used for the test.

        :parameter other: an arbitrary Python object
        :returns: True if other is equivalent to self
        :rtype: bool
        """
        try:
            self.validate_equivalence(other)
            return True
        except Exception:
            # Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            return False

    # Validation

    _allowed_types = ["atom", "site", "template_atom", "template_site"]

    def validate(self):
        """Verify that the object satisfies the constraints of the
        Mosaic data model.

        :raises ValueError: if the object is not valid Mosaic data
        """
        validate_value(self.type, self._allowed_types, "Selection.type")
        validate_type(self.universe, MosaicUniverse, "Selection.universe")
        # Indices must be sorted, strictly increasing and smaller than
        # the number of items of the selected kind.
        max_index = {"atom": self.universe.number_of_atoms,
                     "site": self.universe.number_of_sites,
                     "template_atom": self.universe.number_of_template_atoms,
                     "template_site": self.universe.number_of_template_sites,
                     }[self.type]
        validate_indices(self.indices, max_index, "Selection.indices")
# Validation functions

def validate_type(obj, cls, text):
    """Raise TypeError unless obj is an instance of cls.

    :parameter text: name of the value, used in the error message
    """
    if not isinstance(obj, cls):
        raise TypeError("%s must be of type %s (is %s)"
                        % (text, cls.__name__, str(type(obj))))
def validate_value(value, allowed_values, name):
    """Raise ValueError unless value is one of allowed_values.

    :parameter name: name of the value, used in the error message
    """
    if value not in allowed_values:
        allowed = ", ".join([str(v) for v in allowed_values])
        raise ValueError("%s must be one of %s" % (name, allowed))
def validate_ascii_string(s, text):
    """Raise ValueError if s contains non-ASCII characters.

    The check is delegated to mosaic.utility.isascii; presumably it
    also handles bytes input — confirm against that helper.
    """
    if not mosaic.utility.isascii(s):
        raise ValueError("non-ASCII string in %s" % text)
def validate_label(label, text):
    """Validate a Mosaic label: a string of at most 32767 characters
    drawn from the restricted label alphabet.

    :parameter text: name of the value, used in error messages
    :raises TypeError: if label is not a string
    :raises ValueError: if label is too long or contains an illegal
        character
    """
    validate_type(label, str, text)
    if len(label) > 32767:
        raise ValueError("%s too long, must be <= 32767 characters" % text)
    for character in label:
        if character not in _allowed_in_labels:
            raise ValueError("illegal character '%s' in %s"
                             % (character, text))

# Alphanumerics plus a restricted set of punctuation characters.
_allowed_in_labels = ('abcdefghijklmnopqrstuvwxyz'
                      'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                      "0123456789!#$%&?@^_~+-*/=,()[]'")
def validate_array(array, shape, allowed_dtypes, text):
    """Validate that array is array-like with the given shape and one
    of the allowed element types.

    :parameter shape: the required shape, or None to skip the shape check
    :parameter text: name of the value, used in error messages
    """
    # Duck-typed check (shape/dtype attributes) rather than an
    # isinstance(N.ndarray) test, so that on-disk arrays
    # (HDF5, netCDF) are accepted as well.
    try:
        a_shape = array.shape
        a_dtype = array.dtype
    except AttributeError:
        raise TypeError("%s must be an array" % text)
    if shape is not None and a_shape != shape:
        raise ValueError("%s must have shape %s" % (text, str(shape)))
    if a_dtype not in allowed_dtypes:
        type_list = " or ".join(str(t) for t in allowed_dtypes)
        raise TypeError(" %s must have element type %s" % (text, type_list))
def validate_sequence(obj, el_cls, text, additional_tests=()):
    """Validate that obj is a sequence whose items are all instances
    of el_cls.

    :parameter additional_tests: optional (test_fn, description) pairs;
        each test_fn is called with one element of the sequence and
        must return a true value
    :raises TypeError: if obj is not a sequence of el_cls elements
    :raises ValueError: if an element fails one of the additional tests
    """
    # collections.abc.Sequence: the bare collections.Sequence alias
    # was deprecated since Python 3.3 and removed in 3.10.
    if not (isinstance(obj, collections.abc.Sequence) and
            all(isinstance(item, el_cls) for item in obj)):
        raise TypeError("%s must be a sequence of %s elements"
                        % (text, el_cls.__name__))
    # 'description' replaces the loop variable that shadowed the
    # 'text' parameter.
    for test_fn, description in additional_tests:
        for el in obj:
            if not test_fn(el):
                raise ValueError("%s: %s" % (str(el), description))
def validate_indices(indices, max_index, text):
    """Validate a selection index array: one-dimensional, unsigned
    integer dtype, strictly increasing, and all entries < max_index.

    :parameter text: name of the value, used in error messages
    """
    validate_array(indices, None,
                   [N.uint8, N.uint16, N.uint32, N.uint64],
                   text)
    if len(indices.shape) != 1:
        raise ValueError("index array not 1d")
    if (indices >= max_index).any():
        raise ValueError("index too large")
    if len(indices) > 1:
        # Consecutive differences must be strictly positive, i.e. the
        # indices are sorted and contain no duplicates.
        steps = indices[1:] - indices[:-1]
        if (steps <= 0).any():
            raise ValueError("indices not sorted")
# The unit validator accepts a very limited syntax, which
# should be sufficient: a unit is defined by a string of
# unit names with an optional numeric suffix indication a power.
# The unit list can include integers or decimal fractions.
#
# Examples: "nm ps-1" (velocity), "nm2" (area), "kJ mol-1" (energy)
#           "0.1 nm" (length), "60 s" (time)

def validate_units(unit_spec, text):
    """Validate a unit specification string.

    The first whitespace-separated token may be a numeric prefix (a
    positive integer or a decimal fraction such as "0.1"); every
    token must otherwise be an allowed unit symbol with an optional
    integer exponent (e.g. "nm2", "ps-1").

    :parameter text: name of the value, used in error messages
    :raises ValueError: if the specification contains an invalid unit
    """
    validate_type(unit_spec, str, text)
    first = True
    for unit in unit_spec.split():
        # number (integer or decimal fraction), allowed only as the
        # first token.  Raw strings avoid the invalid "\." escape
        # sequence that Python 3.12+ warns about.
        if first:
            first = False
            if (re.match(r"^[1-9][0-9]*$", unit)
                    or re.match(r"^0\.[0-9]+$", unit)):
                continue
        # symbol with optional integer exponent
        m = re.match(r"^(?P<symbol>[a-zA-Z]+)([-]?[0-9]+)?$", unit)
        if m and m.group('symbol') in _allowed_units:
            continue
        raise ValueError("invalid unit '%s' in %s" % (unit, unit_spec))

_allowed_units = \
    ["pm", "Ang", "nm", "um", "mm", "m",
     "fs", "ps", "ns", "us", "ms", "s",
     "amu", "g", "kg",
     "mol",
     "J", "kJ", "cal", "kcal", "eV",
     "K",
     "Pa", "kPa", "MPa", "GPa", "atm", "bar", "kbar",
     "e", "C", "A", "V",
     "rad",
     "c", "h", "me"]
# Validation as a test function rather than raising exceptions

def is_valid(obj):
    """Return True if obj.validate() succeeds, False otherwise.

    :parameter obj: any Mosaic data item
    :rtype: bool
    """
    try:
        obj.validate()
        return True
    except Exception:
        # Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        return False
| [
"numpy.sum",
"re.match",
"numpy.arange",
"numpy.array",
"numpy.linalg.det",
"numpy.add.accumulate",
"numpy.concatenate"
] | [((17229, 17249), 'numpy.add.accumulate', 'N.add.accumulate', (['ns'], {}), '(ns)\n', (17245, 17249), True, 'import numpy as N\n'), ((28332, 28346), 'numpy.array', 'N.array', (['bonds'], {}), '(bonds)\n', (28339, 28346), True, 'import numpy as N\n'), ((28891, 28911), 'numpy.concatenate', 'N.concatenate', (['total'], {}), '(total)\n', (28904, 28911), True, 'import numpy as N\n'), ((29464, 29484), 'numpy.concatenate', 'N.concatenate', (['total'], {}), '(total)\n', (29477, 29484), True, 'import numpy as N\n'), ((30079, 30099), 'numpy.concatenate', 'N.concatenate', (['total'], {}), '(total)\n', (30092, 30099), True, 'import numpy as N\n'), ((30637, 30657), 'numpy.concatenate', 'N.concatenate', (['total'], {}), '(total)\n', (30650, 30657), True, 'import numpy as N\n'), ((31195, 31215), 'numpy.concatenate', 'N.concatenate', (['total'], {}), '(total)\n', (31208, 31215), True, 'import numpy as N\n'), ((54894, 54948), 're.match', 're.match', (['"""^(?P<symbol>[a-zA-Z]+)([-]?[0-9]+)?$"""', 'unit'], {}), "('^(?P<symbol>[a-zA-Z]+)([-]?[0-9]+)?$', unit)\n", (54902, 54948), False, 'import re\n'), ((30483, 30501), 'numpy.arange', 'N.arange', (['f_nsites'], {}), '(f_nsites)\n', (30491, 30501), True, 'import numpy as N\n'), ((31041, 31059), 'numpy.arange', 'N.arange', (['f_natoms'], {}), '(f_natoms)\n', (31049, 31059), True, 'import numpy as N\n'), ((54746, 54777), 're.match', 're.match', (['"""^[1-9][0-9]*$"""', 'unit'], {}), "('^[1-9][0-9]*$', unit)\n", (54754, 54777), False, 'import re\n'), ((54799, 54829), 're.match', 're.match', (['"""^0\\\\.[0-9]+$"""', 'unit'], {}), "('^0\\\\.[0-9]+$', unit)\n", (54807, 54829), False, 'import re\n'), ((48145, 48161), 'numpy.array', 'N.array', (['indices'], {}), '(indices)\n', (48152, 48161), True, 'import numpy as N\n'), ((47363, 47418), 'numpy.sum', 'N.sum', (['((indices >= ntatoms) & (indices < ntatoms + nta))'], {}), '((indices >= ntatoms) & (indices < ntatoms + nta))\n', (47368, 47418), True, 'import numpy as N\n'), ((42008, 42045), 
'numpy.array', 'N.array', (['[p, 0.0, 0.0]'], {'dtype': 'p.dtype'}), '([p, 0.0, 0.0], dtype=p.dtype)\n', (42015, 42045), True, 'import numpy as N\n'), ((42080, 42117), 'numpy.array', 'N.array', (['[0.0, p, 0.0]'], {'dtype': 'p.dtype'}), '([0.0, p, 0.0], dtype=p.dtype)\n', (42087, 42117), True, 'import numpy as N\n'), ((42152, 42189), 'numpy.array', 'N.array', (['[0.0, 0.0, p]'], {'dtype': 'p.dtype'}), '([0.0, 0.0, p], dtype=p.dtype)\n', (42159, 42189), True, 'import numpy as N\n'), ((42227, 42267), 'numpy.array', 'N.array', (['[p[0], 0.0, 0.0]'], {'dtype': 'p.dtype'}), '([p[0], 0.0, 0.0], dtype=p.dtype)\n', (42234, 42267), True, 'import numpy as N\n'), ((42304, 42344), 'numpy.array', 'N.array', (['[0.0, p[1], 0.0]'], {'dtype': 'p.dtype'}), '([0.0, p[1], 0.0], dtype=p.dtype)\n', (42311, 42344), True, 'import numpy as N\n'), ((42381, 42421), 'numpy.array', 'N.array', (['[0.0, 0.0, p[2]]'], {'dtype': 'p.dtype'}), '([0.0, 0.0, p[2]], dtype=p.dtype)\n', (42388, 42421), True, 'import numpy as N\n'), ((42927, 42936), 'numpy.linalg.det', 'LA.det', (['p'], {}), '(p)\n', (42933, 42936), True, 'import numpy.linalg as LA\n'), ((42472, 42482), 'numpy.array', 'N.array', (['v'], {}), '(v)\n', (42479, 42482), True, 'import numpy as N\n'), ((49228, 49283), 'numpy.sum', 'N.sum', (['((indices >= ntsites) & (indices < ntsites + nts))'], {}), '((indices >= ntsites) & (indices < ntsites + nts))\n', (49233, 49283), True, 'import numpy as N\n'), ((48814, 48830), 'numpy.array', 'N.array', (['indices'], {}), '(indices)\n', (48821, 48830), True, 'import numpy as N\n'), ((48888, 48901), 'numpy.array', 'N.array', (['mask'], {}), '(mask)\n', (48895, 48901), True, 'import numpy as N\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, model_helpers
from caffe2.python.model_helper import ModelHelperBase
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
class ModelHelpersTest(hu.HypothesisTestCase):
    """Hypothesis-driven smoke tests for the caffe2 model_helpers
    wrappers (Dropout and the FC variants).
    """

    @given(n=st.integers(2, 5), m=st.integers(2, 5), **hu.gcs)
    def test_dropout(self, n, m, gc, dc):
        # Random n x m float32 input shifted to [-0.5, 0.5).
        X = np.random.rand(n, m).astype(np.float32) - 0.5
        workspace.FeedBlob("x", X)
        model = ModelHelperBase(name="test_model")
        out = model_helpers.Dropout(model, "x", "out")
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        # NOTE(review): this fetches the *input* blob "x", not the
        # dropout output "out", so the assertion below is trivially
        # true — confirm whether FetchBlob("out") was intended.
        out = workspace.FetchBlob("x")
        np.testing.assert_equal(X, out)

    @given(n=st.integers(2, 5), m=st.integers(2, 5),
           k=st.integers(2, 5), **hu.gcs)
    def test_fc(self, n, m, k, gc, dc):
        # Smoke test: chain several FC variants and check only that
        # both nets run without raising (no numerical assertions).
        X = np.random.rand(m, k).astype(np.float32) - 0.5
        workspace.FeedBlob("x", X)
        model = ModelHelperBase(name="test_model")
        out = model_helpers.FC(model, "x", "out_1", k, n)
        out = model_helpers.PackedFC(model, out, "out_2", n, n)
        out = model_helpers.FC_Decomp(model, out, "out_3", n, n)
        out = model_helpers.FC_Prune(model, out, "out_4", n, n)
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
| [
"caffe2.python.model_helpers.FC_Decomp",
"caffe2.python.workspace.FetchBlob",
"caffe2.python.model_helpers.FC_Prune",
"numpy.random.rand",
"caffe2.python.workspace.FeedBlob",
"caffe2.python.model_helpers.PackedFC",
"caffe2.python.workspace.RunNetOnce",
"caffe2.python.model_helper.ModelHelperBase",
"... | [((608, 634), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['"""x"""', 'X'], {}), "('x', X)\n", (626, 634), False, 'from caffe2.python import workspace, model_helpers\n'), ((651, 685), 'caffe2.python.model_helper.ModelHelperBase', 'ModelHelperBase', ([], {'name': '"""test_model"""'}), "(name='test_model')\n", (666, 685), False, 'from caffe2.python.model_helper import ModelHelperBase\n'), ((700, 740), 'caffe2.python.model_helpers.Dropout', 'model_helpers.Dropout', (['model', '"""x"""', '"""out"""'], {}), "(model, 'x', 'out')\n", (721, 740), False, 'from caffe2.python import workspace, model_helpers\n'), ((749, 791), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.param_init_net'], {}), '(model.param_init_net)\n', (769, 791), False, 'from caffe2.python import workspace, model_helpers\n'), ((800, 831), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.net'], {}), '(model.net)\n', (820, 831), False, 'from caffe2.python import workspace, model_helpers\n'), ((846, 870), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""x"""'], {}), "('x')\n", (865, 870), False, 'from caffe2.python import workspace, model_helpers\n'), ((879, 910), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['X', 'out'], {}), '(X, out)\n', (902, 910), True, 'import numpy as np\n'), ((1114, 1140), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['"""x"""', 'X'], {}), "('x', X)\n", (1132, 1140), False, 'from caffe2.python import workspace, model_helpers\n'), ((1157, 1191), 'caffe2.python.model_helper.ModelHelperBase', 'ModelHelperBase', ([], {'name': '"""test_model"""'}), "(name='test_model')\n", (1172, 1191), False, 'from caffe2.python.model_helper import ModelHelperBase\n'), ((1206, 1249), 'caffe2.python.model_helpers.FC', 'model_helpers.FC', (['model', '"""x"""', '"""out_1"""', 'k', 'n'], {}), "(model, 'x', 'out_1', k, n)\n", (1222, 1249), False, 'from caffe2.python import workspace, 
model_helpers\n'), ((1264, 1313), 'caffe2.python.model_helpers.PackedFC', 'model_helpers.PackedFC', (['model', 'out', '"""out_2"""', 'n', 'n'], {}), "(model, out, 'out_2', n, n)\n", (1286, 1313), False, 'from caffe2.python import workspace, model_helpers\n'), ((1328, 1378), 'caffe2.python.model_helpers.FC_Decomp', 'model_helpers.FC_Decomp', (['model', 'out', '"""out_3"""', 'n', 'n'], {}), "(model, out, 'out_3', n, n)\n", (1351, 1378), False, 'from caffe2.python import workspace, model_helpers\n'), ((1393, 1442), 'caffe2.python.model_helpers.FC_Prune', 'model_helpers.FC_Prune', (['model', 'out', '"""out_4"""', 'n', 'n'], {}), "(model, out, 'out_4', n, n)\n", (1415, 1442), False, 'from caffe2.python import workspace, model_helpers\n'), ((1452, 1494), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.param_init_net'], {}), '(model.param_init_net)\n', (1472, 1494), False, 'from caffe2.python import workspace, model_helpers\n'), ((1503, 1534), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['model.net'], {}), '(model.net)\n', (1523, 1534), False, 'from caffe2.python import workspace, model_helpers\n'), ((450, 467), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (461, 467), True, 'import hypothesis.strategies as st\n'), ((471, 488), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (482, 488), True, 'import hypothesis.strategies as st\n'), ((925, 942), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (936, 942), True, 'import hypothesis.strategies as st\n'), ((946, 963), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (957, 963), True, 'import hypothesis.strategies as st\n'), ((978, 995), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(5)'], {}), '(2, 5)\n', (989, 995), True, 'import hypothesis.strategies as st\n'), ((554, 574), 'numpy.random.rand', 'np.random.rand', (['n', 'm'], {}), 
'(n, m)\n', (568, 574), True, 'import numpy as np\n'), ((1059, 1079), 'numpy.random.rand', 'np.random.rand', (['m', 'k'], {}), '(m, k)\n', (1073, 1079), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import uqra, warnings, random, math
import numpy as np, os, sys
import collections
import scipy.stats as stats
import scipy
import scipy.io
from tqdm import tqdm
# Suppress the known, harmless SciPy "internal gelsd" LAPACK warning.
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Route stdout through uqra's Logger (presumably tees console output to a log
# file as well -- confirm against uqra.utilities.classes.Logger).
sys.stdout = uqra.utilities.classes.Logger()
def get_basis(deg, simparams, solver):
    """Return the orthogonal polynomial basis matching the DoE method and polynomial type.

    The Hermite convention depends on the sampling scheme: Monte-Carlo ('mcs')
    designs use the probabilists' Hermite polynomials, while Christoffel
    least-squares ('cls') designs use the physicists' convention. Legendre
    polynomials are identical in both cases.

    :param int deg: polynomial degree of the basis.
    :param simparams: parameter object providing ``doe_method`` and ``poly_type``.
    :param solver: solver object providing the problem dimension ``ndim``.
    :return: a ``uqra.Legendre`` or ``uqra.Hermite`` basis instance.
    :raises ValueError: if ``doe_method`` or ``poly_type`` is not recognized.
    """
    doe_method = simparams.doe_method.lower()
    poly_type = simparams.poly_type.lower()

    if doe_method.startswith('mcs'):
        hem_type = 'probabilists'
    elif doe_method.startswith('cls'):
        hem_type = 'physicists'
    else:
        raise ValueError('Unrecognized DoE method: {}'.format(simparams.doe_method))

    if poly_type == 'leg':
        print(' Legendre polynomial')
        basis = uqra.Legendre(d=solver.ndim, deg=deg)
    elif poly_type.startswith('hem'):
        # Original code printed 'Probabilists' for both conventions; report the
        # actual Hermite type in use instead.
        print(' {} Hermite polynomial'.format(hem_type.capitalize()))
        basis = uqra.Hermite(d=solver.ndim, deg=deg, hem_type=hem_type)
    else:
        raise ValueError('Unrecognized polynomial type: {}'.format(simparams.poly_type))
    return basis
def main(theta):
    """Run the adaptive sparse-PCE surrogate study for one FPSO short-term seed.

    For each candidate polynomial degree, an initial design is drawn, a sparse
    PCE (LASSO-LARS) is fitted iteratively -- adding optimality-selected
    samples until the target oversampling ratio is reached -- then refitted
    with OLS on the active basis. The refitted model estimates the pf-level
    extreme response from a large MCS prediction set. Metrics and predictions
    are saved under ``simparams.data_dir_result`` (falling back to the current
    working directory if that path is unavailable).

    :param int theta: index of the random-phase seed / data-file realization.
    """
    print('------------------------------------------------------------')
    print('>>> Model: FPSO, Short-term simulation (n={:d})  '.format(theta))
    print('------------------------------------------------------------')
    ## ------------------------ Displaying set up ------------------- ###
    np.random.seed(100)
    np.set_printoptions(precision=4)
    np.set_printoptions(threshold=8)
    np.set_printoptions(suppress=True)
    pf = 1e-5 #0.5/(50*365.25*24)
    radius_surrogate= 5
    Kvitebjorn = uqra.environment.Kvitebjorn()
    # short_term_seeds_applied = np.setdiff1d(np.arange(10), np.array([]))
    ## ------------------------ Simulation Parameters ----------------- ###
    # theta = np.arange(theta,theta+1)
    # assert theta.size == 1
    solver = uqra.FPSO(phase=[theta,])
    simparams = uqra.Parameters()
    simparams.solver = solver
    simparams.pce_degs = np.array(range(2,11))
    simparams.n_cand = int(1e5)
    simparams.n_test = -1
    simparams.n_pred = int(1e6)
    simparams.doe_method = 'CLS4' ### 'mcs', 'cls1', 'cls2', ..., 'cls5', 'reference'
    simparams.optimality = 'D'# 'D', 'S', None
    simparams.poly_type = 'hem'
    simparams.fit_method = 'LASSOLARS'
    simparams.n_splits = 50
    alphas = 1.2
    # # simparams.num_samples=np.arange(21+1, 130, 5)
    simparams.update()
    simparams.info()
    n_initial = 20
    # Surrogate is centered at the origin in both U (standard-normal) and X space.
    u_center = np.array([0,0]).reshape(-1,1)
    x_center = np.array([0,0]).reshape(-1,1)
    ## ----------- Test data set ----------- ###
    ## ----- Testing data set centered around u_center, first 100000
    print(' > Getting Test data set...')
    filename = 'FPSO_SDOF_DoE_McsE5R{:d}.npy'.format(theta)
    # Test file layout: rows [0:ndim] = U samples, [ndim:2*ndim] = X samples, last row = Y.
    data_test = np.load(os.path.join(simparams.data_dir_result,'TestData', filename))
    u_test = data_test[ : solver.ndim, :]
    x_test = data_test[solver.ndim :2*solver.ndim, :]
    y_test = data_test[-1]
    print(' - {:<25s} : {}, {}, {}'.format('Test Dataset (U,X,Y)',u_test.shape, x_test.shape, y_test.shape ))
    print(' - {:<25s} : [{}, {}]'.format('Test U[mean, std]',np.mean(u_test, axis=1),np.std (u_test, axis=1)))
    print(' - {:<25s} : [{}]'.format('Test max(U)[U1, U2]',np.amax(abs(u_test), axis=1)))
    print(' - {:<25s} : [{}]'.format('Test [min(Y), max(Y)]',np.array([np.amin(y_test),np.amax(y_test)])))
    ## ----------- Predict data set ----------- ###
    ## ----- Prediction data set centered around u_center, all
    filename= 'DoE_McsE7R{:d}.npy'.format(theta)
    mcs_data= np.load(os.path.join(simparams.data_dir_sample,'MCS', 'Norm', filename))
    u_pred = mcs_data[:solver.ndim, :simparams.n_pred]
    # Map standard-normal U samples to physical X via the Kvitebjorn environment iCDF.
    x_pred = Kvitebjorn.ppf(stats.norm.cdf(u_pred))
    # x_pred = mcs_data_ux[-2:,np.linalg.norm(mcs_data_ux[:2] - u_center, axis=0) < radius_surrogate]
    ## ----------- Candidate and testing data set for DoE ----------- ###
    print(' > Getting candidate data set...')
    # u_cand = modeling.get_candidate_data()
    # Candidate pool for the sequential design; file depends on the DoE method.
    if simparams.doe_method.lower().startswith('cls2'):
        filename = os.path.join(simparams.data_dir_sample, 'CLS', 'DoE_Cls2E7d2R{:d}.npy'.format(theta))
        u_cand = np.load(filename)[:solver.ndim, :simparams.n_cand]
        u_cand = u_cand * radius_surrogate
    elif simparams.doe_method.lower().startswith('cls4'):
        filename = os.path.join(simparams.data_dir_sample, 'CLS', 'DoE_Cls4E7d2R{:d}.npy'.format(theta))
        u_cand = np.load(filename)[:solver.ndim, :simparams.n_cand]
    elif simparams.doe_method.lower().startswith('mcs'):
        filename = os.path.join(simparams.data_dir_sample, 'MCS','Norm','DoE_McsE7R{:d}.npy'.format(theta))
        u_cand = np.load(filename)[:solver.ndim, :simparams.n_cand]
    # u_cand = np.load(os.path.join(simparams.data_dir_sample, 'MCS','Norm','DoE_McsE7R0.npy'))
    # u_cand = u_cand[:solver.ndim, np.linalg.norm(u_cand[:2], axis=0)<radius_surrogate]
    # u_cand = u_cand[:, :simparams.n_cand]
    # u_cand = 2** 0.5 * u_cand if modeling.is_cls_unbounded() else u_cand
    metrics_each_deg = []
    pred_uxy_each_deg = []
    for deg in simparams.pce_degs:
        print('\n================================================================================')
        print(' - Sampling and Fitting:')
        print(' - {:<23s} : {}'.format('Sampling method' , simparams.doe_method))
        print(' - {:<23s} : {}'.format('Optimality ' , simparams.optimality))
        print(' - {:<23s} : {}'.format('Fitting method' , simparams.fit_method))
        print(' > Building surrogate model ...')
        ## ----------- Define PCE ----------- ###
        basis = get_basis(deg, simparams, solver)
        pce_model = uqra.PCE(basis)
        modeling = uqra.Modeling(solver, pce_model, simparams)
        pce_model.info()
        # CLS4/CLS5 candidates are scaled by sqrt(deg) (degree-dependent support).
        u_cand_p = deg ** 0.5 * u_cand if simparams.doe_method.lower() in ['cls4', 'cls5'] else u_cand
        ### ----------- Oversampling ratio ----------- ###
        simparams.update_num_samples(pce_model.num_basis, alphas=alphas)
        # Target training size: alphas times the number of basis terms.
        n_train = int(alphas * pce_model.num_basis)
        ### ============ Initial Values ============
        n_initial = max(20, int(0.8 * pce_model.num_basis))
        if simparams.doe_method.lower().startswith('mcs'):
            doe = uqra.LHS([stats.norm(),]*solver.ndim)
            u_train = doe.samples(size=n_initial, loc=0, scale=1, random_state=100)
        elif simparams.doe_method.lower().startswith('cls'):
            u_train = u_cand_p[:, :n_initial]
        x_train = Kvitebjorn.ppf(stats.norm.cdf(u_train))
        y_train = solver.run(x_train)
        print(' - {:<25s} : {}, {}, {}'.format('LHS Dataset (U,X,Y)',u_train.shape, x_train.shape, y_train.shape))
        print(' - {:<25s} : [{}, {}]'.format('LHS U[mean, std]',np.mean(u_train, axis=1),np.std (u_train, axis=1)))
        print(' - {:<25s} : [{}]'.format('LHS max(U)[U1, U2]',np.amax(abs(u_train), axis=1)))
        print(' - {:<25s} : [{}]'.format('LHS [min(Y), max(Y)]',np.array([np.amin(y_train),np.amax(y_train)])))
        # Adaptive enrichment: estimate sparsity, add that many new optimality-
        # selected samples, repeat until the oversampling target is exceeded.
        while True:
            ### ============ Estimate sparsity ============
            print(' > 1. Sparsity estimation ...')
            if simparams.doe_method.lower().startswith('cls'):
                w_train = modeling.cal_cls_weight(u_train, pce_model.basis, active_index=None)
            else:
                w_train = None
            pce_model.fit('LASSOLARS', u_train, y_train.T, w=w_train,
                    n_splits=simparams.n_splits, epsilon=1e-3)
            print(' > 2. Getting n training data ...')
            pce_model_sparsity = pce_model.sparsity
            n_train_new = pce_model_sparsity
            ### ============ Build Surrogate Model ============
            # u_train = u_cand[:, doe_idx_u_cand[:int(n_train*0.75)]]
            # x_train = Kvitebjorn.ppf(stats.norm.cdf(u_train + u_center))
            # y_train = solver.run(x_train)
            # curr_doe_idx = list(doe_idx_u_cand[:int(n_train*0.75)])
            # ### surrogate model for each short term simulation
            tqdm.write(' > {}:{}; Basis: {}/{}; # samples = {:d}'.format(
                'New samples', simparams.optimality, pce_model_sparsity, pce_model.num_basis, n_train_new ))
            u_train_new, _ = modeling.get_train_data(n_train_new, u_cand_p, u_train,
                    basis=pce_model.basis, active_basis=pce_model.active_basis)
            x_train_new = Kvitebjorn.ppf(stats.norm.cdf(u_train_new))
            y_train_new = solver.run(x_train_new)
            u_train = np.hstack((u_train, u_train_new))
            x_train = np.hstack((x_train, x_train_new))
            y_train = np.hstack((y_train, y_train_new))
            # Sanity checks: abort loudly if NaN/Inf crept into the training data.
            if np.isnan(y_train).any():
                print(u_train[:, np.isnan(y_train)])
                print(y_train[np.isnan(y_train)])
                raise ValueError
            if np.isnan(u_train).any():
                print(u_train[:, np.isnan(u_train)])
                print(y_train[np.isnan(u_train)])
                raise ValueError
            if np.isinf(y_train).any():
                print(u_train[:, np.isinf(y_train)])
                print(y_train[np.isinf(y_train)])
                raise ValueError
            if u_train.shape[1] > n_train:
                print(' > Oversampling ratio: {}'.format(np.around(u_train.shape[1]/pce_model.num_basis,2)))
                break
        ### ============ Build 2nd Surrogate Model ============
        U_train = pce_model.basis.vandermonde(u_train)
        if simparams.doe_method.lower().startswith('cls'):
            w_train = modeling.cal_cls_weight(u_train, pce_model.basis, active_index=pce_model.active_index)
            U_train = U_train[:, pce_model.active_index]
            U_train = modeling.rescale_data(U_train, w_train)
        else:
            w_train = None
            U_train = U_train[:, pce_model.active_index]
        # Condition number (kappa) of the (weighted) active design matrix.
        _, sig_value, _ = np.linalg.svd(U_train)
        kappa = max(abs(sig_value)) / min(abs(sig_value))
        # Refit with OLS restricted to the active basis found by LASSO-LARS.
        pce_model.fit('OLS', u_train, y_train.T, w_train,
                n_splits=simparams.n_splits, active_basis=pce_model.active_basis)
        print(' - {:<25s} : {:s}'.format('File', filename))
        print(' - {:<25s} : {}, {}, {}'.format('Train Dataset (U,X,Y)',u_train.shape, x_train.shape, y_train.shape))
        if w_train is None:
            print(' - {:<25s} : {}'.format('Train Dataset W ', 'None'))
        else:
            print(' - {:<25s} : {}'.format('Train Dataset W ', w_train.shape))
        print(' - {:<25s} : [{}, {}]'.format('Train U[mean, std]',np.mean(u_train, axis=1),np.std (u_train, axis=1)))
        print(' - {:<25s} : [{}]'.format('Train max(U)[U1, U2]',np.amax(abs(u_train), axis=1)))
        print(' - {:<25s} : [{}]'.format('Train [min(Y), max(Y)]',np.array([np.amin(y_train),np.amax(y_train)])))
        y_train_hat= pce_model.predict(u_train)
        y_test_hat = pce_model.predict(u_test - u_center)
        test_error = uqra.metrics.mean_squared_error(y_test, y_test_hat,squared=False)
        ### prediction data set, randomly draw or from MCS directory
        # y_pred = pce_model.predict(u_pred - u_center)
        # alpha = (pf * mcs_data_ux.shape[1]) / y_pred.size
        # y50_pce_y = uqra.metrics.mquantiles(y_pred, 1-alpha)
        # y50_pce_idx = np.array(abs(y_pred - y50_pce_y)).argmin()
        # y50_pce_uxy = np.concatenate((u_pred[:,y50_pce_idx], x_pred[:, y50_pce_idx], y50_pce_y))
        # pred_uxy_each_deg.append([deg, n_train, y_pred])
        # np.random.seed()
        # u_pred = stats.norm.rvs(size=(solver.ndim,simparams.n_pred))
        # x_pred = Kvitebjorn.ppf(stats.norm.cdf(u_pred))
        # Extreme response estimate: (1-pf) quantile of the surrogate predictions,
        # plus the (U, X) design point closest to that quantile value.
        y_pred = pce_model.predict(u_pred - u_center)
        y50_pce_y = uqra.metrics.mquantiles(y_pred, 1-pf)
        y50_pce_idx = np.array(abs(y_pred - y50_pce_y)).argmin()
        y50_pce_uxy = np.concatenate((u_pred[:,y50_pce_idx], x_pred[:, y50_pce_idx], y50_pce_y))
        pred_uxy_each_deg.append([deg, u_train.shape[1], y_pred])
        # Metrics row: [deg, n_train, cv_error, test_rmse, y50_pce_u, y50_pce_x, y50_pce_y].
        res = [deg, u_train.shape[1], pce_model.cv_error, test_error[0]]
        for item in y50_pce_uxy:
            res.append(item)
        metrics_each_deg.append(res)
        ### ============ calculating & updating metrics ============
        tqdm.write(' > Summary')
        with np.printoptions(precision=4):
            tqdm.write(' - {:<15s} : {}'.format( 'y50_pce_y' , np.array(metrics_each_deg)[-1:,-1]))
            tqdm.write(' - {:<15s} : {}'.format( 'Test MSE ' , np.array(metrics_each_deg)[-1:, 3]))
            tqdm.write(' - {:<15s} : {}'.format( 'CV MSE' , np.array(metrics_each_deg)[-1:, 2]))
            tqdm.write(' - {:<15s} : {}'.format( 'Design state', np.array(metrics_each_deg)[-1:,6:8]))
            tqdm.write(' - {:<15s} : {:.4f}'.format( 'kappa ' , kappa))
            tqdm.write(' ----------------------------------------')
    ### ============ Saving QoIs ============
    metrics_each_deg = np.array(metrics_each_deg)
    with open(os.path.join(simparams.data_dir_result, 'outlist_name.txt'), "w") as text_file:
        text_file.write('\n'.join(['deg', 'n_train', 'cv_error', 'test mse', 'y50_pce_u', 'y50_pce_x', 'y50_pce_y']))
    filename = '{:s}_{:s}_Adap{:s}_Alpha{}_ST{}'.format(solver.nickname, pce_model.tag,
            simparams.tag, str(alphas).replace('.', 'pt'), theta)
    try:
        np.save(os.path.join(simparams.data_dir_result, filename), metrics_each_deg)
    except:
        print(' Directory not found: {}, file save locally... '.format(simparams.data_dir_result))
        np.save(os.path.join(os.getcwd(), filename), metrics_each_deg)
    ### ============ Saving Predict data ============
    pred_uxy_each_deg = np.array(pred_uxy_each_deg, dtype=object)
    filename = '{:s}_{:s}_Adap{:s}_Alpha{}_ST{}_pred'.format(solver.nickname, pce_model.tag,
            simparams.tag, str(alphas).replace('.', 'pt'), theta)
    try:
        np.save(os.path.join(simparams.data_dir_result, filename), pred_uxy_each_deg)
    except:
        print(' Directory not found: {}, file save locally... '.format(simparams.data_dir_result))
        np.save(os.path.join(os.getcwd(), filename), pred_uxy_each_deg)
if __name__ == '__main__':
    # Run the short-term simulation once for each of the 10 random-phase seeds.
    for phase_seed in range(10):
        main(phase_seed)
| [
"numpy.load",
"numpy.random.seed",
"numpy.amin",
"uqra.metrics.mean_squared_error",
"numpy.isnan",
"numpy.around",
"numpy.linalg.svd",
"numpy.mean",
"uqra.Legendre",
"os.path.join",
"numpy.set_printoptions",
"scipy.stats.norm",
"numpy.std",
"scipy.stats.norm.cdf",
"numpy.printoptions",
... | [((330, 418), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'module': '"""scipy"""', 'message': '"""^internal gelsd"""'}), "(action='ignore', module='scipy', message=\n '^internal gelsd')\n", (353, 418), False, 'import uqra, warnings, random, math\n'), ((428, 459), 'uqra.utilities.classes.Logger', 'uqra.utilities.classes.Logger', ([], {}), '()\n', (457, 459), False, 'import uqra, warnings, random, math\n'), ((1764, 1783), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (1778, 1783), True, 'import numpy as np, os, sys\n'), ((1788, 1820), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (1807, 1820), True, 'import numpy as np, os, sys\n'), ((1825, 1857), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(8)'}), '(threshold=8)\n', (1844, 1857), True, 'import numpy as np, os, sys\n'), ((1862, 1896), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (1881, 1896), True, 'import numpy as np, os, sys\n'), ((1977, 2006), 'uqra.environment.Kvitebjorn', 'uqra.environment.Kvitebjorn', ([], {}), '()\n', (2004, 2006), False, 'import uqra, warnings, random, math\n'), ((2243, 2267), 'uqra.FPSO', 'uqra.FPSO', ([], {'phase': '[theta]'}), '(phase=[theta])\n', (2252, 2267), False, 'import uqra, warnings, random, math\n'), ((2285, 2302), 'uqra.Parameters', 'uqra.Parameters', ([], {}), '()\n', (2300, 2302), False, 'import uqra, warnings, random, math\n'), ((13552, 13578), 'numpy.array', 'np.array', (['metrics_each_deg'], {}), '(metrics_each_deg)\n', (13560, 13578), True, 'import numpy as np, os, sys\n'), ((14302, 14343), 'numpy.array', 'np.array', (['pred_uxy_each_deg'], {'dtype': 'object'}), '(pred_uxy_each_deg, dtype=object)\n', (14310, 14343), True, 'import numpy as np, os, sys\n'), ((3211, 3272), 'os.path.join', 'os.path.join', (['simparams.data_dir_result', '"""TestData"""', 'filename'], {}), 
"(simparams.data_dir_result, 'TestData', filename)\n", (3223, 3272), False, 'import numpy as np, os, sys\n'), ((4040, 4104), 'os.path.join', 'os.path.join', (['simparams.data_dir_sample', '"""MCS"""', '"""Norm"""', 'filename'], {}), "(simparams.data_dir_sample, 'MCS', 'Norm', filename)\n", (4052, 4104), False, 'import numpy as np, os, sys\n'), ((4191, 4213), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['u_pred'], {}), '(u_pred)\n', (4205, 4213), True, 'import scipy.stats as stats\n'), ((6214, 6229), 'uqra.PCE', 'uqra.PCE', (['basis'], {}), '(basis)\n', (6222, 6229), False, 'import uqra, warnings, random, math\n'), ((6250, 6293), 'uqra.Modeling', 'uqra.Modeling', (['solver', 'pce_model', 'simparams'], {}), '(solver, pce_model, simparams)\n', (6263, 6293), False, 'import uqra, warnings, random, math\n'), ((10483, 10505), 'numpy.linalg.svd', 'np.linalg.svd', (['U_train'], {}), '(U_train)\n', (10496, 10505), True, 'import numpy as np, os, sys\n'), ((11548, 11614), 'uqra.metrics.mean_squared_error', 'uqra.metrics.mean_squared_error', (['y_test', 'y_test_hat'], {'squared': '(False)'}), '(y_test, y_test_hat, squared=False)\n', (11579, 11614), False, 'import uqra, warnings, random, math\n'), ((12326, 12365), 'uqra.metrics.mquantiles', 'uqra.metrics.mquantiles', (['y_pred', '(1 - pf)'], {}), '(y_pred, 1 - pf)\n', (12349, 12365), False, 'import uqra, warnings, random, math\n'), ((12451, 12526), 'numpy.concatenate', 'np.concatenate', (['(u_pred[:, y50_pce_idx], x_pred[:, y50_pce_idx], y50_pce_y)'], {}), '((u_pred[:, y50_pce_idx], x_pred[:, y50_pce_idx], y50_pce_y))\n', (12465, 12526), True, 'import numpy as np, os, sys\n'), ((12844, 12868), 'tqdm.tqdm.write', 'tqdm.write', (['""" > Summary"""'], {}), "(' > Summary')\n", (12854, 12868), False, 'from tqdm import tqdm\n'), ((666, 703), 'uqra.Legendre', 'uqra.Legendre', ([], {'d': 'solver.ndim', 'deg': 'deg'}), '(d=solver.ndim, deg=deg)\n', (679, 703), False, 'import uqra, warnings, random, math\n'), ((2886, 2902), 'numpy.array', 
'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2894, 2902), True, 'import numpy as np, os, sys\n'), ((2931, 2947), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2939, 2947), True, 'import numpy as np, os, sys\n'), ((3599, 3622), 'numpy.mean', 'np.mean', (['u_test'], {'axis': '(1)'}), '(u_test, axis=1)\n', (3606, 3622), True, 'import numpy as np, os, sys\n'), ((3623, 3645), 'numpy.std', 'np.std', (['u_test'], {'axis': '(1)'}), '(u_test, axis=1)\n', (3629, 3645), True, 'import numpy as np, os, sys\n'), ((4663, 4680), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (4670, 4680), True, 'import numpy as np, os, sys\n'), ((7069, 7092), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['u_train'], {}), '(u_train)\n', (7083, 7092), True, 'import scipy.stats as stats\n'), ((9096, 9129), 'numpy.hstack', 'np.hstack', (['(u_train, u_train_new)'], {}), '((u_train, u_train_new))\n', (9105, 9129), True, 'import numpy as np, os, sys\n'), ((9153, 9186), 'numpy.hstack', 'np.hstack', (['(x_train, x_train_new)'], {}), '((x_train, x_train_new))\n', (9162, 9186), True, 'import numpy as np, os, sys\n'), ((9210, 9243), 'numpy.hstack', 'np.hstack', (['(y_train, y_train_new)'], {}), '((y_train, y_train_new))\n', (9219, 9243), True, 'import numpy as np, os, sys\n'), ((12882, 12910), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (12897, 12910), True, 'import numpy as np, os, sys\n'), ((13422, 13481), 'tqdm.tqdm.write', 'tqdm.write', (['""" ----------------------------------------"""'], {}), "(' ----------------------------------------')\n", (13432, 13481), False, 'from tqdm import tqdm\n'), ((13593, 13652), 'os.path.join', 'os.path.join', (['simparams.data_dir_result', '"""outlist_name.txt"""'], {}), "(simparams.data_dir_result, 'outlist_name.txt')\n", (13605, 13652), False, 'import numpy as np, os, sys\n'), ((13972, 14021), 'os.path.join', 'os.path.join', (['simparams.data_dir_result', 'filename'], {}), '(simparams.data_dir_result, 
filename)\n', (13984, 14021), False, 'import numpy as np, os, sys\n'), ((14529, 14578), 'os.path.join', 'os.path.join', (['simparams.data_dir_result', 'filename'], {}), '(simparams.data_dir_result, filename)\n', (14541, 14578), False, 'import numpy as np, os, sys\n'), ((839, 900), 'uqra.Hermite', 'uqra.Hermite', ([], {'d': 'solver.ndim', 'deg': 'deg', 'hem_type': '"""probabilists"""'}), "(d=solver.ndim, deg=deg, hem_type='probabilists')\n", (851, 900), False, 'import uqra, warnings, random, math\n'), ((1113, 1150), 'uqra.Legendre', 'uqra.Legendre', ([], {'d': 'solver.ndim', 'deg': 'deg'}), '(d=solver.ndim, deg=deg)\n', (1126, 1150), False, 'import uqra, warnings, random, math\n'), ((4938, 4955), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (4945, 4955), True, 'import numpy as np, os, sys\n'), ((7317, 7341), 'numpy.mean', 'np.mean', (['u_train'], {'axis': '(1)'}), '(u_train, axis=1)\n', (7324, 7341), True, 'import numpy as np, os, sys\n'), ((7342, 7365), 'numpy.std', 'np.std', (['u_train'], {'axis': '(1)'}), '(u_train, axis=1)\n', (7348, 7365), True, 'import numpy as np, os, sys\n'), ((8995, 9022), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['u_train_new'], {}), '(u_train_new)\n', (9009, 9022), True, 'import scipy.stats as stats\n'), ((11154, 11178), 'numpy.mean', 'np.mean', (['u_train'], {'axis': '(1)'}), '(u_train, axis=1)\n', (11161, 11178), True, 'import numpy as np, os, sys\n'), ((11179, 11202), 'numpy.std', 'np.std', (['u_train'], {'axis': '(1)'}), '(u_train, axis=1)\n', (11185, 11202), True, 'import numpy as np, os, sys\n'), ((1285, 1344), 'uqra.Hermite', 'uqra.Hermite', ([], {'d': 'solver.ndim', 'deg': 'deg', 'hem_type': '"""physicists"""'}), "(d=solver.ndim, deg=deg, hem_type='physicists')\n", (1297, 1344), False, 'import uqra, warnings, random, math\n'), ((3814, 3829), 'numpy.amin', 'np.amin', (['y_test'], {}), '(y_test)\n', (3821, 3829), True, 'import numpy as np, os, sys\n'), ((3830, 3845), 'numpy.amax', 'np.amax', (['y_test'], {}), 
'(y_test)\n', (3837, 3845), True, 'import numpy as np, os, sys\n'), ((5172, 5189), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (5179, 5189), True, 'import numpy as np, os, sys\n'), ((9260, 9277), 'numpy.isnan', 'np.isnan', (['y_train'], {}), '(y_train)\n', (9268, 9277), True, 'import numpy as np, os, sys\n'), ((9438, 9455), 'numpy.isnan', 'np.isnan', (['u_train'], {}), '(u_train)\n', (9446, 9455), True, 'import numpy as np, os, sys\n'), ((9615, 9632), 'numpy.isinf', 'np.isinf', (['y_train'], {}), '(y_train)\n', (9623, 9632), True, 'import numpy as np, os, sys\n'), ((14181, 14192), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14190, 14192), False, 'import numpy as np, os, sys\n'), ((14739, 14750), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14748, 14750), False, 'import numpy as np, os, sys\n'), ((6816, 6828), 'scipy.stats.norm', 'stats.norm', ([], {}), '()\n', (6826, 6828), True, 'import scipy.stats as stats\n'), ((7541, 7557), 'numpy.amin', 'np.amin', (['y_train'], {}), '(y_train)\n', (7548, 7557), True, 'import numpy as np, os, sys\n'), ((7558, 7574), 'numpy.amax', 'np.amax', (['y_train'], {}), '(y_train)\n', (7565, 7574), True, 'import numpy as np, os, sys\n'), ((9368, 9385), 'numpy.isnan', 'np.isnan', (['y_train'], {}), '(y_train)\n', (9376, 9385), True, 'import numpy as np, os, sys\n'), ((9546, 9563), 'numpy.isnan', 'np.isnan', (['u_train'], {}), '(u_train)\n', (9554, 9563), True, 'import numpy as np, os, sys\n'), ((9723, 9740), 'numpy.isinf', 'np.isinf', (['y_train'], {}), '(y_train)\n', (9731, 9740), True, 'import numpy as np, os, sys\n'), ((9877, 9929), 'numpy.around', 'np.around', (['(u_train.shape[1] / pce_model.num_basis)', '(2)'], {}), '(u_train.shape[1] / pce_model.num_basis, 2)\n', (9886, 9929), True, 'import numpy as np, os, sys\n'), ((11382, 11398), 'numpy.amin', 'np.amin', (['y_train'], {}), '(y_train)\n', (11389, 11398), True, 'import numpy as np, os, sys\n'), ((11399, 11415), 'numpy.amax', 'np.amax', (['y_train'], {}), 
'(y_train)\n', (11406, 11415), True, 'import numpy as np, os, sys\n'), ((12979, 13005), 'numpy.array', 'np.array', (['metrics_each_deg'], {}), '(metrics_each_deg)\n', (12987, 13005), True, 'import numpy as np, os, sys\n'), ((13083, 13109), 'numpy.array', 'np.array', (['metrics_each_deg'], {}), '(metrics_each_deg)\n', (13091, 13109), True, 'import numpy as np, os, sys\n'), ((13187, 13213), 'numpy.array', 'np.array', (['metrics_each_deg'], {}), '(metrics_each_deg)\n', (13195, 13213), True, 'import numpy as np, os, sys\n'), ((13293, 13319), 'numpy.array', 'np.array', (['metrics_each_deg'], {}), '(metrics_each_deg)\n', (13301, 13319), True, 'import numpy as np, os, sys\n'), ((9318, 9335), 'numpy.isnan', 'np.isnan', (['y_train'], {}), '(y_train)\n', (9326, 9335), True, 'import numpy as np, os, sys\n'), ((9496, 9513), 'numpy.isnan', 'np.isnan', (['u_train'], {}), '(u_train)\n', (9504, 9513), True, 'import numpy as np, os, sys\n'), ((9673, 9690), 'numpy.isinf', 'np.isinf', (['y_train'], {}), '(y_train)\n', (9681, 9690), True, 'import numpy as np, os, sys\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 7 13:33:59 2016
@author: cs401
"""
import os
import numpy
import pandas
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
import copy
from .._toolboxPath import toolboxPath
from ..objects import Dataset
from pyChemometrics.ChemometricsPCA import ChemometricsPCA
from ..multivariate.multivariateUtilities import pcaSignificance, metadataTypeGrouping
from ..plotting._multivariatePlotting import plotMetadataDistribution, plotScree, plotScores, plotLoadings, plotOutliers
from ..utilities._internal import _copyBackingFiles as copyBackingFiles
from ..enumerations import AssayRole, SampleType
import re
import numbers
import shutil
from IPython.display import display
from warnings import warn
from ..__init__ import __version__ as version
def multivariateReport(dataTrue, pcaModel, reportType='analytical', withExclusions=False, biologicalMeasurements=None, dModX_criticalVal=None, dModX_criticalVal_type=None, scores_criticalVal=None, kw_threshold=0.05, r_threshold=0.3, hotellings_alpha=0.05, excludeFields=None, destinationPath=None):
"""
PCA based analysis of a dataset. A PCA model is generated for the data object, then potential associations between the scores and any sample metadata determined by correlation (continuous data) or a Kruskal-Wallis test (categorical data).
The multivariateReport has three options for the **reportType** argument:
* **'analytical'** Reports on analytical qualities of the data only (as defined in the relevant SOP).
* **'biological'** Reports on biological qualities of the data only (all columns in *sampleMetadata* except those defined as analytical or skipped in the SOP).
* **'all'** Reports on all qualities of the data (all columns in *sampleMetadata* except those defined as skipped in the SOP).
:param Dataset dataTrue: Dataset to report on
:param ChemometricsPCA pcaModel: PCA model object (scikit-learn based)
:param str reportType: Type of sample metadata to report on, one of ``analytical``, ``biological`` or ``all``
:param bool withExclusions: If ``True``, only report on features and samples not masked by the sample and feature masks
:param dict biologicalMeasurements: Dictionary of type of data contained in each biological sampleMetadata field. Keys are sampleMetadata column names, and values one of 'categorical', 'continuous', 'date'
:param dModX_criticalVal: Samples with a value in DModX space exceeding this critical value are listed as potential outliers
:type dModX_criticalVal: None or float
:param dModX_criticalVal_type: Type of critical value in DModX, one of ``Fcrit`` or ``Percentile``
:type dModX_criticalVal_type: None or str
:param scores_criticalVal: Samples with a value in scores space exceeding this critical value are listed as potential outliers
:type scores_criticalVal: None or float
:param kw_threshold: Fields with a Kruskal-Willis p-value greater than this are not deemed to have a significant association with the PCA score
:type kw_threshold: None or float
:param r_threshold: Fields with a (absolute) correlation coefficient value less than this are not deemed to have a significant association with the PCA score
:type r_threshold: None or float
:param float hotellings_alpha: Alpha value for plotting the Hotelling's ellipse in scores plots (default = 0.05)
:param excludeFields: If not None, list of sample metadata fields to be additionally excluded from analysis
:type excludeFields: None or list
:param destinationPath: If ``None`` plot interactively, otherwise save report to the path specified
:type destinationPath: None or str
"""
# Check inputs
if not isinstance(dataTrue, Dataset):
raise TypeError('dataTrue must be an instance of nPYc.Dataset')
if not isinstance(pcaModel, ChemometricsPCA):
raise TypeError('PCA model must be an instance of pyChemometrics.ChemometricsPCA')
if not isinstance(reportType, str) & (reportType in {'all', 'analytical', 'biological'}):
raise ValueError('reportType must be == ' + str({'all', 'analytical', 'biological'}))
if not isinstance(withExclusions, bool):
raise TypeError('withExclusions must be a bool')
if biologicalMeasurements is not None:
if not isinstance(biologicalMeasurements, dict):
raise TypeError('biologicalMeasurements must be a dictionary')
temp = list(biologicalMeasurements.values())
if any(val not in {'categorical','continuous','date'} for val in temp):
raise ValueError('biologicalMeasurements values must be == ' + str({'categorical', 'continuous', 'date'}))
if dModX_criticalVal is not None:
if not isinstance(dModX_criticalVal, numbers.Number) & ((dModX_criticalVal < 1) & (dModX_criticalVal > 0)):
raise ValueError('dModX_criticalVal must be a number in the range 0 to 1')
if dModX_criticalVal_type is None:
raise ValueError('If dModX_criticalVal is specfied, specify dModX_criticalVal_type (must be == ' + str({'Fcrit', 'Percentile'}) + ')')
if dModX_criticalVal_type is not None:
if not isinstance(dModX_criticalVal_type, str) & (dModX_criticalVal_type in {'Fcrit', 'Percentile'}):
raise ValueError('dModX_criticalVal_type must be == ' + str({'Fcrit', 'Percentile'}))
if scores_criticalVal is not None:
if not isinstance(scores_criticalVal, numbers.Number) & ((scores_criticalVal < 1) & (scores_criticalVal > 0)):
raise ValueError('scores_criticalVal must be a number in the range 0 to 1')
if kw_threshold is not None:
if not isinstance(kw_threshold, numbers.Number) or kw_threshold < 0:
raise ValueError('kw_threshold must be a positive number')
if r_threshold is not None:
if not isinstance(r_threshold, numbers.Number) or r_threshold < 0:
raise ValueError('r_threshold must be a positive number')
if not isinstance(hotellings_alpha, numbers.Number) & ((hotellings_alpha < 1) & (hotellings_alpha > 0)):
raise ValueError('hotellings_alpha must be a number in the range 0 to 1')
if excludeFields is not None:
if not isinstance(excludeFields, list):
raise TypeError('excludeFields must be a list of column headers from data.sampleMetadata')
if destinationPath is not None:
if not isinstance(destinationPath, str):
raise TypeError('destinationPath must be a string')
# Create directory to save destinationPath
if destinationPath:
saveDir = os.path.join(destinationPath, 'graphics', 'report_multivariate' + reportType.capitalize())
# If directory exists delete directory and contents
if os.path.exists(saveDir):
shutil.rmtree(saveDir)
# Create directory to save destinationPath
os.makedirs(saveDir)
else:
saveAs = None
# Filter dataset if required
data = copy.deepcopy(dataTrue)
if withExclusions:
data.applyMasks()
if hasattr(pcaModel, '_npyc_dataset_shape'):
if pcaModel._npyc_dataset_shape['NumberSamples'] != data.intensityData.shape[0] \
or pcaModel._npyc_dataset_shape['NumberFeatures'] != data.intensityData.shape[1]:
raise ValueError('Data dimension mismatch: Number of samples and features in the nPYc Dataset do not match'
'the numbers present when PCA was fitted. Verify if withExclusions argument is matching.')
else:
raise ValueError('Fit a PCA model beforehand using exploratoryAnalysisPCA.')
# Set up template item and save required info
figuresQCscores = OrderedDict()
figuresLoadings = OrderedDict()
figuresCORscores = OrderedDict()
figuresKWscores = OrderedDict()
figuresOTHERscores = OrderedDict()
item = dict()
item['ReportType'] = reportType.title()
item['Name'] = data.name
ns, nv = data.intensityData.shape
item['Nfeatures'] = str(nv)
item['Nsamples'] = str(ns)
SPmask = (data.sampleMetadata['SampleType'] == SampleType.StudyPool) & (data.sampleMetadata['AssayRole'] == AssayRole.PrecisionReference)
item['SPcount'] = str(sum(SPmask))
SSmask = (data.sampleMetadata['SampleType'] == SampleType.StudySample) & (data.sampleMetadata['AssayRole'] == AssayRole.Assay)
item['SScount'] = str(sum(SSmask))
ERmask = (data.sampleMetadata['SampleType'] == SampleType.ExternalReference) & (data.sampleMetadata['AssayRole'] == AssayRole.PrecisionReference)
item['ERcount'] = str(sum(ERmask))
item['OTHERcount'] = str(ns - sum(SSmask) - sum(SPmask) - sum(ERmask))
data.sampleMetadata.loc[~SSmask & ~SPmask & ~ERmask, 'Plot Sample Type'] = 'Sample'
data.sampleMetadata.loc[SSmask, 'Plot Sample Type'] = 'Study Sample'
data.sampleMetadata.loc[SPmask, 'Plot Sample Type'] = 'Study Reference'
data.sampleMetadata.loc[ERmask, 'Plot Sample Type'] = 'Long-Term Reference'
item['Normalisation'] = str(dataTrue.Normalisation)
special_scaling = dict([(0, 'mc'), (1, 'uv'), (0.5, 'par')])
if pcaModel.scaler.scale_power in special_scaling:
item['Scaling'] = special_scaling[pcaModel.scaler.scale_power]
else:
item['Scaling'] = str(pcaModel.scaler.scale_power)
# Fields to plot
includeForPlotting = {}
if reportType in {'analytical', 'all'}:
includeForPlotting.update(data.Attributes['analyticalMeasurements'])
if reportType in {'biological', 'all'}:
if biologicalMeasurements is not None:
includeForPlotting.update(biologicalMeasurements)
else:
temp = [val for val in data.sampleMetadata.columns if val not in data.Attributes['analyticalMeasurements']]
# Create dictionary with key and type (categorical/continuous etc) for each biological parameter field
for plotdata in temp:
out = metadataTypeGrouping(data.sampleMetadata[plotdata], sampleGroups=data.sampleMetadata['Plot Sample Type'])
includeForPlotting[plotdata] = out
# Fields not to plot
excludeFromPlotting = data.Attributes['excludeFromPlotting']
if excludeFields != None:
excludeFromPlotting.append(excludeFields)
# Remove fields either marked not to plot or not present in sampleMetadata
includeForPlotting = {i:includeForPlotting[i] for i in includeForPlotting if ((i in data.sampleMetadata.columns) and (i not in excludeFromPlotting))}
# Generate DataFrame of only data for plotting
dataForPlotting = copy.deepcopy(data.sampleMetadata[list(includeForPlotting.keys())])
# Check for data integrity
for plotdata in includeForPlotting.keys():
# Check all values in column have the same type
myset = set(list(type(data.sampleMetadata[plotdata][i]) for i in range(ns)))
if len(myset) == 1:
pass
# elif all((my == pandas._libs.tslib.NaTType or my == pandas._libs.tslib.Timestamp) for my in myset):
# pass
elif str in myset:
data.sampleMetadata[plotdata] = data.sampleMetadata[plotdata].astype(str)
warning_string = "Ensure datatype of all entries in \"{0}\" are consistent. Column \"{0}\" has been typecasted to str".format(plotdata)
warn(warning_string)
else:
warning_string = "Ensure datatype of all entries in \"{0}\" are consistent. Skipping Column \"{0}\"".format(plotdata)
warn(warning_string)
continue
# Change type if uniform, uniformBySampleType or unique (and categorical) - do not plot these
out = metadataTypeGrouping(data.sampleMetadata[plotdata], sampleGroups=data.sampleMetadata['Plot Sample Type'])
if out in {'uniform', 'uniformBySampleType', 'unique'}:
includeForPlotting[plotdata] = out
# Remove unwanted characters from column titles
dataForPlotting.rename(columns={plotdata: plotdata.translate({ord(c): " " for c in "!@#$%^&*()[]{};:,./<>?\|`~-=+_"}).strip()}, inplace=True)
# Correct duplicate column names
cols = pandas.Series(dataForPlotting.columns)
for dup in dataForPlotting.columns[dataForPlotting.columns.duplicated()].unique():
cols.loc[dataForPlotting.columns.get_loc(dup)] = [dup + '.' + str(d_idx) if d_idx != 0 else dup for d_idx in range(dataForPlotting.columns.get_loc(dup).sum())]
dataForPlotting.columns = cols
nc = pcaModel.ncomps
item['Ncomponents'] = str(nc)
if dModX_criticalVal is not None:
if dModX_criticalVal_type == 'Fcrit':
item['dModX_criticalVal'] = dModX_criticalVal_type + ' (' + str(dModX_criticalVal) + ')'
else:
item['dModX_criticalVal'] = 'Q' + str(100-dModX_criticalVal*100)
else:
item['dModX_criticalVal'] = 'None'
if scores_criticalVal is not None:
item['scores_criticalVal'] = 'Q' + str(100-scores_criticalVal*100)
else:
item['scores_criticalVal'] = 'None'
# Add check for if 2nd component added for plotting purposes only
if nc==2:
if ( (pcaModel.cvParameters['Q2X_Scree'][1] - pcaModel.cvParameters['Q2X_Scree'][0])/pcaModel.cvParameters['Q2X_Scree'][0] < pcaModel.cvParameters['stopping_condition'] ):
item['Ncomponents_optimal'] = '1'
# Datast summary
if destinationPath is None:
print('\033[1m' + 'Dataset' + '\033[0m')
print('\nOriginal data consists of ' + item['Nsamples'] + ' samples and ' + item['Nfeatures'] + ' features')
print('\t' + item['SScount'] + ' Study Samples')
print('\t' + item['SPcount'] + ' Study Reference Samples')
print('\t' + item['ERcount'] + ' Long-Term Reference Samples')
print('\t' + item['OTHERcount'] + ' Other Samples')
print('\033[1m' + '\nPCA Analysis' + '\033[0m')
print('\nPCA Model Parameters')
print('\tNormalisation method: ' + item['Normalisation'])
print('\tScaling: ' + item['Scaling'])
print('\tNumber of components: ' + item['Ncomponents'])
if 'Ncomponents_optimal' in item:
print('\t' + '\033[1m' + 'IMPORTANT NOTE: Optimal number of components: 1 (second component added for plotting purposes)' + '\033[0m')
print('\tCritical value for flagging outliers in DmodX space: ' + item['dModX_criticalVal'])
print('\tCritical value for flagging outliers in scores space: ' + item['scores_criticalVal'])
print('\033[1m' + '\nPCA QC Outputs' + '\033[0m')
# Scree plot
if destinationPath:
item['PCA_screePlot'] = os.path.join(saveDir, item['Name'] + '_PCAscreePlot.' + data.Attributes['figureFormat'])
saveAs = item['PCA_screePlot']
item['PCA_var_exp'] = pcaModel.modelParameters['VarExpRatio']
else:
print('\nFigure 1: PCA scree plot of variance explained by each component (cumulative)')
plotScree(pcaModel.cvParameters['R2X_Scree'],
Q2=pcaModel.cvParameters['Q2X_Scree'],
xlabel='Component',
ylabel='Percentage variance (cumulative)',
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
# Scores plot (coloured by sample type)
temp = dict()
if destinationPath:
temp['PCA_scoresPlot'] = os.path.join(saveDir, item['Name'] + '_PCAscoresPlot_')
saveAs = temp['PCA_scoresPlot']
else:
print('\n\nFigure 2: PCA scores plots coloured by sample type.')
figuresQCscores = plotScores(pcaModel,
classes=data.sampleMetadata['Plot Sample Type'],
classType='Plot Sample Type',
title='Sample Type',
figures=figuresQCscores,
alpha=hotellings_alpha,
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
for key in figuresQCscores:
if os.path.join(destinationPath, 'graphics') in str(figuresQCscores[key]):
figuresQCscores[key] = re.sub('.*graphics', 'graphics', figuresQCscores[key])
item['QCscores'] = figuresQCscores
# Calculate sum of scores across all PCs for each sample
sumT = numpy.sum(numpy.absolute(pcaModel.scores), axis=1)
# Scatter plot of summed scores distance from origin (strong outliers in PCA)
if destinationPath:
item['PCA_strongOutliersPlot'] = os.path.join(saveDir, item['Name'] + '_strongOutliersPlot.' + data.Attributes['figureFormat'])
saveAs = item['PCA_strongOutliersPlot']
else:
print('\n\nFigure 3: Distribution in total distance from origin (scores space) by sample type.')
if not 'Run Order' in data.sampleMetadata.columns:
data.sampleMetadata['Run Order'] = data.sampleMetadata.index.values
# Flag potential strong outliers (exceed outliers_criticalVal)
if scores_criticalVal is not None:
PcritPercentile = 100 - scores_criticalVal*100
quantilesVals = numpy.percentile(sumT, [100 - scores_criticalVal*100])
which_scores_outlier = (sumT >= quantilesVals)
item['Noutliers_strong'] = str(sum(which_scores_outlier))
else:
PcritPercentile = None
which_scores_outlier = numpy.zeros(sumT.shape, dtype=bool)
plotOutliers(sumT,
data.sampleMetadata['Run Order'],
sampleType=data.sampleMetadata['Plot Sample Type'],
addViolin=True,
ylabel='Summed distance from origin (all PCs)',
PcritPercentile=PcritPercentile,
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
if (scores_criticalVal is not None) & (destinationPath is None):
print('\nExcluding samples with total distance from origin exceeding the ' + item['scores_criticalVal'] + ' limit would result in ' + item['Noutliers_strong'] + ' exclusions.')
# Scatter plot of DmodX (moderate outliers in PCA)
if destinationPath:
item['PCA_modOutliersPlot'] = os.path.join(saveDir, item['Name'] + '_modOutliersPlot.' + data.Attributes['figureFormat'])
saveAs = item['PCA_modOutliersPlot']
else:
print('\n\nFigure 4: Distribution in distance from model (DmodX) by sample type.')
sample_dmodx_values = pcaModel.dmodx(data.intensityData)
# Define defaults for plotting if no critical values specified by user
PcritPercentile = None
Fcrit = pcaModel._dmodx_fcrit(data.intensityData, alpha = 0.05)
FcritAlpha = 0.05
which_dmodx_outlier = numpy.zeros(sample_dmodx_values.shape, dtype=bool)
# Flag potential moderate outliers (exceed critical value)
if dModX_criticalVal is not None:
if dModX_criticalVal_type == 'Fcrit':
dModX_threshold = pcaModel._dmodx_fcrit(data.intensityData, alpha = dModX_criticalVal)
Fcrit = dModX_threshold
FcritAlpha = dModX_criticalVal
else:
dModX_threshold = numpy.percentile(sample_dmodx_values, [100 - dModX_criticalVal*100])
PcritPercentile = 100 - dModX_criticalVal*100
which_dmodx_outlier = (sample_dmodx_values >= dModX_threshold)
item['Noutliers_moderate'] = str(sum(which_dmodx_outlier))
plotOutliers(sample_dmodx_values,
data.sampleMetadata['Run Order'],
sampleType=data.sampleMetadata['Plot Sample Type'],
addViolin=True,
Fcrit=Fcrit,
FcritAlpha=FcritAlpha,
PcritPercentile=PcritPercentile,
ylabel='DmodX',
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
if (dModX_criticalVal is not None) & (destinationPath is None):
print('\nExcluding samples with DmodX exceeding the ' + item['dModX_criticalVal'] + ' limit would result in ' + item['Noutliers_moderate'] + ' exclusions.')
# Total number of outliers
if sum(which_scores_outlier | which_dmodx_outlier) > 0:
outliers = (which_scores_outlier | which_dmodx_outlier)
item['Noutliers_total'] = str(sum(outliers))
item['Outliers_total_details'] = data.sampleMetadata[['Sample File Name']][outliers]
item['Outliers_total_details']['DModX Outlier'] = which_dmodx_outlier[outliers]
item['Outliers_total_details']['Scores Outlier'] = which_scores_outlier[outliers]
if destinationPath is None:
print('\nExcluding outliers (as specified) would result in ' + item['Noutliers_total'] + ' exclusions.')
display(item['Outliers_total_details'])
print('\n')
# Loadings plot
if destinationPath:
temp['PCA_loadingsPlot'] = os.path.join(saveDir, item['Name'] + '_PCAloadingsPlot_')
saveAs = temp['PCA_loadingsPlot']
else:
print('\n\nFigure 5: PCA loadings plots.')
figuresLoadings = plotLoadings(pcaModel,
data,
title='',
figures=figuresLoadings,
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
for key in figuresLoadings:
if os.path.join(destinationPath, 'graphics') in str(figuresLoadings[key]):
figuresLoadings[key] = re.sub('.*graphics', 'graphics', figuresLoadings[key])
item['loadings'] = figuresLoadings
# Plot metadata and assess potential association with PCA scores
# Set up:
if destinationPath:
temp['metadataPlot'] = os.path.join(saveDir, item['Name'] + '_metadataPlot_')
saveAs = temp['metadataPlot']
else:
print('\033[1m' + '\nDistribution of Values in each Metadata Field\n'+ '\033[0m')
print('Figure 6: Distribution of values in each metadata field (plotted for fields with non-uniform values only).\n')
# Plot distribution for each field and calculate measure of association to PCA scores (categorical/continuous only)
valueType = list(includeForPlotting.values())
allTypes = set(valueType)
signif = numpy.full([nc,len(includeForPlotting)], numpy.nan)
countKW = 0
fieldsKW = []
countKWfail = 0
fieldsKWfail = []
for eachType in allTypes:
if eachType in {'continuous', 'categorical', 'date'}:
figuresMetadataDist = OrderedDict()
if destinationPath is None:
print(eachType.title() + ' data.')
# Find indices of instances of this type
indices = [i for i, x in enumerate(valueType) if x == eachType]
# Plot
figuresMetadataDist = plotMetadataDistribution(dataForPlotting.iloc[:, indices],
eachType,
figures=figuresMetadataDist,
savePath=saveAs,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
for key in figuresMetadataDist:
if os.path.join(destinationPath, 'graphics') in str(figuresMetadataDist[key]):
figuresMetadataDist[key] = re.sub('.*graphics', 'graphics', figuresMetadataDist[key])
if eachType == 'continuous':
item['metadataDistContinuous'] = figuresMetadataDist
elif eachType == 'categorical':
item['metadataDistCategorical'] = figuresMetadataDist
else:
item['metadataDistDate'] = figuresMetadataDist
# Calculate metric of association between metadata and PCA score
if eachType in {'continuous', 'categorical'}:
for field in dataForPlotting.columns[indices]:
out = pcaSignificance(pcaModel.scores, dataForPlotting[field], eachType)
if out is not None:
index = dataForPlotting.columns.get_loc(field)
signif[:,index] = out
if eachType == 'categorical':
countKW = countKW+1
fieldsKW.append(field)
# Count the number of classes where KW cannot be calculated
else:
if eachType == 'categorical':
countKWfail = countKWfail+1
fieldsKWfail.append(field)
fieldNames = dataForPlotting.columns
item['Nmetadata'] = str(len(includeForPlotting))
item['Ncorr'] = str(valueType.count('continuous'))
item['Nkw'] = str(countKW)
item['Ndate'] = str(valueType.count('date'))
item['Ninsuf'] = str(countKWfail)
item['Nuniform'] = str(valueType.count('uniform'))
item['NuniformByType'] = str(valueType.count('uniformBySampleType'))
item['Nunique'] = str(valueType.count('unique'))
item['Nex'] = str(valueType.count('excluded'))
item['r_threshold'] = str(r_threshold)
item['kw_threshold'] = str(kw_threshold)
if destinationPath is None:
# Summarise results
print('\033[1m' + '\n\nAssociation of PCA Scores with Metadata' + '\033[0m')
print('\nCalculations Performed')
print('\nTotal number of metadata fields: ' + item['Nmetadata'])
print('\tNumber of fields where correlation to PCA scores calculated: ' + item['Ncorr'])
print('\tNumber of fields where Kruskal-Wallis test between groups in PCA scores calculated: ' + item['Nkw'])
print('\tNumber of fields with date values: ' + item['Ndate'])
print('\tNumber of fields where insufficent sample numbers to estimate significance: ' + item['Ninsuf'])
print('\tNumber of fields with uniform class for all samples: ' + item['Nuniform'])
print('\tNumber of fields with uniform class for all samples with same sample type: ' + item['NuniformByType'])
print('\tNumber of fields with unique non-numeric values for all samples in class: ' + item['Nunique'])
print('\tNumber of fields excluded from calculations: ' + item['Nex'])
print('\n\tCorrelation threshold for plotting: ' + item['r_threshold'])
print('\tKruskal-Willis p-value threshold for plotting: ' + item['kw_threshold'])
# Heatmap of results - correlation
if destinationPath is None:
print('\n\nFigure 7: Heatmap of correlation to PCA scores for suitable metadata fields.')
if valueType.count('continuous') > 0:
sigCor = numpy.full([nc*valueType.count('continuous'), 3], numpy.nan)
index = [i for i, j in enumerate(valueType) if j == 'continuous']
i=0
for IX in index:
sigCor[i:i+nc,0] = IX
sigCor[i:i+nc,1] = numpy.arange(1,nc+1)
sigCor[i:i+nc,2] = signif[:, IX]
i=i+nc
sigCor = pandas.DataFrame(sigCor, columns=['Field', 'PC', 'Correlation'])
sigCor['Field'] = fieldNames[sigCor['Field'].values.astype('int')]
sigCor = sigCor.pivot('Field','PC','Correlation')
# plot heatmap
with sns.axes_style("white"):
plt.figure(figsize=data.Attributes['figureSize'], dpi=data.Attributes['dpi'])
sns.heatmap(sigCor, annot=True, fmt='.3g', vmin=-1, vmax=1, cmap='RdBu_r')
if destinationPath:
item['sigCorHeatmap'] = os.path.join(saveDir, item['Name'] + '_sigCorHeatmap.' + data.Attributes['figureFormat'])
plt.savefig(item['sigCorHeatmap'], bbox_inches='tight', format=data.Attributes['figureFormat'], dpi=data.Attributes['dpi'])
plt.close()
else:
plt.show()
else:
if destinationPath is None:
print('\n' + str(valueType.count('correlation')) + ' fields where correlation to PCA scores calculated.')
# Heatmap of results - Kruskal-Wallis
if destinationPath is None:
print('\n\nFigure 8: Heatmap of Kruskal-Wallis Test against PCA scores for suitable metadata fields.')
if countKW > 0:
sigKru = numpy.full([nc*countKW, 3], numpy.nan)
index = [dataForPlotting.columns.get_loc(field) for field in fieldsKW]
i=0
for IX in index:
sigKru[i:i+nc,0] = IX
sigKru[i:i+nc,1] = numpy.arange(1,nc+1)
sigKru[i:i+nc,2] = signif[:, IX]
i=i+nc
sigKru = pandas.DataFrame(sigKru, columns=['Field', 'PC', 'Kruskal-Wallis p-value'])
sigKru['Field'] = fieldNames[sigKru['Field'].values.astype('int')]
sigKru = sigKru.pivot('Field','PC','Kruskal-Wallis p-value')
# plot heatmap
with sns.axes_style("white"):
plt.figure(figsize=data.Attributes['figureSize'], dpi=data.Attributes['dpi'])
sns.heatmap(sigKru, annot=True, fmt='.3g', vmin=0, vmax=1, cmap='OrRd_r')
if destinationPath:
item['sigKruHeatmap'] = os.path.join(saveDir, item['Name'] + '_sigKruHeatmap.' + data.Attributes['figureFormat'])
plt.savefig(item['sigKruHeatmap'], bbox_inches='tight', format=data.Attributes['figureFormat'], dpi=data.Attributes['dpi'])
plt.close()
else:
plt.show()
else:
if destinationPath is None:
print('\n'+ str(valueType.count('KW')) + ' fields where Kruskal-Wallis test between groups in PCA scores calculated.')
# Scores plots coloured by each available metadata, above thresholds if required and sorted by significance
if destinationPath:
saveAs = saveDir
# Plots for continuous data fields (passing correlation threshold)
item['Ncorr_passing'] = '0'
if destinationPath is None:
print('\n\nFigure 9: PCA scores plots coloured by metadata (significance by correlation).')
if valueType.count('continuous') > 0:
if r_threshold == 'None':
r_threshold = numpy.min(abs(sigCor.values))
item['Ncorr_passing'] = str(sum((abs(sigCor.values) >= r_threshold).any(axis=1)==True))
if destinationPath is None:
print('\n' + item['Ncorr_passing'] + ' fields where correlation coefficient to PCA scores exceeded threshold of ' + str(r_threshold))
if (abs(sigCor.values) >= r_threshold).any():
fields = sigCor.index[(abs(sigCor.values) >= r_threshold).any(axis=1)]
figuresCORscores = _plotScoresLocal(dataForPlotting,
fields,
pcaModel,
'continuous',
data.name,
alpha=hotellings_alpha,
plotAssociation=sigCor,
r_threshold=r_threshold,
saveDir=saveAs,
figures=figuresCORscores,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
if destinationPath is not None:
for key in figuresCORscores:
if os.path.join(destinationPath, 'graphics') in str(figuresCORscores[key]):
figuresCORscores[key] = re.sub('.*graphics', 'graphics', figuresCORscores[key])
item['CORscores'] = figuresCORscores
else:
if destinationPath is None:
print('\n' + item['Ncorr_passing'] + ' fields where correlation coefficient to PCA scores exceeded threshold of ' + str(r_threshold))
# Plots for catagorical data fields (passing Kruskal-Wallis threshold)
item['Nkw_passing'] = '0'
if destinationPath is None:
print('\n\nFigure 10: PCA scores plots coloured by metadata (significance by Kruskal-Wallis).')
if countKW > 0:
if kw_threshold == 'None':
kw_threshold = numpy.max(abs(sigKru.values))
item['Nkw_passing'] = str(sum((sigKru.values <= kw_threshold).any(axis=1)==True))
if destinationPath is None:
print('\n' + item['Nkw_passing'] + ' fields where Kruskal-Wallis p-value against PCA scores exceeded threshold of ' + str(kw_threshold))
if (sigKru.values <= kw_threshold).any():
fields = sigKru.index[(sigKru.values <= kw_threshold).any(axis=1)]
figuresKWscores = _plotScoresLocal(dataForPlotting,
fields,
pcaModel,
'categorical',
data.name,
alpha=hotellings_alpha,
plotAssociation=sigKru,
kw_threshold=kw_threshold,
saveDir=saveAs,
figures=figuresKWscores,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
if destinationPath is not None:
for key in figuresKWscores:
if os.path.join(destinationPath, 'graphics') in str(figuresKWscores[key]):
figuresKWscores[key] = re.sub('.*graphics', 'graphics', figuresKWscores[key])
item['KWscores'] = figuresKWscores
else:
if destinationPath is None:
print('\n' + item['Nkw_passing'] + ' fields where Kruskal-Wallis p-value against PCA scores exceeded threshold of ' + str(kw_threshold))
# Plots for catagorical data fields (with insufficient numbers to test significance)
if destinationPath is None:
print('\n\nFigure 11: PCA scores plots coloured by metadata (insufficent sample numbers to estimate significance).')
print('\n' + item['Ninsuf'] + ' fields where insufficent sample numbers to estimate significance.')
if countKWfail > 0:
# Create a dataframe with null significance values
sigNone = numpy.full([nc*countKWfail, 3], numpy.nan)
index = [dataForPlotting.columns.get_loc(field) for field in fieldsKWfail]
i=0
for IX in index:
sigNone[i:i+nc,0] = IX
sigNone[i:i+nc,1] = numpy.arange(1,nc+1)
i=i+nc
sigNone = pandas.DataFrame(sigNone, columns=['Field', 'PC', 'Kruskal-Wallis p-value'])
sigNone['Field'] = fieldNames[sigNone['Field'].values.astype('int')]
sigNone = sigNone.pivot('Field','PC','Kruskal-Wallis p-value')
figuresOTHERscores = _plotScoresLocal(dataForPlotting,
fieldsKWfail,
pcaModel,
'categorical',
data.name,
alpha=hotellings_alpha,
plotAssociation=sigNone,
saveDir=saveAs,
figures=figuresOTHERscores,
figureFormat=data.Attributes['figureFormat'],
dpi=data.Attributes['dpi'],
figureSize=data.Attributes['figureSize'])
if destinationPath is not None:
for key in figuresOTHERscores:
if os.path.join(destinationPath, 'graphics') in str(figuresOTHERscores[key]):
figuresOTHERscores[key] = re.sub('.*graphics', 'graphics', figuresOTHERscores[key])
item['OTHERscores'] = figuresOTHERscores
# Generate html report
if destinationPath:
# Make paths for graphics local not absolute for use in the HTML.
for key in item:
if os.path.join(destinationPath, 'graphics') in str(item[key]):
item[key] = re.sub('.*graphics', 'graphics', item[key])
# Generate report
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(os.path.join(toolboxPath(), 'Templates')))
template = env.get_template('NPC_MultivariateReport.html')
filename = os.path.join(destinationPath, data.name + '_report_multivariate' + reportType.capitalize() + '.html')
f = open(filename,'w')
f.write(template.render(item=item, version=version, graphicsPath='/report_multivariate' + reportType.capitalize()))
f.close()
copyBackingFiles(toolboxPath(), os.path.join(destinationPath, 'graphics'))
return None
def _plotScoresLocal(data, metadata, pcaModel, classType, name, alpha=0.05, plotAssociation=None, r_threshold=None, kw_threshold=None, saveDir=None, figures=None, figureFormat='png', dpi=72, figureSize=(11, 7)):
"""
Local function to plot scores for each metadata field
"""
temp = dict()
nc = pcaModel.scores.shape[1]
if saveDir:
temp['PCA_scoresPlot'] = os.path.join(saveDir, name + '_PCAscoresPlot_')
saveAs = temp['PCA_scoresPlot']
else:
saveAs = None
fieldNames = data.columns
for plotdata in metadata:
if saveDir is None:
print('\n' + plotdata)
# Find components with significance exceeding threshold
if plotAssociation is None:
sigLocal = None
else:
sigLocal = plotAssociation.loc[plotdata].values
if r_threshold is not None:
components = abs(sigLocal) >= r_threshold
elif kw_threshold is not None:
components = sigLocal <= kw_threshold
else:
components = numpy.ones([nc]).astype(bool)
index = fieldNames.str.match(plotdata+'$')
if index.any():
plotScores(pcaModel,
classes=data.iloc[:,index].squeeze(),
classType=classType,
components=components,
alpha=alpha,
plotAssociation=sigLocal,
title=plotdata,
figures=figures,
savePath=saveAs,
figureFormat=figureFormat,
dpi=dpi,
figureSize=figureSize)
else:
print(plotdata + ' not present in sampleMetadata - check this!')
if figures:
return figures | [
"numpy.absolute",
"seaborn.heatmap",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"shutil.rmtree",
"os.path.join",
"pandas.DataFrame",
"numpy.full",
"seaborn.axes_style",
"matplotlib.pyplot.close",
"os.path.exists",
"IPython.display.display",
"re.sub",
"copy.deepcopy",
"ma... | [((6662, 6685), 'copy.deepcopy', 'copy.deepcopy', (['dataTrue'], {}), '(dataTrue)\n', (6675, 6685), False, 'import copy\n'), ((7307, 7320), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7318, 7320), False, 'from collections import OrderedDict\n'), ((7340, 7353), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7351, 7353), False, 'from collections import OrderedDict\n'), ((7374, 7387), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7385, 7387), False, 'from collections import OrderedDict\n'), ((7407, 7420), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7418, 7420), False, 'from collections import OrderedDict\n'), ((7443, 7456), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7454, 7456), False, 'from collections import OrderedDict\n'), ((17312, 17362), 'numpy.zeros', 'numpy.zeros', (['sample_dmodx_values.shape'], {'dtype': 'bool'}), '(sample_dmodx_values.shape, dtype=bool)\n', (17323, 17362), False, 'import numpy\n'), ((6479, 6502), 'os.path.exists', 'os.path.exists', (['saveDir'], {}), '(saveDir)\n', (6493, 6502), False, 'import os\n'), ((6578, 6598), 'os.makedirs', 'os.makedirs', (['saveDir'], {}), '(saveDir)\n', (6589, 6598), False, 'import os\n'), ((11368, 11406), 'pandas.Series', 'pandas.Series', (['dataForPlotting.columns'], {}), '(dataForPlotting.columns)\n', (11381, 11406), False, 'import pandas\n'), ((13647, 13740), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_PCAscreePlot.' + data.Attributes['figureFormat'])"], {}), "(saveDir, item['Name'] + '_PCAscreePlot.' 
+ data.Attributes[\n 'figureFormat'])\n", (13659, 13740), False, 'import os\n'), ((14334, 14389), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_PCAscoresPlot_')"], {}), "(saveDir, item['Name'] + '_PCAscoresPlot_')\n", (14346, 14389), False, 'import os\n'), ((15142, 15173), 'numpy.absolute', 'numpy.absolute', (['pcaModel.scores'], {}), '(pcaModel.scores)\n', (15156, 15173), False, 'import numpy\n'), ((15319, 15418), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_strongOutliersPlot.' + data.Attributes['figureFormat'])"], {}), "(saveDir, item['Name'] + '_strongOutliersPlot.' + data.\n Attributes['figureFormat'])\n", (15331, 15418), False, 'import os\n'), ((15853, 15909), 'numpy.percentile', 'numpy.percentile', (['sumT', '[100 - scores_criticalVal * 100]'], {}), '(sumT, [100 - scores_criticalVal * 100])\n', (15869, 15909), False, 'import numpy\n'), ((16075, 16110), 'numpy.zeros', 'numpy.zeros', (['sumT.shape'], {'dtype': 'bool'}), '(sumT.shape, dtype=bool)\n', (16086, 16110), False, 'import numpy\n'), ((16823, 16919), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_modOutliersPlot.' + data.Attributes['figureFormat'])"], {}), "(saveDir, item['Name'] + '_modOutliersPlot.' 
+ data.Attributes[\n 'figureFormat'])\n", (16835, 16919), False, 'import os\n'), ((19253, 19310), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_PCAloadingsPlot_')"], {}), "(saveDir, item['Name'] + '_PCAloadingsPlot_')\n", (19265, 19310), False, 'import os\n'), ((19981, 20035), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_metadataPlot_')"], {}), "(saveDir, item['Name'] + '_metadataPlot_')\n", (19993, 20035), False, 'import os\n'), ((24486, 24550), 'pandas.DataFrame', 'pandas.DataFrame', (['sigCor'], {'columns': "['Field', 'PC', 'Correlation']"}), "(sigCor, columns=['Field', 'PC', 'Correlation'])\n", (24502, 24550), False, 'import pandas\n'), ((25539, 25579), 'numpy.full', 'numpy.full', (['[nc * countKW, 3]', 'numpy.nan'], {}), '([nc * countKW, 3], numpy.nan)\n', (25549, 25579), False, 'import numpy\n'), ((25801, 25876), 'pandas.DataFrame', 'pandas.DataFrame', (['sigKru'], {'columns': "['Field', 'PC', 'Kruskal-Wallis p-value']"}), "(sigKru, columns=['Field', 'PC', 'Kruskal-Wallis p-value'])\n", (25817, 25876), False, 'import pandas\n'), ((30388, 30432), 'numpy.full', 'numpy.full', (['[nc * countKWfail, 3]', 'numpy.nan'], {}), '([nc * countKWfail, 3], numpy.nan)\n', (30398, 30432), False, 'import numpy\n'), ((30626, 30702), 'pandas.DataFrame', 'pandas.DataFrame', (['sigNone'], {'columns': "['Field', 'PC', 'Kruskal-Wallis p-value']"}), "(sigNone, columns=['Field', 'PC', 'Kruskal-Wallis p-value'])\n", (30642, 30702), False, 'import pandas\n'), ((32687, 32734), 'os.path.join', 'os.path.join', (['saveDir', "(name + '_PCAscoresPlot_')"], {}), "(saveDir, name + '_PCAscoresPlot_')\n", (32699, 32734), False, 'import os\n'), ((6507, 6529), 'shutil.rmtree', 'shutil.rmtree', (['saveDir'], {}), '(saveDir)\n', (6520, 6529), False, 'import shutil\n'), ((14874, 14915), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (14886, 14915), False, 'import os\n'), ((14972, 15026), 're.sub', 
're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresQCscores[key]'], {}), "('.*graphics', 'graphics', figuresQCscores[key])\n", (14978, 15026), False, 'import re\n'), ((17683, 17753), 'numpy.percentile', 'numpy.percentile', (['sample_dmodx_values', '[100 - dModX_criticalVal * 100]'], {}), '(sample_dmodx_values, [100 - dModX_criticalVal * 100])\n', (17699, 17753), False, 'import numpy\n'), ((19130, 19169), 'IPython.display.display', 'display', (["item['Outliers_total_details']"], {}), "(item['Outliers_total_details'])\n", (19137, 19169), False, 'from IPython.display import display\n'), ((19665, 19706), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (19677, 19706), False, 'import os\n'), ((19763, 19817), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresLoadings[key]'], {}), "('.*graphics', 'graphics', figuresLoadings[key])\n", (19769, 19817), False, 'import re\n'), ((20708, 20721), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20719, 20721), False, 'from collections import OrderedDict\n'), ((24408, 24431), 'numpy.arange', 'numpy.arange', (['(1)', '(nc + 1)'], {}), '(1, nc + 1)\n', (24420, 24431), False, 'import numpy\n'), ((24698, 24721), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (24712, 24721), True, 'import seaborn as sns\n'), ((24726, 24803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': "data.Attributes['figureSize']", 'dpi': "data.Attributes['dpi']"}), "(figsize=data.Attributes['figureSize'], dpi=data.Attributes['dpi'])\n", (24736, 24803), True, 'import matplotlib.pyplot as plt\n'), ((24807, 24881), 'seaborn.heatmap', 'sns.heatmap', (['sigCor'], {'annot': '(True)', 'fmt': '""".3g"""', 'vmin': '(-1)', 'vmax': '(1)', 'cmap': '"""RdBu_r"""'}), "(sigCor, annot=True, fmt='.3g', vmin=-1, vmax=1, cmap='RdBu_r')\n", (24818, 24881), True, 'import seaborn as sns\n'), ((25723, 25746), 'numpy.arange', 'numpy.arange', 
(['(1)', '(nc + 1)'], {}), '(1, nc + 1)\n', (25735, 25746), False, 'import numpy\n'), ((26035, 26058), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (26049, 26058), True, 'import seaborn as sns\n'), ((26063, 26140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': "data.Attributes['figureSize']", 'dpi': "data.Attributes['dpi']"}), "(figsize=data.Attributes['figureSize'], dpi=data.Attributes['dpi'])\n", (26073, 26140), True, 'import matplotlib.pyplot as plt\n'), ((26144, 26217), 'seaborn.heatmap', 'sns.heatmap', (['sigKru'], {'annot': '(True)', 'fmt': '""".3g"""', 'vmin': '(0)', 'vmax': '(1)', 'cmap': '"""OrRd_r"""'}), "(sigKru, annot=True, fmt='.3g', vmin=0, vmax=1, cmap='OrRd_r')\n", (26155, 26217), True, 'import seaborn as sns\n'), ((30583, 30606), 'numpy.arange', 'numpy.arange', (['(1)', '(nc + 1)'], {}), '(1, nc + 1)\n', (30595, 30606), False, 'import numpy\n'), ((32263, 32304), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (32275, 32304), False, 'import os\n'), ((10635, 10655), 'warnings.warn', 'warn', (['warning_string'], {}), '(warning_string)\n', (10639, 10655), False, 'from warnings import warn\n'), ((10789, 10809), 'warnings.warn', 'warn', (['warning_string'], {}), '(warning_string)\n', (10793, 10809), False, 'from warnings import warn\n'), ((24933, 25027), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_sigCorHeatmap.' + data.Attributes['figureFormat'])"], {}), "(saveDir, item['Name'] + '_sigCorHeatmap.' 
+ data.Attributes[\n 'figureFormat'])\n", (24945, 25027), False, 'import os\n'), ((25027, 25155), 'matplotlib.pyplot.savefig', 'plt.savefig', (["item['sigCorHeatmap']"], {'bbox_inches': '"""tight"""', 'format': "data.Attributes['figureFormat']", 'dpi': "data.Attributes['dpi']"}), "(item['sigCorHeatmap'], bbox_inches='tight', format=data.\n Attributes['figureFormat'], dpi=data.Attributes['dpi'])\n", (25038, 25155), True, 'import matplotlib.pyplot as plt\n'), ((25155, 25166), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25164, 25166), True, 'import matplotlib.pyplot as plt\n'), ((25180, 25190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25188, 25190), True, 'import matplotlib.pyplot as plt\n'), ((26269, 26363), 'os.path.join', 'os.path.join', (['saveDir', "(item['Name'] + '_sigKruHeatmap.' + data.Attributes['figureFormat'])"], {}), "(saveDir, item['Name'] + '_sigKruHeatmap.' + data.Attributes[\n 'figureFormat'])\n", (26281, 26363), False, 'import os\n'), ((26363, 26491), 'matplotlib.pyplot.savefig', 'plt.savefig', (["item['sigKruHeatmap']"], {'bbox_inches': '"""tight"""', 'format': "data.Attributes['figureFormat']", 'dpi': "data.Attributes['dpi']"}), "(item['sigKruHeatmap'], bbox_inches='tight', format=data.\n Attributes['figureFormat'], dpi=data.Attributes['dpi'])\n", (26374, 26491), True, 'import matplotlib.pyplot as plt\n'), ((26491, 26502), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26500, 26502), True, 'import matplotlib.pyplot as plt\n'), ((26516, 26526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26524, 26526), True, 'import matplotlib.pyplot as plt\n'), ((31615, 31656), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (31627, 31656), False, 'import os\n'), ((31692, 31735), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'item[key]'], {}), "('.*graphics', 'graphics', item[key])\n", (31698, 31735), False, 'import re\n'), ((21251, 
21292), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (21263, 21292), False, 'import os\n'), ((21359, 21417), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresMetadataDist[key]'], {}), "('.*graphics', 'graphics', figuresMetadataDist[key])\n", (21365, 21417), False, 'import re\n'), ((31267, 31308), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (31279, 31308), False, 'import os\n'), ((31373, 31430), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresOTHERscores[key]'], {}), "('.*graphics', 'graphics', figuresOTHERscores[key])\n", (31379, 31430), False, 'import re\n'), ((28044, 28085), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (28056, 28085), False, 'import os\n'), ((28147, 28202), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresCORscores[key]'], {}), "('.*graphics', 'graphics', figuresCORscores[key])\n", (28153, 28202), False, 'import re\n'), ((29588, 29629), 'os.path.join', 'os.path.join', (['destinationPath', '"""graphics"""'], {}), "(destinationPath, 'graphics')\n", (29600, 29629), False, 'import os\n'), ((29689, 29743), 're.sub', 're.sub', (['""".*graphics"""', '"""graphics"""', 'figuresKWscores[key]'], {}), "('.*graphics', 'graphics', figuresKWscores[key])\n", (29695, 29743), False, 'import re\n'), ((33238, 33254), 'numpy.ones', 'numpy.ones', (['[nc]'], {}), '([nc])\n', (33248, 33254), False, 'import numpy\n')] |
import os
import time
import torch
import glob
import numpy as np
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from dataclasses import dataclass
from transformers.data.data_collator import DataCollatorMixin
from fengshen.data.MMapIndexDataset import MMapIndexDataset
def safe_check(a, type='uint8'):
    """Assert every element of a nested iterable fits in the given integer dtype.

    Args:
        a: iterable of iterables of ints (e.g. lists of token ids).
        type: target dtype name, 'uint8' or 'uint16'.

    Raises:
        AssertionError: if any element falls outside the dtype's value range.
        KeyError: if `type` is not a supported dtype name.
    """
    bounds = {'uint8': (0, 255),
              'uint16': (0, 65535)}
    # unpack into lo/hi instead of rebinding the builtin name `range`
    lo, hi = bounds[type]
    for row in a:
        for e in row:
            assert lo <= e <= hi
@dataclass
class CBartDataCollator(DataCollatorMixin):
    """Collate (encoder_inputs, encoder_labels, decoder_labels) triples into
    padded batches for CBART-style non-autoregressive training.

    Attributes:
        tokenizer: tokenizer providing pad/mask/eos token ids.
        return_tensors: framework tag consumed by DataCollatorMixin ('pt').
    """
    tokenizer: None
    return_tensors: str = "pt"

    def __init__(self, args):
        # NOTE: this explicit __init__ overrides the dataclass-generated one,
        # so `tokenizer` must be assigned on the instance before collation.
        self.masked_lm = args.masked_lm
        self.encoder_loss_type = args.encoder_loss_type

    @staticmethod
    def create_decoder_inputs(encoder_inputs, encoder_labels, mask_token_id):
        """Expand encoder inputs into decoder inputs according to edit labels.

        :param encoder_inputs: list, each element is an int token id
        :param encoder_labels: list, each element is an int edit label
            (0 = copy token, 1 = replace with mask,
            k > 1 = insert k-1 masks, then copy the token)
        :return: 1-D LongTensor of decoder input ids
        """
        decoder_inputs = []
        for i, l in zip(encoder_inputs, encoder_labels):
            if l == 0:
                decoder_inputs.append(i)
            elif l == 1:
                decoder_inputs.append(mask_token_id)
            else:
                decoder_inputs += [mask_token_id] * (l - 1)
                decoder_inputs.append(i)
        return torch.tensor(decoder_inputs, dtype=torch.long)

    def torch_call(self, features):
        """Pad and assemble one batch dict for the CBART model.

        BUGFIX: this method was decorated ``@staticmethod`` while still taking
        ``self``; DataCollatorMixin invokes ``self.torch_call(features)``, and
        the staticmethod descriptor would have bound ``features`` to ``self``
        and raised a TypeError. The decorator is removed.
        """
        encoder_inputs = [s[0] for s in features]
        encoder_labels = [s[1] for s in features]
        decoder_labels = [s[2] for s in features]

        # Mask to avoid performing attention on padding token indices in encoder_inputs.
        _mask = pad_sequence(
            encoder_inputs, batch_first=True, padding_value=-100)
        attention_mask = torch.zeros(_mask.shape, dtype=torch.float32)
        attention_mask = attention_mask.masked_fill(_mask != -100, 1)
        encoder_inputs = pad_sequence(encoder_inputs, batch_first=True,
                                      padding_value=self.tokenizer.pad_token_id)
        encoder_labels = pad_sequence(
            encoder_labels, batch_first=True, padding_value=-100)
        if self.encoder_loss_type == 1:  # labels for mse loss
            encoder_labels = encoder_labels.float()

        decoder_labels = pad_sequence(
            decoder_labels, batch_first=True, padding_value=-100)
        # avoid computing loss on the first token, i.e. bos_token
        decoder_labels[:, 0] = -100

        # this method is for non-autoregressive decoding.
        decoder_inputs = [self.create_decoder_inputs(
            s[0], s[1], self.tokenizer.mask_token_id) for s in features]

        # replace the eos_token_id with pad_token_id
        for i, _ in enumerate(decoder_inputs):
            decoder_inputs[i][-1] = self.tokenizer.pad_token_id
        decoder_inputs = pad_sequence(decoder_inputs, batch_first=True,
                                      padding_value=self.tokenizer.pad_token_id)

        # create decoder_inputs by shifting the decoder_labels right,
        _tmp = decoder_inputs.clone()
        decoder_inputs[:, 1:] = _tmp[:, :-1]
        decoder_inputs[:, 0] = self.tokenizer.eos_token_id

        # construct labels for masked lm loss
        masked_lm_labels = decoder_labels.clone()
        masked_lm_labels[_tmp != self.tokenizer.mask_token_id] = -100
        if self.masked_lm:
            decoder_labels = masked_lm_labels

        return {
            "input_ids": encoder_inputs,
            "encoder_labels": encoder_labels,
            "decoder_input_ids": decoder_inputs,
            "labels": decoder_labels,
            "attention_mask": attention_mask,
        }
class BARTDataset(Dataset):
    """Map-style dataset of synthetic CBART training triples.

    Each item is a tuple of three 1-D LongTensors:
    (incorrect_input_ids, encoder_label_ids, target_ids), read from
    memory-mapped shard files produced by create_synthetic_data.py.
    """

    def __init__(self, dataset, mode, tokenizer=None, num_labels=-1, insert_mode=-1, max_sentence_length=40,
                 encoder_loss_type=0, statistics=True):
        """
        :param dataset: dataset name, used to build the shard glob path
        :param mode: one of 'train', 'test', 'dev'
        :param tokenizer: tokenizer providing pad/mask/eos token ids
            (used by create_mini_batch / create_decoder_inputs)
        :param num_labels: number of encoder edit labels; shard filenames
            encode num_labels - 2
        :param insert_mode: insertion-mode tag used in the shard filenames
        :param max_sentence_length: maximum sentence length excluding the
            bos and eos tokens
        :param encoder_loss_type: 1 selects float labels (MSE encoder loss);
            otherwise labels stay integer
        :param statistics: if True, scan the first shard to derive
            inverse-frequency label weights; otherwise use fixed weights
        """
        self.encoder_loss_type = encoder_loss_type
        assert mode in ["train", "test", 'dev']
        self.mode = mode
        if self.mode == 'test' or self.mode == 'dev':
            self.is_train = False
        else:
            self.is_train = True
        self.tokenizer = tokenizer
        self.max_sentence_length = max_sentence_length + 2  # the bos and eos tokens
        self.input_dataset = []
        self.encoder_labels_dataset = []
        self.decoder_labels_dataset = []

        # NOTE(review): hard-coded, machine-specific data root — presumably
        # cluster storage; confirm before running elsewhere.
        data_dict_path_format = '/cognitive_comp/gaoxinyu/data/{}/{}_synthetic_max_insert_label{}_insert_mode{}_*.pt'.format(
            dataset, mode, num_labels - 2, insert_mode)
        data_dict_paths = glob.glob(data_dict_path_format)
        for data_dict_path in data_dict_paths:
            if os.path.exists(data_dict_path):
                print(f'''Loading data from {data_dict_path}''', flush=True)
                # strip the trailing '.pt' to get the shard basename
                filename = ''.join(data_dict_path.rsplit('.pt', 1))
                self.input_dataset += [MMapIndexDataset(filename + "_incorrect_input_ids_list")]
                self.encoder_labels_dataset += [MMapIndexDataset(
                    filename + "_label_ids_list")]
                self.decoder_labels_dataset += [MMapIndexDataset(
                    filename + "_target_ids_list")]
            else:
                print(
                    f'Please create the synthetic datafile {data_dict_path} with create_synthetic_data.py.')
        # total number of samples across all shards
        self.len = 0
        for ds in self.input_dataset:
            self.len += len(ds)

        # TODO make sure the encoder loss weighting logic applies to every rank !
        if statistics:
            # print('Statistics for sentence length:')
            # lengths = [len(e) for e in self.decoder_labels]
            # (unique, counts) = np.unique(lengths, return_counts=True)
            # for k, v in zip(unique,counts):
            #     print(f'sentence length{k}: {v}')
            # print('Statistics for sentence labels:')
            labels = []
            # too slow!!
            # for ds in self.encoder_labels_dataset:
            #     for i in range(0, len(ds)):
            #         labels.extend(ds.__getitem__(i))
            # use only one dataset to calc
            for i in self.encoder_labels_dataset[0]:
                labels.extend(i)
            print(len(labels))
            (unique, counts) = np.unique(labels, return_counts=True)
            all_label_counts = 0
            for k, v in zip(unique, counts):
                print(f'Label {k}: {v}')
                all_label_counts += v
            # ZZ: calculate weights for different labels, labels with higher numbers get lower weights proportionally!
            revert_label_weights = 1 / \
                np.array([v / all_label_counts for k, v in zip(unique, counts)])
            self.label_weights = revert_label_weights / \
                np.sum(revert_label_weights)
        else:
            # ZZ: if statistics is not triggered, manually assign weights to different class
            if num_labels == 7:
                # the cross entropy loss weights do not need to sum to 1
                self.label_weights = [0.01, 0.05, 0.1, 0.1, 0.5, 0.5, 0.5]
            else:
                self.label_weights = [1 / num_labels] * num_labels
        print(f"label weights for encoder will be {self.label_weights}")

    def __getitem__(self, idx):
        """Map a global index onto the owning shard and return one triple."""
        # walk the shards, subtracting lengths until idx falls inside shard i
        for i in range(0, len(self.input_dataset)):
            if idx >= len(self.input_dataset[i]):
                idx -= len(self.input_dataset[i])
            else:
                break
        return torch.tensor(self.input_dataset[i].__getitem__(idx), dtype=torch.long), \
            torch.tensor(self.encoder_labels_dataset[i].__getitem__(idx), dtype=torch.long), \
            torch.tensor(self.decoder_labels_dataset[i].__getitem__(idx), dtype=torch.long)

    def __len__(self):
        """Total number of samples across all shards."""
        return self.len

    def create_decoder_inputs(self, encoder_inputs, encoder_labels, mask_token_id):
        """
        :param encoder_inputs: list, each element is an int
        :param encoder_labels: list, each element is an int
            (0 = copy token, 1 = replace with mask,
            k > 1 = insert k-1 masks, then copy the token)
        :return: 1-D LongTensor of decoder input ids
        """
        decoder_inputs = []
        for i, l in zip(encoder_inputs, encoder_labels):
            if l == 0:
                decoder_inputs.append(i)
            elif l == 1:
                decoder_inputs.append(mask_token_id)
            else:
                decoder_inputs += [mask_token_id] * (l - 1)
                decoder_inputs.append(i)
        return torch.tensor(decoder_inputs, dtype=torch.long)

    def create_mini_batch(self, samples):
        """Collate (input, encoder_label, target) triples into one padded
        batch dict for non-autoregressive CBART training."""
        encoder_inputs = [s[0] for s in samples]
        encoder_labels = [s[1] for s in samples]
        decoder_labels = [s[2] for s in samples]

        # Mask to avoid performing attention on padding token indices in encoder_inputs.
        _mask = pad_sequence(encoder_inputs, batch_first=True, padding_value=-100)
        attention_mask = torch.zeros(_mask.shape, dtype=torch.float32)
        attention_mask = attention_mask.masked_fill(_mask != -100, 1)
        encoder_inputs = pad_sequence(encoder_inputs, batch_first=True,
                                      padding_value=self.tokenizer.pad_token_id)
        encoder_labels = pad_sequence(encoder_labels, batch_first=True, padding_value=-100)
        if self.encoder_loss_type == 1:  # labels for mse loss
            encoder_labels = encoder_labels.float()

        decoder_labels = pad_sequence(decoder_labels, batch_first=True, padding_value=-100)
        # avoid computing loss on the first token, i.e. bos_token
        decoder_labels[:, 0] = -100

        # this method is for non-autoregressive decoding.
        decoder_inputs = [self.create_decoder_inputs(
            s[0], s[1], self.tokenizer.mask_token_id) for s in samples]

        # replace the eos_token_id with pad_token_id
        for i, _ in enumerate(decoder_inputs):
            decoder_inputs[i][-1] = self.tokenizer.pad_token_id
        decoder_inputs = pad_sequence(decoder_inputs, batch_first=True,
                                      padding_value=self.tokenizer.pad_token_id)

        # create decoder_inputs by shifting the decoder_labels right,
        _tmp = decoder_inputs.clone()
        decoder_inputs[:, 1:] = _tmp[:, :-1]
        decoder_inputs[:, 0] = self.tokenizer.eos_token_id

        # construct labels for masked lm loss
        masked_lm_labels = decoder_labels.clone()
        masked_lm_labels[_tmp != self.tokenizer.mask_token_id] = -100

        return {
            "input_ids": encoder_inputs,
            "encoder_labels": encoder_labels,
            "decoder_input_ids": decoder_inputs,
            "labels": decoder_labels,
            "attention_mask": attention_mask,
        }
def get_train_dev_dataset(args, tokenizer):
    """Build the (train, dev) BARTDataset pair from command-line args."""
    shared_kwargs = dict(
        tokenizer=tokenizer,
        num_labels=args.num_labels,
        insert_mode=args.insert_mode,
        encoder_loss_type=args.encoder_loss_type,
    )
    trainset = BARTDataset(args.dataset, "train", **shared_kwargs)
    devset = BARTDataset(args.dataset, mode='dev', **shared_kwargs)
    return trainset, devset
| [
"numpy.sum",
"numpy.unique",
"os.path.exists",
"fengshen.data.MMapIndexDataset.MMapIndexDataset",
"glob.glob",
"torch.zeros",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor"
] | [((1370, 1416), 'torch.tensor', 'torch.tensor', (['decoder_inputs'], {'dtype': 'torch.long'}), '(decoder_inputs, dtype=torch.long)\n', (1382, 1416), False, 'import torch\n'), ((1728, 1794), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_inputs'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(encoder_inputs, batch_first=True, padding_value=-100)\n', (1740, 1794), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((1833, 1878), 'torch.zeros', 'torch.zeros', (['_mask.shape'], {'dtype': 'torch.float32'}), '(_mask.shape, dtype=torch.float32)\n', (1844, 1878), False, 'import torch\n'), ((1975, 2069), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_inputs'], {'batch_first': '(True)', 'padding_value': 'self.tokenizer.pad_token_id'}), '(encoder_inputs, batch_first=True, padding_value=self.tokenizer\n .pad_token_id)\n', (1987, 2069), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2128, 2194), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_labels'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(encoder_labels, batch_first=True, padding_value=-100)\n', (2140, 2194), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2349, 2415), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['decoder_labels'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(decoder_labels, batch_first=True, padding_value=-100)\n', (2361, 2415), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2908, 3002), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['decoder_inputs'], {'batch_first': '(True)', 'padding_value': 'self.tokenizer.pad_token_id'}), '(decoder_inputs, batch_first=True, padding_value=self.tokenizer\n .pad_token_id)\n', (2920, 3002), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((4634, 4666), 'glob.glob', 'glob.glob', (['data_dict_path_format'], {}), '(data_dict_path_format)\n', (4643, 4666), False, 'import glob\n'), ((8464, 8510), 'torch.tensor', 'torch.tensor', 
(['decoder_inputs'], {'dtype': 'torch.long'}), '(decoder_inputs, dtype=torch.long)\n', (8476, 8510), False, 'import torch\n'), ((8807, 8873), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_inputs'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(encoder_inputs, batch_first=True, padding_value=-100)\n', (8819, 8873), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((8899, 8944), 'torch.zeros', 'torch.zeros', (['_mask.shape'], {'dtype': 'torch.float32'}), '(_mask.shape, dtype=torch.float32)\n', (8910, 8944), False, 'import torch\n'), ((9041, 9135), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_inputs'], {'batch_first': '(True)', 'padding_value': 'self.tokenizer.pad_token_id'}), '(encoder_inputs, batch_first=True, padding_value=self.tokenizer\n .pad_token_id)\n', (9053, 9135), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((9194, 9260), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['encoder_labels'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(encoder_labels, batch_first=True, padding_value=-100)\n', (9206, 9260), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((9402, 9468), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['decoder_labels'], {'batch_first': '(True)', 'padding_value': '(-100)'}), '(decoder_labels, batch_first=True, padding_value=-100)\n', (9414, 9468), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((9947, 10041), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['decoder_inputs'], {'batch_first': '(True)', 'padding_value': 'self.tokenizer.pad_token_id'}), '(decoder_inputs, batch_first=True, padding_value=self.tokenizer\n .pad_token_id)\n', (9959, 10041), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((4729, 4759), 'os.path.exists', 'os.path.exists', (['data_dict_path'], {}), '(data_dict_path)\n', (4743, 4759), False, 'import os\n'), ((6322, 6359), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, 
return_counts=True)\n', (6331, 6359), True, 'import numpy as np\n'), ((6832, 6860), 'numpy.sum', 'np.sum', (['revert_label_weights'], {}), '(revert_label_weights)\n', (6838, 6860), True, 'import numpy as np\n'), ((4945, 5001), 'fengshen.data.MMapIndexDataset.MMapIndexDataset', 'MMapIndexDataset', (["(filename + '_incorrect_input_ids_list')"], {}), "(filename + '_incorrect_input_ids_list')\n", (4961, 5001), False, 'from fengshen.data.MMapIndexDataset import MMapIndexDataset\n'), ((5051, 5097), 'fengshen.data.MMapIndexDataset.MMapIndexDataset', 'MMapIndexDataset', (["(filename + '_label_ids_list')"], {}), "(filename + '_label_ids_list')\n", (5067, 5097), False, 'from fengshen.data.MMapIndexDataset import MMapIndexDataset\n'), ((5168, 5215), 'fengshen.data.MMapIndexDataset.MMapIndexDataset', 'MMapIndexDataset', (["(filename + '_target_ids_list')"], {}), "(filename + '_target_ids_list')\n", (5184, 5215), False, 'from fengshen.data.MMapIndexDataset import MMapIndexDataset\n')] |
# coding: utf-8
__author__ = 'cleardusk'
import sys
sys.path.append('..')
import cv2
import numpy as np
import os.path as osp
import scipy.io as sio
from ..Sim3DR import rasterize
from .functions import plot_image
from .io import _load
from .tddfa_util import _to_ctype
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
def load_uv_coords(fp):
    """Read the BFM UV coordinate array from a MATLAB .mat file.

    :param fp: path to a .mat file containing a 'UV' variable
    :return: float32, C-contiguous copy of the UV coordinates
    """
    mat = sio.loadmat(fp)
    return mat['UV'].astype(np.float32, order='C')
def process_uv(uv_coords, uv_h=256, uv_w=256):
    """Scale normalized UV coordinates to pixel space, flip y, append z = 0.

    Note: the x/y columns of *uv_coords* are modified in place; the returned
    array is a new (n, 3) array with a zero z column appended.
    """
    uv_coords[:, 0] *= uv_w - 1
    uv_coords[:, 1] *= uv_h - 1
    uv_coords[:, 1] = uv_h - uv_coords[:, 1] - 1  # image y-axis points down
    z_col = np.zeros((uv_coords.shape[0], 1), dtype=np.float32)
    return np.hstack((uv_coords, z_col))
# Module-level UV template: load the full BFM UV map, then keep only the
# vertices referenced by the reconstruction model's index list.
g_uv_coords = load_uv_coords(make_abs_path('../configs/BFM_UV.mat'))
indices = _load(make_abs_path('../configs/indices.npy'))  # todo: handle bfm_slim
g_uv_coords = g_uv_coords[indices, :]
def get_colors(img, ver):
    """Sample per-vertex colors from *img* by nearest-neighbor lookup.

    Note: clamps the x/y rows of *ver* in place to the image bounds.

    :param img: (h, w, c) image array
    :param ver: (3, n) vertex array; row 0 is x, row 1 is y
    :return: (n, c) array of sampled colors
    """
    h, w, _ = img.shape
    ver[0, :] = np.clip(ver[0, :], 0, w - 1)  # x
    ver[1, :] = np.clip(ver[1, :], 0, h - 1)  # y
    idx = np.round(ver).astype(np.int32)
    return img[idx[1, :], idx[0, :], :]  # n x c
def bilinear_interpolate(img, x, y):
    """Sample *img* at fractional coordinates (x, y) with bilinear weights.

    Coordinates are clamped to the image bounds (clamp-to-edge semantics).
    x and y may be scalars or arrays; the result broadcasts accordingly.

    BUGFIX over the classic recipe
    (https://stackoverflow.com/questions/12729228): previously, coordinates
    on the last row/column — and any out-of-range coordinate — made all four
    weights collapse to zero and returned 0 instead of the border pixel
    value. Interior samples are unchanged.
    """
    h, w = img.shape[0], img.shape[1]
    # clamp the query points first so out-of-range samples pick up the edge
    x = np.clip(x, 0, w - 1)
    y = np.clip(y, 0, h - 1)

    x0 = np.floor(x).astype(np.int32)
    y0 = np.floor(y).astype(np.int32)
    # neighbor indices are clamped for lookup only; the fractional weights
    # below already sum to 1, so the edge pixel gets full weight there
    x1 = np.clip(x0 + 1, 0, w - 1)
    y1 = np.clip(y0 + 1, 0, h - 1)

    i_a = img[y0, x0]
    i_b = img[y1, x0]
    i_c = img[y0, x1]
    i_d = img[y1, x1]

    fx = x - x0  # fractional offsets in [0, 1]
    fy = y - y0
    wa = (1 - fx) * (1 - fy)
    wb = (1 - fx) * fy
    wc = fx * (1 - fy)
    wd = fx * fy

    return wa[..., np.newaxis] * i_a + wb[..., np.newaxis] * i_b + wc[..., np.newaxis] * i_c + wd[..., np.newaxis] * i_d
def uv_tex(img, ver_lst, tri, uv_h=256, uv_w=256, uv_c=3, show_flag=False, wfp=None):
    """Render a UV-space texture map for each reconstructed face.

    :param img: source image the textures are sampled from
    :param ver_lst: list of (3, n) vertex arrays, one per face
    :param tri: triangle index array shared by all faces
    :param uv_h, uv_w, uv_c: output texture height, width and channel count
    :param show_flag: if True, display the concatenated result
    :param wfp: optional output path; when given, the result is saved there
    :return: the texture image (multiple faces concatenated horizontally)
    """
    uv_coords = process_uv(g_uv_coords.copy(), uv_h=uv_h, uv_w=uv_w)

    textures = []
    for vertices in ver_lst:
        ver = _to_ctype(vertices.T)  # transpose to m x 3
        colors = bilinear_interpolate(img, ver[:, 0], ver[:, 1]) / 255.
        # `rasterize` here serves as texture sampling, may need to optimization
        textures.append(rasterize(uv_coords, tri, colors, height=uv_h, width=uv_w, channel=uv_c))

    # concat if there more than one image
    res = textures[0] if len(textures) == 1 else np.concatenate(textures, axis=1)

    if wfp is not None:
        cv2.imwrite(wfp, res)
        print(f'Save visualization result to {wfp}')

    if show_flag:
        plot_image(res)

    return res
def uv_tex_single(img, ver_lst, tri, uv_h=256, uv_w=256, uv_c=3, show_flag=False, wfp=None):
    """Like uv_tex, but saves each face's texture to its own numbered file.

    :param img: source image the textures are sampled from
    :param ver_lst: list of (3, n) vertex arrays, one per face
    :param tri: triangle index array shared by all faces
    :param uv_h, uv_w, uv_c: output texture height, width and channel count
    :param show_flag: if True, display the concatenated result
    :param wfp: optional '.jpg' output path; face k is written to
        '<wfp without .jpg><k>.jpg'
    :return: the texture image (multiple faces concatenated horizontally)

    Fixes over the previous version: removed a leftover debug
    ``print(colors)``; the output basename is now derived with a single
    right-split so a '.jpg' occurring earlier in the path is not mangled.
    """
    uv_coords = process_uv(g_uv_coords.copy(), uv_h=uv_h, uv_w=uv_w)

    res_lst = []
    for ver_ in ver_lst:
        ver = _to_ctype(ver_.T)  # transpose to m x 3
        colors = bilinear_interpolate(img, ver[:, 0], ver[:, 1]) / 255.
        # `rasterize` here serves as texture sampling, may need to optimization
        res = rasterize(uv_coords, tri, colors, height=uv_h, width=uv_w, channel=uv_c)
        res_lst.append(res)

    # concat if there more than one image
    res = np.concatenate(res_lst, axis=1) if len(res_lst) > 1 else res_lst[0]

    if wfp is not None:
        # strip only the FINAL '.jpg' (rsplit), mirroring the '.pt' handling
        # elsewhere in the project
        base = wfp.rsplit('.jpg', 1)[0]
        for cnt, tex in enumerate(res_lst, start=1):
            outname = base + str(cnt) + '.jpg'
            cv2.imwrite(outname, tex)
            print(f'Save visualization result to {outname}')

    if show_flag:
        plot_image(res)

    return res
"sys.path.append",
"numpy.maximum",
"scipy.io.loadmat",
"cv2.imwrite",
"os.path.realpath",
"numpy.floor",
"numpy.zeros",
"numpy.clip",
"numpy.round",
"numpy.concatenate"
] | [((55, 76), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (70, 76), False, 'import sys\n'), ((387, 402), 'scipy.io.loadmat', 'sio.loadmat', (['fp'], {}), '(fp)\n', (398, 402), True, 'import scipy.io as sio\n'), ((1615, 1647), 'numpy.clip', 'np.clip', (['x0', '(0)', '(img.shape[1] - 1)'], {}), '(x0, 0, img.shape[1] - 1)\n', (1622, 1647), True, 'import numpy as np\n'), ((1657, 1689), 'numpy.clip', 'np.clip', (['x1', '(0)', '(img.shape[1] - 1)'], {}), '(x1, 0, img.shape[1] - 1)\n', (1664, 1689), True, 'import numpy as np\n'), ((1699, 1731), 'numpy.clip', 'np.clip', (['y0', '(0)', '(img.shape[0] - 1)'], {}), '(y0, 0, img.shape[0] - 1)\n', (1706, 1731), True, 'import numpy as np\n'), ((1741, 1773), 'numpy.clip', 'np.clip', (['y1', '(0)', '(img.shape[0] - 1)'], {}), '(y1, 0, img.shape[0] - 1)\n', (1748, 1773), True, 'import numpy as np\n'), ((1109, 1133), 'numpy.maximum', 'np.maximum', (['ver[0, :]', '(0)'], {}), '(ver[0, :], 0)\n', (1119, 1133), True, 'import numpy as np\n'), ((1174, 1198), 'numpy.maximum', 'np.maximum', (['ver[1, :]', '(0)'], {}), '(ver[1, :], 0)\n', (1184, 1198), True, 'import numpy as np\n'), ((2676, 2707), 'numpy.concatenate', 'np.concatenate', (['res_lst'], {'axis': '(1)'}), '(res_lst, axis=1)\n', (2690, 2707), True, 'import numpy as np\n'), ((2777, 2798), 'cv2.imwrite', 'cv2.imwrite', (['wfp', 'res'], {}), '(wfp, res)\n', (2788, 2798), False, 'import cv2\n'), ((3513, 3544), 'numpy.concatenate', 'np.concatenate', (['res_lst'], {'axis': '(1)'}), '(res_lst, axis=1)\n', (3527, 3544), True, 'import numpy as np\n'), ((324, 346), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (336, 346), True, 'import os.path as osp\n'), ((721, 772), 'numpy.zeros', 'np.zeros', (['(uv_coords.shape[0], 1)'], {'dtype': 'np.float32'}), '((uv_coords.shape[0], 1), dtype=np.float32)\n', (729, 772), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.round', 'np.round', (['ver'], {}), '(ver)\n', (1230, 1235), True, 'import 
numpy as np\n'), ((1506, 1517), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (1514, 1517), True, 'import numpy as np\n'), ((1560, 1571), 'numpy.floor', 'np.floor', (['y'], {}), '(y)\n', (1568, 1571), True, 'import numpy as np\n'), ((3736, 3770), 'cv2.imwrite', 'cv2.imwrite', (['outname', 'res_lst[cnt]'], {}), '(outname, res_lst[cnt])\n', (3747, 3770), False, 'import cv2\n')] |
"""
Some helper functions for the matern prior covariance
Hmm... it would be nice to add this on to the ControlField
infrastructure...
Maybe once I understand things more
"""
import os
import numpy as np
import xarray as xr
from MITgcmutils import wrmds
from scipy.special import gamma, kv
from .io import read_mds
def calc_correlation_field(xda, mask,
                           dimlist=('Z', 'YC'),
                           n_shift=15,
                           mask_in_betweens=False):
    """Calculate the correlation field for each shifted distance.

    (The default for ``dimlist`` is now a tuple — a mutable list default is a
    shared-object pitfall; iteration behavior is unchanged.)

    Parameters
    ----------
    xda : xarray.DataArray
        The field to compute correlations on, over the 'sample' dimension
    mask : xarray.DataArray
        True/False inside/outside of domain
    dimlist : sequence of str, optional
        denoting dimensions to compute shifted correlations along
    n_shift : int, optional
        number of shifts to do in each direction
    mask_in_betweens : bool, optional
        if True, then if there is a portion of the domain such that for a
        particular dimension, there is a gap between two points, ignore all
        points with larger correlation length than where the gap occurs;
        doesn't affect results much

    Returns
    -------
    xds : xarray.Dataset
        one variable 'corr_<dim>' per entry in dimlist, each indexed by the
        integer shift coordinate 'shifty'
    """
    xds = xr.Dataset()

    # integer shift coordinate, -n_shift .. n_shift inclusive
    shifty = np.arange(-n_shift, n_shift + 1)
    shifty = xr.DataArray(shifty, coords={'shifty': shifty}, dims=('shifty',))
    xds['shifty'] = shifty
    for dim in dimlist:
        corrfld = f'corr_{dim.lower()}'
        template = xda.isel(sample=0).drop('sample')
        xds[corrfld] = xr.zeros_like(shifty * template)

        # Pearson correlation over 'sample' between the field and its
        # shifted self, masked to the domain interior
        x_deviation = (xda - xda.mean('sample')).where(mask)
        x_ssr = np.sqrt((x_deviation ** 2).sum('sample'))
        for s in shifty.values:
            y_deviation = x_deviation.shift({dim: s})
            numerator = (x_deviation * y_deviation).sum('sample')
            y_ssr = np.sqrt((y_deviation ** 2).sum('sample'))
            denominator = x_ssr * y_ssr
            xds[corrfld].loc[{'shifty': s}] = numerator / denominator

    if mask_in_betweens:
        # blank out any shifts that lie beyond the first NaN gap in each
        # direction away from shift s
        for dim in dimlist:
            corrfld = f'corr_{dim.lower()}'
            for s in shifty.values:
                if s < 0:
                    bigger_than = shifty < s
                else:
                    bigger_than = shifty > s
                imnan = np.isnan(xds[corrfld].sel(shifty=s))
                xds[corrfld] = xr.where(bigger_than * imnan, np.nan, xds[corrfld])

    return xds
def corr_iso(dist, Nx, ndims):
    """Isotropic Matern correlation at distance ``dist``.

    Smoothness is nu = 1/2 for a 3D field and nu = 1 otherwise; ``Nx`` is
    the correlation length scale expressed in grid cells.
    """
    if ndims == 3:
        smoothness = .5
    else:
        smoothness = 1
    prefactor = 2 ** (1 - smoothness) / gamma(smoothness)
    scaled_dist = dist * np.sqrt(8 * smoothness) / Nx
    bessel_term = kv(smoothness, scaled_dist)
    return prefactor * scaled_dist ** smoothness * bessel_term
def calc_variance(Nx, ndims=2):
    """Marginal variance of the Matern field with length scale ``Nx`` cells."""
    smoothness = 1 / 2 if ndims == 3 else 1
    delta_hat = 8 * smoothness / Nx ** 2
    denominator = (
        gamma(smoothness + ndims / 2)
        * (4 * np.pi) ** (ndims / 2)
        * delta_hat ** smoothness
    )
    return gamma(smoothness) / denominator
def _getL(ds):
    """Return the horizontal length scale implied by the grid metrics.

    Prefers sqrt of the cell area ``rA``; falls back to the grid edge
    lengths ``dyG`` then ``dxG``.
    """
    # Promote a bare DataArray to a Dataset so the membership tests work.
    if isinstance(ds, xr.core.dataarray.DataArray):
        ds = ds.to_dataset(name='tmp')
    if 'rA' in ds:
        return np.sqrt(ds['rA'])
    for edge_length in ('dyG', 'dxG'):
        if edge_length in ds:
            return ds[edge_length]
    raise NotImplementedError('Other length scales not recognized')
def get_alpha(ds):
    """Return the grid-defined aspect ratio, alpha = drF / sqrt(rA)."""
    horizontal_scale = _getL(ds)
    aspect = ds['drF'] / horizontal_scale
    aspect.name = 'alpha'
    return aspect
def getPhi(mymask, xi=1):
    """Return the Jacobian of the deformation tensor as a dict.

              ux  0  0
    Phi =      0 vy  0
               0  0 wz

    or a 2D section of that.  ``xi`` is an additional factor on ux and/or
    vy used to accentuate the horizontal scales over the vertical; it must
    be the same size as ``mymask``, or just a scalar.
    """
    ndims = len(mymask.dims)
    horiz = _getL(mymask).broadcast_like(mymask)
    if 'drF' in mymask.coords:
        vert = mymask['drF'].broadcast_like(mymask)
    else:
        vert = xr.ones_like(mymask)
    xi = xi * xr.ones_like(mymask)
    ux = xi * horiz
    vy = xi * horiz
    wz = vert
    if ndims == 3:
        return {'ux': ux, 'vy': vy, 'wz': wz}
    if ndims == 2:
        dims = set(mymask.dims)
        if {'XC', 'YC'} <= dims:
            # purely horizontal section: the xi factor cancels out
            return {'ux': ux / xi, 'vy': vy / xi}
        if {'YC', 'Z'} <= dims:
            return {'vy': vy, 'wz': wz}
        if {'XC', 'Z'} <= dims:
            return {'ux': ux, 'wz': wz}
        raise TypeError('getPhi dims problem 2d')
    raise TypeError('Only 2d or 3d for this phd')
def get_delta(Nx, determinant, mymask):
    """Return the SPDE ``delta`` field: 8*nu / (Nx**2 * det(Phi))."""
    smoothness = 1 / 2 if len(mymask.dims) == 3 else 1
    ratio = (8 * smoothness) / (Nx ** 2 * determinant)
    delta = ratio.broadcast_like(mymask)
    delta.name = 'delta'
    return delta
def get_cell_volume(mymask):
    """Return the grid-cell volume (or area, for a horizontal 2D mask).

    This is part of the normalization factor for the white noise process
    driving the Matern SPDE.

    Parameters
    ----------
    mymask : xarray.DataArray
        True/False inside/outside of domain; its dims decide which
        measure is returned.

    Returns
    -------
    xarray.DataArray
        L**2 for a horizontal (XC, YC) section, drF*L for a vertical
        section, drF*L**2 for a 3D mask, where L = sqrt(rA).

    Raises
    ------
    TypeError
        If a 2D mask does not carry a recognized pair of dimensions.
    """
    ndims = len(mymask.dims)
    L = _getL(mymask)
    if ndims==2:
        if set(('XC','YC')).issubset(mymask.dims):
            return L**2
        elif set(('YC','Z')).issubset(mymask.dims):
            return mymask['drF']*L
        elif set(('XC','Z')).issubset(mymask.dims):
            return mymask['drF']*L
        else:
            # previously fell through and returned None silently;
            # raise like getPhi does for an unrecognized 2D section
            raise TypeError('get_cell_volume dims problem 2d')
    else:
        return mymask['drF']*L**2
def get_matern(Nx,mymask,xi=1):
    """Build the constants and diffusion tensor for the Matern SPDE operator.

    Parameters
    ----------
    Nx : int
        number of neighboring grid cells defining the correlation length
    mymask : xarray.DataArray
        True/False inside/outside of domain, defining the ControlField
    xi : scalar or xarray.DataArray, optional
        extra factor on the horizontal deformation (see :func:`getPhi`)

    Returns
    -------
    C : dict
        constants: 'alpha', 'Nx', 'determinant', 'randNorm', 'delta'
    K : dict
        diagonal diffusion-tensor entries among 'ux', 'vy', 'wz',
        depending on which dims are present in ``mymask``

    Raises
    ------
    TypeError
        If a 2D mask has unrecognized dims, or any produced field does not
        match the dim order of ``mymask``.
    """
    C = {}
    K = {}
    ndims = len(mymask.dims)
    Phi = getPhi(mymask,xi=xi)
    C['alpha'] = get_alpha(mymask.to_dataset(name='mask')).broadcast_like(mymask)
    C['Nx'] = Nx
    # determinant of the (diagonal) deformation tensor for the active dims
    if ndims == 2:
        if set(('XC','YC')).issubset(mymask.dims):
            C['determinant'] = Phi['vy']*Phi['ux']
        elif set(('YC','Z')).issubset(mymask.dims):
            C['determinant'] = Phi['wz']*Phi['vy']
        elif set(('XC','Z')).issubset(mymask.dims):
            C['determinant'] = Phi['wz']*Phi['ux']
        else:
            raise TypeError('Help my dims out')
    else:
        C['determinant'] = Phi['wz']*Phi['vy']*Phi['ux']
    # normalization for the white-noise forcing
    C['randNorm'] = 1/np.sqrt(C['determinant'])/np.sqrt(get_cell_volume(mymask))
    C['delta'] = get_delta(Nx,determinant=C['determinant'],mymask=mymask)
    if 'XC' in mymask.dims:
        K['ux'] = 1 / C['determinant'] * Phi['ux']*Phi['ux']
    if 'YC' in mymask.dims:
        K['vy'] = 1 / C['determinant'] * Phi['vy']*Phi['vy']
    if 'Z' in mymask.dims:
        K['wz'] = 1 / C['determinant'] * Phi['wz']*Phi['wz']
    # sanity check: every DataArray must share mymask's dim order.
    # (Was `assert` in a bare try/except, which is stripped under -O and
    # swallowed unrelated exceptions; use an explicit check instead.)
    for dd,lbl in zip([C,K,Phi],['constants','K','Phi']):
        for key,val in dd.items():
            if key != 'alpha':
                if isinstance(val,xr.core.dataarray.DataArray):
                    if val.dims != mymask.dims:
                        raise TypeError(f'dim order for {lbl}[{key}] is: ',val.dims)
    return C,K
def write_matern(write_dir,smoothOpNb,Nx,mymask,xdalike,xi=1):
    """Write everything to describe the SPDE operator associated
    with the Matern covariance

    Parameters
    ----------
    write_dir : str
        path with directory to write to
    smoothOpNb : int
        smooth operator number
    Nx : int
        number of neighboring grid cells to smooth by...
    mymask : xarray DataArray
        defining the ControlField
    xdalike : xarray DataArray
        to write the fields like, since mymask may have a different
        ordering than what the MITgcm wants
    xi : scalar or xarray DataArray, optional
        extra factor on the horizontal deformation, passed to get_matern
    """
    ndims = len(mymask.dims)
    # Make the tensor and put into big array
    C,K = get_matern(Nx,mymask,xi=xi)
    # Write out the fields
    if not os.path.isdir(write_dir):
        os.makedirs(write_dir)
    dimstr = f'{ndims}D'
    # diffusion-tensor entries (only those present for this mask's dims)
    for el in ['ux','vy','wz']:
        if el in K.keys():
            K[el] = K[el].reindex_like(xdalike)
            wrmds(f'{write_dir}/smooth{dimstr}K{el}{smoothOpNb:03}',
                  arr=K[el].values,
                  dataprec='float64')
    # scalar fields: delta and the white-noise normalization
    for f,fstr in zip(['delta','randNorm'],
                      ['Delta','RandNorm']):
        C[f] = C[f].reindex_like(xdalike)
        wrmds(f'{write_dir}/smooth{dimstr}{fstr}{smoothOpNb:03}',
              arr=C[f].values,
              dataprec='float64')
def get_matern_dataset(run_dir,smoothOpNb,xdalike,sample_num=None,
                       read_filternorm=True):
    """Read the MITgcm smooth-operator output back into an xarray Dataset.

    Parameters
    ----------
    run_dir : str
        directory containing the smooth*{smoothOpNb} MDS files
    smoothOpNb : int
        smooth operator number
    xdalike : xarray.DataArray
        template describing the shape/coords of the fields on disk
    sample_num : int or list of int, optional
        record number(s) to read from the field file; if None, read all
    read_filternorm : bool, optional
        if True, also read the mean and filter-normalization fields and
        add the normalized combinations to the result

    Returns
    -------
    xarray.Dataset
        with 'ginv' and, when read_filternorm, 'filternorm',
        'ginv_norm', 'ginv_nomean_norm'; each has a LaTeX 'label' attr
    """
    ndims = len(xdalike.dims)
    if read_filternorm:
        smooth_mean = read_mds(f'{run_dir}/smooth{ndims}Dmean{smoothOpNb:03}',
                               xdalike=xdalike)
        smooth_norm = read_mds(f'{run_dir}/smooth{ndims}Dnorm{smoothOpNb:03}',
                               xdalike=xdalike)
    fld_fname = f'{run_dir}/smooth{ndims}Dfld{smoothOpNb:03}'
    if sample_num is None:
        smooth_fld = read_mds(fld_fname,xdalike=xdalike)
    else:
        # normalize sample_num to an array so it can become a coordinate
        if isinstance(sample_num,list):
            sample_num = np.array(sample_num)
        elif isinstance(sample_num,int):
            sample_num = np.array([sample_num])
        sample = xr.DataArray(sample_num,
                              coords={'sample':sample_num},
                              dims=('sample',),name='sample')
        # (sample*xdalike) just broadcasts the template over the sample dim
        smooth_fld = read_mds(fld_fname,xdalike=(sample*xdalike).squeeze(),
                              rec=sample_num)
    if read_filternorm:
        names = ['ginv','filternorm','ginv_norm','ginv_nomean_norm']
        fldlist = [smooth_fld,smooth_norm,smooth_fld*smooth_norm,
                   (smooth_fld-smooth_mean)*smooth_norm]
        # NOTE(review): last label is missing a closing paren in the LaTeX;
        # left as-is since it is a runtime string
        labels = [r'$\mathcal{A}^{-1}g$',
                  r'$X$',
                  r'$X\mathcal{A}^{-1}g$',
                  r'$X\mathcal{A}^{-1}(g-\bar{g}$']
    else:
        names = ['ginv']
        fldlist = [smooth_fld]
        labels = [r'$\mathcal{A}^{-1}g$']
    ds = xr.Dataset(dict(zip(names,fldlist)))
    for key,lbl in zip(ds.data_vars,labels):
        ds[key].attrs['label'] = lbl
    return ds
| [
"os.makedirs",
"os.path.isdir",
"xarray.Dataset",
"xarray.zeros_like",
"MITgcmutils.wrmds",
"numpy.arange",
"xarray.DataArray",
"scipy.special.kv",
"numpy.array",
"xarray.where",
"xarray.ones_like",
"scipy.special.gamma",
"numpy.sqrt"
] | [((1212, 1224), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (1222, 1224), True, 'import xarray as xr\n'), ((1238, 1270), 'numpy.arange', 'np.arange', (['(-n_shift)', '(n_shift + 1)'], {}), '(-n_shift, n_shift + 1)\n', (1247, 1270), True, 'import numpy as np\n'), ((1281, 1346), 'xarray.DataArray', 'xr.DataArray', (['shifty'], {'coords': "{'shifty': shifty}", 'dims': "('shifty',)"}), "(shifty, coords={'shifty': shifty}, dims=('shifty',))\n", (1293, 1346), True, 'import xarray as xr\n'), ((2574, 2585), 'scipy.special.kv', 'kv', (['nu', 'arg'], {}), '(nu, arg)\n', (2576, 2585), False, 'from scipy.special import gamma, kv\n'), ((1512, 1544), 'xarray.zeros_like', 'xr.zeros_like', (['(shifty * template)'], {}), '(shifty * template)\n', (1525, 1544), True, 'import xarray as xr\n'), ((2496, 2505), 'scipy.special.gamma', 'gamma', (['nu'], {}), '(nu)\n', (2501, 2505), False, 'from scipy.special import gamma, kv\n'), ((2790, 2799), 'scipy.special.gamma', 'gamma', (['nu'], {}), '(nu)\n', (2795, 2799), False, 'from scipy.special import gamma, kv\n'), ((2982, 2999), 'numpy.sqrt', 'np.sqrt', (["ds['rA']"], {}), "(ds['rA'])\n", (2989, 2999), True, 'import numpy as np\n'), ((3870, 3890), 'xarray.ones_like', 'xr.ones_like', (['mymask'], {}), '(mymask)\n', (3882, 3890), True, 'import xarray as xr\n'), ((3904, 3924), 'xarray.ones_like', 'xr.ones_like', (['mymask'], {}), '(mymask)\n', (3916, 3924), True, 'import xarray as xr\n'), ((7440, 7464), 'os.path.isdir', 'os.path.isdir', (['write_dir'], {}), '(write_dir)\n', (7453, 7464), False, 'import os\n'), ((7474, 7496), 'os.makedirs', 'os.makedirs', (['write_dir'], {}), '(write_dir)\n', (7485, 7496), False, 'import os\n'), ((7918, 8016), 'MITgcmutils.wrmds', 'wrmds', (['f"""{write_dir}/smooth{dimstr}{fstr}{smoothOpNb:03}"""'], {'arr': 'C[f].values', 'dataprec': '"""float64"""'}), "(f'{write_dir}/smooth{dimstr}{fstr}{smoothOpNb:03}', arr=C[f].values,\n dataprec='float64')\n", (7923, 8016), False, 'from MITgcmutils import wrmds\n'), 
((8813, 8905), 'xarray.DataArray', 'xr.DataArray', (['sample_num'], {'coords': "{'sample': sample_num}", 'dims': "('sample',)", 'name': '"""sample"""'}), "(sample_num, coords={'sample': sample_num}, dims=('sample',),\n name='sample')\n", (8825, 8905), True, 'import xarray as xr\n'), ((2516, 2531), 'numpy.sqrt', 'np.sqrt', (['(8 * nu)'], {}), '(8 * nu)\n', (2523, 2531), True, 'import numpy as np\n'), ((2720, 2741), 'scipy.special.gamma', 'gamma', (['(nu + ndims / 2)'], {}), '(nu + ndims / 2)\n', (2725, 2741), False, 'from scipy.special import gamma, kv\n'), ((5902, 5927), 'numpy.sqrt', 'np.sqrt', (["C['determinant']"], {}), "(C['determinant'])\n", (5909, 5927), True, 'import numpy as np\n'), ((7643, 7741), 'MITgcmutils.wrmds', 'wrmds', (['f"""{write_dir}/smooth{dimstr}K{el}{smoothOpNb:03}"""'], {'arr': 'K[el].values', 'dataprec': '"""float64"""'}), "(f'{write_dir}/smooth{dimstr}K{el}{smoothOpNb:03}', arr=K[el].values,\n dataprec='float64')\n", (7648, 7741), False, 'from MITgcmutils import wrmds\n'), ((8685, 8705), 'numpy.array', 'np.array', (['sample_num'], {}), '(sample_num)\n', (8693, 8705), True, 'import numpy as np\n'), ((2345, 2396), 'xarray.where', 'xr.where', (['(bigger_than * imnan)', 'np.nan', 'xds[corrfld]'], {}), '(bigger_than * imnan, np.nan, xds[corrfld])\n', (2353, 2396), True, 'import xarray as xr\n'), ((8772, 8794), 'numpy.array', 'np.array', (['[sample_num]'], {}), '([sample_num])\n', (8780, 8794), True, 'import numpy as np\n')] |
import itertools
import numpy as np
from collections import deque
from tensorboardX import SummaryWriter
import Config
from AgentControl import AgentControl
from Buffer import Buffer
from TestAgent import TestAgent
class Agent:
    # Role of the Agent class is to coordinate between AgentControl, where all
    # calculations happen, and the Buffer, where all of the data is stored.
    # The Agent also occasionally tests the trained model via TestAgent.
    def __init__(self, env, behavior_name, num_agents, state_shape, action_shape, episode_length):
        """Wire up the calculation, storage and testing components."""
        self.agent_control = AgentControl(state_shape, action_shape)
        self.buffer = Buffer(num_agents, state_shape, action_shape, episode_length)
        self.test_agent = TestAgent(env, behavior_name, num_agents, state_shape, action_shape)
        self.writer = SummaryWriter(logdir="content/runs/" + str(Config.seed) + Config.writer_name) if Config.write else None
        # rolling windows over the last 100 updates / episodes
        self.policy_loss_mean = deque(maxlen=100)
        self.critic_loss_mean = deque(maxlen=100)
        self.return_queue = deque(maxlen=100)
        self.current_ep_rewards = []
        self.max_reward = -10
        self.num_agents = num_agents
        # running per-agent episode reward, reset when an agent terminates
        self.reward_agents = [0] * self.num_agents

    def get_actions(self, decision_steps):
        """Sample actions for the current observations and stash them in the buffer."""
        actions, logprob = self.agent_control.get_actions(decision_steps.obs[0])
        self.buffer.add_old(decision_steps, actions, logprob)
        return actions

    def update_lr(self, n_step):
        """Delegate learning-rate scheduling to AgentControl."""
        self.agent_control.update_lr(n_step)

    def calculate_ep_reward(self, decision_steps, terminal_steps):
        """Accumulate per-agent rewards; close out episodes for terminated agents."""
        for a_id in decision_steps.agent_id:
            self.reward_agents[a_id] += decision_steps.reward[a_id]
        # NOTE(review): terminal rewards are indexed positionally (cnt) while
        # decision rewards are indexed by agent id — presumably terminal_steps
        # rewards are ordered like terminal_steps.agent_id; verify
        cnt = 0
        for a_id in terminal_steps.agent_id:
            self.reward_agents[a_id] += terminal_steps.reward[cnt]
            self.current_ep_rewards.append(self.reward_agents[a_id])
            self.return_queue.append(self.reward_agents[a_id])
            self.reward_agents[a_id] = 0
            cnt += 1

    @staticmethod
    def get_steps(env, behavior_name):
        """Return (decision_steps, terminal_steps) from the environment."""
        steps = list(env.get_steps(behavior_name))
        return steps[0], steps[1]

    def add_to_buffer(self, decision_steps, terminal_steps):
        """Store the post-step results in the buffer."""
        self.buffer.add(decision_steps, terminal_steps)

    def buffer_full(self):
        """True when the buffer has collected a full rollout."""
        return self.buffer.full

    def calculate_advantage(self):
        # For basic advantage function we have to calculate future rewards we got from each state, where reward from
        # last state is estimation (since we only know rewards in steps we took, not after), discount them and
        # subtract from baseline which in this case will be estimated value of each state.
        # GAE advantage gives us to decide we want each state advantage to be calculated with
        # reward + estimate(next state) - estimate(state) which has low variance but high bias or with
        # reward + gamma*next_reward + ... + gamma^n * estimate(last next state) - estimate(state) which has high
        # variance but low bias. We can decide to calculate advantage with something between those two and Config.LAMBDA
        # will be hyperparameter for that
        state_values = self.agent_control.get_critic_value_d(self.buffer.states)
        if Config.gae:
            new_state_values = self.agent_control.get_critic_value_d(self.buffer.new_states)
            self.buffer.gae_advantage(state_values, new_state_values)
        else:
            last_state_value = self.agent_control.get_critic_value_d(self.buffer.new_states[-1])
            self.buffer.advantage(state_values, last_state_value)

    def update(self, indices):
        # Main PPO point is updating policy NN. This is done by calculating derivative of loss function and doing
        # gradient descent. First we have to calculate ratio. Second to find minimum between ratio*advantage and
        # clipped_ratio*advantage. Third to find mean of Config.MINIBATCH_SIZE losses.
        # To calculate ratio we need new and old action probability. We already have old when we fed states to
        # policy NN when we wanted to get action from it. We can get new action probabilities if we give same states
        # but also actions we got. With states NN can create Normal distribution and with action he will sample the same
        # part of distribution, but now with different probability because Normal distribution is different.
        ratio = self.agent_control.calculate_ratio(self.buffer.states[indices], self.buffer.actions[indices],
                                                   self.buffer.logprob[indices])
        policy_loss = self.agent_control.update_policy(ratio, self.buffer.advantages[indices])
        critic_loss = self.agent_control.update_critic(self.buffer.states[indices], self.buffer.gt[indices])
        self.policy_loss_mean.append(policy_loss)
        self.critic_loss_mean.append(critic_loss)

    def record_data(self, n_step):
        """Log progress to stdout and, when enabled, to TensorBoard."""
        if len(self.current_ep_rewards) > 0:
            self.max_reward = np.maximum(self.max_reward, np.max(self.current_ep_rewards))
        print("St " + str(n_step) + "/" + str(Config.total_steps) + " Mean 100 policy loss: " + str(
            np.round(np.mean(self.policy_loss_mean), 4)) + " Mean 100 critic loss: " + str(
            np.round(np.mean(self.critic_loss_mean), 4)) + " Max reward: " + str(
            np.round(self.max_reward, 2)) + " Mean 100 reward: " + str(
            np.round(np.mean(self.return_queue), 2)) + " Last rewards: " + str(
            np.round(self.current_ep_rewards, 2)))
        if Config.write:
            self.writer.add_scalar('pg_loss', np.mean(self.policy_loss_mean), n_step)
            self.writer.add_scalar('vl_loss', np.mean(self.critic_loss_mean), n_step)
            self.writer.add_scalar('100rew', np.mean(self.return_queue), n_step)
            if len(self.current_ep_rewards) > 0:
                self.writer.add_scalar('rew', np.mean(self.current_ep_rewards), n_step)
        self.current_ep_rewards = []

    def check_test(self, n_step):
        # Agent will test the model every 50th iteration or if its performing well enough for past few episodes.
        if (n_step + 1) % 50 == 0 or (len(self.return_queue) >= 100 and np.mean(
                list(itertools.islice(self.return_queue, 90, 100))) >= 100):
            return True
        return False

    def test(self, n_step):
        # Test the model for 100 episodes. Since we pass environment to TestAgent (and not creating separate)
        # we need to reset some variables and return wether the goal is met.
        self.reward_agents = [0] * self.num_agents
        end = self.test_agent.test(self.agent_control.get_policy_nn(), self.writer, n_step)
        self.buffer.reset(full=False)
        return end
| [
"AgentControl.AgentControl",
"numpy.max",
"numpy.mean",
"itertools.islice",
"TestAgent.TestAgent",
"Buffer.Buffer",
"numpy.round",
"collections.deque"
] | [((574, 613), 'AgentControl.AgentControl', 'AgentControl', (['state_shape', 'action_shape'], {}), '(state_shape, action_shape)\n', (586, 613), False, 'from AgentControl import AgentControl\n'), ((637, 698), 'Buffer.Buffer', 'Buffer', (['num_agents', 'state_shape', 'action_shape', 'episode_length'], {}), '(num_agents, state_shape, action_shape, episode_length)\n', (643, 698), False, 'from Buffer import Buffer\n'), ((726, 794), 'TestAgent.TestAgent', 'TestAgent', (['env', 'behavior_name', 'num_agents', 'state_shape', 'action_shape'], {}), '(env, behavior_name, num_agents, state_shape, action_shape)\n', (735, 794), False, 'from TestAgent import TestAgent\n'), ((955, 972), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (960, 972), False, 'from collections import deque\n'), ((1006, 1023), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1011, 1023), False, 'from collections import deque\n'), ((1053, 1070), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1058, 1070), False, 'from collections import deque\n'), ((5123, 5154), 'numpy.max', 'np.max', (['self.current_ep_rewards'], {}), '(self.current_ep_rewards)\n', (5129, 5154), True, 'import numpy as np\n'), ((5715, 5745), 'numpy.mean', 'np.mean', (['self.policy_loss_mean'], {}), '(self.policy_loss_mean)\n', (5722, 5745), True, 'import numpy as np\n'), ((5802, 5832), 'numpy.mean', 'np.mean', (['self.critic_loss_mean'], {}), '(self.critic_loss_mean)\n', (5809, 5832), True, 'import numpy as np\n'), ((5888, 5914), 'numpy.mean', 'np.mean', (['self.return_queue'], {}), '(self.return_queue)\n', (5895, 5914), True, 'import numpy as np\n'), ((5601, 5637), 'numpy.round', 'np.round', (['self.current_ep_rewards', '(2)'], {}), '(self.current_ep_rewards, 2)\n', (5609, 5637), True, 'import numpy as np\n'), ((6021, 6053), 'numpy.mean', 'np.mean', (['self.current_ep_rewards'], {}), '(self.current_ep_rewards)\n', (6028, 6053), True, 'import numpy as 
np\n'), ((6356, 6400), 'itertools.islice', 'itertools.islice', (['self.return_queue', '(90)', '(100)'], {}), '(self.return_queue, 90, 100)\n', (6372, 6400), False, 'import itertools\n'), ((5529, 5555), 'numpy.mean', 'np.mean', (['self.return_queue'], {}), '(self.return_queue)\n', (5536, 5555), True, 'import numpy as np\n'), ((5447, 5475), 'numpy.round', 'np.round', (['self.max_reward', '(2)'], {}), '(self.max_reward, 2)\n', (5455, 5475), True, 'import numpy as np\n'), ((5373, 5403), 'numpy.mean', 'np.mean', (['self.critic_loss_mean'], {}), '(self.critic_loss_mean)\n', (5380, 5403), True, 'import numpy as np\n'), ((5280, 5310), 'numpy.mean', 'np.mean', (['self.policy_loss_mean'], {}), '(self.policy_loss_mean)\n', (5287, 5310), True, 'import numpy as np\n')] |
import numpy as np
from nptyping import NDArray
from ..objective import Objective
def char_vector(f: Objective, e: int) -> NDArray[int]:
    """
    Return the n-dimensional characteristic vector with 1 on coordinate e.
    :param f: integer-lattice submodular function
    :param e: coordinate of the characteristic vector that should be set to 1
    """
    # np.isin replaces the deprecated np.in1d; casting the boolean result
    # directly to int64 makes the intermediate "* 1" redundant
    return np.isin(f.V, [e]).astype(np.int64)
| [
"numpy.in1d"
] | [((370, 387), 'numpy.in1d', 'np.in1d', (['f.V', '[e]'], {}), '(f.V, [e])\n', (377, 387), True, 'import numpy as np\n')] |
from typing import Dict
import pytest
from numpy.testing import assert_almost_equal
from allopy import ActivePortfolioOptimizer, OptData
from allopy.datasets import load_monte_carlo
from tests.active_portfolio.data import (
adj, cov_mat, get_expected_results, get_linear_constraints, get_risk_cstr, lb, ub
)
from tests.utils import fetch_opt_data_test_file
scenarios = 'Baseline', 'Upside', 'Downside'
agg_weights = [0.25, 0.18, 0.24, 0.13, 0.11, 0.04, 0.05]
def parameters():
    """Yield (scenario, max_cvar, max_te, expected_weights) for each scenario."""
    risk_limits = get_risk_cstr()
    expected = get_expected_results()
    for name in scenarios:
        max_cvar = float(risk_limits.loc[0, name])
        max_te = float(risk_limits.loc[1, name])
        yield name, max_cvar, max_te, expected[name].to_numpy()
def set_linear_constraints(opt: ActivePortfolioOptimizer):
    """Load the linear constraints onto the optimizer and return it.

    '>=' rows are negated into '<=' form before being added.
    """
    cstr = get_linear_constraints()
    matrix = cstr.iloc[:, :-2].to_numpy()
    bounds = cstr.B.to_numpy()
    signs = cstr.EQUALITY.to_numpy()
    # flip the sign of every '>=' row so it becomes a '<=' row
    flip = signs == ">="
    matrix[flip] *= -1
    bounds[flip] *= -1
    signs[flip] = "<="
    eq_rows = signs == "="
    opt.add_equality_matrix_constraint(matrix[eq_rows], bounds[eq_rows])
    ineq_rows = signs == "<="
    opt.add_inequality_matrix_constraint(matrix[ineq_rows], bounds[ineq_rows])
    return opt
@pytest.fixture(scope="module")
def scenario_data_map() -> Dict[str, OptData]:
res = {}
for s in scenarios:
data = fetch_opt_data_test_file(f"active-{s}")
if data is None:
data = OptData(load_monte_carlo(total=True)) \
.calibrate_data(adj[s], adj.Vol) \
.alter_frequency('quarter') \
.aggregate_assets(agg_weights) \
.set_cov_mat(cov_mat)
res[s] = data
return res
@pytest.fixture(scope="module")
def cvar_data() -> OptData:
data = fetch_opt_data_test_file(f"active-cvar")
if data is None:
data = OptData(load_monte_carlo(total=True)) \
.cut_by_horizon(3) \
.calibrate_data(sd=adj.Vol) \
.alter_frequency('quarterly') \
.aggregate_assets(agg_weights)
return data
@pytest.mark.parametrize("scenario, max_cvar, max_te, expected", parameters())
def test_optimizer(scenario_data_map, cvar_data, scenario, max_cvar, max_te, expected):
x0 = [1.0, 0.107, 0.008, 0.044, 0.064, 0.124, 0.009, 0.003, 0.003, 0.02, 0.006, 0.005, 0.054, 0.003, 0.0007,
0.109, 0.004, 0.004, 0.01, 0.027, 0.001, 0.003, 0.003, 0.008, 0.006, 0.008, 0.002, 0.011, 0.0, 0.0]
cube = scenario_data_map[scenario]
opt = ActivePortfolioOptimizer(cube, cvar_data=cvar_data)
opt = set_linear_constraints(opt)
opt.set_bounds(lb, ub)
results = opt.maximize_eva(max_te, max_cvar, x0=x0).round(4)
assert_almost_equal(results, expected, 4)
| [
"tests.active_portfolio.data.get_expected_results",
"tests.active_portfolio.data.get_linear_constraints",
"allopy.ActivePortfolioOptimizer",
"numpy.testing.assert_almost_equal",
"pytest.fixture",
"tests.utils.fetch_opt_data_test_file",
"tests.active_portfolio.data.get_risk_cstr",
"allopy.datasets.load... | [((1429, 1459), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1443, 1459), False, 'import pytest\n'), ((1910, 1940), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1924, 1940), False, 'import pytest\n'), ((497, 512), 'tests.active_portfolio.data.get_risk_cstr', 'get_risk_cstr', ([], {}), '()\n', (510, 512), False, 'from tests.active_portfolio.data import adj, cov_mat, get_expected_results, get_linear_constraints, get_risk_cstr, lb, ub\n'), ((527, 549), 'tests.active_portfolio.data.get_expected_results', 'get_expected_results', ([], {}), '()\n', (547, 549), False, 'from tests.active_portfolio.data import adj, cov_mat, get_expected_results, get_linear_constraints, get_risk_cstr, lb, ub\n'), ((841, 865), 'tests.active_portfolio.data.get_linear_constraints', 'get_linear_constraints', ([], {}), '()\n', (863, 865), False, 'from tests.active_portfolio.data import adj, cov_mat, get_expected_results, get_linear_constraints, get_risk_cstr, lb, ub\n'), ((1980, 2020), 'tests.utils.fetch_opt_data_test_file', 'fetch_opt_data_test_file', (['f"""active-cvar"""'], {}), "(f'active-cvar')\n", (2004, 2020), False, 'from tests.utils import fetch_opt_data_test_file\n'), ((2719, 2770), 'allopy.ActivePortfolioOptimizer', 'ActivePortfolioOptimizer', (['cube'], {'cvar_data': 'cvar_data'}), '(cube, cvar_data=cvar_data)\n', (2743, 2770), False, 'from allopy import ActivePortfolioOptimizer, OptData\n'), ((2905, 2946), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['results', 'expected', '(4)'], {}), '(results, expected, 4)\n', (2924, 2946), False, 'from numpy.testing import assert_almost_equal\n'), ((1560, 1599), 'tests.utils.fetch_opt_data_test_file', 'fetch_opt_data_test_file', (['f"""active-{s}"""'], {}), "(f'active-{s}')\n", (1584, 1599), False, 'from tests.utils import fetch_opt_data_test_file\n'), ((2065, 2093), 'allopy.datasets.load_monte_carlo', 
'load_monte_carlo', ([], {'total': '(True)'}), '(total=True)\n', (2081, 2093), False, 'from allopy.datasets import load_monte_carlo\n'), ((1652, 1680), 'allopy.datasets.load_monte_carlo', 'load_monte_carlo', ([], {'total': '(True)'}), '(total=True)\n', (1668, 1680), False, 'from allopy.datasets import load_monte_carlo\n')] |
"""Conversion tool from EDF+,BDF to FIF
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import calendar
import datetime
import re
import warnings
from math import ceil, floor
import numpy as np
from ...transforms import als_ras_trans_mm, apply_trans
from ...utils import verbose, logger
from ..raw import Raw
from ..meas_info import Info
from ..constants import FIFF
from ...coreg import get_ras_to_neuromag_trans
from ...filter import resample
from ...externals.six.moves import zip
class RawEDF(Raw):
    """Raw object from EDF+,BDF file

    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    n_eeg : int | None
        Number of EEG electrodes.
        If None, all channels are considered EEG.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    hpts : str | None
        Path to the hpts file containing electrode positions.
        If None, sensor locations are (0,0,0).
    preload : bool
        If True, all data are loaded at initialization.
        If False, data are not read until save.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    There is an assumption that the data are arranged such that EEG channels
    appear first then miscellaneous channels (EOGs, AUX, STIM).
    The stimulus channel is saved as 'STI 014'

    See Also
    --------
    mne.fiff.Raw : Documentation of attribute and methods.
    """
    @verbose
    def __init__(self, input_fname, n_eeg=None, stim_channel=-1, annot=None,
                 annotmap=None, hpts=None, preload=False, verbose=None):
        logger.info('Extracting edf Parameters from %s...' % input_fname)
        input_fname = os.path.abspath(input_fname)
        # parse the EDF/BDF header into measurement info + format details
        self.info, self._edf_info = _get_edf_info(input_fname, n_eeg,
                                                  stim_channel, annot,
                                                  annotmap, hpts, preload)
        logger.info('Creating Raw.info structure...')
        # annot and annotmap only work as a pair
        if bool(annot) != bool(annotmap):
            warnings.warn(("Stimulus Channel will not be annotated. "
                           "Both 'annot' and 'annotmap' must be specified."))
        # Raw attributes
        self.verbose = verbose
        self._preloaded = False
        self.fids = list()
        self._projector = None
        self.first_samp = 0
        self.last_samp = self._edf_info['nsamples'] - 1
        self.comp = None  # no compensation for EDF
        self.proj = False
        self._first_samps = np.array([self.first_samp])
        self._last_samps = np.array([self.last_samp])
        self._raw_lengths = np.array([self._edf_info['nsamples']])
        self.rawdirs = np.array([])
        self.cals = np.array([])
        self.orig_format = 'int'
        if preload:
            self._preloaded = preload
            logger.info('Reading raw data from %s...' % input_fname)
            self._data, _ = self._read_segment()
            assert len(self._data) == self.info['nchan']
            # Add time info
            self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
            self._times = np.arange(self.first_samp, self.last_samp + 1,
                                    dtype=np.float64)
            self._times /= self.info['sfreq']
            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
                        % (self.first_samp, self.last_samp,
                           float(self.first_samp) / self.info['sfreq'],
                           float(self.last_samp) / self.info['sfreq']))
        logger.info('Ready.')

    def __repr__(self):
        # short summary: file name plus channel/sample counts
        n_chan = self.info['nchan']
        data_range = self.last_samp - self.first_samp + 1
        s = ('%r' % os.path.basename(self.info['file_id']),
             "n_channels x n_times : %s x %s" % (n_chan, data_range))
        return "<RawEDF  |  %s>" % ', '.join(s)

    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
                      projector=None):
        """Read a chunk of raw data

        Parameters
        ----------
        start : int, (optional)
            first sample to include (first is 0). If omitted, defaults to the
            first sample in data.
        stop : int, (optional)
            First sample to not include.
            If omitted, data is included to the end.
        sel : array, optional
            Indices of channels to select.
        projector : array
            SSP operator to apply to the data.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        data : array, [channels x samples]
           the data matrix (channels x samples).
        times : array, [samples]
            returns the time values corresponding to the samples.
        """
        if sel is None:
            sel = list(range(self.info['nchan']))
        elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
            # NOTE(review): sentinel for a 1-channel/1-sample probe call —
            # presumably recognized by the base Raw machinery; verify
            return (666, 666)
        if projector is not None:
            raise NotImplementedError('Currently does not handle projections.')
        if stop is None:
            stop = self.last_samp + 1
        elif stop > self.last_samp + 1:
            stop = self.last_samp + 1
        #  Initial checks
        start = int(start)
        stop = int(stop)
        sfreq = self.info['sfreq']
        n_chan = self.info['nchan']
        data_size = self._edf_info['data_size']
        data_offset = self._edf_info['data_offset']
        stim_channel = self._edf_info['stim_channel']
        annot = self._edf_info['annot']
        annotmap = self._edf_info['annotmap']
        # round the requested range out to whole data records
        blockstart = int(floor(float(start) / sfreq) * sfreq)
        blockstop = int(ceil(float(stop) / sfreq) * sfreq)
        if start >= stop:
            raise ValueError('No data in this range')
        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
                    (start, stop - 1, start / float(sfreq),
                     (stop - 1) / float(sfreq)))
        gains = []
        for chan in range(n_chan):
            # gain constructor: physical range / digital range, scaled to SI
            physical_range = self.info['chs'][chan]['range']
            cal = float(self.info['chs'][chan]['cal'])
            unit_mul = 10 ** self.info['chs'][chan]['unit_mul']
            gains.append(unit_mul * (physical_range / cal))
        with open(self.info['file_id'], 'rb') as fid:
            # extract data
            fid.seek(data_offset)
            buffer_size = blockstop - blockstart
            pointer = blockstart * n_chan * data_size
            fid.seek(data_offset + pointer)
            if 'n_samps' in self._edf_info:
                # channels sampled at different rates within each record
                n_samps = self._edf_info['n_samps']
                max_samp = float(np.max(n_samps))
                blocks = int(buffer_size / max_samp)
            else:
                blocks = int(ceil(float(buffer_size) / sfreq))
            datas = []
            # bdf data: 24bit data
            if self._edf_info['subtype'] == '24BIT':
                data = fid.read(buffer_size * n_chan * data_size)
                data = np.fromstring(data, np.uint8)
                data = data.reshape(-1, 3).astype(np.int32)
                # this converts to 24-bit little endian integer
                # (numpy has no native 24-bit dtype, so assemble from bytes)
                data = (data[:, 0] + (data[:, 1] << 8) + (data[:, 2] << 16))
                # 24th bit determines the sign
                data[data >= (1 << 23)] -= (1 << 24)
                data = data.reshape((sfreq, n_chan, blocks), order='F')
                for i in range(blocks):
                    datas.append(data[:, :, i].T)
            else:
                if 'n_samps' in self._edf_info:
                    # read each channel's chunk per record, then resample the
                    # slower channels up to the fastest rate
                    data = []
                    for _ in range(blocks):
                        for samp in n_samps:
                            chan_data = np.fromfile(fid, dtype='<i2',
                                                    count=samp)
                            data.append(chan_data)
                    for i, samp in enumerate(n_samps):
                        chan_data = data[i::n_chan]
                        chan_data = np.hstack(chan_data)
                        if samp != max_samp:
                            mult = max_samp / samp
                            chan_data = resample(x=chan_data, up=mult,
                                                 down=1, npad=0)
                        datas.append(chan_data)
                else:
                    data = np.fromfile(fid, dtype='<i2',
                                       count=buffer_size * n_chan)
                    data = data.reshape((sfreq, n_chan, blocks), order='F')
                    for i in range(blocks):
                        datas.append(data[:, :, i].T)
        if 'n_samps' in self._edf_info:
            data = np.vstack(datas)
        else:
            data = np.hstack(datas)
        # apply per-channel gains (digital -> physical units)
        gains = np.array([gains])
        data = gains.T * data
        if stim_channel is not None:
            if annot and annotmap:
                # rebuild the stim channel from the annotation file
                data[stim_channel] = 0
                evts = _read_annot(annot, annotmap, sfreq, self.last_samp)
                data[stim_channel, :evts.size] = evts[start:stop]
            else:
                # keep only the low byte of the raw trigger values
                stim = np.array(data[stim_channel], int)
                mask = 255 * np.ones(stim.shape, int)
                stim = np.bitwise_and(stim, mask)
                data[stim_channel] = stim
        # trim the record-aligned buffer back to the requested sample range
        datastart = start - blockstart
        datastop = stop - blockstart
        data = data[sel, datastart:datastop]
        logger.info('[done]')
        times = np.arange(start, stop, dtype=float) / self.info['sfreq']
        return data, times
def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
    """Extracts all the information from the EDF+,BDF file.
    Parameters
    ----------
    fname : str
        Raw EDF+,BDF file to be read.
    n_eeg : int | None
        Number of EEG electrodes.
        If None, all channels are considered EEG.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel.
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    hpts : str | None
        Path to the hpts file containing electrode positions.
        If None, sensor locations are (0,0,0).
    preload : bool
        If True, all data are loaded at initialization.
        If False, data are not read until save.
    Returns
    -------
    info : instance of Info
        The measurement info.
    edf_info : dict
        A dict containing all the EDF+,BDF specific parameters.
    """
    # Build a FIF-style measurement-info object; fields EDF/BDF does not
    # provide are filled with None / empty placeholders.
    info = Info()
    info['file_id'] = fname
    # Add info for fif object
    info['meas_id'] = None
    info['projs'] = []
    info['comps'] = []
    info['bads'] = []
    info['acq_pars'], info['acq_stim'] = None, None
    info['filename'] = fname
    info['ctf_head_t'] = None
    info['dev_ctf_t'] = []
    info['filenames'] = []
    info['dig'] = None
    info['dev_head_t'] = None
    info['proj_id'] = None
    info['proj_name'] = None
    info['experimenter'] = None
    info['line_freq'] = None
    info['subject_info'] = None
    edf_info = dict()
    edf_info['annot'] = annot
    edf_info['annotmap'] = annotmap
    # Parse the fixed-width EDF/BDF header; the hard-coded read sizes below
    # follow the EDF field widths.
    with open(fname, 'rb') as fid:
        assert(fid.tell() == 0)
        fid.seek(8)  # skip the 8-byte version field
        _ = fid.read(80).strip()  # subject id
        _ = fid.read(80).strip()  # recording id
        # Recording start date (dd.mm.yy) and time (hh.mm.ss).
        day, month, year = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
                                                         fid.read(8).decode())]
        # NOTE(review): EDF stores a two-digit year; "year + 2000" breaks
        # for recordings made before 2000 -- confirm acceptable.
        date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
        info['meas_date'] = calendar.timegm(date.utctimetuple())
        # Byte offset at which the data records begin.
        edf_info['data_offset'] = header_nbytes = int(fid.read(8))
        # First 5 chars of the reserved field; '24BIT' marks a BDF file.
        subtype = fid.read(44).strip().decode()[:5]
        edf_info['subtype'] = subtype
        edf_info['n_records'] = n_records = int(fid.read(8))
        # record length in seconds
        edf_info['record_length'] = record_length = float(fid.read(8))
        info['nchan'] = int(fid.read(4))
        if n_eeg is None:
            n_eeg = info['nchan']
        channels = list(range(info['nchan']))
        # Per-channel fields are stored consecutively for all channels.
        ch_names = [fid.read(16).strip().decode() for _ in channels]
        _ = [fid.read(80).strip() for _ in channels]  # transducer type
        units = [fid.read(8).strip().decode() for _ in channels]
        # Map physical unit strings to power-of-ten multipliers (uV -> -6).
        for i, unit in enumerate(units):
            if unit == 'uV':
                units[i] = -6
            elif unit == 'V':
                units[i] = 0
            else:
                units[i] = 1
        physical_min = np.array([float(fid.read(8)) for _ in channels])
        physical_max = np.array([float(fid.read(8)) for _ in channels])
        digital_min = np.array([float(fid.read(8)) for _ in channels])
        digital_max = np.array([float(fid.read(8)) for _ in channels])
        # Prefiltering strings, e.g. "HP:0.1Hz LP:75Hz"; the last channel
        # is dropped here (presumably the annotation/status channel --
        # TODO confirm).
        prefiltering = [fid.read(80).strip().decode() for _ in channels][:-1]
        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                              for filt in prefiltering])
        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                             for filt in prefiltering])
        # Consolidate the per-channel highpass settings into a single value.
        if highpass.size == 0:
            info['highpass'] = None
        elif all(highpass):
            if highpass[0] == 'NaN':
                info['highpass'] = None
            elif highpass[0] == 'DC':
                info['highpass'] = 0
            else:
                info['highpass'] = int(highpass[0])
        else:
            info['highpass'] = np.min(highpass)
            warnings.warn('%s' % ('Channels contain different highpass'
                                  + 'filters. Highest filter setting will'
                                  + 'be stored.'))
        # Same consolidation for the lowpass settings.
        if lowpass.size == 0:
            info['lowpass'] = None
        elif all(lowpass):
            if lowpass[0] == 'NaN':
                info['lowpass'] = None
            else:
                info['lowpass'] = int(lowpass[0])
        else:
            info['lowpass'] = np.min(lowpass)
            warnings.warn('%s' % ('Channels contain different lowpass filters.'
                                  ' Lowest filter setting will be stored.'))
        n_samples_per_record = [int(fid.read(8)) for _ in channels]
        # Heterogeneous sampling rates require resampling at read time,
        # which is only supported when the data are preloaded.
        if np.unique(n_samples_per_record).size != 1:
            edf_info['n_samps'] = np.array(n_samples_per_record)
            if not preload:
                raise RuntimeError('%s' % ('Channels contain different'
                                           'sampling rates. '
                                           'Must set preload=True'))
        n_samples_per_record = n_samples_per_record[0]
        fid.read(32 * info['nchan'])  # reserved
        # We must have consumed exactly the advertised header size.
        assert fid.tell() == header_nbytes
        physical_ranges = physical_max - physical_min
        cals = digital_max - digital_min
    info['sfreq'] = int(n_samples_per_record / record_length)
    edf_info['nsamples'] = n_records * n_samples_per_record
    # Some keys to be consistent with FIF measurement info
    info['description'] = None
    info['buffer_size_sec'] = 10.
    info['orig_blocks'] = None
    info['orig_fid_str'] = None
    if edf_info['subtype'] == '24BIT':
        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
    else:
        edf_info['data_size'] = 2  # 16-bit (2 byte) integers
    # Electrode locations: read from the hpts file if available, otherwise
    # every channel gets (0, 0, 0).
    if hpts and os.path.lexists(hpts):
        # NOTE(review): `fid` is reused for the file *contents* here and
        # the handle is never closed explicitly.
        fid = open(hpts, 'rb').read().decode()
        locs = {}
        temp = re.findall('eeg\s(\w+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)',
                          fid)
        temp += re.findall('cardinal\s([\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?'
                           '[\d,.]+)', fid)
        for loc in temp:
            coord = np.array(loc[1:], dtype=float)
            coord = apply_trans(als_ras_trans_mm, coord)
            locs[loc[0].lower()] = coord
        # Cardinal points '1'/'2'/'3' are used as LPA/nasion/RPA below;
        # build the transform into the Neuromag head coordinate frame.
        trans = get_ras_to_neuromag_trans(nasion=locs['2'], lpa=locs['1'],
                                          rpa=locs['3'])
        for loc in locs:
            locs[loc] = apply_trans(trans, locs[loc])
        # Store the three cardinal points as digitization entries.
        info['dig'] = []
        point_dict = {}
        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        point_dict['ident'] = FIFF.FIFFV_POINT_NASION
        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
        point_dict['r'] = apply_trans(trans, locs['2'])
        info['dig'].append(point_dict)
        point_dict = {}
        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        point_dict['ident'] = FIFF.FIFFV_POINT_LPA
        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
        point_dict['r'] = apply_trans(trans, locs['1'])
        info['dig'].append(point_dict)
        point_dict = {}
        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        point_dict['ident'] = FIFF.FIFFV_POINT_RPA
        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
        point_dict['r'] = apply_trans(trans, locs['3'])
        info['dig'].append(point_dict)
    else:
        locs = {}
    # Channels without a location in the hpts file default to the origin.
    locs = [locs[ch_name.lower()] if ch_name.lower() in locs.keys()
            else (0, 0, 0) for ch_name in ch_names]
    sensor_locs = np.array(locs)
    # Creates a list of dicts of eeg channels for raw.info
    logger.info('Setting channel info structure...')
    info['chs'] = []
    info['ch_names'] = ch_names
    # -1 means "use the last channel" (idx below is 1-based).
    if stim_channel == -1:
        stim_channel = info['nchan']
    for idx, ch_info in enumerate(zip(ch_names, sensor_locs, physical_ranges,
                                      cals, units), 1):
        ch_name, ch_loc, physical_range, cal, unit_mul = ch_info
        chan_info = {}
        chan_info['cal'] = cal
        chan_info['logno'] = idx
        chan_info['scanno'] = idx
        chan_info['range'] = physical_range
        chan_info['unit_mul'] = unit_mul
        chan_info['ch_name'] = ch_name
        chan_info['unit'] = FIFF.FIFF_UNIT_V
        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
        chan_info['kind'] = FIFF.FIFFV_EEG_CH
        chan_info['eeg_loc'] = ch_loc
        chan_info['loc'] = np.zeros(12)
        chan_info['loc'][:3] = ch_loc
        # Channels beyond the first n_eeg are treated as miscellaneous.
        if idx > n_eeg:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
        # Mark the requested stim channel (matched by name or 1-based
        # index) and rename it to the conventional 'STI 014'.
        check1 = stim_channel == ch_name
        check2 = stim_channel == idx
        check3 = info['nchan'] > 1
        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
        if stim_check:
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['unit_mul'] = 0
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_STIM_CH
            chan_info['ch_name'] = 'STI 014'
            info['ch_names'][idx - 1] = chan_info['ch_name']
            if isinstance(stim_channel, str):
                stim_channel = idx
        info['chs'].append(chan_info)
    # Store the 0-based stim channel index for the data reader.
    if stim_channel is None:
        edf_info['stim_channel'] = stim_channel
    else:
        edf_info['stim_channel'] = stim_channel - 1
    return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
"""Annotation File Reader
Parameters
----------
annot : str
Path to annotation file.
annotmap : str
Path to annotation map file containing mapping from label to trigger.
sfreq : int
Sampling frequency.
data_length : int
Length of the data file.
Returns
-------
stim_channel : ndarray
An array containing stimulus trigger events.
"""
pat = '([+/-]\d+.\d+),(\w+)'
annot = open(annot).read()
triggers = re.findall(pat, annot)
times, values = zip(*triggers)
times = [float(time) * sfreq for time in times]
pat = '(\w+):(\d+)'
annotmap = open(annotmap).read()
mappings = re.findall(pat, annotmap)
maps = {}
for mapping in mappings:
maps[mapping[0]] = mapping[1]
triggers = [int(maps[value]) for value in values]
stim_channel = np.zeros(data_length)
for time, trigger in zip(times, triggers):
stim_channel[time] = trigger
return stim_channel
def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
                 annotmap=None, hpts=None, preload=False, verbose=None):
    """Read an EDF+/BDF file and convert it to a FIF-compatible Raw object.

    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    n_eeg : int | None
        Number of EEG electrodes; if None every channel is treated as EEG.
    stim_channel : str | int | None
        Channel name or 0-based index of the stimulus channel; -1 selects
        the last channel, None adds no stim channel.
    annot : str | None
        Path to an annotation file; if None no derived stim channel is
        added (needed for files whose stim channel requires annotations).
    annotmap : str | None
        Path to a label-to-trigger map file; required when ``annot`` is
        given.
    hpts : str | None
        Path to an hpts file with electrode positions; if None all sensor
        locations default to (0, 0, 0).
    preload : bool
        When True, all data are loaded at initialization; otherwise data
        are not read until save.
    verbose : bool, str, int, or None
        If not None, override the default verbose level (see mne.verbose).
    """
    # Thin factory: forward every argument to the RawEDF constructor.
    reader_kwargs = dict(input_fname=input_fname, n_eeg=n_eeg,
                         stim_channel=stim_channel, annot=annot,
                         annotmap=annotmap, hpts=hpts, preload=preload,
                         verbose=verbose)
    return RawEDF(**reader_kwargs)
| [
"numpy.ones",
"numpy.arange",
"numpy.unique",
"os.path.lexists",
"os.path.abspath",
"numpy.max",
"re.findall",
"numpy.fromstring",
"os.path.basename",
"datetime.datetime",
"numpy.hstack",
"numpy.min",
"numpy.vstack",
"numpy.fromfile",
"numpy.zeros",
"numpy.array",
"numpy.logical_or",... | [((18179, 18193), 'numpy.array', 'np.array', (['locs'], {}), '(locs)\n', (18187, 18193), True, 'import numpy as np\n'), ((20744, 20766), 're.findall', 're.findall', (['pat', 'annot'], {}), '(pat, annot)\n', (20754, 20766), False, 'import re\n'), ((20931, 20956), 're.findall', 're.findall', (['pat', 'annotmap'], {}), '(pat, annotmap)\n', (20941, 20956), False, 'import re\n'), ((21112, 21133), 'numpy.zeros', 'np.zeros', (['data_length'], {}), '(data_length)\n', (21120, 21133), True, 'import numpy as np\n'), ((2214, 2242), 'os.path.abspath', 'os.path.abspath', (['input_fname'], {}), '(input_fname)\n', (2229, 2242), False, 'import os\n'), ((3041, 3068), 'numpy.array', 'np.array', (['[self.first_samp]'], {}), '([self.first_samp])\n', (3049, 3068), True, 'import numpy as np\n'), ((3096, 3122), 'numpy.array', 'np.array', (['[self.last_samp]'], {}), '([self.last_samp])\n', (3104, 3122), True, 'import numpy as np\n'), ((3151, 3189), 'numpy.array', 'np.array', (["[self._edf_info['nsamples']]"], {}), "([self._edf_info['nsamples']])\n", (3159, 3189), True, 'import numpy as np\n'), ((3213, 3225), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3221, 3225), True, 'import numpy as np\n'), ((3246, 3258), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3254, 3258), True, 'import numpy as np\n'), ((9449, 9466), 'numpy.array', 'np.array', (['[gains]'], {}), '([gains])\n', (9457, 9466), True, 'import numpy as np\n'), ((12635, 12696), 'datetime.datetime', 'datetime.datetime', (['(year + 2000)', 'month', 'day', 'hour', 'minute', 'sec'], {}), '(year + 2000, month, day, hour, minute, sec)\n', (12652, 12696), False, 'import datetime\n'), ((16425, 16446), 'os.path.lexists', 'os.path.lexists', (['hpts'], {}), '(hpts)\n', (16440, 16446), False, 'import os\n'), ((16528, 16604), 're.findall', 're.findall', (['"""eeg\\\\s(\\\\w+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)"""', 'fid'], {}), 
"('eeg\\\\s(\\\\w+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)', fid)\n", (16538, 16604), False, 'import re\n'), ((16639, 16729), 're.findall', 're.findall', (['"""cardinal\\\\s([\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)"""', 'fid'], {}), "('cardinal\\\\s([\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)\\\\s(-?[\\\\d,.]+)'\n , fid)\n", (16649, 16729), False, 'import re\n'), ((19134, 19146), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (19142, 19146), True, 'import numpy as np\n'), ((2568, 2681), 'warnings.warn', 'warnings.warn', (['"""Stimulus Channel will not be annotated. Both \'annot\' and \'annotmap\' must be specified."""'], {}), '(\n "Stimulus Channel will not be annotated. Both \'annot\' and \'annotmap\' must be specified."\n )\n', (2581, 2681), False, 'import warnings\n'), ((3654, 3718), 'numpy.arange', 'np.arange', (['self.first_samp', '(self.last_samp + 1)'], {'dtype': 'np.float64'}), '(self.first_samp, self.last_samp + 1, dtype=np.float64)\n', (3663, 3718), True, 'import numpy as np\n'), ((9366, 9382), 'numpy.vstack', 'np.vstack', (['datas'], {}), '(datas)\n', (9375, 9382), True, 'import numpy as np\n'), ((9416, 9432), 'numpy.hstack', 'np.hstack', (['datas'], {}), '(datas)\n', (9425, 9432), True, 'import numpy as np\n'), ((10138, 10173), 'numpy.arange', 'np.arange', (['start', 'stop'], {'dtype': 'float'}), '(start, stop, dtype=float)\n', (10147, 10173), True, 'import numpy as np\n'), ((15428, 15458), 'numpy.array', 'np.array', (['n_samples_per_record'], {}), '(n_samples_per_record)\n', (15436, 15458), True, 'import numpy as np\n'), ((16792, 16822), 'numpy.array', 'np.array', (['loc[1:]'], {'dtype': 'float'}), '(loc[1:], dtype=float)\n', (16800, 16822), True, 'import numpy as np\n'), ((19467, 19496), 'numpy.logical_or', 'np.logical_or', (['check1', 'check2'], {}), '(check1, check2)\n', (19480, 19496), True, 'import numpy as np\n'), ((4246, 4284), 'os.path.basename', 'os.path.basename', 
(["self.info['file_id']"], {}), "(self.info['file_id'])\n", (4262, 4284), False, 'import os\n'), ((7640, 7669), 'numpy.fromstring', 'np.fromstring', (['data', 'np.uint8'], {}), '(data, np.uint8)\n', (7653, 7669), True, 'import numpy as np\n'), ((9790, 9823), 'numpy.array', 'np.array', (['data[stim_channel]', 'int'], {}), '(data[stim_channel], int)\n', (9798, 9823), True, 'import numpy as np\n'), ((9901, 9927), 'numpy.bitwise_and', 'np.bitwise_and', (['stim', 'mask'], {}), '(stim, mask)\n', (9915, 9927), True, 'import numpy as np\n'), ((14040, 14073), 're.findall', 're.findall', (['"""HP:\\\\s+(\\\\w+)"""', 'filt'], {}), "('HP:\\\\s+(\\\\w+)', filt)\n", (14050, 14073), False, 'import re\n'), ((14156, 14189), 're.findall', 're.findall', (['"""LP:\\\\s+(\\\\w+)"""', 'filt'], {}), "('LP:\\\\s+(\\\\w+)', filt)\n", (14166, 14189), False, 'import re\n'), ((14605, 14621), 'numpy.min', 'np.min', (['highpass'], {}), '(highpass)\n', (14611, 14621), True, 'import numpy as np\n'), ((14634, 14755), 'warnings.warn', 'warnings.warn', (["('%s' % ('Channels contain different highpass' +\n 'filters. Highest filter setting will' + 'be stored.'))"], {}), "('%s' % ('Channels contain different highpass' +\n 'filters. Highest filter setting will' + 'be stored.'))\n", (14647, 14755), False, 'import warnings\n'), ((15099, 15114), 'numpy.min', 'np.min', (['lowpass'], {}), '(lowpass)\n', (15105, 15114), True, 'import numpy as np\n'), ((15127, 15241), 'warnings.warn', 'warnings.warn', (["('%s' %\n 'Channels contain different lowpass filters. Lowest filter setting will be stored.'\n )"], {}), "('%s' %\n 'Channels contain different lowpass filters. 
Lowest filter setting will be stored.'\n )\n", (15140, 15241), False, 'import warnings\n'), ((15351, 15382), 'numpy.unique', 'np.unique', (['n_samples_per_record'], {}), '(n_samples_per_record)\n', (15360, 15382), True, 'import numpy as np\n'), ((7289, 7304), 'numpy.max', 'np.max', (['n_samps'], {}), '(n_samps)\n', (7295, 7304), True, 'import numpy as np\n'), ((9036, 9093), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '"""<i2"""', 'count': '(buffer_size * n_chan)'}), "(fid, dtype='<i2', count=buffer_size * n_chan)\n", (9047, 9093), True, 'import numpy as np\n'), ((9853, 9877), 'numpy.ones', 'np.ones', (['stim.shape', 'int'], {}), '(stim.shape, int)\n', (9860, 9877), True, 'import numpy as np\n'), ((8686, 8706), 'numpy.hstack', 'np.hstack', (['chan_data'], {}), '(chan_data)\n', (8695, 8706), True, 'import numpy as np\n'), ((8398, 8439), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '"""<i2"""', 'count': 'samp'}), "(fid, dtype='<i2', count=samp)\n", (8409, 8439), True, 'import numpy as np\n')] |
'''
Use TensorFlow to learn event embeddings.
Input: n_size * [[actor], [action], [object]] word-embedding triples.
Output: event embeddings and the trained model.
'''
import tensorflow as tf
import pickle
import numpy as np
import datetime
import pandas as pd
# Load the preprocessed word-embedding dictionary and the event/time
# records. Context managers ensure the pickle files are closed (the
# original `pickle.load(open(...))` calls leaked the handles).
# NOTE: pickle is only safe for trusted local files.
with open("./traindata/wordsDic.pkl", "rb") as fin:
    wordsDic = pickle.load(fin)
with open("./traindata/eventWord_time.pkl", "rb") as fin:
    eventWord_time = pickle.load(fin)
def getWordVec(str):
    """Return the 100-d embedding for a word or whitespace-separated phrase.

    A single word is looked up directly in ``wordsDic`` (zeros if unknown);
    a phrase is embedded as the mean of the embeddings of its known words.
    NOTE: the parameter name shadows the builtin ``str``; it is kept so
    keyword callers are not broken.
    """
    word = str.split()
    if len(word) == 1:
        if str in wordsDic:
            return np.array(wordsDic[str])
        else:
            return np.zeros([1, 100])
    else:
        vec = np.zeros([1, 100])
        cnt = 0
        for w in word:
            if w in wordsDic:
                vec += np.array(wordsDic[w]).reshape([1, 100])
                cnt += 1
        if cnt == 0:
            # No word of the phrase (or an empty string) is in the
            # vocabulary: return zeros instead of dividing by zero, which
            # previously produced an all-NaN vector.
            return vec.reshape([-1, 100])
        return np.divide(vec, cnt).reshape([-1, 100])
# change eventset to time: [600]
def getEventSet():
    """Collect (time, stacked-embedding) training pairs.

    For every record in ``eventWord_time``, stack the actor/action/object
    word vectors and keep the event only if it happened before the
    2010-11-20 cutoff. Records that fail to parse are reported and skipped.

    Returns
    -------
    tuple
        ``(event_time_set, event_dataset)`` -- a list of dates and an
        array of stacked (3 x 100) event matrices.
    """
    kept_times = []
    kept_vectors = []
    cutoff = datetime.date(2010, 11, 20)
    for i in range(len(eventWord_time)):
        record = eventWord_time[i]
        try:
            actor_vec = getWordVec(record["subWord"])
            action_vec = getWordVec(record["eventWord"])
            object_vec = getWordVec(record["objWord"])
            occurred = datetime.datetime.strptime(record["time"],
                                                   "%Y%m%d").date()
            stacked = np.vstack((actor_vec, action_vec, object_vec))
            if occurred < cutoff:
                kept_vectors.append(stacked)
                kept_times.append(occurred)
        except Exception as e:
            print(e, record)
    return kept_times, np.array(kept_vectors)
# Parameters
dimension = 100  # word-embedding dimensionality
learning_rate = 1e-10
training_epochs = 100  # 500
k = 64  # width of the hidden layers
batch_size = 256
# input
# Placeholder for a batch of events; each event is the concatenation of
# the (actor, action, object) 100-d embeddings.
event = tf.placeholder(tf.float32, [None, dimension * 3])
# NOTE(review): event_corrupted is declared but never fed or used below
# (the "corrupted" branch reuses `event`) -- confirm intent.
event_corrupted = tf.placeholder(tf.float32, [None, dimension * 3])
# define variables
# NOTE(review): `tensor` is defined but not referenced by tnn() below.
tensor = {
    't1': tf.Variable(tf.random_normal([dimension, k])),
    't2': tf.Variable(tf.random_normal([dimension, k])),
    't3': tf.Variable(tf.random_normal([k, k]))
}
weights = {
    'w1': tf.Variable(tf.zeros([dimension * 2, k]) + 0.1),
    'w2': tf.Variable(tf.random_normal([dimension * 2, k])),
    'w3': tf.Variable(tf.random_normal([k * 2, k]))
}
# Biases ("baises" is a historical typo kept because tnn() uses the name).
baises = {
    'b1': tf.Variable(tf.zeros([1, k]) + 0.1),
    'b2': tf.Variable(tf.zeros([1, k]) + 0.1),
    'b3': tf.Variable(tf.zeros([1, k]) + 0.1)
}
# define NTN
def tnn(event):
actor = event[:, :dimension]
action = event[:, dimension:-dimension]
object = event[:, -dimension:]
r1 = tf.matmul(tf.reshape(tf.stack([actor, action]), [-1, 2 * dimension]), weights['w1']) + \
baises['b1']
r2 = tf.matmul(tf.reshape(tf.stack([action, object]), [-1, 2 * dimension]), weights['w2']) + \
baises['b2']
u = tf.matmul(tf.reshape(tf.stack([r1, r2]), [-1, 2 * k]), weights['w3']) + \
baises['b3']
u = tf.Print(u, [u], message="This is u: ")
print('===done=======')
return u
pred = tnn(event)
# NOTE(review): the "corrupted" branch is built from the same `event`
# placeholder, not from `event_corrupted`; switching it would break the
# sess.run() calls below (which only feed `event`), so the original
# behavior is kept -- confirm intent with the author.
corrupted_pred = tnn(event)
# Loss couples the two branches through a clipped log term.
loss = -tf.reduce_sum(pred * tf.log(tf.clip_by_value(corrupted_pred, 1e-1, 1.0)), reduction_indices=[1])
# loss = tf.maximum(tf.zeros([64, 64]), tf.ones([64, 64]) - pred + corrupted_pred)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# load dataSet
eTime, eSet = getEventSet()
input_event = np.reshape(eSet, [-1, 3 * dimension])
# Replace NaNs (events with unknown words) by 0 before training.
input_event = pd.DataFrame(input_event).fillna(0)
input_event.dtype = 'float32'
input_event = np.array(input_event)
# Add ops to save and restore all the variables
saver = tf.train.Saver()
print("====================begin train================================")
# Run the session inside a context manager so its resources are released
# (the original session was never closed).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(200):
        _, lossvalue = sess.run([optimizer, loss], feed_dict={event: input_event})
        test_pre = sess.run(pred, feed_dict={event: input_event})
        # print(test_pre)
        print(lossvalue)
    test_pre = sess.run(pred, feed_dict={event: input_event})
    # Save the variables to disk
    save_path = saver.save(sess, "./model/eventModel.ckpt")
print("Event Model saved in file: ", save_path)
result = list(zip(eTime, test_pre))
# print(result[:2])
# Close the output file handle explicitly (the original leaked it).
with open("./traindata/eventEmbedding01.pkl", "wb") as fout:
    pickle.dump(result, fout, True)
| [
"pandas.DataFrame",
"numpy.divide",
"tensorflow.train.Saver",
"tensorflow.clip_by_value",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.zeros",
"datetime.date",
"tensorflow.stack",
"tensorflow.placeholder",
"datetime.datetime.strptime",
"tensorflow.Print",
"numpy.ar... | [((1746, 1795), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, dimension * 3]'], {}), '(tf.float32, [None, dimension * 3])\n', (1760, 1795), True, 'import tensorflow as tf\n'), ((1814, 1863), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, dimension * 3]'], {}), '(tf.float32, [None, dimension * 3])\n', (1828, 1863), True, 'import tensorflow as tf\n'), ((3351, 3388), 'numpy.reshape', 'np.reshape', (['eSet', '[-1, 3 * dimension]'], {}), '(eSet, [-1, 3 * dimension])\n', (3361, 3388), True, 'import numpy as np\n'), ((3483, 3504), 'numpy.array', 'np.array', (['input_event'], {}), '(input_event)\n', (3491, 3504), True, 'import numpy as np\n'), ((3562, 3578), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3576, 3578), True, 'import tensorflow as tf\n'), ((3660, 3672), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3670, 3672), True, 'import tensorflow as tf\n'), ((2898, 2937), 'tensorflow.Print', 'tf.Print', (['u', '[u]'], {'message': '"""This is u: """'}), "(u, [u], message='This is u: ')\n", (2906, 2937), True, 'import tensorflow as tf\n'), ((3682, 3715), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3713, 3715), True, 'import tensorflow as tf\n'), ((564, 582), 'numpy.zeros', 'np.zeros', (['[1, 100]'], {}), '([1, 100])\n', (572, 582), True, 'import numpy as np\n'), ((1599, 1622), 'numpy.array', 'np.array', (['event_dataset'], {}), '(event_dataset)\n', (1607, 1622), True, 'import numpy as np\n'), ((1917, 1949), 'tensorflow.random_normal', 'tf.random_normal', (['[dimension, k]'], {}), '([dimension, k])\n', (1933, 1949), True, 'import tensorflow as tf\n'), ((1974, 2006), 'tensorflow.random_normal', 'tf.random_normal', (['[dimension, k]'], {}), '([dimension, k])\n', (1990, 2006), True, 'import tensorflow as tf\n'), ((2031, 2055), 'tensorflow.random_normal', 'tf.random_normal', (['[k, k]'], {}), '([k, k])\n', (2047, 2055), True, 'import 
tensorflow as tf\n'), ((2153, 2189), 'tensorflow.random_normal', 'tf.random_normal', (['[dimension * 2, k]'], {}), '([dimension * 2, k])\n', (2169, 2189), True, 'import tensorflow as tf\n'), ((2214, 2242), 'tensorflow.random_normal', 'tf.random_normal', (['[k * 2, k]'], {}), '([k * 2, k])\n', (2230, 2242), True, 'import tensorflow as tf\n'), ((3229, 3277), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3262, 3277), True, 'import tensorflow as tf\n'), ((3403, 3428), 'pandas.DataFrame', 'pd.DataFrame', (['input_event'], {}), '(input_event)\n', (3415, 3428), True, 'import pandas as pd\n'), ((464, 487), 'numpy.array', 'np.array', (['wordsDic[str]'], {}), '(wordsDic[str])\n', (472, 487), True, 'import numpy as np\n'), ((521, 539), 'numpy.zeros', 'np.zeros', (['[1, 100]'], {}), '([1, 100])\n', (529, 539), True, 'import numpy as np\n'), ((1305, 1351), 'numpy.vstack', 'np.vstack', (['(vec_actor, vec_action, vec_object)'], {}), '((vec_actor, vec_action, vec_object))\n', (1314, 1351), True, 'import numpy as np\n'), ((2094, 2122), 'tensorflow.zeros', 'tf.zeros', (['[dimension * 2, k]'], {}), '([dimension * 2, k])\n', (2102, 2122), True, 'import tensorflow as tf\n'), ((2280, 2296), 'tensorflow.zeros', 'tf.zeros', (['[1, k]'], {}), '([1, k])\n', (2288, 2296), True, 'import tensorflow as tf\n'), ((2327, 2343), 'tensorflow.zeros', 'tf.zeros', (['[1, k]'], {}), '([1, k])\n', (2335, 2343), True, 'import tensorflow as tf\n'), ((2374, 2390), 'tensorflow.zeros', 'tf.zeros', (['[1, k]'], {}), '([1, k])\n', (2382, 2390), True, 'import tensorflow as tf\n'), ((798, 817), 'numpy.divide', 'np.divide', (['vec', 'cnt'], {}), '(vec, cnt)\n', (807, 817), True, 'import numpy as np\n'), ((1380, 1407), 'datetime.date', 'datetime.date', (['(2010)', '(11)', '(20)'], {}), '(2010, 11, 20)\n', (1393, 1407), False, 'import datetime\n'), ((2574, 2599), 'tensorflow.stack', 'tf.stack', (['[actor, action]'], {}), '([actor, 
action])\n', (2582, 2599), True, 'import tensorflow as tf\n'), ((2695, 2721), 'tensorflow.stack', 'tf.stack', (['[action, object]'], {}), '([action, object])\n', (2703, 2721), True, 'import tensorflow as tf\n'), ((2815, 2833), 'tensorflow.stack', 'tf.stack', (['[r1, r2]'], {}), '([r1, r2])\n', (2823, 2833), True, 'import tensorflow as tf\n'), ((3065, 3107), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['corrupted_pred', '(0.1)', '(1.0)'], {}), '(corrupted_pred, 0.1, 1.0)\n', (3081, 3107), True, 'import tensorflow as tf\n'), ((1216, 1279), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["eventWord_time[i]['time']", '"""%Y%m%d"""'], {}), "(eventWord_time[i]['time'], '%Y%m%d')\n", (1242, 1279), False, 'import datetime\n'), ((675, 696), 'numpy.array', 'np.array', (['wordsDic[w]'], {}), '(wordsDic[w])\n', (683, 696), True, 'import numpy as np\n')] |
#
# Copyright © 2020 Intel Corporation.
#
# This software and the related documents are Intel copyrighted
# materials, and your use of them is governed by the express
# license under which they were provided to you (License). Unless
# the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the
# related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with
# no express or implied warranties, other than those that are
# expressly stated in the License.
"""Functionality to find the optimal partitioning of a DNN layer."""
import os
import time
from collections import namedtuple, OrderedDict
from typing import TYPE_CHECKING
import numpy as np
from nxsdk_modules_ncl.dnn.src.data_structures import Layer
from nxsdk_modules_ncl.dnn.src.utils import getCoreOccupancy, \
getCoreIdMapFromCoreShape, getS
if TYPE_CHECKING:
import logging
from nxsdk_modules_ncl.dnn.src.dnn_layers import NxLayer
CostTerms = namedtuple('CostTerms', ['coreCost', 'inputAxonCost',
'outputAxonCost', 'synCost',
'postLayerCost'])
class ExclusionCriteria:
"""Set of criteria that define the limits of Loihi hardware."""
__slots__ = ['maxNumSynPerSynEntry', 'maxNumCompartments',
'maxNumAxons', 'maxNumSynMemWords', 'maxNumSynFmt',
'maxNumDestinationGroups', 'maxNumSynMemWordsPerAxon',
'maxNumCoresPerChip', 'numDestinationGroups',
'coreSizeInterleaved', 'numSynFmts', 'synMemPerAxon',
'numSynMemWords', 'numInputAxons', 'numOutputAxons',
'_counterAttr']
def __init__(self):
self.maxNumSynPerSynEntry = 60
self.maxNumCompartments = 1024
self.maxNumAxons = 4096
self.maxNumSynMemWords = 16384
# ToDo: Raise maxNumSynFmt to 15 once we have a proper synapse encoder.
self.maxNumSynFmt = 7
self.maxNumDestinationGroups = 16 # Due to cxBase bug.
self.maxNumSynMemWordsPerAxon = 256
self.maxNumCoresPerChip = 128
self.numDestinationGroups = 0
self.coreSizeInterleaved = 0
self.numSynFmts = 0
self.synMemPerAxon = 0
self.numSynMemWords = 0
self.numInputAxons = 0
self.numOutputAxons = 0
self._counterAttr = [a for a in self.__slots__
if 'maxNum' not in a and '_' not in a]
def toList(self):
"""Transform class attributes into list.
:return: List of exclusion criteria.
:rtype: list[int]
"""
return [getattr(self, attr) for attr in self._counterAttr]
def asdict(self):
"""Transform class attributes into dictionary.
:return: Dictionary of exclusion criteria. Ordered according to the
time each criterion is applied.
:rtype: OrderedDict
"""
return OrderedDict([(key, getattr(self, key))
for key in self._counterAttr])
def print(self):
"""Print exclusion criteria."""
print("Excluded the following partition candidates:")
for attr in self._counterAttr:
print("\t{}: {}".format(attr, getattr(self, attr)))
@property
def numCandidates(self):
return np.sum(self.toList())
class PartitionOptimizer:
"""Determine optimal partitioning of a DNN layer.
:param int numCandidatesToCompute: Number of partitions to compare.
:param logging.Logger | None logger: Logging object.
:param str logdir: Where to save figures.
:param bool storeAllCandidates: Whether to keep all partition candidates in
memory. This flag needs to be set to ``True`` if user wants to call the
``saveCanddiateCosts`` method.
"""
def __init__(self, numCandidatesToCompute, logger, logdir=None,
storeAllCandidates=None):
self.numCandidatesToCompute = numCandidatesToCompute
self.logger = logger
self._optimalPartitions = None
self._allCandidates = []
self._storeAllCandidates = storeAllCandidates
if logdir is None:
logdir = os.path.join(os.path.expanduser('~'),
'dnn_partitioner_plots',
time.strftime('%Y%m%d-%H%M%S'))
self.logdir = logdir
@staticmethod
def savePartitionConfig(path, layer):
"""Save partition configuration of a layer.
This method saves the coreIdMap and the multiplicityMap, which can be
used to partition a layer.
:param str path: Where to save partition.
:param Layer layer: Partitioned layer to save.
"""
np.savez_compressed(os.path.join(path, layer.id), **layer.asDict())
def savePartitionConfigs(self, path):
"""Save partition configurations for all layers of a network.
:param str path: Where to save layers.
"""
self.logger.info("Saving model partitions to %s.", path)
for layer in self.getLayers():
self.savePartitionConfig(path, layer)
def getOptimalPartition(self):
"""Get optimal ``Layer`` partition.
:return: Optimal partition.
:rtype: Layer
"""
# Sorted at construction.
return self._optimalPartitions[0]
def saveOptimalPartitionCostTerms(self, path):
"""Save cost terms of optimal partition.
:param str path: Where to save cost terms.
"""
self.logger.info("Saving partition cost terms to %s.", path)
cost_terms = {}
layers = self.getLayers()
for layer in layers:
for key, value in getCostTerms(layer)._asdict().items():
if key not in cost_terms:
cost_terms[key] = []
cost_terms[key].append(value)
np.savez_compressed(os.path.join(path, 'cost_terms'), **cost_terms)
def saveCandidateCosts(self, path):
"""Save total cost of all partition candidates of each layer.
:param str path: Where to save cost.
"""
if not len(self._allCandidates):
self.logger.warning(
"Saving candidate cost failed: Candidates were not kept for "
"memory reasons. Need to set 'storeAllCandidates=True' before "
"partitioning.")
return
self.logger.info("Saving partition candidate costs to %s.", path)
allCosts = []
for candidate in self._allCandidates:
allCosts.append([layer.cost for layer in
self.getLayers(candidate)])
np.savez_compressed(os.path.join(path, 'candidate_costs'),
all_costs=allCosts)
def getLayers(self, startLayer=None):
"""Helper function to extract all partitioned layers.
Each layer stores a pointer to its parent layer, which we use to
reconstruct the network hierarchy.
:param Layer | None startLayer: If provided, use this layer as starting
point to traverse network hierarchy. If not provided, choose
optimally partitioned layer.
:return: List of partitioned layers.
:rtype list[Layer]
"""
layers = []
# Start with bottom layer.
postLayer = self.getOptimalPartition() if startLayer is None \
else startLayer
while True:
layers.append(postLayer)
postLayer = postLayer.postLayer
if postLayer is None:
# Remove last layer in this list, which is a dummy layer.
return layers[:-1]
def initialize(self, modelOutputShape):
    """Initialize ``PartitionOptimizer`` with a dummy ``Layer`` partition.

    This is necessary because the optimization of layer L requires the
    partitioning of layer L+1, and we iterate over the layers of the
    network starting with the output layer.

    :param np.ndarray modelOutputShape: Shape of output layer.
    """
    # Seed the candidate list with a dummy post-layer so the first real
    # (output) layer has a "subsequent partition" to optimize against.
    # The last axis (presumably channels — confirm against getDummyLayer
    # callers) is stripped before building the dummy.
    self._optimalPartitions = [getDummyLayer(modelOutputShape[:-1])]
def run(self, layer):
    """Partition layer.

    Computes total resource requirements for inputAxons, synapses,
    compartments and outputAxons given the partitions of the subsequent
    layer.

    Checks different ways of partitioning this layer across cores and
    computes cost of each partitioning.

    :param NxLayer | KerasLayer layer: The layer to partition.
    """
    assert self._optimalPartitions is not None, \
        "Need to call PartitionOptimizer.initialize() before running."

    # Propose a set of possible partitions for this layer, purely based on
    # its shape, not taking into account the post-layer partition.
    candidateDict = layer.getPartitionCandidates()

    # For each of the selected partition candidates of the post-layer,
    # choose again as many for the current layer.
    candidates = []
    for postLayerPartition in self._optimalPartitions:
        # Todo: The iterations in this loop are independent - parallelize!
        candidates += self.selectCandidates(candidateDict, layer,
                                            postLayerPartition)

    # Update the set of optimal partition candidates.
    # Each candidate's cost accumulates its whole post-layer chain (see
    # getCostTerms), so ranking by cost ranks complete partition paths.
    costs = [computeTotalCost(partitionCandidate)
             for partitionCandidate in candidates]
    candidates = np.array(candidates)[np.argsort(costs)]
    if self._storeAllCandidates:
        self._allCandidates = candidates
    # Keep only the cheapest candidates as anchors for the next layer.
    self._optimalPartitions = candidates[:self.numCandidatesToCompute]

    # kernelIdMap of this layer is not needed anymore (can be many GB).
    layer.deleteKernelIdMap()
def clearTemp(self):
    """Remove temporary data used during optimization."""
    if self._storeAllCandidates:
        candidates = self._allCandidates
    else:
        candidates = self._optimalPartitions
    for candidate in candidates:
        for partLayer in self.getLayers(candidate):
            partLayer.clearTemp()
def selectCandidates(self, candidateDict, layer, postLayerPartition):
    """From a set of candidates, choose a subset of valid partitions.

    :param dict candidateDict: Set of possible partition candidates,
        keyed by core count.
    :param NxLayer | KerasLayer layer: The layer to partition.
    :param Layer postLayerPartition: The next higher layer, which has been
        partitioned already.
    :return: Valid partition candidates (at most
        ``numCandidatesToCompute``).
    :rtype: list[Layer]
    """
    # Iterate through set of partition candidates for this layer,
    # and validate candidate based on the partition of the subsequent
    # layer. Candidates are tried in order of increasing core count.
    candidates = []
    numToFind = self.numCandidatesToCompute
    numFound = 0
    for numCores in sorted(candidateDict):
        for numCoresPerAxis, coreShape in candidateDict[numCores]:
            # Returns None when the proposal violates a hardware limit.
            partitionCandidate = tryCreatePartition(
                numCoresPerAxis, coreShape, postLayerPartition, layer,
                self.logdir)
            if partitionCandidate is not None:
                candidates.append(partitionCandidate)
            numFound = len(candidates)
            if numFound == numToFind:
                # Terminate the '.'/'x' progress line printed by
                # tryCreatePartition.
                print('\n')
                break
        if numFound == numToFind:
            break
    if numFound == 0:
        layer.exclusionCriteria.print()
        raise RuntimeError("No valid partition found.")
    if numFound < numToFind:
        self.logger.debug(
            "Found %s partition candidate%s, not the requested %s.",
            numFound, getS(numFound), numToFind)
    return candidates
def getDummyLayer(shape):
    """Create a dummy layer, typically as postLayer of the output layer.

    The dummy has an empty coreIdMap and an all-ones multiplicityMap of
    the given shape. Its ``postLayer`` is presumably left at the Layer
    constructor's default (``None``) — which is what getCostTerms uses to
    recognize and skip it; confirm against the Layer definition.

    :param list | tuple | np.ndarray shape: Shape of layer.
    :return: Dummy layer.
    :rtype: Layer
    """
    return Layer('DummyPartitionFinalLayer', '', {}, {}, np.array([]),
                 np.ones(shape, int))
def tryCreatePartition(numCoresPerAxis, coreShape, postLayerPartition, layer,
                       logdir):
    """Try creating a partition of the layer.

    Fails if proposed partition exceeds one of the Loihi limits.

    :param np.ndarray | list | tuple numCoresPerAxis: Number of cores along
        each layer dimension.
    :param np.ndarray | list | tuple coreShape: The shape of the largest
        core.
    :param Layer postLayerPartition: The subsequent partitioned layer.
    :param KerasLayer | NxConv2D layer: The layer to partition.
    :param str logdir: Where to save plots.
    :return: Valid partition candidate, or None if the proposal was
        rejected.
    :rtype: Layer
    """
    # output_shape3D is used in Conv1D layers to fake Conv2D behavior.
    output_shape = layer._output_shape3D if hasattr(layer, '_output_shape3D') \
        else layer.output_shape
    # Strip the batch axis.
    outputShape = output_shape[1:]

    # When using signed spikes the number of channels in the output
    # is doubled.
    if hasattr(layer, 'signed'):
        if layer.signed:
            outputShape = outputShape[:-1] + (2 * outputShape[-1],)

    coreIdMap = getCoreIdMapFromCoreShape(coreShape, outputShape,
                                          numCoresPerAxis)
    coreOccupancy = getCoreOccupancy(coreIdMap, numCoresPerAxis)

    # Reject the proposal if any core would exceed the per-core
    # compartment limit. (Implicitly returns None.)
    if np.any(coreOccupancy > layer.maxNumCompartments):
        return

    multiplicityMap = layer.getMultiplicityMap(coreIdMap)

    partitionCandidate = Layer(layer.name, layer.__class__.__name__,
                               layer.compartmentKwargs, layer.connectionKwargs,
                               coreIdMap, multiplicityMap, postLayerPartition)

    # Pass coreOccupancy to partitionCandidate here only to be able to plot it
    # later when the partition has been stored to disk.
    partitionCandidate.coreOccupancy = coreOccupancy

    # compile() may reject the candidate for reasons beyond occupancy.
    partitionCandidate = layer.compile(partitionCandidate)
    if partitionCandidate is None:
        # '.' marks a rejected candidate on the progress line.
        print('.', end='', flush=True)
        return

    layer.validatePartition(partitionCandidate)
    layer.visualizePartition(logdir, partitionCandidate, coreIdMap,
                             coreOccupancy, multiplicityMap=multiplicityMap)

    # 'x' marks an accepted candidate on the progress line.
    print('x', end='', flush=True)

    return partitionCandidate
def getCostTerms(partitionedLayer):
    """Get cost terms of partitioned layer.

    Each cost term has been normalized with respect to the capacity of one
    core, to allow adding the cost terms up.

    :param Layer partitionedLayer: Partitioned layer.
    :return: Cost terms of partitioned layer, or None for the dummy
        final layer.
    :rtype: CostTerms
    """
    # Skip dummy partition of final layer (exists only to provide a
    # multiplicityMap to the final layer).
    if partitionedLayer.postLayer is None:
        return

    # The cost of each layer accumulates the cost of the post layer partition.
    # This way, the cost of the currently lowest layer in the partitioning
    # loop represents the total cost of this partitioning path.
    postLayerCost = computeTotalCost(partitionedLayer.postLayer)

    return CostTerms(partitionedLayer.coreCost,
                     partitionedLayer.inputAxonCost,
                     partitionedLayer.outputAxonCost,
                     partitionedLayer.synapseCost,
                     postLayerCost)
def computeTotalCost(partitionedLayer, weights=None):
    """Get total cost of partitioned layer.

    :param Layer partitionedLayer: Partitioned layer.
    :param np.ndarray weights: Weight coefficients of cost terms. A scalar
        is broadcast to all terms; ``None`` weights every term equally.
    :return: Total cost of partitioned layer.
    :rtype: float
    :raises ValueError: If ``weights`` has a different length than the
        number of cost terms.
    """
    costTerms = getCostTerms(partitionedLayer)
    if costTerms is None:
        # Dummy terminator layer: contributes no cost.
        return 0
    costTerms = np.array(list(costTerms._asdict().values()))
    if weights is None:
        weights = np.ones(len(costTerms))
    elif np.isscalar(weights):
        weights = weights * np.ones(len(costTerms))
    # Validate explicitly: a bare `assert` would silently disappear when
    # running under `python -O`, letting np.dot fail (or broadcast) on a
    # malformed weight vector instead.
    if len(weights) != len(costTerms):
        raise ValueError(
            'Expected {} weight coefficients, got {}.'.format(
                len(costTerms), len(weights)))
    cost = np.dot(costTerms, weights)
    return cost
| [
"os.path.expanduser",
"numpy.isscalar",
"numpy.ones",
"time.strftime",
"numpy.any",
"numpy.argsort",
"nxsdk_modules_ncl.dnn.src.utils.getS",
"nxsdk_modules_ncl.dnn.src.data_structures.Layer",
"collections.namedtuple",
"numpy.array",
"numpy.dot",
"nxsdk_modules_ncl.dnn.src.utils.getCoreOccupanc... | [((1059, 1163), 'collections.namedtuple', 'namedtuple', (['"""CostTerms"""', "['coreCost', 'inputAxonCost', 'outputAxonCost', 'synCost', 'postLayerCost']"], {}), "('CostTerms', ['coreCost', 'inputAxonCost', 'outputAxonCost',\n 'synCost', 'postLayerCost'])\n", (1069, 1163), False, 'from collections import namedtuple, OrderedDict\n'), ((13372, 13438), 'nxsdk_modules_ncl.dnn.src.utils.getCoreIdMapFromCoreShape', 'getCoreIdMapFromCoreShape', (['coreShape', 'outputShape', 'numCoresPerAxis'], {}), '(coreShape, outputShape, numCoresPerAxis)\n', (13397, 13438), False, 'from nxsdk_modules_ncl.dnn.src.utils import getCoreOccupancy, getCoreIdMapFromCoreShape, getS\n'), ((13502, 13546), 'nxsdk_modules_ncl.dnn.src.utils.getCoreOccupancy', 'getCoreOccupancy', (['coreIdMap', 'numCoresPerAxis'], {}), '(coreIdMap, numCoresPerAxis)\n', (13518, 13546), False, 'from nxsdk_modules_ncl.dnn.src.utils import getCoreOccupancy, getCoreIdMapFromCoreShape, getS\n'), ((13555, 13603), 'numpy.any', 'np.any', (['(coreOccupancy > layer.maxNumCompartments)'], {}), '(coreOccupancy > layer.maxNumCompartments)\n', (13561, 13603), True, 'import numpy as np\n'), ((13705, 13850), 'nxsdk_modules_ncl.dnn.src.data_structures.Layer', 'Layer', (['layer.name', 'layer.__class__.__name__', 'layer.compartmentKwargs', 'layer.connectionKwargs', 'coreIdMap', 'multiplicityMap', 'postLayerPartition'], {}), '(layer.name, layer.__class__.__name__, layer.compartmentKwargs, layer.\n connectionKwargs, coreIdMap, multiplicityMap, postLayerPartition)\n', (13710, 13850), False, 'from nxsdk_modules_ncl.dnn.src.data_structures import Layer\n'), ((16192, 16218), 'numpy.dot', 'np.dot', (['costTerms', 'weights'], {}), '(costTerms, weights)\n', (16198, 16218), True, 'import numpy as np\n'), ((12209, 12221), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12217, 12221), True, 'import numpy as np\n'), ((12240, 12259), 'numpy.ones', 'np.ones', (['shape', 'int'], {}), '(shape, 
int)\n', (12247, 12259), True, 'import numpy as np\n'), ((16063, 16083), 'numpy.isscalar', 'np.isscalar', (['weights'], {}), '(weights)\n', (16074, 16083), True, 'import numpy as np\n'), ((4851, 4879), 'os.path.join', 'os.path.join', (['path', 'layer.id'], {}), '(path, layer.id)\n', (4863, 4879), False, 'import os\n'), ((6005, 6037), 'os.path.join', 'os.path.join', (['path', '"""cost_terms"""'], {}), "(path, 'cost_terms')\n", (6017, 6037), False, 'import os\n'), ((6790, 6827), 'os.path.join', 'os.path.join', (['path', '"""candidate_costs"""'], {}), "(path, 'candidate_costs')\n", (6802, 6827), False, 'import os\n'), ((9625, 9645), 'numpy.array', 'np.array', (['candidates'], {}), '(candidates)\n', (9633, 9645), True, 'import numpy as np\n'), ((9646, 9663), 'numpy.argsort', 'np.argsort', (['costs'], {}), '(costs)\n', (9656, 9663), True, 'import numpy as np\n'), ((4298, 4321), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (4316, 4321), False, 'import os\n'), ((4416, 4446), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (4429, 4446), False, 'import time\n'), ((11882, 11896), 'nxsdk_modules_ncl.dnn.src.utils.getS', 'getS', (['numFound'], {}), '(numFound)\n', (11886, 11896), False, 'from nxsdk_modules_ncl.dnn.src.utils import getCoreOccupancy, getCoreIdMapFromCoreShape, getS\n')] |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import numpy as np
from keras2onnx.proto import keras
from keras2onnx.proto.tfcompat import is_tf2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_keras_and_ort, test_level_0
from keras_applications.imagenet_utils import _obtain_input_shape
# Short aliases for the Keras API, resolved through keras2onnx.proto so the
# same code works regardless of which Keras backend proto selects.
K = keras.backend
is_keras_tensor = K.is_keras_tensor
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
# NOTE: this aliases the *functional* concatenate helper, not the
# Concatenate layer class — call sites pass the tensor list directly.
Concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def squeeze_excite_block(input_tensor, ratio=16):
    """Append a squeeze-and-excite stage to ``input_tensor``.

    Globally pools the input, passes it through a two-layer bottleneck
    (reduction factor ``ratio``) and rescales the input channels by the
    resulting sigmoid gates.
    """
    if K.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = -1
    filters = input_tensor.shape[channel_axis]

    squeezed = GlobalAveragePooling2D()(input_tensor)
    squeezed = Reshape((1, 1, filters))(squeezed)
    excited = Dense(filters // ratio, activation='relu',
                    kernel_initializer='he_normal', use_bias=False)(squeezed)
    excited = Dense(filters, activation='sigmoid',
                    kernel_initializer='he_normal', use_bias=False)(excited)
    if K.image_data_format() == 'channels_first':
        excited = Permute((3, 1, 2))(excited)
    return multiply([input_tensor, excited])
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    """Convolution, followed by optional batch norm and activation.

    Batch normalization is only inserted for bias-free convolutions;
    derived layer names use the ``_bn`` / ``_ac`` suffixes when a base
    name is given.
    """
    conv = Conv2D(filters,
                  kernel_size,
                  strides=strides,
                  padding=padding,
                  use_bias=use_bias,
                  name=name)
    x = conv(x)
    if not use_bias:
        if K.image_data_format() == 'channels_first':
            bn_axis = 1
        else:
            bn_axis = 3
        bn_name = None if name is None else '{name}_bn'.format(name=name)
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else '{name}_ac'.format(name=name)
        x = Activation(activation, name=ac_name)(x)
    return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Add an Inception-ResNet block followed by a squeeze-and-excite stage.

    :param x: Input tensor.
    :param float scale: Scaling factor applied to the residual branch.
    :param str block_type: One of 'block35', 'block17' or 'block8',
        selecting the branch structure.
    :param int block_idx: Index used to build unique layer names.
    :param activation: Activation applied after the residual addition;
        ``None`` disables it.
    :return: Output tensor.
    """
    # Build the parallel convolution branches for the requested block type.
    if block_type == 'block35':
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif block_type == 'block17':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif block_type == 'block8':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: {block_type}'.format(block_type=block_type))
    block_name = '{block_type}_{block_idx}'.format(block_type=block_type, block_idx=block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(branches, axis=channel_axis, name='{block_name}_mixed'.format(block_name=block_name))
    # Linear 1x1 projection (with bias) back to the input's channel depth.
    up = conv2d_bn(mixed,
                   K.int_shape(x)[channel_axis],
                   1,
                   activation=None,
                   use_bias=True,
                   name='{block_name}_conv'.format(block_name=block_name))
    # Residual connection: x + scale * up, realized as a Lambda layer so
    # that `scale` is baked in via `arguments` (not captured by closure).
    x = Lambda(lambda inputs, scale_: inputs[0] + inputs[1] * scale_,
               output_shape=K.int_shape(x)[1:],
               arguments={'scale_': scale},
               name=block_name)([x, up])
    if activation is not None:
        x = Activation(activation, name='{block_name}_ac'.format(block_name=block_name))(x)
    # squeeze and excite block
    x = squeeze_excite_block(x)
    return x
def SEInceptionResNetV2(include_top=True,
                        weights=None,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=1000):
    """Build the Squeeze-and-Excite Inception-ResNet-v2 architecture.

    :param bool include_top: Whether to append the classification head.
    :param weights: Forwarded to ``_obtain_input_shape`` for shape
        validation; no pretrained weights are loaded here.
    :param input_tensor: Optional existing tensor to use as model input.
    :param input_shape: Optional input shape tuple.
    :param pooling: Global pooling mode when ``include_top`` is False:
        'avg', 'max' or None.
    :param int classes: Number of output classes (used only when
        ``include_top`` is True).
    :return: A Keras ``Model`` instance.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=139,
        data_format=K.image_data_format(),
        require_flatten=False,
        weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(branches, axis=channel_axis, name='mixed_5b')
    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(branches, axis=channel_axis, name='mixed_6a')
    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(branches, axis=channel_axis, name='mixed_7a')
    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    # Final block8: full-scale residual, no activation before conv_7b.
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)
    # squeeze and excite block
    x = squeeze_excite_block(x)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    inputs = img_input

    # Create model
    model = Model(inputs, x, name='se_inception_resnet_v2')
    return model
# Model from https://github.com/titu1994/keras-squeeze-excite-network
class TestSEInceptionResNetV2(unittest.TestCase):
    """Conversion test: SE-InceptionResNetV2 Keras model to ONNX.

    Converts the model with keras2onnx and checks that ONNX Runtime
    reproduces the Keras predictions.
    """

    def setUp(self):
        # Converted model files created during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0 or not is_tf2,
                     "Test level 0 only.")
    def test_SE_InceptionResNetV2(self):
        K.clear_session()
        keras_model = SEInceptionResNetV2()
        # Random batch of two 128x128 RGB images as reference input.
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        # Run the converted model through ONNX Runtime and compare against
        # the Keras reference output.
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"unittest.skipIf",
"os.path.abspath",
"os.remove",
"test_utils.run_keras_and_ort",
"keras2onnx.convert_keras",
"numpy.random.rand"
] | [((10051, 10116), 'unittest.skipIf', 'unittest.skipIf', (['(test_level_0 or not is_tf2)', '"""Test level 0 only."""'], {}), "(test_level_0 or not is_tf2, 'Test level 0 only.')\n", (10066, 10116), False, 'import unittest\n'), ((10607, 10622), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10620, 10622), False, 'import unittest\n'), ((10380, 10435), 'keras2onnx.convert_keras', 'keras2onnx.convert_keras', (['keras_model', 'keras_model.name'], {}), '(keras_model, keras_model.name)\n', (10404, 10435), False, 'import keras2onnx\n'), ((548, 565), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (555, 565), False, 'from os.path import dirname, abspath\n'), ((10031, 10044), 'os.remove', 'os.remove', (['fl'], {}), '(fl)\n', (10040, 10044), False, 'import os\n'), ((10473, 10576), 'test_utils.run_keras_and_ort', 'run_keras_and_ort', (['onnx_model.graph.name', 'onnx_model', 'keras_model', 'data', 'expected', 'self.model_files'], {}), '(onnx_model.graph.name, onnx_model, keras_model, data,\n expected, self.model_files)\n', (10490, 10576), False, 'from test_utils import run_keras_and_ort, test_level_0\n'), ((10264, 10294), 'numpy.random.rand', 'np.random.rand', (['(2)', '(128)', '(128)', '(3)'], {}), '(2, 128, 128, 3)\n', (10278, 10294), True, 'import numpy as np\n')] |
"""
This module contains all routines for training GDML and sGDML models.
"""
# MIT License
#
# Copyright (c) 2018-2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import multiprocessing as mp
import sys
import timeit
import warnings
from functools import partial
import numpy as np
import scipy as sp
from . import __version__
from .predict import GDMLPredict
from .utils import ui, io, desc, perm
# Module-level registry of shared-memory buffers consumed by
# _assemble_kernel_mat_wkr in worker processes.
glob = {}
def _share_array(arr_np, typecode_or_type):
    """
    Copy a NumPy array into a ctypes array backed by shared memory.

    Parameters
    ----------
    arr_np : :obj:`numpy.ndarray`
        NumPy array.
    typecode_or_type : char or :obj:`ctype`
        Either a ctypes type or a one character typecode of the
        kind used by the Python array module.

    Returns
    -------
    array of :obj:`ctype`
        Flat shared-memory copy of the data.
    tuple
        Original shape of ``arr_np`` (needed to restore the view).
    """
    flat = arr_np.ravel()
    shared = mp.RawArray(typecode_or_type, flat)
    return shared, arr_np.shape
def _assemble_kernel_mat_wkr(j, n_perms, tril_perms_lin, sig, use_E_cstr=False):
    r"""
    Compute one row and column of the force field kernel matrix.

    The Hessian of the Matern kernel is used with n = 2 (twice
    differentiable). Each row and column consists of matrix-valued
    blocks, which encode the interaction of one training point with all
    others. The result is stored in shared memory (a global variable).

    Parameters
    ----------
    j : int
        Index of training point.
    n_perms : int
        Number of individual permutations encoded in
        `tril_perms_lin`.
    tril_perms_lin : :obj:`numpy.ndarray`
        1D array (int) containing all recovered permutations
        expanded as one large permutation to be applied to a tiled
        copy of the object to be permuted.
    sig : int
        Hyper-parameter :math:`\sigma`.
    use_E_cstr : bool, optional
        Whether to additionally fill the energy-constraint rows of the
        kernel matrix.

    Returns
    -------
    int
        Number of kernel matrix blocks created, divided by 2
        (symmetric blocks are always created at together).
    """
    global glob

    # Reconstruct NumPy views onto the shared-memory buffers populated by
    # the parent process (see `glob`).
    R_desc = np.frombuffer(glob['R_desc']).reshape(glob['R_desc_shape'])
    R_d_desc = np.frombuffer(glob['R_d_desc']).reshape(glob['R_d_desc_shape'])
    K = np.frombuffer(glob['K']).reshape(glob['K_shape'])

    n_train, dim_d, dim_i = R_d_desc.shape

    # Constants of the Matern-5/2 kernel derivatives.
    mat52_base_div = 3 * sig ** 4
    sqrt5 = np.sqrt(5.0)
    sig_pow2 = sig ** 2

    base = np.arange(dim_i)  # base set of indices
    blk_j = base + j * dim_i  # columns of the j-th force block

    # Row offset of the energy-constraint section of K.
    E_off = dim_i * n_train

    # Create permutated variants of 'rj_desc' and 'rj_d_desc'.
    rj_desc_perms = np.reshape(
        np.tile(R_desc[j, :], n_perms)[tril_perms_lin], (n_perms, -1), order='F'
    )
    rj_d_desc_perms = np.reshape(
        np.tile(R_d_desc[j, :, :].T, n_perms)[:, tril_perms_lin], (-1, dim_d, n_perms)
    )

    # Fill the lower-triangular part of row/column j; the symmetric
    # counterpart is written at the same time.
    for i in range(j, n_train):
        blk_i = base[:, np.newaxis] + i * dim_i

        diff_ab_perms = R_desc[i, :] - rj_desc_perms
        norm_ab_perms = sqrt5 * np.linalg.norm(diff_ab_perms, axis=1)

        mat52_base_perms = np.exp(-norm_ab_perms / sig) / mat52_base_div * 5
        diff_ab_outer_perms = 5 * np.einsum(
            'ki,kj->ij',
            diff_ab_perms * mat52_base_perms[:, None],
            np.einsum('ik,jki -> ij', diff_ab_perms, rj_d_desc_perms),
        )
        diff_ab_outer_perms -= np.einsum(
            'ijk,k->ji',
            rj_d_desc_perms,
            ((sig_pow2 + sig * norm_ab_perms) * mat52_base_perms),
        )

        K[blk_i, blk_j] = K[blk_j, blk_i] = R_d_desc[i, :, :].T.dot(diff_ab_outer_perms)

    if use_E_cstr:
        # Energy-constraint rows: force/energy and energy/energy kernel
        # entries for all training points (full loop, not just i >= j).
        for i in range(n_train):
            blk_i = base[:, np.newaxis] + i * dim_i

            diff_ab_perms = R_desc[i, :] - rj_desc_perms
            norm_ab_perms = sqrt5 * np.linalg.norm(diff_ab_perms, axis=1)

            if use_E_cstr:  # NOTE(review): redundant — already guarded above.
                K_fe = (
                    5
                    * diff_ab_perms
                    / (3 * sig ** 3)
                    * (norm_ab_perms[:, None] + sig)
                    * np.exp(-norm_ab_perms / sig)[:, None]
                )
                K[E_off + i, blk_j] = np.einsum('ik,jki -> j', K_fe, rj_d_desc_perms)

            K[E_off + i, E_off + j] = K[E_off + j, E_off + i] = (
                1 + (norm_ab_perms / sig) * (1 + norm_ab_perms / (3 * sig))
            ).dot(np.exp(-norm_ab_perms / sig))

    return n_train - j
class GDMLTrain:
def __init__(self, max_processes=None):
    """Create a GDML/sGDML training manager.

    :param int max_processes: Upper bound on the number of worker
        processes used for parallelizable parts of training;
        ``None`` leaves the choice to the implementation.
    """
    self._max_processes = max_processes
def create_task(
    self,
    train_dataset,
    n_train,
    valid_dataset,
    n_valid,
    sig,
    lam=1e-15,
    use_sym=True,
    use_E=True,
    use_E_cstr=False,
    use_cprsn=False,
):
    """
    Create a data structure of custom type `task`.

    These data structures serve as recipes for model creation,
    summarizing the configuration of one particular training run.
    Training and test points are sampled from the provided dataset,
    without replacement. If the same dataset if given for training
    and testing, the subsets are drawn without overlap.

    Each task also contains a choice for the hyper-parameters of the
    training process and the MD5 fingerprints of the used datasets.

    Parameters
    ----------
    train_dataset : :obj:`dict`
        Data structure of custom type :obj:`dataset` containing
        train dataset.
    n_train : int
        Number of training points to sample.
    valid_dataset : :obj:`dict`
        Data structure of custom type :obj:`dataset` containing
        validation dataset.
    n_valid : int
        Number of validation points to sample.
    sig : int
        Hyper-parameter (kernel length scale).
    lam : float, optional
        Hyper-parameter lambda (regularization strength).
    use_sym : bool, optional
        True: include symmetries (sGDML), False: GDML.
    use_E : bool, optional
        True: reconstruct force field with corresponding potential energy surface,
        False: ignore energy during training, even if energy labels are available
        in the dataset. The trained model will still be able to predict
        energies up to an unknown integration constant. Note, that the
        energy predictions accuracy will be untested.
    use_E_cstr : bool, optional
        True: include energy constraints in the kernel,
        False: default (s)GDML.
    use_cprsn : bool, optional
        True: compress kernel matrix along symmetric degrees of
        freedom,
        False: train using full kernel matrix

    Returns
    -------
    dict
        Data structure of custom type :obj:`task`.

    Raises
    ------
    ValueError
        If a reconstruction of the potential energy surface is requested,
        but the energy labels are missing in the dataset.
    """
    if use_E and 'E' not in train_dataset:
        raise ValueError(
            'No energy labels found in dataset!'
            + '\n By default, force fields are always reconstructed including the'
            + '\n corresponding potential energy surface (this can be turned off).\n'
            + '\n However, the energy labels are missing in the provided dataset.\n'
        )

    # Energy constraints are only meaningful when energies are used at all.
    use_E_cstr = use_E and use_E_cstr

    sys.stdout.write('[\x1b[5m .. \x1b[0m] Hashing dataset(s)...')
    sys.stdout.flush()

    md5_train = io.dataset_md5(train_dataset)
    md5_valid = io.dataset_md5(valid_dataset)

    sys.stdout.write(ui.info_str('\r[DONE]') + ' Hashing dataset(s)...\n')
    sys.stdout.flush()

    sys.stdout.write(
        '[\x1b[5m .. \x1b[0m] Sampling training and validation subset...'
    )
    sys.stdout.flush()

    # Prefer energy-stratified sampling when energy labels are available;
    # otherwise fall back to uniform sampling without replacement.
    if 'E' in train_dataset:
        idxs_train = self.draw_strat_sample(train_dataset['E'], n_train)
    else:
        idxs_train = np.random.choice(
            np.arange(train_dataset['F'].shape[0]), n_train, replace=False
        )

    # If training and validation come from the same dataset (identical
    # MD5 fingerprint), exclude the training indices from the validation
    # draw so the subsets do not overlap.
    excl_idxs = idxs_train if md5_train == md5_valid else None

    if 'E' in valid_dataset:
        idxs_valid = self.draw_strat_sample(valid_dataset['E'], n_valid, excl_idxs)
    else:
        idxs_valid_all = np.setdiff1d(
            np.arange(valid_dataset['F'].shape[0]), excl_idxs, assume_unique=True
        )
        idxs_valid = np.random.choice(idxs_valid_all, n_valid, replace=False)

    sys.stdout.write(ui.info_str('\r[DONE]') + ' Sampling training and validation subset...\n')
    sys.stdout.flush()

    R_train = train_dataset['R'][idxs_train, :, :]
    task = {
        'type': 't',
        'code_version': __version__,
        'dataset_name': train_dataset['name'].astype(str),
        'dataset_theory': train_dataset['theory'].astype(str),
        'z': train_dataset['z'],
        'R_train': R_train,
        'F_train': train_dataset['F'][idxs_train, :, :],
        'idxs_train': idxs_train,
        'md5_train': md5_train,
        'idxs_valid': idxs_valid,
        'md5_valid': md5_valid,
        'sig': sig,
        'lam': lam,
        'use_E': use_E,
        'use_E_cstr': use_E_cstr,
        'use_sym': use_sym,
        'use_cprsn': use_cprsn,
    }
    if use_E:
        task['E_train'] = train_dataset['E'][idxs_train]

    if use_sym:
        # Recover permutational symmetries from the training geometries
        # and close the permutation set under composition.
        task['perms'] = perm.sync_mat(
            R_train, train_dataset['z'], self._max_processes
        )
        task['perms'] = perm.complete_group(task['perms'])
    else:
        task['perms'] = np.arange(train_dataset['R'].shape[1])[
            None, :
        ]  # no symmetries

    return task
def train(  # noqa: C901
    self, task, cprsn_callback=None, ker_progr_callback=None, solve_callback=None
):
    """
    Train a model based on a training task.

    Parameters
    ----------
    task : :obj:`dict`
        Data structure of custom type :obj:`task`.
    cprsn_callback : callable, optional
        Symmetry compression status callback that takes two arguments:

            n_atoms : int
                Total number of atoms.
            n_atoms_kept : float or None, optional
                Number of atoms kept after compression.
    ker_progr_callback : callable, optional
        Kernel assembly progress function that takes three
        arguments:

            current : int
                Current progress (number of completed entries).
            total : int
                Task size (total number of entries to create).
            duration_s : float or None, optional
                Once complete, this parameter contains the
                time it took to assemble the kernel (seconds).
    solve_callback : callable, optional
        Linear system solver status callback that takes two arguments:

            done : bool
                False when solver starts, True when it finishes.
            duration_s : float or None, optional
                Once done, this parameter contains the runtime
                of the solver (seconds).

    Returns
    -------
    :obj:`dict`
        Data structure of custom type :obj:`model`.
    """
    sig = np.squeeze(task['sig'])
    lam = np.squeeze(task['lam'])

    # One row of (lower-triangle) descriptor index permutations per symmetry.
    n_perms = task['perms'].shape[0]
    tril_perms = np.array([desc.perm(p) for p in task['perms']])

    n_train, n_atoms = task['R_train'].shape[:2]
    dim_i = 3 * n_atoms  # input dimension (Cartesian coordinates per geometry)
    dim_d = tril_perms.shape[1]  # descriptor dimension

    # Concatenate all permutations into one linear index array, so that a
    # tiled copy of a descriptor can be permuted in a single operation.
    perm_offsets = np.arange(n_perms)[:, None] * dim_d
    tril_perms_lin = (tril_perms + perm_offsets).flatten('F')

    # Compute descriptor and its Jacobian for every training geometry.
    R_desc = np.empty([n_train, dim_d])
    R_d_desc = np.empty([n_train, dim_d, dim_i])
    for i in range(n_train):
        r = task['R_train'][i]
        pdist = sp.spatial.distance.pdist(r, 'euclidean')
        pdist = sp.spatial.distance.squareform(pdist)
        R_desc[i, :] = desc.r_to_desc(r, pdist)
        R_d_desc[i, :, :] = desc.r_to_d_desc(r, pdist)

    if task['use_cprsn'] and n_perms > 1:
        # Symmetry compression: keep only one representative atom (column)
        # per permutation orbit.
        _, cprsn_keep_idxs = np.unique(
            np.sort(task['perms'], axis=0), axis=1, return_index=True
        )
        # Translate kept atom indices into kept Cartesian-coordinate indices.
        cprsn_keep_idxs_lin = (
            np.arange(dim_i).reshape(n_atoms, -1)[cprsn_keep_idxs, :].ravel()
        )
        if cprsn_callback is not None:
            cprsn_callback(n_atoms, cprsn_keep_idxs.shape[0])

        task = dict(task)  # enable item assignment in NPZ
        task['F_train'] = task['F_train'][:, cprsn_keep_idxs, :]
        R_d_desc = R_d_desc[:, :, cprsn_keep_idxs_lin]

    # Normalize force labels by their standard deviation.
    Ft = task['F_train'].ravel()
    Ft_std = np.std(Ft)
    Ft /= Ft_std

    y = Ft
    if task['use_E'] and task['use_E_cstr']:
        # Append (identically scaled) energy labels as extra constraints.
        Et = task['E_train'].ravel()
        Et /= Ft_std
        y = np.hstack((Ft, Et))

    start = timeit.default_timer()
    K = self._assemble_kernel_mat(
        R_desc,
        R_d_desc,
        n_perms,
        tril_perms_lin,
        sig,
        use_E_cstr=task['use_E_cstr'],
        progr_callback=ker_progr_callback,
    )
    stop = timeit.default_timer()
    if ker_progr_callback is not None:
        # Callback one last time with 100% and the measured duration.
        ker_progr_callback(1, 1, (stop - start) / 2)

    if solve_callback is not None:
        solve_callback(done=False)
    start = timeit.default_timer()

    K[np.diag_indices_from(K)] -= lam  # regularizer
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        try:
            # Cholesky factorization of -K (assumed positive definite).
            L, lower = sp.linalg.cho_factor(
                -K, overwrite_a=True, check_finite=False
            )
            alphas = -sp.linalg.cho_solve(
                (L, lower), y, overwrite_b=True, check_finite=False
            )
        except Exception:
            # LU fallback if the Cholesky factorization fails.
            alphas = sp.linalg.solve(
                K, y, overwrite_a=True, overwrite_b=True, check_finite=False
            )
    stop = timeit.default_timer()

    # Split the solution vector into force and (optional) energy coefficients.
    alphas_F = alphas
    if task['use_E_cstr']:
        alphas_E = alphas[-n_train:]
        alphas_F = alphas[:-n_train]
    if solve_callback is not None:
        solve_callback(done=True, duration_s=(stop - start) / 2)

    # Contract each descriptor Jacobian with its regression coefficients.
    r_dim = R_d_desc.shape[2]
    r_d_desc_alpha = [
        rj_d_desc.dot(alphas_F[(j * r_dim):((j + 1) * r_dim)])
        for j, rj_d_desc in enumerate(R_d_desc)
    ]

    model = {
        'type': 'm',
        'code_version': __version__,
        'dataset_name': task['dataset_name'],
        'dataset_theory': task['dataset_theory'],
        'z': task['z'],
        'idxs_train': task['idxs_train'],
        'md5_train': task['md5_train'],
        'idxs_valid': task['idxs_valid'],
        'md5_valid': task['md5_valid'],
        'n_test': 0,
        'md5_test': None,
        'f_err': {'mae': np.nan, 'rmse': np.nan},
        'R_desc': R_desc.T,
        'R_d_desc_alpha': r_d_desc_alpha,
        'c': 0.0,
        'std': Ft_std,
        'sig': sig,
        'perms': task['perms'],
        'tril_perms_lin': tril_perms_lin,
        'use_E': task['use_E'],
        'use_cprsn': task['use_cprsn'],
    }
    if task['use_E']:
        model['e_err'] = {'mae': np.nan, 'rmse': np.nan}

        if task['use_E_cstr']:
            model['alphas_E'] = alphas_E
        else:
            # Recover the integration constant from the training energies.
            model['c'] = self._recov_int_const(model, task)
    return model
def _recov_int_const(self, model, task):
    """
    Estimate the integration constant for a force field model.

    The offset between the energies predicted for the original training
    data and the true energy labels is computed in the least square sense.
    Furthermore, common issues with the user-provided datasets are self
    diagnosed here.

    Parameters
    ----------
    model : :obj:`dict`
        Data structure of custom type :obj:`model`.
    task : :obj:`dict`
        Data structure of custom type :obj:`task`.

    Returns
    -------
    float
        Estimate for the integration constant.

    Raises
    ------
    ValueError
        If the sign of the force labels in the dataset from
        which the model emerged is switched (e.g. gradients
        instead of forces).
    ValueError
        If inconsistent/corrupted energy labels are detected
        in the provided dataset.
    ValueError
        If different scales in energy vs. force labels are
        detected in the provided dataset.
    """
    # Predict energies for the training geometries with the freshly
    # trained model (its integration constant is still zero here).
    gdml = GDMLPredict(model)
    n_train = task['E_train'].shape[0]
    R = task['R_train'].reshape(n_train, -1)
    E_pred, _ = gdml.predict(R)
    E_ref = np.squeeze(task['E_train'])

    # Scale factor between predicted and reference energies, from a linear
    # least-squares fit E_ref ~ e_fact * E_pred + offset.
    e_fact = np.linalg.lstsq(
        np.column_stack((E_pred, np.ones(E_ref.shape))), E_ref, rcond=-1
    )[0][0]
    corrcoef = np.corrcoef(E_ref, E_pred)[0, 1]
    if np.sign(e_fact) == -1:
        # Negative scale factor: labels point the wrong way.
        raise ValueError(
            'Provided dataset contains gradients instead of force labels (flipped sign). Please correct!'
        )
    if corrcoef < 0.95:
        # Weak correlation indicates corrupted/mismatched labels rather
        # than a simple unit conversion problem.
        raise ValueError(
            'Inconsistent energy labels detected!'
            + '\n The predicted energies for the training data are only weakly correlated'
            + '\n with the reference labels (correlation coefficient %.2f) which indicates'
            % corrcoef
            + '\n that the issue is most likely NOT just a unit conversion error.\n'
            + '\n Troubleshooting tips:'
            + '\n (1) Verify correct correspondence between geometries and labels in'
            + '\n the provided dataset.'
            + '\n (2) Verify consistency between energy and force labels.'
            + '\n - Correspondence correct?'
            + '\n - Same level of theory?'
            + '\n - Accuracy of forces (if numerical)?'
            + '\n (3) Is the training data spread too broadly (i.e. weakly sampled'
            + '\n transitions between example clusters)?'
            + '\n (4) Are there duplicate geometries in the training data?'
            + '\n (5) Are there any corrupted data points (e.g. parsing errors)?\n'
        )
    if np.abs(e_fact - 1) > 1e-1:
        # Scale factor far from 1: energies and forces use different units.
        raise ValueError(
            'Different scales in energy vs. force labels detected!'
            + '\n The integrated forces differ from energy labels by factor ~%.2E.\n'
            % e_fact
            + '\n Troubleshooting tips:'
            + '\n (1) Verify consistency of units in energy and force labels.'
            + '\n (2) Is the training data spread too broadly (i.e. weakly sampled'
            + '\n transitions between example clusters)?\n'
        )
    # Least squares estimate for integration constant.
    return np.sum(E_ref - E_pred) / E_ref.shape[0]
def _assemble_kernel_mat(
    self,
    R_desc,
    R_d_desc,
    n_perms,
    tril_perms_lin,
    sig,
    use_E_cstr=False,
    progr_callback=None,
):
    r"""
    Compute force field kernel matrix.

    The Hessian of the Matern kernel is used with n = 2 (twice
    differentiable). Each row and column consists of matrix-valued blocks,
    which encode the interaction of one training point with all others. The
    result is stored in shared memory (a global variable).

    Parameters
    ----------
    R_desc : :obj:`numpy.ndarray`
        Array containing the descriptor for each training point.
    R_d_desc : :obj:`numpy.ndarray`
        Array containing the gradient of the descriptor for
        each training point.
    n_perms : int
        Number of individual permutations encoded in
        `tril_perms_lin`.
    tril_perms_lin : :obj:`numpy.ndarray`
        1D array containing all recovered permutations
        expanded as one large permutation to be applied to a
        tiled copy of the object to be permuted.
    sig : int
        Hyper-parameter :math:`\sigma` (kernel length scale).
    use_E_cstr : bool, optional
        True: include energy constraints in the kernel,
        False: default (s)GDML kernel.
    progr_callback : callable, optional
        Kernel assembly progress function that takes three
        arguments:

            current : int
                Current progress (number of completed entries).
            total : int
                Task size (total number of entries to create).
            duration_s : float or None, optional
                Once complete, this parameter contains the
                time it took to assemble the kernel (seconds).

    Returns
    -------
    :obj:`numpy.ndarray`
        Force field kernel matrix.
    """
    global glob
    n_train, dim_d, dim_i = R_d_desc.shape

    # Kernel dimension: one block row/column per partial derivative, plus
    # one extra row/column per training point if energy constraints are on.
    dim_K = n_train * dim_i
    dim_K += n_train if use_E_cstr else 0

    # Allocate the kernel matrix in shared memory so worker processes can
    # fill it in place without copying; hand the inputs over the same way.
    K = mp.RawArray('d', dim_K ** 2)
    glob['K'], glob['K_shape'] = K, (dim_K, dim_K)
    glob['R_desc'], glob['R_desc_shape'] = _share_array(R_desc, 'd')
    glob['R_d_desc'], glob['R_d_desc_shape'] = _share_array(R_d_desc, 'd')

    pool = mp.Pool(self._max_processes)

    # Number of entries to assemble: lower triangle plus the diagonal
    # (the matrix is symmetric).
    todo = (n_train ** 2 - n_train) // 2 + n_train
    done_total = 0
    for done in pool.imap_unordered(
        partial(
            _assemble_kernel_mat_wkr,
            n_perms=n_perms,
            tril_perms_lin=tril_perms_lin,
            sig=sig,
            use_E_cstr=use_E_cstr,
        ),
        list(range(n_train)),
    ):
        done_total += done
        if progr_callback is not None:
            progr_callback(done_total, todo)
    pool.close()

    # Release some memory.
    glob.pop('K', None)
    glob.pop('R_desc', None)
    glob.pop('R_d_desc', None)
    return np.frombuffer(K).reshape(glob['K_shape'])
def draw_strat_sample(self, T, n, excl_idxs=None):
    """
    Draw a sample from a dataset that preserves its original distribution.

    The distribution is estimated from a histogram where the bin size is
    determined using the Freedman-Diaconis rule. This rule is designed to
    minimize the difference between the area under the empirical
    probability distribution and the area under the theoretical
    probability distribution. A reduced histogram is then constructed by
    sampling uniformly in each bin. It is intended to populate all bins
    with at least one sample in the reduced histogram, even for small
    training sizes.

    Parameters
    ----------
    T : :obj:`numpy.ndarray`
        Dataset to sample from.
    n : int
        Number of examples.
    excl_idxs : :obj:`numpy.ndarray`, optional
        Array of indices to exclude from sample.

    Returns
    -------
    :obj:`numpy.ndarray`
        Array of indices that form the sample.
    """
    if T.size == n:
        return np.arange(n)

    # Freedman-Diaconis rule for the histogram bin width.
    h = 2 * np.subtract(*np.percentile(T, [75, 25])) / np.cbrt(n)
    n_bins = int(np.ceil((np.max(T) - np.min(T)) / h)) if h > 0 else 1

    # Limit the number of bins to half of the requested subset size.
    # NOTE: integer division is required here; `n / 2` yields a float,
    # which np.linspace rejects as its `num` argument on modern NumPy.
    n_bins = min(n_bins, n // 2)

    bins = np.linspace(np.min(T), np.max(T), n_bins, endpoint=False)
    idxs = np.digitize(T, bins)

    # Exclude restricted indices by moving them into an impossible bin.
    if excl_idxs is not None:
        idxs[excl_idxs] = n_bins + 1

    uniq_all, cnts_all = np.unique(idxs, return_counts=True)

    # Remove the restricted bin from the histogram.
    if excl_idxs is not None:
        excl_bin_idx = np.where(uniq_all == n_bins + 1)
        cnts_all = np.delete(cnts_all, excl_bin_idx)
        uniq_all = np.delete(uniq_all, excl_bin_idx)

    # Reduced bin counts, proportional to the original histogram but
    # limited to what is actually available in each bin.
    reduced_cnts = np.ceil(cnts_all / np.sum(cnts_all, dtype=float) * n).astype(int)
    reduced_cnts = np.minimum(reduced_cnts, cnts_all)

    # Reduce/increase bin counts until they sum to the desired total `n`.
    reduced_cnts_delta = n - np.sum(reduced_cnts)
    while np.abs(reduced_cnts_delta) > 0:
        # How many members can we remove from an arbitrary bucket, without
        # any bucket with more than one member going to zero?
        max_bin_reduction = np.min(reduced_cnts[np.where(reduced_cnts > 1)]) - 1

        # Generate additional bin members to fill up/drain bucket counts of
        # the subset. This array contains (repeated) bucket IDs.
        outstanding = np.random.choice(
            uniq_all,
            min(max_bin_reduction, np.abs(reduced_cnts_delta)),
            p=(reduced_cnts - 1) / np.sum(reduced_cnts - 1, dtype=float),
        )
        uniq_outstanding, cnts_outstanding = np.unique(
            outstanding, return_counts=True
        )  # aggregate bucket IDs

        # Bucket IDs back to histogram positions.
        outstanding_bucket_idx = np.where(
            np.in1d(uniq_all, uniq_outstanding, assume_unique=True)
        )[0]
        reduced_cnts[outstanding_bucket_idx] += (
            np.sign(reduced_cnts_delta) * cnts_outstanding
        )
        reduced_cnts_delta = n - np.sum(reduced_cnts)

    # Draw examples uniformly (without replacement) within each bin.
    idxs_train = np.empty((0,), dtype=int)
    for uniq_idx, bin_cnt in zip(uniq_all, reduced_cnts):
        idx_in_bin_all = np.where(idxs.ravel() == uniq_idx)[0]
        idxs_train = np.append(
            idxs_train, np.random.choice(idx_in_bin_all, bin_cnt, replace=False)
        )
    return idxs_train
| [
"sys.stdout.write",
"scipy.linalg.solve",
"numpy.diag_indices_from",
"numpy.abs",
"numpy.sum",
"numpy.empty",
"numpy.einsum",
"numpy.ones",
"sys.stdout.flush",
"numpy.arange",
"numpy.tile",
"numpy.linalg.norm",
"scipy.spatial.distance.pdist",
"numpy.exp",
"numpy.unique",
"warnings.simp... | [((3433, 3445), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (3440, 3445), True, 'import numpy as np\n'), ((3482, 3498), 'numpy.arange', 'np.arange', (['dim_i'], {}), '(dim_i)\n', (3491, 3498), True, 'import numpy as np\n'), ((4411, 4507), 'numpy.einsum', 'np.einsum', (['"""ijk,k->ji"""', 'rj_d_desc_perms', '((sig_pow2 + sig * norm_ab_perms) * mat52_base_perms)'], {}), "('ijk,k->ji', rj_d_desc_perms, (sig_pow2 + sig * norm_ab_perms) *\n mat52_base_perms)\n", (4420, 4507), True, 'import numpy as np\n'), ((8745, 8807), 'sys.stdout.write', 'sys.stdout.write', (['"""[\x1b[5m .. \x1b[0m] Hashing dataset(s)..."""'], {}), "('[\\x1b[5m .. \\x1b[0m] Hashing dataset(s)...')\n", (8761, 8807), False, 'import sys\n'), ((8816, 8834), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8832, 8834), False, 'import sys\n'), ((9024, 9042), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9040, 9042), False, 'import sys\n'), ((9052, 9140), 'sys.stdout.write', 'sys.stdout.write', (['"""[\x1b[5m .. \x1b[0m] Sampling training and validation subset..."""'], {}), "(\n '[\\x1b[5m .. 
\\x1b[0m] Sampling training and validation subset...')\n", (9068, 9140), False, 'import sys\n'), ((9166, 9184), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9182, 9184), False, 'import sys\n'), ((9983, 10001), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9999, 10001), False, 'import sys\n'), ((12863, 12886), 'numpy.squeeze', 'np.squeeze', (["task['sig']"], {}), "(task['sig'])\n", (12873, 12886), True, 'import numpy as np\n'), ((12901, 12924), 'numpy.squeeze', 'np.squeeze', (["task['lam']"], {}), "(task['lam'])\n", (12911, 12924), True, 'import numpy as np\n'), ((13298, 13324), 'numpy.empty', 'np.empty', (['[n_train, dim_d]'], {}), '([n_train, dim_d])\n', (13306, 13324), True, 'import numpy as np\n'), ((13344, 13377), 'numpy.empty', 'np.empty', (['[n_train, dim_d, dim_i]'], {}), '([n_train, dim_d, dim_i])\n', (13352, 13377), True, 'import numpy as np\n'), ((15131, 15141), 'numpy.std', 'np.std', (['Ft'], {}), '(Ft)\n', (15137, 15141), True, 'import numpy as np\n'), ((15348, 15370), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15368, 15370), False, 'import timeit\n'), ((15634, 15656), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15654, 15656), False, 'import timeit\n'), ((15941, 15963), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15961, 15963), False, 'import timeit\n'), ((16676, 16698), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (16696, 16698), False, 'import timeit\n'), ((19641, 19668), 'numpy.squeeze', 'np.squeeze', (["task['E_train']"], {}), "(task['E_train'])\n", (19651, 19668), True, 'import numpy as np\n'), ((25020, 25048), 'multiprocessing.RawArray', 'mp.RawArray', (['"""d"""', '(dim_K ** 2)'], {}), "('d', dim_K ** 2)\n", (25031, 25048), True, 'import multiprocessing as mp\n'), ((25272, 25300), 'multiprocessing.Pool', 'mp.Pool', (['self._max_processes'], {}), '(self._max_processes)\n', (25279, 25300), True, 'import multiprocessing as mp\n'), 
((27567, 27587), 'numpy.digitize', 'np.digitize', (['T', 'bins'], {}), '(T, bins)\n', (27578, 27587), True, 'import numpy as np\n'), ((27751, 27786), 'numpy.unique', 'np.unique', (['idxs'], {'return_counts': '(True)'}), '(idxs, return_counts=True)\n', (27760, 27786), True, 'import numpy as np\n'), ((28184, 28218), 'numpy.minimum', 'np.minimum', (['reduced_cnts', 'cnts_all'], {}), '(reduced_cnts, cnts_all)\n', (28194, 28218), True, 'import numpy as np\n'), ((29639, 29664), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'int'}), '((0,), dtype=int)\n', (29647, 29664), True, 'import numpy as np\n'), ((3144, 3173), 'numpy.frombuffer', 'np.frombuffer', (["glob['R_desc']"], {}), "(glob['R_desc'])\n", (3157, 3173), True, 'import numpy as np\n'), ((3219, 3250), 'numpy.frombuffer', 'np.frombuffer', (["glob['R_d_desc']"], {}), "(glob['R_d_desc'])\n", (3232, 3250), True, 'import numpy as np\n'), ((3292, 3316), 'numpy.frombuffer', 'np.frombuffer', (["glob['K']"], {}), "(glob['K'])\n", (3305, 3316), True, 'import numpy as np\n'), ((3684, 3714), 'numpy.tile', 'np.tile', (['R_desc[j, :]', 'n_perms'], {}), '(R_desc[j, :], n_perms)\n', (3691, 3714), True, 'import numpy as np\n'), ((3805, 3842), 'numpy.tile', 'np.tile', (['R_d_desc[j, :, :].T', 'n_perms'], {}), '(R_d_desc[j, :, :].T, n_perms)\n', (3812, 3842), True, 'import numpy as np\n'), ((4058, 4095), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_ab_perms'], {'axis': '(1)'}), '(diff_ab_perms, axis=1)\n', (4072, 4095), True, 'import numpy as np\n'), ((9817, 9873), 'numpy.random.choice', 'np.random.choice', (['idxs_valid_all', 'n_valid'], {'replace': '(False)'}), '(idxs_valid_all, n_valid, replace=False)\n', (9833, 9873), True, 'import numpy as np\n'), ((13467, 13508), 'scipy.spatial.distance.pdist', 'sp.spatial.distance.pdist', (['r', '"""euclidean"""'], {}), "(r, 'euclidean')\n", (13492, 13508), True, 'import scipy as sp\n'), ((13628, 13665), 'scipy.spatial.distance.squareform', 'sp.spatial.distance.squareform', (['pdist'], 
{}), '(pdist)\n', (13658, 13665), True, 'import scipy as sp\n'), ((15311, 15330), 'numpy.hstack', 'np.hstack', (['(Ft, Et)'], {}), '((Ft, Et))\n', (15320, 15330), True, 'import numpy as np\n'), ((15974, 15997), 'numpy.diag_indices_from', 'np.diag_indices_from', (['K'], {}), '(K)\n', (15994, 15997), True, 'import numpy as np\n'), ((16034, 16059), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (16057, 16059), False, 'import warnings\n'), ((16073, 16104), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (16094, 16104), False, 'import warnings\n'), ((20514, 20540), 'numpy.corrcoef', 'np.corrcoef', (['E_ref', 'E_pred'], {}), '(E_ref, E_pred)\n', (20525, 20540), True, 'import numpy as np\n'), ((20559, 20574), 'numpy.sign', 'np.sign', (['e_fact'], {}), '(e_fact)\n', (20566, 20574), True, 'import numpy as np\n'), ((22046, 22064), 'numpy.abs', 'np.abs', (['(e_fact - 1)'], {}), '(e_fact - 1)\n', (22052, 22064), True, 'import numpy as np\n'), ((22703, 22725), 'numpy.sum', 'np.sum', (['(E_ref - E_pred)'], {}), '(E_ref - E_pred)\n', (22709, 22725), True, 'import numpy as np\n'), ((25433, 25551), 'functools.partial', 'partial', (['_assemble_kernel_mat_wkr'], {'n_perms': 'n_perms', 'tril_perms_lin': 'tril_perms_lin', 'sig': 'sig', 'use_E_cstr': 'use_E_cstr'}), '(_assemble_kernel_mat_wkr, n_perms=n_perms, tril_perms_lin=\n tril_perms_lin, sig=sig, use_E_cstr=use_E_cstr)\n', (25440, 25551), False, 'from functools import partial\n'), ((27166, 27178), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (27175, 27178), True, 'import numpy as np\n'), ((27276, 27286), 'numpy.cbrt', 'np.cbrt', (['n'], {}), '(n)\n', (27283, 27286), True, 'import numpy as np\n'), ((27506, 27515), 'numpy.min', 'np.min', (['T'], {}), '(T)\n', (27512, 27515), True, 'import numpy as np\n'), ((27517, 27526), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (27523, 27526), True, 'import numpy as np\n'), ((27882, 27914), 'numpy.where', 'np.where', 
(['(uniq_all == n_bins + 1)'], {}), '(uniq_all == n_bins + 1)\n', (27890, 27914), True, 'import numpy as np\n'), ((27938, 27971), 'numpy.delete', 'np.delete', (['cnts_all', 'excl_bin_idx'], {}), '(cnts_all, excl_bin_idx)\n', (27947, 27971), True, 'import numpy as np\n'), ((27995, 28028), 'numpy.delete', 'np.delete', (['uniq_all', 'excl_bin_idx'], {}), '(uniq_all, excl_bin_idx)\n', (28004, 28028), True, 'import numpy as np\n'), ((28402, 28422), 'numpy.sum', 'np.sum', (['reduced_cnts'], {}), '(reduced_cnts)\n', (28408, 28422), True, 'import numpy as np\n'), ((28438, 28464), 'numpy.abs', 'np.abs', (['reduced_cnts_delta'], {}), '(reduced_cnts_delta)\n', (28444, 28464), True, 'import numpy as np\n'), ((29098, 29140), 'numpy.unique', 'np.unique', (['outstanding'], {'return_counts': '(True)'}), '(outstanding, return_counts=True)\n', (29107, 29140), True, 'import numpy as np\n'), ((4124, 4152), 'numpy.exp', 'np.exp', (['(-norm_ab_perms / sig)'], {}), '(-norm_ab_perms / sig)\n', (4130, 4152), True, 'import numpy as np\n'), ((4311, 4368), 'numpy.einsum', 'np.einsum', (['"""ik,jki -> ij"""', 'diff_ab_perms', 'rj_d_desc_perms'], {}), "('ik,jki -> ij', diff_ab_perms, rj_d_desc_perms)\n", (4320, 4368), True, 'import numpy as np\n'), ((4843, 4880), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_ab_perms'], {'axis': '(1)'}), '(diff_ab_perms, axis=1)\n', (4857, 4880), True, 'import numpy as np\n'), ((5198, 5245), 'numpy.einsum', 'np.einsum', (['"""ik,jki -> j"""', 'K_fe', 'rj_d_desc_perms'], {}), "('ik,jki -> j', K_fe, rj_d_desc_perms)\n", (5207, 5245), True, 'import numpy as np\n'), ((9369, 9407), 'numpy.arange', 'np.arange', (["train_dataset['F'].shape[0]"], {}), "(train_dataset['F'].shape[0])\n", (9378, 9407), True, 'import numpy as np\n'), ((9708, 9746), 'numpy.arange', 'np.arange', (["valid_dataset['F'].shape[0]"], {}), "(valid_dataset['F'].shape[0])\n", (9717, 9746), True, 'import numpy as np\n'), ((11069, 11107), 'numpy.arange', 'np.arange', (["train_dataset['R'].shape[1]"], 
{}), "(train_dataset['R'].shape[1])\n", (11078, 11107), True, 'import numpy as np\n'), ((13178, 13196), 'numpy.arange', 'np.arange', (['n_perms'], {}), '(n_perms)\n', (13187, 13196), True, 'import numpy as np\n'), ((13885, 13915), 'numpy.sort', 'np.sort', (["task['perms']"], {'axis': '(0)'}), "(task['perms'], axis=0)\n", (13892, 13915), True, 'import numpy as np\n'), ((16177, 16239), 'scipy.linalg.cho_factor', 'sp.linalg.cho_factor', (['(-K)'], {'overwrite_a': '(True)', 'check_finite': '(False)'}), '(-K, overwrite_a=True, check_finite=False)\n', (16197, 16239), True, 'import scipy as sp\n'), ((25977, 25993), 'numpy.frombuffer', 'np.frombuffer', (['K'], {}), '(K)\n', (25990, 25993), True, 'import numpy as np\n'), ((29456, 29483), 'numpy.sign', 'np.sign', (['reduced_cnts_delta'], {}), '(reduced_cnts_delta)\n', (29463, 29483), True, 'import numpy as np\n'), ((29554, 29574), 'numpy.sum', 'np.sum', (['reduced_cnts'], {}), '(reduced_cnts)\n', (29560, 29574), True, 'import numpy as np\n'), ((29858, 29914), 'numpy.random.choice', 'np.random.choice', (['idx_in_bin_all', 'bin_cnt'], {'replace': '(False)'}), '(idx_in_bin_all, bin_cnt, replace=False)\n', (29874, 29914), True, 'import numpy as np\n'), ((5419, 5447), 'numpy.exp', 'np.exp', (['(-norm_ab_perms / sig)'], {}), '(-norm_ab_perms / sig)\n', (5425, 5447), True, 'import numpy as np\n'), ((16304, 16376), 'scipy.linalg.cho_solve', 'sp.linalg.cho_solve', (['(L, lower)', 'y'], {'overwrite_b': '(True)', 'check_finite': '(False)'}), '((L, lower), y, overwrite_b=True, check_finite=False)\n', (16323, 16376), True, 'import scipy as sp\n'), ((16491, 16568), 'scipy.linalg.solve', 'sp.linalg.solve', (['K', 'y'], {'overwrite_a': '(True)', 'overwrite_b': '(True)', 'check_finite': '(False)'}), '(K, y, overwrite_a=True, overwrite_b=True, check_finite=False)\n', (16506, 16568), True, 'import scipy as sp\n'), ((28928, 28954), 'numpy.abs', 'np.abs', (['reduced_cnts_delta'], {}), '(reduced_cnts_delta)\n', (28934, 28954), True, 'import numpy 
as np\n'), ((29260, 29315), 'numpy.in1d', 'np.in1d', (['uniq_all', 'uniq_outstanding'], {'assume_unique': '(True)'}), '(uniq_all, uniq_outstanding, assume_unique=True)\n', (29267, 29315), True, 'import numpy as np\n'), ((5104, 5132), 'numpy.exp', 'np.exp', (['(-norm_ab_perms / sig)'], {}), '(-norm_ab_perms / sig)\n', (5110, 5132), True, 'import numpy as np\n'), ((27246, 27272), 'numpy.percentile', 'np.percentile', (['T', '[75, 25]'], {}), '(T, [75, 25])\n', (27259, 27272), True, 'import numpy as np\n'), ((28654, 28680), 'numpy.where', 'np.where', (['(reduced_cnts > 1)'], {}), '(reduced_cnts > 1)\n', (28662, 28680), True, 'import numpy as np\n'), ((28996, 29033), 'numpy.sum', 'np.sum', (['(reduced_cnts - 1)'], {'dtype': 'float'}), '(reduced_cnts - 1, dtype=float)\n', (29002, 29033), True, 'import numpy as np\n'), ((20439, 20459), 'numpy.ones', 'np.ones', (['E_ref.shape'], {}), '(E_ref.shape)\n', (20446, 20459), True, 'import numpy as np\n'), ((27317, 27326), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (27323, 27326), True, 'import numpy as np\n'), ((27329, 27338), 'numpy.min', 'np.min', (['T'], {}), '(T)\n', (27335, 27338), True, 'import numpy as np\n'), ((28114, 28143), 'numpy.sum', 'np.sum', (['cnts_all'], {'dtype': 'float'}), '(cnts_all, dtype=float)\n', (28120, 28143), True, 'import numpy as np\n'), ((14694, 14710), 'numpy.arange', 'np.arange', (['dim_i'], {}), '(dim_i)\n', (14703, 14710), True, 'import numpy as np\n')] |
"""Probablistic forecast error metrics."""
import numpy as np
def brier_score(obs, fx, fx_prob):
    """Brier Score (BS).

    BS = 1/n sum_{i=1}^n (f_i - o_i)^2

    where n is the number of forecasts, f_i is the forecasted probability
    of event i, and o_i is the observed event indicator. Each forecast is
    supplied as the right-hand side of a CDF interval (e.g. forecast
    <= 10 MW at time i), so the indicator is defined as:

        o_i = 1 if obs_i <= fx_i, else o_i = 0

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF
        interval, e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    bs : float
        The Brier Score [unitless], bounded between 0 and 1, where values
        closer to 0 indicate better forecast performance and values closer
        to 1 indicate worse performance.

    Notes
    -----
    This is the Brier Score for binary outcomes only, not the more general
    (but less commonly used) categorical version.
    """
    # Event indicator: 1 = event happened (obs fell inside the interval).
    outcome = np.where(obs <= fx, 1.0, 0.0)
    # Convert forecast probability from [%] to a unitless fraction.
    forecast = fx_prob / 100.0
    return np.mean((forecast - outcome) ** 2)
def brier_skill_score(obs, fx, fx_prob, ref, ref_prob):
    """Brier Skill Score (BSS).

    BSS = 1 - BS_fx / BS_ref

    where BS_fx is the Brier Score of the evaluated forecast and BS_ref is
    the Brier Score of a reference forecast.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF
        interval, e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.
    ref : (n,) array_like
        Reference forecast (physical units) of the right-hand-side of a
        CDF interval.
    ref_prob : (n,) array_like
        Probability [%] associated with the reference forecast.

    Returns
    -------
    skill : float
        The Brier Skill Score [unitless].
    """
    bs_forecast = brier_score(obs, fx, fx_prob)
    bs_reference = brier_score(obs, ref, ref_prob)
    return 1.0 - bs_forecast / bs_reference
def quantile_score(obs, fx, fx_prob):
    r"""Quantile Score (QS), also known as the pinball loss.

    .. math::
        \text{QS} = \frac{1}{n} \sum_{i=1}^n
            (fx_i - obs_i) (p - 1\{obs_i > fx_i\})

    where :math:`n` is the number of forecasts, :math:`obs_i` is an
    observation, :math:`fx_i` is a forecast, :math:`1\{obs_i > fx_i\}` is
    an indicator function (1 if :math:`obs_i > fx_i`, 0 otherwise) and
    :math:`p` is the probability that :math:`obs_i <= fx_i`. [1]_ [2]_

    Both factors flip sign together, so the score is non-negative for any
    combination of obs and fx.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF
        interval, e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    qs : float
        The Quantile Score, with the same units as the observations.

    Notes
    -----
    Quantile score is meant to be computed for a single probability of
    :math:`n` samples.

    Examples
    --------
    >>> obs = 100   # observation [MW]
    >>> fx = 80     # forecast [MW]
    >>> fx_prob = 60  # probability [%]
    >>> quantile_score(obs, fx, fx_prob)  # score [MW]
    8.0

    References
    ----------
    .. [1] Koenker and Bassett (1978) "Regression Quantiles", Econometrica
       46 (1), pp. 33-50. doi: 10.2307/1913643
    .. [2] Wilks (2020) "Forecast Verification". In "Statistical Methods in
       the Atmospheric Sciences" (3rd edition). Academic Press.
       ISBN: 9780123850225
    """
    # Prob(obs <= fx) = p, converted from [%].
    prob = fx_prob / 100.0
    # Indicator 1{obs > fx}.
    exceeded = np.where(obs > fx, 1.0, 0.0)
    return np.mean((fx - obs) * (prob - exceeded))
def quantile_skill_score(obs, fx, fx_prob, ref, ref_prob):
    """Quantile Skill Score (QSS).

    .. math::
        \\text{QSS} = 1 - \\text{QS}_{\\text{fx}} / \\text{QS}_{\\text{ref}}

    where :math:`\\text{QS}_{\\text{fx}}` is the Quantile Score of the
    evaluated forecast and :math:`\\text{QS}_{\\text{ref}}` is the Quantile
    Score of a reference forecast. [1]_

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF
        interval, e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.
    ref : (n,) array_like
        Reference forecast (physical units) of the right-hand-side of a
        CDF interval.
    ref_prob : (n,) array_like
        Probability [%] associated with the reference forecast.

    Returns
    -------
    skill : float
        The Quantile Skill Score [unitless].

    References
    ----------
    .. [1] Bouallegue, Pinson and Friederichs (2015) "Quantile forecast
       discrimination ability and value", Quarterly Journal of the Royal
       Meteorological Society 141, pp. 3415-3424. doi: 10.1002/qj.2624

    Notes
    -----
    This function returns 0 if QS_fx and QS_ref are both 0.

    See Also
    --------
    :py:func:`solarforecastarbiter.metrics.probabilistic.quantile_score`
    """
    qs_fx = quantile_score(obs, fx, fx_prob)
    qs_ref = quantile_score(obs, ref, ref_prob)

    # avoid 0 / 0 --> nan (equal scores, including both zero, mean no skill)
    if qs_fx == qs_ref:
        return 0.0
    elif qs_ref == 0.0:
        # avoid divide by 0
        # typically caused by deadbands and short time periods
        # (np.NINF was removed in NumPy 2.0; -np.inf is the same value)
        return -np.inf
    else:
        return 1.0 - qs_fx / qs_ref
def _unique_forecasts(f):
"""Convert forecast probabilities to a set of unique values.
Determine a set of unique forecast probabilities, based on input forecast
probabilities of arbitrary precision, and approximate the input
probabilities to lie within the set of unique values.
Parameters
----------
f : (n,) array_like
Probability [unitless] associated with the forecasts.
Returns
-------
f_uniq : (n,) array_like
The converted forecast probabilities [unitless].
Notes
-----
This implementation determines the set of unique forecast probabilities by
rounding the input probabilities to a precision determined by the number of
input probability values: if less than 1000 samples, bin by tenths;
otherwise bin by hundredths.
Examples
--------
>>> f = np.array([0.1234, 0.156891, 0.10561])
>>> _unique_forecasts(f)
array([0.1, 0.2, 0.1])
"""
if len(f) >= 1000:
n_decimals = 2 # bin by hundredths (0.01, 0.02, etc.)
else:
n_decimals = 1 # bin by tenths (0.1, 0.2, etc.)
f_uniq = np.around(f, decimals=n_decimals)
return f_uniq
def brier_decomposition(obs, fx, fx_prob):
    """The 3-component decomposition of the Brier Score.

    BS = REL - RES + UNC

    where REL is the reliability, RES is the resolution and UNC is the
    uncertainty.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF interval,
        e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    rel : float
        The reliability of the forecast [unitless]; a perfectly reliable
        forecast scores 0.
    res : float
        The resolution of the forecast [unitless]; higher is better.
    unc : float
        The uncertainty [unitless]; lower values indicate a rarely occurring
        event.

    Notes
    -----
    Reliability and resolution are accumulated with an explicit loop over the
    unique (binned) forecast probabilities; the number of unique values is
    small enough that a vectorized formulation would not be meaningfully
    faster.
    """
    # Binary event indicator: 1 when the observation fell inside the
    # forecast CDF interval, 0 otherwise.
    event = np.where(obs <= fx, 1.0, 0.0)
    # Convert [%] to [unitless] and bin to a small set of unique values.
    prob = _unique_forecasts(fx_prob / 100.0)
    event_mean = np.mean(event)
    rel = 0.0
    res = 0.0
    values, counts = np.unique(prob, return_counts=True)
    for p, n_p in zip(values, counts):
        # Mean observed event rate over the samples sharing this forecast.
        cond_mean = np.mean(event[prob == p])
        rel += n_p * (p - cond_mean) ** 2
        res += n_p * (cond_mean - event_mean) ** 2
    rel /= len(prob)
    res /= len(prob)
    # Uncertainty depends only on the climatological base rate.
    base_rate = np.mean(event)
    unc = base_rate * (1.0 - base_rate)
    return rel, res, unc
def reliability(obs, fx, fx_prob):
    """Reliability (REL) of the forecast.

    REL = 1/n sum_{i=1}^I N_i (f_i - o_{i,avg})^2

    where n is the total number of forecasts, I is the number of unique
    forecasts, N_i is the count of each unique forecast and o_{i,avg} is the
    mean observed event value while the forecast was f_i.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF interval.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    rel : float
        The reliability [unitless]; a perfectly reliable forecast scores 0.

    See Also
    --------
    brier_decomposition : 3-component decomposition of the Brier Score
    """
    rel, _, _ = brier_decomposition(obs, fx, fx_prob)
    return rel
def resolution(obs, fx, fx_prob):
    """Resolution (RES) of the forecast.

    RES = 1/n sum_{i=1}^I N_i (o_{i,avg} - o_{avg})^2

    where n is the total number of forecasts, I is the number of unique
    forecasts, N_i is the count of each unique forecast, o_{i,avg} is the
    mean observed event value while the forecast was f_i, and o_{avg} is the
    overall mean observed event value.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF interval.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    res : float
        The resolution [unitless]; higher values are better.

    See Also
    --------
    brier_decomposition : 3-component decomposition of the Brier Score
    """
    _, res, _ = brier_decomposition(obs, fx, fx_prob)
    return res
def uncertainty(obs, fx, fx_prob):
    """Uncertainty (UNC) of the forecast.

    UNC = base_rate * (1 - base_rate)

    where base_rate = 1/n sum_{i=1}^n o_i and o_i is the observed event.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n,) array_like
        Forecasts (physical units) of the right-hand-side of a CDF interval.
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    unc : float
        The uncertainty [unitless]; lower values indicate the forecasted
        event occurs rarely.

    See Also
    --------
    brier_decomposition : 3-component decomposition of the Brier Score
    """
    _, _, unc = brier_decomposition(obs, fx, fx_prob)
    return unc
def sharpness(fx_lower, fx_upper):
    """Sharpness (SH).

    SH = 1/n sum_{i=1}^n (f_{u,i} - f_{l,i})

    i.e. the mean width of the prediction intervals.

    Parameters
    ----------
    fx_lower : (n,) array_like
        The lower prediction interval values (physical units).
    fx_upper : (n,) array_like
        The upper prediction interval values (physical units).

    Returns
    -------
    SH : float
        The sharpness (physical units); smaller values indicate "tighter"
        prediction intervals.
    """
    interval_width = fx_upper - fx_lower
    return np.mean(interval_width)
def continuous_ranked_probability_score(obs, fx, fx_prob):
    """Continuous Ranked Probability Score (CRPS).

    CRPS = 1/n sum_{i=1}^n int (F_i - O_i)^2 dx

    where F_i is the CDF of the forecast at time i and O_i is the CDF
    associated with the observed value obs_i:

        O_{i, j} = 1 if obs_i <= fx_{i, j}, else O_{i, j} = 0

    where obs_i is the observation at time i, and fx_{i, j} is the forecast
    at time i for CDF interval j.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n, d) array_like
        Forecasts (physical units) of the right-hand-side of a CDF with d
        intervals (d >= 2), e.g., fx = [10 MW, 20 MW, 30 MW] is interpreted
        as <= 10 MW, <= 20 MW, <= 30 MW.
    fx_prob : (n, d) array_like
        Probability [%] associated with the forecasts.

    Returns
    -------
    crps : float
        The Continuous Ranked Probability Score [unitless].

    Raises
    ------
    ValueError
        If the forecasts have incorrect dimensions; either a) the forecasts
        are for a single sample (n=1) with d CDF intervals but are given as
        a 1D array with d values or b) the forecasts are given as 2D arrays
        (n,d) but do not contain at least 2 CDF intervals (i.e. d < 2).

    Examples
    --------
    Forecast probabilities of <= 10 MW and <= 20 MW:

    >>> fx = np.array([[10, 20], [10, 20]])
    >>> fx_prob = np.array([[30, 50], [65, 100]])
    >>> obs = np.array([8, 12])
    >>> continuous_ranked_probability_score(obs, fx, fx_prob)
    4.5625
    """
    if np.ndim(fx) < 2:
        # Bug fix: the original message concatenated "got" directly onto the
        # shape ("got(2,)"); add the missing space.
        raise ValueError("forecasts must be 2D arrays (expected (n,d), "
                         f"got {np.shape(fx)})")
    elif np.shape(fx)[1] < 2:
        raise ValueError("forecasts must have d >= 2 CDF intervals "
                         f"(expected >= 2, got {np.shape(fx)[1]})")
    # match observations to fx shape: (n,) => (n, d)
    obs = np.tile(obs, (fx.shape[1], 1)).T
    # event: 0=did not happen, 1=did happen
    o = np.where(obs <= fx, 1.0, 0.0)
    # forecast probabilities [unitless]
    f = fx_prob / 100.0
    # integrate (F - O)^2 along each sample's CDF, then average all samples
    integrand = (f - o) ** 2
    dx = np.diff(fx, axis=1)
    crps = np.mean(np.sum(integrand[:, :-1] * dx, axis=1))
    return crps
def crps_skill_score(obs, fx, fx_prob, ref, ref_prob):
    """CRPS skill score.

    CRPSS = 1 - CRPS_fx / CRPS_ref

    where CRPS_fx is the CRPS of the evaluated forecast and CRPS_ref is the
    CRPS of a reference forecast.

    Parameters
    ----------
    obs : (n,) array_like
        Observations (physical unit).
    fx : (n, d) array_like
        Forecasts (physical units) of the right-hand-side of a CDF with d
        intervals (d >= 2).
    fx_prob : (n,) array_like
        Probability [%] associated with the forecasts.
    ref : (n, d) array_like
        Reference forecasts (physical units) of the right-hand-side of a CDF
        with d intervals (d >= 2). A scalar value signals that no usable
        reference is available.
    ref_prob : (n,) array_like
        Probability [%] associated with the reference forecast.

    Returns
    -------
    skill : float
        The CRPS skill score [unitless]; ``nan`` when ``ref`` is a scalar,
        0.0 when forecast and reference score identically, ``-inf`` when
        only the reference is perfect (CRPS_ref == 0).

    See Also
    --------
    :py:func:`solarforecastarbiter.metrics.probabilistic.continuous_ranked_probability_score`
    """
    if np.isscalar(ref):
        # no usable reference forecast provided
        return np.nan
    crps_fx = continuous_ranked_probability_score(obs, fx, fx_prob)
    crps_ref = continuous_ranked_probability_score(obs, ref, ref_prob)
    if crps_fx == crps_ref:
        # also covers CRPS_fx == CRPS_ref == 0 (avoid 0 / 0 --> nan)
        return 0.0
    elif crps_ref == 0.0:
        # avoid divide by zero; np.NINF was removed in NumPy 2.0, so use
        # the equivalent -np.inf
        return -np.inf
    else:
        return 1 - crps_fx / crps_ref
# Add new metrics to this map to map shorthand to function.
# Each entry maps a shorthand key to a (metric function, display name) pair.
_MAP = {
    'bs': (brier_score, 'BS'),
    'bss': (brier_skill_score, 'BSS'),
    'rel': (reliability, 'REL'),
    'res': (resolution, 'RES'),
    'unc': (uncertainty, 'UNC'),
    'qs': (quantile_score, 'QS'),
    'qss': (quantile_skill_score, 'QSS'),
    # 'sh': (sharpness, 'SH'),  # TODO
    'crps': (continuous_ranked_probability_score, 'CRPS'),
    'crpss': (crps_skill_score, 'CRPSS'),
}
# Export the metric function names themselves as the module's public API.
__all__ = [m[0].__name__ for m in _MAP.values()]
# Functions that require a reference forecast
_REQ_REF_FX = ['bss', 'qss', 'crpss']
# Functions that require normalized factor
_REQ_NORM = []
# Functions that require full distribution forecasts (as 2dim)
_REQ_DIST = ['crps', 'crpss']
# TODO: Functions that require two forecasts (e.g., sharpness)
# _REQ_FX_FX = ['sh']
| [
"numpy.sum",
"numpy.isscalar",
"numpy.ndim",
"numpy.shape",
"numpy.around",
"numpy.mean",
"numpy.where",
"numpy.diff",
"numpy.tile",
"numpy.unique"
] | [((1454, 1483), 'numpy.where', 'np.where', (['(obs <= fx)', '(1.0)', '(0.0)'], {}), '(obs <= fx, 1.0, 0.0)\n', (1462, 1483), True, 'import numpy as np\n'), ((1559, 1580), 'numpy.mean', 'np.mean', (['((f - o) ** 2)'], {}), '((f - o) ** 2)\n', (1566, 1580), True, 'import numpy as np\n'), ((7691, 7724), 'numpy.around', 'np.around', (['f'], {'decimals': 'n_decimals'}), '(f, decimals=n_decimals)\n', (7700, 7724), True, 'import numpy as np\n'), ((9332, 9361), 'numpy.where', 'np.where', (['(obs <= fx)', '(1.0)', '(0.0)'], {}), '(obs <= fx, 1.0, 0.0)\n', (9340, 9361), True, 'import numpy as np\n'), ((9578, 9588), 'numpy.mean', 'np.mean', (['o'], {}), '(o)\n', (9585, 9588), True, 'import numpy as np\n'), ((9870, 9880), 'numpy.mean', 'np.mean', (['o'], {}), '(o)\n', (9877, 9880), True, 'import numpy as np\n'), ((13658, 13686), 'numpy.mean', 'np.mean', (['(fx_upper - fx_lower)'], {}), '(fx_upper - fx_lower)\n', (13665, 13686), True, 'import numpy as np\n'), ((16019, 16048), 'numpy.where', 'np.where', (['(obs <= fx)', '(1.0)', '(0.0)'], {}), '(obs <= fx, 1.0, 0.0)\n', (16027, 16048), True, 'import numpy as np\n'), ((16213, 16232), 'numpy.diff', 'np.diff', (['fx'], {'axis': '(1)'}), '(fx, axis=1)\n', (16220, 16232), True, 'import numpy as np\n'), ((17485, 17501), 'numpy.isscalar', 'np.isscalar', (['ref'], {}), '(ref)\n', (17496, 17501), True, 'import numpy as np\n'), ((9619, 9651), 'numpy.unique', 'np.unique', (['f'], {'return_counts': '(True)'}), '(f, return_counts=True)\n', (9628, 9651), True, 'import numpy as np\n'), ((9668, 9688), 'numpy.mean', 'np.mean', (['o[f == f_i]'], {}), '(o[f == f_i])\n', (9675, 9688), True, 'import numpy as np\n'), ((15604, 15615), 'numpy.ndim', 'np.ndim', (['fx'], {}), '(fx)\n', (15611, 15615), True, 'import numpy as np\n'), ((16252, 16290), 'numpy.sum', 'np.sum', (['(integrand[:, :-1] * dx)'], {'axis': '(1)'}), '(integrand[:, :-1] * dx, axis=1)\n', (16258, 16290), True, 'import numpy as np\n'), ((4717, 4745), 'numpy.where', 'np.where', 
(['(obs > fx)', '(1.0)', '(0.0)'], {}), '(obs > fx, 1.0, 0.0)\n', (4725, 4745), True, 'import numpy as np\n'), ((15751, 15763), 'numpy.shape', 'np.shape', (['fx'], {}), '(fx)\n', (15759, 15763), True, 'import numpy as np\n'), ((15933, 15963), 'numpy.tile', 'np.tile', (['obs', '(fx.shape[1], 1)'], {}), '(obs, (fx.shape[1], 1))\n', (15940, 15963), True, 'import numpy as np\n'), ((15725, 15737), 'numpy.shape', 'np.shape', (['fx'], {}), '(fx)\n', (15733, 15737), True, 'import numpy as np\n'), ((15889, 15901), 'numpy.shape', 'np.shape', (['fx'], {}), '(fx)\n', (15897, 15901), True, 'import numpy as np\n')] |
"""
==================================================
Automatic Fiber Bundle Extraction with RecoBundles
==================================================
This example explains how we can use RecoBundles [Garyfallidis17]_ to
extract bundles from tractograms.
First import the necessary modules.
"""
import numpy as np
from dipy.segment.bundles import RecoBundles
from dipy.align.streamlinear import whole_brain_slr
from dipy.viz import window, actor
from dipy.io.streamline import load_trk, save_trk
"""
Download and read data for this tutorial
"""
from dipy.data.fetcher import (fetch_target_tractogram_hcp,
fetch_bundle_atlas_hcp842,
get_bundle_atlas_hcp842,
get_target_tractogram_hcp)
target_file, target_folder = fetch_target_tractogram_hcp()
atlas_file, atlas_folder = fetch_bundle_atlas_hcp842()
atlas_file, all_bundles_files = get_bundle_atlas_hcp842()
target_file = get_target_tractogram_hcp()
atlas, atlas_header = load_trk(atlas_file)
target, target_header = load_trk(target_file)
"""
let's visualize atlas tractogram and target tractogram before registration
"""
interactive = False
ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.line(atlas, colors=(1,0,1)))
ren.add(actor.line(target, colors=(1,1,0)))
window.record(ren, out_path='tractograms_initial.png', size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: tractograms_initial.png
:align: center
Atlas and target before registration.
"""
"""
We will register target tractogram to model atlas' space using streamlinear
registeration (SLR) [Garyfallidis15]_
"""
moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
atlas, target, x0='affine', verbose=True, progressive=True)
"""
We save the transform generated in this registration, so that we can use
it in the bundle profiles example
"""
np.save("slr_transform.npy", transform)
"""
let's visualize atlas tractogram and target tractogram after registration
"""
interactive = False
ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.line(atlas, colors=(1,0,1)))
ren.add(actor.line(moved, colors=(1,1,0)))
window.record(ren, out_path='tractograms_after_registration.png',
size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: tractograms_after_registration.png
:align: center
Atlas and target after registration.
"""
"""
Read AF left and CST left bundles from already fetched atlas data to use them
as model bundles
"""
from dipy.data.fetcher import get_two_hcp842_bundles
model_af_l_file, model_cst_l_file = get_two_hcp842_bundles()
"""
Extracting bundles using recobundles [Garyfallidis17]_
"""
model_af_l, hdr = load_trk(model_af_l_file)
rb = RecoBundles(moved, verbose=True)
recognized_af_l, af_l_labels = rb.recognize(model_bundle=model_af_l,
model_clust_thr=5.,
reduction_thr=10,
reduction_distance='mam',
slr=True,
slr_metric='asymmetric',
pruning_distance='mam')
"""
let's visualize extracted Arcuate Fasciculus Left bundle and model bundle
together
"""
interactive = False
ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.line(model_af_l, colors=(.1,.7,.26)))
ren.add(actor.line(recognized_af_l, colors=(.1,.1,6)))
ren.set_camera(focal_point=(320.21296692, 21.28884506, 17.2174015),
position=(2.11, 200.46, 250.44) , view_up=(0.1, -1.028, 0.18))
window.record(ren, out_path='AF_L_recognized_bundle.png',
size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: AF_L_recognized_bundle.png
:align: center
Extracted Arcuate Fasciculus Left bundle and model bundle
"""
"""
Save the bundle as a trk file. Rather than saving the recognized streamlines
in the space of the atlas, we save the streamlines that are in the original
space of the subject anatomy.
"""
save_trk( "AF_L.trk", target[af_l_labels], hdr['voxel_to_rasmm'])
model_cst_l, hdr = load_trk(model_cst_l_file)
recognized_cst_l, cst_l_labels = rb.recognize(model_bundle=model_cst_l,
model_clust_thr=5.,
reduction_thr=10,
reduction_distance='mam',
slr=True,
slr_metric='asymmetric',
pruning_distance='mam')
"""
let's visualize extracted Corticospinal Tract (CST) Left bundle and model
bundle together
"""
interactive = False
ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.line(model_cst_l, colors=(.1,.7,.26)))
ren.add(actor.line(recognized_cst_l, colors=(.1,.1,6)))
ren.set_camera(focal_point=(-18.17281532, -19.55606842, 6.92485857),
position=(-360.11, -340.46, -40.44),
view_up=(-0.03, 0.028, 0.89))
window.record(ren, out_path='CST_L_recognized_bundle.png',
size=(600, 600))
if interactive:
window.show(ren)
"""
.. figure:: CST_L_recognized_bundle.png
:align: center
Extracted Corticospinal Tract (CST) Left bundle and model bundle
"""
"""
Save the bundle as a trk file:
"""
save_trk("CST_L.trk", target[cst_l_labels], hdr['voxel_to_rasmm'])
"""
References
----------
.. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
bundles using local and global streamline-based registration
and clustering, Neuroimage, 2017.
""" | [
"dipy.io.streamline.load_trk",
"numpy.save",
"dipy.data.fetcher.fetch_target_tractogram_hcp",
"dipy.data.fetcher.get_two_hcp842_bundles",
"dipy.segment.bundles.RecoBundles",
"dipy.io.streamline.save_trk",
"dipy.viz.actor.line",
"dipy.viz.window.show",
"dipy.align.streamlinear.whole_brain_slr",
"di... | [((819, 848), 'dipy.data.fetcher.fetch_target_tractogram_hcp', 'fetch_target_tractogram_hcp', ([], {}), '()\n', (846, 848), False, 'from dipy.data.fetcher import fetch_target_tractogram_hcp, fetch_bundle_atlas_hcp842, get_bundle_atlas_hcp842, get_target_tractogram_hcp\n'), ((876, 903), 'dipy.data.fetcher.fetch_bundle_atlas_hcp842', 'fetch_bundle_atlas_hcp842', ([], {}), '()\n', (901, 903), False, 'from dipy.data.fetcher import fetch_target_tractogram_hcp, fetch_bundle_atlas_hcp842, get_bundle_atlas_hcp842, get_target_tractogram_hcp\n'), ((937, 962), 'dipy.data.fetcher.get_bundle_atlas_hcp842', 'get_bundle_atlas_hcp842', ([], {}), '()\n', (960, 962), False, 'from dipy.data.fetcher import fetch_target_tractogram_hcp, fetch_bundle_atlas_hcp842, get_bundle_atlas_hcp842, get_target_tractogram_hcp\n'), ((977, 1004), 'dipy.data.fetcher.get_target_tractogram_hcp', 'get_target_tractogram_hcp', ([], {}), '()\n', (1002, 1004), False, 'from dipy.data.fetcher import fetch_target_tractogram_hcp, fetch_bundle_atlas_hcp842, get_bundle_atlas_hcp842, get_target_tractogram_hcp\n'), ((1028, 1048), 'dipy.io.streamline.load_trk', 'load_trk', (['atlas_file'], {}), '(atlas_file)\n', (1036, 1048), False, 'from dipy.io.streamline import load_trk, save_trk\n'), ((1073, 1094), 'dipy.io.streamline.load_trk', 'load_trk', (['target_file'], {}), '(target_file)\n', (1081, 1094), False, 'from dipy.io.streamline import load_trk, save_trk\n'), ((1207, 1224), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (1222, 1224), False, 'from dipy.viz import window, actor\n'), ((1339, 1410), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""tractograms_initial.png"""', 'size': '(600, 600)'}), "(ren, out_path='tractograms_initial.png', size=(600, 600))\n", (1352, 1410), False, 'from dipy.viz import window, actor\n'), ((1727, 1802), 'dipy.align.streamlinear.whole_brain_slr', 'whole_brain_slr', (['atlas', 'target'], {'x0': '"""affine"""', 'verbose': '(True)', 
'progressive': '(True)'}), "(atlas, target, x0='affine', verbose=True, progressive=True)\n", (1742, 1802), False, 'from dipy.align.streamlinear import whole_brain_slr\n'), ((1934, 1973), 'numpy.save', 'np.save', (['"""slr_transform.npy"""', 'transform'], {}), "('slr_transform.npy', transform)\n", (1941, 1973), True, 'import numpy as np\n'), ((2086, 2103), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (2101, 2103), False, 'from dipy.viz import window, actor\n'), ((2217, 2303), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""tractograms_after_registration.png"""', 'size': '(600, 600)'}), "(ren, out_path='tractograms_after_registration.png', size=(600,\n 600))\n", (2230, 2303), False, 'from dipy.viz import window, actor\n'), ((2661, 2685), 'dipy.data.fetcher.get_two_hcp842_bundles', 'get_two_hcp842_bundles', ([], {}), '()\n', (2683, 2685), False, 'from dipy.data.fetcher import get_two_hcp842_bundles\n'), ((2769, 2794), 'dipy.io.streamline.load_trk', 'load_trk', (['model_af_l_file'], {}), '(model_af_l_file)\n', (2777, 2794), False, 'from dipy.io.streamline import load_trk, save_trk\n'), ((2801, 2833), 'dipy.segment.bundles.RecoBundles', 'RecoBundles', (['moved'], {'verbose': '(True)'}), '(moved, verbose=True)\n', (2812, 2833), False, 'from dipy.segment.bundles import RecoBundles\n'), ((3411, 3428), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (3426, 3428), False, 'from dipy.viz import window, actor\n'), ((3710, 3784), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""AF_L_recognized_bundle.png"""', 'size': '(600, 600)'}), "(ren, out_path='AF_L_recognized_bundle.png', size=(600, 600))\n", (3723, 3784), False, 'from dipy.viz import window, actor\n'), ((4160, 4224), 'dipy.io.streamline.save_trk', 'save_trk', (['"""AF_L.trk"""', 'target[af_l_labels]', "hdr['voxel_to_rasmm']"], {}), "('AF_L.trk', target[af_l_labels], hdr['voxel_to_rasmm'])\n", (4168, 4224), False, 'from dipy.io.streamline 
import load_trk, save_trk\n'), ((4246, 4272), 'dipy.io.streamline.load_trk', 'load_trk', (['model_cst_l_file'], {}), '(model_cst_l_file)\n', (4254, 4272), False, 'from dipy.io.streamline import load_trk, save_trk\n'), ((4872, 4889), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (4887, 4889), False, 'from dipy.viz import window, actor\n'), ((5192, 5267), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""CST_L_recognized_bundle.png"""', 'size': '(600, 600)'}), "(ren, out_path='CST_L_recognized_bundle.png', size=(600, 600))\n", (5205, 5267), False, 'from dipy.viz import window, actor\n'), ((5500, 5566), 'dipy.io.streamline.save_trk', 'save_trk', (['"""CST_L.trk"""', 'target[cst_l_labels]', "hdr['voxel_to_rasmm']"], {}), "('CST_L.trk', target[cst_l_labels], hdr['voxel_to_rasmm'])\n", (5508, 5566), False, 'from dipy.io.streamline import load_trk, save_trk\n'), ((1260, 1295), 'dipy.viz.actor.line', 'actor.line', (['atlas'], {'colors': '(1, 0, 1)'}), '(atlas, colors=(1, 0, 1))\n', (1270, 1295), False, 'from dipy.viz import window, actor\n'), ((1303, 1339), 'dipy.viz.actor.line', 'actor.line', (['target'], {'colors': '(1, 1, 0)'}), '(target, colors=(1, 1, 0))\n', (1313, 1339), False, 'from dipy.viz import window, actor\n'), ((1431, 1447), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (1442, 1447), False, 'from dipy.viz import window, actor\n'), ((2139, 2174), 'dipy.viz.actor.line', 'actor.line', (['atlas'], {'colors': '(1, 0, 1)'}), '(atlas, colors=(1, 0, 1))\n', (2149, 2174), False, 'from dipy.viz import window, actor\n'), ((2182, 2217), 'dipy.viz.actor.line', 'actor.line', (['moved'], {'colors': '(1, 1, 0)'}), '(moved, colors=(1, 1, 0))\n', (2192, 2217), False, 'from dipy.viz import window, actor\n'), ((2334, 2350), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (2345, 2350), False, 'from dipy.viz import window, actor\n'), ((3464, 3511), 'dipy.viz.actor.line', 'actor.line', (['model_af_l'], 
{'colors': '(0.1, 0.7, 0.26)'}), '(model_af_l, colors=(0.1, 0.7, 0.26))\n', (3474, 3511), False, 'from dipy.viz import window, actor\n'), ((3516, 3565), 'dipy.viz.actor.line', 'actor.line', (['recognized_af_l'], {'colors': '(0.1, 0.1, 6)'}), '(recognized_af_l, colors=(0.1, 0.1, 6))\n', (3526, 3565), False, 'from dipy.viz import window, actor\n'), ((3819, 3835), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (3830, 3835), False, 'from dipy.viz import window, actor\n'), ((4925, 4973), 'dipy.viz.actor.line', 'actor.line', (['model_cst_l'], {'colors': '(0.1, 0.7, 0.26)'}), '(model_cst_l, colors=(0.1, 0.7, 0.26))\n', (4935, 4973), False, 'from dipy.viz import window, actor\n'), ((4978, 5028), 'dipy.viz.actor.line', 'actor.line', (['recognized_cst_l'], {'colors': '(0.1, 0.1, 6)'}), '(recognized_cst_l, colors=(0.1, 0.1, 6))\n', (4988, 5028), False, 'from dipy.viz import window, actor\n'), ((5302, 5318), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (5313, 5318), False, 'from dipy.viz import window, actor\n')] |
import click
import cv2
import numpy as np
from scipy.ndimage.filters import rank_filter
def process_image(image_array):
    """Decode ``image_array`` and return it cropped to the useful text area.

    Pipeline: decode -> downscale -> denoise -> edge-detect -> remove thin
    borders -> find connected components -> merge them into a final crop ->
    crop the full-resolution image -> box-blur the result.
    """
    original = cv2.imdecode(image_array, 1)  # decode as a color image
    # Work on a downscaled copy; remember the scale to crop at full size.
    scale, shrunk = downscale_image(original)
    denoised = reduce_noise_raw(shrunk.copy())
    edge_map = auto_canny(denoised.copy())
    cleaned = reduce_noise_edges(edge_map.copy())
    _, candidate_rects, _ = find_components(cleaned, 16)
    crop_rect = find_final_crop(candidate_rects)
    cropped = crop_image(original, crop_rect, scale)
    # 5x5 normalized box filter for final smoothing.
    box_kernel = np.ones((5, 5), np.float32) / 25
    smoothed = cv2.filter2D(cropped, -1, kernel=box_kernel)
    click.echo("Returning processed image")
    return smoothed
def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds derived from the image median.

    The lower/upper thresholds are (1 -/+ sigma) times the median pixel
    intensity, clamped to [0, 255]. Adapted from the pyimagesearch
    zero-parameter Canny recipe.
    """
    median = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(image, lower, upper, True)
def dilate(image, kernel, iterations):
    """Morphologically dilate *image* with *kernel* the given number of times.

    Bug fix: the original body returned an undefined name ``d_image``,
    raising NameError on every call; delegate to ``cv2.dilate`` instead
    (``iterations`` passed by keyword because the third positional
    parameter of ``cv2.dilate`` is ``dst``).
    """
    return cv2.dilate(image, kernel, iterations=iterations)
def downscale_image(im, max_dim=2048):
    """Shrink *im* until its longest dimension is <= *max_dim*.

    Returns ``(scale, image)`` where ``scale <= 1``; the input is returned
    unchanged (scale 1.0) when it is already small enough.
    """
    height, width = im.shape[:2]
    longest = max(height, width)
    if longest <= max_dim:
        return 1.0, im
    scale = 1.0 * max_dim / longest
    resized = cv2.resize(im, (int(width * scale), int(height * scale)), cv2.INTER_AREA)
    return scale, resized
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # NOTE(review): despite the docstring, the dilation is computed once
    # before the loop and never recomputed; only the polygon-approximation
    # tolerance (sigma) grows each iteration — confirm that is intended.
    dilation_iterations = 6
    count = 21  # seeded above max_components so the loop runs at least once
    n = 0  # number of refinement iterations performed
    sigma = 0.000  # approxPolyDP tolerance factor, increased each pass
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    # NOTE(review): cv2.dilate's third positional parameter is ``dst``, not
    # ``iterations`` — passing dilation_iterations positionally here looks
    # suspicious; verify against the OpenCV Python bindings.
    dilation = cv2.dilate(im, kernel, dilation_iterations)
    while count > max_components:
        n += 1
        sigma += 0.005
        # cv2.findContours returns 3 values in OpenCV 3.x and 2 in 4.x;
        # handle both layouts.
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(result) == 3:
            _, contours, _ = result
        elif len(result) == 2:
            contours, _ = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)
    return (dilation, possible, n)
def find_likely_rectangles(contours, sigma):
    """Approximate the largest contours and return their bounding boxes."""
    # Only consider the ten largest contours by area.
    largest = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    boxes = []
    for contour in largest:
        # approximate the contour with tolerance proportional to its perimeter
        perimeter = cv2.arcLength(contour, True)
        poly = cv2.approxPolyDP(contour, sigma * perimeter, True)
        boxes.append(make_box(poly))
    return boxes
def make_box(poly):
    """Return the (xmin, ymin, xmax, ymax) bounding box of a polygon.

    *poly* is an iterable of point wrappers as produced by
    ``cv2.approxPolyDP`` (each item wraps one (x, y) point).
    """
    xs = [point[0] for wrapper in poly for point in wrapper]
    ys = [point[1] for wrapper in poly for point in wrapper]
    return (min(xs), min(ys), max(xs), max(ys))
def rect_union(crop1, crop2):
    """Return the smallest (x1, y1, x2, y2) rect covering both inputs."""
    left = min(crop1[0], crop2[0])
    top = min(crop1[1], crop2[1])
    right = max(crop1[2], crop2[2])
    bottom = max(crop1[3], crop2[3])
    return left, top, right, bottom
def rect_area(crop):
    """Area of an (x1, y1, x2, y2) rect; inverted rects count as zero."""
    x1, y1, x2, y2 = crop
    width = max(0, x2 - x1)
    height = max(0, y2 - y1)
    return width * height
def crop_image(im, rect, scale):
    """Crop *im* to *rect*, rescaling the rect back to full resolution.

    *rect* is (xmin, ymin, xmax, ymax) in downscaled coordinates; dividing
    by *scale* maps it onto the original image.
    """
    xmin, ymin, xmax, ymax = (int(v / scale) for v in rect)
    return im[ymin:ymax, xmin:xmax]
def reduce_noise_raw(im):
    """Smooth the raw image while preserving edges.

    A bilateral filter removes texture noise without blurring edges, then a
    median blur knocks out remaining salt-and-pepper noise.
    """
    edge_preserving = cv2.bilateralFilter(im, 9, 75, 75)
    return cv2.medianBlur(edge_preserving, 5)
def reduce_noise_edges(im):
    """Clean the edge map and suppress long thin border artifacts."""
    # Morphological opening with a 1x1 element removes isolated specks.
    element = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    opened = cv2.morphologyEx(im, cv2.MORPH_OPEN, element)
    # Rank filters along rows/columns keep only pixels supported by their
    # neighborhood; taking the pixel-wise minimum drops thin borders.
    row_max = rank_filter(opened, -4, size=(1, 20))
    col_max = rank_filter(opened, -4, size=(20, 1))
    return np.minimum(np.minimum(opened, row_max), col_max)
def rects_are_vertical(rect1, rect2):
    """Whether two rects are (roughly) vertically aligned.

    Compares the rects' horizontal midpoints; they count as aligned when
    the midpoints differ by less than 10% of the larger-area rect's width.
    """
    xmin1, _, xmax1, _ = rect1
    xmin2, _, xmax2, _ = rect2
    midpoint_gap = abs((xmin1 + xmax1) / 2 - (xmin2 + xmax2) / 2)
    if rect_area(rect1) > rect_area(rect2):
        tolerance = (xmax1 - xmin1) * 0.1
    else:
        tolerance = (xmax2 - xmin2) * 0.1
    return tolerance > midpoint_gap
def find_final_crop(rects):
    """Merge vertically aligned rects into a single final crop rect.

    Starts from the first rect and unions in every later rect whose
    horizontal midpoint lines up with the running result; returns None for
    an empty input.
    """
    current = None
    for rect in rects:
        if current is None:
            current = rect
        elif rects_are_vertical(current, rect):
            current = rect_union(current, rect)
    return current
def rad_to_deg(theta):
    """Convert an angle from radians to degrees."""
    return 180 * theta / np.pi
def rotate(image, theta):
    """Rotate *image* by *theta* degrees about its center, padding with white."""
    h, w = image.shape[:2]
    matrix = cv2.getRotationMatrix2D((w / 2, h / 2), theta, 1)
    return cv2.warpAffine(image, matrix, (int(w), int(h)), cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT,
                          borderValue=(255, 255, 255))
def estimate_skew(image):
    """Estimate the dominant line angle of *image* in degrees.

    Runs a Hough transform on the Canny edge map and averages the angles of
    lines within 30 degrees of horizontal. Returns 0 when no lines are
    detected or none fall inside that band.
    """
    edges = auto_canny(image)
    lines = cv2.HoughLines(edges, 1, np.pi / 90, 200)
    # Bug fix: cv2.HoughLines returns None (not an empty array) when no
    # lines are found; iterating None raised TypeError.
    if lines is None:
        return 0
    new = edges.copy()
    thetas = []
    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            # keep only angles within +/- 30 degrees of horizontal
            if theta > np.pi / 3 and theta < np.pi * 2 / 3:
                thetas.append(theta)
            new = cv2.line(new, (x1, y1), (x2, y2), (255, 255, 255), 1)
    # Bug fix: the original computed np.mean([]) unconditionally, emitting
    # a RuntimeWarning before falling back to 0.
    if not thetas:
        return 0
    return rad_to_deg(np.mean(thetas))
def compute_skew(theta):
    """Return the rotation needed to correct a measured line angle.

    A perfectly aligned page has lines at theta = 90 degrees; the correction
    is the (negated) deviation from that reference.
    """
    return theta - 90
def process_skew(image):
    """Deskew *image* by rotating it against its estimated skew angle."""
    correction = compute_skew(estimate_skew(image))
    return rotate(image, correction)
| [
"cv2.approxPolyDP",
"cv2.medianBlur",
"cv2.arcLength",
"cv2.imdecode",
"click.echo",
"numpy.ones",
"cv2.bilateralFilter",
"numpy.mean",
"numpy.sin",
"cv2.getRotationMatrix2D",
"cv2.line",
"scipy.ndimage.filters.rank_filter",
"cv2.filter2D",
"cv2.dilate",
"cv2.Canny",
"numpy.minimum",
... | [((227, 255), 'cv2.imdecode', 'cv2.imdecode', (['image_array', '(1)'], {}), '(image_array, 1)\n', (239, 255), False, 'import cv2\n'), ((891, 931), 'cv2.filter2D', 'cv2.filter2D', (['cropped', '(-1)'], {'kernel': 'kernel'}), '(cropped, -1, kernel=kernel)\n', (903, 931), False, 'import cv2\n'), ((936, 975), 'click.echo', 'click.echo', (['"""Returning processed image"""'], {}), "('Returning processed image')\n", (946, 975), False, 'import click\n'), ((1208, 1224), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (1217, 1224), True, 'import numpy as np\n'), ((1325, 1361), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper', '(True)'], {}), '(image, lower, upper, True)\n', (1334, 1361), False, 'import cv2\n'), ((2060, 2111), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(10, 10)'], {}), '(cv2.MORPH_RECT, (10, 10))\n', (2085, 2111), False, 'import cv2\n'), ((2127, 2170), 'cv2.dilate', 'cv2.dilate', (['im', 'kernel', 'dilation_iterations'], {}), '(im, kernel, dilation_iterations)\n', (2137, 2170), False, 'import cv2\n'), ((3858, 3892), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['im', '(9)', '(75)', '(75)'], {}), '(im, 9, 75, 75)\n', (3877, 3892), False, 'import cv2\n'), ((3904, 3928), 'cv2.medianBlur', 'cv2.medianBlur', (['bilat', '(5)'], {}), '(bilat, 5)\n', (3918, 3928), False, 'import cv2\n'), ((4055, 4104), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(1, 1)'], {}), '(cv2.MORPH_RECT, (1, 1))\n', (4080, 4104), False, 'import cv2\n'), ((4119, 4176), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im', 'cv2.MORPH_OPEN', 'structuring_element'], {}), '(im, cv2.MORPH_OPEN, structuring_element)\n', (4135, 4176), False, 'import cv2\n'), ((4194, 4232), 'scipy.ndimage.filters.rank_filter', 'rank_filter', (['opening', '(-4)'], {'size': '(1, 20)'}), '(opening, -4, size=(1, 20))\n', (4205, 4232), False, 'from scipy.ndimage.filters import rank_filter\n'), ((4250, 4288), 
'scipy.ndimage.filters.rank_filter', 'rank_filter', (['opening', '(-4)'], {'size': '(20, 1)'}), '(opening, -4, size=(20, 1))\n', (4261, 4288), False, 'from scipy.ndimage.filters import rank_filter\n'), ((5265, 5306), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'theta', '(1)'], {}), '(center, theta, 1)\n', (5288, 5306), False, 'import cv2\n'), ((5561, 5602), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 90)', '(200)'], {}), '(edges, 1, np.pi / 90, 200)\n', (5575, 5602), False, 'import cv2\n'), ((6154, 6169), 'numpy.mean', 'np.mean', (['thetas'], {}), '(thetas)\n', (6161, 6169), True, 'import numpy as np\n'), ((843, 870), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.float32'], {}), '((5, 5), np.float32)\n', (850, 870), True, 'import numpy as np\n'), ((2261, 2327), 'cv2.findContours', 'cv2.findContours', (['dilation', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2277, 2327), False, 'import cv2\n'), ((2853, 2875), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (2866, 2875), False, 'import cv2\n'), ((2893, 2932), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(sigma * peri)', '(True)'], {}), '(c, sigma * peri, True)\n', (2909, 2932), False, 'import cv2\n'), ((4317, 4348), 'numpy.minimum', 'np.minimum', (['opening', 'maxed_rows'], {}), '(opening, maxed_rows)\n', (4327, 4348), True, 'import numpy as np\n'), ((5715, 5728), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5721, 5728), True, 'import numpy as np\n'), ((5745, 5758), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5751, 5758), True, 'import numpy as np\n'), ((6082, 6135), 'cv2.line', 'cv2.line', (['new', '(x1, y1)', '(x2, y2)', '(255, 255, 255)', '(1)'], {}), '(new, (x1, y1), (x2, y2), (255, 255, 255), 1)\n', (6090, 6135), False, 'import cv2\n')] |
import numpy as np
# Hyperparameters
x = 0.1
noise = 0.1
print("x: %f" % x)
print("noise: %f" % noise)
# Simulated training loss
loss = np.sin(5 * x) * (1 - np.tanh(x ** 2)) + np.random.randn() * noise
print("loss: %f" % loss)
| [
"numpy.sin",
"numpy.tanh",
"numpy.random.randn"
] | [((139, 152), 'numpy.sin', 'np.sin', (['(5 * x)'], {}), '(5 * x)\n', (145, 152), True, 'import numpy as np\n'), ((179, 196), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (194, 196), True, 'import numpy as np\n'), ((160, 175), 'numpy.tanh', 'np.tanh', (['(x ** 2)'], {}), '(x ** 2)\n', (167, 175), True, 'import numpy as np\n')] |
from threading import Thread
import socket
import sys
from time import time, sleep
import numpy as np
TEST_MSG_COUNT = 1000
class Sender(Thread):
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('127.0.0.1', 8125)
try:
num_send = 0
while True:
message = f'{time()}'
sock.sendto(str.encode(message), server_address)
num_send += 1
if num_send % 100 == 0:
print(f"sent {num_send} messages")
sleep(0.01)
if num_send > TEST_MSG_COUNT:
break
finally:
print('closing socket')
sock.close()
class Receiver(Thread):
def run(self):
UDP_IP = "127.0.0.1"
UDP_PORT = 8126
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
latencies = []
while True:
data, addr = sock.recvfrom(8192)
now = time()
then = float(data)
took = now - then
latencies.append(took)
if len(latencies) % 100 == 0:
print(f"received {len(latencies)} messages")
if len(latencies) >= TEST_MSG_COUNT:
print(f"received {TEST_MSG_COUNT} message, exiting")
break
np_latencies = np.array(latencies)
print(f"median = {np.percentile(np_latencies, 50)*1000000} us")
print(f"p95 = {np.percentile(np_latencies, 95)*1000000} us")
def main():
receiver = Receiver()
sender = Sender()
receiver.start()
print("started receiver thread, waiting 5s")
sleep(5)
sender.start()
print("started sender thread, testing in progress")
receiver.join()
sender.join()
print("test completed")
if __name__ == '__main__':
main()
| [
"socket.socket",
"time.time",
"numpy.percentile",
"time.sleep",
"numpy.array"
] | [((1782, 1790), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (1787, 1790), False, 'from time import time, sleep\n'), ((183, 231), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (196, 231), False, 'import socket\n'), ((864, 912), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (877, 912), False, 'import socket\n'), ((1480, 1499), 'numpy.array', 'np.array', (['latencies'], {}), '(latencies)\n', (1488, 1499), True, 'import numpy as np\n'), ((1107, 1113), 'time.time', 'time', ([], {}), '()\n', (1111, 1113), False, 'from time import time, sleep\n'), ((586, 597), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (591, 597), False, 'from time import time, sleep\n'), ((369, 375), 'time.time', 'time', ([], {}), '()\n', (373, 375), False, 'from time import time, sleep\n'), ((1526, 1557), 'numpy.percentile', 'np.percentile', (['np_latencies', '(50)'], {}), '(np_latencies, 50)\n', (1539, 1557), True, 'import numpy as np\n'), ((1595, 1626), 'numpy.percentile', 'np.percentile', (['np_latencies', '(95)'], {}), '(np_latencies, 95)\n', (1608, 1626), True, 'import numpy as np\n')] |
from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D
from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply
from keras.layers import Reshape, Input, Flatten, BatchNormalization
from keras.models import Model
from keras.utils import to_categorical
import keras
import matplotlib.pyplot as plt
'''lib loading error prevention'''
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
'''========================'''
'''tensorflow configuration'''
'''========================'''
import tensorflow as tf
from keras import backend as K
num_cores = 48
num_CPU = 1
num_GPU = 1
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\
inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\
device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
K.set_session(session)
'''scientific packages'''
import numpy as np
import pickle
import datetime
'''load data'''
(sigs, sigs_noisy, idx, sps, ecg_N, base_snr, artifact_snr) = pickle.load(open('dae_ecgsim_stepnoise_500.dat', 'rb'))
'''global parameters'''
sample_len = len(sigs)
input_dim = 1
output_dim = 1
if input_dim < output_dim:
print('input_dim smaller than output_dim, quit task')
stride = output_dim
timestep = 0
# neural params
batch_size = 40
epochs = 200
filter_size = 80
kernel_size = 4
dropout = 0.2
# stagging the signal
x_train = []
for sig in sigs_noisy:
seq_noisy = np.array([sig[i*stride:i*stride+input_dim] for i in range((len(sig)-input_dim)//stride)])
x_train.append(seq_noisy)
labels = []
for idxx in idx:
label = np.ones(np.shape(idxx))
label_start = -1
for m in range(len(idxx)):
if idxx[m] < 0:
# search the first non-negative idxx
continue
else:
# label the precedent points
label_start = idxx[m] - 1
break
for m in range(len(idxx)):
if idxx[m] >= 0:
if idxx[m] == 4:
label_start = -1
else:
label_start = idxx[m]
else:
pass
if idxx[m] == 4:
label[m] = idxx[m]
else:
label[m] = label_start
labels.append(label)
# training y for dae
truth = []
for sig in sigs:
tr = np.array([sig[i*stride+input_dim//2-output_dim//2:i*stride+input_dim//2 - output_dim//2 + output_dim] for i in range( (len(sig)-input_dim)//stride )])
truth.append(tr)
# training y for classify
label_train = []
for label in labels:
y = np.array([label[i*stride+input_dim//2-output_dim//2:i*stride+input_dim//2 - output_dim//2 + output_dim] for i in range( (len(label)-input_dim)//stride )])
y = to_categorical(y, num_classes=6)
label_train.append(y)
'''data test code'''
# plt.plot(idx[0])
# plt.plot(labels[0])
# plt.plot(truth[0])
# plt.show()
# update the timestep
timestep = len(x_train[0])
x_train = np.array(x_train)
label_train = np.array(label_train)
truth = np.array(truth)
'''build neural'''
input = Input(shape=(timestep, input_dim))
commons = input
'''common layers (encoder)'''
'''ConvNN before putting into LSTM'''
if input_dim > kernel_size:
commons = Reshape(target_shape=(timestep, input_dim, 1))(commons)
commons = TimeDistributed(Conv1D(16, kernel_size=kernel_size, data_format='channels_last', activation='relu'))(commons)
commons = TimeDistributed(Conv1D(32, kernel_size=kernel_size, data_format='channels_last', activation='relu'))(commons)
commons = TimeDistributed(Flatten(data_format='channels_last'))(commons)
'''bidirectional LSTM'''
o1 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(commons)
o1 = Dropout(0.2)(o1)
o2 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(o1)
o2 = Add()([o1, o2])
o2 = Dropout(0.2)(o2)
o3 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(o2)
o3 = Add()([o1, o2, o3])
o3 = Dropout(0.2)(o3)
'''attention model for classifier'''
o4 = TimeDistributed(Dense(filter_size*2, activation='relu'))(o3)
o4 = TimeDistributed(Dense(filter_size*2, activation='softmax'))(o4)
o5 = Multiply()([o3, o4])
classifier = o5
classifier = TimeDistributed(Dense(filter_size*2, activation='relu'))(classifier)
classifier = TimeDistributed(Dense(6, activation='softmax'))(classifier)
classifier_model = Model(input, classifier)
'''denoiser (decode)'''
dae = o3
o6 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(dae)
o6 = Dropout(0.2)(o6)
o7 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(o6)
o7 = Add()([o6, o7])
o7 = Dropout(0.2)(o7)
o8 = Bidirectional(CuDNNLSTM(filter_size, return_sequences=True))(o7)
o8 = Add()([o6, o7, o8])
o8 = Dropout(0.2)(o8)
'''attention model for dae'''
o9 = TimeDistributed(Dense(filter_size*2, activation='relu'))(o8)
o9 = TimeDistributed(Dense(filter_size*2, activation='softmax'))(o9)
o10 = Multiply()([o8, o9])
dae = o10
dae = TimeDistributed(Dense(filter_size*2, activation='relu'))(dae)
dae = TimeDistributed(Dense(1, activation='linear'))(dae)
dae_model = Model(input, dae)
print(dae_model.summary())
print(classifier_model.summary())
dae_model.compile(optimizer=keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0), metrics=['mae'], loss='logcosh')
# train the dae model and freeze the weights of the first 3 LSTM layers
for layer in dae_model.layers:
layer.trainable = False
classifier_model.compile(optimizer=keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0), metrics=['accuracy', 'categorical_accuracy'], loss='categorical_crossentropy')
hist_dae = dae_model.fit(x_train[:300], truth[:300], validation_data=(x_train[300:400], truth[300:400]), batch_size=batch_size, epochs=epochs, verbose=1)
hist_classifier = classifier_model.fit(x_train[:300], label_train[:300], validation_data=(x_train[300:400], label_train[300:400]), batch_size=batch_size, epochs=epochs, verbose=1)
idx_ref = idx[400:]
tested = x_train[400:]
sig_truth = truth[400:]
expected_label = label_train[400:]
sig_predicted = dae_model.predict(tested)
label_predicted = classifier_model.predict(tested)
# '''save the results'''
date_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
dae_model.save('multitask_dae_ecgsim' + date_str + '.h5')
classifier_model.save('multitask_classifier_ecgsim'+date_str+'.h5')
hist_name = 'multitask_ecgsim_hist_' + date_str +'.dat'
pickle.dump((hist_dae, hist_classifier), open(hist_name, 'wb'))
plot_idx = 30
pr = [np.argmax(p) for p in label_predicted[plot_idx]]
ex = [np.argmax(p) for p in expected_label[plot_idx]]
plt.plot(pr)
plt.plot(ex)
plt.plot(tested[plot_idx])
plt.plot(sig_truth[plot_idx])
plt.plot(sig_predicted[plot_idx])
plt.plot(idx_ref[plot_idx]) | [
"numpy.argmax",
"keras.models.Model",
"numpy.shape",
"tensorflow.ConfigProto",
"keras.layers.Input",
"keras.layers.Reshape",
"keras.layers.Flatten",
"datetime.datetime.now",
"keras.layers.Multiply",
"keras.utils.to_categorical",
"keras.layers.Dropout",
"keras.backend.set_session",
"tensorflo... | [((678, 854), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': 'num_cores', 'inter_op_parallelism_threads': 'num_cores', 'allow_soft_placement': '(True)', 'device_count': "{'CPU': num_CPU, 'GPU': num_GPU}"}), "(intra_op_parallelism_threads=num_cores,\n inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\n device_count={'CPU': num_CPU, 'GPU': num_GPU})\n", (692, 854), True, 'import tensorflow as tf\n'), ((879, 904), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (889, 904), True, 'import tensorflow as tf\n'), ((905, 927), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (918, 927), True, 'from keras import backend as K\n'), ((2968, 2985), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (2976, 2985), True, 'import numpy as np\n'), ((3000, 3021), 'numpy.array', 'np.array', (['label_train'], {}), '(label_train)\n', (3008, 3021), True, 'import numpy as np\n'), ((3030, 3045), 'numpy.array', 'np.array', (['truth'], {}), '(truth)\n', (3038, 3045), True, 'import numpy as np\n'), ((3075, 3109), 'keras.layers.Input', 'Input', ([], {'shape': '(timestep, input_dim)'}), '(shape=(timestep, input_dim))\n', (3080, 3109), False, 'from keras.layers import Reshape, Input, Flatten, BatchNormalization\n'), ((4363, 4387), 'keras.models.Model', 'Model', (['input', 'classifier'], {}), '(input, classifier)\n', (4368, 4387), False, 'from keras.models import Model\n'), ((5089, 5106), 'keras.models.Model', 'Model', (['input', 'dae'], {}), '(input, dae)\n', (5094, 5106), False, 'from keras.models import Model\n'), ((6613, 6625), 'matplotlib.pyplot.plot', 'plt.plot', (['pr'], {}), '(pr)\n', (6621, 6625), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6638), 'matplotlib.pyplot.plot', 'plt.plot', (['ex'], {}), '(ex)\n', (6634, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6639, 6665), 'matplotlib.pyplot.plot', 'plt.plot', 
(['tested[plot_idx]'], {}), '(tested[plot_idx])\n', (6647, 6665), True, 'import matplotlib.pyplot as plt\n'), ((6666, 6695), 'matplotlib.pyplot.plot', 'plt.plot', (['sig_truth[plot_idx]'], {}), '(sig_truth[plot_idx])\n', (6674, 6695), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6729), 'matplotlib.pyplot.plot', 'plt.plot', (['sig_predicted[plot_idx]'], {}), '(sig_predicted[plot_idx])\n', (6704, 6729), True, 'import matplotlib.pyplot as plt\n'), ((6730, 6757), 'matplotlib.pyplot.plot', 'plt.plot', (['idx_ref[plot_idx]'], {}), '(idx_ref[plot_idx])\n', (6738, 6757), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2784), 'keras.utils.to_categorical', 'to_categorical', (['y'], {'num_classes': '(6)'}), '(y, num_classes=6)\n', (2766, 2784), False, 'from keras.utils import to_categorical\n'), ((3725, 3737), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3732, 3737), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3817, 3822), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3820, 3822), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3838, 3850), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3845, 3850), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3931, 3936), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3934, 3936), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3956, 3968), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3963, 3968), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4151, 4161), 'keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (4159, 4161), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4498, 4510), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', 
(4505, 4510), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4590, 4595), 'keras.layers.Add', 'Add', ([], {}), '()\n', (4593, 4595), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4611, 4623), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4618, 4623), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4704, 4709), 'keras.layers.Add', 'Add', ([], {}), '()\n', (4707, 4709), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4729, 4741), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4736, 4741), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4918, 4928), 'keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (4926, 4928), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((6510, 6522), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (6519, 6522), True, 'import numpy as np\n'), ((6565, 6577), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (6574, 6577), True, 'import numpy as np\n'), ((1674, 1688), 'numpy.shape', 'np.shape', (['idxx'], {}), '(idxx)\n', (1682, 1688), True, 'import numpy as np\n'), ((3238, 3284), 'keras.layers.Reshape', 'Reshape', ([], {'target_shape': '(timestep, input_dim, 1)'}), '(target_shape=(timestep, input_dim, 1))\n', (3245, 3284), False, 'from keras.layers import Reshape, Input, Flatten, BatchNormalization\n'), ((3664, 3709), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, return_sequences=True)\n', (3673, 3709), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3761, 3806), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, 
return_sequences=True)\n', (3770, 3806), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((3875, 3920), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, return_sequences=True)\n', (3884, 3920), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4032, 4073), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""relu"""'}), "(filter_size * 2, activation='relu')\n", (4037, 4073), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4098, 4142), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""softmax"""'}), "(filter_size * 2, activation='softmax')\n", (4103, 4142), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4218, 4259), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""relu"""'}), "(filter_size * 2, activation='relu')\n", (4223, 4259), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4300, 4330), 'keras.layers.Dense', 'Dense', (['(6)'], {'activation': '"""softmax"""'}), "(6, activation='softmax')\n", (4305, 4330), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4441, 4486), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, return_sequences=True)\n', (4450, 4486), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4534, 4579), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, return_sequences=True)\n', (4543, 4579), False, 'from keras.layers import 
Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4648, 4693), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['filter_size'], {'return_sequences': '(True)'}), '(filter_size, return_sequences=True)\n', (4657, 4693), False, 'from keras.layers import Bidirectional, CuDNNLSTM, Dropout, LSTM, Add, Conv2D, Multiply\n'), ((4798, 4839), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""relu"""'}), "(filter_size * 2, activation='relu')\n", (4803, 4839), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4864, 4908), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""softmax"""'}), "(filter_size * 2, activation='softmax')\n", (4869, 4908), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((4972, 5013), 'keras.layers.Dense', 'Dense', (['(filter_size * 2)'], {'activation': '"""relu"""'}), "(filter_size * 2, activation='relu')\n", (4977, 5013), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((5040, 5069), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (5045, 5069), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((5198, 5266), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.001)', 'rho': '(0.9)', 'epsilon': 'None', 'decay': '(0.0)'}), '(lr=0.001, rho=0.9, epsilon=None, decay=0.0)\n', (5222, 5266), False, 'import keras\n'), ((5468, 5536), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.001)', 'rho': '(0.9)', 'epsilon': 'None', 'decay': '(0.0)'}), '(lr=0.001, rho=0.9, epsilon=None, decay=0.0)\n', (5492, 5536), False, 'import keras\n'), ((6187, 6210), 'datetime.datetime.now', 'datetime.datetime.now', 
([], {}), '()\n', (6208, 6210), False, 'import datetime\n'), ((3324, 3412), 'keras.layers.Conv1D', 'Conv1D', (['(16)'], {'kernel_size': 'kernel_size', 'data_format': '"""channels_last"""', 'activation': '"""relu"""'}), "(16, kernel_size=kernel_size, data_format='channels_last', activation\n ='relu')\n", (3330, 3412), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((3448, 3536), 'keras.layers.Conv1D', 'Conv1D', (['(32)'], {'kernel_size': 'kernel_size', 'data_format': '"""channels_last"""', 'activation': '"""relu"""'}), "(32, kernel_size=kernel_size, data_format='channels_last', activation\n ='relu')\n", (3454, 3536), False, 'from keras.layers import ConvLSTM2D, Dense, Conv1D, TimeDistributed, BatchNormalization, MaxPooling2D, MaxPooling1D\n'), ((3572, 3608), 'keras.layers.Flatten', 'Flatten', ([], {'data_format': '"""channels_last"""'}), "(data_format='channels_last')\n", (3579, 3608), False, 'from keras.layers import Reshape, Input, Flatten, BatchNormalization\n')] |
import numpy as np
import scipy.sparse
import used.unused.softmax as softmax
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
return sigmoid(x) * (1 - sigmoid(x))
def stack2params(stack):
"""
Converts a "stack" structure into a flattened parameter vector and also
stores the network configuration. This is useful when working with
optimization toolboxes such as minFunc.
[params, netconfig] = stack2params(stack)
stack - the stack structure, where stack{1}.w = weights of first layer
stack{1}.b = weights of first layer
stack{2}.w = weights of second layer
stack{2}.b = weights of second layer
... etc.
:param stack: the stack structure
:return: params: flattened parameter vector
:return: net_config: aux. variable with network structure
"""
params = []
for s in stack:
params.append(s['w'].flatten())
params.append(s['b'].flatten())
params = np.concatenate(params)
net_config = {}
if len(stack) == 0:
net_config['input_size'] = 0
net_config['layer_sizes'] = []
else:
net_config['input_size'] = stack[0]['w'].shape[1]
net_config['layer_sizes'] = []
for s in stack:
net_config['layer_sizes'].append(s['w'].shape[0])
return params, net_config
def params2stack(params, net_config):
"""
Converts a flattened parameter vector into a nice "stack" structure
for us to work with. This is useful when you're building multilayer
networks.
stack = params2stack(params, netconfig)
:param params: flattened parameter vector
:param net_config: aux. variable containing network config.
:return: stack structure (see above)
"""
# Map the params (a vector into a stack of weights)
depth = len(net_config['layer_sizes'])
stack = [dict() for i in range(depth)]
prev_layer_size = net_config['input_size']
current_pos = 0
for i in range(depth):
# Extract weights
wlen = prev_layer_size * net_config['layer_sizes'][i]
stack[i]['w'] = params[current_pos:current_pos + wlen].reshape(net_config['layer_sizes'][i], prev_layer_size)
current_pos = current_pos + wlen
# Extract bias
blen = net_config['layer_sizes'][i]
stack[i]['b'] = params[current_pos:current_pos + blen]
current_pos = current_pos + blen
# Set previous layer size
prev_layer_size = net_config['layer_sizes'][i]
return stack
def stacked_autoencoder_cost(theta, input_size, hidden_size, num_classes,
net_config, lambda_, data, labels):
"""
Takes a trained softmax_theta and a training data set with labels
and returns cost and gradient using stacked autoencoder model.
Used only for finetuning
:param theta: trained weights from the autoencoder
:param input_size: the number of input units
:param hidden_size: the number of hidden units (at the layer before softmax)
:param num_classes: number of categories
:param net_config: network configuration of the stack
:param lambda_: weight regularization penalty
:param data: matrix containing data as columns. data[:,i-1] is i-th example
:param labels: vector containing labels, labels[i-1] is the label for i-th example
"""
## Unroll softmax_theta parameter
# We first extract the part which compute the softmax gradient
softmax_theta = theta[0:hidden_size * num_classes].reshape(num_classes, hidden_size)
# Extract out the "stack"
stack = params2stack(theta[hidden_size * num_classes:], net_config)
m = data.shape[1]
# Forward propagation
a = [data]
z = [np.array(0)] # Dummy value
for s in stack:
z.append(s['w'].dot(a[-1]) + np.tile(s['b'], (m, 1)).transpose())
a.append(sigmoid(z[-1]))
# Softmax
prod = softmax_theta.dot(a[-1])
prod = prod - np.max(prod)
prob = np.exp(prod) / np.sum(np.exp(prod), axis=0)
indicator = scipy.sparse.csr_matrix((np.ones(m), (labels, np.array(range(m)))))
indicator = np.array(indicator.todense())
cost = (-1 / float(m)) * np.sum(indicator * np.log(prob)) + (lambda_ / 2) * np.sum(softmax_theta * softmax_theta)
softmax_grad = (-1 / float(m)) * (indicator - prob).dot(a[-1].transpose()) + lambda_ * softmax_theta
# Backprop
# Compute partial of cost (J) w.r.t to outputs of last layer (before softmax)
softmax_grad_a = softmax_theta.transpose().dot(indicator - prob)
# Compute deltas
delta = [-softmax_grad_a * sigmoid_prime(z[-1])]
for i in reversed(range(len(stack))):
d = stack[i]['w'].transpose().dot(delta[0]) * sigmoid_prime(z[i])
delta.insert(0, d)
# Compute gradients
stack_grad = [dict() for i in range(len(stack))]
for i in range(len(stack_grad)):
stack_grad[i]['w'] = delta[i + 1].dot(a[i].transpose()) / m
stack_grad[i]['b'] = np.sum(delta[i + 1], axis=1) / m
grad_params, net_config = stack2params(stack_grad)
grad = np.concatenate((softmax_grad.flatten(), grad_params))
return cost, grad
def stacked_autoencoder_predict(theta, input_size, hidden_size, num_classes, net_config, data):
"""
Takes a trained theta and a test data set,
and returns the predicted labels for each example
:param theta: trained weights from the autoencoder
:param input_size: the number of input units
:param hidden_size: the number of hidden units at the layer before softmax
:param num_classes: the number of categories
:param netconfig: network configuration of the stack
:param data: the matrix containing the training data as columsn. data[:,i-1] is the i-th training example
:return:
Your code should produce the prediction matrix
pred, where pred(i) is argmax_c P(y(c) | x(i)).
"""
## Unroll theta parameter
# We first extract the part which compute the softmax gradient
softmax_theta = theta[0:hidden_size * num_classes].reshape(num_classes, hidden_size)
# Extract out the "stack"
stack = params2stack(theta[hidden_size * num_classes:], net_config)
m = data.shape[1]
# Compute predictions
a = [data]
z = [np.array(0)] # Dummy value
# Sparse Autoencoder Computation
for s in stack:
z.append(s['w'].dot(a[-1]) + np.tile(s['b'], (m, 1)).transpose())
a.append(sigmoid(z[-1]))
# Softmax
pred = softmax.softmax_predict((softmax_theta, hidden_size, num_classes), a[-1])
return pred | [
"used.unused.softmax.softmax_predict",
"numpy.sum",
"numpy.log",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.tile",
"numpy.concatenate"
] | [((1104, 1126), 'numpy.concatenate', 'np.concatenate', (['params'], {}), '(params)\n', (1118, 1126), True, 'import numpy as np\n'), ((6585, 6658), 'used.unused.softmax.softmax_predict', 'softmax.softmax_predict', (['(softmax_theta, hidden_size, num_classes)', 'a[-1]'], {}), '((softmax_theta, hidden_size, num_classes), a[-1])\n', (6608, 6658), True, 'import used.unused.softmax as softmax\n'), ((3850, 3861), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3858, 3861), True, 'import numpy as np\n'), ((4075, 4087), 'numpy.max', 'np.max', (['prod'], {}), '(prod)\n', (4081, 4087), True, 'import numpy as np\n'), ((4099, 4111), 'numpy.exp', 'np.exp', (['prod'], {}), '(prod)\n', (4105, 4111), True, 'import numpy as np\n'), ((6366, 6377), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (6374, 6377), True, 'import numpy as np\n'), ((116, 126), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (122, 126), True, 'import numpy as np\n'), ((4121, 4133), 'numpy.exp', 'np.exp', (['prod'], {}), '(prod)\n', (4127, 4133), True, 'import numpy as np\n'), ((4184, 4194), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (4191, 4194), True, 'import numpy as np\n'), ((4354, 4391), 'numpy.sum', 'np.sum', (['(softmax_theta * softmax_theta)'], {}), '(softmax_theta * softmax_theta)\n', (4360, 4391), True, 'import numpy as np\n'), ((5094, 5122), 'numpy.sum', 'np.sum', (['delta[i + 1]'], {'axis': '(1)'}), '(delta[i + 1], axis=1)\n', (5100, 5122), True, 'import numpy as np\n'), ((4322, 4334), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (4328, 4334), True, 'import numpy as np\n'), ((3936, 3959), 'numpy.tile', 'np.tile', (["s['b']", '(m, 1)'], {}), "(s['b'], (m, 1))\n", (3943, 3959), True, 'import numpy as np\n'), ((6489, 6512), 'numpy.tile', 'np.tile', (["s['b']", '(m, 1)'], {}), "(s['b'], (m, 1))\n", (6496, 6512), True, 'import numpy as np\n')] |
import logging
from random import shuffle
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from typing import Optional, List, Callable, Tuple
from frag_gt.src.afp import calculate_alignment_similarity_scores
logger = logging.getLogger(__name__)
class FragSampler:
    """
    This class provides functionality to reorder an input list of fragments according to a desired distribution.
    It has 3 primary stages: SCORE -> MODIFY -> CHOOSE
    SCORE
        Method by which to score the candidate fragments for replacement
        - "afps" uses the quality of alignment by afps to score fragments (slower than the others)
        - "counts" uses a prior based on the prevalence of fragments in the corpus used to generate the
          fragment database (can also be used to supply any external score)
        - "random" ignores the scoring aspect of this function and returns nan as the scores
        - "ecfp4" ranks candidate replacement fragments from the fragstore according to similarity to the query
    MODIFY
        - A modifier can be applied to the counts, valid modifiers can be found at the link below (e.g. np.log):
        - http://seismo.berkeley.edu/~kirchner/eps_120/Toolkits/Toolkit_03.pdf
    CHOOSE
        The desired number of fragments are returned (n_choices)
        - either deterministically using the highest score (stochastic=False)
        - Or stochastically using the scores to form probabilities and by choosing using np.random.choice
    """
    def __init__(self,
                 scorer: str = "random",
                 modifier: Optional[Callable[[List], List]] = None,
                 stochastic: Optional[bool] = False):
        # one of "counts", "ecfp4", "afps", "random"; validated lazily in __call__
        self.scorer = scorer
        # optional transform applied to the raw scores before choosing (e.g. np.log)
        self.modifier = modifier
        # True: sample indices with probability proportional to score;
        # False: take the top-n_choices by score deterministically
        self.stochastic = stochastic
        logger.info(f"fragment sampler initialised: scorer={scorer}, modifier={modifier}, stochastic={stochastic}")
    def __call__(self,
                 gene_frag_list: List[Tuple[str, int]],
                 n_choices: int = -1,
                 query_frag: Chem.rdchem.Mol = None,
                 ) -> Tuple[List[str], List[float]]:
        """
        Args:
            gene_frag_list: [("[2*]Cc1cc(O)cc(O[4*])c1", 2), ("[2*]CC(=N[4*])C(C)(C)C", 8), ("[2*]CC(N[4*])C(C)C", 1)]
            n_choices: number of fragments to return or -1 for entire list (with scores)
            query_frag: (optional) mol to guide scoring (not used by "counts" or "random")
        Returns:
            list of smiles, list of floating point "scores" whose interpretation depends on the type of scorer used
        """
        # Unzip list of tuples retrieved from fragstore
        smiles, counts = zip(*gene_frag_list)
        # Determine how many molecules to return
        n_smiles = len(smiles)
        if n_choices == -1:
            n_choices = n_smiles
        # (1) SCORE
        if self.scorer == "counts":
            # determine the probability of sampling molecule proportional to count in corpus
            scores = counts
        elif self.scorer == "ecfp4":
            # determine the probability of sampling molecule proportional to ecfp4 similarity to query frag
            assert query_frag is not None, "Must specify `query_frag` argument if using the ecfp4 scorer to sample"
            scores = score_with_fingerprints(query_frag, smiles)
        elif self.scorer == "afps":
            # use the alignment score to sort mols returned
            assert query_frag is not None, "Must specify `query_frag` argument if using the afp scorer to sample"
            try:
                scores = calculate_alignment_similarity_scores(query_frag, list(smiles))
            except AssertionError as e:
                # afp scoring requires attachment points; fall back to ecfp4-style
                # scoring for attachment-free queries, re-raise anything else
                if str(e) == "query must have attachments":
                    # if query has no attachment points, score with fingerprints
                    scores = score_with_fingerprints(query_frag, smiles)
                else:
                    raise AssertionError(e)
        elif self.scorer == "random":
            # no meaningful scores; shuffle in place so order is random
            scores = np.full(len(smiles), np.nan)
            smiles = list(smiles)
            shuffle(smiles)
        else:
            raise ValueError(f"requested scorer for sampling not recognised: {self.scorer}")
        # (2) MODIFY
        # if modifier is provided, apply to scores
        if self.modifier is not None:
            scores = self.modifier(scores)
        # (3) CHOOSE
        # choose idxs
        if self.stochastic:
            if self.scorer == "random":
                # smiles were already shuffled above: just take the first n
                idx_lst = range(n_choices)
            else:
                # normalise scores into a probability distribution
                total = np.sum(scores)
                probabilities = np.array([float(score) / total for score in np.array(scores)])
                try:
                    idx_lst = np.random.choice(n_smiles, n_choices, p=probabilities, replace=False)
                except ValueError as e:
                    if str(e) == "Fewer non-zero entries in p than size":
                        # if n_choices > num non zero, pick non zeros first
                        num_non_zero = len(np.where(probabilities > 0)[0])
                        idx_lst = np.random.choice(n_smiles, num_non_zero, p=probabilities, replace=False)
                        n_remaining = n_choices - num_non_zero
                        # NOTE(review): `range(n_choices)` looks like it should be
                        # `range(n_smiles)` -- as written, the zero-probability
                        # fallback only draws from the first n_choices indices.
                        # TODO confirm intended behaviour.
                        remaining_to_choose_from = np.array(list(set(range(n_choices)) - set(idx_lst)))
                        idx_lst2 = np.random.choice(remaining_to_choose_from, n_remaining, replace=False)
                        idx_lst = np.concatenate([idx_lst, idx_lst2])
                    else:
                        raise ValueError(e)
            # get smiles and scores that have been sampled by np.random.choice
            # todo should this actually return genes in the tuple format to match the input?
            smiles = [smiles[i] for i in idx_lst]
            scores = [scores[i] for i in idx_lst]
        else:
            # return smiles according to decreasing score (deterministically)
            # todo dont we want to sort the stochastic samples too|?
            sorted_tuples = sorted(zip(smiles, scores), key=lambda t: t[1], reverse=True)[:n_choices]
            smiles = [s for s, sc in sorted_tuples]
            scores = [sc for s, sc in sorted_tuples]
        return smiles, scores
def score_with_fingerprints(query_mol, smiles_list):
    """Score candidate SMILES by Morgan (radius 2, 512-bit) Tanimoto similarity
    to `query_mol`. SMILES that fail to parse receive a score of NaN.
    """
    query_fp = AllChem.GetMorganFingerprintAsBitVect(query_mol, 2, nBits=512)
    scores = np.zeros(len(smiles_list))
    for idx, smi in enumerate(smiles_list):
        mol = Chem.MolFromSmiles(smi)
        if mol is None:
            # unparseable SMILES: flag with NaN rather than 0 similarity
            scores[idx] = np.nan
        else:
            candidate_fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=512)
            scores[idx] = DataStructs.TanimotoSimilarity(query_fp, candidate_fp)
    return scores
| [
"numpy.sum",
"numpy.concatenate",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"random.shuffle",
"numpy.where",
"numpy.array",
"numpy.random.choice",
"rdkit.Chem.MolFromSmiles",
"logging.getLogger"
] | [((274, 301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'import logging\n'), ((6444, 6506), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['query_mol', '(2)'], {'nBits': '(512)'}), '(query_mol, 2, nBits=512)\n', (6481, 6506), False, 'from rdkit.Chem import AllChem\n'), ((6559, 6580), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (6577, 6580), False, 'from rdkit import Chem\n'), ((4645, 4659), 'numpy.sum', 'np.sum', (['scores'], {}), '(scores)\n', (4651, 4659), True, 'import numpy as np\n'), ((6705, 6759), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['m', '(2)'], {'nBits': '(512)'}), '(m, 2, nBits=512)\n', (6742, 6759), False, 'from rdkit.Chem import AllChem\n'), ((4806, 4875), 'numpy.random.choice', 'np.random.choice', (['n_smiles', 'n_choices'], {'p': 'probabilities', 'replace': '(False)'}), '(n_smiles, n_choices, p=probabilities, replace=False)\n', (4822, 4875), True, 'import numpy as np\n'), ((4170, 4185), 'random.shuffle', 'shuffle', (['smiles'], {}), '(smiles)\n', (4177, 4185), False, 'from random import shuffle\n'), ((4736, 4752), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4744, 4752), True, 'import numpy as np\n'), ((5175, 5247), 'numpy.random.choice', 'np.random.choice', (['n_smiles', 'num_non_zero'], {'p': 'probabilities', 'replace': '(False)'}), '(n_smiles, num_non_zero, p=probabilities, replace=False)\n', (5191, 5247), True, 'import numpy as np\n'), ((5450, 5520), 'numpy.random.choice', 'np.random.choice', (['remaining_to_choose_from', 'n_remaining'], {'replace': '(False)'}), '(remaining_to_choose_from, n_remaining, replace=False)\n', (5466, 5520), True, 'import numpy as np\n'), ((5555, 5590), 'numpy.concatenate', 'np.concatenate', (['[idx_lst, idx_lst2]'], {}), '([idx_lst, idx_lst2])\n', (5569, 5590), True, 'import numpy as np\n'), ((5109, 5136), 
'numpy.where', 'np.where', (['(probabilities > 0)'], {}), '(probabilities > 0)\n', (5117, 5136), True, 'import numpy as np\n')] |
import numpy as np
import torch
def move_data_to_device(x, device):
    """Convert array-like `x` to a torch tensor on `device`.

    Float dtypes become float32 tensors, integer dtypes become int64
    (Long) tensors; anything else is returned unchanged.
    """
    dtype_name = str(x.dtype)
    if 'float' in dtype_name:
        tensor = torch.Tensor(x)
    elif 'int' in dtype_name:
        tensor = torch.LongTensor(x)
    else:
        # unsupported dtype (e.g. strings): pass through untouched
        return x
    return tensor.to(device)
def do_mixup(x, mixup_lambda):
    """Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
    (1, 3, 5, ...).
    Args:
      x: (batch_size * 2, ...)
      mixup_lambda: (batch_size * 2,)
    Returns:
      out: (batch_size, ...)
    """
    # transpose so the batch axis is last, letting the 1-D lambda broadcast
    even_part = x[0::2].transpose(0, -1) * mixup_lambda[0::2]
    odd_part = x[1::2].transpose(0, -1) * mixup_lambda[1::2]
    return (even_part + odd_part).transpose(0, -1)
def append_to_dict(dict, key, value):
    """Append `value` to the list stored under `key`, creating the list first
    if the key is absent.

    Note: the parameter name `dict` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    # setdefault replaces the original `key in dict.keys()` test + index,
    # doing a single lookup instead of two
    dict.setdefault(key, []).append(value)
def forward(model, generator, return_input=False, 
    return_target=False):
    """Forward data to a model.
    Args:
      model: object
      generator: object
      return_input: bool
      return_target: bool
    Returns:
      audio_name: (audios_num,)
      clipwise_output: (audios_num, classes_num)
      (ifexist) segmentwise_output: (audios_num, segments_num, classes_num)
      (ifexist) framewise_output: (audios_num, frames_num, classes_num)
      (optional) return_input: (audios_num, segment_samples)
      (optional) return_target: (audios_num, classes_num)
    """
    output_dict = {}
    # run on whichever device the model's parameters live on
    device = next(model.parameters()).device
    # Forward data to a model in mini-batches
    for n, batch_data_dict in enumerate(generator):
        # NOTE(review): debug batch-index print left in -- consider removing
        # or replacing with logging before production use
        print(n)
        batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
        # inference only: disable autograd and put the model in eval mode
        with torch.no_grad():
            model.eval()
            batch_output = model(batch_waveform)
        append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
        append_to_dict(output_dict, 'clipwise_output', 
            batch_output['clipwise_output'].data.cpu().numpy())
        if return_input:
            append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
        if return_target:
            # targets may be absent (e.g. unlabeled evaluation data)
            if 'target' in batch_data_dict.keys():
                append_to_dict(output_dict, 'target', batch_data_dict['target'])
    # concatenate the per-batch lists into single arrays along the batch axis
    for key in output_dict.keys():
        output_dict[key] = np.concatenate(output_dict[key], axis=0)
    return output_dict
def interpolate(x, ratio):
    """Interpolate data in time domain. This is used to compensate the
    resolution reduction in downsampling of a CNN.
    Args:
      x: (batch_size, time_steps, classes_num)
      ratio: int, ratio to interpolate
    Returns:
      upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    batch_size, time_steps, classes_num = x.shape
    # insert a new axis, repeat each frame `ratio` times, then flatten it
    # back into the time dimension
    repeated = x[:, :, None, :].repeat(1, 1, ratio, 1)
    return repeated.reshape(batch_size, time_steps * ratio, classes_num)
def pad_framewise_output(framewise_output, frames_num):
    """Pad framewise_output to the same length as input frames. The pad value
    is the same as the value of the last frame.
    Args:
      framewise_output: (batch_size, frames_num, classes_num)
      frames_num: int, number of frames to pad
    Outputs:
      output: (batch_size, frames_num, classes_num)
    """
    # repeat the final frame to fill the missing (frames_num - current) frames
    pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
    """tensor for padding"""
    # append the padding along the time axis
    output = torch.cat((framewise_output, pad), dim=1)
    """(batch_size, frames_num, classes_num)"""
    return output | [
"torch.LongTensor",
"torch.cat",
"torch.Tensor",
"torch.no_grad",
"numpy.concatenate"
] | [((3390, 3431), 'torch.cat', 'torch.cat', (['(framewise_output, pad)'], {'dim': '(1)'}), '((framewise_output, pad), dim=1)\n', (3399, 3431), False, 'import torch\n'), ((114, 129), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (126, 129), False, 'import torch\n'), ((2274, 2314), 'numpy.concatenate', 'np.concatenate', (['output_dict[key]'], {'axis': '(0)'}), '(output_dict[key], axis=0)\n', (2288, 2314), True, 'import numpy as np\n'), ((174, 193), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (190, 193), False, 'import torch\n'), ((1652, 1667), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1665, 1667), False, 'import torch\n')] |
# 走迷宫
# 注意,为了和屏幕坐标系一致,maze[x,y]和我们一般的矩阵的行列是反的
from typing import Tuple
from easygraphics import * # 使用easygraphics绘图
import numpy as np # 使用numpy的ndarray来保存迷宫信息
import scanf
BRICK_WIDTH = 25 # 迷宫每格宽度
BRICK_HEIGHT = 25 # 迷宫每格高度
WALL_COLOR = Color.DARK_RED
WAY_COLOR = Color.LIGHT_GRAY
WAY_VISITED_COLOR = Color.DARK_GRAY
# 向四个方向走的位置(坐标)变化
step_x=[0,-1,0,1]
step_y=[-1,0,1,0]
class Maze:
    """Maze state: the grid, entrance/exit coordinates, walk history and
    pixel dimensions of the drawable area.
    """
    def __init__(self,maze:np.ndarray, entrance: Tuple[int], exit: Tuple[int]):
        self.maze = maze
        # entrance cell coordinates
        self.entrance_x, self.entrance_y = entrance
        # exit cell coordinates
        self.exit_x, self.exit_y = exit
        # cells visited so far, in walking order
        self.history = []
        rows, cols = maze.shape  # rows/columns of the grid
        # pixel size of the rendered maze
        self.width = cols * BRICK_WIDTH
        self.height = rows * BRICK_HEIGHT
def draw_brick(x,y):
    """
    Draw a single maze cell as a rectangle.
    :param x: left edge in screen pixels
    :param y: top edge in screen pixels
    """
    draw_rect(x, y, x + BRICK_WIDTH, y + BRICK_HEIGHT)
def draw_maze(maze:Maze):
    """
    Render the maze grid (walls, open paths, visited cells) and the
    current walk history.
    :param maze: the maze to draw
    """
    set_background_color(Color.WHITE)
    clear_device()
    n,m = maze.maze.shape # n and m are the row and column counts of the grid
    # top-left corner of the maze (outer wall excluded), centred on screen
    start_x = (get_width() - maze.width) //2
    start_y = (get_height() - maze.height) // 2
    # draw every cell of the maze
    set_color(Color.WHITE)
    for i in range(0,n):
        for j in range(0,m):
            if maze.maze[i,j] == 1: # wall
                set_fill_color(WALL_COLOR)
            elif maze.maze[i,j] == 0: # open path
                set_fill_color(WAY_COLOR)
            else:
                # any other value (2) marks an already-visited path cell
                set_fill_color(WAY_VISITED_COLOR)
            x = start_x + i*BRICK_WIDTH
            y = start_y + j*BRICK_HEIGHT
            draw_brick(x,y)
    # draw the walk history as black dots at the centre of each visited cell
    set_fill_color(Color.BLACK)
    for i in range(len(maze.history)):
        x = start_x + maze.history[i][0] * BRICK_WIDTH + BRICK_WIDTH // 2
        y = start_y + maze.history[i][1] * BRICK_HEIGHT + BRICK_HEIGHT // 2
        fill_circle(x, y, 10)
def loadmaze(filepath):
    """
    Load a maze from a data file.

    File layout:
        line 1:   "n,m"  -- row and column counts
        lines 2..n+1: comma-separated 0/1 cells (one file row per maze row)
        next line: "x,y" entrance coordinates
        next line: "x,y" exit coordinates

    Note: the file's rows/columns are transposed relative to screen
    coordinates, so the grid is stored as maze[x, y].

    :param filepath: path of the data file
    :return: the loaded Maze
    """
    with open(filepath, mode="r") as file:
        line = file.readline().strip()
        n, m = scanf.scanf("%d,%d", line)
        # transpose: file rows become the y axis, columns the x axis
        maze = np.zeros((m, n), dtype='int')
        y = 0
        while y < n:
            datas = file.readline().strip().split(",")
            if len(datas) != m:
                raise ValueError(f"第{y+1}行迷宫数据有误,应该有{m}列,但实际有{len(datas)}列")
            for x in range(m):
                maze[x, y] = ord(datas[x]) - ord('0')
            y += 1
        line = file.readline().strip()
        entrance = scanf.scanf("%d,%d", line)
        line = file.readline().strip()
        # renamed from `exit` (shadowed the builtin) -- behaviour unchanged
        exit_pos = scanf.scanf("%d,%d", line)
    # build the Maze directly instead of reusing `m` for a second meaning
    return Maze(maze, entrance, exit_pos)
def can_go(maze:Maze, x:int, y:int):
    """
    Return True iff cell (x, y) is inside the grid and is an unvisited
    open path (cell value 0).
    :param maze: the maze being walked
    :param x: candidate cell x coordinate
    :param y: candidate cell y coordinate
    :return: bool
    """
    n, m = maze.maze.shape
    # reject out-of-bounds coordinates first
    inside = 0 <= x < n and 0 <= y < m
    # only cells still marked 0 (open, unvisited) are walkable
    return inside and maze.maze[x, y] == 0
# NOTE(review): these module-level lists appear unused -- the walk history is
# kept on Maze.history instead. Candidates for removal; verify before deleting.
history_x=[]
history_y=[]
def try_solve(maze:Maze, x:int, y:int, step:int):
    """
    Depth-first recursive maze walk starting from cell (x, y).
    :param maze: maze state (grid, exit coordinates, walk history)
    :param x: current cell x coordinate
    :param y: current cell y coordinate
    :param step: number of steps taken so far
    :return: True if the exit was reached from here, False otherwise
    """
    # record the current position in the walk history
    maze.history.append((x, y))
    # mark the cell as visited (value 2)
    maze.maze[x][y]=2
    # redraw the maze to animate the search
    draw_maze(maze)
    delay(100)
    # reached the exit: done
    if x==maze.exit_x and y ==maze.exit_y:
        return True
    # try walking in each of the four directions
    for i in range(4):
        next_x = x + step_x[i]
        next_y = y + step_y[i]
        if can_go(maze,next_x,next_y):
            # neighbour is walkable: recurse from there
            found=try_solve(maze,next_x,next_y,step+1)
            # exit already found -- stop trying alternatives
            if found:
                return True
    # every direction from this cell failed: backtrack and remove it
    # from the walk history
    maze.history.pop()
    return False
def solve(maze:Maze):
    """
    Solve the maze with a depth-first search from the entrance.
    :param maze: the maze to solve
    :return: True if a path to the exit was found
    """
    start_x, start_y = maze.entrance_x, maze.entrance_y
    return try_solve(maze, start_x, start_y, 0)
def main():
    """Entry point: create the window, load and show the maze, then
    animate the solution. Each pause() waits for a key/mouse press."""
    init_graph(800,600)
    set_render_mode(RenderMode.RENDER_MANUAL)
    maze=loadmaze("test.maze")
    draw_maze(maze)
    pause()
    solve(maze)
    pause()
easy_run(main) | [
"scanf.scanf",
"numpy.zeros"
] | [((2098, 2124), 'scanf.scanf', 'scanf.scanf', (['"""%d,%d"""', 'line'], {}), "('%d,%d', line)\n", (2109, 2124), False, 'import scanf\n'), ((2224, 2253), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': '"""int"""'}), "((m, n), dtype='int')\n", (2232, 2253), True, 'import numpy as np\n'), ((2602, 2628), 'scanf.scanf', 'scanf.scanf', (['"""%d,%d"""', 'line'], {}), "('%d,%d', line)\n", (2613, 2628), False, 'import scanf\n'), ((2682, 2708), 'scanf.scanf', 'scanf.scanf', (['"""%d,%d"""', 'line'], {}), "('%d,%d', line)\n", (2693, 2708), False, 'import scanf\n')] |
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ddsp.training.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ddsp.training import nn
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class SplitToDictTest(tf.test.TestCase):
  """Tests for nn.split_to_dict."""

  def test_output_is_correct(self):
    """split_to_dict should split the last axis per the (name, size) spec."""
    # (name, channel-count) pairs describing how to split the last axis
    tensor_splits = (('x1', 1), ('x2', 2), ('x3', 3))
    # three constant blocks, distinguishable by value (1.0, 2.0, 3.0)
    x1 = np.zeros((2, 3, 1), dtype=np.float32) + 1.0
    x2 = np.zeros((2, 3, 2), dtype=np.float32) + 2.0
    x3 = np.zeros((2, 3, 3), dtype=np.float32) + 3.0
    x = tf.constant(np.concatenate([x1, x2, x3], axis=2))
    output = nn.split_to_dict(x, tensor_splits)
    # tf1 graph mode: evaluate the op inside a session
    with self.cached_session() as sess:
      signal_dict = sess.run(output)
      self.assertSetEqual(set(['x1', 'x2', 'x3']), set(signal_dict.keys()))
      self.assertAllEqual(x1, signal_dict.get('x1'))
      self.assertAllEqual(x2, signal_dict.get('x2'))
      self.assertAllEqual(x3, signal_dict.get('x3'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"numpy.zeros",
"tensorflow.compat.v1.test.main",
"ddsp.training.nn.split_to_dict",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.concatenate"
] | [((829, 853), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (851, 853), True, 'import tensorflow.compat.v1 as tf\n'), ((1590, 1604), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (1602, 1604), True, 'import tensorflow.compat.v1 as tf\n'), ((1219, 1253), 'ddsp.training.nn.split_to_dict', 'nn.split_to_dict', (['x', 'tensor_splits'], {}), '(x, tensor_splits)\n', (1235, 1253), False, 'from ddsp.training import nn\n'), ((997, 1034), 'numpy.zeros', 'np.zeros', (['(2, 3, 1)'], {'dtype': 'np.float32'}), '((2, 3, 1), dtype=np.float32)\n', (1005, 1034), True, 'import numpy as np\n'), ((1050, 1087), 'numpy.zeros', 'np.zeros', (['(2, 3, 2)'], {'dtype': 'np.float32'}), '((2, 3, 2), dtype=np.float32)\n', (1058, 1087), True, 'import numpy as np\n'), ((1103, 1140), 'numpy.zeros', 'np.zeros', (['(2, 3, 3)'], {'dtype': 'np.float32'}), '((2, 3, 3), dtype=np.float32)\n', (1111, 1140), True, 'import numpy as np\n'), ((1167, 1203), 'numpy.concatenate', 'np.concatenate', (['[x1, x2, x3]'], {'axis': '(2)'}), '([x1, x2, x3], axis=2)\n', (1181, 1203), True, 'import numpy as np\n')] |
from os.path import dirname, join
from scipy.io import wavfile
import numpy as np
import mel
from sklearn.preprocessing import MinMaxScaler
# the model was trained in tf1
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras.models import load_model
tf.disable_v2_behavior()
def load_sound(filename):
    """
    Read a .wav file and return its sampling rate and transposed samples.

    Args:
        filename(str): path of the .wav file
    Returns:
        sampling_rate(int): sampling rate in Hz
        samples(ndarray): transposed sample array
    """
    sampling_rate, samples = wavfile.read(filename)
    transposed = samples.T
    # guard against degenerate clips; note this also trips for multi-channel
    # data, where the transposed first axis is the channel count
    assert len(transposed) > 4
    return sampling_rate, transposed
def divide_single_wave_into_smaller_chunk(output_duration=3, wave=None, sampling_rate=None, shift=0):
    '''
    Divide single wave into consecutive chunks of output_duration seconds
    Args:
        output_duration(int): number of seconds of an output chunk
        wave(ndarray): sound wave
        sampling_rate(int): sampling rate in Hz
        shift(float): initial offset in seconds skipped before chunking
    Returns:
        wave_chunks(list(ndarray)): consecutive chunks; the final chunk may
        be up to 2 samples shorter than output_duration * sampling_rate
    '''
    shift_abs = int(sampling_rate * shift)
    chunk_length = sampling_rate * output_duration
    # allow the final chunk to be at most 2 samples short
    min_length = output_duration * sampling_rate - 2
    wave_chunks = []
    temp_wave = wave.copy()[shift_abs:]
    while len(temp_wave) >= min_length:
        wave_chunks.append(temp_wave[0:chunk_length])
        # BUGFIX: the original advanced by chunk_length * count, skipping a
        # growing amount of audio after the first chunk; advance by exactly
        # one chunk so the chunks are contiguous
        temp_wave = temp_wave[chunk_length:]
    return wave_chunks
def minmax(wave):
    """Min-max scale `wave` into [0, 1] and return it as a flat 1-D array.

    The original implementation hard-coded the output length to
    8192 * 3 samples; reshaping with -1 preserves that behaviour for
    3-second 8192 Hz clips while supporting arbitrary-length input.
    """
    scaler = MinMaxScaler()
    # scale as a single feature column, then flatten back to 1-D
    column = wave.reshape(-1, 1)
    scaler.fit(column)
    return scaler.transform(column).reshape(-1)
def audio2spec(filename):
    """Load a .wav file and return the log spectrogram of its FIRST
    3-second chunk only -- remaining chunks are currently discarded
    (see the commented-out loop below)."""
    sr, wav = load_sound(filename)
    # wav_chunks = []
    # assert(sr == 8192)
    chunks = divide_single_wave_into_smaller_chunk(3, wav, sr)
    # wav_chunks.append(chunks)
    c = chunks[0]
    # for i, c in enumerate(chunks):
    spec = mel.pretty_spectrogram(
        c.astype('float64'), fft_size=512, step_size=128, log=True)
    return spec
def transform(spectrogram):
    """Run the pretrained keras model on a spectrogram and return the
    predicted (denoised) spectrogram with singleton axes removed."""
    # this is for chaquopy to know where the binary(ires) is, relative to src/main/python
    model_path = join(dirname(__file__), '32-16-16-32.hdf5')
    model = load_model(model_path)
    # add the channel and batch axes the model expects
    sp_reshaped = np.expand_dims(spectrogram, -1)
    sp_reshaped = np.expand_dims(sp_reshaped, axis=0)
    pred = model.predict(sp_reshaped)
    # drop the batch/channel axes again
    pred_reshaped = np.squeeze(pred)
    return pred_reshaped
return pred_reshaped
def back_to_audio(pred_spectrogram):
    """Invert a log spectrogram back to a waveform via iterative
    reconstruction (n_iter=40); the fft/step parameters must match
    the ones used in audio2spec."""
    recovered_audio_orig = mel.invert_pretty_spectrogram(
        pred_spectrogram, fft_size=512, step_size=128, log=True, n_iter=40)
    return recovered_audio_orig
def denoise(wav_path):
    """Full denoising pipeline: .wav file -> spectrogram -> model ->
    reconstructed (denoised) waveform."""
    noisy_spec = audio2spec(wav_path)
    cleaned_spec = transform(noisy_spec)
    return back_to_audio(cleaned_spec)
| [
"os.path.dirname",
"sklearn.preprocessing.MinMaxScaler",
"numpy.expand_dims",
"scipy.io.wavfile.read",
"mel.invert_pretty_spectrogram",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.squeeze",
"tensorflow.compat.v1.keras.models.load_model"
] | [((265, 289), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (287, 289), True, 'import tensorflow.compat.v1 as tf\n'), ((620, 642), 'scipy.io.wavfile.read', 'wavfile.read', (['filename'], {}), '(filename)\n', (632, 642), False, 'from scipy.io import wavfile\n'), ((1695, 1709), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1707, 1709), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2417, 2439), 'tensorflow.compat.v1.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (2427, 2439), False, 'from tensorflow.compat.v1.keras.models import load_model\n'), ((2459, 2490), 'numpy.expand_dims', 'np.expand_dims', (['spectrogram', '(-1)'], {}), '(spectrogram, -1)\n', (2473, 2490), True, 'import numpy as np\n'), ((2509, 2544), 'numpy.expand_dims', 'np.expand_dims', (['sp_reshaped'], {'axis': '(0)'}), '(sp_reshaped, axis=0)\n', (2523, 2544), True, 'import numpy as np\n'), ((2605, 2621), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (2615, 2621), True, 'import numpy as np\n'), ((2714, 2815), 'mel.invert_pretty_spectrogram', 'mel.invert_pretty_spectrogram', (['pred_spectrogram'], {'fft_size': '(512)', 'step_size': '(128)', 'log': '(True)', 'n_iter': '(40)'}), '(pred_spectrogram, fft_size=512, step_size=128,\n log=True, n_iter=40)\n', (2743, 2815), False, 'import mel\n'), ((2365, 2382), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2372, 2382), False, 'from os.path import dirname, join\n')] |
# This file is covered by the LICENSE file in the root of this project.
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import os
import numpy as np
from PIL import Image
import random
import torchvision.transforms.functional as TF
import cv2
'''
means(rgb): [0.47037394 0.44669544 0.40731883]
stds(rgb): [0.27876515 0.27429348 0.28861644]
Num of pixels: 20354514743
Frequency: [1.03595582e-01 8.69684146e-02 1.56773018e-03 5.82611153e-03
4.81058803e-03 3.16223407e-03 7.10428246e-03 7.05528129e-03
5.76380800e-03 2.61561927e-03 6.43652977e-04 1.06484818e-03
1.07875453e-03 6.98690299e-04 3.30846713e-03 1.65507630e-03
6.01311471e-03 4.48253614e-03 3.37169861e-03 1.84147500e-03
2.59677750e-03 4.60398424e-03 1.72642509e-03 3.25079452e-03
3.17922092e-03 9.28004241e-04 3.00187903e-03 1.02122941e-03
7.74191387e-04 3.01174387e-03 3.52895713e-04 3.00067384e-04
3.31869518e-04 1.49010479e-04 6.79291802e-04 1.38228842e-04
1.80938973e-04 5.82766927e-04 1.16591352e-03 5.55644934e-04
1.83246594e-03 9.64564533e-04 2.68603416e-03 3.53508157e-04
4.86584039e-04 3.04124273e-04 6.10763335e-03 2.51745687e-03
1.19416608e-03 3.49547734e-03 1.43915212e-03 1.98661498e-03
8.55161482e-04 1.22814719e-03 8.29490195e-03 2.09027995e-03
3.95652007e-03 6.19389573e-03 5.21590882e-03 2.07798941e-03
9.07128538e-03 2.41144264e-02 3.08866224e-03 3.29269545e-03
3.44996375e-03 2.17966680e-04 5.69893272e-04 1.33344903e-03
1.06328032e-03 9.01832455e-04 3.21914572e-03 5.66035602e-05
1.64377842e-03 3.49153060e-03 2.07557215e-03 1.33823711e-03
1.73024557e-03 3.61442810e-04 3.16915293e-03 3.26746183e-05
1.69843597e-04 2.24706580e-03 1.08037029e-03 1.15556594e-03
2.19738081e-03 2.83867548e-03 4.58330597e-03 6.13085488e-03
5.53305060e-03 1.95223391e-03 1.24932391e-03 2.50343202e-03
4.28674371e-03 1.36921250e-03 3.32965639e-03 1.77840698e-03
5.10465080e-04 2.04364749e-03 1.78148449e-02 2.76140555e-03
5.15718043e-03 2.26026582e-02 1.41155564e-03 9.53189813e-03
2.24532113e-02 2.74807151e-03 1.89481003e-02 1.06579298e-03
7.92184791e-04 7.43852368e-04 5.30637362e-03 2.23005552e-03
8.45400979e-03 6.19471526e-03 4.12920107e-03 1.70490166e-03
9.71786370e-03 6.47590623e-02 1.39815155e-02 8.92733677e-03
8.67340285e-02 8.37997595e-03 1.41617307e-02 1.35923816e-02
2.34834311e-02 7.09260706e-03 4.15174260e-02 1.33029928e-02
4.80344372e-03 7.12591456e-03 3.01482646e-02 4.35955532e-03
6.39422134e-02 6.29973913e-03]
********************************************************************************
Log strategy
Weights: [3.30289772 3.44347075 4.45638856 4.38993873 4.40558454 4.43124634
4.3704214 4.37116607 4.39089505 4.43982977 4.47110532 4.46438403
4.4641625 4.47022578 4.42895632 4.45500307 4.38707112 4.4106653
4.42796692 4.45204959 4.4401263 4.40878283 4.45387203 4.42985916
4.43098019 4.46656527 4.43376055 4.46507904 4.46901983 4.43360579
4.47575828 4.47660484 4.47609518 4.47902746 4.47053574 4.47920049
4.47851516 4.47207879 4.4627746 4.47251257 4.45219224 4.46598228
4.43872198 4.47574847 4.47361754 4.47653982 4.3856233 4.44137513
4.46232492 4.42603155 4.45842983 4.44975287 4.46772733 4.46178419
4.35241406 4.44811407 4.41883936 4.38430288 4.39932503 4.44830829
4.34076057 4.12794364 4.43239948 4.42920318 4.42674297 4.4779212
4.47228467 4.4601095 4.464409 4.46698271 4.43035479 4.4805109
4.45518222 4.42609323 4.44834649 4.46003338 4.45381149 4.47562135
4.43113793 4.48089522 4.47869317 4.44563805 4.46413676 4.46293932
4.44642236 4.43632268 4.40910322 4.38526776 4.3944411 4.45029668
4.46144729 4.44159602 4.41370389 4.45954104 4.42862471 4.45304841
4.47323538 4.4488511 4.21416693 4.43753689 4.40023077 4.14827356
4.45886822 4.33387961 4.15029549 4.4377465 4.19835921 4.46436897
4.46873253 4.46950434 4.39793066 4.44590653 4.35002018 4.38429034
4.41615226 4.45421316 4.33110842 3.65425719 4.26863963 4.34291598
3.44555095 4.3511337 4.26604345 4.27425755 4.13640191 4.37059881
3.90903173 4.27844617 4.40569505 4.37009275 4.04897801 4.41257335
3.66257514 4.38268395]
Linear strategy
Weights: [0.89640442 0.91303159 0.99843227 0.99417389 0.99518941 0.99683777
0.99289572 0.99294472 0.99423619 0.99738438 0.99935635 0.99893515
0.99892125 0.99930131 0.99669153 0.99834492 0.99398689 0.99551746
0.9966283 0.99815853 0.99740322 0.99539602 0.99827357 0.99674921
0.99682078 0.999072 0.99699812 0.99897877 0.99922581 0.99698826
0.9996471 0.99969993 0.99966813 0.99985099 0.99932071 0.99986177
0.99981906 0.99941723 0.99883409 0.99944436 0.99816753 0.99903544
0.99731397 0.99964649 0.99951342 0.99969588 0.99389237 0.99748254
0.99880583 0.99650452 0.99856085 0.99801339 0.99914484 0.99877185
0.9917051 0.99790972 0.99604348 0.9938061 0.99478409 0.99792201
0.99092871 0.97588557 0.99691134 0.9967073 0.99655004 0.99978203
0.99943011 0.99866655 0.99893672 0.99909817 0.99678085 0.9999434
0.99835622 0.99650847 0.99792443 0.99866176 0.99826975 0.99963856
0.99683085 0.99996733 0.99983016 0.99775293 0.99891963 0.99884443
0.99780262 0.99716132 0.99541669 0.99386915 0.99446695 0.99804777
0.99875068 0.99749657 0.99571326 0.99863079 0.99667034 0.99822159
0.99948953 0.99795635 0.98218516 0.99723859 0.99484282 0.97739734
0.99858844 0.9904681 0.97754679 0.99725193 0.9810519 0.99893421
0.99920782 0.99925615 0.99469363 0.99776994 0.99154599 0.99380528
0.9958708 0.9982951 0.99028214 0.93524094 0.98601848 0.99107266
0.91326597 0.99162002 0.98583827 0.98640762 0.97651657 0.99290739
0.95848257 0.98669701 0.99519656 0.99287409 0.96985174 0.99564044
0.93605779 0.99370026]
Squared strategy
Weights: [0.80354088 0.83362668 0.996867 0.98838172 0.99040197 0.99368553
0.98584191 0.98593921 0.98850561 0.9947756 0.99871311 0.99787144
0.99784365 0.99860311 0.99339401 0.99669259 0.98800993 0.99105502
0.99326797 0.99632044 0.99481319 0.99081323 0.99655013 0.99350898
0.99365167 0.99814485 0.99400525 0.99795858 0.99845222 0.99398558
0.99929433 0.99939996 0.99933637 0.999702 0.99864188 0.99972356
0.99963815 0.99883481 0.99766953 0.99888902 0.99633843 0.9980718
0.99463515 0.99929311 0.99902707 0.99939184 0.98782204 0.99497142
0.99761309 0.99302126 0.99712377 0.99603072 0.99829041 0.99754521
0.983479 0.99582381 0.99210261 0.98765057 0.98959539 0.99584834
0.98193972 0.95235265 0.99383222 0.99342545 0.99311197 0.99956411
0.99886054 0.99733488 0.99787457 0.99819715 0.99357207 0.9998868
0.99671515 0.99302913 0.99585316 0.99732532 0.9965425 0.99927725
0.99367174 0.99993465 0.99966034 0.99551092 0.99784043 0.9976902
0.99561007 0.99433071 0.99085439 0.98777588 0.98896451 0.99609934
0.99750291 0.9949994 0.99144489 0.99726345 0.99335177 0.99644635
0.99897933 0.99591688 0.96468768 0.99448481 0.98971224 0.95530556
0.99717888 0.98102706 0.95559772 0.99451141 0.96246283 0.99786955
0.99841626 0.99851285 0.98941541 0.99554486 0.98316345 0.98764894
0.99175865 0.9965931 0.98065871 0.87467561 0.97223245 0.98222502
0.83405473 0.98331027 0.97187709 0.97299999 0.95358461 0.98586509
0.91868884 0.97357098 0.99041619 0.98579895 0.94061239 0.9912999
0.87620418 0.98744021]
1/w strategy
Weights: [9.65292034e+00 1.14984261e+01 6.37860798e+02 1.71640773e+02
2.07874363e+02 3.16231125e+02 1.40759971e+02 1.41737592e+02
1.73496110e+02 3.82317177e+02 1.55360808e+03 9.39092189e+02
9.26986355e+02 1.43122881e+03 3.02253865e+02 6.04198100e+02
1.66302887e+02 2.23087497e+02 2.96585535e+02 5.43039993e+02
3.85091194e+02 2.17202705e+02 5.79228263e+02 3.07616159e+02
3.14541481e+02 1.07756967e+03 3.33123573e+02 9.79202324e+02
1.29165359e+03 3.32032445e+02 2.83361805e+03 3.33247373e+03
3.01314165e+03 6.71048707e+03 1.47209973e+03 7.23385690e+03
5.52641986e+03 1.71592243e+03 8.57689188e+02 1.79967807e+03
5.45709757e+02 1.03672652e+03 3.72294698e+02 2.82870902e+03
2.05510121e+03 3.28802141e+03 1.63729272e+02 3.97224691e+02
8.37397449e+02 2.86083142e+02 6.94848751e+02 5.03366266e+02
1.16935611e+03 8.14228025e+02 1.20555831e+02 4.78402530e+02
2.52746721e+02 1.61449018e+02 1.91720775e+02 4.81232091e+02
1.10237839e+02 4.14689352e+01 3.23763716e+02 3.03701627e+02
2.89857278e+02 4.58764672e+03 1.75468373e+03 7.49929301e+02
9.40476913e+02 1.10884112e+03 3.10640456e+02 1.76636127e+04
6.08350801e+02 2.86406522e+02 4.81792541e+02 7.47246149e+02
5.77949302e+02 2.76661288e+03 3.15540735e+02 3.05954315e+04
5.88742315e+03 4.45022815e+02 9.25600003e+02 8.65369349e+02
4.55085184e+02 3.52275730e+02 2.18182645e+02 1.63109124e+02
1.80731800e+02 5.12231075e+02 8.00426523e+02 3.99450034e+02
2.33276756e+02 7.30341490e+02 3.00330389e+02 5.62297827e+02
1.95895948e+03 4.89318785e+02 5.61329299e+01 3.62133109e+02
1.93904029e+02 4.42425642e+01 7.08433228e+02 1.04910789e+02
4.45370393e+01 3.63890226e+02 5.27757115e+01 9.38259711e+02
1.26231580e+03 1.34433471e+03 1.88452263e+02 4.48417318e+02
1.18286924e+02 1.61427660e+02 2.42177012e+02 5.86540654e+02
1.02903169e+02 1.54418518e+01 7.15229535e+01 1.12015364e+02
1.15294989e+01 1.19331942e+02 7.06127883e+01 7.35705703e+01
4.25831970e+01 1.40991681e+02 2.40862658e+01 7.51709983e+01
2.08183540e+02 1.40332667e+02 3.31693940e+01 2.29380667e+02
1.56391184e+01 1.58736480e+02]
'''
IMG_EXT = ['.jpg']
LBL_EXT = ['.png']
SCALES = [1.0]
class ToLabel:
    """Transform: convert a PIL image / array-like label map into a
    torch LongTensor (int64), as required by loss functions."""
    def __call__(self, label):
        # np.array handles both PIL images and plain sequences
        as_array = np.array(label)
        return torch.from_numpy(as_array).long()
def load_image(file):
    # Thin PIL wrapper; `file` may be a path or an open binary file object.
    return Image.open(file)
def load_label(file):
    # Thin PIL wrapper; `file` may be a path or an open binary file object.
    return Image.open(file)
def is_image(filename):
    """True if `filename` ends with one of the recognised image extensions."""
    # str.endswith accepts a tuple of suffixes, replacing the any() scan
    return filename.endswith(tuple(IMG_EXT))
def is_label(filename):
    """True if `filename` ends with one of the recognised label extensions."""
    # str.endswith accepts a tuple of suffixes, replacing the any() scan
    return filename.endswith(tuple(LBL_EXT))
def resize_and_fit(img, new_h, new_w, img_type):
    """Resize `img` to fit inside a (new_w, new_h) canvas while preserving
    its aspect ratio, padding the remainder and centring the result.

    :param img: PIL image to resize
    :param new_h: output canvas height in pixels
    :param new_w: output canvas width in pixels
    :param img_type: "RGB" (bilinear resampling) or "L" (nearest, for labels)
    :return: a new PIL image of exactly (new_w, new_h)
    """
    # check img_type -- BUGFIX: use == instead of `is`; identity comparison
    # against string literals only worked by CPython interning accident and
    # raises SyntaxWarning on modern Python
    assert(img_type == "RGB" or img_type == "L")
    # get current size
    w, h = img.size
    # generate new (padded) output canvas
    out_img = Image.new(img_type, (new_w, new_h))
    curr_asp_ratio = h / w
    new_asp_ratio = new_h / new_w
    # do resizing according to aspect ratio
    if curr_asp_ratio > new_asp_ratio:
        # image is relatively taller: fit height, scale width
        new_tmp_h = new_h
        new_tmp_w = int(w * new_h / h)
    else:
        # image is relatively wider: fit width, scale height
        new_tmp_w = new_w
        new_tmp_h = int(h * new_w / w)
    # resize the original image; labels must use NEAREST so class ids
    # are never interpolated
    if img_type == "RGB":
        tmp_img = img.resize((new_tmp_w, new_tmp_h), Image.BILINEAR)
    else:
        tmp_img = img.resize((new_tmp_w, new_tmp_h), Image.NEAREST)
    # paste centred into the padded canvas
    out_img.paste(tmp_img, (int((new_w-new_tmp_w)//2),
                            int((new_h-new_tmp_h)//2)))
    return out_img
class MS_COCO(Dataset):
    """MS-COCO panoptic segmentation dataset.

    Yields (image, label) pairs: a normalized float image tensor and a
    Long label tensor. In 'train' mode samples are randomly cropped,
    flipped and color-jittered; in 'val' mode they are only resized.
    """
    def __init__(self, root, subset, h, w, means, stds, crop_h=None, crop_w=None):
        # images live in <root>/<subset>2017, remapped panoptic labels
        # in <root>/annotations/panoptic_<subset>2017_remap
        self.images_root = os.path.join(root, subset + "2017")
        self.labels_root = os.path.join(root,
                                        "annotations/panoptic_"+subset+"2017_remap")
        self.subset = subset
        assert self.subset == 'train' or self.subset == 'val'
        self.w = w
        self.h = h
        self.means = means
        self.stds = stds
        if self.subset == 'train':
            self.crop_h = crop_h
            self.crop_w = crop_w
            # check that parameters make sense
            assert(self.crop_h <= self.h)
            assert(self.crop_w <= self.w)
            self.resize_crop_img = transforms.Resize((self.crop_h, self.crop_w),
                                                      Image.BILINEAR)
            self.resize_crop_lbl = transforms.Resize((self.crop_h, self.crop_w),
                                                      Image.NEAREST)
        print("Images from: ", self.images_root)
        print("Labels from: ", self.labels_root)
        # recursively collect and sort image/label paths; sorting keeps
        # the two lists aligned index-for-index
        self.filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(
            os.path.expanduser(self.images_root)) for f in fn if is_image(f)]
        self.filenames.sort()
        self.filenamesGt = [os.path.join(dp, f) for dp, dn, fn in os.walk(
            os.path.expanduser(self.labels_root)) for f in fn if is_label(f)]
        self.filenamesGt.sort()
        assert len(self.filenames) == len(self.filenamesGt)
        # transformations for images
        self.jitter = transforms.ColorJitter(brightness=0.05,
                                             contrast=0.05,
                                             saturation=0.05,
                                             hue=0.05)
        self.h_flip = TF.hflip
        self.crop_param = transforms.RandomCrop.get_params
        self.crop = TF.crop
        # transformations for tensors
        self.norm = transforms.Normalize(mean=self.means, std=self.stds)
        self.tensorize_img = transforms.ToTensor()
        self.tensorize_lbl = ToLabel()
    def __getitem__(self, index):
        """Load, resize, (in train mode) augment, and tensorize sample `index`."""
        filename = self.filenames[index]
        filenameGt = self.filenamesGt[index]
        with open(filename, 'rb') as f:
            image = load_image(f).convert('RGB')
        with open(filenameGt, 'rb') as f:
            label = load_label(f).convert('L')
        # resize (resizing is different if we are in train or valid mode)
        # generate resizer
        if self.subset == 'train':
            new_h = self.crop_h
            new_w = self.crop_w
        else:
            new_h = self.h
            new_w = self.w
        image = resize_and_fit(image, new_h, new_w, "RGB")
        label = resize_and_fit(label, new_h, new_w, "L")
        # augment data and tensorize
        if self.subset == 'train':
            # crop randomly sized patches (same crop applied to image and label)
            scale = SCALES[random.randrange(len(SCALES))]
            size = (int(self.crop_h * scale), int(self.crop_w * scale))
            i, j, h, w = self.crop_param(image, output_size=size)
            image = self.resize_crop_img(self.crop(image, i, j, h, w))
            label = self.resize_crop_lbl(self.crop(label, i, j, h, w))
            # flip image and label together so they stay aligned
            if random.random() > 0.5:
                image = self.h_flip(image)
                label = self.h_flip(label)
            # jitter colors (image only; labels are class ids)
            if random.random() > 0.5:
                image = self.jitter(image)
            # show (set workers = 0)
            # cv2.imshow("train_img", np.array(image)[:, :, ::-1])
            # cv2.imshow("train_lbl", LUT[np.array(label)].astype(np.float32) / 21.0)
            # cv2.waitKey(0)
        # if self.subset == 'val':
        # show (set workers = 0)
        # cv2.imshow("valid_img", np.array(image)[:, :, ::-1])
        # cv2.waitKey(0)
        # tensorize
        image = self.tensorize_img(image)
        label = self.tensorize_lbl(label)
        # normalize only the image; labels are discrete class ids
        image = self.norm(image)
        return image, label
    def __len__(self):
        # dataset size = number of image files found
        return len(self.filenames)
class Parser():
    """Builds MS COCO train/val DataLoaders and exposes simple accessors.

    Args:
        img_prop: dict with "height", "width", "depth" of the full images.
        img_means / img_stds: per-channel normalization statistics.
        classes: list of class names.
        train: when True, construct the train and validation loaders.
        location: dataset root directory (required when train=True).
        batch_size: training batch size (required when train=True).
        crop_prop: dict with "height"/"width" of the training crop.
        workers: number of DataLoader worker processes.
    """
    # standard conv, BN, relu
    def __init__(self, img_prop, img_means, img_stds, classes, train, location=None, batch_size=None, crop_prop=None, workers=2):
        super(Parser, self).__init__()
        self.img_prop = img_prop
        self.img_means = img_means
        self.img_stds = img_stds
        self.classes = classes
        self.train = train
        if self.train:
            # if I am training, get the dataset
            self.location = location
            self.batch_size = batch_size
            self.crop_prop = crop_prop
            self.workers = workers
            # Data loading code
            self.train_dataset = MS_COCO(root=self.location,
                                         subset='train',
                                         h=self.img_prop["height"],
                                         w=self.img_prop["width"],
                                         means=self.img_means,
                                         stds=self.img_stds,
                                         crop_h=self.crop_prop["height"],
                                         crop_w=self.crop_prop["width"])
            self.trainloader = torch.utils.data.DataLoader(self.train_dataset,
                                                           batch_size=self.batch_size,
                                                           shuffle=True,
                                                           num_workers=self.workers,
                                                           pin_memory=True,
                                                           drop_last=True)
            assert len(self.trainloader) > 0
            self.trainiter = iter(self.trainloader)
            # calculate validation batch from train batch and image sizes:
            # validation runs on full-size images, so scale the batch down by
            # the ratio of full-image area to crop area.
            factor_val_over_train = float(self.img_prop["height"] * self.img_prop["width"]) / float(
                self.crop_prop["height"] * self.crop_prop["width"])
            self.val_batch_size = max(
                1, int(self.batch_size / factor_val_over_train))
            # if gpus are available make val_batch_size at least the number of gpus
            if torch.cuda.is_available() and torch.cuda.device_count() > 1:
                self.val_batch_size = max(
                    self.val_batch_size, torch.cuda.device_count())
            print("Inference batch size: ", self.val_batch_size)
            self.valid_dataset = MS_COCO(root=self.location,
                                         subset='val',
                                         h=self.img_prop["height"],
                                         w=self.img_prop["width"],
                                         means=self.img_means,
                                         stds=self.img_stds)
            self.validloader = torch.utils.data.DataLoader(self.valid_dataset,
                                                           batch_size=self.val_batch_size,
                                                           shuffle=False,
                                                           num_workers=self.workers,
                                                           pin_memory=True,
                                                           drop_last=True)
            assert len(self.validloader) > 0
            self.validiter = iter(self.validloader)
    def get_train_batch(self):
        # BUG FIX: Python 3 iterators have no .next() method; use the
        # builtin next() instead (the old .next() raised AttributeError).
        images, labels = next(self.trainiter)
        return images, labels
    def get_train_set(self):
        return self.trainloader
    def get_valid_batch(self):
        # BUG FIX: next(iterator) instead of Python-2 iterator.next().
        images, labels = next(self.validiter)
        return images, labels
    def get_valid_set(self):
        return self.validloader
    def get_train_size(self):
        return len(self.trainloader)
    def get_valid_size(self):
        return len(self.validloader)
    def get_img_size(self):
        """Return (height, width, depth) of the full-size images."""
        h = self.img_prop["height"]
        w = self.img_prop["width"]
        d = self.img_prop["depth"]
        return h, w, d
    def get_n_classes(self):
        return len(self.classes)
    def get_class_string(self, idx):
        return self.classes[idx]
    def get_means_stds(self):
        return self.img_means, self.img_stds
| [
"torchvision.transforms.ColorJitter",
"os.path.expanduser",
"PIL.Image.new",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"PIL.Image.open",
"torch.cuda.device_count",
"random.random",
"numpy.array",
"torch.cuda.is_available",
"torchvision.transforms.Normalize",
"torchvision.transforms.Re... | [((9396, 9412), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (9406, 9412), False, 'from PIL import Image\n'), ((9446, 9462), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (9456, 9462), False, 'from PIL import Image\n'), ((9818, 9853), 'PIL.Image.new', 'Image.new', (['img_type', '(new_w, new_h)'], {}), '(img_type, (new_w, new_h))\n', (9827, 9853), False, 'from PIL import Image\n'), ((9305, 9320), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (9313, 9320), True, 'import numpy as np\n'), ((10642, 10677), 'os.path.join', 'os.path.join', (['root', "(subset + '2017')"], {}), "(root, subset + '2017')\n", (10654, 10677), False, 'import os\n'), ((10701, 10768), 'os.path.join', 'os.path.join', (['root', "('annotations/panoptic_' + subset + '2017_remap')"], {}), "(root, 'annotations/panoptic_' + subset + '2017_remap')\n", (10713, 10768), False, 'import os\n'), ((11980, 12066), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.05)', 'contrast': '(0.05)', 'saturation': '(0.05)', 'hue': '(0.05)'}), '(brightness=0.05, contrast=0.05, saturation=0.05, hue\n =0.05)\n', (12002, 12066), True, 'import torchvision.transforms as transforms\n'), ((12342, 12394), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'self.means', 'std': 'self.stds'}), '(mean=self.means, std=self.stds)\n', (12362, 12394), True, 'import torchvision.transforms as transforms\n'), ((12420, 12441), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12439, 12441), True, 'import torchvision.transforms as transforms\n'), ((11190, 11251), 'torchvision.transforms.Resize', 'transforms.Resize', (['(self.crop_h, self.crop_w)', 'Image.BILINEAR'], {}), '((self.crop_h, self.crop_w), Image.BILINEAR)\n', (11207, 11251), True, 'import torchvision.transforms as transforms\n'), ((11328, 11388), 'torchvision.transforms.Resize', 'transforms.Resize', (['(self.crop_h, 
self.crop_w)', 'Image.NEAREST'], {}), '((self.crop_h, self.crop_w), Image.NEAREST)\n', (11345, 11388), True, 'import torchvision.transforms as transforms\n'), ((11550, 11569), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (11562, 11569), False, 'import os\n'), ((11722, 11741), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (11734, 11741), False, 'import os\n'), ((15320, 15472), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.train_dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.workers', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(self.train_dataset, batch_size=self.batch_size,\n shuffle=True, num_workers=self.workers, pin_memory=True, drop_last=True)\n', (15347, 15472), False, 'import torch\n'), ((16807, 16970), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valid_dataset'], {'batch_size': 'self.val_batch_size', 'shuffle': '(False)', 'num_workers': 'self.workers', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(self.valid_dataset, batch_size=self.\n val_batch_size, shuffle=False, num_workers=self.workers, pin_memory=\n True, drop_last=True)\n', (16834, 16970), False, 'import torch\n'), ((9332, 9355), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (9348, 9355), False, 'import torch\n'), ((13516, 13531), 'random.random', 'random.random', ([], {}), '()\n', (13529, 13531), False, 'import random\n'), ((13634, 13649), 'random.random', 'random.random', ([], {}), '()\n', (13647, 13649), False, 'import random\n'), ((16225, 16250), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16248, 16250), False, 'import torch\n'), ((11605, 11641), 'os.path.expanduser', 'os.path.expanduser', (['self.images_root'], {}), '(self.images_root)\n', (11623, 11641), False, 'import os\n'), ((11777, 11813), 'os.path.expanduser', 'os.path.expanduser', (['self.labels_root'], {}), '(self.labels_root)\n', (11795, 11813), 
False, 'import os\n'), ((16255, 16280), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16278, 16280), False, 'import torch\n'), ((16354, 16379), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16377, 16379), False, 'import torch\n')] |
#ref: https://github.com/mileyan/Pseudo_Lidar_V2/compare/master...swdev1202:master
import argparse
import os
import numpy as np
import tqdm
def pto_rec_map(velo_points, H=64, W=512, D=800):
    """Voxelize an Nx4 (x, y, z, intensity) point cloud onto a (D, W, H) grid.

    Points outside x in [0, 80), y in [-50, 50), z in [-2.5, 1) are dropped.
    Each surviving point is quantized to a voxel; when several points land in
    the same voxel the last one (in input order) wins. Returns an Mx4 array of
    the points that occupy voxels.
    """
    # depth, width, height — keep only points inside the fixed bounding box
    in_box = (
        (velo_points[:, 0] >= 0) & (velo_points[:, 0] < 80)
        & (velo_points[:, 1] >= -50) & (velo_points[:, 1] < 50)
        & (velo_points[:, 2] >= -2.5) & (velo_points[:, 2] < 1)
    )
    pts = velo_points[in_box]
    x, y, z, intensity = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3]
    # Quantize each coordinate onto the grid and clamp to valid cell indices.
    xi = np.clip((x * D / 80.).astype(int), 0, D - 1)
    yi = np.clip(((y + 50) * W / 100.).astype(int), 0, W - 1)
    zi = np.clip(((z + 2.5) * H / 3.5).astype(int), 0, H - 1)
    # Untouched voxels keep the -1 sentinel in every channel.
    grid = -np.ones((D, W, H, 4))
    for ch, vals in enumerate((x, y, z, intensity)):
        grid[xi, yi, zi, ch] = vals
    # Flatten and drop empty voxels (real points always have x >= 0).
    flat = grid.reshape((-1, 4))
    return flat[flat[:, 0] != -1.0]
def pto_ang_map(velo_points, H=64, W=512, slice=1, argo=False):
    """
    Sparsify an Nx4 point cloud by binning it into an HxW angular depth map.

    :param H: the row num of depth map, could be 64(default), 32, 16
    :param W: the col num of depth map
    :param slice: output every slice lines
    :param argo: use the Argoverse vertical field of view instead of KITTI's
    :return: Mx4 array of the points that survive the angular binning
    """
    # Per-row / per-column angular resolution (Argoverse has a wider FOV).
    dtheta = np.radians((0.625 if argo else 0.4) * 64.0 / H)
    dphi = np.radians(90.0 / W)
    x, y, z, intensity = velo_points[:, 0], velo_points[:, 1], velo_points[:, 2], velo_points[:, 3]
    dist = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    rxy = np.sqrt(x ** 2 + y ** 2)
    # Guard against division by zero for points at the sensor origin.
    dist[dist == 0] = 0.000001
    rxy[rxy == 0] = 0.000001
    # Azimuth bin across a 90-degree horizontal field of view.
    phi = np.radians(45.) - np.arcsin(y / rxy)
    phi_idx = np.clip((phi / dphi).astype(int), 0, W - 1)
    # Elevation bin, measured down from the top beam angle.
    top_angle = np.radians(15.) if argo else np.radians(2.)
    theta = top_angle - np.arcsin(z / dist)
    theta_idx = np.clip((theta / dtheta).astype(int), 0, H - 1)
    # Empty cells keep the -1 sentinel; last point per cell wins.
    depth_map = -np.ones((H, W, 4))
    for ch, vals in enumerate((x, y, z, intensity)):
        depth_map[theta_idx, phi_idx, ch] = vals
    # Keep every `slice`-th beam row, flatten, and drop empty cells.
    depth_map = depth_map[0::slice, :, :]
    flat = depth_map.reshape((-1, 4))
    return flat[flat[:, 0] != -1.0]
def pto_ang_map_div(velo_points, H=64, W=512, slice=1, argo=False, div=1):
    """Angular sparsification applied independently to `div` forward-range slabs.

    The 0-80m forward range is split into `div` equal slabs; each slab is
    binned into its own HxW angular map and the surviving points are stacked.

    NOTE(review): unlike pto_ang_map, elevation here uses arcsin(z / r) with r
    the horizontal range (not the 3D distance) and a fixed 15-degree top
    angle, and the `argo` flag is unused — presumably intentional for the
    Argoverse variant; confirm upstream.
    """
    dtheta = np.radians(0.625 * 64.0 / H)
    dphi = np.radians(90.0 / W)
    slab_width = 80.0 / div
    low = 0.0
    high = 0.0
    slab_maps = []
    for _ in range(div):
        high += slab_width
        # Select only the points whose forward distance falls in this slab.
        mask = (velo_points[:, 0] >= low) & (velo_points[:, 0] < high)
        pts = velo_points[mask]
        x, y, z, intensity = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3]
        r = np.sqrt(x ** 2 + y ** 2)
        r[r == 0] = 0.000001
        phi = np.radians(45.) - np.arcsin(y / r)
        phi_idx = np.clip((phi / dphi).astype(int), 0, W - 1)
        theta = np.radians(15.) - np.arcsin(z / r)
        theta_idx = np.clip((theta / dtheta).astype(int), 0, H - 1)
        depth_map = -np.ones((H, W, 4))
        for ch, vals in enumerate((x, y, z, intensity)):
            depth_map[theta_idx, phi_idx, ch] = vals
        flat = depth_map[0::slice, :, :].reshape((-1, 4))
        slab_maps.append(flat[flat[:, 0] != -1.0])
        low += slab_width
    # Stack the per-slab survivors into a single Mx4 array.
    return slab_maps[0] if len(slab_maps) == 1 else np.vstack(slab_maps)
def gen_sparse_points(pl_data_path, args):
    """Load an Nx4 float32 point cloud from `pl_data_path` and sparsify it.

    Dispatches to the slab-wise variant when args.div > 1, otherwise to the
    single-pass angular map. `args` supplies H, W, slice, argo and div.
    """
    cloud = np.fromfile(pl_data_path, dtype=np.float32).reshape((-1, 4))
    x, y, z = cloud[:, 0], cloud[:, 1], cloud[:, 2]
    if args.div > 1:
        # Slab-wise variant bounds x per slab internally; only filter y/z here.
        keep = (y < 50) & (y >= -50) & (z < 1.5) & (z >= -2.5)
        return pto_ang_map_div(cloud[keep], H=args.H, W=args.W,
                               slice=args.slice, argo=args.argo, div=args.div)
    # depth, width, height — single-pass variant also bounds x to [0, 80)
    keep = ((x < 80) & (x >= 0)
            & (y < 50) & (y >= -50)
            & (z < 1.5) & (z >= -2.5))
    return pto_ang_map(cloud[keep], H=args.H, W=args.W,
                       slice=args.slice, argo=args.argo)
def gen_sparse_points_all(args):
    """Sparsify every .bin cloud in args.pl_path and write it to args.sparse_pl_path."""
    out_dir = args.sparse_pl_path
    os.makedirs(out_dir, exist_ok=True)
    # Only .bin files; sorting makes the processing order deterministic.
    names = sorted(f.strip() for f in os.listdir(args.pl_path) if f[-3:] == 'bin')
    for name in tqdm.tqdm(names):
        sparse = gen_sparse_points(os.path.join(args.pl_path, name), args)
        sparse.astype(np.float32).tofile(f'{out_dir}/{name}')
# Command-line entry point: sparsify a directory of pseudo-LiDAR .bin files.
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Generate sparse pseudo-LiDAR points")
    parser.add_argument('--pl_path', default='/scratch/datasets', help='pseudo-lidar path')
    parser.add_argument('--sparse_pl_path', default='/scratch/datasets', help='sparsed pseudo lidar path')
    # sparsification grid parameters (rows H, cols W, keep every `slice`-th row)
    parser.add_argument('--slice', default=1, type=int)
    parser.add_argument('--H', default=64, type=int)
    parser.add_argument('--W', default=512, type=int)
    # NOTE(review): --D is parsed but appears unused by gen_sparse_points_all;
    # it only matters for the rectangular-map variant — confirm before removal.
    parser.add_argument('--D', default=700, type=int)
    parser.add_argument('--argo', action='store_true')
    parser.add_argument('--div', default=1, type=int)
    args = parser.parse_args()
    gen_sparse_points_all(args)
"numpy.radians",
"tqdm.tqdm",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.fromfile",
"numpy.ones",
"numpy.arcsin",
"os.path.join",
"os.listdir",
"numpy.vstack",
"numpy.sqrt"
] | [((1639, 1659), 'numpy.radians', 'np.radians', (['(90.0 / W)'], {}), '(90.0 / W)\n', (1649, 1659), True, 'import numpy as np\n'), ((1762, 1795), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (1769, 1795), True, 'import numpy as np\n'), ((1804, 1828), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (1811, 1828), True, 'import numpy as np\n'), ((2662, 2690), 'numpy.radians', 'np.radians', (['(0.625 * 64.0 / H)'], {}), '(0.625 * 64.0 / H)\n', (2672, 2690), True, 'import numpy as np\n'), ((2702, 2722), 'numpy.radians', 'np.radians', (['(90.0 / W)'], {}), '(90.0 / W)\n', (2712, 2722), True, 'import numpy as np\n'), ((5221, 5261), 'os.makedirs', 'os.makedirs', (['outputfolder'], {'exist_ok': '(True)'}), '(outputfolder, exist_ok=True)\n', (5232, 5261), False, 'import os\n'), ((5376, 5400), 'tqdm.tqdm', 'tqdm.tqdm', (['data_idx_list'], {}), '(data_idx_list)\n', (5385, 5400), False, 'import tqdm\n'), ((5646, 5708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Generate sparse pseudo-LiDAR points"""'], {}), "('Generate sparse pseudo-LiDAR points')\n", (5669, 5708), False, 'import argparse\n'), ((963, 984), 'numpy.ones', 'np.ones', (['(D, W, H, 4)'], {}), '((D, W, H, 4))\n', (970, 984), True, 'import numpy as np\n'), ((1544, 1572), 'numpy.radians', 'np.radians', (['(0.625 * 64.0 / H)'], {}), '(0.625 * 64.0 / H)\n', (1554, 1572), True, 'import numpy as np\n'), ((1600, 1626), 'numpy.radians', 'np.radians', (['(0.4 * 64.0 / H)'], {}), '(0.4 * 64.0 / H)\n', (1610, 1626), True, 'import numpy as np\n'), ((1889, 1905), 'numpy.radians', 'np.radians', (['(45.0)'], {}), '(45.0)\n', (1899, 1905), True, 'import numpy as np\n'), ((1907, 1923), 'numpy.arcsin', 'np.arcsin', (['(y / r)'], {}), '(y / r)\n', (1916, 1923), True, 'import numpy as np\n'), ((2257, 2275), 'numpy.ones', 'np.ones', (['(H, W, 4)'], {}), '((H, W, 4))\n', (2264, 2275), True, 'import numpy as np\n'), ((3155, 3188), 
'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (3162, 3188), True, 'import numpy as np\n'), ((3201, 3225), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (3208, 3225), True, 'import numpy as np\n'), ((2042, 2058), 'numpy.radians', 'np.radians', (['(15.0)'], {}), '(15.0)\n', (2052, 2058), True, 'import numpy as np\n'), ((2060, 2076), 'numpy.arcsin', 'np.arcsin', (['(z / d)'], {}), '(z / d)\n', (2069, 2076), True, 'import numpy as np\n'), ((2103, 2118), 'numpy.radians', 'np.radians', (['(2.0)'], {}), '(2.0)\n', (2113, 2118), True, 'import numpy as np\n'), ((2120, 2136), 'numpy.arcsin', 'np.arcsin', (['(z / d)'], {}), '(z / d)\n', (2129, 2136), True, 'import numpy as np\n'), ((3298, 3314), 'numpy.radians', 'np.radians', (['(45.0)'], {}), '(45.0)\n', (3308, 3314), True, 'import numpy as np\n'), ((3316, 3332), 'numpy.arcsin', 'np.arcsin', (['(y / r)'], {}), '(y / r)\n', (3325, 3332), True, 'import numpy as np\n'), ((3449, 3465), 'numpy.radians', 'np.radians', (['(15.0)'], {}), '(15.0)\n', (3459, 3465), True, 'import numpy as np\n'), ((3467, 3483), 'numpy.arcsin', 'np.arcsin', (['(z / r)'], {}), '(z / r)\n', (3476, 3483), True, 'import numpy as np\n'), ((3619, 3637), 'numpy.ones', 'np.ones', (['(H, W, 4)'], {}), '((H, W, 4))\n', (3626, 3637), True, 'import numpy as np\n'), ((4170, 4213), 'numpy.vstack', 'np.vstack', (['(result, depth_map_container[i])'], {}), '((result, depth_map_container[i]))\n', (4179, 4213), True, 'import numpy as np\n'), ((4291, 4334), 'numpy.fromfile', 'np.fromfile', (['pl_data_path'], {'dtype': 'np.float32'}), '(pl_data_path, dtype=np.float32)\n', (4302, 4334), True, 'import numpy as np\n'), ((5444, 5480), 'os.path.join', 'os.path.join', (['args.pl_path', 'data_idx'], {}), '(args.pl_path, data_idx)\n', (5456, 5480), False, 'import os\n'), ((5309, 5333), 'os.listdir', 'os.listdir', (['args.pl_path'], {}), '(args.pl_path)\n', (5319, 5333), False, 'import os\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from isegm.utils import misc
class NormalizedFocalLossSigmoid(nn.Module):
    """Normalized focal loss for binary (sigmoid) predictions.

    Standard focal loss weights each pixel by (1 - p_t)^gamma; this variant
    additionally rescales those weights so their sum matches the number of
    valid pixels, which keeps the loss magnitude comparable across images.
    Running statistics (_k_sum, _m_max) track the normalizer and peak weight
    via exponential moving averages for logging only.
    """
    def __init__(self, axis=-1, alpha=0.25, gamma=2, max_mult=-1, eps=1e-12,
                 from_sigmoid=False, detach_delimeter=True,
                 batch_axis=0, weight=None, size_average=True,
                 ignore_label=-1):
        super(NormalizedFocalLossSigmoid, self).__init__()
        self._axis = axis
        self._alpha = alpha          # class-balance weight for positives
        self._gamma = gamma          # focusing exponent
        self._ignore_label = ignore_label
        self._weight = weight if weight is not None else 1.0
        self._batch_axis = batch_axis
        self._from_logits = from_sigmoid
        self._eps = eps
        self._size_average = size_average
        self._detach_delimeter = detach_delimeter  # stop gradient through the normalizer
        self._max_mult = max_mult    # optional cap on the normalized weight; <= 0 disables
        self._size_average = size_average if False else size_average
        self._k_sum = 0              # EMA of the normalizer (logging only)
        self._m_max = 0              # EMA of the peak weight (logging only)
    def forward(self, pred, label):
        """Compute the loss; `label` values equal to ignore_label are masked out."""
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label
        if not self._from_logits:
            pred = torch.sigmoid(pred)
        # alpha for positives, (1 - alpha) for negatives; 0 on ignored pixels.
        alpha = torch.where(one_hot, self._alpha * sample_weight, (1 - self._alpha) * sample_weight)
        # pt = probability assigned to the true class; 1 on ignored pixels
        # so their focal weight (1 - pt)^gamma becomes 0.
        pt = torch.where(sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred))
        beta = (1 - pt) ** self._gamma
        # Normalize the focal weights over the spatial dims so they sum to the
        # number of valid pixels.
        sw_sum = torch.sum(sample_weight, dim=(-2, -1), keepdim=True)
        beta_sum = torch.sum(beta, dim=(-2, -1), keepdim=True)
        mult = sw_sum / (beta_sum + self._eps)
        if self._detach_delimeter:
            mult = mult.detach()
        beta = beta * mult
        if self._max_mult > 0:
            beta = torch.clamp_max(beta, self._max_mult)
        # Update EMA logging statistics; no gradients flow through this block.
        with torch.no_grad():
            ignore_area = torch.sum(label == self._ignore_label, dim=tuple(range(1, label.dim()))).cpu().numpy()
            sample_mult = torch.mean(mult, dim=tuple(range(1, mult.dim()))).cpu().numpy()
            if np.any(ignore_area == 0):
                self._k_sum = 0.9 * self._k_sum + 0.1 * sample_mult[ignore_area == 0].mean()
                beta_pmax, _ = torch.flatten(beta, start_dim=1).max(dim=1)
                beta_pmax = beta_pmax.mean().item()
                self._m_max = 0.8 * self._m_max + 0.2 * beta_pmax
        # min(pt + eps, 1) keeps log() finite when pt is exactly 1.
        loss = -alpha * beta * torch.log(torch.min(pt + self._eps, torch.ones(1, dtype=torch.float).to(pt.device)))
        loss = self._weight * (loss * sample_weight)
        if self._size_average:
            # Mean over valid pixels, per batch element.
            bsum = torch.sum(sample_weight, dim=misc.get_dims_with_exclusion(sample_weight.dim(), self._batch_axis))
            loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis)) / (bsum + self._eps)
        else:
            loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
        return loss
    def log_states(self, sw, name, global_step):
        """Write the EMA statistics to a summary writer `sw`."""
        sw.add_scalar(tag=name + '_k', value=self._k_sum, global_step=global_step)
        sw.add_scalar(tag=name + '_m', value=self._m_max, global_step=global_step)
class FocalLoss(nn.Module):
    """Standard focal loss for binary (sigmoid) predictions with an ignore label."""
    def __init__(self, axis=-1, alpha=0.25, gamma=2,
                 from_logits=False, batch_axis=0,
                 weight=None, num_class=None,
                 eps=1e-9, size_average=True, scale=1.0,
                 ignore_label=-1):
        super(FocalLoss, self).__init__()
        self._axis = axis
        self._alpha = alpha          # class-balance weight for positives
        self._gamma = gamma          # focusing exponent
        self._ignore_label = ignore_label
        self._weight = weight if weight is not None else 1.0
        self._batch_axis = batch_axis
        self._scale = scale          # final multiplier on the loss
        self._num_class = num_class
        self._from_logits = from_logits
        self._eps = eps
        self._size_average = size_average
    def forward(self, pred, label, sample_weight=None):
        """Compute the loss; labels equal to ignore_label are masked out.

        NOTE(review): the `sample_weight` argument is immediately overwritten
        below and therefore silently ignored — presumably a leftover from an
        older signature; confirm with callers before changing.
        """
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label
        if not self._from_logits:
            pred = torch.sigmoid(pred)
        # alpha for positives, (1 - alpha) for negatives; 0 on ignored pixels.
        alpha = torch.where(one_hot, self._alpha * sample_weight, (1 - self._alpha) * sample_weight)
        # pt = probability assigned to the true class; 1 on ignored pixels.
        pt = torch.where(sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred))
        beta = (1 - pt) ** self._gamma
        # min(pt + eps, 1) keeps log() finite when pt is exactly 1.
        loss = -alpha * beta * torch.log(torch.min(pt + self._eps, torch.ones(1, dtype=torch.float).to(pt.device)))
        loss = self._weight * (loss * sample_weight)
        if self._size_average:
            # Mean over valid pixels, per batch element.
            tsum = torch.sum(sample_weight, dim=misc.get_dims_with_exclusion(label.dim(), self._batch_axis))
            loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis)) / (tsum + self._eps)
        else:
            loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
        return self._scale * loss
class SoftIoU(nn.Module):
    """Soft (differentiable) IoU loss for binary masks.

    Pixels whose label equals `ignore_label` contribute to neither the
    intersection nor the union. Returns one loss value per batch element.
    """
    def __init__(self, from_sigmoid=False, ignore_label=-1):
        super().__init__()
        self._from_sigmoid = from_sigmoid
        self._ignore_label = ignore_label
    def forward(self, pred, label):
        # Align the label layout with the prediction tensor.
        label = label.view(pred.size())
        valid = label != self._ignore_label
        if not self._from_sigmoid:
            pred = torch.sigmoid(pred)
        # Soft intersection / union over channel and spatial dimensions.
        intersection = torch.sum(pred * label * valid, dim=(1, 2, 3))
        union = torch.sum(torch.max(pred, label) * valid, dim=(1, 2, 3))
        return 1.0 - intersection / (union + 1e-8)
class SigmoidBinaryCrossEntropyLoss(nn.Module):
    """Binary cross-entropy with an ignore label, averaged per batch element.

    When `from_sigmoid` is False, `pred` is treated as logits and the
    numerically stable BCE-with-logits formulation is used; otherwise `pred`
    is assumed to already be probabilities.
    """
    def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, ignore_label=-1):
        super(SigmoidBinaryCrossEntropyLoss, self).__init__()
        self._from_sigmoid = from_sigmoid
        self._ignore_label = ignore_label
        self._weight = 1.0 if weight is None else weight
        self._batch_axis = batch_axis
    def forward(self, pred, label):
        label = label.view(pred.size())
        valid = label != self._ignore_label
        # Ignored positions get a zero target; they are masked out below anyway.
        label = torch.where(valid, label, torch.zeros_like(label))
        if self._from_sigmoid:
            eps = 1e-12
            positive_term = torch.log(pred + eps) * label
            negative_term = torch.log(1. - pred + eps) * (1. - label)
            loss = -(positive_term + negative_term)
        else:
            # max(x, 0) - x * y + log(1 + exp(-|x|)): stable BCE with logits.
            loss = torch.relu(pred) - pred * label + F.softplus(-torch.abs(pred))
        loss = self._weight * (loss * valid)
        # Mean over every dimension except the batch axis.
        return torch.mean(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
| [
"torch.ones_like",
"torch.flatten",
"torch.ones",
"torch.relu",
"torch.where",
"torch.zeros_like",
"numpy.any",
"torch.abs",
"torch.sigmoid",
"torch.max",
"torch.clamp_max",
"torch.no_grad",
"torch.sum",
"torch.log"
] | [((1130, 1218), 'torch.where', 'torch.where', (['one_hot', '(self._alpha * sample_weight)', '((1 - self._alpha) * sample_weight)'], {}), '(one_hot, self._alpha * sample_weight, (1 - self._alpha) *\n sample_weight)\n', (1141, 1218), False, 'import torch\n'), ((1367, 1419), 'torch.sum', 'torch.sum', (['sample_weight'], {'dim': '(-2, -1)', 'keepdim': '(True)'}), '(sample_weight, dim=(-2, -1), keepdim=True)\n', (1376, 1419), False, 'import torch\n'), ((1439, 1482), 'torch.sum', 'torch.sum', (['beta'], {'dim': '(-2, -1)', 'keepdim': '(True)'}), '(beta, dim=(-2, -1), keepdim=True)\n', (1448, 1482), False, 'import torch\n'), ((4001, 4089), 'torch.where', 'torch.where', (['one_hot', '(self._alpha * sample_weight)', '((1 - self._alpha) * sample_weight)'], {}), '(one_hot, self._alpha * sample_weight, (1 - self._alpha) *\n sample_weight)\n', (4012, 4089), False, 'import torch\n'), ((1093, 1112), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (1106, 1112), False, 'import torch\n'), ((1286, 1307), 'torch.ones_like', 'torch.ones_like', (['pred'], {}), '(pred)\n', (1301, 1307), False, 'import torch\n'), ((1675, 1712), 'torch.clamp_max', 'torch.clamp_max', (['beta', 'self._max_mult'], {}), '(beta, self._max_mult)\n', (1690, 1712), False, 'import torch\n'), ((1727, 1742), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1740, 1742), False, 'import torch\n'), ((1962, 1986), 'numpy.any', 'np.any', (['(ignore_area == 0)'], {}), '(ignore_area == 0)\n', (1968, 1986), True, 'import numpy as np\n'), ((3964, 3983), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (3977, 3983), False, 'import torch\n'), ((4157, 4178), 'torch.ones_like', 'torch.ones_like', (['pred'], {}), '(pred)\n', (4172, 4178), False, 'import torch\n'), ((5183, 5202), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (5196, 5202), False, 'import torch\n'), ((5953, 5976), 'torch.zeros_like', 'torch.zeros_like', (['label'], {}), '(label)\n', (5969, 5976), False, 'import 
torch\n'), ((1261, 1284), 'torch.abs', 'torch.abs', (['(label - pred)'], {}), '(label - pred)\n', (1270, 1284), False, 'import torch\n'), ((4132, 4155), 'torch.abs', 'torch.abs', (['(label - pred)'], {}), '(label - pred)\n', (4141, 4155), False, 'import torch\n'), ((5225, 5279), 'torch.sum', 'torch.sum', (['(pred * label * sample_weight)'], {'dim': '(1, 2, 3)'}), '(pred * label * sample_weight, dim=(1, 2, 3))\n', (5234, 5279), False, 'import torch\n'), ((6033, 6049), 'torch.relu', 'torch.relu', (['pred'], {}), '(pred)\n', (6043, 6049), False, 'import torch\n'), ((2113, 2145), 'torch.flatten', 'torch.flatten', (['beta'], {'start_dim': '(1)'}), '(beta, start_dim=1)\n', (2126, 2145), False, 'import torch\n'), ((6079, 6094), 'torch.abs', 'torch.abs', (['pred'], {}), '(pred)\n', (6088, 6094), False, 'import torch\n'), ((6155, 6176), 'torch.log', 'torch.log', (['(pred + eps)'], {}), '(pred + eps)\n', (6164, 6176), False, 'import torch\n'), ((6208, 6235), 'torch.log', 'torch.log', (['(1.0 - pred + eps)'], {}), '(1.0 - pred + eps)\n', (6217, 6235), False, 'import torch\n'), ((2343, 2375), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float'}), '(1, dtype=torch.float)\n', (2353, 2375), False, 'import torch\n'), ((4288, 4320), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float'}), '(1, dtype=torch.float)\n', (4298, 4320), False, 'import torch\n'), ((5307, 5329), 'torch.max', 'torch.max', (['pred', 'label'], {}), '(pred, label)\n', (5316, 5329), False, 'import torch\n')] |
import signal
import numpy as np
from keras import Input
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.engine import Model
from keras.layers import Dense, Activation
from keras.losses import mean_squared_error
from keras.models import Sequential
from keras.utils import plot_model
from sklearn.gaussian_process import GaussianProcessRegressor
from autokeras.layers import WeightedAdd
def graph_model():
    """Build two chained functional models, stack them in a Sequential, then
    extend the first model's output — printing shapes/layers along the way."""
    inp = Input(shape=(32,))
    first_input = inp
    hidden = Dense(32)(inp)
    hidden = Dense(32)(hidden)
    dense_model1 = Model(inputs=inp, outputs=hidden)
    inp = Input(shape=(32,))
    hidden = Dense(32)(inp)
    hidden = Dense(32)(hidden)
    dense_model2 = Model(inputs=inp, outputs=hidden)
    dense_model = Sequential([dense_model1, dense_model2])
    print(dense_model1.input_shape)
    print(dense_model1.output_shape)
    print(dense_model.input_shape)
    print(dense_model.output_shape)
    print(dense_model.layers)
    # Graft two more layers onto the first model's output tensor.
    hidden = dense_model1.output
    hidden = Dense(32)(hidden)
    hidden = Dense(32)(hidden)
    final_model = Model(inputs=first_input, outputs=hidden)
    print(final_model.layers)
def my_layer():
    """Exercise the WeightedAdd layer: predict, then overfit on the identity."""
    inp = Input(shape=(3, 3, 2))
    out = WeightedAdd()(inp)
    model = Model(inputs=inp, outputs=out)
    batch = np.ones((1, 3, 3, 2))
    print(model.predict_on_batch(batch))
    # Train the layer to reproduce its input exactly.
    model.compile(optimizer='Adam', loss=mean_squared_error)
    model.fit(batch, batch, epochs=1000)
    print(model.predict_on_batch(batch))
def gpr():
    """Check that GaussianProcessRegressor can fit and predict from one sample."""
    regressor = GaussianProcessRegressor()
    regressor.fit([[0, 1, 0, 1]], [1])
    print(regressor.predict([[1, 0, 1, 0]]))
    print(regressor.predict([[0, 1, 0, 1]]))
def long_function_call():
    """Busy-loop for an extremely long time; used as the timeout victim."""
    counter = 1
    for _ in range(int(1e10)):
        counter += 1
def time_limit():
    """Abort long_function_call() via SIGALRM after 3 seconds.

    Note: SIGALRM is POSIX-only; this demo will not work on Windows.
    """
    def signal_handler(signum, frame):
        raise Exception("Timed out!")
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(3)  # three seconds (fixed stale "Ten seconds" comment)
    try:
        long_function_call()
    except Exception as msg:
        print(type(msg))
        print("Timed is up!")
    finally:
        # Cancel any pending alarm so it cannot fire later in the process.
        signal.alarm(0)
def visualize(model, path='/tmp/logs/model.png'):
svg = model_to_dot(model).create(prog='dot', format='svg')
print(str(svg))
plot_model(model, to_file=path, show_shapes=True)
# model = Sequential()
#
# model.add(Dense(10, input_shape=(784,)))
# model.add(Activation('softmax'))
#
# model.compile(optimizer='sgd', loss='categorical_crossentropy')
#
# visualize(model)
time_limit()
| [
"keras.Input",
"keras.engine.Model",
"numpy.ones",
"autokeras.layers.WeightedAdd",
"keras.utils.plot_model",
"keras.layers.Dense",
"keras.utils.vis_utils.model_to_dot",
"signal.alarm",
"keras.models.Sequential",
"signal.signal",
"sklearn.gaussian_process.GaussianProcessRegressor"
] | [((489, 507), 'keras.Input', 'Input', ([], {'shape': '(32,)'}), '(shape=(32,))\n', (494, 507), False, 'from keras import Input\n'), ((592, 618), 'keras.engine.Model', 'Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (597, 618), False, 'from keras.engine import Model\n'), ((627, 645), 'keras.Input', 'Input', ([], {'shape': '(32,)'}), '(shape=(32,))\n', (632, 645), False, 'from keras import Input\n'), ((707, 733), 'keras.engine.Model', 'Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (712, 733), False, 'from keras.engine import Model\n'), ((752, 792), 'keras.models.Sequential', 'Sequential', (['[dense_model1, dense_model2]'], {}), '([dense_model1, dense_model2])\n', (762, 792), False, 'from keras.models import Sequential\n'), ((1056, 1095), 'keras.engine.Model', 'Model', ([], {'inputs': 'original_input', 'outputs': 'b'}), '(inputs=original_input, outputs=b)\n', (1061, 1095), False, 'from keras.engine import Model\n'), ((1185, 1207), 'keras.Input', 'Input', ([], {'shape': '(3, 3, 2)'}), '(shape=(3, 3, 2))\n', (1190, 1207), False, 'from keras import Input\n'), ((1245, 1271), 'keras.engine.Model', 'Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (1250, 1271), False, 'from keras.engine import Model\n'), ((1283, 1304), 'numpy.ones', 'np.ones', (['(1, 3, 3, 2)'], {}), '((1, 3, 3, 2))\n', (1290, 1304), True, 'import numpy as np\n'), ((1508, 1534), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (1532, 1534), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((1881, 1926), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'signal_handler'], {}), '(signal.SIGALRM, signal_handler)\n', (1894, 1926), False, 'import signal\n'), ((1931, 1946), 'signal.alarm', 'signal.alarm', (['(3)'], {}), '(3)\n', (1943, 1946), False, 'import signal\n'), ((2223, 2272), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': 
'path', 'show_shapes': '(True)'}), '(model, to_file=path, show_shapes=True)\n', (2233, 2272), False, 'from keras.utils import plot_model\n'), ((539, 548), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (544, 548), False, 'from keras.layers import Dense, Activation\n'), ((560, 569), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (565, 569), False, 'from keras.layers import Dense, Activation\n'), ((654, 663), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (659, 663), False, 'from keras.layers import Dense, Activation\n'), ((675, 684), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (680, 684), False, 'from keras.layers import Dense, Activation\n'), ((1004, 1013), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (1009, 1013), False, 'from keras.layers import Dense, Activation\n'), ((1025, 1034), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (1030, 1034), False, 'from keras.layers import Dense, Activation\n'), ((1216, 1229), 'autokeras.layers.WeightedAdd', 'WeightedAdd', ([], {}), '()\n', (1227, 1229), False, 'from autokeras.layers import WeightedAdd\n'), ((2146, 2165), 'keras.utils.vis_utils.model_to_dot', 'model_to_dot', (['model'], {}), '(model)\n', (2158, 2165), False, 'from keras.utils.vis_utils import model_to_dot\n')] |
##########################################################################
#
# Copyright 2007-2019 by <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
##########################################################################
import vigra, geomap, numpy, math
from geomap import Polygon
# FIXME: computing bsd42044 1.5 SPWS (spline order 2)...
# ...
# File ".../maputils.py", line 473, in addFlowLinesToMap
# for cp in polytools.clipPoly(points, clipBox):
# File ".../polytools.py", line 209, in clipPoly
# assert outside != prevOutside; prevOutside = outside
# AssertionError
def squaredNorm(v):
return numpy.dot(v, v)
def maxDistIter(polygon, maxDist):
maxDist2 = numpy.square(maxDist)
it = iter(polygon)
prev = it.next()
yield prev
for p in it:
dist2 = squaredNorm(p-prev)
if dist2 > maxDist2:
dist = math.sqrt(dist2)
segmentCount = int(math.ceil(dist / maxDist))
segment = p - prev
for i in range(1, segmentCount):
yield p + segment*i/segmentCount
yield p
# --------------------------------------------------------------------
LEFT = 1
RIGHT = 2
TOP = 4
BOTTOM = 8
def clipPoly(polygon, clipRect, closeAtBorder = None):
"""clipPoly(polygon, clipRect)
Clips away those parts of polygon which are not in clipRect.
Returns a list of polygons (since the polygon may leave clipRect,
enter again, leave, ...). Polygon segments crossing clipRect's
borders are cut, such that the resulting polyons get new endpoints
exactly on the border."""
result = []
# print "clipPoly(%s..%s)" % (clipRect.begin(), clipRect.end())
# print list(polygon)
if closeAtBorder is None:
closeAtBorder = (polygon[0] == polygon[-1])
x1, y1 = clipRect.begin()
x2, y2 = clipRect.end()
part = None
startBorder = None
parts = []
relPos = None
for i, p in enumerate(polygon):
prevRP = relPos
relPos = 0
if p[0] < x1:
relPos |= LEFT
elif p[0] > x2:
relPos |= RIGHT
if p[1] < y1:
relPos |= TOP
elif p[1] > y2:
relPos |= BOTTOM
if relPos: # outside
if not i: # incomplete first segment
continue
if prevRP & relPos:
# complete segment outside
continue
# calculate leaving intersection
diff = polygon[i-1] - p
l = -1.0
if relPos & LEFT:
l = max(l, (x1 - p[0]) / diff[0])
endBorder = LEFT
if relPos & RIGHT:
l = max(l, (x2 - p[0]) / diff[0])
endBorder = RIGHT
if relPos & TOP:
nl = (y1 - p[1]) / diff[1]
if nl > l:
l = nl
endBorder = TOP
if relPos & BOTTOM:
nl = (y2 - p[1]) / diff[1]
if nl > l:
l = nl
endBorder = BOTTOM
ip = p + l * diff
if prevRP:
# segment may cross cliprect, calc. start intersection
pl = 2.0
if prevRP & LEFT:
pl = min(pl, (x1 - p[0]) / diff[0])
startBorder = LEFT
if prevRP & RIGHT:
pl = min(pl, (x2 - p[0]) / diff[0])
startBorder = RIGHT
if prevRP & TOP:
npl = (y1 - p[1]) / diff[1]
if npl < pl:
pl = npl
startBorder = TOP
if prevRP & BOTTOM:
npl = (y2 - p[1]) / diff[1]
if npl < pl:
pl = npl
startBorder = BOTTOM
if pl <= l:
# we never crossed the clipRect
continue
pip = p + pl * diff
part = Polygon([pip, ip])
else:
part.append(ip)
if part.length():
parts.append((startBorder, part, endBorder))
part = None
continue
if not part:
part = Polygon()
if i:
# calculate entering intersection:
diff = polygon[i-1] - p
l = 2.0
if prevRP & LEFT:
l = min(l, (x1 - p[0]) / diff[0])
startBorder = LEFT
if prevRP & RIGHT:
l = min(l, (x2 - p[0]) / diff[0])
startBorder = RIGHT
if prevRP & TOP:
nl = (y1 - p[1]) / diff[1]
if nl < l:
l = nl
startBorder = TOP
if prevRP & BOTTOM:
nl = (y2 - p[1]) / diff[1]
if nl < l:
l = nl
startBorder = BOTTOM
ip = p + l * diff
part.append(ip)
part.append(p)
if part and part.length():
parts.append((startBorder, part, None))
if not parts:
return []
if not polygon.closed():
return [p[1] for p in parts]
# if polygon[0] (== polygon[-1]) is inside clipRect, we may
# need to join the first and last part here:
if parts[0][1][0] == parts[-1][1][-1]:
assert parts[0][0] is None and parts[-1][-1] is None
# polygon is entirely within clipRect:
if len(parts) == 1:
return [parts[0][1]]
parts[-1][1].extend(parts[0][1])
parts[0] = (parts[-1][0], parts[-1][1], parts[0][2])
del parts[-1]
if not closeAtBorder:
return [p[1] for p in parts]
# compose counterclockwise list of intersection points at clip border:
sides = (
([(-p[1][-1][0], p[1], True ) for p in parts if p[2] == TOP] +
[(-p[1][ 0][0], p[1], False) for p in parts if p[0] == TOP]),
([( p[1][-1][1], p[1], True ) for p in parts if p[2] == LEFT] +
[( p[1][ 0][1], p[1], False) for p in parts if p[0] == LEFT]),
([( p[1][-1][0], p[1], True ) for p in parts if p[2] == BOTTOM] +
[( p[1][ 0][0], p[1], False) for p in parts if p[0] == BOTTOM]),
([(-p[1][-1][1], p[1], True ) for p in parts if p[2] == RIGHT] +
[(-p[1][ 0][1], p[1], False) for p in parts if p[0] == RIGHT]))
# counterclockwise list of corner positions:
corners = (clipRect.begin(),
clipRect.begin()+(0, clipRect.size()[1]),
clipRect.end(),
clipRect.begin()+(clipRect.size()[0], 0))
isCCW = polygon.partialArea() > 0
# bookkeeping about mergings (always use the most current polygon)
merged = {}
def mergeRoot(poly):
while True:
result = merged.get(poly, poly)
if result is poly:
break
poly = result
return result
lastPoly = None
prevPoly = None
prevOutside = None
for side, end in zip(sides, corners):
for _, poly, outside in sorted(side):
# assert outside != prevOutside; prevOutside = outside
if outside == isCCW:
prevPoly = poly
else:
if prevPoly == None:
lastPoly = poly
continue
prevPoly = mergeRoot(prevPoly)
if prevPoly == poly:
poly.append(poly[0])
result.append(poly)
else:
prevPoly.extend(poly)
merged[poly] = prevPoly
prevPoly = None
if prevPoly:
mergeRoot(prevPoly).append(end)
if lastPoly:
lastPoly.append(lastPoly[0])
if lastPoly.length():
result.append(lastPoly)
return result
# --------------------------------------------------------------------
class Line(object):
def __init__(self, norm, dist):
self.norm = norm
self.dist = dist
def isParallel(self, other):
return abs(numpy.dot(self.norm, other.norm)) == 1.0
def intersect(self, other):
assert not self.isParallel(other)
if abs(self.norm[0]) > abs(other.norm[0]):
a, b = self, other
else:
a, b = other, self
top = (a.norm/a.norm[0], a.dist/a.norm[0])
bottom = (b.norm-top[0]*b.norm[0], b.dist-top[1]*b.norm[0])
y = bottom[1]/bottom[0][1]
x = top[1]-y*top[0][1]
return (x, y)
def dir(self):
"""Return direction vector."""
return (self.norm[1], -self.norm[0])
def orthogonalDistance(self, point):
"""Return distance between given point and this line"""
return numpy.dot(point, self.norm) - self.dist
def point(self, l = 0):
"""Return point on line. For l == 0 (default), this will be
the closest point to the origin. l moves on the line."""
return self.dist * self.norm + self.dir() * l
class LineSegment(object):
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def dir(self):
result = self.p2 - self.p1
result /= result.magnitude()
return result
def norm(self):
d = self.dir()
return (-d[1], d[0])
def dist(self):
"""distance to origin"""
return numpy.dot(self.p1, self.norm())
def line(self):
return Line(self.norm(), self.dist())
def polyLineSegment(poly, index):
return LineSegment(poly[index % len(poly)], poly[(index+1) % len(poly)])
def polyLineSegments(poly):
for i in range(len(poly)-1):
yield polyLineSegment(poly, i)
def shrinkPoly(poly, offset):
assert poly[0] == poly[-1], "polygon should be closed"
lines = [seg.line() for seg in polyLineSegments(poly)]
for line in lines:
line.dist -= offset
i = 1
while i < len(lines):
if lines[i-1].isParallel(lines[i]):
del lines[i]
else:
i += 1
if lines[-1].isParallel(lines[0]):
del lines[-1]
result = Polygon([lines[i].intersect(lines[(i+1)%len(lines)])
for i in range(len(lines))])
result.append(result[0])
return result
def rotatePoly(poly, angle):
"""Rotate polygon by the given angle around the origin."""
unitX = (math.cos(angle), -math.sin(angle))
unitY = (math.sin(angle), math.cos(angle))
result = Polygon()
for point in poly:
result.append((numpy.dot(point, unitX), numpy.dot(point, unitY)))
return result
def subsetDigitization(poly, shift = None, size = None):
"""Sample poly with a regular grid at integer coordinates starting
from (0,0) to the given size (which should be a Size2D object)."""
if size == None:
size = poly.boundingBox().size()
size = (int(math.ceil(size[0]))+2,
int(math.ceil(size[1]))+2)
if not shift:
shift = (0, 0)
shift = numpy.asarray(shift) + (1, 1) - poly.boundingBox().begin()
poly = Polygon(poly + shift)
result = vigra.GrayImage(size)
for p in vigra.meshIter(size):
result[p] = poly.contains((p[0], p[1])) and 1 or 0
return result
# --------------------------------------------------------------------
def smallestBoundingBox(ch):
"""Determine rotated bbox from convex hull"""
# FIXME: use rotating calipers for O(N) instead of O(N^2)!
assert ch.closed()
bboxes = []
for seg in polyLineSegments(ch):
line = seg.line()
norm = line.norm
dir = line.dir()
dists = []
positions = []
for p in ch:
dists.append(numpy.dot(norm, p))
positions.append(numpy.dot(dir, p))
l1 = min(positions)
l2 = max(positions)
l3 = min(dists)
l4 = max(dists)
area = (l2 - l1) * (l4 - l3)
bboxes.append((area, line, l1, l2, l3, l4))
bboxes.sort()
_, line, l1, l2, l3, l4 = bboxes[0]
p1 = l1 * line.dir() + l3 * line.norm
p2 = l1 * line.dir() + l4 * line.norm
p3 = l2 * line.dir() + l4 * line.norm
p4 = l2 * line.dir() + l3 * line.norm
return Polygon([p1, p2, p3, p4, p1])
# --------------------------------------------------------------------
if __name__ == "__main__":
import fig
f = fig.File("cliptest.fig")
cr = geomap.BoundingBox((0, 0), (4500, 4500))
f.layer(1).remove()
for o in f.findObjects(type = fig.PolylineBase, depth = 42):
p = Polygon(o.points)
if o.closed():
p.append(p[0])
pp = clipPoly(p, cr)
for p in pp:
no = fig.Polygon(p, p[0] == p[-1])
no.depth = 1
no.lineWidth = 3
if no.closed():
no.fillStyle = fig.FillStyle.Solid
no.fillColor = f.getColor(0.5)
else:
no.forwardArrow = fig.Arrow()
f.append(no)
f.save(fig2dev = "eps")
| [
"geomap.Polygon",
"fig.File",
"math.sqrt",
"math.ceil",
"vigra.meshIter",
"vigra.GrayImage",
"numpy.square",
"numpy.asarray",
"math.sin",
"fig.Polygon",
"fig.Arrow",
"math.cos",
"geomap.BoundingBox",
"numpy.dot"
] | [((1738, 1753), 'numpy.dot', 'numpy.dot', (['v', 'v'], {}), '(v, v)\n', (1747, 1753), False, 'import vigra, geomap, numpy, math\n'), ((1805, 1826), 'numpy.square', 'numpy.square', (['maxDist'], {}), '(maxDist)\n', (1817, 1826), False, 'import vigra, geomap, numpy, math\n'), ((11698, 11707), 'geomap.Polygon', 'Polygon', ([], {}), '()\n', (11705, 11707), False, 'from geomap import Polygon\n'), ((12347, 12368), 'vigra.GrayImage', 'vigra.GrayImage', (['size'], {}), '(size)\n', (12362, 12368), False, 'import vigra, geomap, numpy, math\n'), ((12382, 12402), 'vigra.meshIter', 'vigra.meshIter', (['size'], {}), '(size)\n', (12396, 12402), False, 'import vigra, geomap, numpy, math\n'), ((13434, 13463), 'geomap.Polygon', 'Polygon', (['[p1, p2, p3, p4, p1]'], {}), '([p1, p2, p3, p4, p1])\n', (13441, 13463), False, 'from geomap import Polygon\n'), ((13587, 13611), 'fig.File', 'fig.File', (['"""cliptest.fig"""'], {}), "('cliptest.fig')\n", (13595, 13611), False, 'import fig\n'), ((13621, 13661), 'geomap.BoundingBox', 'geomap.BoundingBox', (['(0, 0)', '(4500, 4500)'], {}), '((0, 0), (4500, 4500))\n', (13639, 13661), False, 'import vigra, geomap, numpy, math\n'), ((11602, 11617), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (11610, 11617), False, 'import vigra, geomap, numpy, math\n'), ((11650, 11665), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (11658, 11665), False, 'import vigra, geomap, numpy, math\n'), ((11668, 11683), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (11676, 11683), False, 'import vigra, geomap, numpy, math\n'), ((12311, 12332), 'geomap.Polygon', 'Polygon', (['(poly + shift)'], {}), '(poly + shift)\n', (12318, 12332), False, 'from geomap import Polygon\n'), ((13763, 13780), 'geomap.Polygon', 'Polygon', (['o.points'], {}), '(o.points)\n', (13770, 13780), False, 'from geomap import Polygon\n'), ((1987, 2003), 'math.sqrt', 'math.sqrt', (['dist2'], {}), '(dist2)\n', (1996, 2003), False, 'import vigra, geomap, numpy, math\n'), ((5396, 
5405), 'geomap.Polygon', 'Polygon', ([], {}), '()\n', (5403, 5405), False, 'from geomap import Polygon\n'), ((10004, 10031), 'numpy.dot', 'numpy.dot', (['point', 'self.norm'], {}), '(point, self.norm)\n', (10013, 10031), False, 'import vigra, geomap, numpy, math\n'), ((11620, 11635), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (11628, 11635), False, 'import vigra, geomap, numpy, math\n'), ((13898, 13927), 'fig.Polygon', 'fig.Polygon', (['p', '(p[0] == p[-1])'], {}), '(p, p[0] == p[-1])\n', (13909, 13927), False, 'import fig\n'), ((2035, 2060), 'math.ceil', 'math.ceil', (['(dist / maxDist)'], {}), '(dist / maxDist)\n', (2044, 2060), False, 'import vigra, geomap, numpy, math\n'), ((5149, 5167), 'geomap.Polygon', 'Polygon', (['[pip, ip]'], {}), '([pip, ip])\n', (5156, 5167), False, 'from geomap import Polygon\n'), ((9319, 9351), 'numpy.dot', 'numpy.dot', (['self.norm', 'other.norm'], {}), '(self.norm, other.norm)\n', (9328, 9351), False, 'import vigra, geomap, numpy, math\n'), ((11754, 11777), 'numpy.dot', 'numpy.dot', (['point', 'unitX'], {}), '(point, unitX)\n', (11763, 11777), False, 'import vigra, geomap, numpy, math\n'), ((11779, 11802), 'numpy.dot', 'numpy.dot', (['point', 'unitY'], {}), '(point, unitY)\n', (11788, 11802), False, 'import vigra, geomap, numpy, math\n'), ((12237, 12257), 'numpy.asarray', 'numpy.asarray', (['shift'], {}), '(shift)\n', (12250, 12257), False, 'import vigra, geomap, numpy, math\n'), ((12936, 12954), 'numpy.dot', 'numpy.dot', (['norm', 'p'], {}), '(norm, p)\n', (12945, 12954), False, 'import vigra, geomap, numpy, math\n'), ((12985, 13002), 'numpy.dot', 'numpy.dot', (['dir', 'p'], {}), '(dir, p)\n', (12994, 13002), False, 'import vigra, geomap, numpy, math\n'), ((14160, 14171), 'fig.Arrow', 'fig.Arrow', ([], {}), '()\n', (14169, 14171), False, 'import fig\n'), ((12106, 12124), 'math.ceil', 'math.ceil', (['size[0]'], {}), '(size[0])\n', (12115, 12124), False, 'import vigra, geomap, numpy, math\n'), ((12149, 12167), 'math.ceil', 
'math.ceil', (['size[1]'], {}), '(size[1])\n', (12158, 12167), False, 'import vigra, geomap, numpy, math\n')] |
import typing
from enum import unique
import os
import numpy as np
import torch
from tqdm import tqdm
import xarray as xr
from langbrainscore.dataset import Dataset
from langbrainscore.interface import EncoderRepresentations, _ModelEncoder
from langbrainscore.utils.encoder import (
aggregate_layers,
cos_sim_matrix,
count_zero_threshold_values,
flatten_activations_per_sample,
get_context_groups,
get_torch_device,
pick_matching_token_ixs,
postprocess_activations,
repackage_flattened_activations,
encode_stimuli_in_context,
)
from langbrainscore.utils.logging import log
from langbrainscore.utils.xarray import copy_metadata, fix_xr_dtypes
from langbrainscore.utils.resources import model_classes, config_name_mappings
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class HuggingFaceEncoder(_ModelEncoder):
def __init__(
self,
model_id,
emb_aggregation: typing.Union[str, None, typing.Callable],
device=get_torch_device(),
context_dimension: str = None,
bidirectional: bool = False,
emb_preproc: typing.Tuple[str] = (),
include_special_tokens: bool = True,
):
"""
Args:
model_id (str): the model id
device (None, ?): the device to use
context_dimension (str, optional): the dimension to use for extracting strings using context.
if None, each sampleid (stimuli) will be treated as a single context group.
if a string is specified, the string must refer to the name of a dimension in the xarray-like dataset
object (langbrainscore.dataset.Dataset) that provides groupings of sampleids (stimuli) that should be
used as context when generating encoder representations [default: None].
bidirectional (bool): whether to use bidirectional encoder (i.e., access both forward and backward context)
[default: False]
emb_aggregation (typing.Union[str, None, typing.Callable], optional): how to aggregate the hidden states of
the encoder representations for each sampleid (stimuli). [default: "last"]
emb_preproc (tuple): a list of strings specifying preprocessing functions to apply to the aggregated embeddings.
Processing is performed layer-wise.
include_special_tokens (bool): whether to include special tokens in the encoder representations.
"""
super().__init__(
model_id,
_context_dimension=context_dimension,
_bidirectional=bidirectional,
_emb_aggregation=emb_aggregation,
_emb_preproc=emb_preproc,
_include_special_tokens=include_special_tokens,
)
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers import logging as transformers_logging
transformers_logging.set_verbosity_error()
self.device = device or get_torch_device()
self.config = AutoConfig.from_pretrained(self._model_id)
self.tokenizer = AutoTokenizer.from_pretrained(
self._model_id, multiprocessing=True
)
self.model = AutoModel.from_pretrained(self._model_id, config=self.config)
try:
self.model = self.model.to(self.device)
except RuntimeError:
self.device = "cpu"
self.model = self.model.to(self.device)
def get_encoder_representations_template(
self, dataset=None, representations=xr.DataArray()
) -> EncoderRepresentations:
"""
returns an empty `EncoderRepresentations` object with all the appropriate
attributes but the `dataset` and `representations` missing and to be filled in
later.
"""
return EncoderRepresentations(
dataset=dataset,
representations=representations,
model_id=self._model_id,
context_dimension=self._context_dimension,
bidirectional=self._bidirectional,
emb_aggregation=self._emb_aggregation,
emb_preproc=self._emb_preproc,
include_special_tokens=self._include_special_tokens,
)
def encode(
self,
dataset: Dataset,
read_cache: bool = True, # avoid recomputing if cached `EncoderRepresentations` exists, recompute if not
write_cache: bool = True, # dump the result of this computation to cache?
) -> EncoderRepresentations:
"""
Input a langbrainscore Dataset, encode the stimuli according to the parameters specified in init, and return
the an xarray DataArray of aggregated representations for each stimulus.
Args:
dataset (langbrainscore.dataset.DataSet): [description]
read_cache (bool): Avoid recomputing if cached `EncoderRepresentations` exists, recompute if not
write_cache (bool): Dump and write the result of the computed encoder representations to cache
Raises:
NotImplementedError: [description]
ValueError: [description]
Returns:
[type]: [description]
"""
# before computing the representations from scratch, we will first see if any
# cached representations exist already.
if read_cache:
to_check_in_cache: EncoderRepresentations = (
self.get_encoder_representations_template(dataset=dataset)
)
try:
to_check_in_cache.load_cache()
return to_check_in_cache
except FileNotFoundError:
log(
f"couldn't load cached reprs for {to_check_in_cache.identifier_string}; recomputing.",
cmap="WARN",
type="WARN",
)
self.model.eval()
stimuli = dataset.stimuli.values
# Initialize the context group coordinate (obtain embeddings with context)
context_groups = get_context_groups(dataset, self._context_dimension)
# list for storing activations for each stimulus with all layers flattened
# list for storing layer ids ([0 0 0 0 ... 1 1 1 ...]) indicating which layer each
# neuroid (representation dimension) came from
flattened_activations, layer_ids = [], []
###############################################################################
# ALL SAMPLES LOOP
###############################################################################
_, unique_ixs = np.unique(context_groups, return_index=True)
# Make sure context group order is preserved
for group in tqdm(context_groups[np.sort(unique_ixs)], desc="Encoding stimuli"):
# Mask based on the context group
mask_context = context_groups == group
stimuli_in_context = stimuli[mask_context]
# store model states for each stimulus in this context group
encoded_stimuli = []
###############################################################################
# CONTEXT LOOP
###############################################################################
for encoded_stim in encode_stimuli_in_context(
stimuli_in_context=stimuli_in_context,
tokenizer=self.tokenizer,
model=self.model,
bidirectional=self._bidirectional,
include_special_tokens=self._include_special_tokens,
emb_aggregation=self._emb_aggregation,
device=self.device,
):
encoded_stimuli += [encoded_stim]
###############################################################################
# END CONTEXT LOOP
###############################################################################
# Flatten activations across layers and package as xarray
flattened_activations_and_layer_ids = [
*map(flatten_activations_per_sample, encoded_stimuli)
]
for f_as, l_ids in flattened_activations_and_layer_ids:
flattened_activations += [f_as]
layer_ids += [l_ids]
assert len(f_as) == len(l_ids) # Assert all layer lists are equal
###############################################################################
# END ALL SAMPLES LOOP
###############################################################################
# Stack flattened activations and layer ids to obtain [n_samples, emb_din * n_layers]
activations_2d = np.vstack(flattened_activations)
layer_ids_1d = np.squeeze(np.unique(np.vstack(layer_ids), axis=0))
# Post-process activations after obtaining them (or "pre-process" them before computing brainscore)
if len(self._emb_preproc) > 0:
for mode in self._emb_preproc:
activations_2d, layer_ids_1d = postprocess_activations(
activations_2d=activations_2d,
layer_ids_1d=layer_ids_1d,
emb_preproc_mode=mode,
)
assert activations_2d.shape[1] == len(layer_ids_1d)
assert activations_2d.shape[0] == len(stimuli)
# Package activations as xarray and reapply metadata
encoded_dataset: xr.DataArray = repackage_flattened_activations(
activations_2d=activations_2d,
layer_ids_1d=layer_ids_1d,
dataset=dataset,
)
encoded_dataset: xr.DataArray = copy_metadata(
encoded_dataset,
dataset.contents,
"sampleid",
)
to_return: EncoderRepresentations = self.get_encoder_representations_template()
to_return.dataset = dataset
to_return.representations = fix_xr_dtypes(encoded_dataset)
if write_cache:
to_return.to_cache(overwrite=True)
return to_return
def get_modelcard(self):
"""
Returns the model card of the model (model-wise, and not layer-wise)
"""
model_classes = [
"gpt",
"bert",
] # continuously update based on new model classes supported
# based on the model_id, figure out which model class it is
model_class = [x for x in model_classes if x in self._model_id][0]
assert model_class is not None, f"model_id {self._model_id} not supported"
config_specs_of_interest = config_name_mappings[model_class]
model_specs = {}
for (
k_spec,
v_spec,
) in (
config_specs_of_interest.items()
): # key is the name we want to use in the model card,
# value is the name in the config
if v_spec is not None:
model_specs[k_spec] = getattr(self.config, v_spec)
else:
model_specs[k_spec] = None
self.model_specs = model_specs
return model_specs
class PTEncoder(_ModelEncoder):
def __init__(self, model_id: str) -> "PTEncoder":
super().__init__(model_id)
def encode(self, dataset: "langbrainscore.dataset.Dataset") -> xr.DataArray:
# TODO
...
class EncoderCheck:
"""
Class for checking whether obtained embeddings from the Encoder class are correct and similar to other encoder objects.
"""
def __init__(
self,
):
pass
def _load_cached_activations(self, encoded_ann_identifier: str):
raise NotImplementedError
def similiarity_metric_across_layers(
self,
sim_metric: str = "tol",
enc1: xr.DataArray = None,
enc2: xr.DataArray = None,
tol: float = 1e-8,
threshold: float = 1e-4,
) -> bool:
"""
Given two activations, iterate across layers and check np.allclose using different tolerance levels.
Parameters:
sim_metric: str
Similarity metric to use.
enc1: xr.DataArray
First encoder activations.
enc2: xr.DataArray
Second encoder activations.
tol: float
Tolerance level to start at (we will iterate upwards the tolerance level). Default is 1e-8.
Returns:
bool: whether the tolerance level was met (True) or not (False)
bad_stim: set of stimuli indices that did not meet tolerance level `threshold` (if any)
"""
# First check is whether number of layers / shapes match
assert enc1.shape == enc2.shape
assert (
enc1.sampleid.values == enc2.sampleid.values
).all() # ensure that we are looking at the same stimuli
layer_ids = enc1.layer.values
_, unique_ixs = np.unique(layer_ids, return_index=True)
print(f"\n\nChecking similarity across layers using sim_metric: {sim_metric}")
all_good = True
bad_stim = set() # store indices of stimuli that are not similar
# Iterate across layers
for layer_id in tqdm(layer_ids[np.sort(unique_ixs)]):
enc1_layer = enc1.isel(neuroid=(enc1.layer == layer_id)) # .squeeze()
enc2_layer = enc2.isel(neuroid=(enc2.layer == layer_id)) # .squeeze()
# Check whether values match. If not, iteratively increase tolerance until values match
if sim_metric in ("tol", "diff"):
abs_diff = np.abs(enc1_layer - enc2_layer)
abs_diff_per_stim = np.max(
abs_diff, axis=1
) # Obtain the biggest difference aross neuroids (units)
while (abs_diff_per_stim > tol).all():
tol *= 10
elif "cos" in sim_metric:
# Check cosine distance between each row, e.g., sentence vector
cos_sim = cos_sim_matrix(enc1_layer, enc2_layer)
cos_dist = (
1 - cos_sim
) # 0 means identical, 1 means orthogonal, 2 means opposite
# We still want this as close to zero as possible for similar vectors.
cos_dist_abs = np.abs(cos_dist)
abs_diff_per_stim = cos_dist_abs
# Check how close the cosine distance is to 0
while (cos_dist_abs > tol).all():
tol *= 10
else:
raise NotImplementedError(f"Invalid `sim_metric`: {sim_metric}")
print(f"Layer {layer_id}: Similarity at tolerance: {tol:.3e}")
if tol > threshold:
print(f"WARNING: Low tolerance level")
all_good = False
bad_stim.update(
enc1.sampleid[np.where(abs_diff_per_stim > tol)[0]]
) # get sampleids of stimuli that are not similar
return all_good, bad_stim
| [
"langbrainscore.utils.encoder.get_torch_device",
"numpy.abs",
"transformers.AutoModel.from_pretrained",
"langbrainscore.utils.logging.log",
"langbrainscore.utils.xarray.fix_xr_dtypes",
"numpy.unique",
"langbrainscore.utils.encoder.encode_stimuli_in_context",
"langbrainscore.utils.encoder.get_context_g... | [((986, 1004), 'langbrainscore.utils.encoder.get_torch_device', 'get_torch_device', ([], {}), '()\n', (1002, 1004), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((2919, 2961), 'transformers.logging.set_verbosity_error', 'transformers_logging.set_verbosity_error', ([], {}), '()\n', (2959, 2961), True, 'from transformers import logging as transformers_logging\n'), ((3036, 3078), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['self._model_id'], {}), '(self._model_id)\n', (3062, 3078), False, 'from transformers import AutoConfig, AutoModel, AutoTokenizer\n'), ((3104, 3171), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self._model_id'], {'multiprocessing': '(True)'}), '(self._model_id, multiprocessing=True)\n', (3133, 3171), False, 'from transformers import AutoConfig, AutoModel, AutoTokenizer\n'), ((3215, 3276), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self._model_id'], {'config': 'self.config'}), '(self._model_id, config=self.config)\n', (3240, 3276), False, 'from transformers import AutoConfig, AutoModel, AutoTokenizer\n'), ((3546, 3560), 'xarray.DataArray', 'xr.DataArray', ([], {}), '()\n', (3558, 3560), True, 'import xarray as xr\n'), ((3817, 4133), 'langbrainscore.interface.EncoderRepresentations', 'EncoderRepresentations', ([], {'dataset': 'dataset', 'representations': 'representations', 'model_id': 'self._model_id', 'context_dimension': 'self._context_dimension', 'bidirectional': 'self._bidirectional', 'emb_aggregation': 'self._emb_aggregation', 'emb_preproc': 'self._emb_preproc', 'include_special_tokens': 'self._include_special_tokens'}), '(dataset=dataset, 
representations=representations,\n model_id=self._model_id, context_dimension=self._context_dimension,\n bidirectional=self._bidirectional, emb_aggregation=self.\n _emb_aggregation, emb_preproc=self._emb_preproc, include_special_tokens\n =self._include_special_tokens)\n', (3839, 4133), False, 'from langbrainscore.interface import EncoderRepresentations, _ModelEncoder\n'), ((6028, 6080), 'langbrainscore.utils.encoder.get_context_groups', 'get_context_groups', (['dataset', 'self._context_dimension'], {}), '(dataset, self._context_dimension)\n', (6046, 6080), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((6589, 6633), 'numpy.unique', 'np.unique', (['context_groups'], {'return_index': '(True)'}), '(context_groups, return_index=True)\n', (6598, 6633), True, 'import numpy as np\n'), ((8699, 8731), 'numpy.vstack', 'np.vstack', (['flattened_activations'], {}), '(flattened_activations)\n', (8708, 8731), True, 'import numpy as np\n'), ((9447, 9558), 'langbrainscore.utils.encoder.repackage_flattened_activations', 'repackage_flattened_activations', ([], {'activations_2d': 'activations_2d', 'layer_ids_1d': 'layer_ids_1d', 'dataset': 'dataset'}), '(activations_2d=activations_2d, layer_ids_1d\n =layer_ids_1d, dataset=dataset)\n', (9478, 9558), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((9641, 9701), 'langbrainscore.utils.xarray.copy_metadata', 'copy_metadata', (['encoded_dataset', 'dataset.contents', '"""sampleid"""'], {}), "(encoded_dataset, dataset.contents, 'sampleid')\n", (9654, 
9701), False, 'from langbrainscore.utils.xarray import copy_metadata, fix_xr_dtypes\n'), ((9910, 9940), 'langbrainscore.utils.xarray.fix_xr_dtypes', 'fix_xr_dtypes', (['encoded_dataset'], {}), '(encoded_dataset)\n', (9923, 9940), False, 'from langbrainscore.utils.xarray import copy_metadata, fix_xr_dtypes\n'), ((12940, 12979), 'numpy.unique', 'np.unique', (['layer_ids'], {'return_index': '(True)'}), '(layer_ids, return_index=True)\n', (12949, 12979), True, 'import numpy as np\n'), ((2995, 3013), 'langbrainscore.utils.encoder.get_torch_device', 'get_torch_device', ([], {}), '()\n', (3011, 3013), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((7279, 7548), 'langbrainscore.utils.encoder.encode_stimuli_in_context', 'encode_stimuli_in_context', ([], {'stimuli_in_context': 'stimuli_in_context', 'tokenizer': 'self.tokenizer', 'model': 'self.model', 'bidirectional': 'self._bidirectional', 'include_special_tokens': 'self._include_special_tokens', 'emb_aggregation': 'self._emb_aggregation', 'device': 'self.device'}), '(stimuli_in_context=stimuli_in_context, tokenizer=\n self.tokenizer, model=self.model, bidirectional=self._bidirectional,\n include_special_tokens=self._include_special_tokens, emb_aggregation=\n self._emb_aggregation, device=self.device)\n', (7304, 7548), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((6728, 6747), 'numpy.sort', 'np.sort', (['unique_ixs'], {}), '(unique_ixs)\n', (6735, 6747), True, 'import numpy as np\n'), ((8776, 8796), 'numpy.vstack', 'np.vstack', 
(['layer_ids'], {}), '(layer_ids)\n', (8785, 8796), True, 'import numpy as np\n'), ((9045, 9154), 'langbrainscore.utils.encoder.postprocess_activations', 'postprocess_activations', ([], {'activations_2d': 'activations_2d', 'layer_ids_1d': 'layer_ids_1d', 'emb_preproc_mode': 'mode'}), '(activations_2d=activations_2d, layer_ids_1d=\n layer_ids_1d, emb_preproc_mode=mode)\n', (9068, 9154), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((13238, 13257), 'numpy.sort', 'np.sort', (['unique_ixs'], {}), '(unique_ixs)\n', (13245, 13257), True, 'import numpy as np\n'), ((13601, 13632), 'numpy.abs', 'np.abs', (['(enc1_layer - enc2_layer)'], {}), '(enc1_layer - enc2_layer)\n', (13607, 13632), True, 'import numpy as np\n'), ((13669, 13693), 'numpy.max', 'np.max', (['abs_diff'], {'axis': '(1)'}), '(abs_diff, axis=1)\n', (13675, 13693), True, 'import numpy as np\n'), ((5655, 5776), 'langbrainscore.utils.logging.log', 'log', (['f"""couldn\'t load cached reprs for {to_check_in_cache.identifier_string}; recomputing."""'], {'cmap': '"""WARN"""', 'type': '"""WARN"""'}), '(f"couldn\'t load cached reprs for {to_check_in_cache.identifier_string}; recomputing."\n , cmap=\'WARN\', type=\'WARN\')\n', (5658, 5776), False, 'from langbrainscore.utils.logging import log\n'), ((14018, 14056), 'langbrainscore.utils.encoder.cos_sim_matrix', 'cos_sim_matrix', (['enc1_layer', 'enc2_layer'], {}), '(enc1_layer, enc2_layer)\n', (14032, 14056), False, 'from langbrainscore.utils.encoder import aggregate_layers, cos_sim_matrix, count_zero_threshold_values, flatten_activations_per_sample, get_context_groups, get_torch_device, pick_matching_token_ixs, postprocess_activations, repackage_flattened_activations, encode_stimuli_in_context\n'), ((14313, 14329), 
'numpy.abs', 'np.abs', (['cos_dist'], {}), '(cos_dist)\n', (14319, 14329), True, 'import numpy as np\n'), ((14884, 14917), 'numpy.where', 'np.where', (['(abs_diff_per_stim > tol)'], {}), '(abs_diff_per_stim > tol)\n', (14892, 14917), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019/12/7 14:46
# @Author : zhoujun
import numpy as np
import cv2
import os
import random
from tqdm import tqdm
# Compute the per-channel mean and standard deviation over a random sample of
# training images, for use with transforms.Normalize.
train_txt_path = './train_val_list.txt'
CNum = 10000  # how many images to sample for the estimate
img_h, img_w = 640, 640
means, stdevs = [], []
# Fix 1: the original seeded the stack with an all-zeros image
# (np.zeros([img_w, img_h, 3, 1])) which biased the statistics toward 0;
# only real images are accumulated here.
# Fix 2: images are collected in a list and concatenated once, instead of
# growing the array with np.concatenate inside the loop (O(N^2) copying).
img_list = []
with open(train_txt_path, 'r') as f:
    lines = f.readlines()
    random.shuffle(lines)  # shuffle so the sample is random
    for i in tqdm(range(CNum)):
        img_path = lines[i].split('\t')[0]
        img = cv2.imread(img_path)
        img = cv2.resize(img, (img_h, img_w))
        # add a trailing sample axis so all images stack along axis 3
        img_list.append(img[:, :, :, np.newaxis])
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in tqdm(range(3)):
    pixels = imgs[:, :, i, :].ravel()  # flatten one channel across all samples
    means.append(np.mean(pixels))
    stdevs.append(np.std(pixels))
# cv2 reads images as BGR; PIL/Skimage read RGB, so reverse to report RGB order.
means.reverse()  # BGR --> RGB
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
print('transforms.Normalize(normMean = {}, normStd = {})'.format(means, stdevs))
"cv2.resize",
"numpy.std",
"random.shuffle",
"numpy.zeros",
"cv2.imread",
"numpy.mean",
"numpy.concatenate"
] | [((277, 307), 'numpy.zeros', 'np.zeros', (['[img_w, img_h, 3, 1]'], {}), '([img_w, img_h, 3, 1])\n', (285, 307), True, 'import numpy as np\n'), ((399, 420), 'random.shuffle', 'random.shuffle', (['lines'], {}), '(lines)\n', (413, 420), False, 'import random\n'), ((532, 552), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (542, 552), False, 'import cv2\n'), ((567, 598), 'cv2.resize', 'cv2.resize', (['img', '(img_h, img_w)'], {}), '(img, (img_h, img_w))\n', (577, 598), False, 'import cv2\n'), ((654, 689), 'numpy.concatenate', 'np.concatenate', (['(imgs, img)'], {'axis': '(3)'}), '((imgs, img), axis=3)\n', (668, 689), True, 'import numpy as np\n'), ((837, 852), 'numpy.mean', 'np.mean', (['pixels'], {}), '(pixels)\n', (844, 852), True, 'import numpy as np\n'), ((872, 886), 'numpy.std', 'np.std', (['pixels'], {}), '(pixels)\n', (878, 886), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for measurements in the Fock basis"""
import pytest
import numpy as np
NUM_REPEATS = 50
@pytest.mark.backends("gaussian")
class TestGaussianRepresentation:
    """Tests specific to the Gaussian backend representation."""

    def measure_fock_gaussian_warning(self, setup_backend):
        """A Fock measurement with shots != 1 on the Gaussian backend must warn.

        Should be deleted when this functionality is implemented.
        """
        backend = setup_backend(3)
        expected = ("Cannot simulate non-Gaussian states. Conditional state after "
                    "Fock measurement has not been updated.")
        with pytest.warns(Warning, match=expected):
            backend.measure_fock([0, 1], shots=5)
@pytest.mark.backends("fock", "tf")
class TestFockRepresentation:
    """Tests that make use of the Fock basis representation."""

    def shots_not_implemented_fock(self, setup_backend):
        """Fock measurements with shots != 1 must raise NotImplementedError.

        Should be deleted when this functionality is implemented.
        """
        backend = setup_backend(3)
        err_msg = (
            "{} backend currently does not support shots != 1 "
            "for Fock measurement".format(backend.short_name)
        )
        for bad_shots in (5, -5):
            with pytest.raises(NotImplementedError, match=err_msg):
                backend.measure_fock([0, 1], shots=bad_shots)

    def shots_not_implemented_homodyne(self, setup_backend):
        """Homodyne measurements with shots != 1 must raise NotImplementedError.

        Should be deleted when this functionality is implemented.
        """
        backend = setup_backend(3)
        err_msg = (
            "{} backend currently does not support shots != 1 "
            "for homodyne measurement".format(backend.short_name)
        )
        for bad_shots in (5, -5):
            with pytest.raises(NotImplementedError, match=err_msg):
                backend.measure_homodyne([0, 1], shots=bad_shots)

    def test_normalized_conditional_states(self, setup_backend, cutoff, pure, tol):
        """Conditional states after measuring a subset of modes stay normalized."""
        # [0, 1, 2, ..., cutoff-1, cutoff, cutoff-1, ..., 2, 1]
        state_preps = list(range(cutoff)) + [cutoff - n for n in range(cutoff)]
        backend = setup_backend(3)
        for rep in range(NUM_REPEATS):
            backend.reset(pure=pure)
            # cycle through consecutive triples in `state_preps`
            for mode in range(3):
                backend.prepare_fock_state(state_preps[(rep + mode) % cutoff], mode)
            for mode in range(3):
                backend.measure_fock([mode])
            assert np.allclose(backend.state().trace(), 1, atol=tol, rtol=0)

    def test_fock_measurements(self, setup_backend, cutoff, batch_size, pure, tol):
        """Fock measurement outcomes must equal the prepared photon numbers."""
        # [0, 1, 2, ..., cutoff-1, cutoff, cutoff-1, ..., 2, 1]
        state_preps = list(range(cutoff)) + [cutoff - n for n in range(cutoff)]
        # singletons, then pairs, then the full triple
        mode_choices = [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)]
        backend = setup_backend(3)
        for rep in range(NUM_REPEATS):
            backend.reset(pure=pure)
            prepared = np.array(
                [state_preps[(rep + offset) % cutoff] for offset in range(3)]
            )
            # cycle through the measurement-mode choices
            meas_modes = np.array(mode_choices[rep % len(mode_choices)])
            for mode in range(3):
                backend.prepare_fock_state(prepared[mode], mode)
            meas_result = backend.measure_fock(meas_modes)
            ref_result = prepared[meas_modes]
            if batch_size is not None:
                ref_result = tuple(np.array([r] * batch_size) for r in ref_result)
            assert np.allclose(meas_result, ref_result, atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestRepresentationIndependent:
    """Basic implementation-independent tests."""

    def test_two_mode_squeezed_measurements(self, setup_backend, pure):
        """Photon counts in both arms of a two-mode squeezed vacuum must agree."""
        for _ in range(NUM_REPEATS):
            backend = setup_backend(2)
            backend.reset(pure=pure)
            r = 0.25
            # Prepare two-mode squeezed vacuum: opposite single-mode squeezers
            # followed by a 50:50 beamsplitter.
            backend.squeeze(-r, 0)
            backend.squeeze(r, 1)
            backend.beamsplitter(np.sqrt(0.5), -np.sqrt(0.5), 0, 1)
            outcomes = backend.measure_fock([0, 1])
            assert np.all(outcomes[0] == outcomes[1])

    def test_vacuum_measurements(self, setup_backend, pure):
        """A Fock measurement on vacuum always yields zero photons."""
        backend = setup_backend(3)
        for _ in range(NUM_REPEATS):
            backend.reset(pure=pure)
            outcome = backend.measure_fock([0, 1, 2])[0]
            assert np.all(np.array(outcome) == 0)

    def test_coherent_state_has_photons(self, setup_backend, pure):
        """A coherent state with mean photon number 4, sampled NUM_REPEATS times, must produce photons."""
        backend = setup_backend(1)
        alpha = 2.0
        counts = np.array(backend.measure_fock([0]))
        for _ in range(NUM_REPEATS):
            backend.reset(pure=pure)
            backend.displacement(alpha, 0)
            counts += backend.measure_fock([0])
        assert np.all(counts > 0)
| [
"pytest.warns",
"numpy.allclose",
"pytest.mark.backends",
"numpy.array",
"numpy.all",
"numpy.sqrt"
] | [((703, 735), 'pytest.mark.backends', 'pytest.mark.backends', (['"""gaussian"""'], {}), "('gaussian')\n", (723, 735), False, 'import pytest\n'), ((1320, 1354), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (1340, 1354), False, 'import pytest\n'), ((5396, 5442), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""', '"""gaussian"""'], {}), "('fock', 'tf', 'gaussian')\n", (5416, 5442), False, 'import pytest\n'), ((6973, 6989), 'numpy.all', 'np.all', (['(meas > 0)'], {}), '(meas > 0)\n', (6979, 6989), True, 'import numpy as np\n'), ((1091, 1231), 'pytest.warns', 'pytest.warns', (['Warning'], {'match': '"""Cannot simulate non-Gaussian states. Conditional state after Fock measurement has not been updated."""'}), "(Warning, match=\n 'Cannot simulate non-Gaussian states. Conditional state after Fock measurement has not been updated.'\n )\n", (1103, 1231), False, 'import pytest\n'), ((4806, 4817), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (4814, 4817), True, 'import numpy as np\n'), ((5338, 5392), 'numpy.allclose', 'np.allclose', (['meas_result', 'ref_result'], {'atol': 'tol', 'rtol': '(0)'}), '(meas_result, ref_result, atol=tol, rtol=0)\n', (5349, 5392), True, 'import numpy as np\n'), ((6120, 6162), 'numpy.all', 'np.all', (['(meas_results[0] == meas_results[1])'], {}), '(meas_results[0] == meas_results[1])\n', (6126, 6162), True, 'import numpy as np\n'), ((3950, 3986), 'numpy.allclose', 'np.allclose', (['tr', '(1)'], {'atol': 'tol', 'rtol': '(0)'}), '(tr, 1, atol=tol, rtol=0)\n', (3961, 3986), True, 'import numpy as np\n'), ((5974, 5986), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (5981, 5986), True, 'import numpy as np\n'), ((5989, 6001), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (5996, 6001), True, 'import numpy as np\n'), ((6474, 6488), 'numpy.array', 'np.array', (['meas'], {}), '(meas)\n', (6482, 6488), True, 'import numpy as np\n'), ((5270, 5296), 
'numpy.array', 'np.array', (['([i] * batch_size)'], {}), '([i] * batch_size)\n', (5278, 5296), True, 'import numpy as np\n')] |
# Visualisation script: loads per-image Abs Rel depth-error arrays saved by an
# evaluation run and scatters one supervision method's errors against another's.
# NOTE(review): pdb.set_trace() on the load line drops into the debugger on
# every run -- a debugging leftover; remove before running non-interactively.
import matplotlib.pyplot as plt
import numpy as np
import pdb
# reso selects the resolution-comparison plot (True) vs. the supervision
# comparison grid (False).
reso = False
y_D_pixel_416 = np.load('abs_test_pixel.npy');pdb.set_trace()
# check certain supervision result
# y_test = y_D_416[0,:]#for direct supervision
# y_test = y_S_416[:,0]
y_test = y_D_pixel_416[:,0]
# max_5_error_index = np.argpartition(y_test, -5)[-5:]
# max_5_error = y_test[max_5_error_index]
# min_5_error_index = np.argpartition(y_test, 5)[:5]
# min_5_error = y_test[min_5_error_index]#;pdb.set_trace()
# The commented arrays below are previously recorded top/bottom-5 error values
# and their image indices for the D/S/M supervision variants, kept for reference.
#max error and corresponding index
#array([382, 395, 388, 385, 174])
#array([0.23704123, 0.25923422, 0.27255267, 0.29910684, 0.3097115 ],dtype=float32)#D result
#array([0.24553713, 0.26688939, 0.25118491, 0.16997431, 0.25148839])#S result
#array([0.11141012, 0.34819573, 0.14325334, 0.19597265, 0.23082088])#M result
#min error and corresponding index
#array([405, 660, 216, 654, 293])#array([0.04548579, 0.03550705, 0.04592678, 0.04738897, 0.04654082],dtype=float32)
#result of y_S_416#similar max error images
#max error and corresponding index
#array([382, 388, 395, 383, 174])array([0.24553713, 0.25118491, 0.26688939, 0.3148692 , 0.25148839])
#min error and corresponding index
#array([216, 2, 52, 293, 4])array([0.04179476, 0.04006128, 0.04325297, 0.0440316 , 0.04586333])
#result of y_M_416
#max error and corresponding index
#array([174, 685, 164, 374, 395])array([0.23082088, 0.24455129, 0.25492683, 0.2615391 , 0.34819573])
#min error and corresponding index
#array([405, 52, 0, 3, 11])array([0.04185434, 0.04804014, 0.04179324, 0.0492242 , 0.05022766])
#x = np.linspace(0, y_DMS_1024.shape[1], y_DMS_1024.shape[1], endpoint=True)#;pdb.set_trace()
#plt.hold(True)
# NOTE(review): both branches below reference arrays (y_DMS_1024, y_DMS_416,
# y_D_416, y_S_416, y_M_416, y_D_res50_416, y_D_seed_416) that are never
# loaded in this script -- as written each branch raises NameError; the
# corresponding np.load calls appear to have been removed or commented out.
# TODO confirm which .npy files they should come from.
if reso:
	# plt.plot(x, y_DMS_1024[0,:], 'b', label= 'dms_1024')
	# plt.plot(x, y_DMS_416[0,:], 'r', label= 'dms_416')
	plt.scatter(y_DMS_1024[0,:], y_DMS_416[0,:], c='r', label= 'dms_416')
	plt.title("Abs Rel resolution comparison")
	plt.xlabel("dms_1024")
	plt.ylabel("dms_416")
	plt.legend(loc='upper left')
	plt.savefig("Direct_reso_com_graph.pdf")
else:
	# plt.plot(x, y_DMS_416[0,:], 'r', label= 'dms_416')
	# plt.plot(x, y_D_416[0,:], 'b', label= 'd_416')
	# plt.plot(x, y_S_416[:,0], 'g', label= 's_416')
	# plt.plot(x, y_M_416[:,0], 'm', label= 'm_416')
	fig, axs = plt.subplots(2, 2, sharex='all')
	# add a big axes, hide frame(this is for sharex and sharey)
	fig.add_subplot(111, frameon = False)
	# hide tick and tick label of the big axes
	plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
	plt.grid(False)
	plt.xlabel("direct supervision")#set up common label
	# 2x2 grid: direct supervision on the x-axis of every panel, compared
	# against stereo / video supervision and against model / seed variations.
	axs[0, 0].scatter(y_D_416[0,:], y_S_416[:,0], c='m', s=5)
	axs[0, 0].set_title('direct vs stereo')
	axs[0, 0].set_ylabel('stereo')
	axs[0, 0].set_xlabel('a')
	axs[0, 1].scatter(y_D_416[0,:], y_M_416[:,0], c='m', s=5)
	axs[0, 1].set_title('direct vs video')
	axs[0, 1].set_ylabel('video')
	axs[0, 1].set_xlabel('b')
	axs[1, 0].scatter(y_D_416[0,:], y_D_res50_416[0,:], c='m', s=5)
	axs[1, 0].set_title('direct with different models')
	axs[1, 0].set_ylabel('different models')
	axs[1, 0].set_xlabel('c')
	axs[1, 1].scatter(y_D_416[0,:], y_D_seed_416[0,:], c='m', s=5)
	axs[1, 1].set_title('direct with different seeds')
	axs[1, 1].set_ylabel('diferent seeds')
	axs[1, 1].set_xlabel('d')
	# plt.title("Abs Rel Supervised method comparison")
	plt.tight_layout()
	#plt.title("Abs Rel method comparison")
	plt.savefig("Sup_com_graph.pdf")
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"pdb.set_trace",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
... | [((93, 122), 'numpy.load', 'np.load', (['"""abs_test_pixel.npy"""'], {}), "('abs_test_pixel.npy')\n", (100, 122), True, 'import numpy as np\n'), ((123, 138), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (136, 138), False, 'import pdb\n'), ((3514, 3524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3522, 3524), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1867), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_DMS_1024[0, :]', 'y_DMS_416[0, :]'], {'c': '"""r"""', 'label': '"""dms_416"""'}), "(y_DMS_1024[0, :], y_DMS_416[0, :], c='r', label='dms_416')\n", (1808, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1910), 'matplotlib.pyplot.title', 'plt.title', (['"""Abs Rel resolution comparison"""'], {}), "('Abs Rel resolution comparison')\n", (1877, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""dms_1024"""'], {}), "('dms_1024')\n", (1923, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dms_416"""'], {}), "('dms_416')\n", (1947, 1958), True, 'import matplotlib.pyplot as plt\n'), ((1960, 1988), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1970, 1988), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2030), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Direct_reso_com_graph.pdf"""'], {}), "('Direct_reso_com_graph.pdf')\n", (2001, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2300), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '"""all"""'}), "(2, 2, sharex='all')\n", (2280, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2544), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'top': '"""off"""', 'bottom': '"""off"""', 'left': '"""off"""', 'right': '"""off"""'}), "(labelcolor='none', top='off', bottom='off', left='off',\n right='off')\n", (2471, 
2544), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2560), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2553, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""direct supervision"""'], {}), "('direct supervision')\n", (2575, 2597), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3431), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3429, 3431), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3512), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Sup_com_graph.pdf"""'], {}), "('Sup_com_graph.pdf')\n", (3491, 3512), True, 'import matplotlib.pyplot as plt\n')] |
import os
import h5py
import numpy as np
from scipy.ndimage.filters import uniform_filter1d
import matplotlib.pyplot as plt
def plot_histogram(all_histograms, save_path, attr_id, attr_name, k=25, smoothing=True):
x = np.array(list(range(100)))
if smoothing:
benign = uniform_filter1d(all_histograms[k][0][attr_id], size=5)
atypical = uniform_filter1d(all_histograms[k][1][attr_id], size=5)
malignant = uniform_filter1d(all_histograms[k][2][attr_id], size=5)
else:
benign = all_histograms[k][0][attr_id]
atypical = all_histograms[k][1][attr_id]
malignant = all_histograms[k][2][attr_id]
plt.plot(x, benign, label="benign")
plt.plot(x, atypical, label="atypical")
plt.plot(x, malignant, label="malignant")
plt.title(attr_name)
plt.legend()
plt.savefig(os.path.join(save_path, attr_name + '.png'))
plt.clf()
def h5_to_numpy(h5_path, key):
    """Load dataset ``key`` from the HDF5 file at ``h5_path`` as a numpy array.

    The file is opened read-only and is now closed once the data has been
    copied out (the original implementation leaked the open file handle).
    """
    with h5py.File(h5_path, 'r') as h5_object:
        return np.array(h5_object[key])
| [
"matplotlib.pyplot.title",
"h5py.File",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.array",
"scipy.ndimage.filters.uniform_filter1d",
"os.path.join"
] | [((657, 692), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'benign'], {'label': '"""benign"""'}), "(x, benign, label='benign')\n", (665, 692), True, 'import matplotlib.pyplot as plt\n'), ((697, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'atypical'], {'label': '"""atypical"""'}), "(x, atypical, label='atypical')\n", (705, 736), True, 'import matplotlib.pyplot as plt\n'), ((741, 782), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'malignant'], {'label': '"""malignant"""'}), "(x, malignant, label='malignant')\n", (749, 782), True, 'import matplotlib.pyplot as plt\n'), ((787, 807), 'matplotlib.pyplot.title', 'plt.title', (['attr_name'], {}), '(attr_name)\n', (796, 807), True, 'import matplotlib.pyplot as plt\n'), ((812, 824), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (822, 824), True, 'import matplotlib.pyplot as plt\n'), ((890, 899), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((950, 973), 'h5py.File', 'h5py.File', (['h5_path', '"""r"""'], {}), "(h5_path, 'r')\n", (959, 973), False, 'import h5py\n'), ((984, 1008), 'numpy.array', 'np.array', (['h5_object[key]'], {}), '(h5_object[key])\n', (992, 1008), True, 'import numpy as np\n'), ((289, 344), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['all_histograms[k][0][attr_id]'], {'size': '(5)'}), '(all_histograms[k][0][attr_id], size=5)\n', (305, 344), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((364, 419), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['all_histograms[k][1][attr_id]'], {'size': '(5)'}), '(all_histograms[k][1][attr_id], size=5)\n', (380, 419), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((440, 495), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['all_histograms[k][2][attr_id]'], {'size': '(5)'}), '(all_histograms[k][2][attr_id], size=5)\n', (456, 495), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((841, 
884), 'os.path.join', 'os.path.join', (['save_path', "(attr_name + '.png')"], {}), "(save_path, attr_name + '.png')\n", (853, 884), False, 'import os\n')] |
#!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN PARALLEL BY MPI
Created on 2019
@author: <NAME>
"""
import os
import sys
import h5py
import glob
import time
import timeit
import random
import numpy as np
import xarray as xr
from mpi4py import MPI
from netCDF4 import Dataset
import matplotlib.pyplot as plt
def read_MODIS(M06_files, M03_files, verbose=False):
    """Read one MYD06_L2 / MYD03 granule pair.

    Returns (lat, lon, CM): sub-sampled latitude, longitude and the decoded
    cloud-mask flag extracted from the two Level-2 files.
    """
    # Cloud mask from the MYD06_L2 product: first byte of Cloud_Mask_1km,
    # sub-sampled every third pixel, then bits 1-2 extracted (cloudiness flag).
    m06 = Dataset(M06_files, 'r')
    cm_1km = m06.variables['Cloud_Mask_1km'][:, :, 0][::3, ::3]
    CM = (np.array(cm_1km, dtype='byte') & 0b00000110) >> 1
    m06.close()
    # Geolocation from the MYD03 product, sub-sampled onto the same grid.
    m03 = Dataset(M03_files, 'r')
    lat = m03.variables['Latitude'][:, :][::3, ::3]
    lon = m03.variables['Longitude'][:, :][::3, ::3]
    # The _FillValue attributes are read (as in the original) but the
    # fill-data filtering that used them is currently disabled.
    attr_lat = m03.variables['Latitude']._FillValue
    attr_lon = m03.variables['Longitude']._FillValue
    #Use _FillValue to remove fill data in lat & lon
    #lat[np.where(lat == attr_lat)] = 0.0
    #lon[np.where(lat == attr_lat)] = 0.0
    #CM [np.where(lat == attr_lat)] = 0.5
    #
    #lat[np.where(lon == attr_lon)] = 0.0
    #lon[np.where(lon == attr_lon)] = 0.0
    #CM [np.where(lon == attr_lon)] = 0.5
    m03.close()
    return lat, lon, CM
def run_modis_aggre(dayloop):
    """Aggregate cloud-mask pixel counts for the days assigned to this rank.

    For each 0-based task index in ``dayloop`` (converted to a 1-based day of
    month here), every MYD06_L2/MYD03 granule pair of that day is read and the
    module-level ``total_pix`` / ``cloud_pix`` accumulators are incremented on
    a 1x1-degree global grid.

    Returns:
        tuple: the ``(total_pix, cloud_pix)`` accumulator arrays.
    """
    dayloop = np.array(dayloop) + 1  # 0-based task index -> day of month
    for day in dayloop:
        if day > 31:
            break  # padding indices beyond the month carry no work
        dc = '%03i' % day
        M03_files = sorted(glob.glob(MYD03_dir + "MYD03.A2008" + dc + "*"))
        M06_files = sorted(glob.glob(MYD06_dir + "MYD06_L2.A2008" + dc + "*"))
        for file_idx in range(len(M06_files)):
            print("File Number: {} / {} in day {}".format(file_idx, len(M06_files), day))
            # Read Level-2 MODIS data
            lat, lon, CM = read_MODIS(M06_files[file_idx], M03_files[file_idx])
            # Map lat/lon to 1-degree grid indices; clamp negatives to row/col 0.
            lat = (lat.ravel() + 89.5).astype(int)
            lon = (lon.ravel() + 179.5).astype(int)
            lat = np.where(lat > -1, lat, 0)
            lon = np.where(lon > -1, lon, 0)
            # Count every pixel. np.add.at accumulates correctly for repeated
            # (lat, lon) indices and replaces the original per-pixel Python
            # loop, which also shadowed the file-loop variable `j`.
            np.add.at(total_pix, (lat, lon), 1)
            # Cloud pixels are those whose decoded cloud-mask flag equals 0;
            # lat/lon share MYD03/MYD06's internal structure, so the raveled
            # indices line up.
            cloud_idx = np.nonzero(CM.ravel() == 0)[0]
            np.add.at(cloud_pix, (lat[cloud_idx], lon[cloud_idx]), 1)
    return (total_pix, cloud_pix)
def save_output(cf):
    """Write the monthly cloud-fraction grid to NetCDF and save a world-map plot."""
    xr.DataArray(cf).to_netcdf("monthlyCloudFraction-day-level-parallelization.nc")
    # Filled-contour map on the 1x1-degree global grid.
    plt.figure(figsize=(14, 7))
    plt.contourf(range(-180, 180), range(-90, 90), cf, 100, cmap="jet")
    plt.xlabel("Longitude", fontsize=14)
    plt.ylabel("Latitude", fontsize=14)
    plt.title("Level 3 Cloud Fraction Aggregation for January 2008", fontsize=16)
    plt.colorbar()
    plt.savefig("monthlyCloudFraction-day-level-parallelization.png")
if __name__ =='__main__':
    # Driver: split the 31 days of January 2008 across MPI ranks, aggregate
    # per-rank pixel counts, then reduce everything onto rank 0.
    # This is the main program for using concurrent to speed up the whole process
    # Start counting operation time
    start_time = timeit.default_timer()
    #-------------STEP 1: Define Files Path--------
    MYD06_dir= '/umbc/xfs1/cybertrn/common/Data/Satellite_Observations/MODIS/MYD06_L2/'
    MYD03_dir= '/umbc/xfs1/cybertrn/common/Data/Satellite_Observations/MODIS/MYD03/'
    #-------------STEP 2: Set up spactial and temporal resolution----------
    # 1x1-degree global accumulators; also the module-level globals that
    # run_modis_aggre increments.
    total_pix = np.zeros((180, 360))
    cloud_pix = np.zeros((180, 360))
    # Initiate MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    random.seed(rank)
    # Distribute the number of files into ppns for MPI
    # NOTE(review): np.int was removed in NumPy 1.24; on modern NumPy this
    # would need dtype=int instead. TODO confirm target NumPy version.
    day_num = np.linspace(1,31,31,dtype=np.int)
    # `remain` pads the day list so it divides evenly among `size` ranks.
    remain = size-len(day_num)%size
    ppn_file = (len(day_num)+remain)/size
    if len(day_num) <= size:
        # More ranks than days: one (possibly padded) index per rank; ranks
        # holding only padding do no work inside run_modis_aggre (day > 31).
        files = np.arange(len(day_num)+remain)
        tasks = np.array(np.split(files,size))
        dayloop = tasks[rank]
        size = len(day_num)
    elif ppn_file >= remain:
        # Distribute the day's loops into MPI ppns
        # Pad up to an even split, then drop the padding indices that landed
        # on the last rank.
        files = np.arange(len(day_num)+remain)
        tasks = np.array(np.split(files,size))
        dayloop = tasks[rank]
        if rank == (size-1):
            dayloop = np.delete(dayloop, np.arange(len(dayloop)-remain,len(dayloop)))
    else:
        # Distribute the day's loops into MPI ppns
        # Truncate to an even split, then append the leftover days to the
        # last rank.
        files = np.arange(len(day_num)-len(day_num)%size)
        tasks = np.array(np.split(files,size))
        dayloop = tasks[rank]
        if rank == (size-1):
            dayloop = np.append(dayloop, np.arange(len(files),len(files)+len(day_num)%size))
    print("process {} aggregating days from {} to {}...".format(rank, dayloop[0],dayloop[-1]))
    # Start counting operation time
    # start_time = timeit.default_timer()
    results = np.asarray(run_modis_aggre(dayloop))
    if rank == 0:
        # Rank 0 folds in its own counts, then receives and accumulates the
        # (total, cloud) count pair from every other rank.
        total_pix += results[0,:]
        cloud_pix += results[1,:]
        for i in range(1,size):
            # `results` is reused as the receive buffer; wait() completes the
            # transfer before the buffer is read.
            recv_req = comm.Irecv(results,source=i, tag=0)
            recv_req.wait()
            total_pix += results[0,:]
            cloud_pix += results[1,:]
        # Compute the mean cloud fraction & Statistics (Include Min & Max & Standard deviation)
        Mean_Fraction = (cloud_pix / total_pix)
        print('Mean_Fraction:')
        print( Mean_Fraction )
        # end_time = timeit.default_timer()
        end_time = timeit.default_timer()
        print ("Operation Time in {:7.2f} seconds".format(end_time - start_time))
        # Create HDF5 file to store the result
        save_output(Mean_Fraction)
        #comm.Abort()
    else:
        # Worker ranks ship their counts to rank 0 and exit.
        print("Process {} finished".format(rank))
        send_req = comm.Isend(results, dest=0, tag=0)
        send_req.wait()
| [
"netCDF4.Dataset",
"matplotlib.pyplot.title",
"timeit.default_timer",
"numpy.zeros",
"matplotlib.pyplot.colorbar",
"numpy.split",
"matplotlib.pyplot.figure",
"numpy.where",
"random.seed",
"xarray.DataArray",
"numpy.linspace",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matpl... | [((495, 518), 'netCDF4.Dataset', 'Dataset', (['M06_files', '"""r"""'], {}), "(M06_files, 'r')\n", (502, 518), False, 'from netCDF4 import Dataset\n'), ((729, 752), 'netCDF4.Dataset', 'Dataset', (['M03_files', '"""r"""'], {}), "(M03_files, 'r')\n", (736, 752), False, 'from netCDF4 import Dataset\n'), ((3034, 3050), 'xarray.DataArray', 'xr.DataArray', (['cf'], {}), '(cf)\n', (3046, 3050), True, 'import xarray as xr\n'), ((3120, 3147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (3130, 3147), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3254), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {'fontsize': '(14)'}), "('Longitude', fontsize=14)\n", (3228, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3256, 3291), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {'fontsize': '(14)'}), "('Latitude', fontsize=14)\n", (3266, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3370), 'matplotlib.pyplot.title', 'plt.title', (['"""Level 3 Cloud Fraction Aggregation for January 2008"""'], {'fontsize': '(16)'}), "('Level 3 Cloud Fraction Aggregation for January 2008', fontsize=16)\n", (3302, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3386), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3384, 3386), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3453), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""monthlyCloudFraction-day-level-parallelization.png"""'], {}), "('monthlyCloudFraction-day-level-parallelization.png')\n", (3399, 3453), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3630), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3628, 3630), False, 'import timeit\n'), ((3939, 3959), 'numpy.zeros', 'np.zeros', (['(180, 360)'], {}), '((180, 360))\n', (3947, 3959), True, 'import numpy as np\n'), ((3973, 3993), 'numpy.zeros', 'np.zeros', (['(180, 360)'], {}), '((180, 360))\n', (3981, 3993), True, 'import 
numpy as np\n'), ((4084, 4101), 'random.seed', 'random.seed', (['rank'], {}), '(rank)\n', (4095, 4101), False, 'import random\n'), ((4168, 4204), 'numpy.linspace', 'np.linspace', (['(1)', '(31)', '(31)'], {'dtype': 'np.int'}), '(1, 31, 31, dtype=np.int)\n', (4179, 4204), True, 'import numpy as np\n'), ((1522, 1539), 'numpy.array', 'np.array', (['dayloop'], {}), '(dayloop)\n', (1530, 1539), True, 'import numpy as np\n'), ((5685, 5707), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5705, 5707), False, 'import timeit\n'), ((607, 636), 'numpy.array', 'np.array', (['CM1km'], {'dtype': '"""byte"""'}), "(CM1km, dtype='byte')\n", (615, 636), True, 'import numpy as np\n'), ((1630, 1677), 'glob.glob', 'glob.glob', (["(MYD03_dir + 'MYD03.A2008' + dc + '*')"], {}), "(MYD03_dir + 'MYD03.A2008' + dc + '*')\n", (1639, 1677), False, 'import glob\n'), ((1700, 1750), 'glob.glob', 'glob.glob', (["(MYD06_dir + 'MYD06_L2.A2008' + dc + '*')"], {}), "(MYD06_dir + 'MYD06_L2.A2008' + dc + '*')\n", (1709, 1750), False, 'import glob\n'), ((2117, 2143), 'numpy.where', 'np.where', (['(lat > -1)', 'lat', '(0)'], {}), '(lat > -1, lat, 0)\n', (2125, 2143), True, 'import numpy as np\n'), ((2153, 2179), 'numpy.where', 'np.where', (['(lon > -1)', 'lon', '(0)'], {}), '(lon > -1, lon, 0)\n', (2161, 2179), True, 'import numpy as np\n'), ((4366, 4387), 'numpy.split', 'np.split', (['files', 'size'], {}), '(files, size)\n', (4374, 4387), True, 'import numpy as np\n'), ((4567, 4588), 'numpy.split', 'np.split', (['files', 'size'], {}), '(files, size)\n', (4575, 4588), True, 'import numpy as np\n'), ((4839, 4860), 'numpy.split', 'np.split', (['files', 'size'], {}), '(files, size)\n', (4847, 4860), True, 'import numpy as np\n')] |
import copy
import numpy as np
def smoothen(test_data, pv_inds, filter_functions, keys=None):
    """Smoothens the test_data within each pv_inds segment with given filter_functions.

    If filter_functions is a dict of filter functions, test_data quantities with
    those keys are smoothened. If filter_functions is a single function, all
    test_data quantities (except 'time' and 'stp') are filtered, unless keys
    are given, in which case only those quantities are filtered.
    The returned data is a direct replacement for test_data, with each quantity
    having the same time points as the original data.

    :param test_data: dict with numpy arrays of equal lengths, required to contain a 'time' key
    :type test_data: dict
    :param pv_inds: list of lists of indices describing interesting points between which the filter will be applied
    :type pv_inds: list[ iterable ]
    :param filter_functions: Either a dictionary of filter functions or a single filter function.
        Function interface should be vf = filter_function(t, v) where
        t is time, v are values at corresponding time and vf are corresponding smoothened values
    :type filter_functions: dict or function
    :param keys: List of keys in test_data to apply filter to. If None (default), apply to keys in filter_functions,
        or to all (except 'time' and 'stp') if filter_functions is a single function
    :type keys: list[ str ]
    :returns: smoothened test data including test data that was not smoothened in original form
    :rtype: dict
    """
    # Create output data structure; quantities that are not filtered keep their original values
    test_data_smooth = copy.deepcopy(test_data)
    # Decide which quantities to smoothen
    if keys is None:
        if isinstance(filter_functions, dict):
            keys = list(filter_functions)
        else:
            keys = [key for key in test_data if key not in ['time', 'stp']]
    # If not existing, create function dictionary
    fdict = filter_functions if isinstance(filter_functions, dict) else {key: filter_functions for key in keys}
    # Reshape indices (note, pv_inds should be sorted!)
    inds = list(np.sort([i for j in pv_inds for i in j]))
    if inds[0] != 0:
        inds.insert(0, 0)
    inds[-1] = inds[-1] + 1  # Ensure that last datapoint is included
    # Apply filter segment by segment
    for i0, i1 in zip(inds[:-1], inds[1:]):
        for key in keys:
            # BUG FIX: look the filter up by the quantity's own key. The previous
            # `zip(keys, fdict)` paired quantities with the dict's iteration order,
            # which could apply the wrong filter when an explicit `keys` list was
            # combined with a filter dict.
            filter_fun = fdict[key]
            test_data_smooth[key][i0:i1] = filter_fun(test_data['time'][i0:i1], test_data[key][i0:i1])
    return test_data_smooth
def polynomial(t, v, deg=3, t_pred=None):
    """Smooth v-data with a least-squares polynomial fit of degree ``deg``.

    If ``t_pred`` is given, the fitted polynomial is evaluated at those
    coordinates instead of at ``t``.
    """
    coeffs = np.polyfit(t, v, deg=deg)
    eval_points = t if t_pred is None else t_pred
    return np.polyval(coeffs, eval_points)
def linear_segments(t, v, seg_fraction=0.25, num_segments=None, t_pred=None):
    """Smooth v-data with equally spaced straight-line segments (a degree-1 spline)."""
    segment_options = dict(degree=1, knot_fraction=seg_fraction,
                           num_knots=num_segments, t_pred=t_pred)
    return spline(t, v, **segment_options)
def cubic_spline(t, v, knot_fraction=0.25, num_knots=None, t_pred=None):
    """Smooth v-data with a degree-3 (cubic) spline fit."""
    return spline(t, v, degree=3, knot_fraction=knot_fraction,
                  num_knots=num_knots, t_pred=t_pred)
def spline(t, v, degree=3, knot_fraction=0.25, num_knots=None, t_pred=None):
    """Smooth v-data with a least-squares spline of the given degree.

    Knots are distributed uniformly over [t[0], t[-1]]; if ``t_pred`` is
    given, the fitted spline is evaluated there instead of at ``t``.
    """
    model = Spline(degree=degree, knots=get_knots(t, knot_fraction, num_knots))
    model.fit(t, v)
    eval_points = t if t_pred is None else t_pred
    return model.eval(eval_points)
def get_knots(t, knot_fraction, num_knots=None):
    """Distribute knots evenly over the span [t[0], t[-1]].

    When ``num_knots`` is None, the knot count defaults to
    ``int(len(t) * knot_fraction)``.
    """
    if num_knots is None:
        num_knots = int(len(t) * knot_fraction)
    return np.linspace(t[0], t[-1], num_knots)
class Spline:
    """Least-squares spline of arbitrary polynomial degree. The spline is
    represented by

    .. math::
        f(x) = \\sum_{i=0}^{N_\\mathrm{degree}} a_i x^i +
        \\sum_{i=1}^{N_\\mathrm{knots}} b_i \\langle x-x_i\\rangle^{N_\\mathrm{degree}}, \\quad
        \\langle x \\rangle = \\begin{matrix} 0 & x<0 \\\\ x & x \\geq 0 \\end{matrix}
    """

    def __init__(self, degree, knots):
        self.degree = degree
        self.knot_vector = knots
        # One coefficient per polynomial term plus one per knot.
        self.coefficients = np.zeros(degree + 1 + len(knots))
        self.fitted = False

    def fit(self, x, y):
        """Determine the spline coefficients from data points y = f(x)."""
        design = self._get_fit_mat(x)
        self.coefficients = np.linalg.lstsq(design, y, rcond=None)[0]
        self.fitted = True

    def eval(self, x):
        """Evaluate the fitted spline at the given x coordinates."""
        assert self.fitted
        return self._get_fit_mat(x) @ self.coefficients

    # Internal methods
    def _get_fit_mat(self, x):
        """Assemble the design matrix: polynomial columns, then one hinge column per knot."""
        design = np.zeros((len(x), len(self.coefficients)))
        for power in range(self.degree + 1):
            design[:, power] = x ** power
        for col, knot in enumerate(self.knot_vector, start=self.degree + 1):
            beyond = x > knot
            hinge = np.zeros(len(x))
            hinge[beyond] = (x[beyond] - knot) ** self.degree
            design[:, col] = hinge
        return design
# NOTE: legacy patsy/statsmodels implementations preserved for reference only.
# The block below is a module-level string literal and is never executed.
'''
# Old methods, keep as reference
import patsy
import statsmodels.api as sm
def b_spline_patsy(t, v, knot_fraction=0.25, num_knots=None, t_pred=None):
    """ Smooth v-data using B-spline
    """
    return spline_patsy(t, v, patsy.bs, knot_fraction, num_knots, t_pred=t_pred)
def natural_spline_patsy(t, v, knot_fraction=0.25, num_knots=None, t_pred=None):
    """ Smooth v-data using a natural cubic spline
    """
    return spline_patsy(t, v, patsy.cr, knot_fraction, num_knots, t_pred=t_pred)
def cubic_spline_patsy(t, v, knot_fraction=0.25, num_knots=None, t_pred=None):
    """ Smooth v-data using a cubic spline
    """
    return spline_patsy(t, v, patsy.cc, knot_fraction, num_knots, t_pred=t_pred)
def spline_patsy(t, v, spline_basis, knot_fraction=0.25, num_knots=None, b_degree=None, t_pred=None):
    """ Smooth using a given spline basis with num_knots
    If num_knots not given, set num_knots = floor(len(t)*knot_fraction)
    If t_pred given, return predicted values for different time coordinates
    """
    # Set parameters if b_degree given (degree of b-spline)
    params = {} if b_degree is None else {'degree': b_degree}
    # Setup basis function, need to define knots and bounds explicitly for prediction to work correctly
    # (I.e. we need to use the same knots for prediction as for fitting!)
    # Get number of knots
    num_knots_ = int(len(t) * knot_fraction) if num_knots is None else num_knots
    # Distribute knots
    knots = tuple(np.linspace(t[0], t[-1], num_knots_))
    # Find bounds
    t_min = np.min(t) if t_pred is None else min(np.min(t), np.min(t_pred))
    t_max = np.max(t) if t_pred is None else max(np.max(t), np.max(t_pred))
    # Expand bounds to avoid numerical issues (double precision)
    t_min -= abs(t_min) * 1.e-12 + 1.e-100
    t_max += abs(t_max) * 1.e-12 + 1.e-100
    # Construct base function for fitting
    base_function = spline_basis(t, knots=knots, lower_bound=t_min, upper_bound=t_max, **params)
    # Construct model for fitting
    model = patsy.dmatrix(base_function)
    # Fit model
    fit = sm.GLM(v, model).fit()
    if t_pred is None: # Return smoothened values by evaluating the model at the fitted data
        smoothened = fit.predict(model)
    else: # Use the fit to predict new time values
        pred_base = spline_basis(t_pred, knots=knots, lower_bound=t_min, upper_bound=t_max, **params)
        pred_model = patsy.dmatrix(pred_base)
        smoothened = fit.predict(pred_model)
    return np.array(smoothened)
'''
| [
"copy.deepcopy",
"numpy.linalg.lstsq",
"numpy.polyfit",
"numpy.polyval",
"numpy.sort",
"numpy.linspace"
] | [((1651, 1675), 'copy.deepcopy', 'copy.deepcopy', (['test_data'], {}), '(test_data)\n', (1664, 1675), False, 'import copy\n'), ((2717, 2742), 'numpy.polyfit', 'np.polyfit', (['t', 'v'], {'deg': 'deg'}), '(t, v, deg=deg)\n', (2727, 2742), True, 'import numpy as np\n'), ((3826, 3862), 'numpy.linspace', 'np.linspace', (['t[0]', 't[-1]', '_num_knots'], {}), '(t[0], t[-1], _num_knots)\n', (3837, 3862), True, 'import numpy as np\n'), ((2166, 2206), 'numpy.sort', 'np.sort', (['[i for j in pv_inds for i in j]'], {}), '([i for j in pv_inds for i in j])\n', (2173, 2206), True, 'import numpy as np\n'), ((2781, 2797), 'numpy.polyval', 'np.polyval', (['p', 't'], {}), '(p, t)\n', (2791, 2797), True, 'import numpy as np\n'), ((2823, 2844), 'numpy.polyval', 'np.polyval', (['p', 't_pred'], {}), '(p, t_pred)\n', (2833, 2844), True, 'import numpy as np\n'), ((4558, 4597), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['fit_mat', 'y'], {'rcond': 'None'}), '(fit_mat, y, rcond=None)\n', (4573, 4597), True, 'import numpy as np\n')] |
#!/usr/bin/env python
## Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from sklearn.metrics import (roc_curve, roc_auc_score, accuracy_score,
average_precision_score, precision_score,
recall_score, brier_score_loss)
import numpy as np
from lightsaber import constants as C
import logging
# Root logger used for module-level warnings (no per-module logger is configured here).
log = logging.getLogger()
try:
    # pysurvival is an optional dependency; only survival (time-to-event) models need it.
    from pysurvival.utils._metrics import _concordance_index
except ImportError:
    log.warning("pysurvival not installed... survival models wont work")
'''
The concordance_score metric is added. It is accurate but time consuming.
concordance_index is a fast approximate c-index. It could be used for
optimizing the model but may not be the final reported one.
TODO
- Brier score
'''
# **************************************************************************
# library of metric functions
# **************************************************************************
def pr_at_k(y_true, y_hat, y_proba, pct, average='binary'):
    '''
    Calculate precision and recall on the top-``pct`` fraction of predictions,
    ranked by predicted probability.
    Args:
        y_true: (1d np.array) actual labels, 1 - positive class, 0 - negative class
        y_hat: (1d np.array) predicted labels, 1 - positive class, 0 - negative class
        y_proba: (1d np.array) probability in positive class
        pct: (float) fraction of the dataset to keep (k = round(pct * len(y_true)))
        averag<E6>e: averaging method (see doc for sklearn.metrics.precision_score)
    Returns:
        dict with precision_at_k and recall_at_k
    '''
    k = round(pct * len(y_true))
    # Stack truth, prediction and score side by side, then rank by score (descending).
    stacked = np.vstack([y_true.T, y_hat.T, y_proba.T]).T
    ranked = stacked[(-stacked[:, 2]).argsort()]
    # Keep only the k highest-probability rows.
    top_true = ranked[:k, 0]
    top_hat = ranked[:k, 1]
    precision = precision_score(y_true=top_true, y_pred=top_hat, average=average)
    recall = recall_score(y_true=top_true, y_pred=top_hat, average=average)
    suffix = str(pct * 100) + 'pct_' + str(k)
    return {'precision_at_' + suffix: precision,
            'recall_at_' + suffix: recall}
# **************************************************************************
# Main interface for other modules
# **************************************************************************
class Metrics(object):
    """Dispatcher that computes evaluation metrics for a trained model.

    The metric family is chosen by ``mode`` at construction time; calling the
    instance forwards to the matching ``*_metrics`` static method. Currently
    only ``'classifier'`` is enabled (see ``__supported_modes``).
    """
    # Other modes exist as static methods but are not accepted yet.
    __supported_modes = ['classifier'] #, 'pu_classifier', 't2e']
    def __init__(self, mode='classifier'):
        super(Metrics, self).__init__()
        self.mode = mode
        if self.mode not in self.__supported_modes:
            raise NotImplementedError(f'modes outside {self.__supported_modes} not yet supported')
    def __call__(self, *args, **kwargs):
        # Route the call to the metric family chosen at construction time.
        if self.mode == 'classifier':
            ret_val = self.classifier_metrics(*args, **kwargs)
        elif self.mode == 'pu_classifier':
            ret_val = self.pu_classifier_metrics(*args, **kwargs)
        elif self.mode == 't2e':
            ret_val = self.t2e_metrics(*args, **kwargs)
        return ret_val
    def __repr__(self):
        s = f"Metrics[{self.mode}]"
        return s
    @staticmethod
    def classifier_metrics(y_val, y_val_hat, y_val_proba=None, val_score=None,
                           y_test=None, y_test_hat=None, y_test_proba=None, test_score=None):
        '''
        Calculate classification metrics for model evaluation.
        Args:
            y_val: (1d np.array) actual validation labels, 1 - positive class, 0 - negative class
            y_val_hat: (1d np.array) predicted validation labels
            y_val_proba: (1d np.array) predicted validation probability of the positive class
            val_score: (float) accuracy-like score; reported as ``Val_error = 1 - val_score``
            y_test, y_test_hat, y_test_proba, test_score: same as above for the test split (optional)
        Returns:
            dict with precision, recall, AUC-ROC, AUPRC, accuracy, Brier score
            and precision/recall at k%, for validation and (if given) test
        '''
        # Validation part
        val_precision = precision_score(y_true=y_val, y_pred=y_val_hat)
        val_recall = recall_score(y_true=y_val, y_pred=y_val_hat)
        val_accuracy = accuracy_score(y_true=y_val, y_pred=y_val_hat)
        # Probability-based metrics default to 0 when no probabilities are given.
        val_auc, val_auprc, val_brier_score = 0, 0, 0
        _prak = {}
        if y_val_proba is not None:
            val_auc = roc_auc_score(y_val, y_val_proba)
            val_auprc = average_precision_score(y_val, y_val_proba)
            val_brier_score = brier_score_loss(y_val, y_val_proba)
            # Recall on highest ½%, 1%, 2%, 5% of risk scores
            for pct in [0.005, 0.01, 0.02, 0.05]:
                _tmp = pr_at_k(y_true=y_val, y_hat=y_val_hat,
                               y_proba=y_val_proba, pct=pct)
                for key, value in six.iteritems(_tmp):
                    _prak[f'Val_{key}'] = value
        metrics = {
            'Val_Precision': val_precision,
            'Val_Recall': val_recall,
            'Val_AUCROC': val_auc,
            'Val_AUPRC': val_auprc,
            'Val_Accuracy': val_accuracy,
            'Val_Brier_score': val_brier_score,
        }
        metrics.update(_prak) #**pr_at_5_pct, **pr_at_10pct, **pr_at_25pct, **pr_at_50pct}
        if val_score is not None:
            val_error = 1 - val_score
            metrics['Val_error'] = val_error
        else:
            val_error = None
        # Test part (mirrors the validation metrics with a 'Test_' prefix)
        if y_test is not None:
            test_precision = precision_score(y_true=y_test, y_pred=y_test_hat)
            test_recall = recall_score(y_true=y_test, y_pred=y_test_hat)
            test_accuracy = accuracy_score(y_true=y_test, y_pred=y_test_hat)
            test_auc, test_auprc, test_brier_score = 0, 0, 0
            _prak = {}
            if y_test_proba is not None:
                test_auc = roc_auc_score(y_test, y_test_proba)
                test_auprc = average_precision_score(y_test, y_test_proba)
                test_brier_score = brier_score_loss(y_test, y_test_proba)
                # Recall on highest ½%, 1%, 2%, 5% of risk scores
                for pct in [0.005, 0.01, 0.02, 0.05]:
                    _tmp = pr_at_k(y_true=y_test, y_hat=y_test_hat,
                                   y_proba=y_test_proba, pct=pct)
                    for key, value in six.iteritems(_tmp):
                        _prak[f'Test_{key}'] = value
            metrics.update({
                'Test_Precision': test_precision,
                'Test_Recall': test_recall,
                'Test_AUCROC': test_auc,
                'Test_AUPRC': test_auprc,
                'Test_Accuracy': test_accuracy,
                'Test_Brier_score': test_brier_score,
            })
            metrics.update(_prak) #**pr_at_5_pct, **pr_at_10pct, **pr_at_25pct, **pr_at_50pct}
            if test_score is not None:
                test_error = 1 - test_score
                metrics['Test_error'] = test_error
            else:
                test_error = None
        return metrics
    @staticmethod
    def pu_classifier_metrics(y_val, y_val_hat, y_val_proba=None, val_score=None,
                              y_test=None, y_test_hat=None, y_test_proba=None, test_score=None):
        '''
        Calculate metrics for positive-unlabeled (PU) model evaluation.
        Args:
            y_val: (1d np.array) actual validation labels, 1 - positive class, 0 - negative class
            y_val_hat: (1d np.array) predicted validation labels
            y_val_proba: (1d np.array) predicted validation probability (currently unused)
            val_score: (float) accuracy-like score; reported as ``Val_error = 1 - val_score``
            y_test, y_test_hat, y_test_proba, test_score: same as above for the test split (optional)
        Returns:
            dict with precision, recall, accuracy and PU-specific F1/accuracy
        '''
        val_precision = precision_score(y_true=y_val, y_pred=y_val_hat)
        val_recall = recall_score(y_true=y_val,y_pred=y_val_hat)
        val_accuracy = accuracy_score(y_true=y_val, y_pred=y_val_hat)
        # NOTE(review): f1_pu and accuracy_pu are not defined in this module's
        # visible portion -- presumably provided elsewhere in the package; verify.
        val_f1_score_pu = f1_pu(y_val, y_val_hat)
        val_accuracy_score_pu = accuracy_pu(y_val, y_val_hat)
        metrics = {
            'Val_Precision': val_precision,
            'Val_Recall': val_recall,
            'Val_Accuracy': val_accuracy,
            'Val_F1_PU': val_f1_score_pu,
            'Val_Accuracy_PU': val_accuracy_score_pu
        }
        if val_score is not None:
            val_error = 1 - val_score
            metrics['Val_error'] = val_error
        else:
            val_error = None
        # Test part (mirrors the validation metrics with a 'Test_' prefix)
        if y_test is not None:
            test_precision = precision_score(y_true=y_test, y_pred=y_test_hat)
            test_recall = recall_score(y_true=y_test,y_pred=y_test_hat)
            test_accuracy = accuracy_score(y_true=y_test, y_pred=y_test_hat)
            test_f1_score_pu = f1_pu(y_test, y_test_hat)
            test_accuracy_score_pu = accuracy_pu(y_test, y_test_hat)
            metrics.update({
                'Test_Precision': test_precision,
                'Test_Recall': test_recall,
                'Test_Accuracy': test_accuracy,
                'Test_F1_PU': test_f1_score_pu,
                'Test_Accuracy_PU': test_accuracy_score_pu
            })
            if test_score is not None:
                test_error = 1 - test_score
                metrics['Test_error'] = test_error
            else:
                test_error = None
        return metrics
    @staticmethod
    def t2e_metrics(*args, **kwargs):
        # Time-to-event (survival) metrics are not implemented yet.
        raise NotImplementedError()
    pass
| [
"sklearn.metrics.accuracy_score",
"sklearn.metrics.recall_score",
"logging.getLogger",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.brier_score_loss",
"sklearn.metrics.precision_score",
"sklearn.metrics.average_precision_score",
"six.iteritems",
"numpy.vstack"
] | [((893, 912), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (910, 912), False, 'import logging\n'), ((2547, 2620), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'y_true_top_k', 'y_pred': 'y_hat_top_k', 'average': 'average'}), '(y_true=y_true_top_k, y_pred=y_hat_top_k, average=average)\n', (2562, 2620), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((2636, 2706), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'y_true_top_k', 'y_pred': 'y_hat_top_k', 'average': 'average'}), '(y_true=y_true_top_k, y_pred=y_hat_top_k, average=average)\n', (2648, 2706), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((2106, 2147), 'numpy.vstack', 'np.vstack', (['[y_true.T, y_hat.T, y_proba.T]'], {}), '([y_true.T, y_hat.T, y_proba.T])\n', (2115, 2147), True, 'import numpy as np\n'), ((4578, 4625), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (4593, 4625), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((4647, 4691), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (4659, 4691), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((4715, 4761), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (4729, 4761), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, 
brier_score_loss\n'), ((8311, 8358), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (8326, 8358), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((8380, 8424), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (8392, 8424), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((8447, 8493), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_val', 'y_pred': 'y_val_hat'}), '(y_true=y_val, y_pred=y_val_hat)\n', (8461, 8493), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((4893, 4926), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_val', 'y_val_proba'], {}), '(y_val, y_val_proba)\n', (4906, 4926), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((4951, 4994), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_val', 'y_val_proba'], {}), '(y_val, y_val_proba)\n', (4974, 4994), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((5025, 5061), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_val', 'y_val_proba'], {}), '(y_val, y_val_proba)\n', (5041, 5061), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6053, 6102), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'y_test', 
'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (6068, 6102), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6129, 6175), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'y_test', 'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (6141, 6175), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6204, 6252), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (6218, 6252), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((9077, 9126), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'y_test', 'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (9092, 9126), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((9153, 9199), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'y_test', 'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (9165, 9199), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((9227, 9275), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'y_test_hat'}), '(y_true=y_test, y_pred=y_test_hat)\n', (9241, 9275), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((5331, 5350), 'six.iteritems', 'six.iteritems', (['_tmp'], {}), '(_tmp)\n', (5344, 5350), False, 'import six\n'), ((6405, 
6440), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_test_proba'], {}), '(y_test, y_test_proba)\n', (6418, 6440), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6470, 6515), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test', 'y_test_proba'], {}), '(y_test, y_test_proba)\n', (6493, 6515), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6551, 6589), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_test', 'y_test_proba'], {}), '(y_test, y_test_proba)\n', (6567, 6589), False, 'from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, average_precision_score, precision_score, recall_score, brier_score_loss\n'), ((6882, 6901), 'six.iteritems', 'six.iteritems', (['_tmp'], {}), '(_tmp)\n', (6895, 6901), False, 'import six\n')] |
"""
This module contains the class "Engine", which contains the most important functions.
"""
import os
import io
#import matplotlib as mpl
#import matplotlib.pyplot as plt
import base64
import numpy as np
#import pyopencl as cl
from tqdm import tqdm
from .font_loader import load_font
from .filter_bank import FilterBank
from .radial_filter import RadialFilter
from .util import relu
# Resolve the OpenCL kernel file relative to this module's own location.
script_dir = os.path.dirname(os.path.abspath(__file__))
kernels_file_path = os.path.join(script_dir, "kernels.cl")
print("Loading OpenCL kernel file at", kernels_file_path)
# Glyph sets available for processing; uppercase glyphs are currently disabled.
ALL_LC_GLYPHS = "abcdefghijklmnopqrstuvwxyz"
ALL_UC_GLYPHS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
ALL_GLYPHS = ALL_LC_GLYPHS #+ ALL_UC_GLYPHS
class Engine:
    def __init__(self, file_name, size_factor=0.6, n_scales=5, n_orientations=4, glyphset=ALL_GLYPHS):
        """Load a font, build the filter bank and pre-convolve every glyph.

        :param file_name: path of the font file to load
        :param size_factor: scale factor forwarded to the font loader
        :param n_scales: number of scales in the filter bank
        :param n_orientations: number of orientations in the filter bank
        :param glyphset: string of glyphs to pre-render and convolve
        """
        # Per-glyph caches, filled by load_filter_bank_and_convolve_glyphs().
        self.single_glyph_widths = {}
        self.single_glyph_images = {}
        self.convolved_glyph_images = {}
        self.glyph_distance_images = {}
        self.glyph_fullness_images = {}
        self.set_up_gpu_processor_kernel()
        self.load_font_file(file_name, size_factor)
        self.load_filter_bank_and_convolve_glyphs(n_scales, n_orientations, glyphset)
        print("Engine loaded.")
    def load_font_file(self, file_name, size_factor):
        """Load the font via the project font loader and cache its glyph-box metrics.

        :param file_name: path of the font file
        :param size_factor: scale factor forwarded to load_font()
        """
        (f, box_height, box_width) = load_font(file_name, size_factor)
        self.f = f
        self.file_name = file_name
        self.size_factor = size_factor
        self.box_height = box_height  # glyph bounding-box height in pixels
        self.box_width = box_width    # glyph bounding-box width in pixels
        print("Font loaded.")
    def load_filter_bank_and_convolve_glyphs(self, n_scales, n_orientations, glyphset):
        """Build the filter bank and pre-convolve every glyph in ``glyphset``.

        Fills the per-glyph caches declared in __init__: ink widths, padded
        images, complex filter responses and radial distance/fullness maps.
        """
        self.filter_bank = FilterBank(n_scales, n_orientations, self.box_height, self.box_width, 0, display_filters=False)
        # NOTE(review): the 2./3. arguments and the doubled box dimensions follow
        # the RadialFilter API defined elsewhere in the project -- confirm there.
        self.radial_filter = RadialFilter(n_scales, n_orientations, 2., 3., 2*self.box_height, 2*self.box_width)
        self.glyphset = glyphset
        self.n_scales = n_scales
        self.n_orientations = n_orientations
        print("Convolving glyphs ...")
        for g in tqdm(glyphset):
            rg = self.f.glyph(g)
            self.single_glyph_widths[g] = rg.ink_width
            # Pad every rendered glyph to a constant box width so images align.
            self.single_glyph_images[g] = rg.as_matrix(normalize=True).with_padding_to_constant_box_width(self.box_width)
            self.convolved_glyph_images[g] = self.filter_bank.convolve(self.single_glyph_images[g]).astype(np.complex64)
            (distance, fullness) = self.radial_filter.convolve(self.convolved_glyph_images[g])
            self.glyph_distance_images[g] = distance
            self.glyph_fullness_images[g] = fullness
        print("Filter bank loaded and glyphs convolved.")
    def font_info(self):
        """
        For convenience. Used by the web interface to display info about the loaded font,
        adjust the height of the preview canvas, etc.

        :returns: dict of font metadata (metrics, family/style names, file info)
            plus the base64-encoded glyph images from get_glyph_images().
        """
        font_info = {
            "size_factor": self.size_factor,
            "box_height": self.box_height,
            "box_width": self.box_width,
            "n_scales": self.n_scales,
            "n_orientations": self.n_orientations,
            "ascender": self.f.ascender,
            "ascender_px": self.f.ascender_px,
            "baseline_ratio": self.f.baseline_ratio,
            "descender": self.f.descender,
            "descender_px": self.f.descender_px,
            # FreeType exposes names as bytes; decode for JSON serialization.
            "family_name": self.f.face.family_name.decode("utf-8"),
            "style_name": self.f.face.style_name.decode("utf-8"),
            "file_name": self.file_name,
            "full_height": self.f.full_height,
            "full_height_px": self.f.full_height_px,
            "xheight": self.f.get_xheight(),
            "italic_angle": self.f.italic_angle,
            "glyph_images": self.get_glyph_images(self.glyphset)
        }
        print("Font info returned.")
        return font_info
    def get_glyph_images(self, glyphset):
        """Render every glyph in ``glyphset`` into one binary record and return
        it as a base64 string for consumption by the web interface.
        """
        bytes_writer = io.BytesIO()
        # We're using the following encoding:
        # 4 bytes character (utf-16)
        # 4 bytes height (int32)
        # 4 bytes width (int32)
        # (height * width * 4) bytes glyph image pixels (float32)
        for c in glyphset:
            rg = self.f.glyph(c)
            bytes_writer.write(c.encode("utf-16")) # utf-16 means always use 4 bytes (2 for BOM, then 2 for the character)
            bytes_writer.write(np.int32(self.box_height).tobytes()) # height
            bytes_writer.write(np.int32(rg.ink_width).tobytes()) # width
            bytes_writer.write(rg.as_matrix(normalize=True).astype(np.float32).tobytes())
        binary_images = bytes_writer.getvalue()
        binary_as_string = base64.b64encode(binary_images).decode("utf-8")
        return binary_as_string
    def set_up_gpu_processor_kernel(self):
        """Compile the OpenCL kernels. Currently disabled -- this is a no-op;
        the pyopencl setup below is commented out."""
        #self.ctx = cl.create_some_context(False) # Create a context with your device
        #now create a command queue in the context
        #self.queue = cl.CommandQueue(self.ctx)
        #print("Compiling GPU kernel ...")
        #self.vp = cl.Program(self.ctx, open(kernels_file_path).read()).build()
        #print("Compiled:", self.vp)
        pass
def create_pair_image_distances(self, lc, rc, distances):
# This should just take the pre-convolved images and shift them.
total_width_at_minimum_ink_distance = self.single_glyph_widths[lc] + self.single_glyph_widths[rc] - self.f.minimum_ink_distance(lc, rc)
total_width_at_desired_distances = total_width_at_minimum_ink_distance + distances
left_shifts = -np.ceil((total_width_at_desired_distances - self.single_glyph_widths[lc]) / 2).astype(np.int32)
right_shifts = np.floor((total_width_at_desired_distances - self.single_glyph_widths[rc]) / 2).astype(np.int32)
return left_shifts, right_shifts
    def render_penalty_fields(self, lc, rc, params, distances):
        """Return penalty fields for the glyph pair (lc, rc), one slice per
        selected scale, orientation and distance.

        NOTE(review): currently a stub -- the GPU implementation is commented
        out below, so this returns all-zero fields and ``lc``/``rc`` are unused.

        :returns: zeros of shape (n_scales, n_orientations, box_height, box_width, n_distances)
        """
        # Fall back to all scales/orientations when the params don't restrict them.
        current_scales = np.array(params.get("currentScales", np.arange(self.n_scales)))
        n_current_scales = len(current_scales)
        current_orientations = np.array(params.get("currentOrientations", np.arange(self.n_orientations)))
        n_current_orientations = len(current_orientations)
        return np.zeros((n_current_scales, n_current_orientations, self.box_height, self.box_width, len(distances)))
# def render_penalty_fields_old(self, lc, rc, params, distances):
# # Renders the penalty field for a certain set of sizes and orientations (or all), and returns the diffs
# # TODO: get distances from params
# current_scales = np.array(params.get("currentScales", np.arange(self.n_scales)))
# n_current_scales = len(current_scales)
# current_orientations = np.array(params.get("currentOrientations", np.arange(self.n_orientations)))
# n_current_orientations = len(current_orientations)
#
# sc_lg = self.convolved_glyph_images[lc][current_scales[:, None], current_orientations[None, :], :, :].reshape([n_current_scales, n_current_orientations, self.box_height * self.box_width])
# sc_rg = self.convolved_glyph_images[rc][current_scales[:, None], current_orientations[None, :], :, :].reshape([n_current_scales, n_current_orientations, self.box_height * self.box_width])
# shifts_l, shifts_r = self.create_pair_image_distances(lc, rc, distances)
#
# # On the GPU, we want to perform the V1/V4 analysis for multiple distances in parallel.
#
# # Copy over the input images
# sc_lg_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=sc_lg)
# sc_rg_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=sc_rg)
# shifts_l_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=shifts_l)
# shifts_r_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=shifts_r)
#
# # Copy over the parameters
# edge_loss_weights_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=params['edge_loss_weights'][current_scales[:, None], current_orientations[None, :]])
# gap_gain_weights_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=params['gap_gain_weights'][current_scales[:, None], current_orientations[None, :]])
# v1_beta_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=params['v1_beta'][current_scales[:, None], current_orientations[None, :]])
# v1_exponents_dev = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=params['v1_exponents'][current_scales[:, None], current_orientations[None, :]])
#
# # create output buffer
# diffs = np.ones((n_current_scales, n_current_orientations, self.box_height * self.box_width * len(distances)), dtype=np.float32)
# diff_dest_dev = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, diffs.nbytes)
#
# self.vp.penalty_parallel(self.queue, diffs.shape, None,
# diff_dest_dev,
# np.int32(n_current_scales),
# np.int32(n_current_orientations),
# np.int32(self.box_height),
# np.int32(self.box_width),
# np.int32(len(distances)),
# sc_lg_dev,
# sc_rg_dev,
# shifts_l_dev,
# shifts_r_dev,
#
# edge_loss_weights_dev,
# gap_gain_weights_dev,
# v1_beta_dev,
# v1_exponent_dev,
#
# letter_tuning_function_dev,
# vertical_gap_tuning_function_dev,
# horizontal_gap_tuning_function_dev,
# beta_dev,
# np.float32(params['exponent']),
# # gap_weights_dev,
# blur_weights_dev,
# blur_weight_exps_dev)
#
# # Get result back
# cl.enqueue_copy(self.queue, diffs, diff_dest_dev)
# penalty_field = np.reshape(diffs, (n_current_scales, n_current_orientations, self.box_height, self.box_width, len(distances))).astype(np.float32)
#
# return penalty_field
    def get_penalty_fields_subset(self, params):
        """
        Render penalty fields for a single distance per character pair and
        serialise them into a binary blob.

        Parameters
        ----------
        params : dict
            Raw parameter dict; passed through ``prepare_params`` first.
            Must contain ``'sampleText'`` (the text whose consecutive pairs
            are rendered) and ``'currentDistances'`` (per-pair distances).

        Returns
        -------
        bytes
            Concatenated records, one per unique consecutive character pair
            (see the encoding comment below).
        """
        # Generate the fields for just a single distance, and a subset of sizes and orientations.
        params = self.prepare_params(params)
        rendered_pairs = {}  # Just so we don't do work more than once
        bytes_writer = io.BytesIO()
        # We're using the following encoding:
        # 4 bytes first character
        # 4 bytes second character
        # 4 bytes height
        # 4 bytes width
        # (height * width * 4) bytes penalty_fields (float32)
        text = params["sampleText"]
        for i in tqdm(range(len(text) - 1)):
            lc = text[i]
            rc = text[i + 1]
            if (lc + rc) not in rendered_pairs:
                rendered_pairs[(lc + rc)] = True
                # Absolute rendering distance = desired distance for the pair
                # plus the pair's minimum ink distance.
                dist = np.array([params['currentDistances'][lc + rc]]).astype(np.int32) + self.f.minimum_ink_distance(lc, rc)
                np_penalty_fields = self.render_penalty_fields(lc, rc, params, dist)
                bytes_writer.write(lc.encode("utf-16"))  # utf-16 means always use 4 bytes (2 for BOM, then 2 for the character)
                bytes_writer.write(rc.encode("utf-16"))  # Can't use utf32 because not supported by browsers
                bytes_writer.write(np.int32(np_penalty_fields.shape[2]).tobytes())  # height
                bytes_writer.write(np.int32(np_penalty_fields.shape[3]).tobytes())  # width
                # Sum over the first two axes (scale, orientation per the
                # sibling method's axis comments), keep only distance index 0.
                bytes_writer.write(np.sum(np_penalty_fields[:, :, :, :, 0], (0, 1)).astype(np.float32).tobytes())
        return bytes_writer.getvalue()
def get_best_distances_and_full_penalty_fields(self, params):
# Generate the fields for a whole set of distances, then find the best distance, and return it all.
distances = (np.arange(-1, 40, 2)).astype(np.int32) # TODO: get from params
params = self.prepare_params(params)
print("Getting best distances and full penalty fields for", params)
rendered_fields = {}
bytes_writer = io.BytesIO()
# We're using the following encoding:
# 4 bytes first character
# 4 bytes second character
# 4 bytes best distance (int32)
# 4 bytes height (int32)
# 4 bytes width (int32)
# (height * width * 4) bytes penalty_fields (float32)
text = params["sampleText"]
for i in tqdm(range(len(text) - 1)):
lc = text[i]
rc = text[i + 1]
if (lc + rc) not in rendered_fields:
rendered_fields[(lc + rc)] = True
np_penalty_fields = self.render_penalty_fields(lc, rc, params, distances)
# Of the original image, a certain mask is affected by the pairing at all.
# Of the originals in the pairing mask, how much is lost?
# Here, we are exponentiating each channel total with a exponent.
# Consider also dividing the channel totals by the original total.
loss_totals = np.sum(np_penalty_fields, (2, 3)) # <s, o, d>
totals = np.sum(loss_totals, (0, 1)) # <d>
best_distance_index = 0
#plt.plot(totals)
#plt.show()
#for si in range(self.n_scales):
# plt.plot(np.sum(np_penalty_fields[si, :, :, :], (0, 1, 2)), linestyle='dotted')
#plt.show()
lowest_penalty_total = 1e10
for ii in range(len(distances) - 1):
if (totals[ii] < lowest_penalty_total):
lowest_penalty_total = totals[ii]
best_distance_index = ii
break
#best_distance_index = np.argmin(np.abs(np.sum(np_penalty_fields, (0, 1, 2, 3))))
bytes_writer.write(lc.encode("utf-16")) # utf-16 means always use 4 bytes (2 for BOM, then 2 for the character)
bytes_writer.write(rc.encode("utf-16")) # Can't use utf32 becaues not supported by browsers
bytes_writer.write(np.int32(distances[best_distance_index] - self.f.minimum_ink_distance(lc, rc)).tobytes())
bytes_writer.write(np.int32(np_penalty_fields.shape[2]).tobytes()) # height
bytes_writer.write(np.int32(np_penalty_fields.shape[3]).tobytes()) # width
bytes_writer.write(np.sum(np_penalty_fields[:, :, :, :, best_distance_index], (0, 1)).astype(np.float32).tobytes())
return bytes_writer.getvalue()
def prepare_params(self, params):
# Parameters for V1 HRA
params['v1_k'] = np.zeros((1, self.n_orientations)) #np.tile(np.array(params['v1_k'])[:, None].astype(np.float32), [1, self.n_orientations])
params['v1_b'] = np.zeros((1, self.n_orientations)) #np.tile(np.array(params['v1_b'])[:, None].astype(np.float32), [1, self.n_orientations])
return params
| [
"io.BytesIO",
"os.path.abspath",
"tqdm.tqdm",
"numpy.sum",
"numpy.ceil",
"numpy.floor",
"numpy.zeros",
"base64.b64encode",
"numpy.arange",
"numpy.int32",
"numpy.array",
"os.path.join"
] | [((468, 506), 'os.path.join', 'os.path.join', (['script_dir', '"""kernels.cl"""'], {}), "(script_dir, 'kernels.cl')\n", (480, 506), False, 'import os\n'), ((421, 446), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (436, 446), False, 'import os\n'), ((2044, 2058), 'tqdm.tqdm', 'tqdm', (['glyphset'], {}), '(glyphset)\n', (2048, 2058), False, 'from tqdm import tqdm\n'), ((3893, 3905), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3903, 3905), False, 'import io\n'), ((10770, 10782), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10780, 10782), False, 'import io\n'), ((12464, 12476), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (12474, 12476), False, 'import io\n'), ((15035, 15069), 'numpy.zeros', 'np.zeros', (['(1, self.n_orientations)'], {}), '((1, self.n_orientations))\n', (15043, 15069), True, 'import numpy as np\n'), ((15184, 15218), 'numpy.zeros', 'np.zeros', (['(1, self.n_orientations)'], {}), '((1, self.n_orientations))\n', (15192, 15218), True, 'import numpy as np\n'), ((4617, 4648), 'base64.b64encode', 'base64.b64encode', (['binary_images'], {}), '(binary_images)\n', (4633, 4648), False, 'import base64\n'), ((5619, 5698), 'numpy.floor', 'np.floor', (['((total_width_at_desired_distances - self.single_glyph_widths[rc]) / 2)'], {}), '((total_width_at_desired_distances - self.single_glyph_widths[rc]) / 2)\n', (5627, 5698), True, 'import numpy as np\n'), ((5885, 5909), 'numpy.arange', 'np.arange', (['self.n_scales'], {}), '(self.n_scales)\n', (5894, 5909), True, 'import numpy as np\n'), ((6033, 6063), 'numpy.arange', 'np.arange', (['self.n_orientations'], {}), '(self.n_orientations)\n', (6042, 6063), True, 'import numpy as np\n'), ((12225, 12245), 'numpy.arange', 'np.arange', (['(-1)', '(40)', '(2)'], {}), '(-1, 40, 2)\n', (12234, 12245), True, 'import numpy as np\n'), ((13447, 13480), 'numpy.sum', 'np.sum', (['np_penalty_fields', '(2, 3)'], {}), '(np_penalty_fields, (2, 3))\n', (13453, 13480), True, 'import numpy as np\n'), 
((13518, 13545), 'numpy.sum', 'np.sum', (['loss_totals', '(0, 1)'], {}), '(loss_totals, (0, 1))\n', (13524, 13545), True, 'import numpy as np\n'), ((5500, 5578), 'numpy.ceil', 'np.ceil', (['((total_width_at_desired_distances - self.single_glyph_widths[lc]) / 2)'], {}), '((total_width_at_desired_distances - self.single_glyph_widths[lc]) / 2)\n', (5507, 5578), True, 'import numpy as np\n'), ((4332, 4357), 'numpy.int32', 'np.int32', (['self.box_height'], {}), '(self.box_height)\n', (4340, 4357), True, 'import numpy as np\n'), ((4409, 4431), 'numpy.int32', 'np.int32', (['rg.ink_width'], {}), '(rg.ink_width)\n', (4417, 4431), True, 'import numpy as np\n'), ((11266, 11313), 'numpy.array', 'np.array', (["[params['currentDistances'][lc + rc]]"], {}), "([params['currentDistances'][lc + rc]])\n", (11274, 11313), True, 'import numpy as np\n'), ((11726, 11762), 'numpy.int32', 'np.int32', (['np_penalty_fields.shape[2]'], {}), '(np_penalty_fields.shape[2])\n', (11734, 11762), True, 'import numpy as np\n'), ((11818, 11854), 'numpy.int32', 'np.int32', (['np_penalty_fields.shape[3]'], {}), '(np_penalty_fields.shape[3])\n', (11826, 11854), True, 'import numpy as np\n'), ((14619, 14655), 'numpy.int32', 'np.int32', (['np_penalty_fields.shape[2]'], {}), '(np_penalty_fields.shape[2])\n', (14627, 14655), True, 'import numpy as np\n'), ((14711, 14747), 'numpy.int32', 'np.int32', (['np_penalty_fields.shape[3]'], {}), '(np_penalty_fields.shape[3])\n', (14719, 14747), True, 'import numpy as np\n'), ((11909, 11957), 'numpy.sum', 'np.sum', (['np_penalty_fields[:, :, :, :, 0]', '(0, 1)'], {}), '(np_penalty_fields[:, :, :, :, 0], (0, 1))\n', (11915, 11957), True, 'import numpy as np\n'), ((14802, 14868), 'numpy.sum', 'np.sum', (['np_penalty_fields[:, :, :, :, best_distance_index]', '(0, 1)'], {}), '(np_penalty_fields[:, :, :, :, best_distance_index], (0, 1))\n', (14808, 14868), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Multi Signals
=============
Defines the class implementing support for multi-continuous signals:
- :class:`colour.continuous.MultiSignals`
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
# Python 3 compatibility.
try:
from operator import div, idiv
except ImportError:
from operator import truediv, itruediv
div = truediv
idiv = itruediv
from colour.constants import DEFAULT_FLOAT_DTYPE
from colour.continuous import AbstractContinuousFunction, Signal
from colour.utilities import (as_float_array, first_item, is_pandas_installed,
tsplit, tstack)
if sys.version_info[:2] >= (3, 8):  # pragma: no cover
    # Only the ABCs (Iterator, Mapping, Sequence) moved to
    # ``collections.abc``; ``OrderedDict`` is a concrete class that still
    # lives in ``collections``. Importing it from ``collections.abc`` raises
    # ``ImportError`` on Python >= 3.8.
    from collections.abc import Iterator, Mapping, Sequence
    from collections import OrderedDict
else:  # pragma: no cover
    from collections import Iterator, Mapping, OrderedDict, Sequence
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['MultiSignals']
class MultiSignals(AbstractContinuousFunction):
"""
Defines the base class for multi-continuous signals, a container for
multiple :class:`colour.continuous.Signal` sub-class instances.
Parameters
----------
data : Series or Dataframe or Signal or MultiSignals or array_like or \
dict_like, optional
Data to be stored in the multi-continuous signals.
domain : array_like, optional
Values to initialise the multiple :class:`colour.continuous.Signal`
sub-class instances :attr:`colour.continuous.Signal.domain` attribute
with. If both ``data`` and ``domain`` arguments are defined, the latter
will be used to initialise the :attr:`colour.continuous.Signal.domain`
attribute.
labels : array_like, optional
Names to use for the :class:`colour.continuous.Signal` sub-class
instances.
Other Parameters
----------------
name : unicode, optional
multi-continuous signals name.
dtype : type, optional
**{np.float16, np.float32, np.float64, np.float128}**,
Floating point data type.
interpolator : object, optional
Interpolator class type to use as interpolating function for the
:class:`colour.continuous.Signal` sub-class instances.
interpolator_args : dict_like, optional
Arguments to use when instantiating the interpolating function
of the :class:`colour.continuous.Signal` sub-class instances.
extrapolator : object, optional
Extrapolator class type to use as extrapolating function for the
:class:`colour.continuous.Signal` sub-class instances.
extrapolator_args : dict_like, optional
Arguments to use when instantiating the extrapolating function
of the :class:`colour.continuous.Signal` sub-class instances.
signal_type : type, optional
The :class:`colour.continuous.Signal` sub-class type used for
instances.
Attributes
----------
dtype
domain
range
interpolator
interpolator_args
extrapolator
extrapolator_args
function
signals
labels
signal_type
Methods
-------
__str__
__repr__
__hash__
__getitem__
__setitem__
__contains__
__eq__
__ne__
arithmetical_operation
multi_signals_unpack_data
fill_nan
to_dataframe
Examples
--------
Instantiation with implicit *domain* and a single signal:
>>> range_ = np.linspace(10, 100, 10)
>>> print(MultiSignals(range_))
[[ 0. 10.]
[ 1. 20.]
[ 2. 30.]
[ 3. 40.]
[ 4. 50.]
[ 5. 60.]
[ 6. 70.]
[ 7. 80.]
[ 8. 90.]
[ 9. 100.]]
Instantiation with explicit *domain* and a single signal:
>>> domain = np.arange(100, 1100, 100)
>>> print(MultiSignals(range_, domain))
[[ 100. 10.]
[ 200. 20.]
[ 300. 30.]
[ 400. 40.]
[ 500. 50.]
[ 600. 60.]
[ 700. 70.]
[ 800. 80.]
[ 900. 90.]
[ 1000. 100.]]
Instantiation with multiple signals:
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> print(MultiSignals(range_, domain))
[[ 100. 10. 20. 30.]
[ 200. 20. 30. 40.]
[ 300. 30. 40. 50.]
[ 400. 40. 50. 60.]
[ 500. 50. 60. 70.]
[ 600. 60. 70. 80.]
[ 700. 70. 80. 90.]
[ 800. 80. 90. 100.]
[ 900. 90. 100. 110.]
[ 1000. 100. 110. 120.]]
Instantiation with a *dict*:
>>> print(MultiSignals(dict(zip(domain, range_))))
[[ 100. 10. 20. 30.]
[ 200. 20. 30. 40.]
[ 300. 30. 40. 50.]
[ 400. 40. 50. 60.]
[ 500. 50. 60. 70.]
[ 600. 60. 70. 80.]
[ 700. 70. 80. 90.]
[ 800. 80. 90. 100.]
[ 900. 90. 100. 110.]
[ 1000. 100. 110. 120.]]
Instantiation using a *Signal* sub-class:
>>> class NotSignal(Signal):
... pass
>>> multi_signals = MultiSignals(range_, domain, signal_type=NotSignal)
>>> print(multi_signals)
[[ 100. 10. 20. 30.]
[ 200. 20. 30. 40.]
[ 300. 30. 40. 50.]
[ 400. 40. 50. 60.]
[ 500. 50. 60. 70.]
[ 600. 60. 70. 80.]
[ 700. 70. 80. 90.]
[ 800. 80. 90. 100.]
[ 900. 90. 100. 110.]
[ 1000. 100. 110. 120.]]
>>> type(multi_signals.signals[0]) # doctest: +SKIP
<class 'multi_signals.NotSignal'>
Instantiation with a *Pandas* *Series*:
>>> if is_pandas_installed():
... from pandas import Series
... print(MultiSignals( # doctest: +SKIP
... Series(dict(zip(domain, np.linspace(10, 100, 10))))))
[[ 100. 10.]
[ 200. 20.]
[ 300. 30.]
[ 400. 40.]
[ 500. 50.]
[ 600. 60.]
[ 700. 70.]
[ 800. 80.]
[ 900. 90.]
[ 1000. 100.]]
Instantiation with a *Pandas* *Dataframe*:
>>> if is_pandas_installed():
... from pandas import DataFrame
... data = dict(zip(['a', 'b', 'c'], tsplit(range_)))
... print(MultiSignals( # doctest: +SKIP
... DataFrame(data, domain)))
[[ 100. 10. 20. 30.]
[ 200. 20. 30. 40.]
[ 300. 30. 40. 50.]
[ 400. 40. 50. 60.]
[ 500. 50. 60. 70.]
[ 600. 60. 70. 80.]
[ 700. 70. 80. 90.]
[ 800. 80. 90. 100.]
[ 900. 90. 100. 110.]
[ 1000. 100. 110. 120.]]
Retrieving domain *y* variable for arbitrary range *x* variable:
>>> x = 150
>>> range_ = tstack([np.sin(np.linspace(0, 1, 10))] * 3)
>>> range_ += np.array([0.0, 0.25, 0.5])
>>> MultiSignals(range_, domain)[x] # doctest: +ELLIPSIS
array([ 0.0359701..., 0.2845447..., 0.5331193...])
>>> x = np.linspace(100, 1000, 3)
>>> MultiSignals(range_, domain)[x] # doctest: +ELLIPSIS
array([[ 4.4085384...e-20, 2.5000000...e-01, 5.0000000...e-01],
[ 4.7669395...e-01, 7.2526859...e-01, 9.7384323...e-01],
[ 8.4147098...e-01, 1.0914709...e+00, 1.3414709...e+00]])
Using an alternative interpolating function:
>>> x = 150
>>> from colour.algebra import CubicSplineInterpolator
>>> MultiSignals(
... range_,
... domain,
... interpolator=CubicSplineInterpolator)[x] # doctest: +ELLIPSIS
array([ 0.0555274..., 0.3055274..., 0.5555274...])
>>> x = np.linspace(100, 1000, 3)
>>> MultiSignals(
... range_,
... domain,
... interpolator=CubicSplineInterpolator)[x] # doctest: +ELLIPSIS
array([[ 0. ..., 0.25 ..., 0.5 ...],
[ 0.4794253..., 0.7294253..., 0.9794253...],
[ 0.8414709..., 1.0914709..., 1.3414709...]])
"""
def __init__(self, data=None, domain=None, labels=None, **kwargs):
super(MultiSignals, self).__init__(kwargs.get('name'))
self._signal_type = kwargs.get('signal_type', Signal)
self._signals = self.multi_signals_unpack_data(data, domain, labels,
**kwargs)
@property
def dtype(self):
"""
Getter and setter property for the continuous signal dtype.
Parameters
----------
value : type
Value to set the continuous signal dtype with.
Returns
-------
type
Continuous signal dtype.
"""
if self._signals:
return first_item(self._signals.values()).dtype
@dtype.setter
def dtype(self, value):
"""
Setter for **self.dtype** property.
"""
if value is not None:
for signal in self._signals.values():
signal.dtype = value
@property
def domain(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances independent domain :math:`x` variable.
Parameters
----------
value : array_like
Value to set the :class:`colour.continuous.Signal` sub-class
instances independent domain :math:`x` variable with.
Returns
-------
ndarray
:class:`colour.continuous.Signal` sub-class instances independent
domain :math:`x` variable.
"""
if self._signals:
return first_item(self._signals.values()).domain
@domain.setter
def domain(self, value):
"""
Setter for the **self.domain** property.
"""
if value is not None:
for signal in self._signals.values():
signal.domain = value
@property
def range(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances corresponding range :math:`y` variable.
Parameters
----------
value : array_like
Value to set the :class:`colour.continuous.Signal` sub-class
instances corresponding range :math:`y` variable with.
Returns
-------
ndarray
:class:`colour.continuous.Signal` sub-class instances corresponding
range :math:`y` variable.
"""
if self._signals:
return tstack([signal.range for signal in self._signals.values()])
@range.setter
def range(self, value):
"""
Setter for the **self.range** property.
"""
if value is not None:
value = as_float_array(value)
if value.ndim in (0, 1):
for signal in self._signals.values():
signal.range = value
else:
assert value.shape[-1] == len(self._signals), (
'Corresponding "y" variable columns must have '
'same count than underlying "Signal" components!')
for signal, y in zip(self._signals.values(), tsplit(value)):
signal.range = y
@property
def interpolator(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances interpolator type.
Parameters
----------
value : type
Value to set the :class:`colour.continuous.Signal` sub-class
instances interpolator type with.
Returns
-------
type
:class:`colour.continuous.Signal` sub-class instances interpolator
type.
"""
if self._signals:
return first_item(self._signals.values()).interpolator
@interpolator.setter
def interpolator(self, value):
"""
Setter for the **self.interpolator** property.
"""
if value is not None:
for signal in self._signals.values():
signal.interpolator = value
@property
def interpolator_args(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances interpolator instantiation time arguments.
Parameters
----------
value : dict
Value to set the :class:`colour.continuous.Signal` sub-class
instances interpolator instantiation time arguments to.
Returns
-------
dict
:class:`colour.continuous.Signal` sub-class instances interpolator
instantiation time arguments.
"""
if self._signals:
return first_item(self._signals.values()).interpolator_args
@interpolator_args.setter
def interpolator_args(self, value):
"""
Setter for the **self.interpolator_args** property.
"""
if value is not None:
for signal in self._signals.values():
signal.interpolator_args = value
@property
def extrapolator(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances extrapolator type.
Parameters
----------
value : type
Value to set the :class:`colour.continuous.Signal` sub-class
instances extrapolator type with.
Returns
-------
type
:class:`colour.continuous.Signal` sub-class instances extrapolator
type.
"""
if self._signals:
return first_item(self._signals.values()).extrapolator
@extrapolator.setter
def extrapolator(self, value):
"""
Setter for the **self.extrapolator** property.
"""
if value is not None:
for signal in self._signals.values():
signal.extrapolator = value
@property
def extrapolator_args(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances extrapolator instantiation time arguments.
Parameters
----------
value : dict
Value to set the :class:`colour.continuous.Signal` sub-class
instances extrapolator instantiation time arguments to.
Returns
-------
dict
:class:`colour.continuous.Signal` sub-class instances extrapolator
instantiation time arguments.
"""
if self._signals:
return first_item(self._signals.values()).extrapolator_args
@extrapolator_args.setter
def extrapolator_args(self, value):
"""
Setter for the **self.extrapolator_args** property.
"""
if value is not None:
for signal in self._signals.values():
signal.extrapolator_args = value
@property
def function(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances callable.
Parameters
----------
value : object
Attribute value.
Returns
-------
callable
:class:`colour.continuous.Signal` sub-class instances callable.
Notes
-----
- This property is read only.
"""
if self._signals:
return first_item(self._signals.values()).function
@property
def signals(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances.
Parameters
----------
value : Series or Dataframe or Signal or MultiSignals or array_like \
or dict_like
Attribute value.
Returns
-------
OrderedDict
:class:`colour.continuous.Signal` sub-class instances.
"""
return self._signals
@signals.setter
def signals(self, value):
"""
Setter for the **self.signals** property.
"""
if value is not None:
self._signals = self.multi_signals_unpack_data(
value, signal_type=self._signal_type)
@property
def labels(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances name.
Parameters
----------
value : array_like
Value to set the :class:`colour.continuous.Signal` sub-class
instances name.
Returns
-------
dict
:class:`colour.continuous.Signal` sub-class instance name.
"""
if self._signals:
return list(self._signals.keys())
@labels.setter
def labels(self, value):
"""
Setter for the **self.labels** property.
"""
if value is not None:
assert len(value) == len(self._signals), (
'"labels" length does not match "signals" length!')
self._signals = OrderedDict(
[(value[i], signal)
for i, (_key, signal) in enumerate(self._signals.items())])
@property
def signal_type(self):
"""
Getter and setter property for the :class:`colour.continuous.Signal`
sub-class instances type.
Returns
-------
type
:class:`colour.continuous.Signal` sub-class instances type.
Notes
-----
- This property is read only.
"""
return self._signal_type
def __str__(self):
"""
Returns a formatted string representation of the multi-continuous
signals.
Returns
-------
unicode
Formatted string representation.
Examples
--------
>>> domain = np.arange(0, 10, 1)
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> print(MultiSignals(range_))
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 40. 50. 60.]
[ 4. 50. 60. 70.]
[ 5. 60. 70. 80.]
[ 6. 70. 80. 90.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
"""
try:
return str(np.hstack([self.domain[:, np.newaxis], self.range]))
except TypeError:
return super(MultiSignals, self).__str__()
    def __repr__(self):
        """
        Returns an evaluable string representation of the multi-continuous
        signals.

        Returns
        -------
        unicode
            Evaluable string representation.

        Examples
        --------
        >>> domain = np.arange(0, 10, 1)
        >>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
        >>> range_ += np.array([0, 10, 20])
        >>> MultiSignals(range_)  # doctest: +ELLIPSIS
        MultiSignals([[   0.,   10.,   20.,   30.],
                      [   1.,   20.,   30.,   40.],
                      [   2.,   30.,   40.,   50.],
                      [   3.,   40.,   50.,   60.],
                      [   4.,   50.,   60.,   70.],
                      [   5.,   60.,   70.,   80.],
                      [   6.,   70.,   80.,   90.],
                      [   7.,   80.,   90.,  100.],
                      [   8.,   90.,  100.,  110.],
                      [   9.,  100.,  110.,  120.]],
                     labels=[0, 1, 2],
                     interpolator=KernelInterpolator,
                     interpolator_args={},
                     extrapolator=Extrapolator,
                     extrapolator_args={...)
        """
        try:
            # Start from the ndarray repr of [domain | range columns].
            representation = repr(
                np.hstack([self.domain[:, np.newaxis], self.range]))
            # Rebrand 'array' as the class name ...
            representation = representation.replace('array',
                                                    self.__class__.__name__)
            # ... and re-indent continuation rows to line up under the
            # (longer) class name.
            representation = representation.replace(
                ' [',
                '{0}['.format(' ' * (len(self.__class__.__name__) + 2)))
            # Append the keyword-style attributes, dropping the ndarray
            # repr's trailing ')' (representation[:-1]) and indenting each
            # line to align with the opening parenthesis.
            representation = (
                '{0},\n'
                '{1}labels={2},\n'
                '{1}interpolator={3},\n'
                '{1}interpolator_args={4},\n'
                '{1}extrapolator={5},\n'
                '{1}extrapolator_args={6})').format(
                    representation[:-1],
                    ' ' * (len(self.__class__.__name__) + 1), repr(
                        self.labels), self.interpolator.__name__
                    if self.interpolator is not None else self.interpolator,
                    repr(self.interpolator_args), self.extrapolator.__name__
                    if self.extrapolator is not None else self.extrapolator,
                    repr(self.extrapolator_args))

            return representation
        except TypeError:
            # No underlying signals: domain / range are undefined.
            return super(MultiSignals, self).__repr__()
def __hash__(self):
"""
Returns the abstract continuous function hash.
Returns
-------
int
Object hash.
"""
return hash(repr(self))
def __getitem__(self, x):
"""
Returns the corresponding range :math:`y` variable for independent
domain :math:`x` variable.
Parameters
----------
x : numeric, array_like or slice
Independent domain :math:`x` variable.
Returns
-------
numeric or ndarray
math:`y` range value.
Examples
--------
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> multi_signals = MultiSignals(range_)
>>> print(multi_signals)
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 40. 50. 60.]
[ 4. 50. 60. 70.]
[ 5. 60. 70. 80.]
[ 6. 70. 80. 90.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
>>> multi_signals[0]
array([ 10., 20., 30.])
>>> multi_signals[np.array([0, 1, 2])]
array([[ 10., 20., 30.],
[ 20., 30., 40.],
[ 30., 40., 50.]])
>>> multi_signals[0:3]
array([[ 10., 20., 30.],
[ 20., 30., 40.],
[ 30., 40., 50.]])
>>> multi_signals[np.linspace(0, 5, 5)] # doctest: +ELLIPSIS
array([[ 10. ..., 20. ..., 30. ...],
[ 22.8348902..., 32.8046056..., 42.774321 ...],
[ 34.8004492..., 44.7434347..., 54.6864201...],
[ 47.5535392..., 57.5232546..., 67.4929700...],
[ 60. ..., 70. ..., 80. ...]])
"""
if self._signals:
return tstack([signal[x] for signal in self._signals.values()])
else:
raise RuntimeError('No underlying "Signal" defined!')
def __setitem__(self, x, y):
"""
Sets the corresponding range :math:`y` variable for independent domain
:math:`x` variable.
Parameters
----------
x : numeric, array_like or slice
Independent domain :math:`x` variable.
y : numeric or ndarray
Corresponding range :math:`y` variable.
Examples
--------
>>> domain = np.arange(0, 10, 1)
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> multi_signals = MultiSignals(range_)
>>> print(multi_signals)
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 40. 50. 60.]
[ 4. 50. 60. 70.]
[ 5. 60. 70. 80.]
[ 6. 70. 80. 90.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
>>> multi_signals[0] = 20
>>> multi_signals[0]
array([ 20., 20., 20.])
>>> multi_signals[np.array([0, 1, 2])] = 30
>>> multi_signals[np.array([0, 1, 2])]
array([[ 30., 30., 30.],
[ 30., 30., 30.],
[ 30., 30., 30.]])
>>> multi_signals[0:3] = 40
>>> multi_signals[0:3]
array([[ 40., 40., 40.],
[ 40., 40., 40.],
[ 40., 40., 40.]])
>>> multi_signals[np.linspace(0, 5, 5)] = 50
>>> print(multi_signals)
[[ 0. 50. 50. 50. ]
[ 1. 40. 40. 40. ]
[ 1.25 50. 50. 50. ]
[ 2. 40. 40. 40. ]
[ 2.5 50. 50. 50. ]
[ 3. 40. 50. 60. ]
[ 3.75 50. 50. 50. ]
[ 4. 50. 60. 70. ]
[ 5. 50. 50. 50. ]
[ 6. 70. 80. 90. ]
[ 7. 80. 90. 100. ]
[ 8. 90. 100. 110. ]
[ 9. 100. 110. 120. ]]
>>> multi_signals[np.array([0, 1, 2])] = np.array([10, 20, 30])
>>> print(multi_signals)
[[ 0. 10. 20. 30. ]
[ 1. 10. 20. 30. ]
[ 1.25 50. 50. 50. ]
[ 2. 10. 20. 30. ]
[ 2.5 50. 50. 50. ]
[ 3. 40. 50. 60. ]
[ 3.75 50. 50. 50. ]
[ 4. 50. 60. 70. ]
[ 5. 50. 50. 50. ]
[ 6. 70. 80. 90. ]
[ 7. 80. 90. 100. ]
[ 8. 90. 100. 110. ]
[ 9. 100. 110. 120. ]]
>>> y = np.arange(1, 10, 1).reshape(3, 3)
>>> multi_signals[np.array([0, 1, 2])] = y
>>> print(multi_signals)
[[ 0. 1. 2. 3. ]
[ 1. 4. 5. 6. ]
[ 1.25 50. 50. 50. ]
[ 2. 7. 8. 9. ]
[ 2.5 50. 50. 50. ]
[ 3. 40. 50. 60. ]
[ 3.75 50. 50. 50. ]
[ 4. 50. 60. 70. ]
[ 5. 50. 50. 50. ]
[ 6. 70. 80. 90. ]
[ 7. 80. 90. 100. ]
[ 8. 90. 100. 110. ]
[ 9. 100. 110. 120. ]]
"""
y = as_float_array(y)
assert y.ndim in range(3), (
'Corresponding "y" variable must be a numeric or a 1-dimensional '
'or 2-dimensional array!')
if y.ndim == 0:
y = np.tile(y, len(self._signals))
elif y.ndim == 1:
y = y[np.newaxis, :]
assert y.shape[-1] == len(self._signals), (
'Corresponding "y" variable columns must have same count than '
'underlying "Signal" components!')
for signal, y in zip(self._signals.values(), tsplit(y)):
signal[x] = y
def __contains__(self, x):
"""
Returns whether the multi-continuous signals contains given independent
domain :math:`x` variable.
Parameters
----------
x : numeric, array_like or slice
Independent domain :math:`x` variable.
Returns
-------
bool
Is :math:`x` domain value contained.
Examples
--------
>>> range_ = np.linspace(10, 100, 10)
>>> multi_signals = MultiSignals(range_)
>>> 0 in multi_signals
True
>>> 0.5 in multi_signals
True
>>> 1000 in multi_signals
False
"""
if self._signals:
return x in first_item(self._signals.values())
else:
raise RuntimeError('No underlying "Signal" defined!')
def __eq__(self, other):
"""
Returns whether the multi-continuous signals is equal to given other
object.
Parameters
----------
other : object
Object to test whether it is equal to the multi-continuous signals.
Returns
-------
bool
Is given object equal to the multi-continuous signals.
Examples
--------
>>> range_ = np.linspace(10, 100, 10)
>>> multi_signals_1 = MultiSignals(range_)
>>> multi_signals_2 = MultiSignals(range_)
>>> multi_signals_1 == multi_signals_2
True
>>> multi_signals_2[0] = 20
>>> multi_signals_1 == multi_signals_2
False
>>> multi_signals_2[0] = 10
>>> multi_signals_1 == multi_signals_2
True
>>> from colour.algebra import CubicSplineInterpolator
>>> multi_signals_2.interpolator = CubicSplineInterpolator
>>> multi_signals_1 == multi_signals_2
False
"""
if isinstance(other, MultiSignals):
if all([
np.array_equal(self.domain, other.domain),
np.array_equal(
self.range,
other.range), self.interpolator is other.interpolator,
self.interpolator_args == other.interpolator_args,
self.extrapolator is other.extrapolator,
self.extrapolator_args == other.extrapolator_args,
self.labels == other.labels
]):
return True
return False
def __ne__(self, other):
"""
Returns whether the multi-continuous signals is not equal to given
other object.
Parameters
----------
other : object
Object to test whether it is not equal to the multi-continuous
signals.
Returns
-------
bool
Is given object not equal to the multi-continuous signals.
Examples
--------
>>> range_ = np.linspace(10, 100, 10)
>>> multi_signals_1 = MultiSignals(range_)
>>> multi_signals_2 = MultiSignals(range_)
>>> multi_signals_1 != multi_signals_2
False
>>> multi_signals_2[0] = 20
>>> multi_signals_1 != multi_signals_2
True
>>> multi_signals_2[0] = 10
>>> multi_signals_1 != multi_signals_2
False
>>> from colour.algebra import CubicSplineInterpolator
>>> multi_signals_2.interpolator = CubicSplineInterpolator
>>> multi_signals_1 != multi_signals_2
True
"""
return not (self == other)
def arithmetical_operation(self, a, operation, in_place=False):
"""
Performs given arithmetical operation with :math:`a` operand, the
operation can be either performed on a copy or in-place.
Parameters
----------
a : numeric or ndarray or Signal
Operand.
operation : object
Operation to perform.
in_place : bool, optional
Operation happens in place.
Returns
-------
MultiSignals
multi-continuous signals.
Examples
--------
Adding a single *numeric* variable:
>>> domain = np.arange(0, 10, 1)
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> multi_signals_1 = MultiSignals(range_)
>>> print(multi_signals_1)
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 40. 50. 60.]
[ 4. 50. 60. 70.]
[ 5. 60. 70. 80.]
[ 6. 70. 80. 90.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
>>> print(multi_signals_1.arithmetical_operation(10, '+', True))
[[ 0. 20. 30. 40.]
[ 1. 30. 40. 50.]
[ 2. 40. 50. 60.]
[ 3. 50. 60. 70.]
[ 4. 60. 70. 80.]
[ 5. 70. 80. 90.]
[ 6. 80. 90. 100.]
[ 7. 90. 100. 110.]
[ 8. 100. 110. 120.]
[ 9. 110. 120. 130.]]
Adding an *array_like* variable:
>>> a = np.linspace(10, 100, 10)
>>> print(multi_signals_1.arithmetical_operation(a, '+', True))
[[ 0. 30. 40. 50.]
[ 1. 50. 60. 70.]
[ 2. 70. 80. 90.]
[ 3. 90. 100. 110.]
[ 4. 110. 120. 130.]
[ 5. 130. 140. 150.]
[ 6. 150. 160. 170.]
[ 7. 170. 180. 190.]
[ 8. 190. 200. 210.]
[ 9. 210. 220. 230.]]
>>> a = np.array([[10, 20, 30]])
>>> print(multi_signals_1.arithmetical_operation(a, '+', True))
[[ 0. 40. 60. 80.]
[ 1. 60. 80. 100.]
[ 2. 80. 100. 120.]
[ 3. 100. 120. 140.]
[ 4. 120. 140. 160.]
[ 5. 140. 160. 180.]
[ 6. 160. 180. 200.]
[ 7. 180. 200. 220.]
[ 8. 200. 220. 240.]
[ 9. 220. 240. 260.]]
>>> a = np.arange(0, 30, 1).reshape([10, 3])
>>> print(multi_signals_1.arithmetical_operation(a, '+', True))
[[ 0. 40. 61. 82.]
[ 1. 63. 84. 105.]
[ 2. 86. 107. 128.]
[ 3. 109. 130. 151.]
[ 4. 132. 153. 174.]
[ 5. 155. 176. 197.]
[ 6. 178. 199. 220.]
[ 7. 201. 222. 243.]
[ 8. 224. 245. 266.]
[ 9. 247. 268. 289.]]
Adding a :class:`colour.continuous.Signal` sub-class:
>>> multi_signals_2 = MultiSignals(range_)
>>> print(multi_signals_1.arithmetical_operation(
... multi_signals_2, '+', True))
[[ 0. 50. 81. 112.]
[ 1. 83. 114. 145.]
[ 2. 116. 147. 178.]
[ 3. 149. 180. 211.]
[ 4. 182. 213. 244.]
[ 5. 215. 246. 277.]
[ 6. 248. 279. 310.]
[ 7. 281. 312. 343.]
[ 8. 314. 345. 376.]
[ 9. 347. 378. 409.]]
"""
multi_signals = self if in_place else self.copy()
if isinstance(a, MultiSignals):
assert len(self.signals) == len(a.signals), (
'"MultiSignals" operands must have same count than '
'underlying "Signal" components!')
for signal_a, signal_b in zip(multi_signals.signals.values(),
a.signals.values()):
signal_a.arithmetical_operation(signal_b, operation, True)
else:
a = as_float_array(a)
assert a.ndim in range(3), (
'Operand "a" variable must be a numeric or a 1-dimensional or '
'2-dimensional array!')
if a.ndim in (0, 1):
for signal in multi_signals.signals.values():
signal.arithmetical_operation(a, operation, True)
else:
assert a.shape[-1] == len(multi_signals.signals), (
'Operand "a" variable columns must have same count than '
'underlying "Signal" components!')
for signal, y in zip(multi_signals.signals.values(),
tsplit(a)):
signal.arithmetical_operation(y, operation, True)
return multi_signals
@staticmethod
def multi_signals_unpack_data(data=None,
domain=None,
labels=None,
dtype=DEFAULT_FLOAT_DTYPE,
signal_type=Signal,
**kwargs):
"""
Unpack given data for multi-continuous signals instantiation.
Parameters
----------
data : Series or Dataframe or Signal or MultiSignals or array_like or \
dict_like, optional
Data to unpack for multi-continuous signals instantiation.
domain : array_like, optional
Values to initialise the multiple :class:`colour.continuous.Signal`
sub-class instances :attr:`colour.continuous.Signal.domain`
attribute with. If both ``data`` and ``domain`` arguments are
defined, the latter will be used to initialise the
:attr:`colour.continuous.Signal.domain` attribute.
dtype : type, optional
**{np.float16, np.float32, np.float64, np.float128}**,
Floating point data type.
signal_type : type, optional
A :class:`colour.continuous.Signal` sub-class type.
Other Parameters
----------------
name : unicode, optional
multi-continuous signals name.
interpolator : object, optional
Interpolator class type to use as interpolating function for the
:class:`colour.continuous.Signal` sub-class instances.
interpolator_args : dict_like, optional
Arguments to use when instantiating the interpolating function
of the :class:`colour.continuous.Signal` sub-class instances.
extrapolator : object, optional
Extrapolator class type to use as extrapolating function for the
:class:`colour.continuous.Signal` sub-class instances.
extrapolator_args : dict_like, optional
Arguments to use when instantiating the extrapolating function
of the :class:`colour.continuous.Signal` sub-class instances.
Returns
-------
dict
Mapping of labeled :class:`colour.continuous.Signal` sub-class
instances.
Examples
--------
Unpacking using implicit *domain* and a single signal:
>>> range_ = np.linspace(10, 100, 10)
>>> signals = MultiSignals.multi_signals_unpack_data(range_)
>>> list(signals.keys())
[0]
>>> print(signals[0])
[[ 0. 10.]
[ 1. 20.]
[ 2. 30.]
[ 3. 40.]
[ 4. 50.]
[ 5. 60.]
[ 6. 70.]
[ 7. 80.]
[ 8. 90.]
[ 9. 100.]]
Unpacking using explicit *domain* and a single signal:
>>> domain = np.arange(100, 1100, 100)
>>> signals = MultiSignals.multi_signals_unpack_data(range_, domain)
>>> list(signals.keys())
[0]
>>> print(signals[0])
[[ 100. 10.]
[ 200. 20.]
[ 300. 30.]
[ 400. 40.]
[ 500. 50.]
[ 600. 60.]
[ 700. 70.]
[ 800. 80.]
[ 900. 90.]
[ 1000. 100.]]
Unpacking using multiple signals:
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> signals = MultiSignals.multi_signals_unpack_data(range_, domain)
>>> list(signals.keys())
[0, 1, 2]
>>> print(signals[2])
[[ 100. 30.]
[ 200. 40.]
[ 300. 50.]
[ 400. 60.]
[ 500. 70.]
[ 600. 80.]
[ 700. 90.]
[ 800. 100.]
[ 900. 110.]
[ 1000. 120.]]
Unpacking using a *dict*:
>>> signals = MultiSignals.multi_signals_unpack_data(
... dict(zip(domain, range_)))
>>> list(signals.keys())
[0, 1, 2]
>>> print(signals[2])
[[ 100. 30.]
[ 200. 40.]
[ 300. 50.]
[ 400. 60.]
[ 500. 70.]
[ 600. 80.]
[ 700. 90.]
[ 800. 100.]
[ 900. 110.]
[ 1000. 120.]]
Unpacking using *MultiSignals.multi_signals_unpack_data* method output:
>>> signals = MultiSignals.multi_signals_unpack_data(
... dict(zip(domain, range_)))
>>> signals = MultiSignals.multi_signals_unpack_data(signals)
>>> list(signals.keys())
[0, 1, 2]
>>> print(signals[2])
[[ 100. 30.]
[ 200. 40.]
[ 300. 50.]
[ 400. 60.]
[ 500. 70.]
[ 600. 80.]
[ 700. 90.]
[ 800. 100.]
[ 900. 110.]
[ 1000. 120.]]
Unpacking using a *Pandas* *Series*:
>>> if is_pandas_installed():
... from pandas import Series
... signals = MultiSignals.multi_signals_unpack_data(
... Series(dict(zip(domain, np.linspace(10, 100, 10)))))
... print(signals[0]) # doctest: +SKIP
[[ 100. 10.]
[ 200. 20.]
[ 300. 30.]
[ 400. 40.]
[ 500. 50.]
[ 600. 60.]
[ 700. 70.]
[ 800. 80.]
[ 900. 90.]
[ 1000. 100.]]
Unpacking using a *Pandas* *Dataframe*:
>>> if is_pandas_installed():
... from pandas import DataFrame
... data = dict(zip(['a', 'b', 'c'], tsplit(range_)))
... signals = MultiSignals.multi_signals_unpack_data(
... DataFrame(data, domain))
... print(signals['c']) # doctest: +SKIP
[[ 100. 30.]
[ 200. 40.]
[ 300. 50.]
[ 400. 60.]
[ 500. 70.]
[ 600. 80.]
[ 700. 90.]
[ 800. 100.]
[ 900. 110.]
[ 1000. 120.]]
"""
assert dtype in np.sctypes['float'], (
'"dtype" must be one of the following types: {0}'.format(
np.sctypes['float']))
domain_u, range_u, signals = None, None, None
signals = OrderedDict()
# TODO: Implement support for Signal class passing.
if isinstance(data, MultiSignals):
signals = data.signals
elif (issubclass(type(data), Sequence) or
isinstance(data, (tuple, list, np.ndarray, Iterator))):
data = tsplit(list(data) if isinstance(data, Iterator) else data)
assert data.ndim in (1, 2), (
'User "data" must be 1-dimensional or 2-dimensional!')
if data.ndim == 1:
data = data[np.newaxis, :]
for i, range_u in enumerate(data):
signals[i] = signal_type(range_u, domain, **kwargs)
elif (issubclass(type(data), Mapping) or
isinstance(data, (dict, OrderedDict))):
# Handling `MultiSignals.multi_signals_unpack_data` method output
# used as argument to `MultiSignals.multi_signals_unpack_data`
# method.
is_signal = all([
True if isinstance(i, Signal) else False
for i in data.values()
])
if is_signal:
for label, signal in data.items():
signals[label] = signal_type(signal.range, signal.domain,
**kwargs)
else:
domain_u, range_u = zip(*sorted(data.items()))
for i, range_u in enumerate(tsplit(range_u)):
signals[i] = signal_type(range_u, domain_u, **kwargs)
elif is_pandas_installed():
from pandas import DataFrame, Series
if isinstance(data, Series):
signals[0] = signal_type(data, **kwargs)
elif isinstance(data, DataFrame):
domain_u = data.index.values
signals = OrderedDict(((label,
signal_type(
data[label],
domain_u,
name=label,
**kwargs)) for label in data))
if domain is not None and signals is not None:
for signal in signals.values():
assert len(domain) == len(signal.domain), (
'User "domain" is not compatible with unpacked signals!')
signal.domain = domain
if labels is not None and signals is not None:
assert len(labels) == len(signals), (
'User "labels" is not compatible with unpacked signals!')
signals = OrderedDict(
[(labels[i], signal)
for i, (_key, signal) in enumerate(signals.items())])
return signals
def fill_nan(self, method='Interpolation', default=0):
"""
Fill NaNs in independent domain :math:`x` variable and corresponding
range :math:`y` variable using given method.
Parameters
----------
method : unicode, optional
**{'Interpolation', 'Constant'}**,
*Interpolation* method linearly interpolates through the NaNs,
*Constant* method replaces NaNs with ``default``.
default : numeric, optional
Value to use with the *Constant* method.
Returns
-------
Signal
NaNs filled multi-continuous signals.
>>> domain = np.arange(0, 10, 1)
>>> range_ = tstack([np.linspace(10, 100, 10)] * 3)
>>> range_ += np.array([0, 10, 20])
>>> multi_signals = MultiSignals(range_)
>>> multi_signals[3:7] = np.nan
>>> print(multi_signals)
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. nan nan nan]
[ 4. nan nan nan]
[ 5. nan nan nan]
[ 6. nan nan nan]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
>>> print(multi_signals.fill_nan())
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 40. 50. 60.]
[ 4. 50. 60. 70.]
[ 5. 60. 70. 80.]
[ 6. 70. 80. 90.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
>>> multi_signals[3:7] = np.nan
>>> print(multi_signals.fill_nan(method='Constant'))
[[ 0. 10. 20. 30.]
[ 1. 20. 30. 40.]
[ 2. 30. 40. 50.]
[ 3. 0. 0. 0.]
[ 4. 0. 0. 0.]
[ 5. 0. 0. 0.]
[ 6. 0. 0. 0.]
[ 7. 80. 90. 100.]
[ 8. 90. 100. 110.]
[ 9. 100. 110. 120.]]
"""
for signal in self._signals.values():
signal.fill_nan(method, default)
return self
def to_dataframe(self):
"""
Converts the continuous signal to a *Pandas* :class:`DataFrame` class
instance.
Returns
-------
DataFrame
Continuous signal as a *Pandas* :class:`DataFrame` class instance.
Examples
--------
>>> if is_pandas_installed():
... domain = np.arange(0, 10, 1)
... range_ = tstack([np.linspace(10, 100, 10)] * 3)
... range_ += np.array([0, 10, 20])
... multi_signals = MultiSignals(range_)
... print(multi_signals.to_dataframe()) # doctest: +SKIP
0 1 2
0.0 10.0 20.0 30.0
1.0 20.0 30.0 40.0
2.0 30.0 40.0 50.0
3.0 40.0 50.0 60.0
4.0 50.0 60.0 70.0
5.0 60.0 70.0 80.0
6.0 70.0 80.0 90.0
7.0 80.0 90.0 100.0
8.0 90.0 100.0 110.0
9.0 100.0 110.0 120.0
"""
if is_pandas_installed():
from pandas import DataFrame
return DataFrame(
data=self.range, index=self.domain, columns=self.labels)
| [
"pandas.DataFrame",
"numpy.array_equal",
"numpy.hstack",
"colour.utilities.is_pandas_installed",
"colour.utilities.as_float_array",
"collections.OrderedDict",
"colour.utilities.tsplit"
] | [((26896, 26913), 'colour.utilities.as_float_array', 'as_float_array', (['y'], {}), '(y)\n', (26910, 26913), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((42332, 42345), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (42343, 42345), False, 'from collections import Iterator, Mapping, OrderedDict, Sequence\n'), ((48322, 48343), 'colour.utilities.is_pandas_installed', 'is_pandas_installed', ([], {}), '()\n', (48341, 48343), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((11024, 11045), 'colour.utilities.as_float_array', 'as_float_array', (['value'], {}), '(value)\n', (11038, 11045), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((27431, 27440), 'colour.utilities.tsplit', 'tsplit', (['y'], {}), '(y)\n', (27437, 27440), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((35199, 35216), 'colour.utilities.as_float_array', 'as_float_array', (['a'], {}), '(a)\n', (35213, 35216), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((48406, 48472), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'self.range', 'index': 'self.domain', 'columns': 'self.labels'}), '(data=self.range, index=self.domain, columns=self.labels)\n', (48415, 48472), False, 'from pandas import DataFrame, Series\n'), ((18702, 18753), 'numpy.hstack', 'np.hstack', (['[self.domain[:, np.newaxis], self.range]'], {}), '([self.domain[:, np.newaxis], self.range])\n', (18711, 18753), True, 'import numpy as np\n'), ((20121, 20172), 'numpy.hstack', 'np.hstack', (['[self.domain[:, np.newaxis], self.range]'], {}), '([self.domain[:, np.newaxis], self.range])\n', (20130, 20172), True, 'import numpy as np\n'), ((11462, 11475), 'colour.utilities.tsplit', 'tsplit', (['value'], {}), '(value)\n', (11468, 11475), False, 
'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((29415, 29456), 'numpy.array_equal', 'np.array_equal', (['self.domain', 'other.domain'], {}), '(self.domain, other.domain)\n', (29429, 29456), True, 'import numpy as np\n'), ((29478, 29517), 'numpy.array_equal', 'np.array_equal', (['self.range', 'other.range'], {}), '(self.range, other.range)\n', (29492, 29517), True, 'import numpy as np\n'), ((35871, 35880), 'colour.utilities.tsplit', 'tsplit', (['a'], {}), '(a)\n', (35877, 35880), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((43849, 43870), 'colour.utilities.is_pandas_installed', 'is_pandas_installed', ([], {}), '()\n', (43868, 43870), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n'), ((43744, 43759), 'colour.utilities.tsplit', 'tsplit', (['range_u'], {}), '(range_u)\n', (43750, 43759), False, 'from colour.utilities import as_float_array, first_item, is_pandas_installed, tsplit, tstack\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Plot Loop-Closure-Detection
#
# Plots statistics on loop closure detection as well as optimized trajectory RPE, APE and trajectory against ground truth.
# %%
import yaml
import os
import copy
import pandas as pd
import numpy as np
import logging
# Module logger at INFO level; attach a single stream handler exactly once
# (re-imports / repeated cell execution must not duplicate handlers).
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
if not log.handlers:
    _handler = logging.StreamHandler()
    _handler.setLevel(logging.INFO)
    _handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    log.addHandler(_handler)
from evo.tools import file_interface
from evo.tools import plot
from evo.tools import pandas_bridge
from evo.core import sync
from evo.core import trajectory
from evo.core import metrics
from evo.core import transformations
from evo.core import lie_algebra as lie
# %matplotlib inline
# # %matplotlib notebook
import matplotlib.pyplot as plt
# %% [markdown]
# ## Data Locations
#
# Make sure to set the following paths.
#
# `vio_output_dir` is the path to the directory containing `output_*.csv` files obtained from logging a run of SparkVIO.
#
# `gt_data_file` is the absolute path to the `csv` file containing ground truth data for the absolute pose at each timestamp of the dataset.
# %%
# Define directory to VIO output csv files as well as ground truth absolute poses.
# NOTE(review): machine-specific absolute paths — update these for your
# own checkout / dataset location before running the notebook.
vio_output_dir = "/home/sparklab/code/SparkVIO/output_logs/"
gt_data_file = "/home/sparklab/datasets/EuRoC/mh_04_difficult/mav0/state_groundtruth_estimate0/data.csv"
# %%
def get_ape(data, metric):
    """Compute the absolute pose error (APE) for a trajectory pair.

    Args:
        data: tuple of trajectories, the first being the reference trajectory
            and the second being the estimated trajectory.
        metric: a metrics.PoseRelation instance representing the pose relation
            to use when computing APE.

    Returns:
        A metrics.APE instance containing the APE for both trajectories
        according to the given metric.
    """
    result = metrics.APE(metric)
    result.process_data(data)
    return result
def plot_ape(x_axis, ape, size=(18, 10), title=None):
    """Plot APE error against time for a given metrics.APE instance.

    Args:
        x_axis: An array-type of values for all the x-axis values (time).
        ape: A metrics.APE instance with pre-processed data.
        size: A tuple optionally containing the size of the figure.
        title: Optional plot title; defaults to the APE pose-relation name.
    """
    plot_title = title
    if plot_title is None:
        plot_title = "APE w.r.t. " + ape.pose_relation.value

    figure = plt.figure(figsize=size)
    plot.error_array(
        figure,
        ape.error,
        x_array=x_axis,
        statistics=ape.get_all_statistics(),
        name="APE",
        title=plot_title,
        xlabel="$t$ (s)",
    )
    plt.show()
def get_rpe(data, metric):
    """Compute the relative pose error (RPE) for a trajectory pair.

    Args:
        data: tuple of trajectories, the first being the reference trajectory
            and the second being the estimated trajectory.
        metric: a metrics.PoseRelation instance representing the pose relation
            to use when computing RPE.

    Returns:
        A metrics.RPE instance containing the RPE for both trajectories
        according to the given metric.
    """
    # Normal mode: delta of one frame, no all-pairs comparison.
    result = metrics.RPE(metric, 1, metrics.Unit.frames, False)
    result.process_data(data)
    return result
def plot_rpe(x_axis, rpe, size=(18, 10), title=None):
    """Plots RPE error against time for a given metrics.RPE instance.

    Args:
        x_axis: An array-type of values for all the x-axis values (time).
        rpe: A metrics.RPE instance with pre-processed data.
        size: A tuple optionally containing the size of the figure to be plotted.
        title: Optional plot title; defaults to the RPE pose-relation name.
    """
    # `is None` instead of `== None`: identity check is the PEP 8 idiom,
    # matches `plot_ape`, and is safe for objects overriding __eq__.
    if title is None:
        title = "RPE w.r.t. " + rpe.pose_relation.value
    fig = plt.figure(figsize=size)
    plot.error_array(
        fig,
        rpe.error,
        x_array=x_axis,
        statistics=rpe.get_all_statistics(),
        name="RPE",
        title=title,
        xlabel="$t$ (s)",
    )
    plt.show()
def downsize_lc_df(df):
    """Keep only the loop-closure rows of a detection DataFrame.

    Rows whose timestamp index is 0 (keyframes without a loop closure) are
    dropped, as are duplicated timestamps (the first occurrence is kept).

    Args:
        df: A pandas.DataFrame object representing loop-closure detections,
            indexed by timestamp.

    Returns:
        A pandas.DataFrame object with only loop closure entries.
    """
    deduplicated = df[~df.index.duplicated()]
    timestamps = np.array(deduplicated.index.tolist())
    return deduplicated.reindex(index=timestamps[timestamps > 0])
def _lookup_abs_pose_lcd(df, ts):
    """Return the absolute pose at timestamp ``ts`` of ``df`` as a 4x4
    homogeneous matrix, or None (with a warning printed) when ``ts`` is
    missing from the ground-truth DataFrame."""
    try:
        t = np.array([df.at[ts, idx] for idx in ["x", "y", "z"]])
        q = np.array([df.at[ts, idx] for idx in ["qw", "qx", "qy", "qz"]])
        T = transformations.quaternion_matrix(q)
        T[:3, 3] = t
        return T
    # `except Exception` (not bare `except:`) so KeyboardInterrupt and
    # SystemExit still propagate; a missing timestamp raises KeyError.
    except Exception:
        print(
            "Failed to convert an abs pose to a rel pose. Timestamp ",
            ts,
            " is not available in ground truth df.",
        )
        return None


def convert_abs_traj_to_rel_traj_lcd(df, lcd_df, to_scale=True):
    """Converts an absolute-pose trajectory to a relative-pose trajectory.

    For each loop closure (each row of lcd_df), the relative pose from the
    match timestamp to the query timestamp is calculated (in the match-
    timestamp's coordinate frame) and appended to the resulting DataFrame,
    indexed by the loop closure's index in lcd_df. Pairs whose match or
    query timestamp is missing from the ground truth are skipped with a
    warning.

    Args:
        df: A pandas.DataFrame object with timestamps as indices containing,
            at a minimum, columns representing the xyz position and wxyz
            quaternion-rotation at each timestamp, corresponding to the
            absolute pose at that time.
        lcd_df: A pandas.DataFrame object with timestamps as indices
            containing, at a minimum, columns representing the timestamp of
            query frames and the timestamps of the match frames.
        to_scale: A boolean. If set to False, relative poses will have their
            translation part normalized.

    Returns:
        A pandas.DataFrame object with xyz position and wxyz quaternion
        fields for the relative pose trajectory corresponding to the
        absolute one given in 'df', and relative by the given match and
        query timestamps.
    """
    rows_list = []
    index_list = []
    for i in range(len(lcd_df.index)):
        match_ts = lcd_df.timestamp_match[lcd_df.index[i]]
        query_ts = lcd_df.timestamp_query[lcd_df.index[i]]
        w_T_bi = _lookup_abs_pose_lcd(df, match_ts)
        if w_T_bi is None:
            continue
        w_T_bidelta = _lookup_abs_pose_lcd(df, query_ts)
        if w_T_bidelta is None:
            continue
        index_list.append(lcd_df.index[i])
        # Relative pose expressed in the match keyframe's frame.
        bi_T_bidelta = lie.relative_se3(w_T_bi, w_T_bidelta)
        # quaternion_from_matrix expects a pure-rotation homogeneous
        # matrix, so zero out the translation column on a copy.
        bi_R_bidelta = copy.deepcopy(bi_T_bidelta)
        bi_R_bidelta[:, 3] = np.array([0, 0, 0, 1])
        bi_q_bidelta = transformations.quaternion_from_matrix(bi_R_bidelta)
        bi_t_bidelta = bi_T_bidelta[:3, 3]
        if not to_scale:
            norm = np.linalg.norm(bi_t_bidelta)
            if norm > 1e-6:
                # Hoisted: reuse the already computed norm.
                bi_t_bidelta = bi_t_bidelta / norm
        new_row = {
            "x": bi_t_bidelta[0],
            "y": bi_t_bidelta[1],
            "z": bi_t_bidelta[2],
            "qw": bi_q_bidelta[0],
            "qx": bi_q_bidelta[1],
            "qy": bi_q_bidelta[2],
            "qz": bi_q_bidelta[3],
        }
        rows_list.append(new_row)
    return pd.DataFrame(data=rows_list, index=index_list)
def rename_euroc_gt_df(df):
    """Rename EuRoC ground-truth columns to the generic trajectory names.

    The verbose EuRoC csv headers are mapped in place to the short names
    used by the generic pose-trajectory format ('x', 'qw', 'vx', 'bgx',
    'bax', ...) and the index is renamed to 'timestamp'. The DataFrame is
    modified in place; nothing is returned.

    Args:
        df: A pandas.DataFrame object loaded from a EuRoC ground-truth csv.
    """
    euroc_to_generic = {
        " p_RS_R_x [m]": "x",
        " p_RS_R_y [m]": "y",
        " p_RS_R_z [m]": "z",
        " q_RS_w []": "qw",
        " q_RS_x []": "qx",
        " q_RS_y []": "qy",
        " q_RS_z []": "qz",
        " v_RS_R_x [m s^-1]": "vx",
        " v_RS_R_y [m s^-1]": "vy",
        " v_RS_R_z [m s^-1]": "vz",
        " b_w_RS_S_x [rad s^-1]": "bgx",
        " b_w_RS_S_y [rad s^-1]": "bgy",
        " b_w_RS_S_z [rad s^-1]": "bgz",
        " b_a_RS_S_x [m s^-2]": "bax",
        " b_a_RS_S_y [m s^-2]": "bay",
        " b_a_RS_S_z [m s^-2]": "baz",
    }
    df.index.names = ["timestamp"]
    df.rename(columns=euroc_to_generic, inplace=True)
def rename_lcd_result_df(df):
    """Rename LCD result columns so the frame reads as a trajectory.

    Maps the position columns 'px'/'py'/'pz' to 'x'/'y'/'z' and renames
    the index to 'timestamp'. The DataFrame is modified in place; nothing
    is returned.

    Args:
        df: A pandas.DataFrame object.
    """
    position_columns = {"px": "x", "py": "y", "pz": "z"}
    df.index.names = ["timestamp"]
    df.rename(columns=position_columns, inplace=True)
# %% [markdown]
# ## LoopClosureDetector Statistics Plotting
#
# Gather and plot various statistics on LCD module performance, including RANSAC information, keyframe status (w.r.t. loop closure detection), and loop closure events and the quality of their relative poses.
# %% [markdown]
# ### LCD Status Frequency Chart
#
# Each keyframe is processed for potential loop closures. During this process, the loop-closure detector can either identify a loop closure or not. There are several reasons why a loop closure would not be detected. This plot helps to identify why loop closures are not detected between keyframes.
# %%
# Load the per-keyframe LCD status log and tally how often each status
# (loop detected, failure mode, ...) occurred.
output_lcd_status_filename = os.path.join(
    os.path.expandvars(vio_output_dir), "output_lcd_status.csv"
)
lcd_debuginfo_df = pd.read_csv(output_lcd_status_filename, sep=",", index_col=0)
status_freq_map = {}
for status in lcd_debuginfo_df.lcd_status:
    status_freq_map[status] = status_freq_map.get(status, 0) + 1
print("Full Size of PGO: ", lcd_debuginfo_df.pgo_size.tolist()[-1])
# Overall number of loop closures detected over all time.
if "LOOP_DETECTED" in status_freq_map:
    print("Loop Closures Detected: ", status_freq_map["LOOP_DETECTED"])
else:
    print("Loop Closures Detected: 0")
print(
    "Loop Closures Registered by PGO by End: ",
    lcd_debuginfo_df.pgo_lc_count.tolist()[-1],
)
print(
    "Loop Closures Accepted by PGO at End: ",
    lcd_debuginfo_df.pgo_lc_inliers.tolist()[-1],
)
# Failure modes as a histogram of status frequencies.
fig = plt.figure(figsize=(18, 10))
plt.bar(status_freq_map.keys(), status_freq_map.values(), width=1.0)
plt.xticks(status_freq_map.keys(), list(status_freq_map.keys()))
plt.ylabel("Status Frequency")
plt.title("LoopClosureDetector Status Histogram")
plt.show()
# %% [markdown]
# ### LCD RANSAC Performance Charts
#
# Plot the performance of the geometric-verification and pose-recovery steps. These are handled by Nister (5pt) RANSAC and Arun (3pt) RANSAC respectively.
#
# inlier percentages and iterations are plotted for both methods.
# %%
# Keep only keyframes that actually produced a loop closure.
lcd_debuginfo_small_df = downsize_lc_df(lcd_debuginfo_df)
# Helper functions summarising a single column of the reduced DataFrame.
def get_mean(attrib):
    values = lcd_debuginfo_small_df[attrib].tolist()
    return float(sum(values)) / len(values)
def get_min(attrib):
    return min(lcd_debuginfo_small_df[attrib])
def get_max(attrib):
    return max(lcd_debuginfo_small_df[attrib])
# Construct and visualize summary. TODO(marcus): use a LaTeX table.
summary_stats = [
    ("Average number of mono ransac inliers", get_mean("mono_inliers")),
    ("Average size of mono ransac input", get_mean("mono_input_size")),
    ("Average number of stereo ransac inliers", get_mean("stereo_inliers")),
    ("Average size of stereo ransac input", get_mean("stereo_input_size")),
    ("Maximum mono ransac iterations", get_max("mono_iters")),
    ("Maximum stereo ransac iterations", get_max("stereo_iters")),
]
attrib_len = [len(name) for name, _ in summary_stats]
max_attrib_len = max(attrib_len)
print("\nRANSAC Statistic Summary for Loop Closures ONLY:\n")
for name, value in summary_stats:
    # Pad every attribute name to the widest one so values line up.
    print(name + " " * (max_attrib_len - len(name)) + ": " + str(value))
# Plot ransac inlier and iteration statistics.
fig1, axes1 = plt.subplots(nrows=1, ncols=2, figsize=(18, 10), squeeze=False)
# Left subplot: inlier-count histograms, mono and stereo overlaid.
lcd_debuginfo_small_df.plot(kind="hist", y="mono_inliers", ax=axes1[0, 0])
lcd_debuginfo_small_df.plot(kind="hist", y="stereo_inliers", ax=axes1[0, 0])
# Right subplot: RANSAC iteration-count histograms, mono and stereo overlaid.
lcd_debuginfo_small_df.plot(kind="hist", y="mono_iters", ax=axes1[0, 1])
lcd_debuginfo_small_df.plot(kind="hist", y="stereo_iters", ax=axes1[0, 1])
plt.show()
# %% [markdown]
# ### LCD Relative Pose Error Plotting
#
# Calculate error statistics for all individual loop closures and plot their error as compared to ground truth. These plots give insight into how reliable the pose determination between two frames is for each loop closure. This pose determination is done via a combination of 5-pt and 3-pt RANSAC matching of the stereo images from the camera.
# %%
# Ground-truth absolute poses, indexed by timestamp, renamed to the
# generic trajectory format.
gt_df = pd.read_csv(gt_data_file, sep=",", index_col=0)
rename_euroc_gt_df(gt_df)
output_loop_closures_filename = os.path.join(
    os.path.expandvars(vio_output_dir), "output_lcd_result.csv"
)
output_loop_closures_df = pd.read_csv(
    output_loop_closures_filename, sep=",", index_col=0
)
# %%
# Keep loop-closure rows only and compute the ground-truth relative pose
# between each match/query keyframe pair.
small_lc_df = downsize_lc_df(output_loop_closures_df)
rename_lcd_result_df(small_lc_df)
gt_rel_df = convert_abs_traj_to_rel_traj_lcd(gt_df, small_lc_df, to_scale=True)
# %%
# Convert the gt relative-pose DataFrame to a trajectory object.
traj_ref = pandas_bridge.df_to_trajectory(gt_rel_df)
# Use the mono ransac file as estimated trajectory.
traj_est = pandas_bridge.df_to_trajectory(small_lc_df)
# Associate by timestamp so both trajectories cover the same keyframes.
traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est)
print("traj_ref: ", traj_ref)
print("traj_est: ", traj_est)
# %%
# Get RPE for entire relative trajectory.
rpe_rot = get_rpe((traj_ref, traj_est), metrics.PoseRelation.rotation_angle_deg)
rpe_tran = get_rpe((traj_ref, traj_est), metrics.PoseRelation.translation_part)


def _print_rpe_summary(rpe, label):
    """Print an aligned mean/median/rmse/std/min/max summary of an RPE
    metric, matching the original per-metric print format.

    Args:
        rpe: A metrics.RPE instance with pre-processed data.
        label: Heading prefix, e.g. "Rotation" or "Translation".
    """
    stats = [
        ("mean", rpe.get_statistic(metrics.StatisticsType.mean)),
        ("median", rpe.get_all_statistics()["median"]),
        ("rmse", rpe.get_statistic(metrics.StatisticsType.rmse)),
        ("std", rpe.get_statistic(metrics.StatisticsType.std)),
        ("min", rpe.get_statistic(metrics.StatisticsType.min)),
        ("max", rpe.get_statistic(metrics.StatisticsType.max)),
    ]
    width = max(len(name) for name, _ in stats)
    print("\n" + label + " RPE Statistics Summary:\n")
    for name, value in stats:
        # Pad every statistic name to the widest one so values line up.
        print(name + " " * (width - len(name)) + ": " + str(value))


# The rotation and translation summaries were previously two copy-pasted
# blocks; one helper prints both with identical output.
_print_rpe_summary(rpe_rot, "Rotation")
_print_rpe_summary(rpe_tran, "Translation")
# %% [markdown]
# ## LoopClosureDetector PGO-Optimized Trajectory Plotting
#
# Plot the APE, RPE, and trajectory of the Pose-graph-optimized trajectory, including loop closures on top of regular odometry updates.
#
# The results are visualized against both ground truth and the odometry-estimate alone to show the performance gain from loop closure detection.
# %%
# Load ground truth and estimated data as csv DataFrames.
gt_df = pd.read_csv(gt_data_file, sep=",", index_col=0)
output_poses_filename = os.path.join(
    os.path.expandvars(vio_output_dir), "output_posesVIO.csv"
)
output_poses_df = pd.read_csv(output_poses_filename, sep=",", index_col=0)
output_pgo_poses_filename = os.path.join(
    os.path.expandvars(vio_output_dir), "output_lcd_optimized_traj.csv"
)
output_pgo_poses_df = pd.read_csv(output_pgo_poses_filename, sep=",", index_col=0)
# %%
gt_df = gt_df[~gt_df.index.duplicated()]
rename_euroc_gt_df(gt_df)
# %%
# Number of poses to ignore at either end of the trajectory when aligning
# against ground truth (start-up transients / end-of-run artifacts).
# BUG FIX: these were referenced below (and in later cells) but never
# defined anywhere in the notebook, raising a NameError at runtime;
# 0 keeps the full trajectory by default.
discard_n_start_poses = 0
discard_n_end_poses = 0
# Convert the gt relative-pose DataFrame to a trajectory object.
traj_ref = pandas_bridge.df_to_trajectory(gt_df)
# Compare against the VIO without PGO.
traj_ref_cp = copy.deepcopy(traj_ref)
traj_vio = pandas_bridge.df_to_trajectory(output_poses_df)
traj_ref_cp, traj_vio = sync.associate_trajectories(traj_ref_cp, traj_vio)
traj_vio = trajectory.align_trajectory(
    traj_vio,
    traj_ref_cp,
    correct_scale=False,
    discard_n_start_poses=int(discard_n_start_poses),
    discard_n_end_poses=int(discard_n_end_poses),
)
# Use the PGO output as estimated trajectory.
traj_est = pandas_bridge.df_to_trajectory(output_pgo_poses_df)
# Associate the data.
traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est)
traj_est = trajectory.align_trajectory(
    traj_est,
    traj_ref,
    correct_scale=False,
    discard_n_start_poses=int(discard_n_start_poses),
    discard_n_end_poses=int(discard_n_end_poses),
)
print("traj_ref: ", traj_ref)
print("traj_vio: ", traj_vio)
print("traj_est: ", traj_est)
# %% [markdown]
# ## Absolute-Pose-Error Plotting
#
# Plot absolute-pose-error along the entire trajectory. APE gives a good sense of overall VIO performance across the entire trajectory.
# %%
# Plot APE of trajectory rotation and translation parts.
num_of_poses = traj_est.num_poses
# Trim the same pose range from all three trajectories before computing APE.
# NOTE(review): discard_n_start_poses / discard_n_end_poses are not defined
# in any visible cell of this notebook — confirm they are set earlier,
# otherwise these lines raise NameError.
traj_est.reduce_to_ids(
    range(int(discard_n_start_poses), int(num_of_poses - discard_n_end_poses), 1)
)
traj_ref.reduce_to_ids(
    range(int(discard_n_start_poses), int(num_of_poses - discard_n_end_poses), 1)
)
traj_vio.reduce_to_ids(
    range(int(discard_n_start_poses), int(num_of_poses - discard_n_end_poses), 1)
)
# Time axis relative to the first kept pose.
seconds_from_start = [t - traj_est.timestamps[0] for t in traj_est.timestamps]
ape_tran = get_ape((traj_ref, traj_est), metrics.PoseRelation.translation_part)
plot_ape(seconds_from_start, ape_tran, title="VIO+PGO ATE in Meters")
# %%
# Plot the ground truth and estimated trajectories against each other with APE overlaid.
plot_mode = plot.PlotMode.xy
fig = plt.figure(figsize=(18, 10))
ax = plot.prepare_axis(fig, plot_mode)
# Reference (dashed) and plain VIO (dotted) drawn in gray for context.
plot.traj(ax, plot_mode, traj_ref, "--", "gray", "reference")
plot.traj(ax, plot_mode, traj_vio, ".", "gray", "vio without pgo")
# PGO estimate colour-coded by translation APE magnitude.
plot.traj_colormap(
    ax,
    traj_est,
    ape_tran.error,
    plot_mode,
    min_map=ape_tran.get_all_statistics()["min"],
    max_map=ape_tran.get_all_statistics()["max"],
    title="VIO+PGO Trajectory Tracking - Color Coded by ATE",
)
ax.legend()
plt.show()
# %% [markdown]
# ## Relative-Pose-Error Plotting
#
# Plot relative-pose-error along the entire trajectory. RPE gives a good sense of overall VIO performance from one frame to the next.
# %%
# Get RPE for entire relative trajectory.
rpe_rot = get_rpe((traj_ref, traj_est), metrics.PoseRelation.rotation_angle_deg)
rpe_tran = get_rpe((traj_ref, traj_est), metrics.PoseRelation.translation_part)
# %%
# Plot RPE of trajectory rotation and translation parts.
# RPE is computed between consecutive poses, so there is one fewer error
# sample than there are timestamps — hence the [1:] slice for the x-axis.
seconds_from_start = [t - traj_est.timestamps[0] for t in traj_est.timestamps[1:]]
plot_rpe(seconds_from_start, rpe_rot, title="VIO+PGO RRE in Degrees")
plot_rpe(seconds_from_start, rpe_tran, title="VIO+PGO RTE in Meters")
# %%
# important: restrict data to delta ids for plot.
# Deep copies so the original trajectories keep their full pose lists.
traj_ref_plot = copy.deepcopy(traj_ref)
traj_est_plot = copy.deepcopy(traj_est)
traj_ref_plot.reduce_to_ids(rpe_rot.delta_ids)
traj_est_plot.reduce_to_ids(rpe_rot.delta_ids)
# Plot the ground truth and estimated trajectories against each other with RPE overlaid.
plot_mode = plot.PlotMode.xy
fig = plt.figure(figsize=(18, 10))
ax = plot.prepare_axis(fig, plot_mode)
plot.traj(ax, plot_mode, traj_ref_plot, "--", "gray", "reference")
# Estimate colour-coded by rotation RPE magnitude.
plot.traj_colormap(
    ax,
    traj_est_plot,
    rpe_rot.error,
    plot_mode,
    min_map=rpe_rot.get_all_statistics()["min"],
    max_map=rpe_rot.get_all_statistics()["max"],
    title="VIO+PGO Trajectory Tracking - Color Coded by RRE",
)
ax.legend()
plt.show()
# %%
# Rebuild the VIO-only trajectory and compare all three in 3D.
traj_vio = pandas_bridge.df_to_trajectory(output_poses_df)
# BUG FIX: the association previously passed `traj_est` as the second
# argument, so `traj_vio` was overwritten with the PGO estimate and the
# "VIO only" curve actually showed the PGO trajectory.
traj_ref, traj_vio = sync.associate_trajectories(traj_ref, traj_vio)
traj_vio = trajectory.align_trajectory(traj_vio, traj_ref, correct_scale=False)
# Plot the trajectories for quick error visualization.
fig = plt.figure(figsize=(18, 10))
traj_by_label = {"VIO only": traj_vio, "VIO + PGO": traj_est, "reference": traj_ref}
plot.trajectories(
    fig, traj_by_label, plot.PlotMode.xyz, title="PIM Trajectory Tracking in 3D"
)
plt.show()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"evo.core.trajectory.align_trajectory",
"logging.Formatter",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"evo.tools.plot.prepare_axis",
"evo.core.metrics.RPE",
"pandas.DataFrame",
"evo.core.metrics.APE",
"evo.tools.plot.trajectories",
"matplot... | [((575, 602), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (592, 602), False, 'import logging\n'), ((11155, 11216), 'pandas.read_csv', 'pd.read_csv', (['output_lcd_status_filename'], {'sep': '""","""', 'index_col': '(0)'}), "(output_lcd_status_filename, sep=',', index_col=0)\n", (11166, 11216), True, 'import pandas as pd\n'), ((12016, 12044), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (12026, 12044), True, 'import matplotlib.pyplot as plt\n'), ((12180, 12210), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Status Frequency"""'], {}), "('Status Frequency')\n", (12190, 12210), True, 'import matplotlib.pyplot as plt\n'), ((12211, 12260), 'matplotlib.pyplot.title', 'plt.title', (['"""LoopClosureDetector Status Histogram"""'], {}), "('LoopClosureDetector Status Histogram')\n", (12220, 12260), True, 'import matplotlib.pyplot as plt\n'), ((12262, 12272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12270, 12272), True, 'import matplotlib.pyplot as plt\n'), ((13815, 13878), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(18, 10)', 'squeeze': '(False)'}), '(nrows=1, ncols=2, figsize=(18, 10), squeeze=False)\n', (13827, 13878), True, 'import matplotlib.pyplot as plt\n'), ((14181, 14191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14189, 14191), True, 'import matplotlib.pyplot as plt\n'), ((14608, 14655), 'pandas.read_csv', 'pd.read_csv', (['gt_data_file'], {'sep': '""","""', 'index_col': '(0)'}), "(gt_data_file, sep=',', index_col=0)\n", (14619, 14655), True, 'import pandas as pd\n'), ((14821, 14885), 'pandas.read_csv', 'pd.read_csv', (['output_loop_closures_filename'], {'sep': '""","""', 'index_col': '(0)'}), "(output_loop_closures_filename, sep=',', index_col=0)\n", (14832, 14885), True, 'import pandas as pd\n'), ((15148, 15189), 'evo.tools.pandas_bridge.df_to_trajectory', 
'pandas_bridge.df_to_trajectory', (['gt_rel_df'], {}), '(gt_rel_df)\n', (15178, 15189), False, 'from evo.tools import pandas_bridge\n'), ((15254, 15297), 'evo.tools.pandas_bridge.df_to_trajectory', 'pandas_bridge.df_to_trajectory', (['small_lc_df'], {}), '(small_lc_df)\n', (15284, 15297), False, 'from evo.tools import pandas_bridge\n'), ((15319, 15366), 'evo.core.sync.associate_trajectories', 'sync.associate_trajectories', (['traj_ref', 'traj_est'], {}), '(traj_ref, traj_est)\n', (15346, 15366), False, 'from evo.core import sync\n'), ((17592, 17639), 'pandas.read_csv', 'pd.read_csv', (['gt_data_file'], {'sep': '""","""', 'index_col': '(0)'}), "(gt_data_file, sep=',', index_col=0)\n", (17603, 17639), True, 'import pandas as pd\n'), ((17761, 17817), 'pandas.read_csv', 'pd.read_csv', (['output_poses_filename'], {'sep': '""","""', 'index_col': '(0)'}), "(output_poses_filename, sep=',', index_col=0)\n", (17772, 17817), True, 'import pandas as pd\n'), ((17957, 18017), 'pandas.read_csv', 'pd.read_csv', (['output_pgo_poses_filename'], {'sep': '""","""', 'index_col': '(0)'}), "(output_pgo_poses_filename, sep=',', index_col=0)\n", (17968, 18017), True, 'import pandas as pd\n'), ((18174, 18211), 'evo.tools.pandas_bridge.df_to_trajectory', 'pandas_bridge.df_to_trajectory', (['gt_df'], {}), '(gt_df)\n', (18204, 18211), False, 'from evo.tools import pandas_bridge\n'), ((18266, 18289), 'copy.deepcopy', 'copy.deepcopy', (['traj_ref'], {}), '(traj_ref)\n', (18279, 18289), False, 'import copy\n'), ((18301, 18348), 'evo.tools.pandas_bridge.df_to_trajectory', 'pandas_bridge.df_to_trajectory', (['output_poses_df'], {}), '(output_poses_df)\n', (18331, 18348), False, 'from evo.tools import pandas_bridge\n'), ((18373, 18423), 'evo.core.sync.associate_trajectories', 'sync.associate_trajectories', (['traj_ref_cp', 'traj_vio'], {}), '(traj_ref_cp, traj_vio)\n', (18400, 18423), False, 'from evo.core import sync\n'), ((18684, 18735), 'evo.tools.pandas_bridge.df_to_trajectory', 
'pandas_bridge.df_to_trajectory', (['output_pgo_poses_df'], {}), '(output_pgo_poses_df)\n', (18714, 18735), False, 'from evo.tools import pandas_bridge\n'), ((18780, 18827), 'evo.core.sync.associate_trajectories', 'sync.associate_trajectories', (['traj_ref', 'traj_est'], {}), '(traj_ref, traj_est)\n', (18807, 18827), False, 'from evo.core import sync\n'), ((20090, 20118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (20100, 20118), True, 'import matplotlib.pyplot as plt\n'), ((20124, 20157), 'evo.tools.plot.prepare_axis', 'plot.prepare_axis', (['fig', 'plot_mode'], {}), '(fig, plot_mode)\n', (20141, 20157), False, 'from evo.tools import plot\n'), ((20158, 20219), 'evo.tools.plot.traj', 'plot.traj', (['ax', 'plot_mode', 'traj_ref', '"""--"""', '"""gray"""', '"""reference"""'], {}), "(ax, plot_mode, traj_ref, '--', 'gray', 'reference')\n", (20167, 20219), False, 'from evo.tools import plot\n'), ((20220, 20286), 'evo.tools.plot.traj', 'plot.traj', (['ax', 'plot_mode', 'traj_vio', '"""."""', '"""gray"""', '"""vio without pgo"""'], {}), "(ax, plot_mode, traj_vio, '.', 'gray', 'vio without pgo')\n", (20229, 20286), False, 'from evo.tools import plot\n'), ((20540, 20550), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20548, 20550), True, 'import matplotlib.pyplot as plt\n'), ((21306, 21329), 'copy.deepcopy', 'copy.deepcopy', (['traj_ref'], {}), '(traj_ref)\n', (21319, 21329), False, 'import copy\n'), ((21346, 21369), 'copy.deepcopy', 'copy.deepcopy', (['traj_est'], {}), '(traj_est)\n', (21359, 21369), False, 'import copy\n'), ((21590, 21618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (21600, 21618), True, 'import matplotlib.pyplot as plt\n'), ((21624, 21657), 'evo.tools.plot.prepare_axis', 'plot.prepare_axis', (['fig', 'plot_mode'], {}), '(fig, plot_mode)\n', (21641, 21657), False, 'from evo.tools import plot\n'), ((21658, 21724), 'evo.tools.plot.traj', 
'plot.traj', (['ax', 'plot_mode', 'traj_ref_plot', '"""--"""', '"""gray"""', '"""reference"""'], {}), "(ax, plot_mode, traj_ref_plot, '--', 'gray', 'reference')\n", (21667, 21724), False, 'from evo.tools import plot\n'), ((21980, 21990), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21988, 21990), True, 'import matplotlib.pyplot as plt\n'), ((22008, 22055), 'evo.tools.pandas_bridge.df_to_trajectory', 'pandas_bridge.df_to_trajectory', (['output_poses_df'], {}), '(output_poses_df)\n', (22038, 22055), False, 'from evo.tools import pandas_bridge\n'), ((22077, 22124), 'evo.core.sync.associate_trajectories', 'sync.associate_trajectories', (['traj_ref', 'traj_est'], {}), '(traj_ref, traj_est)\n', (22104, 22124), False, 'from evo.core import sync\n'), ((22136, 22204), 'evo.core.trajectory.align_trajectory', 'trajectory.align_trajectory', (['traj_vio', 'traj_ref'], {'correct_scale': '(False)'}), '(traj_vio, traj_ref, correct_scale=False)\n', (22163, 22204), False, 'from evo.core import trajectory\n'), ((22268, 22296), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (22278, 22296), True, 'import matplotlib.pyplot as plt\n'), ((22382, 22482), 'evo.tools.plot.trajectories', 'plot.trajectories', (['fig', 'traj_by_label', 'plot.PlotMode.xyz'], {'title': '"""PIM Trajectory Tracking in 3D"""'}), "(fig, traj_by_label, plot.PlotMode.xyz, title=\n 'PIM Trajectory Tracking in 3D')\n", (22399, 22482), False, 'from evo.tools import plot\n'), ((22484, 22494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22492, 22494), True, 'import matplotlib.pyplot as plt\n'), ((660, 683), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (681, 683), False, 'import logging\n'), ((2288, 2307), 'evo.core.metrics.APE', 'metrics.APE', (['metric'], {}), '(metric)\n', (2299, 2307), False, 'from evo.core import metrics\n'), ((2807, 2831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'size'}), 
'(figsize=size)\n', (2817, 2831), True, 'import matplotlib.pyplot as plt\n'), ((3032, 3042), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3040, 3042), True, 'import matplotlib.pyplot as plt\n'), ((3666, 3715), 'evo.core.metrics.RPE', 'metrics.RPE', (['metric', 'delta', 'delta_unit', 'all_pairs'], {}), '(metric, delta, delta_unit, all_pairs)\n', (3677, 3715), False, 'from evo.core import metrics\n'), ((4214, 4238), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (4224, 4238), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4447, 4449), True, 'import matplotlib.pyplot as plt\n'), ((8662, 8708), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rows_list', 'index': 'index_list'}), '(data=rows_list, index=index_list)\n', (8674, 8708), True, 'import pandas as pd\n'), ((11074, 11108), 'os.path.expandvars', 'os.path.expandvars', (['vio_output_dir'], {}), '(vio_output_dir)\n', (11092, 11108), False, 'import os\n'), ((14733, 14767), 'os.path.expandvars', 'os.path.expandvars', (['vio_output_dir'], {}), '(vio_output_dir)\n', (14751, 14767), False, 'import os\n'), ((17683, 17717), 'os.path.expandvars', 'os.path.expandvars', (['vio_output_dir'], {}), '(vio_output_dir)\n', (17701, 17717), False, 'import os\n'), ((17865, 17899), 'os.path.expandvars', 'os.path.expandvars', (['vio_output_dir'], {}), '(vio_output_dir)\n', (17883, 17899), False, 'import os\n'), ((734, 782), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (751, 782), False, 'import logging\n'), ((4948, 4964), 'numpy.where', 'np.where', (['(ts > 0)'], {}), '(ts > 0)\n', (4956, 4964), True, 'import numpy as np\n'), ((7905, 7942), 'evo.core.lie_algebra.relative_se3', 'lie.relative_se3', (['w_T_bi', 'w_T_bidelta'], {}), '(w_T_bi, w_T_bidelta)\n', (7921, 7942), True, 'from evo.core import lie_algebra as lie\n'), ((7967, 7994), 
'copy.deepcopy', 'copy.deepcopy', (['bi_T_bidelta'], {}), '(bi_T_bidelta)\n', (7980, 7994), False, 'import copy\n'), ((8024, 8046), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (8032, 8046), True, 'import numpy as np\n'), ((8070, 8122), 'evo.core.transformations.quaternion_from_matrix', 'transformations.quaternion_from_matrix', (['bi_R_bidelta'], {}), '(bi_R_bidelta)\n', (8108, 8122), False, 'from evo.core import transformations\n'), ((6760, 6819), 'numpy.array', 'np.array', (["[df.at[match_ts, idx] for idx in ['x', 'y', 'z']]"], {}), "([df.at[match_ts, idx] for idx in ['x', 'y', 'z']])\n", (6768, 6819), True, 'import numpy as np\n'), ((6841, 6909), 'numpy.array', 'np.array', (["[df.at[match_ts, idx] for idx in ['qw', 'qx', 'qy', 'qz']]"], {}), "([df.at[match_ts, idx] for idx in ['qw', 'qx', 'qy', 'qz']])\n", (6849, 6909), True, 'import numpy as np\n'), ((6961, 7002), 'evo.core.transformations.quaternion_matrix', 'transformations.quaternion_matrix', (['w_q_bi'], {}), '(w_q_bi)\n', (6994, 7002), False, 'from evo.core import transformations\n'), ((7306, 7365), 'numpy.array', 'np.array', (["[df.at[query_ts, idx] for idx in ['x', 'y', 'z']]"], {}), "([df.at[query_ts, idx] for idx in ['x', 'y', 'z']])\n", (7314, 7365), True, 'import numpy as np\n'), ((7392, 7460), 'numpy.array', 'np.array', (["[df.at[query_ts, idx] for idx in ['qw', 'qx', 'qy', 'qz']]"], {}), "([df.at[query_ts, idx] for idx in ['qw', 'qx', 'qy', 'qz']])\n", (7400, 7460), True, 'import numpy as np\n'), ((7517, 7563), 'evo.core.transformations.quaternion_matrix', 'transformations.quaternion_matrix', (['w_q_bidelta'], {}), '(w_q_bidelta)\n', (7550, 7563), False, 'from evo.core import transformations\n'), ((8211, 8239), 'numpy.linalg.norm', 'np.linalg.norm', (['bi_t_bidelta'], {}), '(bi_t_bidelta)\n', (8225, 8239), True, 'import numpy as np\n'), ((8314, 8342), 'numpy.linalg.norm', 'np.linalg.norm', (['bi_t_bidelta'], {}), '(bi_t_bidelta)\n', (8328, 8342), True, 'import numpy as 
np\n')] |
import unittest
import numpy as np
import tensorflow as tf
from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.model_handler import ModelHandler
from elasticdl.python.elasticdl.layers.embedding import Embedding
class CustomModel(tf.keras.models.Model):
    """Minimal subclassed Keras model: embedding lookup then a 1-unit dense.

    Attribute names (`embedding`, `dense`) are part of the test contract —
    tests access `model.embedding` directly — so they must stay stable.
    """

    def __init__(self):
        super(CustomModel, self).__init__()
        self.embedding = tf.keras.layers.Embedding(4, 2)
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        # Embed the integer ids, then project each embedding to a scalar.
        embedded = self.embedding(inputs)
        return self.dense(embedded)
def custom_model_with_embedding():
    """Build a functional Keras model: Input(4,) -> Embedding(4, 2) -> Dense(1)."""
    x_in = tf.keras.layers.Input(shape=(4,), name="x")
    hidden = tf.keras.layers.Embedding(4, 2)(x_in)
    y_out = tf.keras.layers.Dense(1)(hidden)
    return tf.keras.models.Model(x_in, y_out)
def custom_sequential_model(feature_columns):
    """Build a small sequential binary classifier over the given feature columns."""
    layers = [
        tf.keras.layers.DenseFeatures(feature_columns=feature_columns),
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ]
    return tf.keras.Sequential(layers)
def feature_columns_fn():
    """Return the feature columns used by the sequential-model tests:
    a numeric "age" column and a one-hot (indicator) "education" column
    backed by a 4-bucket hash."""
    age_col = tf.feature_column.numeric_column("age", dtype=tf.int64)
    edu_hashed = tf.feature_column.categorical_column_with_hash_bucket(
        "education", hash_bucket_size=4
    )
    edu_one_hot = tf.feature_column.indicator_column(edu_hashed)
    return [age_col, edu_one_hot]
def _get_dataset():
    """Build a tiny labeled tf.data.Dataset with "age" and "education" features,
    shuffled and batched in groups of 4."""
    labels = np.array([1, 1, 0, 0, 1])
    features = {
        "age": [14, 56, 78, 38, 80],
        "education": [
            "Bachelors",
            "Master",
            "Some-college",
            "Bachelors",
            "Master",
        ],
    }
    ds = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # Note: len(features) is the number of feature *keys* (2), matching the
    # original's shuffle buffer size.
    return ds.shuffle(len(features)).batch(4)
def _mock_model_trained_params(model):
trained_params = {}
for var in model.trainable_variables:
trained_params[var.name] = np.ones(
var.shape.as_list(), dtype="float32"
)
return trained_params
class DefaultModelHandlerTest(unittest.TestCase):
    """Tests for the default (no distribution strategy) ModelHandler."""
    def setUp(self):
        # Default handler: no distribution strategy, no checkpoint directory.
        self.model_handler = ModelHandler.get_model_handler()
    def test_get_model_to_ps(self):
        """The default handler should leave native Keras Embedding layers intact."""
        model_inst = custom_model_with_embedding()
        model_inst = self.model_handler.get_model_to_train(model_inst)
        self.assertEqual(type(model_inst.layers[1]), tf.keras.layers.Embedding)
    def test_get_model_to_export(self):
        """Export keeps the feature-column inputs and a single output, and
        inference reflects parameters injected after export."""
        dataset = _get_dataset()
        feature_columns = feature_columns_fn()
        model_inst = custom_sequential_model(feature_columns)
        # NOTE(review): _build_model_with_inputs is a private Keras API and
        # may break across TF versions — confirm against the pinned release.
        model_inst._build_model_with_inputs(inputs=dataset, targets=None)
        model_inst = self.model_handler.get_model_to_export(
            model_inst, dataset
        )
        self.assertEqual(list(model_inst.inputs.keys()), ["age", "education"])
        self.assertEqual(len(model_inst.outputs), 1)
        # Overwrite every trainable variable with all-ones, then check the
        # forward pass reproduces the injected parameters (output == ones).
        mock_params = _mock_model_trained_params(model_inst)
        for var in model_inst.trainable_variables:
            var.assign(mock_params[var.name])
        test_data = {
            "age": [14, 56, 78, 38, 80],
            "education": [
                "Bachelors",
                "Master",
                "Some-college",
                "Bachelors",
                "Master",
            ],
        }
        result = model_inst.call(test_data).numpy()
        self.assertEqual(result.tolist(), np.ones((5, 1)).tolist())
class ParameterSeverModelHandlerTest(unittest.TestCase):
    """Tests for the parameter-server-strategy ModelHandler.

    NOTE(review): the class name misspells "Server"; left unchanged to avoid
    breaking any name-based test selection.
    """
    def setUp(self):
        # Fresh graph per test; handler loads the functional-model checkpoint fixture.
        tf.keras.backend.clear_session()
        self.model_handler = ModelHandler.get_model_handler(
            distribution_strategy=DistributionStrategy.PARAMETER_SERVER,
            checkpoint_dir="elasticdl/python/tests/testdata/functional_ckpt/",
        )
    def test_get_model_to_train(self):
        """Under the PS strategy, the native Keras Embedding layer should be
        replaced by ElasticDL's Embedding layer."""
        model_inst = custom_model_with_embedding()
        model_inst = self.model_handler.get_model_to_train(model_inst)
        self.assertEqual(type(model_inst.layers[1]), Embedding)
    def test_get_model_to_export(self):
        """Round-trip: train-model conversion then export restores checkpoint
        parameters (expected output 3.0 comes from the fixture)."""
        model_inst = custom_model_with_embedding()
        train_model = self.model_handler.get_model_to_train(model_inst)
        export_model = self.model_handler.get_model_to_export(
            train_model, dataset=None
        )
        test_data = tf.constant([0])
        result = export_model.call(test_data).numpy()
        # NOTE(review): 3.0 depends on the checkpoint fixture — update if the
        # fixture is regenerated.
        self.assertEqual(result[0][0], 3.0)
    def test_get_subclass_model_to_export(self):
        # Point the handler at the subclass-model checkpoint fixture instead.
        self.model_handler._checkpoint_dir = (
            "elasticdl/python/tests/testdata/subclass_ckpt/"
        )
        def _get_dataset():
            # Local shadow of the module-level helper: random integer batches
            # used only to trace/build the subclassed model.
            dataset = tf.data.Dataset.from_tensor_slices(
                np.random.randint(0, 10, (10, 4))
            )
            dataset = dataset.batch(2)
            return dataset
        model_inst = CustomModel()
        dataset = _get_dataset()
        train_model = self.model_handler.get_model_to_train(model_inst)
        self.assertEqual(type(train_model.embedding), Embedding)
        export_model = self.model_handler.get_model_to_export(
            train_model, dataset=dataset
        )
        test_data = tf.constant([0])
        result = export_model.call(test_data).numpy()
        self.assertEqual(result[0][0], 3.0)
if __name__ == "__main__":
    # Run all test cases in this module via the standard unittest runner.
    unittest.main()
| [
"unittest.main",
"tensorflow.keras.layers.DenseFeatures",
"tensorflow.feature_column.numeric_column",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.clear_session",
"numpy.ones",
"tensorflow.constant",
"tensorflow.keras.models.Model",
"tensorflow.feature_column.categorical_column_with_ha... | [((655, 698), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(4,)', 'name': '"""x"""'}), "(shape=(4,), name='x')\n", (676, 698), True, 'import tensorflow as tf\n'), ((816, 854), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (837, 854), True, 'import tensorflow as tf\n'), ((1211, 1266), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""age"""'], {'dtype': 'tf.int64'}), "('age', dtype=tf.int64)\n", (1243, 1266), True, 'import tensorflow as tf\n'), ((1283, 1373), 'tensorflow.feature_column.categorical_column_with_hash_bucket', 'tf.feature_column.categorical_column_with_hash_bucket', (['"""education"""'], {'hash_bucket_size': '(4)'}), "('education',\n hash_bucket_size=4)\n", (1336, 1373), True, 'import tensorflow as tf\n'), ((1408, 1453), 'tensorflow.feature_column.indicator_column', 'tf.feature_column.indicator_column', (['education'], {}), '(education)\n', (1442, 1453), True, 'import tensorflow as tf\n'), ((1527, 1552), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 1]'], {}), '([1, 1, 0, 0, 1])\n', (1535, 1552), True, 'import numpy as np\n'), ((5409, 5424), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5422, 5424), False, 'import unittest\n'), ((394, 425), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(4)', '(2)'], {}), '(4, 2)\n', (419, 425), True, 'import tensorflow as tf\n'), ((447, 471), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (468, 471), True, 'import tensorflow as tf\n'), ((715, 746), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(4)', '(2)'], {}), '(4, 2)\n', (740, 746), True, 'import tensorflow as tf\n'), ((769, 793), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (790, 793), True, 'import tensorflow as tf\n'), ((2251, 2283), 
'elasticdl.python.common.model_handler.ModelHandler.get_model_handler', 'ModelHandler.get_model_handler', ([], {}), '()\n', (2281, 2283), False, 'from elasticdl.python.common.model_handler import ModelHandler\n'), ((3640, 3672), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (3670, 3672), True, 'import tensorflow as tf\n'), ((3702, 3870), 'elasticdl.python.common.model_handler.ModelHandler.get_model_handler', 'ModelHandler.get_model_handler', ([], {'distribution_strategy': 'DistributionStrategy.PARAMETER_SERVER', 'checkpoint_dir': '"""elasticdl/python/tests/testdata/functional_ckpt/"""'}), "(distribution_strategy=DistributionStrategy.\n PARAMETER_SERVER, checkpoint_dir=\n 'elasticdl/python/tests/testdata/functional_ckpt/')\n", (3732, 3870), False, 'from elasticdl.python.common.model_handler import ModelHandler\n'), ((4418, 4434), 'tensorflow.constant', 'tf.constant', (['[0]'], {}), '([0])\n', (4429, 4434), True, 'import tensorflow as tf\n'), ((5261, 5277), 'tensorflow.constant', 'tf.constant', (['[0]'], {}), '([0])\n', (5272, 5277), True, 'import tensorflow as tf\n'), ((958, 1020), 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', ([], {'feature_columns': 'feature_columns'}), '(feature_columns=feature_columns)\n', (987, 1020), True, 'import tensorflow as tf\n'), ((1034, 1078), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (1055, 1078), True, 'import tensorflow as tf\n'), ((1092, 1138), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1113, 1138), True, 'import tensorflow as tf\n'), ((4804, 4837), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10, 4)'], {}), '(0, 10, (10, 4))\n', (4821, 4837), True, 'import numpy as np\n'), ((3526, 3541), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (3533, 3541), True, 
'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 2:18 下午
# @Author : zhengjiawei
# @FileName: nlu_slot_test.py
# @Software: PyCharm
import os
import json
import random
import numpy as np
from xbot.util.path import get_root_path
from xbot.nlu.slot.slot_with_bert import SlotWithBert
from data.crosswoz.data_process.nlu_slot_dataloader import Dataloader
from data.crosswoz.data_process.nlu_slot_postprocess import (
is_slot_da,
calculate_f1,
recover_intent,
)
import torch
def set_seed(seed):
    """Seed Python's, NumPy's, and PyTorch's RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
if __name__ == "__main__":
    # Remote locations of the slot-filling datasets (kept for reference /
    # download tooling; this script does not fetch them itself).
    data_urls = {
        "slot_train_data.json": "http://qiw2jpwfc.hn-bkt.clouddn.com/slot_train_data.json",
        "slot_val_data.json": "http://qiw2jpwfc.hn-bkt.clouddn.com/slot_val_data.json",
        "slot_test_data.json": "http://qiw2jpwfc.hn-bkt.clouddn.com/slot_test_data.json",
    }
    # load config and resolve all paths relative to the project root
    root_path = get_root_path()
    config_path = os.path.join(
        root_path, "xbot/config/crosswoz_all_context_nlu_slot.json"
    )
    with open(config_path) as f:  # close the handle (original leaked it)
        config = json.load(f)
    data_path = os.path.join(root_path, config["data_dir"])
    output_dir = os.path.join(root_path, config["output_dir"])
    # BUG FIX: the original re-joined output_dir on this line instead of
    # log_dir, so log_dir stayed relative and os.makedirs(log_dir) below
    # created the log directory in the current working directory.
    log_dir = os.path.join(root_path, config["log_dir"])
    device = config["DEVICE"]
    set_seed(config["seed"])
    # Vocabulary files produced by the preprocessing step.
    with open(os.path.join(data_path, "intent_vocab.json"), encoding="utf-8") as f:
        intent_vocab = json.load(f)
    with open(os.path.join(data_path, "tag_vocab.json"), encoding="utf-8") as f:
        tag_vocab = json.load(f)
    dataloader = Dataloader(
        intent_vocab=intent_vocab,
        tag_vocab=tag_vocab,
        pretrained_weights=config["model"]["pretrained_weights"],
    )
    print("intent num:", len(intent_vocab))
    print("tag num:", len(tag_vocab))
    for data_key in ["val", "tests"]:
        with open(
            os.path.join(data_path, "slot_{}_data.json".format(data_key)),
            encoding="utf-8",
        ) as f:
            dataloader.load_data(
                json.load(f),
                data_key,
                cut_sen_len=0,
                use_bert_tokenizer=config["use_bert_tokenizer"],
            )
        print("{} set size: {}".format(data_key, len(dataloader.data[data_key])))
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    # Restore the trained slot model and switch to inference mode.
    model = SlotWithBert(config["model"], device, dataloader.tag_dim)
    model.load_state_dict(
        torch.load(os.path.join(output_dir, "pytorch_model_nlu_slot.pt"), device)
    )
    model.to(device)
    model.eval()
    # Evaluate on the test split and accumulate slot-level predictions.
    batch_size = config["model"]["batch_size"]
    data_key = "tests"
    predict_golden = {"slot": []}
    slot_loss = 0
    for pad_batch, ori_batch, real_batch_size in dataloader.yield_batches(
        batch_size, data_key=data_key
    ):
        pad_batch = tuple(t.to(device) for t in pad_batch)
        (
            word_seq_tensor,
            tag_seq_tensor,
            word_mask_tensor,
            tag_mask_tensor,
            context_seq_tensor,
            context_mask_tensor,
        ) = pad_batch
        if not config["model"]["context"]:
            context_seq_tensor, context_mask_tensor = None, None
        with torch.no_grad():
            slot_logits, batch_slot_loss = model.forward(
                word_seq_tensor,
                word_mask_tensor,
                tag_seq_tensor,
                tag_mask_tensor,
                context_seq_tensor,
                context_mask_tensor,
            )
        # Weight the batch loss by the true (unpadded) batch size.
        slot_loss += batch_slot_loss.item() * real_batch_size
        for j in range(real_batch_size):
            predicts = recover_intent(
                dataloader,
                slot_logits[j],
                tag_mask_tensor[j],
                ori_batch[j][0],
                ori_batch[j][1],
            )
            labels = ori_batch[j][2]
            # Keep only slot-type dialogue acts on both sides before scoring.
            predict_golden["slot"].append(
                {
                    "predict": [x for x in predicts if is_slot_da(x)],
                    "golden": [x for x in labels if is_slot_da(x)],
                }
            )
    total = len(dataloader.data[data_key])
    slot_loss /= total
    precision, recall, F1 = calculate_f1(predict_golden["slot"])
    print("-" * 20 + "slot" + "-" * 20)
    print("\t Precision: %.2f" % (100 * precision))
    print("\t Recall: %.2f" % (100 * recall))
    print("\t F1: %.2f" % (100 * F1))
| [
"data.crosswoz.data_process.nlu_slot_dataloader.Dataloader",
"data.crosswoz.data_process.nlu_slot_postprocess.recover_intent",
"numpy.random.seed",
"xbot.util.path.get_root_path",
"os.makedirs",
"data.crosswoz.data_process.nlu_slot_postprocess.calculate_f1",
"torch.manual_seed",
"os.path.exists",
"x... | [((513, 530), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (524, 530), False, 'import random\n'), ((535, 555), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (549, 555), True, 'import numpy as np\n'), ((560, 583), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (577, 583), False, 'import torch\n'), ((943, 958), 'xbot.util.path.get_root_path', 'get_root_path', ([], {}), '()\n', (956, 958), False, 'from xbot.util.path import get_root_path\n'), ((977, 1050), 'os.path.join', 'os.path.join', (['root_path', '"""xbot/config/crosswoz_all_context_nlu_slot.json"""'], {}), "(root_path, 'xbot/config/crosswoz_all_context_nlu_slot.json')\n", (989, 1050), False, 'import os\n'), ((1158, 1192), 'os.path.join', 'os.path.join', (['root_path', 'data_path'], {}), '(root_path, data_path)\n', (1170, 1192), False, 'import os\n'), ((1248, 1283), 'os.path.join', 'os.path.join', (['root_path', 'output_dir'], {}), '(root_path, output_dir)\n', (1260, 1283), False, 'import os\n'), ((1333, 1368), 'os.path.join', 'os.path.join', (['root_path', 'output_dir'], {}), '(root_path, output_dir)\n', (1345, 1368), False, 'import os\n'), ((1667, 1787), 'data.crosswoz.data_process.nlu_slot_dataloader.Dataloader', 'Dataloader', ([], {'intent_vocab': 'intent_vocab', 'tag_vocab': 'tag_vocab', 'pretrained_weights': "config['model']['pretrained_weights']"}), "(intent_vocab=intent_vocab, tag_vocab=tag_vocab,\n pretrained_weights=config['model']['pretrained_weights'])\n", (1677, 1787), False, 'from data.crosswoz.data_process.nlu_slot_dataloader import Dataloader\n'), ((2516, 2573), 'xbot.nlu.slot.slot_with_bert.SlotWithBert', 'SlotWithBert', (["config['model']", 'device', 'dataloader.tag_dim'], {}), "(config['model'], device, dataloader.tag_dim)\n", (2528, 2573), False, 'from xbot.nlu.slot.slot_with_bert import SlotWithBert\n'), ((4343, 4379), 'data.crosswoz.data_process.nlu_slot_postprocess.calculate_f1', 'calculate_f1', (["predict_golden['slot']"], 
{}), "(predict_golden['slot'])\n", (4355, 4379), False, 'from data.crosswoz.data_process.nlu_slot_postprocess import is_slot_da, calculate_f1, recover_intent\n'), ((2378, 2404), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (2392, 2404), False, 'import os\n'), ((2414, 2437), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2425, 2437), False, 'import os\n'), ((2449, 2472), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (2463, 2472), False, 'import os\n'), ((2482, 2502), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2493, 2502), False, 'import os\n'), ((1473, 1517), 'os.path.join', 'os.path.join', (['data_path', '"""intent_vocab.json"""'], {}), "(data_path, 'intent_vocab.json')\n", (1485, 1517), False, 'import os\n'), ((1583, 1624), 'os.path.join', 'os.path.join', (['data_path', '"""tag_vocab.json"""'], {}), "(data_path, 'tag_vocab.json')\n", (1595, 1624), False, 'import os\n'), ((2620, 2673), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model_nlu_slot.pt"""'], {}), "(output_dir, 'pytorch_model_nlu_slot.pt')\n", (2632, 2673), False, 'import os\n'), ((3365, 3380), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3378, 3380), False, 'import torch\n'), ((3786, 3887), 'data.crosswoz.data_process.nlu_slot_postprocess.recover_intent', 'recover_intent', (['dataloader', 'slot_logits[j]', 'tag_mask_tensor[j]', 'ori_batch[j][0]', 'ori_batch[j][1]'], {}), '(dataloader, slot_logits[j], tag_mask_tensor[j], ori_batch[j]\n [0], ori_batch[j][1])\n', (3800, 3887), False, 'from data.crosswoz.data_process.nlu_slot_postprocess import is_slot_da, calculate_f1, recover_intent\n'), ((4132, 4145), 'data.crosswoz.data_process.nlu_slot_postprocess.is_slot_da', 'is_slot_da', (['x'], {}), '(x)\n', (4142, 4145), False, 'from data.crosswoz.data_process.nlu_slot_postprocess import is_slot_da, calculate_f1, recover_intent\n'), ((4200, 4213), 
'data.crosswoz.data_process.nlu_slot_postprocess.is_slot_da', 'is_slot_da', (['x'], {}), '(x)\n', (4210, 4213), False, 'from data.crosswoz.data_process.nlu_slot_postprocess import is_slot_da, calculate_f1, recover_intent\n')] |
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import face_recognition
import keras
from keras.models import load_model
import cv2
import time
# Map emotion label -> class index of the trained model's output.
emotion_dict= {'Angry': 0, 'Sad': 5, 'Neutral': 4, 'Disgust': 1, 'Surprise': 6, 'Fear': 2, 'Happy': 3}
# Map class index -> emoji image filename (loaded from images/ and overlaid on faces).
emoji_array = {0: 'angerEmoji.png', 5: 'sadEmoji.png', 4: 'neutralEmoji.png', 1: 'disgustEmoji.png', 6: 'surpriseEmoji.png', 2: 'fearEmoji.png', 3: 'happyEmoji.png'}
# Pre-trained facial-emotion CNN; show_webcam() feeds it 48x48 grayscale crops.
model = load_model("model_v6_23.hdf5")
def show_webcam(mirror=False):
    """Stream webcam frames, classify each detected face's emotion with the
    global `model`, and overlay the matching emoji on the frame.

    Args:
        mirror: if True, horizontally flip each frame before processing.

    Press ESC in the window to quit. The capture device is always released.
    """
    cam = cv2.VideoCapture(0)
    try:
        while True:
            ret_val, oimg = cam.read()
            if not ret_val:
                break  # camera disconnected or returned no frame
            # BUG FIX: flip the whole frame once, BEFORE face detection. The
            # original flipped inside the per-face loop after the face
            # coordinates were computed, which invalidated those coordinates
            # and discarded the flip on the displayed frame.
            if mirror:
                oimg = cv2.flip(oimg, 1)
            face_locations = face_recognition.face_locations(oimg)
            print(len(face_locations))
            for top, right, bottom, left in face_locations:
                width = right - left
                height = bottom - top
                center = (int((left + right) / 2), int((top + bottom) / 2))
                # Prepare the crop for the emotion model: 48x48 grayscale,
                # shaped (1, 48, 48, 1).
                face_image = oimg[top:bottom, left:right]
                face_image = cv2.resize(face_image, (48, 48))
                face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
                face_image = np.reshape(
                    face_image, [1, face_image.shape[0], face_image.shape[1], 1]
                )
                predicted_class = np.argmax(model.predict(face_image))
                label_map = dict((v, k) for k, v in emotion_dict.items())
                print(label_map[predicted_class])
                cv2.circle(oimg, center, int(width / 2), (255, 128, 0))
                # Emoji overlay without alpha blending; clip to the frame so
                # faces touching the border cannot raise a broadcasting error.
                emoji = cv2.imread("images/" + emoji_array[predicted_class])
                if emoji is None:
                    continue  # missing asset: skip the overlay, keep running
                overlay = cv2.resize(emoji, (width, height))
                y_end = min(top + overlay.shape[0], oimg.shape[0])
                x_end = min(left + overlay.shape[1], oimg.shape[1])
                oimg[top:y_end, left:x_end] = overlay[: y_end - top, : x_end - left]
            cv2.imshow('my webcam', oimg)
            if cv2.waitKey(1) == 27:
                break  # esc to quit
    finally:
        # BUG FIX: release the capture device (the original leaked it).
        cam.release()
        cv2.destroyAllWindows()
def main():
    """Entry point: run the webcam emotion-overlay loop with mirroring on."""
    show_webcam(mirror=True)
if __name__ == '__main__':
    main()
| [
"keras.models.load_model",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.imread",
"numpy.reshape",
"cv2.flip",
"face_recognition.face_locations",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((452, 482), 'keras.models.load_model', 'load_model', (['"""model_v6_23.hdf5"""'], {}), "('model_v6_23.hdf5')\n", (462, 482), False, 'from keras.models import load_model\n'), ((525, 544), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (541, 544), False, 'import cv2\n'), ((2235, 2258), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2256, 2258), False, 'import cv2\n'), ((638, 674), 'face_recognition.face_locations', 'face_recognition.face_locations', (['img'], {}), '(img)\n', (669, 674), False, 'import face_recognition\n'), ((2134, 2163), 'cv2.imshow', 'cv2.imshow', (['"""my webcam"""', 'oimg'], {}), "('my webcam', oimg)\n", (2144, 2163), False, 'import cv2\n'), ((1137, 1169), 'cv2.resize', 'cv2.resize', (['face_image', '(48, 48)'], {}), '(face_image, (48, 48))\n', (1147, 1169), False, 'import cv2\n'), ((1194, 1238), 'cv2.cvtColor', 'cv2.cvtColor', (['face_image', 'cv2.COLOR_BGR2GRAY'], {}), '(face_image, cv2.COLOR_BGR2GRAY)\n', (1206, 1238), False, 'import cv2\n'), ((1264, 1336), 'numpy.reshape', 'np.reshape', (['face_image', '[1, face_image.shape[0], face_image.shape[1], 1]'], {}), '(face_image, [1, face_image.shape[0], face_image.shape[1], 1])\n', (1274, 1336), True, 'import numpy as np\n'), ((1752, 1804), 'cv2.imread', 'cv2.imread', (["('images/' + emoji_array[predicted_class])"], {}), "('images/' + emoji_array[predicted_class])\n", (1762, 1804), False, 'import cv2\n'), ((1824, 1848), 'cv2.resize', 'cv2.resize', (['emogi', 'dsize'], {}), '(emogi, dsize)\n', (1834, 1848), False, 'import cv2\n'), ((2029, 2053), 'cv2.resize', 'cv2.resize', (['emogi', 'dsize'], {}), '(emogi, dsize)\n', (2039, 2053), False, 'import cv2\n'), ((2175, 2189), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2186, 2189), False, 'import cv2\n'), ((867, 883), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (875, 883), False, 'import cv2\n')] |
#!/usr/bin/env python2
import unittest
import numpy as np
import libpandasafety_py
# Limits mirrored from the Hyundai safety model under test; the assertions
# below probe behavior exactly at and just beyond these thresholds.
MAX_RATE_UP = 3        # max per-message increase of the torque command
MAX_RATE_DOWN = 7      # max per-message decrease of the torque command
MAX_STEER = 255        # absolute torque command limit
MAX_RT_DELTA = 112     # max torque change within one real-time window
RT_INTERVAL = 250000   # real-time window length (presumably microseconds -- TODO confirm)
DRIVER_TORQUE_ALLOWANCE = 50  # driver torque below this does not shrink the envelope
DRIVER_TORQUE_FACTOR = 2      # scales how driver torque narrows the allowed command
def twos_comp(val, bits):
  """Return the unsigned two's-complement encoding of ``val`` in a ``bits``-wide field.

  Non-negative values pass through unchanged; negative values wrap into the
  upper half of the unsigned range (e.g. -1 with 8 bits -> 255).
  """
  return val if val >= 0 else (2 ** bits) + val
def sign(a):
  """Return 1 if ``a`` is strictly positive, otherwise -1 (zero maps to -1)."""
  return 1 if a > 0 else -1
class TestHyundaiSafety(unittest.TestCase):
  """Exercises the Hyundai safety model in the panda firmware via its C bindings.

  Each test builds raw CAN mailbox structures with ``libpandasafety_py.ffi``
  and feeds them through the safety rx/tx/fwd hooks, asserting that torque
  commands outside the configured limits are rejected.
  """

  @classmethod
  def setUp(cls):
    # NOTE: unittest invokes setUp on each instance before every test; binding
    # it as a classmethod still runs, with state shared on the class instead.
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.safety_set_mode(7, 0)
    cls.safety.init_tests_hyundai()

  def _send_msg(self, bus, addr, length):
    """Build a CAN mailbox message carrying ``addr``, ``length`` and ``bus``."""
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_send[0].RIR = addr << 21
    # RDTR holds the DLC (length) in its low nibble, with the bus packed
    # above it. The original code assigned RDTR twice, so the bus value
    # clobbered the length; OR the two fields together so both survive.
    to_send[0].RDTR = length
    to_send[0].RDTR |= bus << 4
    return to_send

  def _button_msg(self, buttons):
    """Build a cruise-button message (addr 1265) with the given button bits."""
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_send[0].RIR = 1265 << 21
    to_send[0].RDLR = buttons
    return to_send

  def _set_prev_torque(self, t):
    """Seed both the desired-torque and rt-torque history with ``t``."""
    self.safety.set_hyundai_desired_torque_last(t)
    self.safety.set_hyundai_rt_torque_last(t)

  def _torque_driver_msg(self, torque):
    """Build a driver-torque message (addr 897); torque is offset by 2048."""
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_send[0].RIR = 897 << 21
    to_send[0].RDLR = (torque + 2048) << 11
    return to_send

  def _torque_msg(self, torque):
    """Build a torque-command message (addr 832); torque is offset by 1024."""
    to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_send[0].RIR = 832 << 21
    to_send[0].RDLR = (torque + 1024) << 16
    return to_send

  def test_default_controls_not_allowed(self):
    # Controls must start disengaged.
    self.assertFalse(self.safety.get_controls_allowed())

  def test_steer_safety_check(self):
    # Any nonzero torque while disengaged, or |torque| > MAX_STEER while
    # engaged, must be blocked by the tx hook.
    for enabled in [0, 1]:
      for t in range(-0x200, 0x200):
        self.safety.set_controls_allowed(enabled)
        self._set_prev_torque(t)
        if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
          self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(t)))
        else:
          self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))

  def test_manually_enable_controls_allowed(self):
    self.safety.set_controls_allowed(1)
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.set_controls_allowed(0)
    self.assertFalse(self.safety.get_controls_allowed())

  def test_enable_control_allowed_from_cruise(self):
    # Cruise-engaged bit (bit 13 of addr 1057) enables controls via rx hook.
    to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_push[0].RIR = 1057 << 21
    to_push[0].RDLR = 1 << 13

    self.safety.safety_rx_hook(to_push)
    self.assertTrue(self.safety.get_controls_allowed())

  def test_disable_control_allowed_from_cruise(self):
    # Clearing the cruise bit must disengage controls.
    to_push = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
    to_push[0].RIR = 1057 << 21
    to_push[0].RDLR = 0

    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(to_push)
    self.assertFalse(self.safety.get_controls_allowed())

  def test_non_realtime_limit_up(self):
    # From zero torque, a step of MAX_RATE_UP in either direction is the
    # largest allowed increase per message.
    self.safety.set_hyundai_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP)))

    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP + 1)))
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP - 1)))

  def test_non_realtime_limit_down(self):
    # TODO(review): this test only performs setup and asserts nothing; the
    # MAX_RATE_DOWN boundary is currently only covered indirectly by
    # test_against_torque_driver below.
    self.safety.set_hyundai_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

  def test_against_torque_driver(self):
    # Driver torque within the allowance must not shrink the command
    # envelope; beyond it, opposing commands at the limit are rejected.
    self.safety.set_controls_allowed(True)

    for sign in [-1, 1]:
      for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1):
        t *= -sign
        self.safety.set_hyundai_torque_driver(t, t)
        self._set_prev_torque(MAX_STEER * sign)
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_STEER * sign)))

      self.safety.set_hyundai_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1)
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_STEER)))

    # spot check some individual cases
    for sign in [-1, 1]:
      driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign
      torque_desired = (MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign

      delta = 1 * sign
      self._set_prev_torque(torque_desired)
      self.safety.set_hyundai_torque_driver(-driver_torque, -driver_torque)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(torque_desired)))
      self._set_prev_torque(torque_desired + delta)
      self.safety.set_hyundai_torque_driver(-driver_torque, -driver_torque)
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(torque_desired + delta)))

      # When fully opposed by the driver, the command must back off by at
      # least MAX_RATE_DOWN (or go to zero).
      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_hyundai_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN) * sign)))
      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_hyundai_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(0)))
      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_hyundai_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN + 1) * sign)))

  def test_realtime_limits(self):
    # Within one RT_INTERVAL the command may drift at most MAX_RT_DELTA from
    # rt_torque_last; advancing the timer resets the reference point.
    self.safety.set_controls_allowed(True)

    for sign in [-1, 1]:
      self.safety.init_tests_hyundai()
      self._set_prev_torque(0)
      self.safety.set_hyundai_torque_driver(0, 0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))

      self._set_prev_torque(0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))

      # Increase timer to update rt_torque_last
      self.safety.set_timer(RT_INTERVAL + 1)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA - 1))))
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))

  #def test_spam_cancel_safety_check(self):
  #  RESUME_BTN = 1
  #  SET_BTN = 2
  #  CANCEL_BTN = 4
  #  BUTTON_MSG = 1265
  #  self.safety.set_controls_allowed(0)
  #  self.assertTrue(self.safety.safety_tx_hook(self._button_msg(CANCEL_BTN)))
  #  self.assertFalse(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN)))
  #  self.assertFalse(self.safety.safety_tx_hook(self._button_msg(SET_BTN)))
  #  # do not block resume if we are engaged already
  #  self.safety.set_controls_allowed(1)
  #  self.assertTrue(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN)))

  def test_fwd_hook(self):
    # With the giraffe switch set: bus 0 forwards to the camera bus (2),
    # bus 1 is dropped, and bus 2 forwards to 0 except for blocked addrs.
    # With the switch clear, nothing is forwarded.
    buss = range(0x0, 0x3)
    msgs = range(0x1, 0x800)
    hyundai_giraffe_switch_2 = [0, 1]

    self.safety.set_hyundai_camera_bus(2)

    for hgs in hyundai_giraffe_switch_2:
      self.safety.set_hyundai_giraffe_switch_2(hgs)
      blocked_msgs = [832]
      for b in buss:
        for m in msgs:
          if hgs:
            if b == 0:
              fwd_bus = 2
            elif b == 1:
              fwd_bus = -1
            elif b == 2:
              fwd_bus = -1 if m in blocked_msgs else 0
          else:
            fwd_bus = -1

          # assume len 8
          self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, self._send_msg(b, m, 8)))
if __name__ == "__main__":
  # Run every TestCase in this module when executed directly.
  unittest.main()
| [
"unittest.main",
"libpandasafety_py.ffi.new",
"numpy.arange"
] | [((7725, 7740), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7738, 7740), False, 'import unittest\n'), ((643, 697), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (668, 697), False, 'import libpandasafety_py\n'), ((858, 912), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (883, 912), False, 'import libpandasafety_py\n'), ((1180, 1234), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (1205, 1234), False, 'import libpandasafety_py\n'), ((1377, 1431), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (1402, 1431), False, 'import libpandasafety_py\n'), ((2357, 2411), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (2382, 2411), False, 'import libpandasafety_py\n'), ((2640, 2694), 'libpandasafety_py.ffi.new', 'libpandasafety_py.ffi.new', (['"""CAN_FIFOMailBox_TypeDef *"""'], {}), "('CAN_FIFOMailBox_TypeDef *')\n", (2665, 2694), False, 'import libpandasafety_py\n'), ((3769, 3813), 'numpy.arange', 'np.arange', (['(0)', '(DRIVER_TORQUE_ALLOWANCE + 1)', '(1)'], {}), '(0, DRIVER_TORQUE_ALLOWANCE + 1, 1)\n', (3778, 3813), True, 'import numpy as np\n'), ((5758, 5787), 'numpy.arange', 'np.arange', (['(0)', 'MAX_RT_DELTA', '(1)'], {}), '(0, MAX_RT_DELTA, 1)\n', (5767, 5787), True, 'import numpy as np\n'), ((6023, 6052), 'numpy.arange', 'np.arange', (['(0)', 'MAX_RT_DELTA', '(1)'], {}), '(0, MAX_RT_DELTA, 1)\n', (6032, 6052), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.