code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Provides unit tests for preprocessing helper routines.
"""
# License: MIT
from __future__ import absolute_import, division
import numpy as np
import pandas as pd
from sklearn.utils import check_random_state
import reanalysis_dbns.utils as rdu
def test_construction_of_lagged_dataframe():
    """Test construction of lagged data when input is a pandas DataFrame."""
    # Case 1: integer index with one contemporaneous and one lagged column.
    index = np.arange(10)
    df = pd.DataFrame(
        {'x': np.arange(10), 'y': np.arange(10, 20)}, index=index)
    offsets = [('x', 0), ('y', -1)]
    lagged_df = rdu.construct_lagged_data(offsets, df)
    expected_index = np.arange(1, 10)
    expected_df = pd.DataFrame(
        {'x': np.arange(1, 10), 'y_lag_1': np.arange(10, 19, dtype='f8')},
        index=expected_index)
    assert lagged_df.equals(expected_df)
    # Case 2: datetime index with two lagged columns.
    index = pd.date_range('2000-01-01', freq='1D', periods=10)
    df = pd.DataFrame({'x': np.arange(10, dtype='f8'),
                       'y': np.arange(10, 20, dtype='f8')}, index=index)
    offsets = [('x', -1), ('y', -2)]
    lagged_df = rdu.construct_lagged_data(offsets, df)
    expected_index = index[2:]
    expected_df = pd.DataFrame(
        {'x_lag_1': np.arange(1, 9, dtype='f8'),
         'y_lag_2': np.arange(10, 18, dtype='f8')},
        index=expected_index)
    assert lagged_df.equals(expected_df)
    # Case 3: categorical column combined with a lead (positive offset).
    index = pd.date_range('2000-01-01', freq='1D', periods=5)
    df = pd.DataFrame(
        {'x': pd.Categorical(['a', 'b', 'a', 'b', 'c']),
         'y': np.arange(2, 7, dtype='f8')}, index=index)
    offsets = [('x', -1), ('y', 1)]
    lagged_df = rdu.construct_lagged_data(offsets, df)
    expected_index = index[1:]
    expected_df = pd.DataFrame(
        {'x_lag_1': pd.Categorical(['a', 'b', 'a', 'b'],
                                    categories=['a', 'b', 'c']),
         # Fix: np.NaN was removed in NumPy 2.0; np.nan is the canonical name.
         'y_lead_1': np.array([4.0, 5.0, 6.0, np.nan])},
        index=expected_index)
    assert lagged_df.equals(expected_df)
def test_remove_polynomial_trend():
    """Test removal of polynomial trends from data."""
    rng = check_random_state(0)
    n_samples = 100
    times = pd.date_range('2010-01-01', periods=n_samples, freq='1D')

    # A purely linear series is removed exactly by a first-order fit.
    values = -2.3 + 0.02 * np.arange(n_samples)
    frame = pd.DataFrame({'x': values}, index=times)
    detrended = rdu.remove_polynomial_trend(frame, trend_order=1)
    assert detrended.equals(
        pd.DataFrame({'x': np.zeros(n_samples)}, index=times))

    # With added noise, the detrended series still has (numerically) zero mean.
    values += rng.normal(size=(n_samples,))
    frame = pd.DataFrame({'x': values}, index=times)
    detrended = rdu.remove_polynomial_trend(frame, trend_order=1)
    assert np.abs(np.mean(detrended['x'])) < 1e-12

    # A quadratic trend is removed (to numerical precision) by a
    # second-order fit.
    values = (-2.3 + 0.02 * np.arange(n_samples)
              - 0.001 * np.arange(n_samples) ** 2)
    frame = pd.DataFrame({'x': values}, index=times)
    detrended = rdu.remove_polynomial_trend(frame, trend_order=2)
    expected = pd.DataFrame({'x': np.zeros(n_samples)}, index=times)
    assert np.allclose(detrended.to_numpy(), expected.to_numpy())

    # Noisy quadratic data detrended at first order still has zero mean.
    values += rng.normal(size=(n_samples,))
    frame = pd.DataFrame({'x': values}, index=times)
    detrended = rdu.remove_polynomial_trend(frame, trend_order=1)
    assert np.abs(np.mean(detrended['x'])) < 1e-12
def test_standardize_time_series():
    """Test standardization of time series data."""
    rng = check_random_state(0)

    # Daily data standardized as a whole: zero mean, unit sample std.
    n_samples = 100
    times = pd.date_range('2010-01-01', periods=n_samples, freq='1D')
    frame = pd.DataFrame(
        {'x': rng.normal(loc=2.0, scale=3.2, size=n_samples)}, index=times)
    result = rdu.standardize_time_series(frame)
    assert np.abs(np.mean(result['x'])) < 1e-12
    assert np.abs(np.std(result['x'], ddof=1) - 1) < 1e-12

    # Monthly standardization: each calendar month (Dec/Jan/Feb here, each
    # drawn with its own mean and spread) is standardized separately.
    dates = pd.DatetimeIndex(
        [pd.Timestamp(stamp) for stamp in
         ('2009-12-01', '2010-01-01', '2010-02-01', '2010-12-01',
          '2011-01-01', '2011-02-01', '2011-12-01', '2012-01-01',
          '2012-02-01', '2012-12-01', '2013-01-01', '2013-02-01')])
    month_params = ((1.0, 0.2), (2.0, 3.0), (-3.2, 0.1))
    values = np.array([rng.normal(loc=loc, scale=scale)
                       for _ in range(4)
                       for loc, scale in month_params])
    frame = pd.DataFrame({'x': values}, index=dates)
    result = rdu.standardize_time_series(frame, standardize_by='month')
    grouped = result['x'].groupby(result.index.month)
    assert np.all(np.abs(grouped.mean()) < 1e-12)
    assert np.all(np.abs(grouped.std(ddof=1) - 1) < 1e-12)

    # Day-of-year standardization on a multi-year daily series.
    n_samples = 1000
    times = pd.date_range('2010-01-01', periods=n_samples, freq='1D')
    frame = pd.DataFrame(
        {'x': rng.normal(loc=3.2, scale=4.0, size=n_samples)}, index=times)
    result = rdu.standardize_time_series(frame, standardize_by='dayofyear')
    grouped = result['x'].groupby(result.index.dayofyear)
    assert np.all(np.abs(grouped.mean()) < 1e-12)
    assert np.all(np.abs(grouped.std(ddof=1) - 1) < 1e-12)
| [
"pandas.DataFrame",
"sklearn.utils.check_random_state",
"pandas.date_range",
"pandas.Timestamp",
"numpy.std",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"reanalysis_dbns.utils.standardize_time_series",
"pandas.Categorical",
"reanalysis_dbns.utils.remove_polynomial_trend",
"reanalysis_dbns.ut... | [((389, 402), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (398, 402), True, 'import numpy as np\n'), ((547, 585), 'reanalysis_dbns.utils.construct_lagged_data', 'rdu.construct_lagged_data', (['offsets', 'df'], {}), '(offsets, df)\n', (572, 585), True, 'import reanalysis_dbns.utils as rdu\n'), ((608, 624), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (617, 624), True, 'import numpy as np\n'), ((817, 867), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'freq': '"""1D"""', 'periods': '(10)'}), "('2000-01-01', freq='1D', periods=10)\n", (830, 867), True, 'import pandas as pd\n'), ((1051, 1089), 'reanalysis_dbns.utils.construct_lagged_data', 'rdu.construct_lagged_data', (['offsets', 'df'], {}), '(offsets, df)\n', (1076, 1089), True, 'import reanalysis_dbns.utils as rdu\n'), ((1340, 1389), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'freq': '"""1D"""', 'periods': '(5)'}), "('2000-01-01', freq='1D', periods=5)\n", (1353, 1389), True, 'import pandas as pd\n'), ((1581, 1619), 'reanalysis_dbns.utils.construct_lagged_data', 'rdu.construct_lagged_data', (['offsets', 'df'], {}), '(offsets, df)\n', (1606, 1619), True, 'import reanalysis_dbns.utils as rdu\n'), ((2067, 2098), 'sklearn.utils.check_random_state', 'check_random_state', (['random_seed'], {}), '(random_seed)\n', (2085, 2098), False, 'from sklearn.utils import check_random_state\n'), ((2128, 2185), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': 'n_samples', 'freq': '"""1D"""'}), "('2010-01-01', periods=n_samples, freq='1D')\n", (2141, 2185), True, 'import pandas as pd\n'), ((2240, 2271), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (2252, 2271), True, 'import pandas as pd\n'), ((2293, 2341), 'reanalysis_dbns.utils.remove_polynomial_trend', 'rdu.remove_polynomial_trend', (['data'], {'trend_order': '(1)'}), '(data, trend_order=1)\n', (2320, 2341), True, 'import 
reanalysis_dbns.utils as rdu\n'), ((2517, 2548), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (2529, 2548), True, 'import pandas as pd\n'), ((2570, 2618), 'reanalysis_dbns.utils.remove_polynomial_trend', 'rdu.remove_polynomial_trend', (['data'], {'trend_order': '(1)'}), '(data, trend_order=1)\n', (2597, 2618), True, 'import reanalysis_dbns.utils as rdu\n'), ((2765, 2796), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (2777, 2796), True, 'import pandas as pd\n'), ((2818, 2866), 'reanalysis_dbns.utils.remove_polynomial_trend', 'rdu.remove_polynomial_trend', (['data'], {'trend_order': '(2)'}), '(data, trend_order=2)\n', (2845, 2866), True, 'import reanalysis_dbns.utils as rdu\n'), ((3070, 3101), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (3082, 3101), True, 'import pandas as pd\n'), ((3123, 3171), 'reanalysis_dbns.utils.remove_polynomial_trend', 'rdu.remove_polynomial_trend', (['data'], {'trend_order': '(1)'}), '(data, trend_order=1)\n', (3150, 3171), True, 'import reanalysis_dbns.utils as rdu\n'), ((3359, 3390), 'sklearn.utils.check_random_state', 'check_random_state', (['random_seed'], {}), '(random_seed)\n', (3377, 3390), False, 'from sklearn.utils import check_random_state\n'), ((3420, 3477), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': 'n_samples', 'freq': '"""1D"""'}), "('2010-01-01', periods=n_samples, freq='1D')\n", (3433, 3477), True, 'import pandas as pd\n'), ((3553, 3584), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (3565, 3584), True, 'import pandas as pd\n'), ((3609, 3642), 'reanalysis_dbns.utils.standardize_time_series', 'rdu.standardize_time_series', (['data'], {}), '(data)\n', (3636, 3642), True, 'import reanalysis_dbns.utils as rdu\n'), ((4836, 4867), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, 
index=t)\n", (4848, 4867), True, 'import pandas as pd\n'), ((4892, 4949), 'reanalysis_dbns.utils.standardize_time_series', 'rdu.standardize_time_series', (['data'], {'standardize_by': '"""month"""'}), "(data, standardize_by='month')\n", (4919, 4949), True, 'import reanalysis_dbns.utils as rdu\n'), ((5251, 5308), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': 'n_samples', 'freq': '"""1D"""'}), "('2010-01-01', periods=n_samples, freq='1D')\n", (5264, 5308), True, 'import pandas as pd\n'), ((5384, 5415), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {'index': 't'}), "({'x': x}, index=t)\n", (5396, 5415), True, 'import pandas as pd\n'), ((5440, 5501), 'reanalysis_dbns.utils.standardize_time_series', 'rdu.standardize_time_series', (['data'], {'standardize_by': '"""dayofyear"""'}), "(data, standardize_by='dayofyear')\n", (5467, 5501), True, 'import reanalysis_dbns.utils as rdu\n'), ((440, 453), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (449, 453), True, 'import numpy as np\n'), ((460, 477), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (469, 477), True, 'import numpy as np\n'), ((671, 687), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (680, 687), True, 'import numpy as np\n'), ((700, 729), 'numpy.arange', 'np.arange', (['(10)', '(19)'], {'dtype': '"""f8"""'}), "(10, 19, dtype='f8')\n", (709, 729), True, 'import numpy as np\n'), ((896, 921), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': '"""f8"""'}), "(10, dtype='f8')\n", (905, 921), True, 'import numpy as np\n'), ((951, 980), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {'dtype': '"""f8"""'}), "(10, 20, dtype='f8')\n", (960, 980), True, 'import numpy as np\n'), ((1174, 1201), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {'dtype': '"""f8"""'}), "(1, 9, dtype='f8')\n", (1183, 1201), True, 'import numpy as np\n'), ((1223, 1252), 'numpy.arange', 'np.arange', (['(10)', '(18)'], {'dtype': '"""f8"""'}), "(10, 18, dtype='f8')\n", (1232, 
1252), True, 'import numpy as np\n'), ((1427, 1468), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b', 'c']"], {}), "(['a', 'b', 'a', 'b', 'c'])\n", (1441, 1468), True, 'import pandas as pd\n'), ((1484, 1511), 'numpy.arange', 'np.arange', (['(2)', '(7)'], {'dtype': '"""f8"""'}), "(2, 7, dtype='f8')\n", (1493, 1511), True, 'import numpy as np\n'), ((1704, 1768), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'a', 'b'], categories=['a', 'b', 'c'])\n", (1718, 1768), True, 'import pandas as pd\n'), ((1826, 1859), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0, np.NaN]'], {}), '([4.0, 5.0, 6.0, np.NaN])\n', (1834, 1859), True, 'import numpy as np\n'), ((2208, 2228), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2217, 2228), True, 'import numpy as np\n'), ((2379, 2398), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (2387, 2398), True, 'import numpy as np\n'), ((2638, 2666), 'numpy.mean', 'np.mean', (["detrended_data['x']"], {}), "(detrended_data['x'])\n", (2645, 2666), True, 'import numpy as np\n'), ((2904, 2923), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (2912, 2923), True, 'import numpy as np\n'), ((3191, 3219), 'numpy.mean', 'np.mean', (["detrended_data['x']"], {}), "(detrended_data['x'])\n", (3198, 3219), True, 'import numpy as np\n'), ((3662, 3693), 'numpy.mean', 'np.mean', (["standardized_data['x']"], {}), "(standardized_data['x'])\n", (3669, 3693), True, 'import numpy as np\n'), ((3809, 3835), 'pandas.Timestamp', 'pd.Timestamp', (['"""2009-12-01"""'], {}), "('2009-12-01')\n", (3821, 3835), True, 'import pandas as pd\n'), ((3837, 3863), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (3849, 3863), True, 'import pandas as pd\n'), ((3874, 3900), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-02-01"""'], {}), "('2010-02-01')\n", (3886, 3900), True, 'import pandas as pd\n'), ((3902, 3928), 
'pandas.Timestamp', 'pd.Timestamp', (['"""2010-12-01"""'], {}), "('2010-12-01')\n", (3914, 3928), True, 'import pandas as pd\n'), ((3939, 3965), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (3951, 3965), True, 'import pandas as pd\n'), ((3967, 3993), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-02-01"""'], {}), "('2011-02-01')\n", (3979, 3993), True, 'import pandas as pd\n'), ((4004, 4030), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-12-01"""'], {}), "('2011-12-01')\n", (4016, 4030), True, 'import pandas as pd\n'), ((4032, 4058), 'pandas.Timestamp', 'pd.Timestamp', (['"""2012-01-01"""'], {}), "('2012-01-01')\n", (4044, 4058), True, 'import pandas as pd\n'), ((4069, 4095), 'pandas.Timestamp', 'pd.Timestamp', (['"""2012-02-01"""'], {}), "('2012-02-01')\n", (4081, 4095), True, 'import pandas as pd\n'), ((4097, 4123), 'pandas.Timestamp', 'pd.Timestamp', (['"""2012-12-01"""'], {}), "('2012-12-01')\n", (4109, 4123), True, 'import pandas as pd\n'), ((4134, 4160), 'pandas.Timestamp', 'pd.Timestamp', (['"""2013-01-01"""'], {}), "('2013-01-01')\n", (4146, 4160), True, 'import pandas as pd\n'), ((4162, 4188), 'pandas.Timestamp', 'pd.Timestamp', (['"""2013-02-01"""'], {}), "('2013-02-01')\n", (4174, 4188), True, 'import pandas as pd\n'), ((2699, 2719), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2708, 2719), True, 'import numpy as np\n'), ((2730, 2750), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2739, 2750), True, 'import numpy as np\n'), ((3721, 3759), 'numpy.std', 'np.std', (["standardized_data['x']"], {'ddof': '(1)'}), "(standardized_data['x'], ddof=1)\n", (3727, 3759), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import numpy
from pyqtgraph.Qt import QtGui
import pyqtgraph
def plot_data(data, scroll_axis=2):
    """ Plot an image associated data.
    Currently support on 1D, 2D or 3D data.

    Parameters
    ----------
    data: array
        the data to be displayed.
    scroll_axis: int (optional, default 2)
        the scroll axis for 3d data.

    Raises
    ------
    ValueError
        if the data is not 1D, 2D or 3D.
    """
    # Check input parameters
    if data.ndim not in range(1, 4):
        raise ValueError("Unsupported data dimension.")

    # Deal with complex data: display the magnitude.  Fix: use
    # 'iscomplexobj' (a dtype check) rather than 'iscomplex(...).any()',
    # so complex-typed arrays whose imaginary part happens to be zero are
    # also converted before being handed to pyqtgraph.
    if numpy.iscomplexobj(data):
        data = numpy.abs(data)

    # Create application
    app = pyqtgraph.mkQApp()

    # Create the widget
    if data.ndim == 3:
        # Move the scroll axis to the front so the viewer scrolls along it.
        indices = [i for i in range(3) if i != scroll_axis]
        indices = [scroll_axis] + indices
        widget = pyqtgraph.image(numpy.transpose(data, indices))
    elif data.ndim == 2:
        widget = pyqtgraph.image(data)
    else:
        widget = pyqtgraph.plot(data)

    # Run application (blocks until the window is closed)
    app.exec_()
| [
"numpy.abs",
"numpy.transpose",
"numpy.iscomplex",
"pyqtgraph.mkQApp",
"pyqtgraph.image",
"pyqtgraph.plot"
] | [((1040, 1058), 'pyqtgraph.mkQApp', 'pyqtgraph.mkQApp', ([], {}), '()\n', (1056, 1058), False, 'import pyqtgraph\n'), ((988, 1003), 'numpy.abs', 'numpy.abs', (['data'], {}), '(data)\n', (997, 1003), False, 'import numpy\n'), ((944, 965), 'numpy.iscomplex', 'numpy.iscomplex', (['data'], {}), '(data)\n', (959, 965), False, 'import numpy\n'), ((1242, 1272), 'numpy.transpose', 'numpy.transpose', (['data', 'indices'], {}), '(data, indices)\n', (1257, 1272), False, 'import numpy\n'), ((1316, 1337), 'pyqtgraph.image', 'pyqtgraph.image', (['data'], {}), '(data)\n', (1331, 1337), False, 'import pyqtgraph\n'), ((1365, 1385), 'pyqtgraph.plot', 'pyqtgraph.plot', (['data'], {}), '(data)\n', (1379, 1385), False, 'import pyqtgraph\n')] |
import tensorflow
import matplotlib.pyplot as plt
import pandas_datareader as pdr
import pandas as pd
import math
import statistics
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.metrics import mean_squared_error
# Load the movie data and peek at the first/last rows.
df = pd.read_csv("IMDB-Movie-Data.csv")
df.head()
df.tail()

rate = df.reset_index()['Rating']
print(rate)

# Mean and median rating across all movies.
c = statistics.mean(rate)
print(c)
m = np.quantile(rate, .50)
print(m)

# Keep only movies rated at or above the median rating.
q_movies = df.copy().loc[df['Rating'] >= m]
# BUG fix: DataFrame.shape is a property, not a method; the original
# `q_movies.shape()` / `df.shape()` raised
# "TypeError: 'tuple' object is not callable".
print(q_movies.shape)
print(df.shape)
def weighted_rating(x, m=m, c=c):
    """Return the IMDB-style weighted rating for one movie row.

    ``m`` and ``c`` default to the module-level median and mean ratings
    computed above.

    NOTE(review): both ``v`` and ``R`` read the same 'Metascore' column;
    in the canonical IMDB formula ``v`` is the vote count -- confirm.
    """
    v = x['Metascore']
    R = x['Metascore']
    # Calculation based on the IMDB formula: blend the movie's own score
    # with the global prior, weighted by v relative to the threshold m.
    score_weight = v / (v + m)
    prior_weight = m / (m + v)
    return score_weight * R + prior_weight * c
# Score every qualifying movie, rank by score, and show the top entries.
q_movies['score'] = q_movies.apply(weighted_rating, axis=1)
q_movies = q_movies.sort_values('score', ascending=False)
print( q_movies[['Title','Rating', 'score']].head(35) )
| [
"pandas.read_csv",
"numpy.quantile",
"statistics.mean"
] | [((334, 368), 'pandas.read_csv', 'pd.read_csv', (['"""IMDB-Movie-Data.csv"""'], {}), "('IMDB-Movie-Data.csv')\n", (345, 368), True, 'import pandas as pd\n'), ((442, 463), 'statistics.mean', 'statistics.mean', (['rate'], {}), '(rate)\n', (457, 463), False, 'import statistics\n'), ((479, 501), 'numpy.quantile', 'np.quantile', (['rate', '(0.5)'], {}), '(rate, 0.5)\n', (490, 501), True, 'import numpy as np\n')] |
"""Ledoit & Wolf constant correlation unequal variance shrinkage estimator."""
import numpy as np
import pandas as pd
from poptimizer.data.views import quotes
def shrinkage(returns: np.array) -> tuple[np.array, float, float]:  # noqa: WPS210
    """Shrinks sample covariance matrix towards constant correlation unequal variance matrix.

    Ledoit & Wolf ("Honey, I shrunk the sample covariance matrix", Portfolio Management, 30(2004),
    110-119) optimal asymptotic shrinkage between 0 (sample covariance matrix) and 1 (constant
    sample average correlation unequal sample variance matrix).

    Paper:
    http://www.ledoit.net/honey.pdf

    Matlab code:
    https://www.econ.uzh.ch/dam/jcr:ffffffff-935a-b0d6-ffff-ffffde5e2d4e/covCor.m.zip

    Special thanks to <NAME> https://github.com/epogrebnyak

    :param returns:
        t, n - returns of t observations of n shares.
    :return:
        Covariance matrix, sample average correlation, shrinkage.
    """
    t, n = returns.shape  # noqa: WPS111
    mean_returns = np.mean(returns, axis=0, keepdims=True)
    # BUG fix: de-mean into a NEW array.  The original `returns -= mean_returns`
    # mutated the caller's array in place as a hidden side effect.
    returns = returns - mean_returns
    sample_cov = returns.transpose() @ returns / t

    # sample average correlation (off-diagonal mean of the correlation matrix)
    variance = np.diag(sample_cov).reshape(-1, 1)
    sqrt_var = variance ** 0.5
    unit_cor_var = sqrt_var * sqrt_var.transpose()
    average_cor = ((sample_cov / unit_cor_var).sum() - n) / n / (n - 1)  # noqa: WPS221
    # shrinkage target: constant correlation, sample variances on the diagonal
    prior = average_cor * unit_cor_var
    np.fill_diagonal(prior, variance)

    # pi-hat: sum of asymptotic variances of the sample covariance entries
    y = returns ** 2  # noqa: WPS111
    phi_mat = (y.transpose() @ y) / t - sample_cov ** 2
    phi = phi_mat.sum()

    # rho-hat: asymptotic covariance between sample covariance and the prior
    theta_mat = ((returns ** 3).transpose() @ returns) / t - variance * sample_cov  # noqa: WPS221
    np.fill_diagonal(theta_mat, 0)
    rho = np.diag(phi_mat).sum() + average_cor * (1 / sqrt_var @ sqrt_var.transpose() * theta_mat).sum()  # noqa: WPS221

    # gamma-hat: squared Frobenius distance between sample covariance and prior
    gamma = np.linalg.norm(sample_cov - prior, "fro") ** 2

    # shrinkage constant, clipped to [0, 1]
    kappa = (phi - rho) / gamma
    shrink = max(0, min(1, kappa / t))

    # estimator: convex combination of prior and sample covariance
    sigma = shrink * prior + (1 - shrink) * sample_cov

    return sigma, average_cor, shrink
def ledoit_wolf_cor(
    tickers: tuple,
    date: pd.Timestamp,
    history_days: int,
    forecast_days: int = 0,
) -> tuple[np.array, float, float]:
    """Ledoit-Wolf correlation matrix estimate for the given tickers.

    Note that quotes covering ``history_days`` days can yield at most
    ``history_days - 1`` daily returns, which is accounted for when
    selecting the estimation window.
    """
    div, prices = quotes.div_and_prices(tickers, date)
    # Total daily return: price appreciation plus dividends.
    total_returns = (prices + div) / prices.shift(1)
    # Window of history_days - 1 returns, ending forecast_days before `date`.
    window = total_returns.iloc[-(history_days - 1) - forecast_days:]
    window = window.iloc[: history_days - 1]
    # Normalize each series so shrinkage estimates a correlation matrix.
    normalized = (window - window.mean()) / window.std(ddof=0)
    return shrinkage(normalized.values)
| [
"numpy.fill_diagonal",
"poptimizer.data.views.quotes.div_and_prices",
"numpy.mean",
"numpy.linalg.norm",
"numpy.diag"
] | [((1033, 1072), 'numpy.mean', 'np.mean', (['returns'], {'axis': '(0)', 'keepdims': '(True)'}), '(returns, axis=0, keepdims=True)\n', (1040, 1072), True, 'import numpy as np\n'), ((1449, 1482), 'numpy.fill_diagonal', 'np.fill_diagonal', (['prior', 'variance'], {}), '(prior, variance)\n', (1465, 1482), True, 'import numpy as np\n'), ((1732, 1762), 'numpy.fill_diagonal', 'np.fill_diagonal', (['theta_mat', '(0)'], {}), '(theta_mat, 0)\n', (1748, 1762), True, 'import numpy as np\n'), ((2534, 2570), 'poptimizer.data.views.quotes.div_and_prices', 'quotes.div_and_prices', (['tickers', 'date'], {}), '(tickers, date)\n', (2555, 2570), False, 'from poptimizer.data.views import quotes\n'), ((1913, 1954), 'numpy.linalg.norm', 'np.linalg.norm', (['(sample_cov - prior)', '"""fro"""'], {}), "(sample_cov - prior, 'fro')\n", (1927, 1954), True, 'import numpy as np\n'), ((1201, 1220), 'numpy.diag', 'np.diag', (['sample_cov'], {}), '(sample_cov)\n', (1208, 1220), True, 'import numpy as np\n'), ((1773, 1789), 'numpy.diag', 'np.diag', (['phi_mat'], {}), '(phi_mat)\n', (1780, 1789), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network configuration for MAML."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
import tensorflow.compat.v1 as tf
def serialize_weights(session, weights, feed_dict=None):
  """Serializes the weights of current network into a 1-d array for protobuf.

  Weights are serialized in the alphabetical order of their names, so the
  layout is reproducible given the same weight schema.

  Args:
    session: a TF session in which the values are computed
    weights: a dictionary that maps weight name to corresponding TF variables.
    feed_dict: feed_dict for TF evaluation

  Returns:
    A 1-d numpy array containing the serialized weights
  """
  pieces = [
      session.run(weights[name], feed_dict=feed_dict).reshape([-1])
      for name in sorted(weights)
  ]
  return np.hstack(pieces)
def deserialize_weights(weights_variable, flattened_weights):
  """Deserializes the weights into a dictionary that maps name to values.

  The schema is provided by the weights_variable, which is a dictionary that
  maps weight names to corresponding TF variables (i.e. the output of
  construct_network), consumed in alphabetical order of the names -- the
  same order used by serialize_weights.

  Args:
    weights_variable: a dictionary that maps weight names to corresponding TF
      variables
    flattened_weights: a 1-d array of weights to deserialize

  Returns:
    A dictionary that maps weight names to weight values
  """
  result = {}
  offset = 0
  for name in sorted(weights_variable):
    shape = weights_variable[name].shape
    size = np.prod(shape)
    chunk = np.array(flattened_weights[offset:offset + size])
    result[name] = chunk.reshape(shape)
    offset += size
  return result
class MAMLNetworkGenerator(object, metaclass=ABCMeta):
  """Abstract interface for MAML network generators.

  Concrete generators must be able to create the network weights and to
  build the forward-pass graph from a given set of weights.

  BUG fix: the original used the Python-2 `__metaclass__ = ABCMeta`
  attribute, which is silently ignored in Python 3, so the abstract
  methods were never actually enforced.
  """

  @abstractmethod
  def construct_network_weights(self, scope='weights'):
    """Creates and returns a dict mapping weight names to TF variables."""
    pass

  @abstractmethod
  def construct_network(self, network_input, weights, scope='network'):
    """Builds the network ops from `network_input` using `weights`."""
    pass
class FullyConnectedNetworkGenerator(MAMLNetworkGenerator):
  """Generator for fully connected networks."""

  def __init__(self,
               dim_input=1,
               dim_output=1,
               layer_sizes=(64,),
               activation_fn=tf.nn.tanh):
    """Creates fully connected neural networks.

    Args:
      dim_input: Dimensionality of input (integer > 0).
      dim_output: Dimensionality of output (integer > 0).
      layer_sizes: non-empty list with number of neurons per internal layer.
      activation_fn: activation function for hidden layers
    """
    self.dim_input = dim_input
    self.dim_output = dim_output
    self.layer_sizes = layer_sizes
    self.activation_fn = activation_fn

  def construct_network_weights(self, scope='weights'):
    """Creates weights for fully connected neural network.

    Weight names follow the pattern 'w_<i>'/'b_<i>' for hidden layer i and
    'w_out'/'b_out' for the output layer; serialize_weights and
    deserialize_weights sort on these names, so the naming scheme is part
    of the serialization contract.

    Args:
      scope: variable scope

    Returns:
      A dict with weights (network parameters).
    """
    weights = {}
    with tf.variable_scope(scope):
      # First hidden layer maps dim_input -> layer_sizes[0].
      weights['w_0'] = tf.Variable(
          tf.truncated_normal([self.dim_input, self.layer_sizes[0]],
                              stddev=0.1),
          name='w_0')
      weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')
      # Remaining hidden layers map layer_sizes[i-1] -> layer_sizes[i].
      for i in range(1, len(self.layer_sizes)):
        weights['w_%d' % i] = tf.Variable(
            tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]],
                                stddev=0.1),
            name='w_%d' % i)
        weights['b_%d' % i] = tf.Variable(
            tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)
      # Output layer maps the last hidden width to dim_output.
      weights['w_out'] = tf.Variable(
          tf.truncated_normal([self.layer_sizes[-1], self.dim_output],
                              stddev=0.1),
          name='w_out')
      weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')
    return weights

  def construct_network(self, network_input, weights, scope='network'):
    """Creates a fully connected neural network with given weights and input.

    Args:
      network_input: Network input (1d).
      weights: network parameters (see construct_network_weights).
      scope: name scope.

    Returns:
      neural network output op
    """
    num_layers = len(self.layer_sizes)
    with tf.name_scope(scope):
      hidden = self.activation_fn(
          tf.nn.xw_plus_b(
              network_input, weights['w_0'], weights['b_0'], name='hidden_0'))
      for i in range(1, num_layers):
        hidden = self.activation_fn(
            tf.nn.xw_plus_b(
                hidden,
                weights['w_%d' % i],
                weights['b_%d' % i],
                name='hidden_%d' % i))
      # The output layer is linear (no activation applied).
      return tf.nn.xw_plus_b(
          hidden, weights['w_out'], weights['b_out'], name='output')
class LinearNetworkGenerator(MAMLNetworkGenerator):
  """Generator for simple linear connections (Y = W*X+b)."""

  def __init__(self, dim_input=1, dim_output=1):
    """Linear transformation with dim_input inputs and dim_output outputs.

    Args:
      dim_input: Dimensionality of input (integer > 0).
      dim_output: Dimensionality of output (integer > 0).
    """
    self.dim_input = dim_input
    self.dim_output = dim_output

  def construct_network_weights(self, scope='weights'):
    """Create weights for linear transformation.

    Args:
      scope: variable scope

    Returns:
      A dict with weights (network parameters).
    """
    with tf.variable_scope(scope):
      # Weight matrix first, then bias, matching the serialization names.
      w_out = tf.Variable(
          tf.truncated_normal([self.dim_input, self.dim_output],
                              stddev=0.1),
          name='w_out')
      b_out = tf.Variable(tf.zeros([self.dim_output]), name='b_out')
      return {'w_out': w_out, 'b_out': b_out}

  def construct_network(self, network_input, weights, scope='network'):
    """Create ops for linear transformation.

    Args:
      network_input: Network input (1d).
      weights: network parameters (see construct_network_weights).
      scope: name scope.

    Returns:
      output op
    """
    with tf.name_scope(scope):
      return tf.nn.xw_plus_b(
          network_input, weights['w_out'], weights['b_out'], name='output')
| [
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.name_scope",
"numpy.hstack",
"tensorflow.compat.v1.nn.xw_plus_b",
"tensorflow.compat.v1.truncated_normal",
"numpy.array",
"numpy.prod"
] | [((1594, 1622), 'numpy.hstack', 'np.hstack', (['flattened_weights'], {}), '(flattened_weights)\n', (1603, 1622), True, 'import numpy as np\n'), ((2262, 2299), 'numpy.prod', 'np.prod', (['weights_variable[name].shape'], {}), '(weights_variable[name].shape)\n', (2269, 2299), True, 'import numpy as np\n'), ((2323, 2380), 'numpy.array', 'np.array', (['flattened_weights[idx:idx + len_current_weight]'], {}), '(flattened_weights[idx:idx + len_current_weight])\n', (2331, 2380), True, 'import numpy as np\n'), ((3714, 3738), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (3731, 3738), True, 'import tensorflow.compat.v1 as tf\n'), ((5008, 5028), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['scope'], {}), '(scope)\n', (5021, 5028), True, 'import tensorflow.compat.v1 as tf\n'), ((5424, 5498), 'tensorflow.compat.v1.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['hidden', "weights['w_out']", "weights['b_out']"], {'name': '"""output"""'}), "(hidden, weights['w_out'], weights['b_out'], name='output')\n", (5439, 5498), True, 'import tensorflow.compat.v1 as tf\n'), ((6171, 6195), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6188, 6195), True, 'import tensorflow.compat.v1 as tf\n'), ((6818, 6838), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['scope'], {}), '(scope)\n', (6831, 6838), True, 'import tensorflow.compat.v1 as tf\n'), ((6853, 6939), 'tensorflow.compat.v1.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['network_input', "weights['w_out']", "weights['b_out']"], {'name': '"""output"""'}), "(network_input, weights['w_out'], weights['b_out'], name=\n 'output')\n", (6868, 6939), True, 'import tensorflow.compat.v1 as tf\n'), ((3786, 3856), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[self.dim_input, self.layer_sizes[0]]'], {'stddev': '(0.1)'}), '([self.dim_input, self.layer_sizes[0]], stddev=0.1)\n', (3805, 3856), True, 'import tensorflow.compat.v1 as tf\n'), ((3945, 3976), 
'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.layer_sizes[0]]'], {}), '([self.layer_sizes[0]])\n', (3953, 3976), True, 'import tensorflow.compat.v1 as tf\n'), ((4388, 4460), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[self.layer_sizes[-1], self.dim_output]'], {'stddev': '(0.1)'}), '([self.layer_sizes[-1], self.dim_output], stddev=0.1)\n', (4407, 4460), True, 'import tensorflow.compat.v1 as tf\n'), ((4553, 4580), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.dim_output]'], {}), '([self.dim_output])\n', (4561, 4580), True, 'import tensorflow.compat.v1 as tf\n'), ((5075, 5154), 'tensorflow.compat.v1.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['network_input', "weights['w_0']", "weights['b_0']"], {'name': '"""hidden_0"""'}), "(network_input, weights['w_0'], weights['b_0'], name='hidden_0')\n", (5090, 5154), True, 'import tensorflow.compat.v1 as tf\n'), ((4093, 4172), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[self.layer_sizes[i - 1], self.layer_sizes[i]]'], {'stddev': '(0.1)'}), '([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1)\n', (4112, 4172), True, 'import tensorflow.compat.v1 as tf\n'), ((4290, 4321), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.layer_sizes[i]]'], {}), '([self.layer_sizes[i]])\n', (4298, 4321), True, 'import tensorflow.compat.v1 as tf\n'), ((5257, 5349), 'tensorflow.compat.v1.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['hidden', "weights['w_%d' % i]", "weights['b_%d' % i]"], {'name': "('hidden_%d' % i)"}), "(hidden, weights['w_%d' % i], weights['b_%d' % i], name=\n 'hidden_%d' % i)\n", (5272, 5349), True, 'import tensorflow.compat.v1 as tf\n'), ((6276, 6342), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[self.dim_input, self.dim_output]'], {'stddev': '(0.1)'}), '([self.dim_input, self.dim_output], stddev=0.1)\n', (6295, 6342), True, 'import tensorflow.compat.v1 as tf\n'), ((6460, 6487), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.dim_output]'], {}), 
'([self.dim_output])\n', (6468, 6487), True, 'import tensorflow.compat.v1 as tf\n')] |
# Third-party
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import interp1d
from astropy import units as u
from typing import Iterable
# Project
from ..util import check_random_state, check_units
# Public names exported by this module.
__all__ = ['IMFIntegrator', 'salpeter_params', 'kroupa_params', 'scalo_params',
           'imf_params_dict', 'sample_imf', 'build_galaxy',
           'kroupa','scalo','salpeter']
# pre-defined IMF parameter dictionaries
# 'a': power-law slope(s) of the IMF; 'b': break locations between the
# power-law segments (presumably in solar masses -- TODO confirm).
kroupa_params = {'a': [-0.3, -1.3, -2.3],'b': [0.08, 0.5]}
scalo_params = {'a': [-1.2, -2.7, -2.3], 'b': [1,10]}
# Salpeter is a single power law; its 'b' values are placeholders above the
# physical mass range (IMFIntegrator substitutes [301., 302.] for them).
salpeter_params = {'a': -2.35, 'b': [2e2, 3e2]}
# Lookup table mapping IMF names to their parameter dictionaries.
imf_params_dict = {'salpeter': salpeter_params,
                   'kroupa': kroupa_params,
                   'scalo': scalo_params}
def kroupa(m, **kwargs):
    """
    Evaluate weights for the Kroupa stellar initial mass function
    (`Kroupa 2001
    <https://ui.adsabs.harvard.edu/abs/2001MNRAS.322..231K/abstract>`_).

    Parameters
    ----------
    m : `~numpy.ndarray`
        Stellar mass grid.
    **kwargs
        Forwarded to `IMFIntegrator.weights` (e.g. ``norm_type``,
        ``norm_mass_min``, ``norm_mass_max``).

    Returns
    -------
    weights : `~numpy.ndarray`
        The weights associated with each mass in ``m``.
    """
    integrator = IMFIntegrator('kroupa')
    return integrator.weights(m, **kwargs)
def scalo(m, **kwargs):
    """
    Evaluate weights for the Scalo stellar initial mass function
    (`Scalo 1998
    <https://ui.adsabs.harvard.edu/abs/1998ASPC..142..201S/abstract>`_).

    Parameters
    ----------
    m : `~numpy.ndarray`
        Stellar mass grid.
    **kwargs
        Forwarded to `IMFIntegrator.weights` (e.g. ``norm_type``,
        ``norm_mass_min``, ``norm_mass_max``).

    Returns
    -------
    weights : `~numpy.ndarray`
        The weights associated with each mass in ``m``.
    """
    integrator = IMFIntegrator('scalo')
    return integrator.weights(m, **kwargs)
def salpeter(m, **kwargs):
    """
    Evaluate weights for the Salpeter IMF (`Salpeter 1955
    <https://ui.adsabs.harvard.edu/abs/1955ApJ...121..161S/abstract>`_).

    Parameters
    ----------
    m : `~numpy.ndarray`
        Stellar mass grid.
    **kwargs
        Forwarded to `IMFIntegrator.weights` (e.g. ``norm_type``,
        ``norm_mass_min``, ``norm_mass_max``).

    Returns
    -------
    weights : `~numpy.ndarray`
        The weights associated with each mass in ``m``.
    """
    integrator = IMFIntegrator('salpeter')
    return integrator.weights(m, **kwargs)
def sample_imf(num_stars, m_min=0.08, m_max=120, imf='kroupa',
               num_mass_bins=100000, random_state=None, imf_kw=None):
    """
    Sample stellar IMF via inverse transform sampling.

    Parameters
    ----------
    num_stars : int
        Number of stars to sample.
    m_min : float, optional
        Minimum stellar mass.
    m_max : float, optional
        Maximum stellar mass.
    imf : str or dict
        Which IMF to use, if str then must be one of pre-defined: 'kroupa',
        'scalo' or 'salpeter'. Can also specify broken power law as dict,
        which must contain either 'a' as a float (describing the slope of a
        single power law) or 'a' (a list with 3 elements describing the slopes
        of a broken power law) and 'b' (a list with 2 elements describing the
        locations of the breaks).
    num_mass_bins : int, optional
        Number of mass bins in logarithmic spaced mass grid.
    random_state : `None`, int, list of ints, or `~numpy.random.RandomState`
        If `None`, return the `~numpy.random.RandomState` singleton used by
        ``numpy.random``. If `int`, return a new `~numpy.random.RandomState`
        instance seeded with the `int`. If `~numpy.random.RandomState`,
        return it. Otherwise raise ``ValueError``.
    imf_kw : dict or None, optional
        Keyword arguments for the imf function. ``None`` (the default)
        means no extra arguments.

    Returns
    -------
    masses : `~numpy.ndarray`
        The sampled stellar masses.
    """
    # FIX: the previous default `imf_kw={}` was a shared mutable default
    # argument; use None and create a fresh dict per call instead.
    imf_kw = {} if imf_kw is None else imf_kw
    rng = check_random_state(random_state)
    # Logarithmically spaced mass grid; weights are evaluated at bin centers.
    bin_edges = np.logspace(np.log10(m_min),
                            np.log10(m_max),
                            int(num_mass_bins))
    mass_grid = (bin_edges[1:] + bin_edges[:-1]) / 2.0
    weights = IMFIntegrator(imf).weights(mass_grid, **imf_kw)
    # Build the normalized cumulative distribution and invert it so that
    # uniform deviates map to masses.
    dm = np.diff(bin_edges)
    cdf_values = np.cumsum(weights * dm)
    cdf_values /= cdf_values.max()
    inverse_cdf = interp1d(cdf_values, mass_grid, bounds_error=False,
                           fill_value=m_min)
    rand_num = rng.uniform(low=0., high=1.0, size=int(num_stars))
    masses = inverse_cdf(rand_num)
    return masses
def build_galaxy(stellar_mass, num_stars_iter=1e5, m_min=0.08, m_max=120, imf='kroupa',
                 num_mass_bins=100000, random_state=None, imf_kw=None):
    """
    Build galaxy of a given stellar mass by drawing stars from the IMF in
    batches until the requested total mass is reached (the result may
    overshoot by at most one batch).

    Parameters
    ----------
    stellar_mass : float or `~astropy.units.Quantity`
        Stellar mass of galaxy. If float is given, the units are assumed to be
        solar masses.
    num_stars_iter : int
        Number of stars to generate at each iteration. Lower this
        number (at the expense of speed) to get a more accurate total mass.
    m_min : float, optional
        Minimum stellar mass.
    m_max : float, optional
        Maximum stellar mass.
    imf : str or dict
        Which IMF to use, if str then must be one of pre-defined: 'kroupa',
        'scalo' or 'salpeter'. Can also specify broken power law as dict,
        which must contain either 'a' as a float (describing the slope of a
        single power law) or 'a' (a list with 3 elements describing the slopes
        of a broken power law) and 'b' (a list with 2 elements describing the
        locations of the breaks).
    num_mass_bins : int, optional
        Number of mass bins in logarithmic spaced mass grid.
    random_state : `None`, int, list of ints, or `~numpy.random.RandomState`
        If `None`, return the `~numpy.random.RandomState` singleton used by
        ``numpy.random``. If `int`, return a new `~numpy.random.RandomState`
        instance seeded with the `int`. If `~numpy.random.RandomState`,
        return it. Otherwise raise ``ValueError``.
    imf_kw : dict or None, optional
        Keyword arguments for the imf function. ``None`` (the default)
        means no extra arguments.

    Returns
    -------
    stars : `~numpy.ndarray`
        Stellar masses of all the stars (an empty array when
        ``stellar_mass <= 0``).
    """
    # FIX: the previous default `imf_kw={}` was a shared mutable default.
    imf_kw = {} if imf_kw is None else imf_kw
    # Build the inverse CDF once (same construction as in `sample_imf`).
    rng = check_random_state(random_state)
    bin_edges = np.logspace(np.log10(m_min),
                            np.log10(m_max),
                            int(num_mass_bins))
    mass_grid = (bin_edges[1:] + bin_edges[:-1]) / 2.0
    weights = IMFIntegrator(imf).weights(mass_grid, **imf_kw)
    dm = np.diff(bin_edges)
    cdf_values = np.cumsum(weights * dm)
    cdf_values /= cdf_values.max()
    inverse_cdf = interp1d(cdf_values, mass_grid, bounds_error=False,
                           fill_value=m_min)
    stellar_mass = check_units(stellar_mass, 'Msun').to('Msun').value
    # Ensure the while loop makes progress.
    if num_stars_iter < 1:
        num_stars_iter = 1
    # Collect batches in a list and concatenate once at the end; the old
    # per-iteration np.concatenate grew quadratically with batch count.
    batches = []
    total_mass = 0.0
    while total_mass < stellar_mass:
        rand_num = rng.uniform(low=0., high=1.0, size=int(num_stars_iter))
        new_stars = inverse_cdf(rand_num)
        total_mass += new_stars.sum()
        batches.append(new_stars)
    if not batches:
        # FIX: return a consistent ndarray (the old code returned a plain
        # Python list here) when no stars are needed.
        return np.array([])
    return np.concatenate(batches)
class IMFIntegrator(object):
    """
    A helper class for evaluating and analytically integrating a (broken)
    power-law stellar initial mass function (IMF).

    Parameters
    ----------
    params : str or dict
        Which IMF to use, if str then must be one of pre-defined: 'kroupa',
        'scalo' or 'salpeter'. Can also specify broken power law as dict,
        which must contain either 'a' as a float (describing the slope of a
        single power law) or 'a' (a list with 3 elements describing the slopes
        of a broken power law) and 'b' (a list with 2 elements describing the
        locations of the breaks).
    m_min : float, optional
        Minimum stellar mass.
    m_max : float, optional
        Maximum stellar mass.
    """

    def __init__(self, params, m_min=0.1, m_max=120.0):
        if isinstance(params, str):
            if params in imf_params_dict:
                params_dict = imf_params_dict[params]
                if params == 'salpeter':
                    # Salpeter is a single power law: repeat the slope and
                    # place the (dummy) breaks above any physical mass.
                    self.a = [params_dict['a']] * 3
                    self.b = [301., 302.]
                else:
                    self.a = params_dict['a']
                    self.b = params_dict['b']
                self.name = params
            else:
                raise Exception(
                    f'{params} is not one of the pre-defined IMFs: '
                    + ', '.join(imf_params_dict.keys())
                )
        elif isinstance(params, dict):
            if ('a' in params and 'b' in params
                    and isinstance(params['a'], Iterable)):
                self.a = params['a']
                self.b = params['b']
                self.name = 'custom'
            elif 'a' in params and isinstance(params['a'], float):
                # Single power law from a float slope; dummy breaks as above.
                self.a = [params['a']] * 3
                self.b = [301., 302.]
                self.name = 'custom'
            else:
                raise Exception(
                    "dict must have both 'a' and 'b' for broken power "
                    "law or float in 'a' for single power law"
                )
        self.m_min = m_min
        self.m_max = m_max
        # Common lower limit of the antiderivatives; it cancels in every
        # definite integral.
        self.eval_min = 1e-3
        # Full-range normalizations by number and by mass.
        self.num_norm = self.integrate(m_min, m_max, None)
        self.mass_norm = self.m_integrate(m_min, m_max, None)

    def weights(self, mass_grid, norm_type=None,
                norm_mass_min=None, norm_mass_max=None):
        """
        Calculate the weights of the IMF at grid of stellar masses.

        Parameters
        ----------
        mass_grid : `~numpy.ndarray`
            Stellar mass grid.
        norm_type : str, optional
            How to normalize the weights: by 'number', 'mass', or the 'sum'.
        norm_mass_min : int or None, optional
            Minimum mass to use for normalization. If None, use minimum of
            `mass_grid` will be used.
        norm_mass_max : int or None, optional
            Maximum mass to use for normalization. If None, use maximum of
            `mass_grid` will be used.

        Returns
        -------
        weights : `~numpy.ndarray`
            The weights associated with each mass in the input `mass_grid`.

        Raises
        ------
        ValueError
            If `norm_type` is not None, 'sum', 'number' or 'mass'
            (previously this surfaced as an UnboundLocalError).
        """
        mass_grid = np.asarray(mass_grid)
        a1, a2, a3 = self.a
        b1, b2 = self.b
        # Piecewise slope for each mass.
        alpha = np.where(mass_grid < b1, a1, np.where(mass_grid < b2, a2, a3))
        # Pivot mass chosen so the segments join continuously at the breaks.
        m_break = np.where(mass_grid < b2, b1, b2 * (b1 / b2)**(a2 / a3))
        weights = (mass_grid / m_break)**(alpha)
        if norm_type is None:
            norm = 1.
        elif norm_type == 'sum':
            norm = weights.sum()
        elif norm_type in ('number', 'mass'):
            # NOTE(review): falsy bounds (e.g. 0) fall back to the grid
            # extrema, matching the original behavior.
            m_min = norm_mass_min if norm_mass_min else mass_grid.min()
            m_max = norm_mass_max if norm_mass_max else mass_grid.max()
            if norm_type == 'number':
                norm = self.integrate(m_min=m_min, m_max=m_max)
            else:
                norm = self.m_integrate(m_min=m_min, m_max=m_max)
        else:
            raise ValueError(f'{norm_type} is not a valid norm_type.')
        weights /= norm
        return weights

    def func(self, m):
        """
        Un-normalized IMF, dN / dM, at grid of stellar masses.

        Parameters
        ----------
        m : `~numpy.ndarray`
            Stellar mass grid.

        Returns
        -------
        weights : `~numpy.ndarray`
            Values of dN/dM associated with each mass in `m`.
        """
        return self.weights(m, norm_type=None)

    def m_func(self, m):
        """
        Mass times the un-normalized IMF (i.e. dN / d logM) at grid of
        stellar masses.

        Parameters
        ----------
        m : `~numpy.ndarray`
            Stellar mass grid.

        Returns
        -------
        weights : `~numpy.ndarray`
            Values dN/d logM associated with each mass in `m`.
        """
        return m * self.weights(m, norm_type=None)

    def _antiderivative(self, m, shift=0):
        """
        Antiderivative of m**shift times the IMF, evaluated at mass ``m``.

        ``shift=0`` gives the number antiderivative and ``shift=1`` the
        mass antiderivative (previously duplicated in `_indef_int` and
        `_indef_m_int`). The constant lower bound ``eval_min`` cancels in
        every definite integral.
        """
        a0, a1, a2 = self.a
        b0, b1 = self.b
        # Continuity constants of the piecewise power law; these use the
        # *unshifted* slopes in both the number and mass integrals.
        c0 = b0**a0
        c1 = b0**a1
        c2 = (b1 * (b0 / b1)**(a1 / a2))**a2
        # Shift the integration exponents (multiplying by m**shift).
        a0 += shift
        a1 += shift
        a2 += shift
        if m > b1:
            return ((b0**(a0 + 1.) - self.eval_min**(a0 + 1.)) / (a0 + 1) / c0
                    + (b1**(a1 + 1.) - b0**(a1 + 1.)) / (a1 + 1) / c1
                    + (m**(a2 + 1.) - b1**(a2 + 1.)) / (a2 + 1) / c2)
        if m > b0:
            return ((b0**(a0 + 1.) - self.eval_min**(a0 + 1.)) / (a0 + 1) / c0
                    + (m**(a1 + 1.) - b0**(a1 + 1.)) / (a1 + 1) / c1)
        return (m**(a0 + 1.) - self.eval_min**(a0 + 1.)) / (a0 + 1) / c0

    def _indef_int(self, m):
        """Antiderivative of the IMF (number weighting) at mass ``m``."""
        return self._antiderivative(m, shift=0)

    def _indef_m_int(self, m):
        """Antiderivative of mass times the IMF at mass ``m``."""
        return self._antiderivative(m, shift=1)

    def _resolve_norm(self, norm, norm_attr):
        """
        Translate a ``norm`` argument into a numeric divisor.

        ``True`` -> the precomputed attribute named by ``norm_attr``
        ('num_norm' or 'mass_norm'; looked up lazily because it does not
        exist yet while ``__init__`` is computing it); ``None``/``False``
        -> 1 (no normalization); a number -> itself. Identity checks are
        used so that ``norm=1`` means the number one — the old
        ``norm == True`` comparison silently treated 1 as True.
        """
        if norm is True:
            return getattr(self, norm_attr)
        if norm is None or norm is False:
            return 1.0
        if isinstance(norm, (int, float)):
            return norm
        raise Exception(f'{norm} is not a valid normalization.')

    def integrate(self, m_min=None, m_max=None, norm=False):
        """
        Calculate the integral under the IMF.

        Parameters
        ----------
        m_min : float, optional
            Lower stellar mass bound of integral (falls back to
            ``self.m_min`` when None — note a value of 0 also falls back,
            preserved from the original implementation).
        m_max : float, optional
            Upper stellar mass bound of integral (falls back to
            ``self.m_max`` when None).
        norm : bool or float, optional
            Whether or not to normalize the integral, default False. If True
            will normalize by number of stars. If a number is given, then
            will use that value as the normalization.

        Returns
        -------
        float
            Value of the integral of the IMF between m_min and m_max.
        """
        m_min = m_min if m_min else self.m_min
        m_max = m_max if m_max else self.m_max
        n = self._resolve_norm(norm, 'num_norm')
        return (self._indef_int(m_max) - self._indef_int(m_min)) / n

    def m_integrate(self, m_min=None, m_max=None, norm=False):
        """
        Calculate the integral under mass times the IMF.

        Parameters
        ----------
        m_min : float, optional
            Lower stellar mass bound of integral (falls back to
            ``self.m_min`` when None).
        m_max : float, optional
            Upper stellar mass bound of integral (falls back to
            ``self.m_max`` when None).
        norm : bool or float, optional
            Whether or not to normalize the integral, default False. If True
            will normalize by the total stellar mass. If a number is given,
            then will use that value as the normalization.

        Returns
        -------
        float
            Value of the integral of mass times the IMF between
            m_min and m_max.
        """
        m_min = m_min if m_min else self.m_min
        m_max = m_max if m_max else self.m_max
        n = self._resolve_norm(norm, 'mass_norm')
        return (self._indef_m_int(m_max) - self._indef_m_int(m_min)) / n
| [
"numpy.asarray",
"numpy.cumsum",
"numpy.diff",
"numpy.where",
"scipy.interpolate.interp1d",
"numpy.log10",
"numpy.concatenate"
] | [((5239, 5257), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (5246, 5257), True, 'import numpy as np\n'), ((5268, 5291), 'numpy.cumsum', 'np.cumsum', (['(weights * dm)'], {}), '(weights * dm)\n', (5277, 5291), True, 'import numpy as np\n'), ((5323, 5385), 'scipy.interpolate.interp1d', 'interp1d', (['cdf', 'mass_grid'], {'bounds_error': '(False)', 'fill_value': 'm_min'}), '(cdf, mass_grid, bounds_error=False, fill_value=m_min)\n', (5331, 5385), False, 'from scipy.interpolate import interp1d\n'), ((7642, 7660), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (7649, 7660), True, 'import numpy as np\n'), ((7671, 7694), 'numpy.cumsum', 'np.cumsum', (['(weights * dm)'], {}), '(weights * dm)\n', (7680, 7694), True, 'import numpy as np\n'), ((7726, 7788), 'scipy.interpolate.interp1d', 'interp1d', (['cdf', 'mass_grid'], {'bounds_error': '(False)', 'fill_value': 'm_min'}), '(cdf, mass_grid, bounds_error=False, fill_value=m_min)\n', (7734, 7788), False, 'from scipy.interpolate import interp1d\n'), ((5003, 5018), 'numpy.log10', 'np.log10', (['m_min'], {}), '(m_min)\n', (5011, 5018), True, 'import numpy as np\n'), ((5048, 5063), 'numpy.log10', 'np.log10', (['m_max'], {}), '(m_max)\n', (5056, 5063), True, 'import numpy as np\n'), ((7406, 7421), 'numpy.log10', 'np.log10', (['m_min'], {}), '(m_min)\n', (7414, 7421), True, 'import numpy as np\n'), ((7451, 7466), 'numpy.log10', 'np.log10', (['m_max'], {}), '(m_max)\n', (7459, 7466), True, 'import numpy as np\n'), ((8221, 8255), 'numpy.concatenate', 'np.concatenate', (['[stars, new_stars]'], {}), '([stars, new_stars])\n', (8235, 8255), True, 'import numpy as np\n'), ((11436, 11457), 'numpy.asarray', 'np.asarray', (['mass_grid'], {}), '(mass_grid)\n', (11446, 11457), True, 'import numpy as np\n'), ((11607, 11664), 'numpy.where', 'np.where', (['(mass_grid < b2)', 'b1', '(b2 * (b1 / b2) ** (a2 / a3))'], {}), '(mass_grid < b2, b1, b2 * (b1 / b2) ** (a2 / a3))\n', (11615, 11664), True, 'import numpy as 
np\n'), ((11555, 11587), 'numpy.where', 'np.where', (['(mass_grid < b2)', 'a2', 'a3'], {}), '(mass_grid < b2, a2, a3)\n', (11563, 11587), True, 'import numpy as np\n')] |
"""
A pytest module to test numpy methods, both supported and unsupported.
Numpy methods are selected from this API reference:
https://numpy.org/doc/stable/reference/routines.array-manipulation.html
"""
import random
import numpy as np
import pytest
import galois
from ..helper import randint, array_equal
###############################################################################
# Basic operations
###############################################################################
def test_copy(field):
    """np.copy() returns a plain ndarray; the .copy() method keeps the field type."""
    arr = field.Random((2, 3), dtype=random.choice(field.dtypes))
    plain = np.copy(arr)
    assert type(plain) is np.ndarray
    assert plain is not arr
    same_type = arr.copy()
    assert type(same_type) is field
    assert same_type is not arr
def test_shape(field):
    """Field arrays report their shape via both the attribute and np.shape()."""
    dtype = random.choice(field.dtypes)
    for shape in [(), (3,), (3, 4, 5)]:
        arr = field.Random(shape, dtype=dtype)
        assert arr.shape == shape
        assert np.shape(arr) == shape
###############################################################################
# Changing array shape
###############################################################################
def test_reshape(field):
    """Reshaping (method and free function) keeps the field type and dtype."""
    dtype = random.choice(field.dtypes)
    original = field.Random((10,), dtype=dtype)
    for reshaped in (original.reshape((2, 5)), np.reshape(original, (2, 5))):
        assert reshaped.shape == (2, 5)
        assert type(reshaped) is field
        assert reshaped.dtype == dtype
def test_ravel(field):
    """np.ravel() flattens to 1-D while keeping the field type and dtype."""
    dtype = random.choice(field.dtypes)
    flat = np.ravel(field.Random((2, 5), dtype=dtype))
    assert flat.shape == (10,)
    assert type(flat) is field
    assert flat.dtype == dtype
def test_flatten(field):
    """The .flatten() method flattens to 1-D while keeping the field type."""
    dtype = random.choice(field.dtypes)
    flat = field.Random((2, 5), dtype=dtype).flatten()
    assert flat.shape == (10,)
    assert type(flat) is field
    assert flat.dtype == dtype
###############################################################################
# Transpose-like operations
###############################################################################
def test_moveaxis(field):
    """np.moveaxis() reorders axes while keeping the field type and dtype."""
    dtype = random.choice(field.dtypes)
    moved = np.moveaxis(field.Random((3, 4, 5), dtype=dtype), 0, 1)
    assert moved.shape == (4, 3, 5)
    assert type(moved) is field
    assert moved.dtype == dtype
def test_transpose(field):
    """Both .T and np.transpose() flip a 2-D field array's axes."""
    dtype = random.choice(field.dtypes)
    original = field.Random((3, 4), dtype=dtype)
    for transposed in (original.T, np.transpose(original)):
        assert transposed.shape == (4, 3)
        assert array_equal(transposed[0, :], original[:, 0])
        assert type(transposed) is field
        assert transposed.dtype == dtype
###############################################################################
# Changing number of dimensions
###############################################################################
def test_at_least1d(field):
    """np.atleast_1d() promotes a 0-D field scalar to a 1-D field array."""
    dtype = random.choice(field.dtypes)
    scalar = field.Random((), dtype=dtype)
    promoted = np.atleast_1d(scalar)
    assert promoted.shape == (1,)
    assert type(promoted) is field
    assert promoted.dtype == dtype
def test_at_least2d(field):
    """np.atleast_2d() promotes a 1-D field array to a 2-D field array."""
    dtype = random.choice(field.dtypes)
    vector = field.Random((10,), dtype=dtype)
    promoted = np.atleast_2d(vector)
    assert promoted.shape == (1, 10)
    assert type(promoted) is field
    assert promoted.dtype == dtype
def test_at_least3d(field):
    """np.atleast_3d() promotes a 1-D field array to a 3-D field array."""
    dtype = random.choice(field.dtypes)
    vector = field.Random((10,), dtype=dtype)
    promoted = np.atleast_3d(vector)
    assert promoted.shape == (1, 10, 1)
    assert type(promoted) is field
    assert promoted.dtype == dtype
def test_broadcast_to(field):
    """np.broadcast_to() replicates a 1-D field array along a new axis."""
    dtype = random.choice(field.dtypes)
    row = field.Random((3,), dtype=dtype)
    expanded = np.broadcast_to(row, (2, 3))
    assert expanded.shape == (2, 3)
    assert array_equal(expanded[1, :], row)
    assert type(expanded) is field
    assert expanded.dtype == dtype
def test_squeeze(field):
    """np.squeeze() drops singleton axes while keeping the field type."""
    dtype = random.choice(field.dtypes)
    padded = field.Random((1, 3, 1), dtype=dtype)
    squeezed = np.squeeze(padded)
    assert squeezed.shape == (3,)
    assert type(squeezed) is field
    assert squeezed.dtype == dtype
###############################################################################
# Joining arrays
###############################################################################
def test_concatenate(field):
    """np.concatenate() stacks field arrays along an axis, keeping the type."""
    dtype = random.choice(field.dtypes)
    top = field.Random((2, 3), dtype=dtype)
    bottom = field.Random((1, 3), dtype=dtype)
    stacked = np.concatenate((top, bottom), axis=0)
    assert stacked.shape == (3, 3)
    assert array_equal(stacked[0:2, :], top)
    assert array_equal(stacked[2:, :], bottom)
    assert type(stacked) is field
    assert stacked.dtype == dtype
def test_vstack(field):
    """np.vstack() stacks 1-D field arrays as rows of a 2-D field array."""
    dtype = random.choice(field.dtypes)
    row1 = field.Random((3,), dtype=dtype)
    row2 = field.Random((3,), dtype=dtype)
    stacked = np.vstack((row1, row2))
    assert stacked.shape == (2, 3)
    assert type(stacked) is field
    assert stacked.dtype == dtype
def test_hstack(field):
    """np.hstack() joins 1-D field arrays end to end."""
    dtype = random.choice(field.dtypes)
    left = field.Random((3,), dtype=dtype)
    right = field.Random((3,), dtype=dtype)
    joined = np.hstack((left, right))
    assert joined.shape == (6,)
    assert type(joined) is field
    assert joined.dtype == dtype
###############################################################################
# Splitting arrays
###############################################################################
def test_split(field):
    """np.split() yields field sub-arrays with the original dtype."""
    dtype = random.choice(field.dtypes)
    whole = field.Random((6,), dtype=dtype)
    for half in np.split(whole, 2):
        assert half.shape == (3,)
        assert type(half) is field
        assert half.dtype == dtype
###############################################################################
# Tiling arrays
###############################################################################
def test_tile(field):
    """np.tile() repeats a field array along both axes."""
    dtype = random.choice(field.dtypes)
    base = field.Random((3,), dtype=dtype)
    tiled = np.tile(base, (2, 2))
    assert tiled.shape == (2, 6)
    assert type(tiled) is field
    assert tiled.dtype == dtype
def test_repeat(field):
    """np.repeat() repeats each element along an axis."""
    dtype = random.choice(field.dtypes)
    base = field.Random((2, 2), dtype=dtype)
    repeated = np.repeat(base, 3, axis=1)
    assert repeated.shape == (2, 6)
    assert type(repeated) is field
    assert repeated.dtype == dtype
###############################################################################
# Adding and removing elements
###############################################################################
def test_delete(field):
    """np.delete() removes a column while keeping the field type."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 4), dtype=dtype)
    trimmed = np.delete(original, 1, axis=1)
    assert trimmed.shape == (2, 3)
    assert array_equal(trimmed[:, 0], original[:, 0])
    assert array_equal(trimmed[:, 1:], original[:, 2:])
    assert type(trimmed) is field
    assert trimmed.dtype == dtype
def test_insert_field_element(field):
    """np.insert() of a field scalar adds a column of that element."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 4), dtype=dtype)
    element = field.Random()
    expanded = np.insert(original, 1, element, axis=1)
    assert expanded.shape == (2, 5)
    assert array_equal(expanded[:, 0], original[:, 0])
    assert np.all(expanded[:, 1] == element)
    assert array_equal(expanded[:, 2:], original[:, 1:])
    assert type(expanded) is field
    assert expanded.dtype == dtype
def test_insert_int(field):
    """np.insert() of an in-range Python int adds a column of that value."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 4), dtype=dtype)
    value = random.randint(0, field.order - 1)
    expanded = np.insert(original, 1, value, axis=1)
    assert expanded.shape == (2, 5)
    assert array_equal(expanded[:, 0], original[:, 0])
    assert np.all(expanded[:, 1] == value)
    assert array_equal(expanded[:, 2:], original[:, 1:])
    assert type(expanded) is field
    assert expanded.dtype == dtype
def test_insert_int_out_of_range(field):
    """Inserting the out-of-range integer ``field.order`` raises ValueError."""
    # Check both the smallest and largest supported dtypes.
    for dtype in [field.dtypes[0], field.dtypes[-1]]:
        a = field.Random((2, 4), dtype=dtype)
        with pytest.raises(ValueError):
            # Removed the unused `new_shape` and `c` locals from the original.
            np.insert(a, 1, field.order, axis=1)
def test_insert_int_list(field):
    """np.insert() of a Python list of ints adds that list as a column."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 4), dtype=dtype)
    column = [random.randint(0, field.order - 1) for _ in range(2)]
    expanded = np.insert(original, 1, column, axis=1)
    assert expanded.shape == (2, 5)
    assert array_equal(expanded[:, 0], original[:, 0])
    assert array_equal(expanded[:, 1], column)
    assert array_equal(expanded[:, 2:], original[:, 1:])
    assert type(expanded) is field
    assert expanded.dtype == dtype
def test_insert_int_array(field):
    """np.insert() of an int ndarray with a different dtype still works."""
    dtype = field.dtypes[0]
    original = field.Random((2, 4), dtype=dtype)
    column = randint(0, field.order, 2, field.dtypes[-1])
    expanded = np.insert(original, 1, column, axis=1)
    assert expanded.shape == (2, 5)
    assert array_equal(expanded[:, 0], original[:, 0])
    assert array_equal(expanded[:, 1], column)
    assert array_equal(expanded[:, 2:], original[:, 1:])
    assert type(expanded) is field
    assert expanded.dtype == dtype
def test_insert_int_array_out_of_range(field):
    """Inserting an ndarray with out-of-range values raises ValueError."""
    dtype = field.dtypes[0]
    a = field.Random((2, 4), dtype=dtype)
    b = randint(field.order, field.order + 2, 2, field.dtypes[-1])
    with pytest.raises(ValueError):
        # Removed the unused `new_shape` and `c` locals from the original.
        np.insert(a, 1, b, axis=1)
def test_append(field):
    """np.append() along axis 0 behaves like concatenation for field arrays."""
    dtype = random.choice(field.dtypes)
    top = field.Random((2, 3), dtype=dtype)
    bottom = field.Random((1, 3), dtype=dtype)
    combined = np.append(top, bottom, axis=0)
    assert combined.shape == (3, 3)
    assert array_equal(combined[0:2, :], top)
    assert array_equal(combined[2:, :], bottom)
    assert type(combined) is field
    assert combined.dtype == dtype
def test_resize(field):
    """np.resize() tiles the array out to the new shape, keeping the type."""
    dtype = random.choice(field.dtypes)
    original = field.Random((3,), dtype=dtype)
    resized = np.resize(original, (2, 3))
    assert resized.shape == (2, 3)
    assert array_equal(resized[0, :], original)
    assert array_equal(resized[1, :], original)
    assert type(resized) is field
    assert resized.dtype == dtype
    # TODO: Why does the in-place ndarray.resize() not "own its data"?
    # c = np.copy(a)
    # c.resize(new_shape)
    # assert c.shape == new_shape
    # assert array_equal(c[0,:], a)
    # assert array_equal(c[1,:], 0)  # NOTE: This is different than np.resize()
    # assert type(c) is field
    # assert c.dtype == dtype
def test_trim_zeros(field):
    """np.trim_zeros() strips leading and trailing zeros from a field array."""
    dtype = random.choice(field.dtypes)
    padded = field.Random((5,), low=1, dtype=dtype)
    padded[0:2] = 0
    padded[-1] = 0
    trimmed = np.trim_zeros(padded, trim="fb")
    assert trimmed.shape == (2,)
    assert array_equal(trimmed, padded[2:-1])
    assert type(trimmed) is field
def test_unique(field):
    """np.unique() returns the distinct field elements."""
    dtype = random.choice(field.dtypes)
    size = field.order if field.order < 10 else 10
    elements = field.Range(0, size, dtype=dtype)
    elements[0] = 1  # remove the 0 element so unique drops the duplicate 1
    distinct = np.unique(elements)
    assert array_equal(distinct, elements[1:])
    assert type(distinct) is field
###############################################################################
# Rearranging elements
###############################################################################
def test_flip(field):
    """np.flip() reverses a 1-D field array."""
    dtype = random.choice(field.dtypes)
    forward = field.Random((3,), dtype=dtype)
    backward = np.flip(forward)
    assert array_equal(backward, forward[::-1])
    assert type(backward) is field
def test_fliplr(field):
    """np.fliplr() reverses the columns of a 2-D field array."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 3), dtype=dtype)
    mirrored = np.fliplr(original)
    assert array_equal(mirrored, original[:, ::-1])
    assert type(mirrored) is field
def test_flipud(field):
    """np.flipud() reverses the rows of a 2-D field array."""
    dtype = random.choice(field.dtypes)
    original = field.Random((2, 3), dtype=dtype)
    mirrored = np.flipud(original)
    assert array_equal(mirrored, original[::-1, :])
    assert type(mirrored) is field
def test_roll(field):
    """np.roll() cyclically shifts the elements of a field array."""
    # Names `a` and `b` are kept: the following line still refers to `b`.
    a = field.Random((10,), dtype=random.choice(field.dtypes))
    b = np.roll(a, 2)
    assert array_equal(b[0:2], a[-2:])
    assert array_equal(b[2:], a[0:-2])
assert type(b) is field | [
"numpy.moveaxis",
"numpy.resize",
"numpy.ravel",
"numpy.shape",
"numpy.tile",
"numpy.unique",
"numpy.atleast_2d",
"random.randint",
"numpy.copy",
"numpy.transpose",
"numpy.insert",
"numpy.append",
"pytest.raises",
"numpy.reshape",
"numpy.repeat",
"numpy.roll",
"numpy.flipud",
"nump... | [((525, 552), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (538, 552), False, 'import random\n'), ((620, 630), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (627, 630), True, 'import numpy as np\n'), ((791, 818), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (804, 818), False, 'import random\n'), ((1399, 1426), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (1412, 1426), False, 'import random\n'), ((1635, 1659), 'numpy.reshape', 'np.reshape', (['a', 'new_shape'], {}), '(a, new_shape)\n', (1645, 1659), True, 'import numpy as np\n'), ((1785, 1812), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (1798, 1812), False, 'import random\n'), ((1902, 1913), 'numpy.ravel', 'np.ravel', (['a'], {}), '(a)\n', (1910, 1913), True, 'import numpy as np\n'), ((2041, 2068), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (2054, 2068), False, 'import random\n'), ((2487, 2514), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (2500, 2514), False, 'import random\n'), ((2608, 2628), 'numpy.moveaxis', 'np.moveaxis', (['a', '(0)', '(1)'], {}), '(a, 0, 1)\n', (2619, 2628), True, 'import numpy as np\n'), ((2758, 2785), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (2771, 2785), False, 'import random\n'), ((3016, 3031), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (3028, 3031), True, 'import numpy as np\n'), ((3394, 3421), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (3407, 3421), False, 'import random\n'), ((3507, 3523), 'numpy.atleast_1d', 'np.atleast_1d', (['a'], {}), '(a)\n', (3520, 3523), True, 'import numpy as np\n'), ((3654, 3681), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (3667, 3681), False, 'import random\n'), ((3772, 3788), 'numpy.atleast_2d', 'np.atleast_2d', (['a'], {}), 
'(a)\n', (3785, 3788), True, 'import numpy as np\n'), ((3919, 3946), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (3932, 3946), False, 'import random\n'), ((4039, 4055), 'numpy.atleast_3d', 'np.atleast_3d', (['a'], {}), '(a)\n', (4052, 4055), True, 'import numpy as np\n'), ((4188, 4215), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (4201, 4215), False, 'import random\n'), ((4304, 4333), 'numpy.broadcast_to', 'np.broadcast_to', (['a', 'new_shape'], {}), '(a, new_shape)\n', (4319, 4333), True, 'import numpy as np\n'), ((4495, 4522), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (4508, 4522), False, 'import random\n'), ((4613, 4626), 'numpy.squeeze', 'np.squeeze', (['a'], {}), '(a)\n', (4623, 4626), True, 'import numpy as np\n'), ((4936, 4963), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (4949, 4963), False, 'import random\n'), ((5118, 5150), 'numpy.concatenate', 'np.concatenate', (['(a1, a2)'], {'axis': '(0)'}), '((a1, a2), axis=0)\n', (5132, 5150), True, 'import numpy as np\n'), ((5349, 5376), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (5362, 5376), False, 'import random\n'), ((5529, 5548), 'numpy.vstack', 'np.vstack', (['(a1, a2)'], {}), '((a1, a2))\n', (5538, 5548), True, 'import numpy as np\n'), ((5674, 5701), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (5687, 5701), False, 'import random\n'), ((5853, 5872), 'numpy.hstack', 'np.hstack', (['(a1, a2)'], {}), '((a1, a2))\n', (5862, 5872), True, 'import numpy as np\n'), ((6177, 6204), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (6190, 6204), False, 'import random\n'), ((6297, 6311), 'numpy.split', 'np.split', (['a', '(2)'], {}), '(a, 2)\n', (6305, 6311), True, 'import numpy as np\n'), ((6707, 6734), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (6720, 
6734), False, 'import random\n'), ((6823, 6841), 'numpy.tile', 'np.tile', (['a', '(2, 2)'], {}), '(a, (2, 2))\n', (6830, 6841), True, 'import numpy as np\n'), ((6967, 6994), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (6980, 6994), False, 'import random\n'), ((7084, 7107), 'numpy.repeat', 'np.repeat', (['a', '(3)'], {'axis': '(1)'}), '(a, 3, axis=1)\n', (7093, 7107), True, 'import numpy as np\n'), ((7426, 7453), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (7439, 7453), False, 'import random\n'), ((7543, 7566), 'numpy.delete', 'np.delete', (['a', '(1)'], {'axis': '(1)'}), '(a, 1, axis=1)\n', (7552, 7566), True, 'import numpy as np\n'), ((7787, 7814), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (7800, 7814), False, 'import random\n'), ((7927, 7953), 'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (7936, 7953), True, 'import numpy as np\n'), ((8036, 8056), 'numpy.all', 'np.all', (['(c[:, 1] == b)'], {}), '(c[:, 1] == b)\n', (8042, 8056), True, 'import numpy as np\n'), ((8195, 8222), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (8208, 8222), False, 'import random\n'), ((8312, 8346), 'random.randint', 'random.randint', (['(0)', '(field.order - 1)'], {}), '(0, field.order - 1)\n', (8326, 8346), False, 'import random\n'), ((8355, 8381), 'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (8364, 8381), True, 'import numpy as np\n'), ((8464, 8484), 'numpy.all', 'np.all', (['(c[:, 1] == b)'], {}), '(c[:, 1] == b)\n', (8470, 8484), True, 'import numpy as np\n'), ((8925, 8952), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (8938, 8952), False, 'import random\n'), ((9105, 9131), 'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (9114, 9131), True, 'import numpy as np\n'), ((9540, 9566), 
'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (9549, 9566), True, 'import numpy as np\n'), ((10105, 10132), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (10118, 10132), False, 'import random\n'), ((10287, 10312), 'numpy.append', 'np.append', (['a1', 'a2'], {'axis': '(0)'}), '(a1, a2, axis=0)\n', (10296, 10312), True, 'import numpy as np\n'), ((10512, 10539), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (10525, 10539), False, 'import random\n'), ((10629, 10652), 'numpy.resize', 'np.resize', (['a', 'new_shape'], {}), '(a, new_shape)\n', (10638, 10652), True, 'import numpy as np\n'), ((11152, 11179), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (11165, 11179), False, 'import random\n'), ((11303, 11330), 'numpy.trim_zeros', 'np.trim_zeros', (['a'], {'trim': '"""fb"""'}), "(a, trim='fb')\n", (11316, 11330), True, 'import numpy as np\n'), ((11464, 11491), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (11477, 11491), False, 'import random\n'), ((11626, 11638), 'numpy.unique', 'np.unique', (['a'], {}), '(a)\n', (11635, 11638), True, 'import numpy as np\n'), ((11920, 11947), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (11933, 11947), False, 'import random\n'), ((12014, 12024), 'numpy.flip', 'np.flip', (['a'], {}), '(a)\n', (12021, 12024), True, 'import numpy as np\n'), ((12126, 12153), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (12139, 12153), False, 'import random\n'), ((12221, 12233), 'numpy.fliplr', 'np.fliplr', (['a'], {}), '(a)\n', (12230, 12233), True, 'import numpy as np\n'), ((12337, 12364), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (12350, 12364), False, 'import random\n'), ((12432, 12444), 'numpy.flipud', 'np.flipud', (['a'], {}), '(a)\n', (12441, 12444), True, 'import numpy as np\n'), 
((12546, 12573), 'random.choice', 'random.choice', (['field.dtypes'], {}), '(field.dtypes)\n', (12559, 12573), False, 'import random\n'), ((12641, 12654), 'numpy.roll', 'np.roll', (['a', '(2)'], {}), '(a, 2)\n', (12648, 12654), True, 'import numpy as np\n'), ((914, 925), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (922, 925), True, 'import numpy as np\n'), ((1033, 1044), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1041, 1044), True, 'import numpy as np\n'), ((1155, 1166), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1163, 1166), True, 'import numpy as np\n'), ((9043, 9077), 'random.randint', 'random.randint', (['(0)', '(field.order - 1)'], {}), '(0, field.order - 1)\n', (9057, 9077), False, 'import random\n'), ((10001, 10026), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10014, 10026), False, 'import pytest\n'), ((10040, 10066), 'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (10049, 10066), True, 'import numpy as np\n'), ((8808, 8833), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8821, 8833), False, 'import pytest\n'), ((8851, 8877), 'numpy.insert', 'np.insert', (['a', '(1)', 'b'], {'axis': '(1)'}), '(a, 1, b, axis=1)\n', (8860, 8877), True, 'import numpy as np\n')] |
import logging
import cv2
import PIL
from PIL import Image
import scipy
import scipy.ndimage
import numpy as np
from face_utils.Face import Face
from skimage.transform._geometric import _umeyama
def get_face_mask(face: Face, mask_type,
erosion_size=None,
dilation_kernel=None,
blur_size: int = None):
"""
Return mask of mask_type for the given face.
:param face:
:param mask_type:
:param erosion_size:
:param dilation_kernel:
:param blur_size:
:return:
"""
if mask_type == 'hull':
# we can rotate the hull mask obtained from original image
# or re-detect face from aligned image, and get mask then
mask = get_hull_mask(face, 255)
elif mask_type == 'rect':
face_img = face.get_face_img()
mask = np.zeros(face_img.shape, dtype=face_img.dtype)+255
else:
logging.error("No such mask type: {}".format(mask_type))
raise Exception("No such mask type: {}".format(mask_type))
# apply mask modifiers
if erosion_size:
erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, erosion_size)
mask = cv2.erode(mask, erosion_kernel, iterations=1)
if dilation_kernel:
mask = cv2.dilate(mask, dilation_kernel, iterations=1)
if blur_size:
mask = cv2.blur(mask, (blur_size, blur_size))
return mask
def get_hull_mask(from_face: Face, fill_val=1):
"""
:param from_face:
:param fill_val: generally 1 or 255
:return:
"""
mask = np.zeros(from_face.img.shape, dtype=from_face.img.dtype)
hull = cv2.convexHull(np.array(from_face.landmarks).reshape((-1, 2)).astype(int)).flatten().reshape((
-1, 2))
hull = [(p[0], p[1]) for p in hull]
cv2.fillConvexPoly(mask, np.int32(hull), (fill_val, fill_val, fill_val))
return mask
#################################
# ALIGNMENT #
#################################
mean_face_x = np.array([
0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
0.553364, 0.490127, 0.42689])
mean_face_y = np.array([
0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
0.784792, 0.824182, 0.831803, 0.824182])
default_landmarks_2D = np.stack([mean_face_x, mean_face_y], axis=1)
# other implementation option see
# https://matthewearl.github.io/2015/07/28/switching-eds-with-python/
def align_face(face, boundary_resize_factor=None, invert=False, img=None):
if img is None:
face_img = face.get_face_img(boundary_resize_factor=boundary_resize_factor)
else:
face_img = img
src_landmarks = np.array([(x - face.rect.left, y - face.rect.top) for (x, y) in face.landmarks])
# need to resize default ones to match given head size
(w, h) = face.get_face_size()
translation = None
if boundary_resize_factor:
img_w, img_h = face_img.shape[:2][::-1]
translation = (img_w - w, img_h - h)
#w += translation[0]
#h += translation[1]
# w/1.5 h/1.5
scaled_default_landmarks = np.array([(int(x * w), int(y * h)) for (x, y) in default_landmarks_2D])
# default aligned face has only 51 landmarks, so we remove
# first 17 from the given one in order to align
src_landmarks = src_landmarks[17:]
target_landmarks = scaled_default_landmarks
if invert:
align_matrix = get_align_matrix(target_landmarks, src_landmarks, translation)
else:
align_matrix = get_align_matrix(src_landmarks, target_landmarks, translation)
aligned_img = cv2.warpAffine(face_img,
align_matrix,
(w, h),
borderMode=cv2.BORDER_REPLICATE)
return aligned_img, align_matrix
def get_align_matrix(src_landmarks, target_landmarks, translation: tuple = None):
align_matrix = _umeyama(src_landmarks, target_landmarks, True)[:2]
if translation:
align_matrix[0, 2] -= translation[0]//2
align_matrix[1, 2] -= translation[1]//2
return align_matrix
# Align function from FFHQ dataset pre-processing step
# https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
def ffhq_align(face, output_size=1024, transform_size=4096, enable_padding=True,
boundary_resize_factor=None, img=None):
if img is None:
face_img = face.get_face_img(boundary_resize_factor=boundary_resize_factor)
else:
face_img = img
face_landmarks = np.array([(x - face.rect.left, y - face.rect.top) for (x, y) in face.landmarks])
lm = np.array(face_landmarks)
lm_chin = lm[0: 17] # left-right
lm_eyebrow_left = lm[17: 22] # left-right
lm_eyebrow_right = lm[22: 27] # left-right
lm_nose = lm[27: 31] # top-down
lm_nostrils = lm[31: 36] # top-down
lm_eye_left = lm[36: 42] # left-clockwise
lm_eye_right = lm[42: 48] # left-clockwise
lm_mouth_outer = lm[48: 60] # left-clockwise
lm_mouth_inner = lm[60: 68] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
img = Image.fromarray(np.uint8(face_img))
# Shrink.
shrink = int(np.floor(qsize / output_size * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(),
PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
return np.array(img) | [
"numpy.floor",
"numpy.clip",
"cv2.warpAffine",
"numpy.mean",
"cv2.erode",
"cv2.dilate",
"scipy.ndimage.gaussian_filter",
"skimage.transform._geometric._umeyama",
"numpy.int32",
"numpy.stack",
"numpy.uint8",
"numpy.median",
"numpy.flipud",
"numpy.hypot",
"cv2.getStructuringElement",
"nu... | [((1987, 2540), 'numpy.array', 'np.array', (['[0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, \n 0.799124, 0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, \n 0.36688, 0.426036, 0.490127, 0.554217, 0.613373, 0.121737, 0.187122, \n 0.265825, 0.334606, 0.260918, 0.182743, 0.645647, 0.714428, 0.793132, \n 0.858516, 0.79751, 0.719335, 0.254149, 0.340985, 0.428858, 0.490127, \n 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, 0.490127, 0.423532, \n 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, 0.553364, \n 0.490127, 0.42689]'], {}), '([0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, \n 0.689483, 0.799124, 0.904991, 0.98004, 0.490127, 0.490127, 0.490127, \n 0.490127, 0.36688, 0.426036, 0.490127, 0.554217, 0.613373, 0.121737, \n 0.187122, 0.265825, 0.334606, 0.260918, 0.182743, 0.645647, 0.714428, \n 0.793132, 0.858516, 0.79751, 0.719335, 0.254149, 0.340985, 0.428858, \n 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, 0.490127, \n 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, \n 0.553364, 0.490127, 0.42689])\n', (1995, 2540), True, 'import numpy as np\n'), ((2689, 3249), 'numpy.array', 'np.array', (['[0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,\n 0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, \n 0.587326, 0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, \n 0.179852, 0.231733, 0.245099, 0.244077, 0.231733, 0.179852, 0.178758, \n 0.216423, 0.244077, 0.245099, 0.780233, 0.745405, 0.727388, 0.742578, \n 0.727388, 0.745405, 0.780233, 0.864805, 0.902192, 0.909281, 0.902192, \n 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, 0.784792, 0.824182, \n 0.831803, 0.824182]'], {}), '([0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, \n 0.0344891, 0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805,\n 0.515625, 0.587326, 0.609345, 0.628106, 0.609345, 0.587326, 0.216423, \n 0.178758, 
0.179852, 0.231733, 0.245099, 0.244077, 0.231733, 0.179852, \n 0.178758, 0.216423, 0.244077, 0.245099, 0.780233, 0.745405, 0.727388, \n 0.742578, 0.727388, 0.745405, 0.780233, 0.864805, 0.902192, 0.909281, \n 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, 0.784792, \n 0.824182, 0.831803, 0.824182])\n', (2697, 3249), True, 'import numpy as np\n'), ((3408, 3452), 'numpy.stack', 'np.stack', (['[mean_face_x, mean_face_y]'], {'axis': '(1)'}), '([mean_face_x, mean_face_y], axis=1)\n', (3416, 3452), True, 'import numpy as np\n'), ((1553, 1609), 'numpy.zeros', 'np.zeros', (['from_face.img.shape'], {'dtype': 'from_face.img.dtype'}), '(from_face.img.shape, dtype=from_face.img.dtype)\n', (1561, 1609), True, 'import numpy as np\n'), ((3792, 3870), 'numpy.array', 'np.array', (['[(x - face.rect.left, y - face.rect.top) for x, y in face.landmarks]'], {}), '([(x - face.rect.left, y - face.rect.top) for x, y in face.landmarks])\n', (3800, 3870), True, 'import numpy as np\n'), ((4712, 4791), 'cv2.warpAffine', 'cv2.warpAffine', (['face_img', 'align_matrix', '(w, h)'], {'borderMode': 'cv2.BORDER_REPLICATE'}), '(face_img, align_matrix, (w, h), borderMode=cv2.BORDER_REPLICATE)\n', (4726, 4791), False, 'import cv2\n'), ((5647, 5725), 'numpy.array', 'np.array', (['[(x - face.rect.left, y - face.rect.top) for x, y in face.landmarks]'], {}), '([(x - face.rect.left, y - face.rect.top) for x, y in face.landmarks])\n', (5655, 5725), True, 'import numpy as np\n'), ((5738, 5762), 'numpy.array', 'np.array', (['face_landmarks'], {}), '(face_landmarks)\n', (5746, 5762), True, 'import numpy as np\n'), ((6220, 6248), 'numpy.mean', 'np.mean', (['lm_eye_left'], {'axis': '(0)'}), '(lm_eye_left, axis=0)\n', (6227, 6248), True, 'import numpy as np\n'), ((6265, 6294), 'numpy.mean', 'np.mean', (['lm_eye_right'], {'axis': '(0)'}), '(lm_eye_right, axis=0)\n', (6272, 6294), True, 'import numpy as np\n'), ((6638, 6650), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (6646, 6650), True, 'import 
numpy as np\n'), ((6803, 6857), 'numpy.stack', 'np.stack', (['[c - x - y, c - x + y, c + x + y, c + x - y]'], {}), '([c - x - y, c - x + y, c + x + y, c + x - y])\n', (6811, 6857), True, 'import numpy as np\n'), ((9100, 9113), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9108, 9113), True, 'import numpy as np\n'), ((1104, 1162), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'erosion_size'], {}), '(cv2.MORPH_ELLIPSE, erosion_size)\n', (1129, 1162), False, 'import cv2\n'), ((1178, 1223), 'cv2.erode', 'cv2.erode', (['mask', 'erosion_kernel'], {'iterations': '(1)'}), '(mask, erosion_kernel, iterations=1)\n', (1187, 1223), False, 'import cv2\n'), ((1263, 1310), 'cv2.dilate', 'cv2.dilate', (['mask', 'dilation_kernel'], {'iterations': '(1)'}), '(mask, dilation_kernel, iterations=1)\n', (1273, 1310), False, 'import cv2\n'), ((1344, 1382), 'cv2.blur', 'cv2.blur', (['mask', '(blur_size, blur_size)'], {}), '(mask, (blur_size, blur_size))\n', (1352, 1382), False, 'import cv2\n'), ((1803, 1817), 'numpy.int32', 'np.int32', (['hull'], {}), '(hull)\n', (1811, 1817), True, 'import numpy as np\n'), ((5032, 5079), 'skimage.transform._geometric._umeyama', '_umeyama', (['src_landmarks', 'target_landmarks', '(True)'], {}), '(src_landmarks, target_landmarks, True)\n', (5040, 5079), False, 'from skimage.transform._geometric import _umeyama\n'), ((6732, 6744), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (6741, 6744), True, 'import numpy as np\n'), ((6870, 6882), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (6878, 6882), True, 'import numpy as np\n'), ((6914, 6932), 'numpy.uint8', 'np.uint8', (['face_img'], {}), '(face_img)\n', (6922, 6932), True, 'import numpy as np\n'), ((6966, 7001), 'numpy.floor', 'np.floor', (['(qsize / output_size * 0.5)'], {}), '(qsize / output_size * 0.5)\n', (6974, 7001), True, 'import numpy as np\n'), ((6595, 6618), 'numpy.flipud', 'np.flipud', (['eye_to_mouth'], {}), '(eye_to_mouth)\n', (6604, 6618), True, 
'import numpy as np\n'), ((6664, 6685), 'numpy.hypot', 'np.hypot', (['*eye_to_eye'], {}), '(*eye_to_eye)\n', (6672, 6685), True, 'import numpy as np\n'), ((6693, 6716), 'numpy.hypot', 'np.hypot', (['*eye_to_mouth'], {}), '(*eye_to_mouth)\n', (6701, 6716), True, 'import numpy as np\n'), ((7259, 7279), 'numpy.rint', 'np.rint', (['(qsize * 0.1)'], {}), '(qsize * 0.1)\n', (7266, 7279), True, 'import numpy as np\n'), ((8152, 8167), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (8162, 8167), True, 'import numpy as np\n'), ((8598, 8633), 'numpy.clip', 'np.clip', (['(mask * 3.0 + 1.0)', '(0.0)', '(1.0)'], {}), '(mask * 3.0 + 1.0, 0.0, 1.0)\n', (8605, 8633), True, 'import numpy as np\n'), ((8687, 8710), 'numpy.clip', 'np.clip', (['mask', '(0.0)', '(1.0)'], {}), '(mask, 0.0, 1.0)\n', (8694, 8710), True, 'import numpy as np\n'), ((837, 883), 'numpy.zeros', 'np.zeros', (['face_img.shape'], {'dtype': 'face_img.dtype'}), '(face_img.shape, dtype=face_img.dtype)\n', (845, 883), True, 'import numpy as np\n'), ((8108, 8128), 'numpy.rint', 'np.rint', (['(qsize * 0.3)'], {}), '(qsize * 0.3)\n', (8115, 8128), True, 'import numpy as np\n'), ((8537, 8588), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['img', '[blur, blur, 0]'], {}), '(img, [blur, blur, 0])\n', (8566, 8588), False, 'import scipy\n'), ((8650, 8677), 'numpy.median', 'np.median', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (8659, 8677), True, 'import numpy as np\n'), ((8762, 8774), 'numpy.rint', 'np.rint', (['img'], {}), '(img)\n', (8769, 8774), True, 'import numpy as np\n'), ((8336, 8349), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (8346, 8349), True, 'import numpy as np\n'), ((8360, 8381), 'numpy.float32', 'np.float32', (['(w - 1 - x)'], {}), '(w - 1 - x)\n', (8370, 8381), True, 'import numpy as np\n'), ((8436, 8449), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (8446, 8449), True, 'import numpy as np\n'), ((8460, 8481), 'numpy.float32', 'np.float32', (['(h - 1 
- y)'], {}), '(h - 1 - y)\n', (8470, 8481), True, 'import numpy as np\n'), ((1637, 1666), 'numpy.array', 'np.array', (['from_face.landmarks'], {}), '(from_face.landmarks)\n', (1645, 1666), True, 'import numpy as np\n')] |
import h5py
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from astropy.io import fits
f = h5py.File('archive.hdf5', 'a')
dset = f['images']
composite = np.sum(f['images'][:], axis=2)
# composite = composite.copy() - np.min(composite) + 1
# plt.hist(composite.ravel(), log=True)
plt.imshow(np.log(composite), vmin=13, vmax=15, origin='lower')
plt.show() | [
"h5py.File",
"numpy.sum",
"numpy.log",
"matplotlib.pyplot.show"
] | [((119, 149), 'h5py.File', 'h5py.File', (['"""archive.hdf5"""', '"""a"""'], {}), "('archive.hdf5', 'a')\n", (128, 149), False, 'import h5py\n'), ((182, 212), 'numpy.sum', 'np.sum', (["f['images'][:]"], {'axis': '(2)'}), "(f['images'][:], axis=2)\n", (188, 212), True, 'import numpy as np\n'), ((374, 384), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (382, 384), True, 'import matplotlib.pyplot as plt\n'), ((321, 338), 'numpy.log', 'np.log', (['composite'], {}), '(composite)\n', (327, 338), True, 'import numpy as np\n')] |
import os
import shutil
import gym
import numpy as np
import pytest
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3, HerReplayBuffer
from stable_baselines3.common.callbacks import (
CallbackList,
CheckpointCallback,
EvalCallback,
EveryNTimesteps,
StopTrainingOnMaxEpisodes,
StopTrainingOnRewardThreshold,
)
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.envs import BitFlippingEnv
from stable_baselines3.common.vec_env import DummyVecEnv
@pytest.mark.parametrize("model_class", [A2C, PPO, SAC, TD3, DQN, DDPG])
def test_callbacks(tmp_path, model_class):
log_folder = tmp_path / "logs/callbacks/"
# DQN only support discrete actions
env_name = select_env(model_class)
# Create RL model
# Small network for fast test
model = model_class("MlpPolicy", env_name, policy_kwargs=dict(net_arch=[32]))
checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=log_folder)
eval_env = gym.make(env_name)
# Stop training if the performance is good enough
callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-1200, verbose=1)
eval_callback = EvalCallback(
eval_env,
callback_on_new_best=callback_on_best,
best_model_save_path=log_folder,
log_path=log_folder,
eval_freq=100,
warn=False,
)
# Equivalent to the `checkpoint_callback`
# but here in an event-driven manner
checkpoint_on_event = CheckpointCallback(save_freq=1, save_path=log_folder, name_prefix="event")
event_callback = EveryNTimesteps(n_steps=500, callback=checkpoint_on_event)
# Stop training if max number of episodes is reached
callback_max_episodes = StopTrainingOnMaxEpisodes(max_episodes=100, verbose=1)
callback = CallbackList([checkpoint_callback, eval_callback, event_callback, callback_max_episodes])
model.learn(500, callback=callback)
# Check access to local variables
assert model.env.observation_space.contains(callback.locals["new_obs"][0])
# Check that the child callback was called
assert checkpoint_callback.locals["new_obs"] is callback.locals["new_obs"]
assert event_callback.locals["new_obs"] is callback.locals["new_obs"]
assert checkpoint_on_event.locals["new_obs"] is callback.locals["new_obs"]
# Check that internal callback counters match models' counters
assert event_callback.num_timesteps == model.num_timesteps
assert event_callback.n_calls == model.num_timesteps
model.learn(500, callback=None)
# Transform callback into a callback list automatically
model.learn(500, callback=[checkpoint_callback, eval_callback])
# Automatic wrapping, old way of doing callbacks
model.learn(500, callback=lambda _locals, _globals: True)
# Testing models that support multiple envs
if model_class in [A2C, PPO]:
max_episodes = 1
n_envs = 2
# Pendulum-v0 has a timelimit of 200 timesteps
max_episode_length = 200
envs = make_vec_env(env_name, n_envs=n_envs, seed=0)
model = model_class("MlpPolicy", envs, policy_kwargs=dict(net_arch=[32]))
callback_max_episodes = StopTrainingOnMaxEpisodes(max_episodes=max_episodes, verbose=1)
callback = CallbackList([callback_max_episodes])
model.learn(1000, callback=callback)
# Check that the actual number of episodes and timesteps per env matches the expected one
episodes_per_env = callback_max_episodes.n_episodes // n_envs
assert episodes_per_env == max_episodes
timesteps_per_env = model.num_timesteps // n_envs
assert timesteps_per_env == max_episode_length
if os.path.exists(log_folder):
shutil.rmtree(log_folder)
def select_env(model_class) -> str:
if model_class is DQN:
return "CartPole-v0"
else:
return "Pendulum-v0"
def test_eval_success_logging(tmp_path):
n_bits = 2
env = BitFlippingEnv(n_bits=n_bits)
eval_env = DummyVecEnv([lambda: BitFlippingEnv(n_bits=n_bits)])
eval_callback = EvalCallback(
eval_env,
eval_freq=250,
log_path=tmp_path,
warn=False,
)
model = DQN(
"MultiInputPolicy",
env,
replay_buffer_class=HerReplayBuffer,
learning_starts=100,
seed=0,
replay_buffer_kwargs=dict(max_episode_length=n_bits),
)
model.learn(500, callback=eval_callback)
assert len(eval_callback._is_success_buffer) > 0
# More than 50% success rate
assert np.mean(eval_callback._is_success_buffer) > 0.5
| [
"stable_baselines3.common.callbacks.CheckpointCallback",
"stable_baselines3.common.env_util.make_vec_env",
"gym.make",
"shutil.rmtree",
"stable_baselines3.common.callbacks.CallbackList",
"os.path.exists",
"stable_baselines3.common.envs.BitFlippingEnv",
"stable_baselines3.common.callbacks.EvalCallback"... | [((521, 592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_class"""', '[A2C, PPO, SAC, TD3, DQN, DDPG]'], {}), "('model_class', [A2C, PPO, SAC, TD3, DQN, DDPG])\n", (544, 592), False, 'import pytest\n'), ((927, 983), 'stable_baselines3.common.callbacks.CheckpointCallback', 'CheckpointCallback', ([], {'save_freq': '(1000)', 'save_path': 'log_folder'}), '(save_freq=1000, save_path=log_folder)\n', (945, 983), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1000, 1018), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1008, 1018), False, 'import gym\n'), ((1096, 1160), 'stable_baselines3.common.callbacks.StopTrainingOnRewardThreshold', 'StopTrainingOnRewardThreshold', ([], {'reward_threshold': '(-1200)', 'verbose': '(1)'}), '(reward_threshold=-1200, verbose=1)\n', (1125, 1160), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1182, 1332), 'stable_baselines3.common.callbacks.EvalCallback', 'EvalCallback', (['eval_env'], {'callback_on_new_best': 'callback_on_best', 'best_model_save_path': 'log_folder', 'log_path': 'log_folder', 'eval_freq': '(100)', 'warn': '(False)'}), '(eval_env, callback_on_new_best=callback_on_best,\n best_model_save_path=log_folder, log_path=log_folder, eval_freq=100,\n warn=False)\n', (1194, 1332), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1493, 1567), 'stable_baselines3.common.callbacks.CheckpointCallback', 'CheckpointCallback', ([], {'save_freq': '(1)', 'save_path': 'log_folder', 'name_prefix': '"""event"""'}), "(save_freq=1, save_path=log_folder, 
name_prefix='event')\n", (1511, 1567), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1590, 1648), 'stable_baselines3.common.callbacks.EveryNTimesteps', 'EveryNTimesteps', ([], {'n_steps': '(500)', 'callback': 'checkpoint_on_event'}), '(n_steps=500, callback=checkpoint_on_event)\n', (1605, 1648), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1735, 1789), 'stable_baselines3.common.callbacks.StopTrainingOnMaxEpisodes', 'StopTrainingOnMaxEpisodes', ([], {'max_episodes': '(100)', 'verbose': '(1)'}), '(max_episodes=100, verbose=1)\n', (1760, 1789), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((1806, 1899), 'stable_baselines3.common.callbacks.CallbackList', 'CallbackList', (['[checkpoint_callback, eval_callback, event_callback, callback_max_episodes]'], {}), '([checkpoint_callback, eval_callback, event_callback,\n callback_max_episodes])\n', (1818, 1899), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((3696, 3722), 'os.path.exists', 'os.path.exists', (['log_folder'], {}), '(log_folder)\n', (3710, 3722), False, 'import os\n'), ((3959, 3988), 'stable_baselines3.common.envs.BitFlippingEnv', 'BitFlippingEnv', ([], {'n_bits': 'n_bits'}), '(n_bits=n_bits)\n', (3973, 3988), False, 'from stable_baselines3.common.envs import BitFlippingEnv\n'), ((4077, 4145), 'stable_baselines3.common.callbacks.EvalCallback', 'EvalCallback', (['eval_env'], {'eval_freq': '(250)', 'log_path': 'tmp_path', 'warn': '(False)'}), '(eval_env, eval_freq=250, 
log_path=tmp_path, warn=False)\n', (4089, 4145), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((3030, 3075), 'stable_baselines3.common.env_util.make_vec_env', 'make_vec_env', (['env_name'], {'n_envs': 'n_envs', 'seed': '(0)'}), '(env_name, n_envs=n_envs, seed=0)\n', (3042, 3075), False, 'from stable_baselines3.common.env_util import make_vec_env\n'), ((3192, 3255), 'stable_baselines3.common.callbacks.StopTrainingOnMaxEpisodes', 'StopTrainingOnMaxEpisodes', ([], {'max_episodes': 'max_episodes', 'verbose': '(1)'}), '(max_episodes=max_episodes, verbose=1)\n', (3217, 3255), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((3275, 3312), 'stable_baselines3.common.callbacks.CallbackList', 'CallbackList', (['[callback_max_episodes]'], {}), '([callback_max_episodes])\n', (3287, 3312), False, 'from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, StopTrainingOnMaxEpisodes, StopTrainingOnRewardThreshold\n'), ((3732, 3757), 'shutil.rmtree', 'shutil.rmtree', (['log_folder'], {}), '(log_folder)\n', (3745, 3757), False, 'import shutil\n'), ((4543, 4584), 'numpy.mean', 'np.mean', (['eval_callback._is_success_buffer'], {}), '(eval_callback._is_success_buffer)\n', (4550, 4584), True, 'import numpy as np\n'), ((4025, 4054), 'stable_baselines3.common.envs.BitFlippingEnv', 'BitFlippingEnv', ([], {'n_bits': 'n_bits'}), '(n_bits=n_bits)\n', (4039, 4054), False, 'from stable_baselines3.common.envs import BitFlippingEnv\n')] |
import os
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
import torch
from tqdm import tqdm, trange
from nerf import cumprod_exclusive, get_minibatches, get_ray_bundle, positional_encoding
def compute_query_points_from_rays(
    ray_origins: torch.Tensor,
    ray_directions: torch.Tensor,
    near_thresh: float,
    far_thresh: float,
    num_samples: int,
    randomize: Optional[bool] = True,
) -> (torch.Tensor, torch.Tensor):
    r"""Sample 3D query points along a "bundle" of rays.

    Depths are drawn between `near_thresh` and `far_thresh`; with
    `randomize=True` each uniformly spaced depth is jittered within its bin,
    otherwise the samples are exactly uniformly spaced.

    Args:
        ray_origins (torch.Tensor): Ray origins, shape :math:`(width, height, 3)`.
        ray_directions (torch.Tensor): Ray directions, shape :math:`(width, height, 3)`.
        near_thresh (float): Nearest depth of interest.
        far_thresh (float): Farthest depth of interest.
        num_samples (int): Number of depth samples per ray.
        randomize (optional, bool): Whether to jitter the depth samples.

    Returns:
        query_points (torch.Tensor): Shape :math:`(width, height, num_samples, 3)`.
        depth_values (torch.Tensor): Shape :math:`(num_samples)` when
            `randomize=False`; :math:`(width, height, num_samples)` otherwise.
    """
    # Uniformly spaced depths, on the same device/dtype as the ray origins.
    depth_values = torch.linspace(near_thresh, far_thresh, num_samples).to(ray_origins)
    if randomize is True:
        # One independent jitter per ray per sample: (width, height, num_samples).
        noise_shape = list(ray_origins.shape[:-1]) + [num_samples]
        depth_values = (
            depth_values
            + torch.rand(noise_shape).to(ray_origins)
            * (far_thresh - near_thresh)
            / num_samples
        )
    # Lift each depth t to a 3D point: origin + t * direction.
    # (width, height, 1, 3) + (width, height, 1, 3) * (..., num_samples, 1)
    query_points = (
        ray_origins[..., None, :]
        + ray_directions[..., None, :] * depth_values[..., :, None]
    )
    return query_points, depth_values
def render_volume_density(
    radiance_field: torch.Tensor, ray_origins: torch.Tensor, depth_values: torch.Tensor
) -> (torch.Tensor, torch.Tensor, torch.Tensor):
    r"""Differentiably composite a radiance field into RGB, depth and accumulation maps.

    Args:
        radiance_field (torch.Tensor): Per-sample RGB + density
            (shape: :math:`(width, height, num_samples, 4)`).
        ray_origins (torch.Tensor): Ray origins (shape: :math:`(width, height, 3)`);
            used here only to pick the dtype/device of the padding constant.
        depth_values (torch.Tensor): Sampled depths along each ray
            (shape: :math:`(num_samples)`, or broadcastable per-ray depths).

    Returns:
        rgb_map (torch.Tensor): Rendered RGB image (shape: :math:`(width, height, 3)`).
        depth_map (torch.Tensor): Weighted-mean depth per ray (shape: :math:`(width, height)`).
        acc_map (torch.Tensor): Sum of compositing weights per ray (accumulated
            opacity; shape: :math:`(width, height)`).
    """
    # Density must be non-negative; colors are squashed into [0, 1].
    density = torch.nn.functional.relu(radiance_field[..., 3])
    colors = torch.sigmoid(radiance_field[..., :3])
    # Inter-sample distances; the last interval is padded with a huge value so
    # the final sample can absorb any remaining transmittance.
    far_pad = torch.tensor([1e10], dtype=ray_origins.dtype, device=ray_origins.device)
    deltas = torch.cat(
        (
            depth_values[..., 1:] - depth_values[..., :-1],
            far_pad.expand(depth_values[..., :1].shape),
        ),
        dim=-1,
    )
    # Classic alpha compositing: w_i = alpha_i * prod_{j<i} (1 - alpha_j).
    # The 1e-10 keeps the cumulative product away from exactly zero.
    alphas = 1.0 - torch.exp(-density * deltas)
    weights = alphas * cumprod_exclusive(1.0 - alphas + 1e-10)
    rgb_map = (weights[..., None] * colors).sum(dim=-2)
    depth_map = (weights * depth_values).sum(dim=-1)
    acc_map = weights.sum(-1)
    return rgb_map, depth_map, acc_map
# One iteration of TinyNeRF (forward pass).
def run_one_iter_of_tinynerf(
    height,
    width,
    focal_length,
    tform_cam2world,
    near_thresh,
    far_thresh,
    depth_samples_per_ray,
    encoding_function,
    get_minibatches_function,
    chunksize,
    model,
    encoding_function_args,
):
    """Render one full image through the model and return the predicted RGB map."""
    # Rays through every pixel of the image plane.
    ray_origins, ray_directions = get_ray_bundle(
        height, width, focal_length, tform_cam2world
    )
    # Depth samples along each ray, lifted to 3D query points.
    query_points, depth_values = compute_query_points_from_rays(
        ray_origins, ray_directions, near_thresh, far_thresh, depth_samples_per_ray
    )
    # Flatten and encode the query points (default: positional encoding).
    encoded_points = encoding_function(
        query_points.reshape((-1, 3)), encoding_function_args
    )
    # Query the model chunk-by-chunk (bounds peak memory), then stitch the
    # per-chunk predictions back together.
    batches = get_minibatches_function(encoded_points, chunksize=chunksize)
    radiance_field_flattened = torch.cat([model(batch) for batch in batches], dim=0)
    # Restore the (width, height, num_samples, 4) radiance-field layout.
    radiance_field = torch.reshape(
        radiance_field_flattened, list(query_points.shape[:-1]) + [4]
    )
    # Volume-render the radiance field back into an RGB image.
    rgb_predicted, _, _ = render_volume_density(
        radiance_field, ray_origins, depth_values
    )
    return rgb_predicted
class VeryTinyNerfModel(torch.nn.Module):
    r"""A minimal three-layer MLP NeRF model.

    Input is a positionally-encoded 3D point of dimension
    ``3 + 3 * 2 * num_encoding_functions`` (default 39); output is 4 values
    (RGB + density) per point.
    """

    def __init__(self, filter_size=128, num_encoding_functions=6):
        super(VeryTinyNerfModel, self).__init__()
        # 3 raw coordinates + sin/cos pairs per coordinate per frequency.
        in_features = 3 + 3 * 2 * num_encoding_functions
        self.layer1 = torch.nn.Linear(in_features, filter_size)
        self.layer2 = torch.nn.Linear(filter_size, filter_size)
        self.layer3 = torch.nn.Linear(filter_size, 4)
        # Shorthand for the activation used between layers.
        self.relu = torch.nn.functional.relu

    def forward(self, x):
        hidden = self.relu(self.layer1(x))
        hidden = self.relu(self.layer2(hidden))
        return self.layer3(hidden)
def main():
    """Train a VeryTinyNerfModel on the bundled tiny_nerf dataset.

    Loads images and camera poses from ``cache/tiny_nerf_data.npz``, holds
    image 101 out for evaluation, optimizes the model with Adam, and every
    ``display_every`` iterations renders the held-out view, logs its loss/PSNR
    and saves plots under ``cache/log``.
    """
    # Determine device to run on (GPU vs CPU)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Log directory
    logdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache", "log")
    os.makedirs(logdir, exist_ok=True)
    """
    Load input images and poses
    """
    data = np.load("cache/tiny_nerf_data.npz")
    # Images
    images = data["images"]
    # Camera extrinsics (poses)
    tform_cam2world = data["poses"]
    tform_cam2world = torch.from_numpy(tform_cam2world).to(device)
    # Focal length (intrinsics)
    focal_length = data["focal"]
    focal_length = torch.from_numpy(focal_length).to(device)
    # Height and width of each image
    height, width = images.shape[1:3]
    # Near and far clipping thresholds for depth values.
    near_thresh = 2.0
    far_thresh = 6.0
    # Hold one image out (for test).
    testimg, testpose = images[101], tform_cam2world[101]
    testimg = torch.from_numpy(testimg).to(device)
    # Map images to device
    images = torch.from_numpy(images[:100, ..., :3]).to(device)
    """
    Parameters for TinyNeRF training
    """
    # Number of functions used in the positional encoding (Be sure to update the
    # model if this number changes).
    num_encoding_functions = 10
    # Specify encoding function.
    encode = positional_encoding
    # Number of depth samples along each ray.
    depth_samples_per_ray = 32
    # Chunksize (Note: this isn't batchsize in the conventional sense. This only
    # specifies the number of rays to be queried in one go. Backprop still happens
    # only after all rays from the current "bundle" are queried and rendered).
    # Use chunksize of about 4096 to fit in ~1.4 GB of GPU memory (when using 8
    # samples per ray).
    chunksize = 4096
    # Optimizer parameters
    lr = 5e-3
    num_iters = 5000
    # Misc parameters
    display_every = 100  # Number of iters after which stats are displayed
    """
    Model
    """
    model = VeryTinyNerfModel(num_encoding_functions=num_encoding_functions)
    model.to(device)
    """
    Optimizer
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    """
    Train-Eval-Repeat!
    """
    # Seed RNG, for repeatability
    seed = 9458
    torch.manual_seed(seed)
    np.random.seed(seed)
    # Lists to log metrics etc.
    psnrs = []
    iternums = []
    for i in trange(num_iters):
        # Randomly pick an image as the target.
        target_img_idx = np.random.randint(images.shape[0])
        target_img = images[target_img_idx].to(device)
        target_tform_cam2world = tform_cam2world[target_img_idx].to(device)
        # Run one iteration of TinyNeRF and get the rendered RGB image.
        rgb_predicted = run_one_iter_of_tinynerf(
            height,
            width,
            focal_length,
            target_tform_cam2world,
            near_thresh,
            far_thresh,
            depth_samples_per_ray,
            encode,
            get_minibatches,
            chunksize,
            model,
            num_encoding_functions,
        )
        # Compute mean-squared error between the predicted and target images. Backprop!
        loss = torch.nn.functional.mse_loss(rgb_predicted, target_img)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Display images/plots/stats
        if i % display_every == 0 or i == num_iters - 1:
            # Render the held-out view
            rgb_predicted = run_one_iter_of_tinynerf(
                height,
                width,
                focal_length,
                testpose,
                near_thresh,
                far_thresh,
                depth_samples_per_ray,
                encode,
                get_minibatches,
                chunksize,
                model,
                num_encoding_functions,
            )
            # BUGFIX: evaluate the held-out render against the held-out image
            # (`testimg`), not the last training target (`target_img`) -- the
            # previous code reported loss/PSNR against an unrelated image.
            loss = torch.nn.functional.mse_loss(rgb_predicted, testimg)
            tqdm.write("Loss: " + str(loss.item()))
            psnr = -10.0 * torch.log10(loss)
            psnrs.append(psnr.item())
            iternums.append(i)
            plt.imshow(rgb_predicted.detach().cpu().numpy())
            plt.savefig(os.path.join(logdir, str(i).zfill(6) + ".png"))
            plt.close("all")
            if i == num_iters - 1:
                plt.plot(iternums, psnrs)
                plt.savefig(os.path.join(logdir, "psnr.png"))
                plt.close("all")
    print("Done!")
if __name__ == "__main__":
    # Script entry point: kick off TinyNeRF training.
    main()
| [
"numpy.load",
"numpy.random.seed",
"torch.cat",
"nerf.get_ray_bundle",
"numpy.random.randint",
"os.path.join",
"os.path.abspath",
"matplotlib.pyplot.close",
"nerf.cumprod_exclusive",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.relu",
"tqdm.trange",
"torch.manual_seed",
"torch.nn... | [((4051, 4099), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['radiance_field[..., 3]'], {}), '(radiance_field[..., 3])\n', (4075, 4099), False, 'import torch\n'), ((4110, 4148), 'torch.sigmoid', 'torch.sigmoid', (['radiance_field[..., :3]'], {}), '(radiance_field[..., :3])\n', (4123, 4148), False, 'import torch\n'), ((4164, 4250), 'torch.tensor', 'torch.tensor', (['[10000000000.0]'], {'dtype': 'ray_origins.dtype', 'device': 'ray_origins.device'}), '([10000000000.0], dtype=ray_origins.dtype, device=ray_origins.\n device)\n', (4176, 4250), False, 'import torch\n'), ((5105, 5165), 'nerf.get_ray_bundle', 'get_ray_bundle', (['height', 'width', 'focal_length', 'tform_cam2world'], {}), '(height, width, focal_length, tform_cam2world)\n', (5119, 5165), False, 'from nerf import cumprod_exclusive, get_minibatches, get_ray_bundle, positional_encoding\n'), ((5959, 5988), 'torch.cat', 'torch.cat', (['predictions'], {'dim': '(0)'}), '(predictions, dim=0)\n', (5968, 5988), False, 'import torch\n'), ((6119, 6177), 'torch.reshape', 'torch.reshape', (['radiance_field_flattened', 'unflattened_shape'], {}), '(radiance_field_flattened, unflattened_shape)\n', (6132, 6177), False, 'import torch\n'), ((7453, 7487), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (7464, 7487), False, 'import os\n'), ((7549, 7584), 'numpy.load', 'np.load', (['"""cache/tiny_nerf_data.npz"""'], {}), "('cache/tiny_nerf_data.npz')\n", (7556, 7584), True, 'import numpy as np\n'), ((9483, 9506), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (9500, 9506), False, 'import torch\n'), ((9511, 9531), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9525, 9531), True, 'import numpy as np\n'), ((9612, 9629), 'tqdm.trange', 'trange', (['num_iters'], {}), '(num_iters)\n', (9618, 9629), False, 'from tqdm import tqdm, trange\n'), ((4439, 4466), 'torch.exp', 'torch.exp', (['(-sigma_a * dists)'], {}), 
'(-sigma_a * dists)\n', (4448, 4466), False, 'import torch\n'), ((4489, 4527), 'nerf.cumprod_exclusive', 'cumprod_exclusive', (['(1.0 - alpha + 1e-10)'], {}), '(1.0 - alpha + 1e-10)\n', (4506, 4527), False, 'from nerf import cumprod_exclusive, get_minibatches, get_ray_bundle, positional_encoding\n'), ((6704, 6768), 'torch.nn.Linear', 'torch.nn.Linear', (['(3 + 3 * 2 * num_encoding_functions)', 'filter_size'], {}), '(3 + 3 * 2 * num_encoding_functions, filter_size)\n', (6719, 6768), False, 'import torch\n'), ((6831, 6872), 'torch.nn.Linear', 'torch.nn.Linear', (['filter_size', 'filter_size'], {}), '(filter_size, filter_size)\n', (6846, 6872), False, 'import torch\n'), ((6933, 6964), 'torch.nn.Linear', 'torch.nn.Linear', (['filter_size', '(4)'], {}), '(filter_size, 4)\n', (6948, 6964), False, 'import torch\n'), ((9705, 9739), 'numpy.random.randint', 'np.random.randint', (['images.shape[0]'], {}), '(images.shape[0])\n', (9722, 9739), True, 'import numpy as np\n'), ((10420, 10475), 'torch.nn.functional.mse_loss', 'torch.nn.functional.mse_loss', (['rgb_predicted', 'target_img'], {}), '(rgb_predicted, target_img)\n', (10448, 10475), False, 'import torch\n'), ((2024, 2076), 'torch.linspace', 'torch.linspace', (['near_thresh', 'far_thresh', 'num_samples'], {}), '(near_thresh, far_thresh, num_samples)\n', (2038, 2076), False, 'import torch\n'), ((7304, 7329), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7327, 7329), False, 'import torch\n'), ((7405, 7430), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7420, 7430), False, 'import os\n'), ((7717, 7750), 'torch.from_numpy', 'torch.from_numpy', (['tform_cam2world'], {}), '(tform_cam2world)\n', (7733, 7750), False, 'import torch\n'), ((7846, 7876), 'torch.from_numpy', 'torch.from_numpy', (['focal_length'], {}), '(focal_length)\n', (7862, 7876), False, 'import torch\n'), ((8175, 8200), 'torch.from_numpy', 'torch.from_numpy', (['testimg'], {}), '(testimg)\n', (8191, 8200), 
False, 'import torch\n'), ((8253, 8292), 'torch.from_numpy', 'torch.from_numpy', (['images[:100, ..., :3]'], {}), '(images[:100, ..., :3])\n', (8269, 8292), False, 'import torch\n'), ((11122, 11177), 'torch.nn.functional.mse_loss', 'torch.nn.functional.mse_loss', (['rgb_predicted', 'target_img'], {}), '(rgb_predicted, target_img)\n', (11150, 11177), False, 'import torch\n'), ((11491, 11507), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11500, 11507), True, 'import matplotlib.pyplot as plt\n'), ((11257, 11274), 'torch.log10', 'torch.log10', (['loss'], {}), '(loss)\n', (11268, 11274), False, 'import torch\n'), ((11560, 11585), 'matplotlib.pyplot.plot', 'plt.plot', (['iternums', 'psnrs'], {}), '(iternums, psnrs)\n', (11568, 11585), True, 'import matplotlib.pyplot as plt\n'), ((11664, 11680), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11673, 11680), True, 'import matplotlib.pyplot as plt\n'), ((11614, 11646), 'os.path.join', 'os.path.join', (['logdir', '"""psnr.png"""'], {}), "(logdir, 'psnr.png')\n", (11626, 11646), False, 'import os\n'), ((2383, 2406), 'torch.rand', 'torch.rand', (['noise_shape'], {}), '(noise_shape)\n', (2393, 2406), False, 'import torch\n')] |
__all__ = ['planet_orbit', 'planet_star_projected_distance', 'planet_phase',
'transit', 'transit_integrated', 'transit_depth', 'transit_duration',
'eclipse', 'eclipse_integrated', 'eclipse_depth', 'eclipse_duration', 'eclipse_mid_time']
import numpy as np
from pylightcurve.analysis.numerical_integration import gauss_numerical_integration
from pylightcurve.analysis.curve_fit import curve_fit
# orbit
def planet_orbit(period, sma_over_rs, eccentricity, inclination, periastron, mid_time, time_array, ww=0):
    """Return the planet's Cartesian position [x, y, z] (in stellar radii) at the given times.

    Angles (`inclination`, `periastron`, `ww`) are in degrees; `sma_over_rs` is the
    semi-major axis in stellar radii; `time_array` and `mid_time` share the same
    time units as `period`.  Eccentric orbits are handled by solving Kepler's
    equation iteratively; the circular case uses a closed form.
    """
    # Convert all input angles from degrees to radians.
    inclination = inclination * np.pi / 180.0
    periastron = periastron * np.pi / 180.0
    ww = ww * np.pi / 180.0
    # Circular orbit: position follows directly from the mean anomaly.
    if eccentricity == 0 and ww == 0:
        vv = 2 * np.pi * (time_array - mid_time) / period
        bb = sma_over_rs * np.cos(vv)
        return [bb * np.sin(inclination), sma_over_rs * np.sin(vv), - bb * np.cos(inclination)]
    # Eccentric anomaly at the mid-transit reference angle, used below to shift
    # the time zero-point from mid-transit to periastron passage.
    if periastron < np.pi / 2:
        aa = 1.0 * np.pi / 2 - periastron
    else:
        aa = 5.0 * np.pi / 2 - periastron
    bb = 2 * np.arctan(np.sqrt((1 - eccentricity) / (1 + eccentricity)) * np.tan(aa / 2))
    if bb < 0:
        bb += 2 * np.pi
    mid_time = float(mid_time) - (period / 2.0 / np.pi) * (bb - eccentricity * np.sin(bb))
    # Mean anomaly, folded into a single orbital cycle.
    m = (time_array - mid_time - np.int_((time_array - mid_time) / period) * period) * 2.0 * np.pi / period
    u0 = m
    stop = False
    u1 = 0
    # Solve Kepler's equation u - e*sin(u) = m by Newton-Raphson iteration;
    # converged when every element changes by less than 1e-7.
    for ii in range(10000):  # iteration cap of 10000 - arbitrary safety limit
        u1 = u0 - (u0 - eccentricity * np.sin(u0) - m) / (1 - eccentricity * np.cos(u0))
        stop = (np.abs(u1 - u0) < 10 ** (-7)).all()
        if stop:
            break
        else:
            u0 = u1
    if not stop:
        raise RuntimeError('Failed to find a solution in 10000 loops')
    # Eccentric anomaly -> true anomaly.
    vv = 2 * np.arctan(np.sqrt((1 + eccentricity) / (1 - eccentricity)) * np.tan(u1 / 2))
    # Orbital radius from the ellipse equation, then project onto the
    # observer-oriented Cartesian axes.
    rr = sma_over_rs * (1 - (eccentricity ** 2)) / (np.ones_like(vv) + eccentricity * np.cos(vv))
    aa = np.cos(vv + periastron)
    bb = np.sin(vv + periastron)
    x = rr * bb * np.sin(inclination)
    y = rr * (-aa * np.cos(ww) + bb * np.sin(ww) * np.cos(inclination))
    z = rr * (-aa * np.sin(ww) - bb * np.cos(ww) * np.cos(inclination))
    return [x, y, z]
def planet_star_projected_distance(period, sma_over_rs, eccentricity, inclination, periastron, mid_time, time_array):
    """Sky-projected planet-star separation (in stellar radii) at the given times."""
    _, y, z = planet_orbit(period, sma_over_rs, eccentricity, inclination, periastron, mid_time, time_array)
    return np.sqrt(y * y + z * z)
def planet_phase(period, mid_time, time_array):
    """Orbital phase: elapsed time since `mid_time` in units of the orbital period."""
    elapsed = time_array - mid_time
    return elapsed / period
# flux drop
def integral_r_claret(limb_darkening_coefficients, r):
    """Closed-form radial integral of the Claret four-parameter limb-darkening law at radius r."""
    a1, a2, a3, a4 = limb_darkening_coefficients
    mu44 = 1.0 - r * r
    mu24 = np.sqrt(mu44)
    mu14 = np.sqrt(mu24)
    # Sum of the five law terms, each with its own exponent and normalisation.
    total = (2.0 * (1.0 - a1 - a2 - a3 - a4) / 4) * mu44
    total = total + (2.0 * a1 / 5) * mu44 * mu14
    total = total + (2.0 * a2 / 6) * mu44 * mu24
    total = total + (2.0 * a3 / 7) * mu44 * mu24 * mu14
    total = total + (2.0 * a4 / 8) * mu44 * mu44
    return -total
def num_claret(r, limb_darkening_coefficients, rprs, z):
    """Integrand for the Claret law: intensity profile times the arc factor at radius r."""
    a1, a2, a3, a4 = limb_darkening_coefficients
    rsq = r * r
    mu44 = 1.0 - rsq
    mu24 = np.sqrt(mu44)
    mu14 = np.sqrt(mu24)
    intensity = (1.0 - a1 - a2 - a3 - a4) + a1 * mu14 + a2 * mu24 + a3 * mu24 * mu14 + a4 * mu44
    # Half-angle of the arc at radius r inside the occulting disc (radius rprs,
    # centre offset z); clipped so arccos stays in its domain.
    arc = np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
    return intensity * r * arc
def integral_r_f_claret(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    """Numerically integrate the Claret integrand between radii r1 and r2."""
    return gauss_numerical_integration(
        num_claret, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for zero method
def integral_r_zero(limb_darkening_coefficients, r):
    """Radial integral for a uniform disc (no limb darkening); coefficients are ignored."""
    mu_sq = 1 - r * r
    return (-1.0 / 6) * mu_sq * 3.0
def num_zero(r, limb_darkening_coefficients, rprs, z):
    """Integrand for the uniform-disc law; limb-darkening coefficients are ignored."""
    rsq = r * r
    arc = np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
    return r * arc
def integral_r_f_zero(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    """Numerically integrate the uniform-disc integrand between radii r1 and r2."""
    return gauss_numerical_integration(
        num_zero, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for linear method
def integral_r_linear(limb_darkening_coefficients, r):
    """Closed-form radial integral of the linear limb-darkening law; only coefficient [0] is used."""
    a1 = limb_darkening_coefficients[0]
    mu_sq = 1 - r * r
    mu = np.sqrt(mu_sq)
    return (-1.0 / 6) * mu_sq * (3.0 + a1 * (-3.0 + 2.0 * mu))
def num_linear(r, limb_darkening_coefficients, rprs, z):
    """Integrand for the linear limb-darkening law; only coefficient [0] is used."""
    a1 = limb_darkening_coefficients[0]
    rsq = r * r
    intensity = 1.0 - a1 * (1.0 - np.sqrt(1.0 - rsq))
    arc = np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
    return intensity * r * arc
def integral_r_f_linear(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    """Numerically integrate the linear-law integrand between radii r1 and r2."""
    return gauss_numerical_integration(
        num_linear, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for quadratic method
def integral_r_quad(limb_darkening_coefficients, r):
    """Closed-form radial integral of the quadratic limb-darkening law (first two coefficients)."""
    a1, a2 = limb_darkening_coefficients[:2]
    musq = 1 - r * r
    mu = np.sqrt(musq)
    term_a = -4.0 * (a1 + 2.0 * a2) * mu * musq
    term_b = 6.0 * (-1 + a1 + a2) * musq
    term_c = 3.0 * a2 * musq * musq
    return (1.0 / 12) * (term_a + term_b + term_c)
def num_quad(r, limb_darkening_coefficients, rprs, z):
    """Integrand for the quadratic limb-darkening law (first two coefficients)."""
    a1, a2 = limb_darkening_coefficients[:2]
    rsq = r * r
    cc = 1.0 - np.sqrt(1.0 - rsq)
    intensity = 1.0 - a1 * cc - a2 * cc * cc
    arc = np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
    return intensity * r * arc
def integral_r_f_quad(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    """Numerically integrate the quadratic-law integrand between radii r1 and r2."""
    return gauss_numerical_integration(
        num_quad, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for square root method
def integral_r_sqrt(limb_darkening_coefficients, r):
    """Closed-form radial integral of the square-root limb-darkening law (first two coefficients)."""
    a1, a2 = limb_darkening_coefficients[:2]
    musq = 1 - r * r
    mu = np.sqrt(musq)
    bracket = (-2.0 / 5) * a2 * np.sqrt(mu) - (1.0 / 3) * a1 * mu + (1.0 / 2) * (-1 + a1 + a2)
    return bracket * musq
def num_sqrt(r, limb_darkening_coefficients, rprs, z):
    """Integrand for the square-root limb-darkening law (first two coefficients)."""
    a1, a2 = limb_darkening_coefficients[:2]
    rsq = r * r
    mu = np.sqrt(1.0 - rsq)
    intensity = 1.0 - a1 * (1 - mu) - a2 * (1.0 - np.sqrt(mu))
    arc = np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
    return intensity * r * arc
def integral_r_f_sqrt(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    """Numerically integrate the square-root-law integrand between radii r1 and r2."""
    return gauss_numerical_integration(
        num_sqrt, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# dictionaries containing the different methods,
# if you define a new method, include the functions in the dictionary as well
# Dispatch table: limb-darkening law name -> closed-form radial integral.
integral_r = {
    'claret': integral_r_claret,
    'linear': integral_r_linear,
    'quad': integral_r_quad,
    'sqrt': integral_r_sqrt,
    'zero': integral_r_zero
}
# Dispatch table: law name -> numerical (Gauss quadrature) integral wrapper.
# A new law must be registered in BOTH tables under the same key.
integral_r_f = {
    'claret': integral_r_f_claret,
    'linear': integral_r_f_linear,
    'quad': integral_r_f_quad,
    'sqrt': integral_r_f_sqrt,
    'zero': integral_r_f_zero,
}
def integral_centred(method, limb_darkening_coefficients, rprs, ww1, ww2):
    """Flux integral over a disc sector of radius rprs spanning the angle |ww2 - ww1|."""
    radial_part = (integral_r[method](limb_darkening_coefficients, rprs)
                   - integral_r[method](limb_darkening_coefficients, 0.0))
    return radial_part * np.abs(ww2 - ww1)
def integral_plus_core(method, limb_darkening_coefficients, rprs, z, ww1, ww2, precision=3):
    # "Plus"-branch flux integral over the region bounded by the planetary disc
    # (radius rprs, centre at separation z) and the angles ww1/ww2.
    # Empty input -> empty output.
    if len(z) == 0:
        return z
    # Outer chord radii at the two bounding angles (plus branch of the chord
    # equation), clipped into the stellar disc [0, 1].
    rr1 = z * np.cos(ww1) + np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww1)) ** 2, 0))
    rr1 = np.clip(rr1, 0, 1)
    rr2 = z * np.cos(ww2) + np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww2)) ** 2, 0))
    rr2 = np.clip(rr2, 0, 1)
    # Order the bounds element-wise so that (w1, r1) <= (w2, r2).
    w1 = np.minimum(ww1, ww2)
    r1 = np.minimum(rr1, rr2)
    w2 = np.maximum(ww1, ww2)
    r2 = np.maximum(rr1, rr2)
    # Closed-form radial integrals for the sector pieces plus the numerical
    # part between r1 and r2.
    parta = integral_r[method](limb_darkening_coefficients, 0.0) * (w1 - w2)
    partb = integral_r[method](limb_darkening_coefficients, r1) * w2
    partc = integral_r[method](limb_darkening_coefficients, r2) * (-w1)
    partd = integral_r_f[method](limb_darkening_coefficients, rprs, z, r1, r2, precision=precision)
    return parta + partb + partc + partd
def integral_minus_core(method, limb_darkening_coefficients, rprs, z, ww1, ww2, precision=3):
    # "Minus"-branch counterpart of integral_plus_core: same construction but
    # with the inner chord radii (minus branch) and the numerical part
    # subtracted instead of added.  Empty input -> empty output.
    if len(z) == 0:
        return z
    # Inner chord radii at the two bounding angles, clipped into [0, 1].
    rr1 = z * np.cos(ww1) - np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww1)) ** 2, 0))
    rr1 = np.clip(rr1, 0, 1)
    rr2 = z * np.cos(ww2) - np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww2)) ** 2, 0))
    rr2 = np.clip(rr2, 0, 1)
    # Order the bounds element-wise so that (w1, r1) <= (w2, r2).
    w1 = np.minimum(ww1, ww2)
    r1 = np.minimum(rr1, rr2)
    w2 = np.maximum(ww1, ww2)
    r2 = np.maximum(rr1, rr2)
    parta = integral_r[method](limb_darkening_coefficients, 0.0) * (w1 - w2)
    partb = integral_r[method](limb_darkening_coefficients, r1) * (-w1)
    partc = integral_r[method](limb_darkening_coefficients, r2) * w2
    partd = integral_r_f[method](limb_darkening_coefficients, rprs, z, r1, r2, precision=precision)
    return parta + partb + partc - partd
def transit_flux_drop(limb_darkening_coefficients, rp_over_rs, z_over_rs, method='claret', precision=3):
    """Relative stellar flux (1 = unobscured) for a planet of radius `rp_over_rs`
    at projected separations `z_over_rs`, for the requested limb-darkening law.

    Negative separations are treated as "planet not in front of the star" by
    remapping them far outside the stellar disc; separations are also floored
    at 1e-10 to avoid division by zero in the geometry below.
    """
    z_over_rs = np.where(z_over_rs < 0, 1.0 + 100.0 * rp_over_rs, z_over_rs)
    z_over_rs = np.maximum(z_over_rs, 10**(-10))
    # cases: classify every sample by the disc-overlap geometry
    # (inside/equal/outside the planet radius, crossing the limb or not).
    zsq = z_over_rs * z_over_rs
    sum_z_rprs = z_over_rs + rp_over_rs
    dif_z_rprs = rp_over_rs - z_over_rs
    sqr_dif_z_rprs = zsq - rp_over_rs ** 2
    case0 = np.where((z_over_rs == 0) & (rp_over_rs <= 1))
    case1 = np.where((z_over_rs < rp_over_rs) & (sum_z_rprs <= 1))
    casea = np.where((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs < 1))
    caseb = np.where((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs > 1))
    case2 = np.where((z_over_rs == rp_over_rs) & (sum_z_rprs <= 1))
    casec = np.where((z_over_rs == rp_over_rs) & (sum_z_rprs > 1))
    case3 = np.where((z_over_rs > rp_over_rs) & (sum_z_rprs < 1))
    case4 = np.where((z_over_rs > rp_over_rs) & (sum_z_rprs == 1))
    case5 = np.where((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs < 1))
    case6 = np.where((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs == 1))
    case7 = np.where((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs > 1) & (-1 < dif_z_rprs))
    # Which samples contribute to each partial integral below.
    plus_case = np.concatenate((case1[0], case2[0], case3[0], case4[0], case5[0], casea[0], casec[0]))
    minus_case = np.concatenate((case3[0], case4[0], case5[0], case6[0], case7[0]))
    star_case = np.concatenate((case5[0], case6[0], case7[0], casea[0], casec[0]))
    # cross points: angles where the planetary limb intersects the stellar limb.
    ph = np.arccos(np.clip((1.0 - rp_over_rs ** 2 + zsq) / (2.0 * z_over_rs), -1, 1))
    theta_1 = np.zeros(len(z_over_rs))
    ph_case = np.concatenate((case5[0], casea[0], casec[0]))
    theta_1[ph_case] = ph[ph_case]
    theta_2 = np.arcsin(np.minimum(rp_over_rs / z_over_rs, 1))
    theta_2[case1] = np.pi
    theta_2[case2] = np.pi / 2.0
    theta_2[casea] = np.pi
    theta_2[casec] = np.pi / 2.0
    theta_2[case7] = ph[case7]
    # flux_upper: "plus"-branch integral per sample; fully-inside cases use the
    # closed-form centred integral instead.
    plusflux = np.zeros(len(z_over_rs))
    plusflux[plus_case] = integral_plus_core(method, limb_darkening_coefficients, rp_over_rs, z_over_rs[plus_case],
                                             theta_1[plus_case], theta_2[plus_case], precision=precision)
    if len(case0[0]) > 0:
        plusflux[case0] = integral_centred(method, limb_darkening_coefficients, rp_over_rs, 0.0, np.pi)
    if len(caseb[0]) > 0:
        plusflux[caseb] = integral_centred(method, limb_darkening_coefficients, 1, 0.0, np.pi)
    # flux_lower: "minus"-branch integral.
    minsflux = np.zeros(len(z_over_rs))
    minsflux[minus_case] = integral_minus_core(method, limb_darkening_coefficients, rp_over_rs,
                                               z_over_rs[minus_case], 0.0, theta_2[minus_case], precision=precision)
    # flux_star: contribution bounded by the stellar limb itself.
    starflux = np.zeros(len(z_over_rs))
    starflux[star_case] = integral_centred(method, limb_darkening_coefficients, 1, 0.0, ph[star_case])
    # flux_total: integral over the whole (unobscured) stellar disc,
    # used to normalise the blocked flux.
    total_flux = integral_centred(method, limb_darkening_coefficients, 1, 0.0, 2.0 * np.pi)
    return 1 - (2.0 / total_flux) * (plusflux + starflux - minsflux)
# transit
def transit(limb_darkening_coefficients, rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron,
            mid_time, time_array, method='claret', precision=3):
    """Forward transit light-curve model: relative flux at each time in `time_array`."""
    x, y, z = planet_orbit(period, sma_over_rs, eccentricity, inclination, periastron, mid_time, time_array)
    # When the planet is on the non-transiting side of the orbit (x < 0), force
    # the projected separation outside the stellar disc so no flux drop occurs.
    projected_distance = np.where(x < 0, 1.0 + 5.0 * rp_over_rs, np.sqrt(y * y + z * z))
    return transit_flux_drop(limb_darkening_coefficients, rp_over_rs, projected_distance,
                             method=method, precision=precision)
def transit_integrated(limb_darkening_coefficients, rp_over_rs, period, sma_over_rs, eccentricity, inclination,
                       periastron, mid_time, time_array, exp_time, max_sub_exp_time=10, method='claret', precision=3):
    """Transit model averaged over the exposure time.

    Each exposure (`exp_time`, in seconds) is split into sub-exposures no longer
    than `max_sub_exp_time` seconds; the model is evaluated at each sub-exposure
    mid-point and averaged per exposure.
    """
    # Number of sub-exposures per exposure.
    time_factor = int(exp_time / max_sub_exp_time) + 1
    # Convert the exposure length from seconds to days (time_array is
    # presumably in days -- consistent with the 24*60*60 factors used in
    # transit_duration).
    exp_time /= (60.0 * 60.0 * 24.0)
    # Sub-exposure mid-points around each exposure time, flattened for a single
    # model evaluation.
    time_array_hr = (time_array[:, None] + np.arange(-exp_time / 2 + exp_time / time_factor / 2, exp_time / 2,
                                                     exp_time / time_factor)).flatten()
    position_vector = planet_orbit(period, sma_over_rs, eccentricity,
                                   inclination, periastron, mid_time, time_array_hr)
    # Mask out-of-transit orbital positions (x < 0) with a separation beyond
    # the stellar disc.
    projected_distance = np.where(
        position_vector[0] < 0, 1.0 + 5.0 * rp_over_rs,
        np.sqrt(position_vector[1] * position_vector[1] + position_vector[2] * position_vector[2]))
    # Average the sub-exposure fluxes back onto the original exposure grid.
    return np.mean(np.reshape(
        transit_flux_drop(limb_darkening_coefficients, rp_over_rs, projected_distance,
                          method=method, precision=precision),
        (len(time_array), time_factor)), 1)
def transit_duration(rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron):
    """Transit duration in days, found by solving for the first/fourth contact times.

    An analytic estimate (`aprox`, in seconds) seeds two curve_fit root searches
    for the times at which the projected separation equals 1 + rp_over_rs.
    """
    ww = periastron * np.pi / 180
    ii = inclination * np.pi / 180
    ee = eccentricity
    aa = sma_over_rs
    # Orbital radius at mid-transit (units of the semi-major axis) and the
    # corresponding impact parameter.
    ro_pt = (1 - ee ** 2) / (1 + ee * np.sin(ww))
    b_pt = aa * ro_pt * np.cos(ii)
    if b_pt > 1:
        # Crude fallback when the analytic impact parameter is non-transiting.
        b_pt = 0.5
    s_ps = 1.0 + rp_over_rs
    df = np.arcsin(np.sqrt((s_ps ** 2 - b_pt ** 2) / ((aa ** 2) * (ro_pt ** 2) - b_pt ** 2)))
    # Analytic half-duration estimate, converted from days to seconds.
    aprox = (period * (ro_pt ** 2)) / (np.pi * np.sqrt(1 - ee ** 2)) * df * 60 * 60 * 24
    # Projected separation as a function of time offset t (seconds) from the
    # reference mid-time 10000; the x argument is unused (curve_fit interface).
    def function_to_fit(x, t):
        return planet_star_projected_distance(period, sma_over_rs, eccentricity, inclination, periastron,
                                              10000, np.array(10000 + t / 24 / 60 / 60))
    # Contact times on either side of mid-transit (separation = 1 + rp_over_rs).
    popt1, pcov1 = curve_fit(function_to_fit, [0], [1.0 + rp_over_rs], p0=[-aprox / 2])
    popt2, pcov2 = curve_fit(function_to_fit, [0], [1.0 + rp_over_rs], p0=[aprox / 2])
    return (popt2[0] - popt1[0]) / 24 / 60 / 60
def transit_depth(limb_darkening_coefficients, rp_over_rs, period, sma_over_rs, eccentricity, inclination,
                  periastron, method='claret', precision=6):
    """Transit depth: 1 minus the model flux evaluated at mid-transit."""
    mid_transit_flux = transit(limb_darkening_coefficients, rp_over_rs, period, sma_over_rs, eccentricity,
                               inclination, periastron, 10000, np.array([10000]),
                               method=method, precision=precision)[0]
    return 1 - mid_transit_flux
# eclipse
def eclipse(fp_over_fs, rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron, mid_time,
            time_array, precision=3):
    """Secondary-eclipse model: relative system flux while the star occults the planet."""
    # Work in units of the planetary radius (scale the semi-major axis by
    # 1 / rp_over_rs) and rotate periastron by 180 degrees so the occultation
    # geometry mirrors the transit geometry.
    x, y, z = planet_orbit(period, sma_over_rs / rp_over_rs, eccentricity, inclination, periastron + 180,
                           mid_time, time_array)
    # Out-of-eclipse positions (x < 0) are masked beyond the occulter radius
    # (1 / rp_over_rs in these units) so they produce no flux drop.
    projected_distance = np.where(x < 0, 1.0 + 5.0 / rp_over_rs, np.sqrt(y * y + z * z))
    occultation = transit_flux_drop([0, 0, 0, 0], 1 / rp_over_rs, projected_distance,
                                    precision=precision, method='zero')
    return (1.0 + fp_over_fs * occultation) / (1.0 + fp_over_fs)
def eclipse_integrated(fp_over_fs, rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron,
                       mid_time, time_array, exp_time, max_sub_exp_time=10, precision=3):
    """Secondary-eclipse model averaged over the exposure time.

    Each exposure (`exp_time`, in seconds) is split into sub-exposures no longer
    than `max_sub_exp_time` seconds; the model is evaluated at each sub-exposure
    mid-point and averaged per exposure (same scheme as `transit_integrated`).
    """
    # Number of sub-exposures per exposure.
    time_factor = int(exp_time / max_sub_exp_time) + 1
    # Convert the exposure length from seconds to days.
    exp_time /= (60.0 * 60.0 * 24.0)
    time_array_hr = (time_array[:, None] + np.arange(-exp_time / 2 + exp_time / time_factor / 2, exp_time / 2,
                                                     exp_time / time_factor)).flatten()
    # Planet-radius units and 180-degree periastron rotation, as in `eclipse`.
    position_vector = planet_orbit(period, sma_over_rs / rp_over_rs, eccentricity, inclination, periastron + 180,
                                   mid_time, time_array_hr)
    # BUGFIX: out-of-eclipse times (x < 0) must be masked with a separation
    # beyond the occulter radius (1 / rp_over_rs), matching `eclipse`.  The
    # previous value, 1.0 + 5.0 * rp_over_rs, is smaller than 1 / rp_over_rs
    # for small planets, i.e. inside the occulter.
    projected_distance = np.where(
        position_vector[0] < 0, 1.0 + 5.0 / rp_over_rs,
        np.sqrt(position_vector[1] * position_vector[1] + position_vector[2] * position_vector[2]))
    return np.mean(np.reshape(
        (1.0 + fp_over_fs * transit_flux_drop([0, 0, 0, 0], 1 / rp_over_rs, projected_distance, method='zero',
                                              precision=precision)) / (1.0 + fp_over_fs),
        (len(time_array), time_factor)), 1)
def eclipse_mid_time(period, sma_over_rs, eccentricity, inclination, periastron, mid_time):
    """Time of secondary-eclipse centre, given the transit mid-time `mid_time`.

    Scans one full orbit for the point of minimum |y| on the far side of the
    star (x < 0), then refines it with a curve_fit root search on y = 0.
    """
    # Coarse scan of one orbital period.
    test_array = np.arange(0, period, 0.001)
    xx, yy, zz = planet_orbit(period, sma_over_rs, eccentricity, inclination, periastron, mid_time,
                              test_array + mid_time)
    # Keep only positions behind the star (x < 0).
    test1 = np.where(xx < 0)
    yy = yy[test1]
    test_array = test_array[test1]
    # Initial guess: offset with the smallest sky-plane |y|.
    aprox = test_array[np.argmin(np.abs(yy))]
    # y-coordinate as a function of the time offset t; x is unused
    # (curve_fit interface).
    def function_to_fit(x, t):
        xx, yy, zz = planet_orbit(period, sma_over_rs, eccentricity, inclination, periastron, mid_time,
                                  np.array(mid_time + t))
        return yy
    # Refine the offset so that y = 0 exactly.
    popt, pcov = curve_fit(function_to_fit, [0], [0], p0=[aprox])
    return mid_time + popt[0]
def eclipse_duration(rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron):
    """Secondary-eclipse duration in days, via contact-time root searches
    around the eclipse mid-time (mirrors `transit_duration`)."""
    ww = periastron * np.pi / 180
    ii = inclination * np.pi / 180
    ee = eccentricity
    aa = sma_over_rs
    # Orbital radius at the reference point and the impact parameter.
    ro_pt = (1 - ee ** 2) / (1 + ee * np.sin(ww))
    b_pt = aa * ro_pt * np.cos(ii)
    if b_pt > 1:
        # Crude fallback when the analytic impact parameter is non-eclipsing.
        b_pt = 0.5
    s_ps = 1.0 + rp_over_rs
    df = np.arcsin(np.sqrt((s_ps ** 2 - b_pt ** 2) / ((aa ** 2) * (ro_pt ** 2) - b_pt ** 2)))
    # Analytic half-duration estimate, converted from days to seconds.
    aprox = (period * (ro_pt ** 2)) / (np.pi * np.sqrt(1 - ee ** 2)) * df * 60 * 60 * 24
    # Centre of the eclipse, used as the reference time for the root search.
    emt = eclipse_mid_time(period, sma_over_rs, eccentricity, inclination, periastron, 10000)
    # Projected separation as a function of the time offset t (seconds) from
    # the eclipse mid-time; x is unused (curve_fit interface).
    def function_to_fit(x, t):
        xx = planet_star_projected_distance(period, sma_over_rs, eccentricity, inclination, periastron,
                                            10000, np.array(emt + t / 24 / 60 / 60))
        return xx
    # Contact times on either side of the eclipse centre.
    popt1, pcov1 = curve_fit(function_to_fit, [0], [1.0 + rp_over_rs], p0=[-aprox / 2])
    popt2, pcov2 = curve_fit(function_to_fit, [0], [1.0 + rp_over_rs], p0=[aprox / 2])
    return (popt2[0] - popt1[0]) / 24 / 60 / 60
def eclipse_depth(fp_over_fs, rp_over_rs, period, sma_over_rs, eccentricity, inclination, periastron, precision=6):
    """Return the fractional flux drop at mid-eclipse.

    Evaluates the eclipse light curve at the single reference epoch 10000
    and returns one minus that normalised flux value.
    """
    mid_flux = eclipse(fp_over_fs, rp_over_rs, period, sma_over_rs, eccentricity,
                       inclination, periastron, 10000, np.array([10000]),
                       precision=precision)
    return 1 - mid_flux[0]
| [
"numpy.int_",
"numpy.minimum",
"numpy.maximum",
"numpy.abs",
"numpy.ones_like",
"numpy.clip",
"pylightcurve.analysis.numerical_integration.gauss_numerical_integration",
"numpy.sin",
"numpy.where",
"numpy.arange",
"numpy.cos",
"pylightcurve.analysis.curve_fit.curve_fit",
"numpy.array",
"num... | [((1965, 1988), 'numpy.cos', 'np.cos', (['(vv + periastron)'], {}), '(vv + periastron)\n', (1971, 1988), True, 'import numpy as np\n'), ((1998, 2021), 'numpy.sin', 'np.sin', (['(vv + periastron)'], {}), '(vv + periastron)\n', (2004, 2021), True, 'import numpy as np\n'), ((2476, 2570), 'numpy.sqrt', 'np.sqrt', (['(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])'], {}), '(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])\n', (2483, 2570), True, 'import numpy as np\n'), ((2813, 2826), 'numpy.sqrt', 'np.sqrt', (['mu44'], {}), '(mu44)\n', (2820, 2826), True, 'import numpy as np\n'), ((2838, 2851), 'numpy.sqrt', 'np.sqrt', (['mu24'], {}), '(mu24)\n', (2845, 2851), True, 'import numpy as np\n'), ((3249, 3262), 'numpy.sqrt', 'np.sqrt', (['mu44'], {}), '(mu44)\n', (3256, 3262), True, 'import numpy as np\n'), ((3274, 3287), 'numpy.sqrt', 'np.sqrt', (['mu24'], {}), '(mu24)\n', (3281, 3287), True, 'import numpy as np\n'), ((3566, 3666), 'pylightcurve.analysis.numerical_integration.gauss_numerical_integration', 'gauss_numerical_integration', (['num_claret', 'r1', 'r2', 'precision', 'limb_darkening_coefficients', 'rprs', 'z'], {}), '(num_claret, r1, r2, precision,\n limb_darkening_coefficients, rprs, z)\n', (3593, 3666), False, 'from pylightcurve.analysis.numerical_integration import gauss_numerical_integration\n'), ((4069, 4167), 'pylightcurve.analysis.numerical_integration.gauss_numerical_integration', 'gauss_numerical_integration', (['num_zero', 'r1', 'r2', 'precision', 'limb_darkening_coefficients', 'rprs', 'z'], {}), '(num_zero, r1, r2, precision,\n limb_darkening_coefficients, rprs, z)\n', (4096, 4167), False, 'from pylightcurve.analysis.numerical_integration import gauss_numerical_integration\n'), ((4748, 4848), 'pylightcurve.analysis.numerical_integration.gauss_numerical_integration', 'gauss_numerical_integration', (['num_linear', 'r1', 'r2', 'precision', 'limb_darkening_coefficients', 
'rprs', 'z'], {}), '(num_linear, r1, r2, precision,\n limb_darkening_coefficients, rprs, z)\n', (4775, 4848), False, 'from pylightcurve.analysis.numerical_integration import gauss_numerical_integration\n'), ((5021, 5034), 'numpy.sqrt', 'np.sqrt', (['musq'], {}), '(musq)\n', (5028, 5034), True, 'import numpy as np\n'), ((5527, 5625), 'pylightcurve.analysis.numerical_integration.gauss_numerical_integration', 'gauss_numerical_integration', (['num_quad', 'r1', 'r2', 'precision', 'limb_darkening_coefficients', 'rprs', 'z'], {}), '(num_quad, r1, r2, precision,\n limb_darkening_coefficients, rprs, z)\n', (5554, 5625), False, 'from pylightcurve.analysis.numerical_integration import gauss_numerical_integration\n'), ((5800, 5813), 'numpy.sqrt', 'np.sqrt', (['musq'], {}), '(musq)\n', (5807, 5813), True, 'import numpy as np\n'), ((6042, 6060), 'numpy.sqrt', 'np.sqrt', (['(1.0 - rsq)'], {}), '(1.0 - rsq)\n', (6049, 6060), True, 'import numpy as np\n'), ((6303, 6401), 'pylightcurve.analysis.numerical_integration.gauss_numerical_integration', 'gauss_numerical_integration', (['num_sqrt', 'r1', 'r2', 'precision', 'limb_darkening_coefficients', 'rprs', 'z'], {}), '(num_sqrt, r1, r2, precision,\n limb_darkening_coefficients, rprs, z)\n', (6330, 6401), False, 'from pylightcurve.analysis.numerical_integration import gauss_numerical_integration\n'), ((7340, 7358), 'numpy.clip', 'np.clip', (['rr1', '(0)', '(1)'], {}), '(rr1, 0, 1)\n', (7347, 7358), True, 'import numpy as np\n'), ((7456, 7474), 'numpy.clip', 'np.clip', (['rr2', '(0)', '(1)'], {}), '(rr2, 0, 1)\n', (7463, 7474), True, 'import numpy as np\n'), ((7484, 7504), 'numpy.minimum', 'np.minimum', (['ww1', 'ww2'], {}), '(ww1, ww2)\n', (7494, 7504), True, 'import numpy as np\n'), ((7514, 7534), 'numpy.minimum', 'np.minimum', (['rr1', 'rr2'], {}), '(rr1, rr2)\n', (7524, 7534), True, 'import numpy as np\n'), ((7544, 7564), 'numpy.maximum', 'np.maximum', (['ww1', 'ww2'], {}), '(ww1, ww2)\n', (7554, 7564), True, 'import numpy as np\n'), 
((7574, 7594), 'numpy.maximum', 'np.maximum', (['rr1', 'rr2'], {}), '(rr1, rr2)\n', (7584, 7594), True, 'import numpy as np\n'), ((8184, 8202), 'numpy.clip', 'np.clip', (['rr1', '(0)', '(1)'], {}), '(rr1, 0, 1)\n', (8191, 8202), True, 'import numpy as np\n'), ((8300, 8318), 'numpy.clip', 'np.clip', (['rr2', '(0)', '(1)'], {}), '(rr2, 0, 1)\n', (8307, 8318), True, 'import numpy as np\n'), ((8328, 8348), 'numpy.minimum', 'np.minimum', (['ww1', 'ww2'], {}), '(ww1, ww2)\n', (8338, 8348), True, 'import numpy as np\n'), ((8358, 8378), 'numpy.minimum', 'np.minimum', (['rr1', 'rr2'], {}), '(rr1, rr2)\n', (8368, 8378), True, 'import numpy as np\n'), ((8388, 8408), 'numpy.maximum', 'np.maximum', (['ww1', 'ww2'], {}), '(ww1, ww2)\n', (8398, 8408), True, 'import numpy as np\n'), ((8418, 8438), 'numpy.maximum', 'np.maximum', (['rr1', 'rr2'], {}), '(rr1, rr2)\n', (8428, 8438), True, 'import numpy as np\n'), ((8922, 8982), 'numpy.where', 'np.where', (['(z_over_rs < 0)', '(1.0 + 100.0 * rp_over_rs)', 'z_over_rs'], {}), '(z_over_rs < 0, 1.0 + 100.0 * rp_over_rs, z_over_rs)\n', (8930, 8982), True, 'import numpy as np\n'), ((8999, 9031), 'numpy.maximum', 'np.maximum', (['z_over_rs', '(10 ** -10)'], {}), '(z_over_rs, 10 ** -10)\n', (9009, 9031), True, 'import numpy as np\n'), ((9212, 9258), 'numpy.where', 'np.where', (['((z_over_rs == 0) & (rp_over_rs <= 1))'], {}), '((z_over_rs == 0) & (rp_over_rs <= 1))\n', (9220, 9258), True, 'import numpy as np\n'), ((9271, 9325), 'numpy.where', 'np.where', (['((z_over_rs < rp_over_rs) & (sum_z_rprs <= 1))'], {}), '((z_over_rs < rp_over_rs) & (sum_z_rprs <= 1))\n', (9279, 9325), True, 'import numpy as np\n'), ((9338, 9410), 'numpy.where', 'np.where', (['((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs < 1))'], {}), '((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs < 1))\n', (9346, 9410), True, 'import numpy as np\n'), ((9423, 9495), 'numpy.where', 'np.where', (['((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs > 
1))'], {}), '((z_over_rs < rp_over_rs) & (sum_z_rprs > 1) & (dif_z_rprs > 1))\n', (9431, 9495), True, 'import numpy as np\n'), ((9508, 9563), 'numpy.where', 'np.where', (['((z_over_rs == rp_over_rs) & (sum_z_rprs <= 1))'], {}), '((z_over_rs == rp_over_rs) & (sum_z_rprs <= 1))\n', (9516, 9563), True, 'import numpy as np\n'), ((9576, 9630), 'numpy.where', 'np.where', (['((z_over_rs == rp_over_rs) & (sum_z_rprs > 1))'], {}), '((z_over_rs == rp_over_rs) & (sum_z_rprs > 1))\n', (9584, 9630), True, 'import numpy as np\n'), ((9643, 9696), 'numpy.where', 'np.where', (['((z_over_rs > rp_over_rs) & (sum_z_rprs < 1))'], {}), '((z_over_rs > rp_over_rs) & (sum_z_rprs < 1))\n', (9651, 9696), True, 'import numpy as np\n'), ((9709, 9763), 'numpy.where', 'np.where', (['((z_over_rs > rp_over_rs) & (sum_z_rprs == 1))'], {}), '((z_over_rs > rp_over_rs) & (sum_z_rprs == 1))\n', (9717, 9763), True, 'import numpy as np\n'), ((9776, 9852), 'numpy.where', 'np.where', (['((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs < 1))'], {}), '((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs < 1))\n', (9784, 9852), True, 'import numpy as np\n'), ((9865, 9942), 'numpy.where', 'np.where', (['((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs == 1))'], {}), '((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs == 1))\n', (9873, 9942), True, 'import numpy as np\n'), ((9955, 10055), 'numpy.where', 'np.where', (['((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs > 1) & (-1 <\n dif_z_rprs))'], {}), '((z_over_rs > rp_over_rs) & (sum_z_rprs > 1) & (sqr_dif_z_rprs > 1) &\n (-1 < dif_z_rprs))\n', (9963, 10055), True, 'import numpy as np\n'), ((10068, 10158), 'numpy.concatenate', 'np.concatenate', (['(case1[0], case2[0], case3[0], case4[0], case5[0], casea[0], casec[0])'], {}), '((case1[0], case2[0], case3[0], case4[0], case5[0], casea[0],\n casec[0]))\n', (10082, 10158), True, 'import numpy as np\n'), ((10172, 10238), 'numpy.concatenate', 
'np.concatenate', (['(case3[0], case4[0], case5[0], case6[0], case7[0])'], {}), '((case3[0], case4[0], case5[0], case6[0], case7[0]))\n', (10186, 10238), True, 'import numpy as np\n'), ((10255, 10321), 'numpy.concatenate', 'np.concatenate', (['(case5[0], case6[0], case7[0], casea[0], casec[0])'], {}), '((case5[0], case6[0], case7[0], casea[0], casec[0]))\n', (10269, 10321), True, 'import numpy as np\n'), ((10481, 10527), 'numpy.concatenate', 'np.concatenate', (['(case5[0], casea[0], casec[0])'], {}), '((case5[0], casea[0], casec[0]))\n', (10495, 10527), True, 'import numpy as np\n'), ((14464, 14532), 'pylightcurve.analysis.curve_fit.curve_fit', 'curve_fit', (['function_to_fit', '[0]', '[1.0 + rp_over_rs]'], {'p0': '[-aprox / 2]'}), '(function_to_fit, [0], [1.0 + rp_over_rs], p0=[-aprox / 2])\n', (14473, 14532), False, 'from pylightcurve.analysis.curve_fit import curve_fit\n'), ((14552, 14619), 'pylightcurve.analysis.curve_fit.curve_fit', 'curve_fit', (['function_to_fit', '[0]', '[1.0 + rp_over_rs]'], {'p0': '[aprox / 2]'}), '(function_to_fit, [0], [1.0 + rp_over_rs], p0=[aprox / 2])\n', (14561, 14619), False, 'from pylightcurve.analysis.curve_fit import curve_fit\n'), ((17027, 17054), 'numpy.arange', 'np.arange', (['(0)', 'period', '(0.001)'], {}), '(0, period, 0.001)\n', (17036, 17054), True, 'import numpy as np\n'), ((17221, 17237), 'numpy.where', 'np.where', (['(xx < 0)'], {}), '(xx < 0)\n', (17229, 17237), True, 'import numpy as np\n'), ((17563, 17611), 'pylightcurve.analysis.curve_fit.curve_fit', 'curve_fit', (['function_to_fit', '[0]', '[0]'], {'p0': '[aprox]'}), '(function_to_fit, [0], [0], p0=[aprox])\n', (17572, 17611), False, 'from pylightcurve.analysis.curve_fit import curve_fit\n'), ((18540, 18608), 'pylightcurve.analysis.curve_fit.curve_fit', 'curve_fit', (['function_to_fit', '[0]', '[1.0 + rp_over_rs]'], {'p0': '[-aprox / 2]'}), '(function_to_fit, [0], [1.0 + rp_over_rs], p0=[-aprox / 2])\n', (18549, 18608), False, 'from 
pylightcurve.analysis.curve_fit import curve_fit\n'), ((18628, 18695), 'pylightcurve.analysis.curve_fit.curve_fit', 'curve_fit', (['function_to_fit', '[0]', '[1.0 + rp_over_rs]'], {'p0': '[aprox / 2]'}), '(function_to_fit, [0], [1.0 + rp_over_rs], p0=[aprox / 2])\n', (18637, 18695), False, 'from pylightcurve.analysis.curve_fit import curve_fit\n'), ((2040, 2059), 'numpy.sin', 'np.sin', (['inclination'], {}), '(inclination)\n', (2046, 2059), True, 'import numpy as np\n'), ((5284, 5302), 'numpy.sqrt', 'np.sqrt', (['(1.0 - rsq)'], {}), '(1.0 - rsq)\n', (5291, 5302), True, 'import numpy as np\n'), ((7093, 7110), 'numpy.abs', 'np.abs', (['(ww2 - ww1)'], {}), '(ww2 - ww1)\n', (7099, 7110), True, 'import numpy as np\n'), ((10361, 10426), 'numpy.clip', 'np.clip', (['((1.0 - rp_over_rs ** 2 + zsq) / (2.0 * z_over_rs))', '(-1)', '(1)'], {}), '((1.0 - rp_over_rs ** 2 + zsq) / (2.0 * z_over_rs), -1, 1)\n', (10368, 10426), True, 'import numpy as np\n'), ((10587, 10624), 'numpy.minimum', 'np.minimum', (['(rp_over_rs / z_over_rs)', '(1)'], {}), '(rp_over_rs / z_over_rs, 1)\n', (10597, 10624), True, 'import numpy as np\n'), ((12328, 12422), 'numpy.sqrt', 'np.sqrt', (['(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])'], {}), '(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])\n', (12335, 12422), True, 'import numpy as np\n'), ((13358, 13452), 'numpy.sqrt', 'np.sqrt', (['(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])'], {}), '(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])\n', (13365, 13452), True, 'import numpy as np\n'), ((13959, 13969), 'numpy.cos', 'np.cos', (['ii'], {}), '(ii)\n', (13965, 13969), True, 'import numpy as np\n'), ((14053, 14122), 'numpy.sqrt', 'np.sqrt', (['((s_ps ** 2 - b_pt ** 2) / (aa ** 2 * ro_pt ** 2 - b_pt ** 2))'], {}), '((s_ps ** 2 - b_pt ** 2) / (aa ** 2 * ro_pt ** 2 - b_pt ** 2))\n', (14060, 14122), True, 
'import numpy as np\n'), ((15481, 15575), 'numpy.sqrt', 'np.sqrt', (['(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])'], {}), '(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])\n', (15488, 15575), True, 'import numpy as np\n'), ((16547, 16641), 'numpy.sqrt', 'np.sqrt', (['(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])'], {}), '(position_vector[1] * position_vector[1] + position_vector[2] *\n position_vector[2])\n', (16554, 16641), True, 'import numpy as np\n'), ((17926, 17936), 'numpy.cos', 'np.cos', (['ii'], {}), '(ii)\n', (17932, 17936), True, 'import numpy as np\n'), ((18020, 18089), 'numpy.sqrt', 'np.sqrt', (['((s_ps ** 2 - b_pt ** 2) / (aa ** 2 * ro_pt ** 2 - b_pt ** 2))'], {}), '((s_ps ** 2 - b_pt ** 2) / (aa ** 2 * ro_pt ** 2 - b_pt ** 2))\n', (18027, 18089), True, 'import numpy as np\n'), ((781, 791), 'numpy.cos', 'np.cos', (['vv'], {}), '(vv)\n', (787, 791), True, 'import numpy as np\n'), ((1910, 1926), 'numpy.ones_like', 'np.ones_like', (['vv'], {}), '(vv)\n', (1922, 1926), True, 'import numpy as np\n'), ((3408, 3467), 'numpy.minimum', 'np.minimum', (['((-rprs ** 2 + z * z + rsq) / (2.0 * z * r))', '(1.0)'], {}), '((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0)\n', (3418, 3467), True, 'import numpy as np\n'), ((3913, 3972), 'numpy.minimum', 'np.minimum', (['((-rprs ** 2 + z * z + rsq) / (2.0 * z * r))', '(1.0)'], {}), '((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0)\n', (3923, 3972), True, 'import numpy as np\n'), ((4590, 4649), 'numpy.minimum', 'np.minimum', (['((-rprs ** 2 + z * z + rsq) / (2.0 * z * r))', '(1.0)'], {}), '((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0)\n', (4600, 4649), True, 'import numpy as np\n'), ((5371, 5430), 'numpy.minimum', 'np.minimum', (['((-rprs ** 2 + z * z + rsq) / (2.0 * z * r))', '(1.0)'], {}), '((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0)\n', (5381, 5430), True, 'import numpy as np\n'), ((6147, 
6206), 'numpy.minimum', 'np.minimum', (['((-rprs ** 2 + z * z + rsq) / (2.0 * z * r))', '(1.0)'], {}), '((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0)\n', (6157, 6206), True, 'import numpy as np\n'), ((7257, 7268), 'numpy.cos', 'np.cos', (['ww1'], {}), '(ww1)\n', (7263, 7268), True, 'import numpy as np\n'), ((7373, 7384), 'numpy.cos', 'np.cos', (['ww2'], {}), '(ww2)\n', (7379, 7384), True, 'import numpy as np\n'), ((8101, 8112), 'numpy.cos', 'np.cos', (['ww1'], {}), '(ww1)\n', (8107, 8112), True, 'import numpy as np\n'), ((8217, 8228), 'numpy.cos', 'np.cos', (['ww2'], {}), '(ww2)\n', (8223, 8228), True, 'import numpy as np\n'), ((14408, 14442), 'numpy.array', 'np.array', (['(10000 + t / 24 / 60 / 60)'], {}), '(10000 + t / 24 / 60 / 60)\n', (14416, 14442), True, 'import numpy as np\n'), ((17326, 17336), 'numpy.abs', 'np.abs', (['yy'], {}), '(yy)\n', (17332, 17336), True, 'import numpy as np\n'), ((17503, 17525), 'numpy.array', 'np.array', (['(mid_time + t)'], {}), '(mid_time + t)\n', (17511, 17525), True, 'import numpy as np\n'), ((18468, 18500), 'numpy.array', 'np.array', (['(emt + t / 24 / 60 / 60)'], {}), '(emt + t / 24 / 60 / 60)\n', (18476, 18500), True, 'import numpy as np\n'), ((813, 832), 'numpy.sin', 'np.sin', (['inclination'], {}), '(inclination)\n', (819, 832), True, 'import numpy as np\n'), ((848, 858), 'numpy.sin', 'np.sin', (['vv'], {}), '(vv)\n', (854, 858), True, 'import numpy as np\n'), ((867, 886), 'numpy.cos', 'np.cos', (['inclination'], {}), '(inclination)\n', (873, 886), True, 'import numpy as np\n'), ((1037, 1085), 'numpy.sqrt', 'np.sqrt', (['((1 - eccentricity) / (1 + eccentricity))'], {}), '((1 - eccentricity) / (1 + eccentricity))\n', (1044, 1085), True, 'import numpy as np\n'), ((1088, 1102), 'numpy.tan', 'np.tan', (['(aa / 2)'], {}), '(aa / 2)\n', (1094, 1102), True, 'import numpy as np\n'), ((1785, 1833), 'numpy.sqrt', 'np.sqrt', (['((1 + eccentricity) / (1 - eccentricity))'], {}), '((1 + eccentricity) / (1 - eccentricity))\n', (1792, 
1833), True, 'import numpy as np\n'), ((1836, 1850), 'numpy.tan', 'np.tan', (['(u1 / 2)'], {}), '(u1 / 2)\n', (1842, 1850), True, 'import numpy as np\n'), ((1944, 1954), 'numpy.cos', 'np.cos', (['vv'], {}), '(vv)\n', (1950, 1954), True, 'import numpy as np\n'), ((2080, 2090), 'numpy.cos', 'np.cos', (['ww'], {}), '(ww)\n', (2086, 2090), True, 'import numpy as np\n'), ((2111, 2130), 'numpy.cos', 'np.cos', (['inclination'], {}), '(inclination)\n', (2117, 2130), True, 'import numpy as np\n'), ((2152, 2162), 'numpy.sin', 'np.sin', (['ww'], {}), '(ww)\n', (2158, 2162), True, 'import numpy as np\n'), ((2183, 2202), 'numpy.cos', 'np.cos', (['inclination'], {}), '(inclination)\n', (2189, 2202), True, 'import numpy as np\n'), ((12946, 13042), 'numpy.arange', 'np.arange', (['(-exp_time / 2 + exp_time / time_factor / 2)', '(exp_time / 2)', '(exp_time / time_factor)'], {}), '(-exp_time / 2 + exp_time / time_factor / 2, exp_time / 2, \n exp_time / time_factor)\n', (12955, 13042), True, 'import numpy as np\n'), ((13923, 13933), 'numpy.sin', 'np.sin', (['ww'], {}), '(ww)\n', (13929, 13933), True, 'import numpy as np\n'), ((14994, 15011), 'numpy.array', 'np.array', (['[10000]'], {}), '([10000])\n', (15002, 15011), True, 'import numpy as np\n'), ((16116, 16212), 'numpy.arange', 'np.arange', (['(-exp_time / 2 + exp_time / time_factor / 2)', '(exp_time / 2)', '(exp_time / time_factor)'], {}), '(-exp_time / 2 + exp_time / time_factor / 2, exp_time / 2, \n exp_time / time_factor)\n', (16125, 16212), True, 'import numpy as np\n'), ((17890, 17900), 'numpy.sin', 'np.sin', (['ww'], {}), '(ww)\n', (17896, 17900), True, 'import numpy as np\n'), ((19001, 19018), 'numpy.array', 'np.array', (['[10000]'], {}), '([10000])\n', (19009, 19018), True, 'import numpy as np\n'), ((1222, 1232), 'numpy.sin', 'np.sin', (['bb'], {}), '(bb)\n', (1228, 1232), True, 'import numpy as np\n'), ((1568, 1583), 'numpy.abs', 'np.abs', (['(u1 - u0)'], {}), '(u1 - u0)\n', (1574, 1583), True, 'import numpy as np\n'), 
((2098, 2108), 'numpy.sin', 'np.sin', (['ww'], {}), '(ww)\n', (2104, 2108), True, 'import numpy as np\n'), ((2170, 2180), 'numpy.cos', 'np.cos', (['ww'], {}), '(ww)\n', (2176, 2180), True, 'import numpy as np\n'), ((5844, 5855), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (5851, 5855), True, 'import numpy as np\n'), ((1267, 1308), 'numpy.int_', 'np.int_', (['((time_array - mid_time) / period)'], {}), '((time_array - mid_time) / period)\n', (1274, 1308), True, 'import numpy as np\n'), ((1540, 1550), 'numpy.cos', 'np.cos', (['u0'], {}), '(u0)\n', (1546, 1550), True, 'import numpy as np\n'), ((4382, 4395), 'numpy.sqrt', 'np.sqrt', (['musq'], {}), '(musq)\n', (4389, 4395), True, 'import numpy as np\n'), ((4543, 4561), 'numpy.sqrt', 'np.sqrt', (['(1.0 - rsq)'], {}), '(1.0 - rsq)\n', (4550, 4561), True, 'import numpy as np\n'), ((6107, 6118), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (6114, 6118), True, 'import numpy as np\n'), ((1502, 1512), 'numpy.sin', 'np.sin', (['u0'], {}), '(u0)\n', (1508, 1512), True, 'import numpy as np\n'), ((7307, 7318), 'numpy.sin', 'np.sin', (['ww1'], {}), '(ww1)\n', (7313, 7318), True, 'import numpy as np\n'), ((7423, 7434), 'numpy.sin', 'np.sin', (['ww2'], {}), '(ww2)\n', (7429, 7434), True, 'import numpy as np\n'), ((8151, 8162), 'numpy.sin', 'np.sin', (['ww1'], {}), '(ww1)\n', (8157, 8162), True, 'import numpy as np\n'), ((8267, 8278), 'numpy.sin', 'np.sin', (['ww2'], {}), '(ww2)\n', (8273, 8278), True, 'import numpy as np\n'), ((14175, 14195), 'numpy.sqrt', 'np.sqrt', (['(1 - ee ** 2)'], {}), '(1 - ee ** 2)\n', (14182, 14195), True, 'import numpy as np\n'), ((18142, 18162), 'numpy.sqrt', 'np.sqrt', (['(1 - ee ** 2)'], {}), '(1 - ee ** 2)\n', (18149, 18162), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:19:28 2020
@author: andreaapariciomartinez
"""
import scipy.stats as sts
import numpy as np
import csv
import math as mt
from matplotlib import pyplot as plt
from Network_ID import fn
#%%
# Early-warning analysis: for each listed simulation run, fit rolling-window
# slopes to every species' series, then score each species by how late its
# slope last dips to non-positive ("when slope is never negative again").
chsteps = 70 # parameter goes from 1 to 0 in these many steps
#define simulation data location
folderd = "data/dataSave/"
nums_tests = [13,14,15,19]  # indices of the simulation runs to analyse
for k in nums_tests:
    #get simulation data
    with open(folderd+fn+'_var1_'+str(k)+'.csv') as csvfile:
        v1l = list(csv.reader(csvfile,delimiter=' ',quoting=csv.QUOTE_NONNUMERIC)) #mutualistic network
    # rows = parameter steps, columns = species
    # (presumably per-species variances -- TODO confirm against simulation output)
    varM100 = np.array(v1l)
    sp = varM100.shape[1]
    # with open(folderd+fn+'_var0.csv') as csvfile:
    #     v0l = list(csv.reader(csvfile,delimiter=' ',quoting=csv.QUOTE_NONNUMERIC)) #mutualistic network
    # varM0 = np.array(v0l)
    # sp = len(varM0.T)
    # chsteps = len(varM0)
    #% calculate slope for a rolling window
    win = mt.ceil(chsteps/3)  # rolling-window length: one third of the parameter range
    # polyf0 = np.zeros((chsteps-win,sp))
    polyf100 = np.zeros((chsteps-win,sp))  # slope of each window start, per species
    a=range(win)  # x values shared by all the linear fits
    for i in range(chsteps-win):
        for j in range(sp):
            # polyf0[i,j] = np.polyfit(a,varM0[i:i+win,j],1)[0]
            # degree-1 fit; coefficient [0] is the slope of the window at step i
            polyf100[i,j] = np.polyfit(a,varM100[i:i+win,j],1)[0]
    #% calculate Early Warning Score (when slope is never negative again)
    # detection0t = win*np.ones(sp)
    detection100t = win*np.ones(sp)  # default used if no window has slope <= 0
    # for j in range(sp):
    #     for i in range(chsteps-win):
    #         if polyf0[chsteps-win-1-i,j]<=0:
    #             detection0t[j] = chsteps-i
    #             break
    for j in range(sp):
        # scan backwards: the first hit is the LAST window with slope <= 0
        for i in range(chsteps-win):
            if polyf100[chsteps-win-1-i,j]<=0:
                detection100t[j] = chsteps-i
                break
    # detection0 = 1-detection0t/chsteps
    detection100 = 1-detection100t/chsteps  # normalised: higher = earlier warning
    plt.scatter(range (sp),detection100)  # quick per-species visual check
    # np.savetxt(folderd+fn+"_detection0.csv", detection0)
    np.savetxt(folderd+fn+'_detection100_'+str(k)+'.csv', detection100)
| [
"csv.reader",
"math.ceil",
"numpy.polyfit",
"numpy.zeros",
"numpy.ones",
"numpy.array"
] | [((646, 659), 'numpy.array', 'np.array', (['v1l'], {}), '(v1l)\n', (654, 659), True, 'import numpy as np\n'), ((990, 1010), 'math.ceil', 'mt.ceil', (['(chsteps / 3)'], {}), '(chsteps / 3)\n', (997, 1010), True, 'import math as mt\n'), ((1065, 1094), 'numpy.zeros', 'np.zeros', (['(chsteps - win, sp)'], {}), '((chsteps - win, sp))\n', (1073, 1094), True, 'import numpy as np\n'), ((1431, 1442), 'numpy.ones', 'np.ones', (['sp'], {}), '(sp)\n', (1438, 1442), True, 'import numpy as np\n'), ((547, 611), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(csvfile, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC)\n", (557, 611), False, 'import csv\n'), ((1261, 1300), 'numpy.polyfit', 'np.polyfit', (['a', 'varM100[i:i + win, j]', '(1)'], {}), '(a, varM100[i:i + win, j], 1)\n', (1271, 1300), True, 'import numpy as np\n')] |
#coding=utf-8
import numpy as np
def loadFromPts(filename):
    """Read facial landmarks from a .pts file.

    Skips the three header lines and the closing '}' line, and shifts the
    1-based coordinates stored in the file to 0-based.
    """
    return np.genfromtxt(filename, skip_header=3, skip_footer=1) - 1
def saveToPts(filename, landmarks):
    """Write landmarks to a .pts file (coordinates stored 1-based).

    Emits the standard 'version / n_points / {' header, one
    '%.3f'-formatted point per line, and a closing '}' line.
    """
    shifted = landmarks + 1
    n_points = shifted.shape[0]
    np.savetxt(filename, shifted, delimiter=' ',
               header='version: 1\nn_points: {}\n{{'.format(n_points),
               footer='}', fmt='%.3f', comments='')
def bestFitRect(points, meanS, box=None):
    """Scale and translate the mean shape ``meanS`` to fit a bounding box.

    When ``box`` is omitted it is the axis-aligned bounding box of
    ``points`` as [x_min, y_min, x_max, y_max].  The mean shape is scaled
    by the average of the width and height ratios and centred on the box.
    """
    if box is None:
        xs, ys = points[:, 0], points[:, 1]
        box = np.array([xs.min(), ys.min(), xs.max(), ys.max()])
    center = np.array([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2])
    width, height = box[2] - box[0], box[3] - box[1]
    mean_w = meanS[:, 0].max() - meanS[:, 0].min()
    mean_h = meanS[:, 1].max() - meanS[:, 1].min()
    # Average of the horizontal and vertical scale factors.
    scale = (width / mean_w + height / mean_h) / 2
    fitted = meanS * scale
    fitted_center = [(fitted[:, 0].min() + fitted[:, 0].max()) / 2,
                     (fitted[:, 1].min() + fitted[:, 1].max()) / 2]
    fitted += center - fitted_center
    return fitted
def bestFit(destination, source, returnTransform=False):
    """Least-squares similarity transform mapping ``source`` to ``destination``.

    Returns the transformed source points, or ``(T, offset)`` when
    ``returnTransform`` is true (apply as ``points @ T + offset``).
    """
    dest_center = np.mean(destination, axis=0)
    src_center = np.mean(source, axis=0)
    src_vec = (source - src_center).flatten()
    dest_vec = (destination - dest_center).flatten()
    norm_sq = np.linalg.norm(src_vec) ** 2
    # Scaled-rotation parameters of the least-squares similarity transform.
    a = np.dot(src_vec, dest_vec) / norm_sq
    b = 0
    for k in range(destination.shape[0]):
        b += src_vec[2 * k] * dest_vec[2 * k + 1] - src_vec[2 * k + 1] * dest_vec[2 * k]
    b = b / norm_sq
    T = np.array([[a, b], [-b, a]])
    offset = dest_center - np.dot(src_center, T)
    if returnTransform:
        return T, offset
    return np.dot(src_vec.reshape((-1, 2)), T) + dest_center
def mirrorShape(shape, imgShape=None):
    """Mirror a single (n, 2) landmark shape horizontally.

    When ``imgShape`` is given, ``imgShape[1]`` (presumably the image
    width -- confirm with callers) is used as the reflection offset for
    the x coordinates; otherwise x is simply negated.

    Bug fix: previously the ``imgShape=None`` default was wrapped as
    ``np.array(None)`` (a 0-d object array) and reshaped to (1, 1), so
    ``mirrorShapes`` crashed with an IndexError on ``imgShapes[i][1]``.
    ``None`` is now forwarded unchanged, which selects plain negation.
    """
    if imgShape is None:
        return mirrorShapes(shape.reshape((1, -1, 2)))[0]
    imgShapeTemp = np.array(imgShape)
    return mirrorShapes(shape.reshape((1, -1, 2)), imgShapeTemp.reshape((1, -1)))[0]
def mirrorShapes(shapes, imgShapes=None):
    """Mirror a batch of 68-point landmark shapes horizontally.

    x coordinates are negated (or reflected about ``imgShapes[i][1]``
    when image sizes are given), and the left/right landmark groups of
    the 68-point annotation are swapped so the mirrored shapes keep a
    valid point ordering.  The input array is not modified.
    """
    shapes2 = shapes.copy()
    # Index groups of the 68-point annotation scheme.
    left_eye_up = list(range(36, 40))
    left_eye_down = [40, 41]
    right_eye_up = list(range(42, 46))
    right_eye_down = [46, 47]
    left_brow = list(range(17, 22))
    right_brow = list(range(22, 27))
    # Left/right pairs exchanged by mirroring (target order is reversed).
    swapped = [(left_eye_up, right_eye_up),
               (left_eye_down, right_eye_down),
               (left_brow, right_brow)]
    # Groups that map onto themselves in reversed order.
    self_mirrored = [
        list(range(48, 55)),   # upper outer mouth
        list(range(55, 60)),   # lower outer mouth
        list(range(60, 65)),   # upper inner mouth
        list(range(65, 68)),   # lower inner mouth
        list(range(31, 36)),   # lower nose
        list(range(17)),       # jaw outline
    ]
    for i in range(shapes.shape[0]):
        # Reflect x: plain negation, or flip across the stored image extent.
        if imgShapes is None:
            shapes2[i, :, 0] = -shapes2[i, :, 0]
        else:
            shapes2[i, :, 0] = -shapes2[i, :, 0] + imgShapes[i][1]
        for left, right in swapped:
            left_vals = shapes2[i, left].copy()
            right_vals = shapes2[i, right].copy()
            shapes2[i, right[::-1]] = left_vals
            shapes2[i, left[::-1]] = right_vals
        for group in self_mirrored:
            shapes2[i, group[::-1]] = shapes2[i, group].copy()
    return shapes2
| [
"numpy.savetxt",
"numpy.genfromtxt",
"numpy.mean",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot"
] | [((78, 131), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'skip_header': '(3)', 'skip_footer': '(1)'}), '(filename, skip_header=3, skip_footer=1)\n', (91, 131), True, 'import numpy as np\n'), ((315, 412), 'numpy.savetxt', 'np.savetxt', (['filename', 'pts'], {'delimiter': '""" """', 'header': 'header', 'footer': '"""}"""', 'fmt': '"""%.3f"""', 'comments': '""""""'}), "(filename, pts, delimiter=' ', header=header, footer='}', fmt=\n '%.3f', comments='')\n", (325, 412), True, 'import numpy as np\n'), ((592, 648), 'numpy.array', 'np.array', (['[(box[0] + box[2]) / 2, (box[1] + box[3]) / 2]'], {}), '([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2])\n', (600, 648), True, 'import numpy as np\n'), ((1209, 1237), 'numpy.mean', 'np.mean', (['destination'], {'axis': '(0)'}), '(destination, axis=0)\n', (1216, 1237), True, 'import numpy as np\n'), ((1252, 1275), 'numpy.mean', 'np.mean', (['source'], {'axis': '(0)'}), '(source, axis=0)\n', (1259, 1275), True, 'import numpy as np\n'), ((1606, 1633), 'numpy.array', 'np.array', (['[[a, b], [-b, a]]'], {}), '([[a, b], [-b, a]])\n', (1614, 1633), True, 'import numpy as np\n'), ((1648, 1666), 'numpy.dot', 'np.dot', (['srcMean', 'T'], {}), '(srcMean, T)\n', (1654, 1666), True, 'import numpy as np\n'), ((1859, 1877), 'numpy.array', 'np.array', (['imgShape'], {}), '(imgShape)\n', (1867, 1877), True, 'import numpy as np\n'), ((1377, 1400), 'numpy.dot', 'np.dot', (['srcVec', 'destVec'], {}), '(srcVec, destVec)\n', (1383, 1400), True, 'import numpy as np\n'), ((1403, 1425), 'numpy.linalg.norm', 'np.linalg.norm', (['srcVec'], {}), '(srcVec)\n', (1417, 1425), True, 'import numpy as np\n'), ((1567, 1589), 'numpy.linalg.norm', 'np.linalg.norm', (['srcVec'], {}), '(srcVec)\n', (1581, 1589), True, 'import numpy as np\n')] |
import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import ContinuousMLPPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleMLPModel
class TestContinuousMLPPolicy(TfGraphTestCase):
    """Tests for ContinuousMLPPolicy with its internal MLP model mocked.

    Each test patches ``garage.tf.policies.continuous_mlp_policy.MLPModel``
    with ``SimpleMLPModel`` so the policy output is deterministic; the
    assertions below expect a constant action of 0.5 in every dimension.
    """
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_get_action(self, obs_dim, action_dim):
        """get_action/get_actions return valid, expected actions."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        # Swap in the simple fixture model so the output is predictable.
        with mock.patch(('garage.tf.policies.'
                        'continuous_mlp_policy.MLPModel'),
                       new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        action, _ = policy.get_action(obs)
        # The fixture model is expected to yield 0.5 per action dimension.
        expected_action = np.full(action_dim, 0.5)
        assert env.action_space.contains(action)
        assert np.array_equal(action, expected_action)
        actions, _ = policy.get_actions([obs, obs, obs])
        for action in actions:
            assert env.action_space.contains(action)
            assert np.array_equal(action, expected_action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_get_action_sym(self, obs_dim, action_dim):
        """get_action_sym builds a graph op equivalent to get_action."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                        'continuous_mlp_policy.MLPModel'),
                       new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs_dim = env.spec.observation_space.flat_dim
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, obs_dim))
        action_sym = policy.get_action_sym(state_input, name='action_sym')
        expected_action = np.full(action_dim, 0.5)
        # Evaluate the symbolic action for the sampled observation.
        action = self.sess.run(action_sym,
                               feed_dict={state_input: [obs.flatten()]})
        action = policy.action_space.unflatten(action)
        assert np.array_equal(action, expected_action)
        assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """Pickling and unpickling the policy preserves its outputs."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                        'continuous_mlp_policy.MLPModel'),
                       new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        with tf.compat.v1.variable_scope('ContinuousMLPPolicy/MLPModel',
                                         reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
        # assign it to all one (makes the model output non-trivial)
        return_var.load(tf.ones_like(return_var).eval())
        output1 = self.sess.run(
            policy.model.outputs,
            feed_dict={policy.model.input: [obs.flatten()]})
        p = pickle.dumps(policy)
        # Restore the policy in a fresh graph/session and compare outputs.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(
                policy_pickled.model.outputs,
                feed_dict={policy_pickled.model.input: [obs.flatten()]})
        assert np.array_equal(output1, output2)
| [
"numpy.full",
"tensorflow.compat.v1.get_variable",
"pickle.loads",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.placeholder",
"garage.tf.policies.ContinuousMLPPolicy",
"tensorflow.ones_like",
"unittest.mock.patch",
"tensorflow.Graph",
"numpy.array_equal",
"pytest.mark.parametrize... | [((381, 529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obs_dim, action_dim"""', '[((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2\n )), ((2, 2), (2, 2))]'], {}), "('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)),\n ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])\n", (404, 529), False, 'import pytest\n'), ((1388, 1536), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obs_dim, action_dim"""', '[((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2\n )), ((2, 2), (2, 2))]'], {}), "('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)),\n ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])\n", (1411, 1536), False, 'import pytest\n'), ((2585, 2733), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obs_dim, action_dim"""', '[((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2\n )), ((2, 2), (2, 2))]'], {}), "('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)),\n ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])\n", (2608, 2733), False, 'import pytest\n'), ((1051, 1075), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (1058, 1075), True, 'import numpy as np\n'), ((1141, 1180), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1155, 1180), True, 'import numpy as np\n'), ((2068, 2127), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None, obs_dim)'}), '(tf.float32, shape=(None, obs_dim))\n', (2092, 2127), True, 'import tensorflow as tf\n'), ((2277, 2301), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (2284, 2301), True, 'import numpy as np\n'), ((2490, 2529), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (2504, 2529), True, 'import numpy as np\n'), 
((3609, 3629), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (3621, 3629), False, 'import pickle\n'), ((659, 710), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (670, 710), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((725, 813), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.continuous_mlp_policy.MLPModel"""'], {'new': 'SimpleMLPModel'}), "('garage.tf.policies.continuous_mlp_policy.MLPModel', new=\n SimpleMLPModel)\n", (735, 813), False, 'from unittest import mock\n'), ((885, 923), 'garage.tf.policies.ContinuousMLPPolicy', 'ContinuousMLPPolicy', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (904, 923), False, 'from garage.tf.policies import ContinuousMLPPolicy\n'), ((1342, 1381), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1356, 1381), True, 'import numpy as np\n'), ((1670, 1721), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (1681, 1721), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((1736, 1824), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.continuous_mlp_policy.MLPModel"""'], {'new': 'SimpleMLPModel'}), "('garage.tf.policies.continuous_mlp_policy.MLPModel', new=\n SimpleMLPModel)\n", (1746, 1824), False, 'from unittest import mock\n'), ((1896, 1934), 'garage.tf.policies.ContinuousMLPPolicy', 'ContinuousMLPPolicy', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (1915, 1934), False, 'from garage.tf.policies import ContinuousMLPPolicy\n'), ((2866, 2917), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (2877, 2917), False, 'from tests.fixtures.envs.dummy import 
DummyBoxEnv\n'), ((2932, 3020), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.continuous_mlp_policy.MLPModel"""'], {'new': 'SimpleMLPModel'}), "('garage.tf.policies.continuous_mlp_policy.MLPModel', new=\n SimpleMLPModel)\n", (2942, 3020), False, 'from unittest import mock\n'), ((3092, 3130), 'garage.tf.policies.ContinuousMLPPolicy', 'ContinuousMLPPolicy', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (3111, 3130), False, 'from garage.tf.policies import ContinuousMLPPolicy\n'), ((3201, 3272), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""ContinuousMLPPolicy/MLPModel"""'], {'reuse': '(True)'}), "('ContinuousMLPPolicy/MLPModel', reuse=True)\n", (3228, 3272), True, 'import tensorflow as tf\n'), ((3340, 3379), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', (['"""return_var"""'], {}), "('return_var')\n", (3365, 3379), True, 'import tensorflow as tf\n'), ((3720, 3735), 'pickle.loads', 'pickle.loads', (['p'], {}), '(p)\n', (3732, 3735), False, 'import pickle\n'), ((3906, 3938), 'numpy.array_equal', 'np.array_equal', (['output1', 'output2'], {}), '(output1, output2)\n', (3920, 3938), True, 'import numpy as np\n'), ((3435, 3459), 'tensorflow.ones_like', 'tf.ones_like', (['return_var'], {}), '(return_var)\n', (3447, 3459), True, 'import tensorflow as tf\n'), ((3670, 3680), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3678, 3680), True, 'import tensorflow as tf\n')] |
import torch
from torch.autograd import grad
from tqdm import tqdm
import os
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from util.ClassifierDataSet import ClassifierDataSet
from torch.utils.data import ConcatDataset
import numpy as np
import math
class InF():
    """Influence-function based weighting of (pseudo-)OOD training samples.

    Estimates the influence of each sample on the validation loss of ``mlp``
    (a classifier head over pre-computed representations ``reps``) using a
    LiSSA-style stochastic approximation of the inverse-Hessian-vector
    product. Intermediate "s" vectors and the final weights are cached on
    disk under ``output/weighting/<dataset>/``.
    """
    def __init__(self, args, mlp, reps):
        """Set up loss/criteria, output directories, loaders and the
        [start_index, end_index) validation slice this worker processes.

        Args:
            args: run configuration (expects dataset, aupr_out, split_num,
                ith, train, seed, device, gpu, repeat, recursion, damp,
                scale, gamma, m_in, m_out).
            mlp: classifier head whose parameters the Hessian is taken over.
            reps: dict of pre-computed input representations keyed by split.
        """
        self.args = args
        self.mlp = mlp
        self.reps = reps
        self.criterion = nn.CrossEntropyLoss()
        # set() deduplicates shared parameter objects before differentiation
        self.params = list(set(self.mlp.parameters()))
        self.kl = torch.nn.KLDivLoss(reduction='batchmean')
        # Create the cache directory hierarchy if it does not exist yet.
        if not os.path.isdir('output/weighting/{}/s_result'.format(self.args.dataset)):
            os.mkdir('output/weighting/{}/s_result'.format(self.args.dataset))
        if not os.path.isdir('output/weighting/{}/influence_result'.format(self.args.dataset)):
            os.mkdir('output/weighting/{}/influence_result'.format(self.args.dataset))
        self.s_dir = 'output/weighting/{}/s_result/{}'.format(self.args.dataset, self.args.aupr_out)
        self.influence_dir = 'output/weighting/{}/influence_result/{}'.format(self.args.dataset, self.args.aupr_out)
        if not os.path.isdir(self.s_dir):
            os.mkdir(self.s_dir)
        if not os.path.isdir(self.influence_dir):
            os.mkdir(self.influence_dir)
        self.splits = ['train', 'valid', 'ood']
        self.init_loader()
        # Partition the validation set into args.split_num contiguous chunks;
        # this worker handles chunk args.ith (the last chunk absorbs the
        # remainder from integer division).
        test_num = int(len(self.loaders['valid'].dataset.y) / args.split_num)
        self.start_index = args.ith * test_num
        if args.ith == args.split_num - 1:
            self.end_index = len(self.loaders['valid'].dataset.y)
        else:
            self.end_index = (args.ith + 1) * test_num
    def init_loader(self):
        """Build batch-size-1, unshuffled loaders for train/valid/ood plus a
        'merge' loader over the concatenation of train and ood (used when
        estimating the Hessian)."""
        self.loaders = {}
        for split in self.splits:
            if split == 'ood':
                dataset = ClassifierDataSet('output/generating/{}/ood_{}.csv'.format(self.args.dataset, self.args.seed))
            else:
                dataset = ClassifierDataSet(self.args.train.replace('train', split))
            # shuffle=False keeps indices aligned with self.reps[split].
            loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False)
            self.loaders[split] = loader
        merge_dataset = ConcatDataset([self.loaders['train'].dataset, self.loaders['ood'].dataset])
        self.loaders['merge'] = torch.utils.data.DataLoader(dataset=merge_dataset, batch_size=1, shuffle=False, drop_last=False)
    def cal_s(self):
        """Compute and cache the s-vector (inverse-HVP of the test gradient)
        for every validation sample in this worker's index slice; one file
        per sample index is written into self.s_dir."""
        # If the directory already holds one file per validation sample,
        # assume a previous run finished the job.
        if len(os.listdir(self.s_dir)) == len(self.loaders['valid'].dataset.y):
            print('{} has been calculated'.format(self.s_dir))
        else:
            for index, (_, y) in enumerate(self.loaders['valid']):
                if index >= self.start_index and index < self.end_index:
                    print('{}: {}/{}'.format(self.args.ith, index - self.start_index, self.end_index - self.start_index))
                    x = self.reps['valid'][index].unsqueeze(0)
                    x, y = x.to(self.args.device), y.to(self.args.device)
                    s = self.cal_s_single(x, y)
                    torch.save(s, '{}/{}'.format(self.s_dir, index))
    def cal_s_single(self, x_test, y_test):
        """LiSSA estimate of H^{-1} v for one test point, where v is the test
        gradient and H the (damped, scaled) training-loss Hessian.

        Averages self.args.repeat independent recursions, each running up to
        self.args.recursion steps over the 'merge' loader. Returns a single
        flattened tensor.
        """
        ihvp = None
        for i in range(self.args.repeat):
            v = self.grad_z(x_test, y_test)
            h_estimate = v.copy()
            for j, (_, y) in enumerate(self.loaders['merge']):
                x = self.reps['merge'][j].unsqueeze(0).to(self.args.device)
                y = y.to(self.args.device)
                output = self.mlp(x)
                loss = self.loss_with_energy(output, y)
                hv = self.hvp(loss, self.params, h_estimate)
                # LiSSA update: h <- v + (I - (H + damp)/scale) h
                h_estimate = [_v + (1 - self.args.damp) * _h_e - _hv / self.args.scale for _v, _h_e, _hv in zip(v, h_estimate, hv)]
                # Detach so the recursion does not grow the autograd graph.
                h_estimate = [one.detach() for one in h_estimate]
                if j == self.args.recursion - 1:
                    break
                # if j % 50 == 0:
                #     print("Recursion at depth %s: norm is %f" % (j, np.linalg.norm(self.gather_flat_grad(h_estimate).cpu().numpy())))
            # Undo the 1/scale applied inside the recursion, then accumulate
            # across repeats.
            if ihvp == None:
                ihvp = [_a / self.args.scale for _a in h_estimate]
            else:
                ihvp = [_a + _b / self.args.scale for _a, _b in zip(ihvp, h_estimate)]
        return_ihvp = self.gather_flat_grad(ihvp)
        return_ihvp /= self.args.repeat
        return return_ihvp
    def gather_flat_grad(self, grads):
        """Flatten a list of (possibly sparse) gradient tensors into one 1-D
        tensor, preserving parameter order."""
        views = []
        for p in grads:
            if p.data.is_sparse:
                view = p.data.to_dense().view(-1)
            else:
                view = p.data.view(-1)
            views.append(view)
        return torch.cat(views, 0)
    def loss_with_energy(self, output, y):
        """Cross-entropy plus an energy-margin regularizer.

        In-distribution samples (y != -1) get CE plus a penalty when their
        (negative) energy exceeds margin m_in; OOD samples (labelled -1) get
        only a penalty when their energy is below margin m_out. Both
        penalties are weighted by 0.1.
        """
        energy = -torch.logsumexp(output, dim=1)
        if y.item() != -1:
            loss = self.criterion(output, y)
            energy_loss = torch.pow(F.relu(energy - self.args.m_in), 2)
            return loss + 0.1 * energy_loss
        else: # ood
            energy_loss = torch.pow(F.relu(self.args.m_out - energy), 2)
            return energy_loss * 0.1
    def grad_z(self, x, y):
        """Gradient of the per-sample loss w.r.t. self.params, with
        create_graph=True so second derivatives (HVPs) remain possible."""
        output = self.mlp(x)
        loss = self.loss_with_energy(output, y)
        return list(grad(loss, self.params, create_graph=True))
    def hvp(self, loss, model_params, v):
        """Hessian-vector product: d/dp (dloss/dp . v)."""
        grad1 = grad(loss, model_params, create_graph=True, retain_graph=True)
        Hv = grad(grad1, model_params, grad_outputs=v)
        return Hv
    def cal_influence(self):
        """Average the cached s-vectors per in-distribution intent, compute
        each OOD sample's influence score, and write sigmoid-normalized
        weights to output/weighting/<dataset>/weight/<aupr_out>/weight.csv.

        Assumes the validation set is grouped into consecutive runs of
        ``per_intent_num`` samples per intent — TODO confirm against the
        dataset construction.
        """
        if self.args.dataset == 'clinc150':
            per_intent_num = 20 # valid number
        else:
            per_intent_num = 100 # snips
        if os.path.isdir(self.s_dir) and len(os.listdir(self.s_dir)) == len(self.loaders['valid'].dataset.y):
            print('Start loading s from {}'.format(self.s_dir))
            s_avgs = {}
            # Accumulate s over each intent's block, then average on the
            # block's last index and key the result by the intent label.
            for index, (x, y) in enumerate(self.loaders['valid']):
                s_test = torch.load('{}/{}'.format(self.s_dir, index), \
                    map_location='cuda:{}'.format(self.args.gpu) if self.args.gpu != -1 else 'cpu')
                if index % per_intent_num == 0:
                    s_avg = s_test
                elif (index + 1) % per_intent_num == 0:
                    s_avg += s_test
                    s_avg /= per_intent_num
                    s_avgs[y.item()] = s_avg
                else:
                    s_avg += s_test
        else:
            print('Please calculate s first')
            return
        # ood
        data = pd.read_csv('output/generating/{}/ood_{}.csv'.format(self.args.dataset, self.args.seed))
        ind_labels = list(data['ind_index'])
        utts = []
        influences = []
        for index, (utt, y) in enumerate(tqdm(self.loaders['ood'], desc='grad index')):
            # Influence = -s(intent)^T grad_z(ood sample); negative dot
            # product follows the influence-function definition.
            s_avg = s_avgs[ind_labels[index]]
            x = self.reps['ood'][index].unsqueeze(0).to(self.args.device)
            y = y.to(self.args.device)
            grad_z_vec = self.grad_z(x, y)
            grad_z_vec = self.gather_flat_grad(grad_z_vec)
            influence = -torch.dot(s_avg, grad_z_vec).item()
            # data['influence'].append(influence)
            # data['utt'].append(utt[0])
            utts.append(utt[0])
            influences.append(influence)
        # Squash influences through a sigmoid with temperature gamma scaled
        # by the observed influence range, mapping them to (0, 1) weights.
        max_influence = np.max(influences)
        min_influence = np.min(influences)
        normalized_influences = [1 / (1 + math.exp(self.args.gamma * influence / (max_influence - min_influence))) for influence in influences]
        # utts = ['{}[SPLIT]{}'.format(utt, influence) for utt, influence in zip(utts, normalized_influences)]
        data = pd.DataFrame({'utt': utts, 'influence': influences, \
            'weight': normalized_influences, 'index': [-1] * len(utts)})
        data.to_csv('output/weighting/{}/weight/{}/weight.csv'.format(self.args.dataset, self.args.aupr_out))
        # data = pd.read_csv(self.args.train)
        # utts = []
        # influences = []
        # for index, (utt, y) in enumerate(tqdm(self.loaders['train'], desc='grad index')):
        #     s_avg = s_avgs[y.item()]
        #     x = self.reps['train'][index].unsqueeze(0).to(self.args.device)
        #     y = y.to(self.args.device)
        #     grad_z_vec = self.grad_z(x, y)
        #     grad_z_vec = self.gather_flat_grad(grad_z_vec)
        #     influence = -torch.dot(s_avg, grad_z_vec).item()
        #     # data['influence'].append(influence)
        #     # data['utt'].append(utt[0])
        #     utts.append(utt[0])
        #     influences.append(influence)
        # data = pd.DataFrame({'utt': utts, 'influence': influences, \
        #     'intent': list(data['intent'])})
        # data.to_csv('output/weighting/{}/weight/{}/weight_ind.csv'.format(self.args.dataset, self.args.aupr_out))
| [
"os.mkdir",
"torch.logsumexp",
"torch.utils.data.ConcatDataset",
"tqdm.tqdm",
"math.exp",
"torch.utils.data.DataLoader",
"torch.dot",
"torch.autograd.grad",
"os.path.isdir",
"torch.nn.KLDivLoss",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"numpy.max",
"numpy.min",
"torch.nn.functional.rel... | [((434, 455), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (453, 455), True, 'import torch.nn as nn\n'), ((537, 578), 'torch.nn.KLDivLoss', 'torch.nn.KLDivLoss', ([], {'reduction': '"""batchmean"""'}), "(reduction='batchmean')\n", (555, 578), False, 'import torch\n'), ((2242, 2317), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (["[self.loaders['train'].dataset, self.loaders['ood'].dataset]"], {}), "([self.loaders['train'].dataset, self.loaders['ood'].dataset])\n", (2255, 2317), False, 'from torch.utils.data import ConcatDataset\n'), ((2350, 2451), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'merge_dataset', 'batch_size': '(1)', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset=merge_dataset, batch_size=1, shuffle=\n False, drop_last=False)\n', (2377, 2451), False, 'import torch\n'), ((4706, 4725), 'torch.cat', 'torch.cat', (['views', '(0)'], {}), '(views, 0)\n', (4715, 4725), False, 'import torch\n'), ((5372, 5434), 'torch.autograd.grad', 'grad', (['loss', 'model_params'], {'create_graph': '(True)', 'retain_graph': '(True)'}), '(loss, model_params, create_graph=True, retain_graph=True)\n', (5376, 5434), False, 'from torch.autograd import grad\n'), ((5448, 5489), 'torch.autograd.grad', 'grad', (['grad1', 'model_params'], {'grad_outputs': 'v'}), '(grad1, model_params, grad_outputs=v)\n', (5452, 5489), False, 'from torch.autograd import grad\n'), ((7350, 7368), 'numpy.max', 'np.max', (['influences'], {}), '(influences)\n', (7356, 7368), True, 'import numpy as np\n'), ((7393, 7411), 'numpy.min', 'np.min', (['influences'], {}), '(influences)\n', (7399, 7411), True, 'import numpy as np\n'), ((1182, 1207), 'os.path.isdir', 'os.path.isdir', (['self.s_dir'], {}), '(self.s_dir)\n', (1195, 1207), False, 'import os\n'), ((1221, 1241), 'os.mkdir', 'os.mkdir', (['self.s_dir'], {}), '(self.s_dir)\n', (1229, 1241), False, 'import os\n'), ((1266, 1299), 'os.path.isdir', 
'os.path.isdir', (['self.influence_dir'], {}), '(self.influence_dir)\n', (1279, 1299), False, 'import os\n'), ((1313, 1341), 'os.mkdir', 'os.mkdir', (['self.influence_dir'], {}), '(self.influence_dir)\n', (1321, 1341), False, 'import os\n'), ((2086, 2180), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': '(1)', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=1, shuffle=False,\n drop_last=False)\n', (2113, 2180), False, 'import torch\n'), ((4792, 4822), 'torch.logsumexp', 'torch.logsumexp', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (4807, 4822), False, 'import torch\n'), ((5269, 5311), 'torch.autograd.grad', 'grad', (['loss', 'self.params'], {'create_graph': '(True)'}), '(loss, self.params, create_graph=True)\n', (5273, 5311), False, 'from torch.autograd import grad\n'), ((5707, 5732), 'os.path.isdir', 'os.path.isdir', (['self.s_dir'], {}), '(self.s_dir)\n', (5720, 5732), False, 'import os\n'), ((6783, 6827), 'tqdm.tqdm', 'tqdm', (["self.loaders['ood']"], {'desc': '"""grad index"""'}), "(self.loaders['ood'], desc='grad index')\n", (6787, 6827), False, 'from tqdm import tqdm\n'), ((2484, 2506), 'os.listdir', 'os.listdir', (['self.s_dir'], {}), '(self.s_dir)\n', (2494, 2506), False, 'import os\n'), ((4931, 4962), 'torch.nn.functional.relu', 'F.relu', (['(energy - self.args.m_in)'], {}), '(energy - self.args.m_in)\n', (4937, 4962), True, 'import torch.nn.functional as F\n'), ((5068, 5100), 'torch.nn.functional.relu', 'F.relu', (['(self.args.m_out - energy)'], {}), '(self.args.m_out - energy)\n', (5074, 5100), True, 'import torch.nn.functional as F\n'), ((5741, 5763), 'os.listdir', 'os.listdir', (['self.s_dir'], {}), '(self.s_dir)\n', (5751, 5763), False, 'import os\n'), ((7454, 7525), 'math.exp', 'math.exp', (['(self.args.gamma * influence / (max_influence - min_influence))'], {}), '(self.args.gamma * influence / (max_influence - min_influence))\n', (7462, 7525), False, 
'import math\n'), ((7116, 7144), 'torch.dot', 'torch.dot', (['s_avg', 'grad_z_vec'], {}), '(s_avg, grad_z_vec)\n', (7125, 7144), False, 'import torch\n')] |
from floodsystem.stationdata import build_station_list
from floodsystem.station import *
from floodsystem.geo import *
from floodsystem.plot import *
from floodsystem.flood import *
from floodsystem.datafetcher import fetch_measure_levels
from datetime import *
from floodsystem.analysis import *
import numpy as np
def run():
    """Requirements for Task 2G.

    Fetches live river level data, averages relative water levels per
    river, classifies each river's flood risk (low/moderate/high/severe —
    the two middle bands are refined by the sign of a fitted level
    gradient), then prints the towns on rivers in each band.
    """
    #Build list of stations
    stations = build_station_list()
    #Determine which stations can be computed (consistent typical range)
    valid_stations = [stations[x] for x in range(len(stations)) if MonitoringStation.typical_range_consistent(stations[x]) == True]
    #Update water levels
    update_water_levels(valid_stations)
    # Drop stations whose relative water level cannot be computed.
    invalid = []
    for x in range(len(valid_stations)):
        if MonitoringStation.relative_water_level(valid_stations[x]) is None:
            invalid.append(valid_stations[x])
        else:
            pass
    valid_stations = [x for x in valid_stations if x not in invalid]
    print(len(valid_stations))
    #Only worried about station if water level is above average
    stations_level = stations_level_over_threshold(stations, 0.5)
    # Match (name, level) pairs back to station objects.
    station_over_tol = []
    for i in range(len(stations_level)):
        for x in range(len(valid_stations)):
            if valid_stations[x].name == stations_level[i][0] and MonitoringStation.relative_water_level(valid_stations[x]) == stations_level[i][1]:
                station_over_tol.append(valid_stations[x])
            else:
                pass
    #Determine rivers at risk
    stations_to_river = stations_by_river(station_over_tol)
    #Set up lists for categorising risks at rivers
    rivers_low = []
    rivers_moderate =[]
    rivers_high = []
    rivers_severe = []
    #Fetch water levels from pass 2 days & compare
    dt = 2
    #Evaluate average relative water levels at rivers & compare
    keys = list(stations_to_river.keys())
    values = list(stations_to_river.values())
    values_1 = []
    for i in range(len(values)):
        # NOTE(review): `lst` is rebuilt for every station x but only the
        # value from the final iteration is appended, so each river's
        # "average" reflects only its last listed station — confirm intent.
        for x in range(len(values[i])):
            lst = [MonitoringStation.relative_water_level(station_over_tol[n]) for n in range(len(station_over_tol))
                if station_over_tol[n].name == values[i][x] and station_over_tol[n].river == keys[i]]
        values_1.append(sum(lst) / len(lst))
    for i in range(len(keys)):
        if values_1[i] < 0.8:
            rivers_low.append(keys[i])
        elif values_1[i] >= 0.8 and values_1[i] < 1:
            # Pick a representative station (the last match) on this river.
            for n in range(len(station_over_tol)):
                if station_over_tol[n].river == keys[i]:
                    station = station_over_tol[n]
            dates, levels = fetch_measure_levels(station.measure_id, dt=timedelta(days=dt))
            if len(dates) == 0:
                m = 0
            else:
                # NOTE(review): polyfit's third return is reused both as the
                # evaluation point and (via gradient) the trend slope —
                # verify against floodsystem.analysis.polyfit's contract.
                poly, d0, m = polyfit(dates, levels, 4)
                gradient = np.polyder(poly)
                m = gradient(m)
            # Falling level -> moderate, rising -> high.
            if m <= 0:
                rivers_moderate.append(keys[i])
            else:
                rivers_high.append(keys[i])
        elif values_1[i] >= 1 and values_1[i] < 2:
            for n in range(len(station_over_tol)):
                if station_over_tol[n].river == keys[i]:
                    station = station_over_tol[n]
            dates, levels = fetch_measure_levels(station.measure_id, dt=timedelta(days=dt))
            if len(dates) == 0:
                m = 0
            else:
                poly, d0, m = polyfit(dates, levels, 4)
                gradient = np.polyder(poly)
                m = gradient(m)
            # Falling level -> high, rising -> severe.
            if m <= 0:
                rivers_high.append(keys[i])
            else:
                rivers_severe.append(keys[i])
        else:
            rivers_severe.append(keys[i])
    # Collect the towns served by stations on rivers in each risk band.
    towns_low = []
    towns_moderate = []
    towns_high = []
    towns_severe = []
    for i in range(len(rivers_low)):
        towns_low.append([station_over_tol[n].town for n in range(len(station_over_tol))
            if station_over_tol[n].river == rivers_low[i]])
    for i in range(len(rivers_moderate)):
        towns_moderate.append([station_over_tol[n].town for n in range(len(station_over_tol))
            if station_over_tol[n].river == rivers_moderate[i]])
    for i in range(len(rivers_high)):
        towns_high.append([station_over_tol[n].town for n in range(len(station_over_tol))
            if station_over_tol[n].river == rivers_high[i]])
    for i in range(len(rivers_severe)):
        towns_severe.append([station_over_tol[n].town for n in range(len(station_over_tol))
            if station_over_tol[n].river == rivers_severe[i]])
    print("--- Towns with low risks ---")
    print(towns_low)
    print("--- Towns with moderate risks ---")
    print(towns_moderate)
    print("--- Towns with high risks ---")
    print(towns_high)
    print("--- Towns with severe risks ---")
    print(towns_severe)
    #stations_highest_rel_level(stations, N)
    #print(type(station_over_tol[0].town))
    # stations_to_town = stations_by_town(station_over_tol)
    # print(len(stations_to_town))
    # values_1.append([station_over_tol[n] for n in range(len(station_over_tol))
    #     if station_over_tol[n].name == values[i][x] and station_over_tol[n].river == keys[i]])
if __name__ == "__main__":
print("*** Task 2G: CUED Part IA Flood Warning System ***")
run() | [
"floodsystem.stationdata.build_station_list",
"numpy.polyder"
] | [((408, 428), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (426, 428), False, 'from floodsystem.stationdata import build_station_list\n'), ((2876, 2892), 'numpy.polyder', 'np.polyder', (['poly'], {}), '(poly)\n', (2886, 2892), True, 'import numpy as np\n'), ((3524, 3540), 'numpy.polyder', 'np.polyder', (['poly'], {}), '(poly)\n', (3534, 3540), True, 'import numpy as np\n')] |
import numpy as np
from panda_gym.envs.core import BimanualTaskEnv
from panda_gym.envs.robots.panda import Panda
from panda_gym.envs.tasks.assemble_bimanual import AssembleBimanual
from panda_gym.pybullet import PyBullet
class PandaAssembleBimanualEnv(BimanualTaskEnv):
    """Bimanual assembly task with two Panda robots on opposite sides.

    Args:
        render (bool, optional): Activate rendering. Defaults to False.
        control_type (str, optional): "ee" to control end-effector position or
            "joints" to control joint values. Defaults to "ee".
        has_object (bool, optional): Unused by this environment; kept for
            interface compatibility.
        obj_not_in_hand_rate: Forwarded to the AssembleBimanual task.
        obj_not_in_plate_rate: Forwarded to the AssembleBimanual task.
    """

    def __init__(self, render: bool = False, control_type: str = "ee", has_object = False, obj_not_in_hand_rate = 0, obj_not_in_plate_rate = 0) -> None:
        sim = PyBullet(render=render)
        base_offset = 0.775
        # Two arms mirrored about the origin; the second base is rotated by
        # the quaternion [0, 0, 1, 0] so the arms face one another.
        left_arm = Panda(sim, index=0, block_gripper=False,
                         base_position=np.array([-base_offset, 0.0, 0.0]),
                         control_type=control_type, base_orientation=[0, 0, 0, 1])
        right_arm = Panda(sim, index=1, block_gripper=False,
                          base_position=np.array([base_offset, 0.0, 0.0]),
                          control_type=control_type, base_orientation=[0, 0, 1, 0])
        task = AssembleBimanual(sim, left_arm.get_ee_position, right_arm.get_ee_position,
                                obj_not_in_hand_rate=obj_not_in_hand_rate,
                                obj_not_in_plate_rate=obj_not_in_plate_rate)
        super().__init__(left_arm, right_arm, task)
| [
"panda_gym.envs.tasks.assemble_bimanual.AssembleBimanual",
"numpy.array",
"panda_gym.pybullet.PyBullet"
] | [((740, 763), 'panda_gym.pybullet.PyBullet', 'PyBullet', ([], {'render': 'render'}), '(render=render)\n', (748, 763), False, 'from panda_gym.pybullet import PyBullet\n'), ((1312, 1478), 'panda_gym.envs.tasks.assemble_bimanual.AssembleBimanual', 'AssembleBimanual', (['sim', 'robot0.get_ee_position', 'robot1.get_ee_position'], {'obj_not_in_hand_rate': 'obj_not_in_hand_rate', 'obj_not_in_plate_rate': 'obj_not_in_plate_rate'}), '(sim, robot0.get_ee_position, robot1.get_ee_position,\n obj_not_in_hand_rate=obj_not_in_hand_rate, obj_not_in_plate_rate=\n obj_not_in_plate_rate)\n', (1328, 1478), False, 'from panda_gym.envs.tasks.assemble_bimanual import AssembleBimanual\n'), ((835, 863), 'numpy.array', 'np.array', (['[-0.775, 0.0, 0.0]'], {}), '([-0.775, 0.0, 0.0])\n', (843, 863), True, 'import numpy as np\n'), ((994, 1021), 'numpy.array', 'np.array', (['[0.775, 0.0, 0.0]'], {}), '([0.775, 0.0, 0.0])\n', (1002, 1021), True, 'import numpy as np\n')] |
import numpy as np
import gym
import copy
from collections import deque
import random
import pickle
# --- DDPG hyperparameters -------------------------------------------------
bsize = 32                 # minibatch size
q_in_size = 4              # critic input: 3-dim state + 1-dim action
q_hl1_size = 40
q_hl2_size = 40
q_out_size = 1
miu_in_size = 3            # actor input: Pendulum-v0 observation
miu_hl1_size = 40
miu_hl2_size = 40
miu_out_size = 1
replay_size = 100*1000     # replay buffer capacity
episodes_num = 500
iters_num = 1000           # max steps per episode
gamma = 0.99               # discount factor
upd_r = 0.1                # soft target-network update rate (tau)
lr_actor = 3*1e-3
lr_critic = 1e-2
seed = 105
np.random.seed(seed)
random.seed(seed)
running_r = None           # exponential moving average of episode reward
version = 1                # checkpoint file suffix
demo = True                # demo mode: load checkpoints, render, no training
resume = demo
render = demo
allow_writing = not demo
print(bsize, replay_size, gamma, upd_r, lr_actor, lr_critic, seed, version, demo)
# Q: critic weights; Miu: actor weights. Either loaded from pickled
# checkpoints or initialized with uniform fan-in scaling (final layers use
# the small uniform ranges from the DDPG paper).
if resume:
    Q = pickle.load(open('Q-pendulum-%d' % version, 'rb'))
    Miu = pickle.load(open('Miu-pendulum-%d' % version, 'rb'))
else:
    Q = {}
    Q['W1'] = np.random.uniform(-1., 1., (q_in_size, q_hl1_size)) / np.sqrt(q_in_size)
    Q['W2'] = np.random.uniform(-1., 1., (q_hl1_size, q_hl2_size)) / np.sqrt(q_hl1_size)
    Q['W3'] = np.random.uniform(-3*1e-4, 3*1e-4, (q_hl2_size, q_out_size))
    Miu = {}
    Miu['W1'] = np.random.uniform(-1., 1., (miu_in_size, miu_hl1_size)) / np.sqrt(miu_in_size)
    Miu['W2'] = np.random.uniform(-1., 1., (miu_hl1_size, miu_hl2_size)) / np.sqrt(miu_hl1_size)
    Miu['W3'] = np.random.uniform(-3*1e-3, 3*1e-3, (miu_hl2_size, miu_out_size))
# Target networks start as copies of the online networks.
Q_tar = copy.deepcopy(Q)
Miu_tar = copy.deepcopy(Miu)
# Per-weight first-moment (momentum) and second-moment (RMS) accumulators
# used by the hand-rolled RMSProp-with-momentum updates in train_Q/train_Miu.
Qgrad = {}
Qgrad_sq = {}
for k, v in Q.items(): Qgrad[k] = np.zeros_like(v)
for k, v in Q.items(): Qgrad_sq[k] = np.zeros_like(v)
Miugrad = {}
Miugrad_sq = {}
for k, v in Miu.items(): Miugrad[k] = np.zeros_like(v)
for k, v in Miu.items(): Miugrad_sq[k] = np.zeros_like(v)
R = deque([], replay_size)  # experience replay buffer
env = gym.make('Pendulum-v0')
def sample_batch(R, bsize):
    """Sample a random minibatch of transitions from the replay buffer.

    Args:
        R: replay buffer of [state1, action, reward, state2, done] entries.
        bsize: number of transitions to draw (without replacement).

    Returns:
        Tuple (states1, actions1, rewards, states2, dones) of numpy arrays;
        rewards has shape (bsize, 1), dones shape (bsize,).
    """
    batch = random.sample(list(R), bsize)
    # Build each column directly from the python list. The previous
    # np.array(batch) call created a ragged object ndarray, which is
    # deprecated (NEP 34) and raises ValueError on NumPy >= 1.24.
    states1 = np.array([data[0] for data in batch])
    actions1 = np.array([data[1] for data in batch])
    rewards = np.array([[data[2]] for data in batch])
    states2 = np.array([data[3] for data in batch])
    dones = np.array([data[4] for data in batch])
    return states1, actions1, rewards, states2, dones
def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    return np.maximum(x, 0)
def tanh(x):
    """Element-wise hyperbolic tangent.

    Uses np.tanh instead of the previous hand-rolled
    (e^x - e^-x) / (e^x + e^-x) form, which overflows in np.exp for large
    |x| and yields inf/inf = NaN; np.tanh saturates cleanly to +/-1.
    """
    return np.tanh(x)
def actions_Miu(states, Miu):
    """Actor forward pass.

    Two ReLU hidden layers followed by a tanh output scaled to the
    Pendulum torque range [-2, 2].
    """
    hidden = relu(np.matmul(states, Miu['W1']))
    hidden = relu(np.matmul(hidden, Miu['W2']))
    pre_activation = np.matmul(hidden, Miu['W3'])
    return 2 * tanh(pre_activation)
def values_Q(states, actions, Q):
    """Critic forward pass on concatenated (state, action) inputs.

    Returns (Q-values, second hidden activation, first hidden activation);
    the activations are reused by train_Q for backpropagation.
    """
    joined = np.concatenate([states, actions], axis=1)
    h1 = relu(np.matmul(joined, Q['W1']))
    h2 = relu(np.matmul(h1, Q['W2']))
    q_values = np.matmul(h2, Q['W3'])
    return q_values, h2, h1
def train_Q(douts, hl2, hl1, states, actions, Q):
    """Backprop one critic minibatch and update Q's weights in place.

    Args:
        douts: gradient of the TD loss w.r.t. the critic outputs.
        hl2, hl1: hidden activations saved from values_Q's forward pass.
        states, actions: the minibatch inputs.
        Q: critic weight dict, mutated in place.

    Also mutates the module-level Qgrad / Qgrad_sq moment accumulators
    (momentum 0.9 and squared-gradient average 0.999, RMSProp-style).
    """
    inputs = np.concatenate([states, actions], axis=1)
    # Backprop through the two hidden layers; zeroing where the forward
    # activation was <= 0 is the ReLU derivative.
    dhl2 = np.matmul(douts, Q['W3'].transpose())
    dhl2[hl2 <= 0] = 0
    dhl1 = np.matmul(dhl2, Q['W2'].transpose())
    dhl1[hl1 <= 0] = 0
    # Weight gradients from (activation^T x upstream-gradient) products.
    d = {}
    d['W3'] = np.matmul(hl2.transpose(), douts)
    d['W2'] = np.matmul(hl1.transpose(), dhl2)
    d['W1'] = np.matmul(inputs.transpose(), dhl1)
    # Momentum + RMS accumulators, then a gradient-descent step.
    for k in Qgrad: Qgrad[k] = Qgrad[k] * 0.9 + d[k] * 0.1
    for k in Qgrad_sq: Qgrad_sq[k] = Qgrad_sq[k] * 0.999 + (d[k]**2) * 0.001
    for k in Q: Q[k] -= lr_critic * Qgrad[k] / (np.sqrt(Qgrad_sq[k]) + 1e-5)
def train_Miu(states, Miu, Q):
    """DDPG actor update: gradient ASCENT on Q(s, Miu(s)), in place.

    Runs a fresh actor forward pass, pushes the critic's action gradient
    back through the actor, and applies an RMSProp-with-momentum step via
    the module-level Miugrad / Miugrad_sq accumulators. The critic Q is
    only read, never modified.
    """
    # Actor forward pass (recomputed so activations are available here).
    mhl1 = np.matmul(states, Miu['W1'])
    mhl1 = relu(mhl1)
    mhl2 = np.matmul(mhl1, Miu['W2'])
    mhl2 = relu(mhl2)
    outs = np.matmul(mhl2, Miu['W3'])
    actions = 2 * tanh(outs)
    # Critic forward pass on the actor's actions.
    inputs = np.concatenate([states, actions], axis=1)
    qhl1 = np.matmul(inputs, Q['W1'])
    qhl1 = relu(qhl1)
    qhl2 = np.matmul(qhl1, Q['W2'])
    qhl2 = relu(qhl2)
    # Backprop d(sum of Q-values)/d(action) through the critic.
    dvalues = np.ones((bsize, q_out_size))
    dqhl2 = np.matmul(dvalues, Q['W3'].transpose())
    dqhl2[qhl2 <= 0] = 0
    dqhl1 = np.matmul(dqhl2, Q['W2'].transpose())
    dqhl1[qhl1 <= 0] = 0
    dinputs = np.matmul(dqhl1, Q['W1'].transpose())
    # The action occupies column 3 of the critic input (after the 3 state
    # dims); averaged over the minibatch.
    dactions = dinputs[:, 3:4]
    dactions /= bsize
    # d(2*tanh(o))/do = 2*(1 - tanh(o)^2) = 2*(1 + a/2)*(1 - a/2) for a = 2*tanh(o).
    douts = dactions * 2 * (1 + actions/2) * (1 - actions/2)
    # Backprop through the actor's hidden layers (ReLU derivatives).
    dmhl2 = np.matmul(douts, Miu['W3'].transpose())
    dmhl2[mhl2 <= 0] = 0
    dmhl1 = np.matmul(dmhl2, Miu['W2'].transpose())
    dmhl1[mhl1 <= 0] = 0
    d = {}
    d['W3'] = np.matmul(mhl2.transpose(), douts)
    d['W2'] = np.matmul(mhl1.transpose(), dmhl2)
    d['W1'] = np.matmul(states.transpose(), dmhl1)
    # Momentum + RMS accumulators; '+=' (ascent) maximizes expected Q.
    for k in Miugrad: Miugrad[k] = Miugrad[k] * 0.9 + d[k] * 0.1
    for k in Miugrad_sq: Miugrad_sq[k] = Miugrad_sq[k] * 0.999 + (d[k]**2) * 0.001
    for k in Miu: Miu[k] += lr_actor * Miugrad[k] / (np.sqrt(Miugrad_sq[k]) + 1e-5)
def noise(episode):
    """Exploration noise: +/- a magnitude decaying as 1/(1 + episode/4).

    Returns 0.0 in demo mode (global `demo`); otherwise the sign is chosen
    uniformly at random each call.
    """
    if demo:
        return 0.
    magnitude = 1. / (1. + episode / 4)
    return magnitude if np.random.randint(2) == 0 else -magnitude
# --- Main DDPG training / demo loop --------------------------------------
arr_values_rewards = []  # per-episode [initial Q-value estimate, total reward]
for episode in range(1, episodes_num+1):
    state1 = env.reset()
    ep_reward = 0.
    # Critic's value estimate of the initial state, logged for diagnostics.
    value, _, _ = values_Q([state1], actions_Miu([state1], Miu), Q)
    for iter in range(1, iters_num+1):
        if render: env.render()
        # Act with exploration noise, step the env, store the transition.
        action = actions_Miu(state1, Miu)
        action += noise(episode)
        state2, reward, done, _ = env.step(action)
        R.append([state1, action, reward, state2, done])
        ep_reward += reward
        state1 = state2
        # One gradient update per env step once the buffer has a batch
        # (skipped entirely in demo mode).
        if(len(R) > bsize) and not demo:
            states1, actions1, rewards, states2, dones = sample_batch(R, bsize)
            # TD target y = r + gamma * Q_tar(s', Miu_tar(s')); terminal
            # transitions get no bootstrap term.
            actions2 = actions_Miu(states2, Miu_tar)
            values, _, _ = values_Q(states2, actions2, Q_tar)
            second_term = gamma * values
            second_term[dones] = 0
            y = rewards + second_term
            outs, hl2, hl1 = values_Q(states1, actions1, Q)
            douts = (outs - y) / bsize
            train_Q(douts, hl2, hl1, states1, actions1, Q)
            train_Miu(states1, Miu, Q)
            # Soft (Polyak) update of the target networks with rate upd_r.
            for k, v in Q.items(): Q_tar[k] = upd_r * v + (1-upd_r) * Q_tar[k]
            for k, v in Miu.items(): Miu_tar[k] = upd_r * v + (1-upd_r) * Miu_tar[k]
        if done or iter == iters_num:
            # Update the running-average reward and log this episode.
            running_r = (running_r * 0.9 + ep_reward * 0.1) if running_r != None else ep_reward
            arr_values_rewards.append([value, ep_reward])
            if episode % 1 == 0:
                print(np.mean(Q['W1']), np.mean(Q['W2']), np.mean(Q['W3']))
                print(np.mean(Miu['W1']), np.mean(Miu['W2']), np.mean(Miu['W3']))
                print('ep: %d, iters: %d, reward %f, run aver: %f' % \
                    (episode, iter, ep_reward, running_r))
            # Checkpoint every 10 episodes when writing is allowed.
            if episode % 10 == 0 and allow_writing:
                pickle.dump(Q, open('Q-pendulum-%d' % version, 'wb'))
                pickle.dump(Miu, open('Miu-pendulum-%d' % version, 'wb'))
                pickle.dump(arr_values_rewards, open('VR-pendulum-%d' % version, 'wb'))
break | [
"numpy.random.uniform",
"copy.deepcopy",
"numpy.zeros_like",
"numpy.random.seed",
"gym.make",
"numpy.maximum",
"numpy.concatenate",
"numpy.ones",
"numpy.random.randint",
"random.seed",
"numpy.array",
"numpy.exp",
"numpy.matmul",
"numpy.mean",
"collections.deque",
"numpy.sqrt"
] | [((376, 396), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (390, 396), True, 'import numpy as np\n'), ((397, 414), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (408, 414), False, 'import random\n'), ((1294, 1310), 'copy.deepcopy', 'copy.deepcopy', (['Q'], {}), '(Q)\n', (1307, 1310), False, 'import copy\n'), ((1321, 1339), 'copy.deepcopy', 'copy.deepcopy', (['Miu'], {}), '(Miu)\n', (1334, 1339), False, 'import copy\n'), ((1622, 1644), 'collections.deque', 'deque', (['[]', 'replay_size'], {}), '([], replay_size)\n', (1627, 1644), False, 'from collections import deque\n'), ((1651, 1674), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (1659, 1674), False, 'import gym\n'), ((935, 1003), 'numpy.random.uniform', 'np.random.uniform', (['(-3 * 0.0001)', '(3 * 0.0001)', '(q_hl2_size, q_out_size)'], {}), '(-3 * 0.0001, 3 * 0.0001, (q_hl2_size, q_out_size))\n', (952, 1003), True, 'import numpy as np\n'), ((1219, 1289), 'numpy.random.uniform', 'np.random.uniform', (['(-3 * 0.001)', '(3 * 0.001)', '(miu_hl2_size, miu_out_size)'], {}), '(-3 * 0.001, 3 * 0.001, (miu_hl2_size, miu_out_size))\n', (1236, 1289), True, 'import numpy as np\n'), ((1401, 1417), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1414, 1417), True, 'import numpy as np\n'), ((1455, 1471), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1468, 1471), True, 'import numpy as np\n'), ((1541, 1557), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1554, 1557), True, 'import numpy as np\n'), ((1599, 1615), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1612, 1615), True, 'import numpy as np\n'), ((1761, 1776), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (1769, 1776), True, 'import numpy as np\n'), ((1793, 1832), 'numpy.array', 'np.array', (['[data[0] for data in D_array]'], {}), '([data[0] for data in D_array])\n', (1801, 1832), True, 'import numpy as np\n'), ((1848, 1887), 'numpy.array', 
'np.array', (['[data[1] for data in D_array]'], {}), '([data[1] for data in D_array])\n', (1856, 1887), True, 'import numpy as np\n'), ((1902, 1943), 'numpy.array', 'np.array', (['[[data[2]] for data in D_array]'], {}), '([[data[2]] for data in D_array])\n', (1910, 1943), True, 'import numpy as np\n'), ((1958, 1997), 'numpy.array', 'np.array', (['[data[3] for data in D_array]'], {}), '([data[3] for data in D_array])\n', (1966, 1997), True, 'import numpy as np\n'), ((2010, 2049), 'numpy.array', 'np.array', (['[data[4] for data in D_array]'], {}), '([data[4] for data in D_array])\n', (2018, 2049), True, 'import numpy as np\n'), ((2132, 2148), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (2142, 2148), True, 'import numpy as np\n'), ((2173, 2182), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2179, 2182), True, 'import numpy as np\n'), ((2192, 2202), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2198, 2202), True, 'import numpy as np\n'), ((2278, 2306), 'numpy.matmul', 'np.matmul', (['states', "Miu['W1']"], {}), "(states, Miu['W1'])\n", (2287, 2306), True, 'import numpy as np\n'), ((2337, 2362), 'numpy.matmul', 'np.matmul', (['hl1', "Miu['W2']"], {}), "(hl1, Miu['W2'])\n", (2346, 2362), True, 'import numpy as np\n'), ((2394, 2419), 'numpy.matmul', 'np.matmul', (['hl2', "Miu['W3']"], {}), "(hl2, Miu['W3'])\n", (2403, 2419), True, 'import numpy as np\n'), ((2519, 2560), 'numpy.concatenate', 'np.concatenate', (['[states, actions]'], {'axis': '(1)'}), '([states, actions], axis=1)\n', (2533, 2560), True, 'import numpy as np\n'), ((2571, 2597), 'numpy.matmul', 'np.matmul', (['inputs', "Q['W1']"], {}), "(inputs, Q['W1'])\n", (2580, 2597), True, 'import numpy as np\n'), ((2628, 2651), 'numpy.matmul', 'np.matmul', (['hl1', "Q['W2']"], {}), "(hl1, Q['W2'])\n", (2637, 2651), True, 'import numpy as np\n'), ((2685, 2708), 'numpy.matmul', 'np.matmul', (['hl2', "Q['W3']"], {}), "(hl2, Q['W3'])\n", (2694, 2708), True, 'import numpy as np\n'), ((2804, 2845), 
'numpy.concatenate', 'np.concatenate', (['[states, actions]'], {'axis': '(1)'}), '([states, actions], axis=1)\n', (2818, 2845), True, 'import numpy as np\n'), ((3404, 3432), 'numpy.matmul', 'np.matmul', (['states', "Miu['W1']"], {}), "(states, Miu['W1'])\n", (3413, 3432), True, 'import numpy as np\n'), ((3466, 3492), 'numpy.matmul', 'np.matmul', (['mhl1', "Miu['W2']"], {}), "(mhl1, Miu['W2'])\n", (3475, 3492), True, 'import numpy as np\n'), ((3526, 3552), 'numpy.matmul', 'np.matmul', (['mhl2', "Miu['W3']"], {}), "(mhl2, Miu['W3'])\n", (3535, 3552), True, 'import numpy as np\n'), ((3597, 3638), 'numpy.concatenate', 'np.concatenate', (['[states, actions]'], {'axis': '(1)'}), '([states, actions], axis=1)\n', (3611, 3638), True, 'import numpy as np\n'), ((3650, 3676), 'numpy.matmul', 'np.matmul', (['inputs', "Q['W1']"], {}), "(inputs, Q['W1'])\n", (3659, 3676), True, 'import numpy as np\n'), ((3710, 3734), 'numpy.matmul', 'np.matmul', (['qhl1', "Q['W2']"], {}), "(qhl1, Q['W2'])\n", (3719, 3734), True, 'import numpy as np\n'), ((3773, 3801), 'numpy.ones', 'np.ones', (['(bsize, q_out_size)'], {}), '((bsize, q_out_size))\n', (3780, 3801), True, 'import numpy as np\n'), ((759, 812), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(q_in_size, q_hl1_size)'], {}), '(-1.0, 1.0, (q_in_size, q_hl1_size))\n', (776, 812), True, 'import numpy as np\n'), ((813, 831), 'numpy.sqrt', 'np.sqrt', (['q_in_size'], {}), '(q_in_size)\n', (820, 831), True, 'import numpy as np\n'), ((846, 900), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(q_hl1_size, q_hl2_size)'], {}), '(-1.0, 1.0, (q_hl1_size, q_hl2_size))\n', (863, 900), True, 'import numpy as np\n'), ((901, 920), 'numpy.sqrt', 'np.sqrt', (['q_hl1_size'], {}), '(q_hl1_size)\n', (908, 920), True, 'import numpy as np\n'), ((1027, 1084), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(miu_in_size, miu_hl1_size)'], {}), '(-1.0, 1.0, (miu_in_size, miu_hl1_size))\n', (1044, 1084), 
True, 'import numpy as np\n'), ((1085, 1105), 'numpy.sqrt', 'np.sqrt', (['miu_in_size'], {}), '(miu_in_size)\n', (1092, 1105), True, 'import numpy as np\n'), ((1122, 1180), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(miu_hl1_size, miu_hl2_size)'], {}), '(-1.0, 1.0, (miu_hl1_size, miu_hl2_size))\n', (1139, 1180), True, 'import numpy as np\n'), ((1181, 1202), 'numpy.sqrt', 'np.sqrt', (['miu_hl1_size'], {}), '(miu_hl1_size)\n', (1188, 1202), True, 'import numpy as np\n'), ((4732, 4752), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (4749, 4752), True, 'import numpy as np\n'), ((3331, 3351), 'numpy.sqrt', 'np.sqrt', (['Qgrad_sq[k]'], {}), '(Qgrad_sq[k])\n', (3338, 3351), True, 'import numpy as np\n'), ((4641, 4663), 'numpy.sqrt', 'np.sqrt', (['Miugrad_sq[k]'], {}), '(Miugrad_sq[k])\n', (4648, 4663), True, 'import numpy as np\n'), ((6301, 6317), 'numpy.mean', 'np.mean', (["Q['W1']"], {}), "(Q['W1'])\n", (6308, 6317), True, 'import numpy as np\n'), ((6319, 6335), 'numpy.mean', 'np.mean', (["Q['W2']"], {}), "(Q['W2'])\n", (6326, 6335), True, 'import numpy as np\n'), ((6337, 6353), 'numpy.mean', 'np.mean', (["Q['W3']"], {}), "(Q['W3'])\n", (6344, 6353), True, 'import numpy as np\n'), ((6377, 6395), 'numpy.mean', 'np.mean', (["Miu['W1']"], {}), "(Miu['W1'])\n", (6384, 6395), True, 'import numpy as np\n'), ((6397, 6415), 'numpy.mean', 'np.mean', (["Miu['W2']"], {}), "(Miu['W2'])\n", (6404, 6415), True, 'import numpy as np\n'), ((6417, 6435), 'numpy.mean', 'np.mean', (["Miu['W3']"], {}), "(Miu['W3'])\n", (6424, 6435), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import random
def draw_tree(xold, yold, theta, length, ratio=0.6, angle=np.pi / 5, min_length=0.009, style='-r'):
    """Recursively draw a binary fractal tree with matplotlib.

    Generalized from the original hard-coded version: the shrink ratio,
    branching angle, recursion cut-off and line style are now parameters
    whose defaults reproduce the original drawing exactly.

    Parameters:
        xold, yold: coordinates of the branch base.
        theta: direction of the branch in radians.
        length: length of this branch.
        ratio: shrink factor applied to each child branch (default 0.6).
        angle: angular offset of the two child branches (default pi/5).
        min_length: recursion stops once length <= min_length.
        style: matplotlib format string for the branch line.
    """
    # Stop recursing once branches become too short to matter visually.
    if length <= min_length:
        return
    # Tip of the current branch.
    xnew = xold + length * np.cos(theta)
    ynew = yold + length * np.sin(theta)
    plt.plot([xold, xnew], [yold, ynew], style)
    # Two children, rotated symmetrically and scaled down.
    draw_tree(xnew, ynew, theta + angle, length * ratio, ratio, angle, min_length, style)
    draw_tree(xnew, ynew, theta - angle, length * ratio, ratio, angle, min_length, style)
def main():
    """Draw a fractal tree rooted at (1, 1), growing straight up, and show it."""
    draw_tree(1, 1, np.pi / 2, 1)
    plt.show()


# Guard the entry point so importing this module does not trigger drawing.
if __name__ == "__main__":
    main()
| [
"numpy.sin",
"matplotlib.pyplot.show",
"numpy.cos",
"matplotlib.pyplot.plot"
] | [((441, 451), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (449, 451), True, 'import matplotlib.pyplot as plt\n'), ((231, 273), 'matplotlib.pyplot.plot', 'plt.plot', (['[xold, xnew]', '[yold, ynew]', '"""-r"""'], {}), "([xold, xnew], [yold, ynew], '-r')\n", (239, 273), True, 'import matplotlib.pyplot as plt\n'), ((150, 163), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (156, 163), True, 'import numpy as np\n'), ((186, 199), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (192, 199), True, 'import numpy as np\n')] |
import numpy as np
from sqlalchemy.orm import * # session
from sqlalchemy import create_engine
from obspy.core import UTCDateTime
from obspy.geodetics import locations2degrees, degrees2kilometers
from .table_tt_curve import *
from .table_nsta24 import *
from .tables3D import *
from .search_hypo import *
#from .pbr import pbr
from datetime import *
from operator import itemgetter
from itertools import combinations
import logging
import time #"from datetime import *" will import time, name space will be overwritten
class LocalAssociator():
    """
    The 3D Associator: associates phase picks with events using per-station
    S-P travel-time curves and a 3D hypocenter grid search (HYPO3D_Searching).
    """
    def __init__(self, assoc_db, nsta_db, tt_curve_db, fileHeader, max_Parr = 80, aggregation = 1, aggr_norm = 'L2', assoc_ot_uncert = 3, nsta_declare = 6, config_par = None, AS_MODE = False):
        """
        Parameters:
        assoc_db: associator (picks/candidates/events) database session
        nsta_db: station-table database session
        tt_curve_db: per-station S-P travel-time curve database session
        fileHeader: sequence holding the reference time fields (year/month/day/hour/minute)
        max_Parr: maximum P arrival time window (seconds)
        aggregation: the coefficient multiplied to minimum S-P time to form the pick-cluster window
        aggr_norm: L2: median; L1: mean
        assoc_ot_uncert: origin time uncertainty window (seconds)
        nsta_declare: minimum station number to declare an earthquake
        config_par: parsed configuration (used in aftershock mode)
        AS_MODE: aftershock mode flag
        """
        self.assoc_db = assoc_db
        self.nsta_db = nsta_db
        self.tt_curve_db = tt_curve_db
        self.max_Parr = max_Parr
        self.max_s_p = max_Parr*0.75 # need tuning
        self.min_s_p = 1.0
        self.aggregation = aggregation
        self.aggr_window = self.aggregation * self.min_s_p
        self.aggr_norm = aggr_norm # L1 takes the mean; L2 takes the median
        self.assoc_ot_uncert = assoc_ot_uncert # Uncertainty of origin times of candidates
        self.nsta_declare = nsta_declare # number observation to declare an evnet
        self.AS_MODE = AS_MODE # aftershock mode
        self.AS_STNs = []
        # Reference zero time built from the file header fields.
        self.zerotime = UTCDateTime(year=fileHeader[1], month=fileHeader[4], day=fileHeader[5], hour=fileHeader[6], minute=fileHeader[7])
        self.config = config_par
    def id_candidate_events(self):
        """ Create a set of possible candidate events from our picks table.
        Where session is the connection to the sqlalchemy database.
        This method simply takes all picks with time differences less than our maximum S-P
        times for each station and generates a list of candidate events
        (Candidate rows with a projected origin time from the S-P curve).
        """
        now1 = time.time()
        #############
        # Get all stations with unnassoiated picks
        stations=self.assoc_db.query(Pick.sta).filter(Pick.assoc_id==None).distinct().all()
        #print('stations:',len(stations))
        if self.AS_MODE :
            # Aftershock mode: restrict to stations within a configured radius
            # of the mainshock, and cap the usable S-P interval accordingly.
            print('AS_MODE initial')
            AS_LAT = self.config['AS_MODE'].getfloat('Latitude')
            AS_LON = self.config['AS_MODE'].getfloat('Longitude')
            AS_RAD = self.config['AS_MODE'].getfloat('Radius')
            # S-P cap derived from radius; presumably assumes ~8 km/s here — TODO confirm
            self.AS_SP_TIME = timedelta(seconds=(AS_RAD/8.0)*1.5)
            self.AS_STNs = self.search_stns_in_range(AS_LAT, AS_LON, AS_RAD, stations)
            print('AS_MODE initial end')
        else:
            #self.AS_STNs = []
            for STN, in stations:
                self.AS_STNs.append(STN)
        for sta, in stations: # the comma is needed
            picks=self.assoc_db.query(Pick).filter(Pick.sta==sta).filter(Pick.assoc_id==None).order_by(Pick.time).all()
            # Condense picktimes that are within our pick uncertainty value picktimes are python datetime objects
            if stations.index((sta,))==0: #stupid tuple
                counter0=0
                picktimes_new,counter=pick_cluster(self.assoc_db,picks,self.aggr_window,self.aggr_norm,counter0)
            else:
                picktimes_new,counter=pick_cluster(self.assoc_db,picks,self.aggr_window,self.aggr_norm,counter)
            nets = self.assoc_db.query(PickModified.net).filter(PickModified.sta==sta).filter(PickModified.assoc_id==None).all()
            locs = self.assoc_db.query(PickModified.loc).filter(PickModified.sta==sta).filter(PickModified.assoc_id==None).all()
            #for net in nets:
            #if sta in STNs:
            for net, in set(nets):
                for loc, in set(locs):
                    picks_modified=self.assoc_db.query(PickModified).filter(PickModified.sta==sta,PickModified.net==net,PickModified.loc==loc).filter(PickModified.assoc_id==None).order_by(PickModified.time).all()
                    #picks_modified=self.assoc_db.query(PickModified).filter(PickModified.sta==sta).filter(PickModified.assoc_id==None).order_by(PickModified.time).all()
                    # Generate all possible candidate events: every pick pair whose
                    # separation falls within the allowed S-P interval becomes a Candidate.
                    for i in range(0, len(picks_modified) - 1):
                        for j in range(i + 1,len(picks_modified)):
                            s_p = (picks_modified[j].time - picks_modified[i].time).total_seconds()#; print s_p
                            if s_p <= self.max_s_p and s_p >= self.min_s_p:
                                ot = self.find_ot_from_tt_curve(sta, picks_modified[i], s_p)
                                new_candidate=Candidate(ot, sta, picks_modified[i].time, picks_modified[i].id, picks_modified[j].time, picks_modified[j].id)
                                self.assoc_db.add(new_candidate)
        self.assoc_db.commit()
        print('id_candidate time in seconds: ',time.time()-now1)
    def associate_candidates(self):
        """ Associate all possible candidate events by comparing the projected origin-times. At
        this point we are not dealing with the condition that more picks and candidate events
        could be arriving while we do our event associations.
        Clusters candidates whose origin times fall within assoc_ot_uncert,
        then runs a 3D hypocenter search on the largest clusters and declares
        events that pass quality control.
        """
        now2 = time.time()
        dt_ot=timedelta(seconds=self.assoc_ot_uncert)
        # Query all candidate ots
        #candidate_ots=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None).order_by(Candidate.ot).all()
        if self.AS_MODE :
            candidate_ots=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None, Candidate.sta.in_(self.AS_STNs)).\
                filter((Candidate.ts-Candidate.tp) <= self.AS_SP_TIME).order_by(Candidate.ot).all()
        else:
            candidate_ots=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None, Candidate.sta.in_(self.AS_STNs)).order_by(Candidate.ot).all()
        L_ots=len(candidate_ots) #; print(L_ots)
        Array=[]
        # For every candidate, count how many candidates (and distinct stations)
        # fall inside its origin-time window [ot, ot+dt_ot).
        for i in range(L_ots):
            #cluster=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None).filter(Candidate.ot>=candidate_ots[i].ot, Candidate.ot<(candidate_ots[i].ot+dt_ot)).order_by(Candidate.ot).all()
            if self.AS_MODE :
                cluster=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None, Candidate.sta.in_(self.AS_STNs)).\
                    filter((Candidate.ts-Candidate.tp) <= self.AS_SP_TIME).\
                    filter(Candidate.ot>=candidate_ots[i].ot, Candidate.ot<(candidate_ots[i].ot+dt_ot)).order_by(Candidate.ot).all()
            else:
                cluster=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None, Candidate.sta.in_(self.AS_STNs)).\
                    filter(Candidate.ot>=candidate_ots[i].ot, Candidate.ot<(candidate_ots[i].ot+dt_ot)).order_by(Candidate.ot).all()
            #cluster_sta=self.assoc_db.query(Candidate.sta).filter(Candidate.assoc_id==None).filter(Candidate.ot>=candidate_ots[i].ot).filter(Candidate.ot<(candidate_ots[i].ot+dt_ot)).order_by(Candidate.ot).all()
            cluster_sta = [candi.sta for candi in cluster]
            #print(cluster_sta)
            l_cluster=len(set(cluster_sta))
            Array.append((i,l_cluster,len(cluster)))
        #print Array
        Array.sort(key=itemgetter(1), reverse=True) #sort Array by l_cluster, notice Array has been changed
        #print Array
        print('candidates_ots:', time.time()-now2, ', Array length:', len(Array))
        if not self.AS_MODE:
            # Heuristic escalation: with very many overlapping candidates (large
            # events with dense coda picks), widen the time window and demand
            # more stations before declaring an event.
            Array_count = len(Array)
            if Array_count > 1500:
                print('VERY VERY HUGE EARTHQUAKE MODE!')
                dt_ot=timedelta(seconds=self.assoc_ot_uncert*2)
                self.nsta_declare = 25
            elif Array_count > 800:
                print('HUGE EARTHQUAKE MODE!')
                dt_ot=timedelta(seconds=self.assoc_ot_uncert*2)
                self.nsta_declare = 15
            elif Array_count > 500:
                print('BIG EARTHQUAKE MODE!')
                dt_ot=timedelta(seconds=self.assoc_ot_uncert*2)
                self.nsta_declare = 10
            elif Array_count > 400:
                print('Medium EARTHQUAKE MODE!')
                dt_ot=timedelta(seconds=self.assoc_ot_uncert*2)
                self.nsta_declare = 8
        # Process clusters from largest station count down; stop at the first
        # cluster that no longer reaches nsta_declare (list is sorted).
        for i in range(len(Array)):
            index=Array[i][0]
            if Array[i][1]>=self.nsta_declare:
                matches=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None).\
                    filter(Candidate.ot>=candidate_ots[index].ot).\
                    filter(Candidate.ot<(candidate_ots[index].ot+dt_ot)).order_by(Candidate.ot).all()
                #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
                # remove the candidates with the modified picks has been associated
                picks_associated_id=list(set(self.assoc_db.query(PickModified.id).filter(PickModified.assoc_id!=None).all()))
                index_matches=[]
                for id, in picks_associated_id:
                    for j,match in enumerate(matches):
                        if match.p_modified_id==id or match.s_modified_id==id:
                            index_matches.append(j)
                # delete from the end
                if index_matches:
                    for j in sorted(set(index_matches),reverse=True):
                        del matches[j]
                # remove the candidates with the modified picks has been associated
                #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
                #print(i)
                #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
                # 3D Associator
                now = time.time()
                tt = []
                #print(len(matches))
                # Build [sta, P travel time, S travel time, origin time, Candidate] rows.
                for match in matches:
                    #print('sta:',match.sta)
                    match_p = match.tp
                    match_s = match.ts
                    match_ot = match.ot
                    match_ttp = (match_p - match_ot).total_seconds()
                    match_tts = (match_s - match_ot).total_seconds()
                    tt.append([match.sta, match_ttp, match_tts, match.ot, match])
                cb, cb_dupl = self.new_remove_comb(tt)
                rms_sort = []
                tt_cb = cb[0]
                if len(tt_cb) >= self.nsta_declare: # self.nsta_declare has to be greater than or equal to 3
                    ##PyramidSearching##
                    #print(tt_cb)
                    # 3D grid search for the hypocenter; QA flags a trustworthy solution.
                    tt_new, evt_lat, evt_lon, evt_dep, QA = HYPO3D_Searching(self.assoc_db, self.nsta_db, tt_cb, self.zerotime, config=self.config)
                    #rms_sort.append((tt_new, sourcegrid, rms, 1))
                    print('HYPO3D_Searching done.', QA)
                    #if QA:
                    #    dp_cb = cb_dupl[0]
                    #    cb = self.add_dupl_ots_new(tt_new, dp_cb, evt_lat, evt_lon, evt_dep, 2.0)
                    #
                    #    tt_cb = cb
                if len(tt_cb) >= self.nsta_declare and QA: # self.nsta_declare has to be greater than or equal to 3
                    rms = 0.0
                    sourcegrid=35000 # useless thing, lazy to remove
                    lat, lon, dep = evt_lat, evt_lon, evt_dep
                    nsta = len(tt_new)
                    all_ots = []
                    for j in range(nsta):
                        all_ots.append(tt_new[j][3])
                    # Event origin time = aggregate of the member candidates' ots.
                    origintime, ot_unc = datetime_statistics(all_ots)
                    # in 3D Associator, use rms of picks instead of loc_uncertainty
                    t_create = datetime.utcnow()
                    t_update = datetime.utcnow()
                    new_event=Associated(origintime,round(ot_unc,3),lat,lon,dep,round(rms,3),nsta,sourcegrid,t_create,t_update)
                    self.assoc_db.add(new_event)
                    self.assoc_db.flush()
                    self.assoc_db.refresh(new_event)
                    self.assoc_db.commit()
                    event_id=new_event.id
                    logging.info(str(['event_id:',event_id]))
                    logging.info(str(['ot:', origintime, 'ot_uncert:', round(ot_unc,3), 'loc:', lat,lon,dep, 'rms:', round(rms,3), 'nsta:', nsta]))
                    #print(event_id)
                    # Mark all member candidates (and their picks) as associated.
                    for tt_tuple in cb[0]:#[index]:
                        match = tt_tuple[4]
                        #print(match,match.assoc_id)
                        match.set_assoc_id(event_id,self.assoc_db,True)
                    self.assoc_db.commit()
                    # remove all picks near this time
                    #not_ots=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None).filter(Candidate.ot>=(origintime-dt_ot)).filter(Candidate.ot<(origintime+dt_ot)).all()
                    mask_time = timedelta(seconds=10)
                    # Candidates near the declared origin time are retired with the
                    # sentinel assoc_id 99 so they are not reused.
                    not_ots=self.assoc_db.query(Candidate).filter(Candidate.assoc_id==None).filter(Candidate.ot>=(origintime-mask_time)).filter(Candidate.ot<(origintime+mask_time)).all()
                    for not_tt in not_ots:
                        not_tt.set_assoc_id(99,self.assoc_db,True)
                    self.assoc_db.commit()
            else:
                break
    # remove stations with dupl ots from combinations
    def new_remove_comb(self,tt):
        """Split tt rows into (stations without heavy duplication, rows from
        stations duplicated fewer than 3 times). Returns two single-element
        lists of tuples: (cb, cb_dupl)."""
        stns = [item[0] for item in tt]
        dupl_stns = list_duplicates(stns)
        # Stations duplicated, but fewer than 3 times, are kept separately.
        f_stns = [stn for stn in dupl_stns if stns.count(stn) < 3]
        not_dupl_tt = [item for item in tt if item[0] not in dupl_stns]
        dupl_tt = [item for item in tt if item[0] in f_stns]
        cb = []
        cb_dupl = []
        cb.append(tuple(not_dupl_tt))
        cb_dupl.append(tuple(dupl_tt))
        # only return combinations of different stations
        return cb, cb_dupl
    def new_remove_comb_2(self,tt):
        """Variant of new_remove_comb: keeps the first row of each duplicated
        station and merges it with the unique-station rows."""
        stns = [item[0] for item in tt]
        dupl_stns = list_duplicates(stns)
        not_dupl_tt = [item for item in tt if item[0] not in dupl_stns]
        dupl_tt = [item for item in tt if item[0] in dupl_stns]
        cb = []
        cb_tmp = []
        stn_done = []
        for dp_tt in dupl_tt:
            if dp_tt[0] not in stn_done:
                cb_tmp.append(dp_tt)
                stn_done.append(dp_tt[0])
        cb.append(tuple(cb_tmp)+tuple(not_dupl_tt))
        # only return combinations of different stations
        return cb
    def find_ot_from_tt_curve(self, sta, p_arr, s_p_time):
        """Project an origin time from a P arrival and the S-P interval using
        the station's linear travel-time curve: tP = a*(S-P) + b."""
        check_db = self.tt_curve_db.query(TT_CURVE.id).filter(TT_CURVE.sta == sta).first()
        if check_db == None:
            a_value = 1.35 # some default value
            b_value = 0.0
        else:
            a_value, b_value = self.tt_curve_db.query(TT_CURVE.a_value, TT_CURVE.b_value).filter(TT_CURVE.sta == sta).first()
        ot = p_arr.time - timedelta(seconds=s_p_time*a_value + b_value)
        return ot
    def add_dupl_ots_new(self, tt, tt_dupl, evt_lat, evt_lon, evt_dep, tt_residual):
        """Append duplicated-station rows back into tt when their P/S/S-P
        times agree with the located event within tt_residual seconds.
        NOTE(review): this calls pbr(), whose import is commented out at the
        top of the module — calling this method will raise NameError; confirm.
        Currently only referenced from commented-out code in associate_candidates."""
        # find stns
        tt_new = tt
        stn_done = []
        for tt_dupls in tt_dupl:
            if tt_dupls[0] not in stn_done:
                sta = tt_dupls[0]
                ttp = tt_dupls[1]
                tts = tt_dupls[2]
                ttsp = tts-ttp
                # check sta in TTtable3D
                sta_id, = self.nsta_db.query(NSTATable.id).filter(NSTATable.sta == sta).first()
                sta_lat, sta_lon, sta_dep = self.nsta_db.query(NSTATable.latitude,NSTATable.longitude,NSTATable.elevation).filter(NSTATable.id == sta_id).first()
                # Elevation (m, up) converted to depth (km, down).
                sta_dep = sta_dep * (-0.001)
                P_ttime = pbr(evt_lat,evt_lon,evt_dep,sta_lat,sta_lon,sta_dep,1)
                S_ttime = pbr(evt_lat,evt_lon,evt_dep,sta_lat,sta_lon,sta_dep,2)
                S_P_ttime = S_ttime-P_ttime
                if abs(P_ttime-ttp) <= tt_residual and abs(S_ttime-tts) <= tt_residual and abs(S_P_ttime-ttsp) <= tt_residual:
                    tt_new.append(tt_dupls)
                    stn_done.append(sta)
        return tt_new
    def search_stns_in_range(self, lat, lon, rad, stations):
        """Return the subset of station codes lying within rad kilometers of
        (lat, lon), using great-circle distance."""
        stns = []
        for stn, in stations:
            stn_lon, stn_lat = self.nsta_db.query(NSTATable.longitude, NSTATable.latitude).filter(NSTATable.sta==stn).first()
            Dist=degrees2kilometers(locations2degrees(lat, lon, stn_lat, stn_lon))
            if Dist <= rad:
                stns.append(stn)
        return stns
def list_duplicates(seq):
    """Return the distinct elements of *seq* that occur more than once."""
    observed = set()
    repeated = set()
    for item in seq:
        # First sighting goes to `observed`; any later sighting marks a duplicate.
        if item in observed:
            repeated.add(item)
        else:
            observed.add(item)
    return list(repeated)
def datetime_statistics(dt_list, norm='L2'):
    """ mean,std=datetime_statistics(datetime_list)
    Calculate the representative time and the standard deviation in seconds
    of a list of datetime values.

    Parameters:
        dt_list: non-empty list of datetime objects.
        norm: 'L1' -> mean offset, 'L2' -> median offset (robust to outliers),
              'FA' -> first arrival (earliest time in the list).

    Returns:
        (new_pick, std_offsets): the representative datetime and the standard
        deviation (seconds) of the offsets relative to the first element.

    Raises:
        ValueError: if norm is not one of 'L1', 'L2', 'FA' (the original code
        silently fell through to an UnboundLocalError in that case).
    """
    # Offsets (seconds) of every entry relative to the first one.
    offsets = [(dt - dt_list[0]).total_seconds() for dt in dt_list]
    if norm == 'L1':
        new_pick = dt_list[0] + timedelta(seconds=np.mean(offsets))
    elif norm == 'L2':
        new_pick = dt_list[0] + timedelta(seconds=np.median(offsets))
    elif norm == 'FA':
        # First arrival: simply the earliest time (original copied and sorted).
        new_pick = min(dt_list)
    else:
        raise ValueError("norm must be 'L1', 'L2' or 'FA', got %r" % (norm,))
    std_offsets = np.std(offsets)
    return new_pick, std_offsets
def pick_cluster(session,picks,pickwindow,aggr_norm,counter):
    """ cleaning up very closed picks on different channels of same station

    Groups near-coincident picks (within `pickwindow` seconds, across
    different channels of the same station) into clusters, assigns each
    cluster a shared modified_id, and writes PickModified rows to `session`.

    Parameters:
        session: sqlalchemy session for the associator database
        picks: Pick rows for ONE station, already ordered by time
        pickwindow: max seconds between consecutive picks in a cluster
        aggr_norm: 'L1' mean / 'L2' median / 'FA' first arrival (see datetime_statistics)
        counter: running modified_id counter, carried across stations

    Returns:
        (picks_new, counter): the retained representative picks and the
        updated counter for the next station.
    """
    #            |    |                     /\
    #            |    |                    /  \          /\
    #            |    | /\      /\        /    \        /  \      /\
    #____________|/\__|/  \    /  \      /      \      /    \    /  \  /\_________
    #            |    |    \  /    \    /        \    /      \  /    \/
    #            |    |     \/      \  /          \  /        \/
    #            |    |              \/            \/
    # pickwindow:  ----    better to set pickwindow==t_up, t_up is to clean closed picks
    # STA1 E     -----------|----|--------------------|--------------
    # STA1 N     ------------|-------------------------|-------------
    # STA1 Z     -------------|-------------------------|------------
    # stack      -----------|||--|--------------------|||------------
    # cluster STA1 --------|---|---------------------|------------- chen highly recommend to use norm=='L2' to lower the effect of outlier, L2 takes median
    # ARGUE: whether only take the median or mean of the picks from different stations? won't count the followings after first one
    #
    picks_new=[]
    # only one pick in picks
    if len(picks)==1:
        cluster=[];cluster.append(picks[0]);cluster_time=[];cluster_time.append(picks[0].time)
        picks[0].modified_id=1+counter # assign modified id to picks
        counter+=1
        pickave,pickstd=datetime_statistics(cluster_time,aggr_norm)
        # append the row to the picks_new, not only the pick time
        picks_new.append(picks[0])
        pick_modified=PickModified(picks[0].sta,picks[0].chan,picks[0].net,picks[0].loc,picks[0].time,picks[0].phase,round(pickstd,3),picks[0].assoc_id)
        session.add(pick_modified)
        session.commit()
    # more than one pick in picks
    else:
        j=0
        counter=1+counter
        # Outer loop: one iteration per cluster, starting at index j.
        while True:
            i=j
            cluster=[];cluster.append(picks[i]);cluster_time=[];cluster_time.append(picks[i].time);channel=[];channel.append(picks[i].chan)
            picks[i].modified_id=counter
            # Inner loop: grow the cluster while consecutive picks qualify.
            while True:
                # cluster picks of different channels; notice that for the closed picks on the same station, those picks behind the first pick could be separated lonely or separated cluster
                if picks[i+1].chan not in channel and (picks[i+1].time-picks[i].time).total_seconds()<pickwindow:
                    cluster.append(picks[i+1])
                    cluster_time.append(picks[i+1].time)
                    channel.append(picks[i+1].chan)
                    picks[i+1].modified_id=counter # assign modified id to picks
                    i=i+1
                    # make sure do not go over the range limit because j=i+1 below, jump out inner while loop
                    if i==len(picks)-1:
                        break
                # elif is dealing with the exactly same picks, probably from processing same stream twice
                elif picks[i+1].sta==picks[i].sta and picks[i+1].chan==picks[i].chan and picks[i+1].time==picks[i].time: # and picks[i+1].snr==picks[i].snr and picks[i+1].phase==picks[i].phase and picks[i+1].uncert==picks[i].uncert:
                    cluster.append(picks[i+1])
                    cluster_time.append(picks[i+1].time)
                    channel.append(picks[i+1].chan)
                    picks[i+1].modified_id=counter # assign modified id to picks
                    i=i+1
                    # make sure do not go over the range limit because j=i+1 below, jump out inner while loop
                    if i==len(picks)-1:
                        break
                else:
                    break
            pickave,pickstd=datetime_statistics(cluster_time,aggr_norm)
            # append whole rows to the picks_new, not only the pick time
            for pick in cluster:
                if aggr_norm == 'FA' and (pick.time-pickave).total_seconds()==0:
                    break
                if (pick.time-pickave).total_seconds()>=0:
                    break
                picks_new.append(pick)
                pick_modified=PickModified(pick.sta,pick.chan,pick.net,pick.loc,pick.time,pick.phase,round(pickstd,3),pick.assoc_id)
                session.add(pick_modified)
                session.commit()
            # next cluster
            j=i+1
            counter=counter+1
            # jump outer while loop and compare last two picks. For the situation that last one is ungrouped, use if statement to add in picks_new
            if j>=len(picks)-1:
                if (picks[-1].time-picks[-2].time).total_seconds()>pickwindow:
                    picks_new.append(picks[-1])
                    picks[-1].modified_id=counter # assign modified id to picks
                    pick_modified=PickModified(picks[-1].sta,picks[-1].chan,picks[-1].net,picks[-1].loc,picks[-1].time,picks[-1].phase,round(pickstd,3),picks[-1].assoc_id)
                    session.add(pick_modified)
                    session.commit()
                else:
                    if picks[-1] in cluster:
                        counter-=1
                    else:
                        picks[-1].modified_id=counter
                        pick_modified=PickModified(picks[-1].sta,picks[-1].chan,picks[-1].net,picks[-1].loc,picks[-1].time,picks[-1].phase,round(pickstd,3),picks[-1].assoc_id)
                        session.add(pick_modified)
                        session.commit()
                break
    return picks_new, counter
| [
"numpy.std",
"numpy.median",
"time.time",
"obspy.core.UTCDateTime",
"numpy.mean",
"obspy.geodetics.locations2degrees",
"operator.itemgetter"
] | [((17106, 17121), 'numpy.std', 'np.std', (['offsets'], {}), '(offsets)\n', (17112, 17121), True, 'import numpy as np\n'), ((2214, 2331), 'obspy.core.UTCDateTime', 'UTCDateTime', ([], {'year': 'fileHeader[1]', 'month': 'fileHeader[4]', 'day': 'fileHeader[5]', 'hour': 'fileHeader[6]', 'minute': 'fileHeader[7]'}), '(year=fileHeader[1], month=fileHeader[4], day=fileHeader[5],\n hour=fileHeader[6], minute=fileHeader[7])\n', (2225, 2331), False, 'from obspy.core import UTCDateTime\n'), ((2730, 2741), 'time.time', 'time.time', ([], {}), '()\n', (2739, 2741), False, 'import time\n'), ((5739, 5750), 'time.time', 'time.time', ([], {}), '()\n', (5748, 5750), False, 'import time\n'), ((16772, 16788), 'numpy.mean', 'np.mean', (['offsets'], {}), '(offsets)\n', (16779, 16788), True, 'import numpy as np\n'), ((16883, 16901), 'numpy.median', 'np.median', (['offsets'], {}), '(offsets)\n', (16892, 16901), True, 'import numpy as np\n'), ((5419, 5430), 'time.time', 'time.time', ([], {}), '()\n', (5428, 5430), False, 'import time\n'), ((7665, 7678), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (7675, 7678), False, 'from operator import itemgetter\n'), ((7802, 7813), 'time.time', 'time.time', ([], {}), '()\n', (7811, 7813), False, 'import time\n'), ((10072, 10083), 'time.time', 'time.time', ([], {}), '()\n', (10081, 10083), False, 'import time\n'), ((16058, 16103), 'obspy.geodetics.locations2degrees', 'locations2degrees', (['lat', 'lon', 'stn_lat', 'stn_lon'], {}), '(lat, lon, stn_lat, stn_lon)\n', (16075, 16103), False, 'from obspy.geodetics import locations2degrees, degrees2kilometers\n'), ((16996, 17014), 'numpy.median', 'np.median', (['offsets'], {}), '(offsets)\n', (17005, 17014), True, 'import numpy as np\n')] |
import warnings
warnings.filterwarnings("ignore")
# import faulthandler
# faulthandler.enable()
import logging
import time
from collections import Counter
import numpy as np
from ta import add_all_ta_features
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LassoLarsIC
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score
from sklearn.decomposition import PCA
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from zvt.api.data_type import Region, Provider
from zvt.contract.reader import DataReader
from zvt.domain import Stock1dKdata, Stock
logger = logging.getLogger(__name__)
def dataXY(df, train_size=0.7, pred_future=10):
    """Split an OHLCV dataframe into train/test feature matrices and labels.

    Features are all `ta` technical indicators; the label is 1 when the
    close `pred_future` rows ahead is >= the current close, else 0.

    Parameters:
        df: pandas DataFrame with open/close/volume/high/low columns
            (assumes columns 1:7 are the OHLCV inputs — TODO confirm against caller).
        train_size: fraction of rows used for training (chronological split).
        pred_future: prediction horizon in rows.

    Returns:
        (x_train, y_train, x_test, y_test, y_test_cohort), where
        y_test_cohort is the close series aligned to the labels.
    """
    def create_labels(y_cohort, pred_future):
        # diff(pred).shift(-pred) == close[t+pred] - close[t]; label is 1 for a rise.
        y = (y_cohort.diff(periods=pred_future).shift(-pred_future).dropna()>=0).astype(int)
        return y
    def create_ta(x_cohort, pred_future):
        # NOTE(review): features are shifted by -pred_future before dropna —
        # this aligns FUTURE indicator values with present labels, which looks
        # like lookahead; confirm this is the intended alignment.
        x = add_all_ta_features(x_cohort, 'open', 'high', 'low', 'close', 'volume', fillna=True).shift(-pred_future).dropna()
        return x
    # Chronological split: first train_size fraction for training.
    train_cohort = df[0:round(df.shape[0] * train_size)]
    x_train_cohort = train_cohort.iloc[:, 1:7]
    x_train = create_ta(x_train_cohort, pred_future)
    y_train_cohort = train_cohort['close']
    y_train = create_labels(y_train_cohort, pred_future)
    test_cohort = df[round(df.shape[0] * (train_size)):]
    x_test_cohort = test_cohort.iloc[:, 1:7]
    x_test = create_ta(x_test_cohort, pred_future)
    y_test_cohort = test_cohort['close']
    y_test = create_labels(y_test_cohort, pred_future)
    # Align the raw close series with the shifted labels for later plotting.
    y_test_cohort = y_test_cohort.shift(-pred_future).dropna()
    return x_train, y_train, x_test, y_test, y_test_cohort
class LASSOJorn(BaseEstimator, TransformerMixin):
    """Sklearn-compatible feature selector.

    Fits a LassoLarsIC model (AIC criterion) and keeps only the feature
    columns whose LASSO coefficient is nonzero.
    """

    def __init__(self):
        # No hyper-parameters; an explicit __init__ keeps the estimator
        # clonable by sklearn. (The original body was a bare `None`
        # expression — a no-op; `pass` states the intent.)
        pass

    def fit(self, X, y):
        """Fit the LASSO model on (X, y) and remember it for transform()."""
        self.model = LassoLarsIC(criterion='aic').fit(X, y)
        return self

    def transform(self, X):
        """Return only the columns of X selected by the fitted LASSO."""
        return np.asarray(X)[:, abs(self.model.coef_) > 0]
if __name__ == '__main__':
    # Experiment script: fetch daily OHLCV data, train a calibrated XGBoost
    # random-forest classifier to predict 10-day price direction, evaluate
    # calibration/ROC/precision, derive buy/sell probability thresholds, and
    # backtest a simple long/short bot.
    now = time.time()
    reader = DataReader(region=Region.US,
                        codes=['FB', 'AMD'],
                        data_schema=Stock1dKdata,
                        entity_schema=Stock,
                        provider=Provider.Yahoo)
    gb = reader.data_df.groupby('code')
    dfs = {x: gb.get_group(x) for x in gb.groups}
    df = dfs['AMD'][['open', 'close', 'volume', 'high', 'low']].copy()
    x_train, y_train, x_test, y_test, y_test_cohort = dataXY(df)
    plt.close()
    # Grid-search space (currently disabled; kept for reference).
    parameters = {
        # 'clf__base_estimator__n_estimators': np.round(np.linspace(100,400,10)).astype('int'),
        # 'clf__base_estimator__max_depth': [10,11,12],
        # 'clf__base_estimator__min_child_weight': [1],
        # 'clf__base_estimator__gamma': np.linspace(0,0.5,5),
        # 'clf__base_estimator__subsample': np.linspace(0.2,0.4,3),
        # 'clf__base_estimator__colsample_bytree': np.linspace(0.2,0.4,3),
        # 'clf__base_estimator__reg_alpha': np.linspace(0.01,0.03,10)
        # 'clf__method': ['isotonic','sigmoid'],
    }
    # Rebalance classes: ratio of negative to positive labels.
    scale_pos_weight = Counter(y_train)[0] / Counter(y_train)[1]
    clf = xgb.XGBRFClassifier(objective='binary:logistic',
                              scale_pos_weight=scale_pos_weight,
                              learning_rate=0.01,
                              n_estimators=5000,
                              max_depth=10,
                              min_child_weight=1,
                              gamma=0,
                              subsample=0.3,
                              colsample_bytree=0.3,
                              reg_alpha=0.014,
                              nthread=4,
                              seed=27)
    # Scale -> PCA -> LASSO feature selection -> probability-calibrated classifier.
    PL = Pipeline(steps=[('PreProcessor', StandardScaler()),
                         ('PCA', PCA()),
                         ('EmbeddedSelector', LASSOJorn()),
                         ('clf', CalibratedClassifierCV(base_estimator=clf, method='sigmoid'))])
    # tss = TimeSeriesSplit(n_splits=3)
    # optimizer = GridSearchCV(PL, parameters, cv=tss, n_jobs=-1, verbose=10, scoring='roc_auc')
    # optimizer.fit(x_train, y_train)
    # print(optimizer.best_params_)
    # final_model = optimizer.best_estimator_
    final_model = PL.fit(x_train, y_train)
    # plt.plot(optimizer.cv_results_['mean_test_score'])
    # xgb.plot_importance(final_model.named_steps['clf'])
    y_pred_proba = final_model.predict_proba(x_test)[:, 1]
    y_pred = final_model.predict(x_test)
    # Reliability diagram: predicted probability vs observed frequency.
    fraction_of_positives, mean_predicted_value = calibration_curve(np.array(y_test), y_pred_proba, strategy='uniform', n_bins=20)
    plt.figure()
    plt.plot(mean_predicted_value, fraction_of_positives, "sr-")
    plt.title("Calibration")
    plt.xlabel("mean_predicted_value")
    plt.ylabel("fraction_of_positives")
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
    roc_auc = auc(fpr, tpr)
    display = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=None)
    display.plot()
    plt.title("ROC")
    # Sweep probability thresholds (excluding the extremes) and compute
    # positive/negative predictive value at each.
    range_class = np.linspace(np.min(y_pred_proba), np.max(y_pred_proba), 100)
    range_class = np.delete(range_class, 0)
    range_class = np.delete(range_class, -1)
    PPV = np.zeros(len(range_class))
    NPV = np.zeros(len(range_class))
    j = 0
    for i in range_class:
        PPV[j] = precision_score(y_test, y_pred_proba > i, pos_label=1)
        NPV[j] = precision_score(y_test, y_pred_proba > i, pos_label=0)
        j += 1
    plt.figure()
    plt.plot(range_class, PPV, label='PPV')
    plt.plot(range_class, NPV, label='NPV')
    plt.legend()
    # First thresholds at which PPV exceeds / NPV drops below 98%.
    threshold = 0.98
    threshold_high = range_class[np.where(PPV > threshold)[0][0]]
    threshold_low = range_class[np.where(NPV < threshold)[0][0]]
    plt.plot(threshold_high, PPV[np.where(np.isin(range_class, threshold_high))[0][0]], 'r*')
    plt.plot(threshold_low, NPV[np.where(np.isin(range_class, threshold_low))[0][0]], 'r*')
    # Visualize confident calls on the first ~100 test days.
    plt.figure(figsize=(10, 10))
    idx = np.linspace(0, 100, 101).astype('int')
    plt.plot(range(len(y_test_cohort.iloc[idx])), y_test_cohort.iloc[idx], 'b')
    idx_high = np.where(y_pred_proba[idx] > threshold_high)[0]
    plt.plot(idx_high, np.asarray(y_test_cohort)[idx_high], 'g.')
    idx_low = np.where(y_pred_proba[idx] < threshold_low)[0]
    plt.plot(idx_low, np.asarray(y_test_cohort)[idx_low], 'r.')
    idx_sure = np.sort(np.concatenate((idx_high, idx_low)))
    print(classification_report(y_test.iloc[idx_sure], y_pred[idx_sure]))
    print(confusion_matrix(y_test.iloc[idx_sure], y_pred[idx_sure]))
    plt.close('all')
    def bot(threshold_high, threshold_low):
        """Backtest: alternately buy above threshold_high and sell below
        threshold_low, compounding the equity curve `start` from 10000.
        Presumably each sell->buy leg books the short-side return — confirm."""
        # Close prices over the test period (column 4 of df).
        koers = df.iloc[round(df.shape[0] * (0.7)):, 4]
        start = np.zeros(len(x_test) + 1)
        start[0] = 10000
        bought = 0
        sellat = 0
        buyat = 0
        for i in range(len(x_test)):
            if y_pred_proba[i] > threshold_high and bought == 0:
                # print("Buy at i=",i)
                buyat = koers.iloc[i]
                if sellat != 0:
                    interest = -(buyat - sellat) / sellat
                    start[i + 1] = start[i] * (1 + interest)
                else:
                    start[i + 1] = start[i]
                bought = 1
            elif y_pred_proba[i] < threshold_low and bought == 1:
                # print("Sell at i=",i)
                sellat = koers.iloc[i]
                if buyat != 0:
                    interest = (sellat - buyat) / buyat
                    start[i + 1] = start[i] * (1 + interest)
                else:
                    start[i + 1] = start[i]
                bought = 0
            else:
                start[i + 1] = start[i]
        return start
    # Threshold grid search over the bot (disabled; kept for reference).
    # range_class = np.linspace(min(y_pred_proba),max(y_pred_proba),100)
    # interest = np.zeros((range_class.shape[0],range_class.shape[0]))
    # ii=0
    # for i in range_class:
    #     jj=0
    #     for j in range_class:
    #         start = bot(i,j)
    #         interest[ii,jj] = start[-1]/start[0]*100/len(start)
    #         jj+=1
    #     ii+=1
    # ind = np.unravel_index(np.argmax(interest), interest.shape)
    start = bot(0.6, 0.46)
    # Average percentage return per step of the equity curve.
    interest = start[-1] / start[0] * 100 / len(start)
    print("interest: ", interest)
    plt.figure()
    plt.plot(start[:5 * 250])
    plt.show()
| [
"matplotlib.pyplot.title",
"sklearn.metrics.RocCurveDisplay",
"numpy.isin",
"sklearn.preprocessing.StandardScaler",
"xgboost.XGBRFClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"ta.add_all_ta_features",
"matplotlib.pyplot.close",
"sklearn.linear_model.LassoLarsIC... | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((895, 922), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (912, 922), False, 'import logging\n'), ((2268, 2279), 'time.time', 'time.time', ([], {}), '()\n', (2277, 2279), False, 'import time\n'), ((2293, 2418), 'zvt.contract.reader.DataReader', 'DataReader', ([], {'region': 'Region.US', 'codes': "['FB', 'AMD']", 'data_schema': 'Stock1dKdata', 'entity_schema': 'Stock', 'provider': 'Provider.Yahoo'}), "(region=Region.US, codes=['FB', 'AMD'], data_schema=Stock1dKdata,\n entity_schema=Stock, provider=Provider.Yahoo)\n", (2303, 2418), False, 'from zvt.contract.reader import DataReader\n'), ((2744, 2755), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2753, 2755), True, 'import matplotlib.pyplot as plt\n'), ((3382, 3634), 'xgboost.XGBRFClassifier', 'xgb.XGBRFClassifier', ([], {'objective': '"""binary:logistic"""', 'scale_pos_weight': 'scale_pos_weight', 'learning_rate': '(0.01)', 'n_estimators': '(5000)', 'max_depth': '(10)', 'min_child_weight': '(1)', 'gamma': '(0)', 'subsample': '(0.3)', 'colsample_bytree': '(0.3)', 'reg_alpha': '(0.014)', 'nthread': '(4)', 'seed': '(27)'}), "(objective='binary:logistic', scale_pos_weight=\n scale_pos_weight, learning_rate=0.01, n_estimators=5000, max_depth=10,\n min_child_weight=1, gamma=0, subsample=0.3, colsample_bytree=0.3,\n reg_alpha=0.014, nthread=4, seed=27)\n", (3401, 3634), True, 'import xgboost as xgb\n'), ((4866, 4878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4876, 4878), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4943), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_predicted_value', 'fraction_of_positives', '"""sr-"""'], {}), "(mean_predicted_value, fraction_of_positives, 'sr-')\n", (4891, 4943), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4972), 
'matplotlib.pyplot.title', 'plt.title', (['"""Calibration"""'], {}), "('Calibration')\n", (4957, 4972), True, 'import matplotlib.pyplot as plt\n'), ((4977, 5011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mean_predicted_value"""'], {}), "('mean_predicted_value')\n", (4987, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5016, 5051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fraction_of_positives"""'], {}), "('fraction_of_positives')\n", (5026, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5071, 5102), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred_proba'], {}), '(y_test, y_pred_proba)\n', (5080, 5102), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((5117, 5130), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5120, 5130), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((5145, 5216), 'sklearn.metrics.RocCurveDisplay', 'RocCurveDisplay', ([], {'fpr': 'fpr', 'tpr': 'tpr', 'roc_auc': 'roc_auc', 'estimator_name': 'None'}), '(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=None)\n', (5160, 5216), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((5240, 5256), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC"""'], {}), "('ROC')\n", (5249, 5256), True, 'import matplotlib.pyplot as plt\n'), ((5355, 5380), 'numpy.delete', 'np.delete', (['range_class', '(0)'], {}), '(range_class, 0)\n', (5364, 5380), True, 'import numpy as np\n'), ((5399, 5425), 'numpy.delete', 'np.delete', (['range_class', '(-1)'], {}), '(range_class, -1)\n', (5408, 5425), True, 'import numpy as np\n'), ((5701, 5713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5711, 5713), True, 'import matplotlib.pyplot as plt\n'), ((5718, 5757), 'matplotlib.pyplot.plot', 'plt.plot', 
(['range_class', 'PPV'], {'label': '"""PPV"""'}), "(range_class, PPV, label='PPV')\n", (5726, 5757), True, 'import matplotlib.pyplot as plt\n'), ((5762, 5801), 'matplotlib.pyplot.plot', 'plt.plot', (['range_class', 'NPV'], {'label': '"""NPV"""'}), "(range_class, NPV, label='NPV')\n", (5770, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5806, 5818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5816, 5818), True, 'import matplotlib.pyplot as plt\n'), ((6162, 6190), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (6172, 6190), True, 'import matplotlib.pyplot as plt\n'), ((6783, 6799), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6792, 6799), True, 'import matplotlib.pyplot as plt\n'), ((8470, 8482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8480, 8482), True, 'import matplotlib.pyplot as plt\n'), ((8487, 8512), 'matplotlib.pyplot.plot', 'plt.plot', (['start[:5 * 250]'], {}), '(start[:5 * 250])\n', (8495, 8512), True, 'import matplotlib.pyplot as plt\n'), ((8517, 8527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8525, 8527), True, 'import matplotlib.pyplot as plt\n'), ((4799, 4815), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4807, 4815), True, 'import numpy as np\n'), ((5288, 5308), 'numpy.min', 'np.min', (['y_pred_proba'], {}), '(y_pred_proba)\n', (5294, 5308), True, 'import numpy as np\n'), ((5310, 5330), 'numpy.max', 'np.max', (['y_pred_proba'], {}), '(y_pred_proba)\n', (5316, 5330), True, 'import numpy as np\n'), ((5554, 5608), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', '(y_pred_proba > i)'], {'pos_label': '(1)'}), '(y_test, y_pred_proba > i, pos_label=1)\n', (5569, 5608), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((5626, 5680), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 
'(y_pred_proba > i)'], {'pos_label': '(0)'}), '(y_test, y_pred_proba > i, pos_label=0)\n', (5641, 5680), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((6335, 6379), 'numpy.where', 'np.where', (['(y_pred_proba[idx] > threshold_high)'], {}), '(y_pred_proba[idx] > threshold_high)\n', (6343, 6379), True, 'import numpy as np\n'), ((6463, 6506), 'numpy.where', 'np.where', (['(y_pred_proba[idx] < threshold_low)'], {}), '(y_pred_proba[idx] < threshold_low)\n', (6471, 6506), True, 'import numpy as np\n'), ((6598, 6633), 'numpy.concatenate', 'np.concatenate', (['(idx_high, idx_low)'], {}), '((idx_high, idx_low))\n', (6612, 6633), True, 'import numpy as np\n'), ((6645, 6707), 'sklearn.metrics.classification_report', 'classification_report', (['y_test.iloc[idx_sure]', 'y_pred[idx_sure]'], {}), '(y_test.iloc[idx_sure], y_pred[idx_sure])\n', (6666, 6707), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((6719, 6776), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test.iloc[idx_sure]', 'y_pred[idx_sure]'], {}), '(y_test.iloc[idx_sure], y_pred[idx_sure])\n', (6735, 6776), False, 'from sklearn.metrics import RocCurveDisplay, roc_curve, auc, classification_report, confusion_matrix, precision_score\n'), ((2185, 2198), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (2195, 2198), True, 'import numpy as np\n'), ((3330, 3346), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (3337, 3346), False, 'from collections import Counter\n'), ((3352, 3368), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (3359, 3368), False, 'from collections import Counter\n'), ((6201, 6225), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(101)'], {}), '(0, 100, 101)\n', (6212, 6225), True, 'import numpy as np\n'), ((6406, 6431), 'numpy.asarray', 'np.asarray', (['y_test_cohort'], {}), 
'(y_test_cohort)\n', (6416, 6431), True, 'import numpy as np\n'), ((6532, 6557), 'numpy.asarray', 'np.asarray', (['y_test_cohort'], {}), '(y_test_cohort)\n', (6542, 6557), True, 'import numpy as np\n'), ((2082, 2110), 'sklearn.linear_model.LassoLarsIC', 'LassoLarsIC', ([], {'criterion': '"""aic"""'}), "(criterion='aic')\n", (2093, 2110), False, 'from sklearn.linear_model import LassoLarsIC\n'), ((5873, 5898), 'numpy.where', 'np.where', (['(PPV > threshold)'], {}), '(PPV > threshold)\n', (5881, 5898), True, 'import numpy as np\n'), ((5938, 5963), 'numpy.where', 'np.where', (['(NPV < threshold)'], {}), '(NPV < threshold)\n', (5946, 5963), True, 'import numpy as np\n'), ((3995, 4011), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4009, 4011), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4047, 4052), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (4050, 4052), False, 'from sklearn.decomposition import PCA\n'), ((4148, 4208), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', ([], {'base_estimator': 'clf', 'method': '"""sigmoid"""'}), "(base_estimator=clf, method='sigmoid')\n", (4170, 4208), False, 'from sklearn.calibration import CalibratedClassifierCV, calibration_curve\n'), ((1184, 1272), 'ta.add_all_ta_features', 'add_all_ta_features', (['x_cohort', '"""open"""', '"""high"""', '"""low"""', '"""close"""', '"""volume"""'], {'fillna': '(True)'}), "(x_cohort, 'open', 'high', 'low', 'close', 'volume',\n fillna=True)\n", (1203, 1272), False, 'from ta import add_all_ta_features\n'), ((6013, 6049), 'numpy.isin', 'np.isin', (['range_class', 'threshold_high'], {}), '(range_class, threshold_high)\n', (6020, 6049), True, 'import numpy as np\n'), ((6106, 6141), 'numpy.isin', 'np.isin', (['range_class', 'threshold_low'], {}), '(range_class, threshold_low)\n', (6113, 6141), True, 'import numpy as np\n')] |
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import sys
import os
import jittor as jt
import unittest
import time
import numpy as np
def get_init_var(shape, dtype):
    """Return a jittor variable of the given shape/dtype with random initial values."""
    return jt.random(shape, dtype)
def pool(x, size, op, padding, stride = 1): # TODO: stride, padding
    """Spatial pooling implemented with jittor's reindex/reindex_reduce.

    x: input variable of shape (N, C, H, W); size: pooling window edge;
    op: reduction op name forwarded to reindex_reduce; padding/stride:
    spatial padding and step.  Returns the pooled (N, C, h, w) variable.
    """
    N,C,H,W = x.shape
    h = (H+padding*2-size)//stride+1  # output height
    w = (W+padding*2-size)//stride+1  # output width
    # Gather every (size x size) window into a 6-d view; i4/i5 index
    # within the window, out-of-range reads come from padding.
    xx = x.reindex([N,C,h,w,size,size], [
        "i0", # Nid
        "i1", # Cid
        f"i2*{stride}-{padding}+i4", # Hid
        f"i3*{stride}-{padding}+i5", # Wid
    ])
    # Reduce the window axes (i4, i5) with `op` back to (N, C, h, w).
    return xx.reindex_reduce(op, [N,C,h,w], [
        "i0", # Nid
        "i1", # Cid
        "i2", # Hid
        "i3", # Wid
    ])
def relu(x): return jt.maximum(x, jt.float32(0))
def resnet_fake():
    """Build a small stem network mimicking the first stage of a ResNet."""
    from jittor import nn
    layers = [
        nn.Conv(3, 64, 7, 2, 3),  # 7x7 conv, stride 2, pad 3
        nn.BatchNorm(64),
        nn.ReLU(),
        nn.Pool(3, 2, 1),         # 3x3 pool, stride 2, pad 1
    ]
    return nn.Sequential(*layers)
class TestLongestDisFuse(unittest.TestCase):
    # Regression test: after a forward+backward pass through resnet_fake,
    # no allocated node in the dumped graph should have more than 5
    # dimensions -- presumably guarding against the fuser materializing
    # oversized intermediate tensors (TODO confirm intent).
    def test_longest_dis_fuse(self):
        x = jt.array(np.random.rand(1,3,224,224).astype(np.float32))
        net = resnet_fake()
        loss = jt.sum(net(x))
        ps = net.parameters()
        gs = jt.grad(loss, ps)
        jt.sync(gs)
        # assert not alloc big tensor
        g = jt.dump_all_graphs()
        for s in g.nodes_info:
            if not s.startswith("Var"):
                continue
            # node info contains "[d0,d1,...]" (shape) and "(...,ptr)";
            # parse both out of the string
            shape = s.split("[")[1].split("]")[0].split(",")
            ptr = s.split("(")[1].split(")")[0].split(",")[-1]
            if ptr != '0' and ptr != '0x0':
                # only check nodes whose data pointer is non-null (allocated)
                assert len(shape)<=5, s
# Allow the test file to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"jittor.float32",
"jittor.dump_all_graphs",
"jittor.nn.Conv",
"jittor.nn.BatchNorm",
"jittor.random",
"jittor.sync",
"jittor.grad",
"numpy.random.rand",
"jittor.nn.Pool",
"jittor.nn.ReLU"
] | [((509, 532), 'jittor.random', 'jt.random', (['shape', 'dtype'], {}), '(shape, dtype)\n', (518, 532), True, 'import jittor as jt\n'), ((1958, 1973), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1971, 1973), False, 'import unittest\n'), ((1041, 1054), 'jittor.float32', 'jt.float32', (['(0)'], {}), '(0)\n', (1051, 1054), True, 'import jittor as jt\n'), ((1135, 1158), 'jittor.nn.Conv', 'nn.Conv', (['(3)', '(64)', '(7)', '(2)', '(3)'], {}), '(3, 64, 7, 2, 3)\n', (1142, 1158), False, 'from jittor import nn\n'), ((1168, 1184), 'jittor.nn.BatchNorm', 'nn.BatchNorm', (['(64)'], {}), '(64)\n', (1180, 1184), False, 'from jittor import nn\n'), ((1194, 1203), 'jittor.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1201, 1203), False, 'from jittor import nn\n'), ((1213, 1229), 'jittor.nn.Pool', 'nn.Pool', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (1220, 1229), False, 'from jittor import nn\n'), ((1513, 1530), 'jittor.grad', 'jt.grad', (['loss', 'ps'], {}), '(loss, ps)\n', (1520, 1530), True, 'import jittor as jt\n'), ((1539, 1550), 'jittor.sync', 'jt.sync', (['gs'], {}), '(gs)\n', (1546, 1550), True, 'import jittor as jt\n'), ((1601, 1621), 'jittor.dump_all_graphs', 'jt.dump_all_graphs', ([], {}), '()\n', (1619, 1621), True, 'import jittor as jt\n'), ((1364, 1394), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (1378, 1394), True, 'import numpy as np\n')] |
"""
A module implementing binned and unbinned likelihood for weighted and
unweighted sets of photon phases. The model is encapsulated in LCTemplate,
a mixture model.
LCPrimitives are combined to form a light curve (LCTemplate).
LCFitter then performs a maximum likielihood fit to determine the
light curve parameters.
LCFitter also allows fits to subsets of the phases for TOA calculation.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/lcfitters.py,v 1.54 2017/03/24 18:48:51 kerrm Exp $
author: <NAME> <<EMAIL>>
"""
import numpy as np
from copy import deepcopy
import scipy
from scipy.optimize import fmin,fmin_tnc,leastsq
from uw.pulsar.stats import z2mw,hm,hmw
SECSPERDAY = 86400.
def shifted(m,delta=0.5):
    """ Produce a copy of a binned profile shifted in phase by delta.

    Uses the Fourier shift theorem: multiply each frequency component by
    a phase ramp exp(2*pi*i*k*delta) and invert.  Works on the last axis.
    """
    fourier = np.fft.fft(m, axis=-1)
    nbins = fourier.shape[-1]
    # integer frequency indices k = fftfreq(n)*n, times 2*pi*i*delta
    ramp = np.exp(np.fft.fftfreq(nbins) * (nbins * np.pi * 2.0j * delta))
    return np.real(np.fft.ifft(ramp * fourier, axis=-1))
def weighted_light_curve(nbins,phases,weights,normed=False,phase_shift=0):
    """ Return a set of bins, values, and errors to represent a
        weighted light curve.

    nbins -- number of phase bins
    phases -- photon phases
    weights -- per-photon probability weights
    normed -- if True, normalize the values to an average of 1
    phase_shift -- offset applied to the bin edges

    Returns (bin_edges, values, errors).
    NB: the deprecated `normed=` keyword of np.histogram (removed in
    NumPy 1.24) is no longer passed; it was always False here, so the
    behavior is unchanged.
    """
    bins = np.linspace(0+phase_shift,1+phase_shift,nbins+1)
    counts = np.histogram(phases,bins=bins)[0]
    w1 = (np.histogram(phases,bins=bins,weights=weights)[0]).astype(float)
    w2 = (np.histogram(phases,bins=bins,weights=weights**2,)[0]).astype(float)
    # Poisson-style error: sqrt of summed squared weights; fall back to
    # the raw count for nearly-empty bins
    errors = np.where(counts > 1, w2**0.5, counts)
    norm = w1.sum()/nbins if normed else 1.
    return bins,w1/norm,errors/norm
def LCFitter(template,phases,weights=None,log10_ens=None,times=1,
        binned_bins=100,binned_ebins=8,phase_shift=0):
    """ Factory class for light curve fitters.  Based on whether weights
        or energies are supplied in addition to photon phases, the
        appropriate fitter class is returned.

        Arguments:
        template -- an instance of LCTemplate
        phases   -- list of photon phases

        Keyword arguments:
        weights     [None] optional photon weights
        log10_ens   [None] optional photon energies (log10(E/MeV))
        times       [None] optional photon arrival times
        binned_bins [100]  phase bins to use in binned likelihood
        binned_ebins [8]   energy bins to use in binned likelihood
        phase_shift [0]    set this if a phase shift has been applied
    """
    times = np.asarray(times)
    phases = np.asarray(phases)
    # BUG FIX: the original built an *integer* 0/1 mask and inverted it
    # with "~", producing -1/-2 values that were then used as fancy
    # indices (selecting the wrong photons) instead of a boolean mask.
    mask = np.isnan(phases)
    nnan = mask.sum()
    if nnan > 0:
        print ('Suppressing %d NaN phases!'%(nnan))
    mask = ~mask
    phases = phases[mask]
    if len(times) == len(mask):
        times = times[mask]
    if weights is not None:
        weights = np.asarray(weights)[mask]
    kwargs = dict(times=times,binned_bins=binned_bins,
        phase_shift=phase_shift)
    if weights is None:
        kwargs['weights'] = None
        return UnweightedLCFitter(template,phases,**kwargs)
    kwargs['weights'] = weights
    return WeightedLCFitter(template,phases,**kwargs)
class UnweightedLCFitter(object):
    """ Maximum-likelihood fitter matching a light curve template
        (LCTemplate) to a set of unweighted photon phases.

        Keyword arguments from the LCFitter factory (times, binned_bins,
        phase_shift, weights) are stored directly on the instance.  The
        unbinned likelihood is the default; fit(unbinned=False) switches
        to the binned approximation."""

    def __init__(self,template,phases,**kwargs):
        self.template = template
        self.phases = np.asarray(phases)
        self.__dict__.update(kwargs)
        self._hist_setup()
        # default is unbinned likelihood
        self.loglikelihood = self.unbinned_loglikelihood
        self.gradient = self.unbinned_gradient

    def is_energy_dependent(self):
        # mirrors the energy-dependent fitter interface
        return False

    def _hist_setup(self):
        """ Setup data for chi-squared and binned likelihood."""
        # choose a bin count commensurate with the H-test significance
        h = hm(self.phases)
        nbins = 25
        if h > 100: nbins = 50
        if h > 1000: nbins = 100
        ph0,ph1 = 0+self.phase_shift,1+self.phase_shift
        hist = np.histogram(self.phases,bins=np.linspace(ph0,ph1,nbins))
        if len(hist[0])==nbins: raise ValueError('Histogram too old!')
        x = ((hist[1][1:] + hist[1][:-1])/2.)[hist[0]>0]
        counts = (hist[0][hist[0]>0]).astype(float)
        y = counts / counts.sum() * nbins
        yerr = counts**0.5 / counts.sum() * nbins
        self.chistuff = x,y,yerr
        # now set up binning for binned likelihood
        nbins = self.binned_bins+1
        hist = np.histogram(self.phases,bins=np.linspace(ph0,ph1,nbins))
        self.counts_centers = ((hist[1][1:] + hist[1][:-1])/2.)[hist[0]>0]
        self.counts = hist[0][hist[0]>0]

    def unbinned_loglikelihood(self,p,*args):
        """ Return the negative log likelihood for parameters p."""
        t = self.template
        params_ok = t.set_parameters(p)
        if ((t.norm()>1) or (not params_ok)):
            # out-of-bounds parameters: return a large penalty
            return 2e20
        rvals = -np.log(t(self.phases)).sum()
        if np.isnan(rvals): return 2e20 # NB need to do better accounting of norm
        return rvals

    def binned_loglikelihood(self,p,*args):
        """ Return the binned negative log likelihood for parameters p."""
        t = self.template
        params_ok = t.set_parameters(p)
        if ((t.norm()>1) or (not params_ok)):
            return 2e20
        return -(self.counts*np.log(t(self.counts_centers))).sum()

    def __call__(self):
        """ Shortcut for log likelihood at current param values."""
        return self.loglikelihood(self.template.get_parameters())

    def unbinned_gradient(self,p,*args):
        """ Gradient of the unbinned negative log likelihood."""
        t = self.template
        t.set_parameters(p)
        return -(t.gradient(self.phases)/t(self.phases)).sum(axis=1)

    def binned_gradient(self,p,*args):
        """ Gradient of the binned negative log likelihood."""
        t = self.template
        t.set_parameters(p)
        return -(self.counts*t.gradient(self.counts_centers)/t(self.counts_centers)).sum(axis=1)

    def chi(self,p,*args):
        """ Return normalized residuals for a chi^2 (least squares) fit."""
        x,y,yerr = self.chistuff
        t = self.template
        if not self.template.shift_mode and np.any(p < 0):
            # penalize negative parameters when not in shift mode
            return 2e100*np.ones_like(x)/len(x)
        t.set_parameters(p)
        chi = (t(x) - y)/yerr
        return chi

    def quick_fit(self):
        """ Use a coarse chi^2 fit to find seed values for the likelihood
            fit; revert the parameters if the fit made things worse."""
        t = self.template
        p0 = t.get_parameters().copy()
        chi0 = (self.chi(t.get_parameters(),t)**2).sum()
        f = leastsq(self.chi,t.get_parameters(),args=(t))
        chi1 = (self.chi(t.get_parameters(),t)**2).sum()
        print (chi0,chi1,' chi numbers')
        if (chi1 > chi0):
            print (self)
            print ('Failed least squares fit -- reset and proceed to likelihood.')
            t.set_parameters(p0)

    def _fix_state(self,restore_state=None):
        """ Freeze all primitive parameters except the last of each
            (typically the position), or restore a previously saved
            free-mask.  Returns the previous free-mask as a flat list."""
        old_state = []
        counter = 0
        for p in self.template.primitives:
            # range, not the Python 2-only xrange
            for i in range(len(p.p)):
                old_state.append(p.free[i])
                if restore_state is not None:
                    p.free[i] = restore_state[counter]
                else:
                    if i<(len(p.p)-1):
                        p.free[i] = False
                counter += 1
        return old_state

    def _set_unbinned(self,unbinned=True):
        """ Select the unbinned or binned likelihood/gradient pair."""
        if unbinned:
            self.loglikelihood = self.unbinned_loglikelihood
            self.gradient = self.unbinned_gradient
        else:
            self.loglikelihood = self.binned_loglikelihood
            self.gradient = self.binned_gradient

    def fit(self,quick_fit_first=False, unbinned=True, use_gradient=True,
            overall_position_first=False, positions_first=False,
            estimate_errors=False, prior=None,
            unbinned_refit=True, try_bootstrap=True):
        """ Maximize the likelihood (optionally with a prior) over the
            free template parameters.  Returns True on success, False if
            the fit failed (parameters are then reset).  Optional seeding
            strategies: a chi^2 quick fit, a brute-force overall phase
            scan, and a positions-only pre-fit."""
        # NB use of priors currently not supported by quick_fit, positions first, etc.
        self._set_unbinned(unbinned)
        if (prior is not None) and (len(prior) > 0):
            fit_func = lambda x: self.loglikelihood(x)+prior(x)
            grad_func = lambda x: self.gradient(x) + prior.gradient(x)
        else:
            fit_func = self.loglikelihood
            grad_func = self.gradient
        if overall_position_first:
            # do a brute force scan over the profile position down to <1mP
            def logl(phase):
                self.template.set_overall_phase(phase)
                return self.loglikelihood(self.template.get_parameters())
            # coarse grained; materialize the values explicitly -- in
            # Python 3, map() is lazy and np.argmin cannot consume it
            dom = np.linspace(0,1,101)
            cod = [logl(ph) for ph in 0.5*(dom[1:]+dom[:-1])]
            idx = np.argmin(cod)
            # fine grained
            dom = np.linspace(dom[idx],dom[idx+1],101)
            cod = [logl(ph) for ph in dom]
            # set to best fit phase shift
            ph0 = dom[np.argmin(cod)]
            self.template.set_overall_phase(ph0)
        if positions_first:
            # fit with only positions free, then restore the free-mask
            print ('Running positions first')
            restore_state = self._fix_state()
            self.fit(quick_fit_first=quick_fit_first,estimate_errors=False, unbinned=unbinned, use_gradient=use_gradient, positions_first=False)
            self._fix_state(restore_state)
        # an initial chi squared fit to find better seed values
        if quick_fit_first: self.quick_fit()
        ll0 = -fit_func(self.template.get_parameters())
        p0 = self.template.get_parameters().copy()
        if use_gradient:
            self.fit_tnc(fit_func,grad_func)
        else:
            self.fit_fmin(fit_func)
        if (ll0 > self.ll) or (self.ll==-2e20) or (np.isnan(self.ll)):
            if unbinned_refit and np.isnan(self.ll) and (not unbinned):
                if (self.binned_bins*2) < 400:
                    # binned fit diverged -- retry with finer binning
                    print ('Did not converge using %d bins... retrying with %d bins...'%(self.binned_bins,self.binned_bins*2))
                    self.template.set_parameters(p0)
                    self.ll = ll0; self.fitvals = p0
                    self.binned_bins *= 2
                    self._hist_setup()
                    return self.fit(quick_fit_first=quick_fit_first, unbinned=unbinned, use_gradient=use_gradient, positions_first=positions_first, estimate_errors=estimate_errors,prior=prior)
            self.bad_p = self.template.get_parameters().copy()
            self.bad_ll = self.ll
            print ('Failed likelihood fit -- resetting parameters.')
            self.template.set_parameters(p0)
            self.ll = ll0; self.fitvals = p0
            return False
        if estimate_errors:
            if not self.hess_errors(use_gradient=use_gradient):
                # fall back on bootstrap errors if the hessian failed
                if try_bootstrap:
                    self.bootstrap_errors(set_errors=True)
        print ('Improved log likelihood by %.2f'%(self.ll-ll0))
        return True

    def fit_position(self, unbinned=True):
        """ Fit the overall template position (phase shift) only."""
        self._set_unbinned(unbinned)
        # NB use of priors currently not supported here
        def logl(phase):
            self.template.set_overall_phase(phase)
            return self.loglikelihood(self.template.get_parameters())
        # coarse grained scan; materialize the list (py3 map is lazy)
        dom = np.linspace(0,1,101)
        cod = [logl(ph) for ph in 0.5*(dom[1:]+dom[:-1])]
        idx = np.argmin(cod)
        # polish with a simplex fit and apply the best-fit phase
        ph0 = fmin(logl,[dom[idx]],full_output=True,disp=0)[0][0]
        self.template.set_overall_phase(ph0)

    def fit_fmin(self,fit_func,ftol=1e-5):
        """ Simplex (Nelder-Mead) minimization of fit_func."""
        x0 = self.template.get_parameters()
        fit = fmin(fit_func,x0,disp=0,ftol=ftol,full_output=True)
        self.fitval = fit[0]
        self.ll = -fit[1]
        return fit

    def fit_cg(self):
        """ Conjugate-gradient minimization of the current likelihood."""
        from scipy.optimize import fmin_cg
        fit = fmin_cg(self.loglikelihood,self.template.get_parameters(),fprime=self.gradient,args=(self.template,),full_output=1,disp=1)
        return fit

    def fit_bfgs(self):
        """ BFGS minimization; also sets errors from the inverse hessian."""
        from scipy.optimize import fmin_bfgs
        fit = fmin_bfgs(self.loglikelihood,self.template.get_parameters(),fprime=self.gradient,args=(self.template,),full_output=1,disp=1,gtol=1e-5,norm=2)
        self.template.set_errors(np.diag(fit[3])**0.5)
        self.fitval = fit[0]
        self.ll = -fit[1]
        self.cov_matrix = fit[3]
        return fit

    def fit_tnc(self,fit_func,grad_func,ftol=1e-5):
        """ Bounded truncated-Newton minimization (the default fitter)."""
        x0 = self.template.get_parameters()
        bounds = self.template.get_bounds()
        fit = fmin_tnc(fit_func,x0,fprime=grad_func,ftol=ftol,pgtol=1e-5,
                bounds=bounds,maxfun=5000,messages=8)
        self.fitval = fit[0]
        self.ll = -fit_func(self.template.get_parameters())
        return fit

    def fit_l_bfgs_b(self):
        """ Bounded L-BFGS-B minimization."""
        from scipy.optimize import fmin_l_bfgs_b
        x0 = self.template.get_parameters()
        bounds = self.template.get_bounds()
        fit = fmin_l_bfgs_b(self.loglikelihood,x0,fprime=self.gradient,
                bounds=bounds,factr=1e-5)
        return fit

    def hess_errors(self,use_gradient=True):
        """ Set errors from hessian.  Fit should be called first...
            Returns True if a positive-definite covariance was found."""
        p = self.template.get_parameters()
        nump = len(p)
        self.cov_matrix = np.zeros([nump,nump],dtype=float)
        ss = calc_step_size(self.loglikelihood,p.copy())
        if use_gradient:
            h1 = hess_from_grad(self.gradient,p.copy(),step=ss)
            c1 = scipy.linalg.inv(h1)
            if np.all(np.diag(c1)>0):
                self.cov_matrix = c1
            else:
                print ('Could not estimate errors from hessian.')
                return False
        else:
            h1 = hessian(self.template,self.loglikelihood,delta=ss)
            try:
                c1 = scipy.linalg.inv(h1)
            except scipy.linalg.LinAlgError:
                print ('Hessian matrix was singular!  Aborting.')
                return False
            d = np.diag(c1)
            if np.all(d>0):
                self.cov_matrix = c1
                # attempt to refine with step sizes from the first pass
                # NOTE(review): kwarg is "delt" here vs "delta" above --
                # confirm against the hessian() signature
                h2 = hessian(self.template,self.loglikelihood,delt=d**0.5)
                try:
                    c2 = scipy.linalg.inv(h2)
                except scipy.linalg.LinAlgError:
                    print ('Second try at hessian matrix was singular!  Aborting.')
                    return False
                if np.all(np.diag(c2)>0):
                    self.cov_matrix = c2
            else:
                print ('Could not estimate errors from hessian.')
                return False
        self.template.set_errors(np.diag(self.cov_matrix)**0.5)
        return True

    def bootstrap_errors(self,nsamp=100,fit_kwargs=None,set_errors=False):
        """ Estimate parameter errors as the standard deviation over fits
            to nsamp bootstrap (resampled with replacement) realizations.
            NB: fit_kwargs is copied; the caller's dict is not mutated
            (the original used a mutable default and modified it)."""
        fit_kwargs = dict(fit_kwargs) if fit_kwargs is not None else {}
        p0 = self.phases; w0 = self.weights
        param0 = self.template.get_parameters().copy()
        n = len(p0)
        results = np.empty([nsamp,len(self.template.get_parameters())])
        fit_kwargs['estimate_errors'] = False # never estimate errors
        if 'unbinned' not in fit_kwargs.keys():
            fit_kwargs['unbinned'] = True
        counter = 0
        # allow up to 2*nsamp attempts to accumulate nsamp good fits
        for i in range(nsamp*2):
            if counter == nsamp:
                break
            if i == (2*nsamp-1):
                self.phases = p0; self.weights = w0
                raise ValueError('Could not construct bootstrap sample.  Giving up.')
            a = (np.random.rand(n)*n).astype(int)
            self.phases = p0[a]
            if w0 is not None:
                self.weights = w0[a]
            if not fit_kwargs['unbinned']:
                self._hist_setup()
            if self.fit(**fit_kwargs):
                results[counter,:] = self.template.get_parameters()
                counter += 1
        self.template.set_parameters(param0)
        if set_errors:
            self.template.set_errors(np.std(results,axis=0))
        self.phases = p0; self.weights = w0
        return results

    def __str__(self):
        if 'll' in self.__dict__.keys():
            return '\nLog Likelihood for fit: %.2f\n'%(self.ll) + str(self.template)
        return str(self.template)

    def write_template(self,outputfile='template.gauss'):
        """ Write the template to disk (delegates to prof_string)."""
        s = self.template.prof_string(outputfile=outputfile)

    def remove_component(self,index,steps=5,fit_kwargs=None):
        """ Gradually remove a component from a model by refitting and
            return new LCTemplate object.
            NB: fit_kwargs is copied; the caller's dict is not mutated."""
        fit_kwargs = dict(fit_kwargs) if fit_kwargs is not None else {}
        if len(self.template)==1:
            raise ValueError('Template only has one component -- removing it would be madness!')
        old_p = self.template.get_parameters()
        old_f = self.template.norms.free.copy()
        # ramp the component's normalization down to zero over `steps` fits
        vals = np.arange(steps).astype(float)/np.arange(1,steps+1)
        vals = vals[::-1]*self.template.norms()[index]
        self.template.norms.free[index] = False
        for v in vals:
            self.template.norms.set_single_norm(index,v)
            if 'unbinned' not in fit_kwargs:
                fit_kwargs['unbinned'] = False
            self.fit(**fit_kwargs)
        t = self.template.delete_primitive(index)
        self.template.norms.free[:] = old_f
        self.template.set_parameters(old_p)
        return t

    def plot(self,nbins=50,fignum=2, axes=None, plot_components=False, template=None):
        """ Plot the histogrammed data with the model overlaid."""
        import pylab as pl
        weights = self.weights
        dom = np.linspace(0,1,1000)
        if template is None: template = self.template
        if axes is None:
            fig = pl.figure(fignum)
            axes = fig.add_subplot(111)
        # density=True replaces the "normed" kwarg removed from matplotlib
        axes.hist(self.phases,bins=np.linspace(0,1,nbins+1),histtype='step',ec='red',density=True,lw=1,weights=weights)
        if weights is not None:
            bg_level = 1-(weights**2).sum()/weights.sum()
            axes.axhline(bg_level,color='blue')
            x,w1,errors = weighted_light_curve(nbins,self.phases,weights,normed=True)
            x = (x[:-1]+x[1:])/2
            axes.errorbar(x,w1,yerr=errors,capsize=0,marker='',ls=' ',color='red')
        else:
            bg_level = 0
            h = np.histogram(self.phases,bins=np.linspace(0,1,nbins+1))
            x = (h[1][:-1]+h[1][1:])/2
            n = float(h[0].sum())/nbins
            axes.errorbar(x,h[0]/n,yerr=h[0]**0.5/n,capsize=0,marker='',ls=' ',color='red')
        cod = template(dom)*(1-bg_level)+bg_level
        axes.plot(dom,cod,color='blue',lw=1)
        if plot_components:
            for i in range(len(template.primitives)):
                cod = template.single_component(i,dom)*(1-bg_level)+bg_level
                axes.plot(dom,cod,color='blue',lw=1,ls='--')
        pl.axis([0,1,pl.axis()[2],max(pl.axis()[3],cod.max()*1.05)])
        axes.set_ylabel('Normalized Profile')
        axes.set_xlabel('Phase')
        axes.grid(True)

    def plot_residuals(self,nbins=50,fignum=3):
        """ Plot data-minus-model residuals of the binned light curve."""
        import pylab as pl
        edges = np.linspace(0,1,nbins+1)
        lct = self.template
        cod = np.asarray([lct.integrate(e1,e2) for e1,e2 in zip(edges[:-1],edges[1:])])*len(self.phases)
        pl.figure(fignum)
        counts= np.histogram(self.phases,bins=edges)[0]
        pl.errorbar(x=(edges[1:]+edges[:-1])/2,y=counts-cod,yerr=counts**0.5,ls=' ',marker='o',color='red')
        pl.axhline(0,color='blue')
        pl.ylabel('Residuals (Data - Model)')
        pl.xlabel('Phase')
        pl.grid(True)

    def __getstate__(self):
        """ Cannot pickle self.loglikelihood and self.gradient since
            these are instancemethod objects.
            See: http://mail.python.org/pipermail/python-list/2000-October/054610.html """
        result = self.__dict__.copy()
        del result['loglikelihood']
        del result['gradient']
        return result

    def __setstate__(self,state):
        self.__dict__ = state
        # restore the (unpicklable) bound-method defaults
        self.loglikelihood = self.unbinned_loglikelihood
        self.gradient = self.unbinned_gradient

    def aic(self,template=None):
        """ Return the Akaike information criterion for the current state.
            Note the sense of the statistic is such that more negative
            implies a better fit."""
        if template is not None:
            template,self.template = self.template,template
        else:
            template = self.template
        nump = len(template.get_parameters())
        ts = 2*(nump+self())
        self.template = template
        return ts

    def bic(self,template=None):
        """ Return the Bayesian information criterion for the current state.
            Note the sense of the statistic is such that more negative
            implies a better fit.
            This should work for energy-dependent templates provided the
            template and fitter match types."""
        if template is not None:
            template,self.template = self.template,template
        else:
            template = self.template
        nump = len(self.template.get_parameters())
        if self.weights is None:
            n = len(self.phases)
        else:
            n = self.weights.sum()
        ts = nump*np.log(n)+2*self()
        self.template = template
        return ts
class WeightedLCFitter(UnweightedLCFitter):
    """ Light curve fitter for probability-weighted photon phases;
        overrides the likelihood, gradient, and chi^2 machinery to fold
        in the per-photon weights."""

    def _hist_setup(self):
        """ Setup binning for a quick chi-squared fit."""
        h = hmw(self.phases,self.weights)
        nbins = 25
        if h > 100: nbins = 50
        if h > 1000: nbins = 100
        bins,counts,errors = weighted_light_curve(nbins,
            self.phases,self.weights,phase_shift=self.phase_shift)
        mask = counts > 0
        N = counts.sum()
        # background fraction estimated from the weight distribution
        self.bg_level = 1-(self.weights**2).sum()/N
        x = ((bins[1:]+bins[:-1])/2)
        y = counts / N * nbins
        yerr = errors / N * nbins
        self.chistuff = x[mask],y[mask],yerr[mask]
        # now set up binning for binned likelihood: sort the photons so
        # each phase bin maps onto a contiguous slice of the arrays
        nbins = self.binned_bins
        bins = np.linspace(0+self.phase_shift,1+self.phase_shift,nbins+1)
        a = np.argsort(self.phases)
        self.phases = self.phases[a]
        self.weights = self.weights[a]
        self.counts_centers = []
        self.slices = []
        indices = np.arange(len(self.weights))
        # range, not the Python 2-only xrange
        for i in range(nbins):
            mask = (self.phases >= bins[i]) & (self.phases < bins[i+1])
            if mask.sum() > 0:
                w = self.weights[mask]
                if w.sum()==0: continue
                p = self.phases[mask]
                # weight-averaged center of the occupied bin
                self.counts_centers.append((w*p).sum()/w.sum())
                self.slices.append(slice(indices[mask].min(),indices[mask].max()+1))
        self.counts_centers = np.asarray(self.counts_centers)

    def chi(self,p,*args):
        """ Normalized residuals of the weighted light curve."""
        x,y,yerr = self.chistuff
        bg = self.bg_level
        if not self.template.shift_mode and np.any(p < 0):
            # penalize negative parameters when not in shift mode
            return 2e100*np.ones_like(x)/len(x)
        args[0].set_parameters(p)
        chi = (bg + (1-bg)*self.template(x) - y)/yerr
        return chi

    def unbinned_loglikelihood(self,p,*args):
        """ Negative log likelihood; each photon contributes
            log(1 + w*(f(phi)-1))."""
        t = self.template
        params_ok = t.set_parameters(p)
        if ((t.norm()>1) or (not params_ok)):
            return 2e20
        return -np.log(1+self.weights*(t(self.phases)-1)).sum()

    def binned_loglikelihood(self,p,*args):
        """ Binned approximation: evaluate the template only at the bin
            centers and broadcast the values to the per-photon weights."""
        t = self.template
        params_ok = t.set_parameters(p)
        if ((t.norm()>1) or (not params_ok)):
            return 2e20
        template_terms = t(self.counts_centers)-1
        phase_template_terms = np.empty_like(self.weights)
        for tt,sl in zip(template_terms,self.slices):
            phase_template_terms[sl] = tt
        return -np.log(1+self.weights*phase_template_terms).sum()

    def unbinned_gradient(self,p,*args):
        """ Gradient of the weighted negative log likelihood."""
        t = self.template
        t.set_parameters(p)
        if t.norm()>1:
            return np.ones_like(p)*2e20
        numer = self.weights*t.gradient(self.phases)
        denom = 1+self.weights*(t(self.phases)-1)
        return -(numer/denom).sum(axis=1)

    def binned_gradient(self,p,*args):
        """ Binned approximation to the gradient."""
        t = self.template
        t.set_parameters(p)
        if t.norm()>1:
            return np.ones_like(p)*2e20
        nump = len(p)
        template_terms = t(self.counts_centers)-1
        gradient_terms = t.gradient(self.counts_centers)
        phase_template_terms = np.empty_like(self.weights)
        phase_gradient_terms = np.empty([nump,len(self.weights)])
        # distribute the central values to the unbinned phases/weights
        for tt,gt,sl in zip(template_terms,gradient_terms.transpose(),self.slices):
            phase_template_terms[sl] = tt
            # range, not the Python 2-only xrange
            for j in range(nump):
                phase_gradient_terms[j,sl] = gt[j]
        numer = self.weights*phase_gradient_terms
        denom = 1+self.weights*(phase_template_terms)
        return -(numer/denom).sum(axis=1)
class ChiSqLCFitter(object):
    """ Fit binned data with a gaussian likelihood."""
    def __init__(self,template,x,y,yerr,log10_ens=3,**kwargs):
        # template -- light curve template object (callable, with
        #     get/set_parameters, set_errors, gradient, shift_mode)
        # x, y, yerr -- bin centers, rates, and rate errors
        # log10_ens -- log10 energy passed through to the template
        # kwargs -- stored directly as instance attributes
        self.template = template
        self.chistuff = x,y,yerr,log10_ens
        self.__dict__.update(kwargs)
    def is_energy_dependent(self):
        # this fitter uses a single fixed log10_ens value
        return False
    def chi(self,p,*args):
        """ Return the chi residuals (model-data)/err for parameters p."""
        x,y,yerr,log10_ens = self.chistuff
        t = self.template
        # negative parameters are unphysical unless in shift mode;
        # return a huge penalty vector to repel the optimizer
        if not self.template.shift_mode and np.any(p < 0):
            return 2e100*np.ones_like(x)/len(x)
        t.set_parameters(p)
        chi = (t(x,log10_ens) - y)/yerr
        return chi
    def chigrad(self,p,*args):
        """ Jacobian of chi: the template gradient scaled by 1/yerr.
            NOTE(review): does not re-set parameters to p before
            evaluating the gradient -- presumably relies on leastsq
            calling chi() first; confirm against usage."""
        x,y,yerr,log10_ens = self.chistuff
        #chi = self.chi(p,*args)
        g = self.template.gradient(x,log10_ens)
        #return 2*(chi*g)
        return g/yerr
    def fit(self,use_gradient=True,get_results=False):
        """ Run a least-squares fit; update the template parameters and
            (when the covariance is available) their errors in place.
            Returns the full leastsq output if get_results is True."""
        p = self.template.get_parameters()
        Dfun = self.chigrad if use_gradient else None
        # col_deriv=True because chigrad returns one row per parameter
        results = leastsq(self.chi,p,Dfun=Dfun,full_output=1,
                col_deriv=True)
        p,cov = results[:2]
        self.template.set_parameters(p)
        if cov is not None:
            # 1-sigma errors from the diagonal of the covariance matrix
            self.template.set_errors(np.diag(cov)**0.5)
        if get_results:
            return results
    def __str__(self):
        return str(self.template)
    def plot(self,fignum=2,shift=0,resids=False):
        """ Plot the binned data (optionally phase-shifted via the
            module-level `shifted` helper) with the model overlaid, or
            the data-model residuals when resids is True."""
        import pylab as pl
        x,y,yerr,log10_ens = self.chistuff
        my = self.template(x,log10_ens)
        if shift != 0:
            y = shifted(y,shift)
            my = shifted(my,shift)
        if resids:
            y = y-my
        pl.figure(fignum); pl.clf()
        pl.errorbar(x,y,yerr=yerr,ls=' ')
        if not resids:
            pl.plot(x,my,color='red')
def hessian(m,mf,*args,**kwargs):
    """Calculate the Hessian of mf at the model's current parameters.

    m  -- the model; must provide get_parameters() returning an array
    mf -- the minimizing function, called as mf(p, m, *args)
    args -- additional arguments for mf
    kwargs -- 'delt' may supply per-parameter fractional step sizes
              (default 0.01 for every parameter)

    Second partials are estimated by central finite differences on
    fractional (multiplicative) steps, so parameters should be non-zero.
    NOTE: the diagonal estimate carries an O(f'/p) bias, so it is only
    accurate at (or very near) a stationary point of mf -- the intended
    use case.  The model is reset by calling mf with the original
    parameters before returning.
    """
    p = m.get_parameters().copy()
    p0 = p.copy() # sacrosanct copy
    # membership test directly on the dict; .keys() was redundant
    delta = kwargs.get('delt', [0.01]*len(p))
    # local named H to avoid shadowing this function's own name
    H = np.zeros([len(p),len(p)])
    for i in range(len(p)):  # range, not Python-2-only xrange
        delt = delta[i]
        for j in range(i,len(p)): #Second partials by finite difference; could be done analytically in a future revision
            xhyh,xhyl,xlyh,xlyl = p.copy(),p.copy(),p.copy(),p.copy()
            # step away from zero regardless of the parameter's sign
            xdelt = delt if p[i] >= 0 else -delt
            ydelt = delt if p[j] >= 0 else -delt
            xhyh[i]*=(1+xdelt)
            xhyh[j]*=(1+ydelt)
            xhyl[i]*=(1+xdelt)
            xhyl[j]*=(1-ydelt)
            xlyh[i]*=(1-xdelt)
            xlyh[j]*=(1+ydelt)
            xlyl[i]*=(1-xdelt)
            xlyl[j]*=(1-ydelt)
            H[i][j] = H[j][i] = (
                (mf(xhyh,m,*args)-mf(xhyl,m,*args)
                 -mf(xlyh,m,*args)+mf(xlyl,m,*args))
                /(p[i]*p[j]*4*delt**2))
    mf(p0,m,*args) #call likelihood with original values; this resets model and any other values that might be used later
    return H
def get_errors(template,total,n=100):
    """ This is, I think, for making MC estimates of TOA errors.

    For each of n Monte Carlo trials, draw `total` phases from the
    template, fit the overall phase by maximum likelihood, and estimate
    the phase error from the curvature of the log likelihood at the fit
    (at a fixed step `delta` and again at a step matched to the first
    error estimate).

    Returns (fitted phase offsets from ph0, fixed-step errors,
    matched-step errors).
    """
    from scipy.optimize import fmin
    ph0 = template.get_location()
    def logl(phi,*args):
        # negative log likelihood of the phases for overall phase phi
        phases = args[0]
        template.set_overall_phase(phi%1)
        return -np.log(template(phases)).sum()
    errors = np.empty(n)
    fitvals = np.empty(n)
    errors_r = np.empty(n)
    delta = 0.01
    mean = 0
    for i in range(n):  # range, not Python-2-only xrange
        template.set_overall_phase(ph0)
        ph = template.random(total)
        results = fmin(logl,ph0,args=(ph,),full_output=1,disp=0)
        phi0,fopt = results[0],results[1]
        fitvals[i] = phi0
        mean += logl(phi0+delta,ph)-logl(phi0,ph)
        # second-difference curvature at fixed step delta ...
        errors[i] = (logl(phi0+delta,ph)-fopt*2+logl(phi0-delta,ph))/delta**2
        # ... and again with the step matched to the 1-sigma estimate
        my_delta = errors[i]**-0.5
        errors_r[i] = (logl(phi0+my_delta,ph)-fopt*2+logl(phi0-my_delta,ph))/my_delta**2
    print ('Mean: %.2f'%(mean/n))
    return fitvals-ph0,errors**-0.5,errors_r**-0.5
def make_err_plot(template,totals=[10,20,50,100,500],n=1000):
    """ Plot normalized MC phase-error pulls (offset/error) for several
        photon totals against a standard normal curve.

        template -- light curve template passed through to get_errors
        totals -- photon totals to simulate (read-only; the mutable
            default is safe because it is never modified)
        n -- number of MC trials per total
    """
    import pylab as pl
    fvals = []
    errs = []
    # removed an unused `bins` local; the histogram bins are built here
    hist_bins = np.arange(-5,5.1,0.5)
    for tot in totals:
        f,e = get_errors(template,tot,n=n)
        fvals.append(f)
        errs.append(e)
        # NOTE(review): `normed` was removed from modern matplotlib in
        # favor of `density` -- confirm the pinned matplotlib version
        pl.hist(f/e,bins=hist_bins,histtype='step',normed=True,label='N = %d'%tot)
    # overlay the standard normal density for comparison
    g = lambda x: (np.pi*2)**-0.5*np.exp(-x**2/2)
    dom = np.linspace(-5,5,101)
    pl.plot(dom,g(dom),color='k')
    pl.legend()
    pl.axis([-5,5,0,0.5])
def approx_gradient(fitter,eps=1e-6):
    """ Numerically approximate the gradient of an instance of one of the
        light curve fitters using a 4th-order central difference scheme.

        fitter -- provides .template (with get/set_parameters) and
            .loglikelihood(p)
        eps -- base step size for the finite differences

        The template's parameters are restored before returning.
        TODO -- potentially merge this with the code in lcprimitives"""
    func = fitter.template
    orig_p = func.get_parameters(free=True).copy()
    g = np.zeros([len(orig_p)])
    # 4th-order central-difference weights for steps [2,1,-1,-2]*eps
    weights = np.asarray([-1,8,-8,1])/(12*eps)
    def do_step(which,eps):
        # evaluate the log likelihood with parameter `which` offset by eps
        p0 = orig_p.copy()
        p0[which] += eps
        return fitter.loglikelihood(p0)
    for i in range(len(orig_p)):  # range, not Python-2-only xrange
        for j,w in zip([2,1,-1,-2],weights):
            g[i] += w*do_step(i,j*eps)
    func.set_parameters(orig_p,free=True) # restore original parameters
    return g
def hess_from_grad(grad,par,step=1e-3,iterations=2):
    """ Use gradient to compute hessian.  Proceed iteratively to take steps
        roughly equal to the 1-sigma errors.

        grad -- callable returning the gradient at a parameter vector;
            it is called once more with the original parameters before
            returning, which resets any state the evaluation carries
        par -- parameter vector; mutated during the computation but
            restored before returning
        step -- the initial step can be:
            [scalar] use the same step for the initial iteration
            [array] specify a step for each parameter
        iterations -- number of refinement passes using the current
            1-sigma error estimates as the new step sizes

        Returns the most-refined hessian whose implied errors are finite.
    """
    # NOTE: hand-rolled cofactor-expansion mdet/minv helpers that used to
    # live here were dead code -- minv was immediately rebound to
    # scipy.linalg.inv, which is what is used below.  A Cholesky
    # factorization would be preferable for a symmetric positive-definite
    # hessian.
    minv = scipy.linalg.inv
    def make_hess(p0,steps):
        # central finite difference of the gradient, one row per parameter
        npar = len(par)
        hess = np.empty([npar,npar],dtype=p0.dtype)
        for i in range(npar):  # range, not Python-2-only xrange
            par[i] = p0[i] + steps[i]
            gup = grad(par)
            par[i] = p0[i] - steps[i]
            gdn = grad(par)
            par[:] = p0
            hess[i,:] = (gup-gdn)/(2*steps[i])
        return hess
    p0 = par.copy() # sacrosanct
    if not (hasattr(step,'__len__') and len(step)==len(p0)):
        step = np.ones_like(p0)*step
    hessians = [make_hess(p0,step)]
    for i in range(iterations):
        # refine: use the current 1-sigma estimates as the new step sizes
        steps = np.diag(minv(hessians[-1]))**0.5
        mask = np.isnan(steps)
        if np.any(mask):
            steps[mask] = step[mask]
        hessians.append(make_hess(p0,steps))
    g = grad(p0) # reset parameters
    # walk back from the most-refined estimate to the first one that
    # yields finite implied errors
    for i in range(iterations,-1,-1):
        if not np.any(np.isnan(np.diag(minv(hessians[i]))**0.5)):
            return hessians[i].astype(float)
    return hessians[0].astype(float)
def calc_step_size(logl,par,minstep=1e-5,maxstep=1e-1):
    """ For each parameter, find the step that increases logl by ~0.5
        (a rough 1-sigma scale), bracketed between minstep and maxstep.

        logl -- scalar function of the parameter vector
        par -- parameter vector (reset via logl(par) before returning)

        If logl is too flat to reach the 0.5 target within maxstep (or
        the bisection fails), maxstep is used for that parameter.
    """
    from scipy.optimize import bisect
    rvals = np.empty_like(par)
    p0 = par.copy()
    ll0 = logl(p0)
    def f(x,i):
        # change in logl (minus the 0.5 target) for stepping parameter i
        # by x; p0 is restored before returning
        p0[i] = par[i] + x
        delta_ll = logl(p0)-ll0-0.5
        p0[i] = par[i]
        if abs(delta_ll) < 0.05:
            # close enough to the target: report an exact root so the
            # bisection terminates quickly
            return 0
        return delta_ll
    for i in range(len(par)):  # range, not Python-2-only xrange
        if f(maxstep,i) <= 0:
            rvals[i] = maxstep
        else:
            try:
                # args must be a tuple; (i) is just a parenthesized int
                rvals[i] = bisect(f,minstep,maxstep,args=(i,))
            except ValueError:
                print ('Unable to compute a step size for parameter %d.'%i)
                rvals[i] = maxstep
    logl(par) # reset parameters
    return rvals
| [
"scipy.optimize.fmin_tnc",
"numpy.empty",
"numpy.isnan",
"numpy.argmin",
"numpy.argsort",
"scipy.optimize.leastsq",
"numpy.histogram",
"numpy.arange",
"pylab.figure",
"numpy.exp",
"numpy.diag",
"numpy.std",
"numpy.fft.fft",
"numpy.random.rand",
"pylab.ylabel",
"numpy.empty_like",
"sc... | [((822, 844), 'numpy.fft.fft', 'np.fft.fft', (['m'], {'axis': '(-1)'}), '(m, axis=-1)\n', (832, 844), True, 'import numpy as np\n'), ((1152, 1208), 'numpy.linspace', 'np.linspace', (['(0 + phase_shift)', '(1 + phase_shift)', '(nbins + 1)'], {}), '(0 + phase_shift, 1 + phase_shift, nbins + 1)\n', (1163, 1208), True, 'import numpy as np\n'), ((1453, 1492), 'numpy.where', 'np.where', (['(counts > 1)', '(w2 ** 0.5)', 'counts'], {}), '(counts > 1, w2 ** 0.5, counts)\n', (1461, 1492), True, 'import numpy as np\n'), ((2423, 2440), 'numpy.asarray', 'np.asarray', (['times'], {}), '(times)\n', (2433, 2440), True, 'import numpy as np\n'), ((3022, 3041), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (3032, 3041), True, 'import numpy as np\n'), ((28227, 28238), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (28235, 28238), True, 'import numpy as np\n'), ((28253, 28264), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (28261, 28264), True, 'import numpy as np\n'), ((28280, 28291), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (28288, 28291), True, 'import numpy as np\n'), ((29015, 29039), 'numpy.arange', 'np.arange', (['(-5)', '(5.1)', '(0.25)'], {}), '(-5, 5.1, 0.25)\n', (29024, 29039), True, 'import numpy as np\n'), ((29294, 29317), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (29305, 29317), True, 'import numpy as np\n'), ((29354, 29365), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (29363, 29365), True, 'import pylab as pl\n'), ((29370, 29394), 'pylab.axis', 'pl.axis', (['[-5, 5, 0, 0.5]'], {}), '([-5, 5, 0, 0.5])\n', (29377, 29394), True, 'import pylab as pl\n'), ((32580, 32598), 'numpy.empty_like', 'np.empty_like', (['par'], {}), '(par)\n', (32593, 32598), True, 'import numpy as np\n'), ((875, 892), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['n'], {}), '(n)\n', (889, 892), True, 'import numpy as np\n'), ((1214, 1259), 'numpy.histogram', 'np.histogram', (['phases'], {'bins': 'bins', 'normed': 
'(False)'}), '(phases, bins=bins, normed=False)\n', (1226, 1259), True, 'import numpy as np\n'), ((3236, 3254), 'numpy.asarray', 'np.asarray', (['phases'], {}), '(phases)\n', (3246, 3254), True, 'import numpy as np\n'), ((3626, 3641), 'uw.pulsar.stats.hm', 'hm', (['self.phases'], {}), '(self.phases)\n', (3628, 3641), False, 'from uw.pulsar.stats import z2mw, hm, hmw\n'), ((4727, 4742), 'numpy.isnan', 'np.isnan', (['rvals'], {}), '(rvals)\n', (4735, 4742), True, 'import numpy as np\n'), ((10960, 10982), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (10971, 10982), True, 'import numpy as np\n'), ((11042, 11056), 'numpy.argmin', 'np.argmin', (['cod'], {}), '(cod)\n', (11051, 11056), True, 'import numpy as np\n'), ((11308, 11363), 'scipy.optimize.fmin', 'fmin', (['fit_func', 'x0'], {'disp': '(0)', 'ftol': 'ftol', 'full_output': '(True)'}), '(fit_func, x0, disp=0, ftol=ftol, full_output=True)\n', (11312, 11363), False, 'from scipy.optimize import fmin\n'), ((12246, 12355), 'scipy.optimize.fmin_tnc', 'fmin_tnc', (['fit_func', 'x0'], {'fprime': 'grad_func', 'ftol': 'ftol', 'pgtol': '(1e-05)', 'bounds': 'bounds', 'maxfun': '(5000)', 'messages': '(8)'}), '(fit_func, x0, fprime=grad_func, ftol=ftol, pgtol=1e-05, bounds=\n bounds, maxfun=5000, messages=8)\n', (12254, 12355), False, 'from scipy.optimize import fmin, fmin_tnc, leastsq\n'), ((12655, 12746), 'scipy.optimize.fmin_l_bfgs_b', 'fmin_l_bfgs_b', (['self.loglikelihood', 'x0'], {'fprime': 'self.gradient', 'bounds': 'bounds', 'factr': '(1e-05)'}), '(self.loglikelihood, x0, fprime=self.gradient, bounds=bounds,\n factr=1e-05)\n', (12668, 12746), False, 'from scipy.optimize import fmin_l_bfgs_b\n'), ((12995, 13030), 'numpy.zeros', 'np.zeros', (['[nump, nump]'], {'dtype': 'float'}), '([nump, nump], dtype=float)\n', (13003, 13030), True, 'import numpy as np\n'), ((17125, 17148), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (17136, 17148), True, 'import 
numpy as np\n'), ((18775, 18803), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nbins + 1)'], {}), '(0, 1, nbins + 1)\n', (18786, 18803), True, 'import numpy as np\n'), ((18941, 18958), 'pylab.figure', 'pl.figure', (['fignum'], {}), '(fignum)\n', (18950, 18958), True, 'import pylab as pl\n'), ((19023, 19140), 'pylab.errorbar', 'pl.errorbar', ([], {'x': '((edges[1:] + edges[:-1]) / 2)', 'y': '(counts - cod)', 'yerr': '(counts ** 0.5)', 'ls': '""" """', 'marker': '"""o"""', 'color': '"""red"""'}), "(x=(edges[1:] + edges[:-1]) / 2, y=counts - cod, yerr=counts ** \n 0.5, ls=' ', marker='o', color='red')\n", (19034, 19140), True, 'import pylab as pl\n'), ((19131, 19158), 'pylab.axhline', 'pl.axhline', (['(0)'], {'color': '"""blue"""'}), "(0, color='blue')\n", (19141, 19158), True, 'import pylab as pl\n'), ((19166, 19203), 'pylab.ylabel', 'pl.ylabel', (['"""Residuals (Data - Model)"""'], {}), "('Residuals (Data - Model)')\n", (19175, 19203), True, 'import pylab as pl\n'), ((19212, 19230), 'pylab.xlabel', 'pl.xlabel', (['"""Phase"""'], {}), "('Phase')\n", (19221, 19230), True, 'import pylab as pl\n'), ((19239, 19252), 'pylab.grid', 'pl.grid', (['(True)'], {}), '(True)\n', (19246, 19252), True, 'import pylab as pl\n'), ((21164, 21194), 'uw.pulsar.stats.hmw', 'hmw', (['self.phases', 'self.weights'], {}), '(self.phases, self.weights)\n', (21167, 21194), False, 'from uw.pulsar.stats import z2mw, hm, hmw\n'), ((21759, 21825), 'numpy.linspace', 'np.linspace', (['(0 + self.phase_shift)', '(1 + self.phase_shift)', '(nbins + 1)'], {}), '(0 + self.phase_shift, 1 + self.phase_shift, nbins + 1)\n', (21770, 21825), True, 'import numpy as np\n'), ((21830, 21853), 'numpy.argsort', 'np.argsort', (['self.phases'], {}), '(self.phases)\n', (21840, 21853), True, 'import numpy as np\n'), ((22466, 22497), 'numpy.asarray', 'np.asarray', (['self.counts_centers'], {}), '(self.counts_centers)\n', (22476, 22497), True, 'import numpy as np\n'), ((23532, 23559), 'numpy.empty_like', 
'np.empty_like', (['self.weights'], {}), '(self.weights)\n', (23545, 23559), True, 'import numpy as np\n'), ((24343, 24370), 'numpy.empty_like', 'np.empty_like', (['self.weights'], {}), '(self.weights)\n', (24356, 24370), True, 'import numpy as np\n'), ((25851, 25913), 'scipy.optimize.leastsq', 'leastsq', (['self.chi', 'p'], {'Dfun': 'Dfun', 'full_output': '(1)', 'col_deriv': '(True)'}), '(self.chi, p, Dfun=Dfun, full_output=1, col_deriv=True)\n', (25858, 25913), False, 'from scipy.optimize import fmin, fmin_tnc, leastsq\n'), ((26488, 26505), 'pylab.figure', 'pl.figure', (['fignum'], {}), '(fignum)\n', (26497, 26505), True, 'import pylab as pl\n'), ((26507, 26515), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (26513, 26515), True, 'import pylab as pl\n'), ((26524, 26560), 'pylab.errorbar', 'pl.errorbar', (['x', 'y'], {'yerr': 'yerr', 'ls': '""" """'}), "(x, y, yerr=yerr, ls=' ')\n", (26535, 26560), True, 'import pylab as pl\n'), ((28440, 28490), 'scipy.optimize.fmin', 'fmin', (['logl', 'ph0'], {'args': '(ph,)', 'full_output': '(1)', 'disp': '(0)'}), '(logl, ph0, args=(ph,), full_output=1, disp=0)\n', (28444, 28490), False, 'from scipy.optimize import fmin\n'), ((29739, 29765), 'numpy.asarray', 'np.asarray', (['[-1, 8, -8, 1]'], {}), '([-1, 8, -8, 1])\n', (29749, 29765), True, 'import numpy as np\n'), ((30780, 30806), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'M.dtype'}), '(1, dtype=M.dtype)\n', (30788, 30806), True, 'import numpy as np\n'), ((31119, 31135), 'numpy.empty_like', 'np.empty_like', (['M'], {}), '(M)\n', (31132, 31135), True, 'import numpy as np\n'), ((31568, 31606), 'numpy.empty', 'np.empty', (['[npar, npar]'], {'dtype': 'p0.dtype'}), '([npar, npar], dtype=p0.dtype)\n', (31576, 31606), True, 'import numpy as np\n'), ((32126, 32141), 'numpy.isnan', 'np.isnan', (['steps'], {}), '(steps)\n', (32134, 32141), True, 'import numpy as np\n'), ((32153, 32165), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (32159, 32165), True, 'import numpy as np\n'), 
((2452, 2468), 'numpy.isnan', 'np.isnan', (['phases'], {}), '(phases)\n', (2460, 2468), True, 'import numpy as np\n'), ((2795, 2812), 'numpy.asarray', 'np.asarray', (['times'], {}), '(times)\n', (2805, 2812), True, 'import numpy as np\n'), ((5770, 5783), 'numpy.any', 'np.any', (['(p < 0)'], {}), '(p < 0)\n', (5776, 5783), True, 'import numpy as np\n'), ((8121, 8143), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (8132, 8143), True, 'import numpy as np\n'), ((8211, 8225), 'numpy.argmin', 'np.argmin', (['cod'], {}), '(cod)\n', (8220, 8225), True, 'import numpy as np\n'), ((8271, 8311), 'numpy.linspace', 'np.linspace', (['dom[idx]', 'dom[idx + 1]', '(101)'], {}), '(dom[idx], dom[idx + 1], 101)\n', (8282, 8311), True, 'import numpy as np\n'), ((9175, 9192), 'numpy.isnan', 'np.isnan', (['self.ll'], {}), '(self.ll)\n', (9183, 9192), True, 'import numpy as np\n'), ((13192, 13212), 'scipy.linalg.inv', 'scipy.linalg.inv', (['h1'], {}), '(h1)\n', (13208, 13212), False, 'import scipy\n'), ((13699, 13710), 'numpy.diag', 'np.diag', (['c1'], {}), '(c1)\n', (13706, 13710), True, 'import numpy as np\n'), ((13726, 13739), 'numpy.all', 'np.all', (['(d > 0)'], {}), '(d > 0)\n', (13732, 13739), True, 'import numpy as np\n'), ((16422, 16445), 'numpy.arange', 'np.arange', (['(1)', '(steps + 1)'], {}), '(1, steps + 1)\n', (16431, 16445), True, 'import numpy as np\n'), ((17245, 17262), 'pylab.figure', 'pl.figure', (['fignum'], {}), '(fignum)\n', (17254, 17262), True, 'import pylab as pl\n'), ((18975, 19012), 'numpy.histogram', 'np.histogram', (['self.phases'], {'bins': 'edges'}), '(self.phases, bins=edges)\n', (18987, 19012), True, 'import numpy as np\n'), ((22630, 22643), 'numpy.any', 'np.any', (['(p < 0)'], {}), '(p < 0)\n', (22636, 22643), True, 'import numpy as np\n'), ((25326, 25339), 'numpy.any', 'np.any', (['(p < 0)'], {}), '(p < 0)\n', (25332, 25339), True, 'import numpy as np\n'), ((26593, 26620), 'pylab.plot', 'pl.plot', (['x', 'my'], {'color': 
'"""red"""'}), "(x, my, color='red')\n", (26600, 26620), True, 'import pylab as pl\n'), ((29268, 29287), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2)'], {}), '(-x ** 2 / 2)\n', (29274, 29287), True, 'import numpy as np\n'), ((31970, 31986), 'numpy.ones_like', 'np.ones_like', (['p0'], {}), '(p0)\n', (31982, 31986), True, 'import numpy as np\n'), ((944, 955), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (950, 955), True, 'import numpy as np\n'), ((1271, 1333), 'numpy.histogram', 'np.histogram', (['phases'], {'bins': 'bins', 'weights': 'weights', 'normed': '(False)'}), '(phases, bins=bins, weights=weights, normed=False)\n', (1283, 1333), True, 'import numpy as np\n'), ((1359, 1426), 'numpy.histogram', 'np.histogram', (['phases'], {'bins': 'bins', 'weights': '(weights ** 2)', 'normed': '(False)'}), '(phases, bins=bins, weights=weights ** 2, normed=False)\n', (1371, 1426), True, 'import numpy as np\n'), ((2745, 2764), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (2755, 2764), True, 'import numpy as np\n'), ((3826, 3854), 'numpy.linspace', 'np.linspace', (['ph0', 'ph1', 'nbins'], {}), '(ph0, ph1, nbins)\n', (3837, 3854), True, 'import numpy as np\n'), ((4294, 4322), 'numpy.linspace', 'np.linspace', (['ph0', 'ph1', 'nbins'], {}), '(ph0, ph1, nbins)\n', (4305, 4322), True, 'import numpy as np\n'), ((8404, 8418), 'numpy.argmin', 'np.argmin', (['cod'], {}), '(cod)\n', (8413, 8418), True, 'import numpy as np\n'), ((9229, 9246), 'numpy.isnan', 'np.isnan', (['self.ll'], {}), '(self.ll)\n', (9237, 9246), True, 'import numpy as np\n'), ((11071, 11119), 'scipy.optimize.fmin', 'fmin', (['logl', '[dom[idx]]'], {'full_output': '(True)', 'disp': '(0)'}), '(logl, [dom[idx]], full_output=True, disp=0)\n', (11075, 11119), False, 'from scipy.optimize import fmin\n'), ((11961, 11976), 'numpy.diag', 'np.diag', (['fit[3]'], {}), '(fit[3])\n', (11968, 11976), True, 'import numpy as np\n'), ((13522, 13542), 'scipy.linalg.inv', 'scipy.linalg.inv', (['h1'], {}), '(h1)\n', 
(13538, 13542), False, 'import scipy\n'), ((14349, 14373), 'numpy.diag', 'np.diag', (['self.cov_matrix'], {}), '(self.cov_matrix)\n', (14356, 14373), True, 'import numpy as np\n'), ((15578, 15601), 'numpy.std', 'np.std', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (15584, 15601), True, 'import numpy as np\n'), ((17339, 17367), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nbins + 1)'], {}), '(0, 1, nbins + 1)\n', (17350, 17367), True, 'import numpy as np\n'), ((20951, 20960), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (20957, 20960), True, 'import numpy as np\n'), ((23860, 23875), 'numpy.ones_like', 'np.ones_like', (['p'], {}), '(p)\n', (23872, 23875), True, 'import numpy as np\n'), ((24162, 24177), 'numpy.ones_like', 'np.ones_like', (['p'], {}), '(p)\n', (24174, 24177), True, 'import numpy as np\n'), ((29163, 29186), 'numpy.arange', 'np.arange', (['(-5)', '(5.1)', '(0.5)'], {}), '(-5, 5.1, 0.5)\n', (29172, 29186), True, 'import numpy as np\n'), ((30884, 30902), 'numpy.delete', 'np.delete', (['M', '(0)', '(0)'], {}), '(M, 0, 0)\n', (30893, 30902), True, 'import numpy as np\n'), ((32968, 33003), 'scipy.optimize.bisect', 'bisect', (['f', 'minstep', 'maxstep'], {'args': 'i'}), '(f, minstep, maxstep, args=i)\n', (32974, 33003), False, 'from scipy.optimize import bisect\n'), ((5810, 5825), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (5822, 5825), True, 'import numpy as np\n'), ((13235, 13246), 'numpy.diag', 'np.diag', (['c1'], {}), '(c1)\n', (13242, 13246), True, 'import numpy as np\n'), ((13933, 13953), 'scipy.linalg.inv', 'scipy.linalg.inv', (['h2'], {}), '(h2)\n', (13949, 13953), False, 'import scipy\n'), ((16391, 16407), 'numpy.arange', 'np.arange', (['steps'], {}), '(steps)\n', (16400, 16407), True, 'import numpy as np\n'), ((17998, 18026), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nbins + 1)'], {}), '(0, 1, nbins + 1)\n', (18009, 18026), True, 'import numpy as np\n'), ((18532, 18541), 'pylab.axis', 'pl.axis', ([], {}), 
'()\n', (18539, 18541), True, 'import pylab as pl\n'), ((22670, 22685), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (22682, 22685), True, 'import numpy as np\n'), ((23672, 23719), 'numpy.log', 'np.log', (['(1 + self.weights * phase_template_terms)'], {}), '(1 + self.weights * phase_template_terms)\n', (23678, 23719), True, 'import numpy as np\n'), ((25366, 25381), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (25378, 25381), True, 'import numpy as np\n'), ((26056, 26068), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (26063, 26068), True, 'import numpy as np\n'), ((31226, 31244), 'numpy.delete', 'np.delete', (['M', 'i', '(0)'], {}), '(M, i, 0)\n', (31235, 31244), True, 'import numpy as np\n'), ((14146, 14157), 'numpy.diag', 'np.diag', (['c2'], {}), '(c2)\n', (14153, 14157), True, 'import numpy as np\n'), ((15122, 15139), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (15136, 15139), True, 'import numpy as np\n'), ((18549, 18558), 'pylab.axis', 'pl.axis', ([], {}), '()\n', (18556, 18558), True, 'import pylab as pl\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[1]:
import pandas as pd
import numpy as np
import pickle
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.porter import *
import string
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS
from textstat.textstat import *
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import seaborn
#new
from nltk.tokenize.casual import casual_tokenize #casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False)
from nltk.tokenize import TreebankWordTokenizer
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
stemmer = PorterStemmer()
def preprocess(text_string):
    """
    Accepts a text string and:
    1) collapses runs of whitespace to a single space
    2) strips urls
    3) strips mentions

    This allows us to get standardized counts of urls and mentions
    without caring about specific people mentioned.
    """
    # raw strings: '\s' / '\w' in ordinary literals are invalid escape
    # sequences (DeprecationWarning today, SyntaxError in the future)
    space_pattern = r'\s+'
    giant_url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = r'@[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, '', parsed_text)
    parsed_text = re.sub(mention_regex, '', parsed_text)
    return parsed_text
def tokenize(tweet):
    """Lowercase the tweet, keep only alphabetic spans, and return the
    list of Porter-stemmed tokens (uses the module-level `stemmer`)."""
    cleaned = " ".join(re.split("[^a-zA-Z]*", tweet.lower())).strip()
    return [stemmer.stem(tok) for tok in cleaned.split()]
def basic_tokenize(tweet):
    """Same as tokenize but without the stemming"""
    lowered = tweet.lower()
    normalized = " ".join(re.split("[^a-zA-Z.,!?]*", lowered)).strip()
    return normalized.split()
# Own function
def tokenize_words(tweet, use_stemmer = True):
    """Lowercase the tweet, split on whitespace and light punctuation
    (the characters -.,;!) plus whitespace), and return the tokens,
    Porter-stemmed only when `use_stemmer` is True."""
    tweet = " ".join(re.split(r"[-\s.,;!)]+", tweet.lower())).strip()
    if use_stemmer:
        return [stemmer.stem(t) for t in tweet.split()]
    # split() already yields the token list; the old identity
    # comprehension [t for t in tweet.split()] was redundant
    return tweet.split()
def pos_tag_tweet(tweet, tokenizer, print_tweet = False):
    """Tokenize `tweet` with `tokenizer` and return its part-of-speech
    tags joined into a single space-separated string.
    (`print_tweet` is accepted for interface compatibility but unused.)"""
    tagged = nltk.pos_tag(tokenizer(tweet))
    return " ".join(tag for _, tag in tagged)
# In[3]:
#Now get other features
sentiment_analyzer = VS()
def count_twitter_objs(text_string):
    """
    Accepts a text string and replaces:
    1) urls with URLHERE
    2) lots of whitespace with one instance
    3) mentions with MENTIONHERE
    4) hashtags with HASHTAGHERE

    This allows us to get standardized counts of urls and mentions
    Without caring about specific people mentioned.

    Returns counts of urls, mentions, and hashtags.
    """
    # raw strings: '\s' / '\w' in ordinary literals are invalid escape
    # sequences (DeprecationWarning today, SyntaxError in the future)
    space_pattern = r'\s+'
    giant_url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = r'@[\w\-]+'
    hashtag_regex = r'#[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
    parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
    parsed_text = re.sub(hashtag_regex, 'HASHTAGHERE', parsed_text)
    return(parsed_text.count('URLHERE'),parsed_text.count('MENTIONHERE'),parsed_text.count('HASHTAGHERE'))
def other_features(tweet):
    """This function takes a string and returns a list of features.
    These include Sentiment scores, Text and Readability scores,
    as well as Twitter specific features"""
    # VADER sentiment scores (neg/pos/neu/compound) from the module-level analyzer
    sentiment = sentiment_analyzer.polarity_scores(tweet)
    words = preprocess(tweet) #Get text only
    syllables = textstat.syllable_count(words)
    # NOTE(review): `words` is a string, so this iterates characters and
    # sums len(single char)==1 each time -- i.e. num_chars == len(words).
    # Preserved as-is because trained models depend on this feature.
    num_chars = sum(len(w) for w in words)
    num_chars_total = len(tweet)
    num_terms = len(tweet.split())
    num_words = len(words.split())
    # +0.001 guards against division by zero on empty text
    avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)
    num_unique_terms = len(set(words.split()))
    ###Modified FK grade, where avg words per sentence is just num words/1
    FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)
    ##Modified FRE score, where sentence fixed to 1
    FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)
    # (url count, mention count, hashtag count)
    twitter_objs = count_twitter_objs(tweet)
    retweet = 0
    # NOTE(review): substring test -- also matches "rt" inside words like
    # "start" or "shirt"; preserved for model compatibility.
    if "rt" in words:
        retweet = 1
    features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,
                num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],
                twitter_objs[2], twitter_objs[1],
                twitter_objs[0], retweet]
    #features = pandas.DataFrame(features)
    return features
def get_feature_array(tweets):
    """Return an (n_tweets, n_features) array containing the
    hand-crafted `other_features` vector for each tweet."""
    # list comprehension instead of the manual append loop
    return np.array([other_features(t) for t in tweets])
# In[4]:
def print_cm(y,y_preds, save_cm = False, save_path = None):
    """Plot (and optionally save) the row-normalized 3x3 confusion matrix
    for the Hate/Offensive/Neither classes.

    y -- true labels; y_preds -- predicted labels (values 0/1/2)
    save_cm -- when True, the figure is saved to save_path (default
        'data/confusion.png') before being shown.
    """
    plt.rc('pdf', fonttype=42)
    plt.rcParams['ps.useafm'] = True
    plt.rcParams['pdf.use14corefonts'] = True
    plt.rcParams['text.usetex'] = True
    plt.rcParams['font.serif'] = 'Times'
    plt.rcParams['font.family'] = 'serif'
    from sklearn.metrics import confusion_matrix
    # named `cm` so the imported confusion_matrix function is not shadowed
    cm = confusion_matrix(y,y_preds)
    matrix_proportions = np.zeros((3,3))
    for i in range(0,3):
        # normalize each row to the proportion of its true class
        matrix_proportions[i,:] = cm[i,:]/float(cm[i,:].sum())
    names=['Hate','Offensive','Neither']
    confusion_df = pd.DataFrame(matrix_proportions, index=names,columns=names)
    plt.figure(figsize=(5,5))
    seaborn.heatmap(confusion_df,annot=True,annot_kws={"size": 12},cmap='gist_gray_r',cbar=False, square=True,fmt='.2f')
    plt.ylabel(r'\textbf{True categories}',fontsize=14)
    plt.xlabel(r'\textbf{Predicted categories}',fontsize=14)
    plt.tick_params(labelsize=12)
    if save_cm:
        # single save path instead of the previous duplicated branches
        if save_path is None:
            save_path = 'data/confusion.png'
        plt.savefig(save_path)
        print(f'Confusionmatrix was saved to {save_path}')
    plt.show()
# In[5]:
# Data Structure
class TweetsDataset:
    """Loads a tweets CSV and builds the full feature matrix used by the
    classifiers.

    Features are the concatenation of (1) TF-IDF scores over word n-grams
    of the tweet text, (2) TF-IDF scores over n-grams of the tweet's POS
    tag sequence, and (3) the hand-crafted `other_features` vector
    (sentiment / readability / Twitter-object counts).  Labels come from
    the CSV's 'class' column.
    """
    def __init__(self, csv_path, tokenizer_name, use_stopwords = True, use_preprocessor= False, min_df = 10, max_df = 0.75, max_ngram = 3):
        # Where data is stored
        self.csv_path = csv_path
        #Read data directly
        self.dataframe = pd.read_csv(self.csv_path)
        # Choose tokenizer ('casual_std', 'casual_reduce', 'words', 'orig')
        if tokenizer_name == 'casual_std':
            func = lambda x: casual_tokenize(x, preserve_case=True, reduce_len=False, strip_handles=False)
            self.tokenizer = func
        elif tokenizer_name == 'casual_reduce':
            func = lambda x: casual_tokenize(x, preserve_case=False, reduce_len=True, strip_handles=True)
            self.tokenizer = func
        elif tokenizer_name == 'words':
            self.tokenizer = tokenize_words
        elif tokenizer_name == 'orig':
            self.tokenizer = tokenize
        else:
            raise NotImplementedError('Unknown tokenizer')
        # Stopwords
        if use_stopwords:
            # BUG FIX: list.extend() returns None, so assigning its result
            # (`words("english").extend([...])`) silently set self.stopwords
            # to None and disabled stop-word filtering entirely.
            self.stopwords = nltk.corpus.stopwords.words("english")
            self.stopwords.extend(["#ff", "ff", "rt"])
        else:
            self.stopwords = None
        # Preprocessor
        if use_preprocessor:
            self.preprocessor = preprocess
        else:
            self.preprocessor = None
        # Some hyperparameters
        self.min_df = min_df
        self.max_df = max_df
        self.max_ngram = max_ngram
        # Vectorizer over the raw tweet text
        self.vectorizer = TfidfVectorizer(
            tokenizer=self.tokenizer, #casual_tokenize_specified,
            preprocessor=self.preprocessor,
            ngram_range=(1, self.max_ngram),
            stop_words=self.stopwords,
            use_idf=True,
            smooth_idf=False,
            norm=None,
            decode_error='replace',
            max_features=10000,
            min_df=self.min_df,
            max_df=self.max_df
            )
        # PosVectorizer over the whitespace-separated POS tag strings
        self.pos_vectorizer = TfidfVectorizer(
            tokenizer=None,
            lowercase=False,
            preprocessor=None,
            ngram_range=(1, self.max_ngram),
            stop_words=None,
            use_idf=False,
            smooth_idf=False,
            norm=None,
            decode_error='replace',
            max_features=5000,
            min_df=5,
            max_df=0.75,
            )
        #Construct tfidf matrix and get relevant scores
        self.tfidf = self.vectorizer.fit_transform(self.dataframe['tweet']).toarray()
        self.vocab = {v:i for i, v in enumerate(self.vectorizer.get_feature_names())}
        self.idf_vals = self.vectorizer.idf_
        self.idf_dict = {i:self.idf_vals[i] for i in self.vocab.values()}
        print(f'A vocab was created. It consists of {len(self.vocab)} entries')
        # POS-tagging
        self.tweet_tags = [pos_tag_tweet(tweet, self.tokenizer, print_tweet = False) for tweet in self.dataframe['tweet']]
        self.pos = self.pos_vectorizer.fit_transform(pd.Series(self.tweet_tags)).toarray()
        self.pos_vocab = {v:i for i, v in enumerate(self.pos_vectorizer.get_feature_names())}
        # Other features: this is untouched
        self.feats = get_feature_array(self.dataframe['tweet'])
        #Now join them all up
        self.features = np.concatenate([self.tfidf,self.pos,self.feats],axis=1)
        self.feature_names = [k for k,_ in self.vocab.items()]+[k for k,_ in self.pos_vocab.items()]+["FKRA", "FRE","num_syllables", "avg_syl_per_word", "num_chars", "num_chars_total", "num_terms", "num_words", "num_unique_words", "vader neg","vader pos","vader neu", "vader compound", "num_hashtags", "num_mentions", "num_urls", "is_retweet"]
        self.labels = self.dataframe['class']
        print(f'\n Data has been processed and is now available. Feature dim: {self.features.shape}')
| [
"seaborn.heatmap",
"pandas.read_csv",
"sklearn.feature_extraction.text.TfidfVectorizer",
"nltk.tokenize.casual.casual_tokenize",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"pandas.DataFrame",
"matplotlib.pyplot.rc",
"re.sub",
"matplotlib.pyplot.show",
"pandas.Series",
"nltk.c... | [((2721, 2725), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'VS', ([], {}), '()\n', (2723, 2725), True, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS\n'), ((1375, 1414), 're.sub', 're.sub', (['space_pattern', '""" """', 'text_string'], {}), "(space_pattern, ' ', text_string)\n", (1381, 1414), False, 'import re\n'), ((1433, 1473), 're.sub', 're.sub', (['giant_url_regex', '""""""', 'parsed_text'], {}), "(giant_url_regex, '', parsed_text)\n", (1439, 1473), False, 'import re\n'), ((1492, 1530), 're.sub', 're.sub', (['mention_regex', '""""""', 'parsed_text'], {}), "(mention_regex, '', parsed_text)\n", (1498, 1530), False, 'import re\n'), ((2554, 2574), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (2566, 2574), False, 'import nltk\n'), ((3354, 3393), 're.sub', 're.sub', (['space_pattern', '""" """', 'text_string'], {}), "(space_pattern, ' ', text_string)\n", (3360, 3393), False, 'import re\n'), ((3412, 3459), 're.sub', 're.sub', (['giant_url_regex', '"""URLHERE"""', 'parsed_text'], {}), "(giant_url_regex, 'URLHERE', parsed_text)\n", (3418, 3459), False, 'import re\n'), ((3478, 3527), 're.sub', 're.sub', (['mention_regex', '"""MENTIONHERE"""', 'parsed_text'], {}), "(mention_regex, 'MENTIONHERE', parsed_text)\n", (3484, 3527), False, 'import re\n'), ((3546, 3595), 're.sub', 're.sub', (['hashtag_regex', '"""HASHTAGHERE"""', 'parsed_text'], {}), "(hashtag_regex, 'HASHTAGHERE', parsed_text)\n", (3552, 3595), False, 'import re\n'), ((5220, 5235), 'numpy.array', 'np.array', (['feats'], {}), '(feats)\n', (5228, 5235), True, 'import numpy as np\n'), ((5313, 5339), 'matplotlib.pyplot.rc', 'plt.rc', (['"""pdf"""'], {'fonttype': '(42)'}), "('pdf', fonttype=42)\n", (5319, 5339), True, 'import matplotlib.pyplot as plt\n'), ((5618, 5646), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_preds'], {}), '(y, y_preds)\n', (5634, 5646), False, 'from sklearn.metrics import confusion_matrix\n'), 
((5671, 5687), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5679, 5687), True, 'import numpy as np\n'), ((5863, 5923), 'pandas.DataFrame', 'pd.DataFrame', (['matrix_proportions'], {'index': 'names', 'columns': 'names'}), '(matrix_proportions, index=names, columns=names)\n', (5875, 5923), True, 'import pandas as pd\n'), ((5927, 5953), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5937, 5953), True, 'import matplotlib.pyplot as plt\n'), ((5957, 6083), 'seaborn.heatmap', 'seaborn.heatmap', (['confusion_df'], {'annot': '(True)', 'annot_kws': "{'size': 12}", 'cmap': '"""gist_gray_r"""', 'cbar': '(False)', 'square': '(True)', 'fmt': '""".2f"""'}), "(confusion_df, annot=True, annot_kws={'size': 12}, cmap=\n 'gist_gray_r', cbar=False, square=True, fmt='.2f')\n", (5972, 6083), False, 'import seaborn\n'), ((6078, 6130), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{True categories}"""'], {'fontsize': '(14)'}), "('\\\\textbf{True categories}', fontsize=14)\n", (6088, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6134, 6191), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{Predicted categories}"""'], {'fontsize': '(14)'}), "('\\\\textbf{Predicted categories}', fontsize=14)\n", (6144, 6191), True, 'import matplotlib.pyplot as plt\n'), ((6195, 6224), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(12)'}), '(labelsize=12)\n', (6210, 6224), True, 'import matplotlib.pyplot as plt\n'), ((6536, 6546), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6544, 6546), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6899), 'pandas.read_csv', 'pd.read_csv', (['self.csv_path'], {}), '(self.csv_path)\n', (6884, 6899), True, 'import pandas as pd\n'), ((8107, 8381), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'tokenizer': 'self.tokenizer', 'preprocessor': 'self.preprocessor', 'ngram_range': '(1, self.max_ngram)', 'stop_words': 
'self.stopwords', 'use_idf': '(True)', 'smooth_idf': '(False)', 'norm': 'None', 'decode_error': '"""replace"""', 'max_features': '(10000)', 'min_df': 'self.min_df', 'max_df': 'self.max_df'}), "(tokenizer=self.tokenizer, preprocessor=self.preprocessor,\n ngram_range=(1, self.max_ngram), stop_words=self.stopwords, use_idf=\n True, smooth_idf=False, norm=None, decode_error='replace', max_features\n =10000, min_df=self.min_df, max_df=self.max_df)\n", (8122, 8381), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((8740, 8979), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'tokenizer': 'None', 'lowercase': '(False)', 'preprocessor': 'None', 'ngram_range': '(1, self.max_ngram)', 'stop_words': 'None', 'use_idf': '(False)', 'smooth_idf': '(False)', 'norm': 'None', 'decode_error': '"""replace"""', 'max_features': '(5000)', 'min_df': '(5)', 'max_df': '(0.75)'}), "(tokenizer=None, lowercase=False, preprocessor=None,\n ngram_range=(1, self.max_ngram), stop_words=None, use_idf=False,\n smooth_idf=False, norm=None, decode_error='replace', max_features=5000,\n min_df=5, max_df=0.75)\n", (8755, 8979), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((10252, 10310), 'numpy.concatenate', 'np.concatenate', (['[self.tfidf, self.pos, self.feats]'], {'axis': '(1)'}), '([self.tfidf, self.pos, self.feats], axis=1)\n', (10266, 10310), True, 'import numpy as np\n'), ((6288, 6310), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (6299, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6445, 6467), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (6456, 6467), True, 'import matplotlib.pyplot as plt\n'), ((7008, 7085), 'nltk.tokenize.casual.casual_tokenize', 'casual_tokenize', (['x'], {'preserve_case': '(True)', 'reduce_len': '(False)', 'strip_handles': '(False)'}), '(x, preserve_case=True, reduce_len=False, strip_handles=False)\n', (7023, 7085), 
False, 'from nltk.tokenize.casual import casual_tokenize\n'), ((7197, 7273), 'nltk.tokenize.casual.casual_tokenize', 'casual_tokenize', (['x'], {'preserve_case': '(False)', 'reduce_len': '(True)', 'strip_handles': '(True)'}), '(x, preserve_case=False, reduce_len=True, strip_handles=True)\n', (7212, 7273), False, 'from nltk.tokenize.casual import casual_tokenize\n'), ((7630, 7668), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (7657, 7668), False, 'import nltk\n'), ((9931, 9957), 'pandas.Series', 'pd.Series', (['self.tweet_tags'], {}), '(self.tweet_tags)\n', (9940, 9957), True, 'import pandas as pd\n')] |
"""
Histograms example.
"""
import numpy as np
import matplotlib.pyplot as plt
mu, sigma = 2, 0.5
v = np.random.normal(mu,sigma,1000)
plt.hist(v,bins=50,density=1)
plt.show() | [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"numpy.random.normal"
] | [((103, 136), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', '(1000)'], {}), '(mu, sigma, 1000)\n', (119, 136), True, 'import numpy as np\n'), ((135, 166), 'matplotlib.pyplot.hist', 'plt.hist', (['v'], {'bins': '(50)', 'density': '(1)'}), '(v, bins=50, density=1)\n', (143, 166), True, 'import matplotlib.pyplot as plt\n'), ((165, 175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (173, 175), True, 'import matplotlib.pyplot as plt\n')] |
from collections import namedtuple
import numpy as np
import pytest
tf = pytest.importorskip("tensorflow")
from eight_mile.tf.layers import (
SeqDotProductAttentionT5,
SeqScaledDotProductAttentionT5,
)
# Test dimensions.  NB is the size of the per-head bucket table gathered
# below, so presumably: NH = number of attention heads, NQ/NK = query/key
# sequence lengths, NB = number of relative-position buckets
# -- TODO confirm against eight_mile's T5 attention implementation.
NH = 4
NQ = 7
NK = 6
NB = 32
@pytest.fixture
def generate_buckets_values():
    """Expected bucket ids and gathered per-head bias for an NQ x NK grid.

    ``REL_BUCKETS`` is the expected output of
    ``_relative_position_bucket`` for query length NQ=7 and key length
    NK=6 (row = query position, column = key position).

    ``REL_EMB`` is the expected result of gathering bias weights
    ``arange(NH * NB).reshape(NH, NB)`` with those bucket ids: each head
    ``h`` is the bucket table offset by ``h * 32`` (i.e. ``h * NB``).
    """
    # Expected relative-position bucket ids.
    REL_BUCKETS = np.array([[0, 17, 18, 19, 20, 21],
                            [1, 0, 17, 18, 19, 20],
                            [2, 1, 0, 17, 18, 19],
                            [3, 2, 1, 0, 17, 18],
                            [4, 3, 2, 1, 0, 17],
                            [5, 4, 3, 2, 1, 0],
                            [6, 5, 4, 3, 2, 1]], dtype=np.float32)
    # Expected gathered bias: head h equals REL_BUCKETS + h * 32.
    REL_EMB = np.array([[[0., 17., 18., 19., 20., 21.],
                         [1., 0., 17., 18., 19., 20.],
                         [2., 1., 0., 17., 18., 19.],
                         [3., 2., 1., 0., 17., 18.],
                         [4., 3., 2., 1., 0., 17.],
                         [5., 4., 3., 2., 1., 0.],
                         [6., 5., 4., 3., 2., 1.]],
                        [[32., 49., 50., 51., 52., 53.],
                         [33., 32., 49., 50., 51., 52.],
                         [34., 33., 32., 49., 50., 51.],
                         [35., 34., 33., 32., 49., 50.],
                         [36., 35., 34., 33., 32., 49.],
                         [37., 36., 35., 34., 33., 32.],
                         [38., 37., 36., 35., 34., 33.]],
                        [[64., 81., 82., 83., 84., 85.],
                         [65., 64., 81., 82., 83., 84.],
                         [66., 65., 64., 81., 82., 83.],
                         [67., 66., 65., 64., 81., 82.],
                         [68., 67., 66., 65., 64., 81.],
                         [69., 68., 67., 66., 65., 64.],
                         [70., 69., 68., 67., 66., 65.]],
                        [[96., 113., 114., 115., 116., 117.],
                         [97., 96., 113., 114., 115., 116.],
                         [98., 97., 96., 113., 114., 115.],
                         [99., 98., 97., 96., 113., 114.],
                         [100., 99., 98., 97., 96., 113.],
                         [101., 100., 99., 98., 97., 96.],
                         [102., 101., 100., 99., 98., 97.]]], dtype=np.float32)
    return REL_BUCKETS, REL_EMB
def test_rel_buckets_dp(generate_buckets_values):
    """T5 relative attention (unscaled): bucket ids and gathered bias match the fixture.

    The layer is given deterministic bias weights (head ``h``, bucket ``b``
    -> ``h * NB + b``) so the per-head bias gathered with the computed
    bucket ids can be compared exactly against the fixture values.
    """
    buckets, rel_emb = generate_buckets_values
    dp = SeqDotProductAttentionT5(0, NH)
    dp.build((None,))
    # Deterministic, per-head-distinct bias weights.
    dp.set_weights([np.arange(NH * NB, dtype=np.float32).reshape(NH, NB)])
    query_position = tf.reshape(tf.range(NQ), [-1, 1])
    memory_position = tf.reshape(tf.range(NK), [1, -1])
    relative_position = memory_position - query_position
    rp_bucket = dp._relative_position_bucket(relative_position)
    # Bug fix: previously the np.allclose results were computed but never
    # asserted, so the test could not fail.
    assert np.allclose(rp_bucket.numpy(), buckets)
    rel_emb_dp = tf.expand_dims(tf.gather(dp.get_weights()[0], rp_bucket, axis=-1), 0)
    assert np.allclose(rel_emb, rel_emb_dp)
def test_rel_buckets_sdp(generate_buckets_values):
    """T5 relative attention (scaled): bucket ids and gathered bias match the fixture.

    Same check as the unscaled variant: deterministic bias weights
    (head ``h``, bucket ``b`` -> ``h * NB + b``) make the gathered
    per-head bias table exactly predictable.
    """
    buckets, rel_emb = generate_buckets_values
    sdp = SeqScaledDotProductAttentionT5(0, NH)
    sdp.build((None,))
    # Deterministic, per-head-distinct bias weights.
    sdp.set_weights([np.arange(NH * NB, dtype=np.float32).reshape(NH, NB)])
    query_position = tf.reshape(tf.range(NQ), [-1, 1])
    memory_position = tf.reshape(tf.range(NK), [1, -1])
    relative_position = memory_position - query_position
    rp_bucket = sdp._relative_position_bucket(relative_position)
    # Bug fix: previously the np.allclose results were computed but never
    # asserted, so the test could not fail.
    assert np.allclose(rp_bucket.numpy(), buckets)
    rel_emb_sdp = tf.expand_dims(tf.gather(sdp.get_weights()[0], rp_bucket, axis=-1), 0)
    assert np.allclose(rel_emb, rel_emb_sdp)
| [
"pytest.importorskip",
"numpy.allclose",
"eight_mile.tf.layers.SeqDotProductAttentionT5",
"numpy.array",
"numpy.arange",
"eight_mile.tf.layers.SeqScaledDotProductAttentionT5"
] | [((74, 107), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {}), "('tensorflow')\n", (93, 107), False, 'import pytest\n'), ((309, 500), 'numpy.array', 'np.array', (['[[0, 17, 18, 19, 20, 21], [1, 0, 17, 18, 19, 20], [2, 1, 0, 17, 18, 19], [3,\n 2, 1, 0, 17, 18], [4, 3, 2, 1, 0, 17], [5, 4, 3, 2, 1, 0], [6, 5, 4, 3,\n 2, 1]]'], {'dtype': 'np.float32'}), '([[0, 17, 18, 19, 20, 21], [1, 0, 17, 18, 19, 20], [2, 1, 0, 17, 18,\n 19], [3, 2, 1, 0, 17, 18], [4, 3, 2, 1, 0, 17], [5, 4, 3, 2, 1, 0], [6,\n 5, 4, 3, 2, 1]], dtype=np.float32)\n', (317, 500), True, 'import numpy as np\n'), ((676, 1839), 'numpy.array', 'np.array', (['[[[0.0, 17.0, 18.0, 19.0, 20.0, 21.0], [1.0, 0.0, 17.0, 18.0, 19.0, 20.0],\n [2.0, 1.0, 0.0, 17.0, 18.0, 19.0], [3.0, 2.0, 1.0, 0.0, 17.0, 18.0], [\n 4.0, 3.0, 2.0, 1.0, 0.0, 17.0], [5.0, 4.0, 3.0, 2.0, 1.0, 0.0], [6.0, \n 5.0, 4.0, 3.0, 2.0, 1.0]], [[32.0, 49.0, 50.0, 51.0, 52.0, 53.0], [33.0,\n 32.0, 49.0, 50.0, 51.0, 52.0], [34.0, 33.0, 32.0, 49.0, 50.0, 51.0], [\n 35.0, 34.0, 33.0, 32.0, 49.0, 50.0], [36.0, 35.0, 34.0, 33.0, 32.0, \n 49.0], [37.0, 36.0, 35.0, 34.0, 33.0, 32.0], [38.0, 37.0, 36.0, 35.0, \n 34.0, 33.0]], [[64.0, 81.0, 82.0, 83.0, 84.0, 85.0], [65.0, 64.0, 81.0,\n 82.0, 83.0, 84.0], [66.0, 65.0, 64.0, 81.0, 82.0, 83.0], [67.0, 66.0, \n 65.0, 64.0, 81.0, 82.0], [68.0, 67.0, 66.0, 65.0, 64.0, 81.0], [69.0, \n 68.0, 67.0, 66.0, 65.0, 64.0], [70.0, 69.0, 68.0, 67.0, 66.0, 65.0]], [\n [96.0, 113.0, 114.0, 115.0, 116.0, 117.0], [97.0, 96.0, 113.0, 114.0, \n 115.0, 116.0], [98.0, 97.0, 96.0, 113.0, 114.0, 115.0], [99.0, 98.0, \n 97.0, 96.0, 113.0, 114.0], [100.0, 99.0, 98.0, 97.0, 96.0, 113.0], [\n 101.0, 100.0, 99.0, 98.0, 97.0, 96.0], [102.0, 101.0, 100.0, 99.0, 98.0,\n 97.0]]]'], {'dtype': 'np.float32'}), '([[[0.0, 17.0, 18.0, 19.0, 20.0, 21.0], [1.0, 0.0, 17.0, 18.0, 19.0,\n 20.0], [2.0, 1.0, 0.0, 17.0, 18.0, 19.0], [3.0, 2.0, 1.0, 0.0, 17.0, \n 18.0], [4.0, 3.0, 2.0, 1.0, 0.0, 17.0], [5.0, 4.0, 3.0, 2.0, 1.0, 
0.0],\n [6.0, 5.0, 4.0, 3.0, 2.0, 1.0]], [[32.0, 49.0, 50.0, 51.0, 52.0, 53.0],\n [33.0, 32.0, 49.0, 50.0, 51.0, 52.0], [34.0, 33.0, 32.0, 49.0, 50.0, \n 51.0], [35.0, 34.0, 33.0, 32.0, 49.0, 50.0], [36.0, 35.0, 34.0, 33.0, \n 32.0, 49.0], [37.0, 36.0, 35.0, 34.0, 33.0, 32.0], [38.0, 37.0, 36.0, \n 35.0, 34.0, 33.0]], [[64.0, 81.0, 82.0, 83.0, 84.0, 85.0], [65.0, 64.0,\n 81.0, 82.0, 83.0, 84.0], [66.0, 65.0, 64.0, 81.0, 82.0, 83.0], [67.0, \n 66.0, 65.0, 64.0, 81.0, 82.0], [68.0, 67.0, 66.0, 65.0, 64.0, 81.0], [\n 69.0, 68.0, 67.0, 66.0, 65.0, 64.0], [70.0, 69.0, 68.0, 67.0, 66.0, \n 65.0]], [[96.0, 113.0, 114.0, 115.0, 116.0, 117.0], [97.0, 96.0, 113.0,\n 114.0, 115.0, 116.0], [98.0, 97.0, 96.0, 113.0, 114.0, 115.0], [99.0, \n 98.0, 97.0, 96.0, 113.0, 114.0], [100.0, 99.0, 98.0, 97.0, 96.0, 113.0],\n [101.0, 100.0, 99.0, 98.0, 97.0, 96.0], [102.0, 101.0, 100.0, 99.0, \n 98.0, 97.0]]], dtype=np.float32)\n', (684, 1839), True, 'import numpy as np\n'), ((2419, 2450), 'eight_mile.tf.layers.SeqDotProductAttentionT5', 'SeqDotProductAttentionT5', (['(0)', 'NH'], {}), '(0, NH)\n', (2443, 2450), False, 'from eight_mile.tf.layers import SeqDotProductAttentionT5, SeqScaledDotProductAttentionT5\n'), ((2917, 2949), 'numpy.allclose', 'np.allclose', (['rel_emb', 'rel_emb_dp'], {}), '(rel_emb, rel_emb_dp)\n', (2928, 2949), True, 'import numpy as np\n'), ((3061, 3098), 'eight_mile.tf.layers.SeqScaledDotProductAttentionT5', 'SeqScaledDotProductAttentionT5', (['(0)', 'NH'], {}), '(0, NH)\n', (3091, 3098), False, 'from eight_mile.tf.layers import SeqDotProductAttentionT5, SeqScaledDotProductAttentionT5\n'), ((3570, 3603), 'numpy.allclose', 'np.allclose', (['rel_emb', 'rel_emb_sdp'], {}), '(rel_emb, rel_emb_sdp)\n', (3581, 3603), True, 'import numpy as np\n'), ((2493, 2529), 'numpy.arange', 'np.arange', (['(NH * NB)'], {'dtype': 'np.float32'}), '(NH * NB, dtype=np.float32)\n', (2502, 2529), True, 'import numpy as np\n'), ((3143, 3179), 'numpy.arange', 'np.arange', (['(NH * NB)'], 
{'dtype': 'np.float32'}), '(NH * NB, dtype=np.float32)\n', (3152, 3179), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import tqdm as tqdm
import sys
import torch
import matplotlib.pyplot as plt
class Meter(object):
    """Abstract interface for online statistics trackers.

    Concrete meters accumulate values one at a time and expose the
    current aggregate through :meth:`value`.
    """

    def reset(self):
        """Restore the meter to its initial, empty state."""
        pass

    def add(self, value):
        """Record a new observation.

        Args:
            value: Next result to include.
        """
        pass

    def value(self):
        """Return the meter's current aggregate state."""
        pass
class AverageValueMeter(Meter):
    """Online tracker of the mean and standard deviation of a value stream.

    Statistics are maintained with Welford-style single-pass updates, so
    individual samples never need to be stored.
    """

    def __init__(self):
        super(AverageValueMeter, self).__init__()
        self.reset()
        self.val = 0

    def add(self, value, n=1):
        """Fold ``value`` (counted ``n`` times) into the running statistics."""
        self.val = value
        self.sum += value
        self.var += value * value
        self.n += n

        if self.n == 0:
            # Nothing observed yet: both statistics are undefined.
            self.mean, self.std = np.nan, np.nan
        elif self.n == 1:
            self.mean = 0.0 + self.sum  # force a copy for torch/numpy values
            self.std = np.inf
            self.mean_old = self.mean
            self.m_s = 0.0
        else:
            new_mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
            self.m_s += (value - self.mean_old) * (value - new_mean)
            self.mean = new_mean
            self.mean_old = new_mean
            self.std = np.sqrt(self.m_s / (self.n - 1.0))

    def value(self):
        """Return the current ``(mean, std)`` estimate."""
        return self.mean, self.std

    def reset(self):
        """Discard all accumulated state."""
        self.n = 0
        self.sum = 0.0
        self.var = 0.0
        self.val = 0.0
        self.mean = np.nan
        self.mean_old = 0.0
        self.m_s = 0.0
        self.std = np.nan
def visualizer(pred, imgs, masks, epoch):
    """Build a 4-panel matplotlib figure: prediction, ground truth, image,
    and the prediction overlaid on the image.

    Inputs are torch-like tensors (``.cpu().detach().numpy()`` is used);
    imgs is assumed channels-first, since the transpose below puts
    channels last for ``imshow``.  Returns the figure.
    """
    print(f"pred: {pred.shape} imgs: {imgs.shape}, masks: {masks.shape}")
    if pred.shape[0] > 1 and len(pred.shape) == 3:
        # Batched prediction: keep only the first sample of everything.
        pred = pred[0, :, :]
        imgs = imgs[0, :, :].unsqueeze_(0)
        masks = masks[0, :, :].unsqueeze_(0)
    # Move to host memory, drop singleton dims, channels last for display.
    pred = pred.cpu().detach().numpy().squeeze()
    masks = masks.cpu().detach().numpy().squeeze()
    imgs = np.transpose(imgs.cpu().detach().numpy().squeeze(), (1, 2, 0))
    fig = plt.figure()
    for position, (title, image) in enumerate(
            [("Prediction", pred), ("GT", masks), ("Image", imgs)], start=1):
        axis = fig.add_subplot(1, 4, position)
        axis.title.set_text(title)
        axis.imshow(image)
    overlay = fig.add_subplot(1, 4, 4)
    overlay.title.set_text("Overlay")
    overlay.imshow(imgs)
    overlay.imshow(pred, alpha=0.6)
    fig.suptitle(f"Segmentation Results after {epoch} epochs", y=0.80)
    return fig
class Epoch:
    """Base class for one pass (train or validation) over a dataloader.

    Subclasses implement :meth:`batch_update` (the per-batch forward /
    backward logic) and may override :meth:`on_epoch_start`.
    :meth:`run` drives the loop, keeps running averages of the loss and
    of every metric, and returns them as a dict of logs.
    """
    def __init__(self, model, loss, metrics, stage_name, device='cpu', verbose=True):
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.stage_name = stage_name
        self.verbose = verbose
        self.device = device
        self._to_device()
    def _to_device(self):
        # Move the model, the loss and every metric onto the target device.
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
    def _format_logs(self, logs):
        # Render logs as "name - value, name - value, ..." for the tqdm bar.
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        s = ', '.join(str_logs)
        return s
    def batch_update(self, x, y):
        # Subclass hook: must return (loss, prediction) for one batch.
        raise NotImplementedError
    def on_epoch_start(self):
        # Subclass hook: e.g. switch the model to train/eval mode.
        pass
    def run(self, dataloader):
        """Iterate once over ``dataloader`` and return a dict of running means.

        Keys are ``self.loss.__name__`` and each metric's ``__name__``;
        values are the running means tracked by AverageValueMeter.
        """
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), y.to(self.device)
                loss, y_pred = self.batch_update(x, y)
                # NOTE(review): a figure is created and shown for *every*
                # batch (with epoch hard-coded to 0).  This looks like a
                # debugging leftover and will block/slow the loop -- confirm.
                fig = visualizer(pred = y_pred,imgs = x,masks = y,epoch = 0)
                plt.show()
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                    metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                    logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs
class TrainEpoch(Epoch):
    """One optimisation pass over a dataloader (stage name ``'train'``)."""

    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True):
        super().__init__(
            model=model, loss=loss, metrics=metrics,
            stage_name='train', device=device, verbose=verbose,
        )
        self.optimizer = optimizer

    def on_epoch_start(self):
        # Put the wrapped model into training mode.
        self.model.train()

    def batch_update(self, x, y):
        """Forward + backward + optimiser step; returns (loss, prediction)."""
        self.optimizer.zero_grad()
        outputs = self.model.forward(x)
        batch_loss = self.loss(outputs, y)
        batch_loss.backward()
        self.optimizer.step()
        return batch_loss, outputs
class ValidEpoch(Epoch):
    """One evaluation pass over a dataloader (stage name ``'valid'``)."""

    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(
            model=model, loss=loss, metrics=metrics,
            stage_name='valid', device=device, verbose=verbose,
        )

    def on_epoch_start(self):
        # Put the wrapped model into evaluation mode.
        self.model.eval()

    def batch_update(self, x, y):
        """Forward pass without gradient tracking; returns (loss, prediction)."""
        with torch.no_grad():
            outputs = self.model.forward(x)
            batch_loss = self.loss(outputs, y)
        return batch_loss, outputs
class Trainer(object):
    """Training/validation driver that logs metrics and figures to a
    comet.ml experiment and checkpoints the best model.

    NOTE(review): this class references several names that are never
    defined or imported in this file (``device``, ``eval_utils``,
    ``utils``, ``config``, ``current_path``, ``yaml``, ``datetime``).
    Presumably they are expected to exist in the importing module's
    namespace -- confirm before running this file stand-alone.
    """
    def __init__(self,model,train_loader,val_loader,epochs,optimizer,criterion):
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.epochs = epochs
        self.optimizer = optimizer
        self.criterion = criterion
        # When True, validation plots a figure every 10th epoch.
        self.visualizer_indicator = True
    def visualizer(self,pred,imgs,masks,epoch):
        """Build a 4-panel figure (prediction / GT / image / overlay).

        Batched 3-D predictions are reduced to their first sample before
        plotting.  Returns the matplotlib figure.
        """
        if pred.shape[0]>1 and len(pred.shape)==3:
            # Keep only the first sample of the batch.
            pred = pred[0,:,:]
            imgs = imgs[0,:,:,].unsqueeze_(0)
            masks = masks[0,:,:,].unsqueeze_(0)
        fig = plt.figure()
        ax1 = fig.add_subplot(1,4,1)
        ax1.title.set_text("Prediction")
        ax1.imshow(pred)
        ax2 = fig.add_subplot(1,4,2)
        ax2.title.set_text("GT")
        # Channels-last transpose for imshow.
        ax2.imshow(np.transpose(masks.cpu().detach().numpy(),(1,2,0)).squeeze())
        ax3 = fig.add_subplot(1,4,3)
        ax3.title.set_text("Image")
        ax3.imshow(np.transpose(imgs.cpu().detach().numpy().squeeze(),(1,2,0)))
        ax4 = fig.add_subplot(1,4,4)
        ax4.title.set_text("Overlay")
        ax4.imshow(np.transpose(imgs.cpu().detach().numpy().squeeze(),(1,2,0)))
        ax4.imshow(pred,alpha=0.6)
        fig.suptitle(f"Segmentation Results after {epoch} epochs",y=0.80)
        return fig
    def train(self,epoch,cometml_experiemnt):
        """Run one training epoch; log and return the average batch loss.

        NOTE(review): ``device`` is not defined anywhere in this file.
        """
        total_loss = 0
        self.model.train()
        for batch_index,batch in enumerate(self.train_loader):
            imgs,masks = batch
            imgs = imgs.to(device)
            masks = masks.to(device).squeeze(1)
            self.optimizer.zero_grad()
            output = self.model.forward(imgs)
            loss = self.criterion(output,masks)
            loss.backward()
            self.optimizer.step()
            total_loss +=loss.item()
        cometml_experiemnt.log_metric("Training Average Loss",total_loss/self.train_loader.__len__())
        print("Training Epoch {0:2d} average loss: {1:1.2f}".format(epoch+1, total_loss/self.train_loader.__len__()))
        return total_loss/self.train_loader.__len__()
    def validate(self,epoch,cometml_experiemnt):
        """Run one validation epoch; return (average loss, mean IoU).

        Every 10th epoch a qualitative figure is logged to comet.ml.
        NOTE(review): ``device`` and ``eval_utils`` are not defined in
        this file.
        """
        self.model.eval()
        total_loss = 0
        preds,targets = [],[]
        with torch.no_grad():
            for batch_index,batch in enumerate(self.val_loader):
                imgs,masks = batch
                imgs = imgs.to(device)
                masks = masks.to(device).squeeze(1)
                output = self.model.forward(imgs)
                loss = self.criterion(output,masks)
                # Hard class predictions (argmax over the class dimension).
                pred = output.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
                if self.visualizer_indicator:
                    if (epoch+1)%10 == 0:
                        figure = self.visualizer(pred,imgs,masks,epoch)
                        cometml_experiemnt.log_figure(figure_name=f"epoch: {epoch}, current loss: {loss}",figure=figure)
                masks = masks.squeeze_(0).cpu().numpy()
                preds.append(pred)
                targets.append(masks)
                total_loss+=loss.item()
        _,_,mean_iou,_ = eval_utils.calc_mAP(preds,targets)
        print("Validation mIoU value: {0:1.5f}".format(mean_iou))
        print("Validation Epoch {0:2d} average loss: {1:1.2f}".format(epoch+1, total_loss/self.val_loader.__len__()))
        cometml_experiemnt.log_metric("Validation mIoU",mean_iou)
        cometml_experiemnt.log_metric("Validation Average Loss",total_loss/self.val_loader.__len__())
        return total_loss/self.val_loader.__len__(), mean_iou
    def forward(self,cometml_experiment):
        """Full training run: alternate train/validate for ``self.epochs``.

        Saves the model (plus the config as YAML) whenever both the
        validation loss improves and the mean IoU improves.  Returns
        (train_losses, val_losses, mean_ious_val) per epoch.
        NOTE(review): ``config``, ``current_path``, ``utils``, ``yaml``
        and ``datetime`` are not defined/imported in this file.
        """
        train_losses = []
        val_losses = []
        mean_ious_val = []
        best_val_loss = np.infty
        best_val_mean_iou = 0
        model_save_dir = config['data']['model_save_dir']+f"{current_path[-1]}/{cometml_experiment.project_name}_{datetime.datetime.today().strftime('%Y-%m-%d-%H:%M')}/"
        utils.create_dir_if_doesnt_exist(model_save_dir)
        for epoch in range(0,self.epochs):
            with cometml_experiment.train():
                train_loss = self.train(epoch,cometml_experiment)
            with cometml_experiment.validate():
                val_loss, val_mean_iou = self.validate(epoch,cometml_experiment)
            # Checkpoint only when *both* criteria improve.
            if val_loss < best_val_loss and val_mean_iou>best_val_mean_iou:
                best_val_loss = val_loss
                best_val_mean_iou = val_mean_iou
                model_save_name = f"{current_path[-1]}_epoch_{epoch}_mean_iou_{val_mean_iou}_time_{datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')}.pth"
                # stream = file(model_save_dir+"config.yaml")
                with open(model_save_dir+"config.yaml",'w') as file:
                    yaml.dump(config,file)
                torch.save(self.model,model_save_dir+model_save_name)
            train_losses.append(train_loss)
            val_losses.append(val_loss)
            mean_ious_val.append(val_mean_iou)
        return train_losses, val_losses, mean_ious_val
| [
"tqdm.tqdm",
"matplotlib.pyplot.show",
"torch.save",
"matplotlib.pyplot.figure",
"torch.no_grad",
"numpy.sqrt"
] | [((2253, 2265), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2263, 2265), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6643, 6645), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3849), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': 'self.stage_name', 'file': 'sys.stdout', 'disable': '(not self.verbose)'}), '(dataloader, desc=self.stage_name, file=sys.stdout, disable=not self.\n verbose)\n', (3767, 3849), True, 'from tqdm import tqdm as tqdm\n'), ((5899, 5914), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5912, 5914), False, 'import torch\n'), ((8272, 8287), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8285, 8287), False, 'import torch\n'), ((1438, 1472), 'numpy.sqrt', 'np.sqrt', (['(self.m_s / (self.n - 1.0))'], {}), '(self.m_s / (self.n - 1.0))\n', (1445, 1472), True, 'import numpy as np\n'), ((4119, 4129), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4127, 4129), True, 'import matplotlib.pyplot as plt\n'), ((10802, 10858), 'torch.save', 'torch.save', (['self.model', '(model_save_dir + model_save_name)'], {}), '(self.model, model_save_dir + model_save_name)\n', (10812, 10858), False, 'import torch\n')] |
import sys, os
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, path)
from haven import haven_chk as hc
from haven import haven_results as hr
from haven import haven_utils as hu
import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import itertools
import os
import pylab as plt
import time
import numpy as np
from src import models
from src import datasets
from src import utils as ut
from src.models import metrics
import argparse
from torch.utils.data import sampler
from torch.utils.data.sampler import RandomSampler
from torch.backends import cudnn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import pandas as pd
cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms (best for fixed input sizes)
if __name__ == "__main__":
savedir_base = '/mnt/public/results/toolkit/weak_supervision'
hash_list = ['b04090f27c7c52bcec65f6ba455ed2d8',
'6d4af38d64b23586e71a198de2608333',
'84ced18cf5c1fb3ad5820cc1b55a38fa',
'63f29eec3dbe1e03364f198ed7d4b414',
'017e7441c2f581b6fee9e3ac6f574edc']
hash_dct = {'b04090f27c7c52bcec65f6ba455ed2d8': 'Fully_Supervised',
'6d4af38d64b23586e71a198de2608333': 'LCFCN',
'84ced18cf5c1fb3ad5820cc1b55a38fa': 'LCFCN+Affinity_(ours)',
'63f29eec3dbe1e03364f198ed7d4b414': 'Point-level_Loss ',
'017e7441c2f581b6fee9e3ac6f574edc': 'Cross_entropy_Loss+pseudo-mask'}
datadir = '/mnt/public/datasets/DeepFish/'
score_list = []
for hash_id in hash_list:
fname = os.path.join('/mnt/public/predictions/habitat/%s.pkl' % hash_id)
exp_dict = hu.load_json(os.path.join(savedir_base, hash_id, 'exp_dict.json'))
if os.path.exists(fname):
print('FOUND:', fname)
val_dict = hu.load_pkl(fname)
else:
train_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split='train',
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
test_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split='test',
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
test_loader = DataLoader(test_set,
batch_size=1,
collate_fn=ut.collate_fn,
num_workers=0)
pprint.pprint(exp_dict)
# Model
# ==================
model = models.get_model(model_dict=exp_dict['model'],
exp_dict=exp_dict,
train_set=train_set).cuda()
model_path = os.path.join(savedir_base, hash_id, 'model_best.pth')
# load best model
model.load_state_dict(hu.torch_load(model_path))
# loop over the val_loader and saves image
# get counts
habitats = []
for i, batch in enumerate(test_loader):
habitat = batch['meta'][0]['habitat']
habitats += [habitat]
habitats = np.array(habitats)
val_dict = {}
val_dict_lst = []
for h in np.unique(habitats):
val_meter = metrics.SegMeter(split=test_loader.dataset.split)
for i, batch in enumerate(tqdm.tqdm(test_loader)):
habitat = batch['meta'][0]['habitat']
if habitat != h:
continue
val_meter.val_on_batch(model, batch)
score_dict = val_meter.get_avg_score()
pprint.pprint(score_dict)
val_dict[h] = val_meter.get_avg_score()
val_dict_dfc = pd.DataFrame([val_meter.get_avg_score()])
val_dict_dfc.insert(0, "Habitat", h, True)
val_dict_dfc.rename(
columns={'test_score': 'mIoU', 'test_class0': 'IoU class 0', 'test_class1': 'IoU class 1',
'test_mae': 'MAE', 'test_game': 'GAME'}, inplace=True)
val_dict_lst.append(val_dict_dfc)
val_dict_df = pd.concat(val_dict_lst, axis=0)
val_dict_df.to_csv(os.path.join('/mnt/public/predictions/habitat/', "%s_habitat_score_df.csv" % hash_id),
index=False)
val_dict_df.to_latex(os.path.join('/mnt/public/predictions/habitat/', "%s_habitat_score_df.tex" % hash_id),
index=False, caption=hash_dct[hash_id], label=hash_dct[hash_id])
hu.save_pkl(fname, val_dict)
val_dict['model'] = exp_dict['model']
score_list += [val_dict]
print(pd.DataFrame(score_list))
# score_df = pd.DataFrame(score_list)
# score_df.to_csv(os.path.join('/mnt/public/predictions/habitat/', "habitat_score_df.csv"))
# score_df.to_latex(os.path.join('/mnt/public/predictions/habitat/', "habitat_score_df.tex"))
| [
"pandas.DataFrame",
"tqdm.tqdm",
"src.models.metrics.SegMeter",
"src.datasets.get_dataset",
"torch.utils.data.DataLoader",
"os.path.realpath",
"haven.haven_utils.load_pkl",
"os.path.exists",
"sys.path.insert",
"haven.haven_utils.torch_load",
"numpy.array",
"pprint.pprint",
"haven.haven_utils... | [((84, 108), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (99, 108), False, 'import sys, os\n'), ((55, 81), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (71, 81), False, 'import os\n'), ((1585, 1649), 'os.path.join', 'os.path.join', (["('/mnt/public/predictions/habitat/%s.pkl' % hash_id)"], {}), "('/mnt/public/predictions/habitat/%s.pkl' % hash_id)\n", (1597, 1649), False, 'import os\n'), ((1747, 1768), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1761, 1768), False, 'import os\n'), ((5108, 5132), 'pandas.DataFrame', 'pd.DataFrame', (['score_list'], {}), '(score_list)\n', (5120, 5132), True, 'import pandas as pd\n'), ((1682, 1734), 'os.path.join', 'os.path.join', (['savedir_base', 'hash_id', '"""exp_dict.json"""'], {}), "(savedir_base, hash_id, 'exp_dict.json')\n", (1694, 1734), False, 'import os\n'), ((1828, 1846), 'haven.haven_utils.load_pkl', 'hu.load_pkl', (['fname'], {}), '(fname)\n', (1839, 1846), True, 'from haven import haven_utils as hu\n'), ((1886, 2034), 'src.datasets.get_dataset', 'datasets.get_dataset', ([], {'dataset_dict': "exp_dict['dataset']", 'split': '"""train"""', 'datadir': 'datadir', 'exp_dict': 'exp_dict', 'dataset_size': "exp_dict['dataset_size']"}), "(dataset_dict=exp_dict['dataset'], split='train',\n datadir=datadir, exp_dict=exp_dict, dataset_size=exp_dict['dataset_size'])\n", (1906, 2034), False, 'from src import datasets\n'), ((2235, 2382), 'src.datasets.get_dataset', 'datasets.get_dataset', ([], {'dataset_dict': "exp_dict['dataset']", 'split': '"""test"""', 'datadir': 'datadir', 'exp_dict': 'exp_dict', 'dataset_size': "exp_dict['dataset_size']"}), "(dataset_dict=exp_dict['dataset'], split='test',\n datadir=datadir, exp_dict=exp_dict, dataset_size=exp_dict['dataset_size'])\n", (2255, 2382), False, 'from src import datasets\n'), ((2582, 2657), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': '(1)', 
'collate_fn': 'ut.collate_fn', 'num_workers': '(0)'}), '(test_set, batch_size=1, collate_fn=ut.collate_fn, num_workers=0)\n', (2592, 2657), False, 'from torch.utils.data import DataLoader\n'), ((2781, 2804), 'pprint.pprint', 'pprint.pprint', (['exp_dict'], {}), '(exp_dict)\n', (2794, 2804), False, 'import pprint\n'), ((3072, 3125), 'os.path.join', 'os.path.join', (['savedir_base', 'hash_id', '"""model_best.pth"""'], {}), "(savedir_base, hash_id, 'model_best.pth')\n", (3084, 3125), False, 'import os\n'), ((3491, 3509), 'numpy.array', 'np.array', (['habitats'], {}), '(habitats)\n', (3499, 3509), True, 'import numpy as np\n'), ((3588, 3607), 'numpy.unique', 'np.unique', (['habitats'], {}), '(habitats)\n', (3597, 3607), True, 'import numpy as np\n'), ((4988, 5016), 'haven.haven_utils.save_pkl', 'hu.save_pkl', (['fname', 'val_dict'], {}), '(fname, val_dict)\n', (4999, 5016), True, 'from haven import haven_utils as hu\n'), ((3191, 3216), 'haven.haven_utils.torch_load', 'hu.torch_load', (['model_path'], {}), '(model_path)\n', (3204, 3216), True, 'from haven import haven_utils as hu\n'), ((3637, 3686), 'src.models.metrics.SegMeter', 'metrics.SegMeter', ([], {'split': 'test_loader.dataset.split'}), '(split=test_loader.dataset.split)\n', (3653, 3686), False, 'from src.models import metrics\n'), ((4547, 4578), 'pandas.concat', 'pd.concat', (['val_dict_lst'], {'axis': '(0)'}), '(val_dict_lst, axis=0)\n', (4556, 4578), True, 'import pandas as pd\n'), ((2878, 2969), 'src.models.get_model', 'models.get_model', ([], {'model_dict': "exp_dict['model']", 'exp_dict': 'exp_dict', 'train_set': 'train_set'}), "(model_dict=exp_dict['model'], exp_dict=exp_dict, train_set\n =train_set)\n", (2894, 2969), False, 'from src import models\n'), ((3730, 3752), 'tqdm.tqdm', 'tqdm.tqdm', (['test_loader'], {}), '(test_loader)\n', (3739, 3752), False, 'import tqdm\n'), ((4020, 4045), 'pprint.pprint', 'pprint.pprint', (['score_dict'], {}), '(score_dict)\n', (4033, 4045), False, 'import pprint\n'), 
((4614, 4703), 'os.path.join', 'os.path.join', (['"""/mnt/public/predictions/habitat/"""', "('%s_habitat_score_df.csv' % hash_id)"], {}), "('/mnt/public/predictions/habitat/', '%s_habitat_score_df.csv' %\n hash_id)\n", (4626, 4703), False, 'import os\n'), ((4786, 4875), 'os.path.join', 'os.path.join', (['"""/mnt/public/predictions/habitat/"""', "('%s_habitat_score_df.tex' % hash_id)"], {}), "('/mnt/public/predictions/habitat/', '%s_habitat_score_df.tex' %\n hash_id)\n", (4798, 4875), False, 'import os\n')] |
#!/usr/bin/env python3
# Author: <NAME> <wecros|xfilip46>
# Date: 2020/01/02
import sys
import wave
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import minmax_scale
from lib import clip_centre, SAMPLE_RATE, OUTPUT_PATH, auto_correlate, \
save_figure, compute_log_spectogram, N
import ex3
def plot(maskon, maskoff, save):
    """Plot the spectograms of the maskon/maskoff frames.

    Parameters
    ----------
    maskon, maskoff : 2D array-like
        Log-spectrogram data (frequency bins x time frames) for the
        mask-on and mask-off recordings respectively.
    save : bool
        If True the figure is written to disk via ``save_figure``,
        otherwise it is shown interactively.
    """
    fig, axes = plt.subplots(2, constrained_layout=True)
    fig.set_size_inches(8.0, 6.0)
    # FIX: `FigureCanvasBase.set_window_title` was deprecated in Matplotlib 3.4
    # and removed in 3.6; the supported spelling goes through the canvas manager.
    fig.canvas.manager.set_window_title('Excercise 5')
    ax_on, ax_off = axes
    # `extent` maps the image axes onto 0-1 s (x) and 0-8000 Hz (y).
    im_on = ax_on.imshow(maskon, origin='lower', aspect='auto', extent=[0, 1.0, 0, 8000])
    fig.colorbar(im_on, ax=ax_on)
    im_off = ax_off.imshow(maskoff, origin='lower', aspect='auto', extent=[0, 1.0, 0, 8000])
    fig.colorbar(im_off, ax=ax_off)
    ax_on.set_title('Mask-on spectogram')
    ax_off.set_title('Mask-off spectogram')
    for ax in axes:
        ax.set_xlabel('time [s]')
        ax.set_ylabel('frequency')
    if save:
        save_figure(fig, 'ex5')
    else:
        plt.show()
def main(save=False):
    """Compute the mask-on/mask-off log-spectrograms and plot them.

    Parameters
    ----------
    save : bool
        Forwarded to :func:`plot`; if True the figure is saved instead of shown.
    """
    frames_on, frames_off = ex3.output()
    # N-point DFT of every frame, folded into a log-magnitude spectrogram and
    # transposed so frequency runs along the first axis (rows) for imshow.
    spec_on = compute_log_spectogram(np.fft.fft(frames_on, n=N)).transpose()
    spec_off = compute_log_spectogram(np.fft.fft(frames_off, n=N)).transpose()
    # Only the first N//2 frequency bins are unique for a real-valued signal.
    plot(spec_on[:N//2], spec_off[:N//2], save)
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.show",
"lib.save_figure",
"numpy.fft.fft",
"ex3.output",
"matplotlib.pyplot.subplots",
"lib.compute_log_spectogram"
] | [((454, 494), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'constrained_layout': '(True)'}), '(2, constrained_layout=True)\n', (466, 494), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1188), 'ex3.output', 'ex3.output', ([], {}), '()\n', (1186, 1188), False, 'import ex3\n'), ((1208, 1238), 'numpy.fft.fft', 'np.fft.fft', (['maskon_frames'], {'n': 'N'}), '(maskon_frames, n=N)\n', (1218, 1238), True, 'import numpy as np\n'), ((1258, 1289), 'numpy.fft.fft', 'np.fft.fft', (['maskoff_frames'], {'n': 'N'}), '(maskoff_frames, n=N)\n', (1268, 1289), True, 'import numpy as np\n'), ((1315, 1350), 'lib.compute_log_spectogram', 'compute_log_spectogram', (['maskon_dfts'], {}), '(maskon_dfts)\n', (1337, 1350), False, 'from lib import clip_centre, SAMPLE_RATE, OUTPUT_PATH, auto_correlate, save_figure, compute_log_spectogram, N\n'), ((1376, 1412), 'lib.compute_log_spectogram', 'compute_log_spectogram', (['maskoff_dfts'], {}), '(maskoff_dfts)\n', (1398, 1412), False, 'from lib import clip_centre, SAMPLE_RATE, OUTPUT_PATH, auto_correlate, save_figure, compute_log_spectogram, N\n'), ((1063, 1086), 'lib.save_figure', 'save_figure', (['fig', '"""ex5"""'], {}), "(fig, 'ex5')\n", (1074, 1086), False, 'from lib import clip_centre, SAMPLE_RATE, OUTPUT_PATH, auto_correlate, save_figure, compute_log_spectogram, N\n'), ((1105, 1115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1113, 1115), True, 'import matplotlib.pyplot as plt\n')] |
"""
Defines CompilationLibrary class and supporting functions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import copy as _copy
import itertools as _itertools
import numpy as _np
from pygsti.baseobjs.label import Label as _Label
from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph
from pygsti.circuits.circuit import Circuit as _Circuit
from pygsti.processors.processorspec import QubitProcessorSpec as _QubitProcessorSpec
from pygsti.tools import listtools as _lt
from pygsti.tools import symplectic as _symp
from pygsti.tools import internalgates as _itgs
class CompilationError(Exception):
    """
    Raised when a :class:`CompilationLibrary` cannot produce a requested compilation.
    """
    pass
class CompilationRules(object):
    """
    A prescription for creating ("compiling") a set of gates based on another set.

    A :class:`CompilationRules` object contains a dictionary of gate unitaries,
    much like a :class:`ProcessorSpec`, and instructions for creating these gates.
    The instructions can be given explicitly as circuits corresponding to a given gate,
    or implicitly as functions.  Instructions can be given for gate *names* (e.g. `"Gx"`),
    regardless of the target state space labels of the gate, as well as for specific
    gate locations (e.g. `("Gx",2)`).

    Parameters
    ----------
    compilation_rules_dict : dict
        A dictionary of initial rules, which can be specified in multiple formats.
        Keys can be either gate names as strings or gate labels as a Label object.
        Values are 2-tuples of (gate unitary, gate template). The gate unitary
        can either be a unitary matrix, function returning a matrix, or None if the
        gate name is a standard PyGSTi unitary. The gate template is either a Circuit
        with local state space labels (i.e. 0..k-1 for k qubits) or a function that takes
        the target gate label and returns the proper Circuit. If the key is a gate label,
        the gate template (second entry of the value tuple) MUST be a Circuit with
        absolute state space labels.
    """

    @classmethod
    def cast(cls, obj):
        """
        Convert an object into compilation rules, if it isn't already.

        Parameters
        ----------
        obj : object
            The object to convert.

        Returns
        -------
        CompilationRules
        """
        if isinstance(obj, CompilationRules): return obj
        return cls(obj)

    def __init__(self, compilation_rules_dict=None):
        self.gate_unitaries = _collections.OrderedDict()  # gate_name => unitary mx, fn, or None
        self.local_templates = _collections.OrderedDict()  # gate_name => Circuit on gate's #qubits
        self.function_templates = _collections.OrderedDict()  # gate_name => fn(sslbls, args=None, time=None)
        #                                                       that returns a Circuit on absolute qubits
        self.specific_compilations = _collections.OrderedDict()  # gate_label => Circuit on absolute qubits
        self._compiled_cache = _collections.OrderedDict()  # compiled gate_label => Circuit on absolute qubits

        if compilation_rules_dict is not None:
            for gate_key, (gate_unitary, gate_template) in compilation_rules_dict.items():
                if isinstance(gate_key, str):
                    # String keys describe rules for a gate *name*, independent of target qubits.
                    self.gate_unitaries[gate_key] = gate_unitary
                    if callable(gate_template):
                        self.function_templates[gate_key] = gate_template
                    else:
                        assert isinstance(gate_template, _Circuit), \
                            "Values to gate name template must be functions or Circuits, not %s" % type(gate_template)
                        self.local_templates[gate_key] = gate_template
                else:
                    # Label keys describe rules for one specific gate location.
                    assert isinstance(gate_key, _Label), \
                        "Keys to compilation_rules_dict must be str or Labels, not %s" % type(gate_key)
                    assert isinstance(gate_template, _Circuit), \
                        "Values to specific compilations must be Circuits, not %s" % type(gate_template)
                    self.specific_compilations[gate_key] = gate_template

    def add_compilation_rule(self, gate_name, template_circuit_or_fn, unitary=None):
        """
        Add a compilation rule for a gate *name*, given as a circuit or function.

        Parameters
        ----------
        gate_name : str
            The gate name to add a rule for.

        template_circuit_or_fn : Circuit or callable
            The rule.  This can be specified as either a circuit or as a function.  If a
            circuit is given, it must be on the gate's local state space, assumed to be
            a k-qubit space (for a k-qubit gate) with qubit labels 0 to k-1.  That is,
            the circuit must have line labels equal to `0...k-1`.  If a function if given,
            the function must take as a single argument a tuple of state space labels that
            specify the target labels of the gate.

        unitary : numpy.ndarray
            The unitary corresponding to the gate.  This can be left as `None` if
            `gate_name` names a standard or internal gate known to pyGSTi.

        Returns
        -------
        None
        """
        std_gate_unitaries = _itgs.standard_gatename_unitaries()
        std_gate_unitaries.update(_itgs.internal_gate_unitaries())  # internal gates ok too?
        if unitary is None:
            if gate_name in std_gate_unitaries: unitary = std_gate_unitaries[gate_name]
            else: raise ValueError("Must supply `unitary` for non-standard gate name '%s'" % gate_name)

        self.gate_unitaries[gate_name] = unitary
        if callable(template_circuit_or_fn):
            self.function_templates[gate_name] = template_circuit_or_fn
        else:
            self.local_templates[gate_name] = template_circuit_or_fn

    def add_specific_compilation_rule(self, gate_label, circuit, unitary):
        """
        Add a compilation rule for a gate at a specific location (target labels)

        Parameters
        ----------
        gate_label : Label
            The gate label to add a rule for.  Includes the gate's name and its target
            state space labels (`gate_label.sslbls`).

        circuit : Circuit
            The rule, given as a circuit on the gate's local state space, i.e. the circuit's
            line labels should be the same as `gate_label.sslbls`.

        unitary : numpy.ndarray
            The unitary corresponding to the gate.  This can be left as `None` if
            `gate_label.name` names a standard or internal gate known to pyGSTi.

        Returns
        -------
        None
        """
        std_gate_unitaries = _itgs.standard_gatename_unitaries()
        std_gate_unitaries.update(_itgs.internal_gate_unitaries())  # internal gates ok too?
        if gate_label.name not in self.gate_unitaries:
            if unitary is None:
                if gate_label.name in std_gate_unitaries: unitary = std_gate_unitaries[gate_label.name]
                else: raise ValueError("Must supply `unitary` for non-standard gate name '%s'" % gate_label.name)
            self.gate_unitaries[gate_label.name] = unitary
        self.specific_compilations[gate_label] = circuit

    def create_aux_info(self):
        """
        Create auxiliary information that should be stored along with the compilation rules herein.

        (Currently unused, but perhaps useful in the future.)

        Returns
        -------
        dict
        """
        return {}

    def retrieve_compilation_of(self, oplabel, force=False):
        """
        Get a compilation of `oplabel`, computing one from local templates if necessary.

        Parameters
        ----------
        oplabel : Label
            The label of the gate to compile.

        force : bool, optional
            If True, then an attempt is made to recompute a compilation
            even if `oplabel` already exists in this `CompilationLibrary`.
            Otherwise compilations are only computed when they are *not* present.

        Returns
        -------
        Circuit or None, if failed to retrieve compilation
        """
        # First look up in cache
        if not force and oplabel in self._compiled_cache:
            return self._compiled_cache[oplabel]

        if oplabel in self.specific_compilations:  # Second, look up in specific compilations
            self._compiled_cache[oplabel] = self.specific_compilations[oplabel]
        elif oplabel.name in self.local_templates:  # Third, construct from local template
            template_to_use = self.local_templates[oplabel.name]
            # Template compilations always use integer qubit labels: 0 to N
            to_real_label = {i: oplabel.sslbls[i] for i in template_to_use.line_labels}
            self._compiled_cache[oplabel] = template_to_use.map_state_space_labels(to_real_label)
        elif oplabel.name in self.function_templates:  # Fourth, construct from local function template
            template_fn_to_use = self.function_templates[oplabel.name]
            self._compiled_cache[oplabel] = _Circuit(template_fn_to_use(oplabel.sslbls, oplabel.args, oplabel.time))
        else:
            # Failed to compile
            return None
        return self._compiled_cache[oplabel]

    def apply_to_processorspec(self, processor_spec, action="replace", gates_to_skip=None):
        """
        Use these compilation rules to convert one processor specification into another one.

        Parameters
        ----------
        processor_spec : QubitProcessorSpec
            The initial processor specification, which should contain the gates present within the
            circuits/functions of this compilation rules object.

        action : {"replace", "add"}
            Whether the existing gates in `processor_spec` are conveyed to the the returned
            processor spec.  If `"replace"`, then they are not conveyed, if `"add"` they are.

        gates_to_skip : list
            Gate names or labels to skip during processor specification construction.

        Returns
        -------
        QubitProcessorSpec
        """
        gate_names = tuple(self.gate_unitaries.keys())
        gate_unitaries = self.gate_unitaries.copy()  # can contain `None` entries we deal with below
        if gates_to_skip is None:
            gates_to_skip = []

        availability = {}
        for gn in gate_names:
            if gn in gates_to_skip:
                continue
            if gn in self.local_templates:
                # merge availabilities from gates in local template
                compilation_circuit = self.local_templates[gn]
                all_sslbls = compilation_circuit.line_labels
                gn_nqubits = len(all_sslbls)
                assert (all_sslbls == tuple(range(0, gn_nqubits))), \
                    "Template circuits *must* have line labels == 0...(gate's #qubits-1), not %s!" % (
                        str(all_sslbls))

                # To construct the availability for a circuit, we take the intersection
                # of the availability for each of the layers.  Each layer's availability is
                # the cartesian-like product of the availabilities for each of the components
                circuit_availability = None
                for layer in compilation_circuit[:]:
                    layer_availability_factors = []
                    layer_availability_sslbls = []
                    for gate in layer.components:
                        gate_availability = processor_spec.availability[gate.name]
                        if gate_availability in ('all-edges', 'all-combinations', 'all-permutations'):
                            raise NotImplementedError("Cannot merge special availabilities yet")
                        layer_availability_factors.append(gate_availability)
                        gate_sslbls = gate.sslbls
                        if gate_sslbls is None: gate_sslbls = all_sslbls
                        assert (len(set(layer_availability_sslbls).intersection(gate_sslbls)) == 0), \
                            "Duplicate state space labels in layer: %s" % str(layer)
                        layer_availability_sslbls.extend(gate_sslbls)  # integers
                    layer_availability = tuple(_itertools.product(*layer_availability_factors))
                    if tuple(layer_availability_sslbls) != all_sslbls:  # then need to permute availability elements
                        p = {to: frm for frm, to in enumerate(layer_availability_sslbls)}  # use sslbls as *indices*
                        new_order = [p[i] for i in range(gn_nqubits)]
                        layer_availability = tuple(map(lambda el: tuple([el[i] for i in new_order]),
                                                       layer_availability))
                    circuit_availability = set(layer_availability) if (circuit_availability is None) else \
                        circuit_availability.intersection(layer_availability)
                assert (circuit_availability is not None), "Local template circuit cannot be empty!"
                availability[gn] = tuple(sorted(circuit_availability))

                if gate_unitaries[gn] is None:
                    # TODO: compute unitary via product of embedded unitaries of circuit layers, something like:
                    # gate_unitaries[gn] = product(
                    #    [kronproduct(
                    #        [embed(self.gate_unitaries[gate.name], gate.sslbls, range(gn_nqubits))
                    #         for gate in layer.components])
                    #     for layer in compilation_circuit)])
                    raise NotImplementedError("Still need to implement product of unitaries logic!")
            elif gn in self.function_templates:
                # create boolean oracle function for availability
                # BUGFIX: bind `gn` as a default argument so each oracle captures *its
                # own* gate name.  A plain closure over the loop variable would make
                # every stored oracle test the final `gn` of the loop (Python's
                # late-binding-closure pitfall).
                def _fn(sslbls, gn=gn):
                    try:
                        self.function_templates[gn](sslbls, None, None)  # (returns a circuit)
                        return True
                    except CompilationError:
                        return False
                availability[gn] = _fn  # boolean function indicating availability
            else:
                availability[gn] = ()  # empty tuple for absent gates - OK b/c may have specific compilations
                if gate_unitaries[gn] is None:
                    raise ValueError("Must specify unitary for gate name '%s'" % str(gn))

        # specific compilations add specific availability for their gate names:
        for gate_lbl in self.specific_compilations.keys():
            if gate_lbl in gates_to_skip:
                continue
            assert (gate_lbl.name in gate_names), \
                "gate name '%s' missing from CompilationRules gate unitaries!" % gate_lbl.name
            assert (isinstance(availability[gate_lbl.name], tuple)), \
                "Cannot add specific values to non-explicit availabilities (e.g. given by functions)"
            availability[gate_lbl.name] += (gate_lbl.sslbls,)

        if action == "add":
            gate_names = tuple(processor_spec.gate_names) + gate_names
            gate_unitaries.update(processor_spec.gate_unitaries)
            availability.update(processor_spec.availability)
        elif action == "replace":
            pass
        else:
            raise ValueError("Invalid `action` argument: %s" % str(action))

        aux_info = processor_spec.aux_info.copy()
        aux_info.update(self.create_aux_info())
        ret = _QubitProcessorSpec(processor_spec.num_qubits, gate_names, gate_unitaries, availability,
                                  processor_spec.qubit_graph, processor_spec.qubit_labels, aux_info=aux_info)
        ret.compiled_from = (processor_spec, self)
        return ret

    def apply_to_circuits(self, circuits, **kwargs):
        """
        Use these compilation rules to convert one list of circuits into another one.

        Additional kwargs are passed through to Circuit.change_gate_library during translation.
        Common kwargs include `depth_compression=False` or `allow_unchanged_gates=True`.

        Parameters
        ----------
        circuits : list of Circuits
            The initial circuits, which should contain the gates present within the
            circuits/functions of this compilation rules object.

        Returns
        -------
        list of Circuits
        """
        compiled_circuits = [c.copy(editable=True) for c in circuits]
        for circ in compiled_circuits:
            circ.change_gate_library(self, **kwargs)
            circ.done_editing()
        return compiled_circuits
class CliffordCompilationRules(CompilationRules):
"""
An collection of compilations for clifford gates.
Holds mapping between operation labels (:class:`Label` objects) and circuits
(:class:`Circuit` objects).
A `CliffordCompilationRules` holds a processor specification of the "native" gates
of a processor and uses it to produce compilations of many of/all Clifford operations.
Currently, the native gates should all be Clifford gates, so that the processor spec's
`compute_clifford_symplectic_reps` method gives representations for all of its gates.
Compilations can be either "local" or "non-local". A local compilation
    only uses gates that act on its target qubits. All 1-qubit gates can be
local. A non-local compilation uses qubits outside the set of target
qubits (e.g. a CNOT between two qubits between which there is no native
CNOT). Currently, non-local compilations can only be constructed for
the CNOT gate.
To speed up the creation of local compilations, a `CliffordCompilationRules`
instance stores "template" compilations, which specify how to construct a
compilation for some k-qubit gate on qubits labeled 0 to k-1. When creating
a compilation for a gate, a template is used if a suitable one can be found;
otherwise a new template is created and then used.
Parameters
----------
native_gates_processorspec : QubitProcessorSpec
The processor specification of "native" Clifford gates which all
compilation rules are composed from.
compile_type : {"absolute","paulieq"}
The "compilation type" for this rules set. If `"absolute"`, then
compilations must match the gate operation being compiled exactly.
If `"paulieq"`, then compilations only need to match the desired
    gate operation up to a Pauli operation (which is useful for compiling
multi-qubit Clifford gates / stabilizer states without unneeded 1-qubit
gate over-heads).
"""
    @classmethod
    def create_standard(cls, base_processor_spec, compile_type="absolute", what_to_compile=("1Qcliffords",),
                        verbosity=1):
        """
        Create a common set of compilation rules based on a base processor specification.

        Parameters
        ----------
        base_processor_spec : QubitProcessorSpec
            The processor specification of "native" Clifford gates which all
            the compilation rules will be in terms of.

        compile_type : {"absolute","paulieq"}
            The "compilation type" for this rules set.  If `"absolute"`, then
            compilations must match the gate operation being compiled exactly.
            If `"paulieq"`, then compilations only need to match the desired
            gate operation up to a Pauli operation (which is useful for compiling
            multi-qubit Clifford gates / stabilizer states without unneeded 1-qubit
            gate over-heads).

        what_to_compile : {"1Qcliffords", "localcnots", "allcnots", "paulis"}
            What operations should rules be created for?  Allowed values may depend on
            the value of `compile_type`.

        verbosity : int, optional
            An integer >= 0 specifying how much detail to send to stdout.

        Returns
        -------
        CliffordCompilationRules
        """
        # A list of the 1-qubit gates to compile, in the std names understood inside the compilation code.
        one_q_gates = []
        # A list of the 2-qubit gates to compile, in the std names understood inside the compilation code.
        two_q_gates = []
        add_nonlocal_two_q_gates = False  # Defaults to not adding non-local compilations of 2-qubit gates.
        number_of_qubits = base_processor_spec.num_qubits
        qubit_labels = base_processor_spec.qubit_labels

        # We construct the requested Pauli-equivalent compilations.
        if compile_type == 'paulieq':
            for subctype in what_to_compile:
                if subctype == '1Qcliffords':
                    one_q_gates += ['H', 'P', 'PH', 'HP', 'HPH']
                elif subctype == 'localcnots':
                    # So that the default still makes sense with 1 qubit, we ignore the request to compile CNOTs
                    # in that case
                    if number_of_qubits > 1:
                        two_q_gates += ['CNOT', ]
                elif subctype == 'allcnots':
                    # So that the default still makes sense with 1 qubit, we ignore the request to compile CNOTs
                    # in that case
                    if number_of_qubits > 1:
                        two_q_gates += ['CNOT', ]
                        add_nonlocal_two_q_gates = True
                else:
                    raise ValueError("{} is invalid for the `{}` compile type!".format(subctype, compile_type))

        # We construct the requested `absolute` (i.e., not only up to Paulis) compilations.
        elif compile_type == 'absolute':
            for subctype in what_to_compile:
                if subctype == 'paulis':
                    one_q_gates += ['I', 'X', 'Y', 'Z']
                elif subctype == '1Qcliffords':
                    # 'C0'..'C23' name the 24 single-qubit Clifford group elements.
                    one_q_gates += ['C' + str(q) for q in range(24)]
                else:
                    raise ValueError("{} is invalid for the `{}` compile type!".format(subctype, compile_type))
        else:
            raise ValueError("Invalid `compile_type` argument: %s" % str(compile_type))

        # Phrases used in the progress messages printed below.
        descs = {'paulieq': 'up to paulis', 'absolute': ''}

        # Lists that are all the hard-coded 1-qubit and 2-qubit gates.
        # future: should probably import these from _itgss somehow.
        hardcoded_oneQgates = ['I', 'X', 'Y', 'Z', 'H', 'P', 'HP', 'PH', 'HPH'] + ['C' + str(i) for i in range(24)]

        # Currently we can only compile CNOT gates, although that should be fixed.
        for gate in two_q_gates:
            assert (gate == 'CNOT'), ("The only 2-qubit gate auto-generated compilations currently possible "
                                      "are for the CNOT gate (Gcnot)!")

        # Creates an empty library to fill
        compilation_rules = cls(base_processor_spec, compile_type)

        # 1-qubit gate compilations. These must be complied "locally" - i.e., out of native gates which act only
        # on the target qubit of the gate being compiled, and they are stored in the compilation rules.
        for q in qubit_labels:
            for gname in one_q_gates:
                # Check that this is a gate that is defined in the code, so that we can try and compile it.
                assert (gname in hardcoded_oneQgates), "{} is not an allowed hard-coded 1-qubit gate".format(gname)
                if verbosity > 0:
                    print(
                        "- Creating a circuit to implement {} {} on qubit {}...".format(gname, descs[compile_type],
                                                                                        q))
                # This does a brute-force search to compile the gate, by creating `templates` when necessary, and using
                # a template if one has already been constructed.
                compilation_rules.add_local_compilation_of(_Label(gname, q), verbosity=verbosity)
            if verbosity > 0: print("Complete.")

        # Manually add in the "obvious" compilations for CNOT gates as templates, so that we use the normal conversions
        # based on the Hadamard gate -- if this is possible. If we don't do this, we resort to random compilations,
        # which might not give the "expected" compilations (even if the alternatives might be just as good).
        if 'CNOT' in two_q_gates:
            # Look to see if we have a CNOT gate in the model (with any name).
            cnot_name = cls._find_std_gate(base_processor_spec, 'CNOT')
            H_name = cls._find_std_gate(base_processor_spec, 'H')
            I_name = cls._find_std_gate(base_processor_spec, 'I')

            # If we've failed to find a Hadamard gate but we only need paulieq compilation, we try
            # to find a gate that is Pauli-equivalent to Hadamard.
            if H_name is None and compile_type == 'paulieq':
                for gn, gunitary in base_processor_spec.gate_unitaries.items():
                    if callable(gunitary): continue  # can't pre-process factories
                    if _symp.unitary_is_clifford(gunitary):
                        if _itgs.is_gate_pauli_equivalent_to_this_standard_unitary(gunitary, 'H'):
                            H_name = gn; break

            # If CNOT is available, add it as a template for 'CNOT'.
            if cnot_name is not None:
                compilation_rules._clifford_templates['CNOT'] = [(_Label(cnot_name, (0, 1)),)]
                # If Hadamard is also available, add the standard conjugation as template for reversed CNOT.
                if H_name is not None:
                    compilation_rules._clifford_templates['CNOT'].append((_Label(H_name, 0), _Label(H_name, 1), _Label(
                        cnot_name, (1, 0)), _Label(H_name, 0), _Label(H_name, 1)))

            # If CNOT isn't available, look to see if we have CPHASE gate in the model (with any name). If we do *and*
            # we have Hadamards, we add the obvious construction of CNOT from CPHASE and Hadamards as a template
            else:
                cphase_name = cls._find_std_gate(base_processor_spec, 'CPHASE')

                # If we find CPHASE, and we have a Hadamard-like gate, we use them to add a CNOT compilation
                # template.
                if H_name is not None:
                    if cphase_name is not None:
                        if I_name is not None:
                            # we explicitly put identity gates into template (so noise on them is simulated correctly?)
                            # Add it with CPHASE in both directions, in case the CPHASES have been specified as being
                            # available in only one direction
                            compilation_rules._clifford_templates['CNOT'] = [
                                (_Label(I_name, 0), _Label(H_name, 1), _Label(cphase_name, (0, 1)), _Label(I_name, 0),
                                 _Label(H_name, 1))]
                            compilation_rules._clifford_templates['CNOT'].append(
                                (_Label(I_name, 0), _Label(H_name, 1), _Label(cphase_name, (1, 0)), _Label(I_name, 0),
                                 _Label(H_name, 1)))
                        else:  # similar, but without explicit identity gates
                            compilation_rules._clifford_templates['CNOT'] = [
                                (_Label(H_name, 1), _Label(cphase_name, (0, 1)), _Label(H_name, 1))]
                            compilation_rules._clifford_templates['CNOT'].append(
                                (_Label(H_name, 1), _Label(cphase_name, (1, 0)), _Label(H_name, 1)))

        # After adding default templates, we know generate compilations for CNOTs between all connected pairs. If the
        # default templates were not relevant or aren't relevant for some qubits, this will generate new templates by
        # brute force.
        for gate in two_q_gates:
            not_locally_compilable = []

            for q1 in base_processor_spec.qubit_labels:
                for q2 in base_processor_spec.qubit_labels:
                    if q1 == q2: continue  # 2Q gates must be on different qubits!
                    for gname in two_q_gates:
                        if verbosity > 0:
                            print("Creating a circuit to implement {} {} on qubits {}...".format(
                                gname, descs[compile_type], (q1, q2)))
                        try:
                            compilation_rules.add_local_compilation_of(
                                _Label(gname, (q1, q2)), verbosity=verbosity)
                        except CompilationError:
                            # remember the failures so we can try non-local compilation below
                            not_locally_compilable.append((gname, q1, q2))

            # If requested, try to compile remaining 2Q gates that are `non-local` (not between neighbouring qubits)
            # using specific algorithms.
            if add_nonlocal_two_q_gates:
                for gname, q1, q2 in not_locally_compilable:
                    compilation_rules.add_nonlocal_compilation_of(_Label(gname, (q1, q2)),
                                                                  verbosity=verbosity)

        return compilation_rules
@classmethod
def _find_std_gate(cls, base_processor_spec, std_gate_name):
""" Check to see of a standard/internal gate exists in a processor spec """
for gn in base_processor_spec.gate_names:
if callable(base_processor_spec.gate_unitaries[gn]): continue # can't pre-process factories
if _itgs.is_gate_this_standard_unitary(base_processor_spec.gate_unitaries[gn], std_gate_name):
return gn
return None
def __init__(self, native_gates_processorspec, compile_type="absolute"):
# processor_spec: holds all native Clifford gates (requested gates compile into circuits of these)
self.processor_spec = native_gates_processorspec
self.compile_type = compile_type # "absolute" or "paulieq"
self._clifford_templates = _collections.defaultdict(list) # keys=gate names (strs); vals=tuples of Labels
self.connectivity = {} # QubitGraphs for gates currently compiled in library (key=gate_name)
super(CliffordCompilationRules, self).__init__()
    def _create_local_compilation_of(self, oplabel, unitary=None, srep=None, max_iterations=10, verbosity=1):
        """
        Constructs a local compilation of `oplabel`.

        An existing template is used if one is available, otherwise a new
        template is created using an iterative procedure.  Raises
        :class:`CompilationError` when no compilation can be found.

        Parameters
        ----------
        oplabel : Label
            The label of the gate to compile.  If `oplabel.name` is a
            recognized standard Clifford name (e.g. 'H', 'P', 'X', 'CNOT')
            then no further information is needed.  Otherwise, you must specify
            either (or both) of `unitary` or `srep` *unless* the compilation
            for this oplabel has already been previously constructed and force
            is `False`.  In that case, the previously constructed compilation will
            be returned in all cases, and so this method does not need to know
            what the gate actually is.

        unitary : numpy.ndarray, optional
            The unitary action of the gate being compiled.  If, as is typical,
            you're compiling using Clifford gates, then this unitary should
            correspond to a Clifford operation.  If you specify `unitary`,
            you don't need to specify `srep` - it is computed automatically.

        srep : tuple, optional
            The `(smatrix, svector)` tuple giving the symplectic representation
            of the gate being compiled.

        max_iterations : int, optional
            The maximum number of iterations for the iterative compilation
            algorithm.

        verbosity : int, optional
            An integer >= 0 specifying how much detail to send to stdout.

        Returns
        -------
        Circuit
        """
        # Template compilations always use integer qubit labels: 0 to N
        # where N is the number of qubits in the template's overall label
        # (i.e. its key in self._clifford_templates)
        def to_real_label(template_label):
            """ Convert a "template" operation label (which uses integer qubit labels
                0 to N) to a "real" label for a potential gate in self.processor_spec. """
            qlabels = [oplabel.sslbls[i] for i in template_label.sslbls]
            return _Label(template_label.name, qlabels)

        def to_template_label(real_label):
            """ The reverse (qubits in template == oplabel.qubits) """
            qlabels = [oplabel.sslbls.index(lbl) for lbl in real_label.sslbls]
            return _Label(real_label.name, qlabels)

        def is_local_compilation_feasible(allowed_gatenames):
            """ Whether template_labels can possibly be enough
                gates to compile a template for op_label with """
            if oplabel.num_qubits <= 1:
                return len(allowed_gatenames) > 0  # 1Q gates, anything is ok
            elif oplabel.num_qubits == 2:
                # 2Q gates need a compilation gate that is also 2Q (can't do with just 1Q gates!)
                return max([self.processor_spec.gate_num_qubits(gn) for gn in allowed_gatenames]) == 2
            else:
                # >2Q gates need to make sure there's some connected path
                return True  # future: update using graphs stuff?

        template_to_use = None

        for template_compilation in self._clifford_templates.get(oplabel.name, []):
            #Check availability of gates in self.model to determine
            # whether template_compilation can be applied.
            if all([self.processor_spec.is_available(gl) for gl in map(to_real_label, template_compilation)]):
                template_to_use = template_compilation
                if verbosity > 0: print("Existing template found!")
                break  # compilation found!

        else:  # no existing templates can be applied, so make a new one
            # NOTE: this is a for-else — it runs when the loop above finishes
            # *without* a break, i.e. when no stored template was usable.

            #construct a list of the available gates on the qubits of `oplabel` (or a subset of them)
            available_gatenames = self.processor_spec.available_gatenames(oplabel.sslbls)
            available_srep_dict = self.processor_spec.compute_clifford_symplectic_reps(available_gatenames)

            if is_local_compilation_feasible(available_gatenames):
                available_gatelabels = [to_template_label(gl) for gn in available_gatenames
                                        for gl in self.processor_spec.available_gatelabels(gn, oplabel.sslbls)]
                template_to_use = self.add_clifford_compilation_template(
                    oplabel.name, oplabel.num_qubits, unitary, srep,
                    available_gatelabels, available_srep_dict,
                    verbosity=verbosity, max_iterations=max_iterations)

        #If a template has been found, use it.
        if template_to_use is not None:
            opstr = list(map(to_real_label, template_to_use))
            return _Circuit(layer_labels=opstr, line_labels=self.processor_spec.qubit_labels)
        else:
            raise CompilationError("Cannot locally compile %s" % str(oplabel))
def _get_local_compilation_of(self, oplabel, unitary=None, srep=None, max_iterations=10, force=False, verbosity=1):
"""
Gets a new local compilation of `oplabel`.
Parameters
----------
oplabel : Label
The label of the gate to compile. If `oplabel.name` is a
recognized standard Clifford name (e.g. 'H', 'P', 'X', 'CNOT')
then no further information is needed. Otherwise, you must specify
either (or both) of `unitary` or `srep`.
unitary : numpy.ndarray, optional
The unitary action of the gate being compiled. If, as is typical,
you're compiling using Clifford gates, then this unitary should
correspond to a Clifford operation. If you specify `unitary`,
you don't need to specify `srep` - it is computed automatically.
srep : tuple, optional
The `(smatrix, svector)` tuple giving the symplectic representation
of the gate being compiled.
max_iterations : int, optional
The maximum number of iterations for the iterative compilation
algorithm.
force : bool, optional
If True, then a compilation is recomputed even if `oplabel`
already exists in this `CompilationLibrary`. Otherwise
compilations are only computed when they are *not* present.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
Returns
-------
None
"""
if not force and oplabel in self.specific_compilations:
return self.specific_compilations[oplabel] # don't re-compute unless we're told to
circuit = self._create_local_compilation_of(oplabel,
unitary=unitary,
srep=srep,
max_iterations=max_iterations,
verbosity=verbosity)
return circuit
def add_local_compilation_of(self, oplabel, unitary=None, srep=None, max_iterations=10, force=False, verbosity=1):
"""
Adds a new local compilation of `oplabel`.
Parameters
----------
oplabel : Label
The label of the gate to compile. If `oplabel.name` is a
recognized standard Clifford name (e.g. 'H', 'P', 'X', 'CNOT')
then no further information is needed. Otherwise, you must specify
either (or both) of `unitary` or `srep`.
unitary : numpy.ndarray, optional
The unitary action of the gate being compiled. If, as is typical,
you're compiling using Clifford gates, then this unitary should
correspond to a Clifford operation. If you specify `unitary`,
you don't need to specify `srep` - it is computed automatically.
srep : tuple, optional
The `(smatrix, svector)` tuple giving the symplectic representation
of the gate being compiled.
max_iterations : int, optional
The maximum number of iterations for the iterative compilation
algorithm.
force : bool, optional
If True, then a compilation is recomputed even if `oplabel`
already exists in this `CompilationLibrary`. Otherwise
compilations are only computed when they are *not* present.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
Returns
-------
None
"""
circuit = self._get_local_compilation_of(oplabel, unitary, srep, max_iterations, force, verbosity)
self.add_specific_compilation_rule(oplabel, circuit, unitary)
    def add_clifford_compilation_template(self, gate_name, nqubits, unitary, srep,
                                          available_gatelabels, available_sreps,
                                          verbosity=1, max_iterations=10):
        """
        Adds a new compilation template for `gate_name`.

        Searches, by brute force over circuits of increasing depth (at most
        `max_iterations` layers) built from `available_gatelabels`, for a
        circuit whose symplectic representation matches that of `gate_name` --
        exactly when this library's `compile_type` is 'absolute', or up to the
        Pauli (phase) vector when it is 'paulieq'.  The found template is
        appended to `self._clifford_templates[gate_name]` and returned.

        Parameters
        ----------
        gate_name : str
            The gate name to create a compilation for. If it is a
            recognized standard Clifford name (e.g. 'H', 'P', 'X', 'CNOT')
            then `unitary` and `srep` can be None. Otherwise, you must specify
            either (or both) of `unitary` or `srep`.

        nqubits : int
            The number of qubits this gate acts upon.

        unitary : numpy.ndarray
            The unitary action of the gate being templated. If, as is typical,
            you're compiling using Clifford gates, then this unitary should
            correspond to a Clifford operation. If you specify `unitary`,
            you don't need to specify `srep` - it is computed automatically.

        srep : tuple, optional
            The `(smatrix, svector)` tuple giving the symplectic representation
            of the gate being templated.

        available_gatelabels : list
            A list of the gate labels (:class:`Label` objects) that are available for
            use in compilations.

        available_sreps : dict
            A dictionary of available symplectic representations. Keys are gate
            labels and values are numpy arrays.

        verbosity : int, optional
            An integer >= 0 specifying how much detail to send to stdout.

        max_iterations : int, optional
            The maximum number of iterations for the iterative
            template compilation-finding algorithm.

        Returns
        -------
        tuple
            A tuple of the operation labels (essentially a circuit) specifying
            the template compilation that was generated, or None if no
            compilation was found within `max_iterations` layers.
        """
        # If the unitary is specified, it takes priority and we use it to
        # construct the symplectic rep of the gate.
        if unitary is not None:
            srep = _symp.unitary_to_symplectic(unitary, flagnonclifford=True)
        # If the unitary has not been provided and smatrix and svector are both
        # None, then we find them from the dictionary of standard gates.
        if srep is None:
            template_lbl = _Label(gate_name, tuple(range(nqubits)))  # integer ascending qubit labels
            smatrix, svector = _symp.symplectic_rep_of_clifford_layer(template_lbl, nqubits)
        else:
            smatrix, svector = srep
        assert(_symp.check_valid_clifford(smatrix, svector)), "The gate is not a valid Clifford!"
        assert(_np.shape(smatrix)[0] // 2 == nqubits), \
            "The gate acts on a different number of qubits to stated by `nqubits`"
        if verbosity > 0:
            if self.compile_type == 'absolute':
                print("- Generating a template for a compilation of {}...".format(gate_name), end='\n')
            elif self.compile_type == 'paulieq':
                print("- Generating a template for a pauli-equivalent compilation of {}...".format(gate_name), end='\n')
        obtained_sreps = {}
        # Separate the available operation labels by their target qubits.
        # Qubit labels are sorted because order doesn't matter and sets can't
        # be hashed.
        available_glabels_by_qubit = _collections.defaultdict(list)
        for gl in available_gatelabels:
            available_glabels_by_qubit[tuple(sorted(gl.qubits))].append(gl)
        # Construct all possible circuit layers acting on the qubits: loop
        # over all partitions of nqubits and take the product of the gates
        # available on each contiguous qubit range of the partition.
        all_layers = []
        for p in _lt.partitions(nqubits):
            pi = _np.concatenate(([0], _np.cumsum(p)))
            to_iter_over = [available_glabels_by_qubit[tuple(range(pi[i], pi[i + 1]))] for i in range(len(p))]
            for gls_in_layer in _itertools.product(*to_iter_over):
                all_layers.append(gls_in_layer)
        # Find the symplectic action of all possible circuits of length 1 on the qubits.
        for layer in all_layers:
            obtained_sreps[layer] = _symp.symplectic_rep_of_clifford_layer(layer, nqubits, srep_dict=available_sreps)
        # Find the 1Q identity gate name (used below to prefer idle-heavy candidates).
        I_name = self._find_std_gate(self.processor_spec, 'I')
        # Main loop.  We go through the loop at most max_iterations times,
        # extending every candidate sequence by one layer per iteration.
        found = False
        for counter in range(0, max_iterations):
            if verbosity > 0:
                print(" - Checking all length {} {}-qubit circuits... ({})".format(counter + 1,
                                                                                   nqubits,
                                                                                   len(obtained_sreps)))
            candidates = []  # all valid compilations, if any, of this length.
            # Look to see if we have found a compilation.
            for seq, (s, p) in obtained_sreps.items():
                if _np.array_equal(smatrix, s):
                    if self.compile_type == 'paulieq' or \
                       (self.compile_type == 'absolute' and _np.array_equal(svector, p)):
                        candidates.append(seq)
                        found = True
            # If there is more than one way to compile the gate at this circuit
            # length, pick the one containing the most idle gates.
            if len(candidates) > 1:
                # Look at each sequence, and see if it has more than or equal
                # to max_number_of_idles.  If so, set it to the current chosen
                # sequence.
                if I_name is not None:
                    number_of_idles = 0
                    max_number_of_idles = 0
                    for seq in candidates:
                        number_of_idles = len([x for x in seq if x.name == I_name])
                        if number_of_idles >= max_number_of_idles:
                            max_number_of_idles = number_of_idles
                            compilation = seq
                else:
                    # Idles are absent from circuits - just take one with smallest depth.
                    min_depth = 1e100
                    for seq in candidates:
                        depth = len(seq)
                        if depth < min_depth:
                            min_depth = depth
                            compilation = seq
            elif len(candidates) == 1:
                compilation = candidates[0]
            # If we have found a compilation, leave the loop.
            if found:
                if verbosity > 0: print("Compilation template created!")
                break
            # If we have reached the maximum number of iterations, quit the loop
            # before we construct the symplectic rep for all sequences of a longer length.
            if (counter == max_iterations - 1):
                print(" - Maximum iterations reached without finding a compilation !")
                return None
            # Construct the reps obtained from the next-length sequences by
            # appending every possible layer to every current sequence.
            new_obtained_sreps = {}
            for seq, (s, p) in obtained_sreps.items():
                # Add all possible tensor products of single-qubit gates to the end of the sequence.
                for layer in all_layers:
                    # Calculate the symplectic rep of this parallel gate layer.
                    sadd, padd = _symp.symplectic_rep_of_clifford_layer(layer, nqubits, srep_dict=available_sreps)
                    key = seq + layer  # tuple/Circuit concatenation
                    # Calculate and record the symplectic rep of this gate sequence.
                    new_obtained_sreps[key] = _symp.compose_cliffords(s, p, sadd, padd)
            # Update the list of potential compilations.
            obtained_sreps = new_obtained_sreps
        # Compilation done: remove identity labels, as these are just used to
        # explicitly keep track of the number of identity gates in a circuit.
        compilation = list(filter(lambda gl: gl.name != I_name, compilation))
        # Store & return the template that was found.
        self._clifford_templates[gate_name].append(compilation)
        return compilation
#PRIVATE
def _compute_connectivity_of(self, gate_name):
"""
Compute the connectivity for `gate_name` using the (compiled) gates available this library.
Connectivity is defined in terms of nearest-neighbor links, and the
resulting :class:`QubitGraph`, is stored in `self.connectivity[gate_name]`.
Parameters
----------
gate_name : str
gate name to compute connectivity for.
Returns
-------
None
"""
nQ = self.processor_spec.num_qubits
qubit_labels = self.processor_spec.qubit_labels
d = {qlbl: i for i, qlbl in enumerate(qubit_labels)}
assert(len(qubit_labels) == nQ), "Number of qubit labels is inconsistent with Model dimension!"
connectivity = _np.zeros((nQ, nQ), dtype=bool)
for compiled_gatelabel in self.specific_compilations.keys():
if compiled_gatelabel.name == gate_name:
for p in _itertools.permutations(compiled_gatelabel.qubits, 2):
connectivity[d[p[0]], d[p[1]]] = True
# Note: d converts from qubit labels to integer indices
self.connectivity[gate_name] = _QubitGraph(qubit_labels, connectivity)
def filter_connectivity(self, gate_name, allowed_filter):
"""
Compute the QubitGraph giving the available `gate_name` gates subject to `allowed_filter`.
The filter adds constraints to by specifying the availability of `gate_name`.
Parameters
----------
gate_name : str
The gate name.
allowed_filter : dict or set, optional
Specifies which gates are allowed to be to construct this
connectivity. If a `dict`, keys must be gate names (like
`"CNOT"`) and values :class:`QubitGraph` objects indicating
where that gate (if it's present in the library) may be used.
If a `set`, then it specifies a set of qubits and any gate in
the current library that is confined within that set is allowed.
If None, then all gates within the library are allowed.
Returns
-------
QubitGraph
"""
if gate_name not in self.connectivity: # need to recompute
self._compute_connectivity_of(gate_name)
init_qgraph = self.connectivity[gate_name] # unconstrained
if isinstance(allowed_filter, dict):
graph_constraint = allowed_filter.get(gate_name, None)
if graph_constraint is not None:
directed = graph_constraint.directed or init_qgraph.directed
init_nodes = set(init_qgraph.node_names)
qlabels = [lbl for lbl in graph_constraint.node_names
if lbl in init_nodes] # labels common to both graphs
qlset = set(qlabels) # for faster lookups
final_edges = []
for edge in graph_constraint.edges(True):
if edge[0] in qlset and edge[1] in qlset and \
init_qgraph.has_edge(edge):
final_edges.append(edge) # edge common to both
return _QubitGraph(qlabels, initial_edges=final_edges, directed=directed)
else:
return init_qgraph
else:
if allowed_filter is None:
return init_qgraph
else:
# assume allowed_filter is iterable and contains qubit labels
return init_qgraph.subgraph(list(allowed_filter))
    def _create_nonlocal_compilation_of(self, oplabel, allowed_filter=None, verbosity=1, check=True):
        """
        Constructs a potentially non-local compilation of `oplabel`.

        This method currently only generates a compilation for a non-local CNOT,
        up to arbitrary Pauli gates, between a pair of unconnected qubits. It
        converts this CNOT into a circuit of CNOT gates between connected qubits,
        using a fixed circuit form. This compilation is not optimal in at least
        some circumstances.

        Parameters
        ----------
        oplabel : Label
            The label of the gate to compile. Currently, `oplabel.name` must
            equal `"CNOT"`.

        allowed_filter : dict or set, optional
            Specifies which gates are allowed to be used in this non-local
            compilation. If a `dict`, keys must be gate names (like
            `"CNOT"`) and values :class:`QubitGraph` objects indicating
            where that gate (if it's present in the library) may be used.
            If a `set`, then it specifies a set of qubits and any gate in
            the current library that is confined within that set is allowed.
            If None, then all gates within the library are allowed.

        verbosity : int, optional
            An integer >= 0 specifying how much detail to send to stdout.

        check : bool, optional
            Whether to perform internal consistency checks.

        Returns
        -------
        Circuit
        """
        assert(oplabel.num_qubits > 1), "1-qubit gates can't be non-local!"
        assert(oplabel.name == "CNOT" and oplabel.num_qubits == 2), \
            "Only non-local CNOT compilation is currently supported."
        # Get connectivity of this gate (CNOT), restricted by allowed_filter.
        #if allowed_filter is not None:
        qgraph = self.filter_connectivity(oplabel.name, allowed_filter)
        #else:
        #    qgraph = self.connectivity[oplabel.name]
        # CNOT specific: control and target qubit labels.
        q1 = oplabel.qubits[0]
        q2 = oplabel.qubits[1]
        dist = qgraph.shortest_path_distance(q1, q2)
        if verbosity > 0:
            print("")
            print("Attempting to generate a compilation for CNOT, up to Paulis,")
            print("with control qubit = {} and target qubit = {}".format(q1, q2))
            print("")
            print("Distance between qubits is = {}".format(dist))
        assert(qgraph.is_connected(q1, q2) >= 0), "There is no path between the qubits!"
        # If the qubits are directly connected, this algorithm may not behave well.
        assert(not qgraph.is_directly_connected(q1, q2)), "Qubits are connected! Algorithm is not needed or valid."
        # Find the shortest path between q1 and q2.
        shortestpath = qgraph.shortest_path(q1, q2)
        # Part 1 of the circuit is CNOTs along the shortest path from q1 to q2:
        # a "ladder" of CNOTs between each adjacent pair of qubits on the path.
        part_1 = []
        for i in range(0, len(shortestpath) - 1):
            part_1.append(_Label('CNOT', [shortestpath[i], shortestpath[i + 1]]))
        # Part 2 is part 1 reversed, with its (new) first CNOT dropped.
        part_2 = _copy.deepcopy(part_1)
        part_2.reverse()
        del part_2[0]
        # Part 3 is part 1 with its first CNOT dropped.
        part_3 = _copy.deepcopy(part_1)
        del part_3[0]
        # Part 4 is part 3 with its last CNOT dropped, then reversed.
        part_4 = _copy.deepcopy(part_3)
        del part_4[len(part_3) - 1]
        part_4.reverse()
        # Add the lists of gates together, in order, to form the full
        # compilation of the long-range CNOT (up to Paulis).
        cnot_circuit = part_1 + part_2 + part_3 + part_4
        # Convert the operation list to a circuit.
        line_labels = self.processor_spec.qubit_labels
        circuit = _Circuit(layer_labels=cnot_circuit,
                           line_labels=line_labels,
                           editable=True)
        # Change into the native gates, using the compilation for CNOTs between
        # connected qubits (this library acts as the gate-translation source).
        circuit.change_gate_library(self)
        circuit.done_editing()
        if check:
            # Calculate the symplectic matrix implemented by this circuit, to
            # check the compilation is ok, below.
            sreps = self.processor_spec.compute_clifford_symplectic_reps()
            s, p = _symp.symplectic_rep_of_clifford_circuit(circuit, sreps)
            # Construct the symplectic rep of CNOT between this pair of qubits, to compare to s.
            nQ = self.processor_spec.num_qubits
            iq1 = line_labels.index(q1)  # assumes single tensor-prod term
            iq2 = line_labels.index(q2)  # assumes single tensor-prod term
            s_cnot, p_cnot = _symp.symplectic_rep_of_clifford_layer(_Label('CNOT', (iq1, iq2)), nQ)
            # Symplectic part must match exactly; the Pauli part only for
            # 'absolute' libraries (a 'paulieq' compilation is allowed to
            # differ by Paulis).
            assert(_np.array_equal(s, s_cnot)), "Compilation has failed!"
            if self.compile_type == "absolute":
                assert(_np.array_equal(p, p_cnot)), "Compilation has failed!"
        return circuit
def _get_nonlocal_compilation_of(self, oplabel, force=False,
allowed_filter=None, verbosity=1, check=True):
"""
Get a potentially non-local compilation of `oplabel`.
This function does *not* add this compilation to the library, it merely
returns it. To add it, use :method:`add_nonlocal_compilation_of`.
This method currently only generates a compilation for a non-local CNOT,
up to arbitrary Pauli gates, between a pair of unconnected qubits. It
converts this CNOT into a circuit of CNOT gates between connected qubits,
using a fixed circuit form. This compilation is not optimal in at least
some circumstances.
Parameters
----------
oplabel : Label
The label of the gate to compile. Currently, `oplabel.name` must
equal `"CNOT"`.
force : bool, optional
If True, then a compilation is recomputed even if `oplabel`
already exists in this `CompilationLibrary`. Otherwise
compilations are only computed when they are *not* present.
allowed_filter : dict or set, optional
Specifies which gates are allowed to be used in this non-local
compilation. If a `dict`, keys must be gate names (like
`"CNOT"`) and values :class:`QubitGraph` objects indicating
where that gate (if it's present in the library) may be used.
If a `set`, then it specifies a set of qubits and any gate in
the current library that is confined within that set is allowed.
If None, then all gates within the library are allowed.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
check : bool, optional
Whether to perform internal consistency checks.
Returns
-------
Circuit
"""
context_key = None
if isinstance(allowed_filter, dict):
context_key = frozenset(allowed_filter.items())
elif isinstance(allowed_filter, set):
context_key = frozenset(allowed_filter)
if context_key is not None:
key = (oplabel, context_key)
else:
key = oplabel
if not force and key in self.specific_compilations:
return self[oplabel] # don't re-compute unless we're told to
circuit = self._create_nonlocal_compilation_of(
oplabel, allowed_filter=allowed_filter, verbosity=verbosity, check=check)
return circuit
def add_nonlocal_compilation_of(self, oplabel, force=False,
allowed_filter=None, verbosity=1, check=True):
"""
Add a potentially non-local compilation of `oplabel` to this library.
This method currently only generates a compilation for a non-local CNOT,
up to arbitrary Pauli gates, between a pair of unconnected qubits. It
converts this CNOT into a circuit of CNOT gates between connected qubits,
using a fixed circuit form. This compilation is not optimal in at least
some circumstances.
If `allowed_filter` is None then the compilation is recorded under the key `oplabel`.
Otherwise, the compilation is recorded under the key (`oplabel`,`context_key`) where
`context_key` is frozenset(`allowed_filter`) when `allowed_filter` is a set, and
`context_key` is frozenset(`allowed_filter`.items()) when `allowed_filter` is a dict.
Parameters
----------
oplabel : Label
The label of the gate to compile. Currently, `oplabel.name` must
equal `"CNOT"`.
force : bool, optional
If True, then a compilation is recomputed even if `oplabel`
already exists in this `CompilationLibrary`. Otherwise
compilations are only computed when they are *not* present.
allowed_filter : dict or set, optional
Specifies which gates are allowed to be used in this non-local
compilation. If a `dict`, keys must be gate names (like
`"CNOT"`) and values :class:`QubitGraph` objects indicating
where that gate (if it's present in the library) may be used.
If a `set`, then it specifies a set of qubits and any gate in
the current library that is confined within that set is allowed.
If None, then all gates within the library are allowed.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
check : bool, optional
Whether to perform internal consistency checks.
Returns
-------
None
"""
context_key = None
if isinstance(allowed_filter, dict):
context_key = frozenset(allowed_filter.items())
elif isinstance(allowed_filter, set):
context_key = frozenset(allowed_filter)
if context_key is not None:
key = (oplabel, context_key)
else:
key = oplabel
if not force and key in self.specific_compilations:
return
else:
circuit = self._get_nonlocal_compilation_of(oplabel, force, allowed_filter,
verbosity, check)
self.add_specific_compilation_rule(key, circuit, unitary=None) # Need to take unitary as arg?
def retrieve_compilation_of(self, oplabel, force=False, allowed_filter=None, verbosity=1, check=True):
"""
Get a compilation of `oplabel` in the context of `allowed_filter`, if any.
This is often more convenient than querying the CompilationLibrary directly as a dictionary,
because:
1. If allowed_filter is not None, this handles the correct querying of the dictionary
to find out if there is a previously saved compilation with this `allowed_filter` context.
2. If a compilation is not present, this method will try to compute one.
This method does *not* store the compilation. To store the compilation first call the
method `add_compilation_of()`.
Parameters
----------
oplabel : Label
The label of the gate to compile.
force : bool, optional
If True, then an attempt is made to recompute a compilation
even if `oplabel` already exists in this `CompilationLibrary`.
Otherwise compilations are only computed when they are *not* present.
allowed_filter : dict or set, optional
Specifies which gates are allowed to be used in this non-local
compilation. If a `dict`, keys must be gate names (like
`"CNOT"`) and values :class:`QubitGraph` objects indicating
where that gate (if it's present in the library) may be used.
If a `set`, then it specifies a set of qubits and any gate in
the current library that is confined within that set is allowed.
If None, then all gates within the library are allowed.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
check : bool, optional
Whether to perform internal consistency checks.
Returns
-------
Circuit
"""
# first try and compile the gate locally. Future: this will not work properly if the allowed_filter removes
# gates that the get_local_compilation_of uses, because it knows nothing of the filter. This inconsistence
# should be removed somehow.
try:
# We don't have to account for `force` manually here, because it is dealt with inside this function
circuit = self._get_local_compilation_of(
oplabel, unitary=None, srep=None, max_iterations=10, force=force, verbosity=verbosity)
# Check for the case where this function won't currently behave as expected.
if isinstance(allowed_filter, dict):
raise ValueError("This function may behave incorrectly when the allowed_filer is a dict "
"*and* the gate can be compiled locally!")
# If local compilation isn't possible, we move on and try non-local compilation
except:
circuit = self._get_nonlocal_compilation_of(
oplabel, force=force, allowed_filter=allowed_filter, verbosity=verbosity, check=check)
return circuit
def add_compilation_of(self, oplabel, force=False, allowed_filter=None, verbosity=1, check=True):
"""
Adds a compilation of `oplabel` in the context of `allowed_filter`, if any.
If `allowed_filter` is None then the compilation is recorded under the key `oplabel`.
Otherwise, the compilation is recorded under the key (`oplabel`,`context_key`) where
`context_key` is frozenset(`allowed_filter`) when `allowed_filter` is a set, and
`context_key` is frozenset(`allowed_filter`.items()) when `allowed_filter` is a dict.
Parameters
----------
oplabel : Label
The label of the gate to compile.
force : bool, optional
If True, then an attempt is made to recompute a compilation
even if `oplabel` already exists in this `CompilationLibrary`.
Otherwise compilations are only computed when they are *not* present.
allowed_filter : dict or set, optional
Specifies which gates are allowed to be used in this non-local
compilation. If a `dict`, keys must be gate names (like
`"CNOT"`) and values :class:`QubitGraph` objects indicating
where that gate (if it's present in the library) may be used.
If a `set`, then it specifies a set of qubits and any gate in
the current library that is confined within that set is allowed.
If None, then all gates within the library are allowed.
verbosity : int, optional
An integer >= 0 specifying how much detail to send to stdout.
check : bool, optional
Whether to perform internal consistency checks.
Returns
-------
None
"""
# first try and compile the gate locally. Future: this will not work properly if the allowed_filter removes
# gates that the get_local_compilation_of uses, because it knows nothing of the filter. This inconsistence
# should be removed somehow.
try:
# We don't have to account for `force` manually here, because it is dealt with inside this function
self.add_local_compilation_of(oplabel, unitary=None, srep=None,
max_iterations=10, force=force, verbosity=verbosity)
# Check for the case where this function won't currently behave as expected.
if isinstance(allowed_filter, dict):
raise ValueError("This function may behave incorrectly when the allowed_filer is a dict "
"*and* the gate can be compiled locally!")
# If local compilation isn't possible, we move on and try non-local compilation
except:
pass
self.add_nonlocal_compilation_of(
oplabel, force=force, allowed_filter=allowed_filter, verbosity=verbosity, check=check)
return
| [
"pygsti.tools.internalgates.standard_gatename_unitaries",
"collections.defaultdict",
"numpy.shape",
"pygsti.circuits.circuit.Circuit",
"pygsti.baseobjs.qubitgraph.QubitGraph",
"itertools.permutations",
"numpy.cumsum",
"pygsti.tools.symplectic.unitary_to_symplectic",
"itertools.product",
"copy.deep... | [((3208, 3234), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (3232, 3234), True, 'import collections as _collections\n'), ((3306, 3332), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (3330, 3332), True, 'import collections as _collections\n'), ((3409, 3435), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (3433, 3435), True, 'import collections as _collections\n'), ((3574, 3600), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (3598, 3600), True, 'import collections as _collections\n'), ((3677, 3703), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (3701, 3703), True, 'import collections as _collections\n'), ((5961, 5996), 'pygsti.tools.internalgates.standard_gatename_unitaries', '_itgs.standard_gatename_unitaries', ([], {}), '()\n', (5994, 5996), True, 'from pygsti.tools import internalgates as _itgs\n'), ((7406, 7441), 'pygsti.tools.internalgates.standard_gatename_unitaries', '_itgs.standard_gatename_unitaries', ([], {}), '()\n', (7439, 7441), True, 'from pygsti.tools import internalgates as _itgs\n'), ((16307, 16479), 'pygsti.processors.processorspec.QubitProcessorSpec', '_QubitProcessorSpec', (['processor_spec.num_qubits', 'gate_names', 'gate_unitaries', 'availability', 'processor_spec.qubit_graph', 'processor_spec.qubit_labels'], {'aux_info': 'aux_info'}), '(processor_spec.num_qubits, gate_names, gate_unitaries,\n availability, processor_spec.qubit_graph, processor_spec.qubit_labels,\n aux_info=aux_info)\n', (16326, 16479), True, 'from pygsti.processors.processorspec import QubitProcessorSpec as _QubitProcessorSpec\n'), ((30743, 30773), 'collections.defaultdict', '_collections.defaultdict', (['list'], {}), '(list)\n', (30767, 30773), True, 'import collections as _collections\n'), ((42636, 42680), 'pygsti.tools.symplectic.check_valid_clifford', '_symp.check_valid_clifford', (['smatrix', 'svector'], {}), '(smatrix, 
svector)\n', (42662, 42680), True, 'from pygsti.tools import symplectic as _symp\n'), ((43347, 43377), 'collections.defaultdict', '_collections.defaultdict', (['list'], {}), '(list)\n', (43371, 43377), True, 'import collections as _collections\n'), ((43732, 43755), 'pygsti.tools.listtools.partitions', '_lt.partitions', (['nqubits'], {}), '(nqubits)\n', (43746, 43755), True, 'from pygsti.tools import listtools as _lt\n'), ((49104, 49135), 'numpy.zeros', '_np.zeros', (['(nQ, nQ)'], {'dtype': 'bool'}), '((nQ, nQ), dtype=bool)\n', (49113, 49135), True, 'import numpy as _np\n'), ((49512, 49551), 'pygsti.baseobjs.qubitgraph.QubitGraph', '_QubitGraph', (['qubit_labels', 'connectivity'], {}), '(qubit_labels, connectivity)\n', (49523, 49551), True, 'from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph\n'), ((55040, 55062), 'copy.deepcopy', '_copy.deepcopy', (['part_1'], {}), '(part_1)\n', (55054, 55062), True, 'import copy as _copy\n'), ((55167, 55189), 'copy.deepcopy', '_copy.deepcopy', (['part_1'], {}), '(part_1)\n', (55181, 55189), True, 'import copy as _copy\n'), ((55269, 55291), 'copy.deepcopy', '_copy.deepcopy', (['part_3'], {}), '(part_3)\n', (55283, 55291), True, 'import copy as _copy\n'), ((55587, 55662), 'pygsti.circuits.circuit.Circuit', '_Circuit', ([], {'layer_labels': 'cnot_circuit', 'line_labels': 'line_labels', 'editable': '(True)'}), '(layer_labels=cnot_circuit, line_labels=line_labels, editable=True)\n', (55595, 55662), True, 'from pygsti.circuits.circuit import Circuit as _Circuit\n'), ((6031, 6062), 'pygsti.tools.internalgates.internal_gate_unitaries', '_itgs.internal_gate_unitaries', ([], {}), '()\n', (6060, 6062), True, 'from pygsti.tools import internalgates as _itgs\n'), ((7476, 7507), 'pygsti.tools.internalgates.internal_gate_unitaries', '_itgs.internal_gate_unitaries', ([], {}), '()\n', (7505, 7507), True, 'from pygsti.tools import internalgates as _itgs\n'), ((30259, 30353), 'pygsti.tools.internalgates.is_gate_this_standard_unitary', 
'_itgs.is_gate_this_standard_unitary', (['base_processor_spec.gate_unitaries[gn]', 'std_gate_name'], {}), '(base_processor_spec.gate_unitaries[gn],\n std_gate_name)\n', (30294, 30353), True, 'from pygsti.tools import internalgates as _itgs\n'), ((33334, 33370), 'pygsti.baseobjs.label.Label', '_Label', (['template_label.name', 'qlabels'], {}), '(template_label.name, qlabels)\n', (33340, 33370), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((33584, 33616), 'pygsti.baseobjs.label.Label', '_Label', (['real_label.name', 'qlabels'], {}), '(real_label.name, qlabels)\n', (33590, 33616), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((35944, 36018), 'pygsti.circuits.circuit.Circuit', '_Circuit', ([], {'layer_labels': 'opstr', 'line_labels': 'self.processor_spec.qubit_labels'}), '(layer_labels=opstr, line_labels=self.processor_spec.qubit_labels)\n', (35952, 36018), True, 'from pygsti.circuits.circuit import Circuit as _Circuit\n'), ((42136, 42194), 'pygsti.tools.symplectic.unitary_to_symplectic', '_symp.unitary_to_symplectic', (['unitary'], {'flagnonclifford': '(True)'}), '(unitary, flagnonclifford=True)\n', (42163, 42194), True, 'from pygsti.tools import symplectic as _symp\n'), ((42508, 42569), 'pygsti.tools.symplectic.symplectic_rep_of_clifford_layer', '_symp.symplectic_rep_of_clifford_layer', (['template_lbl', 'nqubits'], {}), '(template_lbl, nqubits)\n', (42546, 42569), True, 'from pygsti.tools import symplectic as _symp\n'), ((43955, 43988), 'itertools.product', '_itertools.product', (['*to_iter_over'], {}), '(*to_iter_over)\n', (43973, 43988), True, 'import itertools as _itertools\n'), ((44196, 44282), 'pygsti.tools.symplectic.symplectic_rep_of_clifford_layer', '_symp.symplectic_rep_of_clifford_layer', (['layer', 'nqubits'], {'srep_dict': 'available_sreps'}), '(layer, nqubits, srep_dict=\n available_sreps)\n', (44234, 44282), True, 'from pygsti.tools import symplectic as _symp\n'), ((56142, 56198), 
'pygsti.tools.symplectic.symplectic_rep_of_clifford_circuit', '_symp.symplectic_rep_of_clifford_circuit', (['circuit', 'sreps'], {}), '(circuit, sreps)\n', (56182, 56198), True, 'from pygsti.tools import symplectic as _symp\n'), ((56615, 56641), 'numpy.array_equal', '_np.array_equal', (['s', 's_cnot'], {}), '(s, s_cnot)\n', (56630, 56641), True, 'import numpy as _np\n'), ((45067, 45094), 'numpy.array_equal', '_np.array_equal', (['smatrix', 's'], {}), '(smatrix, s)\n', (45082, 45094), True, 'import numpy as _np\n'), ((49283, 49336), 'itertools.permutations', '_itertools.permutations', (['compiled_gatelabel.qubits', '(2)'], {}), '(compiled_gatelabel.qubits, 2)\n', (49306, 49336), True, 'import itertools as _itertools\n'), ((51516, 51582), 'pygsti.baseobjs.qubitgraph.QubitGraph', '_QubitGraph', (['qlabels'], {'initial_edges': 'final_edges', 'directed': 'directed'}), '(qlabels, initial_edges=final_edges, directed=directed)\n', (51527, 51582), True, 'from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph\n'), ((54904, 54958), 'pygsti.baseobjs.label.Label', '_Label', (['"""CNOT"""', '[shortestpath[i], shortestpath[i + 1]]'], {}), "('CNOT', [shortestpath[i], shortestpath[i + 1]])\n", (54910, 54958), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((56563, 56589), 'pygsti.baseobjs.label.Label', '_Label', (['"""CNOT"""', '(iq1, iq2)'], {}), "('CNOT', (iq1, iq2))\n", (56569, 56589), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((56741, 56767), 'numpy.array_equal', '_np.array_equal', (['p', 'p_cnot'], {}), '(p, p_cnot)\n', (56756, 56767), True, 'import numpy as _np\n'), ((24602, 24618), 'pygsti.baseobjs.label.Label', '_Label', (['gname', 'q'], {}), '(gname, q)\n', (24608, 24618), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((25767, 25802), 'pygsti.tools.symplectic.unitary_is_clifford', '_symp.unitary_is_clifford', (['gunitary'], {}), '(gunitary)\n', (25792, 25802), True, 'from pygsti.tools import symplectic as 
_symp\n'), ((42734, 42752), 'numpy.shape', '_np.shape', (['smatrix'], {}), '(smatrix)\n', (42743, 42752), True, 'import numpy as _np\n'), ((43796, 43809), 'numpy.cumsum', '_np.cumsum', (['p'], {}), '(p)\n', (43806, 43809), True, 'import numpy as _np\n'), ((47488, 47574), 'pygsti.tools.symplectic.symplectic_rep_of_clifford_layer', '_symp.symplectic_rep_of_clifford_layer', (['layer', 'nqubits'], {'srep_dict': 'available_sreps'}), '(layer, nqubits, srep_dict=\n available_sreps)\n', (47526, 47574), True, 'from pygsti.tools import symplectic as _symp\n'), ((47771, 47812), 'pygsti.tools.symplectic.compose_cliffords', '_symp.compose_cliffords', (['s', 'p', 'sadd', 'padd'], {}), '(s, p, sadd, padd)\n', (47794, 47812), True, 'from pygsti.tools import symplectic as _symp\n'), ((13035, 13082), 'itertools.product', '_itertools.product', (['*layer_availability_factors'], {}), '(*layer_availability_factors)\n', (13053, 13082), True, 'import itertools as _itertools\n'), ((25831, 25901), 'pygsti.tools.internalgates.is_gate_pauli_equivalent_to_this_standard_unitary', '_itgs.is_gate_pauli_equivalent_to_this_standard_unitary', (['gunitary', '"""H"""'], {}), "(gunitary, 'H')\n", (25886, 25901), True, 'from pygsti.tools import internalgates as _itgs\n'), ((26124, 26149), 'pygsti.baseobjs.label.Label', '_Label', (['cnot_name', '(0, 1)'], {}), '(cnot_name, (0, 1))\n', (26130, 26149), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((29776, 29799), 'pygsti.baseobjs.label.Label', '_Label', (['gname', '(q1, q2)'], {}), '(gname, (q1, q2))\n', (29782, 29799), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((26375, 26392), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(0)'], {}), '(H_name, 0)\n', (26381, 26392), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((26394, 26411), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (26400, 26411), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((26413, 
26438), 'pygsti.baseobjs.label.Label', '_Label', (['cnot_name', '(1, 0)'], {}), '(cnot_name, (1, 0))\n', (26419, 26438), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((26465, 26482), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(0)'], {}), '(H_name, 0)\n', (26471, 26482), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((26484, 26501), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (26490, 26501), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((45215, 45242), 'numpy.array_equal', '_np.array_equal', (['svector', 'p'], {}), '(svector, p)\n', (45230, 45242), True, 'import numpy as _np\n'), ((29279, 29302), 'pygsti.baseobjs.label.Label', '_Label', (['gname', '(q1, q2)'], {}), '(gname, (q1, q2))\n', (29285, 29302), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27524, 27541), 'pygsti.baseobjs.label.Label', '_Label', (['I_name', '(0)'], {}), '(I_name, 0)\n', (27530, 27541), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27543, 27560), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (27549, 27560), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27562, 27589), 'pygsti.baseobjs.label.Label', '_Label', (['cphase_name', '(0, 1)'], {}), '(cphase_name, (0, 1))\n', (27568, 27589), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27591, 27608), 'pygsti.baseobjs.label.Label', '_Label', (['I_name', '(0)'], {}), '(I_name, 0)\n', (27597, 27608), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27643, 27660), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (27649, 27660), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27778, 27795), 'pygsti.baseobjs.label.Label', '_Label', (['I_name', '(0)'], {}), '(I_name, 0)\n', (27784, 27795), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27797, 27814), 
'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (27803, 27814), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27816, 27843), 'pygsti.baseobjs.label.Label', '_Label', (['cphase_name', '(1, 0)'], {}), '(cphase_name, (1, 0))\n', (27822, 27843), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27845, 27862), 'pygsti.baseobjs.label.Label', '_Label', (['I_name', '(0)'], {}), '(I_name, 0)\n', (27851, 27862), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((27897, 27914), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (27903, 27914), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28106, 28123), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (28112, 28123), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28125, 28152), 'pygsti.baseobjs.label.Label', '_Label', (['cphase_name', '(0, 1)'], {}), '(cphase_name, (0, 1))\n', (28131, 28152), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28154, 28171), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (28160, 28171), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28289, 28306), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (28295, 28306), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28308, 28335), 'pygsti.baseobjs.label.Label', '_Label', (['cphase_name', '(1, 0)'], {}), '(cphase_name, (1, 0))\n', (28314, 28335), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((28337, 28354), 'pygsti.baseobjs.label.Label', '_Label', (['H_name', '(1)'], {}), '(H_name, 1)\n', (28343, 28354), True, 'from pygsti.baseobjs.label import Label as _Label\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 5 13:32:12 2020
@author: <NAME>
"""
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import seaborn as sns
def plot_nicer(ax, with_legend=True):
    """Soften a matplotlib axis: grey text, light spines, horizontal grid only."""
    text_alpha = 0.7
    grey = "#676767"
    # Replace the default black frame with light-grey spines.
    for spine in ax.spines.values():
        spine.set_color("lightgray")
    # Fade tick labels, axis labels and the title.
    plt.setp(ax.get_yticklabels(), alpha=text_alpha)
    plt.setp(ax.get_xticklabels(), alpha=text_alpha)
    ax.set_xlabel(ax.get_xlabel(), alpha=text_alpha)
    ax.set_ylabel(ax.get_ylabel(), alpha=text_alpha)
    ax.set_title(ax.get_title(), alpha=text_alpha)
    # Hide the tick marks themselves.
    ax.tick_params(axis='both', which='both', length=0)
    if with_legend:
        legend = ax.get_legend()
        for text in legend.get_texts():
            text.set_color(grey)
        legend.get_title().set_color(grey)
    ax.yaxis.get_offset_text().set_color(grey)
    # Horizontal grid drawn behind the data; no vertical grid.
    ax.yaxis.grid(True, color="lightgrey", zorder=0)
    ax.xaxis.grid(False)
def read_probability(ppm):
    """Load the warming-probability table for one CO2 concentration.

    Parameters
    ----------
    ppm : int
        CO2 concentration; selects Results/warming_probabilities_<ppm>ppm.csv.

    Returns
    -------
    pandas.DataFrame
        The probability table, with the first CSV column as index.
    """
    # os.path.join + f-string instead of manual os.sep/str() concatenation.
    filename = os.path.join("Results", f"warming_probabilities_{ppm}ppm.csv")
    return pd.read_csv(filename, sep=";", index_col=0)
def prepare_warming_data():
    """
    Reads in all the warming probabilities and combines them into one dataframe.

    Returns a DataFrame with one row per CO2 concentration ("400 ppm" ...
    "1000 ppm") and the warming probabilities (rounded percent) as columns.
    """
    ppm_values = np.arange(400, 1001, 50)
    # Collect the per-ppm frames and concatenate once: the old loop called
    # pd.concat on a growing frame every iteration (quadratic copying).
    frames = [round(read_probability(ppm) * 100, 0) for ppm in ppm_values]
    warming_df = pd.concat(frames, axis=1)
    warming_df.columns = [f"{ppm} ppm" for ppm in ppm_values]
    return warming_df.transpose()
def prepare_count_data():
    """
    Reads in the counts of the ipcc and changes them to percent
    """
    # Load the raw counts table.
    counts = pd.read_csv(os.path.join("Results", "temp_counts_all.csv"),
                         sep=";", index_col=0)
    # Drop the spaces from the temperature labels (e.g. "1.5 C" -> "1.5C").
    counts.index = counts.index.str.replace(" ", "")
    # Express each count as a rounded percentage of its column total.
    column_totals = counts.sum()
    return round(counts / column_totals * 100, 0)
def plot_figure(combine_df):
    """Render the combined table as an annotated heatmap and save it to disk."""
    ax = sns.heatmap(combine_df, cmap="OrRd", linewidth=0.2, square=True,
                     annot=True, cbar=False, alpha=0.8)
    # Horizontal rule separating the probability rows from the count row.
    ax.hlines([13], *ax.get_xlim())
    figure = plt.gcf()
    figure.set_size_inches(12, 6)
    figure.tight_layout()
    plt.savefig("Figures" + os.sep + "heatmap.png", dpi=400, bbox_inches="tight")
# Script entry point: build the combined warming-probability / IPCC-count
# table and render it as a single heatmap.
if __name__ == "__main__":
    # Read the data
    ipcc_counts = prepare_count_data()
    # Single-column frame so the counts can be stacked as one extra row below.
    ipcc_counts.columns = ["IPCC Counts"]
    warming_df = prepare_warming_data()
    # Stack the counts row beneath the per-ppm probability rows.
    combine_df = pd.concat([warming_df, ipcc_counts.transpose()])
    plot_figure(combine_df)
| [
"pandas.DataFrame",
"seaborn.heatmap",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.gcf",
"pandas.concat",
"matplotlib.pyplot.savefig"
] | [((1273, 1287), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1285, 1287), True, 'import pandas as pd\n'), ((1303, 1327), 'numpy.arange', 'np.arange', (['(400)', '(1001)', '(50)'], {}), '(400, 1001, 50)\n', (1312, 1327), True, 'import numpy as np\n'), ((1811, 1888), 'pandas.read_csv', 'pd.read_csv', (["('Results' + os.sep + 'temp_counts_all.csv')"], {'sep': '""";"""', 'index_col': '(0)'}), "('Results' + os.sep + 'temp_counts_all.csv', sep=';', index_col=0)\n", (1822, 1888), True, 'import pandas as pd\n'), ((2320, 2423), 'seaborn.heatmap', 'sns.heatmap', (['combine_df'], {'cmap': '"""OrRd"""', 'linewidth': '(0.2)', 'square': '(True)', 'annot': '(True)', 'cbar': '(False)', 'alpha': '(0.8)'}), "(combine_df, cmap='OrRd', linewidth=0.2, square=True, annot=True,\n cbar=False, alpha=0.8)\n", (2331, 2423), True, 'import seaborn as sns\n'), ((2499, 2508), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2506, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2643), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Figures' + os.sep + 'heatmap.png')"], {'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "('Figures' + os.sep + 'heatmap.png', dpi=400, bbox_inches='tight')\n", (2577, 2643), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1520), 'pandas.concat', 'pd.concat', (['[warming_df, prob_temp]'], {'axis': '(1)'}), '([warming_df, prob_temp], axis=1)\n', (1487, 1520), True, 'import pandas as pd\n'), ((1571, 1595), 'numpy.arange', 'np.arange', (['(400)', '(1001)', '(50)'], {}), '(400, 1001, 50)\n', (1580, 1595), True, 'import numpy as np\n')] |
""" scratch.py - a place to try small experiments, figure out how things work """
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import kernels
"""
class MultiLayerPerceptron(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(MultilayerPerceptron, self).__init__()
with self.init_scope():
self.layer1 = L.Linear(n_in, n_hidden)
self.layer2 = L.Linear(n_hidden, n_hidden)
self.layer3 = L.Linear(n_hidden, n_out)
def __call__(self, x):
# Forward propagation
h1 = F.relu(self.layer1(x))
h2 = F.relu(self.layer2(h1))
return self.layer3(h2)
"""
"""
class LeNet5(Chain):
def __init__(self):
super(LeNet5, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_channels=1, out_channels=6, ksize=5, stride=1)
self.conv2 = L.Convolution2D(
in_channels=6, out_channels=16, ksize=5, stride=1)
self.conv3 = L.Convolution2D(
in_channels=16, out_channels=120, ksize=4, stride=1)
self.fc4 = L.Linear(None, 84)
self.fc5 = L.Linear(84, 10)
def __call__(self, x):
h = F.sigmoid(self.conv1(x))
h = F.max_pooling_2d(h, 2, 2)
h = F.sigmoid(self.conv2(h))
h = F.max_pooling_2d(h, 2, 2)
h = F.sigmoid(self.conv3(h))
h = F.sigmoid(self.fc4(h))
if chainer.config.train:
return self.fc5(h)
return F.softmax(self.fc5(h))
"""
def main():
    """Scratch experiment: run custom convolution kernels over MNIST samples.

    NOTE(review): exploratory code — everything after the exit(-1) call is
    dead, and the bare `x.shape` style expressions are no-ops left over
    from interactive use.
    """
    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist()
    # Print the labels of the first few training samples.
    for i in range(6):
        l = train._datasets[1][0:10000][i]
        img = train._datasets[0][0:10000][i]
        print(l)
    # n: batch size; c_i/c_o: in/out channels; h_*/w_*: image, kernel and
    # padding sizes used below.
    n = 2
    c_i, c_o = 1, 512
    h_i, w_i = 28, 28
    h_k, w_k = 3, 3
    h_p, w_p = 1, 1
    # First n MNIST images reshaped to NCHW for chainer's convolution.
    z = train._datasets[0][0:n]
    z = z.reshape((n, 1, 28, 28))
    # Random input batch with the same layout.
    x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype('f')
    x.shape
    #kW = np.random.uniform(0, 1, (c_o, c_i, h_k, w_k)).astype('f')
    # Kernels come from the project-local `kernels` module; the exact
    # semantics of k_props / kS / csm are defined there — TODO confirm.
    kW, k_props = kernels.make_kernels()
    kW.shape
    kS = kernels.make_similiarity_matrix(kW, k_props)
    csm = kernels.make_cross_support_matrix(len(kW))
    b = np.random.uniform(0, 1, (c_o,)).astype('f')
    b.shape
    s_y, s_x = 1, 1
    # Convolve both the random batch and the MNIST batch with the kernels.
    y = F.convolution_2d(x, kW, b, stride=(s_y, s_x), pad=(h_p, w_p))
    yz = F.convolution_2d(z, kW, b, stride=(s_y, s_x), pad=(h_p, w_p))
    y.shape
    kernels.update_csm(csm, yz)
    print(csm['p'])
    exit(-1)
    # --- Dead code below: unreachable after exit(-1). ---
    h_o = int((h_i + 2 * h_p - h_k) / s_y + 1)
    w_o = int((w_i + 2 * w_p - w_k) / s_x + 1)
    y.shape == (n, c_o, h_o, w_o)
    y = F.convolution_2d(x, kW, b, stride=(s_y, s_x), pad=(h_p, w_p), cover_all=True)
    y.shape == (n, c_o, h_o, w_o + 1)
def my_kernels():
    """Build 512 3x3 kernels and record rotation matches between them.

    k_props[j] stays [-1, -1] while kernel j is unmatched; once a rotation
    of a kernel i with an equal-or-larger element sum matches j it becomes
    [i, r] — presumably r is the rotation index from kernels.rotation_check
    (TODO confirm against the kernels module).
    Returns the kernels stacked as a (512, 1, 3, 3) array.
    """
    k = []
    k_props = np.zeros((512,2), dtype=int)
    # -1 marks "no match found yet" in both columns.
    k_props -= 1
    for i in range(512):
        a = kernels.make_array(i)
        k.append(a)
    for i in range(512):
        for j in range(i, 512):
            if i == j:
                continue
            # Only try to match j against kernels with an >= element sum.
            if np.sum(k[j]) > np.sum(k[i]):
                continue
            if k_props[j][0] != -1:
                continue # Has already matched
            b = np.copy(k[j])
            # Negative return value means "no rotation of k[i] equals b".
            r = kernels.rotation_check(k[i], b)
            if r >= 0:
                k_props[j] = [i, r]
                continue
    kk = np.reshape(k, (512,1,3,3))
    return kk
# Script entry point for ad-hoc experimentation.
if __name__ == '__main__':
    main()
| [
"numpy.random.uniform",
"kernels.make_array",
"numpy.sum",
"numpy.copy",
"chainer.functions.convolution_2d",
"numpy.zeros",
"kernels.make_kernels",
"numpy.reshape",
"kernels.rotation_check",
"chainer.datasets.get_mnist",
"kernels.update_csm",
"kernels.make_similiarity_matrix"
] | [((1873, 1901), 'chainer.datasets.get_mnist', 'chainer.datasets.get_mnist', ([], {}), '()\n', (1899, 1901), False, 'import chainer\n'), ((2354, 2376), 'kernels.make_kernels', 'kernels.make_kernels', ([], {}), '()\n', (2374, 2376), False, 'import kernels\n'), ((2399, 2443), 'kernels.make_similiarity_matrix', 'kernels.make_similiarity_matrix', (['kW', 'k_props'], {}), '(kW, k_props)\n', (2430, 2443), False, 'import kernels\n'), ((2591, 2652), 'chainer.functions.convolution_2d', 'F.convolution_2d', (['x', 'kW', 'b'], {'stride': '(s_y, s_x)', 'pad': '(h_p, w_p)'}), '(x, kW, b, stride=(s_y, s_x), pad=(h_p, w_p))\n', (2607, 2652), True, 'import chainer.functions as F\n'), ((2662, 2723), 'chainer.functions.convolution_2d', 'F.convolution_2d', (['z', 'kW', 'b'], {'stride': '(s_y, s_x)', 'pad': '(h_p, w_p)'}), '(z, kW, b, stride=(s_y, s_x), pad=(h_p, w_p))\n', (2678, 2723), True, 'import chainer.functions as F\n'), ((2740, 2767), 'kernels.update_csm', 'kernels.update_csm', (['csm', 'yz'], {}), '(csm, yz)\n', (2758, 2767), False, 'import kernels\n'), ((2939, 3016), 'chainer.functions.convolution_2d', 'F.convolution_2d', (['x', 'kW', 'b'], {'stride': '(s_y, s_x)', 'pad': '(h_p, w_p)', 'cover_all': '(True)'}), '(x, kW, b, stride=(s_y, s_x), pad=(h_p, w_p), cover_all=True)\n', (2955, 3016), True, 'import chainer.functions as F\n'), ((3101, 3130), 'numpy.zeros', 'np.zeros', (['(512, 2)'], {'dtype': 'int'}), '((512, 2), dtype=int)\n', (3109, 3130), True, 'import numpy as np\n'), ((3673, 3702), 'numpy.reshape', 'np.reshape', (['k', '(512, 1, 3, 3)'], {}), '(k, (512, 1, 3, 3))\n', (3683, 3702), True, 'import numpy as np\n'), ((3184, 3205), 'kernels.make_array', 'kernels.make_array', (['i'], {}), '(i)\n', (3202, 3205), False, 'import kernels\n'), ((2199, 2242), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(n, c_i, h_i, w_i)'], {}), '(0, 1, (n, c_i, h_i, w_i))\n', (2216, 2242), True, 'import numpy as np\n'), ((2506, 2537), 'numpy.random.uniform', 
'np.random.uniform', (['(0)', '(1)', '(c_o,)'], {}), '(0, 1, (c_o,))\n', (2523, 2537), True, 'import numpy as np\n'), ((3517, 3530), 'numpy.copy', 'np.copy', (['k[j]'], {}), '(k[j])\n', (3524, 3530), True, 'import numpy as np\n'), ((3547, 3578), 'kernels.rotation_check', 'kernels.rotation_check', (['k[i]', 'b'], {}), '(k[i], b)\n', (3569, 3578), False, 'import kernels\n'), ((3346, 3358), 'numpy.sum', 'np.sum', (['k[j]'], {}), '(k[j])\n', (3352, 3358), True, 'import numpy as np\n'), ((3361, 3373), 'numpy.sum', 'np.sum', (['k[i]'], {}), '(k[i])\n', (3367, 3373), True, 'import numpy as np\n')] |
import numpy as np
def softmax(x, axis=None):
    """Numerically stable softmax.

    Parameters
    ----------
    x : array_like
        Input scores (now also accepts plain lists, not just ndarrays).
    axis : int or None, optional
        Axis along which to normalize. None (the default, matching the
        previous behavior) normalizes over the whole array.

    Returns
    -------
    numpy.ndarray
        Non-negative values summing to 1 along ``axis``.
    """
    x = np.asarray(x)
    # Subtracting the max before exponentiating avoids overflow in exp.
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def save_model(outfile, model):
    """Persist the model's U, V, W weight matrices to a .npz archive."""
    np.savez(outfile, U=model.U, V=model.V, W=model.W)
    print("Saved model parameters to %s." % outfile)
def save_data(outfile, x_train, y_train, itw):
    """Dump the training inputs (X), targets (Y) and index-to-word table to .npz."""
    arrays = {'X': x_train, 'Y': y_train, 'ITW': itw}
    np.savez(outfile, **arrays)
    print('Saved data to %s.' % outfile)
def load_data(path):
    """Read the X, Y and ITW arrays back from a .npz archive written by save_data."""
    archive = np.load(path)
    return archive['X'], archive['Y'], archive['ITW']
def load_model(path, model):
    """Restore U, V, W from a .npz archive into `model` and sync its dimensions."""
    arrays = np.load(path)
    # Hidden/word sizes are derived from U's shape.
    model.hidden_dim = arrays["U"].shape[0]
    model.word_dim = arrays["U"].shape[1]
    model.U = arrays["U"]
    model.V = arrays["V"]
    model.W = arrays["W"]
    print("Loaded model parameters from %s. hidden_dim=%d word_dim=%d" % (path, model.hidden_dim, model.word_dim))
def save_model_parameters_theano(outfile, model):
    """Pull U, V, W out of the theano shared variables and save them to .npz."""
    np.savez(outfile,
             U=model.U.get_value(),
             V=model.V.get_value(),
             W=model.W.get_value())
    print("Saved model parameters to %s." % outfile)
def load_model_parameters_theano(path, model):
    """Load U, V, W from a .npz archive into the model's theano shared variables."""
    arrays = np.load(path)
    U, V, W = arrays["U"], arrays["V"], arrays["W"]
    # Hidden/word sizes are derived from U's shape.
    model.hidden_dim = U.shape[0]
    model.word_dim = U.shape[1]
    model.U.set_value(U)
    model.V.set_value(V)
    model.W.set_value(W)
    print("Loaded model parameters from %s. hidden_dim=%d word_dim=%d" % (path, U.shape[0], U.shape[1]))
| [
"numpy.savez",
"numpy.max",
"numpy.sum",
"numpy.load"
] | [((171, 203), 'numpy.savez', 'np.savez', (['outfile'], {'U': 'U', 'V': 'V', 'W': 'W'}), '(outfile, U=U, V=V, W=W)\n', (179, 203), True, 'import numpy as np\n'), ((309, 357), 'numpy.savez', 'np.savez', (['outfile'], {'X': 'x_train', 'Y': 'y_train', 'ITW': 'itw'}), '(outfile, X=x_train, Y=y_train, ITW=itw)\n', (317, 357), True, 'import numpy as np\n'), ((435, 448), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (442, 448), True, 'import numpy as np\n'), ((548, 561), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (555, 561), True, 'import numpy as np\n'), ((969, 1001), 'numpy.savez', 'np.savez', (['outfile'], {'U': 'U', 'V': 'V', 'W': 'W'}), '(outfile, U=U, V=V, W=W)\n', (977, 1001), True, 'import numpy as np\n'), ((1120, 1133), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1127, 1133), True, 'import numpy as np\n'), ((83, 93), 'numpy.sum', 'np.sum', (['xt'], {}), '(xt)\n', (89, 93), True, 'import numpy as np\n'), ((56, 65), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (62, 65), True, 'import numpy as np\n')] |
import bz2
import os
from urllib.request import urlopen
import numpy as np
import cv2
from align import align_image
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import face_recognition
def resize_image(image, width = None, height = None, inter = cv2.INTER_AREA):
    """Resize an image to the given width OR height, preserving aspect ratio.

    If both width and height are None, the image is returned unchanged; if
    both are given, width wins and height is ignored.
    """
    (h, w) = image.shape[:2]
    # Nothing requested: hand the original back untouched.
    if width is None and height is None:
        return image
    if width is None:
        # Scale so the output height matches the request.
        scale = height / float(h)
        dim = (int(w * scale), height)
    else:
        # Scale so the output width matches the request.
        scale = width / float(w)
        dim = (width, int(h * scale))
    return cv2.resize(image, dim, interpolation=inter)
def download_landmarks(dst_file):
    """Fetch dlib's 68-point face-landmark model and decompress it to dst_file."""
    url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    decompressor = bz2.BZ2Decompressor()
    with urlopen(url) as src, open(dst_file, 'wb') as dst:
        # Stream in 1 KiB chunks, decompressing on the fly.
        while True:
            chunk = src.read(1024)
            if not chunk:
                break
            dst.write(decompressor.decompress(chunk))
def download_landmarks_to_disk(dst_dir):
    """Download the landmark model into dst_dir unless it is already present.

    The file is stored as <dst_dir>/landmarks.dat.
    """
    dst_file = os.path.join(dst_dir, 'landmarks.dat')
    if not os.path.exists(dst_file):
        # exist_ok avoids the FileExistsError the old code raised when the
        # directory already existed but the file did not.
        os.makedirs(dst_dir, exist_ok=True)
        download_landmarks(dst_file)
class IdentityMetadata():
    """Location of one identity image on disk: <base>/<name>/<file>."""

    def __init__(self, base, name, file):
        self.base = base    # dataset base directory
        self.name = name    # identity (person) name
        self.file = file    # image file name

    def __repr__(self):
        return self.image_path()

    def image_path(self):
        """Full path of the image file."""
        return os.path.join(self.base, self.name, self.file)
def load_metadata(path):
    """Scan path/<identity>/<image> and collect metadata for each JPEG image.

    Only files with a .jpg/.jpeg extension are kept; the check is
    case-insensitive, so files such as IMG.JPG are no longer silently
    skipped. Returns a list of IdentityMetadata objects.
    """
    metadata = []
    for identity in os.listdir(path):
        for fname in os.listdir(os.path.join(path, identity)):
            # Check file extension. Allow only jpg/jpeg files (any case).
            ext = os.path.splitext(fname)[1].lower()
            if ext in ('.jpg', '.jpeg'):
                metadata.append(IdentityMetadata(path, identity, fname))
    return metadata
def load_image(path):
    """Read an image from disk with OpenCV and return it in RGB channel order."""
    bgr = cv2.imread(path, 1)
    # OpenCV loads channels as BGR; reverse the last axis to get RGB.
    return bgr[..., ::-1]
def generate_embeddings(metadata, pretrained_network, load_from_disk=False, embedings_output_path="" ):
    """
    Inputs the metadata through the network to generate a set of encodings/embeddings.

    When load_from_disk is True, the embeddings are unpickled from the cache
    file instead of being recomputed. Returns (embedded, correct_indexes),
    where correct_indexes lists the metadata positions that produced a
    valid 128-d embedding (alignment can fail for some images).
    """
    # One 128-dimensional embedding slot per metadata entry.
    embedded = np.zeros((metadata.shape[0], 128))
    base_dir = os.path.dirname(os.path.abspath(__file__))
    if embedings_output_path == "":
        # Default cache: <this file's dir>/models/embeddings.pickle
        if not os.path.exists(os.path.sep.join([base_dir, "models"])):
            os.mkdir(os.path.sep.join([base_dir, "models"]))
        embeddings_disk = os.path.sep.join([base_dir, "models", "embeddings.pickle"])
    else:
        embeddings_disk = embedings_output_path
    if load_from_disk:
        # Cache layout: embeddings array first, then the index list.
        pickleFile = open(embeddings_disk, 'rb')
        embedded = pickle.load(pickleFile)
        correct_indexes = pickle.load(pickleFile)
        return embedded, correct_indexes
    else:
        error_indexes = []
        for i, m in enumerate(metadata):
            try:
                img = load_image(m.image_path())
                # Align face to boost the model performance
                img = align_image(img)
                # scale RGB values to interval [0,1]
                img = (img / 255.).astype(np.float32)
                # obtain embedding vector for image
                embedded[i] = pretrained_network.predict(np.expand_dims(img, axis=0))[0]
                print(f"Generating embedding for image {m.image_path()}")
            except TypeError:
                # Presumably align_image yields None for undetectable faces,
                # making the arithmetic above raise TypeError — TODO confirm.
                print(f"Failed to generate embedding for image {m.image_path()}")
                error_indexes.append(i)
        # Keep only rows whose embedding was generated successfully.
        correct_indexes = [i for i in range(len(metadata)) if i not in error_indexes]
        embedded = embedded[correct_indexes]
        # Write the cache in the same order load_from_disk expects.
        pickleFile = open(embeddings_disk, 'wb')
        pickle.dump(embedded, pickleFile)
        pickle.dump(correct_indexes, pickleFile)
        pickleFile.close()
        return embedded,correct_indexes
def generate_embedding_from_image(image, pretrained_network):
    """Align `image` and run it through the network; return its 128-d embedding.

    Returns None when alignment or prediction fails.
    """
    embedding = None
    try:
        image = align_image(image)
        # scale RGB values to interval [0,1]
        img = (image / 255.).astype(np.float32)
        # obtain embedding vector for image
        embedding = pretrained_network.predict(np.expand_dims(img, axis=0))[0]
        print(f"Generating embedding for image")
    except Exception:
        # The old bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # Exception keeps the best-effort behavior without hiding those.
        print("Could not generate embedding")
    return embedding
def train_models(embeddings, metadata, models_output_path="" ):
    """
    Takes as input a set of encodings and metadata containing their labels to train a KNN and SVM model.

    The fitted classifiers are pickled (KNN first, then SVM) to
    models_output_path, or to <this file's dir>/models/models.pickle when no
    path is given. Returns the (knn, svc) pair.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    if models_output_path == "":
        # Default destination: <this file's dir>/models/models.pickle
        if not os.path.exists(os.path.sep.join([base_dir, "models"])):
            os.mkdir(os.path.sep.join([base_dir, "models"]))
        models_disk = os.path.sep.join([base_dir, "models", "models.pickle"])
    else:
        models_disk = models_output_path
    targets = np.array([m.name for m in metadata])
    encoder = LabelEncoder()
    encoder.fit(targets)
    # Numerical encoding of identities
    y = encoder.transform(targets)
    # 70/30 train/test split over sample indices; fixed seed for repeatability.
    train_idx, test_idx = train_test_split(np.arange(metadata.shape[0]), test_size = 0.3, random_state = 42)
    X_train = embeddings[train_idx]
    X_test = embeddings[test_idx]
    y_train = y[train_idx]
    y_test = y[test_idx]
    knn = KNeighborsClassifier(n_neighbors=1, metric='euclidean')
    svc = LinearSVC()
    knn.fit(X_train, y_train)
    svc.fit(X_train, y_train)
    # Report held-out accuracy for both classifiers.
    acc_knn = accuracy_score(y_test, knn.predict(X_test))
    acc_svc = accuracy_score(y_test, svc.predict(X_test))
    print(f'KNN accuracy = {acc_knn * 100}%, SVM accuracy = {acc_svc * 100}%')
    # Persist both models in one pickle file (KNN first, then SVM).
    pickleFile = open(models_disk, 'wb')
    pickle.dump(knn, pickleFile)
    pickle.dump(svc, pickleFile)
    pickleFile.close()
    return knn, svc
def recognize_faces_in_frame(image):
    """Detect and identify faces in a BGR frame.

    Returns an iterator (zip) of (box, name, probability) tuples, where box
    is a (top, right, bottom, left) location and name is "unknown" when the
    classifier's confidence is at most 0.6.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb,
        model="hog")
    # NOTE(review): `encodings` is computed here but never used — each face
    # is re-encoded individually inside the loop below.
    encodings = face_recognition.face_encodings(rgb, boxes)
    # initialize the list of names for each face detected
    names = []
    probs = []
    # load the actual face recognition model along with the label encoder
    recognizer = pickle.loads(open(os.path.sep.join(["model", "recognizer.pickle"]), "rb").read())
    le = pickle.loads(open(os.path.sep.join(["model", "le.pickle"]), "rb").read())
    for index, face in enumerate(boxes):
        vec = face_recognition.face_encodings(rgb, [boxes[index]])
        preds = recognizer.predict_proba(vec)[0]
        j = np.argmax(preds)
        proba = preds[j]
        # Fall back to "unknown" at or below the 0.6 confidence threshold.
        name = le.classes_[j] if proba > 0.6 else "unknown"
        names.append(name)
        probs.append(proba)
    detections = zip(boxes, names, probs)
    return detections
def show_image(image):
    """Display `image` in an OpenCV window and block until any key is pressed."""
    # show the output image
    cv2.imshow("Press any key to close this image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def recognize_faces_in_image(image):
    """Detect, identify and annotate faces in a BGR image.

    Draws a green box plus "name prob : xx%" label for every detected face
    and returns the annotated image.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb,
        model="hog")
    # NOTE(review): `encodings` is computed here but never used — each face
    # is re-encoded individually inside the loop below.
    encodings = face_recognition.face_encodings(rgb, boxes)
    # initialize the list of names for each face detected
    names = []
    probs = []
    # load the actual face recognition model along with the label encoder
    recognizer = pickle.loads(open(os.path.sep.join(["model", "recognizer.pickle"]), "rb").read())
    le = pickle.loads(open(os.path.sep.join(["model", "le.pickle"]), "rb").read())
    for index, face in enumerate(boxes):
        vec = face_recognition.face_encodings(rgb, [boxes[index]])
        preds = recognizer.predict_proba(vec)[0]
        j = np.argmax(preds)
        proba = preds[j]
        # Fall back to "unknown" at or below the 0.6 confidence threshold.
        name = le.classes_[j] if proba > 0.6 else "unknown"
        names.append(name)
        probs.append(proba)
    # loop over the recognized faces
    for ((top, right, bottom, left), name, prob) in zip(boxes, names, probs):
        # draw the predicted face name on the image
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        # Keep the label inside the frame when the box touches the top edge.
        y = top - 15 if top - 15 > 15 else top + 15
        label = f"{name} prob : {round(prob, 3)*100}%"
        cv2.putText(image, label, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
            0.6, (0, 255, 0), 2)
    return image | [
"pickle.dump",
"numpy.argmax",
"pickle.load",
"numpy.arange",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"os.path.sep.join",
"os.path.abspath",
"cv2.cvtColor",
"face_recognition.face_encodings",
"os.path.exists",
"urllib.request.urlopen",
"sklearn.preprocessing.LabelEncoder",
"align.... | [((1180, 1223), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'inter'}), '(image, dim, interpolation=inter)\n', (1190, 1223), False, 'import cv2\n'), ((1477, 1498), 'bz2.BZ2Decompressor', 'bz2.BZ2Decompressor', ([], {}), '()\n', (1496, 1498), False, 'import bz2\n'), ((1764, 1802), 'os.path.join', 'os.path.join', (['dst_dir', '"""landmarks.dat"""'], {}), "(dst_dir, 'landmarks.dat')\n", (1776, 1802), False, 'import os\n'), ((2338, 2354), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2348, 2354), False, 'import os\n'), ((2677, 2696), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (2687, 2696), False, 'import cv2\n'), ((3039, 3073), 'numpy.zeros', 'np.zeros', (['(metadata.shape[0], 128)'], {}), '((metadata.shape[0], 128))\n', (3047, 3073), True, 'import numpy as np\n'), ((5739, 5775), 'numpy.array', 'np.array', (['[m.name for m in metadata]'], {}), '([m.name for m in metadata])\n', (5747, 5775), True, 'import numpy as np\n'), ((5790, 5804), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5802, 5804), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((6266, 6321), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)', 'metric': '"""euclidean"""'}), "(n_neighbors=1, metric='euclidean')\n", (6286, 6321), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6332, 6343), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (6341, 6343), False, 'from sklearn.svm import LinearSVC\n'), ((6648, 6676), 'pickle.dump', 'pickle.dump', (['knn', 'pickleFile'], {}), '(knn, pickleFile)\n', (6659, 6676), False, 'import pickle\n'), ((6681, 6709), 'pickle.dump', 'pickle.dump', (['svc', 'pickleFile'], {}), '(svc, pickleFile)\n', (6692, 6709), False, 'import pickle\n'), ((6807, 6845), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (6819, 6845), False, 'import cv2\n'), ((6860, 6909), 
'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb'], {'model': '"""hog"""'}), "(rgb, model='hog')\n", (6891, 6909), False, 'import face_recognition\n'), ((6970, 7013), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb', 'boxes'], {}), '(rgb, boxes)\n', (7001, 7013), False, 'import face_recognition\n'), ((7811, 7865), 'cv2.imshow', 'cv2.imshow', (['"""Press any key to close this image"""', 'image'], {}), "('Press any key to close this image', image)\n", (7821, 7865), False, 'import cv2\n'), ((7870, 7884), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7881, 7884), False, 'import cv2\n'), ((7889, 7912), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7910, 7912), False, 'import cv2\n'), ((7963, 8001), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (7975, 8001), False, 'import cv2\n'), ((8016, 8065), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb'], {'model': '"""hog"""'}), "(rgb, model='hog')\n", (8047, 8065), False, 'import face_recognition\n'), ((8126, 8169), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb', 'boxes'], {}), '(rgb, boxes)\n', (8157, 8169), False, 'import face_recognition\n'), ((1509, 1521), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1516, 1521), False, 'from urllib.request import urlopen\n'), ((1814, 1838), 'os.path.exists', 'os.path.exists', (['dst_file'], {}), '(dst_file)\n', (1828, 1838), False, 'import os\n'), ((1848, 1868), 'os.makedirs', 'os.makedirs', (['dst_dir'], {}), '(dst_dir)\n', (1859, 1868), False, 'import os\n'), ((2234, 2279), 'os.path.join', 'os.path.join', (['self.base', 'self.name', 'self.file'], {}), '(self.base, self.name, self.file)\n', (2246, 2279), False, 'import os\n'), ((3105, 3130), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3120, 3130), False, 'import os\n'), ((3327, 3386), 
'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models', 'embeddings.pickle']"], {}), "([base_dir, 'models', 'embeddings.pickle'])\n", (3343, 3386), False, 'import os\n'), ((3538, 3561), 'pickle.load', 'pickle.load', (['pickleFile'], {}), '(pickleFile)\n', (3549, 3561), False, 'import pickle\n'), ((3588, 3611), 'pickle.load', 'pickle.load', (['pickleFile'], {}), '(pickleFile)\n', (3599, 3611), False, 'import pickle\n'), ((4561, 4594), 'pickle.dump', 'pickle.dump', (['embedded', 'pickleFile'], {}), '(embedded, pickleFile)\n', (4572, 4594), False, 'import pickle\n'), ((4603, 4643), 'pickle.dump', 'pickle.dump', (['correct_indexes', 'pickleFile'], {}), '(correct_indexes, pickleFile)\n', (4614, 4643), False, 'import pickle\n'), ((4822, 4840), 'align.align_image', 'align_image', (['image'], {}), '(image)\n', (4833, 4840), False, 'from align import align_image\n'), ((5401, 5426), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5416, 5426), False, 'import os\n'), ((5616, 5671), 'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models', 'models.pickle']"], {}), "([base_dir, 'models', 'models.pickle'])\n", (5632, 5671), False, 'import os\n'), ((5949, 5977), 'numpy.arange', 'np.arange', (['metadata.shape[0]'], {}), '(metadata.shape[0])\n', (5958, 5977), True, 'import numpy as np\n'), ((7417, 7469), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb', '[boxes[index]]'], {}), '(rgb, [boxes[index]])\n', (7448, 7469), False, 'import face_recognition\n'), ((7531, 7547), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (7540, 7547), True, 'import numpy as np\n'), ((8573, 8625), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb', '[boxes[index]]'], {}), '(rgb, [boxes[index]])\n', (8604, 8625), False, 'import face_recognition\n'), ((8687, 8703), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (8696, 8703), True, 'import numpy as np\n'), ((9021, 9087), 'cv2.rectangle', 
'cv2.rectangle', (['image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (9034, 9087), False, 'import cv2\n'), ((9203, 9290), 'cv2.putText', 'cv2.putText', (['image', 'label', '(left, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.6)', '(0, 255, 0)', '(2)'], {}), '(image, label, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255,\n 0), 2)\n', (9214, 9290), False, 'import cv2\n'), ((2384, 2405), 'os.path.join', 'os.path.join', (['path', 'i'], {}), '(path, i)\n', (2396, 2405), False, 'import os\n'), ((2490, 2509), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2506, 2509), False, 'import os\n'), ((3199, 3237), 'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models']"], {}), "([base_dir, 'models'])\n", (3215, 3237), False, 'import os\n'), ((3261, 3299), 'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models']"], {}), "([base_dir, 'models'])\n", (3277, 3299), False, 'import os\n'), ((3881, 3897), 'align.align_image', 'align_image', (['img'], {}), '(img)\n', (3892, 3897), False, 'from align import align_image\n'), ((5025, 5052), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (5039, 5052), True, 'import numpy as np\n'), ((5492, 5530), 'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models']"], {}), "([base_dir, 'models'])\n", (5508, 5530), False, 'import os\n'), ((5554, 5592), 'os.path.sep.join', 'os.path.sep.join', (["[base_dir, 'models']"], {}), "([base_dir, 'models'])\n", (5570, 5592), False, 'import os\n'), ((7212, 7260), 'os.path.sep.join', 'os.path.sep.join', (["['model', 'recognizer.pickle']"], {}), "(['model', 'recognizer.pickle'])\n", (7228, 7260), False, 'import os\n'), ((7303, 7343), 'os.path.sep.join', 'os.path.sep.join', (["['model', 'le.pickle']"], {}), "(['model', 'le.pickle'])\n", (7319, 7343), False, 'import os\n'), ((8368, 8416), 'os.path.sep.join', 'os.path.sep.join', (["['model', 'recognizer.pickle']"], {}), 
"(['model', 'recognizer.pickle'])\n", (8384, 8416), False, 'import os\n'), ((8459, 8499), 'os.path.sep.join', 'os.path.sep.join', (["['model', 'le.pickle']"], {}), "(['model', 'le.pickle'])\n", (8475, 8499), False, 'import os\n'), ((4114, 4141), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4128, 4141), True, 'import numpy as np\n')] |
from mir import DataEntry
from mir import io
from extractors.midi_utilities import get_valid_channel_count,is_percussive_channel,MidiBeatExtractor
from extractors.rule_based_channel_reweight import midi_to_thickness_and_bass_weights
from midi_chord import ChordRecognition
from chord_class import ChordClass
import numpy as np
from io_new.chordlab_io import ChordLabIO
from io_new.downbeat_io import DownbeatIO
def process_chord(entry, extra_division):
    '''
    Recognize the chord sequence of one song from its midi content.

    Parameters
    ----------
    entry: the song to be processed. Properties required:
        entry.midi: the pretty_midi object
        entry.beat: extracted beat and downbeat
    extra_division: extra divisions to each beat.
        For chord recognition on beat-level, use extra_division=1
        For chord recognition on half-beat-level, use extra_division=2
    Returns
    -------
    Extracted chord sequence
    '''
    midi=entry.midi
    beats=midi.get_beats()
    if(extra_division>1):
        # Subdivide every beat interval into `extra_division` equal parts:
        # linspace over consecutive beat pairs produces one row per interval.
        beat_interp=np.linspace(beats[:-1],beats[1:],extra_division+1).T
        last_beat=beat_interp[-1,-1]
        # Drop each row's last point (it duplicates the next row's first one),
        # flatten to a single grid, then re-append the final beat time.
        beats=np.append(beat_interp[:,:-1].reshape((-1)),last_beat)
    downbeats=midi.get_downbeats()
    j=0
    beat_pos=-2
    beat=[]
    # Tag every beat with its position within the bar: 1 on a downbeat,
    # then 2, 3, ... until the next downbeat resets the counter.
    for i in range(len(beats)):
        if(j<len(downbeats) and beats[i]==downbeats[j]):
            beat_pos=1
            j+=1
        else:
            beat_pos=beat_pos+1
            # Fires if the sequence does not start on (or before) a downbeat.
            assert(beat_pos>0)
        beat.append([beats[i],beat_pos])
    # NOTE(review): the `beat` list built above is never used below —
    # presumably ChordRecognition derives its own timing from `entry`;
    # confirm before deleting the loop.
    rec=ChordRecognition(entry,ChordClass())
    weights=midi_to_thickness_and_bass_weights(entry.midi)
    rec.process_feature(weights)
    chord=rec.decode()
    return chord
def transcribe_cb1000_midi(midi_path, output_path):
    """Run half-beat-level chord recognition on a midi file and save the result.

    :param midi_path: the path to the midi file
    :param output_path: the path to the output (chord lab) file
    """
    song = DataEntry()
    song.append_file(midi_path, io.MidiIO, 'midi')
    song.append_extractor(MidiBeatExtractor, 'beat')
    chord_sequence = process_chord(song, extra_division=2)
    song.append_data(chord_sequence, ChordLabIO, 'pred')
    song.save('pred', output_path)
if __name__ == '__main__':
    import os
    import sys
    if len(sys.argv) != 2:
        print('Usage: main.py midi_path')
        exit(0)
    midi_path = sys.argv[1]
    # Write the chord file next to the input midi.  os.path.dirname handles
    # platform separators and a bare filename ('' -> current dir) correctly,
    # unlike the previous manual '/'.join(), which produced the filesystem
    # root path '/chord_midi.txt' for inputs without a directory component.
    out_dir = os.path.dirname(midi_path) or '.'
    output_path = os.path.join(out_dir, 'chord_midi.txt')
    transcribe_cb1000_midi(midi_path, output_path)
| [
"mir.DataEntry",
"chord_class.ChordClass",
"extractors.rule_based_channel_reweight.midi_to_thickness_and_bass_weights",
"numpy.linspace"
] | [((1560, 1606), 'extractors.rule_based_channel_reweight.midi_to_thickness_and_bass_weights', 'midi_to_thickness_and_bass_weights', (['entry.midi'], {}), '(entry.midi)\n', (1594, 1606), False, 'from extractors.rule_based_channel_reweight import midi_to_thickness_and_bass_weights\n'), ((1909, 1920), 'mir.DataEntry', 'DataEntry', ([], {}), '()\n', (1918, 1920), False, 'from mir import DataEntry\n'), ((1533, 1545), 'chord_class.ChordClass', 'ChordClass', ([], {}), '()\n', (1543, 1545), False, 'from chord_class import ChordClass\n'), ((1015, 1069), 'numpy.linspace', 'np.linspace', (['beats[:-1]', 'beats[1:]', '(extra_division + 1)'], {}), '(beats[:-1], beats[1:], extra_division + 1)\n', (1026, 1069), True, 'import numpy as np\n')] |
from numpy.testing import TestCase, assert_equal, run_module_suite
from weave import ast_tools
class TestHarvestVariables(TestCase):
    """ Not much testing going on here, but at least it is a flame test."""
    def generic_check(self,expr,desired):
        # NOTE(review): the stdlib `parser` module was deprecated in 3.9 and
        # removed in Python 3.10; this helper only runs on older interpreters.
        import parser
        # Parse the source and hand weave the raw syntax-tree list it expects.
        ast_list = parser.suite(expr).tolist()
        actual = ast_tools.harvest_variables(ast_list)
        # Third argument is the failure message: show the offending expression.
        assert_equal(actual,desired,expr)
    def test_simple_expr(self):
        # Convert simple expr to blitz; harvest_variables should report every
        # variable name appearing in the assignment, in order of appearance.
        expr = "a[:1:2] = b[:1+i+2:]"
        desired = ['a','b','i']
        self.generic_check(expr,desired)
if __name__ == "__main__":
run_module_suite()
| [
"weave.ast_tools.harvest_variables",
"parser.suite",
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite"
] | [((638, 656), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (654, 656), False, 'from numpy.testing import TestCase, assert_equal, run_module_suite\n'), ((342, 379), 'weave.ast_tools.harvest_variables', 'ast_tools.harvest_variables', (['ast_list'], {}), '(ast_list)\n', (369, 379), False, 'from weave import ast_tools\n'), ((388, 423), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired', 'expr'], {}), '(actual, desired, expr)\n', (400, 423), False, 'from numpy.testing import TestCase, assert_equal, run_module_suite\n'), ((297, 315), 'parser.suite', 'parser.suite', (['expr'], {}), '(expr)\n', (309, 315), False, 'import parser\n')] |
# coding: utf-8
import interpreter
import brica1
import numpy as np
import logging
import logging.config
from config.log import APP_KEY
app_logger = logging.getLogger(APP_KEY)
class AgentService:
    """Manages one BriCA agent (components + scheduler) per client identifier.

    Component stores are keyed by `identifier`, so several independent agents
    can be driven concurrently through `create` / `step` / `reset`.
    """

    # All scheduled components share one firing interval and sleep time;
    # per-component offsets stagger the pipeline stages (see initialize).
    _COMPONENT_INTERVAL = 1000
    _COMPONENT_SLEEP = 4000

    def __init__(self, config_file, feature_extractor):
        """Load the BriCA network description and prepare per-agent stores.

        config_file: path of the network-description file for NetworkBuilder.
        feature_extractor: model handed to the VVC component for feature
            extraction.
        """
        self.feature_extractor = feature_extractor
        self.nb = interpreter.NetworkBuilder()
        # Close the config file once loaded (previously the handle leaked).
        with open(config_file) as f:
            self.nb.load_file(f)
        self.agents = {}
        self.schedulers = {}
        self.v1_components = {}   # primary visual cortex
        self.vvc_components = {}  # visual what path
        self.bg_components = {}   # basal ganglia
        self.ub_components = {}   # Umataro box
        self.fl_components = {}   # frontal lobe
        self.mo_components = {}   # motor output
        self.rb_components = {}   # reward generator

    def initialize(self, identifier):
        """Build agent, scheduler and components for `identifier`.

        Timing values (interval/offset/sleep) are identical to the original
        hand-written assignments; the tables below just remove duplication.
        """
        agent_builder = interpreter.AgentBuilder()
        self.agents[identifier] = agent_builder.create_agent(self.nb)
        modules = agent_builder.get_modules()
        self.schedulers[identifier] = brica1.VirtualTimeScheduler(self.agents[identifier])
        # Map each BriCA module name onto its per-agent component store.
        component_stores = [
            ('WBAH2017WBRA.Isocortex#V1', self.v1_components),
            ('WBAH2017WBRA.Isocortex#VVC', self.vvc_components),
            ('WBAH2017WBRA.BG', self.bg_components),
            ('WBAH2017WBRA.UB', self.ub_components),
            ('WBAH2017WBRA.Isocortex#FL', self.fl_components),
            ('WBAH2017WBRA.MO', self.mo_components),
            ('WBAH2017WBRA.RB', self.rb_components),
        ]
        for name, store in component_stores:
            store[identifier] = modules[name].get_component(name)
        # The VVC (visual what path) performs feature extraction.
        self.vvc_components[identifier].set_model(self.feature_extractor)
        # Offsets stagger the stages so each one consumes fresh upstream
        # output: VVC -> BG -> FL -> UB -> MO.  V1 and RB are not scheduled.
        timed_components = [
            (self.vvc_components, 1000),
            (self.bg_components, 2000),
            (self.fl_components, 3000),
            (self.ub_components, 4000),
            (self.mo_components, 5000),
        ]
        for store, offset in timed_components:
            component = store[identifier]
            component.interval = self._COMPONENT_INTERVAL
            component.offset = offset
            component.sleep = self._COMPONENT_SLEEP
        self.schedulers[identifier].update()

    def create(self, reward, feature, identifier):
        """Start an episode for `identifier` and return the first action.

        NOTE(review): `reward` is accepted but unused here — presumably kept
        for API symmetry with `step`; confirm before removing.
        """
        if identifier not in self.agents:
            self.initialize(identifier)
        # agent start: feed the initial feature to the basal ganglia.
        self.bg_components[identifier].get_in_port('Isocortex#VVC-BG-Input').buffer = feature
        action = self.bg_components[identifier].start()
        self.fl_components[identifier].last_action = action
        self.ub_components[identifier].last_state = feature
        if app_logger.isEnabledFor(logging.DEBUG):
            app_logger.debug('feature: {}'.format(feature))
        return action

    def step(self, reward, observation, identifier):
        """Advance the agent one cycle; return its action, or '-1' if unknown."""
        if identifier not in self.agents:
            return str(-1)
        self.v1_components[identifier].get_out_port('Isocortex#V1-Isocortex#VVC-Output').buffer = observation
        self.rb_components[identifier].get_out_port('RB-Isocortex#FL-Output').buffer = np.array([reward])
        self.rb_components[identifier].get_out_port('RB-BG-Output').buffer = np.array([reward])
        # One full pipeline pass = 5000 virtual time units (see offsets above).
        self.schedulers[identifier].step(5000)
        action = self.mo_components[identifier].get_in_port('Isocortex#FL-MO-Input').buffer[0]
        return action

    def reset(self, reward, identifier):
        """Finish the episode, flush learning components, return last action."""
        if identifier not in self.agents:
            return str(-1)
        action = self.mo_components[identifier].get_in_port('Isocortex#FL-MO-Input').buffer[0]
        self.ub_components[identifier].end(action, reward)
        self.ub_components[identifier].output(self.ub_components[identifier].last_output_time)
        self.bg_components[identifier].input(self.bg_components[identifier].last_input_time)
        self.bg_components[identifier].end(reward)
        return action
| [
"brica1.VirtualTimeScheduler",
"interpreter.NetworkBuilder",
"numpy.array",
"interpreter.AgentBuilder",
"logging.getLogger"
] | [((152, 178), 'logging.getLogger', 'logging.getLogger', (['APP_KEY'], {}), '(APP_KEY)\n', (169, 178), False, 'import logging\n'), ((326, 354), 'interpreter.NetworkBuilder', 'interpreter.NetworkBuilder', ([], {}), '()\n', (352, 354), False, 'import interpreter\n'), ((905, 931), 'interpreter.AgentBuilder', 'interpreter.AgentBuilder', ([], {}), '()\n', (929, 931), False, 'import interpreter\n'), ((1125, 1177), 'brica1.VirtualTimeScheduler', 'brica1.VirtualTimeScheduler', (['self.agents[identifier]'], {}), '(self.agents[identifier])\n', (1152, 1177), False, 'import brica1\n'), ((4353, 4371), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (4361, 4371), True, 'import numpy as np\n'), ((4449, 4467), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (4457, 4467), True, 'import numpy as np\n')] |
import random
import time
from unittest import TestCase
import gym
import numpy as np
import torch
from torch.distributions import Categorical
from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, \
CategoricalActionDistribution, sample_actions_log_probs
from utils.timing import Timing
from utils.utils import log
class TestActionDistributions(TestCase):
    """Tests for discrete / tuple action distributions and their sampling."""

    batch_size = 128  # whatever

    def test_simple_distribution(self):
        simple_action_space = gym.spaces.Discrete(3)
        simple_num_logits = calc_num_logits(simple_action_space)
        self.assertEqual(simple_num_logits, simple_action_space.n)

        simple_logits = torch.rand(self.batch_size, simple_num_logits)
        simple_action_distribution = get_action_distribution(simple_action_space, simple_logits)

        simple_actions = simple_action_distribution.sample()
        self.assertEqual(list(simple_actions.shape), [self.batch_size])
        self.assertTrue(all(0 <= a < simple_action_space.n for a in simple_actions))

    def test_gumbel_trick(self):
        """
        We use a Gumbel noise which seems to be faster compared to using pytorch multinomial.
        Here we test that those are actually equivalent.
        """
        timing = Timing()

        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True

        with torch.no_grad():
            action_space = gym.spaces.Discrete(8)
            num_logits = calc_num_logits(action_space)
            device_type = 'cpu'
            device = torch.device(device_type)
            logits = torch.rand(self.batch_size, num_logits, device=device) * 10.0 - 5.0

            if device_type == 'cuda':
                torch.cuda.synchronize(device)

            count_gumbel, count_multinomial = np.zeros([action_space.n]), np.zeros([action_space.n])

            # estimate probability mass by actually sampling both ways
            num_samples = 20000

            # warm-up calls so compilation/caching doesn't skew the timings
            action_distribution = get_action_distribution(action_space, logits)
            sample_actions_log_probs(action_distribution)
            action_distribution.sample_gumbel()

            with timing.add_time('gumbel'):
                for i in range(num_samples):
                    action_distribution = get_action_distribution(action_space, logits)
                    samples_gumbel = action_distribution.sample_gumbel()
                    count_gumbel[samples_gumbel[0]] += 1

            action_distribution = get_action_distribution(action_space, logits)
            action_distribution.sample()

            with timing.add_time('multinomial'):
                for i in range(num_samples):
                    action_distribution = get_action_distribution(action_space, logits)
                    samples_multinomial = action_distribution.sample()
                    count_multinomial[samples_multinomial[0]] += 1

            estimated_probs_gumbel = count_gumbel / float(num_samples)
            estimated_probs_multinomial = count_multinomial / float(num_samples)

            log.debug('Gumbel estimated probs: %r', estimated_probs_gumbel)
            log.debug('Multinomial estimated probs: %r', estimated_probs_multinomial)
            log.debug('Sampling timing: %s', timing)
            time.sleep(0.1)  # to finish logging

    def test_tuple_distribution(self):
        num_spaces = random.randint(1, 4)
        spaces = [gym.spaces.Discrete(random.randint(2, 5)) for _ in range(num_spaces)]
        action_space = gym.spaces.Tuple(spaces)

        num_logits = calc_num_logits(action_space)
        logits = torch.rand(self.batch_size, num_logits)
        self.assertEqual(num_logits, sum(s.n for s in action_space.spaces))

        action_distribution = get_action_distribution(action_space, logits)

        tuple_actions = action_distribution.sample()
        self.assertEqual(list(tuple_actions.shape), [self.batch_size, num_spaces])

        log_probs = action_distribution.log_prob(tuple_actions)
        self.assertEqual(list(log_probs.shape), [self.batch_size])

        entropy = action_distribution.entropy()
        self.assertEqual(list(entropy.shape), [self.batch_size])

    def test_tuple_sanity_check(self):
        num_spaces, num_actions = 3, 2
        simple_space = gym.spaces.Discrete(num_actions)
        spaces = [simple_space for _ in range(num_spaces)]
        tuple_space = gym.spaces.Tuple(spaces)

        # BUGFIX: this was assertTrue(calc_num_logits(tuple_space), expected) —
        # assertTrue's second argument is a failure *message*, so the equality
        # was never actually checked.  assertEqual is what was intended.
        self.assertEqual(calc_num_logits(tuple_space), num_spaces * num_actions)

        simple_logits = torch.zeros(1, num_actions)
        tuple_logits = torch.zeros(1, calc_num_logits(tuple_space))

        simple_distr = get_action_distribution(simple_space, simple_logits)
        tuple_distr = get_action_distribution(tuple_space, tuple_logits)

        # entropy of a tuple of independent spaces is the sum of entropies
        tuple_entropy = tuple_distr.entropy()
        self.assertEqual(tuple_entropy, simple_distr.entropy() * num_spaces)

        simple_logprob = simple_distr.log_prob(torch.ones(1))
        tuple_logprob = tuple_distr.log_prob(torch.ones(1, num_spaces))
        self.assertEqual(tuple_logprob, simple_logprob * num_spaces)

    def test_sanity(self):
        raw_logits = torch.tensor([[0.0, 1.0, 2.0]])
        action_space = gym.spaces.Discrete(3)
        categorical = get_action_distribution(action_space, raw_logits)

        # cross-check our implementation against torch's Categorical
        torch_categorical = Categorical(logits=raw_logits)
        torch_categorical_log_probs = torch_categorical.log_prob(torch.tensor([0, 1, 2]))

        entropy = categorical.entropy()
        torch_entropy = torch_categorical.entropy()
        self.assertTrue(np.allclose(entropy.numpy(), torch_entropy))

        log_probs = [categorical.log_prob(torch.tensor([action])) for action in [0, 1, 2]]
        log_probs = torch.cat(log_probs)
        self.assertTrue(np.allclose(torch_categorical_log_probs.numpy(), log_probs.numpy()))

        probs = torch.exp(log_probs)
        expected_probs = np.array([0.09003057317038046, 0.24472847105479764, 0.6652409557748219])
        self.assertTrue(np.allclose(probs.numpy(), expected_probs))

        tuple_space = gym.spaces.Tuple([action_space, action_space])
        raw_logits = torch.tensor([[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]])
        tuple_distr = get_action_distribution(tuple_space, raw_logits)

        # joint probability of independent components factorizes
        for a1 in [0, 1, 2]:
            for a2 in [0, 1, 2]:
                action = torch.tensor([[a1, a2]])
                log_prob = tuple_distr.log_prob(action)
                probability = torch.exp(log_prob)[0].item()
                self.assertAlmostEqual(probability, expected_probs[a1] * expected_probs[a2], delta=1e-6)
| [
"torch.cuda.synchronize",
"torch.distributions.Categorical",
"gym.spaces.Discrete",
"torch.cat",
"algorithms.utils.action_distributions.sample_actions_log_probs",
"torch.device",
"torch.no_grad",
"algorithms.utils.action_distributions.get_action_distribution",
"torch.ones",
"random.randint",
"to... | [((506, 528), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(3)'], {}), '(3)\n', (525, 528), False, 'import gym\n'), ((557, 593), 'algorithms.utils.action_distributions.calc_num_logits', 'calc_num_logits', (['simple_action_space'], {}), '(simple_action_space)\n', (572, 593), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((686, 732), 'torch.rand', 'torch.rand', (['self.batch_size', 'simple_num_logits'], {}), '(self.batch_size, simple_num_logits)\n', (696, 732), False, 'import torch\n'), ((770, 829), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['simple_action_space', 'simple_logits'], {}), '(simple_action_space, simple_logits)\n', (793, 829), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((1276, 1284), 'utils.timing.Timing', 'Timing', ([], {}), '()\n', (1282, 1284), False, 'from utils.timing import Timing\n'), ((3389, 3409), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (3403, 3409), False, 'import random\n'), ((3521, 3545), 'gym.spaces.Tuple', 'gym.spaces.Tuple', (['spaces'], {}), '(spaces)\n', (3537, 3545), False, 'import gym\n'), ((3568, 3597), 'algorithms.utils.action_distributions.calc_num_logits', 'calc_num_logits', (['action_space'], {}), '(action_space)\n', (3583, 3597), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((3615, 3654), 'torch.rand', 'torch.rand', (['self.batch_size', 'num_logits'], {}), '(self.batch_size, num_logits)\n', (3625, 3654), False, 'import torch\n'), ((3763, 3808), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'logits'], {}), '(action_space, logits)\n', 
(3786, 3808), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((4294, 4326), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['num_actions'], {}), '(num_actions)\n', (4313, 4326), False, 'import gym\n'), ((4408, 4432), 'gym.spaces.Tuple', 'gym.spaces.Tuple', (['spaces'], {}), '(spaces)\n', (4424, 4432), False, 'import gym\n'), ((4539, 4566), 'torch.zeros', 'torch.zeros', (['(1)', 'num_actions'], {}), '(1, num_actions)\n', (4550, 4566), False, 'import torch\n'), ((4659, 4711), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['simple_space', 'simple_logits'], {}), '(simple_space, simple_logits)\n', (4682, 4711), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((4734, 4784), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['tuple_space', 'tuple_logits'], {}), '(tuple_space, tuple_logits)\n', (4757, 4784), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((5162, 5193), 'torch.tensor', 'torch.tensor', (['[[0.0, 1.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0]])\n', (5174, 5193), False, 'import torch\n'), ((5217, 5239), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(3)'], {}), '(3)\n', (5236, 5239), False, 'import gym\n'), ((5262, 5311), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'raw_logits'], {}), '(action_space, raw_logits)\n', (5285, 5311), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((5341, 5371), 'torch.distributions.Categorical', 'Categorical', ([], {'logits': 
'raw_logits'}), '(logits=raw_logits)\n', (5352, 5371), False, 'from torch.distributions import Categorical\n'), ((5736, 5756), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (5745, 5756), False, 'import torch\n'), ((5868, 5888), 'torch.exp', 'torch.exp', (['log_probs'], {}), '(log_probs)\n', (5877, 5888), False, 'import torch\n'), ((5915, 5987), 'numpy.array', 'np.array', (['[0.09003057317038046, 0.24472847105479764, 0.6652409557748219]'], {}), '([0.09003057317038046, 0.24472847105479764, 0.6652409557748219])\n', (5923, 5987), True, 'import numpy as np\n'), ((6080, 6126), 'gym.spaces.Tuple', 'gym.spaces.Tuple', (['[action_space, action_space]'], {}), '([action_space, action_space])\n', (6096, 6126), False, 'import gym\n'), ((6148, 6194), 'torch.tensor', 'torch.tensor', (['[[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]])\n', (6160, 6194), False, 'import torch\n'), ((6217, 6265), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['tuple_space', 'raw_logits'], {}), '(tuple_space, raw_logits)\n', (6240, 6265), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((1390, 1405), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1403, 1405), False, 'import torch\n'), ((1434, 1456), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(8)'], {}), '(8)\n', (1453, 1456), False, 'import gym\n'), ((1482, 1511), 'algorithms.utils.action_distributions.calc_num_logits', 'calc_num_logits', (['action_space'], {}), '(action_space)\n', (1497, 1511), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((1565, 1590), 'torch.device', 'torch.device', (['device_type'], {}), '(device_type)\n', (1577, 1590), False, 'import torch\n'), ((2007, 2052), 
'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'logits'], {}), '(action_space, logits)\n', (2030, 2052), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((2065, 2110), 'algorithms.utils.action_distributions.sample_actions_log_probs', 'sample_actions_log_probs', (['action_distribution'], {}), '(action_distribution)\n', (2089, 2110), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((2502, 2547), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'logits'], {}), '(action_space, logits)\n', (2525, 2547), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((3076, 3139), 'utils.utils.log.debug', 'log.debug', (['"""Gumbel estimated probs: %r"""', 'estimated_probs_gumbel'], {}), "('Gumbel estimated probs: %r', estimated_probs_gumbel)\n", (3085, 3139), False, 'from utils.utils import log\n'), ((3152, 3225), 'utils.utils.log.debug', 'log.debug', (['"""Multinomial estimated probs: %r"""', 'estimated_probs_multinomial'], {}), "('Multinomial estimated probs: %r', estimated_probs_multinomial)\n", (3161, 3225), False, 'from utils.utils import log\n'), ((3238, 3278), 'utils.utils.log.debug', 'log.debug', (['"""Sampling timing: %s"""', 'timing'], {}), "('Sampling timing: %s', timing)\n", (3247, 3278), False, 'from utils.utils import log\n'), ((3291, 3306), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3301, 3306), False, 'import time\n'), ((4458, 4486), 'algorithms.utils.action_distributions.calc_num_logits', 'calc_num_logits', (['tuple_space'], {}), '(tuple_space)\n', (4473, 4486), False, 'from 
algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((4605, 4633), 'algorithms.utils.action_distributions.calc_num_logits', 'calc_num_logits', (['tuple_space'], {}), '(tuple_space)\n', (4620, 4633), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((4957, 4970), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (4967, 4970), False, 'import torch\n'), ((5017, 5042), 'torch.ones', 'torch.ones', (['(1)', 'num_spaces'], {}), '(1, num_spaces)\n', (5027, 5042), False, 'import torch\n'), ((5437, 5460), 'torch.tensor', 'torch.tensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (5449, 5460), False, 'import torch\n'), ((1735, 1765), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['device'], {}), '(device)\n', (1757, 1765), False, 'import torch\n'), ((1813, 1839), 'numpy.zeros', 'np.zeros', (['[action_space.n]'], {}), '([action_space.n])\n', (1821, 1839), True, 'import numpy as np\n'), ((1841, 1867), 'numpy.zeros', 'np.zeros', (['[action_space.n]'], {}), '([action_space.n])\n', (1849, 1867), True, 'import numpy as np\n'), ((3448, 3468), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (3462, 3468), False, 'import random\n'), ((5667, 5689), 'torch.tensor', 'torch.tensor', (['[action]'], {}), '([action])\n', (5679, 5689), False, 'import torch\n'), ((6354, 6378), 'torch.tensor', 'torch.tensor', (['[[a1, a2]]'], {}), '([[a1, a2]])\n', (6366, 6378), False, 'import torch\n'), ((1612, 1666), 'torch.rand', 'torch.rand', (['self.batch_size', 'num_logits'], {'device': 'device'}), '(self.batch_size, num_logits, device=device)\n', (1622, 1666), False, 'import torch\n'), ((2291, 2336), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'logits'], {}), '(action_space, logits)\n', (2314, 2336), False, 
'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((2726, 2771), 'algorithms.utils.action_distributions.get_action_distribution', 'get_action_distribution', (['action_space', 'logits'], {}), '(action_space, logits)\n', (2749, 2771), False, 'from algorithms.utils.action_distributions import get_action_distribution, calc_num_logits, CategoricalActionDistribution, sample_actions_log_probs\n'), ((6465, 6484), 'torch.exp', 'torch.exp', (['log_prob'], {}), '(log_prob)\n', (6474, 6484), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# ## Import Packages
# In[19]:
import torch, pytz
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
import os, csv
# For plotting
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, xlabel
import streamlit as st
from PIL import Image
# ## Model Architecture
# In[20]:
class RMSELoss(nn.Module):
    """Root-mean-square error loss: sqrt(MSE(yhat, y) + eps).

    The small eps keeps the square root differentiable (finite gradient)
    when the MSE is exactly zero.
    """
    def __init__(self, eps=1e-6):
        super().__init__()
        self.eps = eps
        self.mse = nn.MSELoss()

    def forward(self, yhat, y):
        mean_squared = self.mse(yhat, y)
        return torch.sqrt(mean_squared + self.eps)
class DNN(nn.Module):
    """Fully connected regression network for the bicep-curl data.

    Architecture: Linear(num_features-1 -> hidden) -> Dropout -> ReLU,
    followed by `num_layers` hidden blocks (Linear -> Dropout -> ReLU),
    then a final Linear(hidden -> 1).

    config keys used: 'dropout_rate', 'num_features', 'num_layers',
    'hidden_size', 'loss_function' ('MSE' or 'RMSE').
    """
    def __init__(self, config):
        super(DNN, self).__init__()
        self.config = config
        dropout_rate = config['dropout_rate']
        num_features = config['num_features']
        num_layers = config['num_layers']
        hidden_size = config['hidden_size']
        self.linear1 = nn.Linear(num_features - 1, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout_rate)
        # Hidden blocks share the single dropout/relu modules (as before).
        # The local `num_layers` is now actually used instead of re-reading
        # self.config inside the loop; `linear2` is kept as an attribute for
        # backward compatibility with any external users.
        self.linear2 = []
        for _ in range(num_layers):
            self.linear2 += [nn.Linear(hidden_size, hidden_size), self.dropout, self.relu]
        self.net = nn.Sequential(*self.linear2)
        self.linear3 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: (batch, num_features - 1)
        x = self.linear1(x)        # -> (batch, hidden_size)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.net(x)
        outputs = self.linear3(x)  # -> (batch, 1)
        # NOTE: squeeze removes *all* size-1 dims, so a batch of one yields a
        # 0-d tensor; kept as-is because callers depend on this behavior.
        outputs = torch.squeeze(outputs)
        return outputs

    def cal_loss(self, pred, labels):
        """Return the configured loss ('MSE' or 'RMSE') between pred and labels.

        Raises Exception('WrongLossFunctionError') for any other setting,
        matching the original contract.
        """
        loss_name = self.config['loss_function']
        if loss_name == 'MSE':
            self.criterion = nn.MSELoss()
        elif loss_name == 'RMSE':
            self.criterion = RMSELoss()
        else:
            print('This loss function doesn\'t exist!')
            raise Exception('WrongLossFunctionError')
        return self.criterion(pred, labels)
# ## Data Preprocessing
# In[21]:
class BicepCurlDataset(Dataset):
    # Wraps one recorded bicep-curl trial as a torch Dataset.
    def __init__(self, file, mode, config):
        '''Load and normalize one trial.

        file: 2-D numpy array; column 0 is dropped, the remaining columns
            hold the voltage features and the elbow-angle label.
        mode: 'Train', 'Val' or 'Test' ('Test' also saves diagnostic plots).
        config: dict providing 'num_features', 'mean' and 'std'
            ('mean'/'std' are (1, num_features) arrays used for z-scoring).
        '''
        self.mode = mode
        num_feature = config['num_features']
        self.avg = config['mean']
        self.std = config['std']
        #Check if path is correct
        if self.mode not in ['Test','Val', 'Train']:
            print('This mode doesn\'t exist, try \'Train\', \'Val\', or \'Test\'')
            raise Exception('WrongModeError')
        if self.mode in ['Train', 'Val']:
            xy = file[:, 1:]  # drop the leading column
            # z-score each feature column with the precomputed statistics
            for j in range(num_feature-1):
                xy[:, j] = (xy[:,j]-config['mean'][0, j])/config['std'][0, j]
            self.xdata = xy[:, :num_feature-1]
            # NOTE(review): the label is always taken from column 3 regardless
            # of num_feature — presumably the elbow-angle column; confirm.
            self.ydata = xy[:, 3]
            self.xdata = torch.from_numpy(self.xdata).float()
            self.ydata = torch.from_numpy(self.ydata).float()
            self.dim = self.xdata.shape[1]
            self.length = self.xdata.shape[0]
            #Here, dim does not include label
        else:
            #Allocate data and label
            self.xdata = []
            self.ydata = []
            xy = file[:, 1:]
            # Plot the raw (un-normalized) signals; 150 looks like the sample
            # rate in Hz -- TODO confirm against the recording setup.
            times_plot = np.linspace(0, xy.shape[0]/150, xy.shape[0])
            plt.plot(times_plot, xy[:, 0]*100)
            plt.title('Percentage Change in Voltage Signals from Forearm')
            plt.xlabel('Time [s]')
            plt.ylabel('Percentage Change in Voltage [%]')
            plt.ioff()
            plt.savefig('front_arm_data.png')
            plt.close()
            plt.plot(times_plot, xy[:, 1]*100)
            plt.title('Percentage Change in Voltage Signals from Bicep')
            plt.xlabel('Time [s]')
            plt.ylabel('Percentage Change in Voltage [%]')
            plt.ioff()
            plt.savefig('bicep_data.png')
            plt.close()
            plt.plot(times_plot, xy[:, 3])
            plt.title('Elbow Angle from Mocap')
            plt.xlabel('Time [s]')
            plt.ylabel('Elbow Angle [$^\circ$]')
            plt.ioff()
            plt.savefig('angle_data.png')
            plt.close()
            # Normalize features only after plotting the raw signals.
            for j in range(num_feature-1):
                xy[:, j] = (xy[:,j]-config['mean'][0, j])/config['std'][0, j]
            # Test mode keeps the whole trial as a single sample.
            self.xdata.append(torch.from_numpy(xy[:, :num_feature-1]).float())
            self.ydata.append(xy[:, 3])
            self.dim = self.xdata[0].shape[1]
            #Here, dim does not include label
            self.length = len(self.xdata)
        print('Finished reading the {} set of BicepCurl Dataset ({} samples found, each dim = {})'
              .format(mode, self.length, self.dim))
    def __getitem__(self, index):
        # Both modes return (features, label); the commented-out variant below
        # returned features only for 'Test' and was deliberately abandoned.
        #if self.mode in ['Train', 'Val']:
        #    return self.xdata[index], self.ydata[index]
        #else:
        #    return self.xdata[index]
        return self.xdata[index], self.ydata[index]
    def __len__(self):
        # Returns the size of the dataset
        return self.length
# ## Dataset and DataLoader
# In[22]:
def prep_dataloader(file, mode, batch_size, n_jobs, config):
    """Build a BicepCurlDataset for `mode` and wrap it in a DataLoader.

    Shuffling is enabled only for the training split; incomplete batches are
    kept and tensors are pinned for faster host-to-device transfers.
    """
    is_training = mode == 'Train'
    dataset = BicepCurlDataset(file, mode=mode, config=config)
    return DataLoader(
        dataset,
        batch_size,
        shuffle=is_training,
        drop_last=False,
        num_workers=n_jobs,
        pin_memory=True,
    )
# ## Load Model
# In[23]:
def Reconstruct(file, model_path='/app/human_digital_twin/Deployment/model_front_arm.pth'):
    """Reconstruct the elbow angle for one trial and save results/plots.

    Parameters
    ----------
    file: 2-D numpy array with the raw trial data (passed to BicepCurlDataset).
    model_path: checkpoint to load; defaults to the deployed front-arm model,
        so existing callers are unaffected.  Previously this path was
        hard-coded inside the function.
    """
    print('Reconstruct...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Hyper-parameters must match those used when the checkpoint was trained.
    config = {
        'n_epochs': 100,                # maximum number of epochs
        'batch_size': 32,               # mini-batch size for dataloader
        'optimizer': 'Adam',            # optimization algorithm (optimizer in torch.optim)
        'optim_hparas': {               # hyper-parameters for the optimizer
            'lr': 0.001,
            'weight_decay': 10**(-4)
        },
        'early_stop': 20,               # early stopping epochs (the number epochs since your model's last improvement)
        'num_features': 3,
        'loss_function': 'RMSE',
        'lr_scheduler': 'ExponentialLR',
        'lr_scheduler_hparas': {
            'gamma': 0.9,
        },
        'hidden_size': 256,
        'num_layers': 2,
        'dropout_rate': 0.2,
        # Normalization statistics; only the first num_features-1 columns are
        # z-scored, so the trailing zero std entries are never divided by.
        'mean': np.array([[-0.11826, 0.46044, 0. ]]),
        'std': np.array([[0.17437, 0.26118, 0. ]])
        #'mean': np.array([[0.18856, 0.25785, 2.1923]]),
        #'std': np.array([[0.19122, 0.15088, 1.4071]])
    }
    test_set = prep_dataloader(file, mode='Test', batch_size=1, n_jobs=0, config=config)
    model = DNN(config).to(device)
    ckpt = torch.load(model_path, map_location='cpu')  # Load the best model
    model.load_state_dict(ckpt)
    preds, targets = test(test_set, model, device)
    save_pred(preds, targets)  # save prediction file to pred.csv
# ## Testing
# In[24]:
def test(tt_set, model, device):
    """Run inference over `tt_set`.

    Returns a pair of lists: per-batch predictions as numpy arrays, and the
    corresponding (unmoved) target tensors.
    """
    model.eval()  # evaluation mode: disables dropout etc.
    predictions = []
    ground_truth = []
    with torch.no_grad():  # no autograd bookkeeping needed for inference
        for features, labels in tt_set:
            output = model(features.to(device))
            predictions.append(output.detach().cpu().numpy())
            ground_truth.append(labels)
    return predictions, ground_truth
def save_pred(preds, targets):
    """Write all predictions/targets to results.csv and plot them to plot.png.

    Args:
        preds: list of 1-D numpy arrays, one per test batch (predicted angles).
        targets: list of target tensors, indexed as targets[k][0, j]
            (presumably shape (1, T) torch tensors -- TODO confirm).

    Bug fixed: the previous version reopened results.csv with mode 'w' inside
    the per-prediction loop, so every iteration clobbered the file and only
    the last prediction survived; it then re-read that same file once per
    prediction and overwrote plot.png each time. The csv is now written once
    containing every prediction, and the plot is drawn once from the
    in-memory values (no read-back needed).
    """
    print('Saving results...')
    preds_plot = []
    targets_plot = []
    with open('results.csv', 'w', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['TimeId', 'Elbow Angle (preds)', 'Elbow Angle (targets)'])
        for index, pred in enumerate(preds):
            for j in range(pred.shape[0]):
                target_val = targets[index][0, j].detach().cpu().item()
                writer.writerow([j, pred[j], target_val])
                preds_plot.append(float(pred[j]))
                targets_plot.append(float(target_val))
    length = len(preds_plot)
    # samples are recorded at 150 Hz, so sample k sits at k/150 seconds
    times_plot = np.linspace(0, length / 150, length)
    plt.plot(times_plot, preds_plot, c='tab:red', label='preds')
    plt.plot(times_plot, targets_plot, c='tab:cyan', label='targets')
    plt.xlabel('Time [s]')
    plt.ylabel(r'Elbow Angle [$^\circ$]')  # raw string avoids the invalid '\c' escape
    plt.title('Reconstruction of Elbow Angle')
    plt.legend()
    plt.ioff()
    plt.savefig('plot.png')
    plt.close()
| [
"torch.nn.Dropout",
"matplotlib.pyplot.title",
"csv.reader",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"torch.load",
"torch.squeeze",
"numpy.linspace",
"torch.nn.Linear",
"csv.writer",
"matplotlib.pyplot.legend",
"torch.cuda.is_available... | [((5323, 5437), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': "(mode == 'Train')", 'drop_last': '(False)', 'num_workers': 'n_jobs', 'pin_memory': '(True)'}), "(dataset, batch_size, shuffle=mode == 'Train', drop_last=False,\n num_workers=n_jobs, pin_memory=True)\n", (5333, 5437), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6865, 6957), 'torch.load', 'torch.load', (['"""/app/human_digital_twin/Deployment/model_front_arm.pth"""'], {'map_location': '"""cpu"""'}), "('/app/human_digital_twin/Deployment/model_front_arm.pth',\n map_location='cpu')\n", (6875, 6957), False, 'import torch, pytz\n'), ((480, 492), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (490, 492), True, 'import torch.nn as nn\n'), ((952, 992), 'torch.nn.Linear', 'nn.Linear', (['(num_features - 1)', 'hidden_size'], {}), '(num_features - 1, hidden_size)\n', (961, 992), True, 'import torch.nn as nn\n'), ((1007, 1016), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1014, 1016), True, 'import torch.nn as nn\n'), ((1036, 1062), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_rate'}), '(p=dropout_rate)\n', (1046, 1062), True, 'import torch.nn as nn\n'), ((1295, 1323), 'torch.nn.Sequential', 'nn.Sequential', (['*self.linear2'], {}), '(*self.linear2)\n', (1308, 1323), True, 'import torch.nn as nn\n'), ((1343, 1368), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (1352, 1368), True, 'import torch.nn as nn\n'), ((1620, 1642), 'torch.squeeze', 'torch.squeeze', (['outputs'], {}), '(outputs)\n', (1633, 1642), False, 'import torch, pytz\n'), ((6506, 6542), 'numpy.array', 'np.array', (['[[-0.11826, 0.46044, 0.0]]'], {}), '([[-0.11826, 0.46044, 0.0]])\n', (6514, 6542), True, 'import numpy as np\n'), ((6565, 6600), 'numpy.array', 'np.array', (['[[0.17437, 0.26118, 0.0]]'], {}), '([[0.17437, 0.26118, 0.0]])\n', (6573, 6600), True, 'import numpy as np\n'), ((1772, 1784), 
'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1782, 1784), True, 'import torch.nn as nn\n'), ((3318, 3364), 'numpy.linspace', 'np.linspace', (['(0)', '(xy.shape[0] / 150)', 'xy.shape[0]'], {}), '(0, xy.shape[0] / 150, xy.shape[0])\n', (3329, 3364), True, 'import numpy as np\n'), ((3381, 3417), 'matplotlib.pyplot.plot', 'plt.plot', (['times_plot', '(xy[:, 0] * 100)'], {}), '(times_plot, xy[:, 0] * 100)\n', (3389, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3424, 3486), 'matplotlib.pyplot.title', 'plt.title', (['"""Percentage Change in Voltage Signals from Forearm"""'], {}), "('Percentage Change in Voltage Signals from Forearm')\n", (3433, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (3505, 3517), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage Change in Voltage [%]"""'], {}), "('Percentage Change in Voltage [%]')\n", (3536, 3572), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3591), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3589, 3591), True, 'import matplotlib.pyplot as plt\n'), ((3600, 3633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""front_arm_data.png"""'], {}), "('front_arm_data.png')\n", (3611, 3633), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3653), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3651, 3653), True, 'import matplotlib.pyplot as plt\n'), ((3672, 3708), 'matplotlib.pyplot.plot', 'plt.plot', (['times_plot', '(xy[:, 1] * 100)'], {}), '(times_plot, xy[:, 1] * 100)\n', (3680, 3708), True, 'import matplotlib.pyplot as plt\n'), ((3715, 3775), 'matplotlib.pyplot.title', 'plt.title', (['"""Percentage Change in Voltage Signals from Bicep"""'], {}), "('Percentage Change in Voltage Signals from Bicep')\n", (3724, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3784, 3806), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Time [s]"""'], {}), "('Time [s]')\n", (3794, 3806), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3861), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage Change in Voltage [%]"""'], {}), "('Percentage Change in Voltage [%]')\n", (3825, 3861), True, 'import matplotlib.pyplot as plt\n'), ((3870, 3880), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3878, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3889, 3918), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bicep_data.png"""'], {}), "('bicep_data.png')\n", (3900, 3918), True, 'import matplotlib.pyplot as plt\n'), ((3927, 3938), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3936, 3938), True, 'import matplotlib.pyplot as plt\n'), ((3957, 3987), 'matplotlib.pyplot.plot', 'plt.plot', (['times_plot', 'xy[:, 3]'], {}), '(times_plot, xy[:, 3])\n', (3965, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3996, 4031), 'matplotlib.pyplot.title', 'plt.title', (['"""Elbow Angle from Mocap"""'], {}), "('Elbow Angle from Mocap')\n", (4005, 4031), True, 'import matplotlib.pyplot as plt\n'), ((4040, 4062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (4050, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4108), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Elbow Angle [$^\\\\circ$]"""'], {}), "('Elbow Angle [$^\\\\circ$]')\n", (4081, 4108), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4126), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4124, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4164), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""angle_data.png"""'], {}), "('angle_data.png')\n", (4146, 4164), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4184), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4182, 4184), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5676), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5674, 5676), False, 
'import torch, pytz\n'), ((7486, 7501), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7499, 7501), False, 'import torch, pytz\n'), ((8063, 8077), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (8073, 8077), False, 'import os, csv\n'), ((8426, 8460), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (8436, 8460), False, 'import os, csv\n'), ((8844, 8880), 'numpy.linspace', 'np.linspace', (['(0)', '(length / 150)', 'length'], {}), '(0, length / 150, length)\n', (8855, 8880), True, 'import numpy as np\n'), ((8908, 8968), 'matplotlib.pyplot.plot', 'plt.plot', (['times_plot', 'preds_plot'], {'c': '"""tab:red"""', 'label': '"""preds"""'}), "(times_plot, preds_plot, c='tab:red', label='preds')\n", (8916, 8968), True, 'import matplotlib.pyplot as plt\n'), ((8981, 9046), 'matplotlib.pyplot.plot', 'plt.plot', (['times_plot', 'targets_plot'], {'c': '"""tab:cyan"""', 'label': '"""targets"""'}), "(times_plot, targets_plot, c='tab:cyan', label='targets')\n", (8989, 9046), True, 'import matplotlib.pyplot as plt\n'), ((9059, 9081), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (9069, 9081), True, 'import matplotlib.pyplot as plt\n'), ((9094, 9131), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Elbow Angle [$^\\\\circ$]"""'], {}), "('Elbow Angle [$^\\\\circ$]')\n", (9104, 9131), True, 'import matplotlib.pyplot as plt\n'), ((9143, 9185), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstruction of Elbow Angle"""'], {}), "('Reconstruction of Elbow Angle')\n", (9152, 9185), True, 'import matplotlib.pyplot as plt\n'), ((9198, 9210), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9208, 9210), True, 'import matplotlib.pyplot as plt\n'), ((9236, 9246), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (9244, 9246), True, 'import matplotlib.pyplot as plt\n'), ((9259, 9282), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.png"""'], {}), "('plot.png')\n", 
(9270, 9282), True, 'import matplotlib.pyplot as plt\n'), ((9295, 9306), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9304, 9306), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1197), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1171, 1197), True, 'import torch.nn as nn\n'), ((2952, 2980), 'torch.from_numpy', 'torch.from_numpy', (['self.xdata'], {}), '(self.xdata)\n', (2968, 2980), False, 'import torch, pytz\n'), ((3010, 3038), 'torch.from_numpy', 'torch.from_numpy', (['self.ydata'], {}), '(self.ydata)\n', (3026, 3038), False, 'import torch, pytz\n'), ((4327, 4368), 'torch.from_numpy', 'torch.from_numpy', (['xy[:, :num_feature - 1]'], {}), '(xy[:, :num_feature - 1])\n', (4343, 4368), False, 'import torch, pytz\n')] |
import xarray as xr
import sys
from math import nan
import numpy as np
import pandas as pd
# thermal conductivities of ice and air (units presumably W m-1 K-1 -- the
# original comment said W/m)
lamice=2.29 ; lamair=0.023
#expname=['SCAM_CLM5_CLM5F_01','SCAM_CLM5_CLM5F_02','SCAM_SNOWD_CLM5F_01','SCAM_SNOWD_CLM5F_02']
expname=['SCAM_SNOWD_CLM5F_02']
# input directory (SCAM/CLM output) and output directory for diagnostics
datpath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM_CLMINIT_60days/"
outpath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM_CLMINIT_60days/BULKSNOW/"
# For each experiment: diagnose bulk snow density, conductance and the
# conductive heat flux through the snowpack, then write them all to one netcdf.
for iexp in expname:
    print(iexp)
    # read snow ice content, snow liquid content, snow depth,
    # top-snow-layer temperature and soil temperature
    snowice = xr.open_dataset(datpath+"SNOWICE_"+iexp+".nc")
    snowice = snowice.snowice
    snowliq = xr.open_dataset(datpath+"SNOWLIQ_"+iexp+".nc")
    snowliq = snowliq.snowliq
    snowdp = xr.open_dataset(datpath+"SNOWDP_"+iexp+".nc")
    snowdp = snowdp.snowdp
    snottopl = xr.open_dataset(datpath+"SNOTTOPL_"+iexp+".nc")
    snottopl = snottopl.snottopl
    tsl = xr.open_dataset(datpath+"TSL_"+iexp+".nc")
    tsl = tsl.tsl
    # snow density = (ice + liquid mass) / depth; zero values -> NaN
    rhosnow = (snowice + snowliq)/snowdp
    rhosnow = rhosnow.where(rhosnow != 0,nan)
    rhosnow = rhosnow.rename('rhosnow')
    # snow conductance: quadratic-in-density interpolation between the air and
    # ice conductivities (NOTE(review): presumably a standard snow thermal
    # conductivity fit -- confirm the source of the 7.75e-5/1.105e-6 coefficients)
    conductance = lamair + (7.75e-5*rhosnow + 1.105e-6*rhosnow**2.)*(lamice - lamair)
    conductance = conductance.rename('conductance')
    # temperature difference between top snow layer and soil
    tdif = snottopl - tsl
    tdif = tdif.rename('tdif')
    # diagnosed bulk flux across snow
    snowflux = -1.*conductance*tdif/snowdp
    snowflux = snowflux.rename('snowflux')
    # taper the winter season fluxes: weight 1 for day-of-year >= 305 or
    # <= 89, half-cosine ramps of 30 days on either side, 0 in summer
    taper = np.zeros([365])
    minfull = 335-30 ; maxfull = 59+30 ; ntaper = 30
    taper[ (np.arange(0,365,1) >= minfull ) | (np.arange(0,365,1) <= maxfull)] = 1
    taper[minfull-ntaper:minfull] = 0.5*(1.-np.cos(np.pi*(np.arange(0,ntaper,1)+0.5)/float(ntaper)))
    taper[maxfull+1:maxfull+1+ntaper] = 1 - 0.5*(1.-np.cos(np.pi*(np.arange(0,ntaper,1)+0.5)/float(ntaper)))
    # tile the 365-day taper over all years and the 3 cities, then move the
    # time axis to the front so it broadcasts against (time, city) data
    taper_3d = np.tile(taper,int(snowice.time.size/365)*3)
    taper_3d = np.reshape(taper_3d,[3,snowice.time.size])
    taper_3d = np.moveaxis(taper_3d,1,0)
    snowflux = snowflux*taper_3d
    snowflux = np.where(taper_3d == 0, taper_3d, snowflux)
    # only calculate the snow flux if there's more than 5cm of snow
    snowflux = np.where(np.array(snowdp) > 0.05, snowflux, 0)
    snowflux = xr.DataArray(snowflux, coords=rhosnow.coords, name='snowflux')
    # first write creates the file; subsequent writes append variables
    rhosnow.to_netcdf(path=outpath+"BULKSNOW_"+iexp+".nc")
    conductance.to_netcdf(path=outpath+"BULKSNOW_"+iexp+".nc", mode="a")
    tdif.to_netcdf(path=outpath+"BULKSNOW_"+iexp+".nc", mode="a")
    snowflux.to_netcdf(path=outpath+"BULKSNOW_"+iexp+".nc", mode="a")
| [
"numpy.moveaxis",
"xarray.open_dataset",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.reshape",
"xarray.DataArray",
"numpy.arange"
] | [((522, 574), 'xarray.open_dataset', 'xr.open_dataset', (["(datpath + 'SNOWICE_' + iexp + '.nc')"], {}), "(datpath + 'SNOWICE_' + iexp + '.nc')\n", (537, 574), True, 'import xarray as xr\n'), ((613, 665), 'xarray.open_dataset', 'xr.open_dataset', (["(datpath + 'SNOWLIQ_' + iexp + '.nc')"], {}), "(datpath + 'SNOWLIQ_' + iexp + '.nc')\n", (628, 665), True, 'import xarray as xr\n'), ((703, 754), 'xarray.open_dataset', 'xr.open_dataset', (["(datpath + 'SNOWDP_' + iexp + '.nc')"], {}), "(datpath + 'SNOWDP_' + iexp + '.nc')\n", (718, 754), True, 'import xarray as xr\n'), ((791, 844), 'xarray.open_dataset', 'xr.open_dataset', (["(datpath + 'SNOTTOPL_' + iexp + '.nc')"], {}), "(datpath + 'SNOTTOPL_' + iexp + '.nc')\n", (806, 844), True, 'import xarray as xr\n'), ((882, 930), 'xarray.open_dataset', 'xr.open_dataset', (["(datpath + 'TSL_' + iexp + '.nc')"], {}), "(datpath + 'TSL_' + iexp + '.nc')\n", (897, 930), True, 'import xarray as xr\n'), ((1547, 1562), 'numpy.zeros', 'np.zeros', (['[365]'], {}), '([365])\n', (1555, 1562), True, 'import numpy as np\n'), ((1984, 2028), 'numpy.reshape', 'np.reshape', (['taper_3d', '[3, snowice.time.size]'], {}), '(taper_3d, [3, snowice.time.size])\n', (1994, 2028), True, 'import numpy as np\n'), ((2042, 2069), 'numpy.moveaxis', 'np.moveaxis', (['taper_3d', '(1)', '(0)'], {}), '(taper_3d, 1, 0)\n', (2053, 2069), True, 'import numpy as np\n'), ((2117, 2160), 'numpy.where', 'np.where', (['(taper_3d == 0)', 'taper_3d', 'snowflux'], {}), '(taper_3d == 0, taper_3d, snowflux)\n', (2125, 2160), True, 'import numpy as np\n'), ((2306, 2368), 'xarray.DataArray', 'xr.DataArray', (['snowflux'], {'coords': 'rhosnow.coords', 'name': '"""snowflux"""'}), "(snowflux, coords=rhosnow.coords, name='snowflux')\n", (2318, 2368), True, 'import xarray as xr\n'), ((2253, 2269), 'numpy.array', 'np.array', (['snowdp'], {}), '(snowdp)\n', (2261, 2269), True, 'import numpy as np\n'), ((1628, 1648), 'numpy.arange', 'np.arange', (['(0)', '(365)', '(1)'], {}), '(0, 
365, 1)\n', (1637, 1648), True, 'import numpy as np\n'), ((1663, 1683), 'numpy.arange', 'np.arange', (['(0)', '(365)', '(1)'], {}), '(0, 365, 1)\n', (1672, 1683), True, 'import numpy as np\n'), ((1757, 1780), 'numpy.arange', 'np.arange', (['(0)', 'ntaper', '(1)'], {}), '(0, ntaper, 1)\n', (1766, 1780), True, 'import numpy as np\n'), ((1866, 1889), 'numpy.arange', 'np.arange', (['(0)', 'ntaper', '(1)'], {}), '(0, ntaper, 1)\n', (1875, 1889), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 10:57:18 2019
@author: peter
"""
import numpy as np
import h5py
def curl(arr, xax, yax, zax, xaxis, yaxis, zaxis, verbose=False):
    """Numerically compute the curl of a vector field on a regular grid.

    Parameters
    ----------
    arr : ndarray
        Field samples; the last axis holds the (x, y, z) vector components.
    xax, yax, zax : int
        Indices of the spatial x/y/z axes within `arr`.
    xaxis, yaxis, zaxis : 1-D arrays
        Coordinate values along each spatial axis (assumed uniformly spaced;
        the mean grid step is used).
    verbose : bool
        If True, print which component is being computed.

    Returns
    -------
    ndarray
        Same shape as `arr`; components whose axes are too short to
        differentiate are left as NaN.
    """
    nx = len(xaxis)
    ny = len(yaxis)
    nz = len(zaxis)
    # start from NaN so untouched components signal "not computed"
    result = np.full(arr.shape, np.nan)
    if ny > 2 and nz > 2:
        if verbose:
            print('Compute curl x component')
        dy = np.gradient(yaxis).mean()
        dz = np.gradient(zaxis).mean()
        result[..., 0] = (np.gradient(arr[..., 2], dy, axis=yax)
                          - np.gradient(arr[..., 1], dz, axis=zax))
    if nx > 2 and nz > 2:
        if verbose:
            print('Compute curl y component')
        dx = np.gradient(xaxis).mean()
        dz = np.gradient(zaxis).mean()
        result[..., 1] = (np.gradient(arr[..., 0], dz, axis=zax)
                          - np.gradient(arr[..., 2], dx, axis=xax))
    if nx > 2 and ny > 2:
        if verbose:
            print('Compute curl z component')
        dx = np.gradient(xaxis).mean()
        dy = np.gradient(yaxis).mean()
        result[..., 2] = (np.gradient(arr[..., 1], dx, axis=xax)
                          - np.gradient(arr[..., 0], dy, axis=yax))
    return result
if __name__ == '__main__':
    # Smoke test: load a slice of an LAPD run and compute its curl.
    f = r'/Volumes/PVH_DATA/LAPD_Mar2018/RAW/run103_PL11B_full.hdf5'
    with h5py.File(f, 'r') as sf:
        # first 10 records of the data cube plus its coordinate axes
        arr = sf['data'][0:10, ...]
        dimlabels = sf['data'].attrs['dimensions']  # unused below; kept for reference
        xaxis = sf['xaxes'][:]
        yaxis = sf['yaxes'][:]
        zaxis = sf['zaxes'][:]
    # axes 1/2/3 of `arr` are the spatial x/y/z axes
    # (axis 0 presumably indexes shots/records -- TODO confirm)
    xax = 1
    yax = 2
    zax= 3
    c = curl(arr, xax, yax, zax, xaxis, yaxis, zaxis)
"numpy.empty",
"h5py.File",
"numpy.gradient"
] | [((338, 357), 'numpy.empty', 'np.empty', (['arr.shape'], {}), '(arr.shape)\n', (346, 357), True, 'import numpy as np\n'), ((1383, 1400), 'h5py.File', 'h5py.File', (['f', '"""r"""'], {}), "(f, 'r')\n", (1392, 1400), False, 'import h5py\n'), ((499, 517), 'numpy.gradient', 'np.gradient', (['yaxis'], {}), '(yaxis)\n', (510, 517), True, 'import numpy as np\n'), ((540, 558), 'numpy.gradient', 'np.gradient', (['zaxis'], {}), '(zaxis)\n', (551, 558), True, 'import numpy as np\n'), ((580, 618), 'numpy.gradient', 'np.gradient', (['arr[..., 2]', 'dy'], {'axis': 'yax'}), '(arr[..., 2], dy, axis=yax)\n', (591, 618), True, 'import numpy as np\n'), ((621, 659), 'numpy.gradient', 'np.gradient', (['arr[..., 1]', 'dz'], {'axis': 'zax'}), '(arr[..., 1], dz, axis=zax)\n', (632, 659), True, 'import numpy as np\n'), ((778, 796), 'numpy.gradient', 'np.gradient', (['xaxis'], {}), '(xaxis)\n', (789, 796), True, 'import numpy as np\n'), ((819, 837), 'numpy.gradient', 'np.gradient', (['zaxis'], {}), '(zaxis)\n', (830, 837), True, 'import numpy as np\n'), ((859, 897), 'numpy.gradient', 'np.gradient', (['arr[..., 0]', 'dz'], {'axis': 'zax'}), '(arr[..., 0], dz, axis=zax)\n', (870, 897), True, 'import numpy as np\n'), ((900, 938), 'numpy.gradient', 'np.gradient', (['arr[..., 2]', 'dx'], {'axis': 'xax'}), '(arr[..., 2], dx, axis=xax)\n', (911, 938), True, 'import numpy as np\n'), ((1062, 1080), 'numpy.gradient', 'np.gradient', (['xaxis'], {}), '(xaxis)\n', (1073, 1080), True, 'import numpy as np\n'), ((1103, 1121), 'numpy.gradient', 'np.gradient', (['yaxis'], {}), '(yaxis)\n', (1114, 1121), True, 'import numpy as np\n'), ((1143, 1181), 'numpy.gradient', 'np.gradient', (['arr[..., 1]', 'dx'], {'axis': 'xax'}), '(arr[..., 1], dx, axis=xax)\n', (1154, 1181), True, 'import numpy as np\n'), ((1184, 1222), 'numpy.gradient', 'np.gradient', (['arr[..., 0]', 'dy'], {'axis': 'yax'}), '(arr[..., 0], dy, axis=yax)\n', (1195, 1222), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 12:54:35 2018
@author: Eric
The purpose of this program is to split train and test files.
"""
import numpy as np
import pandas as pd
def load_test_docs(type_doc):
    """Return the article ids (as an int ndarray) for the given split.

    `type_doc` is one of 'test'/'train'/'validation'.
    """
    path = "..//.//annotations//splits//" + type_doc + "_article_ids.txt"
    return np.loadtxt(path, delimiter=" ", dtype=int)
def load_data(loc):
    """Read the csv file at `loc` and return its rows as a numpy array.

    Missing values are replaced with the empty string. (Bug fixed: the
    previous version called ``df.fillna("")`` but discarded the result --
    ``fillna`` returns a new frame unless ``inplace=True`` -- so NaNs
    leaked into the returned array.)
    """
    df = pd.read_csv(loc, engine="python", encoding="utf-8")
    df = df.fillna("")  # assign the result back; fillna is not in-place
    return np.asarray(df)
def save_data(data, loc, header):
    """Save `data` as a csv file at `loc` using `header` as column names.

    NaN values are written as empty strings, which is pandas' default
    ``na_rep`` for ``to_csv``. (The old ``df.fillna("")`` call discarded
    its return value and was therefore a no-op; it has been removed.)
    """
    df = pd.DataFrame(data=data, columns=header)
    df.to_csv(loc, index=False, encoding='utf-8')
    return None
def remove_test_file(loc, save_loc, x, header, type_doc):
    """
    Drop every row whose document id belongs to the `type_doc` split and
    save the remaining rows.

    @param loc is the location of the input file.
    @param save_loc is where the filtered file is saved.
    @param x is the position within a row where the document id is located.
    @param header is the header to use when saving the file.
    @param type_doc is one of train/test/validation.
    """
    excluded_ids = load_test_docs(type_doc)
    kept = [row for row in load_data(loc) if row[x] not in excluded_ids]
    save_data(kept, save_loc, header)
    return None
def main(type_doc):
    """Strip the `type_doc` split's rows out of every annotations/prompts csv."""
    loc = '.././annotations/'
    # column headers for the two kinds of csv files
    ha = ["UserID", "PromptID", "PMCID", "Valid Label", "Valid Reasoning",
          "Label", "Annotations", "Label Code", "In Abstract",
          "Evidence Start", "Evidence End"]
    hp = ["PromptID", "PMCID", "Outcome", "Intervention", "Comparator"]
    # (file kind, column index holding the document id, header to write)
    for kind, id_col, header in (('annotations', 2, ha), ('prompts', 1, hp)):
        for variant in ('merged', 'doctor_generated', 'pilot_run'):
            src = loc + kind + '_' + variant + '.csv'
            dst = loc + type_doc + '_' + kind + '_' + variant + '.csv'
            remove_test_file(src, dst, id_col, header, type_doc)
if __name__ == '__main__':
    # Regenerate the filtered annotation/prompt csvs for every split.
    types = ['test', 'train', 'validation']
    for t in types:
        main(t)
"pandas.read_csv",
"numpy.asarray",
"numpy.loadtxt",
"pandas.DataFrame"
] | [((369, 410), 'numpy.loadtxt', 'np.loadtxt', (['loc'], {'delimiter': '""" """', 'dtype': 'int'}), "(loc, delimiter=' ', dtype=int)\n", (379, 410), True, 'import numpy as np\n'), ((494, 545), 'pandas.read_csv', 'pd.read_csv', (['loc'], {'engine': '"""python"""', 'encoding': '"""utf-8"""'}), "(loc, engine='python', encoding='utf-8')\n", (505, 545), True, 'import pandas as pd\n'), ((577, 591), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (587, 591), True, 'import numpy as np\n'), ((699, 738), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'header'}), '(data=data, columns=header)\n', (711, 738), True, 'import pandas as pd\n')] |
import time
import numpy as np
## Target maker
# Configuration for the component that builds prediction targets.
target_maker_args = {
    # 'future_steps': [1, 2, 4, 8, 16, 32],
    # 'meas_to_predict': [0, 1, 2],
    'min_num_targs': 3,
    'rwrd_schedule_type': 'exp',
    'gammas': [],
    'invalid_targets_replacement': 'nan',
    # 'invalid_targets_replacement': 'last_valid',
}
## Experience
# Train experience (replay memory) settings.
train_experience_args = {
    'memory_capacity': 10000,  # TODO: automatically set as num_simulators*2500
    'default_history_length': 1,
    'history_lengths': {},
    'history_step': 1,
    'action_format': 'enumerate',
    'shared': False,
    'meas_statistics_gamma': 0.,
    'num_prev_acts_to_return': 0,
}
# Test policy experience: identical except for its capacity, which has to
# exceed the maximum possible number of test policy steps.
test_experience_args = dict(train_experience_args)
test_experience_args['memory_capacity'] = 10000
# test_experience_args['memory_capacity'] = 20000
## Agent
agent_args = {}
# agent type
agent_args['agent_type'] = 'advantage'
# preprocessing: per-modality input scaling applied before the network.
# color/audio/depth are shifted to be roughly zero-centred (assuming
# byte-valued inputs -- TODO confirm); force is scaled by 1/700;
# the remaining modalities pass through unchanged.
agent_args['preprocess_sensory'] = {'color': lambda x: x / 255. - 0.5,
                                    'segEnnemies' : lambda x: x,
                                    'segMedkit' : lambda x : x,
                                    'segClip' : lambda x : x,
                                    'measurements': lambda x: x,
                                    'audio': lambda x: x / 255. - 0.5,
                                    'audiopath': lambda x: x,
                                    'force': lambda x: x / 700,
                                    'actions': lambda x: x,
                                    'depth': lambda x: x / 10.0 - 0.5,
                                    'roomType': lambda x: x,
                                    'goalRoomType': lambda x: x}
agent_args['preprocess_input_targets'] = lambda x: x
agent_args['postprocess_predictions'] = lambda x: x
agent_args['discrete_controls_manual'] = []
agent_args['opposite_button_pairs'] = []
# agent_args['opposite_button_pairs'] = [(0,1)]
agent_args['onehot_actions_only'] = True
# agent properties
# weights over the temporal prediction horizons (later horizons weigh more)
agent_args['objective_coeffs_temporal'] = np.array([0., 0., 0., 0.5, 0.5, 1.])
agent_args['new_memories_per_batch'] = 8
agent_args['add_experiences_every'] = 1
agent_args['random_objective_coeffs'] = False
agent_args['objective_coeffs_distribution'] = 'none'
# exploration probability: ~0.99 at step 0, decaying towards the 0.02 floor
agent_args['random_exploration_schedule'] = lambda step: (0.02 + 72500. / (float(step) + 75000.))
# optimization parameters
agent_args['batch_size'] = 64
agent_args['init_learning_rate'] = 0.0002
agent_args['lr_step_size'] = 125000
agent_args['lr_decay_factor'] = 0.3
agent_args['adam_beta1'] = 0.95
agent_args['adam_epsilon'] = 1e-4
agent_args['optimizer'] = 'Adam'
agent_args['reset_iter_count'] = True
agent_args['clip_gradient'] = 0  # 0 presumably disables gradient clipping -- TODO confirm
# directories
agent_args['checkpoint_dir'] = 'checkpoints'
agent_args['init_model'] = ''
agent_args['model_name'] = "predictor.model"
# logging and testing (all intervals in training iterations)
agent_args['print_err_every'] = 50
agent_args['detailed_summary_every'] = 1000
agent_args['test_policy_every'] = 10000
agent_args['checkpoint_every'] = 10000
agent_args['save_param_histograms_every'] = 5000
agent_args['test_random_policy_before_training'] = True
# net parameters
# conv specs: one row per layer as (out_channels, kernel, stride);
# fc specs: one row per layer as (out_dims,)
agent_args['img_conv_params'] = np.array([(32,8,4), (64,4,2), (64,3,1)], dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
agent_args['img_fc_params'] = np.array([(512,)], dtype = [('out_dims',int)])
agent_args['depth_conv_params'] = np.array([(32,8,4), (64,4,2), (64,3,1)], dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
agent_args['depth_fc_params'] = np.array([(512,)], dtype = [('out_dims',int)])
agent_args['audio_fc_params'] = np.array([(512,)], dtype = [('out_dims',int)])
agent_args['goalroomtype_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['roomtype_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['infer_roomtype_fc_params'] = np.array([(512,), (-1,)], dtype = [('out_dims',int)]) # we put -1 here because it will be automatically replaced when creating the net
agent_args['actions_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['obj_fc_params'] = None
agent_args['meas_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['force_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['audiopath_fc_params'] = np.array([(128,), (128,), (128,)], dtype = [('out_dims',int)])
agent_args['joint_fc_params'] = np.array([(512,), (-1,)], dtype = [('out_dims',int)]) # we put -1 here because it will be automatically replaced when creating the net
agent_args['infer_meas_fc_params'] = np.array([(512,), (-1,)], dtype = [('out_dims',int)]) # we put -1 here because it will be automatically replaced when creating the net
agent_args['weight_decay'] = 0.00000
agent_args['segEnnemies_fc_params'] = np.array([(512,)], dtype = [('out_dims',int)])
agent_args['segMedkit_fc_params'] = np.array([(512,)], dtype = [('out_dims',int)])
agent_args['segClip_fc_params'] = np.array([(512,)],dtype = [('out_dims',int)])
agent_args['segEnnemies_conv_params'] = np.array([(32,8,4), (64,4,2), (64,3,1)], dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
agent_args['segMedkit_conv_params'] = np.array([(32,8,4), (64,4,2), (64,3,1)], dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
agent_args['segClip_conv_params'] = np.array([(32,8,4), (64,4,2), (64,3,1)], dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
agent_args['unet_params'] = np.array([(4,2,2), (8,2,2), (16,2,2)],
                                      dtype = [('out_channels',int), ('kernel',int), ('stride',int)])
## Experiment
experiment_args = {}
experiment_args['num_train_iterations'] = 820000 #820000
# experiment_args['test_random_prob'] = 0.1
# probability of taking a random action while evaluating the test policy
experiment_args['test_random_prob'] = 0.1
experiment_args['test_policy_num_steps'] = 1000
# evaluate with the same temporal objective weights used for training
experiment_args['test_objective_coeffs_temporal'] = agent_args['objective_coeffs_temporal']
experiment_args['show_predictions'] = True
experiment_args['meas_for_manual'] = [] # expected to be [AMMO2 AMMO3 AMMO4 AMMO5 AMMO6 AMMO7 WEAPON2 WEAPON3 WEAPON4 WEAPON5 WEAPON6 WEAPON7]
| [
"numpy.array"
] | [((2372, 2412), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.5, 0.5, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.5, 0.5, 1.0])\n', (2380, 2412), True, 'import numpy as np\n'), ((3498, 3613), 'numpy.array', 'np.array', (['[(32, 8, 4), (64, 4, 2), (64, 3, 1)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(32, 8, 4), (64, 4, 2), (64, 3, 1)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', int)])\n", (3506, 3613), True, 'import numpy as np\n'), ((3635, 3680), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (3643, 3680), True, 'import numpy as np\n'), ((3716, 3831), 'numpy.array', 'np.array', (['[(32, 8, 4), (64, 4, 2), (64, 3, 1)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(32, 8, 4), (64, 4, 2), (64, 3, 1)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', int)])\n", (3724, 3831), True, 'import numpy as np\n'), ((3855, 3900), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (3863, 3900), True, 'import numpy as np\n'), ((3936, 3981), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (3944, 3981), True, 'import numpy as np\n'), ((4023, 4084), 'numpy.array', 'np.array', (['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4031, 4084), True, 'import numpy as np\n'), ((4122, 4183), 'numpy.array', 'np.array', (['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4130, 4183), True, 'import numpy as np\n'), ((4226, 4278), 'numpy.array', 'np.array', (['[(512,), (-1,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,), (-1,)], dtype=[('out_dims', int)])\n", (4234, 4278), True, 'import numpy as np\n'), ((4397, 4458), 'numpy.array', 'np.array', 
(['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4405, 4458), True, 'import numpy as np\n'), ((4528, 4589), 'numpy.array', 'np.array', (['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4536, 4589), True, 'import numpy as np\n'), ((4624, 4685), 'numpy.array', 'np.array', (['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4632, 4685), True, 'import numpy as np\n'), ((4723, 4784), 'numpy.array', 'np.array', (['[(128,), (128,), (128,)]'], {'dtype': "[('out_dims', int)]"}), "([(128,), (128,), (128,)], dtype=[('out_dims', int)])\n", (4731, 4784), True, 'import numpy as np\n'), ((4818, 4870), 'numpy.array', 'np.array', (['[(512,), (-1,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,), (-1,)], dtype=[('out_dims', int)])\n", (4826, 4870), True, 'import numpy as np\n'), ((4990, 5042), 'numpy.array', 'np.array', (['[(512,), (-1,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,), (-1,)], dtype=[('out_dims', int)])\n", (4998, 5042), True, 'import numpy as np\n'), ((5202, 5247), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (5210, 5247), True, 'import numpy as np\n'), ((5287, 5332), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (5295, 5332), True, 'import numpy as np\n'), ((5368, 5413), 'numpy.array', 'np.array', (['[(512,)]'], {'dtype': "[('out_dims', int)]"}), "([(512,)], dtype=[('out_dims', int)])\n", (5376, 5413), True, 'import numpy as np\n'), ((5454, 5569), 'numpy.array', 'np.array', (['[(32, 8, 4), (64, 4, 2), (64, 3, 1)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(32, 8, 4), (64, 4, 2), (64, 3, 1)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', 
int)])\n", (5462, 5569), True, 'import numpy as np\n'), ((5597, 5712), 'numpy.array', 'np.array', (['[(32, 8, 4), (64, 4, 2), (64, 3, 1)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(32, 8, 4), (64, 4, 2), (64, 3, 1)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', int)])\n", (5605, 5712), True, 'import numpy as np\n'), ((5738, 5853), 'numpy.array', 'np.array', (['[(32, 8, 4), (64, 4, 2), (64, 3, 1)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(32, 8, 4), (64, 4, 2), (64, 3, 1)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', int)])\n", (5746, 5853), True, 'import numpy as np\n'), ((5871, 5984), 'numpy.array', 'np.array', (['[(4, 2, 2), (8, 2, 2), (16, 2, 2)]'], {'dtype': "[('out_channels', int), ('kernel', int), ('stride', int)]"}), "([(4, 2, 2), (8, 2, 2), (16, 2, 2)], dtype=[('out_channels', int),\n ('kernel', int), ('stride', int)])\n", (5879, 5984), True, 'import numpy as np\n')] |
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for reference pose tasks."""
from dm_control import mjcf
from dm_control.utils import transformations as tr
import numpy as np
def add_walker(walker_fn, arena, name='walker', ghost=False, visible=True):
  """Instantiate a walker and attach it to the arena.

  Args:
    walker_fn: Callable building the walker; invoked as walker_fn(name=name).
    arena: Arena the walker is attached to.
    name: Name forwarded to `walker_fn`.
    ghost: If True, strip the built-in tracking light (when present) and
      disable all geom contacts so the walker acts as a passive reference.
    visible: Only relevant for ghosts; False renders the ghost fully
      transparent.

  Returns:
    The constructed walker with its root joints created.
  """
  walker = walker_fn(name=name)
  if ghost:
    # A built-in tracking light would follow the ghost; drop it if present.
    tracking_light = walker._mjcf_root.find('light', 'tracking_light')  # pylint: disable=protected-access
    if tracking_light:
      tracking_light.remove()
    # alpha=0.999 keeps a visible grey reference; alpha=1.0 would make the
    # ghost indistinguishable from the real walker.
    alpha = .999 if visible else 0.0
    for geom in walker.mjcf_model.find_all('geom'):
      # Zero contype/conaffinity removes every contact for this geom.
      geom.set_attributes(
          contype=0,
          conaffinity=0,
          rgba=(0.5, 0.5, 0.5, alpha))
  walker.create_root_joints(arena.attach(walker))
  return walker
def get_qpos_qvel_from_features(features):
  """Assemble full qpos/qvel vectors from logged mocap features.

  Args:
    features: Mapping with 'position', 'quaternion', 'joints', 'velocity',
      'angular_velocity' and 'joints_velocity' entries.

  Returns:
    A (full_qpos, full_qvel) pair of 1-D arrays, concatenated in the order
    expected by `set_walker`.
  """
  qpos_keys = ('position', 'quaternion', 'joints')
  qvel_keys = ('velocity', 'angular_velocity', 'joints_velocity')
  full_qpos = np.hstack([features[key] for key in qpos_keys])
  full_qvel = np.hstack([features[key] for key in qvel_keys])
  return full_qpos, full_qvel
def set_walker_from_features(physics, walker, features, offset=0):
  """Apply logged features to the walker's freejoint and joint state."""
  full_qpos, full_qvel = get_qpos_qvel_from_features(features)
  set_walker(physics, walker, full_qpos, full_qvel, offset=offset)
def set_walker(physics, walker, qpos, qvel, offset=0, null_xyz_and_yaw=False,
               position_shift=None, rotation_shift=None):
  """Write freejoint and joint angles/velocities into the physics state.

  Args:
    physics: MuJoCo physics instance bound to the walker's model.
    walker: Walker whose freejoint and mocap joints are set.
    qpos: Full position vector (xyz, quaternion, joint angles).
    qvel: Full velocity vector (linear, angular, joint velocities).
    offset: Translation added to the root xyz position.
    null_xyz_and_yaw: If True, zero the root xyz and the yaw component of
      the root orientation before applying `offset`.
    position_shift: Optional position passed to walker.shift_pose.
    rotation_shift: Optional quaternion passed to walker.shift_pose.
  """
  qpos = np.array(qpos)
  if null_xyz_and_yaw:
    # Zero horizontal position and remove yaw from the root orientation.
    qpos[:3] = 0.
    angles = tr.quat_to_euler(qpos[3:7], ordering='ZYX')
    angles[0] = 0.
    qpos[3:7] = tr.euler_to_quat(angles, ordering='ZYX')
  qpos[:3] += offset
  root_frame = mjcf.get_attachment_frame(walker.mjcf_model)
  freejoint_binding = physics.bind(root_frame.freejoint)
  freejoint_binding.qpos = qpos[:7]
  freejoint_binding.qvel = qvel[:6]
  joints_binding = physics.bind(walker.mocap_joints)
  joints_binding.qpos = qpos[7:]
  joints_binding.qvel = qvel[6:]
  if not (position_shift is None and rotation_shift is None):
    walker.shift_pose(physics, position=position_shift,
                      quaternion=rotation_shift, rotate_velocity=True)
def get_features(physics, walker):
  """Collect the walker state features consumed by the reward functions.

  Returns:
    Dict with root pose/velocity, joint state, center of mass, end
    effector / appendage positions and per-body poses.
  """
  features = {}
  root_pos, root_quat = walker.get_pose(physics)
  features['position'] = root_pos
  features['quaternion'] = root_quat
  features['joints'] = np.array(physics.bind(walker.mocap_joints).qpos)
  freejoint_frame = mjcf.get_attachment_frame(walker.mjcf_model)
  features['center_of_mass'] = np.array(
      physics.bind(freejoint_frame).subtree_com)
  end_effectors = np.array(
      walker.observables.end_effectors_pos(physics)[:]).reshape(-1, 3)
  features['end_effectors'] = end_effectors
  # Walkers without a dedicated appendages observable reuse the end effectors.
  if hasattr(walker.observables, 'appendages_pos'):
    features['appendages'] = np.array(
        walker.observables.appendages_pos(physics)[:]).reshape(-1, 3)
  else:
    features['appendages'] = np.array(end_effectors)
  tracked_bodies = walker.mocap_tracking_bodies
  features['body_positions'] = np.array(physics.bind(tracked_bodies).xpos)
  features['body_quaternions'] = np.array(physics.bind(tracked_bodies).xquat)
  root_vel, root_angvel = walker.get_velocity(physics)
  features['velocity'] = root_vel
  features['angular_velocity'] = root_angvel
  features['joints_velocity'] = np.array(
      physics.bind(walker.mocap_joints).qvel)
  return features
| [
"dm_control.mjcf.get_attachment_frame",
"dm_control.utils.transformations.quat_to_euler",
"dm_control.utils.transformations.euler_to_quat",
"numpy.hstack",
"numpy.array"
] | [((1696, 1773), 'numpy.hstack', 'np.hstack', (["[features['position'], features['quaternion'], features['joints']]"], {}), "([features['position'], features['quaternion'], features['joints']])\n", (1705, 1773), True, 'import numpy as np\n'), ((1811, 1908), 'numpy.hstack', 'np.hstack', (["[features['velocity'], features['angular_velocity'], features[\n 'joints_velocity']]"], {}), "([features['velocity'], features['angular_velocity'], features[\n 'joints_velocity']])\n", (1820, 1908), True, 'import numpy as np\n'), ((2421, 2435), 'numpy.array', 'np.array', (['qpos'], {}), '(qpos)\n', (2429, 2435), True, 'import numpy as np\n'), ((3503, 3547), 'dm_control.mjcf.get_attachment_frame', 'mjcf.get_attachment_frame', (['walker.mjcf_model'], {}), '(walker.mjcf_model)\n', (3528, 3547), False, 'from dm_control import mjcf\n'), ((2489, 2532), 'dm_control.utils.transformations.quat_to_euler', 'tr.quat_to_euler', (['qpos[3:7]'], {'ordering': '"""ZYX"""'}), "(qpos[3:7], ordering='ZYX')\n", (2505, 2532), True, 'from dm_control.utils import transformations as tr\n'), ((2562, 2601), 'dm_control.utils.transformations.euler_to_quat', 'tr.euler_to_quat', (['euler'], {'ordering': '"""ZYX"""'}), "(euler, ordering='ZYX')\n", (2578, 2601), True, 'from dm_control.utils import transformations as tr\n'), ((2659, 2703), 'dm_control.mjcf.get_attachment_frame', 'mjcf.get_attachment_frame', (['walker.mjcf_model'], {}), '(walker.mjcf_model)\n', (2684, 2703), False, 'from dm_control import mjcf\n'), ((3974, 3997), 'numpy.array', 'np.array', (['end_effectors'], {}), '(end_effectors)\n', (3982, 3997), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import config
import time
from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer
from scipy import signal
from scipy.fftpack import fft
module = 'separable_adapter' # specific module type for universal model: series_adapter, parallel_adapter, separable_adapter
trainMode = 'universal'
from pylab import *
class disentangle_trainer(nn.Module):
def __init__(self, inChans_list=[4], base_outChans=2, style_dim=8, mlp_dim=16, num_class_list=[4], args=None): ## base_outChans=16, mlp_dim=256
super(disentangle_trainer, self).__init__()
self.inChans_list = inChans_list
self.num_class_list = num_class_list
style_dim = args.style_dim
indim2d = int(args.train_transforms.split(',')[1])
self.shared_enc_model = AdaINGen(input_dim=1, dim=base_outChans, decode_indim=base_outChans*pow(2,max(config.num_pool_per_axis)), style_dim=style_dim, mlp_dim=mlp_dim, num_class_list=[1], in_dim_2d=indim2d, auxdec_dim=args.AuxDec_dim, modal_num=inChans_list[0], args=args) #in_dim_2d=indim2d)
if inChans_list[0] == 4:
self.gen_flair = self.shared_enc_model
self.gen_t1 = self.shared_enc_model
self.gen_t1ce = self.shared_enc_model
self.gen_t2 = self.shared_enc_model
elif inChans_list[0] == 2:
self.m1 = self.shared_enc_model
self.m2 = self.shared_enc_model
_channels = base_outChans*(pow(2,max(config.num_pool_per_axis))) # 32
_outChans_list = [base_outChans*pow(2,i) for i in range(1+max(config.num_pool_per_axis))] # [8, 16, 32, 64]
if args.use_style_map:
self.target_gen = Target_transform(n_res=4, dim=_channels, output_dim=_channels, res_norm='adain', activ='LeakyReLU', args=args) # target style transform
else:
self.target_gen = Target_transform(n_res=4, dim=_channels, output_dim=_channels, res_norm='in', activ='LeakyReLU', args=args)
self.seg_main_decoder = Decoder(inChans=_channels, outChans_list=_outChans_list, concatChan_list=_outChans_list, num_class_list=num_class_list, norm=None, use_distill=True, use_kd=args.use_kd, args=args)
_outChans_list_small = [args.AuxDec_dim*pow(2,i) for i in range(1+max(config.num_pool_per_axis))] # [8, 16, 32, 64]
self.binary_dec = Decoder(inChans=_channels, outChans_list=_outChans_list_small, concatChan_list=_outChans_list, num_class_list=num_class_list, norm=None, use_distill=True, use_kd=args.use_kd, args=args) ###
self.mlp_s_fusion = MLP(inChans_list[0], num_class_list[0], 16, 3, norm='none', activ='none')
self.mlp_s_map0 = MLP(style_dim, self.get_num_adain_params(self.target_gen), mlp_dim, 3, norm='none', activ='relu')
self.mlp_s_map1 = MLP(style_dim, self.get_num_adain_params(self.target_gen), mlp_dim, 3, norm='none', activ='relu')
self.mlp_s_map2 = MLP(style_dim, self.get_num_adain_params(self.target_gen), mlp_dim, 3, norm='none', activ='relu')
self.mlp_s_map4 = MLP(style_dim, self.get_num_adain_params(self.target_gen), mlp_dim, 3, norm='none', activ='relu')
self.c_fusion = nn.Conv3d(_channels, _channels, kernel_size=1, stride=1, padding=0) # 64*4
self.target_fusion1 = nn.Conv3d(_channels*num_class_list[0], _channels*2, kernel_size=1, stride=1, padding=0) # 64*4
self.se = SELayer(_channels*2) # self Channel-attention
self.target_fusion2 = nn.Conv3d(_channels*2, _channels, kernel_size=1, stride=1, padding=0) # 64*4
self.args = args
self.distill_fea_fuse = nn.Conv3d(args.fea_dim*4, args.fea_dim, kernel_size=3, stride=1, padding=1) # To fuse the 4-features of Binary decoder when distilling features
def assign_adain_params(self, adain_params, model):
'''
Adopt AdaIN layer to fuse the style-aware information and feature maps
assign the adain_params to the AdaIN layers in model
'''
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm3d":
mean = adain_params[:, :m.num_features]
std = adain_params[:, m.num_features:2*m.num_features]
m.bias = mean.contiguous().view(-1)
m.weight = std.contiguous().view(-1)
if adain_params.size(1) > 2*m.num_features:
adain_params = adain_params[:, 2*m.num_features:]
def get_num_adain_params(self, model):
'''Return the number of AdaIN parameters needed by the model
'''
num_adain_params = 0
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm3d":
num_adain_params += 2*m.num_features
return num_adain_params
def forward(self, x, complete_x=None, is_test=True):
if self.args.miss_modal == True and self.args.avg_imputation == True:
avaliable_list = [0] * x.shape[1]
for i in range(x.shape[1]):
if x[:,i,...].sum() != 0:
avaliable_list[i] = 1
avaliable_modality = x.sum(axis=1)/sum(avaliable_list)
for idx in avaliable_list:
if idx == 0:
x[:,idx,...] = avaliable_modality
modality_num = self.inChans_list[0]
t0=time.time()
if self.args.dataset == 'ProstateDataset':
x_m1, x_m2 = torch.unsqueeze(x[:,0,...],1),torch.unsqueeze(x[:,1,...],1)
elif self.args.dataset == 'BraTSDataset':
x_flair, x_t1, x_t1ce, x_t2 = torch.unsqueeze(x[:,0,...],1),torch.unsqueeze(x[:,1,...],1),torch.unsqueeze(x[:,2,...],1),torch.unsqueeze(x[:,3,...],1)
if complete_x != None:
x_flair_complete, x_t1_complete, x_t1ce_complete, x_t2_complete = torch.unsqueeze(complete_x[:,0,...],1),torch.unsqueeze(complete_x[:,1,...],1),torch.unsqueeze(complete_x[:,2,...],1),torch.unsqueeze(complete_x[:,3,...],1)
bs,w = x.shape[0], x.shape[-1]
x_parallel = torch.unsqueeze(torch.cat([x[:,i,...] for i in range(x.shape[1])],0), 1) # [bx4,1,h,w,d]
c_fusion, style_fake_parallel, style_fea_map, enc_list_parallel = self.shared_enc_model.encode(x_parallel, x) # style_fea_map: [1024, 8, 64, 64] -- freq_fea_map
_len = len(enc_list_parallel)
if self.inChans_list[0] == 2:
enc_list_m1, enc_list_m2 = [enc_list_parallel[i][0:bs] for i in range(_len)], [enc_list_parallel[i][bs:2*bs] for i in range(_len)]
elif self.inChans_list[0] == 4:
enc_list_flair, enc_list_t1, enc_list_t1ce, enc_list_t2 = [enc_list_parallel[i][0:bs] for i in range(_len)], [enc_list_parallel[i][bs:2*bs] for i in range(_len)], [enc_list_parallel[i][2*bs:3*bs] for i in range(_len)], [enc_list_parallel[i][3*bs:4*bs] for i in range(_len)]
s_piece_bs = torch.cat([torch.unsqueeze(style_fake_parallel[modality_num*bs*i:modality_num*bs*(i+1),...],1) for i in range(w)], 1) # [bx4,128,8,1,1]
s_piece = torch.cat([torch.unsqueeze(s_piece_bs[bs*i:bs*(i+1),...],1) for i in range(modality_num)], 1) # [b,4,128,8,1,1]
if self.args.use_freq_map:
s_fea_piece_bs = torch.cat([torch.unsqueeze(style_fea_map[modality_num*bs*i:modality_num*bs*(i+1),...],1) for i in range(w)], 1) # [bx4,128,8,64,64]
s_fea_piece = torch.cat([torch.unsqueeze(s_fea_piece_bs[bs*i:bs*(i+1),...],1) for i in range(modality_num)], 1) # [2, 4, 128, 8, 64, 64]
s_flair_fea, s_t1_fea, s_t1ce_fea, s_t2_fea = s_fea_piece[:,0,...], s_fea_piece[:,1,...], s_fea_piece[:,2,...], s_fea_piece[:,3,...] # [b,128,8,64,64]
if self.inChans_list[0] == 4:
s_flair, s_t1, s_t1ce, s_t2 = s_piece[:,0,...], s_piece[:,1,...], s_piece[:,2,...], s_piece[:,3,...] # [b,128,8,1,1]
elif self.inChans_list[0] == 2:
s_m1, s_m2 = s
_piece[:,0,...], s_piece[:,1,...]
c_fusion = self.c_fusion(c_fusion)
# Fusion for middle-featuremaps (to be concat):
enc_list = enc_list_parallel
if self.inChans_list[0] == 2:
rec_m1 = s_m1.mean(1), enc_list_m1
rec_m2 = s_m2.mean(1), enc_list_m2
s_fusion = torch.cat([s_m1.mean(1),s_m2.mean(1)],axis=2) ###
elif self.inChans_list[0] == 4:
rec_flair = s_flair.mean(1), enc_list_flair
rec_t1 = s_t1.mean(1), enc_list_t1
rec_t1ce = s_t1ce.mean(1), enc_list_t1ce
rec_t2 = s_t2.mean(1), enc_list_t2
s_fusion = torch.cat([s_flair.mean(1),s_t1.mean(1),s_t1ce.mean(1),s_t2.mean(1)],axis=2) ###
b,w,h=s_fusion.shape[:3]
_s_fusion = s_fusion.new_empty((b,w,self.num_class_list[0])).cuda()
for i in range(s_fusion.shape[0]):
_s_fusion[i,:,:] = self.mlp_s_fusion(s_fusion[i,...])
s_fusion = _s_fusion.permute(0,2,1)
# Get learned target-style-aware features to be input the final segmentor:
if self.args.use_style_map:
if self.inChans_list[0] == 2:
adain_params = self.mlp_s_map0(s_fusion[:,0,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea0 = self.target_gen(c_fusion)
adain_params = self.mlp_s_map1(s_fusion[:,1,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea1 = self.target_gen(c_fusion)
adain_params = self.mlp_s_map2(s_fusion[:,2,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea2 = self.target_gen(c_fusion)
target_fea = torch.cat([target_fea0, target_fea1, target_fea2],axis=1)
elif self.inChans_list[0] == 4:
adain_params = self.mlp_s_map0(s_fusion[:,0,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea0 = self.target_gen(c_fusion)
adain_params = self.mlp_s_map1(s_fusion[:,1,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea1 = self.target_gen(c_fusion)
adain_params = self.mlp_s_map2(s_fusion[:,2,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea2 = self.target_gen(c_fusion)
adain_params = self.mlp_s_map4(s_fusion[:,3,:])
self.assign_adain_params(adain_params, self.target_gen)
target_fea4 = self.target_gen(c_fusion)
target_fea = torch.cat([target_fea0, target_fea1, target_fea2, target_fea4],axis=1)
else:
target_fea0 = self.target_gen(c_fusion)
target_fea1 = self.target_gen(c_fusion)
target_fea2 = self.target_gen(c_fusion)
target_fea4 = self.target_gen(c_fusion)
target_fea = torch.cat([target_fea0, target_fea1, target_fea2, target_fea4],axis=1)
target_fea_tmp = self.target_fusion1(target_fea)
target_fea = self.se(target_fea_tmp) + target_fea_tmp
target_fea = self.target_fusion2(target_fea)
# Get segmentation prediction:
if self.args.use_kd:
seg_out, deep_sup_fea_all, distill_kd_fea_all, distill_fea_all = self.seg_main_decoder(target_fea, enc_list) ####### KD
else:
seg_out, deep_sup_fea_all, distill_fea_all = self.seg_main_decoder(target_fea, enc_list) ####### KD
if is_test == True:
return [seg_out]
bs = target_fea0.shape[0]
if self.inChans_list[0] == 2:
fea_parallel = torch.cat([target_fea0,target_fea1,target_fea2],0) # [b*4,32,8,8,8]
enc_list_parallel = [torch.cat([enc_list[i],enc_list[i],enc_list[i]],0) for i in range(len(enc_list))]
elif self.inChans_list[0] == 4:
fea_parallel = torch.cat([target_fea0,target_fea1,target_fea2,target_fea4],0) # [b*4,32,8,8,8]
enc_list_parallel = [torch.cat([enc_list[i],enc_list[i],enc_list[i],enc_list[i]],0) for i in range(len(enc_list))]
# [bx4,2,128,128,128], [bx4,4,64,64,64], [bx4,8,32,32,32], [bx4,16,16,16,16]
if self.args.use_kd:
binary_seg_out, deep_sup_fea_bin, distill_kd_fea_bin, distill_fea = self.binary_dec(fea_parallel, enc_list_parallel, is_binary=True) ###### KD
else:
binary_seg_out, deep_sup_fea_bin, distill_fea = self.binary_dec(fea_parallel, enc_list_parallel, is_binary=True) ###### KD
if self.inChans_list[0] == 2:
binary_seg_out0, distill_fea0 = binary_seg_out[0:bs], distill_fea[0:bs]
binary_seg_out1, distill_fea1 = binary_seg_out[bs:2*bs], distill_fea[bs:2*bs]
binary_seg_out2, distill_fea2 = binary_seg_out[2*bs:3*bs], distill_fea[2*bs:3*bs]
binary_seg_out_all = torch.cat([binary_seg_out0,binary_seg_out1,binary_seg_out2],axis=1)
elif self.inChans_list[0] == 4:
binary_seg_out0, distill_fea0 = binary_seg_out[0:bs], distill_fea[0:bs]
binary_seg_out1, distill_fea1 = binary_seg_out[bs:2*bs], distill_fea[bs:2*bs]
binary_seg_out2, distill_fea2 = binary_seg_out[2*bs:3*bs], distill_fea[2*bs:3*bs]
binary_seg_out4, distill_fea4 = binary_seg_out[3*bs:4*bs], distill_fea[3*bs:4*bs]
binary_seg_out_all = torch.cat([binary_seg_out0,binary_seg_out1,binary_seg_out2,binary_seg_out4],axis=1)
if self.args.use_kd:
if self.inChans_list[0] == 4:
# Distill logit&feature of Binary seg decoder:
distill_bin_logit1 = [deep_sup_fea_bin[i][0:bs] for i in range(len(deep_sup_fea_bin))] # list: 3*[b,1,...]
distill_bin_logit2 = [deep_sup_fea_bin[i][bs:2*bs] for i in range(len(deep_sup_fea_bin))]
distill_bin_logit3 = [deep_sup_fea_bin[i][2*bs:3*bs] for i in range(len(deep_sup_fea_bin))]
distill_bin_logit4 = [deep_sup_fea_bin[i][3*bs:4*bs] for i in range(len(deep_sup_fea_bin))]
distill_bin_fea1 = [distill_kd_fea_bin[i][0:bs] for i in range(len(distill_kd_fea_bin))] # list: 3*[b,dim=8,...]
distill_bin_fea2 = [distill_kd_fea_bin[i][bs:2*bs] for i in range(len(distill_kd_fea_bin))]
distill_bin_fea3 = [distill_kd_fea_bin[i][2*bs:3*bs] for i in range(len(distill_kd_fea_bin))]
distill_bin_fea4 = [distill_kd_fea_bin[i][3*bs:4*bs] for i in range(len(distill_kd_fea_bin))]
# Distill logit&feature of Main seg decoder:
distill_main_logit = deep_sup_fea_all # list: 3*[b,4,...]
distill_main_fea = distill_kd_fea_all # list: 3*[b,dim=8,...]
# Forward to obtain the fused distill_feature according to the 4-binary features:
distill_bin_fea_total = [self.distill_fea_fuse(torch.cat([distill_bin_fea1[i],distill_bin_fea2[i],distill_bin_fea3[i],distill_bin_fea4[i]],1)) for i in range(len(distill_bin_fea1))]
if self.args.use_distill: # Compute the logit distillatoin loss
if self.inChans_list[0] == 2:
distill_loss = sum([self.l2_loss(torch.unsqueeze(distill_fea_all[:,i,...],1), j) for i,j in enumerate([distill_fea0,distill_fea1])])/2
elif self.inChans_list[0] == 4:
distill_loss = sum([self.l2_loss(torch.unsqueeze(distill_fea_all[:,i,...],1), j) for i,j in enumerate([distill_fea0,distill_fea1,distill_fea2,distill_fea4])])/4
else:
distill_loss = torch.Tensor([0.0]).cuda()
if self.args.use_kd: # Compute the knowledge distillatoin loss
assert self.inChans_list[0] == 4
kd_logit_loss = torch.Tensor([0.0]).cuda()
for idx in range(len(distill_main_logit)):
kd_logit_loss += sum([self.distill_kl(torch.unsqueeze(distill_main_logit[idx][:,i,...],1), j) for i,j in enumerate([distill_bin_logit1[idx],distill_bin_logit2[idx],distill_bin_logit3[idx],distill_bin_logit4[idx]])]) / 4
kd_logit_loss /= len(distill_main_logit)
kd_fea_loss_spatial = sum([self.l2_loss(distill_main_fea[i], distill_bin_fea_total[i], channel_wise=False) for i in range(len(distill_bin_fea1))])
kd_loss = [kd_logit_loss[0]*self.args.kd_logit_w, kd_fea_loss_spatial]
if self.args.kd_dense_fea_attn == True:
kd_fea_attn_loss = sum([self.kd_channel_attn(distill_main_fea[i], distill_bin_fea_total[i]) for i in range(len(distill_bin_fea1))])
kd_loss[1] = kd_fea_attn_loss
if self.args.affinity_kd == True: # Compute the affinity-guided knowledge distillatoin loss
affinity_kd_fea_loss = self.affinity_kd(distill_main_fea, distill_bin_fea_total) # Compute feature KD loss
kd_loss[0] = affinity_kd_fea_loss * self.args.self_distill_logit_w
logits = [distill_bin_logit1,distill_bin_logit2,distill_bin_logit3,distill_bin_logit4]
distill_bin_logit = []
for i in range(len(distill_bin_logit1)):
distill_bin_logit.append(torch.cat([logits[0][0],logits[1][0],logits[2][0],logits[3][0]], 1))
affinity_kd_logit_loss = self.affinity_kd(distill_main_logit, distill_bin_logit) # Compute logit KD loss
kd_loss[1] = affinity_kd_logit_loss * self.args.self_distill_fea_w
if self.args.self_distill == True: # Compute self-distillation KD loss on feature and logit levels
kd_self_distill_loss_fea_main = self.l2_loss(distill_main_fea[0], nn.MaxPool3d(2, stride=2)(distill_main_fea[1]), channel_wise=False) + \
self.l2_loss(distill_main_fea[0], nn.MaxPool3d(4, stride=4)(distill_main_fea[2]), channel_wise=False)
kd_self_distill_loss_fea_bin = self.l2_loss(distill_bin_fea_total[0], nn.MaxPool3d(2, stride=2)(distill_bin_fea_total[1]), channel_wise=False) + \
self.l2_loss(distill_bin_fea_total[0], nn.MaxPool3d(4, stride=4)(distill_bin_fea_total[2]), channel_wise=False)
kd_self_distill_loss_logit_main = self.distill_kl(distill_main_logit[0], nn.MaxPool3d(2, stride=2)(distill_main_logit[1])) + \
self.distill_kl(distill_main_logit[0], nn.MaxPool3d(4, stride=4)(distill_main_logit[2]))
bin_logit_layer1 = torch.cat([distill_bin_logit1[0],distill_bin_logit2[0],distill_bin_logit3[0],distill_bin_logit4[0]],1)
bin_logit_layer2 = torch.cat([distill_bin_logit1[1],distill_bin_logit2[1],distill_bin_logit3[1],distill_bin_logit4[1]],1)
bin_logit_layer3 = torch.cat([distill_bin_logit1[2],distill_bin_logit2[2],distill_bin_logit3[2],distill_bin_logit4[2]],1)
kd_self_distill_loss_logit_bin = self.distill_kl(bin_logit_layer1, nn.MaxPool3d(2, stride=2)(bin_logit_layer2)) + \
self.distill_kl(bin_logit_layer1, nn.MaxPool3d(4, stride=4)(bin_logit_layer3))
kd_self_distill_fea = kd_self_distill_loss_fea_main + kd_self_distill_loss_fea_bin
kd_self_distill_logit = kd_self_distill_loss_logit_main + kd_self_distill_loss_logit_bin
kd_loss[0] += kd_self_distill_logit * self.args.self_distill_logit_w
kd_loss[1] += kd_self_distill_fea * self.args.self_distill_fea_w
if self.args.kd_channel_attn == True: # Compute channel-attention based KD loss
kd_fea_attn_loss = torch.Tensor([0.0]).cuda()
kd_fea_loss_channel = sum([self.l2_loss(distill_main_fea[i],distill_bin_fea_total[i],channel_wise=True) for i in range(len(distill_bin_fea1))])
kd_loss[1] += self.args.kd_fea_channel_w*kd_fea_loss_channel
else:
kd_loss = [torch.Tensor([0.0]).cuda(),torch.Tensor([0.0]).cuda()]
# Constrastive Loss for style_vec: group conv on one dimension of style_enc to get non-overlap style-vec
if self.inChans_list[0] == 4:
tmp_s = torch.cat([torch.unsqueeze(s_flair,1), torch.unsqueeze(s_t1,1), torch.unsqueeze(s_t1ce,1), torch.unsqueeze(s_t2,1)],1) # [b,4,128,8,1,1]
elif self.inChans_list[0] == 2:
tmp_s = torch.cat([torch.unsqueeze(s_m1,1), torch.unsqueeze(s_m2,1)],1)
tmp_s = torch.squeeze(tmp_s,-1)
tmp_s = torch.squeeze(tmp_s,-1) # [b,4,128,8]
if self.args.use_contrast:
contrastive_loss = self.contrastive_module(tmp_s, t=0.07) * self.args.contrast_w
else:
contrastive_loss = torch.Tensor([0.0]).cuda()
if self.args.use_freq_channel:
freq_loss = self.freq_filter_loss(tmp_s) * self.args.freq_w
else:
freq_loss = torch.Tensor([0.0]).cuda()
if self.args.use_freq_contrast:
freq_loss = self.freq_contrast_loss(tmp_s) * self.args.freq_w
else:
freq_loss = torch.Tensor([0.0]).cuda()
if self.args.use_freq_map:
torch.cat([torch.unsqueeze(s_flair_fea,1), torch.unsqueeze(s_t1_fea,1), torch.unsqueeze(s_t1ce_fea,1), torch.unsqueeze(s_t2_fea,1)],1) # [b,4,128,8,64,64]
if self.inChans_list[0] == 4:
if complete_x != None:
weight_recon_loss, weight_kl_loss, weight_recon_c_loss, weight_recon_s_loss, seg_aux = self.gen_update(x_flair, x_t1, x_t1ce, x_t2, rec_flair, rec_t1, rec_t1ce, rec_t2, c_fusion, enc_list, x_flair_complete, x_t1_complete, x_t1ce_complete, x_t2_complete, self.args)
else:
weight_recon_loss, weight_kl_loss, weight_recon_c_loss, weight_recon_s_loss, seg_aux = self.gen_update(x_flair, x_t1, x_t1ce, x_t2, rec_flair, rec_t1, rec_t1ce, rec_t2, c_fusion, enc_list, self.args)
elif self.inChans_list[0] == 2:
weight_recon_loss, weight_kl_loss, weight_recon_c_loss, weight_recon_s_loss, seg_aux = self.gen_update(x_m1, x_m2, None, None, rec_m1, rec_m2, None, None, c_fusion, enc_list, self.args)
return seg_out, binary_seg_out_all, deep_sup_fea_all, weight_recon_loss, weight_kl_loss, weight_recon_c_loss, weight_recon_s_loss, distill_loss, kd_loss, contrastive_loss, freq_loss, seg_aux
    def gen_update(self, x_flair, x_t1, x_t1ce, x_t2, rec_flair, rec_t1, rec_t1ce, rec_t2, c_fusion, enc_list, x_flair_complete=None, x_t1_complete=None, x_t1ce_complete=None, x_t2_complete=None, args=None):
        """Compute generator reconstruction/KL losses plus an auxiliary seg head.

        For the 2-modality configuration the m1/m2 inputs arrive through the
        x_flair/x_t1 and rec_flair/rec_t1 parameter slots; the remaining
        modality slots are None.  When the *_complete targets are supplied,
        they replace the (possibly imputed) inputs as reconstruction targets.

        Returns:
            (weighted recon loss, weighted KL loss, zero content-recon loss,
             zero style-recon loss, auxiliary segmentation prediction).
        """
        # encode
        if self.inChans_list[0] == 4:
            # Each rec_* is a (style code, per-modality skip features) pair.
            s_flair, enc_list_flair = rec_flair # [content, style]
            s_t1, enc_list_t1 = rec_t1
            s_t1ce, enc_list_t1ce = rec_t1ce
            s_t2, enc_list_t2 = rec_t2
            # Decode all modalities in one parallel batch from the shared
            # content code, each with its own style code.
            c_fusion_parallel = torch.cat([c_fusion,c_fusion,c_fusion,c_fusion],0) # [bx4,64,4,4,4]
            s_parallel = torch.cat([s_flair,s_t1,s_t1ce,s_t2],0) # [bx4,8,1,1]
            enc_list_parallel = [torch.cat([enc_list[i],enc_list[i],enc_list[i],enc_list[i]],0) for i in range(len(enc_list))] #[bx4,2,128,128,128]
            recon_all = self.shared_enc_model.decode(c_fusion_parallel, s_parallel, enc_list_parallel)[0] #
            bs = c_fusion.shape[0]
            flair_recon, t1_recon, t1ce_recon, t2_recon = recon_all[0:bs], recon_all[bs:2*bs], recon_all[2*bs:3*bs], recon_all[3*bs:4*bs]
        elif self.inChans_list[0] == 2:
            s_m1, enc_list_m1 = rec_flair # [content, style]
            s_m2, enc_list_m2 = rec_t1
            c_fusion_parallel = torch.cat([c_fusion,c_fusion],0) # [bx4,64,4,4,4]
            s_parallel = torch.cat([s_m1,s_m2],0) # [bx4,8,1,1]
            enc_list_parallel = [torch.cat([enc_list[i],enc_list[i]],0) for i in range(len(enc_list))] #[bx4,2,128,128,128]
            recon_all = self.shared_enc_model.decode(c_fusion_parallel, s_parallel, enc_list_parallel)[0] #
            bs = c_fusion.shape[0]
            m1_recon, m2_recon = recon_all[0:bs], recon_all[bs:2*bs]
        # Auxiliary Seg_prediction constrains:
        seg_aux = self.seg_main_decoder(c_fusion, enc_list)[0]
        # reconstruction loss
        if self.inChans_list[0] == 4:
            # NOTE(review): '== None' on what may be a tensor relies on the
            # NotImplemented fallback to identity; 'is None' would be clearer.
            if x_flair_complete == None:
                self.loss_gen_recon_flair = self.recon_criterion(flair_recon, x_flair)
                self.loss_gen_recon_t1 = self.recon_criterion(t1_recon, x_t1)
                self.loss_gen_recon_t1ce = self.recon_criterion(t1ce_recon, x_t1ce)
                self.loss_gen_recon_t2 = self.recon_criterion(t2_recon, x_t2)
            else:
                # Reconstruct against the complete (non-missing) targets.
                self.loss_gen_recon_flair = self.recon_criterion(flair_recon, x_flair_complete)
                self.loss_gen_recon_t1 = self.recon_criterion(t1_recon, x_t1_complete)
                self.loss_gen_recon_t1ce = self.recon_criterion(t1ce_recon, x_t1ce_complete)
                self.loss_gen_recon_t2 = self.recon_criterion(t2_recon, x_t2_complete)
            style_code = torch.cat([s_flair,s_t1,s_t1ce,s_t2],axis=2) # [b,8,4,1]
        elif self.inChans_list[0] == 2:
            self.loss_gen_recon_m1 = self.recon_criterion(m1_recon, x_flair)
            self.loss_gen_recon_m2 = self.recon_criterion(m2_recon, x_t1)
            style_code = torch.cat([s_m1,s_m2],axis=2) # [b,8,4,1]
        # Scalar mean/variance over the whole flattened style code feed the
        # KL regularizer pulling the style distribution towards N(0, 1).
        mu = torch.mean(style_code.view(-1)) # [8,4], [8], [1](all)
        var = torch.var(style_code.view(-1)) # calculate all dim as the whole var/mean
        self.kl_loss = self.compute_kl(mu, var)
        # total loss
        if self.inChans_list[0] == 4:
            self.weight_recon_loss = args.recon_w * (self.loss_gen_recon_flair+self.loss_gen_recon_t1+self.loss_gen_recon_t1ce+self.loss_gen_recon_t2)
        elif self.inChans_list[0] == 2:
            self.weight_recon_loss = args.recon_w * (self.loss_gen_recon_m1+self.loss_gen_recon_m2)
        self.weight_kl_loss = args.kl_w * self.kl_loss
        # Content/style recon losses are currently disabled (always zero).
        self.weight_recon_c_loss = torch.Tensor([0.0]).cuda()
        self.weight_recon_s_loss = torch.Tensor([0.0]).cuda()
        return self.weight_recon_loss, self.weight_kl_loss, self.weight_recon_c_loss, self.weight_recon_s_loss, seg_aux
def recon_criterion(self, input, target):
return torch.mean(torch.abs(input - target))
    def kd_channel_attn(self, s, t): # [b,c,...], only for feature kd (L2) loss
        '''
        Compute channel-attention based knowledge distillation loss.

        Each channel of the student map `s` is compared against the teacher
        map `t` (broadcast against the single-channel slice); per-channel
        squared errors are weighted by a normalized similarity score.
        Reductions over dims (2,3,4) assume 5-D inputs [b, c, d, w, h].
        '''
        kd_losses = 0
        for i in range(s.shape[1]):
            # Single-channel slice of the student, kept 5-D: [b,1,d,w,h].
            _s = torch.unsqueeze(s[:,i,...],1)
            # Per-sample mean squared error between this channel and t.
            _loss = torch.mean(torch.abs(_s - t).pow(2), (2,3,4))
            # Cosine-style similarity between the channel and the teacher map.
            score = torch.sum(_s * t, (2,3,4)) / (torch.norm(torch.flatten(_s,2), dim=(2)) * torch.norm(torch.flatten(t,2), dim=(2)) + 1e-40)
            # Rescale similarity from [-1,1] to [0,1], normalize over channels.
            score = (score + 1)/2
            _score = score / torch.unsqueeze(torch.sum(score,1),1)
            kd_loss = _score * _loss
            kd_losses += kd_loss.mean()
        return torch.mean(kd_losses)
    def affinity_kd(self, s, t):
        '''
        Compute affinity-guided knowledge distillation loss
        s: a list of feature maps or logits from student branch
        t: a list of feature maps or logits from teacher branch
        '''
        kd_losses = 0
        # Every student map is compared against every teacher map.
        for i, s_item in enumerate(s):
            for j, t_item in enumerate(t):
                # NOTE(review): `k` is never used inside this loop body, so the
                # same (s_item, t_item) term is accumulated s_item.shape[1]
                # times — this looks accidental; confirm the intended weighting.
                for k in range(s_item.shape[1]):
                    if s_item.shape != t_item.shape:
                        # Match the teacher's spatial size to the student's.
                        t_item = F.interpolate(t_item, size=s_item.shape[-3:], mode='trilinear', align_corners=False)
                    # Per-sample mean squared error between the two maps.
                    _loss = torch.mean(torch.abs(s_item - t_item).pow(2), (2,3,4))
                    # Affinity score (cosine-style similarity), guarded by 1e-40.
                    score = torch.sum(s_item * t_item, (2,3,4)) / (torch.norm(torch.flatten(s_item,2), dim=(2)) * torch.norm(torch.flatten(t_item,2), dim=(2)) + 1e-40)
                    # Rescale to [0,1] and normalize over channels.
                    score = (score + 1)/2
                    _score = score / torch.unsqueeze(torch.sum(score,1),1)
                    kd_loss = _score * _loss
                    kd_losses += kd_loss.mean()
        return torch.mean(kd_losses)
def l2_loss(self, input, target, channel_wise=False, T=1): # input/target: [b,dim=8,...]
if channel_wise == True:
bs,dim,d,w,h = input.shape
input = torch.flatten(input,2) # torch.reshape(input,(bs,dim,-1))
target = torch.flatten(target,2) # torch.reshape(target,(bs,dim,-1))
loss = F.kl_div(F.log_softmax(input/T, dim=2), F.softmax(target/T, dim=2), reduction='mean') * (T**2)
return loss
else:
return torch.mean(torch.abs(input - target).pow(2))
def distill_kl(self, y_s, y_t, T=1):
'''
vanilla distillation loss with KL divergence
'''
if y_s.shape[1] == 1:
y_s = torch.cat([y_s,torch.zeros_like(y_s)],1)
y_t = torch.cat([y_t,torch.zeros_like(y_t)],1)
p_s = F.log_softmax(y_s/T+1e-40, dim=1)
p_t = F.softmax(y_t/T, dim=1)
loss = F.kl_div((p_s), p_t, reduction='mean') * (T**2)
return loss
def compute_kl(self, mu, var):
'''
KL divergence loss
'''
if var < 1e-40:
log_var = torch.log(var+1e-40)
else:
log_var = torch.log(var)
kl_loss = - 0.5 * torch.mean(1. + log_var - mu**2 - var, axis=-1) # mu**2 / torch.square(mu)
return kl_loss
    def contrastive_module(self, data, t=0.07): # T=0.07 data=[b,4,128,8]
        """Average contrastive loss over sampled style-vector slices.

        For each modality channel, an anchor slice and the slice a quarter of
        the sequence further on act as the positive pair, while a 9-slice
        window around the anchor taken from every *other* modality supplies
        the negatives.

        Args:
            data: style vectors of shape [b, modalities, num_slices, style_dim].
            t: softmax temperature forwarded to contrastive_loss.
        """
        contrast_loss = 0.0
        # i2 is never incremented; only i1 counts the sampled anchors.
        i1, i2 = 0, 0
        bs,c,piece_num,_len = data.shape # piece_num=128
        for i in range(c):
            pos_vec = data[:,i,...] # [b,128,8]
            # Sample anchor slices every 16 steps over the first half.
            for j in range(4,piece_num//2-1,16):
                i1 += 1
                data_list = []
                data_list.append(pos_vec[:,j,:])
                # Positive: the slice a quarter of the sequence away.
                data_list.append(pos_vec[:,j+piece_num//4,:])
                # Negatives: slices j-4..j+4 from every other modality.
                neg_list = [data[:,k,...][:,j-4:j+5,:] for k in range(c) if k != i]
                data_list.extend(neg_list[idx][:,m,:] for idx in range(c-1) for m in range(neg_list[0].shape[1]))
                contrast_loss += self.contrastive_loss(data_list, t)
        return contrast_loss/(i1+i2)
def contrastive_loss(self, data_list, t=0.07): # data_list=[pos1,pos2,neg...]
'''
Compute softmax-based contrastive loss with temperature t (like Info-NCE)
'''
pos_score = self.score_t(data_list[0], data_list[1], t)
all_score = 0.0
for i in range(1,len(data_list)):
all_score += self.score_t(data_list[0], data_list[i], t)
contrast = - torch.log(pos_score/all_score+1e-5).mean()
return contrast
    def score_t(self, x, y, t=0.07): # x=[b,8]
        '''
        Compute the exponentiated similarity score between x and y with
        temperature t — the building block of the contrastive loss.
        '''
        # Debug trace: warn when either vector is near zero, since the
        # denominator then relies almost entirely on the 1e-5 guard.
        if torch.norm(x,dim=1).mean().item() <=0.001 or torch.norm(y,dim=1).mean().item() <=0.001:
            print (torch.norm(x,dim=1).mean().item(),torch.norm(y,dim=1).mean().item())
        # exp(cosine-style similarity / t), guarded against zero norms.
        return torch.exp((x*y).sum(1)/(t*(torch.norm(x,dim=1)*torch.norm(y,dim=1))+1e-5))
    def freq_filter_loss(self, data, b_low=0.1, b_high=0.9): # data: style vectors. # data: [b,4,128,8 or 16]
        '''
        Band-pass variance penalty on the style vectors.

        Each per-slice style vector is zero-phase band-pass filtered
        (2nd-order Butterworth, normalized band [b_low, b_high]) on the CPU;
        the loss is the total squared deviation of each slice's filtered
        response from the per-modality mean response.

        NOTE(review): this path goes through NumPy/SciPy (detach + numpy),
        so no gradient flows back from this loss — confirm that is intended.
        '''
        bs,num_modal,num_slice,dim = data.shape
        loss = [0]*num_modal
        (b,a) = signal.butter(2, [b_low, b_high], 'bandpass')
        for modal_idx in range(num_modal):
            slice_bank = [0]*num_slice
            for i in range(num_slice):
                _freq = 0
                for j in range(bs):
                    # Sum the filtered responses over the batch for this slice.
                    _freq += signal.filtfilt(b, a, data[j,modal_idx,i,:].cpu().detach().numpy())
                slice_bank[i] = _freq
            # Squared deviation of each slice from the mean slice response.
            loss[modal_idx] = [(x-np.mean(slice_bank,0))**2 for x in slice_bank]
        loss = np.sum(loss)
        return loss
def freq_contrast_loss(self, data):
'''
Compute contrastive loss in frequency domain
'''
freq_vec = fft(data.cpu().detach().numpy())
freq_vec = np.abs(freq_vec)
loss = self.contrastive_module(torch.from_numpy(freq_vec).cuda())
return loss
| [
"numpy.sum",
"numpy.abs",
"disentangle_modules.SELayer",
"torch.cat",
"numpy.mean",
"torch.nn.MaxPool3d",
"torch.flatten",
"torch.nn.Conv3d",
"torch.squeeze",
"torch.Tensor",
"disentangle_modules.Decoder",
"torch.nn.functional.log_softmax",
"torch.log",
"scipy.signal.butter",
"torch.mean... | [((2118, 2307), 'disentangle_modules.Decoder', 'Decoder', ([], {'inChans': '_channels', 'outChans_list': '_outChans_list', 'concatChan_list': '_outChans_list', 'num_class_list': 'num_class_list', 'norm': 'None', 'use_distill': '(True)', 'use_kd': 'args.use_kd', 'args': 'args'}), '(inChans=_channels, outChans_list=_outChans_list, concatChan_list=\n _outChans_list, num_class_list=num_class_list, norm=None, use_distill=\n True, use_kd=args.use_kd, args=args)\n', (2125, 2307), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((2457, 2651), 'disentangle_modules.Decoder', 'Decoder', ([], {'inChans': '_channels', 'outChans_list': '_outChans_list_small', 'concatChan_list': '_outChans_list', 'num_class_list': 'num_class_list', 'norm': 'None', 'use_distill': '(True)', 'use_kd': 'args.use_kd', 'args': 'args'}), '(inChans=_channels, outChans_list=_outChans_list_small,\n concatChan_list=_outChans_list, num_class_list=num_class_list, norm=\n None, use_distill=True, use_kd=args.use_kd, args=args)\n', (2464, 2651), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((2676, 2749), 'disentangle_modules.MLP', 'MLP', (['inChans_list[0]', 'num_class_list[0]', '(16)', '(3)'], {'norm': '"""none"""', 'activ': '"""none"""'}), "(inChans_list[0], num_class_list[0], 16, 3, norm='none', activ='none')\n", (2679, 2749), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((3279, 3346), 'torch.nn.Conv3d', 'nn.Conv3d', (['_channels', '_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(_channels, _channels, kernel_size=1, stride=1, padding=0)\n', (3288, 3346), True, 'import torch.nn as nn\n'), ((3384, 3479), 'torch.nn.Conv3d', 'nn.Conv3d', (['(_channels * num_class_list[0])', '(_channels * 2)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(_channels * num_class_list[0], 
_channels * 2, kernel_size=1,\n stride=1, padding=0)\n', (3393, 3479), True, 'import torch.nn as nn\n'), ((3497, 3519), 'disentangle_modules.SELayer', 'SELayer', (['(_channels * 2)'], {}), '(_channels * 2)\n', (3504, 3519), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((3574, 3645), 'torch.nn.Conv3d', 'nn.Conv3d', (['(_channels * 2)', '_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(_channels * 2, _channels, kernel_size=1, stride=1, padding=0)\n', (3583, 3645), True, 'import torch.nn as nn\n'), ((3710, 3787), 'torch.nn.Conv3d', 'nn.Conv3d', (['(args.fea_dim * 4)', 'args.fea_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(args.fea_dim * 4, args.fea_dim, kernel_size=3, stride=1, padding=1)\n', (3719, 3787), True, 'import torch.nn as nn\n'), ((5438, 5449), 'time.time', 'time.time', ([], {}), '()\n', (5447, 5449), False, 'import time\n'), ((20602, 20626), 'torch.squeeze', 'torch.squeeze', (['tmp_s', '(-1)'], {}), '(tmp_s, -1)\n', (20615, 20626), False, 'import torch\n'), ((20642, 20666), 'torch.squeeze', 'torch.squeeze', (['tmp_s', '(-1)'], {}), '(tmp_s, -1)\n', (20655, 20666), False, 'import torch\n'), ((27172, 27193), 'torch.mean', 'torch.mean', (['kd_losses'], {}), '(kd_losses)\n', (27182, 27193), False, 'import torch\n'), ((28279, 28300), 'torch.mean', 'torch.mean', (['kd_losses'], {}), '(kd_losses)\n', (28289, 28300), False, 'import torch\n'), ((29129, 29166), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(y_s / T + 1e-40)'], {'dim': '(1)'}), '(y_s / T + 1e-40, dim=1)\n', (29142, 29166), True, 'import torch.nn.functional as F\n'), ((29177, 29202), 'torch.nn.functional.softmax', 'F.softmax', (['(y_t / T)'], {'dim': '(1)'}), '(y_t / T, dim=1)\n', (29186, 29202), True, 'import torch.nn.functional as F\n'), ((31644, 31689), 'scipy.signal.butter', 'signal.butter', (['(2)', '[b_low, b_high]', '"""bandpass"""'], {}), "(2, [b_low, b_high], 
'bandpass')\n", (31657, 31689), False, 'from scipy import signal\n'), ((32113, 32125), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (32119, 32125), True, 'import numpy as np\n'), ((32345, 32361), 'numpy.abs', 'np.abs', (['freq_vec'], {}), '(freq_vec)\n', (32351, 32361), True, 'import numpy as np\n'), ((1788, 1903), 'disentangle_modules.Target_transform', 'Target_transform', ([], {'n_res': '(4)', 'dim': '_channels', 'output_dim': '_channels', 'res_norm': '"""adain"""', 'activ': '"""LeakyReLU"""', 'args': 'args'}), "(n_res=4, dim=_channels, output_dim=_channels, res_norm=\n 'adain', activ='LeakyReLU', args=args)\n", (1804, 1903), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((1969, 2081), 'disentangle_modules.Target_transform', 'Target_transform', ([], {'n_res': '(4)', 'dim': '_channels', 'output_dim': '_channels', 'res_norm': '"""in"""', 'activ': '"""LeakyReLU"""', 'args': 'args'}), "(n_res=4, dim=_channels, output_dim=_channels, res_norm=\n 'in', activ='LeakyReLU', args=args)\n", (1985, 2081), False, 'from disentangle_modules import AdaINGen, Decoder, MLP, LinearBlock, Target_transform, SELayer\n'), ((10988, 11059), 'torch.cat', 'torch.cat', (['[target_fea0, target_fea1, target_fea2, target_fea4]'], {'axis': '(1)'}), '([target_fea0, target_fea1, target_fea2, target_fea4], axis=1)\n', (10997, 11059), False, 'import torch\n'), ((11728, 11781), 'torch.cat', 'torch.cat', (['[target_fea0, target_fea1, target_fea2]', '(0)'], {}), '([target_fea0, target_fea1, target_fea2], 0)\n', (11737, 11781), False, 'import torch\n'), ((12957, 13027), 'torch.cat', 'torch.cat', (['[binary_seg_out0, binary_seg_out1, binary_seg_out2]'], {'axis': '(1)'}), '([binary_seg_out0, binary_seg_out1, binary_seg_out2], axis=1)\n', (12966, 13027), False, 'import torch\n'), ((22993, 23047), 'torch.cat', 'torch.cat', (['[c_fusion, c_fusion, c_fusion, c_fusion]', '(0)'], {}), '([c_fusion, c_fusion, c_fusion, c_fusion], 0)\n', (23002, 
23047), False, 'import torch\n'), ((23087, 23130), 'torch.cat', 'torch.cat', (['[s_flair, s_t1, s_t1ce, s_t2]', '(0)'], {}), '([s_flair, s_t1, s_t1ce, s_t2], 0)\n', (23096, 23130), False, 'import torch\n'), ((25169, 25217), 'torch.cat', 'torch.cat', (['[s_flair, s_t1, s_t1ce, s_t2]'], {'axis': '(2)'}), '([s_flair, s_t1, s_t1ce, s_t2], axis=2)\n', (25178, 25217), False, 'import torch\n'), ((26455, 26480), 'torch.abs', 'torch.abs', (['(input - target)'], {}), '(input - target)\n', (26464, 26480), False, 'import torch\n'), ((26732, 26764), 'torch.unsqueeze', 'torch.unsqueeze', (['s[:, i, ...]', '(1)'], {}), '(s[:, i, ...], 1)\n', (26747, 26764), False, 'import torch\n'), ((28489, 28512), 'torch.flatten', 'torch.flatten', (['input', '(2)'], {}), '(input, 2)\n', (28502, 28512), False, 'import torch\n'), ((28568, 28592), 'torch.flatten', 'torch.flatten', (['target', '(2)'], {}), '(target, 2)\n', (28581, 28592), False, 'import torch\n'), ((29223, 29259), 'torch.nn.functional.kl_div', 'F.kl_div', (['p_s', 'p_t'], {'reduction': '"""mean"""'}), "(p_s, p_t, reduction='mean')\n", (29231, 29259), True, 'import torch.nn.functional as F\n'), ((29436, 29458), 'torch.log', 'torch.log', (['(var + 1e-40)'], {}), '(var + 1e-40)\n', (29445, 29458), False, 'import torch\n'), ((29493, 29507), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (29502, 29507), False, 'import torch\n'), ((29543, 29593), 'torch.mean', 'torch.mean', (['(1.0 + log_var - mu ** 2 - var)'], {'axis': '(-1)'}), '(1.0 + log_var - mu ** 2 - var, axis=-1)\n', (29553, 29593), False, 'import torch\n'), ((5526, 5558), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 0, ...]', '(1)'], {}), '(x[:, 0, ...], 1)\n', (5541, 5558), False, 'import torch\n'), ((5556, 5588), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 1, ...]', '(1)'], {}), '(x[:, 1, ...], 1)\n', (5571, 5588), False, 'import torch\n'), ((6974, 7073), 'torch.unsqueeze', 'torch.unsqueeze', (['style_fake_parallel[modality_num * bs * i:modality_num * bs * (i + 1), 
...]', '(1)'], {}), '(style_fake_parallel[modality_num * bs * i:modality_num * bs *\n (i + 1), ...], 1)\n', (6989, 7073), False, 'import torch\n'), ((7128, 7184), 'torch.unsqueeze', 'torch.unsqueeze', (['s_piece_bs[bs * i:bs * (i + 1), ...]', '(1)'], {}), '(s_piece_bs[bs * i:bs * (i + 1), ...], 1)\n', (7143, 7184), False, 'import torch\n'), ((9766, 9824), 'torch.cat', 'torch.cat', (['[target_fea0, target_fea1, target_fea2]'], {'axis': '(1)'}), '([target_fea0, target_fea1, target_fea2], axis=1)\n', (9775, 9824), False, 'import torch\n'), ((11830, 11883), 'torch.cat', 'torch.cat', (['[enc_list[i], enc_list[i], enc_list[i]]', '(0)'], {}), '([enc_list[i], enc_list[i], enc_list[i]], 0)\n', (11839, 11883), False, 'import torch\n'), ((11979, 12045), 'torch.cat', 'torch.cat', (['[target_fea0, target_fea1, target_fea2, target_fea4]', '(0)'], {}), '([target_fea0, target_fea1, target_fea2, target_fea4], 0)\n', (11988, 12045), False, 'import torch\n'), ((13460, 13551), 'torch.cat', 'torch.cat', (['[binary_seg_out0, binary_seg_out1, binary_seg_out2, binary_seg_out4]'], {'axis': '(1)'}), '([binary_seg_out0, binary_seg_out1, binary_seg_out2,\n binary_seg_out4], axis=1)\n', (13469, 13551), False, 'import torch\n'), ((18642, 18753), 'torch.cat', 'torch.cat', (['[distill_bin_logit1[0], distill_bin_logit2[0], distill_bin_logit3[0],\n distill_bin_logit4[0]]', '(1)'], {}), '([distill_bin_logit1[0], distill_bin_logit2[0], distill_bin_logit3\n [0], distill_bin_logit4[0]], 1)\n', (18651, 18753), False, 'import torch\n'), ((18780, 18891), 'torch.cat', 'torch.cat', (['[distill_bin_logit1[1], distill_bin_logit2[1], distill_bin_logit3[1],\n distill_bin_logit4[1]]', '(1)'], {}), '([distill_bin_logit1[1], distill_bin_logit2[1], distill_bin_logit3\n [1], distill_bin_logit4[1]], 1)\n', (18789, 18891), False, 'import torch\n'), ((18918, 19029), 'torch.cat', 'torch.cat', (['[distill_bin_logit1[2], distill_bin_logit2[2], distill_bin_logit3[2],\n distill_bin_logit4[2]]', '(1)'], {}), 
'([distill_bin_logit1[2], distill_bin_logit2[2], distill_bin_logit3\n [2], distill_bin_logit4[2]], 1)\n', (18927, 19029), False, 'import torch\n'), ((23176, 23242), 'torch.cat', 'torch.cat', (['[enc_list[i], enc_list[i], enc_list[i], enc_list[i]]', '(0)'], {}), '([enc_list[i], enc_list[i], enc_list[i], enc_list[i]], 0)\n', (23185, 23242), False, 'import torch\n'), ((23758, 23792), 'torch.cat', 'torch.cat', (['[c_fusion, c_fusion]', '(0)'], {}), '([c_fusion, c_fusion], 0)\n', (23767, 23792), False, 'import torch\n'), ((23834, 23860), 'torch.cat', 'torch.cat', (['[s_m1, s_m2]', '(0)'], {}), '([s_m1, s_m2], 0)\n', (23843, 23860), False, 'import torch\n'), ((25453, 25484), 'torch.cat', 'torch.cat', (['[s_m1, s_m2]'], {'axis': '(2)'}), '([s_m1, s_m2], axis=2)\n', (25462, 25484), False, 'import torch\n'), ((26159, 26178), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (26171, 26178), False, 'import torch\n'), ((26221, 26240), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (26233, 26240), False, 'import torch\n'), ((26848, 26876), 'torch.sum', 'torch.sum', (['(_s * t)', '(2, 3, 4)'], {}), '(_s * t, (2, 3, 4))\n', (26857, 26876), False, 'import torch\n'), ((5678, 5710), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 0, ...]', '(1)'], {}), '(x[:, 0, ...], 1)\n', (5693, 5710), False, 'import torch\n'), ((5708, 5740), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 1, ...]', '(1)'], {}), '(x[:, 1, ...], 1)\n', (5723, 5740), False, 'import torch\n'), ((5738, 5770), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 2, ...]', '(1)'], {}), '(x[:, 2, ...], 1)\n', (5753, 5770), False, 'import torch\n'), ((5768, 5800), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 3, ...]', '(1)'], {}), '(x[:, 3, ...], 1)\n', (5783, 5800), False, 'import torch\n'), ((7305, 7398), 'torch.unsqueeze', 'torch.unsqueeze', (['style_fea_map[modality_num * bs * i:modality_num * bs * (i + 1), ...]', '(1)'], {}), '(style_fea_map[modality_num * bs * i:modality_num * bs * (i +\n 1), ...], 
1)\n', (7320, 7398), False, 'import torch\n'), ((7463, 7523), 'torch.unsqueeze', 'torch.unsqueeze', (['s_fea_piece_bs[bs * i:bs * (i + 1), ...]', '(1)'], {}), '(s_fea_piece_bs[bs * i:bs * (i + 1), ...], 1)\n', (7478, 7523), False, 'import torch\n'), ((10670, 10741), 'torch.cat', 'torch.cat', (['[target_fea0, target_fea1, target_fea2, target_fea4]'], {'axis': '(1)'}), '([target_fea0, target_fea1, target_fea2, target_fea4], axis=1)\n', (10679, 10741), False, 'import torch\n'), ((12093, 12159), 'torch.cat', 'torch.cat', (['[enc_list[i], enc_list[i], enc_list[i], enc_list[i]]', '(0)'], {}), '([enc_list[i], enc_list[i], enc_list[i], enc_list[i]], 0)\n', (12102, 12159), False, 'import torch\n'), ((15638, 15657), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (15650, 15657), False, 'import torch\n'), ((15820, 15839), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (15832, 15839), False, 'import torch\n'), ((20335, 20362), 'torch.unsqueeze', 'torch.unsqueeze', (['s_flair', '(1)'], {}), '(s_flair, 1)\n', (20350, 20362), False, 'import torch\n'), ((20363, 20387), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t1', '(1)'], {}), '(s_t1, 1)\n', (20378, 20387), False, 'import torch\n'), ((20388, 20414), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t1ce', '(1)'], {}), '(s_t1ce, 1)\n', (20403, 20414), False, 'import torch\n'), ((20415, 20439), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t2', '(1)'], {}), '(s_t2, 1)\n', (20430, 20439), False, 'import torch\n'), ((20863, 20882), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (20875, 20882), False, 'import torch\n'), ((21040, 21059), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (21052, 21059), False, 'import torch\n'), ((21229, 21248), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (21241, 21248), False, 'import torch\n'), ((21316, 21347), 'torch.unsqueeze', 'torch.unsqueeze', (['s_flair_fea', '(1)'], {}), '(s_flair_fea, 1)\n', (21331, 21347), False, 'import 
torch\n'), ((21348, 21376), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t1_fea', '(1)'], {}), '(s_t1_fea, 1)\n', (21363, 21376), False, 'import torch\n'), ((21377, 21407), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t1ce_fea', '(1)'], {}), '(s_t1ce_fea, 1)\n', (21392, 21407), False, 'import torch\n'), ((21408, 21436), 'torch.unsqueeze', 'torch.unsqueeze', (['s_t2_fea', '(1)'], {}), '(s_t2_fea, 1)\n', (21423, 21436), False, 'import torch\n'), ((23908, 23948), 'torch.cat', 'torch.cat', (['[enc_list[i], enc_list[i]]', '(0)'], {}), '([enc_list[i], enc_list[i]], 0)\n', (23917, 23948), False, 'import torch\n'), ((27049, 27068), 'torch.sum', 'torch.sum', (['score', '(1)'], {}), '(score, 1)\n', (27058, 27068), False, 'import torch\n'), ((28657, 28688), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(input / T)'], {'dim': '(2)'}), '(input / T, dim=2)\n', (28670, 28688), True, 'import torch.nn.functional as F\n'), ((28688, 28716), 'torch.nn.functional.softmax', 'F.softmax', (['(target / T)'], {'dim': '(2)'}), '(target / T, dim=2)\n', (28697, 28716), True, 'import torch.nn.functional as F\n'), ((29029, 29050), 'torch.zeros_like', 'torch.zeros_like', (['y_s'], {}), '(y_s)\n', (29045, 29050), False, 'import torch\n'), ((29088, 29109), 'torch.zeros_like', 'torch.zeros_like', (['y_t'], {}), '(y_t)\n', (29104, 29109), False, 'import torch\n'), ((30861, 30901), 'torch.log', 'torch.log', (['(pos_score / all_score + 1e-05)'], {}), '(pos_score / all_score + 1e-05)\n', (30870, 30901), False, 'import torch\n'), ((32410, 32436), 'torch.from_numpy', 'torch.from_numpy', (['freq_vec'], {}), '(freq_vec)\n', (32426, 32436), False, 'import torch\n'), ((5915, 5956), 'torch.unsqueeze', 'torch.unsqueeze', (['complete_x[:, 0, ...]', '(1)'], {}), '(complete_x[:, 0, ...], 1)\n', (5930, 5956), False, 'import torch\n'), ((5954, 5995), 'torch.unsqueeze', 'torch.unsqueeze', (['complete_x[:, 1, ...]', '(1)'], {}), '(complete_x[:, 1, ...], 1)\n', (5969, 5995), False, 'import torch\n'), ((5993, 
6034), 'torch.unsqueeze', 'torch.unsqueeze', (['complete_x[:, 2, ...]', '(1)'], {}), '(complete_x[:, 2, ...], 1)\n', (6008, 6034), False, 'import torch\n'), ((6032, 6073), 'torch.unsqueeze', 'torch.unsqueeze', (['complete_x[:, 3, ...]', '(1)'], {}), '(complete_x[:, 3, ...], 1)\n', (6047, 6073), False, 'import torch\n'), ((14965, 15067), 'torch.cat', 'torch.cat', (['[distill_bin_fea1[i], distill_bin_fea2[i], distill_bin_fea3[i],\n distill_bin_fea4[i]]', '(1)'], {}), '([distill_bin_fea1[i], distill_bin_fea2[i], distill_bin_fea3[i],\n distill_bin_fea4[i]], 1)\n', (14974, 15067), False, 'import torch\n'), ((17279, 17349), 'torch.cat', 'torch.cat', (['[logits[0][0], logits[1][0], logits[2][0], logits[3][0]]', '(1)'], {}), '([logits[0][0], logits[1][0], logits[2][0], logits[3][0]], 1)\n', (17288, 17349), False, 'import torch\n'), ((19796, 19815), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (19808, 19815), False, 'import torch\n'), ((20097, 20116), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (20109, 20116), False, 'import torch\n'), ((20124, 20143), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (20136, 20143), False, 'import torch\n'), ((20533, 20557), 'torch.unsqueeze', 'torch.unsqueeze', (['s_m1', '(1)'], {}), '(s_m1, 1)\n', (20548, 20557), False, 'import torch\n'), ((20558, 20582), 'torch.unsqueeze', 'torch.unsqueeze', (['s_m2', '(1)'], {}), '(s_m2, 1)\n', (20573, 20582), False, 'import torch\n'), ((26793, 26810), 'torch.abs', 'torch.abs', (['(_s - t)'], {}), '(_s - t)\n', (26802, 26810), False, 'import torch\n'), ((27684, 27772), 'torch.nn.functional.interpolate', 'F.interpolate', (['t_item'], {'size': 's_item.shape[-3:]', 'mode': '"""trilinear"""', 'align_corners': '(False)'}), "(t_item, size=s_item.shape[-3:], mode='trilinear',\n align_corners=False)\n", (27697, 27772), True, 'import torch.nn.functional as F\n'), ((27905, 27942), 'torch.sum', 'torch.sum', (['(s_item * t_item)', '(2, 3, 4)'], {}), '(s_item * t_item, 
(2, 3, 4))\n', (27914, 27942), False, 'import torch\n'), ((28812, 28837), 'torch.abs', 'torch.abs', (['(input - target)'], {}), '(input - target)\n', (28821, 28837), False, 'import torch\n'), ((32051, 32073), 'numpy.mean', 'np.mean', (['slice_bank', '(0)'], {}), '(slice_bank, 0)\n', (32058, 32073), True, 'import numpy as np\n'), ((17761, 17786), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (17773, 17786), True, 'import torch.nn as nn\n'), ((17915, 17940), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(4)'], {'stride': '(4)'}), '(4, stride=4)\n', (17927, 17940), True, 'import torch.nn as nn\n'), ((18069, 18094), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (18081, 18094), True, 'import torch.nn as nn\n'), ((18233, 18258), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(4)'], {'stride': '(4)'}), '(4, stride=4)\n', (18245, 18258), True, 'import torch.nn as nn\n'), ((18395, 18420), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (18407, 18420), True, 'import torch.nn as nn\n'), ((18540, 18565), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(4)'], {'stride': '(4)'}), '(4, stride=4)\n', (18552, 18565), True, 'import torch.nn as nn\n'), ((19105, 19130), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (19117, 19130), True, 'import torch.nn as nn\n'), ((19240, 19265), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(4)'], {'stride': '(4)'}), '(4, stride=4)\n', (19252, 19265), True, 'import torch.nn as nn\n'), ((26889, 26909), 'torch.flatten', 'torch.flatten', (['_s', '(2)'], {}), '(_s, 2)\n', (26902, 26909), False, 'import torch\n'), ((26932, 26951), 'torch.flatten', 'torch.flatten', (['t', '(2)'], {}), '(t, 2)\n', (26945, 26951), False, 'import torch\n'), ((28140, 28159), 'torch.sum', 'torch.sum', (['score', '(1)'], {}), '(score, 1)\n', (28149, 28159), False, 'import torch\n'), ((31321, 31341), 'torch.norm', 'torch.norm', (['x'], 
{'dim': '(1)'}), '(x, dim=1)\n', (31331, 31341), False, 'import torch\n'), ((31341, 31361), 'torch.norm', 'torch.norm', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (31351, 31361), False, 'import torch\n'), ((15274, 15320), 'torch.unsqueeze', 'torch.unsqueeze', (['distill_fea_all[:, i, ...]', '(1)'], {}), '(distill_fea_all[:, i, ...], 1)\n', (15289, 15320), False, 'import torch\n'), ((15969, 16023), 'torch.unsqueeze', 'torch.unsqueeze', (['distill_main_logit[idx][:, i, ...]', '(1)'], {}), '(distill_main_logit[idx][:, i, ...], 1)\n', (15984, 16023), False, 'import torch\n'), ((27833, 27859), 'torch.abs', 'torch.abs', (['(s_item - t_item)'], {}), '(s_item - t_item)\n', (27842, 27859), False, 'import torch\n'), ((31094, 31114), 'torch.norm', 'torch.norm', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (31104, 31114), False, 'import torch\n'), ((31139, 31159), 'torch.norm', 'torch.norm', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (31149, 31159), False, 'import torch\n'), ((31201, 31221), 'torch.norm', 'torch.norm', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (31211, 31221), False, 'import torch\n'), ((31235, 31255), 'torch.norm', 'torch.norm', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (31245, 31255), False, 'import torch\n'), ((15469, 15515), 'torch.unsqueeze', 'torch.unsqueeze', (['distill_fea_all[:, i, ...]', '(1)'], {}), '(distill_fea_all[:, i, ...], 1)\n', (15484, 15515), False, 'import torch\n'), ((27955, 27979), 'torch.flatten', 'torch.flatten', (['s_item', '(2)'], {}), '(s_item, 2)\n', (27968, 27979), False, 'import torch\n'), ((28002, 28026), 'torch.flatten', 'torch.flatten', (['t_item', '(2)'], {}), '(t_item, 2)\n', (28015, 28026), False, 'import torch\n')] |
import numpy
from silx.math.fit import leastsq

# Synthetic, almost-quadratic data sampled at the integers 0..9.
x = numpy.arange(10).astype(numpy.float64)
y = 0.001 * x**3 + 25.1 * x**2 + 1.2 * x - 25


def poly2(x, a, b, c):
    """Quadratic model whose coefficients are estimated by leastsq."""
    return a * x**2 + b*x + c


initial_guess = [1., 1., 1.]
p, cov_matrix = leastsq(poly2, x, y, initial_guess)
print("Parameters [a, b, c]: " + str(p))

# Repeat the fit asking for the diagnostics dictionary as well.
p, cov_matrix, info = leastsq(poly2, x, y, initial_guess, full_output=True)
print(info["reduced_chisq"])
print(info["niter"])
| [
"silx.math.fit.leastsq",
"numpy.arange"
] | [((228, 252), 'silx.math.fit.leastsq', 'leastsq', (['poly2', 'x', 'y', 'p0'], {}), '(poly2, x, y, p0)\n', (235, 252), False, 'from silx.math.fit import leastsq\n'), ((323, 365), 'silx.math.fit.leastsq', 'leastsq', (['poly2', 'x', 'y', 'p0'], {'full_output': '(True)'}), '(poly2, x, y, p0, full_output=True)\n', (330, 365), False, 'from silx.math.fit import leastsq\n'), ((52, 68), 'numpy.arange', 'numpy.arange', (['(10)'], {}), '(10)\n', (64, 68), False, 'import numpy\n')] |
import psutil
from numpy import mean, round
from .logger import Logger
__all__ = ("MemoryLogger",)
class MemoryLogger(Logger):
    """Logger that samples system memory usage at every task update.

    Readings come from ``psutil.virtual_memory().used`` and are expressed in
    the unit selected at construction time.
    """

    def __init__(self, unit="gb"):
        """Create the logger.

        unit: one of ``b``, ``kb``, ``mb`` or ``gb``; any other value raises
        ValueError.
        """
        factors = {"b": 1, "kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3}
        if unit not in factors:
            raise ValueError(
                f"`{unit}` is an invalid unit, valid units are"
                + "`[b, kb, mb, gb]`."
            )
        self.factor = factors[unit]
        Logger.__init__(self)

    def on_task_end(self):
        """Summarise every collected sample into min/max/mean log entries."""
        samples = self.all
        self.log["min"] = min(samples)
        self.log["max"] = max(samples)
        self.log["mean"] = mean(samples)

    def on_task_update(self, stepname=None):
        """Append the current memory reading under the ``memory-<step>`` key."""
        usage = round(psutil.virtual_memory().used / self.factor, 4)
        self.log.setdefault(f"memory-{stepname}", []).append(usage)

    @property
    def all(self):
        """Flat list of every memory sample recorded so far."""
        samples = []
        for key, values in self.log.items():
            if key.startswith("memory"):
                samples.extend(values)
        return samples
# No CLI behavior is defined; the module is meant to be imported.
if __name__ == "__main__":
    pass
| [
"psutil.virtual_memory",
"numpy.mean"
] | [((728, 742), 'numpy.mean', 'mean', (['self.all'], {}), '(self.all)\n', (732, 742), False, 'from numpy import mean, round\n'), ((818, 841), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (839, 841), False, 'import psutil\n')] |
# -*- coding: utf-8 -*-
"""
ese3

Least-squares polynomial fit (degree 4, via QR) and its sensitivity to a
small perturbation of one data point.
"""
import numpy as np
import matplotlib.pyplot as plt
from funzioni_Approssimazione_MQ import metodoQR

x = np.arange(10.0, 10.6, 0.1)
y = np.array([11.0320, 11.1263, 11.1339, 11.1339, 11.1993, 11.1844])

# Fit and evaluate the degree-4 polynomial on a dense grid.
coeffs = metodoQR(x, y, 4)
grid = np.linspace(np.min(x), np.max(x), 100)
fitted = np.polyval(coeffs, grid)
residual = np.linalg.norm(y - np.polyval(coeffs, x))**2
print("Residuo: {:e}".format(residual))
plt.plot(grid, fitted, "-r", x, y, "ob")
plt.show()

# Perturb one sample slightly and refit to study the conditioning.
x[1] += 0.013
y[1] -= 0.001
coeffs = metodoQR(x, y, 4)
grid = np.linspace(np.min(x), np.max(x), 100)
fitted = np.polyval(coeffs, grid)
plt.plot(grid, fitted, "-r", x, y, "ob")
residual = np.linalg.norm(y - np.polyval(coeffs, x))**2
print("Residuo dati perturbati: {:e}".format(residual))
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyval",
"funzioni_Approssimazione_MQ.metodoQR",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.arange"
] | [((150, 176), 'numpy.arange', 'np.arange', (['(10.0)', '(10.6)', '(0.1)'], {}), '(10.0, 10.6, 0.1)\n', (159, 176), True, 'import numpy as np\n'), ((180, 243), 'numpy.array', 'np.array', (['[11.032, 11.1263, 11.1339, 11.1339, 11.1993, 11.1844]'], {}), '([11.032, 11.1263, 11.1339, 11.1339, 11.1993, 11.1844])\n', (188, 243), True, 'import numpy as np\n'), ((251, 268), 'funzioni_Approssimazione_MQ.metodoQR', 'metodoQR', (['x', 'y', '(4)'], {}), '(x, y, 4)\n', (259, 268), False, 'from funzioni_Approssimazione_MQ import metodoQR\n'), ((322, 344), 'numpy.polyval', 'np.polyval', (['a1', 'valori'], {}), '(a1, valori)\n', (332, 344), True, 'import numpy as np\n'), ((438, 476), 'matplotlib.pyplot.plot', 'plt.plot', (['valori', 'p1', '"""-r"""', 'x', 'y', '"""ob"""'], {}), "(valori, p1, '-r', x, y, 'ob')\n", (446, 476), True, 'import matplotlib.pyplot as plt\n'), ((480, 490), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (488, 490), True, 'import matplotlib.pyplot as plt\n'), ((539, 556), 'funzioni_Approssimazione_MQ.metodoQR', 'metodoQR', (['x', 'y', '(4)'], {}), '(x, y, 4)\n', (547, 556), False, 'from funzioni_Approssimazione_MQ import metodoQR\n'), ((610, 632), 'numpy.polyval', 'np.polyval', (['a2', 'valori'], {}), '(a2, valori)\n', (620, 632), True, 'import numpy as np\n'), ((634, 672), 'matplotlib.pyplot.plot', 'plt.plot', (['valori', 'p2', '"""-r"""', 'x', 'y', '"""ob"""'], {}), "(valori, p2, '-r', x, y, 'ob')\n", (642, 672), True, 'import matplotlib.pyplot as plt\n'), ((782, 792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((289, 298), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (295, 298), True, 'import numpy as np\n'), ((300, 309), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (306, 309), True, 'import numpy as np\n'), ((577, 586), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (583, 586), True, 'import numpy as np\n'), ((588, 597), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (594, 597), 
True, 'import numpy as np\n'), ((375, 392), 'numpy.polyval', 'np.polyval', (['a1', 'x'], {}), '(a1, x)\n', (385, 392), True, 'import numpy as np\n'), ((703, 720), 'numpy.polyval', 'np.polyval', (['a2', 'x'], {}), '(a2, x)\n', (713, 720), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8

# imports
import cv2
import numpy as np
import matplotlib.pyplot as plt
from datetime import date
import os

# Initializing an instance of the CascadeClassifier face detector.
face_detect = cv2.CascadeClassifier(os.getcwd() + "\\haarcascade_frontalface_alt.xml")

path = input("Enter the path of the picture : (enter cwd for current working directory)")
if path == "cwd":
    path = os.getcwd()
while not os.path.isdir(path):
    print("Invalid Path Entered... ")
    path = input("Enter the valid path of the picture : ")
name = input("Enter the name of the picture with the extension : ")
os.chdir(path)
while not os.path.isfile(name):
    print("Picture Doesn't Exists!")
    name = input("Enter the valid name of the picture with the extension : ")
cwd = os.getcwd()

src = cv2.imread(name)
# OpenCV uses the BGR color format; convert once so crops are held as RGB.
img = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)

faces = face_detect.detectMultiScale(img, 1.1, 2)

# When no faces are detected, detectMultiScale returns an empty tuple
# instead of an ndarray; normalize to a list so the loop below is safe.
if isinstance(faces, np.ndarray):
    faces = faces.tolist()
cut = []
if "Stickers" not in os.listdir():
    os.mkdir("Stickers")
os.chdir(cwd + "\\Stickers")
for index, face in enumerate(faces):
    # BUG FIX: detectMultiScale yields rectangles as (x, y, w, h).  The
    # original unpacked them as (x, y, h, w), which distorts non-square crops.
    # enumerate() also replaces faces.index(face), which returned the wrong
    # index whenever two detections were identical.
    x, y, w, h = face
    # Build a sticker-style filename: STK-YYYYMMDD-WAnnnn.webp
    today = str(date.today()).replace("-", "")
    filename = "STK-" + today + "-WA" + "0" * (4 - len(str(index))) + str(index) + ".webp"
    crop = img[y:y + h, x:x + w, :]
    cut.append(crop)
    # Convert back to BGR so cv2.imwrite stores the colors correctly.
    _ = cv2.imwrite(filename, cv2.cvtColor(crop.copy(), cv2.COLOR_RGB2BGR))
# Back to the original working directory.
os.chdir(cwd)
| [
"os.listdir",
"os.mkdir",
"os.getcwd",
"cv2.cvtColor",
"os.path.isdir",
"cv2.imwrite",
"datetime.date.today",
"cv2.imread",
"os.path.isfile",
"numpy.array",
"os.chdir"
] | [((618, 632), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (626, 632), False, 'import os\n'), ((792, 803), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (801, 803), False, 'import os\n'), ((811, 827), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (821, 827), False, 'import cv2\n'), ((865, 901), 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'cv2.COLOR_BGR2RGB'], {}), '(src, cv2.COLOR_BGR2RGB)\n', (877, 901), False, 'import cv2\n'), ((1267, 1295), 'os.chdir', 'os.chdir', (["(cwd + '\\\\Stickers')"], {}), "(cwd + '\\\\Stickers')\n", (1275, 1295), False, 'import os\n'), ((1734, 1747), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (1742, 1747), False, 'import os\n'), ((404, 415), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (413, 415), False, 'import os\n'), ((427, 446), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (440, 446), False, 'import os\n'), ((644, 664), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (658, 664), False, 'import os\n'), ((1228, 1240), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1238, 1240), False, 'import os\n'), ((1246, 1266), 'os.mkdir', 'os.mkdir', (['"""Stickers"""'], {}), "('Stickers')\n", (1254, 1266), False, 'import os\n'), ((1626, 1661), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1638, 1661), False, 'import cv2\n'), ((1671, 1701), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'im2save'], {}), '(filename, im2save)\n', (1682, 1701), False, 'import cv2\n'), ((232, 243), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (241, 243), False, 'import os\n'), ((1117, 1130), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1125, 1130), True, 'import numpy as np\n'), ((1389, 1401), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1399, 1401), False, 'from datetime import date\n')] |
from typing import Dict
import os
import numpy as np
from ..core_chart import BaseChart
from ....assets.numba_kernels import calc_groupby
from ....assets import geo_json_mapper
from ....layouts import chart_view
from ....assets.cudf_utils import get_min_max
from ...constants import CUXF_NAN_COLOR
# Globally silence numpy divide-by-zero / invalid-value warnings for the
# computations performed in this module.
np.seterr(divide="ignore", invalid="ignore")
class BaseChoropleth(BaseChart):
    """Base chart for choropleth (geoJSON polygon) visualizations.

    Colors (and optionally extrudes) geoJSON features by aggregated column
    values; concrete rendering is left to subclasses.
    """

    # Hook for reset interactions; presumably assigned externally — TODO confirm.
    reset_event = None
    # Whether pre-aggregated datatiles have been computed (see property below).
    _datatile_loaded_state: bool = False
    # geoJSON property -> value mapping; presumably filled when geoJSON is parsed.
    geo_mapper: Dict[str, str] = {}
    # Choropleths participate in the dashboard's datatile aggregation path.
    use_data_tiles = True
    @property
    def datatile_loaded_state(self):
        """bool: whether datatiles for this chart have been loaded."""
        return self._datatile_loaded_state
@property
def name(self):
# overwrite BaseChart name function to allow unique choropleths on
# value x
if self.chart_type is not None:
return f"{self.x}_{self.aggregate_fn}_{self.chart_type}"
else:
return f"{self.x}_{self.aggregate_fn}_chart"
    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        """Record whether datatiles for this chart have been loaded."""
        self._datatile_loaded_state = state
def __init__(
self,
x,
color_column,
elevation_column=None,
color_aggregate_fn="count",
color_factor=1,
elevation_aggregate_fn="sum",
elevation_factor=1,
add_interaction=True,
width=800,
height=400,
geoJSONSource=None,
geoJSONProperty=None,
geo_color_palette=None,
mapbox_api_key=os.getenv("MAPBOX_API_KEY"),
map_style=None,
tooltip=True,
tooltip_include_cols=[],
nan_color=CUXF_NAN_COLOR,
title=None,
**library_specific_params,
):
"""
Description:
-------------------------------------------
Input:
x
color_column,
elevation_column,
color_aggregate_fn,
color_factor,
elevation_aggregate_fn,
elevation_factor,
geoJSONSource
geoJSONProperty
add_interaction
geo_color_palette
width
height
nan_color
mapbox_api_key
map_style
**library_specific_params
-------------------------------------------
Ouput:
"""
self.x = x
self.color_column = color_column
self.color_aggregate_fn = color_aggregate_fn
self.color_factor = color_factor
self.elevation_column = elevation_column
self.aggregate_dict = {
self.color_column: self.color_aggregate_fn,
}
if self.elevation_column is not None:
self.elevation_aggregate_fn = elevation_aggregate_fn
self.elevation_factor = elevation_factor
self.aggregate_dict[
self.elevation_column
] = self.elevation_aggregate_fn
self.add_interaction = add_interaction
if geoJSONSource is None:
print("geoJSONSource is required for the choropleth map")
else:
self.geoJSONSource = geoJSONSource
self.geo_color_palette = geo_color_palette
self.geoJSONProperty = geoJSONProperty
_, x_range, y_range = geo_json_mapper(
self.geoJSONSource, self.geoJSONProperty, projection=4326
)
self.height = height
self.width = width
self.stride = 1
self.mapbox_api_key = mapbox_api_key
self.map_style = map_style
self.library_specific_params = library_specific_params
self.tooltip = tooltip
self.tooltip_include_cols = tooltip_include_cols
self.nan_color = nan_color
self.title = title or f"{self.x}"
if "x_range" not in self.library_specific_params:
self.library_specific_params["x_range"] = x_range
if "y_range" not in self.library_specific_params:
self.library_specific_params["y_range"] = y_range
    def initiate_chart(self, dashboard_cls):
        """
        Initialize the chart from the dashboard's cudf DataFrame: compute
        the range of ``x``, build the geoJSON mapper, aggregate the source
        data, render the chart and hook up its events.

        Input:
        dashboard_cls: dashboard object exposing _cuxfilter_df.data
        """
        # Range of the grouping column; used by datatile index arithmetic.
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )
        # Map geoJSON features onto values of self.x (EPSG:4326 projection);
        # the returned x/y ranges are unused here (set in __init__).
        self.geo_mapper, x_range, y_range = geo_json_mapper(
            self.geoJSONSource,
            self.geoJSONProperty,
            4326,
            self.x,
            dashboard_cls._cuxfilter_df.data[self.x].dtype,
        )
        self.calculate_source(dashboard_cls._cuxfilter_df.data)
        self.generate_chart()
        self.apply_mappers()
        self.add_events(dashboard_cls)
def view(self):
return chart_view(
self.chart.view(), width=self.width, title=self.title
)
def _compute_array_all_bins(
self, source_x, source_y, update_data_x, update_data_y
):
"""
source_x: current_source_x, np.array()
source_y: current_source_y, np.array()
update_data_x: updated_data_x, np.array()
update_data_y: updated_data_x, np.array()
"""
result_array = np.zeros(shape=source_x.shape)
indices = [np.where(x_ == source_x)[0][0] for x_ in update_data_x]
np.put(result_array, indices, update_data_y)
return result_array
    def calculate_source(self, data, patch_update=False):
        """
        Group ``data`` by self.x with the configured aggregations and push
        the result into the chart's data source.

        Input:
        data: cudf DataFrame to aggregate
        patch_update: when True, only the changed bins were recomputed;
            missing bins are zero-filled against the existing source
        """
        # df holds the grouped columns: [x-bins, colour-agg, (elevation-agg)]
        df = calc_groupby(self, data, agg=self.aggregate_dict)
        dict_temp = {
            self.x: df[0],
            self.color_column: df[1],
        }
        if self.elevation_column is not None:
            dict_temp[self.elevation_column] = df[2]
        if patch_update and len(dict_temp[self.x]) < len(
            self.source.data[self.x]
        ):
            # if not all X axis bins are provided, filling bins not updated
            # with zeros
            color_axis_data = self._compute_array_all_bins(
                self.source.data[self.x],
                self.source.data[self.color_column],
                dict_temp[self.x],
                dict_temp[self.color_column],
            )
            dict_temp[self.color_column] = color_axis_data
            if self.elevation_column is not None:
                elevation_axis_data = self._compute_array_all_bins(
                    self.source.data[self.x],
                    self.source.data[self.elevation_column],
                    dict_temp[self.x],
                    dict_temp[self.elevation_column],
                )
                dict_temp[self.elevation_column] = elevation_axis_data
            # Keep the chart's full set of x bins.
            dict_temp[self.x] = self.source.data[self.x]
        self.format_source_data(dict_temp, patch_update)
def get_selection_callback(self, dashboard_cls):
"""
Description: generate callback for choropleth selection event
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
def selection_callback(old, new):
if dashboard_cls._active_view != self:
dashboard_cls._reset_current_view(new_active_view=self)
dashboard_cls._calc_data_tiles(cumsum=False)
dashboard_cls._query_datatiles_by_indices(old, new)
return selection_callback
def compute_query_dict(self, query_str_dict, query_local_variables_dict):
"""
Description:
-------------------------------------------
Input:
query_dict = reference to dashboard.__cls__.query_dict
-------------------------------------------
Ouput:
"""
list_of_indices = self.get_selected_indices()
if len(list_of_indices) == 0 or list_of_indices == [""]:
query_str_dict.pop(self.name, None)
elif len(list_of_indices) == 1:
query_str_dict[self.name] = f"{self.x}=={list_of_indices[0]}"
else:
indices_string = ",".join(map(str, list_of_indices))
query_str_dict[self.name] = f"{self.x} in ({indices_string})"
def add_events(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
if self.add_interaction:
self.add_selection_event(
self.get_selection_callback(dashboard_cls)
)
if self.reset_event is not None:
self.add_reset_event(dashboard_cls)
def add_reset_event(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
def reset_callback(event):
if dashboard_cls._active_view != self:
# reset previous active view and set current chart as
# active view
dashboard_cls._reset_current_view(new_active_view=self)
dashboard_cls._reload_charts()
# add callback to reset chart button
self.add_event(self.reset_event, reset_callback)
    def get_selected_indices(self):
        """
        Stub: return the list of currently selected region values.

        Concrete chart backends override this; the base implementation
        returns an empty selection.
        """
        print("function to be overridden by library specific extensions")
        return []
    def add_selection_event(self, callback):
        """
        Stub: register ``callback`` to be fired on region selection.

        Concrete chart backends override this; the base implementation
        is a no-op.
        """
        print("function to be overridden by library specific extensions")
    def query_chart_by_range(self, active_chart, query_tuple, datatile):
        """
        Reset this chart from a datatile when the active chart is filtered
        to a continuous value range.

        Input:
        1. active_chart: chart object of active_chart
        2. query_tuple: (min_val, max_val) of the query [type: tuple]
        3. datatile: dict, datatile of active chart for
        current chart[type: pandas df]
        """
        if type(datatile) != dict:
            # choropleth datatile should be a dictionary
            datatile = {self.color_column: datatile}
        for key in datatile:
            datatile_result = None
            min_val, max_val = query_tuple
            # Translate the query range into column indices of the active
            # chart's binning.
            datatile_index_min = int(
                round((min_val - active_chart.min_value) / active_chart.stride)
            )
            datatile_index_max = int(
                round((max_val - active_chart.min_value) / active_chart.stride)
            )
            # Datatile rows corresponding to this chart's own bins.
            datatile_indices = (
                (self.source.data[self.x] - self.min_value) / self.stride
            ).astype(int)
            if key == self.color_column:
                temp_agg_function = self.color_aggregate_fn
            elif self.elevation_column is not None:
                temp_agg_function = self.elevation_aggregate_fn
            if datatile_index_min == 0:
                # Range starts at the first bin: the value stored at
                # datatile_index_max already covers the whole range
                # (count/sum/mean tiles appear cumulative -- see the
                # max-minus-min arithmetic in the else branch).
                if temp_agg_function == "mean":
                    # Mean tiles store a (sum, count) pair of frames.
                    datatile_result_sum = np.array(
                        datatile[key][0].loc[
                            datatile_indices, datatile_index_max
                        ]
                    )
                    datatile_result_count = np.array(
                        datatile[key][1].loc[
                            datatile_indices, datatile_index_max
                        ]
                    )
                    datatile_result = (
                        datatile_result_sum / datatile_result_count
                    )
                elif temp_agg_function in ["count", "sum"]:
                    datatile_result = datatile[key].loc[
                        datatile_indices, datatile_index_max
                    ]
                elif temp_agg_function in ["min", "max"]:
                    # min/max cannot be made cumulative: reduce across the
                    # columns (column 0 is skipped, as elsewhere).
                    datatile_result = np.array(
                        getattr(
                            datatile[key].loc[datatile_indices, 1:],
                            temp_agg_function,
                        )(axis=1, skipna=True)
                    )
            else:
                # General case: subtract the value just before the range
                # start from the value at the range end.
                datatile_index_min -= 1
                if temp_agg_function == "mean":
                    datatile_max0 = datatile[key][0].loc[
                        datatile_indices, datatile_index_max
                    ]
                    datatile_min0 = datatile[key][0].loc[
                        datatile_indices, datatile_index_min
                    ]
                    datatile_result_sum = np.array(
                        datatile_max0 - datatile_min0
                    )
                    datatile_max1 = datatile[key][1].loc[
                        datatile_indices, datatile_index_max
                    ]
                    datatile_min1 = datatile[key][1].loc[
                        datatile_indices, datatile_index_min
                    ]
                    datatile_result_count = np.array(
                        datatile_max1 - datatile_min1
                    )
                    datatile_result = (
                        datatile_result_sum / datatile_result_count
                    )
                elif temp_agg_function in ["count", "sum"]:
                    datatile_max = datatile[key].loc[
                        datatile_indices, datatile_index_max
                    ]
                    datatile_min = datatile[key].loc[
                        datatile_indices, datatile_index_min
                    ]
                    datatile_result = np.array(datatile_max - datatile_min)
                elif temp_agg_function in ["min", "max"]:
                    # Reduce directly over the selected column window.
                    datatile_result = np.array(
                        getattr(
                            datatile[key].loc[
                                datatile_indices,
                                datatile_index_min:datatile_index_max,
                            ],
                            temp_agg_function,
                        )(axis=1, skipna=True)
                    )
            if datatile_result is not None:
                if isinstance(datatile_result, np.ndarray):
                    self.reset_chart(datatile_result, key)
                else:
                    # pandas Series -> plain ndarray before resetting.
                    self.reset_chart(np.array(datatile_result), key)
    def query_chart_by_indices_for_mean(
        self,
        active_chart,
        old_indices,
        new_indices,
        datatile,
        calc_new,
        remove_old,
    ):
        """
        Compute mean-aggregated per-bin values for a discrete selection on
        the active chart, from the (sum, count) datatile frame pair.

        Returns a numpy array of per-bin means for this chart.
        (calc_new / remove_old are unused for means -- the full selection
        is re-accumulated each time.)
        """
        # Datatile rows corresponding to this chart's own bins.
        datatile_indices = (
            (self.source.data[self.x] - self.min_value) / self.stride
        ).astype(int)
        if len(new_indices) == 0 or new_indices == [""]:
            # Empty selection: mean over all columns (i.e. no filtering).
            datatile_sum_0 = np.array(
                datatile[0].loc[datatile_indices].sum(axis=1, skipna=True)
            )
            datatile_sum_1 = np.array(
                datatile[1].loc[datatile_indices].sum(axis=1, skipna=True)
            )
            datatile_result = datatile_sum_0 / datatile_sum_1
            return datatile_result
        len_y_axis = datatile[0][0].loc[datatile_indices].shape[0]
        datatile_result = np.zeros(shape=(len_y_axis,), dtype=np.float64)
        value_sum = np.zeros(shape=(len_y_axis,), dtype=np.float64)
        value_count = np.zeros(shape=(len_y_axis,), dtype=np.float64)
        for index in new_indices:
            # Map the selected value onto a datatile column index.
            index = int(
                round((index - active_chart.min_value) / active_chart.stride)
            )
            value_sum += np.array(
                datatile[0][int(index)].loc[datatile_indices]
            )
            value_count += np.array(
                datatile[1][int(index)].loc[datatile_indices]
            )
        datatile_result = value_sum / value_count
        return datatile_result
    def query_chart_by_indices_for_count(
        self,
        active_chart,
        old_indices,
        new_indices,
        datatile,
        calc_new,
        remove_old,
        key,
    ):
        """
        Incrementally update count/sum-aggregated per-bin values for a
        discrete selection: add the newly-selected datatile columns and
        subtract the deselected ones.

        Returns a numpy array of per-bin values for this chart.
        """
        # Datatile rows corresponding to this chart's own bins.
        datatile_indices = (
            (self.source.data[self.x] - self.min_value) / self.stride
        ).astype(int)
        if len(new_indices) == 0 or new_indices == [""]:
            # Empty selection: total over every datatile column.
            datatile_result = np.array(
                datatile.loc[datatile_indices, :].sum(axis=1, skipna=True)
            )
            return datatile_result
        if len(old_indices) == 0 or old_indices == [""]:
            # No previous selection: accumulate into a zero baseline.
            len_y_axis = datatile[0].loc[datatile_indices].shape[0]
            datatile_result = np.zeros(shape=(len_y_axis,), dtype=np.float64)
        else:
            # Patch incrementally starting from the chart's current values.
            len_y_axis = datatile[0].loc[datatile_indices].shape[0]
            datatile_result = np.array(
                self.source.data[key], dtype=np.float64
            )[:len_y_axis]
        for index in calc_new:
            index = int(
                round((index - active_chart.min_value) / active_chart.stride)
            )
            datatile_result += np.array(
                datatile.loc[datatile_indices, int(index)]
            )
        for index in remove_old:
            index = int(
                round((index - active_chart.min_value) / active_chart.stride)
            )
            datatile_result -= np.array(
                datatile.loc[datatile_indices, int(index)]
            )
        return datatile_result
    def query_chart_by_indices_for_minmax(
        self,
        active_chart,
        old_indices,
        new_indices,
        datatile,
        temp_agg_function,
    ):
        """
        Recompute min/max-aggregated per-bin values for a discrete
        selection. Unlike count/sum, min/max cannot be patched
        incrementally, so the reduction is re-run over the selected
        datatile columns.

        Returns a numpy array of per-bin values for this chart.
        """
        # Datatile rows corresponding to this chart's own bins.
        datatile_indices = (
            (self.source.data[self.x] - self.min_value) / self.stride
        ).astype(int)
        if len(new_indices) == 0 or new_indices == [""]:
            # get min or max from datatile df, skipping column 0(always 0)
            datatile_result = np.array(
                getattr(datatile.loc[datatile_indices, 1:], temp_agg_function)(
                    axis=1, skipna=True
                )
            )
        else:
            # Map the selected values onto datatile column indices.
            new_indices = np.array(new_indices)
            new_indices = np.round(
                (new_indices - active_chart.min_value) / active_chart.stride
            ).astype(int)
            datatile_result = np.array(
                getattr(
                    datatile.loc[datatile_indices, list(new_indices)],
                    temp_agg_function,
                )(axis=1, skipna=True)
            )
        return datatile_result
    def query_chart_by_indices(
        self, active_chart, old_indices, new_indices, datatile
    ):
        """
        Reset this chart from a datatile when the active chart is filtered
        to a discrete set of selected values; dispatches to the
        aggregation-specific helpers.

        Input:
        1. active_chart: chart object of active_chart
        2. old_indices: list
        3. new_indices: list
        4. datatile: dict, datatile of active chart for
        current chart[type: pandas df]
        """
        if type(datatile) != dict:
            # choropleth datatile should be a dictionary
            datatile = {self.color_column: datatile}
        for key in datatile:
            # Which selections were added, and which removed ("" is a
            # placeholder for "no selection" and is stripped out).
            calc_new = list(set(new_indices) - set(old_indices))
            remove_old = list(set(old_indices) - set(new_indices))
            if "" in calc_new:
                calc_new.remove("")
            if "" in remove_old:
                remove_old.remove("")
            # Pick the aggregation matching this datatile key.
            if key == self.color_column:
                temp_agg_function = self.color_aggregate_fn
            elif self.elevation_column is not None:
                temp_agg_function = self.elevation_aggregate_fn
            if temp_agg_function == "mean":
                datatile_result = self.query_chart_by_indices_for_mean(
                    active_chart,
                    old_indices,
                    new_indices,
                    datatile[key],
                    calc_new,
                    remove_old,
                )
            elif temp_agg_function in ["count", "sum"]:
                datatile_result = self.query_chart_by_indices_for_count(
                    active_chart,
                    old_indices,
                    new_indices,
                    datatile[key],
                    calc_new,
                    remove_old,
                    key,
                )
            elif temp_agg_function in ["min", "max"]:
                datatile_result = self.query_chart_by_indices_for_minmax(
                    active_chart,
                    old_indices,
                    new_indices,
                    datatile[key],
                    temp_agg_function,
                )
            if isinstance(datatile_result, np.ndarray):
                self.reset_chart(datatile_result, key)
            else:
                # pandas Series -> plain ndarray before resetting.
                self.reset_chart(np.array(datatile_result), key)
| [
"numpy.put",
"numpy.seterr",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.round",
"os.getenv"
] | [((300, 344), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (309, 344), True, 'import numpy as np\n'), ((1443, 1470), 'os.getenv', 'os.getenv', (['"""MAPBOX_API_KEY"""'], {}), "('MAPBOX_API_KEY')\n", (1452, 1470), False, 'import os\n'), ((5174, 5204), 'numpy.zeros', 'np.zeros', ([], {'shape': 'source_x.shape'}), '(shape=source_x.shape)\n', (5182, 5204), True, 'import numpy as np\n'), ((5288, 5332), 'numpy.put', 'np.put', (['result_array', 'indices', 'update_data_y'], {}), '(result_array, indices, update_data_y)\n', (5294, 5332), True, 'import numpy as np\n'), ((15685, 15732), 'numpy.zeros', 'np.zeros', ([], {'shape': '(len_y_axis,)', 'dtype': 'np.float64'}), '(shape=(len_y_axis,), dtype=np.float64)\n', (15693, 15732), True, 'import numpy as np\n'), ((15753, 15800), 'numpy.zeros', 'np.zeros', ([], {'shape': '(len_y_axis,)', 'dtype': 'np.float64'}), '(shape=(len_y_axis,), dtype=np.float64)\n', (15761, 15800), True, 'import numpy as np\n'), ((15823, 15870), 'numpy.zeros', 'np.zeros', ([], {'shape': '(len_y_axis,)', 'dtype': 'np.float64'}), '(shape=(len_y_axis,), dtype=np.float64)\n', (15831, 15870), True, 'import numpy as np\n'), ((17206, 17253), 'numpy.zeros', 'np.zeros', ([], {'shape': '(len_y_axis,)', 'dtype': 'np.float64'}), '(shape=(len_y_axis,), dtype=np.float64)\n', (17214, 17253), True, 'import numpy as np\n'), ((18859, 18880), 'numpy.array', 'np.array', (['new_indices'], {}), '(new_indices)\n', (18867, 18880), True, 'import numpy as np\n'), ((17366, 17415), 'numpy.array', 'np.array', (['self.source.data[key]'], {'dtype': 'np.float64'}), '(self.source.data[key], dtype=np.float64)\n', (17374, 17415), True, 'import numpy as np\n'), ((5224, 5248), 'numpy.where', 'np.where', (['(x_ == source_x)'], {}), '(x_ == source_x)\n', (5232, 5248), True, 'import numpy as np\n'), ((11480, 11548), 'numpy.array', 'np.array', (['datatile[key][0].loc[datatile_indices, datatile_index_max]'], 
{}), '(datatile[key][0].loc[datatile_indices, datatile_index_max])\n', (11488, 11548), True, 'import numpy as np\n'), ((11693, 11761), 'numpy.array', 'np.array', (['datatile[key][1].loc[datatile_indices, datatile_index_max]'], {}), '(datatile[key][1].loc[datatile_indices, datatile_index_max])\n', (11701, 11761), True, 'import numpy as np\n'), ((12946, 12985), 'numpy.array', 'np.array', (['(datatile_max0 - datatile_min0)'], {}), '(datatile_max0 - datatile_min0)\n', (12954, 12985), True, 'import numpy as np\n'), ((13359, 13398), 'numpy.array', 'np.array', (['(datatile_max1 - datatile_min1)'], {}), '(datatile_max1 - datatile_min1)\n', (13367, 13398), True, 'import numpy as np\n'), ((18907, 18977), 'numpy.round', 'np.round', (['((new_indices - active_chart.min_value) / active_chart.stride)'], {}), '((new_indices - active_chart.min_value) / active_chart.stride)\n', (18915, 18977), True, 'import numpy as np\n'), ((21649, 21674), 'numpy.array', 'np.array', (['datatile_result'], {}), '(datatile_result)\n', (21657, 21674), True, 'import numpy as np\n'), ((13948, 13985), 'numpy.array', 'np.array', (['(datatile_max - datatile_min)'], {}), '(datatile_max - datatile_min)\n', (13956, 13985), True, 'import numpy as np\n'), ((14663, 14688), 'numpy.array', 'np.array', (['datatile_result'], {}), '(datatile_result)\n', (14671, 14688), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 20:08:46 2020
@author: ryanday
"""
# -*- coding: utf-8 -*-
#Created on Thu Feb 01 08:56:54 2018
#@author: ryanday
#MIT License
#Copyright (c) 2018 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
import chinook.klib as K_lib
import chinook.Ylm as Ylm
# Use a larger default font size for every figure produced by this module.
rcParams.update({'font.size':14})
'''
Library for different operators of possible interest in calculating, diagnostics, etc for a material of interest
'''
def colourmaps():
    '''
    Plot utility: register copies of a few standard colourmaps whose alpha
    channel fades to fully transparent at their zero value.
    '''
    n_colours = 256
    sequential = [
        (cm.Blues, 'Blues_alpha'),
        (cm.Greens, 'Greens_alpha'),
        (cm.Reds, 'Reds_alpha'),
        (cm.Purples, 'Purples_alpha'),
        (cm.Greys, 'Greys_alpha'),
    ]
    for base_map, alpha_name in sequential:
        colours = base_map(range(n_colours))
        # Alpha ramps linearly from transparent (low) to opaque (high).
        colours[:, -1] = np.linspace(0, 1, n_colours)
        registered = LinearSegmentedColormap.from_list(
            name=alpha_name, colors=colours)
        plt.register_cmap(cmap=registered)
    # Diverging map: transparent at the centre, opaque at both extremes.
    colours = cm.RdBu(range(n_colours))
    colours[:, -1] = abs(np.linspace(-1, 1, n_colours))
    registered = LinearSegmentedColormap.from_list(
        name='RdBu_alpha', colors=colours)
    plt.register_cmap(cmap=registered)
colourmaps()
def LSmat(TB,axis=None):
    '''
    Generate an arbitary L.S type matrix for a given basis. Uses same *Yproj* as
    the *HSO* in the *chinook.H_library*, but is otherwise different, as it supports
    projection onto specific axes, in addition to the full vector dot product operator.

    Otherwise, the LiSi matrix is computed with i the axis index.
    To do this, a linear combination of L+S+,L-S-,L+S-,L-S+,LzSz terms are used to compute.

    In the factors dictionary, the weight of these terms is defined.
    The keys are tuples of (L+/-/z,S+/-/z) in a bit
    of a cryptic way. For L, range (0,1,2) ->(-1,0,1)
    and for S range (-1,0,1) = S1-S2 with S1/2 = +/- 1 here

    L+,L-,Lz matrices are defined for each l shell in the basis,
    transformed into the basis of cubic harmonics.
    The nonzero terms will then just be used along with the spin and
    weighted by the factor value, and slotted into
    a len(basis)xlen(basis) matrix HSO

    *args*:
        - **TB**: tight-binding object, as defined in TB_lib.py

        - **axis**: axis for calculation as either 'x','y','z',None,
        or float (angle in the x-y plane)

    *return*:
        - **HSO**: (len(basis)xlen(basis)) numpy array of complex float

    ***
    '''
    # Spherical -> cubic harmonic transformations keyed by (atom, n, l, spin).
    Md = Ylm.Yproj(TB.basis)
    # Position of each cubic-harmonic label within its l-shell.
    normal_order = {0:{'':0},1:{'x':0,'y':1,'z':2},2:{'xz':0,'yz':1,'xy':2,'ZR':3,'XY':4},3:{'z3':0,'xz2':1,'yz2':2,'xzy':3,'zXY':4,'xXY':5,'yXY':6}}
    # Default weights: full L.S = 0.5*(L+S- + L-S+) + LzSz
    # (key encoding: (L index with 0/1/2 -> -/z/+, S index ds = S1-S2)).
    factors = {(2,-1):0.5,(0,1):0.5,(1,0):1.0}
    L,al = {},[]
    HSO = np.zeros((len(TB.basis),len(TB.basis)),dtype=complex)
    for o in TB.basis:
        if (o.atom,o.n,o.l) not in al:
            # Build [L-, Lz, L+] once per (atom, n, l) shell, rotated into
            # the cubic-harmonic basis of that shell.
            al.append((o.atom,o.n,o.l))
            Mdn = Md[(o.atom,o.n,o.l,-1)]
            Mup = Md[(o.atom,o.n,o.l,1)]
            Mdnp = np.linalg.inv(Mdn)
            Mupp = np.linalg.inv(Mup)
            L[(o.atom,o.n,o.l)] = [np.dot(Mupp,np.dot(Lm(o.l),Mdn)),np.dot(Mdnp,np.dot(Lz(o.l),Mdn)),np.dot(Mdnp,np.dot(Lp(o.l),Mup))]
    if axis is not None:
        try:
            # Numeric axis: interpreted as an angle in the x-y plane.
            ax = float(axis)
            factors = {(0,1):0.25,(2,-1):0.25,(2,1):0.25*np.exp(-1.0j*2*ax),(0,-1):0.25*np.exp(1.0j*2*ax)}
        except ValueError:
            # Otherwise a Cartesian axis label is expected.
            if axis=='x':
                factors = {(0,1):0.25,(2,-1):0.25,(2,1):0.25,(0,-1):0.25}
            elif axis=='y':
                factors = {(0,1):0.25,(2,-1):0.25,(2,1):-0.25,(0,-1):-0.25}
            elif axis=='z':
                factors = {(1,0):1.0}
            else:
                print('Invalid axis entry')
                return None
    for o1 in TB.basis:
        for o2 in TB.basis:
            if o1.index<=o2.index:
                LS_val = 0.0
                # Only orbitals on the same site with matching n and l couple.
                if np.linalg.norm(o1.pos-o2.pos)<0.0001 and o1.l==o2.l and o1.n==o2.n:
                    inds = (normal_order[o1.l][o1.label[2:]],normal_order[o2.l][o2.label[2:]])
                    ds = (o1.spin-o2.spin)/2.
                    if ds==0:
                        s=0.5*np.sign(o1.spin)
                    else:
                        s=1.0
                    # Sum the weighted L-matrix elements whose spin part
                    # matches the spin difference ds.
                    for f in factors:
                        if f[1]==ds:
                            LS_val+=factors[f]*L[(o1.atom,o1.n,o1.l)][f[0]][inds]*s
                    HSO[o1.index,o2.index]+=LS_val
                    if o1.index!=o2.index:
                        # Fill the lower triangle by Hermitian conjugation.
                        HSO[o2.index,o1.index]+=np.conj(LS_val)
    return HSO
def Lp(l):
    '''
    L+ operator in the l,m_l basis, organized with
    (0,0) = |l,l>, (2*l,2*l) = |l,-l>
    The nonzero elements are on the upper diagonal

    *arg*:
        - **l**: int orbital angular momentum

    *return*:
        - **M**: numpy array (2*l+1,2*l+1) of real float

    ***
    '''
    # <l,m+1|L+|l,m> = sqrt(l(l+1) - m(m+1)); with the basis ordered from
    # m=l down to m=-l these amplitudes sit on the first superdiagonal.
    amplitudes = [np.sqrt(l*(l+1)-(l-m)*(l-m+1)) for m in range(1, 2*l+1)]
    return np.diag(amplitudes, 1)
def Lm(l):
    '''
    L- operator in the l,m_l basis, organized with
    (0,0) = |l,l>, (2*l,2*l) = |l,-l>
    The nonzero elements are on the lower diagonal

    *arg*:
        - **l**: int orbital angular momentum

    *return*:
        - **M**: numpy array (2*l+1,2*l+1) of real float

    ***
    '''
    # <l,m-1|L-|l,m> = sqrt(l(l+1) - m(m-1)); with the basis ordered from
    # m=l down to m=-l these amplitudes sit on the first subdiagonal.
    amplitudes = [np.sqrt(l*(l+1)-(l-m)*(l-m-1)) for m in range(0, 2*l)]
    return np.diag(amplitudes, -1)
def Lz(l):
    '''
    Lz operator in the l,m_l basis

    *arg*:
        - **l**: int orbital angular momentum

    *return*:
        - numpy array (2*l+1,2*l+1)

    ***
    '''
    # Diagonal of m-values running from +l down to -l; float dtype matches
    # the identity-based construction this replaces.
    return np.diag(np.arange(l, -l - 1, -1, dtype=float))
def fatbs(proj,TB,Kobj=None,vlims=None,Elims=None,degen=False,ax=None,colourbar=True,plot=True):
    '''
    Fat band projections. User denotes which orbital index projection is of interest
    Projection passed either as an Nx1 or Nx2 array of float. If Nx2, first column is
    the indices of the desired orbitals, the second column is the weight. If Nx1, then
    the weights are all taken to be equal

    *args*:
        - **proj**: iterable of projections, to be passed as either a 1-dimensional
        with indices of projection only, OR, 2-dimensional, with the second column giving
        the amplitude of projection (for linear-combination projection)

        - **TB**: tight-binding object

    *kwargs*:
        - **Kobj**: Momentum object, as defined in *chinook.klib.py*

        - **vlims**: tuple of 2 float, limits of the colorscale for plotting, default to (0,1)

        - **Elims**: tuple of 2 float, limits of vertical scale for plotting

        - **plot**: bool, default to True, plot, or not plot the result

        - **degen**: bool, True if bands are degenerate, sum over adjacent bands

        - **ax**: matplotlib Axes, option for plotting onto existing Axes

        - **colourbar**: bool, plot colourbar on axes, default to True

    *return*:
        - **Ovals**: numpy array of float, len(Kobj.kpts)*len(TB.basis)

    ***
    '''
    proj = np.array(proj)
    # Start from the identity: a diagonal projector onto orbital indices.
    O = np.identity(len(TB.basis))
    if len(np.shape(proj))<2:
        # 1-D input: promote to (index, weight) pairs with unit weights.
        tmp = np.zeros((len(proj),2))
        tmp[:,0] = proj
        tmp[:,1] = 1.0
        proj = tmp
    pvec = np.zeros(len(TB.basis),dtype=complex)
    try:
        # Scatter the weights onto the requested indices and mask the
        # identity so only those orbitals contribute.
        pvec[np.real(proj[:,0]).astype(int)] = proj[:,1]
        O = O*pvec
        Ovals,ax = O_path(O,TB,Kobj,vlims,Elims,degen=degen,ax=ax,colourbar=colourbar,plot=plot)
    except ValueError:
        print('projections need to be passed as list or array of type [index,projection]')
        Ovals = None
    return Ovals,ax
def O_path(Operator,TB,Kobj=None,vlims=None,Elims=None,degen=False,plot=True,ax=None,colourbar=True,colourmap=None):
    '''
    Compute and plot the expectation value of an user-defined operator along a k-path
    Option of summing over degenerate bands (for e.g. fat bands) with degen boolean flag

    *args*:
        - **Operator**: matrix representation of the operator (numpy array len(basis), len(basis) of complex float)

        - **TB**: Tight binding object from TB_lib

    *kwargs*:
        - **Kobj**: Momentum object, as defined in *chinook.klib.py*

        - **vlims**: tuple of 2 float, limits of the colourscale for plotting,
        if default value passed, will compute a reasonable range

        - **Elims**: tuple of 2 float, limits of vertical scale for plotting

        - **plot**: bool, default to True, plot, or not plot the result

        - **degen**: bool, True if bands are degenerate, sum over adjacent bands

        - **ax**: matplotlib Axes, option for plotting onto existing Axes

        - **colourbar**: bool, plot colorbar on axes, default to True

        - **colourmap**: matplotlib colourmap,i.e. LinearSegmentedColormap

    *return*:
        - **O_vals**: the numpy array of float, (len Kobj x len basis) expectation values

        - **ax**: matplotlib Axes, allowing for user to further modify

    ***
    '''
    if np.shape(Operator)!=(len(TB.basis),len(TB.basis)):
        print('ERROR! Ensure your operator has the same dimension as the basis.')
        return None
    # Lazily diagonalize: use existing eigenvectors if present, otherwise
    # solve over TB's own Kobj, otherwise over the Kobj passed in here.
    try:
        np.shape(TB.Evec)
    except AttributeError:
        try:
            len(TB.Kobj.kpts)
            TB.solve_H()
        except AttributeError:
            TB.Kobj = Kobj
            try:
                TB.solve_H()
            except TypeError:
                print('ERROR! Please include a K-object, or diagonalize your tight-binding model over a k-path first to initialize the eigenvectors')
                return None
    # <psi|O|psi> for every k-point and band, via two einsum contractions.
    right_product = np.einsum('ij,ljm->lim',Operator,TB.Evec)
    O_vals = np.einsum('ijk,ijk->ik',np.conj(TB.Evec),right_product)
    O_vals = np.real(O_vals) #any Hermitian operator must have real-valued expectation value--discard any imaginary component
    if degen:
        # Average expectation values over degenerate subspaces.
        O_vals = degen_Ovals(O_vals,TB.Eband)
    if ax is None:
        fig,ax = plt.subplots(1,1)
        fig.set_tight_layout(False)
    # Mark high-symmetry points along the k-path.
    for b in TB.Kobj.kcut_brk:
        ax.axvline(x = b,color = 'grey',ls='--',lw=1.0)
    if colourmap is None:
        # Diverging data gets a diverging map; otherwise sequential.
        if np.sign(O_vals.min())<0 and np.sign(O_vals.max())>0:
            colourmap = 'RdBu_alpha'
        else:
            colourmap = 'Blues_alpha'
    if vlims is None:
        # Auto colour limits with 10% padding on either side.
        vlims = (O_vals.min()-(O_vals.max()-O_vals.min())/10.0,O_vals.max()+(O_vals.max()-O_vals.min())/10.0)
    if Elims is None:
        Elims = (TB.Eband.min()-(TB.Eband.max()-TB.Eband.min())/10.0,TB.Eband.max()+(TB.Eband.max()-TB.Eband.min())/10.0)
    if plot:
        # Thin band lines overlaid with operator-coloured markers.
        for p in range(np.shape(O_vals)[1]):
            ax.plot(TB.Kobj.kcut,TB.Eband[:,p],color='k',lw=0.2)
            O_line=ax.scatter(TB.Kobj.kcut,TB.Eband[:,p],c=O_vals[:,p],cmap=colourmap,marker='.',lw=0,s=80,vmin=vlims[0],vmax=vlims[1])
        ax.axis([TB.Kobj.kcut[0],TB.Kobj.kcut[-1],Elims[0],Elims[1]])
        ax.set_xticks(TB.Kobj.kcut_brk)
        ax.set_xticklabels(TB.Kobj.labels)
        if colourbar:
            plt.colorbar(O_line,ax=ax)
        ax.set_ylabel("Energy (eV)")
    return O_vals,ax
def degen_Ovals(Oper_exp, Energy):
    '''
    In the presence of degeneracy, we want to average over the
    evaluated orbital expectation values--numerically, the degenerate
    subspace can be arbitrarily diagonalized during numpy.linalg.eigh.
    All degeneracies are found, and the expectation values averaged.

    *args*:
        - **Oper_exp**: numpy array of float, operator expectations

        - **Energy**: numpy array of float, energy eigenvalues.

    ***
    '''
    averaged = Oper_exp.copy()
    tolerance = 1e-6
    n_kpts, n_bands = np.shape(Oper_exp)
    for ki in range(n_kpts):
        group_energy = Energy[ki, 0]
        group_start = 0
        group_size = 1
        for bi in range(1, n_bands):
            gap = abs(Energy[ki, bi] - group_energy)
            if gap < tolerance:
                group_size += 1
            # Close the group when a new energy appears, or at the last band.
            if gap >= tolerance or bi == n_bands - 1:
                window = slice(group_start, group_start + group_size)
                averaged[ki, window] = np.mean(averaged[ki, window])
                group_start = bi
                group_size = 1
                group_energy = Energy[ki, bi]
    return averaged
def O_surf(O,TB,ktuple,Ef,tol,vlims=(0,0),ax=None):
    '''
    Compute and plot the expectation value of an user-defined operator over
    a surface of constant-binding energy

    Option of summing over degenerate bands (for e.g. fat bands) with degen boolean flag

    *args*:
        - **O**: matrix representation of the operator (numpy array len(basis), len(basis) of complex float)

        - **TB**: Tight binding object from *chinook.TB_lib.py*

        - **ktuple**: momentum range for mesh:
        ktuple[0] = (x0,xn,n),ktuple[1]=(y0,yn,n),ktuple[2]=kz

    *kwargs*:
        - **vlims**: limits for the colourscale (optional argument), will choose
        a reasonable set of limits if none passed by user

        - **ax**: matplotlib Axes, option for plotting onto existing Axes

    *return*:
        - **pts**: the numpy array of expectation values, of shape Nx3, with first
        two dimensions the kx,ky coordinates of the point, and the third the expectation
        value.

        - **ax**: matplotlib Axes, allowing for further user modifications

    ***
    '''
    # BUGFIX: FS returns four values (pts, Eband, Evec, ax); unpacking only
    # three raised ValueError. The Axes FS creates is discarded here.
    coords,Eb,Ev,_ = FS(TB,ktuple,Ef,tol)
    # Eigenvector of the specific band at each surface point; coords[:,2]
    # encodes (k-index * len(basis) + band-index) as a flat index.
    masked_Ev = np.array([Ev[int(coords[ei,2]/len(TB.basis)),:,int(coords[ei,2]%len(TB.basis))] for ei in range(len(coords))])
    # Expectation value <psi|O|psi> at each surface point.
    Ovals = np.sum(np.conj(masked_Ev)*np.dot(O,masked_Ev.T).T,1)
    if np.sign(Ovals.min())!=np.sign(Ovals.max()):
        # Diverging data gets a diverging colourmap.
        cmap = cm.RdBu
    else:
        cmap = cm.magma
    pts = np.array([[coords[ci,0],coords[ci,1],np.real(Ovals[ci])] for ci in range(len(coords))])
    if vlims==(0,0):
        # Auto colour limits with 10% padding on either side.
        vlims = (Ovals.min()-(Ovals.max()-Ovals.min())/10.0,Ovals.max()+(Ovals.max()-Ovals.min())/10.0)
    if ax is None:
        fig,ax = plt.subplots(1,1)
    ax.scatter(pts[:,0],pts[:,1],c=pts[:,2],cmap=cmap,s=200,vmin=vlims[0],vmax=vlims[1])
    ax.scatter(pts[:,0],pts[:,1],c='k',s=5)
    return pts,ax
def FS(TB,ktuple,Ef,tol,ax=None):
    '''
    A simplified form of Fermi surface extraction, for proper calculation of this,
    *chinook.FS_tetra.py* is preferred. This finds all points in kmesh within a
    tolerance of the constant energy level.

    *args*:
        - **TB**: tight-binding model object

        - **ktuple**: tuple of k limits, len (3), First and second should be iterable,
        define the limits and mesh of k for kx,ky, the third is constant, float for kz

        - **Ef**: float, energy of interest, eV

        - **tol**: float, energy tolerance, float

        - **ax**: matplotlib Axes, option for plotting onto existing Axes

    *return*:
        - **pts**: numpy array of len(N) x 3 indicating x,y, band index

        - **TB.Eband**: numpy array of float, energy spectrum

        - **TB.Evec**: numpy array of complex float, eigenvectors

        - **ax**: matplotlib Axes, for further user modification

    ***
    '''
    # Regular k-mesh in the plane kz = ktuple[2].
    x,y,z=np.linspace(*ktuple[0]),np.linspace(*ktuple[1]),ktuple[2]
    X,Y=np.meshgrid(x,y)
    k_arr,_ = K_lib.kmesh(0.0,X,Y,z)
    blen = len(TB.basis)
    # Diagonalize over the mesh by treating it as a flattened k-path.
    # NOTE: this overwrites TB.Kobj and TB.Eband in place.
    TB.Kobj = K_lib.kpath(k_arr)
    _,_ = TB.solve_H()
    TB.Eband = np.reshape(TB.Eband,(np.shape(TB.Eband)[-1]*np.shape(X)[0]*np.shape(X)[1]))
    pts = []
    for ei in range(len(TB.Eband)):
        if abs(TB.Eband[ei]-Ef)<tol:
            # Recover the (row, column) mesh position from the flat index;
            # each k-point contributes blen consecutive band energies.
            inds = (int(np.floor(np.floor(ei/blen)/np.shape(X)[1])),int(np.floor(ei/blen)%np.shape(X)[1]))
            pts.append([X[inds],Y[inds],ei])
    pts = np.array(pts)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax.scatter(pts[:,0],pts[:,1])
    return pts,TB.Eband,TB.Evec,ax
####################SOME STANDARD OPERATORS FOLLOW HERE: ######################
def LdotS(TB,axis=None,ax=None,colourbar=True):
    '''
    Convenience wrapper around **O_path**: builds the spin-orbit operator L.S
    (optionally projected along *axis*) and evaluates its expectation value
    for every band along the model's k-path.
    *args*:
        - **TB**: tight-binding obect
    *kwargs*:
        - **axis**: numpy array of 3 float, indicating axis, or None for full L.S
        - **ax**: matplotli.Axes object for plotting
        - **colourbar**: bool, display colourbar on plot
    *return*:
        - **O**: numpy array of Nxlen(basis) float, expectation value of operator
        on each band over the kpath of TB.Kobj.
    ***
    '''
    spin_orbit_operator = LSmat(TB,axis)
    return O_path(spin_orbit_operator,TB,TB.Kobj,ax=ax,colourbar=colourbar)
def Sz(TB,ax=None,colourbar=True):
    '''
    Convenience wrapper around **O_path**: evaluates the expectation value of
    the S_z spin operator for every band along the model's k-path.
    *args*:
        - **TB**: tight-binding obect
    *kwargs*:
        - **ax**: matplotlib.Axes plotting object
        - **colourbar**: bool, display colourbar on plot
    *return*:
        - **O**: numpy array of Nxlen(basis) float, expectation value of operator
        on each band over the kpath of TB.Kobj.
    ***
    '''
    sz_operator = S_vec(len(TB.basis),np.array([0,0,1]))
    return O_path(sz_operator,TB,TB.Kobj,ax=ax,colourbar=colourbar)
def surface_proj(basis,length):
    '''
    Operator for computing surface-projection of eigenstates. User passes the orbital basis
    and an extinction (1/e) length for the 'projection onto surface'. The operator
    is diagonal with exponential suppression based on depth.
    For use with SLAB geometry only
    *args*:
        - **basis**: list, orbital objects (each must expose a *depth* attribute)
        - **length**: float, extinction (1/e) length
    *return*:
        - **M**: numpy array of float, shape len(basis) x len(basis), diagonal
        attenuation operator
    ***
    '''
    # Per-orbital weight exp(-|depth| / length): deeper orbitals are suppressed.
    attenuation = np.exp(np.array([-abs(o.depth)/length for o in basis]))
    # Build the diagonal operator directly; equivalent to the previous
    # np.identity(len(basis)) * attenuation broadcast, without allocating
    # a dense identity first.
    return np.diag(attenuation)
def S_vec(LB,vec):
    '''
    Spin operator along an arbitrary direction can be written as
    n.S = nx Sx + ny Sy + nz Sz
    *args*:
        - **LB**: int, length of basis
        - **vec**: numpy array of 3 float, direction of spin projection
    *return*:
        - numpy array of complex float (LB by LB), spin operator matrix
    ***
    '''
    half = int(LB/2)
    spin_half = 0.5*np.identity(half,dtype=complex)
    s_x = np.zeros((LB,LB),dtype=complex)
    s_y = np.zeros((LB,LB),dtype=complex)
    s_z = np.zeros((LB,LB),dtype=complex)
    # Block structure: first `half` states are one spin species, last `half`
    # the other. Sign conventions match the original implementation.
    s_z[:half,:half] = -spin_half
    s_z[half:,half:] = spin_half
    s_x[:half,half:] = spin_half
    s_x[half:,:half] = spin_half
    s_y[:half,half:] = 1.0j*spin_half
    s_y[half:,:half] = -1.0j*spin_half
    return vec[0]*s_x + vec[1]*s_y + vec[2]*s_z
def is_numeric(a):
    '''
    Quick check if object is numeric
    *args*:
        - **a**: numeric, float/int
    *return*:
        - bool, if numeric True, else False
    ***
    '''
    if a is None:
        return False
    try:
        float(a)
        return True
    except (ValueError, TypeError):
        # ValueError: non-numeric string; TypeError: objects that cannot be
        # cast to float at all (e.g. lists) -- previously this leaked a
        # TypeError to the caller instead of returning False.
        return False
| [
"numpy.floor",
"numpy.einsum",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.linalg.norm",
"chinook.klib.kpath",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.register_cmap",
"numpy.meshgrid",
"matplotlib.rcParams.upd... | [((1488, 1522), 'matplotlib.rcParams.update', 'rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (1503, 1522), False, 'from matplotlib import rcParams\n'), ((2279, 2347), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': '"""RdBu_alpha"""', 'colors': 'col_arr'}), "(name='RdBu_alpha', colors=col_arr)\n", (2312, 2347), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((2351, 2382), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'map_obj'}), '(cmap=map_obj)\n', (2368, 2382), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3730), 'chinook.Ylm.Yproj', 'Ylm.Yproj', (['TB.basis'], {}), '(TB.basis)\n', (3720, 3730), True, 'import chinook.Ylm as Ylm\n'), ((6151, 6183), 'numpy.zeros', 'np.zeros', (['(2 * l + 1, 2 * l + 1)'], {}), '((2 * l + 1, 2 * l + 1))\n', (6159, 6183), True, 'import numpy as np\n'), ((6183, 6205), 'numpy.arange', 'np.arange', (['(0)', '(2 * l)', '(1)'], {}), '(0, 2 * l, 1)\n', (6192, 6205), True, 'import numpy as np\n'), ((6668, 6700), 'numpy.zeros', 'np.zeros', (['(2 * l + 1, 2 * l + 1)'], {}), '((2 * l + 1, 2 * l + 1))\n', (6676, 6700), True, 'import numpy as np\n'), ((6700, 6726), 'numpy.arange', 'np.arange', (['(1)', '(2 * l + 1)', '(1)'], {}), '(1, 2 * l + 1, 1)\n', (6709, 6726), True, 'import numpy as np\n'), ((8626, 8640), 'numpy.array', 'np.array', (['proj'], {}), '(proj)\n', (8634, 8640), True, 'import numpy as np\n'), ((11384, 11427), 'numpy.einsum', 'np.einsum', (['"""ij,ljm->lim"""', 'Operator', 'TB.Evec'], {}), "('ij,ljm->lim', Operator, TB.Evec)\n", (11393, 11427), True, 'import numpy as np\n'), ((11508, 11523), 'numpy.real', 'np.real', (['O_vals'], {}), '(O_vals)\n', (11515, 11523), True, 'import numpy as np\n'), ((17068, 17085), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (17079, 17085), True, 'import numpy as np\n'), ((17108, 17133), 'chinook.klib.kmesh', 
'K_lib.kmesh', (['(0.0)', 'X', 'Y', 'z'], {}), '(0.0, X, Y, z)\n', (17119, 17133), True, 'import chinook.klib as K_lib\n'), ((17178, 17196), 'chinook.klib.kpath', 'K_lib.kpath', (['k_arr'], {}), '(k_arr)\n', (17189, 17196), True, 'import chinook.klib as K_lib\n'), ((17566, 17579), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (17574, 17579), True, 'import numpy as np\n'), ((20378, 20414), 'numpy.zeros', 'np.zeros', (['(3, LB, LB)'], {'dtype': 'complex'}), '((3, LB, LB), dtype=complex)\n', (20386, 20414), True, 'import numpy as np\n'), ((2029, 2050), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nc'], {}), '(0, 1, nc)\n', (2040, 2050), True, 'import numpy as np\n'), ((2067, 2132), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': 'cname[ii]', 'colors': 'col_arr'}), '(name=cname[ii], colors=col_arr)\n', (2100, 2132), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((2149, 2180), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'map_obj'}), '(cmap=map_obj)\n', (2166, 2180), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2265), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nc'], {}), '(-1, 1, nc)\n', (2254, 2265), True, 'import numpy as np\n'), ((7060, 7082), 'numpy.identity', 'np.identity', (['(2 * l + 1)'], {}), '(2 * l + 1)\n', (7071, 7082), True, 'import numpy as np\n'), ((10740, 10758), 'numpy.shape', 'np.shape', (['Operator'], {}), '(Operator)\n', (10748, 10758), True, 'import numpy as np\n'), ((10910, 10927), 'numpy.shape', 'np.shape', (['TB.Evec'], {}), '(TB.Evec)\n', (10918, 10927), True, 'import numpy as np\n'), ((11463, 11479), 'numpy.conj', 'np.conj', (['TB.Evec'], {}), '(TB.Evec)\n', (11470, 11479), True, 'import numpy as np\n'), ((11724, 11742), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (11736, 11742), True, 'import matplotlib.pyplot as plt\n'), ((15762, 15780), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (15774, 15780), True, 'import matplotlib.pyplot as plt\n'), ((17002, 17025), 'numpy.linspace', 'np.linspace', (['*ktuple[0]'], {}), '(*ktuple[0])\n', (17013, 17025), True, 'import numpy as np\n'), ((17026, 17049), 'numpy.linspace', 'np.linspace', (['*ktuple[1]'], {}), '(*ktuple[1])\n', (17037, 17049), True, 'import numpy as np\n'), ((17613, 17625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17623, 17625), True, 'import matplotlib.pyplot as plt\n'), ((19114, 19133), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (19122, 19133), True, 'import numpy as np\n'), ((20324, 20361), 'numpy.identity', 'np.identity', (['numstates'], {'dtype': 'complex'}), '(numstates, dtype=complex)\n', (20335, 20361), True, 'import numpy as np\n'), ((4213, 4231), 'numpy.linalg.inv', 'np.linalg.inv', (['Mdn'], {}), '(Mdn)\n', (4226, 4231), True, 'import numpy as np\n'), ((4251, 4269), 'numpy.linalg.inv', 'np.linalg.inv', (['Mup'], {}), '(Mup)\n', (4264, 4269), True, 'import numpy as np\n'), ((6235, 6279), 'numpy.sqrt', 'np.sqrt', (['(l * (l + 1) - (l - m) * (l - m + 1))'], {}), '(l * (l + 1) - (l - m) * (l - m + 1))\n', (6242, 6279), True, 'import numpy as np\n'), ((6750, 6794), 'numpy.sqrt', 'np.sqrt', (['(l * (l + 1) - (l - m) * (l - m - 1))'], {}), '(l * (l + 1) - (l - m) * (l - m - 1))\n', (6757, 6794), True, 'import numpy as np\n'), ((8697, 8711), 'numpy.shape', 'np.shape', (['proj'], {}), '(proj)\n', (8705, 8711), True, 'import numpy as np\n'), ((12795, 12822), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['O_line'], {'ax': 'ax'}), '(O_line, ax=ax)\n', (12807, 12822), True, 'import matplotlib.pyplot as plt\n'), ((13462, 13480), 'numpy.shape', 'np.shape', (['Oper_exp'], {}), '(Oper_exp)\n', (13470, 13480), True, 'import numpy as np\n'), ((15332, 15350), 'numpy.conj', 'np.conj', (['masked_Ev'], {}), '(masked_Ev)\n', (15339, 15350), True, 'import numpy as np\n'), ((12383, 12399), 'numpy.shape', 
'np.shape', (['O_vals'], {}), '(O_vals)\n', (12391, 12399), True, 'import numpy as np\n'), ((13577, 13595), 'numpy.shape', 'np.shape', (['Oper_exp'], {}), '(Oper_exp)\n', (13585, 13595), True, 'import numpy as np\n'), ((13799, 13841), 'numpy.mean', 'np.mean', (['O_copy[ki, start:start + counter]'], {}), '(O_copy[ki, start:start + counter])\n', (13806, 13841), True, 'import numpy as np\n'), ((15351, 15373), 'numpy.dot', 'np.dot', (['O', 'masked_Ev.T'], {}), '(O, masked_Ev.T)\n', (15357, 15373), True, 'import numpy as np\n'), ((15539, 15557), 'numpy.real', 'np.real', (['Ovals[ci]'], {}), '(Ovals[ci])\n', (15546, 15557), True, 'import numpy as np\n'), ((17294, 17305), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (17302, 17305), True, 'import numpy as np\n'), ((4529, 4551), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * ax)'], {}), '(-1.0j * 2 * ax)\n', (4535, 4551), True, 'import numpy as np\n'), ((4560, 4581), 'numpy.exp', 'np.exp', (['(1.0j * 2 * ax)'], {}), '(1.0j * 2 * ax)\n', (4566, 4581), True, 'import numpy as np\n'), ((8891, 8910), 'numpy.real', 'np.real', (['proj[:, 0]'], {}), '(proj[:, 0])\n', (8898, 8910), True, 'import numpy as np\n'), ((17256, 17274), 'numpy.shape', 'np.shape', (['TB.Eband'], {}), '(TB.Eband)\n', (17264, 17274), True, 'import numpy as np\n'), ((17279, 17290), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (17287, 17290), True, 'import numpy as np\n'), ((5101, 5132), 'numpy.linalg.norm', 'np.linalg.norm', (['(o1.pos - o2.pos)'], {}), '(o1.pos - o2.pos)\n', (5115, 5132), True, 'import numpy as np\n'), ((5765, 5780), 'numpy.conj', 'np.conj', (['LS_val'], {}), '(LS_val)\n', (5772, 5780), True, 'import numpy as np\n'), ((17471, 17490), 'numpy.floor', 'np.floor', (['(ei / blen)'], {}), '(ei / blen)\n', (17479, 17490), True, 'import numpy as np\n'), ((5391, 5407), 'numpy.sign', 'np.sign', (['o1.spin'], {}), '(o1.spin)\n', (5398, 5407), True, 'import numpy as np\n'), ((13723, 13741), 'numpy.shape', 'np.shape', (['Oper_exp'], {}), '(Oper_exp)\n', 
(13731, 13741), True, 'import numpy as np\n'), ((17432, 17451), 'numpy.floor', 'np.floor', (['(ei / blen)'], {}), '(ei / blen)\n', (17440, 17451), True, 'import numpy as np\n'), ((17489, 17500), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (17497, 17500), True, 'import numpy as np\n'), ((17450, 17461), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (17458, 17461), True, 'import numpy as np\n')] |
import numpy as np
import torch
from scipy.sparse import diags
from scipy.sparse.linalg import cg, LinearOperator
from scipy.special import softmax
def SSL_clustering(alpha, L, Yobs, w, eta, TOL=1e-9,MAXITER=10000):
    """
    Minimizes the objective function:
    U = arg min_Y 1/2 ||Y - Yobs||_W^2 + alpha/2 * Y'*L*Y
    s.t. Ye = 0
    which has the closed form solution:
    U = (W + alpha*L)^-1 W * Yobs * C
    :param alpha: hyperparameter
    :param L: Graph Laplacian
    :param Yobs: labelled data (numpy array or torch.Tensor, shape n x nc)
    :param w: per-point weights placed on the diagonal of W
    :param eta: power of the Laplacian regularizer, one of {1, 2, 3, 4}
    :param TOL: relative tolerance passed to the conjugate-gradient solver
    :param MAXITER: maximum CG iterations per class column
    :return: U, numpy array of shape n x nc
    """
    if isinstance(Yobs, torch.Tensor):
        Yobs = Yobs.numpy()
    n,nc = Yobs.shape
    W = diags(w)
    # Matrix-free operator for W + alpha * L^eta (with one transpose inserted,
    # matching the original formulation for each power).
    if eta == 1:
        H_lambda = lambda x: alpha*((L @ x)) + (W @ x)
    elif eta == 2:
        H_lambda = lambda x: alpha*(L.T @ (L @ x)) + (W @ x)
    elif eta == 3:
        H_lambda = lambda x: alpha*(L @ (L.T @ (L @ x))) + (W @ x)
    elif eta == 4:
        H_lambda = lambda x: alpha * (L @ (L @ (L.T @ (L @ x)))) + (W @ x)
    else:
        raise NotImplementedError('Not implemented')
    A = LinearOperator((n, n), H_lambda)
    b = W @ Yobs
    U = np.empty_like(Yobs)
    # Solve one CG system per class column.
    for i in range(nc):
        try:
            # SciPy >= 1.12: the convergence keyword is `rtol`
            # (`tol` was deprecated in 1.12 and removed in 1.14).
            U[:,i], stat = cg(A, b[:,i], rtol=TOL, maxiter=MAXITER)
        except TypeError:
            # Older SciPy only accepts `tol`.
            U[:,i], stat = cg(A, b[:,i], tol=TOL, maxiter=MAXITER)
    return U
def convert_pseudo_to_prob(v,use_softmax=False):
    """
    Maps a pseudo probability array onto a probability array.
    :param v: pseudo probability array, subject to ve = 0
    :param use_softmax: if True, apply a row-wise softmax; otherwise shift each
        row to be non-negative and rescale by the row's L1 norm
    :return: array of the same shape as v with row-wise (approximate) probabilities
    """
    if use_softmax:
        return softmax(v,axis=1)
    row_shift = v - np.min(v,axis=1)[:,None]
    row_scale = np.sum(np.abs(v), axis=1)[:,None] + 1e-10
    return row_shift / row_scale
| [
"scipy.sparse.diags",
"numpy.abs",
"scipy.sparse.linalg.cg",
"numpy.empty_like",
"numpy.min",
"scipy.sparse.linalg.LinearOperator",
"scipy.special.softmax"
] | [((799, 807), 'scipy.sparse.diags', 'diags', (['w'], {}), '(w)\n', (804, 807), False, 'from scipy.sparse import diags\n'), ((1211, 1243), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(n, n)', 'H_lambda'], {}), '((n, n), H_lambda)\n', (1225, 1243), False, 'from scipy.sparse.linalg import cg, LinearOperator\n'), ((1269, 1288), 'numpy.empty_like', 'np.empty_like', (['Yobs'], {}), '(Yobs)\n', (1282, 1288), True, 'import numpy as np\n'), ((1336, 1376), 'scipy.sparse.linalg.cg', 'cg', (['A', 'b[:, i]'], {'tol': 'TOL', 'maxiter': 'MAXITER'}), '(A, b[:, i], tol=TOL, maxiter=MAXITER)\n', (1338, 1376), False, 'from scipy.sparse.linalg import cg, LinearOperator\n'), ((1659, 1677), 'scipy.special.softmax', 'softmax', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (1666, 1677), False, 'from scipy.special import softmax\n'), ((1711, 1720), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (1717, 1720), True, 'import numpy as np\n'), ((1753, 1770), 'numpy.min', 'np.min', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (1759, 1770), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import os
import random
import numpy as np
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, SequentialSampler
from tqdm import tqdm, trange
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
BertForMaskedLM,
PreTrainedModel,
PreTrainedTokenizer,
RobertaForMaskedLM,
)
def return_extended_attention_mask(attention_mask, dtype):
    """Broadcast a 2D or 3D attention mask into the additive 4D form.

    Positions where the incoming mask is 1 map to 0.0 (keep); positions where
    it is 0 map to -10000.0 (suppress when added to attention scores).
    """
    n_dims = attention_mask.dim()
    if n_dims == 3:
        extended = attention_mask[:, None, :, :]
    elif n_dims == 2:
        extended = attention_mask[:, None, None, :]
    else:
        raise ValueError("Wrong shape for input_ids or attention_mask")
    return (1.0 - extended.to(dtype=dtype)) * -10000.0
class GuideHead(nn.Module):
    """Attention head that scores cross-lingual token alignments.

    Computes attention between source and target hidden states, from which it
    either extracts an alignment matrix (inference) or a supervision loss
    against a `guide` alignment (training).
    """

    def __init__(self, pad_id, cls_id, sep_id):
        super().__init__()
        # Special-token ids that must be excluded from alignment.
        self.pad_id = pad_id
        self.cls_id = cls_id
        self.sep_id = sep_id

    def transpose_for_scores(self, x):
        # Insert a singleton "head" axis: (batch, seq, dim) -> (batch, 1, seq, dim).
        new_x_shape = x.size()[:-1] + (1, x.size(-1))
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states_src,
        hidden_states_tgt,
        inputs_src,
        inputs_tgt,
        guide=None,
        extraction="softmax",
        softmax_threshold=0.001,
        train_so=True,
        train_co=False,
        output_prob=False,
    ):
        """If `guide` is None, return the extracted alignment matrix (and,
        optionally, alignment probabilities); otherwise return the training
        loss (self-training objective + optional consistency objective)."""
        # mask
        # 1.0 at PAD/CLS/SEP positions, 0.0 at real tokens.
        attention_mask_src = (
            (inputs_src == self.pad_id)
            + (inputs_src == self.cls_id)
            + (inputs_src == self.sep_id)
        ).float()
        attention_mask_tgt = (
            (inputs_tgt == self.pad_id)
            + (inputs_tgt == self.cls_id)
            + (inputs_tgt == self.sep_id)
        ).float()
        # Number of real (non-special) tokens per sentence.
        len_src = torch.sum(1 - attention_mask_src, -1)
        len_tgt = torch.sum(1 - attention_mask_tgt, -1)
        # Additive masks: 0 for real tokens, -10000 for special tokens.
        attention_mask_src = return_extended_attention_mask(
            1 - attention_mask_src, hidden_states_src.dtype
        )
        attention_mask_tgt = return_extended_attention_mask(
            1 - attention_mask_tgt, hidden_states_src.dtype
        )
        # qkv
        query_src = self.transpose_for_scores(hidden_states_src)
        query_tgt = self.transpose_for_scores(hidden_states_tgt)
        key_tgt = query_tgt
        # att
        # Raw src->tgt similarity scores; masked separately per direction.
        attention_scores = torch.matmul(query_src, key_tgt.transpose(-1, -2))
        attention_scores_src = attention_scores + attention_mask_tgt
        attention_scores_tgt = attention_scores + attention_mask_src.transpose(-1, -2)
        # Only the softmax extraction scheme is supported here.
        assert extraction == "softmax"
        # Softmax over target tokens (src view) / over source tokens (tgt view).
        attention_probs_src = (
            nn.Softmax(dim=-1)(attention_scores_src)
            if extraction == "softmax"
            else None
        )
        attention_probs_tgt = (
            nn.Softmax(dim=-2)(attention_scores_tgt)
            if extraction == "softmax"
            else None
        )
        if guide is None:
            # Inference: align (i, j) if both directional probabilities exceed
            # the threshold (bidirectional agreement).
            threshold = softmax_threshold if extraction == "softmax" else 0
            align_matrix = (attention_probs_src > threshold) * (
                attention_probs_tgt > threshold
            )
            if not output_prob:
                return align_matrix
            # A heuristic of generating the alignment probability
            # Length-scaled softmaxes combined as a harmonic mean.
            attention_probs_src = nn.Softmax(dim=-1)(
                attention_scores_src / torch.sqrt(len_tgt.view(-1, 1, 1, 1))
            )
            attention_probs_tgt = nn.Softmax(dim=-2)(
                attention_scores_tgt / torch.sqrt(len_src.view(-1, 1, 1, 1))
            )
            align_prob = (2 * attention_probs_src * attention_probs_tgt) / (
                attention_probs_src + attention_probs_tgt + 1e-9
            )
            return align_matrix, align_prob
        # Training: maximize probability mass on the guided alignment pairs.
        so_loss = 0
        if train_so:
            so_loss_src = torch.sum(
                torch.sum(attention_probs_src * guide, -1), -1
            ).view(-1)
            so_loss_tgt = torch.sum(
                torch.sum(attention_probs_tgt * guide, -1), -1
            ).view(-1)
            so_loss = so_loss_src / len_src + so_loss_tgt / len_tgt
            so_loss = -torch.mean(so_loss)
        # Optional consistency loss: trace of the product of the two
        # directional attention matrices, normalized by the shorter length.
        co_loss = 0
        if train_co:
            min_len = torch.min(len_src, len_tgt)
            trace = torch.matmul(
                attention_probs_src, (attention_probs_tgt).transpose(-1, -2)
            ).squeeze(1)
            trace = torch.einsum("bii->b", trace)
            co_loss = -torch.mean(trace / min_len)
        return so_loss + co_loss
class Aligner(nn.Module):
    """Wraps a masked-LM encoder with a GuideHead to extract word alignments."""

    def __init__(self, tokenizer, model):
        super().__init__()
        self.tokenizer = tokenizer
        self.model = model
        # Pick apart the encoder / LM head for the two supported architectures.
        if isinstance(model, BertForMaskedLM):
            self.encoder = model.bert
            self.lm_head = model.cls
            self.hidden_size = model.config.hidden_size
            self.vocab_size = model.config.vocab_size
        elif isinstance(model, RobertaForMaskedLM):
            self.encoder = model.roberta
            self.lm_head = model.lm_head
            self.hidden_size = model.config.hidden_size
            self.vocab_size = model.config.vocab_size
        else:
            raise ValueError("Unsupported model:", model)
        self.guide_layer = GuideHead(
            self.tokenizer.pad_token_id,
            self.tokenizer.cls_token_id,
            self.tokenizer.sep_token_id,
        )

    def get_aligned_word(
        self,
        inputs_src,
        inputs_tgt,
        bpe2word_map_src,
        bpe2word_map_tgt,
        device,
        src_len,
        tgt_len,
        align_layer=8,
        extraction="softmax",
        softmax_threshold=0.001,
        test=False,
        output_prob=False,
        word_aligns=None,
    ):
        """Extract word-level alignments for a batch.

        Returns the list of per-sentence alignments when `test` is True,
        otherwise a dense (batch, 1, src_len, tgt_len) guide tensor. If
        `word_aligns` is given, the BPE-level extraction step is skipped.
        """
        batch_size = inputs_src.size(0)
        # BPE sequence lengths excluding the CLS/SEP positions.
        bpelen_src, bpelen_tgt = inputs_src.size(1) - 2, inputs_tgt.size(1) - 2
        if word_aligns is None:
            inputs_src = inputs_src.to(dtype=torch.long, device=device).clone()
            inputs_tgt = inputs_tgt.to(dtype=torch.long, device=device).clone()
            with torch.no_grad():
                # Hidden states from the chosen encoder layer for both sides.
                outputs_src = self.encoder(
                    inputs_src,
                    attention_mask=(inputs_src != self.tokenizer.pad_token_id),
                    output_hidden_states=True,
                ).hidden_states[align_layer]
                outputs_tgt = self.encoder(
                    inputs_tgt,
                    attention_mask=(inputs_tgt != self.tokenizer.pad_token_id),
                    output_hidden_states=True,
                ).hidden_states[align_layer]
                attention_probs_inter = self.guide_layer(
                    outputs_src,
                    outputs_tgt,
                    inputs_src,
                    inputs_tgt,
                    extraction=extraction,
                    softmax_threshold=softmax_threshold,
                    output_prob=output_prob,
                )
                if output_prob:
                    attention_probs_inter, alignment_probs = attention_probs_inter
                    # Strip the head axis and the CLS/SEP rows and columns.
                    alignment_probs = alignment_probs[:, 0, 1:-1, 1:-1]
                attention_probs_inter = attention_probs_inter.float()
            word_aligns = []
            # Strip the head axis and the CLS/SEP rows and columns.
            attention_probs_inter = attention_probs_inter[:, 0, 1:-1, 1:-1]
            for idx, (attention, b2w_src, b2w_tgt) in enumerate(
                zip(attention_probs_inter, bpe2word_map_src, bpe2word_map_tgt)
            ):
                # Collapse BPE-pair alignments to word pairs; with probabilities,
                # keep the maximum probability seen for each word pair.
                aligns = set() if not output_prob else dict()
                non_zeros = torch.nonzero(attention)
                for i, j in non_zeros:
                    word_pair = (b2w_src[i], b2w_tgt[j])
                    if output_prob:
                        prob = alignment_probs[idx, i, j]
                        if word_pair not in aligns:
                            aligns[word_pair] = prob
                        else:
                            aligns[word_pair] = max(aligns[word_pair], prob)
                    else:
                        aligns.add(word_pair)
                word_aligns.append(aligns)
        if test:
            return word_aligns
        # Build the dense guide tensor used for training supervision.
        # Positions are offset by 1 to leave room for the CLS row/column.
        guide = torch.zeros(batch_size, 1, src_len, tgt_len)
        for idx, (word_align, b2w_src, b2w_tgt) in enumerate(
            zip(word_aligns, bpe2word_map_src, bpe2word_map_tgt)
        ):
            len_src = min(bpelen_src, len(b2w_src))
            len_tgt = min(bpelen_tgt, len(b2w_tgt))
            for i in range(len_src):
                for j in range(len_tgt):
                    if (b2w_src[i], b2w_tgt[j]) in word_align:
                        guide[idx, 0, i + 1, j + 1] = 1.0
        return guide
def set_seed(args):
    """Seed Python, NumPy and PyTorch RNGs from ``args.seed``.

    A negative seed disables seeding entirely.
    """
    if args.seed < 0:
        return
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
class LineByLineTextDataset(Dataset):
    """Parallel-corpus dataset: each line is ``source ||| target``.

    Each example is a tuple (ids_src, ids_tgt, bpe2word_map_src,
    bpe2word_map_tgt) where the maps give, for each BPE token, the index of
    the word it came from.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):
        assert os.path.isfile(file_path)
        print("Loading the dataset...")
        self.examples = []
        with open(file_path, encoding="utf-8") as f:
            for idx, line in enumerate(tqdm(f.readlines())):
                # Reject empty lines and lines without exactly one ||| separator.
                if (
                    len(line) == 0
                    or line.isspace()
                    or not len(line.split(" ||| ")) == 2
                ):
                    raise ValueError(f"Line {idx+1} is not in the correct format!")
                src, tgt = line.split(" ||| ")
                if src.rstrip() == "" or tgt.rstrip() == "":
                    raise ValueError(f"Line {idx+1} is not in the correct format!")
                sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
                # Tokenize word by word so the BPE-to-word mapping is recoverable.
                token_src, token_tgt = [
                    tokenizer.tokenize(word) for word in sent_src
                ], [tokenizer.tokenize(word) for word in sent_tgt]
                wid_src, wid_tgt = [
                    tokenizer.convert_tokens_to_ids(x) for x in token_src
                ], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
                max_len = tokenizer.max_len_single_sentence
                # args.max_len == -1 means "no extra cap beyond the tokenizer's".
                if args.max_len != -1:
                    max_len = min(max_len, args.max_len)
                ids_src = tokenizer.prepare_for_model(
                    list(itertools.chain(*wid_src)),
                    return_tensors="pt",
                    max_length=max_len,
                    truncation=True,
                )["input_ids"]
                ids_tgt = tokenizer.prepare_for_model(
                    list(itertools.chain(*wid_tgt)),
                    return_tensors="pt",
                    max_length=max_len,
                    truncation=True,
                )["input_ids"]
                # Length 2 means only special tokens survived -> empty sentence.
                if len(ids_src) == 2 or len(ids_tgt) == 2:
                    raise ValueError(f"Line {idx+1} is not in the correct format!")
                # Map each BPE piece back to the index of its source word.
                bpe2word_map_src = []
                for i, word_list in enumerate(token_src):
                    bpe2word_map_src += [i for x in word_list]
                bpe2word_map_tgt = []
                for i, word_list in enumerate(token_tgt):
                    bpe2word_map_tgt += [i for x in word_list]
                self.examples.append(
                    (ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt)
                )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
def word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer):
    """Run alignment extraction over ``args.data_file``.

    Writes one line of ``i-j`` word-pair alignments per input sentence pair to
    ``args.output_file`` and, if ``args.output_prob_file`` is set, the matching
    alignment probabilities to that file.
    """

    def collate(examples):
        # Pad variable-length id sequences; pass BPE-to-word maps through as-is.
        ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)
        ids_src = pad_sequence(
            ids_src, batch_first=True, padding_value=tokenizer.pad_token_id
        )
        ids_tgt = pad_sequence(
            ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id
        )
        return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt

    dataset = LineByLineTextDataset(tokenizer, args, file_path=args.data_file)
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(
        dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate
    )
    model.to(args.device)
    model.eval()
    aligner = Aligner(tokenizer, model)
    tqdm_iterator = trange(dataset.__len__(), desc="Extracting")
    # NOTE(review): prob_writer is only closed on the happy path; an exception
    # while writing would leak the handle.
    if args.output_prob_file is not None:
        prob_writer = open(args.output_prob_file, "w")
    with open(args.output_file, "w") as writer:
        for batch in dataloader:
            with torch.no_grad():
                ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch
                # test=True returns per-sentence alignments (sets, or dicts
                # with probabilities when output_prob is enabled).
                word_aligns_list = aligner.get_aligned_word(
                    ids_src,
                    ids_tgt,
                    bpe2word_map_src,
                    bpe2word_map_tgt,
                    args.device,
                    0,
                    0,
                    align_layer=args.align_layer,
                    extraction=args.extraction,
                    softmax_threshold=args.softmax_threshold,
                    test=True,
                    output_prob=(args.output_prob_file is not None),
                )
                for word_aligns in word_aligns_list:
                    output_str = []
                    if args.output_prob_file is not None:
                        output_prob_str = []
                    for word_align in word_aligns:
                        output_str.append(f"{word_align[0]}-{word_align[1]}")
                        if args.output_prob_file is not None:
                            output_prob_str.append(f"{word_aligns[word_align]}")
                    writer.write(" ".join(output_str) + "\n")
                    if args.output_prob_file is not None:
                        prob_writer.write(" ".join(output_prob_str) + "\n")
                tqdm_iterator.update(len(ids_src))
    if args.output_prob_file is not None:
        prob_writer.close()
def main():
    """Parse command-line options, seed RNGs, load the model and run alignment."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_file", default=None, type=str, required=True,
        help="The input data file (a text file).",
    )
    parser.add_argument(
        "--output_file", type=str, required=True,
        help="The output file.",
    )
    parser.add_argument(
        "--align_layer", type=int, default=8,
        help="layer for alignment extraction",
    )
    parser.add_argument(
        "--extraction", default="softmax", type=str,
        help="softmax or entmax15",
    )
    parser.add_argument("--softmax_threshold", type=float, default=0.001)
    parser.add_argument("--max_len", type=int, default=-1)
    parser.add_argument(
        "--output_prob_file", default=None, type=str,
        help="The output probability file.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str,
        help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
    )
    parser.add_argument(
        "--seed", type=int, default=42,
        help="random seed for initialization",
    )
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
    )
    parser.add_argument(
        "--no_cuda", action="store_true",
        help="Avoid using CUDA when available",
    )
    args = parser.parse_args()

    use_cuda = torch.cuda.is_available() and not args.no_cuda
    args.device = torch.device("cuda" if use_cuda else "cpu")

    # Set seed
    set_seed(args)

    tokenizer = AutoTokenizer.from_pretrained(
        args.model_name_or_path, cache_dir=args.cache_dir
    )
    model = AutoModelForMaskedLM.from_pretrained(
        args.model_name_or_path, cache_dir=args.cache_dir
    )
    word_align(args, model, tokenizer)
# Script entry point: run alignment extraction when executed directly.
if __name__ == "__main__":
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"os.path.isfile",
"transformers.AutoModelForMaskedLM.from_pretrained",
"torch.nn.Softmax",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.utils.data.SequentialSampler",
"random.seed",
"torch.zeros",
"itertools.chain",
"torch.mean",
"tor... | [((13000, 13026), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (13017, 13026), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler\n'), ((13044, 13133), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'args.batch_size', 'collate_fn': 'collate'}), '(dataset, sampler=sampler, batch_size=args.batch_size, collate_fn\n =collate)\n', (13054, 13133), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler\n'), ((14941, 14966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14964, 14966), False, 'import argparse\n'), ((16727, 16812), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.model_name_or_path'], {'cache_dir': 'args.cache_dir'}), '(args.model_name_or_path, cache_dir=args.cache_dir\n )\n', (16756, 16812), False, 'from transformers import AutoModelForMaskedLM, AutoTokenizer, BertForMaskedLM, PreTrainedModel, PreTrainedTokenizer, RobertaForMaskedLM\n'), ((16834, 16926), 'transformers.AutoModelForMaskedLM.from_pretrained', 'AutoModelForMaskedLM.from_pretrained', (['args.model_name_or_path'], {'cache_dir': 'args.cache_dir'}), '(args.model_name_or_path, cache_dir=\n args.cache_dir)\n', (16870, 16926), False, 'from transformers import AutoModelForMaskedLM, AutoTokenizer, BertForMaskedLM, PreTrainedModel, PreTrainedTokenizer, RobertaForMaskedLM\n'), ((2718, 2755), 'torch.sum', 'torch.sum', (['(1 - attention_mask_src)', '(-1)'], {}), '(1 - attention_mask_src, -1)\n', (2727, 2755), False, 'import torch\n'), ((2774, 2811), 'torch.sum', 'torch.sum', (['(1 - attention_mask_tgt)', '(-1)'], {}), '(1 - attention_mask_tgt, -1)\n', (2783, 2811), False, 'import torch\n'), ((9110, 9154), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)', 'src_len', 'tgt_len'], {}), '(batch_size, 1, src_len, tgt_len)\n', (9121, 9154), False, 'import torch\n'), ((9670, 9692), 'random.seed', 
'random.seed', (['args.seed'], {}), '(args.seed)\n', (9681, 9692), False, 'import random\n'), ((9701, 9726), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (9715, 9726), True, 'import numpy as np\n'), ((9735, 9763), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9752, 9763), False, 'import torch\n'), ((9772, 9809), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (9798, 9809), False, 'import torch\n'), ((9938, 9963), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (9952, 9963), False, 'import os\n'), ((12620, 12697), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['ids_src'], {'batch_first': '(True)', 'padding_value': 'tokenizer.pad_token_id'}), '(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)\n', (12632, 12697), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((12738, 12815), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['ids_tgt'], {'batch_first': '(True)', 'padding_value': 'tokenizer.pad_token_id'}), '(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)\n', (12750, 12815), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((5166, 5193), 'torch.min', 'torch.min', (['len_src', 'len_tgt'], {}), '(len_src, len_tgt)\n', (5175, 5193), False, 'import torch\n'), ((5350, 5379), 'torch.einsum', 'torch.einsum', (['"""bii->b"""', 'trace'], {}), "('bii->b', trace)\n", (5362, 5379), False, 'import torch\n'), ((3580, 3598), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (3590, 3598), False, 'from torch import nn\n'), ((3736, 3754), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-2)'}), '(dim=-2)\n', (3746, 3754), False, 'from torch import nn\n'), ((4246, 4264), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (4256, 4264), False, 'from torch import nn\n'), ((4391, 4409), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-2)'}), 
'(dim=-2)\n', (4401, 4409), False, 'from torch import nn\n'), ((5082, 5101), 'torch.mean', 'torch.mean', (['so_loss'], {}), '(so_loss)\n', (5092, 5101), False, 'import torch\n'), ((5403, 5430), 'torch.mean', 'torch.mean', (['(trace / min_len)'], {}), '(trace / min_len)\n', (5413, 5430), False, 'import torch\n'), ((7024, 7039), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7037, 7039), False, 'import torch\n'), ((8502, 8526), 'torch.nonzero', 'torch.nonzero', (['attention'], {}), '(attention)\n', (8515, 8526), False, 'import torch\n'), ((13487, 13502), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13500, 13502), False, 'import torch\n'), ((16587, 16612), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16610, 16612), False, 'import torch\n'), ((4797, 4839), 'torch.sum', 'torch.sum', (['(attention_probs_src * guide)', '(-1)'], {}), '(attention_probs_src * guide, -1)\n', (4806, 4839), False, 'import torch\n'), ((4920, 4962), 'torch.sum', 'torch.sum', (['(attention_probs_tgt * guide)', '(-1)'], {}), '(attention_probs_tgt * guide, -1)\n', (4929, 4962), False, 'import torch\n'), ((11268, 11293), 'itertools.chain', 'itertools.chain', (['*wid_src'], {}), '(*wid_src)\n', (11283, 11293), False, 'import itertools\n'), ((11525, 11550), 'itertools.chain', 'itertools.chain', (['*wid_tgt'], {}), '(*wid_tgt)\n', (11540, 11550), False, 'import itertools\n')] |
# -*- coding: utf-8 -*-
"""Noisy Skyline Implementation
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WF8UXH8EeQFBPR5nhNmrQ0IBtdNSAmhZ
"""
import math
import random
import time

import numpy as np
def oracle(v1, v2, dim, deltaMain):
    """Noisy comparison oracle: report whether v1[dim] >= v2[dim].

    The truthful answer is flipped with probability deltaMain.
    """
    honest = v1[dim] >= v2[dim]
    draw = random.random()
    # A draw at or below deltaMain inverts the honest answer.
    if draw > deltaMain:
        return honest
    return not honest
def BoostProb(command, p, q, i, deltaMain, delta1, delta2):
    """Boost a noisy yes/no query by repeated voting (biased random walk).

    Repeats the underlying noisy query until the vote margin reaches
    log(1/delta1) (answer True) or drops to -log(1/delta2) (answer False).
    Returns (answer, number_of_noisy_queries).  An unsupported command
    aborts with (False, queries_so_far).
    """
    accept_bar = math.log(1 / delta1)
    reject_bar = math.log(1 / delta2)
    margin = 0          # running num_true - num_false
    spent = 0
    while -reject_bar < margin < accept_bar:
        if command == "dominates":
            vote, used = Dominates(p, q, deltaMain)
            spent += used
        elif command == "oracle":
            # oracle(q, p, i, ...) answers q[i] >= p[i]; negate for p[i] > q[i].
            vote = not oracle(q, p, i, deltaMain)
            spent += 1
        else:
            # Unknown command: bail out immediately.
            return False, spent
        margin = margin + 1 if vote else margin - 1
    return (margin >= accept_bar), spent
def brute_force(s, delta, error):
    """Noisy brute-force skyline computation.

    Sorts the points along every dimension with the noisy merge sort,
    repeatedly promotes points that the current skyline does not (noisily)
    dominate, then prunes internally dominated candidates.

    Parameters: s is the list of candidate points (equal-length tuples),
    delta the confidence budget for the dominance tests, error the
    per-comparison oracle error probability.

    Returns (skyline_points, elapsed_seconds, total_oracle_calls).

    Fixes: ``time`` was used without ever being imported (NameError at
    runtime); the duplicate ``optimal = []`` initialiser and the unused
    locals ``n`` / ``optima_i`` were removed.
    """
    start = time.time()
    dims = len(s[0])
    optimal = []
    sorted_i = []
    num_calls = 0
    # Noisily sort the candidate set along every dimension.
    for i in range(dims):
        s_i, calls = msort2(s, i, error)
        num_calls += calls
        sorted_i.append(s_i)
    # Promote per-dimension maxima that the current skyline does not dominate.
    changed = True
    while changed:
        changed = False
        for i in range(dims):
            compl = False
            curr = -1
            while not compl and len(sorted_i[i]) > 0:
                dominated, calls = SetDominates(optimal, sorted_i[i][curr],
                                                delta/2, delta/2, error)
                num_calls += calls
                if not np.any(dominated):
                    optimal.append(sorted_i[i][curr])
                    changed = True
                    sorted_i[i].pop(curr)
                else:
                    compl = True
    # Prune: keep only candidates not dominated by the other candidates.
    new_optimal = []
    for i in range(len(optimal)):
        sublist = optimal[:i] + optimal[i + 1:]
        dominated, calls = SetDominates(sublist, optimal[i], delta/2, delta/2, error)
        num_calls += calls
        if not dominated:
            new_optimal.append(optimal[i])
    end = time.time()
    return new_optimal, end - start, num_calls
def msort2(x, dim, error):
    """Noisy merge sort of x in ascending order of coordinate `dim`.

    Head-to-head comparisons during the merge are resolved with boosted
    noisy-oracle queries.  Returns (sorted_list, oracle_calls).
    """
    if len(x) < 2:
        return x, 0
    half = len(x) // 2
    left, used_left = msort2(x[:half], dim, error)
    right, used_right = msort2(x[half:], dim, error)
    total = used_left + used_right
    merged = []
    # Merge: ask the boosted oracle whether the right head beats the left head.
    while left and right:
        left_first, used = BoostProb("oracle", right[0], left[0], dim, error,
                                     1/(16*len(left)), 1/16)
        total += used
        if left_first:
            merged.append(left.pop(0))
        else:
            merged.append(right.pop(0))
    merged.extend(left)
    merged.extend(right)
    return merged, total
# --- Sanity experiments for BoostProb --------------------------------------
# Each cell runs many boosted queries and compares the empirical accuracy
# against the requested confidence level.
# Bug fixed: BoostProb returns an (answer, num_calls) tuple; three cells
# compared that tuple directly against a boolean, which is always False,
# so num_correct never moved.  The answer is now unpacked first.
v1 = (1, 2)
v2 = (2, 1)
trials = 1000
num_correct = 0
p = 0.3
d1 = 0.01
d2 = 0.1
dim = 1
for _ in range(trials):
    output, calls = BoostProb("dominates", v1, v2, dim, p, d1, d2)
    if output == False:
        num_correct += 1
# print([Dominates(v1, v2, p)[0] for _ in range(5)])
print(num_correct/trials, 1 - d2 if (v1[dim] >= v2[dim]) else 1 - d1)
v1 = (1, 2)
v2 = (2, 1)
trials = 10000
num_correct = 0
p = 0.3
d1 = 0.01
d2 = 0.1
dim = 1
for _ in range(trials):
    output, calls = BoostProb("oracle", v1, v2, dim, p, d1, d2)
    if output == (v1[dim] >= v2[dim]):
        num_correct += 1
print(num_correct/trials, 1 - d2 if (v1[dim] >= v2[dim]) else 1 - d1)
v1 = (1, 2)
v2 = (2, 1)
trials = 10000
num_correct = 0
p = 0.1
d1 = 0.01
d2 = 0.1
dim = 0
for _ in range(trials):
    output, calls = BoostProb("oracle", v1, v2, dim, p, d1, d2)
    if output == (v1[dim] >= v2[dim]):
        num_correct += 1
print(num_correct/trials, 1 - d2 if (v1[dim] >= v2[dim]) else 1 - d1)
v1 = (2, 1)
v2 = (1, 2)
trials = 10000
num_correct = 0
p = 0.1
d1 = 0.01
d2 = 0.1
dim = 0
for _ in range(trials):
    output, calls = BoostProb("oracle", v1, v2, dim, p, d1, d2)
    if output == (v1[dim] >= v2[dim]):
        num_correct += 1
print(num_correct/trials, 1 - d2 if (v1[dim] >= v2[dim]) else 1 - d1)
def Dominates(p, q, deltaMain):
    """Noisily test whether p dominates q in every coordinate.

    Returns (dominates, oracle_calls).  Each coordinate test is a boosted
    noisy query so the overall error stays bounded.
    """
    spent = 0
    for coord in range(len(p)):
        q_beats_p, used = BoostProb("oracle", q, p, coord, deltaMain,
                                    1/(16*len(p)), 1/16)
        spent += used
        # q beats p on this coordinate, so p cannot dominate q.
        if q_beats_p:
            return False, spent
    return True, spent
import numpy as np
v1 = (1, 1)
v2 = (1, 1)
print(np.all([v1[i] >= v2[i] for i in range(len(v2))]))
trials = 10000
num_correct = 0
p = 0.3
for _ in range(trials):
output, calls = Dominates(v1, v2, p)
# print(output)
if output == np.all([v1[i] >= v2[i] for i in range(len(v2))]):
num_correct += 1
print(1 - num_correct/trials, 1/16)
import numpy as np
v1 = (2, 1)
v2 = (3, 3)
print(np.all([v1[i] >= v2[i] for i in range(len(v2))]))
trials = 10000
num_correct = 0
p = 0.3
for _ in range(trials):
output,calls = Dominates(v1, v2, p)
# print(output)
if output == np.all([v1[i] >= v2[i] for i in range(len(v2))]):
num_correct += 1
print(1 - num_correct/trials, 1/16)
import numpy as np
v1 = (2, 2)
v2 = (1.5, 1.5)
print(np.all([v1[i] >= v2[i] for i in range(len(v2))]))
trials = 10
num_correct = 0
p = 0.3
for _ in range(trials):
output, calls = Dominates(v1, v2, p)
# print(output)
if output == np.all([v1[i] >= v2[i] for i in range(len(v2))]):
num_correct += 1
print(1 - num_correct/trials, 1/16)
import numpy as np
v1 = (1, 1)
v2 = (5, 3)
print(np.all([v1[i] >= v2[i] for i in range(len(v2))]))
trials = 100
num_correct = 0
p = 0.3
d1 = 0.1
d2 = 0.1
for _ in range(trials):
output, calls = BoostProb("dominates", v1, v2, 0, p, d1, d2)
# print(output)
if output == np.all([v1[i] >= v2[i] for i in range(len(v2))]):
num_correct += 1
print(1 - num_correct/trials, 1/16)
def SetDominates(S, q, delta1, delta2, deltaMain):
    """Noisily test whether any point in S dominates q.

    The failure probability delta1 is split evenly across the |S|
    individual dominance tests.  Returns (dominated, oracle_calls).

    Fix: removed a leftover debug ``print`` that wrote to stdout on every
    positive dominance test.
    """
    num_calls = 0
    for i in range(len(S)):
        cond, calls = BoostProb("dominates", S[i], q, 0, deltaMain,
                                 delta1/len(S), delta2)
        num_calls += calls
        if cond:
            return True, num_calls
    return False, num_calls
# --- Sanity experiments for SetDominates -----------------------------------
# Bug fixes: all three cells referenced an undefined name ``s2``
# (NameError) -- the candidate skyline list defined here is ``s1`` --
# and the middle cell truth-tested the (dominated, calls) tuple directly,
# which is always truthy; the flag is now unpacked first.
s1 = [(1, 1)]
v2 = (5, 3)
trials = 1000
num_correct = 0
p = 0.1
d1 = 0.01
d2 = 0.1
for _ in range(trials):
    cond, calls = SetDominates(s1, v2, d1, d2, p)
    if not cond:
        num_correct += 1
print(num_correct/trials)
s1 = [(1, 1), (3, 5)]
v2 = (5, 3)
trials = 10
num_correct = 0
p = 0.8
d1 = 0.01
d2 = 0.1
for _ in range(trials):
    cond, calls = SetDominates(s1, v2, d1, d2, p)
    if not cond:
        num_correct += 1
print(num_correct/trials)
s1 = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (99, 1), (97, 5)]
v2 = (1, 99)
trials = 10
num_correct = 0
p = 0.8
d1 = 0.01
d2 = 0.1
for _ in range(trials):
    cond, calls = SetDominates(s1, v2, d1, d2, p)
    if not cond:
        num_correct += 1
print(num_correct/trials)
def Lex(p, q, deltaMain):
    """Noisy lexicographic comparison: True iff p >= q lexicographically.

    Coordinates are scanned in order; the first coordinate on which one
    point noisily beats the other decides.  Returns (p_wins, oracle_calls).
    """
    spent = 0
    for coord in range(len(p)):
        p_beats, used = BoostProb("oracle", p, q, coord, deltaMain,
                                  1/(32*len(p)), 1/32)
        spent += used
        if p_beats:
            return True, spent
        q_beats, used = BoostProb("oracle", q, p, coord, deltaMain,
                                  1/(32*len(p)), 1/32)
        spent += used
        if q_beats:
            return False, spent
    # All coordinates tied: treat p as the (weak) winner.
    return True, spent
# --- Sanity experiment for Lex ----------------------------------------------
# Bug fix: Lex returns a (answer, calls) tuple, which is always truthy, so
# the original condition counted every trial as correct.  Unpack first.
v1 = (1, 2)
v2 = (2, 1)
trials = 10000
num_correct = 0
p = 0.7
d1 = 0.4
d2 = 0.4
for _ in range(trials):
    answer, calls = Lex(v2, v1, p)
    if answer:
        num_correct += 1
print(num_correct/trials)
def argmax_lex(a):
    """Index of the maximum of `a`; ties broken by the smallest index."""
    best = 0
    for idx in range(1, len(a)):
        if a[idx] > a[best]:
            best = idx
    return best
def argmax_rand(a):
    """Index of the maximum of `a`; ties broken uniformly at random."""
    values = np.array(a)
    tied_indices = np.flatnonzero(values == values.max())
    return np.random.choice(tied_indices)
def MaxLex(p, S, delta, deltaMain, use_argmax_lex = True, use_update = (1, 0.5, 1, -2), expected=None, use_cond = False):
    """Noisily select from S a lexicographically maximal point dominating p.

    Keeps one confidence score per candidate (initialised to log(1/delta)),
    repeatedly pits the two highest-scoring candidates against each other
    with the noisy Lex comparison, and adjusts scores according to
    use_update = (loser penalty, dominator reward, non-dominator penalty,
    elimination threshold).  Stops once the runner-up's score reaches the
    elimination threshold (with use_cond, only when every other candidate
    is eliminated too).  `expected` enables diagnostic tracking/printing
    for that one candidate.

    Returns (winning point, total oracle calls).

    Fix: the diagnostic ``print(num_increased/rounds)`` ran even when
    ``expected`` was None, raising NameError (``num_increased`` is only
    defined in diagnostic mode); it is now guarded.  A no-op
    ``num_increased = num_increased`` branch was removed.
    """
    if len(S) == 1:
        return S[0], 0
    # One confidence score per candidate.
    c = []
    for i in range(0,len(S)):
        c.append(math.log(1/delta))
    compl = False
    num_calls = 0
    if use_argmax_lex:
        argmax = lambda x: argmax_lex(x)
    else:
        argmax = lambda x: argmax_rand(x)
    rounds = 0
    if expected:
        ind = S.index(expected)
        prev = c[ind]
        num_increased = 0
    while not compl:
        # Current leader ...
        q1Star = argmax(c)
        q1 = S[q1Star]
        # ... and the runner-up (index shifted past the removed leader).
        cStar = c[:q1Star] + c[q1Star + 1:]
        q2Star = argmax(cStar)
        q2Star = q2Star + 1 if q2Star >= q1Star else q2Star
        q2 = S[q2Star]
        cond1, calls1 = Lex(q1,q2,delta)
        num_calls += calls1
        if cond1:
            x = q1
            xStar = q1Star
            y = q2Star
        else:
            x = q2
            xStar = q2Star
            y = q1Star
        # Penalise the loser of the head-to-head comparison.
        c[y] = c[y] - use_update[0]
        # Reward the winner if it (noisily) dominates p, penalise otherwise.
        cond2, calls2 = Dominates(x, p, deltaMain)
        num_calls += calls2
        if cond2:
            c[xStar] = c[xStar] + use_update[1]
        else:
            c[xStar] = c[xStar] - use_update[2]
        cond = (c[q2Star] <= use_update[3])
        if len(c) > 2 and use_cond:
            remaining = c[:min(q1Star, q2Star)] + c[min(q1Star, q2Star)+1:max(q1Star, q2Star)] + c[max(q1Star, q2Star)+1:]
            cond = cond and np.all([x <= -2 for x in remaining])
        if cond:
            compl = True
        rounds += 1
        if expected:
            curr = c[ind]
            if curr == prev + use_update[1]:
                num_increased += 1
            prev = curr
            print(c, S)
    if expected:
        print(num_increased/rounds)
    return S[argmax(c)], num_calls
x = [(5,3), (3, 3), (5, 7), (6, 1)]
v1 = (3, 5)
expected = (5, 7)
lexexpected = (9,1)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(5,3), (3, 3), (5, 7), (6, 1)]
v1 = (3, 5)
expected = (5, 7)
lexexpected = (9,1)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected, use_cond = False)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(7,3), (3, 3), (5, 7), (6, 1)]
v1 = (3, 5)
expected = (5, 7)
lexexpected = (6,1)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected, use_cond = False)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (3, 5), (5, 7), (9, 1)]
v1 = (3, 5)
expected = (5, 7)
lexexpected = (9,1)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (3, 5), (5, 7), (9, 1)]
v1 = (3, 5)
expected = (5, 7)
lexexpected = (9,1)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected, use_update = (1, 1, 1.1, -2))
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (6,6), (3, 5), (5, 7)]
v1 = (3, 5)
expected = (6, 6)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set((8,3)):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (3, 5), (1, 5), (5, 1)]
v1 = (3, 5)
expected = (3, 5)
lexcorrect = (8, 3)
trials = 1000
p = 0.
expected_p = 0.01
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected, use_update = (1, 4, 1.1, -2))
print(output)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexcorrect):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (3, 5), (1, 5), (5, 1)]
v1 = (3, 5)
expected = (3, 5)
lexcorrect = (8, 3)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected, use_update=(1, 1, 1, -2))
print(output)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexcorrect):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(8,3), (7,7), (3, 5)]
v1 = (3, 5)
expected = (7, 7)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set((8,3)):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
expected = (1, 99)
lexexpected = (99, 1)
trials = 1000
p = 0.3
expected_p = 0.1
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected, use_update = (1, 4, 3, -2), use_cond = True)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
expected = (1, 99)
lexexpected = (99, 1)
trials = 1000
p = 0.1
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected, use_update = (1, 4, 1.1, -2), use_cond = True)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (97, 5)
expected = (97, 5)
lexexpected = (99, 1)
trials = 1000
p = 0.1
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected, use_update = (1, 1, 1, -2), use_cond = True)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (97, 5)
expected = (97, 5)
lexexpected = (99, 1)
trials = 1000
p = 0.3
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, expected=expected, use_cond = True)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set(lexexpected):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
expected = (1, 99)
trials = 1000
p = 0.
expected_p = 0.05
wrong_answers = set([])
num_correct = 0
num_lexcorrect = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, expected=expected, use_cond = False)
if set(output) == set(expected):
num_correct += 1
elif set(output) == set((99, 1)):
num_lexcorrect += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
print(1 - num_lexcorrect/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
expected = (1, 99)
trials = 1000
p = 0.2
expected_p = 0.1
wrong_answers = set([])
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
x = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
expected = (1, 99)
trials = 500
values = []
for one in range(1, 11):
for two in range(1, 21):
for three in range(1, 31):
p = 0.2
expected_p = 0.1
wrong_answers = set([])
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, use_update = (one, two, three, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print("args;", one, two, three)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
values.append((1 - num_correct/trials, expected_p, wrong_answers, one, two, three))
x = [(1, 1), (0.5, 0.5), (0.25, 0.25), (0.1, 0.1), (0.01, 0.01), (2, 2), (1, 99), (99, 1)]
v1 = (1, 99)
trials = 100
expected = (1, 99)
num_correct = 0
p = 0.1
expected_p = 0.1
wrong_answers = set([])
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
s = [(1, 1), (2, 2), (1, 99), (99, 1)]
v1 = (1, 99)
expected = (1, 99)
trials = 1000
p = 0.2
expected_p = 0.1
wrong_answers = set([])
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
num_correct = 0
for _ in range(trials):
s = x.copy()
random.shuffle(s)
output, calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = False, use_update = (1, 1, 1, -2))
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
s = [(1, 1), (3, 3), (5, 3), (3, 5)]
v1 = (3, 3)
trials = 1000
expected = (5,3)
num_correct = 0
p = 0.3
expected_p = 0.1
wrong_answers = set([])
for _ in range(trials):
output, num_calls = MaxLex(v1, s, expected_p, p, use_argmax_lex = True)
if set(output) == set(expected):
num_correct += 1
else:
wrong_answers.add(output)
print(wrong_answers)
print(1 - num_correct/trials, expected_p)
s = [(1, 1), (2, 2), (3, 5), (5, 3), (7, 7), (1, 99), (99, 1), (97, 5)]
v1 = (1, 99)
trials = 10
num_correct = 0
p = 0.1
d1 = 0.4
d2 = 0.4
for _ in range(trials):
output, calls = MaxLex(v1, s, 0.01, p, use_argmax_lex = False, )
if output == (1, 99):
num_correct += 1
else:
print(output)
print(num_correct/trials)
def SkylineHighDim(k, X, delta, deltaMain, use_argmax_lex = True, use_update = (1, 0.5, 1, -2)):
    """Noisy skyline: extract up to k skyline points from X.

    Each round samples a random candidate that the current skyline S does
    not (noisily) dominate, then promotes the MaxLex winner among the
    remaining candidates.  Returns (S, total_oracle_calls).

    NOTE(review): the print calls look like leftover debug output -- confirm
    before relying on stdout being clean.
    """
    S = []
    C = X.copy()
    num_calls = 0
    for i in range(1,k+1):
        #Finding a point p not dominated by current skyline points
        found = False
        while len(C) > 0 and not found:
            # Sample a uniformly random remaining candidate.
            p = C[random.randint(0,len(C) -1)]
            cond1, calls1 = SetDominates(S, p, delta/(4*k), delta/(4*k*len(X)), deltaMain)
            num_calls += calls1
            if not cond1:
                print(p, S, "not dominated")
                found = True
            else:
                # Dominated candidates can never be skyline points: discard.
                print(p, S, "dominated")
                C.remove(p)
        # print(C)
        if len(C) == 0:
            # Candidate pool exhausted: fewer than k skyline points exist.
            return S, num_calls
        else:
            #Finding a skyline point that dominates p
            pStar, calls2 = MaxLex(p, C, delta/(2*k), deltaMain, use_argmax_lex = use_argmax_lex, use_update = use_update)
            num_calls += calls2
            C.remove(pStar)
            print(pStar, C)
            S.append(pStar)
    return S, num_calls
s = [(1, 1), (3, 5), (5, 3)]
expected = s[-2:]
k = 4
delta = 0.1
deltaMain = 0.1
trials = 10
num_correct = 0
total_calls = []
wrong_answers = set()
for i in range(trials):
X = s.copy()
random.shuffle(X)
# print("trial", i)
output, num_calls = SkylineHighDim(k,X,delta, deltaMain, use_argmax_lex = True, use_update = (1, 0.5, 1, -2))
total_calls.append(num_calls)
# print(output)
if set(expected) == set(output):
num_correct += 1
else:
wrong_answers.add(tuple(output))
print(num_correct / (trials + 1), 1 - deltaMain, np.mean(total_calls), np.max(total_calls))
print(wrong_answers)
num_correct = 0
total_calls = []
wrong_answers = set()
for i in range(trials):
X = s.copy()
random.shuffle(X)
# print("trial", i)
output, num_calls = SkylineHighDim(k,X,delta, deltaMain, use_argmax_lex = False, use_update = (1, 0.5, 1, -2))
total_calls.append(num_calls)
# print(output)
if set(expected) == set(output):
num_correct += 1
else:
wrong_answers.add(tuple(output))
print(num_correct / (trials + 1), 1 - deltaMain, np.mean(total_calls), np.max(total_calls))
print(wrong_answers)
num_correct = 0
total_calls = []
wrong_answers = set()
for i in range(trials):
X = s.copy()
random.shuffle(X)
# print("trial", i)
output, num_calls = SkylineHighDim(k,X,delta, deltaMain, use_argmax_lex = True, use_update = (1, 1, 1, -2))
total_calls.append(num_calls)
# print(output)
if set(expected) == set(output):
num_correct += 1
else:
wrong_answers.add(tuple(output))
print(num_correct / (trials + 1), 1 - deltaMain, np.mean(total_calls), np.max(total_calls))
print(wrong_answers)
num_correct = 0
total_calls = []
wrong_answers = set()
for i in range(trials):
X = s.copy()
random.shuffle(X)
# print("trial", i)
output, num_calls = SkylineHighDim(k,X,delta, deltaMain, use_argmax_lex = False, use_update = (1, 0.5, 1, -2))
total_calls.append(num_calls)
# print(output)
if set(expected) == set(output):
num_correct += 1
else:
wrong_answers.add(tuple(output))
print(num_correct / (trials + 1), 1 - deltaMain, np.mean(total_calls), np.max(total_calls))
print(wrong_answers)
def is_dominated_noiseless(a, b):
    """Exact (noise-free) test: is a dominated by b, i.e. a <= b coordinatewise?"""
    return np.all([a[j] <= b[j] for j in range(len(b))])
def brute_force_noiseless(s):
    """Exact skyline of point set `s` (no oracle noise).

    Sorts along every dimension, seeds the skyline with the per-dimension
    maxima, then repeatedly promotes points not dominated by the current
    skyline, and finally prunes internally dominated points.

    Fixes: the promotion loop reused the stale sorted list ``s_i`` of the
    *last* dimension instead of ``sorted_i[i]`` for each dimension; the
    dead ``optimal = set([])`` initialiser (the variable is used as a
    list throughout) and unused locals were removed.
    """
    dims = len(s[0])
    sorted_i = []
    optimal = []
    for i in range(dims):
        s_i = msort2_noiseless(s, i)
        sorted_i.append(s_i)
        max_i = s_i[-1][i]
        # Collect every point achieving the maximum along dimension i.
        optima_i = []
        compl = False
        while not compl and len(s_i) > 0:
            if s_i[-1][i] == max_i:
                optima_i.append(s_i[-1])
                s_i.pop(-1)
            else:
                compl = True
        for marg in optima_i:
            if marg not in optimal:
                optimal.append(marg)
    # Promote remaining points that the current skyline does not dominate.
    changed = True
    while changed:
        changed = False
        for i in range(dims):
            compl = False
            while not compl and len(sorted_i[i]) > 0:
                cand = sorted_i[i][-1]
                dominated = [is_dominated_noiseless(cand, x) for x in optimal]
                if not np.any(dominated):
                    optimal.append(cand)
                    changed = True
                    sorted_i[i].pop(-1)
                else:
                    compl = True
    # Prune: keep only candidates not dominated by any other candidate.
    new_optimal = []
    for i in range(len(optimal)):
        sublist = optimal[:i] + optimal[i + 1:]
        if not np.any([is_dominated_noiseless(optimal[i], x) for x in sublist]):
            new_optimal.append(optimal[i])
    return new_optimal
def msort2_noiseless(x, dim):
    """Exact merge sort of x in ascending order of coordinate `dim`."""
    if len(x) < 2:
        return x
    half = len(x) // 2
    left = msort2_noiseless(x[:half], dim)
    right = msort2_noiseless(x[half:], dim)
    merged = []
    while left and right:
        # Take the smaller head; ties go to the left half.
        if left[0][dim] > right[0][dim]:
            merged.append(right.pop(0))
        else:
            merged.append(left.pop(0))
    return merged + left + right
X = [tuple(x) for x in [[1,1],[2,2],[3,5],[5,3],[7,7], [1,99], [99,1],[97,5]]]
expected = brute_force_noiseless(X)
print(expected)
X = [tuple(x) for x in [[1,1],[2,2],[3,5],[5,3],[7,7], [1,99], [99,1],[97,5]]]
expected = X[-4:]
# print(expected)
k = 8
delta = 0.1
deltaMain = 0.1
trials = 100
num_correct = 0
total_calls = []
hamming_dist = []
for i in range(trials):
X = [tuple(x) for x in [[1,1],[2,2],[3,5],[5,3],[7,7], [1,99], [99,1],[97,5]]]
expected = X[-4:]
# print(X)
# print(expected)
# print("trial", i)
output, num_calls = SkylineHighDim(k,X,delta, deltaMain)
total_calls.append(num_calls)
hamming_dist.append(len(set(output) ^ set(expected)))
# print(output)
if set(expected) == set(output):
num_correct += 1
else:
print(output)
print(num_correct / (trials + 1), 1 - deltaMain, np.mean(total_calls), np.max(total_calls), np.mean(hamming_dist), np.max(hamming_dist))
import numpy as np
import random
import stats
def oracle_max(tup, dim, error):
    """Noisy argmax oracle for a pair of points.

    Returns the index (0 or 1) of the point in `tup` with the larger
    coordinate `dim` (ties favour index 1); the honest answer is flipped
    with probability `error`.
    """
    first, second = tup
    honest = 0 if first[dim] > second[dim] else 1
    # A draw below `error` flips the honest answer.
    if random.random() < error:
        return 1 - honest
    return honest
def Lex(p, q, deltaMain):
    """Noisy lexicographic comparison (redefinition of the earlier Lex).

    Returns (True, calls) iff p is noisily judged >= q lexicographically:
    at the first deciding coordinate, p winning returns True and q winning
    returns False; a full tie returns True.
    """
    spent = 0
    for d in range(len(p)):
        for (a, b), verdict in (((p, q), True), ((q, p), False)):
            wins, used = BoostProb("oracle", a, b, d, deltaMain,
                                   1/(32*len(p)), 1/32)
            spent += used
            if wins:
                return verdict, spent
    return True, spent
def max_4(s, dim, delta, error):
    """Noisily pick the lexicographic maximum of a small group `s`.

    Scans the group keeping a running champion decided by noisy Lex
    comparisons.  `dim` is unused but kept for interface compatibility
    with find_max.  Returns (chosen_point, oracle_calls).

    Fixes:
    * empty/singleton inputs previously returned a bare value (``None`` or
      ``s[0]``) while the general case returned a ``(point, calls)`` tuple,
      which broke the ``max_picked, calls = max_4(...)`` unpacking in
      ``find_max``; they now return ``(None, 0)`` / ``(s[0], 0)``;
    * the champion update was inverted -- it replaced the current champion
      exactly when the champion *won* the comparison;
    * the dead ``num_checks`` computation was removed.
    """
    if len(s) == 0:
        return None, 0
    if len(s) == 1:
        return s[0], 0
    num_calls = 0
    curr = s[0]
    for i in range(1, len(s)):
        # Lex(curr, s[i]) is True when curr >= s[i]; keep curr in that case.
        curr_wins, calls = Lex(curr, s[i], delta / 2)
        num_calls += calls
        if not curr_wins:
            curr = s[i]
    return curr, num_calls
def find_max(s, dim, delta, error):
    """Tournament-style noisy maximum over `s` along dimension `dim`.

    Partitions `s` into groups of at most four, noisily picks each group's
    winner with max_4, then recurses on the winners with a halved failure
    budget.  Returns (maximum, oracle_calls).
    """
    s = list(s)
    n = len(s)
    if n == 0:
        return None, 0
    if n == 1:
        return s[0], 0
    winners = []
    total_calls = 0
    pos = 0
    # Groups of four ...
    while pos + 4 < n:
        champ, spent = max_4(s[pos:pos + 4], dim, delta, error)
        winners.append(champ)
        total_calls += spent
        pos += 4
    # ... plus the final tail group.
    champ, spent = max_4(s[pos:n], dim, delta, error)
    total_calls += spent
    winners.append(champ)
    overall, spent = find_max(winners, dim, delta / 2, error)
    return overall, total_calls + spent
def is_dominated(v, C, delta, error):
    """Noisily test whether any point in C dominates v coordinatewise.

    Repeats the pairwise max-oracle an odd number of times per coordinate
    and takes the majority vote (mode).  Returns (dominated, oracle_calls).

    NOTE(review): ``stats`` comes from the ``import stats`` above, which is
    not a stdlib module -- presumably ``scipy.stats`` or ``statistics`` was
    intended; confirm the dependency and the shape of ``stats.mode``'s
    return value.
    """
    dims = len(v)
    num_checks = int(np.log(1/delta) * 2)
    num_calls = 0
    # Force an odd repetition count so the majority vote cannot tie.
    if num_checks % 2 == 0:
        num_checks += 1
    for c in C:
        dominated = np.zeros(dims)
        comp = (v, c)
        for dim in range(dims):
            # Majority vote on which point is larger along this dimension;
            # 1 means c wins the coordinate.
            max_i = stats.mode([oracle_max(comp, dim, error) for _ in range(num_checks)])
            num_calls += num_checks
            dominated[dim] = max_i
        if np.all(dominated == 1):
            return True, num_calls
    return False, num_calls
def skysample(khat, s, delta, error, use_argmax_lex = None, use_update = None):
    """Sample up to khat skyline points from s via majority-vote filtering.

    Each round removes every point (noisily) dominated by the current
    skyline, then promotes the MaxLex winner of the survivors.
    Returns (skyline, oracle_calls).  The use_argmax_lex / use_update
    parameters are accepted only for interface compatibility with
    skyline_computation and are not used here.
    """
    assert len(s) > 0
    sky = []
    dims = len(s[0])
    remaining = set(s)
    num_calls = 0
    for i in range(khat):
        # find non-dominated points
        to_remove = []
        for r in remaining:
            comp, calls = is_dominated(r, sky, delta, error)
            num_calls += calls
            if comp:
                to_remove.append(r)
        for r in to_remove:
            remaining.remove(r)
        if len(remaining) > 0:
            # MaxLex expects a list; its 3rd/4th args map to (delta, deltaMain).
            remaining = list(remaining)
            z, calls = MaxLex(remaining[0], remaining, delta/2, error)
            num_calls += calls
            sky.append(z)
            remaining = set(remaining)
            remaining.remove(z)
    return sky, num_calls
def skyline_computation(s, delta, error, alg, use_argmax_lex=None, use_update=None):
    """Run skyline algorithm `alg` with a geometrically growing size guess.

    Starts with a guessed skyline size k = 4 and squares it (while halving
    the failure budget) until `alg` returns fewer than k points, i.e. the
    guess was large enough.  Returns (skyline, oracle_calls).
    """
    attempt = 1
    guess = 4
    total_calls = 0
    while True:
        result, spent = alg(guess, s, delta / (2 ** attempt), error,
                            use_argmax_lex=use_argmax_lex,
                            use_update=use_update)
        total_calls += spent
        if len(result) < guess:
            return result, total_calls
        attempt += 1
        guess = guess ** 2
trials = 100
dims = 6
data_num = 6
X = [tuple(x) for x in [[1,1],[2,2],[3,5], [1,99]]]
expected = brute_force_noiseless(X)
print(expected)
# num_vec = 2
# len_vec = 10
# s = [tuple([1 for _ in range(num_vec)]) for _ in range(len_vec)] + [tuple([5 for _ in range(num_vec)])]
# expected = [tuple([5 for _ in range(num_vec)])]
calls = []
for p in [1/6]:
num_correct = 0
for i in range(trials):
random.shuffle(X)
output, num_calls = SkyLineHighDim(X, 0.01, p)
calls.append(num_calls)
# print(output)
if set(output) == set(expected):
num_correct += 1
print(1 - num_correct/trials, p)
print(np.mean(num_calls), np.max(num_calls))
trials = 100
dims = 6
data_num = 6
X = [tuple(x) for x in [[1,1],[2,2],[3,5], [1,99]]]
expected = brute_force_noiseless(X)
print(expected)
# num_vec = 2
# len_vec = 10
# s = [tuple([1 for _ in range(num_vec)]) for _ in range(len_vec)] + [tuple([5 for _ in range(num_vec)])]
# expected = [tuple([5 for _ in range(num_vec)])]
calls = []
for p in [1/6]:
num_correct = 0
for i in range(trials):
random.shuffle(X)
output, num_calls = skyline_computation(X, 0.01, p, skysample)
calls.append(num_calls)
# print(output)
if set(output) == set(expected):
num_correct += 1
print(1 - num_correct/trials, p)
print(np.mean(num_calls), np.max(num_calls))
trials = 100
dims = 6
data_num = 6
X = [tuple(x) for x in [[1,1],[2,2],[3,5], [1,99]]]
expected = brute_force_noiseless(X)
print(expected)
# num_vec = 2
# len_vec = 10
# s = [tuple([1 for _ in range(num_vec)]) for _ in range(len_vec)] + [tuple([5 for _ in range(num_vec)])]
# expected = [tuple([5 for _ in range(num_vec)])]
calls = []
for p in [1/6]:
num_correct = 0
for i in range(trials):
random.shuffle(X)
output, num_calls = skyline_computation(X, 0.01, p, SkylineHighDim, use_argmax_lex = True, use_update = (1, 1, 1, -2))
calls.append(num_calls)
if set(output) == set(expected):
num_correct += 1
print(1 - num_correct/trials, p)
print(np.mean(num_calls), np.max(num_calls))
trials = 100
dims = 6
data_num = 6
X = [tuple(x) for x in [[1,1],[2,2],[3,5],[5,3],[7,7], [1,99], [99,1],[97,5]]]
expected = brute_force_noiseless(X)
print(expected)
# num_vec = 2
# len_vec = 10
# s = [tuple([1 for _ in range(num_vec)]) for _ in range(len_vec)] + [tuple([5 for _ in range(num_vec)])]
# expected = [tuple([5 for _ in range(num_vec)])]
for p in [1/9]:
num_correct = 0
for i in range(trials):
random.shuffle(s)
output = skyline_computation(X, 0.1, p)
# print(output)
if set(output) == set(expected):
num_correct += 1
print(1 - num_correct/trials, p)
def is_dominated(v, C, delta, error):
    """Majority-vote test of whether point ``v`` is dominated by any point in ``C``.

    For each candidate ``c`` the noisy comparison oracle is queried an odd
    number of times per dimension; ``v`` counts as dominated by ``c`` when
    the majority vote says ``c`` wins on every dimension.

    Parameters
    ----------
    v : tuple of numbers, the query point.
    C : iterable of candidate points (same dimensionality as ``v``).
    delta : failure probability budget; controls the number of repeats.
    error : per-query noise level forwarded to the oracle.

    Returns
    -------
    bool : True if some ``c`` in ``C`` dominates ``v`` on every dimension.
    """
    dims = len(v)
    # Repeat count grows with log(1/delta); forced odd so the majority
    # vote can never tie.
    num_checks = int(np.log(1/delta) * 3)
    if num_checks % 2 == 0:
        num_checks += 1
    for c in C:
        dominated = np.zeros(dims)
        comp = (v, c)
        for dim in range(dims):
            # NOTE(review): assumes stats.mode(...) yields the majority
            # label directly — confirm against the `stats` helper in use.
            max_i = stats.mode([oracle_max(comp, dim, error) for _ in range(num_checks)])
            dominated[dim] = max_i
        if np.all(dominated == 1):
            return True
        # (dead `else: num_checks = num_checks` removed — it was a no-op)
    return False
# Sanity check: (5, 3) is not dominated by any point in s1, so the
# printed value is the empirical false-positive rate of is_dominated.
s1 = [(1, 1), (3, 5), (7, 7)]
v2 = (5, 3)
trials = 1000
p = 0.05
d1 = 0.05
num_correct = sum(1 for _ in range(trials) if is_dominated(v2, s1, d1, p))
print(1 - num_correct/trials, d1)
def max_4(s, dim, delta, error):
    """Sequential noisy maximum of ``s`` along dimension ``dim``.

    Walks the list once, keeping the current winner; each pairwise
    comparison is decided by a majority vote over repeated noisy oracle
    queries. Returns None for an empty input.
    """
    # Odd repeat count derived from the list length and the noise level.
    num_checks = int(2 * len(s) - (1/6)/(error) + 1)
    if num_checks % 2 == 0:
        num_checks += 1
    if len(s) == 0:
        return None
    if len(s) == 1:
        return s[0]
    best = s[0]
    for candidate in s[1:]:
        votes = [oracle_max((best, candidate), dim, error) for _ in range(num_checks)]
        # A nonzero majority label means the candidate beat the incumbent.
        if stats.mode(votes) != 0:
            best = candidate
    return best
# Error-rate sweep for max_4 over growing lists and several noise levels p.
for num_vec in range(1, 4):
    s = [(1, 1) for _ in range(num_vec)] + [(5, 5)]
    random.shuffle(s)
    dim = 1
    expected = max(s)
    trials = 10000
    for p in [1/6, 1/9, 1/12, 1/18]:
        num_correct = 0
        for i in range(trials):
            random.shuffle(s)
            if max_4(s, dim, p/2, p) == expected:
                num_correct += 1
        # Fix: `delta` was undefined here (NameError at runtime); report
        # the value actually passed to max_4, i.e. p/2.
        print((1 - num_correct / trials)/p, 1 - num_correct / trials, p/2, p, len(s))
| [
"numpy.log",
"random.shuffle",
"numpy.zeros",
"numpy.any",
"random.random",
"numpy.max",
"numpy.mean",
"numpy.array",
"math.log",
"numpy.all"
] | [((397, 412), 'random.random', 'random.random', ([], {}), '()\n', (410, 412), False, 'import random\n'), ((7897, 7908), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (7905, 7908), True, 'import numpy as np\n'), ((10088, 10105), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (10102, 10105), False, 'import random\n'), ((10711, 10728), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (10725, 10728), False, 'import random\n'), ((11352, 11369), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (11366, 11369), False, 'import random\n'), ((11994, 12011), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (12008, 12011), False, 'import random\n'), ((12618, 12635), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (12632, 12635), False, 'import random\n'), ((13250, 13267), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (13264, 13267), False, 'import random\n'), ((13867, 13884), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (13881, 13884), False, 'import random\n'), ((14537, 14554), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (14551, 14554), False, 'import random\n'), ((15174, 15191), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (15188, 15191), False, 'import random\n'), ((15832, 15849), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (15846, 15849), False, 'import random\n'), ((16542, 16559), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (16556, 16559), False, 'import random\n'), ((17254, 17271), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (17268, 17271), False, 'import random\n'), ((17964, 17981), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (17978, 17981), False, 'import random\n'), ((18623, 18640), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (18637, 18640), False, 'import random\n'), ((19259, 19276), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (19273, 19276), False, 'import random\n'), ((19579, 
19596), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (19593, 19596), False, 'import random\n'), ((19900, 19917), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (19914, 19917), False, 'import random\n'), ((20248, 20265), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (20262, 20265), False, 'import random\n'), ((21716, 21733), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (21730, 21733), False, 'import random\n'), ((22036, 22053), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (22050, 22053), False, 'import random\n'), ((22357, 22374), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (22371, 22374), False, 'import random\n'), ((22705, 22722), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (22719, 22722), False, 'import random\n'), ((23189, 23206), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (23203, 23206), False, 'import random\n'), ((23509, 23526), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (23523, 23526), False, 'import random\n'), ((23830, 23847), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (23844, 23847), False, 'import random\n'), ((24178, 24195), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (24192, 24195), False, 'import random\n'), ((26488, 26505), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (26502, 26505), False, 'import random\n'), ((26860, 26880), 'numpy.mean', 'np.mean', (['total_calls'], {}), '(total_calls)\n', (26867, 26880), True, 'import numpy as np\n'), ((26882, 26901), 'numpy.max', 'np.max', (['total_calls'], {}), '(total_calls)\n', (26888, 26901), True, 'import numpy as np\n'), ((27024, 27041), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (27038, 27041), False, 'import random\n'), ((27397, 27417), 'numpy.mean', 'np.mean', (['total_calls'], {}), '(total_calls)\n', (27404, 27417), True, 'import numpy as np\n'), ((27419, 27438), 'numpy.max', 'np.max', (['total_calls'], {}), '(total_calls)\n', 
(27425, 27438), True, 'import numpy as np\n'), ((27561, 27578), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (27575, 27578), False, 'import random\n'), ((27931, 27951), 'numpy.mean', 'np.mean', (['total_calls'], {}), '(total_calls)\n', (27938, 27951), True, 'import numpy as np\n'), ((27953, 27972), 'numpy.max', 'np.max', (['total_calls'], {}), '(total_calls)\n', (27959, 27972), True, 'import numpy as np\n'), ((28095, 28112), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (28109, 28112), False, 'import random\n'), ((28468, 28488), 'numpy.mean', 'np.mean', (['total_calls'], {}), '(total_calls)\n', (28475, 28488), True, 'import numpy as np\n'), ((28490, 28509), 'numpy.max', 'np.max', (['total_calls'], {}), '(total_calls)\n', (28496, 28509), True, 'import numpy as np\n'), ((31401, 31421), 'numpy.mean', 'np.mean', (['total_calls'], {}), '(total_calls)\n', (31408, 31421), True, 'import numpy as np\n'), ((31423, 31442), 'numpy.max', 'np.max', (['total_calls'], {}), '(total_calls)\n', (31429, 31442), True, 'import numpy as np\n'), ((31444, 31465), 'numpy.mean', 'np.mean', (['hamming_dist'], {}), '(hamming_dist)\n', (31451, 31465), True, 'import numpy as np\n'), ((31467, 31487), 'numpy.max', 'np.max', (['hamming_dist'], {}), '(hamming_dist)\n', (31473, 31487), True, 'import numpy as np\n'), ((31647, 31662), 'random.random', 'random.random', ([], {}), '()\n', (31660, 31662), False, 'import random\n'), ((39219, 39236), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (39233, 39236), False, 'import random\n'), ((1176, 1196), 'math.log', 'math.log', (['(1 / delta1)'], {}), '(1 / delta1)\n', (1184, 1196), False, 'import math\n'), ((33587, 33601), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (33595, 33601), True, 'import numpy as np\n'), ((33828, 33850), 'numpy.all', 'np.all', (['(dominated == 1)'], {}), '(dominated == 1)\n', (33834, 33850), True, 'import numpy as np\n'), ((35549, 35566), 'random.shuffle', 'random.shuffle', (['X'], {}), 
'(X)\n', (35563, 35566), False, 'import random\n'), ((35795, 35813), 'numpy.mean', 'np.mean', (['num_calls'], {}), '(num_calls)\n', (35802, 35813), True, 'import numpy as np\n'), ((35815, 35832), 'numpy.max', 'np.max', (['num_calls'], {}), '(num_calls)\n', (35821, 35832), True, 'import numpy as np\n'), ((36242, 36259), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (36256, 36259), False, 'import random\n'), ((36504, 36522), 'numpy.mean', 'np.mean', (['num_calls'], {}), '(num_calls)\n', (36511, 36522), True, 'import numpy as np\n'), ((36524, 36541), 'numpy.max', 'np.max', (['num_calls'], {}), '(num_calls)\n', (36530, 36541), True, 'import numpy as np\n'), ((36951, 36968), 'random.shuffle', 'random.shuffle', (['X'], {}), '(X)\n', (36965, 36968), False, 'import random\n'), ((37245, 37263), 'numpy.mean', 'np.mean', (['num_calls'], {}), '(num_calls)\n', (37252, 37263), True, 'import numpy as np\n'), ((37265, 37282), 'numpy.max', 'np.max', (['num_calls'], {}), '(num_calls)\n', (37271, 37282), True, 'import numpy as np\n'), ((37708, 37725), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (37722, 37725), False, 'import random\n'), ((38092, 38106), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (38100, 38106), True, 'import numpy as np\n'), ((38297, 38319), 'numpy.all', 'np.all', (['(dominated == 1)'], {}), '(dominated == 1)\n', (38303, 38319), True, 'import numpy as np\n'), ((712, 732), 'math.log', 'math.log', (['(1 / delta1)'], {}), '(1 / delta1)\n', (720, 732), False, 'import math\n'), ((758, 778), 'math.log', 'math.log', (['(1 / delta2)'], {}), '(1 / delta2)\n', (766, 778), False, 'import math\n'), ((8190, 8209), 'math.log', 'math.log', (['(1 / delta)'], {}), '(1 / delta)\n', (8198, 8209), False, 'import math\n'), ((33460, 33477), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (33466, 33477), True, 'import numpy as np\n'), ((37983, 38000), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (37989, 38000), True, 
'import numpy as np\n'), ((39395, 39412), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (39409, 39412), False, 'import random\n'), ((9461, 9499), 'numpy.all', 'np.all', (['[(x <= -2) for x in remaining]'], {}), '([(x <= -2) for x in remaining])\n', (9467, 9499), True, 'import numpy as np\n'), ((20949, 20966), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (20963, 20966), False, 'import random\n'), ((1942, 1959), 'numpy.any', 'np.any', (['dominated'], {}), '(dominated)\n', (1948, 1959), True, 'import numpy as np\n'), ((29650, 29667), 'numpy.any', 'np.any', (['dominated'], {}), '(dominated)\n', (29656, 29667), True, 'import numpy as np\n')] |
"""Example by <NAME>."""
import numpy as np
import phonopy
phonon = phonopy.load(
    unitcell_filename="POSCAR-unitcell", supercell_matrix=[2, 2, 1], log_level=1
)
print("Space group: %s" % phonon.symmetry.get_international_table())

# Example to obtain the dynamical matrix at Gamma.
dmat = phonon.get_dynamical_matrix_at_q([0, 0, 0])
print(dmat)


# Example of a band structure calculation.
def _segment(q_start, q_end, n_points=51):
    """Return n_points q-points linearly interpolated from q_start to q_end."""
    q_start = np.array(q_start)
    q_end = np.array(q_end)
    return [q_start + (q_end - q_start) / 50 * i for i in range(n_points)]


bands = [
    _segment([1.0 / 3, 1.0 / 3, 0], [0, 0, 0]),
    _segment([0, 0, 0], [1.0 / 3, 1.0 / 3, 1.0 / 2]),
]

print("\nPhonon dispersion:")
phonon.run_band_structure(bands, with_eigenvectors=True, labels=["X", r"$\Gamma$", "L"])
band_plot = phonon.plot_band_structure()
band_plot.show()

# Dump q-point, distance and frequencies for every point of every segment.
bs = phonon.get_band_structure_dict()
for seg_q, seg_d, seg_f in zip(bs["qpoints"], bs["distances"], bs["frequencies"]):
    for q, d, f in zip(seg_q, seg_d, seg_f):
        print("# %f %f %f" % tuple(q))
        print(("%s " + "%f " * len(f)) % ((d,) + tuple(f)))
| [
"phonopy.load",
"numpy.array"
] | [((70, 165), 'phonopy.load', 'phonopy.load', ([], {'unitcell_filename': '"""POSCAR-unitcell"""', 'supercell_matrix': '[2, 2, 1]', 'log_level': '(1)'}), "(unitcell_filename='POSCAR-unitcell', supercell_matrix=[2, 2, 1\n ], log_level=1)\n", (82, 165), False, 'import phonopy\n'), ((399, 430), 'numpy.array', 'np.array', (['[1.0 / 3, 1.0 / 3, 0]'], {}), '([1.0 / 3, 1.0 / 3, 0])\n', (407, 430), True, 'import numpy as np\n'), ((439, 458), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (447, 458), True, 'import numpy as np\n'), ((573, 592), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (581, 592), True, 'import numpy as np\n'), ((601, 638), 'numpy.array', 'np.array', (['[1.0 / 3, 1.0 / 3, 1.0 / 2]'], {}), '([1.0 / 3, 1.0 / 3, 1.0 / 2])\n', (609, 638), True, 'import numpy as np\n')] |
import scipy
from scipy.signal.signaltools import wiener
import matplotlib.pyplot as plt
import torch
from util.reservoir_w_cur_replay_buffer import Reservoir_with_Cur_Replay_Memory
import numpy as np
# Sliding-window SNR analysis of the inverse-curiosity signal.
s_c = torch.load("forward_curiosity")
a_c = torch.load("inverse_curiosity")

mul = 1000
change_var_at = [0, 100, 150, 350]
change_var_at = [v * mul for v in change_var_at]

avg_len = 60  # sliding-window length for the running statistics

mean = []
std = []
SNR = []
# Fix: renamed from `bool`/`bool_cur` — shadowing the builtin `bool` is a
# defect. The on-disk file names are kept unchanged for compatibility.
low_snr_flag = []
low_snr_cur = []
for i in range(len(a_c) - avg_len):
    window = a_c[i:i + avg_len]
    mean.append(np.mean(window))
    std.append(np.std(window))
    SNR.append(mean[-1] / std[-1])
    # Flag windows whose SNR falls below three times the window mean.
    if SNR[-1] < 3 * mean[-1]:
        low_snr_flag.append(1)
        low_snr_cur.append(mean[-1])
    else:
        low_snr_flag.append(0)
        low_snr_cur.append(0)

plt.plot(SNR)
plt.plot(mean)
plt.legend(["SNR", "mean"])
plt.show()
plt.close()

plt.plot(low_snr_flag)
plt.savefig("bool")
plt.close()

plt.plot(low_snr_cur)
plt.savefig("bool_cur")
torch.save(low_snr_cur, "../bool_cur")
torch.save(low_snr_flag, "../bool")
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.close",
"torch.load",
"matplotlib.pyplot.legend",
"torch.save",
"numpy.mean",
"matplotlib.pyplot.savefig"
] | [((207, 238), 'torch.load', 'torch.load', (['"""forward_curiosity"""'], {}), "('forward_curiosity')\n", (217, 238), False, 'import torch\n'), ((245, 276), 'torch.load', 'torch.load', (['"""inverse_curiosity"""'], {}), "('inverse_curiosity')\n", (255, 276), False, 'import torch\n'), ((764, 777), 'matplotlib.pyplot.plot', 'plt.plot', (['SNR'], {}), '(SNR)\n', (772, 777), True, 'import matplotlib.pyplot as plt\n'), ((778, 792), 'matplotlib.pyplot.plot', 'plt.plot', (['mean'], {}), '(mean)\n', (786, 792), True, 'import matplotlib.pyplot as plt\n'), ((793, 820), 'matplotlib.pyplot.legend', 'plt.legend', (["['SNR', 'mean']"], {}), "(['SNR', 'mean'])\n", (803, 820), True, 'import matplotlib.pyplot as plt\n'), ((821, 831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((832, 843), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (841, 843), True, 'import matplotlib.pyplot as plt\n'), ((845, 859), 'matplotlib.pyplot.plot', 'plt.plot', (['bool'], {}), '(bool)\n', (853, 859), True, 'import matplotlib.pyplot as plt\n'), ((860, 879), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bool"""'], {}), "('bool')\n", (871, 879), True, 'import matplotlib.pyplot as plt\n'), ((880, 891), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (889, 891), True, 'import matplotlib.pyplot as plt\n'), ((893, 911), 'matplotlib.pyplot.plot', 'plt.plot', (['bool_cur'], {}), '(bool_cur)\n', (901, 911), True, 'import matplotlib.pyplot as plt\n'), ((912, 935), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bool_cur"""'], {}), "('bool_cur')\n", (923, 935), True, 'import matplotlib.pyplot as plt\n'), ((937, 972), 'torch.save', 'torch.save', (['bool_cur', '"""../bool_cur"""'], {}), "(bool_cur, '../bool_cur')\n", (947, 972), False, 'import torch\n'), ((973, 1000), 'torch.save', 'torch.save', (['bool', '"""../bool"""'], {}), "(bool, '../bool')\n", (983, 1000), False, 'import torch\n'), ((515, 542), 'numpy.mean', 
'np.mean', (['a_c[i:i + avg_len]'], {}), '(a_c[i:i + avg_len])\n', (522, 542), True, 'import numpy as np\n'), ((557, 583), 'numpy.std', 'np.std', (['a_c[i:i + avg_len]'], {}), '(a_c[i:i + avg_len])\n', (563, 583), True, 'import numpy as np\n')] |
import numpy as np
# call ft_uneven for a single time series and ft_uneven_bulk for multiple time series
# This Block of methods is used to parse the arguments for bulk calculations
def indexed(array2d):
    """Accessor factory: f(i) returns row i of *array2d*."""
    def dummy(index):
        return array2d[index]
    return dummy


def not_indexed(array):
    """Accessor factory: f(i) returns *array* itself for every index."""
    def dummy(index):
        return array
    return dummy


def is_none():
    """Accessor factory: f(i) returns None for every index."""
    def dummy(index):
        return None
    return dummy


def select_indexed(array):
    """Choose the accessor matching *array*'s layout.

    - None                         -> accessor that always returns None
    - list of lists / 2-d ndarray  -> per-series rows (indexed)
    - flat 1-d sequence            -> shared by every series (not_indexed)
    """
    if array is None:
        return is_none()
    # Fix: the original returned indexed(times)/not_indexed(times), closing
    # over the *global* name `times`, so select_indexed(omegas) and
    # select_indexed(weights) silently produced time-axis accessors (or a
    # NameError). The function's own argument must be used.
    if isinstance(array[0], (list, np.ndarray)):
        return indexed(array)
    return not_indexed(array)
# main methods for ft_uneven calculation
# values, times, omegas: list or ndarray(1 dim), ft_sign, time_zero: float, weights: list or ndarray(1 dim), return_ls, lin_weights: boolean
def ft_uneven(values, times, omegas, ft_sign, time_zero, weights=None, return_ls=False, lin_weights=False):
    """Lomb-Scargle-style Fourier transform of an unevenly sampled series.

    Parameters
    ----------
    values, times : 1-d sequences of equal length (samples and timestamps).
    omegas : 1-d sequence of angular frequencies; must be non-empty.
    ft_sign : sign convention (+1/-1) applied to the imaginary part.
    time_zero : reference time used for the phase factor.
    weights : optional 1-d per-sample weights.
    return_ls : if True, additionally return the periodogram values.
    lin_weights : reserved flag (currently unused; kept for API compatibility).

    Returns
    -------
    fts : complex ndarray, one entry per omega.
    lss : real ndarray (only when ``return_ls`` is True).

    Raises
    ------
    ValueError : if *omegas* is empty.
    """
    num_val = len(values)
    num_omg = len(omegas)
    if num_omg == 0:
        raise ValueError('omegas argument cannot be empty')
    lss = np.zeros(num_omg)
    fts = np.zeros(num_omg, dtype=np.cdouble)
    if weights is None:
        for i in range(num_omg):
            omg = omegas[i]
            if omg:
                # classical Lomb-Scargle phase offset tau
                csum = np.sum(np.cos(2.0 * omg * times))
                ssum = np.sum(np.sin(2.0 * omg * times))
                tau = 0.5 * np.arctan2(ssum, csum)
                sumr = np.sum(values * np.cos(omg * times - tau))
                sumi = np.sum(values * np.sin(omg * times - tau))
                scos2 = np.sum((np.cos(omg * times - tau))**2)
                ssin2 = np.sum((np.sin(omg * times - tau))**2)
                ft_real = sumr/(2**0.5 * scos2**0.5)
                ft_imag = ft_sign * sumi/(2**0.5 * ssin2**0.5)
                phi_this = tau - omg * time_zero
                fts[i] = (ft_real + ft_imag * 1j) * np.exp(1j*phi_this)
                lss[i] = (sumr**2/scos2) + (sumi**2/ssin2)
            else:
                # zero frequency: normalized mean term, purely real
                fts[i] = np.sum(values)/np.sqrt(num_val)
                lss[i] = fts[i]**2
    else:
        values = weights * values
        for i in range(num_omg):
            omg = omegas[i]
            if omg:
                csum = np.sum(weights * np.cos(2.0 * omg * times))
                ssum = np.sum(weights * np.sin(2.0 * omg * times))
                tau = 0.5 * np.arctan2(ssum, csum)
                # Fix: this branch used bare cos/sin (NameError at runtime);
                # use np.cos/np.sin as in the unweighted branch.
                sumr = np.sum(values * np.cos(omg * times - tau))
                sumi = np.sum(values * np.sin(omg * times - tau))
                scos2 = np.sum(weights * (np.cos(omg * times - tau))**2)
                ssin2 = np.sum(weights * (np.sin(omg * times - tau))**2)
                ft_real = sumr/(2**0.5 * scos2**0.5)
                ft_imag = ft_sign * sumi / (2**0.5 * ssin2**0.5)
                phi_this = tau - omg * time_zero
                fts[i] = (ft_real + ft_imag * 1j) * np.exp(1j*phi_this)
                lss[i] = (sumr**2/scos2) + (sumi**2/ssin2)
            else:
                fts[i] = np.sum(values)/np.sqrt(num_val)
                lss[i] = fts[i]**2
    if return_ls:
        return fts, lss
    return fts
# values: list (containing lists or ndarrays(1 dim)) or ndarray(2 dim) or ndarray(1 dim containing ndarrays (1 dim))
# times, omegas: list (containing lists or ndarrays(1 dim)) or ndarray(2 dim) or ndarray(1 dim containing ndarrays (1 dim)) or list or ndarray (1 dim)
# if times, omegas is 1-dim, times and omegas are used for all time series
#ft_sign, time_zero: float, weights: list or ndarray(1 dim), return_ls, lin_weights: boolean
# mulitthreading required multiprocessing module (should be preinstalled)
def ft_uneven_bulk(values, times, omegas, ft_sign, time_zero, weights=None, return_ls=False, lin_weights=False, multithreading=False):
    """Apply ft_uneven to a collection of time series.

    ``times``/``omegas``/``weights`` may each be given once (shared by all
    series) or per series; select_indexed picks the right accessor. With
    ``multithreading=True`` the work is fanned out over a process pool
    (requires the multiprocessing module).
    """
    times_of = select_indexed(times)
    omegas_of = select_indexed(omegas)
    weights_of = select_indexed(weights)
    n = len(values)
    if multithreading:
        from multiprocessing import Pool
        pool = Pool()
        job_args = zip(values,
                       [times_of(i) for i in range(n)],
                       [omegas_of(i) for i in range(n)],
                       [ft_sign] * n, [time_zero] * n,
                       [weights_of(i) for i in range(n)],
                       [return_ls] * n, [lin_weights] * n)
        return pool.starmap(ft_uneven, job_args)
    # straightforward path: one series at a time
    return [ft_uneven(values[i], times_of(i), omegas_of(i), ft_sign, time_zero,
                      weights=weights_of(i), return_ls=return_ls,
                      lin_weights=lin_weights)
            for i in range(n)]
# test if code runs
# Smoke test: 100 random series of 100 samples with shared time and
# frequency grids.
if __name__ == '__main__':
    draw = np.random.standard_normal
    series = draw(size=(100, 100))
    sample_times = draw(size=100)
    freq_grid = draw(size=100)
    ft_uneven_bulk(series, sample_times, freq_grid, 1, 0)
| [
"numpy.arctan2",
"numpy.sum",
"numpy.zeros",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"multiprocessing.Pool",
"numpy.sqrt"
] | [((1143, 1160), 'numpy.zeros', 'np.zeros', (['num_omg'], {}), '(num_omg)\n', (1151, 1160), True, 'import numpy as np\n'), ((1171, 1206), 'numpy.zeros', 'np.zeros', (['num_omg'], {'dtype': 'np.cdouble'}), '(num_omg, dtype=np.cdouble)\n', (1179, 1206), True, 'import numpy as np\n'), ((4704, 4710), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (4708, 4710), False, 'from multiprocessing import Pool\n'), ((1409, 1434), 'numpy.cos', 'np.cos', (['(2.0 * omg * times)'], {}), '(2.0 * omg * times)\n', (1415, 1434), True, 'import numpy as np\n'), ((1466, 1491), 'numpy.sin', 'np.sin', (['(2.0 * omg * times)'], {}), '(2.0 * omg * times)\n', (1472, 1491), True, 'import numpy as np\n'), ((1521, 1543), 'numpy.arctan2', 'np.arctan2', (['ssum', 'csum'], {}), '(ssum, csum)\n', (1531, 1543), True, 'import numpy as np\n'), ((2023, 2046), 'numpy.exp', 'np.exp', (['(1.0j * phi_this)'], {}), '(1.0j * phi_this)\n', (2029, 2046), True, 'import numpy as np\n'), ((2162, 2176), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2168, 2176), True, 'import numpy as np\n'), ((2177, 2193), 'numpy.sqrt', 'np.sqrt', (['num_val'], {}), '(num_val)\n', (2184, 2193), True, 'import numpy as np\n'), ((2648, 2670), 'numpy.arctan2', 'np.arctan2', (['ssum', 'csum'], {}), '(ssum, csum)\n', (2658, 2670), True, 'import numpy as np\n'), ((3166, 3189), 'numpy.exp', 'np.exp', (['(1.0j * phi_this)'], {}), '(1.0j * phi_this)\n', (3172, 3189), True, 'import numpy as np\n'), ((3289, 3303), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (3295, 3303), True, 'import numpy as np\n'), ((3304, 3320), 'numpy.sqrt', 'np.sqrt', (['num_val'], {}), '(num_val)\n', (3311, 3320), True, 'import numpy as np\n'), ((1584, 1609), 'numpy.cos', 'np.cos', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (1590, 1609), True, 'import numpy as np\n'), ((1650, 1675), 'numpy.sin', 'np.sin', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (1656, 1675), True, 'import numpy as np\n'), ((1710, 1735), 'numpy.cos', 
'np.cos', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (1716, 1735), True, 'import numpy as np\n'), ((1773, 1798), 'numpy.sin', 'np.sin', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (1779, 1798), True, 'import numpy as np\n'), ((2526, 2551), 'numpy.cos', 'np.cos', (['(2.0 * omg * times)'], {}), '(2.0 * omg * times)\n', (2532, 2551), True, 'import numpy as np\n'), ((2593, 2618), 'numpy.sin', 'np.sin', (['(2.0 * omg * times)'], {}), '(2.0 * omg * times)\n', (2599, 2618), True, 'import numpy as np\n'), ((2841, 2866), 'numpy.cos', 'np.cos', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (2847, 2866), True, 'import numpy as np\n'), ((2914, 2939), 'numpy.sin', 'np.sin', (['(omg * times - tau)'], {}), '(omg * times - tau)\n', (2920, 2939), True, 'import numpy as np\n')] |
import numpy as np #Numerical Python : lib used for matrix operations
from math import cos,sin #Math lib of python for various mathematical functions
import matplotlib.pyplot as plt #plotting lib of python
x_list = np.linspace(-10, 10, 10000)  # linearly spaced sample points
y_list = x_list ** 2                  # parabola y = x^2

# Read the transformation parameters (same prompt order as before).
theta = float(input("Enter the value of angle (anticlockwise rotation) : theta = "))
rot_x_list = x_list * cos(theta) + y_list * sin(theta)
rot_y_list = -x_list * sin(theta) + y_list * cos(theta)

a, b = input("Enter Translation Vector : ").split()
trans_x_list = x_list + float(a)
trans_y_list = y_list + float(b)

a, b = input("Enter Scaling Vector : ").split()
scaled_x_list = x_list * float(a)
scaled_y_list = y_list * float(b)

# Reflection about the X-axis simply negates y.
ref_x_list = x_list
ref_y_list = -y_list

a, b = input("Enter Shearing Parameters : ").split()
shearx_x_list = x_list + float(a) * y_list
shearx_y_list = y_list
sheary_x_list = x_list
sheary_y_list = y_list + float(b) * x_list

# One subplot per transformation: (transformed curve, legend tag, title).
panels = [
    ((rot_x_list, rot_y_list), "Rotation", "(Anti-Clockwise) ROTATION"),
    ((trans_x_list, trans_y_list), "Translation", "TRANSLATION"),
    ((scaled_x_list, scaled_y_list), "Scaling", "(Expanding) SCALING"),
    ((ref_x_list, ref_y_list), "Reflection", "REFLECTION about X-axis"),
    ((shearx_x_list, shearx_y_list), "Shearing", "SHEARING in X-axis"),
    ((sheary_x_list, sheary_y_list), "Shearing", "SHEARING in y-axis"),
]

plt.clf()
for pos, ((tx, ty), tag, title) in enumerate(panels, start=1):
    plt.subplot(3, 2, pos)
    plt.plot(x_list, y_list, "b-", label="Before " + tag)
    plt.plot(tx, ty, "r--", label="After " + tag)
    plt.xlabel("X-Points")
    plt.ylabel("Y-Points")
    plt.title(title)
    plt.legend()
    plt.grid()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"math.sin",
"math.cos",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((234, 261), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(10000)'], {}), '(-10, 10, 10000)\n', (245, 261), True, 'import numpy as np\n'), ((1393, 1402), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1400, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1426), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (1417, 1426), True, 'import matplotlib.pyplot as plt\n'), ((1426, 1481), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Rotation"""'}), "(x_list, y_list, 'b-', label='Before Rotation')\n", (1434, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1480, 1543), 'matplotlib.pyplot.plot', 'plt.plot', (['rot_x_list', 'rot_y_list', '"""r--"""'], {'label': '"""After Rotation"""'}), "(rot_x_list, rot_y_list, 'r--', label='After Rotation')\n", (1488, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (1552, 1564), True, 'import matplotlib.pyplot as plt\n'), ((1566, 1588), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Points"""'], {}), "('Y-Points')\n", (1576, 1588), True, 'import matplotlib.pyplot as plt\n'), ((1590, 1628), 'matplotlib.pyplot.title', 'plt.title', (['"""(Anti-Clockwise) ROTATION"""'], {}), "('(Anti-Clockwise) ROTATION')\n", (1599, 1628), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1642), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1644, 1654), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1652, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1678), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (1669, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1736), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Translation"""'}), "(x_list, 
y_list, 'b-', label='Before Translation')\n", (1686, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1805), 'matplotlib.pyplot.plot', 'plt.plot', (['trans_x_list', 'trans_y_list', '"""r--"""'], {'label': '"""After Translation"""'}), "(trans_x_list, trans_y_list, 'r--', label='After Translation')\n", (1743, 1805), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (1814, 1826), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1850), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Points"""'], {}), "('Y-Points')\n", (1838, 1850), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1876), 'matplotlib.pyplot.title', 'plt.title', (['"""TRANSLATION"""'], {}), "('TRANSLATION')\n", (1861, 1876), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1890), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1902), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1900, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1926), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (1917, 1926), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1980), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Scaling"""'}), "(x_list, y_list, 'b-', label='Before Scaling')\n", (1934, 1980), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2047), 'matplotlib.pyplot.plot', 'plt.plot', (['scaled_x_list', 'scaled_y_list', '"""r--"""'], {'label': '"""After Scaling"""'}), "(scaled_x_list, scaled_y_list, 'r--', label='After Scaling')\n", (1987, 2047), True, 'import matplotlib.pyplot as plt\n'), ((2046, 2068), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (2056, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2092), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Y-Points"""'], {}), "('Y-Points')\n", (2080, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2126), 'matplotlib.pyplot.title', 'plt.title', (['"""(Expanding) SCALING"""'], {}), "('(Expanding) SCALING')\n", (2103, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2138, 2140), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2152), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2150, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2176), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (2167, 2176), True, 'import matplotlib.pyplot as plt\n'), ((2176, 2233), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Reflection"""'}), "(x_list, y_list, 'b-', label='Before Reflection')\n", (2184, 2233), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2297), 'matplotlib.pyplot.plot', 'plt.plot', (['ref_x_list', 'ref_y_list', '"""r--"""'], {'label': '"""After Reflection"""'}), "(ref_x_list, ref_y_list, 'r--', label='After Reflection')\n", (2240, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (2306, 2318), True, 'import matplotlib.pyplot as plt\n'), ((2320, 2342), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Points"""'], {}), "('Y-Points')\n", (2330, 2342), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2380), 'matplotlib.pyplot.title', 'plt.title', (['"""REFLECTION about X-axis"""'], {}), "('REFLECTION about X-axis')\n", (2353, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2382, 2394), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2392, 2394), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2406), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2404, 2406), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2430), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (2421, 2430), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2485), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Shearing"""'}), "(x_list, y_list, 'b-', label='Before Shearing')\n", (2438, 2485), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2553), 'matplotlib.pyplot.plot', 'plt.plot', (['shearx_x_list', 'shearx_y_list', '"""r--"""'], {'label': '"""After Shearing"""'}), "(shearx_x_list, shearx_y_list, 'r--', label='After Shearing')\n", (2492, 2553), True, 'import matplotlib.pyplot as plt\n'), ((2552, 2574), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (2562, 2574), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Points"""'], {}), "('Y-Points')\n", (2586, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2631), 'matplotlib.pyplot.title', 'plt.title', (['"""SHEARING in X-axis"""'], {}), "('SHEARING in X-axis')\n", (2609, 2631), True, 'import matplotlib.pyplot as plt\n'), ((2633, 2645), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2643, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2657), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2655, 2657), True, 'import matplotlib.pyplot as plt\n'), ((2661, 2681), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (2672, 2681), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2736), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', '"""b-"""'], {'label': '"""Before Shearing"""'}), "(x_list, y_list, 'b-', label='Before Shearing')\n", (2689, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2804), 'matplotlib.pyplot.plot', 'plt.plot', (['sheary_x_list', 'sheary_y_list', '"""r--"""'], {'label': '"""After Shearing"""'}), "(sheary_x_list, sheary_y_list, 'r--', label='After 
Shearing')\n", (2743, 2804), True, 'import matplotlib.pyplot as plt\n'), ((2803, 2825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X-Points"""'], {}), "('X-Points')\n", (2813, 2825), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y-Points"""'], {}), "('Y-Points')\n", (2837, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2882), 'matplotlib.pyplot.title', 'plt.title', (['"""SHEARING in y-axis"""'], {}), "('SHEARING in y-axis')\n", (2860, 2882), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2896), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2894, 2896), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2908), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2906, 2908), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2920, 2922), True, 'import matplotlib.pyplot as plt\n'), ((435, 445), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (438, 445), False, 'from math import cos, sin\n'), ((455, 465), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (458, 465), False, 'from math import cos, sin\n'), ((508, 518), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (511, 518), False, 'from math import cos, sin\n'), ((528, 538), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (531, 538), False, 'from math import cos, sin\n')] |
import numpy as np
import os
'''
Purpose of this script is to take a list of file name with extension .npz serving as input of the
sketchrnn and put together the categories.
Input:
-----
- l_category : list of file name to merge
- name_mixture : name attributed to this mixture
'''
# Configuration: the .npz category files to merge and the output filename.
l_category = ['cat.npz', 'broccoli.npz', 'car.npz']
name_mixture = 'broccoli_car_cat.npz'
# Every input file must carry the expected .npz extension.
if not all(file[-4:] == '.npz' for file in l_category):
    raise ValueError('One of the filename is not a .npz extension')
files_admissible = os.listdir(os.curdir)
# Every requested file must be present in the current working directory.
if not all(file in files_admissible for file in l_category):
    raise ValueError('One of the filemane is not in the directory')
# Check that the list l_category contains files we can actually merge.
def mix_category(l_category, name_mixture):
    """Merge several sketch-rnn ``.npz`` category files into one archive.

    Parameters
    ----------
    l_category : list of str
        Paths of the ``.npz`` files to merge; each must contain the
        arrays ``train``, ``test`` and ``valid``.
    name_mixture : str
        Output filename; must end with ``.npz``.

    Raises
    ------
    ValueError
        If *name_mixture* does not have a ``.npz`` extension.
    """
    if not name_mixture.endswith('.npz'):
        raise ValueError('name_mixture should have a .npz extension')
    l_train = []
    l_test = []
    l_valid = []
    for data_location in l_category:
        # allow_pickle=True is required to read the object arrays stored
        # in the sketch-rnn datasets with NumPy >= 1.16.3.
        dataset = np.load(data_location, encoding='latin1',
                          allow_pickle=True)
        # extend() instead of repeated list concatenation avoids the
        # quadratic copying of the original implementation.
        l_train.extend(dataset['train'])
        l_test.extend(dataset['test'])
        l_valid.extend(dataset['valid'])
    np.savez(name_mixture,
             train=np.array(l_train),
             test=np.array(l_test),
             valid=np.array(l_valid))
# Script entry point: merge the configured categories into one archive.
if __name__ == '__main__':
    mix_category(l_category, name_mixture)
| [
"numpy.savez",
"numpy.array",
"os.listdir",
"numpy.load"
] | [((523, 544), 'os.listdir', 'os.listdir', (['os.curdir'], {}), '(os.curdir)\n', (533, 544), False, 'import os\n'), ((1248, 1265), 'numpy.array', 'np.array', (['l_train'], {}), '(l_train)\n', (1256, 1265), True, 'import numpy as np\n'), ((1277, 1293), 'numpy.array', 'np.array', (['l_test'], {}), '(l_test)\n', (1285, 1293), True, 'import numpy as np\n'), ((1306, 1323), 'numpy.array', 'np.array', (['l_valid'], {}), '(l_valid)\n', (1314, 1323), True, 'import numpy as np\n'), ((1328, 1387), 'numpy.savez', 'np.savez', (['name_mixture'], {'train': 'train', 'test': 'test', 'valid': 'valid'}), '(name_mixture, train=train, test=test, valid=valid)\n', (1336, 1387), True, 'import numpy as np\n'), ((1044, 1085), 'numpy.load', 'np.load', (['data_location'], {'encoding': '"""latin1"""'}), "(data_location, encoding='latin1')\n", (1051, 1085), True, 'import numpy as np\n')] |
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import absolute_import, division, print_function
import six
from six.moves import xrange, zip
import collections
try:
import collections.abc as cabc
except ImportError:
import collections as cabc
import contextlib
import datetime
import errno
import functools
import glob
import gzip
import io
from itertools import repeat
import locale
import numbers
import operator
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import matplotlib
from .deprecation import deprecated, warn_deprecated
from .deprecation import mplDeprecation, MatplotlibDeprecationWarning
def unicode_safe(s):
    """
    Return *s* as text, decoding bytes with the locale's preferred
    encoding (subject to the ``axes.formatter.use_locale`` rcParam).
    Falls back to the default codec when no usable encoding can be
    determined; non-bytes input is returned unchanged.
    """
    if isinstance(s, bytes):
        try:
            # On some systems, locale.getpreferredencoding returns None,
            # which can break unicode; and the sage project reports that
            # some systems have incorrect locale specifications, e.g.,
            # an encoding instead of a valid locale name. Another
            # pathological case that has been reported is an empty string.
            # On some systems, getpreferredencoding sets the locale, which has
            # side effects.  Passing False eliminates those side effects.
            preferredencoding = locale.getpreferredencoding(
                matplotlib.rcParams['axes.formatter.use_locale']).strip()
            if not preferredencoding:
                preferredencoding = None
        except (ValueError, ImportError, AttributeError):
            # Any failure determining the encoding: fall back to default.
            preferredencoding = None
        if preferredencoding is None:
            return six.text_type(s)
        else:
            return six.text_type(s, preferredencoding)
    return s
@deprecated('2.1')
class converter(object):
    """
    Base class for handling string -> python type with support for
    missing values
    """
    def __init__(self, missing='Null', missingval=None):
        # *missing* is the sentinel string marking absent data;
        # *missingval* is the value substituted for it on conversion.
        self.missing = missing
        self.missingval = missingval

    def __call__(self, s):
        # Return the missing-value substitute, or *s* unchanged.
        if s == self.missing:
            return self.missingval
        return s

    def is_missing(self, s):
        # A blank/whitespace string or the sentinel counts as missing.
        return not s.strip() or s == self.missing
@deprecated('2.1')
class tostr(converter):
    """convert to string or None"""
    def __init__(self, missing='Null', missingval=''):
        # Same as converter but the default missing value is ''.
        converter.__init__(self, missing=missing, missingval=missingval)
@deprecated('2.1')
class todatetime(converter):
    """convert to a datetime or None"""
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        # Parse with the stored strptime format; missing entries map to
        # *missingval*.
        if self.is_missing(s):
            return self.missingval
        tup = time.strptime(s, self.fmt)
        return datetime.datetime(*tup[:6])
@deprecated('2.1')
class todate(converter):
    """convert to a date or None"""
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        """use a :func:`time.strptime` format string for conversion"""
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        # Parse with the stored strptime format; only the date fields
        # (year, month, day) are used.
        if self.is_missing(s):
            return self.missingval
        tup = time.strptime(s, self.fmt)
        return datetime.date(*tup[:3])
@deprecated('2.1')
class tofloat(converter):
    """convert to a float or None"""
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        # Set explicitly because only *missing* is forwarded to the base.
        self.missingval = missingval

    def __call__(self, s):
        # Return float(s), or *missingval* for missing entries.
        if self.is_missing(s):
            return self.missingval
        return float(s)
@deprecated('2.1')
class toint(converter):
    """convert to an int or None"""
    def __init__(self, missing='Null', missingval=None):
        # Bug fix: *missingval* was previously dropped (only *missing*
        # was forwarded), so a caller-supplied missing value was silently
        # ignored and __call__ always returned None for missing entries.
        # Forward it like the sibling converters do.
        converter.__init__(self, missing, missingval)

    def __call__(self, s):
        # Return int(s), or *missingval* for missing entries.
        if self.is_missing(s):
            return self.missingval
        return int(s)
class _BoundMethodProxy(object):
    """
    Our own proxy object which enables weak references to bound and unbound
    methods and arbitrary callables. Pulls information about the function,
    class, and instance out of a bound method. Stores a weak reference to the
    instance to support garbage collection.
    @organization: IBM Corporation
    @copyright: Copyright (c) 2005, 2006 IBM Corporation
    @license: The BSD License
    Minor bugfixes by <NAME>
    """
    def __init__(self, cb):
        # Hash is captured eagerly so the proxy stays hashable after the
        # referent dies.
        self._hash = hash(cb)
        self._destroy_callbacks = []
        try:
            try:
                # Bound method: keep a weak ref to the instance so we do
                # not keep it alive; _destroy fires when it is collected.
                if six.PY3:
                    self.inst = ref(cb.__self__, self._destroy)
                else:
                    self.inst = ref(cb.im_self, self._destroy)
            except TypeError:
                # Instance is not weak-referenceable.
                self.inst = None
            if six.PY3:
                self.func = cb.__func__
                self.klass = cb.__self__.__class__
            else:
                self.func = cb.im_func
                self.klass = cb.im_class
        except AttributeError:
            # Plain function or arbitrary callable: hold it strongly.
            self.inst = None
            self.func = cb
            self.klass = None

    def add_destroy_callback(self, callback):
        # *callback* is invoked (with this proxy) when the referent dies.
        self._destroy_callbacks.append(_BoundMethodProxy(callback))

    def _destroy(self, wk):
        # Weakref callback: notify listeners that the instance is gone.
        for callback in self._destroy_callbacks:
            try:
                callback(self)
            except ReferenceError:
                pass

    def __getstate__(self):
        # Pickle support: weakrefs cannot be pickled, so dereference.
        d = self.__dict__.copy()
        # de-weak reference inst
        inst = d['inst']
        if inst is not None:
            d['inst'] = inst()
        return d

    def __setstate__(self, statedict):
        self.__dict__ = statedict
        inst = statedict['inst']
        # turn inst back into a weakref
        if inst is not None:
            self.inst = ref(inst)

    def __call__(self, *args, **kwargs):
        """
        Proxy for a call to the weak referenced object. Take
        arbitrary params to pass to the callable.
        Raises `ReferenceError`: When the weak reference refers to
        a dead object
        """
        if self.inst is not None and self.inst() is None:
            raise ReferenceError
        elif self.inst is not None:
            # build a new instance method with a strong reference to the
            # instance
            mtd = types.MethodType(self.func, self.inst())
        else:
            # not a bound method, just return the func
            mtd = self.func
        # invoke the callable and return the result
        return mtd(*args, **kwargs)

    def __eq__(self, other):
        """
        Compare the held function and instance with that held by
        another proxy.
        """
        try:
            if self.inst is None:
                return self.func == other.func and other.inst is None
            else:
                return self.func == other.func and self.inst() == other.inst()
        except Exception:
            return False

    def __ne__(self, other):
        """
        Inverse of __eq__.
        """
        return not self.__eq__(other)

    def __hash__(self):
        return self._hash
def _exception_printer(exc):
    # Default CallbackRegistry exception handler: dump the active
    # traceback to stderr and swallow the exception.
    traceback.print_exc()
class CallbackRegistry(object):
    """Handle registering and disconnecting for a set of signals and callbacks:
    >>> def oneat(x):
    ...    print('eat', x)
    >>> def ondrink(x):
    ...    print('drink', x)
    >>> from matplotlib.cbook import CallbackRegistry
    >>> callbacks = CallbackRegistry()
    >>> id_eat = callbacks.connect('eat', oneat)
    >>> id_drink = callbacks.connect('drink', ondrink)
    >>> callbacks.process('drink', 123)
    drink 123
    >>> callbacks.process('eat', 456)
    eat 456
    >>> callbacks.process('be merry', 456) # nothing will be called
    >>> callbacks.disconnect(id_eat)
    >>> callbacks.process('eat', 456)      # nothing will be called
    In practice, one should always disconnect all callbacks when they
    are no longer needed to avoid dangling references (and thus memory
    leaks). However, real code in matplotlib rarely does so, and due
    to its design, it is rather difficult to place this kind of code.
    To get around this, and prevent this class of memory leaks, we
    instead store weak references to bound methods only, so when the
    destination object needs to die, the CallbackRegistry won't keep
    it alive.  The Python stdlib weakref module can not create weak
    references to bound methods directly, so we need to create a proxy
    object to handle weak references to bound methods (or regular free
    functions).  This technique was shared by <NAME> on his
    `"Mindtrove" blog
    <http://mindtrove.info/python-weak-references/>`_.
    Parameters
    ----------
    exception_handler : callable, optional
        If provided must have signature ::
           def handler(exc: Exception) -> None:
        If not None this function will be called with any `Exception`
        subclass raised by the callbacks in `CallbackRegistry.process`.
        The handler may either consume the exception or re-raise.
        The callable must be pickle-able.
        The default handler is ::
           def h(exc):
               traceback.print_exc()
    """
    def __init__(self, exception_handler=_exception_printer):
        self.exception_handler = exception_handler
        # signal name -> {cid: _BoundMethodProxy}
        self.callbacks = dict()
        self._cid = 0
        # signal name -> WeakKeyDictionary mapping proxy -> cid; dedupes
        # repeated connect() calls for the same callable.
        self._func_cid_map = {}
    # In general, callbacks may not be pickled; thus, we simply recreate an
    # empty dictionary at unpickling.  In order to ensure that `__setstate__`
    # (which just defers to `__init__`) is called, `__getstate__` must
    # return a truthy value (for pickle protocol>=3, i.e. Py3, the
    # *actual* behavior is that `__setstate__` will be called as long as
    # `__getstate__` does not return `None`, but this is undocumented -- see
    # http://bugs.python.org/issue12290).

    def __getstate__(self):
        # Only the (picklable) exception handler survives pickling.
        return {'exception_handler': self.exception_handler}

    def __setstate__(self, state):
        self.__init__(**state)

    def connect(self, s, func):
        """Register *func* to be called when signal *s* is generated.
        """
        self._func_cid_map.setdefault(s, WeakKeyDictionary())
        # Note proxy not needed in python 3.
        # TODO rewrite this when support for python2.x gets dropped.
        proxy = _BoundMethodProxy(func)
        if proxy in self._func_cid_map[s]:
            # Already connected: return the existing connection id.
            return self._func_cid_map[s][proxy]
        proxy.add_destroy_callback(self._remove_proxy)
        self._cid += 1
        cid = self._cid
        self._func_cid_map[s][proxy] = cid
        self.callbacks.setdefault(s, dict())
        self.callbacks[s][cid] = proxy
        return cid

    def _remove_proxy(self, proxy):
        # Purge *proxy* (whose referent died) from every signal's tables.
        for signal, proxies in list(six.iteritems(self._func_cid_map)):
            try:
                del self.callbacks[signal][proxies[proxy]]
            except KeyError:
                pass
            if len(self.callbacks[signal]) == 0:
                del self.callbacks[signal]
                del self._func_cid_map[signal]

    def disconnect(self, cid):
        """Disconnect the callback registered with callback id *cid*.
        """
        for eventname, callbackd in list(six.iteritems(self.callbacks)):
            try:
                del callbackd[cid]
            except KeyError:
                continue
            else:
                # Found it: also drop the reverse-map entry, then stop.
                for signal, functions in list(
                        six.iteritems(self._func_cid_map)):
                    for function, value in list(six.iteritems(functions)):
                        if value == cid:
                            del functions[function]
                return

    def process(self, s, *args, **kwargs):
        """
        Process signal *s*.
        All of the functions registered to receive callbacks on *s* will be
        called with ``*args`` and ``**kwargs``.
        """
        if s in self.callbacks:
            for cid, proxy in list(six.iteritems(self.callbacks[s])):
                try:
                    proxy(*args, **kwargs)
                except ReferenceError:
                    self._remove_proxy(proxy)
                # this does not capture KeyboardInterrupt, SystemExit,
                # and GeneratorExit
                except Exception as exc:
                    if self.exception_handler is not None:
                        self.exception_handler(exc)
                    else:
                        raise
class silent_list(list):
    """
    A list subclass whose repr is a short summary instead of the full
    element dump.  Intended for homogeneous lists of matplotlib artists,
    where printing every element would produce long, meaningless output.
    """

    def __init__(self, type, seq=None):
        self.type = type
        if seq is not None:
            self.extend(seq)

    def __repr__(self):
        return '<a list of %d %s objects>' % (len(self), self.type)

    def __str__(self):
        return repr(self)

    def __getstate__(self):
        # Pickle support: record the element type name plus the contents.
        return {'type': self.type, 'seq': self[:]}

    def __setstate__(self, state):
        self.type = state['type']
        self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
    """
    A class for issuing warnings about keyword arguments that will be ignored
    by matplotlib
    """
    # Marker subclass only; carries no behavior of its own.
    pass
def local_over_kwdict(local_var, kwargs, *keys):
    """
    Enforce the priority of a local variable over potentially conflicting
    argument(s) from a kwargs dict.

    The candidates are considered in priority order
    ``local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]`` and the first
    one that is not None wins (None is returned if all are None).  Every
    key in *keys* is popped from *kwargs* in place.

    Parameters
    ----------
    local_var: any object
        The local variable (highest priority)
    kwargs: dict
        Dictionary of keyword arguments; modified in place
    keys: str(s)
        Name(s) of keyword arguments to process, in descending order of
        priority

    Returns
    -------
    out: any object
        Either local_var or one of kwargs[key] for key in keys

    Raises
    ------
    IgnoredKeywordWarning
        For each key in keys that is removed from kwargs but not used as
        the output value
    """
    winner = local_var
    for name in keys:
        candidate = kwargs.pop(name, None)
        if candidate is None:
            continue
        if winner is None:
            winner = candidate
        else:
            # A higher-priority value already won; tell the user that this
            # keyword is being discarded.
            warnings.warn('"%s" keyword argument will be ignored' % name,
                          IgnoredKeywordWarning)
    return winner
def strip_math(s):
    """Remove latex formatting from mathtext."""
    tokens = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it',
              '\\', '{', '}')
    # Drop the surrounding '$' delimiters, then strip each markup token.
    stripped = s[1:-1]
    for token in tokens:
        stripped = stripped.replace(token, '')
    return stripped
class Bunch(object):
    """
    Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do- nothing
    class is even handier, and prettier to use.  Whenever you want to
    group a few variables::
      >>> point = Bunch(datum=2, squared=4, coord=12)
      >>> point.datum
    By: <NAME>
    From: https://code.activestate.com/recipes/121294/
    """
    def __init__(self, **kwds):
        # Every keyword becomes an instance attribute.
        self.__dict__.update(kwds)

    def __repr__(self):
        # Render all current attributes as keyword=value pairs.
        return 'Bunch(%s)' % ', '.join(
            '%s=%s' % kv for kv in six.iteritems(vars(self)))
@deprecated('2.1')
def unique(x):
    """Return a list of unique elements of *x*"""
    # Order is not preserved; elements must be hashable.
    return list(set(x))
def iterable(obj):
    """Return true if *obj* is iterable."""
    # iter() raises TypeError for anything that is not iterable.
    try:
        iter(obj)
        return True
    except TypeError:
        return False
@deprecated('2.1')
def is_string_like(obj):
    """Return True if *obj* looks like a string"""
    # (np.str_ == np.unicode_ on Py3).
    return isinstance(obj, (six.string_types, np.str_, np.unicode_))
@deprecated('2.1')
def is_sequence_of_strings(obj):
    """Returns true if *obj* is iterable and contains strings"""
    if not iterable(obj):
        return False
    if is_string_like(obj) and not isinstance(obj, np.ndarray):
        try:
            # A string-like container (e.g. pandas Series of strings)
            # exposes its elements via .values.
            obj = obj.values
        except AttributeError:
            # not pandas
            return False
    for o in obj:
        if not is_string_like(o):
            return False
    return True
def is_hashable(obj):
    """Returns true if *obj* can be hashed"""
    # hash() raises TypeError for unhashable objects.
    try:
        hash(obj)
        return True
    except TypeError:
        return False
def is_writable_file_like(obj):
    """Return whether *obj* looks like a file object with a *write* method."""
    write = getattr(obj, 'write', None)
    return callable(write)
def file_requires_unicode(x):
    """
    Return whether the writable file-like object *x* requires text
    (unicode) rather than bytes to be written to it.
    """
    # Probing with an empty bytestring is side-effect free: text streams
    # reject it with TypeError, binary streams accept it.
    try:
        x.write(b'')
    except TypeError:
        return True
    return False
@deprecated('2.1')
def is_scalar(obj):
    """return true if *obj* is not string like and is not iterable"""
    return not isinstance(obj, six.string_types) and not iterable(obj)
def is_numlike(obj):
    """return true if *obj* looks like a number"""
    # Covers both Python numbers (int, float, complex, ...) and numpy
    # scalar types.
    return isinstance(obj, (numbers.Number, np.number))
def to_filehandle(fname, flag='rU', return_opened=False, encoding=None):
    """
    *fname* can be an `os.PathLike` or a file handle.  Support for gzipped
    files is automatic, if the filename ends in .gz.  *flag* is a
    read/write flag for :func:`file`

    If *return_opened* is True, return ``(fh, opened)`` where *opened*
    records whether this call opened the handle (and hence whether the
    caller is responsible for closing it).
    """
    if hasattr(os, "PathLike") and isinstance(fname, os.PathLike):
        # Normalize path-like objects to strings and recurse once.
        return to_filehandle(
            os.fspath(fname),
            flag=flag, return_opened=return_opened, encoding=encoding)
    if isinstance(fname, six.string_types):
        if fname.endswith('.gz'):
            # get rid of 'U' in flag for gzipped files.
            flag = flag.replace('U', '')
            fh = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            # python may not be complied with bz2 support,
            # bury import until we need it
            import bz2
            # get rid of 'U' in flag for bz2 files
            flag = flag.replace('U', '')
            fh = bz2.BZ2File(fname, flag)
        else:
            fh = io.open(fname, flag, encoding=encoding)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already a file-like object; pass through unopened.
        fh = fname
        opened = False
    else:
        raise ValueError('fname must be a PathLike or file handle')
    if return_opened:
        return fh, opened
    return fh
@contextlib.contextmanager
def open_file_cm(path_or_file, mode="r", encoding=None):
    r"""Pass through file objects and context-manage `.PathLike`\s."""
    fh, opened = to_filehandle(path_or_file, mode, True, encoding)
    if opened:
        # We opened it, so we close it when the context exits.
        with fh:
            yield fh
    else:
        # Caller-provided handle: leave its lifetime to the caller.
        yield fh
def is_scalar_or_string(val):
    """Return whether the given object is a scalar or string like."""
    return isinstance(val, six.string_types) or not iterable(val)
def _string_to_bool(s):
    """Parses the string argument as a boolean"""
    # Non-strings are simply coerced; only string values take the
    # deprecated 'on'/'off'/'true'/'false' path.
    if not isinstance(s, six.string_types):
        return bool(s)
    warn_deprecated("2.2", "Passing one of 'on', 'true', 'off', 'false' as a "
                           "boolean is deprecated; use an actual boolean "
                           "(True/False) instead.")
    if s.lower() in ['on', 'true']:
        return True
    if s.lower() in ['off', 'false']:
        return False
    raise ValueError('String "%s" must be one of: '
                     '"on", "off", "true", or "false"' % s)
def get_sample_data(fname, asfileobj=True):
    """
    Return a sample data file.  *fname* is a path relative to the
    `mpl-data/sample_data` directory.  If *asfileobj* is `True`
    return a file object, otherwise just a file path.
    Set the rc parameter examples.directory to the directory where we should
    look, if sample_data files are stored in a location different than
    default (which is 'mpl-data/sample_data` at the same level of 'matplotlib`
    Python module files).
    If the filename ends in .gz, the file is implicitly ungzipped.
    """
    if matplotlib.rcParams['examples.directory']:
        root = matplotlib.rcParams['examples.directory']
    else:
        root = os.path.join(matplotlib._get_data_path(), 'sample_data')
    path = os.path.join(root, fname)
    if asfileobj:
        # Known text formats are opened in text mode; everything else
        # binary.
        if (os.path.splitext(fname)[-1].lower() in
                ('.csv', '.xrc', '.txt')):
            mode = 'r'
        else:
            mode = 'rb'
        base, ext = os.path.splitext(fname)
        if ext == '.gz':
            return gzip.open(path, mode)
        else:
            return open(path, mode)
    else:
        return path
def flatten(seq, scalarp=is_scalar_or_string):
    """
    Returns a generator of flattened nested containers
    For example:
        >>> from matplotlib.cbook import flatten
        >>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
        >>> print(list(flatten(l)))
        ['John', 'Hunter', 1, 23, 42, 5, 23]
    By: Composite of <NAME> and <NAME>
    From: https://code.activestate.com/recipes/121294/
    and Recipe 1.12 in cookbook
    """
    for item in seq:
        # *scalarp* decides what counts as a leaf; None is always a leaf.
        if scalarp(item) or item is None:
            yield item
        else:
            # Recurse into nested containers.
            for subitem in flatten(item, scalarp):
                yield subitem
@deprecated('2.1', "sorted(..., key=itemgetter(...))")
class Sorter(object):
    """
    Sort by attribute or item
    Example usage::
      sort = Sorter()
      list = [(1, 2), (4, 8), (0, 3)]
      dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
              {'a': 9, 'b': 9}]
      sort(list)  # default sort
      sort(list, 1)  # sort by index 1
      sort(dict, 'a')  # sort a list of dicts by key 'a'
    """
    def _helper(self, data, aux, inplace):
        # *aux* is a list of (key, index) pairs; sorting it yields the
        # permutation to apply to *data*.
        aux.sort()
        result = [data[i] for junk, i in aux]
        if inplace:
            data[:] = result
        return result

    def byItem(self, data, itemindex=None, inplace=1):
        # Sort by data[i][itemindex], or plain sort if no index given.
        if itemindex is None:
            if inplace:
                data.sort()
                result = data
            else:
                result = sorted(data)
            return result
        else:
            aux = [(data[i][itemindex], i) for i in range(len(data))]
            return self._helper(data, aux, inplace)

    def byAttribute(self, data, attributename, inplace=1):
        # Sort by getattr(data[i], attributename).
        aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
        return self._helper(data, aux, inplace)
    # a couple of handy synonyms
    sort = byItem
    __call__ = byItem
@deprecated('2.1')
class Xlator(dict):
    """
    All-in-one multiple-string-substitution class
    Example usage::
      text = "<NAME> is the creator of Perl"
      adict = {
          "<NAME>" : "<NAME>",
          "creator" : "Benevolent Dictator for Life",
          "Perl" : "Python",
          }
      print(multiple_replace(adict, text))
      xlat = Xlator(adict)
      print(xlat.xlat(text))
    """
    def _make_regex(self):
        """ Build re object based on the keys of the current dictionary """
        return re.compile("|".join(map(re.escape, self)))

    def __call__(self, match):
        """ Handler invoked for each regex *match* """
        return self[match.group(0)]

    def xlat(self, text):
        """ Translate *text*, returns the modified text. """
        return self._make_regex().sub(self, text)
@deprecated('2.1')
def soundex(name, len=4):
    """ soundex module conforming to Odell-Russell algorithm """
    # digits holds the soundex values for the alphabet
    soundex_digits = '01230120022455012623010202'
    sndx = ''
    fc = ''
    # Translate letters in name to soundex digits
    for c in name.upper():
        if c.isalpha():
            if not fc:
                fc = c   # Remember first letter
            d = soundex_digits[ord(c) - ord('A')]
            # Duplicate consecutive soundex digits are skipped
            if not sndx or (d != sndx[-1]):
                sndx += d
    # Replace first digit with first letter
    sndx = fc + sndx[1:]
    # Remove all 0s from the soundex code
    sndx = sndx.replace('0', '')
    # Return soundex code truncated or 0-padded to len characters
    return (sndx + (len * '0'))[:len]
@deprecated('2.1')
class Null(object):
    """ Null objects always and reliably "do nothing." """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return self

    def __str__(self):
        return "Null()"

    def __repr__(self):
        return "Null()"

    if six.PY3:
        def __bool__(self):
            # Bug fix: this used to ``return 0``; on Python 3 ``__bool__``
            # must return an actual bool, so ``bool(Null())`` raised
            # ``TypeError: __bool__ should return bool``.
            return False
    else:
        def __nonzero__(self):
            # Python 2 accepts ints here; False keeps the intent explicit.
            return False

    def __getattr__(self, name):
        return self

    def __setattr__(self, name, value):
        return self

    def __delattr__(self, name):
        return self
def mkdirs(newdir, mode=0o777):
    """
    make directory *newdir* recursively, and set *mode*.  Equivalent to ::
        > mkdir -p NEWDIR
        > chmod MODE NEWDIR
    """
    # this functionality is now in core python as of 3.2
    # LPY DROP
    if six.PY3:
        os.makedirs(newdir, mode=mode, exist_ok=True)
    else:
        try:
            os.makedirs(newdir, mode=mode)
        except OSError as exception:
            # An already-existing directory is not an error (mkdir -p
            # semantics); anything else is re-raised.
            if exception.errno != errno.EEXIST:
                raise
class GetRealpathAndStat(object):
    """
    Callable resolving a path to ``(realpath, stat_key)``, memoized per
    input path.

    ``stat_key`` is the real path itself on Windows and the
    ``(st_ino, st_dev)`` pair elsewhere.
    """

    def __init__(self):
        self._cache = {}

    def __call__(self, path):
        # Fast path: previously resolved.
        try:
            return self._cache[path]
        except KeyError:
            pass
        realpath = os.path.realpath(path)
        if sys.platform == 'win32':
            stat_key = realpath
        else:
            st = os.stat(realpath)
            stat_key = (st.st_ino, st.st_dev)
        result = realpath, stat_key
        self._cache[path] = result
        return result
get_realpath_and_stat = GetRealpathAndStat()
@deprecated('2.1')
def dict_delall(d, keys):
    """delete all of the *keys* from the :class:`dict` *d*"""
    # Missing keys are silently ignored.
    for key in keys:
        try:
            del d[key]
        except KeyError:
            pass
@deprecated('2.1')
class RingBuffer(object):
    """ class that implements a not-yet-full buffer """
    def __init__(self, size_max):
        self.max = size_max
        self.data = []

    class __Full:
        """ class that implements a full buffer """
        def append(self, x):
            """ Append an element overwriting the oldest one. """
            self.data[self.cur] = x
            self.cur = (self.cur + 1) % self.max

        def get(self):
            """ return list of elements in correct order """
            return self.data[self.cur:] + self.data[:self.cur]

    def append(self, x):
        """append an element at the end of the buffer"""
        self.data.append(x)
        if len(self.data) == self.max:
            self.cur = 0
            # Permanently change self's class from non-full to full:
            # once the buffer fills, the __Full methods take over.
            self.__class__ = __Full

    def get(self):
        """ Return a list of elements from the oldest to the newest. """
        return self.data

    def __get_item__(self, i):
        # NOTE(review): the name __get_item__ is not the __getitem__
        # protocol method, so indexing does not actually reach this.
        return self.data[i % len(self.data)]
@deprecated('2.1')
def get_split_ind(seq, N):
    """
    *seq* is a list of words.  Return the index into seq such that::
        len(' '.join(seq[:ind])<=N
    .
    """
    s_len = 0
    # todo: use Alex's xrange pattern from the cbook for efficiency
    for (word, ind) in zip(seq, xrange(len(seq))):
        s_len += len(word) + 1  # +1 to account for the len(' ')
        if s_len >= N:
            return ind
    return len(seq)
@deprecated('2.1', alternative='textwrap.TextWrapper')
def wrap(prefix, text, cols):
    """wrap *text* with *prefix* at length *cols*"""
    pad = ' ' * len(prefix.expandtabs())
    available = cols - len(pad)
    seq = text.split(' ')
    Nseq = len(seq)
    ind = 0
    lines = []
    # Greedily take as many words as fit on each line.
    while ind < Nseq:
        lastInd = ind
        ind += get_split_ind(seq[ind:], available)
        lines.append(seq[lastInd:ind])
    # add the prefix to the first line, pad with spaces otherwise
    ret = prefix + ' '.join(lines[0]) + '\n'
    for line in lines[1:]:
        ret += pad + ' '.join(line) + '\n'
    return ret
# Matches the indentation of the first non-blank line: the run of spaces
# immediately following the first newline, or at the start of the string.
_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")
# Cache of compiled "remove up to n leading spaces" regexes, keyed by n.
_dedent_regex = {}


def dedent(s):
    """
    Remove excess indentation from docstring *s*.
    Discards any leading blank lines, then removes up to n whitespace
    characters from each line, where n is the number of leading
    whitespace characters in the first line.  It differs from
    textwrap.dedent in its deletion of leading blank lines and its use
    of the first non-blank line to determine the indentation.
    It is also faster in most cases.
    """
    # Regex-based on purpose: this function historically accounted for a
    # large share of matplotlib startup time, so it is optimized for speed.
    if not s:      # covers both '' and None
        return ''
    match = _find_dedent_regex.match(s)
    if match is None:
        return s
    # Number of spaces to remove from the left-hand side of each line.
    nshift = match.end(1) - match.start(1)
    if nshift == 0:
        return s
    unindent = _dedent_regex.get(nshift)
    if unindent is None:
        # Build (and cache) a regex removing *up to* nshift spaces after
        # each newline.
        unindent = re.compile("\n\r? {0,%d}" % nshift)
        _dedent_regex[nshift] = unindent
    return unindent.sub("\n", s).strip()
def listFiles(root, patterns='*', recurse=1, return_folders=0):
    """
    Recursively list files under *root* whose names match any of the
    semicolon-separated fnmatch globs in *patterns*.

    With *recurse* false only the top directory is scanned; with
    *return_folders* true the ``os.path.isfile`` filter is skipped.
    from Parmar and Martelli in the Python Cookbook
    """
    import os.path
    import fnmatch
    # Expand patterns from semicolon-separated string to list
    pattern_list = patterns.split(';')
    results = []
    for dirname, dirs, files in os.walk(root):
        for name in files:
            fullname = os.path.normpath(os.path.join(dirname, name))
            if return_folders or os.path.isfile(fullname):
                # First matching pattern wins; each file is reported once.
                if any(fnmatch.fnmatch(name, pattern)
                       for pattern in pattern_list):
                    results.append(fullname)
        # Block recursion if recursion was disallowed
        if not recurse:
            break
    return results
@deprecated('2.1')
def get_recursive_filelist(args):
    """
    Recurse all the files and dirs in *args* ignoring symbolic links
    and return the files as a list of strings
    """
    files = []
    for arg in args:
        if os.path.isfile(arg):
            files.append(arg)
            continue
        if os.path.isdir(arg):
            newfiles = listFiles(arg, recurse=1, return_folders=1)
            files.extend(newfiles)
    # Symbolic links are filtered out of the final result.
    return [f for f in files if not os.path.islink(f)]
@deprecated('2.1')
def pieces(seq, num=2):
    """Break up the *seq* into *num* tuples"""
    # Generator yielding successive slices of length *num* (the final
    # slice may be shorter).
    start = 0
    while 1:
        item = seq[start:start + num]
        if not len(item):
            break
        yield item
        start += num
@deprecated('2.1')
def exception_to_str(s=None):
    # Render the active traceback (optionally preceded by *s*) to a string.
    if six.PY3:
        sh = io.StringIO()
    else:
        sh = io.BytesIO()
    if s is not None:
        print(s, file=sh)
    traceback.print_exc(file=sh)
    return sh.getvalue()
@deprecated('2.1')
def allequal(seq):
    """
    Return *True* if all elements of *seq* compare equal.  If *seq* is
    0 or 1 length, return *True*
    """
    if len(seq) < 2:
        return True
    # Compare every element against the first.
    val = seq[0]
    for i in xrange(1, len(seq)):
        thisval = seq[i]
        if thisval != val:
            return False
    return True
@deprecated('2.1')
def alltrue(seq):
    """
    Return *True* if all elements of *seq* evaluate to *True*.  If
    *seq* is empty, return *False*.
    """
    # NOTE: unlike the builtin all(), an empty sequence yields False.
    if not len(seq):
        return False
    for val in seq:
        if not val:
            return False
    return True
@deprecated('2.1')
def onetrue(seq):
    """
    Return *True* if one element of *seq* is *True*.  It *seq* is
    empty, return *False*.
    """
    if not len(seq):
        return False
    for val in seq:
        if val:
            return True
    return False
@deprecated('2.1')
def allpairs(x):
    """
    return all possible pairs in sequence *x*
    """
    # Unordered pairs of distinct positions, as (later, earlier) tuples.
    return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
class maxdict(dict):
    """
    A dict bounded to *maxsize* entries.

    Only ``__setitem__`` enforces the bound (the oldest inserted key is
    evicted first); other mutating methods are not overridden, so use
    with caution.
    """

    def __init__(self, maxsize):
        dict.__init__(self)
        self.maxsize = maxsize
        self._killkeys = []   # insertion order, oldest first

    def __setitem__(self, k, v):
        if k not in self:
            # New key: evict the oldest entry when the bound is reached.
            if len(self) >= self.maxsize:
                oldest = self._killkeys.pop(0)
                del self[oldest]
            self._killkeys.append(k)
        dict.__setitem__(self, k, v)
class Stack(object):
    """
    Implement a stack where elements can be pushed on and you can move
    back and forth.  But no pop.  Should mimic home / back / forward
    in a browser
    """

    def __init__(self, default=None):
        self.clear()
        self._default = default

    def __call__(self):
        """return the current element, or None"""
        if not len(self._elements):
            return self._default
        else:
            return self._elements[self._pos]

    def __len__(self):
        return self._elements.__len__()

    def __getitem__(self, ind):
        return self._elements.__getitem__(ind)

    def forward(self):
        """move the position forward and return the current element"""
        n = len(self._elements)
        if self._pos < n - 1:
            self._pos += 1
        return self()

    def back(self):
        """move the position back and return the current element"""
        if self._pos > 0:
            self._pos -= 1
        return self()

    def push(self, o):
        """
        push object onto stack at current position - all elements
        occurring later than the current position are discarded
        """
        self._elements = self._elements[:self._pos + 1]
        self._elements.append(o)
        self._pos = len(self._elements) - 1
        return self()

    def home(self):
        """push the first element onto the top of the stack"""
        if not len(self._elements):
            return
        self.push(self._elements[0])
        return self()

    def empty(self):
        """Return whether the stack is empty."""
        return len(self._elements) == 0

    def clear(self):
        """empty the stack"""
        self._pos = -1
        self._elements = []

    def bubble(self, o):
        """
        raise *o* to the top of the stack and return *o*.  *o* must be
        in the stack
        """
        if o not in self._elements:
            raise ValueError('Unknown element o')
        old = self._elements[:]
        self.clear()
        bubbles = []
        for thiso in old:
            if thiso == o:
                bubbles.append(thiso)
            else:
                self.push(thiso)
        for thiso in bubbles:
            # Bug fix: this previously pushed *o* for every match, which
            # replaced distinct-but-equal stack entries with the argument
            # object; push the original element instead.
            self.push(thiso)
        return o

    def remove(self, o):
        'remove element *o* from the stack'
        if o not in self._elements:
            raise ValueError('Unknown element o')
        old = self._elements[:]
        self.clear()
        for thiso in old:
            if thiso == o:
                continue
            else:
                self.push(thiso)
@deprecated('2.1')
def finddir(o, match, case=False):
    """
    return all attributes of *o* which match string in match.  if case
    is True require an exact case match.
    """
    if case:
        names = [(name, name) for name in dir(o)
                 if isinstance(name, six.string_types)]
    else:
        # Case-insensitive: compare lowercased names but report originals.
        names = [(name.lower(), name) for name in dir(o)
                 if isinstance(name, six.string_types)]
        match = match.lower()
    return [orig for name, orig in names if name.find(match) >= 0]
@deprecated('2.1')
def reverse_dict(d):
    """Return a dict mapping values of *d* to its keys -- data may be lost
    if values are not unique!"""
    return dict((v, k) for k, v in six.iteritems(d))
@deprecated('2.1')
def restrict_dict(d, keys):
    """
    Return a dictionary with the entries of *d* whose keys also appear
    in *keys*, with values taken from *d*.
    """
    return {k: d[k] for k in d if k in keys}
def report_memory(i=0):  # argument may go away
    """Return the memory consumed by this process, as reported by the
    platform's ``ps`` tool (``tasklist`` on Windows).

    The value is whatever the parsed column reports, so units are
    platform-dependent.  Raises NotImplementedError on unsupported
    platforms or when the external program cannot be run.
    """
    from matplotlib.compat.subprocess import Popen, PIPE
    pid = os.getpid()
    if sys.platform == 'sunos5':
        try:
            a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'osz'],
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Sun OS only if "
                "the 'ps' program is found")
        # last output line holds the 'osz' value for this pid
        mem = int(a2[-1].strip())
    elif sys.platform.startswith('linux'):
        try:
            a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'rss,sz'],
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Linux only if "
                "the 'ps' program is found")
        # second line (after the header), second column: the 'sz' value
        mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        try:
            a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'rss,vsz'],
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Mac OS only if "
                "the 'ps' program is found")
        # second line (after the header), first column: the 'rss' value
        mem = int(a2[1].split()[0])
    elif sys.platform.startswith('win'):
        try:
            a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
                       stdout=PIPE).stdout.read()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Windows only if "
                "the 'tasklist' program is found")
        # memory usage is the second-to-last field; strip thousands commas
        mem = int(a2.strip().split()[-2].replace(',', ''))
    else:
        raise NotImplementedError(
            "We don't have a memory monitor for %s" % sys.platform)
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'


def safezip(*args):
    """Verify that all *args* have equal length, then zip them."""
    expected = len(args[0])
    for idx, seq in enumerate(args[1:]):
        if len(seq) != expected:
            raise ValueError(_safezip_msg % (expected, idx + 1, len(seq)))
    return list(zip(*args))
@deprecated('2.1')
def issubclass_safe(x, klass):
    """Return issubclass(x, klass), or False if the check raises TypeError."""
    result = False
    try:
        result = issubclass(x, klass)
    except TypeError:
        pass
    return result
def safe_masked_invalid(x, copy=False):
    """
    Return *x* as a masked array with invalid (nan/inf) entries masked.

    If masking is not possible for the array's dtype (masked_invalid
    raises TypeError), the plain array is returned instead.
    """
    x = np.array(x, subok=True, copy=copy)
    if not x.dtype.isnative:
        # If a copy was already requested above, byteswap in place;
        # otherwise make a copy with the byte order swapped.  Only the
        # byte order of the dtype is being changed.
        x = x.byteswap(copy).newbyteorder('S')
    try:
        masked = np.ma.masked_invalid(x, copy=False)
        masked.shrink_mask()
    except TypeError:
        return x
    return masked
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
    """
    Find reference cycles reachable from *objects* and print them.

    *objects*
        A list of objects to find cycles in.  It is often useful to
        pass in gc.garbage to find the cycles that are preventing some
        objects from being garbage collected.
    *outstream*
        The stream for output.
    *show_progress*
        If True, print the number of objects reached as they are found.
    """
    import gc
    from types import FrameType

    def print_path(path):
        # Print one cycle, showing how each step references the next.
        for i, step in enumerate(path):
            # next "wraps around"
            next = path[(i + 1) % len(path)]
            outstream.write(" %s -- " % str(type(step)))
            if isinstance(step, dict):
                for key, val in six.iteritems(step):
                    if val is next:
                        outstream.write("[%s]" % repr(key))
                        break
                    if key is next:
                        outstream.write("[key] = %s" % repr(val))
                        break
            elif isinstance(step, list):
                outstream.write("[%d]" % step.index(next))
            elif isinstance(step, tuple):
                outstream.write("( tuple )")
            else:
                outstream.write(repr(step))
            outstream.write(" ->\n")
        outstream.write("\n")

    def recurse(obj, start, all, current_path):
        # Depth-first walk of gc referents; *all* maps id -> seen marker.
        if show_progress:
            outstream.write("%d\r" % len(all))
        all[id(obj)] = None
        referents = gc.get_referents(obj)
        for referent in referents:
            # If we've found our way back to the start, this is
            # a cycle, so print it out
            if referent is start:
                print_path(current_path)
            # Don't go back through the original list of objects, or
            # through temporary references to the object, since those
            # are just an artifact of the cycle detector itself.
            elif referent is objects or isinstance(referent, FrameType):
                continue
            # We haven't seen this object before, so recurse
            elif id(referent) not in all:
                recurse(referent, start, all, current_path + [obj])

    for obj in objects:
        outstream.write("Examining: %r\n" % (obj,))
        recurse(obj, obj, {}, [])
class Grouper(object):
    """
    This class provides a lightweight way to group arbitrary objects
    together into disjoint sets when a full-blown graph data structure
    would be overkill.

    Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
    using the object as an iterator.

    The objects being joined must be hashable and weak-referenceable.

    For example:

    >>> from matplotlib.cbook import Grouper
    >>> class Foo(object):
    ...     def __init__(self, s):
    ...         self.s = s
    ...     def __repr__(self):
    ...         return self.s
    ...
    >>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
    >>> grp = Grouper()
    >>> grp.join(a, b)
    >>> grp.join(b, c)
    >>> grp.join(d, e)
    >>> sorted(map(tuple, grp))
    [(a, b, c), (d, e)]
    >>> grp.joined(a, b)
    True
    >>> grp.joined(a, c)
    True
    >>> grp.joined(a, d)
    False
    """
    def __init__(self, init=()):
        # _mapping maps weakref(obj) -> list of weakrefs in the same
        # disjoint set; all members of one set share the *same* list
        # object, which is what makes `joined` an identity test.
        mapping = self._mapping = {}
        for x in init:
            mapping[ref(x)] = [ref(x)]

    def __contains__(self, item):
        return ref(item) in self._mapping

    def clean(self):
        """
        Clean dead weak references from the dictionary
        """
        mapping = self._mapping
        to_drop = [key for key in mapping if key() is None]
        for key in to_drop:
            val = mapping.pop(key)
            val.remove(key)

    def join(self, a, *args):
        """
        Join given arguments into the same set. Accepts one or more
        arguments.
        """
        mapping = self._mapping
        set_a = mapping.setdefault(ref(a), [ref(a)])

        for arg in args:
            set_b = mapping.get(ref(arg))
            if set_b is None:
                set_a.append(ref(arg))
                mapping[ref(arg)] = set_a
            elif set_b is not set_a:
                # Merge the smaller set into the larger one.
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a

        self.clean()

    def joined(self, a, b):
        """
        Returns True if *a* and *b* are members of the same set.
        """
        self.clean()
        mapping = self._mapping
        try:
            # Same set iff both refs map to the *same* list object.
            return mapping[ref(a)] is mapping[ref(b)]
        except KeyError:
            return False

    def remove(self, a):
        # Remove *a* from its set; no error if *a* is unknown.
        self.clean()
        mapping = self._mapping
        seta = mapping.pop(ref(a), None)
        if seta is not None:
            seta.remove(ref(a))

    def __iter__(self):
        """
        Iterate over each of the disjoint sets as a list.

        The iterator is invalid if interleaved with calls to join().
        """
        self.clean()
        token = object()

        # Mark each group as we come across if by appending a token,
        # and don't yield it twice
        for group in six.itervalues(self._mapping):
            if group[-1] is not token:
                yield [x() for x in group]
                group.append(token)

        # Cleanup the tokens
        for group in six.itervalues(self._mapping):
            if group[-1] is token:
                del group[-1]

    def get_siblings(self, a):
        """
        Returns all of the items joined with *a*, including itself.
        """
        self.clean()
        siblings = self._mapping.get(ref(a), [ref(a)])
        return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    """
    Resample an array with ``steps - 1`` points between original point pairs.

    Parameters
    ----------
    a : array, shape (n, ...)
    steps : int

    Returns
    -------
    array, shape ``((n - 1) * steps + 1, ...)``

    Along each column of *a*, ``steps - 1`` points are introduced between
    each pair of original values by linear interpolation.
    """
    n = len(a)
    # Flatten trailing dimensions so each column can be interpolated
    # independently, then restore the original trailing shape.
    columns = a.reshape((n, -1))
    sample_x = np.arange(n) * steps
    query_x = np.arange((n - 1) * steps + 1)
    interped = np.column_stack(
        [np.interp(query_x, sample_x, col) for col in columns.T])
    return interped.reshape((len(query_x),) + a.shape[1:])
@deprecated('2.1', alternative='shutil.rmtree')
def recursive_remove(path):
    """Recursively delete *path*: files directly, directories entry by entry."""
    if os.path.isdir(path):
        # Both patterns are needed: '*' misses dot-files.
        for fname in (glob.glob(os.path.join(path, '*')) +
                      glob.glob(os.path.join(path, '.*'))):
            if os.path.isdir(fname):
                recursive_remove(fname)
                # fname is empty at this point, so removedirs can drop it.
                os.removedirs(fname)
            else:
                os.remove(fname)
        # os.removedirs(path)
    else:
        os.remove(path)
def delete_masked_points(*args):
    """
    Find all masked and/or non-finite points in a set of arguments,
    and return the arguments with only the unmasked points remaining.

    Arguments can be in any of 5 categories:

    1) 1-D masked arrays
    2) 1-D ndarrays
    3) ndarrays with more than one dimension
    4) other non-string iterables
    5) anything else

    The first argument must be in one of the first four categories;
    any argument with a length differing from that of the first
    argument (and hence anything in category 5) then will be
    passed through unchanged.

    Masks are obtained from all arguments of the correct length
    in categories 1, 2, and 4; a point is bad if masked in a masked
    array or if it is a nan or inf. No attempt is made to
    extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
    does not yield a Boolean array.

    All input arguments that are not passed unchanged are returned
    as ndarrays after removing the points or rows corresponding to
    masks in any of the arguments.

    A vastly simpler version of this function was originally
    written as a helper for Axes.scatter().
    """
    if not len(args):
        return ()
    if (isinstance(args[0], six.string_types) or not iterable(args[0])):
        raise ValueError("First argument must be a sequence")
    nrecs = len(args[0])
    margs = []
    # seqlist[i] is True for arguments that participate in masking.
    seqlist = [False] * len(args)
    for i, x in enumerate(args):
        if (not isinstance(x, six.string_types) and iterable(x)
                and len(x) == nrecs):
            seqlist[i] = True
            if isinstance(x, np.ma.MaskedArray):
                if x.ndim > 1:
                    raise ValueError("Masked arrays must be 1-D")
            else:
                x = np.asarray(x)
        margs.append(x)
    masks = []    # list of masks that are True where good
    for i, x in enumerate(margs):
        if seqlist[i]:
            if x.ndim > 1:
                continue  # Don't try to get nan locations unless 1-D.
            if isinstance(x, np.ma.MaskedArray):
                masks.append(~np.ma.getmaskarray(x))  # invert the mask
                xd = x.data
            else:
                xd = x
            try:
                mask = np.isfinite(xd)
                if isinstance(mask, np.ndarray):
                    masks.append(mask)
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort behavior but only
            # for ordinary exceptions (e.g. non-numeric dtypes).
            except Exception:
                pass
    if len(masks):
        mask = np.logical_and.reduce(masks)
        igood = mask.nonzero()[0]
        if len(igood) < nrecs:
            for i, x in enumerate(margs):
                if seqlist[i]:
                    margs[i] = x.take(igood, axis=0)
    for i, x in enumerate(margs):
        if seqlist[i] and isinstance(x, np.ma.MaskedArray):
            margs[i] = x.filled()
    return margs
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
                  autorange=False):
    """
    Returns list of dictionaries of statistics used to draw a series
    of box and whisker plots. The `Returns` section enumerates the
    required keys of the dictionary. Users can skip this function and
    pass a user-defined set of dictionaries to the new `axes.bxp` method
    instead of relying on MPL to do the calculations.

    Parameters
    ----------
    X : array-like
        Data that will be represented in the boxplots. Should have 2 or
        fewer dimensions.

    whis : float, string, or sequence (default = 1.5)
        As a float, determines the reach of the whiskers to the beyond the
        first and third quartiles. In other words, where IQR is the
        interquartile range (`Q3-Q1`), the upper whisker will extend to last
        datum less than `Q3 + whis*IQR`). Similarly, the lower whisker will
        extend to the first datum greater than `Q1 - whis*IQR`.
        Beyond the whiskers, data are considered outliers
        and are plotted as individual points. This can be set this to an
        ascending sequence of percentile (e.g., [5, 95]) to set the
        whiskers at specific percentiles of the data. Finally, `whis`
        can be the string ``'range'`` to force the whiskers to the
        minimum and maximum of the data. In the edge case that the 25th
        and 75th percentiles are equivalent, `whis` can be automatically
        set to ``'range'`` via the `autorange` option.

    bootstrap : int, optional
        Number of times the confidence intervals around the median
        should be bootstrapped (percentile method).

    labels : array-like, optional
        Labels for each dataset. Length must be compatible with
        dimensions of `X`.

    autorange : bool, optional (False)
        When `True` and the data are distributed such that the 25th and
        75th percentiles are equal, ``whis`` is set to ``'range'`` such
        that the whisker ends are at the minimum and maximum of the
        data.

    Returns
    -------
    bxpstats : list of dict
        A list of dictionaries containing the results for each column
        of data. Keys of each dictionary are the following:

        ========   ===================================
        Key        Value Description
        ========   ===================================
        label      tick label for the boxplot
        mean       arithemetic mean value
        med        50th percentile
        q1         first quartile (25th percentile)
        q3         third quartile (75th percentile)
        cilo       lower notch around the median
        cihi       upper notch around the median
        whislo     end of the lower whisker
        whishi     end of the upper whisker
        fliers     outliers
        ========   ===================================

    Notes
    -----
    Non-bootstrapping approach to confidence interval uses Gaussian-
    based asymptotic approximation:

    .. math::

        \\mathrm{med} \\pm 1.57 \\times \\frac{\\mathrm{iqr}}{\\sqrt{N}}

    General approach from:
    <NAME>., <NAME>., and <NAME>. (1978) "Variations of
    Boxplots", The American Statistician, 32:12-16.
    """

    def _bootstrap_median(data, N=5000):
        # determine 95% confidence intervals of the median
        M = len(data)
        percentiles = [2.5, 97.5]

        bs_index = np.random.randint(M, size=(N, M))
        bsData = data[bs_index]
        estimate = np.median(bsData, axis=1, overwrite_input=True)

        CI = np.percentile(estimate, percentiles)
        return CI

    def _compute_conf_interval(data, med, iqr, bootstrap):
        if bootstrap is not None:
            # Do a bootstrap estimate of notch locations.
            # get conf. intervals around median
            CI = _bootstrap_median(data, N=bootstrap)
            notch_min = CI[0]
            notch_max = CI[1]
        else:
            # Gaussian asymptotic approximation (see Notes above).
            N = len(data)
            notch_min = med - 1.57 * iqr / np.sqrt(N)
            notch_max = med + 1.57 * iqr / np.sqrt(N)

        return notch_min, notch_max

    # output is a list of dicts
    bxpstats = []

    # convert X to a list of lists
    X = _reshape_2D(X, "X")

    ncols = len(X)
    if labels is None:
        labels = repeat(None)
    elif len(labels) != ncols:
        raise ValueError("Dimensions of labels and X must be compatible")

    input_whis = whis
    for ii, (x, label) in enumerate(zip(X, labels), start=0):

        # empty dict
        stats = {}
        if label is not None:
            stats['label'] = label

        # restore whis to the input values in case it got changed in the loop
        whis = input_whis

        # note tricksyness, append up here and then mutate below
        bxpstats.append(stats)

        # if empty, bail
        # (the original code assigned stats['med'] twice here; the
        # redundant duplicate assignment has been removed)
        if len(x) == 0:
            stats['fliers'] = np.array([])
            stats['mean'] = np.nan
            stats['med'] = np.nan
            stats['q1'] = np.nan
            stats['q3'] = np.nan
            stats['cilo'] = np.nan
            stats['cihi'] = np.nan
            stats['whislo'] = np.nan
            stats['whishi'] = np.nan
            continue

        # up-convert to an array, just to be safe
        x = np.asarray(x)

        # arithmetic mean
        stats['mean'] = np.mean(x)

        # medians and quartiles
        q1, med, q3 = np.percentile(x, [25, 50, 75])

        # interquartile range
        stats['iqr'] = q3 - q1
        if stats['iqr'] == 0 and autorange:
            whis = 'range'

        # conf. interval around median
        stats['cilo'], stats['cihi'] = _compute_conf_interval(
            x, med, stats['iqr'], bootstrap
        )

        # lowest/highest non-outliers
        if np.isscalar(whis):
            if np.isreal(whis):
                loval = q1 - whis * stats['iqr']
                hival = q3 + whis * stats['iqr']
            elif whis in ['range', 'limit', 'limits', 'min/max']:
                loval = np.min(x)
                hival = np.max(x)
            else:
                raise ValueError('whis must be a float, valid string, or list '
                                 'of percentiles')
        else:
            loval = np.percentile(x, whis[0])
            hival = np.percentile(x, whis[1])

        # get high extreme
        wiskhi = np.compress(x <= hival, x)
        if len(wiskhi) == 0 or np.max(wiskhi) < q3:
            stats['whishi'] = q3
        else:
            stats['whishi'] = np.max(wiskhi)

        # get low extreme
        wisklo = np.compress(x >= loval, x)
        if len(wisklo) == 0 or np.min(wisklo) > q1:
            stats['whislo'] = q1
        else:
            stats['whislo'] = np.min(wisklo)

        # compute a single array of outliers
        stats['fliers'] = np.hstack([
            np.compress(x < stats['whislo'], x),
            np.compress(x > stats['whishi'], x)
        ])

        # add in the remaining stats
        stats['q1'], stats['med'], stats['q3'] = q1, med, q3

    return bxpstats
# FIXME I don't think this is used anywhere
@deprecated('2.1')
def unmasked_index_ranges(mask, compressed=True):
    """
    Find index ranges where *mask* is *False*.

    *mask* will be flattened if it is not already 1-D.

    Returns an Nx2 :class:`numpy.ndarray` whose rows are the start and
    stop indices of slices of the compressed array corresponding to each
    of *N* uninterrupted runs of unmasked values.  If *compressed* is
    *False*, the indices refer to the original (uncompressed) array
    instead.  Returns *None* if there are no unmasked values.

    Prior to the transforms refactoring, this was used to support
    masked arrays in Line2D.
    """
    mask = mask.reshape(mask.size)
    # Pad with "masked" sentinels so runs at either end are detected.
    padded = np.concatenate(((1,), mask, (1,)))
    indices = np.arange(len(mask) + 1)
    diffs = padded[1:] - padded[:-1]
    starts = np.compress(diffs == -1, indices)
    stops = np.compress(diffs == 1, indices)
    assert len(starts) == len(stops)
    if len(stops) == 0:
        return None  # Maybe this should be np.zeros((0,2), dtype=int)
    if not compressed:
        return np.concatenate(
            (starts[:, np.newaxis], stops[:, np.newaxis]), axis=1)
    # Convert to indices into the compressed array by accumulating
    # segment lengths.
    seglengths = stops - starts
    breakpoints = np.cumsum(seglengths)
    compressed_starts = np.concatenate(((0,), breakpoints[:-1]))
    return np.concatenate(
        (compressed_starts[:, np.newaxis], breakpoints[:, np.newaxis]),
        axis=1)
# The ls_mapper maps short codes for line style to their full name used by
# backends; the reverse mapper is for mapping full names to short ones.
# (The reverse map assumes the values above are unique.)
ls_mapper = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted'}
ls_mapper_r = {v: k for k, v in six.iteritems(ls_mapper)}
@deprecated('2.2')
def align_iterators(func, *iterables):
    """
    This generator takes a bunch of iterables that are ordered by func
    It sends out ordered tuples::

        (func(row), [rows from all iterators matching func(row)])

    It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
    """
    class myiter:
        # Wrap one iterable, caching the current row and its sort key,
        # and yield the row only when asked for exactly that key.
        def __init__(self, it):
            self.it = it
            self.key = self.value = None
            self.iternext()

        def iternext(self):
            # Advance to the next row; on exhaustion both key and value
            # become None.
            try:
                self.value = next(self.it)
                self.key = func(self.value)
            except StopIteration:
                self.value = self.key = None

        def __call__(self, key):
            # Return the cached row if it matches *key* (and advance);
            # error out if this iterator's key is already past *key*,
            # which would mean the input was not ordered by func.
            retval = None
            if key == self.key:
                retval = self.value
                self.iternext()
            elif self.key and key > self.key:
                raise ValueError("Iterator has been left behind")
            return retval

    # This can be made more efficient by not computing the minimum key for each
    # iteration
    iters = [myiter(it) for it in iterables]
    minvals = minkey = True
    while True:
        minvals = ([_f for _f in [it.key for it in iters] if _f])
        if minvals:
            minkey = min(minvals)
            yield (minkey, [it(minkey) for it in iters])
        else:
            break
def contiguous_regions(mask):
    """
    Return a list of ``(ind0, ind1)`` pairs such that
    ``mask[ind0:ind1].all()`` is True, covering every such maximal run.
    """
    mask = np.asarray(mask, dtype=bool)
    if not mask.size:
        return []

    # Indices where the mask flips value; the +1 converts "change between
    # i and i+1" into the index of the first changed element.  List
    # operations are faster for moderately sized arrays.
    boundaries = (np.nonzero(mask[:-1] != mask[1:])[0] + 1).tolist()

    # Add the first and/or last index when the mask starts or ends True.
    if mask[0]:
        boundaries.insert(0, 0)
    if mask[-1]:
        boundaries.append(len(mask))

    return list(zip(boundaries[::2], boundaries[1::2]))
def is_math_text(s):
    """
    Return True if *s* contains an even, nonzero number of unescaped
    dollar signs, i.e. should be treated as math text.
    """
    try:
        s = six.text_type(s)
    except UnicodeDecodeError:
        raise ValueError(
            "matplotlib display text must have all code points < 128 or use "
            "Unicode strings")

    n_dollars = s.count(r'$') - s.count(r'\$')
    return n_dollars > 0 and n_dollars % 2 == 0
def _to_unmasked_float_array(x):
    """
    Convert a sequence to a float ndarray; masked entries of a masked
    array are converted to nans.
    """
    if hasattr(x, 'mask'):
        return np.ma.asarray(x, float).filled(np.nan)
    return np.asarray(x, float)
def _check_1d(x):
    '''
    Convert a sequence of less than 1 dimension (scalars, 0-d arrays)
    to an array of 1 dimension; leave everything else untouched.
    '''
    if not hasattr(x, 'shape') or len(x.shape) < 1:
        return np.atleast_1d(x)
    else:
        try:
            # work around
            # https://github.com/pandas-dev/pandas/issues/27775 which
            # means the shape of multi-dimensional slicing is not as
            # expected. That this ever worked was an unintentional
            # quirk of pandas and will raise an exception in the
            # future. This slicing warns in pandas >= 1.0rc0 via
            # https://github.com/pandas-dev/pandas/pull/30588
            #
            # < 1.0rc0 : x[:, None].ndim == 1, no warning, custom type
            # >= 1.0rc1 : x[:, None].ndim == 2, warns, numpy array
            # future : x[:, None] -> raises
            #
            # This code should correctly identify and coerce to a
            # numpy array all pandas versions.
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings(
                    "always",
                    category=DeprecationWarning,
                    message='Support for multi-dimensional indexing')
                ndim = x[:, None].ndim
                # we have definitely hit a pandas index or series object
                # cast to a numpy array.
                if len(w) > 0:
                    return np.asanyarray(x)
            # We have likely hit a pandas object, or at least
            # something where 2D slicing does not result in a 2D
            # object.
            if ndim < 2:
                return np.atleast_1d(x)
            return x
        # Objects that don't support 2D slicing at all fall back to
        # plain 1-D coercion.
        except (IndexError, TypeError):
            return np.atleast_1d(x)
def _reshape_2D(X, name):
    """
    Use Fortran ordering to convert ndarrays and lists of iterables to
    lists of 1D arrays.

    Lists of iterables are converted by applying `np.asarray` to each of
    their elements.  1D ndarrays are returned in a singleton list
    containing them.  2D ndarrays are converted to the list of their
    *columns*.  *name* is used to generate the error message for invalid
    inputs.
    """
    # For ndarrays iterate over columns (via the transpose); for anything
    # else, convert first and iterate over rows.
    X = np.atleast_1d(X.T if isinstance(X, np.ndarray) else np.asarray(X))
    if X.ndim == 1 and X.dtype.type != np.object_:
        # A 1D array of scalars: directly return it.
        return [X]
    if X.ndim in [1, 2]:
        # 2D array, or 1D array of iterables: flatten each entry.
        return [np.reshape(entry, -1) for entry in X]
    raise ValueError("{} must have 2 or fewer dimensions".format(name))
def violin_stats(X, method, points=100):
    """
    Return a list of dictionaries of data which can be used to draw a
    series of violin plots.

    Users can skip this function and pass a user-defined set of
    dictionaries to the `axes.vplot` method instead of using MPL to do
    the calculations.

    Parameters
    ----------
    X : array-like
        Sample data used to produce the gaussian kernel density
        estimates.  Must have 2 or fewer dimensions.

    method : callable
        Called as ``method(v, coords)``; must return a vector of the
        values of the KDE evaluated at the positions in *coords*.

    points : scalar, default = 100
        Number of points at which each gaussian kernel density estimate
        is evaluated.

    Returns
    -------
    A list of dictionaries, one per column of data, each containing at
    least: 'coords' (evaluation positions), 'vals' (KDE values at those
    positions), 'mean', 'median', 'min' and 'max' of the column.
    """
    vpstats = []

    # Want X to be a list of data sequences.
    X = _reshape_2D(X, "X")

    for x in X:
        min_val = np.min(x)
        max_val = np.max(x)
        # Evaluate the kernel density estimate over the data's range.
        coords = np.linspace(min_val, max_val, points)
        vpstats.append({
            'vals': method(x, coords),
            'coords': coords,
            'mean': np.mean(x),
            'median': np.median(x),
            'min': min_val,
            'max': max_val,
        })

    return vpstats
class _NestedClassGetter(object):
    # recipe from http://stackoverflow.com/a/11493777/741316
    """
    When called with the containing class as the first argument,
    and the name of the nested class as the second argument,
    returns an instance of the nested class.
    """
    def __call__(self, containing_class, class_name):
        nested = getattr(containing_class, class_name)

        # Build an instance of a trivially-constructible object, then
        # rewrite its __class__; __init__ of the nested class is never
        # called, and the real state is restored later by pickle.
        instance = _NestedClassGetter()
        instance.__class__ = nested
        return instance
class _InstanceMethodPickler(object):
    """
    Pickle cannot handle instancemethod saving. _InstanceMethodPickler
    provides a solution to this.

    Only the parent object and the method *name* are stored; the bound
    method is re-created by attribute lookup on unpickling.
    """
    def __init__(self, instancemethod):
        """Takes an instancemethod as its only argument."""
        if six.PY3:
            # Python 3 spelling: bound methods expose __self__/__func__.
            self.parent_obj = instancemethod.__self__
            self.instancemethod_name = instancemethod.__func__.__name__
        else:
            # Python 2 spelling of the same attributes.
            self.parent_obj = instancemethod.im_self
            self.instancemethod_name = instancemethod.im_func.__name__

    def get_instancemethod(self):
        # Re-bind the method by name on the stored parent object.
        return getattr(self.parent_obj, self.instancemethod_name)
def pts_to_prestep(x, *args):
    """
    Convert continuous line to pre-steps.

    Given a set of ``N`` points, convert to ``2N - 1`` points, which when
    connected linearly give a step function which changes values at the
    beginning of the intervals.

    Parameters
    ----------
    x : array
        The x location of the steps. May be empty.

    y1, ..., yp : array
        y arrays to be turned into steps; all must be the same length as ``x``.

    Returns
    -------
    out : array
        The x and y values converted to steps in the same order as the input;
        can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
        length ``N``, each of these arrays will be length ``2N - 1``. For
        ``N=0``, the length will be 0.
        (The code allocates ``max(2 * len(x) - 1, 0)`` columns; the previous
        docstring's claim of ``2N + 1`` was wrong.)

    Examples
    --------
    >> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
    """
    steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
    # In all `pts_to_*step` functions, only assign *once* using `x` and `args`,
    # as converting to an array may be expensive.
    steps[0, 0::2] = x
    steps[0, 1::2] = steps[0, 0:-2:2]
    steps[1:, 0::2] = args
    steps[1:, 1::2] = steps[1:, 2::2]
    return steps
def pts_to_poststep(x, *args):
    """
    Convert continuous line to post-steps.

    Given a set of ``N`` points convert to ``2N - 1`` points, which when
    connected linearly give a step function which changes values at the end of
    the intervals.

    Parameters
    ----------
    x : array
        The x location of the steps. May be empty.

    y1, ..., yp : array
        y arrays to be turned into steps; all must be the same length as ``x``.

    Returns
    -------
    out : array
        The x and y values converted to steps in the same order as the input;
        can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
        length ``N``, each of these arrays will be length ``2N - 1``. For
        ``N=0``, the length will be 0.
        (The code allocates ``max(2 * len(x) - 1, 0)`` columns; the previous
        docstring's claim of ``2N + 1`` was wrong.)

    Examples
    --------
    >> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
    """
    steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
    steps[0, 0::2] = x
    steps[0, 1::2] = steps[0, 2::2]
    steps[1:, 0::2] = args
    steps[1:, 1::2] = steps[1:, 0:-2:2]
    return steps
def pts_to_midstep(x, *args):
    """
    Convert continuous line to mid-steps.

    Given a set of ``N`` points convert to ``2N`` points which when
    connected linearly give a step function which changes values at the
    middle of the intervals.

    Parameters
    ----------
    x : array
        The x location of the steps. May be empty.

    y1, ..., yp : array
        y arrays to be turned into steps; all must be the same length
        as ``x``.

    Returns
    -------
    out : array
        The x and y values converted to steps in the same order as the
        input; can be unpacked as ``x_out, y1_out, ..., yp_out``.  If
        the input is length ``N``, each of these arrays will be length
        ``2N``.

    Examples
    --------
    >> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
    """
    steps = np.zeros((1 + len(args), 2 * len(x)))
    x = np.asanyarray(x)
    # Each interior x is replaced by the midpoint of its interval,
    # written twice (once as an interval end, once as a start).
    midpoints = (x[:-1] + x[1:]) / 2
    steps[0, 1:-1:2] = midpoints
    steps[0, 2::2] = midpoints
    steps[0, :1] = x[:1]  # Also works for zero-sized input.
    steps[0, -1:] = x[-1:]
    steps[1:, 0::2] = args
    steps[1:, 1::2] = steps[1:, 0::2]
    return steps
# Map drawstyle names to the step-conversion helpers defined above;
# 'default' passes the data through unchanged.
STEP_LOOKUP_MAP = {'default': lambda x, y: (x, y),
                   'steps': pts_to_prestep,
                   'steps-pre': pts_to_prestep,
                   'steps-post': pts_to_poststep,
                   'steps-mid': pts_to_midstep}
def index_of(y):
    """
    A helper function to get the index of an input to plot
    against if x values are not explicitly given.

    Tries to get `y.index` (works if this is a pd.Series); if that
    fails, returns np.arange(y.shape[0]).

    This will be extended in the future to deal with more types of
    labeled data.

    Parameters
    ----------
    y : scalar or array-like
        The proposed y-value

    Returns
    -------
    x, y : ndarray
        The x and y values to plot.
    """
    try:
        # pandas-like objects carry their own index.
        return y.index.values, y.values
    except AttributeError:
        pass
    y = _check_1d(y)
    return np.arange(y.shape[0], dtype=float), y
def safe_first_element(obj):
    """
    Return the first element of *obj*.

    Plain iterables go through ``next(iter(obj))``.  Iterator instances
    are only accepted when they are also indexable (e.g. ``array.flat``);
    a generic generator would be consumed, so it is rejected.
    """
    if isinstance(obj, cabc.Iterator):
        # needed to accept `array.flat` as input.
        # np.flatiter reports as an instance of collections.Iterator
        # but can still be indexed via [].
        # This has the side effect of re-setting the iterator, but
        # that is acceptable.
        try:
            return obj[0]
        except TypeError:
            pass
        raise RuntimeError("matplotlib does not support generators "
                           "as input")
    return next(iter(obj))
def sanitize_sequence(data):
    """Convert dict-view objects to lists; pass everything else through."""
    if isinstance(data, cabc.MappingView):
        return list(data)
    return data
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
                     allowed=None):
    """Helper function to normalize kwarg inputs

    The order they are resolved are:

    1. aliasing
    2. required
    3. forbidden
    4. allowed

    This order means that only the canonical names need appear in
    `allowed`, `forbidden`, `required`

    Parameters
    ----------
    kw : dict
        The keyword arguments to normalize.  A local copy is taken, so
        the caller's dict is not mutated.

    alias_mapping, dict, optional
        A mapping between a canonical name to a list of
        aliases, in order of precedence from lowest to highest.

        If the canonical value is not in the list it is assumed to have
        the highest priority.

    required : iterable, optional
        A tuple of fields that must be in kwargs.

    forbidden : iterable, optional
        A list of keys which may not be in kwargs

    allowed : tuple, optional
        A tuple of allowed fields. If this not None, then raise if
        `kw` contains any keys not in the union of `required`
        and `allowed`. To allow only the required fields pass in
        ``()`` for `allowed`

    Raises
    ------
    TypeError
        To match what python raises if invalid args/kwargs are passed to
        a callable.
    """
    # deal with default value of alias_mapping
    if alias_mapping is None:
        alias_mapping = dict()
    # make a local so we can pop
    kw = dict(kw)
    # output dictionary
    ret = dict()
    # hit all alias mappings
    for canonical, alias_list in six.iteritems(alias_mapping):
        # the alias lists are ordered from lowest to highest priority
        # so we know to use the last value in this list
        tmp = []
        seen = []
        for a in alias_list:
            try:
                tmp.append(kw.pop(a))
                seen.append(a)
            except KeyError:
                pass
        # if canonical is not in the alias_list assume highest priority
        if canonical not in alias_list:
            try:
                tmp.append(kw.pop(canonical))
                seen.append(canonical)
            except KeyError:
                pass
        # if we found anything in this set of aliases put it in the return
        # dict
        if tmp:
            ret[canonical] = tmp[-1]
            if len(tmp) > 1:
                # more than one spelling of the same kwarg was supplied
                warnings.warn("Saw kwargs {seen!r} which are all aliases for "
                              "{canon!r}. Kept value from {used!r}".format(
                                  seen=seen, canon=canonical, used=seen[-1]))
    # at this point we know that all keys which are aliased are removed, update
    # the return dictionary from the cleaned local copy of the input
    ret.update(kw)
    fail_keys = [k for k in required if k not in ret]
    if fail_keys:
        raise TypeError("The required keys {keys!r} "
                        "are not in kwargs".format(keys=fail_keys))
    fail_keys = [k for k in forbidden if k in ret]
    if fail_keys:
        raise TypeError("The forbidden keys {keys!r} "
                        "are in kwargs".format(keys=fail_keys))
    if allowed is not None:
        allowed_set = set(required) | set(allowed)
        fail_keys = [k for k in ret if k not in allowed_set]
        if fail_keys:
            raise TypeError("kwargs contains {keys!r} which are not in "
                            "the required {req!r} or "
                            "allowed {allow!r} keys".format(
                                keys=fail_keys, req=required,
                                allow=allowed))
    return ret
def get_label(y, default_name):
    """Return the ``name`` attribute of *y*, or *default_name* if absent."""
    return getattr(y, 'name', default_name)
# Message template for Locked.TimeoutError; formatted with the glob
# pattern of the lock files that could not be acquired.
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed. This maybe due to any other process holding this
lock. If you are sure no other matplotlib process is running try
removing these folders and trying again.
"""
class Locked(object):
    """
    Context manager to handle locks.
    Based on code from conda.
    (c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
    All Rights Reserved
    conda is distributed under the terms of the BSD 3-clause license.
    Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
    """
    # Common prefix of every lock directory created under `path`.
    LOCKFN = '.matplotlib_lock'
    class TimeoutError(RuntimeError):
        # Raised by __enter__ when the lock cannot be acquired in time.
        pass
    def __init__(self, path):
        """Prepare (but do not yet acquire) a PID-suffixed lock under *path*."""
        self.path = path
        # Suffix with this process's PID so we can recognize our own lock.
        self.end = "-" + str(os.getpid())
        self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
        # Glob pattern matching any process's lock directory.
        self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
        self.remove = True
    def __enter__(self):
        """Acquire the lock, polling up to 50 times (~5 s) before giving up."""
        retries = 50
        sleeptime = 0.1
        # Wait while some *other* process holds a lock; the while/else
        # `else` clause runs only when `retries` is exhausted (no `break`).
        while retries:
            files = glob.glob(self.pattern)
            if files and not files[0].endswith(self.end):
                time.sleep(sleeptime)
                retries -= 1
            else:
                break
        else:
            err_str = _lockstr.format(self.pattern)
            raise self.TimeoutError(err_str)
        # `files` leaks out of the loop above: empty means no lock exists
        # yet, so create ours; non-empty means our own lock is already here.
        if not files:
            try:
                os.makedirs(self.lock_path)
            except OSError:
                # Another process may have created it in the meantime.
                pass
        else: # PID lock already here --- someone else will remove it.
            self.remove = False
    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock by removing our lock directory (best effort)."""
        if self.remove:
            for path in self.lock_path, self.path:
                try:
                    os.rmdir(path)
                except OSError:
                    # Directory non-empty or already gone; ignore.
                    pass
class _FuncInfo(object):
"""
Class used to store a function.
"""
def __init__(self, function, inverse, bounded_0_1=True, check_params=None):
"""
Parameters
----------
function : callable
A callable implementing the function receiving the variable as
first argument and any additional parameters in a list as second
argument.
inverse : callable
A callable implementing the inverse function receiving the variable
as first argument and any additional parameters in a list as
second argument. It must satisfy 'inverse(function(x, p), p) == x'.
bounded_0_1: bool or callable
A boolean indicating whether the function is bounded in the [0,1]
interval, or a callable taking a list of values for the additional
parameters, and returning a boolean indicating whether the function
is bounded in the [0,1] interval for that combination of
parameters. Default True.
check_params: callable or None
A callable taking a list of values for the additional parameters
and returning a boolean indicating whether that combination of
parameters is valid. It is only required if the function has
additional parameters and some of them are restricted.
Default None.
"""
self.function = function
self.inverse = inverse
if callable(bounded_0_1):
self._bounded_0_1 = bounded_0_1
else:
self._bounded_0_1 = lambda x: bounded_0_1
if check_params is None:
self._check_params = lambda x: True
elif callable(check_params):
self._check_params = check_params
else:
raise ValueError("Invalid 'check_params' argument.")
def is_bounded_0_1(self, params=None):
"""
Returns a boolean indicating if the function is bounded in the [0,1]
interval for a particular set of additional parameters.
Parameters
----------
params : list
The list of additional parameters. Default None.
Returns
-------
out : bool
True if the function is bounded in the [0,1] interval for
parameters 'params'. Otherwise False.
"""
return self._bounded_0_1(params)
def check_params(self, params=None):
"""
Returns a boolean indicating if the set of additional parameters is
valid.
Parameters
----------
params : list
The list of additional parameters. Default None.
Returns
-------
out : bool
True if 'params' is a valid set of additional parameters for the
function. Otherwise False.
"""
return self._check_params(params)
class _StringFuncParser(object):
    """
    A class used to convert predefined strings into
    _FuncInfo objects, or to directly obtain _FuncInfo
    properties.
    """
    # Registry of recognized function strings. Parameterized entries use
    # '{p}' placeholders; the concrete values are parsed out of the input
    # string and passed to the lambdas as the list `p`.
    _funcs = {}
    _funcs['linear'] = _FuncInfo(lambda x: x,
                                 lambda x: x,
                                 True)
    _funcs['quadratic'] = _FuncInfo(np.square,
                                    np.sqrt,
                                    True)
    _funcs['cubic'] = _FuncInfo(lambda x: x**3,
                                lambda x: x**(1. / 3),
                                True)
    _funcs['sqrt'] = _FuncInfo(np.sqrt,
                               np.square,
                               True)
    _funcs['cbrt'] = _FuncInfo(lambda x: x**(1. / 3),
                               lambda x: x**3,
                               True)
    _funcs['log10'] = _FuncInfo(np.log10,
                                lambda x: (10**(x)),
                                False)
    _funcs['log'] = _FuncInfo(np.log,
                              np.exp,
                              False)
    _funcs['log2'] = _FuncInfo(np.log2,
                               lambda x: (2**x),
                               False)
    _funcs['x**{p}'] = _FuncInfo(lambda x, p: x**p[0],
                                 lambda x, p: x**(1. / p[0]),
                                 True)
    # NOTE(review): the inverse below uses `x**p`, while every other entry
    # indexes the parameter list as `p[0]`; this relies on numpy
    # broadcasting against a one-element list — verify intended.
    _funcs['root{p}(x)'] = _FuncInfo(lambda x, p: x**(1. / p[0]),
                                     lambda x, p: x**p,
                                     True)
    _funcs['log{p}(x)'] = _FuncInfo(lambda x, p: (np.log(x) /
                                                  np.log(p[0])),
                                    lambda x, p: p[0]**(x),
                                    False,
                                    lambda p: p[0] > 0)
    # NOTE(review): the third positional argument of the next two entries
    # binds to `bounded_0_1` (not `check_params`), unlike 'log{p}(x)'
    # above which passes both — confirm this asymmetry is intended.
    _funcs['log10(x+{p})'] = _FuncInfo(lambda x, p: np.log10(x + p[0]),
                                       lambda x, p: 10**x - p[0],
                                       lambda p: p[0] > 0)
    _funcs['log(x+{p})'] = _FuncInfo(lambda x, p: np.log(x + p[0]),
                                     lambda x, p: np.exp(x) - p[0],
                                     lambda p: p[0] > 0)
    _funcs['log{p}(x+{p})'] = _FuncInfo(lambda x, p: (np.log(x + p[1]) /
                                                      np.log(p[0])),
                                        lambda x, p: p[0]**(x) - p[1],
                                        lambda p: p[1] > 0,
                                        lambda p: p[0] > 0)
    def __init__(self, str_func):
        """
        Parameters
        ----------
        str_func : string
            String to be parsed.

        Raises
        ------
        ValueError
            If `str_func` is not a string, names an unknown function, or
            carries invalid parameters.
        """
        if not isinstance(str_func, six.string_types):
            raise ValueError("'%s' must be a string." % str_func)
        self._str_func = six.text_type(str_func)
        # Split the input into a registry key ('{p}' placeholders restored)
        # and the list of numeric parameters it carried.
        self._key, self._params = self._get_key_params()
        self._func = self._parse_func()
    def _parse_func(self):
        """
        Parses the parameters to build a new _FuncInfo object,
        replacing the relevant parameters if necessary in the lambda
        functions.
        """
        func = self._funcs[self._key]
        if not self._params:
            # No parameters: freeze the boundedness predicate to its
            # current (constant) value.
            func = _FuncInfo(func.function, func.inverse,
                             func.is_bounded_0_1())
        else:
            # Bind the parsed parameter list into the callables; `m=m`
            # captures the current callable by value (early binding).
            m = func.function
            function = (lambda x, m=m: m(x, self._params))
            m = func.inverse
            inverse = (lambda x, m=m: m(x, self._params))
            is_bounded_0_1 = func.is_bounded_0_1(self._params)
            func = _FuncInfo(function, inverse,
                             is_bounded_0_1)
        return func
    @property
    def func_info(self):
        """
        Returns the _FuncInfo object.
        """
        return self._func
    @property
    def function(self):
        """
        Returns the callable for the direct function.
        """
        return self._func.function
    @property
    def inverse(self):
        """
        Returns the callable for the inverse function.
        """
        return self._func.inverse
    @property
    def is_bounded_0_1(self):
        """
        Returns a boolean indicating if the function is bounded
        in the [0-1 interval].
        """
        return self._func.is_bounded_0_1()
    def _get_key_params(self):
        """Extract (registry_key, params) from the raw function string."""
        str_func = self._str_func
        # Checking if it comes with parameters
        regex = r'\{(.*?)\}'
        params = re.findall(regex, str_func)
        for i, param in enumerate(params):
            try:
                params[i] = float(param)
            except ValueError:
                raise ValueError("Parameter %i is '%s', which is "
                                 "not a number." %
                                 (i, param))
        # Replace every concrete value by the '{p}' placeholder so the
        # string can be looked up in the registry.
        str_func = re.sub(regex, '{p}', str_func)
        try:
            func = self._funcs[str_func]
        except (ValueError, KeyError):
            raise ValueError("'%s' is an invalid string. The only strings "
                             "recognized as functions are %s." %
                             (str_func, list(self._funcs)))
        # Checking that the parameters are valid
        if not func.check_params(params):
            raise ValueError("%s are invalid values for the parameters "
                             "in %s." %
                             (params, str_func))
        return str_func, params
def _topmost_artist(
artists,
_cached_max=functools.partial(max, key=operator.attrgetter("zorder"))):
"""Get the topmost artist of a list.
In case of a tie, return the *last* of the tied artists, as it will be
drawn on top of the others. `max` returns the first maximum in case of ties
(on Py2 this is undocumented but true), so we need to iterate over the list
in reverse order.
"""
return _cached_max(reversed(artists))
def _str_equal(obj, s):
    """Return whether *obj* is a string equal to string *s*.

    The explicit isinstance guard exists because *obj* may be a numpy
    array, in which case a naive ``obj == s`` yields an array, which
    cannot be used in a boolean context.
    """
    if not isinstance(obj, six.string_types):
        return False
    return obj == s
def _str_lower_equal(obj, s):
    """Return whether *obj* is a string equal, when lowercased, to *s*.

    The explicit isinstance guard exists because *obj* may be a numpy
    array, in which case a naive ``obj == s`` yields an array, which
    cannot be used in a boolean context.
    """
    if not isinstance(obj, six.string_types):
        return False
    return obj.lower() == s
@contextlib.contextmanager
def _setattr_cm(obj, **kwargs):
"""Temporarily set some attributes; restore original state at context exit.
"""
sentinel = object()
origs = [(attr, getattr(obj, attr, sentinel)) for attr in kwargs]
try:
for attr, val in kwargs.items():
setattr(obj, attr, val)
yield
finally:
for attr, orig in origs:
if orig is sentinel:
delattr(obj, attr)
else:
setattr(obj, attr, orig)
| [
"sys.platform.startswith",
"os.remove",
"numpy.isreal",
"matplotlib.compat.subprocess.Popen",
"os.removedirs",
"os.walk",
"numpy.ma.getmaskarray",
"bz2.BZ2File",
"six.moves.zip",
"os.path.isfile",
"gc.get_referents",
"numpy.random.randint",
"numpy.mean",
"weakref.WeakKeyDictionary",
"glo... | [((29138, 29176), 're.compile', 're.compile', (['"""(?:(?:\\\\n\\\\r?)|^)( *)\\\\S"""'], {}), "('(?:(?:\\\\n\\\\r?)|^)( *)\\\\S')\n", (29148, 29176), False, 'import re\n'), ((7470, 7491), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7489, 7491), False, 'import traceback\n'), ((20932, 20957), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (20944, 20957), False, 'import os\n'), ((30939, 30952), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (30946, 30952), False, 'import os\n'), ((32391, 32419), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sh'}), '(file=sh)\n', (32410, 32419), False, 'import traceback\n'), ((37707, 37718), 'os.getpid', 'os.getpid', ([], {}), '()\n', (37716, 37718), False, 'import os\n'), ((39964, 39998), 'numpy.array', 'np.array', (['x'], {'subok': '(True)', 'copy': 'copy'}), '(x, subok=True, copy=copy)\n', (39972, 39998), True, 'import numpy as np\n'), ((47137, 47156), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (47150, 47156), False, 'import os\n'), ((58683, 58717), 'numpy.concatenate', 'np.concatenate', (['((1,), mask, (1,))'], {}), '(((1,), mask, (1,)))\n', (58697, 58717), True, 'import numpy as np\n'), ((58792, 58824), 'numpy.compress', 'np.compress', (['(mdif == -1)', 'indices'], {}), '(mdif == -1, indices)\n', (58803, 58824), True, 'import numpy as np\n'), ((58834, 58865), 'numpy.compress', 'np.compress', (['(mdif == 1)', 'indices'], {}), '(mdif == 1, indices)\n', (58845, 58865), True, 'import numpy as np\n'), ((59132, 59153), 'numpy.cumsum', 'np.cumsum', (['seglengths'], {}), '(seglengths)\n', (59141, 59153), True, 'import numpy as np\n'), ((59164, 59204), 'numpy.concatenate', 'np.concatenate', (['((0,), breakpoints[:-1])'], {}), '(((0,), breakpoints[:-1]))\n', (59178, 59204), True, 'import numpy as np\n'), ((59238, 59302), 'numpy.concatenate', 'np.concatenate', (['(ic0[:, np.newaxis], ic1[:, np.newaxis])'], {'axis': '(1)'}), '((ic0[:, np.newaxis], 
ic1[:, np.newaxis]), axis=1)\n', (59252, 59302), True, 'import numpy as np\n'), ((61123, 61151), 'numpy.asarray', 'np.asarray', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (61133, 61151), True, 'import numpy as np\n'), ((61266, 61299), 'numpy.nonzero', 'np.nonzero', (['(mask[:-1] != mask[1:])'], {}), '(mask[:-1] != mask[1:])\n', (61276, 61299), True, 'import numpy as np\n'), ((72048, 72064), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (72061, 72064), True, 'import numpy as np\n'), ((75395, 75423), 'six.iteritems', 'six.iteritems', (['alias_mapping'], {}), '(alias_mapping)\n', (75408, 75423), False, 'import six\n'), ((3065, 3091), 'time.strptime', 'time.strptime', (['s', 'self.fmt'], {}), '(s, self.fmt)\n', (3078, 3091), False, 'import time\n'), ((3107, 3134), 'datetime.datetime', 'datetime.datetime', (['*tup[:6]'], {}), '(*tup[:6])\n', (3124, 3134), False, 'import datetime\n'), ((3546, 3572), 'time.strptime', 'time.strptime', (['s', 'self.fmt'], {}), '(s, self.fmt)\n', (3559, 3572), False, 'import time\n'), ((3588, 3611), 'datetime.date', 'datetime.date', (['*tup[:3]'], {}), '(*tup[:3])\n', (3601, 3611), False, 'import datetime\n'), ((21153, 21176), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (21169, 21176), False, 'import os\n'), ((25808, 25853), 'os.makedirs', 'os.makedirs', (['newdir'], {'mode': 'mode', 'exist_ok': '(True)'}), '(newdir, mode=mode, exist_ok=True)\n', (25819, 25853), False, 'import os\n'), ((30449, 30484), 're.compile', 're.compile', (["('\\n\\r? {0,%d}' % nshift)"], {}), "('\\n\\r? 
{0,%d}' % nshift)\n", (30459, 30484), False, 'import re\n'), ((31707, 31726), 'os.path.isfile', 'os.path.isfile', (['arg'], {}), '(arg)\n', (31721, 31726), False, 'import os\n'), ((31790, 31808), 'os.path.isdir', 'os.path.isdir', (['arg'], {}), '(arg)\n', (31803, 31808), False, 'import os\n'), ((32289, 32302), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (32300, 32302), False, 'import io\n'), ((32326, 32338), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (32336, 32338), False, 'import io\n'), ((38090, 38122), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (38113, 38122), False, 'import sys\n'), ((39691, 39701), 'six.moves.zip', 'zip', (['*args'], {}), '(*args)\n', (39694, 39701), False, 'from six.moves import xrange, zip\n'), ((40359, 40394), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['x'], {'copy': '(False)'}), '(x, copy=False)\n', (40379, 40394), True, 'import numpy as np\n'), ((41991, 42012), 'gc.get_referents', 'gc.get_referents', (['obj'], {}), '(obj)\n', (42007, 42012), False, 'import gc\n'), ((45858, 45887), 'six.itervalues', 'six.itervalues', (['self._mapping'], {}), '(self._mapping)\n', (45872, 45887), False, 'import six\n'), ((46058, 46087), 'six.itervalues', 'six.itervalues', (['self._mapping'], {}), '(self._mapping)\n', (46072, 46087), False, 'import six\n'), ((47490, 47505), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (47499, 47505), False, 'import os\n'), ((49992, 50020), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['masks'], {}), '(masks)\n', (50013, 50020), True, 'import numpy as np\n'), ((53802, 53835), 'numpy.random.randint', 'np.random.randint', (['M'], {'size': '(N, M)'}), '(M, size=(N, M))\n', (53819, 53835), True, 'import numpy as np\n'), ((53887, 53934), 'numpy.median', 'np.median', (['bsData'], {'axis': '(1)', 'overwrite_input': '(True)'}), '(bsData, axis=1, overwrite_input=True)\n', (53896, 53934), True, 'import numpy as np\n'), ((53949, 53985), 
'numpy.percentile', 'np.percentile', (['estimate', 'percentiles'], {}), '(estimate, percentiles)\n', (53962, 53985), True, 'import numpy as np\n'), ((54679, 54691), 'itertools.repeat', 'repeat', (['None'], {}), '(None)\n', (54685, 54691), False, 'from itertools import repeat\n'), ((54856, 54870), 'six.moves.zip', 'zip', (['X', 'labels'], {}), '(X, labels)\n', (54859, 54870), False, 'from six.moves import xrange, zip\n'), ((55680, 55693), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (55690, 55693), True, 'import numpy as np\n'), ((55745, 55755), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (55752, 55755), True, 'import numpy as np\n'), ((55811, 55841), 'numpy.percentile', 'np.percentile', (['x', '[25, 50, 75]'], {}), '(x, [25, 50, 75])\n', (55824, 55841), True, 'import numpy as np\n'), ((56182, 56199), 'numpy.isscalar', 'np.isscalar', (['whis'], {}), '(whis)\n', (56193, 56199), True, 'import numpy as np\n'), ((56765, 56791), 'numpy.compress', 'np.compress', (['(x <= hival)', 'x'], {}), '(x <= hival, x)\n', (56776, 56791), True, 'import numpy as np\n'), ((56980, 57006), 'numpy.compress', 'np.compress', (['(x >= loval)', 'x'], {}), '(x >= loval, x)\n', (56991, 57006), True, 'import numpy as np\n'), ((59026, 59088), 'numpy.concatenate', 'np.concatenate', (['(i0[:, np.newaxis], i1[:, np.newaxis])'], {'axis': '(1)'}), '((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)\n', (59040, 59088), True, 'import numpy as np\n'), ((59559, 59583), 'six.iteritems', 'six.iteritems', (['ls_mapper'], {}), '(ls_mapper)\n', (59572, 59583), False, 'import six\n'), ((61547, 61571), 'six.moves.zip', 'zip', (['idx[::2]', 'idx[1::2]'], {}), '(idx[::2], idx[1::2])\n', (61550, 61571), False, 'from six.moves import xrange, zip\n'), ((61715, 61731), 'six.text_type', 'six.text_type', (['s'], {}), '(s)\n', (61728, 61731), False, 'import six\n'), ((62306, 62326), 'numpy.asarray', 'np.asarray', (['x', 'float'], {}), '(x, float)\n', (62316, 62326), True, 'import numpy as np\n'), ((62546, 
62562), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (62559, 62562), True, 'import numpy as np\n'), ((66993, 67002), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (66999, 67002), True, 'import numpy as np\n'), ((67021, 67030), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (67027, 67030), True, 'import numpy as np\n'), ((67096, 67133), 'numpy.linspace', 'np.linspace', (['min_val', 'max_val', 'points'], {}), '(min_val, max_val, points)\n', (67107, 67133), True, 'import numpy as np\n'), ((67294, 67304), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (67301, 67304), True, 'import numpy as np\n'), ((67331, 67343), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (67340, 67343), True, 'import numpy as np\n'), ((78387, 78434), 'os.path.join', 'os.path.join', (['self.path', '(self.LOCKFN + self.end)'], {}), '(self.path, self.LOCKFN + self.end)\n', (78399, 78434), False, 'import os\n'), ((78458, 78501), 'os.path.join', 'os.path.join', (['self.path', "(self.LOCKFN + '-*')"], {}), "(self.path, self.LOCKFN + '-*')\n", (78470, 78501), False, 'import os\n'), ((85211, 85234), 'six.text_type', 'six.text_type', (['str_func'], {}), '(str_func)\n', (85224, 85234), False, 'import six\n'), ((86885, 86912), 're.findall', 're.findall', (['regex', 'str_func'], {}), '(regex, str_func)\n', (86895, 86912), False, 'import re\n'), ((87229, 87259), 're.sub', 're.sub', (['regex', '"""{p}"""', 'str_func'], {}), "(regex, '{p}', str_func)\n", (87235, 87259), False, 'import re\n'), ((1879, 1895), 'six.text_type', 'six.text_type', (['s'], {}), '(s)\n', (1892, 1895), False, 'import six\n'), ((1929, 1964), 'six.text_type', 'six.text_type', (['s', 'preferredencoding'], {}), '(s, preferredencoding)\n', (1942, 1964), False, 'import six\n'), ((6112, 6121), 'weakref.ref', 'ref', (['inst'], {}), '(inst)\n', (6115, 6121), False, 'from weakref import ref, WeakKeyDictionary\n'), ((10592, 10611), 'weakref.WeakKeyDictionary', 'WeakKeyDictionary', ([], {}), '()\n', (10609, 10611), False, 'from 
weakref import ref, WeakKeyDictionary\n'), ((11180, 11213), 'six.iteritems', 'six.iteritems', (['self._func_cid_map'], {}), '(self._func_cid_map)\n', (11193, 11213), False, 'import six\n'), ((11637, 11666), 'six.iteritems', 'six.iteritems', (['self.callbacks'], {}), '(self.callbacks)\n', (11650, 11666), False, 'import six\n'), ((18222, 18238), 'os.fspath', 'os.fspath', (['fname'], {}), '(fname)\n', (18231, 18238), False, 'import os\n'), ((18503, 18525), 'gzip.open', 'gzip.open', (['fname', 'flag'], {}), '(fname, flag)\n', (18512, 18525), False, 'import gzip\n'), ((20877, 20904), 'matplotlib._get_data_path', 'matplotlib._get_data_path', ([], {}), '()\n', (20902, 20904), False, 'import matplotlib\n'), ((21221, 21242), 'gzip.open', 'gzip.open', (['path', 'mode'], {}), '(path, mode)\n', (21230, 21242), False, 'import gzip\n'), ((25889, 25919), 'os.makedirs', 'os.makedirs', (['newdir'], {'mode': 'mode'}), '(newdir, mode=mode)\n', (25900, 25919), False, 'import os\n'), ((26232, 26254), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (26248, 26254), False, 'import os\n'), ((37294, 37310), 'six.iteritems', 'six.iteritems', (['d'], {}), '(d)\n', (37307, 37310), False, 'import six\n'), ((37511, 37527), 'six.iteritems', 'six.iteritems', (['d'], {}), '(d)\n', (37524, 37527), False, 'import six\n'), ((38466, 38499), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (38489, 38499), False, 'import sys\n'), ((44056, 44065), 'weakref.ref', 'ref', (['item'], {}), '(item)\n', (44059, 44065), False, 'from weakref import ref, WeakKeyDictionary\n'), ((44577, 44583), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (44580, 44583), False, 'from weakref import ref, WeakKeyDictionary\n'), ((45434, 45440), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (45437, 45440), False, 'from weakref import ref, WeakKeyDictionary\n'), ((46337, 46343), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (46340, 46343), False, 'from weakref import ref, 
WeakKeyDictionary\n'), ((47292, 47312), 'os.path.isdir', 'os.path.isdir', (['fname'], {}), '(fname)\n', (47305, 47312), False, 'import os\n'), ((55270, 55282), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (55278, 55282), True, 'import numpy as np\n'), ((56216, 56231), 'numpy.isreal', 'np.isreal', (['whis'], {}), '(whis)\n', (56225, 56231), True, 'import numpy as np\n'), ((56648, 56673), 'numpy.percentile', 'np.percentile', (['x', 'whis[0]'], {}), '(x, whis[0])\n', (56661, 56673), True, 'import numpy as np\n'), ((56694, 56719), 'numpy.percentile', 'np.percentile', (['x', 'whis[1]'], {}), '(x, whis[1])\n', (56707, 56719), True, 'import numpy as np\n'), ((56921, 56935), 'numpy.max', 'np.max', (['wiskhi'], {}), '(wiskhi)\n', (56927, 56935), True, 'import numpy as np\n'), ((57136, 57150), 'numpy.min', 'np.min', (['wisklo'], {}), '(wisklo)\n', (57142, 57150), True, 'import numpy as np\n'), ((64675, 64688), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (64685, 64688), True, 'import numpy as np\n'), ((78643, 78666), 'glob.glob', 'glob.glob', (['self.pattern'], {}), '(self.pattern)\n', (78652, 78666), False, 'import glob\n'), ((84236, 84254), 'numpy.log10', 'np.log10', (['(x + p[0])'], {}), '(x + p[0])\n', (84244, 84254), True, 'import numpy as np\n'), ((84431, 84447), 'numpy.log', 'np.log', (['(x + p[0])'], {}), '(x + p[0])\n', (84437, 84447), True, 'import numpy as np\n'), ((87929, 87958), 'operator.attrgetter', 'operator.attrgetter', (['"""zorder"""'], {}), "('zorder')\n", (87948, 87958), False, 'import operator\n'), ((12379, 12411), 'six.iteritems', 'six.iteritems', (['self.callbacks[s]'], {}), '(self.callbacks[s])\n', (12392, 12411), False, 'import six\n'), ((15065, 15152), 'warnings.warn', 'warnings.warn', (['(\'"%s" keyword argument will be ignored\' % key)', 'IgnoredKeywordWarning'], {}), '(\'"%s" keyword argument will be ignored\' % key,\n IgnoredKeywordWarning)\n', (15078, 15152), False, 'import warnings\n'), ((18797, 18821), 'bz2.BZ2File', 
'bz2.BZ2File', (['fname', 'flag'], {}), '(fname, flag)\n', (18808, 18821), False, 'import bz2\n'), ((18853, 18892), 'io.open', 'io.open', (['fname', 'flag'], {'encoding': 'encoding'}), '(fname, flag, encoding=encoding)\n', (18860, 18892), False, 'import io\n'), ((26372, 26389), 'os.stat', 'os.stat', (['realpath'], {}), '(realpath)\n', (26379, 26389), False, 'import os\n'), ((31090, 31117), 'os.path.join', 'os.path.join', (['dirname', 'name'], {}), '(dirname, name)\n', (31102, 31117), False, 'import os\n'), ((31152, 31176), 'os.path.isfile', 'os.path.isfile', (['fullname'], {}), '(fullname)\n', (31166, 31176), False, 'import os\n'), ((31949, 31966), 'os.path.islink', 'os.path.islink', (['f'], {}), '(f)\n', (31963, 31966), False, 'import os\n'), ((38845, 38875), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (38868, 38875), False, 'import sys\n'), ((41224, 41243), 'six.iteritems', 'six.iteritems', (['step'], {}), '(step)\n', (41237, 41243), False, 'import six\n'), ((43987, 43993), 'weakref.ref', 'ref', (['x'], {}), '(x)\n', (43990, 43993), False, 'from weakref import ref, WeakKeyDictionary\n'), ((43998, 44004), 'weakref.ref', 'ref', (['x'], {}), '(x)\n', (44001, 44004), False, 'from weakref import ref, WeakKeyDictionary\n'), ((44586, 44592), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (44589, 44592), False, 'from weakref import ref, WeakKeyDictionary\n'), ((44653, 44661), 'weakref.ref', 'ref', (['arg'], {}), '(arg)\n', (44656, 44661), False, 'from weakref import ref, WeakKeyDictionary\n'), ((45501, 45507), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (45504, 45507), False, 'from weakref import ref, WeakKeyDictionary\n'), ((46346, 46352), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (46349, 46352), False, 'from weakref import ref, WeakKeyDictionary\n'), ((47190, 47213), 'os.path.join', 'os.path.join', (['path', '"""*"""'], {}), "(path, '*')\n", (47202, 47213), False, 'import os\n'), ((47249, 47273), 'os.path.join', 
'os.path.join', (['path', '""".*"""'], {}), "(path, '.*')\n", (47261, 47273), False, 'import os\n'), ((47370, 47390), 'os.removedirs', 'os.removedirs', (['fname'], {}), '(fname)\n', (47383, 47390), False, 'import os\n'), ((47425, 47441), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (47434, 47441), False, 'import os\n'), ((49284, 49297), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (49294, 49297), True, 'import numpy as np\n'), ((49766, 49781), 'numpy.isfinite', 'np.isfinite', (['xd'], {}), '(xd)\n', (49777, 49781), True, 'import numpy as np\n'), ((56823, 56837), 'numpy.max', 'np.max', (['wiskhi'], {}), '(wiskhi)\n', (56829, 56837), True, 'import numpy as np\n'), ((57038, 57052), 'numpy.min', 'np.min', (['wisklo'], {}), '(wisklo)\n', (57044, 57052), True, 'import numpy as np\n'), ((57247, 57282), 'numpy.compress', 'np.compress', (["(x < stats['whislo'])", 'x'], {}), "(x < stats['whislo'], x)\n", (57258, 57282), True, 'import numpy as np\n'), ((57296, 57331), 'numpy.compress', 'np.compress', (["(x > stats['whishi'])", 'x'], {}), "(x > stats['whishi'], x)\n", (57307, 57331), True, 'import numpy as np\n'), ((62242, 62265), 'numpy.ma.asarray', 'np.ma.asarray', (['x', 'float'], {}), '(x, float)\n', (62255, 62265), True, 'import numpy as np\n'), ((63352, 63388), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (63375, 63388), False, 'import warnings\n'), ((63411, 63528), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""always"""'], {'category': 'DeprecationWarning', 'message': '"""Support for multi-dimensional indexing"""'}), "('always', category=DeprecationWarning, message=\n 'Support for multi-dimensional indexing')\n", (63434, 63528), False, 'import warnings\n'), ((64011, 64027), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (64024, 64027), True, 'import numpy as np\n'), ((64108, 64124), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (64121, 64124), True, 'import 
numpy as np\n'), ((64920, 64937), 'numpy.reshape', 'np.reshape', (['x', '(-1)'], {}), '(x, -1)\n', (64930, 64937), True, 'import numpy as np\n'), ((73162, 73196), 'numpy.arange', 'np.arange', (['y.shape[0]'], {'dtype': 'float'}), '(y.shape[0], dtype=float)\n', (73171, 73196), True, 'import numpy as np\n'), ((78349, 78360), 'os.getpid', 'os.getpid', ([], {}), '()\n', (78358, 78360), False, 'import os\n'), ((78741, 78762), 'time.sleep', 'time.sleep', (['sleeptime'], {}), '(sleeptime)\n', (78751, 78762), False, 'import time\n'), ((78999, 79026), 'os.makedirs', 'os.makedirs', (['self.lock_path'], {}), '(self.lock_path)\n', (79010, 79026), False, 'import os\n'), ((83948, 83957), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (83954, 83957), True, 'import numpy as np\n'), ((84010, 84022), 'numpy.log', 'np.log', (['p[0]'], {}), '(p[0])\n', (84016, 84022), True, 'import numpy as np\n'), ((84499, 84508), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (84505, 84508), True, 'import numpy as np\n'), ((84628, 84644), 'numpy.log', 'np.log', (['(x + p[1])'], {}), '(x + p[1])\n', (84634, 84644), True, 'import numpy as np\n'), ((84701, 84713), 'numpy.log', 'np.log', (['p[0]'], {}), '(p[0])\n', (84707, 84713), True, 'import numpy as np\n'), ((1544, 1621), 'locale.getpreferredencoding', 'locale.getpreferredencoding', (["matplotlib.rcParams['axes.formatter.use_locale']"], {}), "(matplotlib.rcParams['axes.formatter.use_locale'])\n", (1571, 1621), False, 'import locale\n'), ((4908, 4939), 'weakref.ref', 'ref', (['cb.__self__', 'self._destroy'], {}), '(cb.__self__, self._destroy)\n', (4911, 4939), False, 'from weakref import ref, WeakKeyDictionary\n'), ((4994, 5024), 'weakref.ref', 'ref', (['cb.im_self', 'self._destroy'], {}), '(cb.im_self, self._destroy)\n', (4997, 5024), False, 'from weakref import ref, WeakKeyDictionary\n'), ((11864, 11897), 'six.iteritems', 'six.iteritems', (['self._func_cid_map'], {}), '(self._func_cid_map)\n', (11877, 11897), False, 'import six\n'), ((31246, 31276), 
'fnmatch.fnmatch', 'fnmatch.fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (31261, 31276), False, 'import fnmatch\n'), ((44722, 44730), 'weakref.ref', 'ref', (['arg'], {}), '(arg)\n', (44725, 44730), False, 'from weakref import ref, WeakKeyDictionary\n'), ((44756, 44764), 'weakref.ref', 'ref', (['arg'], {}), '(arg)\n', (44759, 44764), False, 'from weakref import ref, WeakKeyDictionary\n'), ((45250, 45256), 'weakref.ref', 'ref', (['a'], {}), '(a)\n', (45253, 45256), False, 'from weakref import ref, WeakKeyDictionary\n'), ((45269, 45275), 'weakref.ref', 'ref', (['b'], {}), '(b)\n', (45272, 45275), False, 'from weakref import ref, WeakKeyDictionary\n'), ((46966, 46986), 'numpy.interp', 'np.interp', (['x', 'xp', 'fp'], {}), '(x, xp, fp)\n', (46975, 46986), True, 'import numpy as np\n'), ((54402, 54412), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (54409, 54412), True, 'import numpy as np\n'), ((54456, 54466), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (54463, 54466), True, 'import numpy as np\n'), ((56421, 56430), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (56427, 56430), True, 'import numpy as np\n'), ((56455, 56464), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (56461, 56464), True, 'import numpy as np\n'), ((63797, 63813), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (63810, 63813), True, 'import numpy as np\n'), ((79353, 79367), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (79361, 79367), False, 'import os\n'), ((11948, 11972), 'six.iteritems', 'six.iteritems', (['functions'], {}), '(functions)\n', (11961, 11972), False, 'import six\n'), ((20989, 21012), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (21005, 21012), False, 'import os\n'), ((37782, 37839), 'matplotlib.compat.subprocess.Popen', 'Popen', (["['ps', '-p', '%d' % pid, '-o', 'osz']"], {'stdout': 'PIPE'}), "(['ps', '-p', '%d' % pid, '-o', 'osz'], stdout=PIPE)\n", (37787, 37839), False, 'from matplotlib.compat.subprocess import Popen, PIPE\n'), 
((49615, 49636), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['x'], {}), '(x)\n', (49633, 49636), True, 'import numpy as np\n'), ((38154, 38214), 'matplotlib.compat.subprocess.Popen', 'Popen', (["['ps', '-p', '%d' % pid, '-o', 'rss,sz']"], {'stdout': 'PIPE'}), "(['ps', '-p', '%d' % pid, '-o', 'rss,sz'], stdout=PIPE)\n", (38159, 38214), False, 'from matplotlib.compat.subprocess import Popen, PIPE\n'), ((38531, 38592), 'matplotlib.compat.subprocess.Popen', 'Popen', (["['ps', '-p', '%d' % pid, '-o', 'rss,vsz']"], {'stdout': 'PIPE'}), "(['ps', '-p', '%d' % pid, '-o', 'rss,vsz'], stdout=PIPE)\n", (38536, 38592), False, 'from matplotlib.compat.subprocess import Popen, PIPE\n')] |
import matplotlib.pyplot as plt
import shutil
import itertools
import os
import pickle
import json
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import confusion_matrix
# from sympy import factorial
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.metrics import categorical_crossentropy, categorical_accuracy, top_k_categorical_accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Dropout, Conv2D, Activation
from tensorflow.python.platform import gfile
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import numpy as np
# import pandas as pd
# from tensorflow import set_random_seed
# from numpy.random import seed
import collections
import re
import hashlib
from tensorflow.python.util import compat
from sklearn.metrics import classification_report
# Upper bound used to bucket filename hashes into train/val/test splits.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # ~134M
# Body-site labels — presumably lesion locations accepted by the pipeline;
# not referenced in this file — TODO confirm where it is used.
ACCEPTED_LOCATION = ['back', 'upper extremity', 'lower extremity', 'chest', 'abdomen']
class ModelTrain:
    """Transfer-learning trainer for an image classifier.

    Holds the train/val/test directory layout, pre-built ImageNet backbones
    and their matching preprocessing generators, and a log-file handle used
    by the training methods.
    """

    def __init__(self, train_dir, test_dir, val_dir, base_dir, f=None):
        """Set up directories, counters, backbones and the log sink.

        Parameters
        ----------
        train_dir, test_dir, val_dir : str
            Directories holding one sub-folder per class.
        base_dir : str
            Output directory for models, plots and reports.
        f : str or None
            Path of a log file; when None, log writes go to os.devnull.
        """
        self.extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        self.train_dir = train_dir
        self.test_dir = test_dir
        self.val_dir = val_dir
        self.num_train_samples = 0
        self.num_val_samples = 0
        self.num_test_samples = 0
        self.num_classes = 0
        # Bug fix: the original left self.f unset when f was None, so any later
        # self.f.write(...) raised AttributeError.  Fall back to a devnull sink
        # so logging calls are harmless no-ops in that case.
        self.f = open(f, 'w+') if f is not None else open(os.devnull, 'w')
        self.base_dir = base_dir
        # ImageNet backbones with the classification head removed and global
        # average pooling on top; all three are built eagerly.
        self.model_list = {
            'mobilenet':
                tf.keras.applications.mobilenet.MobileNet(
                    include_top=False,
                    input_shape=(224, 224, 3),
                    pooling='avg'
                ),
            'mobilenet_v2':
                tf.keras.applications.mobilenet_v2.MobileNetV2(
                    include_top=False,
                    input_shape=(224, 224, 3),
                    pooling='avg'
                ),
            'inception_v3':
                tf.keras.applications.inception_v3.InceptionV3(
                    include_top=False,
                    input_shape=(224, 224, 3),
                    pooling='avg'
                )
        }
        # One generator per backbone, each wired to that model family's
        # preprocess_input so pixel scaling matches the pretrained weights.
        self.data_generators = {
            'mobilenet':
                ImageDataGenerator(
                    preprocessing_function=tf.keras.applications.mobilenet.preprocess_input
                ),
            'mobilenet_v2':
                ImageDataGenerator(
                    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input
                ),
            'inception_v3':
                ImageDataGenerator(
                    preprocessing_function=tf.keras.applications.inception_v3.preprocess_input
                )
        }
def create_image_dir(self, image_dir: str, testing_percetange=25, validation_percetage=25):
# This code is based on: https://github.com/googlecodelabs/tensorflow-for-poets-2/blob/6be494e0300555fd48c095abd6b2764ba4324592/scripts/retrain.py#L125
moves = 'Moves {} to {}'
if not os.path.exists(image_dir):
print('Root path directory ' + image_dir + ' not found')
tf.logging.error("Root path directory '" + image_dir + "' not found.")
return None
result = collections.defaultdict()
sub_dirs = [
os.path.join(image_dir, item) for item in os.listdir(image_dir)
]
sub_dirs = sorted(item for item in sub_dirs if os.path.isdir(item))
for sub_dir in sub_dirs:
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for ext in self.extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + ext)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
tf.logging.warning('No files found')
continue
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
for file_name in file_list:
val_sub_dir = os.path.join(self.val_dir, dir_name)
if not os.path.exists(val_sub_dir):
os.mkdir(val_sub_dir)
train_sub_dir = os.path.join(self.train_dir, dir_name)
if not os.path.exists(train_sub_dir):
os.mkdir(train_sub_dir)
os.mkdir(os.path.join(train_sub_dir, 'n'))
test_sub_dir = os.path.join(self.test_dir, dir_name)
if not os.path.exists(test_sub_dir):
os.mkdir(test_sub_dir)
# print(sub_dir)
# print(os.path.join(dir_name, self.val_dir))
# print(os.path.join(self.val_dir, dir_name))
base_name = os.path.basename(file_name)
# print(klklk)
# print(base_name)
hash_name = re.sub(r'_nohash_.*$', '', file_name)
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percetage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percetage_hash < validation_percetage:
if os.path.exists(os.path.join(val_sub_dir, base_name)):
continue
shutil.copy(file_name, val_sub_dir)
print(moves.format(base_name, val_sub_dir))
# self.num_val_samples += 1
elif percetage_hash < (testing_percetange + validation_percetage):
if os.path.exists(os.path.join(test_sub_dir, base_name)):
continue
shutil.copy(file_name, test_sub_dir)
print(moves.format(base_name, test_sub_dir))
# self.num_test_samples += 1
else:
if os.path.exists(os.path.join(train_sub_dir, base_name)):
continue
shutil.copy(file_name, train_sub_dir + '\\n')
print(moves.format(base_name, train_sub_dir + '\\n'))
# self.num_train_samples += 1
print('Done')
# This code is based on https: // www.kaggle.com / vbookshelf / skin - lesion - analyzer - tensorflow - js - web - app
def top_2_accuracy(self, y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=2)
def top_3_accuracy(self, y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
def top_5_accuracy(self, y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=5)
    def data_augmentation(self, batch_size=1, image_size=224, num_img_aug=500):
        """Augment the training images on disk and tally sample counts.

        Walks ``self.train_dir`` two levels deep (class folder, then
        sub-folder), writes randomly transformed JPEG copies into a parallel
        ``<train_dir>_aug`` tree until roughly ``num_img_aug`` images exist
        per sub-folder, then updates ``self.num_train_samples`` and
        ``self.num_val_samples`` from the resulting directory counts.

        NOTE(review): appears superseded by ``data_augmentation2``, which
        assumes a one-level layout — confirm which one the pipeline uses.
        """
        aug_dir = self.train_dir + '_aug'
        if not os.path.exists(aug_dir):
            os.mkdir(aug_dir)
        for folder in os.listdir(self.train_dir):
            print(os.path.join(self.train_dir, folder))
            folder_path = os.path.join(self.train_dir, folder)
            # Mirror the class folder into the aug tree (with an extra '_aug'
            # suffix on the class folder itself).
            folder_path_aug = folder_path.replace(self.train_dir, aug_dir)
            if not os.path.exists(folder_path_aug + '_aug'):
                os.mkdir(folder_path_aug + '_aug')
            for sub_folder in os.listdir(os.path.join(self.train_dir, folder)):
                path = os.path.join(self.train_dir, folder).replace(self.train_dir, aug_dir)
                save_path = path + '_aug'
                save_path = os.path.join(save_path, sub_folder)
                sub_folder_path = os.path.join(folder_path, sub_folder)
                print('sub folder path', sub_folder_path)
                # print('folder path', save_path)
                if not os.path.exists(save_path):
                    os.mkdir(save_path)
                print('save_path', save_path)
                # Random geometric augmentations only; pixel values untouched.
                data_aug_gen = ImageDataGenerator(
                    rotation_range=180,
                    width_shift_range=0.1,
                    height_shift_range=0.1,
                    zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest'
                )
                path_dir = os.path.join(self.train_dir, folder)
                print('direktori: ', os.path.join(path_dir, sub_folder))
                ini_dir = os.path.join(path_dir, sub_folder)
                # flow_from_directory persists each generated batch to save_path.
                aug_datagen = data_aug_gen.flow_from_directory(
                    directory=ini_dir,
                    save_to_dir=save_path,
                    save_format='jpg',
                    target_size=(image_size, image_size),
                    batch_size=batch_size
                )
                num_files = len(os.listdir(ini_dir))
                # print(num_files)
                # Generate just enough batches to reach ~num_img_aug images.
                num_batches = int(np.ceil((num_img_aug - num_files) / batch_size))
                for i in range(0, num_batches):
                    # Pulling a batch triggers the on-disk save as a side effect.
                    imgs, labels = next(aug_datagen)
        # Recount samples from the directories actually on disk.
        for folder in os.listdir(aug_dir):
            path = os.path.join(aug_dir, folder)
            for subfolder in os.listdir(path):
                sub_path = os.path.join(path, subfolder)
                print('There are {} images in {}'.format(len(os.listdir(sub_path)), subfolder))
                if 'train' in sub_path:
                    self.num_train_samples += len(os.listdir(sub_path))
                elif 'val' in sub_path:
                    self.num_val_samples += len(os.listdir(sub_path))
        print('num train', self.num_train_samples)
        print('num val', self.num_val_samples)
    def data_augmentation2(self, batch_size=16, image_size=224, num_img_aug=500):
        """Augment the training set (one class folder per directory level).

        Rebuilds ``self.aug_dir`` from scratch, writes ~``num_img_aug``
        augmented JPEGs per class folder, counts the classes into
        ``self.num_classes``, and sets ``self.num_train_samples`` /
        ``self.num_val_samples`` from the augmented and validation
        directory counts.  Progress is mirrored to the log file ``self.f``.

        NOTE(review): ``num_classes`` is incremented here, so calling this
        method twice would double it — confirm it is called exactly once.
        """
        self.f.write('Data Augmentation\n')
        self.aug_dir = self.train_dir + '_aug'
        # Start from a clean augmentation directory every run.
        if os.path.exists(self.aug_dir):
            shutil.rmtree(self.aug_dir)
            os.mkdir(self.aug_dir)
        else:
            os.mkdir(self.aug_dir)
        for folder in os.listdir(self.train_dir):  # one folder per class
            self.num_classes += 1
            print(os.path.join(self.train_dir, folder))
            self.f.write(os.path.join(self.train_dir, folder) + '\n')
            folder_path = os.path.join(self.train_dir, folder)
            # Mirror the class folder into the aug tree.
            folder_path_aug = folder_path.replace(self.train_dir, self.aug_dir)
            if not os.path.exists(folder_path_aug):
                os.mkdir(folder_path_aug)
            # Random geometric augmentations only; pixel values untouched.
            data_aug_gen = ImageDataGenerator(
                rotation_range=180,
                width_shift_range=0.1,
                height_shift_range=0.1,
                zoom_range=0.1,
                horizontal_flip=True,
                vertical_flip=True,
                fill_mode='nearest'
            )
            path_dir = os.path.join(self.train_dir, folder)
            # print('direktori: ', os.path.join(path_dir, sub_folder))
            print('direktori: ', os.path.join(self.train_dir, folder))
            self.f.write('direktori: ' + os.path.join(self.train_dir, folder) + '\n')
            # ini_dir = os.path.join(path_dir, sub_folder)
            ini_dir = os.path.join(self.train_dir, folder)
            # flow_from_directory persists each generated batch to disk.
            aug_datagen = data_aug_gen.flow_from_directory(
                directory=ini_dir,
                save_to_dir=folder_path_aug,
                save_format='jpg',
                target_size=(image_size, image_size),
                batch_size=batch_size
            )
            num_files = len(os.listdir(ini_dir))
            # print(num_files)
            # Generate just enough batches to reach ~num_img_aug images.
            num_batches = int(np.ceil((num_img_aug - num_files) / batch_size))
            for i in range(0, num_batches):
                # Pulling a batch triggers the on-disk save as a side effect.
                imgs, labels = next(aug_datagen)
            # self.plots(imgs, titles=None, fname=ini_dir + '\\fig'+str(i)+'.jpg')
        # Recount samples from the directories actually on disk.
        for folder in os.listdir(self.aug_dir):
            path = os.path.join(self.aug_dir, folder)
            print('There are {} images in {}'.format(len(os.listdir(path)), folder))
            self.f.write('There are {} images in {}'.format(len(os.listdir(path)), folder) + '\n')
            self.num_train_samples += len(os.listdir(path))
        for folder in os.listdir(self.val_dir):
            path = os.path.join(self.val_dir, folder)
            print('There are {} images in {}'.format(len(os.listdir(path)), folder))
            self.f.write('There are {} images in {}'.format(len(os.listdir(path)), folder) + '\n')
            self.num_val_samples += len(os.listdir(path))
        print('num train', self.num_train_samples)
        self.f.write('num train' + str(self.num_train_samples) + '\n')
        print('num val', self.num_val_samples)
        self.f.write('num val' + str(self.num_val_samples) + '\n')
def setup_generators(self,
train_batch_size=10,
val_batch_size=10,
image_size=224):
# train_path = ''
# valid_path = ''
num_train_samples = self.num_train_samples
num_val_samples = self.num_val_samples
self.train_steps = np.ceil(num_train_samples / train_batch_size)
self.val_steps = np.ceil(num_val_samples / val_batch_size)
datagen = ImageDataGenerator(
preprocessing_function=
tf.keras.applications.mobilenet.preprocess_input
)
self.train_batches = datagen.flow_from_directory(directory=self.aug_dir,
target_size=(
image_size, image_size),
batch_size=train_batch_size)
self.valid_batches = datagen.flow_from_directory(directory=self.val_dir,
target_size=(
image_size, image_size),
batch_size=val_batch_size
)
self.test_batches = datagen.flow_from_directory(directory=self.val_dir,
target_size=(
image_size, image_size),
batch_size=1,
shuffle=False
)
    def define_mobile_net(self, class_weights=None, model='mobilenet', dropout=0.25, epochs=30, name='V1'):
        """Build, fine-tune, save and evaluate a classifier on a pretrained backbone.

        Adds dropout and a softmax head to ``self.model_list[model]``, freezes
        all but the last 23 layers, trains with the generators prepared by
        ``setup_generators``, checkpoints the best ``val_top_3_accuracy``
        weights, saves history/JSON/weights/SavedModel artifacts under
        ``self.base_dir``, and logs evaluation of both the last and the best
        checkpoint.  ``name`` is unused (kept for backward compatibility);
        artifact filenames use ``self.name``, which is set to ``model``.
        """
        self.name = model
        self.f.write('MODEL: ' + self.name + '\n')
        # New classification head on top of the pooled backbone output.
        x = self.model_list[model].output
        x = Dropout(dropout, name='do_akhir')(x)
        # x = Conv2D(7, (1, 1),
        #            padding='same',
        #            name='conv_preds')(x)
        # x = Activation('softmax', name='act_softmax')(x)
        predictions = Dense(self.num_classes, activation='softmax')(x)
        self.new_model = Model(inputs=self.model_list[model].input, outputs=predictions)
        print(self.new_model.summary())
        # self.f.write(self.new_model.summary() + '\n')
        # self.new_model = model_list[model]
        # Freeze everything except the last 23 layers for fine-tuning.
        for layer in self.new_model.layers[:-23]:
            layer.trainable = False
        self.new_model.compile(Adam(lr=0.01),
                               loss='categorical_crossentropy',
                               metrics=[categorical_accuracy,
                                        self.top_2_accuracy,
                                        self.top_3_accuracy])
        print('Validation Batches: ', self.valid_batches.class_indices)
        self.f.write('Validation Batches: ' + str(self.valid_batches.class_indices) + '\n')
        # if not class_weights:
        #     class_weights = {
        #         0: 0.8,  # akiec
        #         1: 0.8,  # bcc
        #         2: 0.6,  # bkl
        #         3: 1.0,  # mel
        #         4: 1.0,  # nv
        #         5: 0.5,  # vasc
        #     }
        # NOTE(review): the fallback class weights are random (seeded), not
        # derived from class frequencies — confirm this is intentional.
        if not class_weights:
            np.random.seed(0)
            class_weights = {i: b for i, b in enumerate(np.random.rand(self.num_classes))}
        filepath = os.path.join(self.base_dir, 'best_model' + self.name + '.h5')
        # Keep only the checkpoint with the best top-3 validation accuracy.
        checkpoint = ModelCheckpoint(filepath, monitor='val_top_3_accuracy',
                                     verbose=1, save_best_only=True, mode='max')
        reduce_lr = ReduceLROnPlateau(monitor='val_top_3_accuracy',
                                      factor=0.5, patience=2, verbose=1, mode='max',
                                      min_lr=0.00001)
        callbacks_list = [checkpoint, reduce_lr]
        self.history = self.new_model.fit_generator(self.train_batches,
                                                    steps_per_epoch=self.train_steps,
                                                    class_weight=class_weights,
                                                    validation_data=self.valid_batches,
                                                    validation_steps=self.val_steps,
                                                    epochs=epochs, verbose=1,
                                                    callbacks=callbacks_list)
        # Persist the training history for later plotting.
        with open(os.path.join(self.base_dir, 'trainHistoryDict' + self.name), 'wb') as file_pi:
            pickle.dump(self.history.history, file_pi)
        # with open('historyfile.json', 'w') as f:
        #     json.dump(self.history.history, f)
        # serialize model to JSON
        model_json = self.new_model.to_json()
        with open(os.path.join(self.base_dir, 'last_step_model' + self.name + '.json'), 'w') as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.new_model.save_weights(os.path.join(self.base_dir, "last_step_weight_" + self.name + ".h5"))
        self.new_model.save(os.path.join(self.base_dir, "last_step_model_" + self.name + ".h5"))
        # Export a SavedModel (deprecated tf.contrib API — TF 1.x only).
        output_path = tf.contrib.saved_model.save_keras_model(self.new_model,
                                                         os.path.join(self.base_dir, 'model_' + self.name))
        print(type(output_path))
        print(output_path)
        self.f.write('Saved model to disk: {} \n'.format(output_path))
        print("Saved model to disk")
        print(self.new_model.metrics_names)
        # Evaluate the weights as they are after the final epoch.
        val_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \
            self.new_model.evaluate_generator(self.test_batches,
                                              steps=self.num_val_samples)
        print('Last Step')
        self.f.write('Last Step \n')
        print('val_loss:', val_loss)
        self.f.write('val_loss:' + str(val_loss) + '\n')
        print('val_cat_acc:', val_cat_acc)
        self.f.write('val_cat_acc:' + str(val_cat_acc) + '\n')
        print('val_top_2_acc:', val_top_2_acc)
        self.f.write('val_top_2_acc:' + str(val_top_2_acc) + '\n')
        print('val_top_3_acc:', val_top_3_acc)
        self.f.write('val_top_3_acc:' + str(val_top_3_acc) + '\n')
        # Reload the best checkpoint and evaluate it the same way.
        self.new_model.load_weights(filepath)
        val_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \
            self.new_model.evaluate_generator(self.test_batches,
                                              steps=self.num_val_samples)
        print('Best Step')
        self.f.write('Best Step \n')
        print('val_loss:', val_loss)
        self.f.write('val_loss:' + str(val_loss) + '\n')
        print('val_cat_acc:', val_cat_acc)
        self.f.write('val_cat_acc:' + str(val_cat_acc) + '\n')
        print('val_top_2_acc:', val_top_2_acc)
        self.f.write('val_top_2_acc:' + str(val_top_2_acc) + '\n')
        print('val_top_3_acc:', val_top_3_acc)
        self.f.write('val_top_3_acc:' + str(val_top_3_acc) + '\n')
    def predicts(self, model_path: str, model='mobilenet', image_size=224):
        """Run a saved model over the validation images and report metrics.

        Scans ``self.base_dir`` for directories containing 'val' to collect
        class labels and the sample count, loads the SavedModel from
        ``model_path`` (deprecated tf.contrib API), predicts over unshuffled
        single-image batches, then writes a confusion-matrix plot and a
        classification report to ``predict_<model>.txt`` in ``self.base_dir``.
        """
        datagen = self.data_generators[model]
        filename = os.path.join(self.base_dir, 'predict_' + model + '.txt')
        self.name = model
        f = open(filename, 'w+')
        cm_plot_labels = []
        num_val_samples = 0
        # Collect class labels and the validation image count from disk.
        for folder in os.listdir(self.base_dir):
            path = os.path.join(self.base_dir, folder)
            if os.path.isdir(path):
                for sub_folder in os.listdir(path):
                    sub_path = os.path.join(path, sub_folder)
                    if 'val' in sub_path:
                        # temp = sub_folder.split('_')
                        cm_plot_labels.append(sub_folder)
                        print('There are {} images in {}'.format(len(os.listdir(sub_path)), sub_folder))
                        f.write('There are {} images in {} \n'.format(len(os.listdir(sub_path)), sub_folder))
                        num_val_samples += len(os.listdir(sub_path))
        # batch_size=1 and shuffle=False keep predictions aligned with labels.
        test_batches = datagen.flow_from_directory(directory=os.path.join(self.base_dir, 'val_dir'),
                                                   target_size=(image_size, image_size),
                                                   batch_size=1, shuffle=False)
        loaded_model = tf.contrib.saved_model.load_keras_model(model_path)
        predictions = loaded_model.predict_generator(test_batches, steps=num_val_samples, verbose=1)
        test_labels = test_batches.classes
        cm = confusion_matrix(test_labels, predictions.argmax(axis=1))
        self.plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
        y_pred = np.argmax(predictions, axis=1)
        y_true = test_batches.classes
        report = classification_report(y_true, y_pred, target_names=cm_plot_labels)
        print(report)
        f.write(report)
        f.close()
    '''
    Recall = Given a class, will the classifier be able to detect it?
    Precision = Given a class prediction from a classifier, how likely is it to be correct?
    F1 Score = The harmonic mean of the recall and precision. Essentially, it punishes extreme values.
    '''
def save_learning_curves(self, name='V1'):
acc = self.history.history['categorical_accuracy']
val_acc = self.history.history['val_categorical_accuracy']
loss = self.history.history['loss']
val_loss = self.history.history['val_loss']
train_top2_acc = self.history.history['top_2_accuracy']
val_top2_acc = self.history.history['val_top_2_accuracy']
train_top3_acc = self.history.history['top_3_accuracy']
val_top3_acc = self.history.history['val_top_3_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig(fname=os.path.join(self.base_dir, 'Training and validation loss ' + self.name + '.jpg'))
plt.clf()
# plt.figure()
plt.plot(epochs, acc, 'bo', label='Training cat acc')
plt.plot(epochs, val_acc, 'b', label='Validation cat acc')
plt.title('Training and validation cat accuracy')
plt.legend()
# plt.figure()
plt.savefig(fname=os.path.join(self.base_dir, 'Training and validation cat accuracy ' + self.name + '.jpg'))
plt.clf()
plt.plot(epochs, train_top2_acc, 'bo', label='Training top2 acc')
plt.plot(epochs, val_top2_acc, 'b', label='Validation top2 acc')
plt.title('Training and validation top2 accuracy')
plt.legend()
# plt.figure()
plt.savefig(fname=os.path.join(self.base_dir, 'Training and validation top2 accuracy ' + self.name + '.jpg'))
plt.clf()
plt.plot(epochs, train_top3_acc, 'bo', label='Training top3 acc')
plt.plot(epochs, val_top3_acc, 'b', label='Validation top3 acc')
plt.title('Training and validation top3 accuracy')
plt.legend()
plt.savefig(fname=os.path.join(self.base_dir, 'Training and validation top3 accuracy ' + self.name + '.jpg'))
plt.clf()
# plt.show()
# plt.savefig(fname='training_curves.jpg')
def plots(sellf, ims, fname, figsize=(12, 6), rows=5, interp=False, titles=None, ): # 12,6
if type(ims[0]) is np.ndarray:
ims = np.array(ims).astype(np.uint8)
if (ims.shape[-1] != 3):
ims = ims.transpose((0, 2, 3, 1))
f = plt.figure(figsize=figsize)
cols = len(ims) // rows if len(ims) % 2 == 0 else len(ims) // rows + 1
for i in range(len(ims)):
sp = f.add_subplot(rows, cols, i + 1)
sp.axis('Off')
if titles is not None:
sp.set_title(titles[i], fontsize=16)
# plt.imshow(ims[i], interpolation=None if interp else 'none')
plt.savefig(fname=fname, dpi=f.dpi)
def plot_confusion_matrix(self, cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues, name='V1'):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig(os.path.join(self.base_dir, 'confusion_matrix ' + self.name + '.jpg'))
plt.clf()
# plt.tight_layout()
| [
"matplotlib.pyplot.title",
"os.mkdir",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"pickle.dump",
"numpy.random.seed",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.logging.info",
"numpy.argmax",
"matplotlib.pyplot.clf",
"tensorflow.logging.error",
"tensorflow.keras.layers... | [((3454, 3479), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (3477, 3479), False, 'import collections\n'), ((6768, 6815), 'tensorflow.keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(2)'}), '(y_true, y_pred, k=2)\n', (6794, 6815), False, 'from tensorflow.keras.metrics import categorical_crossentropy, categorical_accuracy, top_k_categorical_accuracy\n'), ((6878, 6925), 'tensorflow.keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(3)'}), '(y_true, y_pred, k=3)\n', (6904, 6925), False, 'from tensorflow.keras.metrics import categorical_crossentropy, categorical_accuracy, top_k_categorical_accuracy\n'), ((6988, 7035), 'tensorflow.keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(5)'}), '(y_true, y_pred, k=5)\n', (7014, 7035), False, 'from tensorflow.keras.metrics import categorical_crossentropy, categorical_accuracy, top_k_categorical_accuracy\n'), ((7251, 7277), 'os.listdir', 'os.listdir', (['self.train_dir'], {}), '(self.train_dir)\n', (7261, 7277), False, 'import os\n'), ((9332, 9351), 'os.listdir', 'os.listdir', (['aug_dir'], {}), '(aug_dir)\n', (9342, 9351), False, 'import os\n'), ((10107, 10135), 'os.path.exists', 'os.path.exists', (['self.aug_dir'], {}), '(self.aug_dir)\n', (10121, 10135), False, 'import os\n'), ((10283, 10309), 'os.listdir', 'os.listdir', (['self.train_dir'], {}), '(self.train_dir)\n', (10293, 10309), False, 'import os\n'), ((12087, 12111), 'os.listdir', 'os.listdir', (['self.aug_dir'], {}), '(self.aug_dir)\n', (12097, 12111), False, 'import os\n'), ((12434, 12458), 'os.listdir', 'os.listdir', (['self.val_dir'], {}), '(self.val_dir)\n', (12444, 12458), False, 'import os\n'), ((13335, 13380), 'numpy.ceil', 'np.ceil', (['(num_train_samples / train_batch_size)'], {}), '(num_train_samples / train_batch_size)\n', (13342, 13380), True, 
'import numpy as np\n'), ((13406, 13447), 'numpy.ceil', 'np.ceil', (['(num_val_samples / val_batch_size)'], {}), '(num_val_samples / val_batch_size)\n', (13413, 13447), True, 'import numpy as np\n'), ((13467, 13563), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'tf.keras.applications.mobilenet.preprocess_input'}), '(preprocessing_function=tf.keras.applications.mobilenet.\n preprocess_input)\n', (13485, 13563), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((15290, 15353), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.model_list[model].input', 'outputs': 'predictions'}), '(inputs=self.model_list[model].input, outputs=predictions)\n', (15295, 15353), False, 'from tensorflow.keras.models import Model\n'), ((16495, 16556), 'os.path.join', 'os.path.join', (['self.base_dir', "('best_model' + self.name + '.h5')"], {}), "(self.base_dir, 'best_model' + self.name + '.h5')\n", (16507, 16556), False, 'import os\n'), ((16578, 16681), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_top_3_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_top_3_accuracy', verbose=1,\n save_best_only=True, mode='max')\n", (16593, 16681), False, 'from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((16735, 16847), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_top_3_accuracy"""', 'factor': '(0.5)', 'patience': '(2)', 'verbose': '(1)', 'mode': '"""max"""', 'min_lr': '(1e-05)'}), "(monitor='val_top_3_accuracy', factor=0.5, patience=2,\n verbose=1, mode='max', min_lr=1e-05)\n", (16752, 16847), False, 'from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((20285, 20341), 'os.path.join', 'os.path.join', (['self.base_dir', "('predict_' + model + 
'.txt')"], {}), "(self.base_dir, 'predict_' + model + '.txt')\n", (20297, 20341), False, 'import os\n'), ((20479, 20504), 'os.listdir', 'os.listdir', (['self.base_dir'], {}), '(self.base_dir)\n', (20489, 20504), False, 'import os\n'), ((21451, 21502), 'tensorflow.contrib.saved_model.load_keras_model', 'tf.contrib.saved_model.load_keras_model', (['model_path'], {}), '(model_path)\n', (21490, 21502), True, 'import tensorflow as tf\n'), ((21816, 21846), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (21825, 21846), True, 'import numpy as np\n'), ((21902, 21968), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'target_names': 'cm_plot_labels'}), '(y_true, y_pred, target_names=cm_plot_labels)\n', (21923, 21968), False, 'from sklearn.metrics import classification_report\n'), ((22914, 22965), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""bo"""'], {'label': '"""Training loss"""'}), "(epochs, loss, 'bo', label='Training loss')\n", (22922, 22965), True, 'import matplotlib.pyplot as plt\n'), ((22974, 23030), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""'], {'label': '"""Validation loss"""'}), "(epochs, val_loss, 'b', label='Validation loss')\n", (22982, 23030), True, 'import matplotlib.pyplot as plt\n'), ((23039, 23080), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (23048, 23080), True, 'import matplotlib.pyplot as plt\n'), ((23089, 23101), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23099, 23101), True, 'import matplotlib.pyplot as plt\n'), ((23219, 23228), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (23226, 23228), True, 'import matplotlib.pyplot as plt\n'), ((23261, 23314), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""bo"""'], {'label': '"""Training cat acc"""'}), "(epochs, acc, 'bo', label='Training cat acc')\n", (23269, 
23314), True, 'import matplotlib.pyplot as plt\n'), ((23323, 23381), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""'], {'label': '"""Validation cat acc"""'}), "(epochs, val_acc, 'b', label='Validation cat acc')\n", (23331, 23381), True, 'import matplotlib.pyplot as plt\n'), ((23390, 23439), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation cat accuracy"""'], {}), "('Training and validation cat accuracy')\n", (23399, 23439), True, 'import matplotlib.pyplot as plt\n'), ((23448, 23460), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23458, 23460), True, 'import matplotlib.pyplot as plt\n'), ((23609, 23618), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (23616, 23618), True, 'import matplotlib.pyplot as plt\n'), ((23628, 23693), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'train_top2_acc', '"""bo"""'], {'label': '"""Training top2 acc"""'}), "(epochs, train_top2_acc, 'bo', label='Training top2 acc')\n", (23636, 23693), True, 'import matplotlib.pyplot as plt\n'), ((23702, 23766), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_top2_acc', '"""b"""'], {'label': '"""Validation top2 acc"""'}), "(epochs, val_top2_acc, 'b', label='Validation top2 acc')\n", (23710, 23766), True, 'import matplotlib.pyplot as plt\n'), ((23775, 23825), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation top2 accuracy"""'], {}), "('Training and validation top2 accuracy')\n", (23784, 23825), True, 'import matplotlib.pyplot as plt\n'), ((23834, 23846), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23844, 23846), True, 'import matplotlib.pyplot as plt\n'), ((23996, 24005), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (24003, 24005), True, 'import matplotlib.pyplot as plt\n'), ((24015, 24080), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'train_top3_acc', '"""bo"""'], {'label': '"""Training top3 acc"""'}), "(epochs, train_top3_acc, 'bo', label='Training top3 acc')\n", (24023, 
24080), True, 'import matplotlib.pyplot as plt\n'), ((24089, 24153), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_top3_acc', '"""b"""'], {'label': '"""Validation top3 acc"""'}), "(epochs, val_top3_acc, 'b', label='Validation top3 acc')\n", (24097, 24153), True, 'import matplotlib.pyplot as plt\n'), ((24162, 24212), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation top3 accuracy"""'], {}), "('Training and validation top3 accuracy')\n", (24171, 24212), True, 'import matplotlib.pyplot as plt\n'), ((24221, 24233), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24231, 24233), True, 'import matplotlib.pyplot as plt\n'), ((24360, 24369), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (24367, 24369), True, 'import matplotlib.pyplot as plt\n'), ((24727, 24754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (24737, 24754), True, 'import matplotlib.pyplot as plt\n'), ((25612, 25662), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (25622, 25662), True, 'import matplotlib.pyplot as plt\n'), ((25671, 25687), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (25680, 25687), True, 'import matplotlib.pyplot as plt\n'), ((25696, 25710), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25708, 25710), True, 'import matplotlib.pyplot as plt\n'), ((25764, 25808), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (25774, 25808), True, 'import matplotlib.pyplot as plt\n'), ((25817, 25848), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (25827, 25848), True, 'import matplotlib.pyplot as plt\n'), ((26182, 26206), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (26192, 26206), 
True, 'import matplotlib.pyplot as plt\n'), ((26215, 26244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (26225, 26244), True, 'import matplotlib.pyplot as plt\n'), ((26253, 26271), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26269, 26271), True, 'import matplotlib.pyplot as plt\n'), ((26371, 26380), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26378, 26380), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1809), 'tensorflow.keras.applications.mobilenet.MobileNet', 'tf.keras.applications.mobilenet.MobileNet', ([], {'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, input_shape=(\n 224, 224, 3), pooling='avg')\n", (1743, 1809), True, 'import tensorflow as tf\n'), ((1928, 2039), 'tensorflow.keras.applications.mobilenet_v2.MobileNetV2', 'tf.keras.applications.mobilenet_v2.MobileNetV2', ([], {'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False,\n input_shape=(224, 224, 3), pooling='avg')\n", (1974, 2039), True, 'import tensorflow as tf\n'), ((2159, 2270), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'tf.keras.applications.inception_v3.InceptionV3', ([], {'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False,\n input_shape=(224, 224, 3), pooling='avg')\n", (2205, 2270), True, 'import tensorflow as tf\n'), ((2430, 2526), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'tf.keras.applications.mobilenet.preprocess_input'}), '(preprocessing_function=tf.keras.applications.mobilenet.\n preprocess_input)\n', (2448, 2526), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2605, 2704), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 
'tf.keras.applications.mobilenet_v2.preprocess_input'}), '(preprocessing_function=tf.keras.applications.\n mobilenet_v2.preprocess_input)\n', (2623, 2704), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2783, 2882), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'tf.keras.applications.inception_v3.preprocess_input'}), '(preprocessing_function=tf.keras.applications.\n inception_v3.preprocess_input)\n', (2801, 2882), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3233, 3258), 'os.path.exists', 'os.path.exists', (['image_dir'], {}), '(image_dir)\n', (3247, 3258), False, 'import os\n'), ((3342, 3412), 'tensorflow.logging.error', 'tf.logging.error', (['("Root path directory \'" + image_dir + "\' not found.")'], {}), '("Root path directory \'" + image_dir + "\' not found.")\n', (3358, 3412), True, 'import tensorflow as tf\n'), ((3513, 3542), 'os.path.join', 'os.path.join', (['image_dir', 'item'], {}), '(image_dir, item)\n', (3525, 3542), False, 'import os\n'), ((3746, 3771), 'os.path.basename', 'os.path.basename', (['sub_dir'], {}), '(sub_dir)\n', (3762, 3771), False, 'import os\n'), ((3847, 3906), 'tensorflow.logging.info', 'tf.logging.info', (['("Looking for images in \'" + dir_name + "\'")'], {}), '("Looking for images in \'" + dir_name + "\'")\n', (3862, 3906), True, 'import tensorflow as tf\n'), ((7174, 7197), 'os.path.exists', 'os.path.exists', (['aug_dir'], {}), '(aug_dir)\n', (7188, 7197), False, 'import os\n'), ((7211, 7228), 'os.mkdir', 'os.mkdir', (['aug_dir'], {}), '(aug_dir)\n', (7219, 7228), False, 'import os\n'), ((7361, 7397), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (7373, 7397), False, 'import os\n'), ((9372, 9401), 'os.path.join', 'os.path.join', (['aug_dir', 'folder'], {}), '(aug_dir, folder)\n', (9384, 9401), False, 'import os\n'), ((9431, 9447), 
'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (9441, 9447), False, 'import os\n'), ((10149, 10176), 'shutil.rmtree', 'shutil.rmtree', (['self.aug_dir'], {}), '(self.aug_dir)\n', (10162, 10176), False, 'import shutil\n'), ((10189, 10211), 'os.mkdir', 'os.mkdir', (['self.aug_dir'], {}), '(self.aug_dir)\n', (10197, 10211), False, 'import os\n'), ((10238, 10260), 'os.mkdir', 'os.mkdir', (['self.aug_dir'], {}), '(self.aug_dir)\n', (10246, 10260), False, 'import os\n'), ((10506, 10542), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (10518, 10542), False, 'import os\n'), ((10745, 10917), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(180)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.1)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rotation_range=180, width_shift_range=0.1,\n height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True,\n vertical_flip=True, fill_mode='nearest')\n", (10763, 10917), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((11059, 11095), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (11071, 11095), False, 'import os\n'), ((11405, 11441), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (11417, 11441), False, 'import os\n'), ((12132, 12166), 'os.path.join', 'os.path.join', (['self.aug_dir', 'folder'], {}), '(self.aug_dir, folder)\n', (12144, 12166), False, 'import os\n'), ((12479, 12513), 'os.path.join', 'os.path.join', (['self.val_dir', 'folder'], {}), '(self.val_dir, folder)\n', (12491, 12513), False, 'import os\n'), ((14972, 15005), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {'name': '"""do_akhir"""'}), "(dropout, name='do_akhir')\n", (14979, 15005), False, 'from tensorflow.keras.layers import 
Dense, Dropout, Conv2D, Activation\n'), ((15216, 15261), 'tensorflow.keras.layers.Dense', 'Dense', (['self.num_classes'], {'activation': '"""softmax"""'}), "(self.num_classes, activation='softmax')\n", (15221, 15261), False, 'from tensorflow.keras.layers import Dense, Dropout, Conv2D, Activation\n'), ((15614, 15627), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (15618, 15627), False, 'from tensorflow.keras.optimizers import Adam\n'), ((16366, 16383), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (16380, 16383), True, 'import numpy as np\n'), ((17650, 17692), 'pickle.dump', 'pickle.dump', (['self.history.history', 'file_pi'], {}), '(self.history.history, file_pi)\n', (17661, 17692), False, 'import pickle\n'), ((18094, 18162), 'os.path.join', 'os.path.join', (['self.base_dir', "('last_step_weight_' + self.name + '.h5')"], {}), "(self.base_dir, 'last_step_weight_' + self.name + '.h5')\n", (18106, 18162), False, 'import os\n'), ((18192, 18259), 'os.path.join', 'os.path.join', (['self.base_dir', "('last_step_model_' + self.name + '.h5')"], {}), "(self.base_dir, 'last_step_model_' + self.name + '.h5')\n", (18204, 18259), False, 'import os\n'), ((18401, 18450), 'os.path.join', 'os.path.join', (['self.base_dir', "('model_' + self.name)"], {}), "(self.base_dir, 'model_' + self.name)\n", (18413, 18450), False, 'import os\n'), ((20525, 20560), 'os.path.join', 'os.path.join', (['self.base_dir', 'folder'], {}), '(self.base_dir, folder)\n', (20537, 20560), False, 'import os\n'), ((20576, 20595), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (20589, 20595), False, 'import os\n'), ((25120, 25155), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'fname', 'dpi': 'f.dpi'}), '(fname=fname, dpi=f.dpi)\n', (25131, 25155), True, 'import matplotlib.pyplot as plt\n'), ((26292, 26361), 'os.path.join', 'os.path.join', (['self.base_dir', "('confusion_matrix ' + self.name + '.jpg')"], {}), "(self.base_dir, 
'confusion_matrix ' + self.name + '.jpg')\n", (26304, 26361), False, 'import os\n'), ((3555, 3576), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3565, 3576), False, 'import os\n'), ((3975, 4020), 'os.path.join', 'os.path.join', (['image_dir', 'dir_name', "('*.' + ext)"], {}), "(image_dir, dir_name, '*.' + ext)\n", (3987, 4020), False, 'import os\n'), ((4163, 4199), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""No files found"""'], {}), "('No files found')\n", (4181, 4199), True, 'import tensorflow as tf\n'), ((4365, 4401), 'os.path.join', 'os.path.join', (['self.val_dir', 'dir_name'], {}), '(self.val_dir, dir_name)\n', (4377, 4401), False, 'import os\n'), ((4529, 4567), 'os.path.join', 'os.path.join', (['self.train_dir', 'dir_name'], {}), '(self.train_dir, dir_name)\n', (4541, 4567), False, 'import os\n'), ((4761, 4798), 'os.path.join', 'os.path.join', (['self.test_dir', 'dir_name'], {}), '(self.test_dir, dir_name)\n', (4773, 4798), False, 'import os\n'), ((5080, 5107), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (5096, 5107), False, 'import os\n'), ((5202, 5238), 're.sub', 're.sub', (['"""_nohash_.*$"""', '""""""', 'file_name'], {}), "('_nohash_.*$', '', file_name)\n", (5208, 5238), False, 'import re\n'), ((7297, 7333), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (7309, 7333), False, 'import os\n'), ((7492, 7532), 'os.path.exists', 'os.path.exists', (["(folder_path_aug + '_aug')"], {}), "(folder_path_aug + '_aug')\n", (7506, 7532), False, 'import os\n'), ((7550, 7584), 'os.mkdir', 'os.mkdir', (["(folder_path_aug + '_aug')"], {}), "(folder_path_aug + '_aug')\n", (7558, 7584), False, 'import os\n'), ((7626, 7662), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (7638, 7662), False, 'import os\n'), ((7828, 7863), 'os.path.join', 'os.path.join', (['save_path', 'sub_folder'], {}), '(save_path, 
sub_folder)\n', (7840, 7863), False, 'import os\n'), ((7898, 7935), 'os.path.join', 'os.path.join', (['folder_path', 'sub_folder'], {}), '(folder_path, sub_folder)\n', (7910, 7935), False, 'import os\n'), ((8211, 8383), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(180)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.1)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rotation_range=180, width_shift_range=0.1,\n height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True,\n vertical_flip=True, fill_mode='nearest')\n", (8229, 8383), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((8561, 8597), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (8573, 8597), False, 'import os\n'), ((8697, 8731), 'os.path.join', 'os.path.join', (['path_dir', 'sub_folder'], {}), '(path_dir, sub_folder)\n', (8709, 8731), False, 'import os\n'), ((9476, 9505), 'os.path.join', 'os.path.join', (['path', 'subfolder'], {}), '(path, subfolder)\n', (9488, 9505), False, 'import os\n'), ((10372, 10408), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (10384, 10408), False, 'import os\n'), ((10642, 10673), 'os.path.exists', 'os.path.exists', (['folder_path_aug'], {}), '(folder_path_aug)\n', (10656, 10673), False, 'import os\n'), ((10691, 10716), 'os.mkdir', 'os.mkdir', (['folder_path_aug'], {}), '(folder_path_aug)\n', (10699, 10716), False, 'import os\n'), ((11200, 11236), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (11212, 11236), False, 'import os\n'), ((11752, 11771), 'os.listdir', 'os.listdir', (['ini_dir'], {}), '(ini_dir)\n', (11762, 11771), False, 'import os\n'), ((11834, 11881), 'numpy.ceil', 'np.ceil', (['((num_img_aug - num_files) / batch_size)'], {}), '((num_img_aug - 
num_files) / batch_size)\n', (11841, 11881), True, 'import numpy as np\n'), ((12393, 12409), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12403, 12409), False, 'import os\n'), ((12738, 12754), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12748, 12754), False, 'import os\n'), ((17559, 17618), 'os.path.join', 'os.path.join', (['self.base_dir', "('trainHistoryDict' + self.name)"], {}), "(self.base_dir, 'trainHistoryDict' + self.name)\n", (17571, 17618), False, 'import os\n'), ((17893, 17961), 'os.path.join', 'os.path.join', (['self.base_dir', "('last_step_model' + self.name + '.json')"], {}), "(self.base_dir, 'last_step_model' + self.name + '.json')\n", (17905, 17961), False, 'import os\n'), ((20631, 20647), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (20641, 20647), False, 'import os\n'), ((21211, 21249), 'os.path.join', 'os.path.join', (['self.base_dir', '"""val_dir"""'], {}), "(self.base_dir, 'val_dir')\n", (21223, 21249), False, 'import os\n'), ((23128, 23213), 'os.path.join', 'os.path.join', (['self.base_dir', "('Training and validation loss ' + self.name + '.jpg')"], {}), "(self.base_dir, 'Training and validation loss ' + self.name +\n '.jpg')\n", (23140, 23213), False, 'import os\n'), ((23510, 23604), 'os.path.join', 'os.path.join', (['self.base_dir', "('Training and validation cat accuracy ' + self.name + '.jpg')"], {}), "(self.base_dir, 'Training and validation cat accuracy ' + self.\n name + '.jpg')\n", (23522, 23604), False, 'import os\n'), ((23896, 23991), 'os.path.join', 'os.path.join', (['self.base_dir', "('Training and validation top2 accuracy ' + self.name + '.jpg')"], {}), "(self.base_dir, 'Training and validation top2 accuracy ' + self\n .name + '.jpg')\n", (23908, 23991), False, 'import os\n'), ((24260, 24355), 'os.path.join', 'os.path.join', (['self.base_dir', "('Training and validation top3 accuracy ' + self.name + '.jpg')"], {}), "(self.base_dir, 'Training and validation top3 accuracy ' + self\n .name + 
'.jpg')\n", (24272, 24355), False, 'import os\n'), ((3642, 3661), 'os.path.isdir', 'os.path.isdir', (['item'], {}), '(item)\n', (3655, 3661), False, 'import os\n'), ((4054, 4075), 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['file_glob'], {}), '(file_glob)\n', (4064, 4075), False, 'from tensorflow.python.platform import gfile\n'), ((4425, 4452), 'os.path.exists', 'os.path.exists', (['val_sub_dir'], {}), '(val_sub_dir)\n', (4439, 4452), False, 'import os\n'), ((4474, 4495), 'os.mkdir', 'os.mkdir', (['val_sub_dir'], {}), '(val_sub_dir)\n', (4482, 4495), False, 'import os\n'), ((4591, 4620), 'os.path.exists', 'os.path.exists', (['train_sub_dir'], {}), '(train_sub_dir)\n', (4605, 4620), False, 'import os\n'), ((4642, 4665), 'os.mkdir', 'os.mkdir', (['train_sub_dir'], {}), '(train_sub_dir)\n', (4650, 4665), False, 'import os\n'), ((4822, 4850), 'os.path.exists', 'os.path.exists', (['test_sub_dir'], {}), '(test_sub_dir)\n', (4836, 4850), False, 'import os\n'), ((4872, 4894), 'os.mkdir', 'os.mkdir', (['test_sub_dir'], {}), '(test_sub_dir)\n', (4880, 4894), False, 'import os\n'), ((5718, 5753), 'shutil.copy', 'shutil.copy', (['file_name', 'val_sub_dir'], {}), '(file_name, val_sub_dir)\n', (5729, 5753), False, 'import shutil\n'), ((8067, 8092), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (8081, 8092), False, 'import os\n'), ((8114, 8133), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (8122, 8133), False, 'import os\n'), ((8635, 8669), 'os.path.join', 'os.path.join', (['path_dir', 'sub_folder'], {}), '(path_dir, sub_folder)\n', (8647, 8669), False, 'import os\n'), ((9068, 9087), 'os.listdir', 'os.listdir', (['ini_dir'], {}), '(ini_dir)\n', (9078, 9087), False, 'import os\n'), ((9158, 9205), 'numpy.ceil', 'np.ceil', (['((num_img_aug - num_files) / batch_size)'], {}), '((num_img_aug - num_files) / batch_size)\n', (9165, 9205), True, 'import numpy as np\n'), ((10435, 10471), 'os.path.join', 'os.path.join', 
(['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (10447, 10471), False, 'import os\n'), ((20680, 20710), 'os.path.join', 'os.path.join', (['path', 'sub_folder'], {}), '(path, sub_folder)\n', (20692, 20710), False, 'import os\n'), ((24597, 24610), 'numpy.array', 'np.array', (['ims'], {}), '(ims)\n', (24605, 24610), True, 'import numpy as np\n'), ((4695, 4727), 'os.path.join', 'os.path.join', (['train_sub_dir', '"""n"""'], {}), "(train_sub_dir, 'n')\n", (4707, 4727), False, 'import os\n'), ((5626, 5662), 'os.path.join', 'os.path.join', (['val_sub_dir', 'base_name'], {}), '(val_sub_dir, base_name)\n', (5638, 5662), False, 'import os\n'), ((6080, 6116), 'shutil.copy', 'shutil.copy', (['file_name', 'test_sub_dir'], {}), '(file_name, test_sub_dir)\n', (6091, 6116), False, 'import shutil\n'), ((6385, 6430), 'shutil.copy', 'shutil.copy', (['file_name', "(train_sub_dir + '\\\\n')"], {}), "(file_name, train_sub_dir + '\\\\n')\n", (6396, 6430), False, 'import shutil\n'), ((7688, 7724), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (7700, 7724), False, 'import os\n'), ((9692, 9712), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (9702, 9712), False, 'import os\n'), ((11279, 11315), 'os.path.join', 'os.path.join', (['self.train_dir', 'folder'], {}), '(self.train_dir, folder)\n', (11291, 11315), False, 'import os\n'), ((12224, 12240), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12234, 12240), False, 'import os\n'), ((12571, 12587), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12581, 12587), False, 'import os\n'), ((16440, 16472), 'numpy.random.rand', 'np.random.rand', (['self.num_classes'], {}), '(self.num_classes)\n', (16454, 16472), True, 'import numpy as np\n'), ((5288, 5314), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['hash_name'], {}), '(hash_name)\n', (5303, 5314), False, 'from tensorflow.python.util import compat\n'), ((5987, 6024), 
'os.path.join', 'os.path.join', (['test_sub_dir', 'base_name'], {}), '(test_sub_dir, base_name)\n', (5999, 6024), False, 'import os\n'), ((6291, 6329), 'os.path.join', 'os.path.join', (['train_sub_dir', 'base_name'], {}), '(train_sub_dir, base_name)\n', (6303, 6329), False, 'import os\n'), ((9567, 9587), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (9577, 9587), False, 'import os\n'), ((9802, 9822), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (9812, 9822), False, 'import os\n'), ((12316, 12332), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12326, 12332), False, 'import os\n'), ((12663, 12679), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12673, 12679), False, 'import os\n'), ((21128, 21148), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (21138, 21148), False, 'import os\n'), ((20935, 20955), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (20945, 20955), False, 'import os\n'), ((21045, 21065), 'os.listdir', 'os.listdir', (['sub_path'], {}), '(sub_path)\n', (21055, 21065), False, 'import os\n')] |
import numpy as np
import paddle
import paddle.nn as nn
from custom_setup_ops import custom_relu
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class Net(nn.Layer):
"""
A simple example for Regression Model.
"""
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(IMAGE_SIZE, 100)
self.fc2 = nn.Linear(100, CLASS_NUM)
def forward(self, x):
tmp1 = self.fc1(x)
# call custom relu op
tmp_out = custom_relu(tmp1)
tmp2 = self.fc2(tmp_out)
# call custom relu op
out = custom_relu(tmp2)
return out
# create network
net = Net()
loss_fn = nn.CrossEntropyLoss()
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = net(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# save inference model
path = "custom_relu_dynamic/net"
paddle.jit.save(net, path,
input_spec=[paddle.static.InputSpec(shape=[None, 784], dtype='float32')])
| [
"paddle.nn.Linear",
"paddle.nn.CrossEntropyLoss",
"custom_setup_ops.custom_relu",
"numpy.random.random",
"numpy.random.randint",
"paddle.io.DataLoader",
"paddle.static.InputSpec"
] | [((1083, 1104), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1102, 1104), True, 'import paddle.nn as nn\n'), ((1259, 1360), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': '(2)'}), '(dataset, batch_size=BATCH_SIZE, shuffle=True,\n drop_last=True, num_workers=2)\n', (1279, 1360), False, 'import paddle\n'), ((737, 763), 'paddle.nn.Linear', 'nn.Linear', (['IMAGE_SIZE', '(100)'], {}), '(IMAGE_SIZE, 100)\n', (746, 763), True, 'import paddle.nn as nn\n'), ((783, 808), 'paddle.nn.Linear', 'nn.Linear', (['(100)', 'CLASS_NUM'], {}), '(100, CLASS_NUM)\n', (792, 808), True, 'import paddle.nn as nn\n'), ((911, 928), 'custom_setup_ops.custom_relu', 'custom_relu', (['tmp1'], {}), '(tmp1)\n', (922, 928), False, 'from custom_setup_ops import custom_relu\n'), ((1006, 1023), 'custom_setup_ops.custom_relu', 'custom_relu', (['tmp2'], {}), '(tmp2)\n', (1017, 1023), False, 'from custom_setup_ops import custom_relu\n'), ((1815, 1874), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 784]', 'dtype': '"""float32"""'}), "(shape=[None, 784], dtype='float32')\n", (1838, 1874), False, 'import paddle\n'), ((367, 397), 'numpy.random.random', 'np.random.random', (['[IMAGE_SIZE]'], {}), '([IMAGE_SIZE])\n', (383, 397), True, 'import numpy as np\n'), ((432, 473), 'numpy.random.randint', 'np.random.randint', (['(0)', '(CLASS_NUM - 1)', '(1,)'], {}), '(0, CLASS_NUM - 1, (1,))\n', (449, 473), True, 'import numpy as np\n')] |
import logging as log
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import json
from report_generator import metrics
from report_generator.road_profiler import RoadProfiler
from shapely.geometry import LineString, Point
from shapely.geometry.polygon import Polygon
from shapely.ops import cascaded_union
from descartes import PolygonPatch
import traceback
from math import pi, atan2, sqrt, pow
LABEL = 5
def compute_right_polyline(middle, right) -> LineString:
return LineString([((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) for p1, p2 in
zip(middle, right)])
def _polygon_from_geometry(road_geometry, left_side='left', right_side='right'):
left_edge_x = np.array([e[left_side][0] for e in road_geometry])
left_edge_y = np.array([e[left_side][1] for e in road_geometry])
right_edge_x = np.array([e[right_side][0] for e in road_geometry])
right_edge_y = np.array([e[right_side][1] for e in road_geometry])
right_edge = LineString(zip(right_edge_x[::-1], right_edge_y[::-1]))
left_edge = LineString(zip(left_edge_x, left_edge_y))
l_edge = left_edge.coords
r_edge = right_edge.coords
return Polygon(list(l_edge) + list(r_edge))
class Sample:
def __init__(self):
self.id = None
self.tool = None
self.misbehaviour = False
self.run = None
self.timestamp = None
self.elapsed = None
self.features = {}
self.is_valid = True
self.valid_according_to = None
# TODO Maybe make this an abstract method?
def is_misbehavior(self):
return self.misbehaviour
def get_value(self, feature_name):
if feature_name in self.features.keys():
return self.features[feature_name]
else:
return None
@staticmethod
def from_dict(the_dict):
sample = Sample()
for k in sample.__dict__.keys():
setattr(sample, k, None if k not in the_dict.keys() else the_dict[k])
return sample
class BeamNGSample(Sample):
# At which radius we interpret a tuns as a straight?
# MAX_MIN_RADIUS = 200
MAX_MIN_RADIUS = 170
def __init__(self, basepath):
super(BeamNGSample, self).__init__()
self.basepath = basepath
self.road_nodes = None
self.simulation_states = None
def visualize_misbehaviour(self):
# THIS IS THE CODE FOR OOB
# Create the road geometry from the nodes. At this point nodes have been reversed alredy if needed.
road_geometry = metrics.get_geometry(self.road_nodes)
road_left_edge_x = np.array([e['left'][0] for e in road_geometry])
road_left_edge_y = np.array([e['left'][1] for e in road_geometry])
left_edge_x = np.array([e['middle'][0] for e in road_geometry])
left_edge_y = np.array([e['middle'][1] for e in road_geometry])
right_edge_x = np.array([e['right'][0] for e in road_geometry])
right_edge_y = np.array([e['right'][1] for e in road_geometry])
# Create the road polygon from the geometry
right_edge_road = LineString(zip(right_edge_x[::-1], right_edge_y[::-1]))
left_edge_road = LineString(zip(road_left_edge_x, road_left_edge_y))
l_edge_road = left_edge_road.coords
r_edge_road = right_edge_road.coords
road_polygon = Polygon(list(l_edge_road) + list(r_edge_road))
# Plot the road
plt.gca().add_patch(PolygonPatch(road_polygon, fc='gray', alpha=0.5, zorder=2 ))
# Create the right lane polygon from the geometry
# Note that one must be in reverse order for the polygon to close correctly
right_edge = LineString(zip(right_edge_x[::-1], right_edge_y[::-1]))
left_edge = LineString(zip(left_edge_x, left_edge_y))
l_edge = left_edge.coords
r_edge = right_edge.coords
right_lane_polygon = Polygon(list(l_edge) + list(r_edge))
# TODO Plot road as well to understand if this is exactly the side we thing it is
plt.plot(*right_lane_polygon.exterior.xy, color='gray')
# Plot all the observations in trasparent green except the OOB
for position in [Point(sample["pos"][0], sample["pos"][1]) for sample in self.simulation_states]:
if right_lane_polygon.contains(position):
plt.plot(position.x, position.y, 'o', color='green', alpha=0.2)
else:
plt.plot(position.x, position.y, 'o', color='red', alpha=1.0)
plt.gca().set_aspect('equal')
def _resampling(self, sample_nodes, dist=1.5):
new_sample_nodes = []
dists = []
for i in range(1, len(sample_nodes)):
x0 = sample_nodes[i - 1][0]
x1 = sample_nodes[i][0]
y0 = sample_nodes[i - 1][1]
y1 = sample_nodes[i][1]
d = sqrt(pow((x1 - x0), 2) + pow((y1 - y0), 2))
dists.append(d)
if d >= dist:
dt = dist
new_sample_nodes.append([x0, y0, -28.0, 8.0])
while dt <= d - dist:
t = dt / d
xt = ((1 - t) * x0 + t * x1)
yt = ((1 - t) * y0 + t * y1)
new_sample_nodes.append([xt, yt, -28.0, 8.0])
dt = dt + dist
new_sample_nodes.append([x1, y1, -28.0, 8.0])
else:
new_sample_nodes.append([x0, y0, -28.0, 8.0])
new_sample_nodes.append([x1, y1, -28.0, 8.0])
points_x = []
points_y = []
final_nodes = list()
# discard the Repetitive points
for i in range(1, len(new_sample_nodes)):
if new_sample_nodes[i] != new_sample_nodes[i - 1]:
final_nodes.append(new_sample_nodes[i])
points_x.append(new_sample_nodes[i][0])
points_y.append(new_sample_nodes[i][1])
return final_nodes
def compute_input_metrics(self, resampled_road_nodes):
# Input features
self.features["min_radius"] = metrics.capped_min_radius(self.MAX_MIN_RADIUS, resampled_road_nodes)
self.features["segment_count"] = metrics.segment_count(resampled_road_nodes)
self.features["direction_coverage"] = metrics.direction_coverage(resampled_road_nodes)
def compute_output_metrics(self, simulation_states):
# Output features
self.features["sd_steering"] = metrics.sd_steering(simulation_states)
#self.features["mean_lateral_position"] = metrics.mean_absolute_lateral_position(simulation_states)
road_geometry = metrics.get_geometry(self.road_nodes)
middle = [e['middle'] for e in road_geometry]
right = [e['right'] for e in road_geometry]
middle_points = [(p[0], p[1]) for p in middle]
right_points = [(p[0], p[1]) for p in right]
right_polyline = compute_right_polyline(middle_points, right_points)
# road_spine = LineString(middle_points)
# road_polygon = _polygon_from_geometry(road_geometry)
#
# # Plot road
# plt.plot(*road_polygon.exterior.xy)
# # Plot centeral spine
# plt.plot(*road_spine.xy, "r-")
#
# # LineString
# plt.plot(*right_polyline.xy)
# positions = [ (state["pos"][0], state["pos"][1]) for state in simulation_states]
#
# for s in positions:
# plt.plot(s[0], s[1], 'ob')
# pass
#for state in segment.simulation_states:
# dist = oob_distance(state["pos"], right_poly)
# dist2 = state["oob_distance"]
# assert (dist == dist2)
self.features["mean_lateral_position"] = metrics.mean_lateral_position(simulation_states, right_polyline)
def to_dict(self):
"""
This is common for all the BeamNG samples
"""
return {'id': self.id,
'is_valid': self.is_valid,
'valid_according_to': self.valid_according_to,
'misbehaviour': self.is_misbehavior(),
'elapsed': self.elapsed,
'timestamp': self.timestamp,
'min_radius': self.get_value("min_radius"),
'segment_count': self.get_value("segment_count"),
'direction_coverage': self.get_value("direction_coverage"),
'sd_steering': self.get_value("sd_steering"),
'mean_lateral_position': self.get_value("mean_lateral_position"),
'tool': self.tool,
'run': self.run,
'features': self.features}
def dump(self):
data = self.to_dict()
filedest = os.path.join(os.path.dirname(self.basepath), "info_" + str(self.id) + ".json")
with open(filedest, 'w') as f:
(json.dump(data, f, sort_keys=True, indent=4))
class DeepHyperionBngSample(BeamNGSample):
"""
We need to extract basepath from the simulation_full_path because there are many simulation files in the same
folder
"""
def __init__(self, simulation_full_path):
super(DeepHyperionBngSample, self).__init__(simulation_full_path)
with open(simulation_full_path) as jf:
simulation_data = json.load(jf)
# The nodes defining the road
road_nodes = simulation_data["road"]["nodes"]
simulation_states = simulation_data["records"]
if len(simulation_states) > 0:
self.id = simulation_data["info"]["id"]
self.tool = "DeepHyperionBeamNG"
# Make sure we define this
self.road_nodes = road_nodes
self.simulation_states = simulation_states
# If the Driver reached the end of the road the test did not fail, so there was no misbehaviour
# We care about only the boolean, not the entire thingy
self.misbehaviour, _ = metrics.is_oob(road_nodes, simulation_states)
self.timestamp = simulation_data["info"]["start_time"]
# TODO Elapsed is missing we can have duration using end_time, but not time since the experiment is started
# self.elapsed = json_data["elapsed"]
# TODO This one is missing
# self.run = json_data["run"]
# Resample the nodes before computing the metrics
resampled_road_nodes = self._resampling(road_nodes)
# Compute all the metrics
self.compute_input_metrics(resampled_road_nodes)
self.compute_output_metrics(self.simulation_states)
# Store to file besides the input json
self.dump()
class DeepJanusBngSample(BeamNGSample):
"""
We need to extract basepath from the simulation_full_path because there are many simulation files in the same
folder
"""
def __init__(self, simulation_full_path):
super(DeepJanusBngSample, self).__init__(simulation_full_path)
with open(simulation_full_path) as jf:
simulation_data = json.load(jf)
# The nodes defining the road
road_nodes = simulation_data["road"]["nodes"]
simulation_states = simulation_data["records"]
self.id = simulation_data["info"]["id"]
self.tool = "DeepJanusBeamNG" # We need to qualify this somehow to avoid errors in map generation
# Make sure we define this
self.road_nodes = road_nodes
self.simulation_states = simulation_states
# If the Driver reached the end of the road the test did not fail, so there was no misbehaviour
self.misbehaviour, _ = metrics.is_oob(road_nodes, simulation_states)
self.timestamp = simulation_data["info"]["start_time"]
# TODO Elapsed is missing we can have duration using end_time, but not time since the experiment is started
# self.elapsed = json_data["elapsed"]
# TODO This one is missing
# self.run = json_data["run"]
# Resample the nodes before computing the metrics
resampled_road_nodes = self._resampling(road_nodes)
# Compute all the metrics
self.compute_input_metrics(resampled_road_nodes)
self.compute_output_metrics(self.simulation_states)
# Store to file besides the input json
self.dump()
| [
"report_generator.metrics.is_oob",
"json.dump",
"shapely.geometry.Point",
"descartes.PolygonPatch",
"json.load",
"report_generator.metrics.sd_steering",
"matplotlib.pyplot.plot",
"math.pow",
"matplotlib.pyplot.gca",
"report_generator.metrics.get_geometry",
"os.path.dirname",
"report_generator.... | [((723, 773), 'numpy.array', 'np.array', (['[e[left_side][0] for e in road_geometry]'], {}), '([e[left_side][0] for e in road_geometry])\n', (731, 773), True, 'import numpy as np\n'), ((792, 842), 'numpy.array', 'np.array', (['[e[left_side][1] for e in road_geometry]'], {}), '([e[left_side][1] for e in road_geometry])\n', (800, 842), True, 'import numpy as np\n'), ((862, 913), 'numpy.array', 'np.array', (['[e[right_side][0] for e in road_geometry]'], {}), '([e[right_side][0] for e in road_geometry])\n', (870, 913), True, 'import numpy as np\n'), ((933, 984), 'numpy.array', 'np.array', (['[e[right_side][1] for e in road_geometry]'], {}), '([e[right_side][1] for e in road_geometry])\n', (941, 984), True, 'import numpy as np\n'), ((2558, 2595), 'report_generator.metrics.get_geometry', 'metrics.get_geometry', (['self.road_nodes'], {}), '(self.road_nodes)\n', (2578, 2595), False, 'from report_generator import metrics\n'), ((2624, 2671), 'numpy.array', 'np.array', (["[e['left'][0] for e in road_geometry]"], {}), "([e['left'][0] for e in road_geometry])\n", (2632, 2671), True, 'import numpy as np\n'), ((2699, 2746), 'numpy.array', 'np.array', (["[e['left'][1] for e in road_geometry]"], {}), "([e['left'][1] for e in road_geometry])\n", (2707, 2746), True, 'import numpy as np\n'), ((2770, 2819), 'numpy.array', 'np.array', (["[e['middle'][0] for e in road_geometry]"], {}), "([e['middle'][0] for e in road_geometry])\n", (2778, 2819), True, 'import numpy as np\n'), ((2842, 2891), 'numpy.array', 'np.array', (["[e['middle'][1] for e in road_geometry]"], {}), "([e['middle'][1] for e in road_geometry])\n", (2850, 2891), True, 'import numpy as np\n'), ((2915, 2963), 'numpy.array', 'np.array', (["[e['right'][0] for e in road_geometry]"], {}), "([e['right'][0] for e in road_geometry])\n", (2923, 2963), True, 'import numpy as np\n'), ((2987, 3035), 'numpy.array', 'np.array', (["[e['right'][1] for e in road_geometry]"], {}), "([e['right'][1] for e in 
road_geometry])\n", (2995, 3035), True, 'import numpy as np\n'), ((4043, 4098), 'matplotlib.pyplot.plot', 'plt.plot', (['*right_lane_polygon.exterior.xy'], {'color': '"""gray"""'}), "(*right_lane_polygon.exterior.xy, color='gray')\n", (4051, 4098), True, 'import matplotlib.pyplot as plt\n'), ((6066, 6134), 'report_generator.metrics.capped_min_radius', 'metrics.capped_min_radius', (['self.MAX_MIN_RADIUS', 'resampled_road_nodes'], {}), '(self.MAX_MIN_RADIUS, resampled_road_nodes)\n', (6091, 6134), False, 'from report_generator import metrics\n'), ((6176, 6219), 'report_generator.metrics.segment_count', 'metrics.segment_count', (['resampled_road_nodes'], {}), '(resampled_road_nodes)\n', (6197, 6219), False, 'from report_generator import metrics\n'), ((6266, 6314), 'report_generator.metrics.direction_coverage', 'metrics.direction_coverage', (['resampled_road_nodes'], {}), '(resampled_road_nodes)\n', (6292, 6314), False, 'from report_generator import metrics\n'), ((6438, 6476), 'report_generator.metrics.sd_steering', 'metrics.sd_steering', (['simulation_states'], {}), '(simulation_states)\n', (6457, 6476), False, 'from report_generator import metrics\n'), ((6610, 6647), 'report_generator.metrics.get_geometry', 'metrics.get_geometry', (['self.road_nodes'], {}), '(self.road_nodes)\n', (6630, 6647), False, 'from report_generator import metrics\n'), ((7706, 7770), 'report_generator.metrics.mean_lateral_position', 'metrics.mean_lateral_position', (['simulation_states', 'right_polyline'], {}), '(simulation_states, right_polyline)\n', (7735, 7770), False, 'from report_generator import metrics\n'), ((11593, 11638), 'report_generator.metrics.is_oob', 'metrics.is_oob', (['road_nodes', 'simulation_states'], {}), '(road_nodes, simulation_states)\n', (11607, 11638), False, 'from report_generator import metrics\n'), ((3463, 3521), 'descartes.PolygonPatch', 'PolygonPatch', (['road_polygon'], {'fc': '"""gray"""', 'alpha': '(0.5)', 'zorder': '(2)'}), "(road_polygon, fc='gray', 
alpha=0.5, zorder=2)\n", (3475, 3521), False, 'from descartes import PolygonPatch\n'), ((4196, 4237), 'shapely.geometry.Point', 'Point', (["sample['pos'][0]", "sample['pos'][1]"], {}), "(sample['pos'][0], sample['pos'][1])\n", (4201, 4237), False, 'from shapely.geometry import LineString, Point\n'), ((8693, 8723), 'os.path.dirname', 'os.path.dirname', (['self.basepath'], {}), '(self.basepath)\n', (8708, 8723), False, 'import os\n'), ((8811, 8855), 'json.dump', 'json.dump', (['data', 'f'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data, f, sort_keys=True, indent=4)\n', (8820, 8855), False, 'import json\n'), ((9249, 9262), 'json.load', 'json.load', (['jf'], {}), '(jf)\n', (9258, 9262), False, 'import json\n'), ((9896, 9941), 'report_generator.metrics.is_oob', 'metrics.is_oob', (['road_nodes', 'simulation_states'], {}), '(road_nodes, simulation_states)\n', (9910, 9941), False, 'from report_generator import metrics\n'), ((11016, 11029), 'json.load', 'json.load', (['jf'], {}), '(jf)\n', (11025, 11029), False, 'import json\n'), ((3443, 3452), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((4347, 4410), 'matplotlib.pyplot.plot', 'plt.plot', (['position.x', 'position.y', '"""o"""'], {'color': '"""green"""', 'alpha': '(0.2)'}), "(position.x, position.y, 'o', color='green', alpha=0.2)\n", (4355, 4410), True, 'import matplotlib.pyplot as plt\n'), ((4445, 4506), 'matplotlib.pyplot.plot', 'plt.plot', (['position.x', 'position.y', '"""o"""'], {'color': '"""red"""', 'alpha': '(1.0)'}), "(position.x, position.y, 'o', color='red', alpha=1.0)\n", (4453, 4506), True, 'import matplotlib.pyplot as plt\n'), ((4516, 4525), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4523, 4525), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4883), 'math.pow', 'pow', (['(x1 - x0)', '(2)'], {}), '(x1 - x0, 2)\n', (4871, 4883), False, 'from math import pi, atan2, sqrt, pow\n'), ((4888, 4903), 'math.pow', 'pow', (['(y1 - 
y0)', '(2)'], {}), '(y1 - y0, 2)\n', (4891, 4903), False, 'from math import pi, atan2, sqrt, pow\n')] |
import matplotlib.pyplot as plt
import os
import numpy as np
import matplotlib
from matplotlib import ticker
from scipy import optimize as op
from scipy import stats as st
class Timer:
def __init__(self, var):
font = {'size' :40}
matplotlib.rc('font', **font)
self.lw=3
self.var=var
self.cases=self.get_cases()
self.detailed_case='13200'
self.table_data=self.get_data()
self.plot_prep_table()
self.plot_proc_table()
self.plot_detailed_case()
def get_cases(self):
var=self.var
cases=[]
for key in var.keys():
cases.append(key.split('_')[1])
cases=np.unique(np.array(cases).astype(int)).astype(str)
return cases
def get_data(self):
var=self.var
all_ords=[]
times_ms=[]
times_ms_nu=[]
times_fs=[]
table_data={}
for case in self.cases:
fs_times=var['fs_'+case]*0.8/0.15
ms_prep=var['prep_'+case][:-1]
ms_proc=var['proc_'+case][:-1]*0.8/0.15
#################
# n1_adm=np.load('results/n1_adm.npy')[:-1]
# ms_solve=np.interp(np.linspace(0,len(n1_adm),len(ms_proc)),np.arange(len(n1_adm)),n1_adm)
# tsolve=(0.8/0.15)*0.0000045*(ms_solve*int(case)/(100))**1.997/len(fs_times)
# ms_proc[:,4]=tsolve
# ###################
ms_prep=np.concatenate([ms_prep,np.array([0.07*ms_prep[2]**1.75,0.0009*ms_prep[0]**1.0037,0.01*ms_prep[0]**1.0052,0.27*ms_prep[1]**1.013])])
ms_prep[2]=ms_prep[2]**1.777
t1=np.array([np.linspace(0,0.3*ms_prep[1],len(ms_proc))]).T #recumpute velocity
t2=np.array([0.007*ms_proc[:,1]]).T #update_sat
ms_proc=np.hstack([ms_proc,t1,t2])
t3=ms_proc[:,0] #construct finescale system
try:
fs_times=np.vstack([t3[0:len(fs_times)],fs_times,t2.T[0][0:len(fs_times)]]).T
except:
pass
#################
ms_proc[:,0]=ms_proc[:,0]**0.79
ms_proc[:,3]=ms_proc[:,3]**0.76
ms_proc[:,5]=ms_proc[:,5]**0.58
n1_adm=np.load('results/n1_adm.npy')[:-1]
ms_solve=np.interp(np.linspace(0,len(n1_adm),len(ms_proc)),np.arange(len(n1_adm)),n1_adm)
# tsolve=(0.8/0.15)*0.0000045*(ms_solve*int(case)/(100))**1.997/len(fs_times)
tsolve=0.0001028*(ms_solve*int(case)/(100))**1.648/len(fs_times)
if case==self.detailed_case:
ms_proc[:,4]**=0.9
self.ms_proc_ams=ms_proc.copy()
ms_proc[:,4]=tsolve
if case==self.detailed_case:
self.ms_prep=ms_prep.copy()
self.ms_proc_adm=ms_proc.copy()
self.fs_times=fs_times.copy()
###################
# self.table_data[case]=[ms_prep,ms_proc.sum(axis=0),fs_times.sum(axis=0)]
times_ms.append(ms_prep.sum()+ms_proc.sum())
#################
n1_adm=np.load('results/n1_adm_nuadm.npy')[:-1]
ms_solve=np.interp(np.linspace(0,len(n1_adm),len(ms_proc)),np.arange(len(n1_adm)),n1_adm)
tsolve=0.0001028*(ms_solve*int(case)/(100))**1.648/len(fs_times)
ms_proc[:,4]=tsolve
###################
if case==self.detailed_case:
self.ms_proc_nu=ms_proc.copy()
table_data[case]=[ms_prep,ms_proc.sum(axis=0),fs_times.sum(axis=0)]
times_ms_nu.append(ms_prep.sum()+ms_proc.sum())
alpha=1.0
times_fs.append(fs_times.sum()*alpha)
vpi_proc=np.linspace(0,0.8,len(ms_proc))*100
vpi_proc_fs=np.linspace(0,0.8,len(fs_times))*100
# import pdb; pdb.set_trace()
cum_fs=np.cumsum(fs_times.sum(axis=1))
# all_ords.append(cum_ms)
# all_ords.append(cum_fs)
# import pdb; pdb.set_trace()
ts=np.load('time_steps.npy')
ts=np.concatenate([ts[30:],-np.sort(-ts)[:-60]])
vpi_norm=80*np.cumsum(ts)/ts.sum()
inds=np.linspace(0,len(ms_proc),len(ts))
if case==self.detailed_case:
self.ms_vpi=np.interp(np.arange(len(ms_proc)),inds,vpi_norm)
inds=np.linspace(0,len(fs_times),len(ts))
self.fs_vpi=np.interp(np.arange(len(fs_times)),inds,vpi_norm)
return table_data
def plot_prep_table(self):
lines=['Step 1', 'Step 2', 'Step 3', 'Step 4', 'Step 5', 'Step 6', 'Step 7', 'Total']
cols=np.concatenate([self.cases,['a','b']])
data=[]
for key in self.cases:
data.append(self.table_data[key][0])
data=np.vstack(data).T
data=np.vstack([data,data.sum(axis=0)])
data=self.get_table_regression(data)
the_table = plt.table(cellText=data, rowLabels=lines,colLabels=cols)
the_table.auto_set_font_size(False)
the_table.set_fontsize(24)
the_table.scale(4, 4)
for pos in ['right','top','bottom','left']:
plt.gca().spines[pos].set_visible(False)
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
plt.savefig('results/table_prep.svg', bbox_inches='tight', transparent=True)
def plot_proc_table(self):
plt.close('all')
lines=['Step 1', 'Step 2', 'Step 3', 'Step 4', 'Step 5', 'Step 6', 'Step 7', 'Total']
cols=np.concatenate([self.cases,['a','b']])
data=[]
for key in self.cases:
data.append(self.table_data[key][1])
data=np.vstack(data).T
data=np.vstack([data,data.sum(axis=0)])
data=self.get_table_regression(data)
the_table = plt.table(cellText=data, rowLabels=lines,colLabels=cols)
the_table.auto_set_font_size(False)
the_table.set_fontsize(24)
the_table.scale(4, 4)
for pos in ['right','top','bottom','left']:
plt.gca().spines[pos].set_visible(False)
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
plt.savefig('results/table_proc.svg', bbox_inches='tight', transparent=True)
def plot_detailed_case(self):
plt.close('all')
prep=self.ms_prep
proc_nu=np.cumsum(self.ms_proc_nu.sum(axis=1))
proc_ams=np.cumsum(self.ms_proc_ams.sum(axis=1))
proc_adm=np.cumsum(self.ms_proc_adm.sum(axis=1))
proc_fs=np.cumsum(self.fs_times.sum(axis=1))
fs_vpi=self.fs_vpi
ms_vpi=self.ms_vpi
# data=self.table_data[self.detailed_case]
plt.plot(ms_vpi,proc_adm,label='ADM & A-AMS',lw=self.lw)
plt.plot(ms_vpi,proc_nu,label='NU-ADM & A-AMS',lw=self.lw)
plt.plot(ms_vpi,proc_ams,label='A-AMS',lw=self.lw)
plt.plot(fs_vpi,proc_fs,label='reference', lw=self.lw)
self.format_plot(proc_fs)
plt.savefig('results/detailed.svg', bbox_inches='tight', transparent=True)
def format_plot(self, ordenadas=0, scales='lin_lin'):
x_scale, y_scale = scales.split('_')
if x_scale=='lin':
x_scale='linear'
if y_scale=='lin':
y_scale='linear'
else:
yscale='log'
plt.xscale(x_scale)
plt.yscale(y_scale)
plt.grid(which='major', lw=2, color='black')
plt.grid(which='minor', lw=1, color='gray')
if y_scale=='logs':
major_ticks=np.log10(ordenadas).astype('int')
if major_ticks.min()!=major_ticks.max():
major_ticks=10**np.arange(major_ticks.min(),major_ticks.max()*10).astype(float)
plt.gca().yaxis.set_major_locator(ticker.FixedLocator(major_ticks))
plt.gca().set_yticklabels(['{:.0f}%'.format(x) for x in np.concatenate([major_ticks])])
major_ticks=10**np.unique((np.log10(sorted(ordenadas))).astype(int)).astype(float)
major_ticks=np.append(major_ticks,major_ticks.max()*10)
mantissa= np.array([2, 3, 4, 5, 6, 7, 8, 9])
mantissa_plot=np.array([1, 0, 0, 1, 0, 0, 0, 0])
if y_scale=='log' and major_ticks.min()!=major_ticks.max():
plt.gca().yaxis.set_major_locator(ticker.FixedLocator(major_ticks))
plt.gca().set_yticklabels([self.form(x) for x in np.concatenate([major_ticks])])
minor_ticks=np.unique(np.array(ordenadas))#.astype('int'))
minor_ticks=np.concatenate([major_ticks[i]*mantissa for i in range(len(major_ticks)-1)])
mantissa_flags=np.concatenate([mantissa_plot for i in range(len(major_ticks)-1)])
fmt=np.array([self.form(x) for x in np.round(minor_ticks,5)])
fmt[mantissa_flags==0]= ''
# import pdb; pdb.set_trace()
plt.gca().yaxis.set_minor_locator(ticker.FixedLocator(minor_ticks))
plt.gca().yaxis.set_minor_formatter(ticker.FixedFormatter(fmt))
plt.ylim(major_ticks.min(),major_ticks.max()*1.1)
plt.gcf().set_size_inches(15,15)
pos=['left', 'right', 'bottom', 'top']
for p in pos:
plt.gca().spines[p].set_color('black')
plt.gca().spines[p].set_linewidth(3)
plt.legend()
def get_table_regression(self,data):
a1=[]
b1=[]
for l in data:
cases=self.cases.astype(int)
slope, intercept, r, p, se = st.linregress(np.log(cases), np.log(l))
a=np.float(np.exp(intercept))
b=slope
a1.append(a)
b1.append(b)
a1=np.array([a1]).T#.round(15)
b1=np.array([b1]).T.round(4)
d1=np.zeros_like(data)
data=np.hstack([data,a1])
data=np.hstack([data,b1])#.astype(str)
d1=np.zeros_like(data).astype(str)
for i in range(len(data)):
for j in range(len(data[0])):
if j<6:
d1[i,j]=np.format_float_scientific(data[i,j], precision=2)#'{:9f}'.format(data[i,j])
else:
n=str(data[i,j])
if len(n)<5:
n+='0'
d1[i,j]=n
# import pdb; pdb.set_trace()
return d1
def organize_results():
cases_ms=[]
for root, dirs, files in os.walk('results/'):
for dir in dirs:
variables={}
for r, ds, fs in os.walk(os.path.join(root,dir)):
for file in fs:
var_name=os.path.splitext(file)[0]
var_extention=os.path.splitext(file)[1]
if var_extention=='.npy' and var_name!='time_steps':
variables[var_name]=np.load(os.path.join(os.path.join(root,dir),file))
return variables
| [
"matplotlib.rc",
"matplotlib.pyplot.yscale",
"numpy.load",
"os.walk",
"numpy.exp",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tick_params",
"os.path.join",
"numpy.round",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.ticker.FixedLocator",
"numpy.cumsum",
"numpy.format_float_s... | [((10464, 10483), 'os.walk', 'os.walk', (['"""results/"""'], {}), "('results/')\n", (10471, 10483), False, 'import os\n'), ((253, 282), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (266, 282), False, 'import matplotlib\n'), ((4612, 4652), 'numpy.concatenate', 'np.concatenate', (["[self.cases, ['a', 'b']]"], {}), "([self.cases, ['a', 'b']])\n", (4626, 4652), True, 'import numpy as np\n'), ((4891, 4948), 'matplotlib.pyplot.table', 'plt.table', ([], {'cellText': 'data', 'rowLabels': 'lines', 'colLabels': 'cols'}), '(cellText=data, rowLabels=lines, colLabels=cols)\n', (4900, 4948), True, 'import matplotlib.pyplot as plt\n'), ((5170, 5257), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)'}), "(axis='x', which='both', bottom=False, top=False,\n labelbottom=False)\n", (5185, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5348), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'right': '(False)', 'left': '(False)', 'labelleft': '(False)'}), "(axis='y', which='both', right=False, left=False, labelleft=\n False)\n", (5277, 5348), True, 'import matplotlib.pyplot as plt\n'), ((5352, 5428), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/table_prep.svg"""'], {'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "('results/table_prep.svg', bbox_inches='tight', transparent=True)\n", (5363, 5428), True, 'import matplotlib.pyplot as plt\n'), ((5469, 5485), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5478, 5485), True, 'import matplotlib.pyplot as plt\n'), ((5593, 5633), 'numpy.concatenate', 'np.concatenate', (["[self.cases, ['a', 'b']]"], {}), "([self.cases, ['a', 'b']])\n", (5607, 5633), True, 'import numpy as np\n'), ((5872, 5929), 'matplotlib.pyplot.table', 'plt.table', ([], {'cellText': 'data', 
'rowLabels': 'lines', 'colLabels': 'cols'}), '(cellText=data, rowLabels=lines, colLabels=cols)\n', (5881, 5929), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6238), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)'}), "(axis='x', which='both', bottom=False, top=False,\n labelbottom=False)\n", (6166, 6238), True, 'import matplotlib.pyplot as plt\n'), ((6243, 6329), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'right': '(False)', 'left': '(False)', 'labelleft': '(False)'}), "(axis='y', which='both', right=False, left=False, labelleft=\n False)\n", (6258, 6329), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6409), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/table_proc.svg"""'], {'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "('results/table_proc.svg', bbox_inches='tight', transparent=True)\n", (6344, 6409), True, 'import matplotlib.pyplot as plt\n'), ((6453, 6469), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6462, 6469), True, 'import matplotlib.pyplot as plt\n'), ((6831, 6890), 'matplotlib.pyplot.plot', 'plt.plot', (['ms_vpi', 'proc_adm'], {'label': '"""ADM & A-AMS"""', 'lw': 'self.lw'}), "(ms_vpi, proc_adm, label='ADM & A-AMS', lw=self.lw)\n", (6839, 6890), True, 'import matplotlib.pyplot as plt\n'), ((6896, 6958), 'matplotlib.pyplot.plot', 'plt.plot', (['ms_vpi', 'proc_nu'], {'label': '"""NU-ADM & A-AMS"""', 'lw': 'self.lw'}), "(ms_vpi, proc_nu, label='NU-ADM & A-AMS', lw=self.lw)\n", (6904, 6958), True, 'import matplotlib.pyplot as plt\n'), ((6964, 7017), 'matplotlib.pyplot.plot', 'plt.plot', (['ms_vpi', 'proc_ams'], {'label': '"""A-AMS"""', 'lw': 'self.lw'}), "(ms_vpi, proc_ams, label='A-AMS', lw=self.lw)\n", (6972, 7017), True, 'import matplotlib.pyplot as plt\n'), ((7023, 7079), 'matplotlib.pyplot.plot', 'plt.plot', (['fs_vpi', 
'proc_fs'], {'label': '"""reference"""', 'lw': 'self.lw'}), "(fs_vpi, proc_fs, label='reference', lw=self.lw)\n", (7031, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7120, 7194), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/detailed.svg"""'], {'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "('results/detailed.svg', bbox_inches='tight', transparent=True)\n", (7131, 7194), True, 'import matplotlib.pyplot as plt\n'), ((7458, 7477), 'matplotlib.pyplot.xscale', 'plt.xscale', (['x_scale'], {}), '(x_scale)\n', (7468, 7477), True, 'import matplotlib.pyplot as plt\n'), ((7486, 7505), 'matplotlib.pyplot.yscale', 'plt.yscale', (['y_scale'], {}), '(y_scale)\n', (7496, 7505), True, 'import matplotlib.pyplot as plt\n'), ((7515, 7559), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'lw': '(2)', 'color': '"""black"""'}), "(which='major', lw=2, color='black')\n", (7523, 7559), True, 'import matplotlib.pyplot as plt\n'), ((7568, 7611), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'lw': '(1)', 'color': '"""gray"""'}), "(which='minor', lw=1, color='gray')\n", (7576, 7611), True, 'import matplotlib.pyplot as plt\n'), ((8212, 8246), 'numpy.array', 'np.array', (['[2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([2, 3, 4, 5, 6, 7, 8, 9])\n', (8220, 8246), True, 'import numpy as np\n'), ((8269, 8303), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 0, 0, 0, 0]'], {}), '([1, 0, 0, 1, 0, 0, 0, 0])\n', (8277, 8303), True, 'import numpy as np\n'), ((9406, 9418), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9416, 9418), True, 'import matplotlib.pyplot as plt\n'), ((9832, 9851), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (9845, 9851), True, 'import numpy as np\n'), ((9866, 9887), 'numpy.hstack', 'np.hstack', (['[data, a1]'], {}), '([data, a1])\n', (9875, 9887), True, 'import numpy as np\n'), ((9900, 9921), 'numpy.hstack', 'np.hstack', (['[data, b1]'], {}), '([data, b1])\n', (9909, 9921), True, 'import 
numpy as np\n'), ((1793, 1821), 'numpy.hstack', 'np.hstack', (['[ms_proc, t1, t2]'], {}), '([ms_proc, t1, t2])\n', (1802, 1821), True, 'import numpy as np\n'), ((4004, 4029), 'numpy.load', 'np.load', (['"""time_steps.npy"""'], {}), "('time_steps.npy')\n", (4011, 4029), True, 'import numpy as np\n'), ((4760, 4775), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (4769, 4775), True, 'import numpy as np\n'), ((5741, 5756), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (5750, 5756), True, 'import numpy as np\n'), ((9756, 9770), 'numpy.array', 'np.array', (['[a1]'], {}), '([a1])\n', (9764, 9770), True, 'import numpy as np\n'), ((1728, 1761), 'numpy.array', 'np.array', (['[0.007 * ms_proc[:, 1]]'], {}), '([0.007 * ms_proc[:, 1]])\n', (1736, 1761), True, 'import numpy as np\n'), ((2210, 2239), 'numpy.load', 'np.load', (['"""results/n1_adm.npy"""'], {}), "('results/n1_adm.npy')\n", (2217, 2239), True, 'import numpy as np\n'), ((3074, 3109), 'numpy.load', 'np.load', (['"""results/n1_adm_nuadm.npy"""'], {}), "('results/n1_adm_nuadm.npy')\n", (3081, 3109), True, 'import numpy as np\n'), ((8418, 8450), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['major_ticks'], {}), '(major_ticks)\n', (8437, 8450), False, 'from matplotlib import ticker\n'), ((8580, 8599), 'numpy.array', 'np.array', (['ordenadas'], {}), '(ordenadas)\n', (8588, 8599), True, 'import numpy as np\n'), ((9014, 9046), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['minor_ticks'], {}), '(minor_ticks)\n', (9033, 9046), False, 'from matplotlib import ticker\n'), ((9096, 9122), 'matplotlib.ticker.FixedFormatter', 'ticker.FixedFormatter', (['fmt'], {}), '(fmt)\n', (9117, 9122), False, 'from matplotlib import ticker\n'), ((9196, 9205), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9203, 9205), True, 'import matplotlib.pyplot as plt\n'), ((9607, 9620), 'numpy.log', 'np.log', (['cases'], {}), '(cases)\n', (9613, 9620), True, 'import numpy as np\n'), ((9622, 9631), 
'numpy.log', 'np.log', (['l'], {}), '(l)\n', (9628, 9631), True, 'import numpy as np\n'), ((9656, 9673), 'numpy.exp', 'np.exp', (['intercept'], {}), '(intercept)\n', (9662, 9673), True, 'import numpy as np\n'), ((9945, 9964), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (9958, 9964), True, 'import numpy as np\n'), ((10572, 10595), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (10584, 10595), False, 'import os\n'), ((1470, 1600), 'numpy.array', 'np.array', (['[0.07 * ms_prep[2] ** 1.75, 0.0009 * ms_prep[0] ** 1.0037, 0.01 * ms_prep[0\n ] ** 1.0052, 0.27 * ms_prep[1] ** 1.013]'], {}), '([0.07 * ms_prep[2] ** 1.75, 0.0009 * ms_prep[0] ** 1.0037, 0.01 * \n ms_prep[0] ** 1.0052, 0.27 * ms_prep[1] ** 1.013])\n', (1478, 1600), True, 'import numpy as np\n'), ((4115, 4128), 'numpy.cumsum', 'np.cumsum', (['ts'], {}), '(ts)\n', (4124, 4128), True, 'import numpy as np\n'), ((7664, 7683), 'numpy.log10', 'np.log10', (['ordenadas'], {}), '(ordenadas)\n', (7672, 7683), True, 'import numpy as np\n'), ((7897, 7929), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['major_ticks'], {}), '(major_ticks)\n', (7916, 7929), False, 'from matplotlib import ticker\n'), ((8464, 8473), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8471, 8473), True, 'import matplotlib.pyplot as plt\n'), ((9795, 9809), 'numpy.array', 'np.array', (['[b1]'], {}), '([b1])\n', (9803, 9809), True, 'import numpy as np\n'), ((10106, 10157), 'numpy.format_float_scientific', 'np.format_float_scientific', (['data[i, j]'], {'precision': '(2)'}), '(data[i, j], precision=2)\n', (10132, 10157), True, 'import numpy as np\n'), ((7947, 7956), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7954, 7956), True, 'import matplotlib.pyplot as plt\n'), ((8384, 8393), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8391, 8393), True, 'import matplotlib.pyplot as plt\n'), ((8513, 8542), 'numpy.concatenate', 'np.concatenate', (['[major_ticks]'], {}), 
'([major_ticks])\n', (8527, 8542), True, 'import numpy as np\n'), ((8861, 8885), 'numpy.round', 'np.round', (['minor_ticks', '(5)'], {}), '(minor_ticks, 5)\n', (8869, 8885), True, 'import numpy as np\n'), ((8980, 8989), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8987, 8989), True, 'import matplotlib.pyplot as plt\n'), ((9060, 9069), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9067, 9069), True, 'import matplotlib.pyplot as plt\n'), ((10658, 10680), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (10674, 10680), False, 'import os\n'), ((10718, 10740), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (10734, 10740), False, 'import os\n'), ((692, 707), 'numpy.array', 'np.array', (['cases'], {}), '(cases)\n', (700, 707), True, 'import numpy as np\n'), ((4070, 4082), 'numpy.sort', 'np.sort', (['(-ts)'], {}), '(-ts)\n', (4077, 4082), True, 'import numpy as np\n'), ((5121, 5130), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5128, 5130), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6111), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6109, 6111), True, 'import matplotlib.pyplot as plt\n'), ((7863, 7872), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7870, 7872), True, 'import matplotlib.pyplot as plt\n'), ((8003, 8032), 'numpy.concatenate', 'np.concatenate', (['[major_ticks]'], {}), '([major_ticks])\n', (8017, 8032), True, 'import numpy as np\n'), ((9310, 9319), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9317, 9319), True, 'import matplotlib.pyplot as plt\n'), ((9361, 9370), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9368, 9370), True, 'import matplotlib.pyplot as plt\n'), ((10882, 10905), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (10894, 10905), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import struct
import numpy as np
def load_tensor_binary(fobj):
"""
Load a tensor dumped by the :class:`BinaryOprIODump` plugin; the actual
tensor value dump is implemented by ``mgb::debug::dump_tensor``.
Multiple values can be compared by ``tools/compare_binary_iodump.py``.
:param fobj: file object, or a string that contains the file name.
:return: tuple ``(tensor_value, tensor_name)``.
"""
if isinstance(fobj, str):
with open(fobj, "rb") as fin:
return load_tensor_binary(fin)
DTYPE_LIST = {
0: np.float32,
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
# 5: _mgb.intb1,
# 6: _mgb.intb2,
# 7: _mgb.intb4,
8: None,
9: np.float16,
# quantized dtype start from 100000
# see MEGDNN_PARAMETERIZED_DTYPE_ENUM_BASE in
# dnn/include/megdnn/dtype.h
100000: np.uint8,
100001: np.int32,
100002: np.int8,
}
header_fmt = struct.Struct("III")
name_len, dtype, max_ndim = header_fmt.unpack(fobj.read(header_fmt.size))
assert (
DTYPE_LIST[dtype] is not None
), "Cannot load this tensor: dtype Byte is unsupported."
shape = list(struct.unpack("I" * max_ndim, fobj.read(max_ndim * 4)))
while shape[-1] == 0:
shape.pop(-1)
name = fobj.read(name_len).decode("ascii")
return np.fromfile(fobj, dtype=DTYPE_LIST[dtype]).reshape(shape), name
| [
"numpy.fromfile",
"struct.Struct"
] | [((1392, 1412), 'struct.Struct', 'struct.Struct', (['"""III"""'], {}), "('III')\n", (1405, 1412), False, 'import struct\n'), ((1783, 1825), 'numpy.fromfile', 'np.fromfile', (['fobj'], {'dtype': 'DTYPE_LIST[dtype]'}), '(fobj, dtype=DTYPE_LIST[dtype])\n', (1794, 1825), True, 'import numpy as np\n')] |
from typing import List, Any, Sequence
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import qcodes as qc
from .data_export import get_data_by_id, flatten_1D_data_for_plot
from .data_export import datatype_from_setpoints_2d, reshape_2D_data
log = logging.getLogger(__name__)
DB = qc.config["core"]["db_location"]
def plot_by_id(run_id: int) -> Figure:
def set_axis_labels(ax, data):
if data[0]['label'] == '':
lbl = data[0]['name']
else:
lbl = data[0]['label']
if data[0]['unit'] == '':
unit = ''
else:
unit = data[0]['unit']
unit = f"({unit})"
ax.set_xlabel(f'{lbl} {unit}')
if data[1]['label'] == '':
lbl = data[1]['name']
else:
lbl = data[1]['label']
if data[1]['unit'] == '':
unit = ''
else:
unit = data[1]['unit']
unit = f'({unit})'
ax.set_ylabel(f'{lbl} {unit}')
"""
Construct all plots for a given run
Implemented so far:
* 1D plots
* 2D plots on filled out rectangular grids
"""
alldata = get_data_by_id(run_id)
for data in alldata:
if len(data) == 2: # 1D PLOTTING
log.debug('Plotting by id, doing a 1D plot')
figure, ax = plt.subplots()
# sort for plotting
order = data[0]['data'].argsort()
ax.plot(data[0]['data'][order], data[1]['data'][order])
set_axis_labels(ax, data)
return figure
elif len(data) == 3: # 2D PLOTTING
log.debug('Plotting by id, doing a 2D plot')
# From the setpoints, figure out which 2D plotter to use
# TODO: The "decision tree" for what gets plotted how and how
# we check for that is still unfinished/not optimised
how_to_plot = {'grid': plot_on_a_plain_grid,
'equidistant': plot_on_a_plain_grid}
log.debug('Plotting by id, determining plottype')
plottype = datatype_from_setpoints_2d([data[0]['data'],
data[1]['data']])
if plottype in how_to_plot.keys():
log.debug('Plotting by id, doing the actual plot')
xpoints = flatten_1D_data_for_plot(data[0]['data'])
ypoints = flatten_1D_data_for_plot(data[1]['data'])
zpoints = flatten_1D_data_for_plot(data[2]['data'])
figure = how_to_plot[plottype](xpoints, ypoints, zpoints)
ax = figure.axes[0]
set_axis_labels(ax, data)
# TODO: get a colorbar
return figure
else:
log.warning('2D data does not seem to be on a '
'grid. Falling back to scatter plot')
fig, ax = plt.subplots(1,1)
xpoints = flatten_1D_data_for_plot(data[0]['data'])
ypoints = flatten_1D_data_for_plot(data[1]['data'])
zpoints = flatten_1D_data_for_plot(data[2]['data'])
ax.scatter(x=xpoints, y=ypoints, c=zpoints)
set_axis_labels(ax, data)
else:
raise ValueError('Multi-dimensional data encountered. '
f'parameter {data[-1].name} depends on '
f'{len(data-1)} parameters, cannot plot '
f'that.')
def plot_on_a_plain_grid(x: np.ndarray, y: np.ndarray,
z: np.ndarray) -> Figure:
"""
Plot a heatmap of z using x and y as axes. Assumes that the data
are rectangular, i.e. that x and y together describe a rectangular
grid. The arrays of x and y need not be sorted in any particular
way, but data must belong together such that z[n] has x[n] and
y[n] as setpoints. The setpoints need not be equidistantly
spaced, but linear interpolation is used to find the edges of the
plotted squares.
Args:
x: The x values
y: The y values
z: The z values
Returns:
The matplotlib figure handle
"""
xrow, yrow, z_to_plot = reshape_2D_data(x, y, z)
# we use a general edge calculator,
# in the case of non-equidistantly spaced data
# TODO: is this appropriate for a log ax?
dxs = np.diff(xrow)/2
dys = np.diff(yrow)/2
x_edges = np.concatenate((np.array([xrow[0] - dxs[0]]),
xrow[:-1] + dxs,
np.array([xrow[-1] + dxs[-1]])))
y_edges = np.concatenate((np.array([yrow[0] - dys[0]]),
yrow[:-1] + dys,
np.array([yrow[-1] + dys[-1]])))
fig, ax = plt.subplots()
ax.pcolormesh(x_edges, y_edges, np.ma.masked_invalid(z_to_plot))
return fig
| [
"numpy.ma.masked_invalid",
"numpy.diff",
"numpy.array",
"matplotlib.pyplot.subplots",
"logging.getLogger"
] | [((307, 334), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (324, 334), False, 'import logging\n'), ((4833, 4847), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4845, 4847), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4449), 'numpy.diff', 'np.diff', (['xrow'], {}), '(xrow)\n', (4443, 4449), True, 'import numpy as np\n'), ((4462, 4475), 'numpy.diff', 'np.diff', (['yrow'], {}), '(yrow)\n', (4469, 4475), True, 'import numpy as np\n'), ((4884, 4915), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['z_to_plot'], {}), '(z_to_plot)\n', (4904, 4915), True, 'import numpy as np\n'), ((1376, 1390), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1388, 1390), True, 'import matplotlib.pyplot as plt\n'), ((4508, 4536), 'numpy.array', 'np.array', (['[xrow[0] - dxs[0]]'], {}), '([xrow[0] - dxs[0]])\n', (4516, 4536), True, 'import numpy as np\n'), ((4615, 4645), 'numpy.array', 'np.array', (['[xrow[-1] + dxs[-1]]'], {}), '([xrow[-1] + dxs[-1]])\n', (4623, 4645), True, 'import numpy as np\n'), ((4678, 4706), 'numpy.array', 'np.array', (['[yrow[0] - dys[0]]'], {}), '([yrow[0] - dys[0]])\n', (4686, 4706), True, 'import numpy as np\n'), ((4785, 4815), 'numpy.array', 'np.array', (['[yrow[-1] + dys[-1]]'], {}), '([yrow[-1] + dys[-1]])\n', (4793, 4815), True, 'import numpy as np\n'), ((2958, 2976), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2970, 2976), True, 'import matplotlib.pyplot as plt\n')] |
def neo_preprocess(payload, content_type):
import logging
import numpy as np
import io
import PIL.Image
def _read_input_shape(signature):
shape = signature[-1]['shape']
shape[0] = 1
return shape
def _transform_image(image, shape_info):
# Fetch image size
input_shape = _read_input_shape(shape_info)
# Perform color conversion
if input_shape[-3] == 3:
# training input expected is 3 channel RGB
image = image.convert('RGB')
elif input_shape[-3] == 1:
# training input expected is grayscale
image = image.convert('L')
else:
# shouldn't get here
raise RuntimeError('Wrong number of channels in input shape')
# Resize
image = np.asarray(image.resize((input_shape[-2], input_shape[-1])))
# Normalize
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
image = (image/255- mean_vec)/stddev_vec
# Transpose
if len(image.shape) == 2: # for greyscale image
image = np.expand_dims(image, axis=2)
image = np.rollaxis(image, axis=2, start=0)[np.newaxis, :]
return image
logging.info('Invoking user-defined pre-processing function')
if content_type != 'image/jpeg':
raise RuntimeError('Content type must be image/jpeg')
shape_info = [{"shape":[1,3,512,512], "name":"data"}]
f = io.BytesIO(payload)
dtest = _transform_image(PIL.Image.open(f), shape_info)
return {'data':dtest}
### NOTE: this function cannot use MXNet
def neo_postprocess(result):
import logging
import numpy as np
import json
logging.info('Invoking user-defined post-processing function')
js = {'prediction':[],'instance':[]}
for r in result:
r = np.squeeze(r)
js['instance'].append(r.tolist())
idx, score, bbox = js['instance']
bbox = np.asarray(bbox)/512
res = np.hstack((np.column_stack((idx,score)),bbox))
for r in res:
js['prediction'].append(r.tolist())
del js['instance']
response_body = json.dumps(js)
content_type = 'application/json'
return response_body, content_type | [
"io.BytesIO",
"numpy.asarray",
"numpy.column_stack",
"numpy.expand_dims",
"json.dumps",
"logging.info",
"numpy.array",
"numpy.rollaxis",
"numpy.squeeze"
] | [((1270, 1331), 'logging.info', 'logging.info', (['"""Invoking user-defined pre-processing function"""'], {}), "('Invoking user-defined pre-processing function')\n", (1282, 1331), False, 'import logging\n'), ((1503, 1522), 'io.BytesIO', 'io.BytesIO', (['payload'], {}), '(payload)\n', (1513, 1522), False, 'import io\n'), ((1749, 1811), 'logging.info', 'logging.info', (['"""Invoking user-defined post-processing function"""'], {}), "('Invoking user-defined post-processing function')\n", (1761, 1811), False, 'import logging\n'), ((2176, 2190), 'json.dumps', 'json.dumps', (['js'], {}), '(js)\n', (2186, 2190), False, 'import json\n'), ((910, 941), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (918, 941), True, 'import numpy as np\n'), ((963, 994), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (971, 994), True, 'import numpy as np\n'), ((1888, 1901), 'numpy.squeeze', 'np.squeeze', (['r'], {}), '(r)\n', (1898, 1901), True, 'import numpy as np\n'), ((1993, 2009), 'numpy.asarray', 'np.asarray', (['bbox'], {}), '(bbox)\n', (2003, 2009), True, 'import numpy as np\n'), ((1142, 1171), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (1156, 1171), True, 'import numpy as np\n'), ((1188, 1223), 'numpy.rollaxis', 'np.rollaxis', (['image'], {'axis': '(2)', 'start': '(0)'}), '(image, axis=2, start=0)\n', (1199, 1223), True, 'import numpy as np\n'), ((2035, 2064), 'numpy.column_stack', 'np.column_stack', (['(idx, score)'], {}), '((idx, score))\n', (2050, 2064), True, 'import numpy as np\n')] |
from ell import *
import numpy as np
_filter = Filter.from_name('db4')
def check(a, mi, ma, shape):
return np.all(a.min_index == mi) and np.all(a.max_index == ma) and np.all(np.equal(a.shape, shape))
def test_reduce_1d():
signal = Ell1d(np.sin(np.linspace(0,3,100)))
reduced = signal.reduce(_filter)
min_index = signal.min_index - _filter.max_index
max_index = signal.max_index - _filter.min_index
assert np.all(np.abs(reduced.min_index) == np.abs(min_index) // 2)
assert np.all(np.abs(reduced.max_index) == np.abs(max_index) // 2)
def test_filter_1d():
    """Filtering a 1-d signal produces the expected index range."""
    samples = np.sin(np.linspace(0, 3, 10))
    filtered = Ell1d(samples).filter(_filter)
    assert filtered.min_index == -6
    assert filtered.max_index == 15
def test_reduce_2d():
    """Reducing a 2-d (tensor) signal matches the 1-d index arithmetic on both axes."""
    signal = Ell1d(np.sin(np.linspace(0,3,10))).tensor()
    reduced = signal.reduce(_filter)
    expected_min = np.subtract(signal.min_index, _filter.max_index)
    expected_max = np.subtract(signal.max_index, _filter.min_index)
    min_ok = np.all(np.abs(reduced.min_index) == np.abs(expected_min) // 2)
    max_ok = np.all(np.abs(reduced.max_index) == np.abs(expected_max) // 2)
    assert min_ok and max_ok
    # Reducing axis-by-axis, with the 1-d filter directly, or with its tensor
    # product must all agree on the resulting shape.
    signal = Ell1d(np.sin(np.linspace(0,3,10))).tensor()
    reduced1 = signal.reduce(_filter, axis=0).reduce(_filter, axis=1)
    reduced2 = signal.reduce(_filter)
    reduced3 = signal.reduce(_filter.tensor())
    print(f"{reduced1:s}, {reduced2:s}, {reduced3:s}")
    assert reduced1.shape == reduced2.shape
test_reduce_2d()
def test_filter_2d():
    """Filtering a 2-d (tensor) signal produces the expected index bounds."""
    grid = Ell1d(np.sin(np.linspace(0, 3, 10))).tensor()
    filtered = grid.filter(_filter)
    assert filtered.min_index == (-6, -6)
    assert filtered.max_index == (15, 15)
def test_filter_m2d():
    """Filter / reduce / expand on a multi-channel 2-d signal."""
    signal = MultiEll2d(np.ones((10, 20, 3)))
    expanded = signal.filter(_filter)
    assert expanded.min_index == (-6, -6)
    assert expanded.max_index == (15, 25)
    # Smoke-check that reduce followed by expand runs without error.
    expanded = signal.reduce(_filter).expand(_filter)
test_filter_2d()
| [
"numpy.abs",
"numpy.subtract",
"numpy.ones",
"numpy.equal",
"numpy.linspace",
"numpy.all"
] | [((864, 912), 'numpy.subtract', 'np.subtract', (['signal.min_index', '_filter.max_index'], {}), '(signal.min_index, _filter.max_index)\n', (875, 912), True, 'import numpy as np\n'), ((929, 977), 'numpy.subtract', 'np.subtract', (['signal.max_index', '_filter.min_index'], {}), '(signal.max_index, _filter.min_index)\n', (940, 977), True, 'import numpy as np\n'), ((113, 138), 'numpy.all', 'np.all', (['(a.min_index == mi)'], {}), '(a.min_index == mi)\n', (119, 138), True, 'import numpy as np\n'), ((143, 168), 'numpy.all', 'np.all', (['(a.max_index == ma)'], {}), '(a.max_index == ma)\n', (149, 168), True, 'import numpy as np\n'), ((1676, 1696), 'numpy.ones', 'np.ones', (['(10, 20, 3)'], {}), '((10, 20, 3))\n', (1683, 1696), True, 'import numpy as np\n'), ((180, 204), 'numpy.equal', 'np.equal', (['a.shape', 'shape'], {}), '(a.shape, shape)\n', (188, 204), True, 'import numpy as np\n'), ((256, 278), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(100)'], {}), '(0, 3, 100)\n', (267, 278), True, 'import numpy as np\n'), ((440, 465), 'numpy.abs', 'np.abs', (['reduced.min_index'], {}), '(reduced.min_index)\n', (446, 465), True, 'import numpy as np\n'), ((511, 536), 'numpy.abs', 'np.abs', (['reduced.max_index'], {}), '(reduced.max_index)\n', (517, 536), True, 'import numpy as np\n'), ((613, 634), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (624, 634), True, 'import numpy as np\n'), ((469, 486), 'numpy.abs', 'np.abs', (['min_index'], {}), '(min_index)\n', (475, 486), True, 'import numpy as np\n'), ((540, 557), 'numpy.abs', 'np.abs', (['max_index'], {}), '(max_index)\n', (546, 557), True, 'import numpy as np\n'), ((996, 1021), 'numpy.abs', 'np.abs', (['reduced.min_index'], {}), '(reduced.min_index)\n', (1002, 1021), True, 'import numpy as np\n'), ((1060, 1085), 'numpy.abs', 'np.abs', (['reduced.max_index'], {}), '(reduced.max_index)\n', (1066, 1085), True, 'import numpy as np\n'), ((780, 801), 'numpy.linspace', 'np.linspace', (['(0)', 
'(3)', '(10)'], {}), '(0, 3, 10)\n', (791, 801), True, 'import numpy as np\n'), ((1025, 1042), 'numpy.abs', 'np.abs', (['min_index'], {}), '(min_index)\n', (1031, 1042), True, 'import numpy as np\n'), ((1089, 1106), 'numpy.abs', 'np.abs', (['max_index'], {}), '(max_index)\n', (1095, 1106), True, 'import numpy as np\n'), ((1139, 1160), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (1150, 1160), True, 'import numpy as np\n'), ((1491, 1512), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (1502, 1512), True, 'import numpy as np\n')] |
#%% imports
import numpy as np
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC, SVC
# 8. Train a LinearSVC on a linearly separable dataset. Then train an SVC and a SGDClassifier on
# the same dataset. See if you can get them to produce roughly the same model.
#%%
# Build a two-class, linearly separable problem from iris:
# setosa (0) vs. versicolor (1), described by petal length and width.
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = iris["target"]
keep = (y == 0) | (y == 1)
X = X[keep]
y = y[keep]
#%% LinearSVC
# Scale features, then fit a linear SVM with hinge loss.
lsvc_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("linear_svc", LinearSVC(C=1, loss="hinge")),
])
lsvc_clf.fit(X, y)
_lin = lsvc_clf.named_steps['linear_svc']
_lin.coef_, _lin.intercept_
#%% SVC
# Same problem with the kernelized SVC restricted to a linear kernel.
svc_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("svc", SVC(kernel="linear", C=1)),
])
svc_clf.fit(X, y)
_svc = svc_clf.named_steps['svc']
_svc.coef_, _svc.intercept_
#%% SGDClassifier
# Hinge-loss SGD is the stochastic counterpart of the linear SVM above.
sgd_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("sgd", SGDClassifier(loss="hinge")),
])
sgd_clf.fit(X, y)
_sgd = sgd_clf.named_steps['sgd']
_sgd.coef_, _sgd.intercept_
#%% 9. Train an SVM classifier on the MNIST dataset. Since SVM classifiers are binary classifiers, you
# will need to use one-versus-all to classify all 10 digits. You may want to tune the
# hyperparameters using small validation sets to speed up the process. What accuracy can you
# reach?
# BUG FIX: datasets.fetch_mldata was removed from scikit-learn (mldata.org is
# offline); fetch_openml is the supported replacement. OpenML returns string
# labels, so cast them to integers.
mnist = datasets.fetch_openml('mnist_784', version=1, as_frame=False)
X, y = mnist["data"], mnist["target"].astype(np.uint8)
svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("linear_svc", LinearSVC(C=1, loss="hinge")),
])
ovr_clf = OneVsRestClassifier(svm_clf)
# Standard MNIST split: first 60k train, last 10k test.
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# use small subset for initial development
X_train, y_train = X_train[:500], y_train[:500]
#%%
ovr_clf.fit(X_train, y_train)
#%%
# try a prediction
predictions = ovr_clf.predict(X_test)
predictions
#%%
# get cross val score
# see http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics
scores = cross_val_score(ovr_clf, X_train, y_train, cv=3)
'Accuracy: %{0:2f} (+/- %{1:2f})'.format(scores.mean(), scores.std() * 2)
# => 'Accuracy: %0.802210 (+/- %0.021665)'
| [
"sklearn.datasets.load_iris",
"sklearn.preprocessing.StandardScaler",
"sklearn.linear_model.SGDClassifier",
"sklearn.model_selection.cross_val_score",
"sklearn.multiclass.OneVsRestClassifier",
"sklearn.svm.SVC",
"numpy.random.permutation",
"sklearn.svm.LinearSVC",
"sklearn.datasets.fetch_mldata"
] | [((526, 546), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (544, 546), False, 'from sklearn import datasets\n'), ((1672, 1711), 'sklearn.datasets.fetch_mldata', 'datasets.fetch_mldata', (['"""MNIST original"""'], {}), "('MNIST original')\n", (1693, 1711), False, 'from sklearn import datasets\n'), ((1870, 1898), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['svm_clf'], {}), '(svm_clf)\n', (1889, 1898), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((1995, 2023), 'numpy.random.permutation', 'np.random.permutation', (['(60000)'], {}), '(60000)\n', (2016, 2023), True, 'import numpy as np\n'), ((2438, 2486), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ovr_clf', 'X_train', 'y_train'], {'cv': '(3)'}), '(ovr_clf, X_train, y_train, cv=3)\n', (2453, 2486), False, 'from sklearn.model_selection import cross_val_score\n'), ((777, 793), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (791, 793), False, 'from sklearn.preprocessing import StandardScaler\n'), ((815, 843), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1)', 'loss': '"""hinge"""'}), "(C=1, loss='hinge')\n", (824, 843), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((1003, 1019), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1017, 1019), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1034, 1059), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(1)'}), "(kernel='linear', C=1)\n", (1037, 1059), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((1214, 1230), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1228, 1230), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1245, 1272), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (1258, 1272), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1787, 1803), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1801, 1803), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1825, 1853), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1)', 'loss': '"""hinge"""'}), "(C=1, loss='hinge')\n", (1834, 1853), False, 'from sklearn.svm import LinearSVC, SVC\n')] |
import numpy as np
from xarray import Dataset, DataArray
#from xray.core.ops import allclose_or_equiv
import pytest
# Skip everything: these tests target a deprecated xgcm API.
pytestmark = pytest.mark.xfail(True, reason='deprecated')
try:
    from xgcm import GCMDataset
except ImportError:
    # this import syntax is old: GCMDataset is absent from newer xgcm
    # releases, so the tests below would fail with a NameError; they are
    # xfail'd above anyway.
    pass
@pytest.fixture
def test_dataset():
    """Build a minimal MITgcm-style xarray Dataset on a doubly periodic,
    cartesian grid with all the dimensions GCMDataset expects."""
    # oceanic parameters, cartesian coordinates, doubly periodic
    H = 5000.
    Lx = 4e6
    Ly = 3e6
    Nz = 10
    Nx = 25
    Ny = 20
    dz = H / Nz
    dx = Lx / Nx
    dy = Ly / Ny
    ds = Dataset()
    ds.attrs['H'] = H
    ds.attrs['Lx'] = Lx
    ds.attrs['Ly'] = Ly
    ds.attrs['Nz'] = Nz
    ds.attrs['Nx'] = Nx
    ds.attrs['Ny'] = Ny
    ds.attrs['dz'] = dz
    ds.attrs['dx'] = dx
    ds.attrs['dy'] = dy
    # vertical grid: cell centers (Z) and interface coordinates (Zp1, Zl, Zu)
    ds['Z'] = ('Z', dz/2 + dz*np.arange(Nz))
    ds['Zp1'] = ('Zp1', dz*np.arange(Nz+1))
    ds['Zl'] = ('Zl', dz*np.arange(Nz))
    ds['Zu'] = ('Zu', dz + dz*np.arange(Nz))
    # vertical spacing
    ds['drF'] = ('Z', np.full(Nz, dz))
    ds['drC'] = ('Zp1', np.hstack([dz/2, np.full(Nz-1, dz), dz/2]))
    # horizontal grid (periodic, so Xp1/Yp1 have the same length as X/Y)
    ds['X'] = ('X', dx/2 + dx*np.arange(Nx))
    ds['Xp1'] = ('Xp1', dx*np.arange(Nx))
    ds['Y'] = ('Y', dy/2 + dy*np.arange(Ny))
    ds['Yp1'] = ('Yp1', dy*np.arange(Ny))
    xc, yc = np.meshgrid(ds.X, ds.Y)
    xg, yg = np.meshgrid(ds.Xp1, ds.Yp1)
    ds['XC'] = (('Y','X'), xc)
    ds['YC'] = (('Y','X'), yc)
    ds['XG'] = (('Yp1','Xp1'), xg)
    ds['YG'] = (('Yp1','Xp1'), yg)
    # horizontal spacing
    ds['dxC'] = (('Y','Xp1'), np.full((Ny,Nx), dx))
    ds['dyC'] = (('Yp1','X'), np.full((Ny,Nx), dy))
    ds['dxG'] = (('Yp1','X'), np.full((Ny,Nx), dx))
    # BUG FIX: dyG is a y-spacing and must be filled with dy, not dx.
    ds['dyG'] = (('Y','Xp1'), np.full((Ny,Nx), dy))
    return ds
#class TestGCMDataset(unittest.TestCase):
def test_create_gcm_dataset(test_dataset):
    """GCMDataset builds from a complete dataset and raises KeyError when any
    required variable is missing."""
    ds = test_dataset
    gcm = GCMDataset(ds)
    for name in ds:
        with pytest.raises(KeyError):
            GCMDataset(ds.drop(name))
def test_vertical_derivatives(test_dataset):
    """Check GCMDataset's vertical difference/derivative operators against
    hand-built finite differences of a sinusoidal profile."""
    ds = test_dataset
    H = ds.attrs['H']
    dz = ds.attrs['dz']
    # vertical function of z at cell interface
    f = np.sin(np.pi * ds.Zp1.values / H)
    ds['f'] = (('Zp1'), f)
    ds['fl'] = ('Zl', f[:-1])
    # TODO: build in negative sign logic more carefully
    # Downward difference convention: df[k] = f[k] - f[k+1].
    df = -np.diff(f)
    ds['df'] = ('Z', df)
    fill_value = 0.
    # NOTE(review): the last entry uses f[-2]-fill_value — presumably the
    # boundary contribution expected by diff_zl_to_z; confirm against the
    # GCMDataset implementation.
    ds['dfl'] = ('Z', np.hstack([df[:-1], f[-2]-fill_value]))
    ds['dfdz'] = ds['df'] / dz
    ds['dfldz'] = ds['dfl'] / dz
    # vertical function at cell center
    g = np.sin(np.pi * ds.Z.values / H)
    ds['g'] = ('Z', g)
    dg = -np.diff(g)
    # Differences of cell-center values live on interior interfaces (Zp1[1:-1]).
    dsdg = DataArray(dg, {'Zp1': ds.Zp1[1:-1]}, 'Zp1')
    dsdgdf = dsdg / dz
    gcm = GCMDataset(ds)
    gcm_df = gcm.diff_zp1_to_z(ds.f)
    assert gcm_df.equals(ds.df), (gcm_df, ds.df)
    gcm_dfdz = gcm.derivative_zp1_to_z(ds.f)
    assert gcm_dfdz.equals(ds.dfdz), (gcm_dfdz, ds.dfdz)
    gcm_dfl = gcm.diff_zl_to_z(ds.fl, fill_value)
    assert gcm_dfl.equals(ds.dfl), (gcm_dfl, ds.dfl)
    gcm_dfldz = gcm.derivative_zl_to_z(ds.fl, fill_value)
    assert gcm_dfldz.equals(ds.dfldz), (gcm_dfldz, ds.dfldz)
    gcm_dg = gcm.diff_z_to_zp1(ds.g)
    assert gcm_dg.equals(dsdg), (gcm_dg, dsdg)
    gcm_dgdf = gcm.derivative_z_to_zp1(ds.g)
    assert gcm_dgdf.equals(dsdgdf), (gcm_dgdf, dsdgdf)
def test_vertical_integral(test_dataset):
    """integrate_z reproduces the discrete vertical integral and its average."""
    ds = test_dataset
    H = ds.attrs['H']
    dz = ds.attrs['dz']
    profile = np.sin(np.pi * ds.Z.values / H)
    ds['f'] = (('Z'), profile)
    ds['fint'] = (profile * dz).sum()
    ds['favg'] = ds['fint'] / H
    gcm = GCMDataset(ds)
    gcm_fint = gcm.integrate_z(ds.f)
    assert gcm_fint.equals(ds.fint), (gcm_fint, ds.fint)
    gcm_favg = gcm.integrate_z(ds.f, average=True)
    assert gcm_favg.equals(ds.favg), (gcm_favg, ds.favg)
def test_horizontal_derivatives(test_dataset):
    """Horizontal difference operators match periodic finite differences."""
    ds = test_dataset
    dx, dy = ds.attrs['dx'], ds.attrs['dy']
    Lx, Ly = ds.attrs['Lx'], ds.attrs['Ly']
    # Periodic function of Xp1; forward difference wraps around via roll.
    fx = np.sin(np.pi * ds.Xp1.values / Lx)
    ds['f'] = ('Xp1', fx)
    ds['df'] = ('X', np.roll(fx, -1) - fx)
    ds['dfdx'] = ds.df / dx
    # Periodic function of Yp1.
    gy = np.cos(np.pi * ds.Yp1.values / Ly)
    ds['g'] = ('Yp1', gy)
    ds['dg'] = ('Y', np.roll(gy, -1) - gy)
    ds['dgdy'] = ds.dg / dy
    gcm = GCMDataset(ds)
    gcm_df = gcm.diff_xp1_to_x(ds.f)
    assert gcm_df.equals(ds.df), (gcm_df, ds.df)
    gcm_dg = gcm.diff_yp1_to_y(ds.g)
    assert gcm_dg.equals(ds.dg), (gcm_dg, ds.dg)
| [
"numpy.full",
"numpy.meshgrid",
"numpy.roll",
"xarray.Dataset",
"numpy.hstack",
"xgcm.GCMDataset",
"numpy.sin",
"numpy.diff",
"xarray.DataArray",
"numpy.cos",
"numpy.arange",
"pytest.raises",
"pytest.mark.xfail"
] | [((148, 192), 'pytest.mark.xfail', 'pytest.mark.xfail', (['(True)'], {'reason': '"""deprecated"""'}), "(True, reason='deprecated')\n", (165, 192), False, 'import pytest\n'), ((592, 601), 'xarray.Dataset', 'Dataset', ([], {}), '()\n', (599, 601), False, 'from xarray import Dataset, DataArray\n'), ((1349, 1372), 'numpy.meshgrid', 'np.meshgrid', (['ds.X', 'ds.Y'], {}), '(ds.X, ds.Y)\n', (1360, 1372), True, 'import numpy as np\n'), ((1386, 1413), 'numpy.meshgrid', 'np.meshgrid', (['ds.Xp1', 'ds.Yp1'], {}), '(ds.Xp1, ds.Yp1)\n', (1397, 1413), True, 'import numpy as np\n'), ((1914, 1928), 'xgcm.GCMDataset', 'GCMDataset', (['ds'], {}), '(ds)\n', (1924, 1928), False, 'from xgcm import GCMDataset\n'), ((2248, 2281), 'numpy.sin', 'np.sin', (['(np.pi * ds.Zp1.values / H)'], {}), '(np.pi * ds.Zp1.values / H)\n', (2254, 2281), True, 'import numpy as np\n'), ((2635, 2666), 'numpy.sin', 'np.sin', (['(np.pi * ds.Z.values / H)'], {}), '(np.pi * ds.Z.values / H)\n', (2641, 2666), True, 'import numpy as np\n'), ((2722, 2765), 'xarray.DataArray', 'DataArray', (['dg', "{'Zp1': ds.Zp1[1:-1]}", '"""Zp1"""'], {}), "(dg, {'Zp1': ds.Zp1[1:-1]}, 'Zp1')\n", (2731, 2765), False, 'from xarray import Dataset, DataArray\n'), ((2800, 2814), 'xgcm.GCMDataset', 'GCMDataset', (['ds'], {}), '(ds)\n', (2810, 2814), False, 'from xgcm import GCMDataset\n'), ((3529, 3560), 'numpy.sin', 'np.sin', (['(np.pi * ds.Z.values / H)'], {}), '(np.pi * ds.Z.values / H)\n', (3535, 3560), True, 'import numpy as np\n'), ((3659, 3673), 'xgcm.GCMDataset', 'GCMDataset', (['ds'], {}), '(ds)\n', (3669, 3673), False, 'from xgcm import GCMDataset\n'), ((4083, 4117), 'numpy.sin', 'np.sin', (['(np.pi * ds.Xp1.values / Lx)'], {}), '(np.pi * ds.Xp1.values / Lx)\n', (4089, 4117), True, 'import numpy as np\n'), ((4248, 4282), 'numpy.cos', 'np.cos', (['(np.pi * ds.Yp1.values / Ly)'], {}), '(np.pi * ds.Yp1.values / Ly)\n', (4254, 4282), True, 'import numpy as np\n'), ((4385, 4399), 'xgcm.GCMDataset', 'GCMDataset', (['ds'], {}), 
'(ds)\n', (4395, 4399), False, 'from xgcm import GCMDataset\n'), ((1055, 1070), 'numpy.full', 'np.full', (['Nz', 'dz'], {}), '(Nz, dz)\n', (1062, 1070), True, 'import numpy as np\n'), ((1601, 1622), 'numpy.full', 'np.full', (['(Ny, Nx)', 'dx'], {}), '((Ny, Nx), dx)\n', (1608, 1622), True, 'import numpy as np\n'), ((1653, 1674), 'numpy.full', 'np.full', (['(Ny, Nx)', 'dy'], {}), '((Ny, Nx), dy)\n', (1660, 1674), True, 'import numpy as np\n'), ((1705, 1726), 'numpy.full', 'np.full', (['(Ny, Nx)', 'dx'], {}), '((Ny, Nx), dx)\n', (1712, 1726), True, 'import numpy as np\n'), ((1757, 1778), 'numpy.full', 'np.full', (['(Ny, Nx)', 'dx'], {}), '((Ny, Nx), dx)\n', (1764, 1778), True, 'import numpy as np\n'), ((2405, 2415), 'numpy.diff', 'np.diff', (['f'], {}), '(f)\n', (2412, 2415), True, 'import numpy as np\n'), ((2483, 2523), 'numpy.hstack', 'np.hstack', (['[df[:-1], f[-2] - fill_value]'], {}), '([df[:-1], f[-2] - fill_value])\n', (2492, 2523), True, 'import numpy as np\n'), ((2700, 2710), 'numpy.diff', 'np.diff', (['g'], {}), '(g)\n', (2707, 2710), True, 'import numpy as np\n'), ((908, 925), 'numpy.arange', 'np.arange', (['(Nz + 1)'], {}), '(Nz + 1)\n', (917, 925), True, 'import numpy as np\n'), ((950, 963), 'numpy.arange', 'np.arange', (['Nz'], {}), '(Nz)\n', (959, 963), True, 'import numpy as np\n'), ((1234, 1247), 'numpy.arange', 'np.arange', (['Nx'], {}), '(Nx)\n', (1243, 1247), True, 'import numpy as np\n'), ((1321, 1334), 'numpy.arange', 'np.arange', (['Ny'], {}), '(Ny)\n', (1330, 1334), True, 'import numpy as np\n'), ((2012, 2035), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2025, 2035), False, 'import pytest\n'), ((4164, 4178), 'numpy.roll', 'np.roll', (['f', '(-1)'], {}), '(f, -1)\n', (4171, 4178), True, 'import numpy as np\n'), ((4329, 4343), 'numpy.roll', 'np.roll', (['g', '(-1)'], {}), '(g, -1)\n', (4336, 4343), True, 'import numpy as np\n'), ((866, 879), 'numpy.arange', 'np.arange', (['Nz'], {}), '(Nz)\n', (875, 879), True, 'import 
numpy as np\n'), ((995, 1008), 'numpy.arange', 'np.arange', (['Nz'], {}), '(Nz)\n', (1004, 1008), True, 'import numpy as np\n'), ((1113, 1132), 'numpy.full', 'np.full', (['(Nz - 1)', 'dz'], {}), '(Nz - 1, dz)\n', (1120, 1132), True, 'import numpy as np\n'), ((1192, 1205), 'numpy.arange', 'np.arange', (['Nx'], {}), '(Nx)\n', (1201, 1205), True, 'import numpy as np\n'), ((1279, 1292), 'numpy.arange', 'np.arange', (['Ny'], {}), '(Ny)\n', (1288, 1292), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# All rights reserved. Use of this source code is governed by a modified BSD
# license that can be found in the LICENSE file.
from .ABSdata import LVCData, O3InjectionsData
import numpy as np
import h5py
import os
import sys
#from pesummary.io import read
#import glob
PACKAGE_PARENT = '..'
# Put this script's parent directory on sys.path so sibling modules
# (e.g. Globals) can be imported when the file is run directly.
# NOTE(review): os.path.expanduser(__file__) only expands a leading '~',
# which __file__ normally does not contain — confirm this is intended.
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
#import astropy.units as u
#from astropy.cosmology import Planck15
#from cosmology.cosmo import Cosmo
import Globals
class O3bData(LVCData):
    """Posterior-sample data for the O3b observing run (GWTC-3 catalog).

    Reads per-event posterior samples from the GWTC-3 PE data release
    (HDF5 files) and event metadata (SNRs, FARs) from a csv catalog.
    """
    def __init__(self, fname, suffix_name = 'nocosmo', which_metadata = 'GWOSC', **kwargs):#nObsUse=None, nSamplesUse=None, dist_unit=u.Gpc, events_use=None, which_spins='skip' ):
        # suffix_name selects which data-release files to read
        # (file names must contain it, see _name_conditions).
        self.suffix_name = suffix_name
        import pandas as pd
        self.post_file_extension='.h5'
        if which_metadata=='GWOSC':
            print('Using SNRS and far from the public version of the GWTC-3 catalog from the GWOSC')
            self.metadata = pd.read_csv(os.path.join(fname, 'GWTC-3-confident.csv'))
        else:
            print('Using best SNRS and far from all pipelines as reported in the GWTC-3 catalog paper')
            self.metadata = pd.read_csv(os.path.join(Globals.dataPath, 'all_metadata_pipelines_best.csv'))
        LVCData.__init__(self, fname, **kwargs)
    def _set_Tobs(self):
        # O3b duration in years (147.083 days), derived from the GPS times below.
        self.Tobs= 147.083/365. # difference between the two GPS times below, converted to years
        # O3b dates: 1st November 2019 15:00 UTC (GPS 1256655618) to 27th March 2020 17:00 UTC (GPS 1269363618)
        # 147.083
        # 148 days in total
        # 142.0 days with at least one detector for O3b
        # second half of the third observing run (O3b) between 1 November 2019, 15:00 UTC and 27 March 2020, 17:00 UTC
        # for 96.6% of the time (142.0 days) at least one interferometer was observing,
        # while for 85.3% (125.5 days) at least two interferometers were observing
    def _get_not_BBHs(self):
        """Names of events excluded from the BBH sample."""
        return ['GW200115_042309','GW200105_162426', 'GW191219_163120', 'GW200210_092254', 'GW200210_092255', 'GW190917_114630' ]
        # events in this line have secondary mass compatible with NS
        #+['GW190413_05954', 'GW190426_152155', 'GW190719_215514', 'GW190725_174728', 'GW190731_140936', 'GW190805_211137', 'GW190917_114630', 'GW191103_012549', 'GW200216_220804' ]
        # events in the second list are those with ifar>=1yr, table 1 of 2111.03634
    def _name_conditions(self, f ):
        """Keep only release files whose name contains the configured suffix."""
        return self.suffix_name in f
    def _get_name_from_fname(self, fname):
        """Extract the event name (e.g. 'GW191103_012549') from a release file name."""
        return ('_').join(fname.split('-')[-1].split('_')[:2] )
    def _load_data_event(self, fname, event, nSamplesUse, which_spins='skip'):
        """Read m1z, m2z, dL (and optionally spin) posterior samples for one event.

        Returns (m1z, m2z, dL, spins, w); w falls back to np.ones(1) when the
        'weights_bin' field is absent.  nSamplesUse is currently unused
        because the downsampling code below is commented out.
        """
        ### Using pesummary read function
        #data = read(os.path.join(fname, event+self.post_file_extension))
        #samples_dict = data.samples_dict
        #posterior_samples = samples_dict['PublicationSamples']
        #m1z, m2z, dL, chieff = posterior_samples['mass_1'], posterior_samples['mass_2'], posterior_samples['luminosity_distance'], posterior_samples['chi_eff']
        # By hand:
        data_path = os.path.join(fname, 'IGWN-GWTC3p0-v1-'+event+'_PEDataRelease_mixed_'+self.suffix_name+self.post_file_extension)
        with h5py.File(data_path, 'r') as f:
            dataset = f['C01:IMRPhenomXPHM']
            posterior_samples = dataset['posterior_samples']
            # NOTE(review): field access on the structured dataset appears to
            # materialize numpy arrays, keeping them valid after the file
            # closes — confirm.
            m1z = posterior_samples['mass_1']
            m2z = posterior_samples['mass_2']
            dL = posterior_samples['luminosity_distance']
            try:
                # Per-sample weights; not present in every release file.
                w = posterior_samples['weights_bin']
            except Exception as e:
                print(e)
                w = np.ones(1)
            if which_spins=='skip':
                spins=[]
            elif which_spins=='chiEff':
                chieff = posterior_samples['chi_eff']
                chiP = posterior_samples['chi_p']
                spins = [chieff, chiP]
            else:
                raise NotImplementedError()
        # Downsample if needed
        #all_ds = self._downsample( [m1z, m2z, dL, w, *spins], nSamplesUse)
        #m1z = all_ds[0]
        #m2z= all_ds[1]
        #dL = all_ds[2]
        #spins = all_ds[4:]
        #w = all_ds[3]
        return np.squeeze(m1z), np.squeeze(m2z), np.squeeze(dL), [np.squeeze(s) for s in spins], w
class O3bInjectionsData(O3InjectionsData):
    """Injection data for the O3b observing run (fixed observation time)."""

    def __init__(self, fname, **kwargs):
        # Duration of O3b in years (147.083 days).
        self.Tobs = 147.083 / 365.
        print('Obs time: %s yrs' % self.Tobs)
        O3InjectionsData.__init__(self, fname, **kwargs)
| [
"os.path.expanduser",
"h5py.File",
"os.getcwd",
"numpy.ones",
"numpy.squeeze",
"os.path.join"
] | [((508, 548), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'PACKAGE_PARENT'], {}), '(SCRIPT_DIR, PACKAGE_PARENT)\n', (520, 548), False, 'import os\n'), ((3382, 3505), 'os.path.join', 'os.path.join', (['fname', "('IGWN-GWTC3p0-v1-' + event + '_PEDataRelease_mixed_' + self.suffix_name +\n self.post_file_extension)"], {}), "(fname, 'IGWN-GWTC3p0-v1-' + event + '_PEDataRelease_mixed_' +\n self.suffix_name + self.post_file_extension)\n", (3394, 3505), False, 'import os\n'), ((430, 441), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (439, 441), False, 'import os\n'), ((443, 471), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((3508, 3533), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (3517, 3533), False, 'import h5py\n'), ((4550, 4565), 'numpy.squeeze', 'np.squeeze', (['m1z'], {}), '(m1z)\n', (4560, 4565), True, 'import numpy as np\n'), ((4567, 4582), 'numpy.squeeze', 'np.squeeze', (['m2z'], {}), '(m2z)\n', (4577, 4582), True, 'import numpy as np\n'), ((4584, 4598), 'numpy.squeeze', 'np.squeeze', (['dL'], {}), '(dL)\n', (4594, 4598), True, 'import numpy as np\n'), ((1179, 1222), 'os.path.join', 'os.path.join', (['fname', '"""GWTC-3-confident.csv"""'], {}), "(fname, 'GWTC-3-confident.csv')\n", (1191, 1222), False, 'import os\n'), ((1382, 1447), 'os.path.join', 'os.path.join', (['Globals.dataPath', '"""all_metadata_pipelines_best.csv"""'], {}), "(Globals.dataPath, 'all_metadata_pipelines_best.csv')\n", (1394, 1447), False, 'import os\n'), ((4601, 4614), 'numpy.squeeze', 'np.squeeze', (['s'], {}), '(s)\n', (4611, 4614), True, 'import numpy as np\n'), ((3959, 3969), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (3966, 3969), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# /usr/bin/python2
from __future__ import print_function
import argparse
from models import Net2
import numpy as np
from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db
import datetime
import tensorflow as tf
from hparam import hparam as hp
from data_load import Net2DataFlow
from tensorpack.predict.base import OfflinePredictor
from tensorpack.predict.config import PredictConfig
from tensorpack.tfutils.sessinit import SaverRestore
from tensorpack.tfutils.sessinit import ChainInit
from tensorpack.callbacks.base import Callback
# class ConvertCallback(Callback):
# def __init__(self, logdir, test_per_epoch=1):
# self.df = Net2DataFlow(hp.convert.data_path, hp.convert.batch_size)
# self.logdir = logdir
# self.test_per_epoch = test_per_epoch
#
# def _setup_graph(self):
# self.predictor = self.trainer.get_predictor(
# get_eval_input_names(),
# get_eval_output_names())
#
# def _trigger_epoch(self):
# if self.epoch_num % self.test_per_epoch == 0:
# audio, y_audio, _ = convert(self.predictor, self.df)
# # self.trainer.monitors.put_scalar('eval/accuracy', acc)
#
# # Write the result
# # tf.summary.audio('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size)
# # tf.summary.audio('B', audio, hp.default.sr, max_outputs=hp.convert.batch_size)
def convert(pred_spec, y_spec, ppgs):
    """Reconstruct waveforms from normalized dB spectrograms.

    Returns (converted audio, target audio, ppgs); ppgs is passed through
    unchanged.
    """
    # Undo the dB normalization.
    pred_spec = denormalize_db(pred_spec, hp.default.max_db, hp.default.min_db)
    y_spec = denormalize_db(y_spec, hp.default.max_db, hp.default.min_db)
    # dB back to amplitude.
    pred_spec = db2amp(pred_spec)
    y_spec = db2amp(y_spec)
    # Emphasize the magnitude.
    pred_spec = np.power(pred_spec, hp.convert.emphasis_magnitude)
    y_spec = np.power(y_spec, hp.convert.emphasis_magnitude)

    def _to_wav(spec):
        # Griffin-Lim style reconstruction of one spectrogram.
        return spec2wav(spec.T, hp.default.n_fft, hp.default.win_length,
                        hp.default.hop_length, hp.default.n_iter)

    audio = np.array([_to_wav(s) for s in pred_spec])
    y_audio = np.array([_to_wav(s) for s in y_spec])
    # Undo pre-emphasis.
    audio = inv_preemphasis(audio, coeff=hp.default.preemphasis)
    y_audio = inv_preemphasis(y_audio, coeff=hp.default.preemphasis)
    # if hp.convert.one_full_wav:
    #     # Concatenate to a wav
    #     y_audio = np.reshape(y_audio, (1, y_audio.size), order='C')
    #     audio = np.reshape(audio, (1, audio.size), order='C')
    return audio, y_audio, ppgs
def get_eval_input_names():
    """Names of the tensors fed to the evaluation predictor."""
    return ['x_mfccs', 'y_spec', 'y_mel']
def get_eval_output_names():
    """Names of the tensors fetched from the evaluation predictor."""
    return ['pred_spec', 'y_spec', 'ppgs']
def do_convert(args, logdir1, logdir2):
    """Restore the Net1/Net2 checkpoints, convert one batch, and write
    audio/PPG summaries to TensorBoard under *logdir2*."""
    # Load graph
    model = Net2()
    df = Net2DataFlow(hp.convert.data_path, hp.convert.batch_size)
    ckpt1 = tf.train.latest_checkpoint(logdir1)
    ckpt2 = '{}/{}'.format(logdir2, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    session_inits = []
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    if ckpt1:
        # Net1 checkpoint: restore everything except the global step.
        session_inits.append(SaverRestore(ckpt1, ignore=['global_step']))
    pred_conf = PredictConfig(
        model=model,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names(),
        session_init=ChainInit(session_inits))
    predictor = OfflinePredictor(pred_conf)

    pr, y_s, pp = next(df().get_data())
    pred_spec, y_spec, ppgs = predictor(pr, y_s, pp)
    # BUG FIX: convert() takes (pred_spec, y_spec, ppgs); the old call also
    # passed predictor and df, which raised a TypeError.
    audio, y_audio, ppgs = convert(pred_spec, y_spec, ppgs)

    # Write the result
    tf.summary.audio('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size)
    tf.summary.audio('B', audio, hp.default.sr, max_outputs=hp.convert.batch_size)

    # Visualize PPGs
    heatmap = np.expand_dims(ppgs, 3)  # channel=1
    tf.summary.image('PPG', heatmap, max_outputs=ppgs.shape[0])

    writer = tf.summary.FileWriter(logdir2)
    with tf.Session() as sess:
        summ = sess.run(tf.summary.merge_all())
        writer.add_summary(summ)
    writer.close()
# session_conf = tf.ConfigProto(
# allow_soft_placement=True,
# device_count={'CPU': 1, 'GPU': 0},
# gpu_options=tf.GPUOptions(
# allow_growth=True,
# per_process_gpu_memory_fraction=0.6
# ),
# )
def get_arguments():
    """Parse the command line: two experiment case names and an optional checkpoint."""
    parser = argparse.ArgumentParser()
    parser.add_argument('case1', type=str, help='experiment case name of train1')
    parser.add_argument('case2', type=str, help='experiment case name of train2')
    parser.add_argument('-ckpt', help='checkpoint to load model.')
    return parser.parse_args()
if __name__ == '__main__':
    args = get_arguments()
    hp.set_hparam_yaml(args.case2)
    # Per-case log directories for the two training stages.
    logdir_train1 = '{}/{}/train1'.format(hp.logdir_path, args.case1)
    logdir_train2 = '{}/{}/train2'.format(hp.logdir_path, args.case2)
    print('case1: {}, case2: {}, logdir1: {}, logdir2: {}'.format(args.case1, args.case2, logdir_train1, logdir_train2))
    # Time the whole conversion run.
    s = datetime.datetime.now()
    do_convert(args, logdir1=logdir_train1, logdir2=logdir_train2)
    e = datetime.datetime.now()
    diff = e - s
    print("Done. elapsed time:{}s".format(diff.seconds))
| [
"argparse.ArgumentParser",
"tensorflow.summary.audio",
"tensorflow.train.latest_checkpoint",
"audio.spec2wav",
"audio.inv_preemphasis",
"audio.denormalize_db",
"numpy.power",
"data_load.Net2DataFlow",
"tensorflow.summary.FileWriter",
"datetime.datetime.now",
"tensorflow.summary.merge_all",
"te... | [((1581, 1644), 'audio.denormalize_db', 'denormalize_db', (['pred_spec', 'hp.default.max_db', 'hp.default.min_db'], {}), '(pred_spec, hp.default.max_db, hp.default.min_db)\n', (1595, 1644), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((1658, 1718), 'audio.denormalize_db', 'denormalize_db', (['y_spec', 'hp.default.max_db', 'hp.default.min_db'], {}), '(y_spec, hp.default.max_db, hp.default.min_db)\n', (1672, 1718), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((1752, 1769), 'audio.db2amp', 'db2amp', (['pred_spec'], {}), '(pred_spec)\n', (1758, 1769), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((1783, 1797), 'audio.db2amp', 'db2amp', (['y_spec'], {}), '(y_spec)\n', (1789, 1797), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((1845, 1895), 'numpy.power', 'np.power', (['pred_spec', 'hp.convert.emphasis_magnitude'], {}), '(pred_spec, hp.convert.emphasis_magnitude)\n', (1853, 1895), True, 'import numpy as np\n'), ((1909, 1956), 'numpy.power', 'np.power', (['y_spec', 'hp.convert.emphasis_magnitude'], {}), '(y_spec, hp.convert.emphasis_magnitude)\n', (1917, 1956), True, 'import numpy as np\n'), ((2443, 2495), 'audio.inv_preemphasis', 'inv_preemphasis', (['audio'], {'coeff': 'hp.default.preemphasis'}), '(audio, coeff=hp.default.preemphasis)\n', (2458, 2495), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((2510, 2564), 'audio.inv_preemphasis', 'inv_preemphasis', (['y_audio'], {'coeff': 'hp.default.preemphasis'}), '(y_audio, coeff=hp.default.preemphasis)\n', (2525, 2564), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((3017, 3023), 'models.Net2', 'Net2', ([], {}), '()\n', (3021, 3023), False, 'from models import Net2\n'), ((3034, 3091), 'data_load.Net2DataFlow', 'Net2DataFlow', (['hp.convert.data_path', 'hp.convert.batch_size'], {}), 
'(hp.convert.data_path, hp.convert.batch_size)\n', (3046, 3091), False, 'from data_load import Net2DataFlow\n'), ((3105, 3140), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['logdir1'], {}), '(logdir1)\n', (3131, 3140), True, 'import tensorflow as tf\n'), ((3622, 3649), 'tensorpack.predict.base.OfflinePredictor', 'OfflinePredictor', (['pred_conf'], {}), '(pred_conf)\n', (3638, 3649), False, 'from tensorpack.predict.base import OfflinePredictor\n'), ((3898, 3983), 'tensorflow.summary.audio', 'tf.summary.audio', (['"""A"""', 'y_audio', 'hp.default.sr'], {'max_outputs': 'hp.convert.batch_size'}), "('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size\n )\n", (3914, 3983), True, 'import tensorflow as tf\n'), ((3983, 4061), 'tensorflow.summary.audio', 'tf.summary.audio', (['"""B"""', 'audio', 'hp.default.sr'], {'max_outputs': 'hp.convert.batch_size'}), "('B', audio, hp.default.sr, max_outputs=hp.convert.batch_size)\n", (3999, 4061), True, 'import tensorflow as tf\n'), ((4098, 4121), 'numpy.expand_dims', 'np.expand_dims', (['ppgs', '(3)'], {}), '(ppgs, 3)\n', (4112, 4121), True, 'import numpy as np\n'), ((4139, 4198), 'tensorflow.summary.image', 'tf.summary.image', (['"""PPG"""', 'heatmap'], {'max_outputs': 'ppgs.shape[0]'}), "('PPG', heatmap, max_outputs=ppgs.shape[0])\n", (4155, 4198), True, 'import tensorflow as tf\n'), ((4213, 4243), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir2'], {}), '(logdir2)\n', (4234, 4243), True, 'import tensorflow as tf\n'), ((4668, 4693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4691, 4693), False, 'import argparse\n'), ((5042, 5072), 'hparam.hparam.set_hparam_yaml', 'hp.set_hparam_yaml', (['args.case2'], {}), '(args.case2)\n', (5060, 5072), True, 'from hparam import hparam as hp\n'), ((5344, 5367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5365, 5367), False, 'import datetime\n'), ((5445, 5468), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (5466, 5468), False, 'import datetime\n'), ((3206, 3241), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['logdir2'], {}), '(logdir2)\n', (3232, 3241), True, 'import tensorflow as tf\n'), ((4253, 4265), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4263, 4265), True, 'import tensorflow as tf\n'), ((3308, 3327), 'tensorpack.tfutils.sessinit.SaverRestore', 'SaverRestore', (['ckpt2'], {}), '(ckpt2)\n', (3320, 3327), False, 'from tensorpack.tfutils.sessinit import SaverRestore\n'), ((3372, 3415), 'tensorpack.tfutils.sessinit.SaverRestore', 'SaverRestore', (['ckpt1'], {'ignore': "['global_step']"}), "(ckpt1, ignore=['global_step'])\n", (3384, 3415), False, 'from tensorpack.tfutils.sessinit import SaverRestore\n'), ((3580, 3604), 'tensorpack.tfutils.sessinit.ChainInit', 'ChainInit', (['session_inits'], {}), '(session_inits)\n', (3589, 3604), False, 'from tensorpack.tfutils.sessinit import ChainInit\n'), ((4299, 4321), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4319, 4321), True, 'import tensorflow as tf\n'), ((2031, 2135), 'audio.spec2wav', 'spec2wav', (['spec.T', 'hp.default.n_fft', 'hp.default.win_length', 'hp.default.hop_length', 'hp.default.n_iter'], {}), '(spec.T, hp.default.n_fft, hp.default.win_length, hp.default.\n hop_length, hp.default.n_iter)\n', (2039, 2135), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n'), ((2237, 2341), 'audio.spec2wav', 'spec2wav', (['spec.T', 'hp.default.n_fft', 'hp.default.win_length', 'hp.default.hop_length', 'hp.default.n_iter'], {}), '(spec.T, hp.default.n_fft, hp.default.win_length, hp.default.\n hop_length, hp.default.n_iter)\n', (2245, 2341), False, 'from audio import spec2wav, inv_preemphasis, db2amp, denormalize_db\n')] |
# Copyright (C) 2021-present CompatibL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import attr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from util.file_util import FileUtil
from util.plot_util import PlotUtil
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
@attr.s(slots=True, auto_attribs=True)
class ShortRateDnn:
"""
Deep neural network for performing regression as a function
of short rate only.
"""
input_file: str = attr.ib(default=None, kw_only=True)
"""
Determines the file from which the data is taken.
The file name format is {caller_name}.{feature}.csv
The plot includes every column for every feature.
"""
learning_rate: float = attr.ib(default=None, kw_only=True)
"""
Learning rate used for the optimizer.
"""
skip_samples: int = attr.ib(default=None, kw_only=True)
"""
Optional number of samples to skip in input_file.
"""
take_samples: int = attr.ib(default=None, kw_only=True)
"""
Optional number of samples to take in input_file,
after skipping skip_samples.
"""
__input_dataset: pd.DataFrame = attr.ib(default=None, kw_only=True)
"""
Complete input dataset (without applying skip_samples and take_samples).
This object is only used for comparison.
"""
__train_dataset: pd.DataFrame = attr.ib(default=None, kw_only=True)
"""
Train dataset for comparison to model results.
"""
__model: keras.Sequential = attr.ib(default=None, kw_only=True)
"""
TF model object.
"""
__short_rate_feature = "short_rate(t)"
"""Regression is performed with respect to this feature."""
__lag_short_rate_feature = "short_rate(t+5y)"
"""Regression is performed to find mean of this feature."""
__use_mathplotlib = True
"""Whether to use mathplotlib plots."""
def train_model(self, *, caller_file: str):
"""
Perform model training on data from input_file.
Pass __file__ variable of the caller script as caller_file
parameter. It will be used as output file prefix.
"""
# Make numpy printouts easier to read
np.set_printoptions(precision=3, suppress=True)
# Set random seed for both Python and TF
# to make the results reproducible
seed = 0
np.random.RandomState(seed)
tf.random.set_seed(seed)
# Input file has the same name as the caller script
# and csv extension, unless specified otherwise.
if self.input_file is None:
input_file = f"{FileUtil.get_caller_name(caller_file=caller_file)}.csv"
else:
input_file = self.input_file
# Read the dataset
self.__input_dataset = pd.read_csv(input_file)
# Skip the specified number of samples
if self.skip_samples is not None:
self.__input_dataset = self.__input_dataset.tail(self.skip_samples)
# Then take the specified number of samples
if self.take_samples is not None:
self.__input_dataset = self.__input_dataset.head(self.take_samples)
# Convert LOCATION column to one-hot encoding to avoid
# model bias due to the currency order in sample.
self.__train_dataset = self.__input_dataset.copy()
# Split features from labels
# Remove target series from train dataset and save it to a separate variable
target_series = self.__train_dataset.pop(self.__lag_short_rate_feature)
# Create a normalizer layer and adapt it to data
short_rate = np.array(self.__train_dataset[self.__short_rate_feature])
normalizer = preprocessing.Normalization(input_shape=[1, ])
normalizer.adapt(short_rate)
# Create DNN model (not yet deep in this example)
self.__model = keras.Sequential([
normalizer,
layers.Dense(64, activation='sigmoid'),
layers.Dense(1)
])
# Compile the model
self.__model.compile(
loss='mean_squared_error',
optimizer=tf.keras.optimizers.Adam(self.learning_rate)
)
# Print model summary
print(self.__model.summary())
# Perform training and save training history in a variable
# Fit is performed by using validation_split fraction of the data
# to train and the remaining data to minimize.
training_history = self.__model.fit(
self.__train_dataset[self.__short_rate_feature],
target_series,
validation_split=0.5,
verbose=0,
epochs=100)
def run_model(self, *, caller_file: str) -> None:
"""
Run model on the specified data.
Pass __file__ variable of the caller script as caller_file
parameter. It will be used as output file prefix.
"""
short_rate_grid = tf.linspace(-5, 25, 31, True)
test_predictions = self.__model.predict(short_rate_grid).flatten()
# Data only for all countries
all_args = self.__input_dataset[self.__short_rate_feature]
all_values = self.__input_dataset[self.__lag_short_rate_feature]
# Plot where predictions are compared to all of the data
skip_label = f"skip={self.skip_samples}, " if self.skip_samples is not None else ""
take_label = f"skip={self.take_samples}" if self.take_samples is not None else ""
PlotUtil.plot_scatter(x_values=all_args,
y_values=all_values,
scatter_label='data',
line_grid=short_rate_grid,
line_values=test_predictions,
line_label='regression',
title=f"all({skip_label}, {take_label})",
x_lable=self.__short_rate_feature,
y_lable=self.__lag_short_rate_feature)
if self.__use_mathplotlib:
plt.scatter(all_args, all_values, label='data')
plt.plot(short_rate_grid, test_predictions, color='k', label='regression')
plt.xlabel(self.__short_rate_feature)
plt.ylabel(self.__lag_short_rate_feature)
plt.title(f"all({skip_label}, {take_label})")
plt.ylim([-2.5, 15])
plt.xlim([-5, 25])
plt.legend()
plt.show() | [
"tensorflow.random.set_seed",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"attr.s",
"pandas.read_csv",
"numpy.set_printoptions",
"numpy.random.RandomState",
"tensorflow.keras.optimizers.Adam",
"util.file_util.FileUtil.get_caller_name",
"matplotlib.pyplot.show",
"matplotlib.pyplot... | [((946, 983), 'attr.s', 'attr.s', ([], {'slots': '(True)', 'auto_attribs': '(True)'}), '(slots=True, auto_attribs=True)\n', (952, 983), False, 'import attr\n'), ((1131, 1166), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (1138, 1166), False, 'import attr\n'), ((1376, 1411), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (1383, 1411), False, 'import attr\n'), ((1495, 1530), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (1502, 1530), False, 'import attr\n'), ((1626, 1661), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (1633, 1661), False, 'import attr\n'), ((1802, 1837), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (1809, 1837), False, 'import attr\n'), ((2018, 2053), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (2025, 2053), False, 'import attr\n'), ((2154, 2189), 'attr.ib', 'attr.ib', ([], {'default': 'None', 'kw_only': '(True)'}), '(default=None, kw_only=True)\n', (2161, 2189), False, 'import attr\n'), ((2834, 2881), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (2853, 2881), True, 'import numpy as np\n'), ((3000, 3027), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3021, 3027), True, 'import numpy as np\n'), ((3036, 3060), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (3054, 3060), True, 'import tensorflow as tf\n'), ((3413, 3436), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (3424, 3436), True, 'import pandas as pd\n'), ((4246, 4303), 'numpy.array', 'np.array', 
(['self.__train_dataset[self.__short_rate_feature]'], {}), '(self.__train_dataset[self.__short_rate_feature])\n', (4254, 4303), True, 'import numpy as np\n'), ((4325, 4369), 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'preprocessing.Normalization', ([], {'input_shape': '[1]'}), '(input_shape=[1])\n', (4352, 4369), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), ((5553, 5582), 'tensorflow.linspace', 'tf.linspace', (['(-5)', '(25)', '(31)', '(True)'], {}), '(-5, 25, 31, True)\n', (5564, 5582), True, 'import tensorflow as tf\n'), ((6094, 6388), 'util.plot_util.PlotUtil.plot_scatter', 'PlotUtil.plot_scatter', ([], {'x_values': 'all_args', 'y_values': 'all_values', 'scatter_label': '"""data"""', 'line_grid': 'short_rate_grid', 'line_values': 'test_predictions', 'line_label': '"""regression"""', 'title': 'f"""all({skip_label}, {take_label})"""', 'x_lable': 'self.__short_rate_feature', 'y_lable': 'self.__lag_short_rate_feature'}), "(x_values=all_args, y_values=all_values, scatter_label\n ='data', line_grid=short_rate_grid, line_values=test_predictions,\n line_label='regression', title=f'all({skip_label}, {take_label})',\n x_lable=self.__short_rate_feature, y_lable=self.__lag_short_rate_feature)\n", (6115, 6388), False, 'from util.plot_util import PlotUtil\n'), ((6664, 6711), 'matplotlib.pyplot.scatter', 'plt.scatter', (['all_args', 'all_values'], {'label': '"""data"""'}), "(all_args, all_values, label='data')\n", (6675, 6711), True, 'import matplotlib.pyplot as plt\n'), ((6724, 6798), 'matplotlib.pyplot.plot', 'plt.plot', (['short_rate_grid', 'test_predictions'], {'color': '"""k"""', 'label': '"""regression"""'}), "(short_rate_grid, test_predictions, color='k', label='regression')\n", (6732, 6798), True, 'import matplotlib.pyplot as plt\n'), ((6811, 6848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['self.__short_rate_feature'], {}), '(self.__short_rate_feature)\n', (6821, 6848), True, 'import matplotlib.pyplot as 
plt\n'), ((6861, 6902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.__lag_short_rate_feature'], {}), '(self.__lag_short_rate_feature)\n', (6871, 6902), True, 'import matplotlib.pyplot as plt\n'), ((6915, 6960), 'matplotlib.pyplot.title', 'plt.title', (['f"""all({skip_label}, {take_label})"""'], {}), "(f'all({skip_label}, {take_label})')\n", (6924, 6960), True, 'import matplotlib.pyplot as plt\n'), ((6973, 6993), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2.5, 15]'], {}), '([-2.5, 15])\n', (6981, 6993), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7024), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-5, 25]'], {}), '([-5, 25])\n', (7014, 7024), True, 'import matplotlib.pyplot as plt\n'), ((7037, 7049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7047, 7049), True, 'import matplotlib.pyplot as plt\n'), ((7062, 7072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7070, 7072), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4584), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""sigmoid"""'}), "(64, activation='sigmoid')\n", (4558, 4584), False, 'from tensorflow.keras import layers\n'), ((4598, 4613), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4610, 4613), False, 'from tensorflow.keras import layers\n'), ((4745, 4789), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (4769, 4789), True, 'import tensorflow as tf\n'), ((3243, 3292), 'util.file_util.FileUtil.get_caller_name', 'FileUtil.get_caller_name', ([], {'caller_file': 'caller_file'}), '(caller_file=caller_file)\n', (3267, 3292), False, 'from util.file_util import FileUtil\n')] |
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import force_fp32, auto_fp16
from mmcv.cnn import ConvModule, build_upsample_layer
from mmdet.core import bbox_rescale
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.roi_heads.mask_heads.fcn_mask_head import (
_do_paste_mask, BYTES_PER_FLOAT, GPU_MEM_LIMIT)
from pcan.core import cal_similarity
def gen_pos_emb(x, temperature=10000, scale=2 * math.pi, normalize=False):
"""
This is a more standard version of the position embedding, very similar to
the one used by the Attention is all you need paper, generalized to work on
images.
"""
R, C, H, W = x.size()
mask = x.new_ones((R, H, W))
y_embed = mask.cumsum(1, dtype=torch.float32)
x_embed = mask.cumsum(2, dtype=torch.float32)
if normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale
num_pos_feats = C // 2
assert num_pos_feats * 2 == C, (
'The input channel number must be an even number.')
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2).contiguous()
return pos
@HEADS.register_module
class HREMMatchHeadPlus(nn.Module):
"""HR means high-resolution. This version refine the mask projecting to the
1/4 or 1/8 size of the original input, instead of refining in the RoI level.
"""
def __init__(self,
num_feats=3,
num_convs=4,
in_channels=256,
conv_kernel_size=3,
conv_channels=128,
out_channels=8,
num_classes=80,
feat_stride=8,
out_stride=4,
pos_proto_num=4,
neg_proto_num=4,
stage_num=6,
with_mask_key=True,
with_both_feat=False,
with_pos_emb=False,
match_score_thr=0.5,
rect_scale_factor=1.5,
upsample_cfg=dict(type='deconv'),
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
type='DiceLoss',
loss_weight=1.0)):
super().__init__()
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_feats = num_feats
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_channels = conv_channels
self.out_channels = out_channels
self.feat_stride = feat_stride
self.out_stride = out_stride
self.num_classes = num_classes
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = feat_stride // out_stride
self.pos_proto_num = pos_proto_num
self.neg_proto_num = neg_proto_num
self.stage_num = stage_num
self.with_mask_key = with_mask_key
self.with_both_feat = with_both_feat
self.with_pos_emb = with_pos_emb
self.match_score_thr = match_score_thr
self.rect_scale_factor = rect_scale_factor
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.positioanl_embeddings = None
self.refines = nn.ModuleList()
for i in range(self.num_feats):
in_channels = self.in_channels
padding = (self.conv_kernel_size - 1) // 2
self.refines.append(
ConvModule(
self.in_channels, self.conv_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
padding = (self.conv_kernel_size - 1) // 2
self.conv1 = ConvModule(
self.conv_channels, self.out_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
self.conv2 = ConvModule(
self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, act_cfg=None)
self.conv3 = ConvModule(
3, self.out_channels, self.conv_kernel_size, padding=padding,
conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
self.conv4 = ConvModule(
self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, act_cfg=None)
self.convs = nn.ModuleList()
for i in range(self.num_convs):
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
self.out_channels, self.out_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=self.out_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
self.conv_logits = nn.Conv2d(self.out_channels, 1, 1)
self.relu = nn.ReLU(inplace=True)
self.init_protos(pos_proto_num, 'pos_mu')
self.init_protos(neg_proto_num, 'neg_mu')
def pos_emb(self, x):
if not self.with_pos_emb:
return 0.
if self.positioanl_embeddings is None:
self.positioanl_embeddings = gen_pos_emb(x, normalize=True)
return self.positioanl_embeddings
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
def match(self, key_embeds, ref_embeds, key_pids, ref_pids):
num_imgs = len(key_embeds)
valids, ref_inds = [], []
for i in range(num_imgs):
cos_dist = cal_similarity(
key_embeds[i], ref_embeds[i], method='cosine')
same_pids = key_pids[i][:, None] == ref_pids[i][None, :]
zeros = cos_dist.new_zeros(cos_dist.size())
scores = torch.where(same_pids, cos_dist, zeros)
conf, ref_ind = torch.max(scores, dim=1)
valid = conf > self.match_score_thr
ref_ind = ref_ind[valid]
valids.append(valid)
ref_inds.append(ref_ind)
return valids, ref_inds
def init_protos(self, proto_num, proto_name):
protos = torch.Tensor(1, self.conv_channels, proto_num)
protos.normal_(0, math.sqrt(2. / proto_num))
protos = self._l2norm(protos, dim=1)
self.register_buffer(proto_name, protos)
@auto_fp16()
def forward_feat(self, x):
start_lvl = int(math.log2(self.feat_stride // 4))
end_lvl = min(start_lvl + self.num_feats, len(x))
feats = [
refine(lvl)
for refine, lvl in zip(self.refines, x[start_lvl:end_lvl])]
for i in range(1, len(feats)):
feats[i] = F.interpolate(feats[i], size=feats[0].size()[-2:],
mode='bilinear')
feat = sum(feats)
# for conv in self.convs:
# feat = conv(feat)
return feat
@force_fp32(apply_to=('inp', ))
def _l1norm(self, inp, dim):
return inp / (1e-6 + inp.sum(dim=dim, keepdim=True))
@force_fp32(apply_to=('inp', ))
def _l2norm(self, inp, dim):
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
@force_fp32(apply_to=('feat', 'mask', 'mu'))
@torch.no_grad()
def _em_iter(self, feat, mask, mu):
# n = h * w
_, C = feat.size()[:2]
R, _ = mask.size()[:2]
pos_feat = feat + self.pos_emb(x=feat)
x = pos_feat.view(1, C, -1) # 1 * C * N
y = feat.view(1, C, -1) # 1 * C * N
m = mask.view(R, 1, -1) # R * 1 * N
mu = mu.repeat(R, 1, 1) # R * C * K
for i in range(self.stage_num):
z = torch.einsum('ocn,rck->rnk', (x, mu)) # R * N * K
z = F.softmax(z, dim=2) # R * N * K
z = torch.einsum('rnk,ron->rnk', (z, m)) # R * N * K
z = self._l1norm(z, dim=1) # R * N * K
mu = torch.einsum('ocn,rnk->rck', (x, z)) # R * C * K
mu = self._l2norm(mu, dim=1) # R * C * K
nu = torch.einsum('ocn,rnk->rck', (y, z)) # R * C * K
nu = self._l2norm(nu, dim=1) # R * C * K
return mu, nu
@force_fp32(apply_to=('feat', 'mu'))
def _prop(self, feat, mu):
R = mu.size(0)
_, C, H, W = feat.size()
pos_feat = feat + self.pos_emb(x=feat)
x = pos_feat.view(1, C, -1) # 1 * C * N
z = torch.einsum('rck,ocn->rkn', (mu, x)) # R * K * N
z = F.softmax(z, dim=1) # R * K * N
z = z.view(R, 2, -1, H, W).sum(dim=2) # R * 2 * H * W
return z
def em_match(self, feat_a, mask_a, rect_a, feat_b, mask_b, rect_b):
if not self.with_both_feat:
pos_mask, neg_mask = rect_a * mask_a, rect_a * (1 - mask_a)
pos_mu, pos_nu = self._em_iter(feat_a, pos_mask, self.pos_mu)
neg_mu, neg_nu = self._em_iter(feat_a, neg_mask, self.neg_mu)
else:
feat = torch.cat((feat_a, feat_b), dim=2)
mask = torch.cat((mask_a, mask_b), dim=2)
rect = torch.cat((rect_a, rect_b), dim=2)
pos_mask, neg_mask = rect * mask, rect * (1 - mask)
pos_mu, pos_nu = self._em_iter(feat, pos_mask, self.pos_mu)
neg_mu, neg_nu = self._em_iter(feat, neg_mask, self.neg_mu)
mu = torch.cat((pos_mu, neg_mu), dim=2)
z = self._prop(feat_b, mu)
R = mask_b.size(0)
pos_nu = pos_nu.permute(0, 2, 1).contiguous().view(R, -1, 1, 1)
return pos_nu, z
def compute_context(self, feat, mask, eps=1e-5):
_, C = feat.size()[:2]
R, _ = mask.size()[:2]
fore_feat = (feat * mask).view(R, C, -1).sum(dim=2)
fore_sum = mask.view(R, 1, -1).sum(dim=2)
fore_feat = fore_feat / (fore_sum + eps)
fore_feat = fore_feat.view(R, C, 1, 1)
return fore_feat
def gather_context(self, feat, mask, gap, z, pos_mu):
mask = torch.cat((mask, z), dim=1)
res = self.conv1(feat) + self.conv2(gap) + self.conv3(mask)
res = res + F.conv2d(pos_mu, self.conv4.conv.weight,
self.conv4.conv.bias, groups=self.pos_proto_num)
res = F.relu(res)
return res
@auto_fp16()
def forward(self, x_a, mask_a, rect_a, x_b, mask_b, rect_b):
assert len(mask_a) == len(mask_b) == x_a[0].size(0) == x_b[0].size(0)
feat_a = self.forward_feat(x_a)
feat_b = self.forward_feat(x_b)
B, C, H, W = feat_a.size()
feat_a = torch.chunk(feat_a, B, dim=0)
feat_b = torch.chunk(feat_b, B, dim=0)
xs = []
for i in range(B):
if len(mask_a[i]) == 0:
continue
m_a = mask_a[i].clone()
m_b = mask_b[i].clone()
mask_a[i] = mask_a[i].sigmoid()
mask_b[i] = mask_b[i].sigmoid()
# pos_mu: [R, K * C, 1, 1]
# pos_z: [R, 1, H, W]
pos_mu, z = self.em_match(feat_a[i], mask_a[i], rect_a[i],
feat_b[i], mask_b[i], rect_b[i])
# pos_feat: [R, C, 1, 1]
gap = self.compute_context(feat_a[i], mask_a[i])
# x: [R, C, H, W]
mask = m_b if self.with_mask_key else m_a
x = self.gather_context(feat_b[i], mask, gap, z, pos_mu)
xs.append(x)
x = torch.cat(xs, dim=0)
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, valids, gt_masks):
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds[valid]
for res, valid in zip(sampling_results, valids)]
mask_targets = map(self.get_target_single, pos_assigned_gt_inds,
gt_masks)
mask_targets = list(mask_targets)
if len(mask_targets) > 0:
mask_targets = torch.cat(mask_targets)
return mask_targets
def get_target_single(self, pos_assigned_gt_inds, gt_masks):
device = pos_assigned_gt_inds.device
num_pos = pos_assigned_gt_inds.size(0)
if num_pos > 0:
mask_targets = torch.from_numpy(gt_masks.to_ndarray()).float()
start = self.out_stride // 2
stride = self.out_stride
mask_targets = mask_targets[:, start::stride, start::stride]
mask_targets = mask_targets.to(device)[pos_assigned_gt_inds]
else:
mask_targets = pos_assigned_gt_inds.new_zeros((
0, gt_masks.height // self.out_stride,
gt_masks.width // self.out_stride))
return mask_targets
def get_seg_masks(self, mask_pred, det_labels, rcnn_test_cfg, ori_shape,
scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_labels (Tensor): shape (n, )
img_shape (Tensor): shape (3, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape: original image size
Returns:
list[list]: encoded masks
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_labels.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
segms = []
labels = det_labels
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = det_labels.new_tensor(scale_factor)
N = len(mask_pred)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
out_h, out_w = mask_pred.size()[-2:]
out_h = out_h * self.out_stride
out_w = out_w * self.out_stride
im_mask = torch.zeros(
N,
out_h,
out_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
for inds in chunks:
masks_chunk = _do_paste_mask_hr(
mask_pred[inds],
out_h,
out_w,
offset=0.)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[inds] = masks_chunk
for i in range(N):
segm = im_mask[i, :img_h, :img_w].cpu().numpy()
cls_segms[labels[i]].append(segm)
segms.append(segm)
return cls_segms, segms
def get_hr_masks(self, feat, mask_pred, det_bboxes, det_labels,
scale_factor):
"""Get high-resolution masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
feat_shape (Tensor): shape (3, )
Returns:
list[list]: encoded masks
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
bboxes = det_bboxes[:, :4]
labels = det_labels
level = int(math.log2(self.feat_stride // 4))
mask_h, mask_w = feat[level].size()[-2:]
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor / self.feat_stride
rects = bbox_rescale(bboxes, self.rect_scale_factor)
N = len(mask_pred)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when they are pasted one by one with
# skip_empty=True, so that it performs minimal number of
# operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
num_chunks = int(
np.ceil(N * mask_h * mask_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
im_mask = torch.zeros(
N,
1,
mask_h,
mask_w,
device=device,
dtype=torch.float32)
im_rect = torch.zeros(
N, 1, mask_h, mask_w, device=device, dtype=torch.float32)
mask_pred = mask_pred[range(N), labels][:, None]
rect_pred = mask_pred.new_ones(mask_pred.size())
if N == 0:
return im_mask, im_rect
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
mask_h,
mask_w,
skip_empty=device.type == 'cpu')
im_mask[(inds, 0) + spatial_inds] = masks_chunk
rects_chunk, spatial_inds = _do_paste_mask(
rect_pred[inds], rects[inds], mask_h, mask_w, skip_empty=False)
im_rect[(inds, 0) + spatial_inds] = rects_chunk
return im_mask, im_rect
def _do_paste_mask_hr(masks, img_h, img_w, offset):
"""Paste instance masks acoording to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
The whole image will be pasted. It will return a mask of shape
(N, img_h, img_w) and an empty tuple.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
N = masks.shape[0]
img_y = torch.arange(
y0_int, y1_int, device=device, dtype=torch.float32) + offset
img_x = torch.arange(
x0_int, x1_int, device=device, dtype=torch.float32) + offset
img_y = img_y / img_h * 2 - 1
img_x = img_x / img_w * 2 - 1
img_y = img_y.unsqueeze(dim=0).repeat(N, 1)
img_x = img_x.unsqueeze(dim=0).repeat(N, 1)
# img_x, img_y have shapes (N, w), (N, h)
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
return img_masks[:, 0] | [
"pcan.core.cal_similarity",
"mmdet.core.bbox_rescale",
"torch.cat",
"mmcv.cnn.build_upsample_layer",
"torch.nn.init.constant_",
"torch.arange",
"mmdet.models.roi_heads.mask_heads.fcn_mask_head._do_paste_mask",
"torch.no_grad",
"numpy.round",
"mmcv.cnn.ConvModule",
"torch.nn.init.kaiming_normal_"... | [((1161, 1226), 'torch.arange', 'torch.arange', (['num_pos_feats'], {'dtype': 'torch.float32', 'device': 'x.device'}), '(num_pos_feats, dtype=torch.float32, device=x.device)\n', (1173, 1226), False, 'import torch\n'), ((8406, 8417), 'mmcv.runner.auto_fp16', 'auto_fp16', ([], {}), '()\n', (8415, 8417), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((8964, 8993), 'mmcv.runner.force_fp32', 'force_fp32', ([], {'apply_to': "('inp',)"}), "(apply_to=('inp',))\n", (8974, 8993), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((9095, 9124), 'mmcv.runner.force_fp32', 'force_fp32', ([], {'apply_to': "('inp',)"}), "(apply_to=('inp',))\n", (9105, 9124), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((9227, 9270), 'mmcv.runner.force_fp32', 'force_fp32', ([], {'apply_to': "('feat', 'mask', 'mu')"}), "(apply_to=('feat', 'mask', 'mu'))\n", (9237, 9270), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((9276, 9291), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9289, 9291), False, 'import torch\n'), ((10442, 10477), 'mmcv.runner.force_fp32', 'force_fp32', ([], {'apply_to': "('feat', 'mu')"}), "(apply_to=('feat', 'mu'))\n", (10452, 10477), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((12566, 12577), 'mmcv.runner.auto_fp16', 'auto_fp16', ([], {}), '()\n', (12575, 12577), False, 'from mmcv.runner import force_fp32, auto_fp16\n'), ((22710, 22738), 'torch.stack', 'torch.stack', (['[gx, gy]'], {'dim': '(3)'}), '([gx, gy], dim=3)\n', (22721, 22738), False, 'import torch\n'), ((4127, 4148), 'mmdet.models.builder.build_loss', 'build_loss', (['loss_mask'], {}), '(loss_mask)\n', (4137, 4148), False, 'from mmdet.models.builder import HEADS, build_loss\n'), ((4215, 4230), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4228, 4230), True, 'import torch.nn as nn\n'), ((4660, 4805), 'mmcv.cnn.ConvModule', 'ConvModule', (['self.conv_channels', 'self.out_channels', 
'self.conv_kernel_size'], {'padding': 'padding', 'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg', 'act_cfg': 'None'}), '(self.conv_channels, self.out_channels, self.conv_kernel_size,\n padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)\n', (4670, 4805), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((4848, 4956), 'mmcv.cnn.ConvModule', 'ConvModule', (['self.conv_channels', 'self.out_channels', '(1)'], {'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg', 'act_cfg': 'None'}), '(self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,\n norm_cfg=norm_cfg, act_cfg=None)\n', (4858, 4956), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((4999, 5127), 'mmcv.cnn.ConvModule', 'ConvModule', (['(3)', 'self.out_channels', 'self.conv_kernel_size'], {'padding': 'padding', 'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg', 'act_cfg': 'None'}), '(3, self.out_channels, self.conv_kernel_size, padding=padding,\n conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)\n', (5009, 5127), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((5170, 5278), 'mmcv.cnn.ConvModule', 'ConvModule', (['self.conv_channels', 'self.out_channels', '(1)'], {'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg', 'act_cfg': 'None'}), '(self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,\n norm_cfg=norm_cfg, act_cfg=None)\n', (5180, 5278), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((5322, 5337), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5335, 5337), True, 'import torch.nn as nn\n'), ((6743, 6777), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.out_channels', '(1)', '(1)'], {}), '(self.out_channels, 1, 1)\n', (6752, 6777), True, 'import torch.nn as nn\n'), ((6798, 6819), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6805, 6819), True, 'import torch.nn as nn\n'), ((8206, 8252), 'torch.Tensor', 'torch.Tensor', (['(1)', 'self.conv_channels', 'proto_num'], {}), '(1, 
self.conv_channels, proto_num)\n', (8218, 8252), False, 'import torch\n'), ((10275, 10311), 'torch.einsum', 'torch.einsum', (['"""ocn,rnk->rck"""', '(y, z)'], {}), "('ocn,rnk->rck', (y, z))\n", (10287, 10311), False, 'import torch\n'), ((10700, 10737), 'torch.einsum', 'torch.einsum', (['"""rck,ocn->rkn"""', '(mu, x)'], {}), "('rck,ocn->rkn', (mu, x))\n", (10712, 10737), False, 'import torch\n'), ((10776, 10795), 'torch.nn.functional.softmax', 'F.softmax', (['z'], {'dim': '(1)'}), '(z, dim=1)\n', (10785, 10795), True, 'import torch.nn.functional as F\n'), ((11663, 11697), 'torch.cat', 'torch.cat', (['(pos_mu, neg_mu)'], {'dim': '(2)'}), '((pos_mu, neg_mu), dim=2)\n', (11672, 11697), False, 'import torch\n'), ((12280, 12307), 'torch.cat', 'torch.cat', (['(mask, z)'], {'dim': '(1)'}), '((mask, z), dim=1)\n', (12289, 12307), False, 'import torch\n'), ((12529, 12540), 'torch.nn.functional.relu', 'F.relu', (['res'], {}), '(res)\n', (12535, 12540), True, 'import torch.nn.functional as F\n'), ((12855, 12884), 'torch.chunk', 'torch.chunk', (['feat_a', 'B'], {'dim': '(0)'}), '(feat_a, B, dim=0)\n', (12866, 12884), False, 'import torch\n'), ((12902, 12931), 'torch.chunk', 'torch.chunk', (['feat_b', 'B'], {'dim': '(0)'}), '(feat_b, B, dim=0)\n', (12913, 12931), False, 'import torch\n'), ((13701, 13721), 'torch.cat', 'torch.cat', (['xs'], {'dim': '(0)'}), '(xs, dim=0)\n', (13710, 13721), False, 'import torch\n'), ((17203, 17303), 'torch.zeros', 'torch.zeros', (['N', 'out_h', 'out_w'], {'device': 'device', 'dtype': '(torch.bool if threshold >= 0 else torch.uint8)'}), '(N, out_h, out_w, device=device, dtype=torch.bool if threshold >=\n 0 else torch.uint8)\n', (17214, 17303), False, 'import torch\n'), ((19224, 19268), 'mmdet.core.bbox_rescale', 'bbox_rescale', (['bboxes', 'self.rect_scale_factor'], {}), '(bboxes, self.rect_scale_factor)\n', (19236, 19268), False, 'from mmdet.core import bbox_rescale\n'), ((19991, 20060), 'torch.zeros', 'torch.zeros', (['N', '(1)', 'mask_h', 
'mask_w'], {'device': 'device', 'dtype': 'torch.float32'}), '(N, 1, mask_h, mask_w, device=device, dtype=torch.float32)\n', (20002, 20060), False, 'import torch\n'), ((20152, 20221), 'torch.zeros', 'torch.zeros', (['N', '(1)', 'mask_h', 'mask_w'], {'device': 'device', 'dtype': 'torch.float32'}), '(N, 1, mask_h, mask_w, device=device, dtype=torch.float32)\n', (20163, 20221), False, 'import torch\n'), ((21968, 22032), 'torch.arange', 'torch.arange', (['y0_int', 'y1_int'], {'device': 'device', 'dtype': 'torch.float32'}), '(y0_int, y1_int, device=device, dtype=torch.float32)\n', (21980, 22032), False, 'import torch\n'), ((22063, 22127), 'torch.arange', 'torch.arange', (['x0_int', 'x1_int'], {'device': 'device', 'dtype': 'torch.float32'}), '(x0_int, x1_int, device=device, dtype=torch.float32)\n', (22075, 22127), False, 'import torch\n'), ((7309, 7379), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (7332, 7379), True, 'import torch.nn as nn\n'), ((7409, 7437), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (7426, 7437), True, 'import torch.nn as nn\n'), ((7631, 7692), 'pcan.core.cal_similarity', 'cal_similarity', (['key_embeds[i]', 'ref_embeds[i]'], {'method': '"""cosine"""'}), "(key_embeds[i], ref_embeds[i], method='cosine')\n", (7645, 7692), False, 'from pcan.core import cal_similarity\n'), ((7856, 7895), 'torch.where', 'torch.where', (['same_pids', 'cos_dist', 'zeros'], {}), '(same_pids, cos_dist, zeros)\n', (7867, 7895), False, 'import torch\n'), ((7925, 7949), 'torch.max', 'torch.max', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (7934, 7949), False, 'import torch\n'), ((8279, 8305), 'math.sqrt', 'math.sqrt', (['(2.0 / proto_num)'], {}), '(2.0 / proto_num)\n', (8288, 8305), False, 'import math\n'), ((8473, 8505), 'math.log2', 'math.log2', (['(self.feat_stride // 4)'], {}), 
'(self.feat_stride // 4)\n', (8482, 8505), False, 'import math\n'), ((9822, 9859), 'torch.einsum', 'torch.einsum', (['"""ocn,rck->rnk"""', '(x, mu)'], {}), "('ocn,rck->rnk', (x, mu))\n", (9834, 9859), False, 'import torch\n'), ((9898, 9917), 'torch.nn.functional.softmax', 'F.softmax', (['z'], {'dim': '(2)'}), '(z, dim=2)\n', (9907, 9917), True, 'import torch.nn.functional as F\n'), ((9974, 10010), 'torch.einsum', 'torch.einsum', (['"""rnk,ron->rnk"""', '(z, m)'], {}), "('rnk,ron->rnk', (z, m))\n", (9986, 10010), False, 'import torch\n'), ((10127, 10163), 'torch.einsum', 'torch.einsum', (['"""ocn,rnk->rck"""', '(x, z)'], {}), "('ocn,rnk->rck', (x, z))\n", (10139, 10163), False, 'import torch\n'), ((11299, 11333), 'torch.cat', 'torch.cat', (['(feat_a, feat_b)'], {'dim': '(2)'}), '((feat_a, feat_b), dim=2)\n', (11308, 11333), False, 'import torch\n'), ((11353, 11387), 'torch.cat', 'torch.cat', (['(mask_a, mask_b)'], {'dim': '(2)'}), '((mask_a, mask_b), dim=2)\n', (11362, 11387), False, 'import torch\n'), ((11407, 11441), 'torch.cat', 'torch.cat', (['(rect_a, rect_b)'], {'dim': '(2)'}), '((rect_a, rect_b), dim=2)\n', (11416, 11441), False, 'import torch\n'), ((12396, 12490), 'torch.nn.functional.conv2d', 'F.conv2d', (['pos_mu', 'self.conv4.conv.weight', 'self.conv4.conv.bias'], {'groups': 'self.pos_proto_num'}), '(pos_mu, self.conv4.conv.weight, self.conv4.conv.bias, groups=self.\n pos_proto_num)\n', (12404, 12490), True, 'import torch.nn.functional as F\n'), ((14411, 14434), 'torch.cat', 'torch.cat', (['mask_targets'], {}), '(mask_targets)\n', (14420, 14434), False, 'import torch\n'), ((16768, 16828), 'numpy.ceil', 'np.ceil', (['(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)'], {}), '(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)\n', (16775, 16828), True, 'import numpy as np\n'), ((16965, 16995), 'torch.arange', 'torch.arange', (['N'], {'device': 'device'}), '(N, device=device)\n', (16977, 16995), False, 'import torch\n'), ((18943, 18975), 'math.log2', 
'math.log2', (['(self.feat_stride // 4)'], {}), '(self.feat_stride // 4)\n', (18952, 18975), False, 'import math\n'), ((20436, 20466), 'torch.arange', 'torch.arange', (['N'], {'device': 'device'}), '(N, device=device)\n', (20448, 20466), False, 'import torch\n'), ((20548, 20647), 'mmdet.models.roi_heads.mask_heads.fcn_mask_head._do_paste_mask', '_do_paste_mask', (['mask_pred[inds]', 'bboxes[inds]', 'mask_h', 'mask_w'], {'skip_empty': "(device.type == 'cpu')"}), "(mask_pred[inds], bboxes[inds], mask_h, mask_w, skip_empty=\n device.type == 'cpu')\n", (20562, 20647), False, 'from mmdet.models.roi_heads.mask_heads.fcn_mask_head import _do_paste_mask, BYTES_PER_FLOAT, GPU_MEM_LIMIT\n'), ((20826, 20904), 'mmdet.models.roi_heads.mask_heads.fcn_mask_head._do_paste_mask', '_do_paste_mask', (['rect_pred[inds]', 'rects[inds]', 'mask_h', 'mask_w'], {'skip_empty': '(False)'}), '(rect_pred[inds], rects[inds], mask_h, mask_w, skip_empty=False)\n', (20840, 20904), False, 'from mmdet.models.roi_heads.mask_heads.fcn_mask_head import _do_paste_mask, BYTES_PER_FLOAT, GPU_MEM_LIMIT\n'), ((22363, 22381), 'torch.isinf', 'torch.isinf', (['img_x'], {}), '(img_x)\n', (22374, 22381), False, 'import torch\n'), ((22416, 22434), 'torch.isinf', 'torch.isinf', (['img_x'], {}), '(img_x)\n', (22427, 22434), False, 'import torch\n'), ((22467, 22485), 'torch.isinf', 'torch.isinf', (['img_y'], {}), '(img_y)\n', (22478, 22485), False, 'import torch\n'), ((22520, 22538), 'torch.isinf', 'torch.isinf', (['img_y'], {}), '(img_y)\n', (22531, 22538), False, 'import torch\n'), ((4418, 4548), 'mmcv.cnn.ConvModule', 'ConvModule', (['self.in_channels', 'self.conv_channels', 'self.conv_kernel_size'], {'padding': 'padding', 'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg'}), '(self.in_channels, self.conv_channels, self.conv_kernel_size,\n padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg)\n', (4428, 4548), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((5480, 5610), 'mmcv.cnn.ConvModule', 
'ConvModule', (['self.out_channels', 'self.out_channels', 'self.conv_kernel_size'], {'padding': 'padding', 'conv_cfg': 'conv_cfg', 'norm_cfg': 'norm_cfg'}), '(self.out_channels, self.out_channels, self.conv_kernel_size,\n padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg)\n', (5490, 5610), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((6066, 6101), 'mmcv.cnn.build_upsample_layer', 'build_upsample_layer', (['upsample_cfg_'], {}), '(upsample_cfg_)\n', (6086, 6101), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((19794, 19856), 'numpy.ceil', 'np.ceil', (['(N * mask_h * mask_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)'], {}), '(N * mask_h * mask_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)\n', (19801, 19856), True, 'import numpy as np\n'), ((1636, 1668), 'torch.cat', 'torch.cat', (['(pos_y, pos_x)'], {'dim': '(3)'}), '((pos_y, pos_x), dim=3)\n', (1645, 1668), False, 'import torch\n'), ((6287, 6322), 'mmcv.cnn.build_upsample_layer', 'build_upsample_layer', (['upsample_cfg_'], {}), '(upsample_cfg_)\n', (6307, 6322), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((6679, 6714), 'mmcv.cnn.build_upsample_layer', 'build_upsample_layer', (['upsample_cfg_'], {}), '(upsample_cfg_)\n', (6699, 6714), False, 'from mmcv.cnn import ConvModule, build_upsample_layer\n'), ((16307, 16344), 'numpy.round', 'np.round', (['(ori_shape[0] * scale_factor)'], {}), '(ori_shape[0] * scale_factor)\n', (16315, 16344), True, 'import numpy as np\n'), ((16382, 16419), 'numpy.round', 'np.round', (['(ori_shape[1] * scale_factor)'], {}), '(ori_shape[1] * scale_factor)\n', (16390, 16419), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import scipy.io as sio
from pyhsiclasso import HSICLasso
def main():
    """Run HSIC Lasso regression on the bundled MATLAB example data.

    Demonstrates feature selection from a numpy array input while
    conditioning on covariates (features 100 and 300) so their effect
    is removed from the selection.
    """
    mat = sio.loadmat("../tests/test_data/matlab_data.mat")
    features = mat["X"].transpose()
    target = mat["Y"][0]
    names = ["Feat%d" % i for i in range(1, features.shape[1] + 1)]
    # Zero-based column indices of features 100 and 300, treated as covariates.
    covariate_cols = np.array([99, 299])
    selector = HSICLasso()
    selector.input(features, target, featname=names)
    selector.regression(5, covars=features[:, covariate_cols],
                        covars_kernel="Gaussian")
    selector.dump()
    selector.plot_path()
    # Persist the fitted parameters to disk.
    selector.save_param()
if __name__ == "__main__":
    main()
| [
"pyhsiclasso.HSICLasso",
"numpy.array",
"scipy.io.loadmat"
] | [((182, 193), 'pyhsiclasso.HSICLasso', 'HSICLasso', ([], {}), '()\n', (191, 193), False, 'from pyhsiclasso import HSICLasso\n'), ((205, 254), 'scipy.io.loadmat', 'sio.loadmat', (['"""../tests/test_data/matlab_data.mat"""'], {}), "('../tests/test_data/matlab_data.mat')\n", (216, 254), True, 'import scipy.io as sio\n'), ((441, 460), 'numpy.array', 'np.array', (['[99, 299]'], {}), '([99, 299])\n', (449, 460), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import ambit
import sys, traceback
import numpy as np
from pathlib import Path
import results_check
def main():
    """Solve the closed-loop 'syspulcap' 0D cardiovascular flow model until
    periodicity and verify the final state vector against stored reference
    values.

    Returns:
        bool: True if all solution components match the reference within
        tolerance, False otherwise.
    """
    basepath = str(Path(__file__).parent.absolute())
    # I/O setup: pure 0D flow problem; negative write interval disables output.
    IO_PARAMS = {'problem_type' : 'flow0d', # solid, fluid, flow0d, solid_flow0d, fluid_flow0d
                 'write_results_every' : -999,
                 'output_path' : ''+basepath+'/tmp',
                 'simname' : 'test'}
    SOLVER_PARAMS = {'tol_res' : 1.0e-8,
                     'tol_inc' : 1.0e-8}
    # One-step-theta time integration over up to 10 cycles; the run stops
    # early once the solution becomes periodic within eps_periodic
    # (checked on pressures and fluxes, 'pQvar').
    TIME_PARAMS = {'maxtime' : 10*1.0,
                   'numstep' : 10*100,
                   'timint' : 'ost', # ost
                   'theta_ost' : 0.5,
                   'initial_conditions' : init(),
                   'eps_periodic' : 0.03,
                   'periodic_checktype' : 'pQvar'}
    # All four heart chambers are 0D time-varying elastance models, driven
    # by the activation curves defined below (curve 1: atria, curve 2: ventricles).
    MODEL_PARAMS = {'modeltype' : 'syspulcap',
                    'parameters' : param(),
                    'chamber_models' : {'lv' : {'type' : '0D_elast', 'activation_curve' : 2}, 'rv' : {'type' : '0D_elast', 'activation_curve' : 2}, 'la' : {'type' : '0D_elast', 'activation_curve' : 1}, 'ra' : {'type' : '0D_elast', 'activation_curve' : 1}}}
    # define your time curves here (syntax: tcX refers to curve X)
    class time_curves():
        def tc1(self, t): # atrial activation
            # Half-cosine activation pulse starting at t=0, lasting 2*t_ed.
            act_dur = 2.*param()['t_ed']
            t0 = 0.
            if t >= t0 and t <= t0 + act_dur:
                return 0.5*(1.-np.cos(2.*np.pi*(t-t0)/act_dur))
            else:
                return 0.0
        def tc2(self, t): # ventricular activation
            # Half-cosine activation pulse starting at end-diastole (t_ed).
            act_dur = 1.8*(param()['t_es'] - param()['t_ed'])
            t0 = param()['t_ed']
            if t >= t0 and t <= t0 + act_dur:
                return 0.5*(1.-np.cos(2.*np.pi*(t-t0)/act_dur))
            else:
                return 0.0
    # problem setup
    problem = ambit.Ambit(IO_PARAMS, TIME_PARAMS, SOLVER_PARAMS, constitutive_params=MODEL_PARAMS, time_curves=time_curves())
    # solve time-dependent problem
    problem.solve_problem()
    # --- results check
    tol = 1.0e-6
    s_corr = np.zeros(problem.mp.cardvasc0D.numdof)
    # correct results
    s_corr[0] = 2.9433925704892139E+04
    s_corr[1] = 7.0386145039016035E-01
    s_corr[2] = 4.2678351841238388E-01
    s_corr[3] = 6.7442752468526668E-01
    s_corr[4] = 7.8447337550894032E+00
    s_corr[5] = 4.4646238616069248E+04
    s_corr[6] = 7.8447338419731594E+00
    s_corr[7] = 4.6065602353069931E+04
    s_corr[8] = 7.5426971045033193E+00
    s_corr[9] = -1.1478896183043669E+04
    s_corr[10] = -1.0718235021322913E+04
    s_corr[11] = -8.4007375425592309E+03
    s_corr[12] = -5.7419302216561446E+03
    s_corr[13] = -1.9132871898718197E+03
    s_corr[14] = 2.1087309382923807E+00
    s_corr[15] = 1.9898868714375822E+04
    s_corr[16] = 2.0751858496852389E+00
    s_corr[17] = 1.7186287111159349E+04
    s_corr[18] = 2.0732041530378402E+00
    s_corr[19] = 1.3479807613911051E+04
    s_corr[20] = 2.0782223465117489E+00
    s_corr[21] = 9.2042337849724045E+03
    s_corr[22] = 2.0766706092420457E+00
    s_corr[23] = 3.0595543607658747E+03
    s_corr[24] = 1.7839814008737576E+00
    s_corr[25] = -4.6650425953686863E+04
    s_corr[26] = 3.4387976270712024E+04
    s_corr[27] = 5.3317892475132389E-01
    s_corr[28] = 8.9862422931251962E-02
    s_corr[29] = 4.9879094848061262E-01
    s_corr[30] = 1.8877949978218918E+00
    s_corr[31] = 1.2785936777320912E+04
    s_corr[32] = 1.7919004719919820E+00
    s_corr[33] = -8.4133350626452477E+04
    s_corr[34] = 1.6151509582279193E+00
    s_corr[35] = -5.9085665923784400E+03
    check1 = results_check.results_check_vec(problem.mp.s, s_corr, problem.mp.comm, tol=tol)
    success = results_check.success_check([check1], problem.mp.comm)
    return success
def init():
    """Initial conditions for the closed-loop syspulcap circulation model.

    Keys follow the pattern ``p_*`` for pressures and ``q_*`` for fluxes
    (kg-mm-s unit system).
    """
    return dict(q_vin_l_0=2.9122879355134799E+04,
                p_at_l_0=6.8885657594702698E-01,
                q_vout_l_0=4.4126414250284074E-01,
                p_v_l_0=6.5973369659189085E-01,
                p_ar_sys_0=7.6852336907652035E+00,
                q_ar_sys_0=4.4646253096693101E+04,
                p_arperi_sys_0=7.3924415864114579E+00,
                q_arspl_sys_0=-1.1942736655945399E+04,
                q_arespl_sys_0=-1.1129301639510835E+04,
                q_armsc_sys_0=-8.7229603630348920E+03,
                q_arcer_sys_0=-5.9623606948858287E+03,
                q_arcor_sys_0=-1.9864253416510032E+03,
                p_venspl_sys_0=2.1337514581004355E+00,
                q_venspl_sys_0=2.0406124240978173E+04,
                p_venespl_sys_0=2.0900015313258282E+00,
                q_venespl_sys_0=1.7072593297814688E+04,
                p_venmsc_sys_0=2.0879628079853534E+00,
                q_venmsc_sys_0=1.3387364723046561E+04,
                p_vencer_sys_0=2.0933161349988683E+00,
                q_vencer_sys_0=9.1526721881635949E+03,
                p_vencor_sys_0=2.0910022623881237E+00,
                q_vencor_sys_0=3.0343572493359602E+03,
                p_ven_sys_0=1.8007235104876642E+00,
                q_ven_sys_0=-4.5989218100751634E+04,
                q_vin_r_0=3.4747706215569546E+04,
                p_at_r_0=5.3722584358891634E-01,
                q_vout_r_0=9.2788006831497391E-02,
                p_v_r_0=5.0247813737334734E-01,
                p_ar_pul_0=1.8622263176170106E+00,
                q_ar_pul_0=1.2706171472263239E+04,
                p_cap_pul_0=1.7669300315750378E+00,
                q_cap_pul_0=-8.4296230468394206E+04,
                p_ven_pul_0=1.5914021166255159E+00,
                q_ven_pul_0=-6.4914977363299859E+03)
def param():
    """Assemble the full parameter dictionary of the syspulcap model.

    All quantities are in the kg-mm-s unit system.  Proximal/peripheral
    resistances and compliances are derived by splitting the total
    systemic/pulmonary values using the fractional factors of Ursino et
    al., Am J Physiol Heart Circ Physiol (2000).  Note that several
    variables (e.g. ``R_ar_sys``) are rescaled in place below, so the
    order of the statements matters.
    """
    # parameters in kg-mm-s unit system
    R_ar_sys = 120.0e-6
    tau_ar_sys = 1.0311433159
    tau_ar_pul = 0.3
    # Diss Hirschvogel tab. 2.7
    C_ar_sys = tau_ar_sys/R_ar_sys
    Z_ar_sys = R_ar_sys/20.
    R_ven_sys = R_ar_sys/5.
    C_ven_sys = 30.*C_ar_sys
    R_ar_pul = R_ar_sys/8.
    C_ar_pul = tau_ar_pul/R_ar_pul
    R_ven_pul = R_ar_pul
    C_ven_pul = 2.5*C_ar_pul
    L_ar_sys = 0.667e-6
    L_ven_sys = 0.
    L_ar_pul = 0.
    L_ven_pul = 0.
    # timings
    t_ed = 0.2
    t_es = 0.53
    T_cycl = 1.0
    # atrial elastances
    E_at_max_l = 2.9e-5
    E_at_min_l = 9.0e-6
    E_at_max_r = 1.8e-5
    E_at_min_r = 8.0e-6
    # ventricular elastances
    E_v_max_l = 30.0e-5
    E_v_min_l = 12.0e-6
    E_v_max_r = 20.0e-5
    E_v_min_r = 10.0e-6
    ## systemic arterial
    # now we have to separate the resistance into a proximal and a peripheral part
    frac_Rprox_Rtotal = 0.06 # Ursino et al. factor: 0.06 - OK
    R_arperi_sys = (1.-frac_Rprox_Rtotal)*R_ar_sys
    R_ar_sys *= frac_Rprox_Rtotal # now R_ar_sys(prox)
    frac_Cprox_Ctotal = 0.95#0.07 # Ursino et al. factor: 0.07 - XXXX too small???!!!!! - keep in mind that most compliance lies in the aorta / proximal!
    C_arperi_sys = (1.-frac_Cprox_Ctotal)*C_ar_sys
    C_ar_sys *= frac_Cprox_Ctotal # now C_ar_sys(prox)
    # R in parallel:
    # R_arperi_sys = (1/R_arspl_sys + 1/R_arespl_sys + 1/R_armsc_sys + 1/R_arcer_sys + 1/R_arcor_sys)^(-1)
    R_arspl_sys = 3.35 * R_arperi_sys # Ursino et al. factor: 3.35 - OK
    R_arespl_sys = 3.56 * R_arperi_sys # Ursino et al. factor: 3.56 - OK
    R_armsc_sys = 4.54 * R_arperi_sys # Ursino et al. factor: 4.54 - OK
    R_arcer_sys = 6.65 * R_arperi_sys # Ursino et al. factor: 6.65 - OK
    R_arcor_sys = 19.95 * R_arperi_sys # Ursino et al. factor: 19.95 - OK
    # C in parallel (fractions have to sum to 1):
    # C_arperi_sys = C_arspl_sys + C_arespl_sys + C_armsc_sys + C_arcer_sys + C_arcor_sys
    C_arspl_sys = 0.55 * C_arperi_sys # Ursino et al. factor: 0.55 - OK
    C_arespl_sys = 0.18 * C_arperi_sys # Ursino et al. factor: 0.18 - OK
    C_armsc_sys = 0.14 * C_arperi_sys # Ursino et al. factor: 0.14 - OK
    C_arcer_sys = 0.11 * C_arperi_sys # Ursino et al. factor: 0.11 - OK
    C_arcor_sys = 0.03 * C_arperi_sys # Ursino et al. factor: 0.03 - OK
    ## systemic venous
    frac_Rprox_Rtotal = 0.8 # no Ursino et al. factor since they do not have that extra compartment!
    R_venperi_sys = (1.-frac_Rprox_Rtotal) * R_ven_sys
    R_ven_sys *= frac_Rprox_Rtotal # now R_ven_sys(prox)
    frac_Cprox_Ctotal = 0.2 # no Ursino et al. factor since they do not have that extra compartment!
    C_venperi_sys = (1.-frac_Cprox_Ctotal)*C_ven_sys
    C_ven_sys *= frac_Cprox_Ctotal # now C_ven_sys(prox)
    # R in parallel:
    # R_venperi_sys = (1/R_venspl_sys + 1/R_venespl_sys + 1/R_venmsc_sys + 1/R_vencer_sys + 1/R_vencor_sys)^(-1)
    R_venspl_sys = 3.4 * R_venperi_sys # Ursino et al. factor: 3.4 - OK
    R_venespl_sys = 3.53 * R_venperi_sys # Ursino et al. factor: 3.53 - OK
    R_venmsc_sys = 4.47 * R_venperi_sys # Ursino et al. factor: 4.47 - OK
    R_vencer_sys = 6.66 * R_venperi_sys # Ursino et al. factor: 6.66 - OK
    R_vencor_sys = 19.93 * R_venperi_sys # Ursino et al. factor: 19.93 - OK
    # C in parallel (fractions have to sum to 1):
    # C_venperi_sys = C_venspl_sys + C_venespl_sys + C_venmsc_sys + C_vencer_sys + C_vencor_sys
    C_venspl_sys = 0.55 * C_venperi_sys # Ursino et al. factor: 0.55 - OK
    C_venespl_sys = 0.18 * C_venperi_sys # Ursino et al. factor: 0.18 - OK
    C_venmsc_sys = 0.14 * C_venperi_sys # Ursino et al. factor: 0.14 - OK
    C_vencer_sys = 0.1 * C_venperi_sys # Ursino et al. factor: 0.1 - OK
    C_vencor_sys = 0.03 * C_venperi_sys # Ursino et al. factor: 0.03 - OK
    ## pulmonary arterial
    frac_Rprox_Rtotal = 0.5#0.72 # Ursino et al. factor: 0.72 - hm... doubt that - stick with 0.5
    R_cap_pul = (1.-frac_Rprox_Rtotal)*R_ar_pul
    R_ar_pul *= frac_Rprox_Rtotal # now R_ar_pul(prox)
    ## pulmonary venous
    frac_Cprox_Ctotal = 0.5#0.12 # Ursino et al. factor: 0.12 - XXX?: gives shitty p_puls... - stick with 0.5
    C_cap_pul = (1.-frac_Cprox_Ctotal)*C_ar_pul
    C_ar_pul *= frac_Cprox_Ctotal # now C_ar_pul(prox)
    ### unstressed compartment volumes, diffult to estimate - use literature values!
    # these volumes only become relevant for the gas transport models as they determine the capacity of each
    # compartment to store constituents - however, they are also used for postprocessing of the flow models...
    V_at_l_u = 5000.0 # applies only in case of 0D or prescribed atria
    V_at_r_u = 4000.0 # applies only in case of 0D or prescribed atria
    V_v_l_u = 10000.0 # applies only in case of 0D or prescribed ventricles
    V_v_r_u = 8000.0 # applies only in case of 0D or prescribed ventricles
    V_ar_sys_u = 0.0 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_ar_pul_u = 0.0 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_ven_pul_u = 120.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    # peripheral systemic arterial
    V_arspl_sys_u = 274.4e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_arespl_sys_u = 134.64e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_armsc_sys_u = 105.8e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_arcer_sys_u = 72.13e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_arcor_sys_u = 24.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    # peripheral systemic venous
    V_venspl_sys_u = 1121.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_venespl_sys_u = 550.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_venmsc_sys_u = 432.14e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_vencer_sys_u = 294.64e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_vencor_sys_u = 98.21e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    V_ven_sys_u = 100.0e3 # estimated (Ursino et al. do not have that extra venous compartment...)
    # pulmonary capillary
    V_cap_pul_u = 123.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3
    return {'R_ar_sys' : R_ar_sys,
            'C_ar_sys' : C_ar_sys,
            'L_ar_sys' : L_ar_sys,
            'Z_ar_sys' : Z_ar_sys,
            'R_arspl_sys' : R_arspl_sys,
            'C_arspl_sys' : C_arspl_sys,
            'R_arespl_sys' : R_arespl_sys,
            'C_arespl_sys' : C_arespl_sys,
            'R_armsc_sys' : R_armsc_sys,
            'C_armsc_sys' : C_armsc_sys,
            'R_arcer_sys' : R_arcer_sys,
            'C_arcer_sys' : C_arcer_sys,
            'R_arcor_sys' : R_arcor_sys,
            'C_arcor_sys' : C_arcor_sys,
            'R_venspl_sys' : R_venspl_sys,
            'C_venspl_sys' : C_venspl_sys,
            'R_venespl_sys' : R_venespl_sys,
            'C_venespl_sys' : C_venespl_sys,
            'R_venmsc_sys' : R_venmsc_sys,
            'C_venmsc_sys' : C_venmsc_sys,
            'R_vencer_sys' : R_vencer_sys,
            'C_vencer_sys' : C_vencer_sys,
            'R_vencor_sys' : R_vencor_sys,
            'C_vencor_sys' : C_vencor_sys,
            'R_ar_pul' : R_ar_pul,
            'C_ar_pul' : C_ar_pul,
            'L_ar_pul' : L_ar_pul,
            'R_cap_pul' : R_cap_pul,
            'C_cap_pul' : C_cap_pul,
            'R_ven_sys' : R_ven_sys,
            'C_ven_sys' : C_ven_sys,
            'L_ven_sys' : L_ven_sys,
            'R_ven_pul' : R_ven_pul,
            'C_ven_pul' : C_ven_pul,
            'L_ven_pul' : L_ven_pul,
            # atrial elastances
            'E_at_max_l' : E_at_max_l,
            'E_at_min_l' : E_at_min_l,
            'E_at_max_r' : E_at_max_r,
            'E_at_min_r' : E_at_min_r,
            # ventricular elastances
            'E_v_max_l' : E_v_max_l,
            'E_v_min_l' : E_v_min_l,
            'E_v_max_r' : E_v_max_r,
            'E_v_min_r' : E_v_min_r,
            # valve resistances
            'R_vin_l_min' : 1.0e-6,
            'R_vin_l_max' : 1.0e1,
            'R_vout_l_min' : 1.0e-6,
            'R_vout_l_max' : 1.0e1,
            'R_vin_r_min' : 1.0e-6,
            'R_vin_r_max' : 1.0e1,
            'R_vout_r_min' : 1.0e-6,
            'R_vout_r_max' : 1.0e1,
            # timings
            't_ed' : t_ed,
            't_es' : t_es,
            'T_cycl' : T_cycl,
            # unstressed compartment volumes (for post-processing)
            'V_at_l_u' : V_at_l_u,
            'V_at_r_u' : V_at_r_u,
            'V_v_l_u' : V_v_l_u,
            'V_v_r_u' : V_v_r_u,
            'V_ar_sys_u' : V_ar_sys_u,
            'V_arspl_sys_u' : V_arspl_sys_u,
            'V_arespl_sys_u' : V_arespl_sys_u,
            'V_armsc_sys_u' : V_armsc_sys_u,
            'V_arcer_sys_u' : V_arcer_sys_u,
            'V_arcor_sys_u' : V_arcor_sys_u,
            'V_venspl_sys_u' : V_venspl_sys_u,
            'V_venespl_sys_u' : V_venespl_sys_u,
            'V_venmsc_sys_u' : V_venmsc_sys_u,
            'V_vencer_sys_u' : V_vencer_sys_u,
            'V_vencor_sys_u' : V_vencor_sys_u,
            'V_ven_sys_u' : V_ven_sys_u,
            'V_ar_pul_u' : V_ar_pul_u,
            'V_cap_pul_u' : V_cap_pul_u,
            'V_ven_pul_u' : V_ven_pul_u}
if __name__ == "__main__":
    # Run the test and translate the boolean result into a process exit code.
    success = False
    try:
        success = main()
    except Exception:
        # Catch only ordinary errors (a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit); keep the traceback visible.
        print(traceback.format_exc())
    sys.exit(0 if success else 1)
| [
"results_check.results_check_vec",
"numpy.zeros",
"results_check.success_check",
"pathlib.Path",
"traceback.format_exc",
"numpy.cos",
"sys.exit"
] | [((2426, 2464), 'numpy.zeros', 'np.zeros', (['problem.mp.cardvasc0D.numdof'], {}), '(problem.mp.cardvasc0D.numdof)\n', (2434, 2464), True, 'import numpy as np\n'), ((3944, 4023), 'results_check.results_check_vec', 'results_check.results_check_vec', (['problem.mp.s', 's_corr', 'problem.mp.comm'], {'tol': 'tol'}), '(problem.mp.s, s_corr, problem.mp.comm, tol=tol)\n', (3975, 4023), False, 'import results_check\n'), ((4038, 4092), 'results_check.success_check', 'results_check.success_check', (['[check1]', 'problem.mp.comm'], {}), '([check1], problem.mp.comm)\n', (4065, 4092), False, 'import results_check\n'), ((15498, 15509), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (15506, 15509), False, 'import sys, traceback\n'), ((15528, 15539), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15536, 15539), False, 'import sys, traceback\n'), ((15445, 15467), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15465, 15467), False, 'import sys, traceback\n'), ((164, 178), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'from pathlib import Path\n'), ((1747, 1787), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * (t - t0) / act_dur)'], {}), '(2.0 * np.pi * (t - t0) / act_dur)\n', (1753, 1787), True, 'import numpy as np\n'), ((2075, 2115), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * (t - t0) / act_dur)'], {}), '(2.0 * np.pi * (t - t0) / act_dur)\n', (2081, 2115), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 10:52:41 2018
@author: cuijiaxu
"""
import numpy as np
import pylab as pl
def simpleBasis(x):
    """Quadratic polynomial basis [1, v, v**2] of the input.

    A length-1 input is treated as a single scalar point ``x[0]``;
    otherwise ``x`` is indexed as a 2D array and the first column of
    each row is expanded into its own basis row.
    """
    if len(x) == 1:
        v = x[0]
        return np.array([1.0, v, v * v])
    rows = [np.array([1.0, x[i, 0], x[i, 0] * x[i, 0]]) for i in range(len(x))]
    return np.array(rows)
def AdaptiveBasis(data,info,x,retrain=False,OneTest=False,update=True):
    """Return basis features for the inputs ``x`` from a learned network.

    Depending on ``info.gcn`` either a plain DNN (``info.dnn``) or a graph
    network (``info.dgcn``) produces the basis; ``retrain=True`` first
    refits the network on the observed candidates/values.  Mutates the
    network objects held on ``info`` as a side effect.
    NOTE(review): the bare triple-quoted strings below are disabled
    debug/experiment code kept as-is.
    """
    """test 345 start"""
    """
    if info.gcn==True:
        if len(data.candidates)==len(x):
            return rseed345_basis(info)
        else:
            return rseed345_basis(info)[info.observedx]
    """
    """test 345 end"""
    if retrain==True:
        if info.gcn==False:
            info.dnn.rng=info.rng
            # Train the DNN on the observed subset, then extract the basis.
            info.dnn.train(data.candidates[info.observedx],np.array(info.observedy).reshape(len(info.observedy),))
            #info.set_w_m0(dnn.get_weight().reshape(dnn.get_weight().shape[1],1))
            basis=info.dnn.get_basis(x)
        else:
            info.dgcn.info=info
            info.dgcn.dataset=info.dataset
            info.dgcn.All_cand_node_num=info.All_cand_node_num
            #basis=dgcn.train_minibatch(data.candidates[info.observedx],np.array(info.observedy).reshape(len(info.observedy),),True)
            basis=info.dgcn.train(data.candidates[info.observedx],np.array(info.observedy).reshape(len(info.observedy),),True)
            #print basis,basis.shape
    else:
        if info.gcn==False:
            basis=info.dnn.get_basis(x)
        else:
            info.dgcn.info=info
            info.dgcn.dataset=info.dataset
            if OneTest==False:
                #get part
                if update==True:
                    # Train flag False on the final positional arg: refresh without full retrain.
                    basis=info.dgcn.train(data.candidates[info.observedx],np.array(info.observedy).reshape(len(info.observedy),),False)
                else:
                    basis=info.dgcn.get_basis_part(x)
            else:
                basis=info.dgcn.get_basis_one(x)
            #basis=info.dgcn.get_basis(x)
            #print basis,basis.shape
    """
    if info.gcn==True:
        #print basis
        np.savetxt("results/basis-RGBODGCN-r%s.txt"%info.rseed, basis, fmt='%s')
        exit(1)
    """
    return basis
    """
    return simpleBasis(x)
    """
def rseed345_basis(info):
    """Reload a previously dumped basis for this random seed and save a
    plot of its Gram (similarity) matrix as a PDF.  Returns the basis."""
    basis_path = "results/basis-RGBODGCN-r%s.txt" % info.rseed
    plot_path = "results/matshow-RGBODGCN-r%s.pdf" % info.rseed
    basis = np.loadtxt(basis_path)
    gram = basis.dot(basis.T)
    pl.figure(4)
    pl.matshow(gram)
    pl.colorbar()
    pl.title("Similarity matrix.")
    pl.savefig(plot_path)
    return basis
| [
"pylab.title",
"pylab.savefig",
"pylab.colorbar",
"pylab.figure",
"numpy.loadtxt",
"numpy.array"
] | [((2351, 2408), 'numpy.loadtxt', 'np.loadtxt', (["('results/basis-RGBODGCN-r%s.txt' % info.rseed)"], {}), "('results/basis-RGBODGCN-r%s.txt' % info.rseed)\n", (2361, 2408), True, 'import numpy as np\n'), ((2411, 2423), 'pylab.figure', 'pl.figure', (['(4)'], {}), '(4)\n', (2420, 2423), True, 'import pylab as pl\n'), ((2463, 2476), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (2474, 2476), True, 'import pylab as pl\n'), ((2481, 2511), 'pylab.title', 'pl.title', (['"""Similarity matrix."""'], {}), "('Similarity matrix.')\n", (2489, 2511), True, 'import pylab as pl\n'), ((2516, 2575), 'pylab.savefig', 'pl.savefig', (["('results/matshow-RGBODGCN-r%s.pdf' % info.rseed)"], {}), "('results/matshow-RGBODGCN-r%s.pdf' % info.rseed)\n", (2526, 2575), True, 'import pylab as pl\n'), ((204, 238), 'numpy.array', 'np.array', (['[1.0, x[0], x[0] * x[0]]'], {}), '([1.0, x[0], x[0] * x[0]])\n', (212, 238), True, 'import numpy as np\n'), ((368, 380), 'numpy.array', 'np.array', (['xx'], {}), '(xx)\n', (376, 380), True, 'import numpy as np\n'), ((315, 358), 'numpy.array', 'np.array', (['[1.0, x[i, 0], x[i, 0] * x[i, 0]]'], {}), '([1.0, x[i, 0], x[i, 0] * x[i, 0]])\n', (323, 358), True, 'import numpy as np\n'), ((835, 859), 'numpy.array', 'np.array', (['info.observedy'], {}), '(info.observedy)\n', (843, 859), True, 'import numpy as np\n'), ((1364, 1388), 'numpy.array', 'np.array', (['info.observedy'], {}), '(info.observedy)\n', (1372, 1388), True, 'import numpy as np\n'), ((1793, 1817), 'numpy.array', 'np.array', (['info.observedy'], {}), '(info.observedy)\n', (1801, 1817), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def solidBackground(shape = (480, 640, 3), color = (0, 255, 0)):
    """Return a uint8 image of ``shape`` uniformly filled with ``color``.

    Defaults to a 480x640 3-channel image in pure green.
    """
    return np.full(shape, color, dtype=np.uint8)
def imageBackground(source):
    """Load and return the image stored at path ``source`` via OpenCV."""
    loaded = cv2.imread(source)
    return loaded
"cv2.imread",
"numpy.ones"
] | [((111, 141), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (118, 141), True, 'import numpy as np\n'), ((229, 247), 'cv2.imread', 'cv2.imread', (['source'], {}), '(source)\n', (239, 247), False, 'import cv2\n')] |
# Copyright (c) <NAME>.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
# This example uses the "noisy Travelers Salesman Problem" and applies a machine learning
# approach to avoid unnecessary function calls. Works only with the Python variant of
# differential evolution, both single threaded or with parallel function evaluation.
# A machine learning based filter should only be used with expensive objective functions.
# See https://github.com/dietmarwo/fast-cma-es/blob/master/tutorials/Filter.adoc for a detailed description.
import numpy as np
from fcmaes.optimizer import logger
from fcmaes import de
import xgboost
from collections import deque
from noisy_tsp import TSP, load_tsplib
# do 'pip install tsplib95'
class filter():
    """Machine-learning filter that predicts whether a candidate solution
    improves on its parent, so expensive objective evaluations can be
    skipped.

    NOTE: the class name shadows the builtin ``filter``; it is kept for
    backward compatibility with existing callers.
    """

    def __init__(self, size, interval, filter_prob = 0.9):
        # Sliding windows of the most recent evaluated points and values.
        self.xq = deque(maxlen=size)
        self.yq = deque(maxlen=size)
        self.interval = interval        # re-train the model every `interval` samples
        self.filter_prob = filter_prob  # probability filter is applied
        self.num = 0                    # total number of samples seen
        self.model = None               # lazily created XGBoost ranker

    def add(self, x, y):
        """Record an evaluated point/value and periodically re-train."""
        self.xq.append(x)
        self.yq.append(y)
        self.num += 1
        if self.num % self.interval == 0:
            try:
                self.learn()
            except Exception as ex:
                # Best effort: a failed model update must not abort the optimization.
                print(ex)

    def x(self):
        """Recently evaluated points as a 2D array."""
        return np.array(self.xq)

    def y(self):
        """Objective values corresponding to ``x()``."""
        return np.array(self.yq)

    def learn(self):
        """Fit (or refit) the pairwise-ranking surrogate on the window."""
        if self.model is None:
            self.model = xgboost.XGBRegressor(objective='rank:pairwise')
        self.model.fit(self.x(), self.y())

    def is_improve(self, x, x_old, y_old):
        """Return True if ``x`` should be evaluated.

        Before a model exists, or with probability ``1 - filter_prob``,
        everything passes through unfiltered.
        """
        if self.model is None or np.random.random() > self.filter_prob:
            return True
        try:
            # A lower predicted score ranks as a better (smaller) objective.
            y = self.model.predict([x, x_old])
            return y[0] < y[1]
        except Exception as ex:
            print(ex)
            return True

    def optimize(self, problem):
        """Run differential evolution on ``problem`` using this filter."""
        return de.minimize(problem,
                    dim = problem.d,
                    bounds = problem.bounds(),
                    popsize = 16,
                    max_evaluations = 60000,
                    workers = 32,
                    filter = self
                    # logger = logger()
                    )
if __name__ == '__main__':
    # Use a distinct name for the instance so the `filter` class itself is
    # not shadowed at module scope by its own instance.
    eval_filter = filter(96, 32)
    tsp = load_tsplib('data/tsp/br17.tsp')
    eval_filter.optimize(tsp)
| [
"noisy_tsp.load_tsplib",
"numpy.random.random",
"numpy.array",
"xgboost.XGBRegressor",
"collections.deque"
] | [((2392, 2424), 'noisy_tsp.load_tsplib', 'load_tsplib', (['"""data/tsp/br17.tsp"""'], {}), "('data/tsp/br17.tsp')\n", (2403, 2424), False, 'from noisy_tsp import TSP, load_tsplib\n'), ((888, 906), 'collections.deque', 'deque', ([], {'maxlen': 'size'}), '(maxlen=size)\n', (893, 906), False, 'from collections import deque\n'), ((925, 943), 'collections.deque', 'deque', ([], {'maxlen': 'size'}), '(maxlen=size)\n', (930, 943), False, 'from collections import deque\n'), ((1386, 1403), 'numpy.array', 'np.array', (['self.xq'], {}), '(self.xq)\n', (1394, 1403), True, 'import numpy as np\n'), ((1441, 1458), 'numpy.array', 'np.array', (['self.yq'], {}), '(self.yq)\n', (1449, 1458), True, 'import numpy as np\n'), ((1545, 1592), 'xgboost.XGBRegressor', 'xgboost.XGBRegressor', ([], {'objective': '"""rank:pairwise"""'}), "(objective='rank:pairwise')\n", (1565, 1592), False, 'import xgboost\n'), ((1734, 1752), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1750, 1752), True, 'import numpy as np\n')] |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import Base, Entity
from ...serialize import KeyField, AnyField, StringField, DataTypeField, \
BoolField, Int32Field
from ...tensor.core import TENSOR_TYPE
from ...tensor.datasource import empty, tensor as astensor, \
from_series as tensor_from_series, from_dataframe as tensor_from_dataframe
from ...tensor.statistics.quantile import quantile as tensor_quantile
from ...tensor.utils import recursive_tile
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..core import DATAFRAME_TYPE
from ..datasource.from_tensor import series_from_tensor, dataframe_from_tensor
from ..initializer import DataFrame as create_df
from ..utils import parse_index, build_empty_df, find_common_type, validate_axis
class DataFrameQuantile(DataFrameOperand, DataFrameOperandMixin):
    """Operand computing quantiles over a Mars DataFrame or Series.

    The output entity type depends on the dimensionality of ``q`` and on
    ``axis``: a scalar ``q`` yields a Series (DataFrame input) or a scalar
    (Series input); a 1-D ``q`` yields a DataFrame or Series respectively.
    """
    _op_type_ = OperandDef.QUANTILE
    _input = KeyField('input')
    _q = AnyField('q')
    _axis = Int32Field('axis')
    _numeric_only = BoolField('numeric_only')
    _interpolation = StringField('interpolation')
    _dtype = DataTypeField('dtype')
    def __init__(self, q=None, interpolation=None, axis=None, numeric_only=None,
                 dtype=None, gpu=None, object_type=None, **kw):
        super().__init__(_q=q, _interpolation=interpolation, _axis=axis,
                         _numeric_only=numeric_only, _dtype=dtype, _gpu=gpu,
                         _object_type=object_type, **kw)
    @property
    def input(self):
        return self._input
    @property
    def q(self):
        return self._q
    @property
    def interpolation(self):
        return self._interpolation
    @property
    def axis(self):
        return self._axis
    @property
    def numeric_only(self):
        return self._numeric_only
    def _set_inputs(self, inputs):
        super()._set_inputs(inputs)
        self._input = self._inputs[0]
        # When q itself is a tensor it is carried as the last input.
        if isinstance(self._q, TENSOR_TYPE):
            self._q = self._inputs[-1]
    def _calc_dtype_on_axis_1(self, a, dtypes):
        """Return the common dtype of the per-column quantile results."""
        quantile_dtypes = []
        for name in dtypes.index:
            dt = tensor_quantile(tensor_from_series(a[name]), self._q,
                                 interpolation=self._interpolation,
                                 handle_non_numeric=not self._numeric_only).dtype
            quantile_dtypes.append(dt)
        return find_common_type(quantile_dtypes)
    def _call_dataframe(self, a, inputs):
        """Create the output entity for a DataFrame input *a*."""
        if self._numeric_only:
            empty_df = build_empty_df(a.dtypes)
            dtypes = empty_df._get_numeric_data().dtypes
        else:
            dtypes = a.dtypes
        if isinstance(self._q, TENSOR_TYPE):
            q_val = self._q
            pd_index = pd.Index([], dtype=q_val.dtype)
            name = None
            # Index values are unknown until the q tensor is executed.
            store_index_value = False
        else:
            q_val = np.asanyarray(self._q)
            pd_index = pd.Index(q_val)
            name = self._q if q_val.size == 1 else None
            store_index_value = True
        tokenize_objects = (a, q_val, self._interpolation, type(self).__name__)
        if q_val.ndim == 0 and self._axis == 0:
            # scalar q, column-wise -> Series indexed by column names
            self._object_type = ObjectType.series
            index_value = parse_index(dtypes.index, store_data=store_index_value)
            shape = (len(dtypes),)
            # calc dtype
            dtype = self._calc_dtype_on_axis_1(a, dtypes)
            return self.new_series(inputs, shape=shape, dtype=dtype,
                                   index_value=index_value, name=name or dtypes.index.name)
        elif q_val.ndim == 0 and self._axis == 1:
            # scalar q, row-wise -> Series indexed by the input's index
            self._object_type = ObjectType.series
            index_value = a.index_value
            shape = (len(a),)
            # calc dtype
            dt = tensor_quantile(empty(a.shape[1], dtype=find_common_type(dtypes)),
                                 self._q, interpolation=self._interpolation,
                                 handle_non_numeric=not self._numeric_only).dtype
            return self.new_series(inputs, shape=shape, dtype=dt,
                                   index_value=index_value, name=name or index_value.name)
        elif q_val.ndim == 1 and self._axis == 0:
            # 1-D q, column-wise -> DataFrame: rows are q values, columns kept
            self._object_type = ObjectType.dataframe
            shape = (len(q_val), len(dtypes))
            index_value = parse_index(pd_index, *tokenize_objects, store_data=store_index_value)
            dtype_list = []
            for name in dtypes.index:
                dtype_list.append(
                    tensor_quantile(tensor_from_series(a[name]), self._q,
                                    interpolation=self._interpolation,
                                    handle_non_numeric=not self._numeric_only).dtype)
            dtypes = pd.Series(dtype_list, index=dtypes.index)
            return self.new_dataframe(inputs, shape=shape, dtypes=dtypes,
                                      index_value=index_value,
                                      columns_value=parse_index(dtypes.index, store_data=True))
        else:
            # 1-D q, row-wise -> DataFrame: rows are q values, columns are
            # the input's index labels
            assert q_val.ndim == 1 and self._axis == 1
            self._object_type = ObjectType.dataframe
            shape = (len(q_val), a.shape[0])
            index_value = parse_index(pd_index, *tokenize_objects, store_data=store_index_value)
            pd_columns = a.index_value.to_pandas()
            dtype_list = np.full(len(pd_columns), self._calc_dtype_on_axis_1(a, dtypes))
            dtypes = pd.Series(dtype_list, index=pd_columns)
            return self.new_dataframe(inputs, shape=shape,
                                      dtypes=dtypes,
                                      index_value=index_value,
                                      columns_value=parse_index(dtypes.index, store_data=True,
                                                                key=a.index_value.key))
    def _call_series(self, a, inputs):
        """Create the output entity for a Series input *a*."""
        if isinstance(self._q, TENSOR_TYPE):
            q_val = self._q
            index_val = pd.Index([], dtype=q_val.dtype)
            store_index_value = False
        else:
            q_val = np.asanyarray(self._q)
            index_val = pd.Index(q_val)
            store_index_value = True
        # get dtype by tensor
        a_t = astensor(a)
        self._dtype = dtype = tensor_quantile(
            a_t, self._q, interpolation=self._interpolation,
            handle_non_numeric=not self._numeric_only).dtype
        if q_val.ndim == 0:
            self._object_type = ObjectType.scalar
            return self.new_scalar(inputs, dtype=dtype)
        else:
            self._object_type = ObjectType.series
            return self.new_series(
                inputs, shape=q_val.shape, dtype=dtype,
                index_value=parse_index(index_val, a, q_val, self._interpolation,
                                        type(self).__name__, store_data=store_index_value),
                name=a.name)
    def __call__(self, a, q_input=None):
        inputs = [a]
        if q_input is not None:
            inputs.append(q_input)
        if isinstance(a, DATAFRAME_TYPE):
            return self._call_dataframe(a, inputs)
        else:
            return self._call_series(a, inputs)
    @classmethod
    def _tile_dataframe(cls, op):
        """Tile a DataFrame quantile into per-column tensor quantiles."""
        from ...tensor.merge.stack import TensorStack
        df = op.outputs[0]
        if op.object_type == ObjectType.series:
            if op.axis == 0:
                ts = []
                for name in df.index_value.to_pandas():
                    a = tensor_from_series(op.input[name])
                    t = tensor_quantile(a, op.q, interpolation=op.interpolation,
                                        handle_non_numeric=not op.numeric_only)
                    ts.append(t)
                try:
                    dtype = np.result_type(*[it.dtype for it in ts])
                except TypeError:
                    dtype = np.dtype(object)
                stack_op = TensorStack(axis=0, dtype=dtype)
                tr = stack_op(ts)
                # np.asscalar was removed in NumPy 1.23; ``.item()`` is its
                # documented equivalent.
                r = series_from_tensor(tr, index=df.index_value.to_pandas(),
                                       name=ts[0].op.q.item())
            else:
                assert op.axis == 1
                empty_df = build_empty_df(op.input.dtypes)
                fields = empty_df._get_numeric_data().columns.tolist()
                t = tensor_from_dataframe(op.input[fields])
                tr = tensor_quantile(t, op.q, axis=1, interpolation=op.interpolation,
                                    handle_non_numeric=not op.numeric_only)
                r = series_from_tensor(tr, name=tr.op.q.item())
                r._index_value = op.input.index_value
        else:
            assert op.object_type == ObjectType.dataframe
            if op.axis == 0:
                d = OrderedDict()
                for name in df.dtypes.index:
                    a = tensor_from_series(op.input[name])
                    t = tensor_quantile(a, op.q, interpolation=op.interpolation,
                                        handle_non_numeric=not op.numeric_only)
                    d[name] = t
                r = create_df(d, index=op.q)
            else:
                assert op.axis == 1
                empty_df = build_empty_df(op.input.dtypes)
                fields = empty_df._get_numeric_data().columns.tolist()
                t = tensor_from_dataframe(op.input[fields])
                tr = tensor_quantile(t, op.q, axis=1, interpolation=op.interpolation,
                                    handle_non_numeric=not op.numeric_only)
                if not op.input.index_value.has_value():
                    raise NotImplementedError
                # TODO(xuye.qin): use index=op.input.index when we support DataFrame.index
                r = dataframe_from_tensor(tr, index=op.q,
                                          columns=op.input.index_value.to_pandas())
        return [recursive_tile(r)]
    @classmethod
    def _tile_series(cls, op):
        """Tile a Series quantile by delegating to the tensor implementation."""
        a = tensor_from_series(op.input)
        t = tensor_quantile(a, op.q, interpolation=op.interpolation,
                            handle_non_numeric=not op.numeric_only)
        if op.object_type == ObjectType.scalar:
            r = t
        else:
            r = series_from_tensor(t, index=op.q, name=op.outputs[0].name)
        return [recursive_tile(r)]
    @classmethod
    def tile(cls, op):
        if isinstance(op.input, DATAFRAME_TYPE):
            return cls._tile_dataframe(op)
        else:
            return cls._tile_series(op)
def quantile_series(series, q=0.5, interpolation='linear'):
    """
    Return value at the given quantile.
    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        0 <= q <= 1, the quantile(s) to compute.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies between
        two data points `i` and `j`:
        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    Returns
    -------
    float or Series
        If ``q`` is an array or a tensor, a Series will be returned where the
        index is ``q`` and the values are the quantiles, otherwise
        a float will be returned.
    See Also
    --------
    core.window.Rolling.quantile
    numpy.percentile
    Examples
    --------
    >>> import mars.dataframe as md
    >>> s = md.Series([1, 2, 3, 4])
    >>> s.quantile(.5).execute()
    2.5
    >>> s.quantile([.25, .5, .75]).execute()
    0.25    1.75
    0.50    2.50
    0.75    3.25
    dtype: float64
    """
    # A tensor-valued q must also become an input of the operand so that
    # its value is available at execution time.
    q_input = None
    if isinstance(q, (Base, Entity)):
        q = astensor(q)
        q_input = q
    op = DataFrameQuantile(q=q, interpolation=interpolation,
                           gpu=series.op.gpu)
    return op(series, q_input=q_input)
def quantile_dataframe(df, q=0.5, axis=0, numeric_only=True,
                       interpolation='linear'):
    """
    Return values at the given quantile over requested axis.
    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        Value between 0 <= q <= 1, the quantile(s) to compute.
    axis : {0, 1, 'index', 'columns'} (default 0)
        Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
    numeric_only : bool, default True
        If False, the quantile of datetime and timedelta data will be
        computed as well.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies between
        two data points `i` and `j`:
        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    Returns
    -------
    Series or DataFrame
        If ``q`` is an array or a tensor, a DataFrame will be returned where the
        index is ``q``, the columns are the columns of self, and the
        values are the quantiles.
        If ``q`` is a float, a Series will be returned where the
        index is the columns of self and the values are the quantiles.
    See Also
    --------
    core.window.Rolling.quantile: Rolling quantile.
    numpy.percentile: Numpy function to compute the percentile.
    Examples
    --------
    >>> import mars.dataframe as md
    >>> df = md.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
    ...                   columns=['a', 'b'])
    >>> df.quantile(.1).execute()
    a    1.3
    b    3.7
    Name: 0.1, dtype: float64
    >>> df.quantile([.1, .5]).execute()
           a     b
    0.1  1.3   3.7
    0.5  2.5  55.0
    Specifying `numeric_only=False` will also compute the quantile of
    datetime and timedelta data.
    >>> df = md.DataFrame({'A': [1, 2],
    ...                    'B': [md.Timestamp('2010'),
    ...                          md.Timestamp('2011')],
    ...                    'C': [md.Timedelta('1 days'),
    ...                          md.Timedelta('2 days')]})
    >>> df.quantile(0.5, numeric_only=False).execute()
    A                    1.5
    B    2010-07-02 12:00:00
    C        1 days 12:00:00
    Name: 0.5, dtype: object
    """
    # A tensor-valued q must also become an input of the operand so that
    # its value is available at execution time.
    q_input = None
    if isinstance(q, (Base, Entity)):
        q = astensor(q)
        q_input = q
    op = DataFrameQuantile(q=q, interpolation=interpolation,
                           axis=validate_axis(axis, df),
                           numeric_only=numeric_only,
                           gpu=df.op.gpu)
    return op(df, q_input=q_input)
| [
"numpy.result_type",
"numpy.asanyarray",
"numpy.dtype",
"pandas.Index",
"pandas.Series",
"collections.OrderedDict",
"numpy.asscalar"
] | [((3402, 3433), 'pandas.Index', 'pd.Index', (['[]'], {'dtype': 'q_val.dtype'}), '([], dtype=q_val.dtype)\n', (3410, 3433), True, 'import pandas as pd\n'), ((3530, 3552), 'numpy.asanyarray', 'np.asanyarray', (['self._q'], {}), '(self._q)\n', (3543, 3552), True, 'import numpy as np\n'), ((3576, 3591), 'pandas.Index', 'pd.Index', (['q_val'], {}), '(q_val)\n', (3584, 3591), True, 'import pandas as pd\n'), ((6654, 6685), 'pandas.Index', 'pd.Index', (['[]'], {'dtype': 'q_val.dtype'}), '([], dtype=q_val.dtype)\n', (6662, 6685), True, 'import pandas as pd\n'), ((6758, 6780), 'numpy.asanyarray', 'np.asanyarray', (['self._q'], {}), '(self._q)\n', (6771, 6780), True, 'import numpy as np\n'), ((6805, 6820), 'pandas.Index', 'pd.Index', (['q_val'], {}), '(q_val)\n', (6813, 6820), True, 'import pandas as pd\n'), ((9468, 9481), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9479, 9481), False, 'from collections import OrderedDict\n'), ((5419, 5460), 'pandas.Series', 'pd.Series', (['dtype_list'], {'index': 'dtypes.index'}), '(dtype_list, index=dtypes.index)\n', (5428, 5460), True, 'import pandas as pd\n'), ((6119, 6158), 'pandas.Series', 'pd.Series', (['dtype_list'], {'index': 'pd_columns'}), '(dtype_list, index=pd_columns)\n', (6128, 6158), True, 'import pandas as pd\n'), ((8456, 8496), 'numpy.result_type', 'np.result_type', (['*[it.dtype for it in ts]'], {}), '(*[it.dtype for it in ts])\n', (8470, 8496), True, 'import numpy as np\n'), ((8559, 8575), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (8567, 8575), True, 'import numpy as np\n'), ((8791, 8814), 'numpy.asscalar', 'np.asscalar', (['ts[0].op.q'], {}), '(ts[0].op.q)\n', (8802, 8814), True, 'import numpy as np\n'), ((9271, 9291), 'numpy.asscalar', 'np.asscalar', (['tr.op.q'], {}), '(tr.op.q)\n', (9282, 9291), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
from __future__ import print_function, division, absolute_import
import os
# DEFINE CONFIG
def configuration(parent_package='', top_path=None):
    """Build the ``numpy.distutils`` configuration for the ``bear`` package.

    Registers the sub-packages and their test sub-packages (the tests must
    be added after the packages themselves) so ``setup`` can install them.

    Parameters
    ----------
    parent_package : str
        Name of the enclosing package, if any.
    top_path : str or None
        Path to the top of the source tree.

    Returns
    -------
    numpy.distutils.misc_util.Configuration
    """
    from numpy.distutils.misc_util import Configuration
    # NOTE: the original built a `libs` list ('m' on POSIX) that was never
    # used anywhere; the dead code has been removed.
    config = Configuration('bear', parent_package, top_path)
    # modules
    config.add_subpackage('core')
    config.add_subpackage('templates')
    config.add_subpackage('utils')
    # module tests -- must be added after others!
    config.add_subpackage('core/tests')
    config.add_subpackage('templates/tests')
    config.add_subpackage('utils/tests')
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    # Build/install the package described by configuration() above.
    setup(**configuration(top_path='').todict())
| [
"numpy.distutils.misc_util.Configuration"
] | [((338, 385), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""bear"""', 'parent_package', 'top_path'], {}), "('bear', parent_package, top_path)\n", (351, 385), False, 'from numpy.distutils.misc_util import Configuration\n')] |
"""
EXPERIMENT
"""
from tools.model import *
from tools.data import DataNPZ
import argparse
import logging.config
from traceback import format_exc
import torch
import torch.nn as nn
from torch.optim import optimizer
from torch.utils.data import DataLoader
from torchmetrics import MeanAbsolutePercentageError
from torchmetrics import SymmetricMeanAbsolutePercentageError
from torchmetrics import WeightedMeanAbsolutePercentageError
from tools.settings import LOGGING_CONFIG
from tools.tools import experiment
from tools.metrics import *
import os
import random
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# ODE solvers selectable via -m/--method.
SOLVERS = ['euler', 'rk4']
# Loss functions selectable via -lf/--loss.
# NOTE: the original dict literal listed the 'MSE' key twice (the second
# entry silently overwrote the first); the duplicate has been removed.
CRITERION = {
    'MSE': nn.MSELoss(),
    'MAE': nn.L1Loss(),
    'SmoothMAE': nn.SmoothL1Loss(),
    'MAPE': MeanAbsolutePercentageError(),
    'WAPE': WeightedMeanAbsolutePercentageError(),
    'SMAPE': SymmetricMeanAbsolutePercentageError(),
    'RMSE': RMSE(),
    'MAGE': MAGE(),
    'MyMetric': MyMetric(),
    'WAE': WAE(),
}
# Optimizers selectable via -opt/--optim ('AMSGrad' maps to Adam; the
# amsgrad flag is enabled where the optimizer is constructed).
OPTIM = {
    'AMSGrad': torch.optim.Adam
}
# Validation metrics selectable via -mx/--metric.
METRICS = {
    'MyMetric': MyMetric(),
    'R2Score': R2Score(),
    'MAPE': MAPE(),
    'WAPE': WAPE(),
}
# Activation functions selectable via -af/--act_fun.
ACTIVATION = {
    'Tanh': nn.Tanh,
    'Tanhshrink': nn.Tanhshrink,
    'Sigmoid': nn.Sigmoid,
    'LogSigmoid': nn.LogSigmoid,
    'RELU': nn.ReLU,
    'ELU': nn.ELU,
    'SELU': nn.SELU,
    'CELU': nn.CELU,
    'GELU': nn.GELU
}
# Derivative-approximation model families selectable via -mod/--model.
MODEL = {
    'Linear': LinearODEF,
    'EmbededLinear': EmbededLinearODEF,
    'MultyLayer': MultyLayerODEF
}
# Command-line interface: experiment name, solver, loss/metric/optimizer
# choices and model hyper-parameters.  (Help strings are in Russian.)
parser = argparse.ArgumentParser(prog='NeuralODE soil experiment',
                                description="""Скрипт запускает эксперимент""",
                                formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-adjoint', action='store_true', help='Использовать adjoint_odeint или дефолтный')
parser.add_argument('-lr', type=float, default=0.01, help='Скорость обучения')
parser.add_argument('-batch_size', type=int, default=32, help='Размер батча')
parser.add_argument('-interval', type=int, default=1, help='Интервал для валидации и чекпоинта')
parser.add_argument('-n', '--name', type=str, required=True, help='Название эксперимента')
parser.add_argument('-m', '--method', type=str, choices=SOLVERS, default='euler', help='Выбор метода решения ОДУ')
parser.add_argument('-lf', '--loss', type=str, choices=CRITERION.keys(), default='MSE', help='Выбор функции потерь')
parser.add_argument('-mx', '--metric', type=str, choices=METRICS.keys(), default='MyMetric', help='Выбор метрики')
parser.add_argument('-opt', '--optim', type=str, choices=OPTIM.keys(), default='AMSGrad', help='Выбор меотда оптимизации')
parser.add_argument('-e', '--num_epoch', type=int, default=250, help='Количество эпох')
parser.add_argument('-l' ,'--layers', nargs='+', type=int, required=True, help='Кол-во весов скрытого слоя')
parser.add_argument('-emb' ,'--embeding', nargs='+', type=int, required=True, help='Расмерность вектора эмбединга')
parser.add_argument('-af' ,'--act_fun', type=str, choices=ACTIVATION.keys(), default='Tanh', help='Функция активации')
parser.add_argument('-mod' ,'--model', type=str, choices=MODEL.keys(), default='Linear', help='Вид модели аппроксимирующей производную')
opt = parser.parse_args()
# Route this experiment's log records to a per-experiment file.
Path(f'logs/').mkdir(exist_ok=True)
LOGGING_CONFIG['handlers']['file_handler']['filename'] = f'logs/{opt.name}.log'
# Select the ODE integrator implementation based on the -adjoint flag.
if opt.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger(__name__)
# Weight initialization applied via Module.apply(); the scheme depends on
# the chosen activation function.
if opt.act_fun == 'RELU':
    def init_weights(m):
        # He-style initialization for ReLU networks.
        # NOTE(review): std is set to 2/fan_in, whereas He init uses
        # sqrt(2/fan_in) — confirm whether the missing sqrt is intentional.
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 2/m.in_features)
            m.bias.data.fill_(0)
else:
    def init_weights(m):
        # Xavier/Glorot uniform initialization:
        # U(-a, a) with a = sqrt(6 / (fan_in + fan_out)).
        if isinstance(m, nn.Linear):
            a = np.sqrt(6)/np.sqrt(m.in_features + m.out_features)
            m.weight.data.uniform_(-a, a)
            m.bias.data.fill_(0)
if __name__ == "__main__":
    try:
        logger.info(f'Start {opt.name}')
        # Seed every RNG source for reproducibility.
        random.seed(42)
        os.environ["PL_GLOBAL_SEED"] = str(42)
        np.random.seed(42)
        torch.manual_seed(42)
        torch.cuda.manual_seed(42)
        torch.cuda.manual_seed_all(42)
        # Output directories for tensorboard logs and experiment artifacts.
        Path(f'tensorboard/').mkdir(exist_ok=True)
        Path(f"assets/{opt.name}").mkdir(exist_ok=True)
        Path(f"assets/{opt.name}/imgs").mkdir(exist_ok=True)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Normalization statistics and pre-computed categorical embeddings
        # (crop culture, soil type, cover type).
        norm = np.load('data/norm.npz')
        embs = np.load('data/embeddings.npz')
        embeding = [torch.tensor(embs['cult_emb'], device=device), torch.tensor(embs['soil_emb'], device=device), torch.tensor(embs['cover_emb'], device=device)]
        criterion = CRITERION[opt.loss].to(device)
        metric = METRICS[opt.metric]
        # Build the ODE-function model, initialize its weights and move it
        # to the target device.
        func = MODEL[opt.model](opt.layers, opt.embeding, ACTIVATION[opt.act_fun], torch.tensor(norm['mean'], device=device), torch.tensor(norm['std'], device=device)).apply(init_weights).to(device)
        optimizer = OPTIM[opt.optim](func.parameters(), lr=opt.lr, amsgrad=True)
        dataloader = DataLoader(DataNPZ('train'), batch_size=opt.batch_size, shuffle=True)
        val = DataLoader(DataNPZ('val'), batch_size=opt.batch_size, shuffle=False)
        sample = DataLoader(DataNPZ('sample'), batch_size=16, shuffle=False)
        experiment(odeint, func, dataloader, val, sample, optimizer, criterion, metric, opt, LOGGING_CONFIG, streamlit=False)
    except Exception as exp:
        # Log the full traceback before re-raising so failures are recorded.
        err = format_exc()
        logger.error(err)
        raise(exp)
    logger.info(f'End {opt.name}')
| [
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"matplotlib.pyplot.style.use",
"pathlib.Path",
"torch.nn.MSELoss",
"tools.data.DataNPZ",
"torchmetrics.WeightedMeanAbsolutePercentageError",
"random.seed",
"traceback.format_exc",
"torchmetrics.SymmetricMeanAbsolutePercentageError",
... | [((645, 669), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (658, 669), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1726), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""NeuralODE soil experiment"""', 'description': '"""Скрипт запускает эксперимент"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), "(prog='NeuralODE soil experiment', description=\n 'Скрипт запускает эксперимент', formatter_class=argparse.\n RawTextHelpFormatter)\n", (1591, 1726), False, 'import argparse\n'), ((724, 736), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (734, 736), True, 'import torch.nn as nn\n'), ((749, 761), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (759, 761), True, 'import torch.nn as nn\n'), ((774, 785), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (783, 785), True, 'import torch.nn as nn\n'), ((804, 821), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (819, 821), True, 'import torch.nn as nn\n'), ((835, 864), 'torchmetrics.MeanAbsolutePercentageError', 'MeanAbsolutePercentageError', ([], {}), '()\n', (862, 864), False, 'from torchmetrics import MeanAbsolutePercentageError\n'), ((878, 915), 'torchmetrics.WeightedMeanAbsolutePercentageError', 'WeightedMeanAbsolutePercentageError', ([], {}), '()\n', (913, 915), False, 'from torchmetrics import WeightedMeanAbsolutePercentageError\n'), ((930, 968), 'torchmetrics.SymmetricMeanAbsolutePercentageError', 'SymmetricMeanAbsolutePercentageError', ([], {}), '()\n', (966, 968), False, 'from torchmetrics import SymmetricMeanAbsolutePercentageError\n'), ((3302, 3316), 'pathlib.Path', 'Path', (['f"""logs/"""'], {}), "(f'logs/')\n", (3306, 3316), False, 'from pathlib import Path\n'), ((4088, 4103), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (4099, 4103), False, 'import random\n'), ((4159, 4177), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4173, 4177), True, 'import numpy as np\n'), 
((4186, 4207), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (4203, 4207), False, 'import torch\n'), ((4216, 4242), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(42)'], {}), '(42)\n', (4238, 4242), False, 'import torch\n'), ((4251, 4281), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(42)'], {}), '(42)\n', (4277, 4281), False, 'import torch\n'), ((4546, 4570), 'numpy.load', 'np.load', (['"""data/norm.npz"""'], {}), "('data/norm.npz')\n", (4553, 4570), True, 'import numpy as np\n'), ((4586, 4616), 'numpy.load', 'np.load', (['"""data/embeddings.npz"""'], {}), "('data/embeddings.npz')\n", (4593, 4616), True, 'import numpy as np\n'), ((5408, 5529), 'tools.tools.experiment', 'experiment', (['odeint', 'func', 'dataloader', 'val', 'sample', 'optimizer', 'criterion', 'metric', 'opt', 'LOGGING_CONFIG'], {'streamlit': '(False)'}), '(odeint, func, dataloader, val, sample, optimizer, criterion,\n metric, opt, LOGGING_CONFIG, streamlit=False)\n', (5418, 5529), False, 'from tools.tools import experiment\n'), ((4637, 4682), 'torch.tensor', 'torch.tensor', (["embs['cult_emb']"], {'device': 'device'}), "(embs['cult_emb'], device=device)\n", (4649, 4682), False, 'import torch\n'), ((4684, 4729), 'torch.tensor', 'torch.tensor', (["embs['soil_emb']"], {'device': 'device'}), "(embs['soil_emb'], device=device)\n", (4696, 4729), False, 'import torch\n'), ((4731, 4777), 'torch.tensor', 'torch.tensor', (["embs['cover_emb']"], {'device': 'device'}), "(embs['cover_emb'], device=device)\n", (4743, 4777), False, 'import torch\n'), ((5180, 5196), 'tools.data.DataNPZ', 'DataNPZ', (['"""train"""'], {}), "('train')\n", (5187, 5196), False, 'from tools.data import DataNPZ\n'), ((5264, 5278), 'tools.data.DataNPZ', 'DataNPZ', (['"""val"""'], {}), "('val')\n", (5271, 5278), False, 'from tools.data import DataNPZ\n'), ((5350, 5367), 'tools.data.DataNPZ', 'DataNPZ', (['"""sample"""'], {}), "('sample')\n", (5357, 5367), False, 'from tools.data import 
DataNPZ\n'), ((5571, 5583), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (5581, 5583), False, 'from traceback import format_exc\n'), ((3872, 3882), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (3879, 3882), True, 'import numpy as np\n'), ((3883, 3922), 'numpy.sqrt', 'np.sqrt', (['(m.in_features + m.out_features)'], {}), '(m.in_features + m.out_features)\n', (3890, 3922), True, 'import numpy as np\n'), ((4291, 4312), 'pathlib.Path', 'Path', (['f"""tensorboard/"""'], {}), "(f'tensorboard/')\n", (4295, 4312), False, 'from pathlib import Path\n'), ((4342, 4368), 'pathlib.Path', 'Path', (['f"""assets/{opt.name}"""'], {}), "(f'assets/{opt.name}')\n", (4346, 4368), False, 'from pathlib import Path\n'), ((4398, 4429), 'pathlib.Path', 'Path', (['f"""assets/{opt.name}/imgs"""'], {}), "(f'assets/{opt.name}/imgs')\n", (4402, 4429), False, 'from pathlib import Path\n'), ((4492, 4517), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4515, 4517), False, 'import torch\n'), ((4951, 4992), 'torch.tensor', 'torch.tensor', (["norm['mean']"], {'device': 'device'}), "(norm['mean'], device=device)\n", (4963, 4992), False, 'import torch\n'), ((4994, 5034), 'torch.tensor', 'torch.tensor', (["norm['std']"], {'device': 'device'}), "(norm['std'], device=device)\n", (5006, 5034), False, 'import torch\n')] |
import numpy as np
def load_data(path='input.csv'):
    """Load a comma-separated Intcode program as a 1-D int32 array.

    Args:
        path: CSV file containing the program.  Defaults to ``input.csv``,
            preserving the original behavior for existing callers.

    Returns:
        numpy.ndarray: the program's integers, dtype int32.
    """
    return np.loadtxt(path, dtype='int32', delimiter=',')
def find_noun_verb(data):
    """Search for the (noun, verb) pair that makes the Intcode program
    leave 19690720 at position 0 (Advent of Code 2019, day 2, part 2).

    The program is run once for every noun/verb combination in [0, 100);
    positions 1 and 2 are overwritten with the candidate values before
    each run.  Execution stops at opcode 99 or on any out-of-range
    memory access.

    Args:
        data: sequence of integers — the Intcode program (length >= 3).

    Returns:
        The first matching ``(noun, verb)`` tuple, or ``None`` when no
        combination produces the target value.
    """
    for noun in range(0, 100):
        for verb in range(0, 100):
            d = np.array(data, copy=True)
            d[1] = noun
            d[2] = verb
            index = 0
            while True:
                # The whole fetch/execute step is guarded: the original
                # left the opcode fetch outside its (bare) except clause,
                # so a program running off the end of memory crashed the
                # search instead of moving to the next candidate.
                try:
                    cmd = d[index * 4]
                    if cmd == 99:
                        break
                    in1 = d[index*4 + 1]
                    val1 = d[in1]
                    in2 = d[index*4 + 2]
                    val2 = d[in2]
                    out = d[index*4 + 3]
                    if cmd == 1:
                        d[out] = val1 + val2
                    elif cmd == 2:
                        d[out] = val1 * val2
                except IndexError:
                    # Out-of-range access: treat the program as halted.
                    break
                index += 1
            print(f'noun={noun}, verb={verb}, total={d[0]}')
            if d[0] == 19690720:
                return noun, verb
    return None
def main():
    """Load the puzzle input and report the noun/verb answer."""
    noun, verb = find_noun_verb(load_data())
    print(f'noun={noun}, verb={verb}, total={100*noun + verb}')
if __name__ == "__main__":
    main()
| [
"numpy.array",
"numpy.loadtxt"
] | [((46, 99), 'numpy.loadtxt', 'np.loadtxt', (['"""input.csv"""'], {'dtype': '"""int32"""', 'delimiter': '""","""'}), "('input.csv', dtype='int32', delimiter=',')\n", (56, 99), True, 'import numpy as np\n'), ((211, 236), 'numpy.array', 'np.array', (['data'], {'copy': '(True)'}), '(data, copy=True)\n', (219, 236), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from utils import AverageMeter, save
from .face_evaluate import evaluate
from ..base_training_agent import MetaTrainingAgent
class FRTrainingAgent(MetaTrainingAgent):
    """The training agent to train the supernet and the searched architecture.
    By implementing TrainingAgent class, users can adapt the searching and evaluating agent into
    various tasks easily.
    """
    def _search_validate_step(self, model, val_loader, agent, epoch):
        """ The validate step for searching process.
        Args:
            model (nn.Module)
            val_loader (torch.utils.data.DataLoader)
            agent (Object): The search agent.
            epoch (int)
        Return:
            evaluate_metric (float): The performance of the supernet
        """
        model.eval()
        start_time = time.time()
        agent._iteration_preprocess()
        # searching_evaluate returns a 1-tuple holding the negated average
        # loss, so [0] extracts the scalar.
        minus_losses_avg = self.searching_evaluate(model, val_loader, agent.device, agent.criterion)[0]
        # Re-negate so the (positive) loss value is what gets logged.
        agent.writer.add_scalar("Valid/_losses/", -minus_losses_avg, epoch)
        agent.logger.info(
            f"Valid : [{epoch+1:3d}/{agent.epochs}]"
            f"Final Losses: {-minus_losses_avg:.2f}"
            f"Time {time.time() - start_time:.2f}")
        return minus_losses_avg
    def _evaluate_validate_step(self, model, val_loader, agent, epoch):
        """ The validate step for evaluating process (face verification):
        extract embeddings for each image pair, then score accuracy.
        Args:
            model (nn.Module)
            val_loader (torch.utils.data.DataLoader)
            agent (Object): The evaluate agent
            epoch (int)
        Return:
            evaluate_metric (float): The performance of the searched model.
        """
        model.eval()
        start_time = time.time()
        all_labels = []
        all_embeds1, all_embeds2 = [], []
        with torch.no_grad():
            for idx, ((imgs1, imgs2), labels) in enumerate(val_loader):
                # Move data sample
                batch_size = labels.size(0)
                imgs1 = imgs1.to(agent.device)
                imgs2 = imgs2.to(agent.device)
                labels = labels.to(agent.device)
                # Extract embeddings
                embeds1 = model(imgs1)
                embeds2 = model(imgs2)
                if agent.config["criterion"]["normalize"]:
                    # For angular based ==============
                    # L2-normalize the embeddings when the criterion is
                    # angular/margin based.
                    embeds1 = F.normalize(embeds1, p=2)
                    embeds2 = F.normalize(embeds2, p=2)
                    # ================================
                # Accumulates
                all_labels.append(labels.detach().cpu().numpy())
                all_embeds1.append(embeds1.detach().cpu().numpy())
                all_embeds2.append(embeds2.detach().cpu().numpy())
        # Evaluate
        labels = np.concatenate(all_labels)
        embeds1 = np.concatenate(all_embeds1)
        embeds2 = np.concatenate(all_embeds2)
        TP_ratio, FP_ratio, accs, best_thresholds = evaluate(embeds1, embeds2, labels)
        # Save Checkpoint
        acc_avg = accs.mean()
        thresh_avg = best_thresholds.mean()
        agent.writer.add_scalar("Valid/_acc/", acc_avg, epoch)
        agent.writer.add_scalar("Valid/_thresh/", thresh_avg, epoch)
        agent.logger.info(
            f"Valid : [{epoch+1:3d}/{agent.epochs}] "
            f"Final Acc : {acc_avg:.5f} Final Thresh : {thresh_avg:.5f} "
            f"Time {time.time() - start_time:.2f}")
        return acc_avg
    @staticmethod
    def searching_evaluate(model, val_loader, device, criterion):
        """ Evaluating the performance of the supernet. The search strategy will evaluate
        the architectures by this static method to search.
        Args:
            model (nn.Module)
            val_loader (torch.utils.data.DataLoader)
            device (torch.device)
            criterion (nn.Module)
        Return:
            evaluate_metric (float): The performance of the supernet.
        """
        losses = AverageMeter()
        with torch.no_grad():
            for step, (X, y) in enumerate(val_loader):
                X, y = X.to(device, non_blocking=True), \
                    y.to(device, non_blocking=True)
                N = X.shape[0]
                outs = model(X)
                loss = criterion(outs, y)
                losses.update(loss.item(), N)
        # Make search strategy cam compare the architecture performance
        # NOTE: the trailing comma makes this a 1-tuple; callers index [0].
        return -losses.get_avg(),
    def _training_step(
            self,
            model,
            train_loader,
            agent,
            epoch,
            print_freq=100):
        """ One epoch of training over *train_loader*.
        Args:
            model (nn.Module)
            train_loader (torch.utils.data.DataLoader)
            agent (Object): The evaluate agent
            epoch (int)
            print_freq (int): log every this many steps
        """
        losses = AverageMeter()
        model.train()
        start_time = time.time()
        for step, (X, y) in enumerate(train_loader):
            # In search mode the agent prepares (e.g. samples) each iteration.
            if agent.agent_state == "search":
                agent._iteration_preprocess()
            X, y = X.to(
                agent.device, non_blocking=True), y.to(
                agent.device, non_blocking=True)
            N = X.shape[0]
            agent.optimizer.zero_grad()
            outs = model(X)
            loss = agent.criterion(outs, y)
            loss.backward()
            agent.optimizer.step()
            # The LR scheduler is stepped per batch, not per epoch.
            agent.lr_scheduler.step()
            losses.update(loss.item(), N)
            if (step > 1 and step % print_freq == 0) or (step == len(train_loader) - 1):
                agent.logger.info(f"Train : [{(epoch+1):3d}/{agent.epochs}] "
                                  f"Step {step:3d}/{len(train_loader)-1:3d} Loss {losses.get_avg():.3f} ")
        agent.writer.add_scalar("Train/_loss/", losses.get_avg(), epoch)
        agent.logger.info(
            f"Train: [{epoch+1:3d}/{agent.epochs}] Final Loss {losses.get_avg():.3f} "
            f"Time {time.time() - start_time:.2f} ")
| [
"utils.AverageMeter",
"time.time",
"torch.nn.functional.normalize",
"torch.no_grad",
"numpy.concatenate"
] | [((900, 911), 'time.time', 'time.time', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1819, 1830), 'time.time', 'time.time', ([], {}), '()\n', (1828, 1830), False, 'import time\n'), ((2887, 2913), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {}), '(all_labels)\n', (2901, 2913), True, 'import numpy as np\n'), ((2932, 2959), 'numpy.concatenate', 'np.concatenate', (['all_embeds1'], {}), '(all_embeds1)\n', (2946, 2959), True, 'import numpy as np\n'), ((2978, 3005), 'numpy.concatenate', 'np.concatenate', (['all_embeds2'], {}), '(all_embeds2)\n', (2992, 3005), True, 'import numpy as np\n'), ((4075, 4089), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4087, 4089), False, 'from utils import AverageMeter, save\n'), ((4913, 4927), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4925, 4927), False, 'from utils import AverageMeter, save\n'), ((4972, 4983), 'time.time', 'time.time', ([], {}), '()\n', (4981, 4983), False, 'import time\n'), ((1912, 1927), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1925, 1927), False, 'import torch\n'), ((4103, 4118), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4116, 4118), False, 'import torch\n'), ((2483, 2508), 'torch.nn.functional.normalize', 'F.normalize', (['embeds1'], {'p': '(2)'}), '(embeds1, p=2)\n', (2494, 2508), True, 'import torch.nn.functional as F\n'), ((2539, 2564), 'torch.nn.functional.normalize', 'F.normalize', (['embeds2'], {'p': '(2)'}), '(embeds2, p=2)\n', (2550, 2564), True, 'import torch.nn.functional as F\n'), ((1287, 1298), 'time.time', 'time.time', ([], {}), '()\n', (1296, 1298), False, 'import time\n'), ((3503, 3514), 'time.time', 'time.time', ([], {}), '()\n', (3512, 3514), False, 'import time\n'), ((6031, 6042), 'time.time', 'time.time', ([], {}), '()\n', (6040, 6042), False, 'import time\n')] |
#!/usr/bin/env python
"""
Created on May 15, 2012
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "May 15, 2012"
import unittest
import numpy as np
from pyhull.simplex import Simplex
class SimplexTest(unittest.TestCase):
    def setUp(self):
        # Right tetrahedron: the origin plus the three unit vectors.
        vertices = [[0, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]]
        self.simplex = Simplex(vertices)
    def test_in_simplex(self):
        self.assertTrue(self.simplex.in_simplex([0.1, 0.1, 0.1]))
        self.assertFalse(self.simplex.in_simplex([0.6, 0.6, 0.6]))
        # Random points with all coordinates in [0, 1/3) always lie inside.
        for _ in range(10):
            point = np.random.random_sample(size=3) / 3
            self.assertTrue(self.simplex.in_simplex(point))
    def test_2dtriangle(self):
        triangle = Simplex([[0, 1], [1, 1], [1, 0]])
        cases = [
            ([0.5, 0.5], [0.5, 0, 0.5]),
            ([0.5, 1], [0.5, 0.5, 0]),
            ([0.5, 0.75], [0.5, 0.25, 0.25]),
            ([0.75, 0.75], [0.25, 0.5, 0.25]),
        ]
        for point, expected in cases:
            np.testing.assert_almost_equal(triangle.bary_coords(point), expected)
        # A degenerate (2-point) simplex cannot yield barycentric coordinates.
        line = Simplex([[1, 1], [1, 0]])
        self.assertRaises(ValueError, line.bary_coords, [0.5, 0.5])
    def test_volume(self):
        # Volume of a right tetrahedron with unit legs is 1/6.
        self.assertAlmostEqual(self.simplex.volume, 1/6)
if __name__ == "__main__":
    # Discover and run all tests in this module when executed directly.
    unittest.main()
| [
"unittest.main",
"pyhull.simplex.Simplex",
"numpy.random.random_sample"
] | [((1626, 1641), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1639, 1641), False, 'import unittest\n'), ((570, 585), 'pyhull.simplex.Simplex', 'Simplex', (['coords'], {}), '(coords)\n', (577, 585), False, 'from pyhull.simplex import Simplex\n'), ((939, 972), 'pyhull.simplex.Simplex', 'Simplex', (['[[0, 1], [1, 1], [1, 0]]'], {}), '([[0, 1], [1, 1], [1, 0]])\n', (946, 972), False, 'from pyhull.simplex import Simplex\n'), ((1319, 1344), 'pyhull.simplex.Simplex', 'Simplex', (['[[1, 1], [1, 0]]'], {}), '([[1, 1], [1, 0]])\n', (1326, 1344), False, 'from pyhull.simplex import Simplex\n'), ((799, 830), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': '(3)'}), '(size=3)\n', (822, 830), True, 'import numpy as np\n')] |
import sys
import os
import numpy as np
import time
from torch.utils import data
import shutil
import cv2
import nrrd
import torch
from torch.autograd import Variable
# Make the sibling project modules importable before the imports below.
sys.path.insert(0, "../image_fusion/")
sys.path.insert(0, "../cross_validation/scripts/models/")
# Project-local modules (resolved via the sys.path entries above).
import predictForSubject
import dicomToVolume
import fuseVolumes
import measure
from model_resUnet import UNet
c_write_nrrd = False  # Write output volumes in nrrd format
c_write_mip = True  # Write outputs as mean intensity projections in png format
c_target_spacing = np.array((2.23214293, 2.23214293, 4.5))  # Target spacing to which all voxels are resampled when fusing stations
def main(argv):
    """Batch kidney segmentation inference over a list of UKB subjects.

    Reads subject ids from ``ids.txt``, restores a trained UNet checkpoint,
    then predicts, fuses and measures the kidney segmentation for every
    subject, appending results under ``path_out``.

    Parameters
    ----------
    argv : list of str
        Command line arguments (currently unused; all paths are hard-coded
        below).
    """
    path_ids = "ids.txt" # List of subject ids to be processed
    path_dicom = "/media/veracrypt1/UKB_DICOM/" # Path to UKB dicoms
    path_checkpoint = "/media/taro/DATA/Taro/Projects/ukb_segmentation/cross-validation/networks/kidney_122_traintest_watRoi192_deform_80kLR/subset_0/snapshots/iteration_080000.pth.tar"
    path_out = "/media/taro/DATA/Taro/Projects/ukb_segmentation/github/inference_kidney_122/"
    # Select which MRI stations to perform inference on
    station_ids = [1, 2]
    #
    time_start = time.time()
    # Refuse to overwrite an existing output folder.
    if not os.path.exists(path_out):
        os.makedirs(path_out)
        os.makedirs(path_out + "NRRD/")
        os.makedirs(path_out + "MIP/")
    else:
        print("ABORT: Output folder already exists!")
        sys.exit()
    # Open output measurement file and later keep appending to it
    with open(path_out + "measurements.txt", "w") as f:
        f.write("eid,total_kidney_tissue_in_ml,kidney_left_tissue_in_ml,kidney_right_tissue_in_ml,distance_x_in_mm,distance_y_in_mm,distance_z_in_mm\n")
    # Open quality metric file and later keep appending to it
    with open(path_out + "quality.txt", "w") as f:
        f.write("eid,img_fusion_cost,seg_fusion_cost,seg_smoothness,kidney_z_cost\n")
    # Read ids (first comma-separated column of each line)
    with open(path_ids) as f: entries = f.readlines()
    subject_ids = [f.split(",")[0].replace("\n","") for f in entries]
    subject_ids = np.ascontiguousarray(np.array(subject_ids).astype("int"))
    #
    N = len(subject_ids)
    print("Found {} subject ids...".format(N))
    #
    print("Initializing network...")
    net = UNet(3, 2).cuda() # requires a CUDA-capable GPU
    checkpoint = torch.load(path_checkpoint, map_location={"cuda" : "cpu"})
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    # Use pytorch data loader for parallel loading and prediction
    loader = getDataloader(path_dicom, subject_ids, station_ids)
    #
    i = 0
    for station_vols, headers, subject_id in loader:
        subject_id = subject_id[0]
        print("Processing subject {0} ({1:0.3f}% completed)".format(subject_id, 100 * i / N))
        processSubject(station_vols, headers, net, subject_id, path_out)
        i += 1
    # Write runtime
    time_end = time.time()
    runtime = time_end - time_start
    print("Elapsed time: {}".format(runtime))
    with open(path_out + "runtime.txt", "w") as f:
        f.write("{}".format(runtime))
def processSubject(station_vols, headers, net, subject_id, path_out):
    """Predict, fuse and measure the kidney segmentation of one subject.

    Appends one result line each to ``measurements.txt`` and
    ``quality.txt`` and optionally writes NRRD volumes and a MIP overlay.

    Parameters
    ----------
    station_vols : list
        Per-station volumes as yielded by the DataLoader (batch dim first).
    headers : list of dict
        Per-station metadata, still batch-wrapped by the DataLoader.
    net : torch.nn.Module
        Trained segmentation network.
    subject_id : int or str
        UKB subject id; used to name the per-subject output files.
    path_out : str
        Output folder holding the previously created result files.
    """
    # Strip the batch dimension and tensor wrappers added by the dataloader.
    for i in range(len(station_vols)):
        station_vols[i] = station_vols[i].data.numpy()[0, :, :, :]
        headers[i]["space origin"] = headers[i]["space origin"].data.numpy()[0, :]
        headers[i]["space directions"] = headers[i]["space directions"].data.numpy()[0, :]
        headers[i]["encoding"] = headers[i]["encoding"][0]
        headers[i]["dimension"] = headers[i]["dimension"].data.numpy()[0]
        headers[i]["space dimension"] = headers[i]["space dimension"].data.numpy()[0]
    # Predict and fuse stations
    (img, out, header, img_fusion_cost, seg_fusion_cost) = predictForSubject.predictForSubject(station_vols, headers, net, c_target_spacing, True)
    #
    voxel_dim = c_target_spacing
    # Get measurements and quality ratings from output
    (volume_left, volume_right, volume_total, offsets, kidney_z_cost, label_mask) = measure.measureKidneys(out, voxel_dim)
    seg_smoothness = rateSegmentationSmoothness(out)
    # Append to previously opened text files
    writeTxtLine(path_out + "measurements.txt", [subject_id, volume_total, volume_left, volume_right, offsets[0], offsets[1], offsets[2]])
    writeTxtLine(path_out + "quality.txt", [subject_id, img_fusion_cost, seg_fusion_cost, seg_smoothness, kidney_z_cost])
    # Write volumes
    if c_write_nrrd:
        nrrd.write(path_out + "NRRD/{}_img.nrrd".format(subject_id), img, header, compression_level=1)
        nrrd.write(path_out + "NRRD/{}_out.nrrd".format(subject_id), out, header, compression_level=1)
    # Write mean intensity projections
    if c_write_mip:
        proj_out = formatMip(img, out, label_mask)
        cv2.imwrite(path_out + "MIP/{}_mip.png".format(subject_id), proj_out)
def writeTxtLine(input_path, values):
    """Append one comma-separated line of values to a text file.

    Parameters
    ----------
    input_path : str
        Path of the file to append to (created if missing).
    values : sequence
        Non-empty sequence of values; each is formatted with ``format``
        and the results are joined by commas.
    """
    with open(input_path, "a") as out_file:
        out_file.write("{}".format(values[0]))
        out_file.write("".join(",{}".format(v) for v in values[1:]))
        out_file.write("\n")
# Mean intensity projection
def formatMip(img, out, label_mask):
    """Build a color-coded mean-intensity-projection overlay image.

    The volumes are projected along axis 1; channel 0 highlights the left
    kidney, channel 1 the scrap components, channel 2 the right kidney,
    each blended 50/50 with the projected image intensities.

    Parameters
    ----------
    img : numpy.ndarray
        3-D intensity volume.
    out : numpy.ndarray
        3-D segmentation volume (unused here; kept for interface parity).
    label_mask : numpy.ndarray
        3-D label volume (1 = left kidney, 2 = right kidney,
        >2 = scrap components).

    Returns
    -------
    numpy.ndarray
        uint8 image with three channels, rotated by 90 degrees.
    """
    intensity = normalize(np.sum(img, axis=1).astype("float"))
    left = normalize(np.sum(label_mask == 1, axis=1).astype("float"))
    right = normalize(np.sum(label_mask == 2, axis=1).astype("float"))
    scrap = normalize(np.sum(label_mask > 2, axis=1).astype("float"))
    overlay = np.zeros((intensity.shape[0], intensity.shape[1], 3))
    # Blue: left kidney, Green: scrap volume, Red: right kidney.
    overlay[:, :, 0] = 0.5 * intensity + 0.5 * left
    overlay[:, :, 1] = 0.5 * intensity + 0.5 * scrap
    overlay[:, :, 2] = 0.5 * intensity + 0.5 * right
    return np.rot90((normalize(overlay) * 255).astype("uint8"))
def rateSegmentationSmoothness(seg):
    """Rate the smoothness of a segmentation along its third axis.

    The volume is compared against a copy of itself shifted by one voxel
    along the longitudinal (third) axis; the summed absolute difference,
    normalized by the segmented voxel count, is returned negated so that
    smoother segmentations score closer to zero.

    Parameters
    ----------
    seg : numpy.ndarray
        3-D segmentation volume; non-zero marks segmented voxels.

    Returns
    -------
    float or int
        Negative normalized difference, or -1 for an empty segmentation.
    """
    voxel_count = np.sum(seg)
    if voxel_count == 0:
        return -1
    shifted = np.zeros(seg.shape)
    shifted[:, :, 1:] = seg[:, :, :-1]
    return -np.sum(np.abs(seg - shifted)) / voxel_count
def normalize(values):
    """Linearly rescale an array into the range [0, 1].

    A constant or empty input maps to an all-zero array. The input array
    is never modified in place (the previous constant-input branch wrote
    ``values[:] = 0`` into the caller's array).

    Parameters
    ----------
    values : numpy.ndarray
        Array to rescale.

    Returns
    -------
    numpy.ndarray
        New array with values scaled into [0, 1]; all zeros when the
        input has no spread.
    """
    if values.size == 0:
        return np.zeros_like(values, dtype=float)
    lo = np.amin(values)
    hi = np.amax(values)
    if hi == lo:
        # Constant input: return zeros without clobbering the caller's array.
        return np.zeros_like(values, dtype=float)
    return (values - lo) / (hi - lo)
def getDataloader(path_dicom, subject_ids, station_ids):
    """Build a DataLoader yielding one subject's station volumes per step.

    Parameters
    ----------
    path_dicom : str
        Folder containing the UKB DICOM zip archives.
    subject_ids : array-like
        Subject ids to iterate over.
    station_ids : list of int
        MRI stations to extract per subject.

    Returns
    -------
    torch.utils.data.DataLoader
        Loader with 8 worker processes, batch size 1, no shuffling and
        pinned memory.
    """
    dicom_dataset = DicomDataset(path_dicom, subject_ids, station_ids)
    return torch.utils.data.DataLoader(dicom_dataset,
                                       num_workers=8,
                                       batch_size=1,
                                       shuffle=False,
                                       pin_memory=True)
class DicomDataset(data.Dataset):
    """Dataset of per-station MRI volumes decoded from UKB DICOM zips.

    Each item is a tuple ``(station_vols, headers, subject_id)`` where
    ``station_vols`` holds one contiguous numpy array per requested
    station and ``headers`` the matching metadata dictionaries.
    """
    def __init__(self, path_dicom, subject_ids, station_ids):
        self.path_dicom = path_dicom    # folder with <eid>_20201_2_0.zip archives
        self.subject_ids = subject_ids  # ids served by this dataset
        self.station_ids = station_ids  # MRI stations to extract
    def __len__(self):
        return len(self.subject_ids)
    def __getitem__(self, index):
        subject_id = self.subject_ids[index]
        zip_path = self.path_dicom + "{}_20201_2_0.zip".format(subject_id)
        (station_vols, headers) = dicomToVolume.dicomToVolume(zip_path, self.station_ids)
        # C-contiguous copies so downstream code can rely on memory layout.
        station_vols = [np.ascontiguousarray(vol) for vol in station_vols]
        return station_vols, headers, subject_id
# Run batch inference when executed as a script.
if __name__ == '__main__':
    main(sys.argv)
| [
"numpy.sum",
"numpy.abs",
"numpy.amin",
"model_resUnet.UNet",
"numpy.rot90",
"numpy.unique",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"predictForSubject.predictForSubject",
"sys.exit",
"os.makedirs",
"numpy.zeros",
"sys.path.insert",
"measure.measureKidneys",
"tim... | [((173, 211), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../image_fusion/"""'], {}), "(0, '../image_fusion/')\n", (188, 211), False, 'import sys\n'), ((212, 269), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../cross_validation/scripts/models/"""'], {}), "(0, '../cross_validation/scripts/models/')\n", (227, 269), False, 'import sys\n'), ((543, 582), 'numpy.array', 'np.array', (['(2.23214293, 2.23214293, 4.5)'], {}), '((2.23214293, 2.23214293, 4.5))\n', (551, 582), True, 'import numpy as np\n'), ((1192, 1203), 'time.time', 'time.time', ([], {}), '()\n', (1201, 1203), False, 'import time\n'), ((2307, 2364), 'torch.load', 'torch.load', (['path_checkpoint'], {'map_location': "{'cuda': 'cpu'}"}), "(path_checkpoint, map_location={'cuda': 'cpu'})\n", (2317, 2364), False, 'import torch\n'), ((2889, 2900), 'time.time', 'time.time', ([], {}), '()\n', (2898, 2900), False, 'import time\n'), ((3811, 3902), 'predictForSubject.predictForSubject', 'predictForSubject.predictForSubject', (['station_vols', 'headers', 'net', 'c_target_spacing', '(True)'], {}), '(station_vols, headers, net,\n c_target_spacing, True)\n', (3846, 3902), False, 'import predictForSubject\n'), ((4080, 4118), 'measure.measureKidneys', 'measure.measureKidneys', (['out', 'voxel_dim'], {}), '(out, voxel_dim)\n', (4102, 4118), False, 'import measure\n'), ((5365, 5416), 'numpy.zeros', 'np.zeros', (['(img_proj.shape[0], img_proj.shape[1], 3)'], {}), '((img_proj.shape[0], img_proj.shape[1], 3))\n', (5373, 5416), True, 'import numpy as np\n'), ((6035, 6053), 'numpy.rot90', 'np.rot90', (['proj_out'], {}), '(proj_out)\n', (6043, 6053), True, 'import numpy as np\n'), ((6245, 6264), 'numpy.zeros', 'np.zeros', (['seg.shape'], {}), '(seg.shape)\n', (6253, 6264), True, 'import numpy as np\n'), ((6408, 6419), 'numpy.sum', 'np.sum', (['seg'], {}), '(seg)\n', (6414, 6419), True, 'import numpy as np\n'), ((6852, 6954), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], 
{'num_workers': '(8)', 'batch_size': '(1)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(dataset, num_workers=8, batch_size=1, shuffle=\n False, pin_memory=True)\n', (6879, 6954), False, 'import torch\n'), ((1228, 1252), 'os.path.exists', 'os.path.exists', (['path_out'], {}), '(path_out)\n', (1242, 1252), False, 'import os\n'), ((1262, 1283), 'os.makedirs', 'os.makedirs', (['path_out'], {}), '(path_out)\n', (1273, 1283), False, 'import os\n'), ((1292, 1323), 'os.makedirs', 'os.makedirs', (["(path_out + 'NRRD/')"], {}), "(path_out + 'NRRD/')\n", (1303, 1323), False, 'import os\n'), ((1332, 1362), 'os.makedirs', 'os.makedirs', (["(path_out + 'MIP/')"], {}), "(path_out + 'MIP/')\n", (1343, 1362), False, 'import os\n'), ((1435, 1445), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1443, 1445), False, 'import sys\n'), ((6375, 6394), 'numpy.abs', 'np.abs', (['(seg - seg_0)'], {}), '(seg - seg_0)\n', (6381, 6394), True, 'import numpy as np\n'), ((2272, 2282), 'model_resUnet.UNet', 'UNet', (['(3)', '(2)'], {}), '(3, 2)\n', (2276, 2282), False, 'from model_resUnet import UNet\n'), ((6556, 6573), 'numpy.unique', 'np.unique', (['values'], {}), '(values)\n', (6565, 6573), True, 'import numpy as np\n'), ((7728, 7765), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['station_vols[i]'], {}), '(station_vols[i])\n', (7748, 7765), True, 'import numpy as np\n'), ((2101, 2122), 'numpy.array', 'np.array', (['subject_ids'], {}), '(subject_ids)\n', (2109, 2122), True, 'import numpy as np\n'), ((5281, 5300), 'numpy.sum', 'np.sum', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (5287, 5300), True, 'import numpy as np\n'), ((5494, 5525), 'numpy.sum', 'np.sum', (['(label_mask == 1)'], {'axis': '(1)'}), '(label_mask == 1, axis=1)\n', (5500, 5525), True, 'import numpy as np\n'), ((5576, 5607), 'numpy.sum', 'np.sum', (['(label_mask == 2)'], {'axis': '(1)'}), '(label_mask == 2, axis=1)\n', (5582, 5607), True, 'import numpy as np\n'), ((5658, 5688), 'numpy.sum', 'np.sum', (['(label_mask > 
2)'], {'axis': '(1)'}), '(label_mask > 2, axis=1)\n', (5664, 5688), True, 'import numpy as np\n'), ((6607, 6622), 'numpy.amin', 'np.amin', (['values'], {}), '(values)\n', (6614, 6622), True, 'import numpy as np\n'), ((6627, 6642), 'numpy.amax', 'np.amax', (['values'], {}), '(values)\n', (6634, 6642), True, 'import numpy as np\n'), ((6645, 6660), 'numpy.amin', 'np.amin', (['values'], {}), '(values)\n', (6652, 6660), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program for
Created on %(date)
@author : trismonock
@mail : <EMAIL>
"""
import numpy as np
def uvspec_input(lib_data_dir, cloud_file, input_file, sza, phi0, phi, zout, doy,
                 albedo_file, lambda0, lambda1, tau550, aerosol_season, aerosol_haze):
    """Write a libRadtran/uvspec input file for an ice-cloud radiance run.

    The file is now written inside a ``with`` block so the handle is
    always closed, even if a write fails; the emitted content is
    unchanged.

    Parameters
    ----------
    lib_data_dir : str
        libRadtran data directory (no trailing slash).
    cloud_file : str
        Path of the 1D ice-cloud profile file.
    input_file : str
        Path of the uvspec input file to create.
    sza, phi0, phi : str
        Solar zenith angle, solar azimuth and viewing azimuth (degrees).
    zout : str
        Output altitude(s).
    doy : str
        Day of year.
    albedo_file : str
        Path of the surface albedo file.
    lambda0, lambda1 : str
        Lower and upper wavelength bounds.
    tau550 : str
        Cloud optical thickness at 550 nm.
    aerosol_season, aerosol_haze : str
        libRadtran aerosol season and haze model identifiers.

    Returns
    -------
    None
    """
    # +++++++++++++++++++++++++
    # define standard parameter
    # +++++++++++++++++++++++++
    # standard parameter (change accordingly)
    atm_profile = "afglms"
    source = "solar"
    solar_type = "kurudz_1.0nm"
    rte_solver = "fdisort2"
    mol_abs_parameter = "lowtran"
    umu = np.array([-1.0, 1.0], dtype = float)  # default setting = upward and nadir looking
    nstream = 16  # for better radiance accuracy, increase nstream (number of stream)
    ic_model = "yang2013"  # for ice cloud (consistent with MODIS L2 V6)
    ic_habit = "column_8elements"  # for ice cloud (consistent with MODIS L2 V6)
    ic_rough = "severe"  # for ice cloud (consistent with MODIS L2 V6)
    # +++++++++++++++++
    # create input file
    # +++++++++++++++++
    # context manager guarantees the file is closed even on write errors
    with open(input_file, "w") as file:
        file.write("data_files_path " + lib_data_dir +"/\n")
        file.write("atmosphere_file " + lib_data_dir + "/atmmod/" + atm_profile + ".dat\n")
        file.write("source " + source + " " + lib_data_dir + "/solar_flux/" + solar_type + ".dat\n")
        file.write("albedo_file " + albedo_file + "\n")
        file.write("day_of_year " + doy + "\n")
        file.write("sza " + sza + "\n")
        file.write("phi0 " + phi0 + "\n")
        file.write("phi " + phi + "\n")
        file.write("umu %.1f %.1f\n" %(umu[0], umu[1]))
        file.write("zout " + zout + "\n")
        file.write("rte_solver " + rte_solver + "\n")
        file.write("mol_abs_param " + mol_abs_parameter + "\n")
        file.write("number_of_streams %i\n" %nstream)
        file.write("wavelength " + lambda0 + " " + lambda1 + "\n")
        file.write("aerosol_default\n")
        file.write("aerosol_haze " + aerosol_haze + "\n")
        file.write("aerosol_season " + aerosol_season + "\n")
        file.write("ic_file 1D " + cloud_file + "\n")
        file.write("ic_properties " + ic_model + " interpolate\n")
        file.write("ic_habit_" + ic_model + " " + ic_habit + " " + ic_rough + "\n")
        file.write("ic_modify tau550 set " + tau550 + "\n")
        file.write("quiet")  # quiet
        # file.write("verbose")  # print verbose file (useful for post analysis)
"numpy.array"
] | [((605, 639), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {'dtype': 'float'}), '([-1.0, 1.0], dtype=float)\n', (613, 639), True, 'import numpy as np\n')] |
import numpy as np
from numpy.core.fromnumeric import std
from scipy.stats.stats import ttest_ind
# Reproducible simulated data: fixed seed for the group draws below.
np.random.seed(42)
# Control group: random integer scores in [0, 100) for 50 females and 50 males.
controll_group_f = np.random.randint(0, 100, (1, 50), dtype=np.int32)
controll_group_m = np.random.randint(0, 100, (1, 50), dtype=np.int32)
# Error group: same shape and value range as the control group.
controll_error_f = np.random.randint(0, 100, (1, 50), dtype=np.int32)
controll_error_m = np.random.randint(0, 100, (1, 50), dtype=np.int32)
# Mean and standard deviation for each of the four groups.
gf = (np.mean(controll_group_f), np.std(controll_group_f))
gm = (np.mean(controll_group_m), np.std(controll_group_m))
ef = (np.mean(controll_error_f), np.std(controll_error_f))
em = (np.mean(controll_error_m), np.std(controll_error_m))
print(f"The mean of the controllgroup (50f): {gf[0]} with an SD: {gf[1]}")
print(f"The mean of the controllgroup (50m): {gm[0]} with an SD: {gm[1]}")
print(f"The mean of the errorgroup (50f): {ef[0]} with an SD: {ef[1]}")
print(f"The mean of the errorgroup (50m): {em[0]} with an SD: {em[1]}")
# Two-sample t-tests; the third positional argument of ttest_ind is axis=1.
# NOTE(review): `+` adds the two (1, 50) arrays element-wise — it does NOT
# pool the 100 samples. Confirm this is the intended comparison.
a = ttest_ind(
    controll_group_f + controll_group_m, controll_error_f + controll_error_m, 1
)
print(
    f"The values for controllgroup (50f/50m) vs errorgroup (50f/50m) are t-value: {a[0]} and p-value: {a[1]}"
)
# Within-group comparisons: female vs male inside each group.
b = ttest_ind(controll_group_f, controll_group_m, 1)
c = ttest_ind(controll_error_f, controll_error_m, 1)
print(
    f"The values for controllgroup (50f) vs controllgroup (50m) are t-value: {b[0]} and p-value: {b[1]}"
)
print(
    f"The values for errorgroup (50f) vs errorgroup (50m) are t-value: {c[0]} and p-value: {c[1]}"
)
# Between-group comparisons, same sex.
d = ttest_ind(controll_group_f, controll_error_f, 1)
e = ttest_ind(controll_group_m, controll_error_m, 1)
print(
    f"The values for controllgroup (50f) vs errorgroup (50f) are t-value: {d[0]} and p-value: {d[1]}"
)
print(
    f"The values for controllgroup (50m) vs errorgroup (50m) are t-value: {e[0]} and p-value: {e[1]}"
)
# Between-group comparisons, opposite sex.
f = ttest_ind(controll_group_f, controll_error_m, 1)
g = ttest_ind(controll_group_m, controll_error_f, 1)
print(
    f"The values for controllgroup (50f) vs errorgroup (50m) are t-value: {f[0]} and p-value: {f[1]}"
)
print(
    f"The values for controllgroup (50m) vs errorgroup (50f) are t-value: {g[0]} and p-value: {g[1]}"
)
import seaborn as sns
# Fresh seed for the boxplot demo: five samples of 20 uniform values each.
np.random.seed(111)
all_arr = [
    np.random.uniform(size=20),
    np.random.uniform(size=20),
    np.random.uniform(size=20),
    np.random.uniform(size=20),
    np.random.uniform(size=20),
]
sns.boxplot(data=all_arr)
| [
"numpy.random.uniform",
"numpy.random.seed",
"scipy.stats.stats.ttest_ind",
"numpy.std",
"numpy.mean",
"seaborn.boxplot",
"numpy.random.randint"
] | [((99, 117), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (113, 117), True, 'import numpy as np\n'), ((169, 219), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(1, 50)'], {'dtype': 'np.int32'}), '(0, 100, (1, 50), dtype=np.int32)\n', (186, 219), True, 'import numpy as np\n'), ((239, 289), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(1, 50)'], {'dtype': 'np.int32'}), '(0, 100, (1, 50), dtype=np.int32)\n', (256, 289), True, 'import numpy as np\n'), ((339, 389), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(1, 50)'], {'dtype': 'np.int32'}), '(0, 100, (1, 50), dtype=np.int32)\n', (356, 389), True, 'import numpy as np\n'), ((409, 459), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(1, 50)'], {'dtype': 'np.int32'}), '(0, 100, (1, 50), dtype=np.int32)\n', (426, 459), True, 'import numpy as np\n'), ((1077, 1167), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['(controll_group_f + controll_group_m)', '(controll_error_f + controll_error_m)', '(1)'], {}), '(controll_group_f + controll_group_m, controll_error_f +\n controll_error_m, 1)\n', (1086, 1167), False, 'from scipy.stats.stats import ttest_ind\n'), ((1324, 1372), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_group_f', 'controll_group_m', '(1)'], {}), '(controll_group_f, controll_group_m, 1)\n', (1333, 1372), False, 'from scipy.stats.stats import ttest_ind\n'), ((1377, 1425), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_error_f', 'controll_error_m', '(1)'], {}), '(controll_error_f, controll_error_m, 1)\n', (1386, 1425), False, 'from scipy.stats.stats import ttest_ind\n'), ((1691, 1739), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_group_f', 'controll_error_f', '(1)'], {}), '(controll_group_f, controll_error_f, 1)\n', (1700, 1739), False, 'from scipy.stats.stats import ttest_ind\n'), ((1744, 1792), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_group_m', 'controll_error_m', '(1)'], 
{}), '(controll_group_m, controll_error_m, 1)\n', (1753, 1792), False, 'from scipy.stats.stats import ttest_ind\n'), ((2062, 2110), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_group_f', 'controll_error_m', '(1)'], {}), '(controll_group_f, controll_error_m, 1)\n', (2071, 2110), False, 'from scipy.stats.stats import ttest_ind\n'), ((2115, 2163), 'scipy.stats.stats.ttest_ind', 'ttest_ind', (['controll_group_m', 'controll_error_f', '(1)'], {}), '(controll_group_m, controll_error_f, 1)\n', (2124, 2163), False, 'from scipy.stats.stats import ttest_ind\n'), ((2411, 2430), 'numpy.random.seed', 'np.random.seed', (['(111)'], {}), '(111)\n', (2425, 2430), True, 'import numpy as np\n'), ((2607, 2632), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'all_arr'}), '(data=all_arr)\n', (2618, 2632), True, 'import seaborn as sns\n'), ((502, 527), 'numpy.mean', 'np.mean', (['controll_group_f'], {}), '(controll_group_f)\n', (509, 527), True, 'import numpy as np\n'), ((529, 553), 'numpy.std', 'np.std', (['controll_group_f'], {}), '(controll_group_f)\n', (535, 553), True, 'import numpy as np\n'), ((561, 586), 'numpy.mean', 'np.mean', (['controll_group_m'], {}), '(controll_group_m)\n', (568, 586), True, 'import numpy as np\n'), ((588, 612), 'numpy.std', 'np.std', (['controll_group_m'], {}), '(controll_group_m)\n', (594, 612), True, 'import numpy as np\n'), ((620, 645), 'numpy.mean', 'np.mean', (['controll_error_f'], {}), '(controll_error_f)\n', (627, 645), True, 'import numpy as np\n'), ((647, 671), 'numpy.std', 'np.std', (['controll_error_f'], {}), '(controll_error_f)\n', (653, 671), True, 'import numpy as np\n'), ((679, 704), 'numpy.mean', 'np.mean', (['controll_error_m'], {}), '(controll_error_m)\n', (686, 704), True, 'import numpy as np\n'), ((706, 730), 'numpy.std', 'np.std', (['controll_error_m'], {}), '(controll_error_m)\n', (712, 730), True, 'import numpy as np\n'), ((2448, 2474), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', 
(2465, 2474), True, 'import numpy as np\n'), ((2480, 2506), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', (2497, 2506), True, 'import numpy as np\n'), ((2512, 2538), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', (2529, 2538), True, 'import numpy as np\n'), ((2544, 2570), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', (2561, 2570), True, 'import numpy as np\n'), ((2576, 2602), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', (2593, 2602), True, 'import numpy as np\n')] |
# for Logging handling
import logging.config
import pickle
from functools import lru_cache
import numpy as np
import sympy
# Module-level logger; handlers and levels are configured by the application.
logger = logging.getLogger(__name__)
def c(ixs):
    """Triangular number of the count of positive entries of *ixs*.

    Counts how many entries are strictly positive and returns the sum
    ``1 + 2 + ... + k`` for that count ``k``.

    Parameters
    ----------
    ixs : iterable of int
        Multi-index whose positive entries are counted.

    Returns
    -------
    int
        ``k * (k + 1) / 2`` where ``k`` is the number of positive entries.
    """
    k = sum(1 for i in ixs if i > 0)
    return k * (k + 1) // 2
def BezierIndex(dim, deg):
    """Iterate the control-point multi-indices of a Bezier simplex.

    Yields every tuple of *dim* non-negative integers summing to *deg*,
    with the leading entries enumerated in descending order.

    Parameters
    ----------
    dim : int
        Length of each multi-index.
    deg : int
        Degree of the Bezier simplex (sum of each multi-index).

    Yields
    ------
    tuple of int
        Multi-index of length *dim* whose entries sum to *deg*.
    """
    def emit(prefix, remaining):
        # Once only one slot is left, it must absorb the remaining degree.
        if len(prefix) == dim - 1:
            yield prefix + (remaining, )
            return
        for head in range(remaining, -1, -1):
            yield from emit(prefix + (head, ), remaining - head)
    yield from emit((), deg)
def count_nonzero(a):
    """Number of elements of *a* that differ from zero.

    Parameters
    ----------
    a : numpy.ndarray
        Input array.

    Returns
    -------
    int
        Count of non-zero elements.
    """
    return np.count_nonzero(a)
def nonzero_indices(a):
    """Indices (along the first axis) of the non-zero elements of *a*.

    Parameters
    ----------
    a : numpy.ndarray
        Input array.

    Returns
    -------
    numpy.ndarray
        First-axis indices of the non-zero entries.
    """
    first_axis_indices = np.nonzero(a)[0]
    return first_axis_indices
def construct_simplex_meshgrid(ng, dimSimplex):
    """Build a regular grid of barycentric coordinates on a simplex.

    The first ``dimSimplex - 1`` coordinates come from a meshgrid over
    [0, 1] with *ng* ticks per axis; the last coordinate is chosen so each
    row sums to one, and rows whose last coordinate is negative (outside
    the simplex) are discarded.

    Parameters
    ----------
    ng : int
        Number of ticks per axis on [0, 1].
    dimSimplex : int
        Number of barycentric coordinates per grid point.

    Returns
    -------
    numpy.ndarray
        Array of shape (n_points, dimSimplex); every row sums to one and
        its last entry is non-negative.
    """
    ticks = np.linspace(0, 1, ng)
    grids = np.meshgrid(*([ticks] * (dimSimplex - 1)))
    points = np.zeros([grids[0].ravel().shape[0], dimSimplex])
    for axis in range(dimSimplex - 1):
        points[:, axis] = grids[axis].ravel()
    points[:, dimSimplex - 1] = 1 - np.sum(points, axis=1)
    return points[points[:, -1] >= 0, :]
class BezierSimplex:
    """Bezier simplex with symbolically precompiled basis monomials.

    On construction every basis monomial and its first and second partial
    derivatives are built with sympy and compiled into plain Python
    lambdas, so repeated evaluation via ``sampling`` is fast.

    Attributes
    ----------
    dimSpace : int
        Dimension of the Euclidean space containing the control points.
    dimSimplex : int
        Dimension of the Bezier simplex (number of barycentric coordinates).
    degree : int
        Polynomial degree of the Bezier simplex.
    """
    def __init__(self, dimSpace, dimSimplex, degree):
        """Store the dimensions and precompile the basis monomials.

        Parameters
        ----------
        dimSpace : int
            Dimension of the ambient Euclidean space.
        dimSimplex : int
            Dimension of the Bezier simplex.
        degree : int
            Polynomial degree of the Bezier simplex.

        Returns
        ----------
        None
        """
        self.dimSpace = dimSpace  # dimension of the ambient space
        self.dimSimplex = dimSimplex  # dimension of the bezier simplex
        self.degree = degree  # polynomial degree
        self.define_monomial(dimSpace=dimSpace,
                             dimSimplex=dimSimplex,
                             degree=degree)
    def define_monomial(self, dimSpace, dimSimplex, degree):
        """Build and compile the basis monomials and their derivatives.

        Populates ``self.Mf_all`` so that ``self.Mf_all[idx][d0][d1]`` is
        a callable evaluating the monomial for multi-index ``idx``,
        differentiated with respect to parameter ``d0`` and then ``d1``;
        ``None`` selects "no differentiation".

        Parameters
        ----------
        dimSpace : int
            Kept for interface compatibility; the implementation reads
            the value stored on ``self`` instead.
        dimSimplex : int
            Kept for interface compatibility (``self.dimSimplex`` is used).
        degree : int
            Kept for interface compatibility (``self.degree`` is used).

        Returns
        ----------
        None
        """
        # Barycentric parameters t0..t_{dimSimplex-2}; the last coordinate
        # is implicitly 1 - sum(t), see poly() below.
        T = [sympy.Symbol('t' + str(i)) for i in range(self.dimSimplex - 1)]
        def poly(i, n):
            # Monomial for multi-index i: c(i) * prod_k t_k^i_k * (1 - sum t)^i_n
            eq = c(i)
            for k in range(n):
                eq *= (T[k]**i[k])
            return eq * (1 - sum(T[k] for k in range(n)))**i[n]
        '''M[multi_index]'''
        M = {
            i: poly(i, self.dimSimplex - 1)
            for i in BezierIndex(dim=self.dimSimplex,
                                 deg=self.degree)
        }
        '''Mf[multi_index]'''
        # Compile each sympy expression into a plain lambda for fast evaluation.
        Mf = {}
        for i in BezierIndex(dim=self.dimSimplex, deg=self.degree):
            f = poly(i, self.dimSimplex - 1)
            b = compile('Mf[i] = lambda t0, t1=None, t2=None, t3=None: ' +
                        str(f),
                        '<string>',
                        'exec',
                        optimize=2)
            exec(b)
        '''Mf_DIFF[multi_index][t]'''
        # First partial derivatives, one entry per parameter t_j.
        M_DIFF = [{k: sympy.diff(v, t)
                   for k, v in M.items()} for j, t in enumerate(T)]
        Mf_DIFF = {}
        for k, v in M.items():
            Mf_DIFF[k] = []
            for j, t in enumerate(T):
                Mf_DIFF[k].append([])
                f = sympy.diff(v, t)
                b = compile(
                    'Mf_DIFF[k][-1] = lambda t0, t1=None, t2=None, t3=None: ' +
                    str(f),
                    '<string>',
                    'exec',
                    optimize=2)
                exec(b)
        '''Mf_DIFF2[multi_index][t][t]'''
        # Second partial derivatives for every ordered pair of parameters.
        Mf_DIFF2 = {}
        for k, v in M.items():
            Mf_DIFF2[k] = []
            for h, t in enumerate(T):
                Mf_DIFF2[k].append([])
                for j in range(self.dimSimplex - 1):
                    Mf_DIFF2[k][-1].append([])
                    f = sympy.diff(M_DIFF[j][k], t)
                    b = compile(
                        'Mf_DIFF2[k][-1][-1] = '
                        'lambda t0, t1=None, t2=None, t3=None: '
                        + str(f),
                        '<string>',
                        'exec',
                        optimize=2)
                    exec(b)
        # Assemble the lookup table consumed by monomial_diff:
        # Mf_all[idx][None][None]  -> monomial itself
        # Mf_all[idx][i][None]     -> d/dt_i
        # Mf_all[idx][i][j]        -> d^2/(dt_i dt_j)
        Mf_all = {}
        for k, v in M.items():
            Mf_all[k] = {}
            Mf_all[k][None] = {}
            Mf_all[k][None][None] = Mf[k]
            for i in range(len(Mf_DIFF[k])):
                Mf_all[k][i] = {}
                Mf_all[k][i][None] = Mf_DIFF[k][i]
            for i in range(len(Mf_DIFF2[k])):
                for j in range(len(Mf_DIFF2[k][i])):
                    Mf_all[k][i][j] = Mf_DIFF2[k][i][j]
        self.Mf_all = Mf_all
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # the instance alive for the lifetime of the cache.
    @lru_cache(maxsize=1000)
    def monomial_diff(self, multi_index, d0=None, d1=None):
        """Return the compiled (possibly differentiated) basis monomial.

        Parameters
        ----------
        multi_index : tuple of int
            Multi-index identifying the basis monomial.
        d0 : int or None
            First parameter index to differentiate by (None = none).
        d1 : int or None
            Second parameter index to differentiate by (None = none).

        Returns
        ----------
        callable
            Lambda of up to four parameters evaluating the monomial.
        """
        return (self.Mf_all[multi_index][d0][d1])
    def sampling(self, c, t):
        """Evaluate the Bezier simplex at one parameter point.

        Parameters
        ----------
        c : dict
            Control points keyed by multi-index; each value has length
            ``dimSpace``.
        t : sequence of float
            Barycentric parameters; only the first ``dimSimplex - 1``
            entries are used.

        Returns
        ----------
        x : numpy.ndarray
            Point of length ``dimSpace`` on the Bezier simplex.
        """
        x = np.zeros(self.dimSpace)
        for key in BezierIndex(dim=self.dimSimplex,
                               deg=self.degree):
            for i in range(self.dimSpace):
                x[i] += self.monomial_diff(key, d0=None, d1=None)(
                    *t[0:self.dimSimplex - 1]) * c[key][i]
        return (x)
    def sampling_array(self, c, t):
        """Evaluate the Bezier simplex at many parameter points.

        Parameters
        ----------
        c : dict
            Control points keyed by multi-index.
        t : numpy.ndarray
            Parameter array of shape (num_points, dimSimplex).

        Returns
        ----------
        x : numpy.ndarray
            Sampled points, shape (num_points, dimSpace).
        """
        x = np.zeros((t.shape[0],self.dimSpace))
        for i in range(t.shape[0]):
            for key in BezierIndex(dim=self.dimSimplex,
                                   deg=self.degree):
                for j in range(self.dimSpace):
                    x[i,j] += self.monomial_diff(key, d0=None, d1=None)(
                        *t[i,0:self.dimSimplex - 1]) * c[key][j]
        return (x)
    def meshgrid(self, c):
        """Evaluate the simplex on a regular 21-tick parameter grid.

        Parameters
        ----------
        c : dict
            Control points keyed by multi-index.

        Returns
        ----------
        tt : numpy.ndarray
            Barycentric grid points, shape (n_points, dimSimplex).
        xx : numpy.ndarray
            Corresponding points in the ambient space,
            shape (n_points, dimSpace).
        """
        tt = construct_simplex_meshgrid(21, self.dimSimplex)
        for i in range(tt.shape[0]):
            t = tt[i, :]
            if i == 0:
                x = self.sampling(c, t)
                xx = np.zeros([1, self.dimSpace])
                xx[i, :] = x
            else:
                x = self.sampling(c, t)
                x = x.reshape(1, self.dimSpace)
                xx = np.concatenate((xx, x), axis=0)
        return (tt, xx)
    def initialize_control_point(self, data):
        """Initialize control points from data at the simplex extremes.

        Extreme-point control points are taken directly from ``data``;
        every remaining control point is a weighted combination of the
        extreme points, weighted by its multi-index entries over the
        degree.

        Parameters
        ----------
        data : dict or numpy.ndarray
            Either a dict mapping 1-tuples ``(i,)`` to extreme points, or
            an array whose rows at the column-wise argmin positions are
            used as extremes.

        Returns
        ----------
        C : dict
            Control points keyed by multi-index.
        """
        data_extreme_points = {}
        if isinstance(data, dict) == True:
            logger.debug(data.keys())
            for i in range(self.dimSimplex):
                logger.debug(i)
                data_extreme_points[i + 1] = data[(i + 1, )]
        else: # array
            # Row minimizing each objective column serves as that extreme point.
            argmin = data.argmin(axis=0)
            for i in range(self.dimSimplex):
                key = i+1  # NOTE(review): 'key' is assigned but unused here
                data_extreme_points[i+1] = data[argmin[i],:]
        C = {}
        list_base_function_index = [
            i for i in BezierIndex(dim=self.dimSimplex,
                                   deg=self.degree)
        ]
        # Extreme points are the multi-indices with exactly one non-zero entry.
        list_extreme_point_index = [
            i for i in list_base_function_index if count_nonzero(i) == 1
        ]
        for key in list_extreme_point_index:
            index = int(nonzero_indices(key)[0])
            C[key] = data_extreme_points[index + 1]
        for key in list_base_function_index:
            if key not in C:
                C[key] = np.zeros(self.dimSpace)
                for key_extreme_points in list_extreme_point_index:
                    index = int(nonzero_indices(key_extreme_points)[0])
                    C[key] = C[key] + C[key_extreme_points] * (key[index] /
                                                               self.degree)
        return (C)
    def read_control_point(self, filename):
        """Load control points from a pickle file.

        Parameters
        ----------
        filename : str
            Path of the pickle file to read.

        Returns
        ----------
        c : dict
            Control points keyed by multi-index.
        """
        # NOTE(review): pickle.load executes arbitrary code — only read
        # files from trusted sources.
        with open(filename, mode="rb") as f:
            c = pickle.load(f)
        return (c)
    def write_control_point(self, C, filename):
        """Serialize control points to a pickle file.

        Parameters
        ----------
        C : dict
            Control points keyed by multi-index.
        filename : str
            Path of the pickle file to write.

        Returns
        ----------
        None
        """
        with open(filename, 'wb') as f:
            pickle.dump(C, f)
    def write_meshgrid(self, C, filename):
        """Sample the simplex on a mesh grid and save it as text.

        Parameters
        ----------
        C : dict
            Control points keyed by multi-index.
        filename : str
            Path of the text file to write (numpy.savetxt format).

        Returns
        ----------
        xx_ : numpy.ndarray
            The sampled ambient-space points that were saved.
        """
        tt_, xx_ = self.meshgrid(C)
        np.savetxt(filename, xx_)
        return (xx_)
if __name__ == '__main__':
    # Smoke-test / demo: fit a Bezier simplex to the 5-MED Pareto front data.
    import model
    from itertools import combinations
    DEGREE = 3  # degree of the Bezier simplex
    DIM_SIMPLEX = 5  # dimension of the Bezier simplex
    DIM_SPACE = 5  # dimension of the Euclidean space holding the control points
    NG = 21
    NEWTON_ITR = 20
    MAX_ITR = 30  # upper bound on the number of control point updates
    # input data: all subsets of the five objective indices
    base_index = ['1', '2', '3', '4', '5']
    subsets = []
    for i in range(len(base_index) + 1):
        for c_num in combinations(base_index, i):
            subsets.append(c_num)
    # Load data only for the singleton subsets (extreme points) and the full set.
    data = {}
    for e in subsets:
        if len(e) == 1:
            data[e] = np.loadtxt('../data/normalized_pf/normalized_5-MED.pf_' +
                                e[0])
        if len(e) == 5:
            # data[e] = np.loadtxt('data/normalized_5-MED.pf_1_2_3_4_5')
            data[e] = np.loadtxt(
                '../data/normalized_pf/normalized_5-MED.pf_1_2_3_4_5_itr0')
    bezier_simplex = model.BezierSimplex(dimSpace=DIM_SPACE,
                                         dimSimplex=DIM_SIMPLEX,
                                         degree=DEGREE)
    C_init = bezier_simplex.initialize_control_point(data)
    for key in C_init:
        logger.debug("{} {}".format(key, C_init[key]))
    x = bezier_simplex.sampling(C_init, [1, 0, 0, 0])
    logger.debug(x)
    tt, xx = bezier_simplex.meshgrid(C_init)
    logger.debug("{} {}".format(tt.shape, xx.shape))
    bezier_simplex.write_meshgrid(C_init, "sample_mesghgrid")
| [
"pickle.dump",
"numpy.count_nonzero",
"model.BezierSimplex",
"numpy.sum",
"numpy.savetxt",
"numpy.zeros",
"sympy.diff",
"numpy.nonzero",
"itertools.combinations",
"pickle.load",
"numpy.loadtxt",
"numpy.linspace",
"functools.lru_cache",
"numpy.concatenate"
] | [((1113, 1132), 'numpy.count_nonzero', 'np.count_nonzero', (['a'], {}), '(a)\n', (1129, 1132), True, 'import numpy as np\n'), ((1731, 1752), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ng'], {}), '(0, 1, ng)\n', (1742, 1752), True, 'import numpy as np\n'), ((5982, 6005), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1000)'}), '(maxsize=1000)\n', (5991, 6005), False, 'from functools import lru_cache\n'), ((12142, 12220), 'model.BezierSimplex', 'model.BezierSimplex', ([], {'dimSpace': 'DIM_SPACE', 'dimSimplex': 'DIM_SIMPLEX', 'degree': 'DEGREE'}), '(dimSpace=DIM_SPACE, dimSimplex=DIM_SIMPLEX, degree=DEGREE)\n', (12161, 12220), False, 'import model\n'), ((1384, 1397), 'numpy.nonzero', 'np.nonzero', (['a'], {}), '(a)\n', (1394, 1397), True, 'import numpy as np\n'), ((1983, 2000), 'numpy.sum', 'np.sum', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (1989, 2000), True, 'import numpy as np\n'), ((6766, 6789), 'numpy.zeros', 'np.zeros', (['self.dimSpace'], {}), '(self.dimSpace)\n', (6774, 6789), True, 'import numpy as np\n'), ((7391, 7428), 'numpy.zeros', 'np.zeros', (['(t.shape[0], self.dimSpace)'], {}), '((t.shape[0], self.dimSpace))\n', (7399, 7428), True, 'import numpy as np\n'), ((11234, 11259), 'numpy.savetxt', 'np.savetxt', (['filename', 'xx_'], {}), '(filename, xx_)\n', (11244, 11259), True, 'import numpy as np\n'), ((11671, 11698), 'itertools.combinations', 'combinations', (['base_index', 'i'], {}), '(base_index, i)\n', (11683, 11698), False, 'from itertools import combinations\n'), ((10437, 10451), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10448, 10451), False, 'import pickle\n'), ((10823, 10840), 'pickle.dump', 'pickle.dump', (['C', 'f'], {}), '(C, f)\n', (10834, 10840), False, 'import pickle\n'), ((11816, 11879), 'numpy.loadtxt', 'np.loadtxt', (["('../data/normalized_pf/normalized_5-MED.pf_' + e[0])"], {}), "('../data/normalized_pf/normalized_5-MED.pf_' + e[0])\n", (11826, 11879), True, 'import numpy as np\n'), ((12032, 12102), 
'numpy.loadtxt', 'np.loadtxt', (['"""../data/normalized_pf/normalized_5-MED.pf_1_2_3_4_5_itr0"""'], {}), "('../data/normalized_pf/normalized_5-MED.pf_1_2_3_4_5_itr0')\n", (12042, 12102), True, 'import numpy as np\n'), ((4312, 4328), 'sympy.diff', 'sympy.diff', (['v', 't'], {}), '(v, t)\n', (4322, 4328), False, 'import sympy\n'), ((4573, 4589), 'sympy.diff', 'sympy.diff', (['v', 't'], {}), '(v, t)\n', (4583, 4589), False, 'import sympy\n'), ((8282, 8310), 'numpy.zeros', 'np.zeros', (['[1, self.dimSpace]'], {}), '([1, self.dimSpace])\n', (8290, 8310), True, 'import numpy as np\n'), ((8467, 8498), 'numpy.concatenate', 'np.concatenate', (['(xx, x)'], {'axis': '(0)'}), '((xx, x), axis=0)\n', (8481, 8498), True, 'import numpy as np\n'), ((9760, 9783), 'numpy.zeros', 'np.zeros', (['self.dimSpace'], {}), '(self.dimSpace)\n', (9768, 9783), True, 'import numpy as np\n'), ((5168, 5195), 'sympy.diff', 'sympy.diff', (['M_DIFF[j][k]', 't'], {}), '(M_DIFF[j][k], t)\n', (5178, 5195), False, 'import sympy\n')] |
import numpy as np
import algos
import utils
import viz
import threading
def cornerDetectionAndSuppression(I, Imask, anms, cmax, out):
    """Detect Harris corners in image I, suppress them, and collect results.

    Runs as a thread target, so results are communicated through the *out*
    list instead of a return value.  With ANMS enabled, *out* receives
    [all corners, suppressed corners, descriptors]; otherwise it receives
    [top-cmax corners, descriptors].
    """
    if anms:
        # Detect every corner (on the masked image when a mask is given),
        # then thin them out with adaptive non-maximal suppression.
        harris_img, corners = algos.harris(Imask if Imask is not None else I, maxPeaks=-1)
        kept = algos.anms(harris_img, corners, cmax=cmax)
        out.append(corners)
        out.append(kept)
        out.append(algos.makeDescriptors(I, kept))
    else:
        # Let the Harris detector itself cap the number of peaks.
        _, corners = algos.harris(I, maxPeaks=cmax)
        out.append(corners)
        out.append(algos.makeDescriptors(I, corners))
def stitch(S, T, Tpre, anms, cmax, maskpow=1., intermediates=None):
    """Stitch source image S into target image T's frame.

    Detects and matches corners, estimates a homography with RANSAC, warps
    S onto an enlarged canvas containing T, and alpha-blends the overlap.

    Parameters:
        S, T: source and target color images (H x W x C arrays).
        Tpre: previous composite; when anms is on, T is masked to the region
            where Tpre is nonzero before corner detection.
        anms: if True use adaptive non-maximal suppression, otherwise cap
            corners at the detector by rank.
        cmax: maximum number of corners kept per image.
        maskpow: exponent shaping the alpha-blend ramp across the overlap.
        intermediates: optional list collecting debug images/figures.

    Returns:
        (C, T_): blended composite and the zero-padded target canvas
        (T_ is passed back in as Tpre on the next call).
    """
    # 1. Operate on grayscale images (red channel chosen arbitrarily):
    S_, T_ = S[..., 0], T[..., 0]
    # 2. Corner Detection + Non-Maximal Suppression (S and T in parallel):
    Tmask = np.where(Tpre != 0, T, 0)[..., 0] if anms else None
    out = [[], []]
    tasks = [
        threading.Thread(target=cornerDetectionAndSuppression, args=(S_, None, anms, cmax, out[0])),
        threading.Thread(target=cornerDetectionAndSuppression, args=(T_, Tmask, anms, cmax, out[1]))
    ]
    [t.start() for t in tasks]
    [t.join() for t in tasks]
    # All detected corners + descriptors
    Sxy_, Txy_ = out[0][0], out[1][0]
    Sd, Td = out[0][-1], out[1][-1]
    if not anms:
        # Keep lower of N most prominent between S and T
        hmin = min(Sxy_.shape[0], Txy_.shape[0])
        Sxy, Txy = Sxy_[:hmin], Txy_[:hmin]
        Sd, Td = Sd[..., :hmin], Td[..., :hmin]
    else:
        # ANMS already dropped some
        Sxy, Txy = out[0][1], out[1][1]
    print('[total corners]:\t\t\t\tS: {} | T: {}'.format(len(Sxy_), len(Txy_)))
    print('[after suppression ({})]:\t\t\t\tS: {} | T: {}'.format('ANMS' if anms else 'rank+min', len(Sxy), len(Txy)))
    if intermediates is not None:
        # plot all corners found
        S1_, T1_ = viz.plotImages(S, T, Sxy_, Txy_)
        intermediates.append(S1_)
        intermediates.append(T1_)
        # plot corners left after suppression
        S1_, T1_ = viz.plotImages(S, T, Sxy, Txy)
        intermediates.append(S1_)
        intermediates.append(T1_)
    # 3. Match 9x9 descriptors out of detected corners:
    idx = algos.matchDescriptors(Sd, Td, nnMax=0.55)
    print('[matched descriptors]:\t\t{}'.format(len(idx)))
    if intermediates is not None:
        # plot matched descriptors:
        S1_ = viz.plotDescriptors(S, Sxy[idx[:, 0], :], size=9)
        T1_ = viz.plotDescriptors(T, Txy[idx[:, 1], :], size=9)
        intermediates.append(S1_)
        intermediates.append(T1_)
    # 4. Create homography from source to target, based on the best
    # set of descriptors computed via RANSAC:
    H, c = algos.ransac(Sxy, Txy, idx, e=6, n=1000)
    print('[RANSAC set length]:\t\t{}'.format(len(c)))
    if H is None:
        print('skip')
        return T, T
    if intermediates is not None:
        # plot best matched descriptors after RANSAC:
        S1_ = viz.plotDescriptors(S, Sxy[idx[c, 0], :], size=9)
        T1_ = viz.plotDescriptors(T, Txy[idx[c, 1], :], size=9)
        f = viz.plotMatches(S1_, T1_, Sxy[idx[c, 0], :], Txy[idx[c, 1], :])
        if f:
            intermediates.append(f)
        else:
            intermediates.append(S1_)
            intermediates.append(T1_)
    th, tw = T.shape[0], T.shape[1]
    sh, sw = S.shape[0], S.shape[1]
    # 5. Forward warp source corners onto target space to compute final composite size:
    Sc_ = np.column_stack([(0, 0, 1), (sw - 1, 0, 1), (sw - 1, sh - 1, 1), (0, sh - 1, 1)])
    Tc_ = H @ Sc_
    Tc = (Tc_ / Tc_[-1])[:-1]
    # Blend-ramp direction depends on which side the source lands on.
    if (Tc_[:2, 0] < Sc_[:2, 0]).any():
        maskRange = (0., 1.)
    else:
        maskRange = (1., 0.)
    cmin = np.minimum(np.amin(Tc, axis=1), (0, 0))
    cmax = np.maximum(np.amax(Tc, axis=1), (tw - 1, th - 1))
    # FIX: builtin int instead of np.int (removed in NumPy 1.24), and
    # value comparison == instead of `is` on an int literal.
    csize = np.ceil((cmax - cmin) + 1).astype(int)[::-1]
    if len(T.shape) == 3:
        csize = (*csize, T.shape[2])
    # 6. Copy target to new size:
    T_ = np.zeros(csize)
    cmin = np.abs(cmin).astype(int)
    T_[cmin[1]: cmin[1] + th, cmin[0]: cmin[0] + tw] = T
    # 7. Inverse warp target onto source space (accounting for offset in new target size):
    i = np.meshgrid(np.arange(csize[1]), np.arange(csize[0]))
    Txy_ = np.vstack((i[0].flatten(), i[1].flatten(), np.ones(csize[0] * csize[1]))).astype(int)
    # FIX: np.vstack instead of np.row_stack (alias removed in NumPy 2.0).
    cmin_ = np.vstack((*cmin, 0))
    H_ = np.linalg.inv(H)
    Sxy_ = H_ @ (Txy_ - cmin_)
    Sxy = (Sxy_ / Sxy_[-1])[:-1]
    Txy = Txy_[:-1]
    # 8. Copy source to new size (from points in source space range to target space).
    S_ = np.zeros(csize)
    i = ((Sxy.T >= (0, 0)) & (Sxy.T <= (sw - 1, sh - 1))).all(axis=1).nonzero()[0]
    Txy = Txy[:, i]
    Sxy = Sxy[:, i]
    S_[Txy[1], Txy[0]] = algos.binterp(S, Sxy[0], Sxy[1])
    # 9. Final composite (a quick alpha blending):
    m = np.where((S_ != 0) & (T_ != 0))
    mvals = np.interp(m[1], (m[1].min(), m[1].max()), maskRange) ** maskpow
    C = np.where(S_ != 0, S_, T_)
    C[m] = (1.-mvals)*S_[m] + mvals*T_[m]
    if intermediates is not None:
        S1_ = S_.copy()
        T1_ = T_.copy()
        S1_[m] = (1. - mvals) * S1_[m]
        T1_[m] = mvals * T1_[m]
        intermediates.append(S_)
        intermediates.append(T_)
        intermediates.append(S1_)
        intermediates.append(T1_)
    return C, T_
def testPanorama(example, outprefix, anms, cmax, intermediates=False):
    """Stitch one of the bundled example image sets into a panorama.

    Parameters:
        example: 1 selects the living-room set; any other value the balcony set.
        outprefix: filename prefix for images written to the set's output dir.
        anms: forwarded to stitch() (adaptive non-maximal suppression).
        cmax: maximum number of corners kept per image.
        intermediates: if True, save every intermediate debug image/figure
            in addition to the final composite.
    """
    if example == 1:
        # example 1: living room
        outpath = './data/panorama/livingroom/processed/'
        paths = [
            './data/panorama/livingroom/lr-l.jpg',
            './data/panorama/livingroom/lr-c.jpg',
            './data/panorama/livingroom/lr-r.jpg'
        ]
    else:
        # example 2: balcony
        outpath = './data/panorama/balcony/processed/'
        paths = [
            './data/panorama/balcony/IMG_4189.jpg',
            './data/panorama/balcony/IMG_4190.jpg',
            './data/panorama/balcony/IMG_4191.jpg',
            './data/panorama/balcony/IMG_4188.jpg',
            './data/panorama/balcony/IMG_4192.jpg',
            './data/panorama/balcony/IMG_4187.jpg',
            './data/panorama/balcony/IMG_4193.jpg',
            './data/panorama/balcony/IMG_4186.jpg',
            './data/panorama/balcony/IMG_4194.jpg',
            './data/panorama/balcony/IMG_4185.jpg',
            './data/panorama/balcony/IMG_4195.jpg'
        ]
    imgs = []
    np.random.seed(12)  # deterministic corner matching / RANSAC sampling
    S, T = paths[:2]
    # First pair: both images are loaded and stitched together.
    with utils.Profiler():
        print(paths[0], paths[1])
        try:
            S, T = utils.Image.load(S, T, float=True)
            with utils.Profiler():
                T, T_ = stitch(S, T, T, anms, cmax, maskpow=.2, intermediates=imgs if intermediates else None)
            imgs.append(T)
        except Exception as e:
            # Best-effort: log and keep going with whatever we have.
            print(e)
            print('error processing: ', paths[0], paths[1], ' skip')
    # Remaining images are stitched one by one onto the growing composite.
    for path in paths[2:]:
        print(path)
        try:
            S = utils.Image.load(path, float=True)
            with utils.Profiler():
                T, T_ = stitch(S, T, T_, anms, cmax, maskpow=6., intermediates=imgs if intermediates else None)
            imgs.append(T)
        except Exception as e:
            print(e)
            print('error processing: ', path, ' skip.')
    print('done')
    print('saving images...')
    if not intermediates:
        imgs = imgs[-1:]
    for i, img in enumerate(imgs):
        # FIX: isinstance instead of `type(img) is np.ndarray`.
        if isinstance(img, np.ndarray):
            utils.Image.save((
                img, outpath + outprefix + str(i) + '.jpg'
            ))
        else:
            # Anything else is a matplotlib figure from the debug plots.
            img.savefig(
                outpath + outprefix + str(i) + '.svg',
                dpi=1200, transparent=True, bbox_inches = 'tight', pad_inches=0
            )
        print(i+1, ' saved...')
# testPanorama(1, 'livingroom-', anms=False, cmax=300, intermediates=False)
# testPanorama(1, 'anms/livingroom-anms-', anms=True, cmax=300, intermediates=False)
# testPanorama(2, 'balcony-', anms=False, cmax=300)
# testPanorama(2, 'balcony-anms-', anms=True, cmax=300)
| [
"viz.plotDescriptors",
"numpy.random.seed",
"numpy.amin",
"numpy.abs",
"numpy.ones",
"viz.plotMatches",
"algos.makeDescriptors",
"algos.ransac",
"numpy.arange",
"algos.matchDescriptors",
"algos.harris",
"algos.anms",
"utils.Image.load",
"viz.plotImages",
"threading.Thread",
"numpy.ceil... | [((2179, 2221), 'algos.matchDescriptors', 'algos.matchDescriptors', (['Sd', 'Td'], {'nnMax': '(0.55)'}), '(Sd, Td, nnMax=0.55)\n', (2201, 2221), False, 'import algos\n'), ((2674, 2714), 'algos.ransac', 'algos.ransac', (['Sxy', 'Txy', 'idx'], {'e': '(6)', 'n': '(1000)'}), '(Sxy, Txy, idx, e=6, n=1000)\n', (2686, 2714), False, 'import algos\n'), ((3437, 3522), 'numpy.column_stack', 'np.column_stack', (['[(0, 0, 1), (sw - 1, 0, 1), (sw - 1, sh - 1, 1), (0, sh - 1, 1)]'], {}), '([(0, 0, 1), (sw - 1, 0, 1), (sw - 1, sh - 1, 1), (0, sh - 1,\n 1)])\n', (3452, 3522), True, 'import numpy as np\n'), ((3957, 3972), 'numpy.zeros', 'np.zeros', (['csize'], {}), '(csize)\n', (3965, 3972), True, 'import numpy as np\n'), ((4335, 4359), 'numpy.row_stack', 'np.row_stack', (['(*cmin, 0)'], {}), '((*cmin, 0))\n', (4347, 4359), True, 'import numpy as np\n'), ((4370, 4386), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (4383, 4386), True, 'import numpy as np\n'), ((4567, 4582), 'numpy.zeros', 'np.zeros', (['csize'], {}), '(csize)\n', (4575, 4582), True, 'import numpy as np\n'), ((4731, 4763), 'algos.binterp', 'algos.binterp', (['S', 'Sxy[0]', 'Sxy[1]'], {}), '(S, Sxy[0], Sxy[1])\n', (4744, 4763), False, 'import algos\n'), ((4824, 4855), 'numpy.where', 'np.where', (['((S_ != 0) & (T_ != 0))'], {}), '((S_ != 0) & (T_ != 0))\n', (4832, 4855), True, 'import numpy as np\n'), ((4940, 4965), 'numpy.where', 'np.where', (['(S_ != 0)', 'S_', 'T_'], {}), '(S_ != 0, S_, T_)\n', (4948, 4965), True, 'import numpy as np\n'), ((6392, 6410), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (6406, 6410), True, 'import numpy as np\n'), ((171, 201), 'algos.harris', 'algos.harris', (['I'], {'maxPeaks': 'cmax'}), '(I, maxPeaks=cmax)\n', (183, 201), False, 'import algos\n'), ((239, 268), 'algos.makeDescriptors', 'algos.makeDescriptors', (['I', 'Ixy'], {}), '(I, Ixy)\n', (260, 268), False, 'import algos\n'), ((320, 380), 'algos.harris', 'algos.harris', (['(Imask if 
Imask is not None else I)'], {'maxPeaks': '(-1)'}), '(Imask if Imask is not None else I, maxPeaks=-1)\n', (332, 380), False, 'import algos\n'), ((396, 426), 'algos.anms', 'algos.anms', (['Ih', 'Ixy'], {'cmax': 'cmax'}), '(Ih, Ixy, cmax=cmax)\n', (406, 426), False, 'import algos\n'), ((489, 519), 'algos.makeDescriptors', 'algos.makeDescriptors', (['I', 'Ixy_'], {}), '(I, Ixy_)\n', (510, 519), False, 'import algos\n'), ((878, 973), 'threading.Thread', 'threading.Thread', ([], {'target': 'cornerDetectionAndSuppression', 'args': '(S_, None, anms, cmax, out[0])'}), '(target=cornerDetectionAndSuppression, args=(S_, None, anms,\n cmax, out[0]))\n', (894, 973), False, 'import threading\n'), ((979, 1075), 'threading.Thread', 'threading.Thread', ([], {'target': 'cornerDetectionAndSuppression', 'args': '(T_, Tmask, anms, cmax, out[1])'}), '(target=cornerDetectionAndSuppression, args=(T_, Tmask,\n anms, cmax, out[1]))\n', (995, 1075), False, 'import threading\n'), ((1846, 1878), 'viz.plotImages', 'viz.plotImages', (['S', 'T', 'Sxy_', 'Txy_'], {}), '(S, T, Sxy_, Txy_)\n', (1860, 1878), False, 'import viz\n'), ((2013, 2043), 'viz.plotImages', 'viz.plotImages', (['S', 'T', 'Sxy', 'Txy'], {}), '(S, T, Sxy, Txy)\n', (2027, 2043), False, 'import viz\n'), ((2366, 2415), 'viz.plotDescriptors', 'viz.plotDescriptors', (['S', 'Sxy[idx[:, 0], :]'], {'size': '(9)'}), '(S, Sxy[idx[:, 0], :], size=9)\n', (2385, 2415), False, 'import viz\n'), ((2430, 2479), 'viz.plotDescriptors', 'viz.plotDescriptors', (['T', 'Txy[idx[:, 1], :]'], {'size': '(9)'}), '(T, Txy[idx[:, 1], :], size=9)\n', (2449, 2479), False, 'import viz\n'), ((2934, 2983), 'viz.plotDescriptors', 'viz.plotDescriptors', (['S', 'Sxy[idx[c, 0], :]'], {'size': '(9)'}), '(S, Sxy[idx[c, 0], :], size=9)\n', (2953, 2983), False, 'import viz\n'), ((2998, 3047), 'viz.plotDescriptors', 'viz.plotDescriptors', (['T', 'Txy[idx[c, 1], :]'], {'size': '(9)'}), '(T, Txy[idx[c, 1], :], size=9)\n', (3017, 3047), False, 'import viz\n'), ((3060, 3123), 
'viz.plotMatches', 'viz.plotMatches', (['S1_', 'T1_', 'Sxy[idx[c, 0], :]', 'Txy[idx[c, 1], :]'], {}), '(S1_, T1_, Sxy[idx[c, 0], :], Txy[idx[c, 1], :])\n', (3075, 3123), False, 'import viz\n'), ((3699, 3718), 'numpy.amin', 'np.amin', (['Tc'], {'axis': '(1)'}), '(Tc, axis=1)\n', (3706, 3718), True, 'import numpy as np\n'), ((3750, 3769), 'numpy.amax', 'np.amax', (['Tc'], {'axis': '(1)'}), '(Tc, axis=1)\n', (3757, 3769), True, 'import numpy as np\n'), ((4181, 4200), 'numpy.arange', 'np.arange', (['csize[1]'], {}), '(csize[1])\n', (4190, 4200), True, 'import numpy as np\n'), ((4202, 4221), 'numpy.arange', 'np.arange', (['csize[0]'], {}), '(csize[0])\n', (4211, 4221), True, 'import numpy as np\n'), ((6443, 6459), 'utils.Profiler', 'utils.Profiler', ([], {}), '()\n', (6457, 6459), False, 'import utils\n'), ((784, 809), 'numpy.where', 'np.where', (['(Tpre != 0)', 'T', '(0)'], {}), '(Tpre != 0, T, 0)\n', (792, 809), True, 'import numpy as np\n'), ((3984, 3996), 'numpy.abs', 'np.abs', (['cmin'], {}), '(cmin)\n', (3990, 3996), True, 'import numpy as np\n'), ((6528, 6562), 'utils.Image.load', 'utils.Image.load', (['S', 'T'], {'float': '(True)'}), '(S, T, float=True)\n', (6544, 6562), False, 'import utils\n'), ((3801, 3825), 'numpy.ceil', 'np.ceil', (['(cmax - cmin + 1)'], {}), '(cmax - cmin + 1)\n', (3808, 3825), True, 'import numpy as np\n'), ((6580, 6596), 'utils.Profiler', 'utils.Profiler', ([], {}), '()\n', (6594, 6596), False, 'import utils\n'), ((6952, 6986), 'utils.Image.load', 'utils.Image.load', (['path'], {'float': '(True)'}), '(path, float=True)\n', (6968, 6986), False, 'import utils\n'), ((4277, 4305), 'numpy.ones', 'np.ones', (['(csize[0] * csize[1])'], {}), '(csize[0] * csize[1])\n', (4284, 4305), True, 'import numpy as np\n'), ((7008, 7024), 'utils.Profiler', 'utils.Profiler', ([], {}), '()\n', (7022, 7024), False, 'import utils\n')] |
from scipy.ndimage import zoom
import numpy as np
import config as c
import data
class Visualizer:
    """Console fallback visualizer: prints a header row and per-epoch losses.

    The live (visdom) visualizer subclasses this and overrides the update
    hooks; this base class only writes to stdout.
    """

    def __init__(self, loss_labels):
        """Print the tab-separated header and reset the epoch counter."""
        self.n_losses = len(loss_labels)
        self.loss_labels = loss_labels
        self.counter = 0
        header = 'Epoch'
        for l in loss_labels:
            header += '\t\t%s' % (l)
        print(header)

    def update_losses(self, losses, *args):
        """Print one row of loss values and advance the epoch counter."""
        # Blank out the current console line before rewriting it in place.
        print('\r', ' '*20, end='')
        line = '\r%.3i' % (self.counter)
        for l in losses:
            line += '\t\t%.4f' % (l)
        print(line)
        self.counter += 1

    def update_images(self, *args):
        """No-op; only the live visualizer renders image previews."""
        pass

    def update_hist(self, *args):
        """No-op; only the live visualizer renders histograms."""
        pass

    def close(self):
        """No-op; defined so the module-level close() does not raise
        AttributeError when live visualization is disabled."""
        pass
# Live visualization is optional: it needs a running visdom server and
# matplotlib, so those imports happen only when the config enables it.
if c.live_visualization:
    import visdom
    import matplotlib
    matplotlib.use('Agg')  # headless backend: figures are rendered off-screen for visdom
    import matplotlib.pyplot as plt
    n_imgs = 10      # preview grid is n_imgs x n_imgs tiles
    n_plots = 2      # histogram figure is n_plots x n_plots subplots
    figsize = (4,4)
    im_width = c.img_dims[1]
    class LiveVisualizer(Visualizer):
        """Visualizer that mirrors losses, image grids and histograms to visdom."""
        def __init__(self, loss_labels):
            super().__init__(loss_labels)
            self.viz = visdom.Visdom()  # env='mnist'
            self.viz.close()
            # Pre-create the visdom windows so later updates can target
            # them by window handle.
            self.l_plots = self.viz.line(X = np.zeros((1,self.n_losses)),
                                         Y = np.zeros((1,self.n_losses)),
                                         opts = {'legend':self.loss_labels})
            self.imgs = self.viz.image(np.random.random((3, im_width*n_imgs*c.preview_upscale,
                                                           im_width*n_imgs*c.preview_upscale)))
            self.fig, self.axes = plt.subplots(n_plots, n_plots, figsize=figsize)
            self.hist = self.viz.matplot(self.fig)
        def update_losses(self, losses, logscale=True):
            """Print the losses and append them to the visdom line plot."""
            super().update_losses(losses)
            # X axis is measured in training iterations, not epochs.
            its = min(len(data.train_loader), c.n_its_per_epoch)
            y = np.array([losses])
            if logscale:
                y = np.log10(y)
            self.viz.line(X = (self.counter-1) * its * np.ones((1,self.n_losses)),
                          Y = y,
                          opts = {'legend':self.loss_labels},
                          win = self.l_plots,
                          update = 'append')
        def update_images(self, *img_list):
            """Tile samples from each tensor in img_list into one preview image."""
            w = img_list[0].shape[2]
            k = 0      # index of the source tensor supplying the next tile
            k_img = 0  # sample index within each source tensor
            show_img = np.zeros((3, w*n_imgs, w*n_imgs), dtype=np.uint8)
            img_list_np = []
            for im in img_list:
                im_np = im.cpu().data.numpy()
                img_list_np.append(np.clip((255. * im_np), 0, 255).astype(np.uint8))
            # Interleave sources: consecutive tiles cycle through img_list.
            for i in range(n_imgs):
                for j in range(n_imgs):
                    show_img[:, w*i:w*i+w, w*j:w*j+w] = img_list_np[k][k_img]
                    k += 1
                    if k >= len(img_list_np):
                        k = 0
                        k_img += 1
            # Nearest-neighbour upscale (order=0) keeps pixels crisp.
            show_img = zoom(show_img, (1., c.preview_upscale, c.preview_upscale), order=0)
            self.viz.image(show_img, win = self.imgs)
        def update_hist(self, data):
            """Redraw the histogram grid from the given samples.

            NOTE: the `data` parameter shadows the imported `data` module
            within this method.
            """
            for i in range(n_plots):
                for j in range(n_plots):
                    try:
                        self.axes[i,j].clear()
                        self.axes[i,j].hist(data[:, i*n_plots + j], bins=20, histtype='step')
                    except ValueError:
                        # e.g. empty/degenerate column; leave the subplot blank
                        pass
            self.fig.tight_layout()
            self.viz.matplot(self.fig, win=self.hist)
        def close(self):
            """Close all visdom windows owned by this visualizer."""
            self.viz.close(win=self.hist)
            self.viz.close(win=self.imgs)
            self.viz.close(win=self.l_plots)
    visualizer = LiveVisualizer(c.loss_names)
else:
    visualizer = Visualizer(c.loss_names)
def show_loss(losses, logscale=True):
    """Forward per-epoch loss values to the active visualizer."""
    visualizer.update_losses(losses, logscale)
def show_imgs(*imgs):
    """Forward image tensors to the active visualizer's preview grid."""
    visualizer.update_images(*imgs)
def show_hist(data):
    """Forward samples to the visualizer's histogram view.

    NOTE(review): the `data` argument shadows the imported `data` module;
    `.data` presumably unwraps a torch tensor/Variable -- confirm with callers.
    """
    visualizer.update_hist(data.data)
def close():
    """Close the visualizer's windows.

    NOTE(review): as written, the console Visualizer defines no close(),
    so this raises AttributeError unless live visualization is enabled.
    """
    visualizer.close()
| [
"numpy.zeros",
"visdom.Visdom",
"scipy.ndimage.zoom",
"numpy.ones",
"numpy.clip",
"matplotlib.use",
"numpy.array",
"numpy.random.random",
"numpy.log10",
"matplotlib.pyplot.subplots"
] | [((790, 811), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (804, 811), False, 'import matplotlib\n'), ((1075, 1090), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (1088, 1090), False, 'import visdom\n'), ((1587, 1634), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_plots', 'n_plots'], {'figsize': 'figsize'}), '(n_plots, n_plots, figsize=figsize)\n', (1599, 1634), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1885), 'numpy.array', 'np.array', (['[losses]'], {}), '([losses])\n', (1875, 1885), True, 'import numpy as np\n'), ((2365, 2418), 'numpy.zeros', 'np.zeros', (['(3, w * n_imgs, w * n_imgs)'], {'dtype': 'np.uint8'}), '((3, w * n_imgs, w * n_imgs), dtype=np.uint8)\n', (2373, 2418), True, 'import numpy as np\n'), ((2925, 2993), 'scipy.ndimage.zoom', 'zoom', (['show_img', '(1.0, c.preview_upscale, c.preview_upscale)'], {'order': '(0)'}), '(show_img, (1.0, c.preview_upscale, c.preview_upscale), order=0)\n', (2929, 2993), False, 'from scipy.ndimage import zoom\n'), ((1399, 1502), 'numpy.random.random', 'np.random.random', (['(3, im_width * n_imgs * c.preview_upscale, im_width * n_imgs * c.\n preview_upscale)'], {}), '((3, im_width * n_imgs * c.preview_upscale, im_width *\n n_imgs * c.preview_upscale))\n', (1415, 1502), True, 'import numpy as np\n'), ((1931, 1942), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (1939, 1942), True, 'import numpy as np\n'), ((1179, 1207), 'numpy.zeros', 'np.zeros', (['(1, self.n_losses)'], {}), '((1, self.n_losses))\n', (1187, 1207), True, 'import numpy as np\n'), ((1253, 1281), 'numpy.zeros', 'np.zeros', (['(1, self.n_losses)'], {}), '((1, self.n_losses))\n', (1261, 1281), True, 'import numpy as np\n'), ((1999, 2026), 'numpy.ones', 'np.ones', (['(1, self.n_losses)'], {}), '((1, self.n_losses))\n', (2006, 2026), True, 'import numpy as np\n'), ((2557, 2587), 'numpy.clip', 'np.clip', (['(255.0 * im_np)', '(0)', '(255)'], {}), '(255.0 * im_np, 0, 255)\n', (2564, 2587), True, 'import numpy as 
np\n')] |
import haiku as hk
import jax.numpy as jnp
import numpy as np
import pytest
import optax as optix
from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay
@pytest.mark.parametrize("lr, w, x", [(0.1, 1.0, 1.0), (0.1, 20.0, 10.0), (1e-3, 0.0, -10.0)])
def test_optimize(lr, w, x):
    """One SGD step on a bias-free 1x1 linear net moves the weight by lr * x."""
    def forward(inp):
        layer = hk.Linear(1, with_bias=False, w_init=hk.initializers.Constant(w))
        return layer(inp)

    net = hk.without_apply_rng(hk.transform(forward))
    params = net.init(next(hk.PRNGSequence(0)), jnp.zeros((1, 1)))
    opt_init, opt = optix.sgd(lr)
    opt_state = opt_init(params)

    def _loss(params, x):
        return net.apply(params, x).mean(), None

    opt_state, params, loss, _ = optimize(_loss, opt, opt_state, params, None, x=jnp.ones((1, 1)) * x)
    # Forward value is w * x; d(loss)/dw = x, so one step gives w - lr * x.
    assert np.isclose(loss, w * x)
    assert np.isclose(params["linear"]["w"], w - lr * x)
def test_clip_gradient():
    """Element-wise clipping caps each gradient entry at +/- max_value."""
    grad = {"w": np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)}
    cases = {
        2.0: [-2.0, -1.0, 0.0, 1.0, 2.0],
        1.5: [-1.5, -1.0, 0.0, 1.0, 1.5],
        1.0: [-1.0, -1.0, 0.0, 1.0, 1.0],
        0.5: [-0.5, -0.5, 0.0, 0.5, 0.5],
    }
    for max_value, expected in cases.items():
        assert np.isclose(clip_gradient(grad, max_value)["w"], expected).all()
def test_clip_gradient_norm():
    """Gradients are rescaled so their global norm never exceeds the bound."""
    unit = {"w": np.array([1.0, 0.0], dtype=np.float32)}  # norm 1
    for max_norm, expected in [
        (0.0, [0.0, 0.0]),
        (0.5, [0.5, 0.0]),
        (1.0, [1.0, 0.0]),
        (2.0, [1.0, 0.0]),  # already within the bound: unchanged
    ]:
        assert np.isclose(clip_gradient_norm(unit, max_norm)["w"], expected).all()

    grad = {"w": np.array([3.0, 4.0], dtype=np.float32)}  # norm 5
    for max_norm, expected in [
        (0.0, [0.0, 0.0]),
        (1.0, [0.6, 0.8]),
        (2.0, [1.2, 1.6]),
        (5.0, [3.0, 4.0]),
        (10.0, [3.0, 4.0]),
    ]:
        assert np.isclose(clip_gradient_norm(grad, max_norm)["w"], expected).all()
def test_soft_update():
    """Polyak averaging: target <- (1 - tau) * target + tau * source."""
    source = {"w": np.array([-10.0, -5.0, 0.0, 5.0, 10.0], dtype=np.float32)}
    target = {"w": np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)}
    # tau = 0 keeps the target; tau = 1 copies the source.
    assert np.isclose(soft_update(target, source, 0.0)["w"], target["w"]).all()
    assert np.isclose(soft_update(target, source, 1.0)["w"], source["w"]).all()
    # tau = 0.5 is the element-wise midpoint.
    midpoint = 0.5 * (source["w"] + target["w"])
    assert np.isclose(soft_update(target, source, 0.5)["w"], midpoint).all()
def test_weight_decay():
    """weight_decay returns half the squared L2 norm of the parameters."""
    cases = [
        ([0.0, 0.0, 0.0, 0.0, 0.0], 0.0),
        ([-1.0, -1.0, 0.0, 1.0, 1.0], 2.0),
        ([-2.0, -1.0, 0.0, 1.0, 2.0], 5.0),
    ]
    for values, expected in cases:
        params = {"w": np.array(values, dtype=np.float32)}
        assert np.isclose(weight_decay(params), expected)
| [
"rljax.util.optim.soft_update",
"haiku.initializers.Constant",
"rljax.util.optim.clip_gradient_norm",
"jax.numpy.zeros",
"haiku.PRNGSequence",
"numpy.isclose",
"numpy.array",
"rljax.util.optim.clip_gradient",
"optax.sgd",
"jax.numpy.ones",
"pytest.mark.parametrize"
] | [((202, 301), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lr, w, x"""', '[(0.1, 1.0, 1.0), (0.1, 20.0, 10.0), (0.001, 0.0, -10.0)]'], {}), "('lr, w, x', [(0.1, 1.0, 1.0), (0.1, 20.0, 10.0), (\n 0.001, 0.0, -10.0)])\n", (225, 301), False, 'import pytest\n'), ((537, 550), 'optax.sgd', 'optix.sgd', (['lr'], {}), '(lr)\n', (546, 550), True, 'import optax as optix\n'), ((775, 798), 'numpy.isclose', 'np.isclose', (['loss', '(w * x)'], {}), '(loss, w * x)\n', (785, 798), True, 'import numpy as np\n'), ((810, 855), 'numpy.isclose', 'np.isclose', (["params['linear']['w']", '(w - lr * x)'], {}), "(params['linear']['w'], w - lr * x)\n", (820, 855), True, 'import numpy as np\n'), ((498, 515), 'jax.numpy.zeros', 'jnp.zeros', (['(1, 1)'], {}), '((1, 1))\n', (507, 515), True, 'import jax.numpy as jnp\n'), ((901, 956), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)\n', (909, 956), True, 'import numpy as np\n'), ((1361, 1399), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {'dtype': 'np.float32'}), '([1.0, 0.0], dtype=np.float32)\n', (1369, 1399), True, 'import numpy as np\n'), ((1724, 1762), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'np.float32'}), '([3.0, 4.0], dtype=np.float32)\n', (1732, 1762), True, 'import numpy as np\n'), ((2191, 2248), 'numpy.array', 'np.array', (['[-10.0, -5.0, 0.0, 5.0, 10.0]'], {'dtype': 'np.float32'}), '([-10.0, -5.0, 0.0, 5.0, 10.0], dtype=np.float32)\n', (2199, 2248), True, 'import numpy as np\n'), ((2269, 2324), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)\n', (2277, 2324), True, 'import numpy as np\n'), ((477, 495), 'haiku.PRNGSequence', 'hk.PRNGSequence', (['(0)'], {}), '(0)\n', (492, 495), True, 'import haiku as hk\n'), ((742, 758), 'jax.numpy.ones', 'jnp.ones', (['(1, 1)'], {}), '((1, 1))\n', (750, 758), True, 'import jax.numpy as jnp\n'), 
((2651, 2704), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)\n', (2659, 2704), True, 'import numpy as np\n'), ((2754, 2809), 'numpy.array', 'np.array', (['[-1.0, -1.0, 0.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([-1.0, -1.0, 0.0, 1.0, 1.0], dtype=np.float32)\n', (2762, 2809), True, 'import numpy as np\n'), ((2859, 2914), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)\n', (2867, 2914), True, 'import numpy as np\n'), ((981, 1005), 'rljax.util.optim.clip_gradient', 'clip_gradient', (['grad', '(2.0)'], {}), '(grad, 2.0)\n', (994, 1005), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1069, 1093), 'rljax.util.optim.clip_gradient', 'clip_gradient', (['grad', '(1.5)'], {}), '(grad, 1.5)\n', (1082, 1093), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1157, 1181), 'rljax.util.optim.clip_gradient', 'clip_gradient', (['grad', '(1.0)'], {}), '(grad, 1.0)\n', (1170, 1181), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1245, 1269), 'rljax.util.optim.clip_gradient', 'clip_gradient', (['grad', '(0.5)'], {}), '(grad, 0.5)\n', (1258, 1269), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1424, 1453), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(0.0)'], {}), '(grad, 0.0)\n', (1442, 1453), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1500, 1529), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(0.5)'], {}), '(grad, 0.5)\n', (1518, 1529), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, 
soft_update, weight_decay\n'), ((1576, 1605), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(1.0)'], {}), '(grad, 1.0)\n', (1594, 1605), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1652, 1681), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(2.0)'], {}), '(grad, 2.0)\n', (1670, 1681), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1787, 1816), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(0.0)'], {}), '(grad, 0.0)\n', (1805, 1816), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1863, 1892), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(1.0)'], {}), '(grad, 1.0)\n', (1881, 1892), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((1939, 1968), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(2.0)'], {}), '(grad, 2.0)\n', (1957, 1968), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((2015, 2044), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(5.0)'], {}), '(grad, 5.0)\n', (2033, 2044), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((2091, 2121), 'rljax.util.optim.clip_gradient_norm', 'clip_gradient_norm', (['grad', '(10.0)'], {}), '(grad, 10.0)\n', (2109, 2121), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((2349, 2381), 'rljax.util.optim.soft_update', 'soft_update', (['target', 'source', '(0.0)'], {}), '(target, source, 0.0)\n', (2360, 2381), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, 
weight_decay\n'), ((2429, 2461), 'rljax.util.optim.soft_update', 'soft_update', (['target', 'source', '(1.0)'], {}), '(target, source, 1.0)\n', (2440, 2461), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((2509, 2541), 'rljax.util.optim.soft_update', 'soft_update', (['target', 'source', '(0.5)'], {}), '(target, source, 0.5)\n', (2520, 2541), False, 'from rljax.util.optim import clip_gradient, clip_gradient_norm, optimize, soft_update, weight_decay\n'), ((416, 443), 'haiku.initializers.Constant', 'hk.initializers.Constant', (['w'], {}), '(w)\n', (440, 443), True, 'import haiku as hk\n')] |
import numpy as np
import pickle
import time
class off_policy_mc:
def __init__(self,q,c,state_name,action_name,exploration_space,epsilon=None,discount=None,theta=None,episode_step=None,save_episode=True):
self.q=q
self.c=c
self.episode=[]
self.state_name=state_name
self.action_name=action_name
self.exploration_space=exploration_space
self.action_len=len(self.action_name)
self.epsilon=epsilon
self.discount=discount
self.theta=theta
self.episode_step=episode_step
self.save_episode=save_episode
self.delta=0
self.epi_num=0
self.episode_num=0
self.total_episode=0
self.time=0
self.total_time=0
def init(self,dtype=np.int32):
t3=time.time()
if len(self.action_name)>self.action_len:
self.action=np.concatenate((self.action,np.arange(len(self.action_name)-self.action_len,dtype=dtype)+self.action_len))
self.action_prob=np.concatenate((self.action_prob,np.ones(len(self.action_name)-self.action_len,dtype=dtype)))
else:
self.action=np.arange(len(self.action_name),dtype=dtype)
self.action_prob=np.ones(len(self.action_name),dtype=dtype)
if len(self.state_name)>self.q.shape[0] or len(self.action_name)>self.q.shape[1]:
self.q=np.concatenate((self.q,np.zeros([len(self.state_name),len(self.action_name)-self.action_len],dtype=self.q.dtype)),axis=1)
self.q=np.concatenate((self.q,np.zeros([len(self.state_name)-self.state_len,len(self.action_name)],dtype=self.q.dtype)))
self.q=self.q.numpy()
if len(self.state_name)>self.c.shape[0] or len(self.action_name)>self.c.shape[1]:
self.c=np.concatenate((self.c,np.zeros([len(self.state_name),len(self.action_name)-self.action_len],dtype=self.c.dtype)),axis=1)
self.c=np.concatenate((self.c,np.zeros([len(self.state_name)-self.state_len,len(self.action_name)],dtype=self.c.dtype)))
self.c=self.c.numpy()
t4=time.time()
self.time+=t4-t3
return
def set_up(self,epsilon=None,discount=None,theta=None,episode_step=None,init=True):
if epsilon!=None:
self.epsilon=epsilon
if discount!=None:
self.discount=discount
if theta!=None:
self.theta=theta
if episode_step!=None:
self.episode_step=episode_step
if init==True:
self.r_sum=dict()
self.r_count=dict()
self.episode=[]
self.delta=0
self.epi_num=0
self.episode_num=0
self.total_episode=0
self.time=0
self.total_time=0
return
def epsilon_greedy_policy(self,q,s,action_one):
action_prob=action_one
action_prob=action_prob*self.epsilon/len(action_one)
best_a=np.argmax(q[s])
action_prob[best_a]+=1-self.epsilon
return action_prob
def _explore(self,episode_num,q,s,action,action_one,exploration_space):
episode=[]
_episode=[]
for _ in range(episode_num):
if self.episode_step==None:
while True:
action_prob=self.epsilon_greedy_policy(q,s,action_one)
a=np.random.choice(action,p=action_prob)
next_s,r,end=exploration_space[self.state_name[s]][self.action_name[a]]
episode.append([s,a,r])
if end:
if self.save_episode==True:
_episode.append([self.state_name[s],self.action_name[a],self.state_name[next_s],r,end])
break
if self.save_episode==True:
_episode.append([self.state_name[s],self.action_name[a],self.state_name[next_s],r])
s=next_s
else:
for _ in range(self.episode_step):
action_prob=self.epsilon_greedy_policy(q,s,action_one)
a=np.random.choice(action,p=action_prob)
next_s,r,end=exploration_space[self.state_name[s]][self.action_name[a]]
episode.append([s,a,r])
if end:
if self.save_episode==True:
_episode.append([self.state_name[s],self.action_name[a],self.state_name[next_s],r,end])
break
if self.save_episode==True:
_episode.append([self.state_name[s],self.action_name[a],self.state_name[next_s],r])
s=next_s
if self.save_episode==True:
self.episode.append(_episode)
self.epi_num+=1
return episode
def importance_sampling(self,episode,q,discount,action_one):
w=1
temp=0
a=0
delta=0
self.delta=0
for i,[s,a,r] in enumerate(episode):
a+=1
first_visit_index=i
G=sum(np.power(discount,i)*x[2] for i,x in enumerate(episode[first_visit_index:]))
self.c[s][a]+=w
delta+=np.abs(temp-(w/self.c[s][a])*(G-q[s][a]))
q[s][a]+=(w/self.c[s][a])*(G-q[s][a])
if a!=np.argmax(q[s]):
break
action_prob=self.epsilon_greedy_policy(q,s,action_one)
w=w*1/action_prob
temp=(w/self.c[s][a])*(G-q[s][a])
self.delta+=delta/a
return q
def explore(self,episode_num):
s=int(np.random.uniform(0,len(self.state_name)))
return self._explore(episode_num,self.q,s,self.action,self.action_prob,self.exploration_space,self.episode_step)
def learn(self,episode,i):
self.delta=0
self.q=self.importance_sampling(episode,self.q,self.discount)
self.delta=self.delta/(i+1)
return
def save_policy(self,path):
policy_file=open(path+'.dat','wb')
pickle.dump(self.q,policy_file)
policy_file.close()
return
def save_e(self,path):
episode_file=open(path+'.dat','wb')
pickle.dump(self.episode,episode_file)
episode_file.close()
return
def save(self,path,i=None,one=True):
if one==True:
output_file=open(path+'\save.dat','wb')
path=path+'\save.dat'
index=path.rfind('\\')
if self.save_episode==True:
episode_file=open(path.replace(path[index+1:],'episode.dat'),'wb')
pickle.dump(self.episode,episode_file)
episode_file.close()
else:
output_file=open(path+'\save-{0}.dat'.format(i+1),'wb')
path=path+'\save-{0}.dat'.format(i+1)
index=path.rfind('\\')
if self.save_episode==True:
episode_file=open(path.replace(path[index+1:],'episode-{0}.dat'.format(i+1)),'wb')
pickle.dump(self.episode,episode_file)
episode_file.close()
self.episode_num=self.epi_num
pickle.dump(self.action_len,output_file)
pickle.dump(self.action,output_file)
pickle.dump(self.action_prob,output_file)
pickle.dump(self.epsilon,output_file)
pickle.dump(self.discount,output_file)
pickle.dump(self.theta,output_file)
pickle.dump(self.episode_step,output_file)
pickle.dump(self.save_episode,output_file)
pickle.dump(self.delta,output_file)
pickle.dump(self.episode_num,output_file)
pickle.dump(self.total_episode,output_file)
pickle.dump(self.total_time,output_file)
output_file.close()
return
def restore(self,s_path,e_path=None):
input_file=open(s_path,'rb')
if self.save_episode==True:
episode_file=open(e_path,'rb')
self.episode=pickle.load(episode_file)
episode_file.close()
self.action_len=pickle.load(input_file)
self.action=pickle.load(input_file)
self.action_prob=pickle.load(input_file)
self.epsilon=pickle.load(input_file)
self.discount=pickle.load(input_file)
self.theta=pickle.load(input_file)
self.episode_step=pickle.load(input_file)
self.save_episode=pickle.load(input_file)
self.delta=pickle.load(input_file)
self.episode_num=pickle.load(input_file)
self.total_episode=pickle.load(input_file)
self.total_time=pickle.load(input_file)
input_file.close()
return
| [
"pickle.dump",
"numpy.abs",
"numpy.argmax",
"numpy.power",
"time.time",
"pickle.load",
"numpy.random.choice"
] | [((837, 848), 'time.time', 'time.time', ([], {}), '()\n', (846, 848), False, 'import time\n'), ((2130, 2141), 'time.time', 'time.time', ([], {}), '()\n', (2139, 2141), False, 'import time\n'), ((3017, 3032), 'numpy.argmax', 'np.argmax', (['q[s]'], {}), '(q[s])\n', (3026, 3032), True, 'import numpy as np\n'), ((6206, 6238), 'pickle.dump', 'pickle.dump', (['self.q', 'policy_file'], {}), '(self.q, policy_file)\n', (6217, 6238), False, 'import pickle\n'), ((6377, 6416), 'pickle.dump', 'pickle.dump', (['self.episode', 'episode_file'], {}), '(self.episode, episode_file)\n', (6388, 6416), False, 'import pickle\n'), ((7336, 7377), 'pickle.dump', 'pickle.dump', (['self.action_len', 'output_file'], {}), '(self.action_len, output_file)\n', (7347, 7377), False, 'import pickle\n'), ((7386, 7423), 'pickle.dump', 'pickle.dump', (['self.action', 'output_file'], {}), '(self.action, output_file)\n', (7397, 7423), False, 'import pickle\n'), ((7432, 7474), 'pickle.dump', 'pickle.dump', (['self.action_prob', 'output_file'], {}), '(self.action_prob, output_file)\n', (7443, 7474), False, 'import pickle\n'), ((7483, 7521), 'pickle.dump', 'pickle.dump', (['self.epsilon', 'output_file'], {}), '(self.epsilon, output_file)\n', (7494, 7521), False, 'import pickle\n'), ((7530, 7569), 'pickle.dump', 'pickle.dump', (['self.discount', 'output_file'], {}), '(self.discount, output_file)\n', (7541, 7569), False, 'import pickle\n'), ((7578, 7614), 'pickle.dump', 'pickle.dump', (['self.theta', 'output_file'], {}), '(self.theta, output_file)\n', (7589, 7614), False, 'import pickle\n'), ((7623, 7666), 'pickle.dump', 'pickle.dump', (['self.episode_step', 'output_file'], {}), '(self.episode_step, output_file)\n', (7634, 7666), False, 'import pickle\n'), ((7675, 7718), 'pickle.dump', 'pickle.dump', (['self.save_episode', 'output_file'], {}), '(self.save_episode, output_file)\n', (7686, 7718), False, 'import pickle\n'), ((7727, 7763), 'pickle.dump', 'pickle.dump', (['self.delta', 'output_file'], {}), 
'(self.delta, output_file)\n', (7738, 7763), False, 'import pickle\n'), ((7772, 7814), 'pickle.dump', 'pickle.dump', (['self.episode_num', 'output_file'], {}), '(self.episode_num, output_file)\n', (7783, 7814), False, 'import pickle\n'), ((7823, 7867), 'pickle.dump', 'pickle.dump', (['self.total_episode', 'output_file'], {}), '(self.total_episode, output_file)\n', (7834, 7867), False, 'import pickle\n'), ((7876, 7917), 'pickle.dump', 'pickle.dump', (['self.total_time', 'output_file'], {}), '(self.total_time, output_file)\n', (7887, 7917), False, 'import pickle\n'), ((8247, 8270), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8258, 8270), False, 'import pickle\n'), ((8292, 8315), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8303, 8315), False, 'import pickle\n'), ((8342, 8365), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8353, 8365), False, 'import pickle\n'), ((8388, 8411), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8399, 8411), False, 'import pickle\n'), ((8435, 8458), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8446, 8458), False, 'import pickle\n'), ((8479, 8502), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8490, 8502), False, 'import pickle\n'), ((8530, 8553), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8541, 8553), False, 'import pickle\n'), ((8581, 8604), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8592, 8604), False, 'import pickle\n'), ((8625, 8648), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8636, 8648), False, 'import pickle\n'), ((8675, 8698), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8686, 8698), False, 'import pickle\n'), ((8727, 8750), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (8738, 8750), False, 'import pickle\n'), ((8776, 8799), 'pickle.load', 
'pickle.load', (['input_file'], {}), '(input_file)\n', (8787, 8799), False, 'import pickle\n'), ((5345, 5392), 'numpy.abs', 'np.abs', (['(temp - w / self.c[s][a] * (G - q[s][a]))'], {}), '(temp - w / self.c[s][a] * (G - q[s][a]))\n', (5351, 5392), True, 'import numpy as np\n'), ((8162, 8187), 'pickle.load', 'pickle.load', (['episode_file'], {}), '(episode_file)\n', (8173, 8187), False, 'import pickle\n'), ((5457, 5472), 'numpy.argmax', 'np.argmax', (['q[s]'], {}), '(q[s])\n', (5466, 5472), True, 'import numpy as np\n'), ((6805, 6844), 'pickle.dump', 'pickle.dump', (['self.episode', 'episode_file'], {}), '(self.episode, episode_file)\n', (6816, 6844), False, 'import pickle\n'), ((7211, 7250), 'pickle.dump', 'pickle.dump', (['self.episode', 'episode_file'], {}), '(self.episode, episode_file)\n', (7222, 7250), False, 'import pickle\n'), ((3443, 3482), 'numpy.random.choice', 'np.random.choice', (['action'], {'p': 'action_prob'}), '(action, p=action_prob)\n', (3459, 3482), True, 'import numpy as np\n'), ((4208, 4247), 'numpy.random.choice', 'np.random.choice', (['action'], {'p': 'action_prob'}), '(action, p=action_prob)\n', (4224, 4247), True, 'import numpy as np\n'), ((5219, 5240), 'numpy.power', 'np.power', (['discount', 'i'], {}), '(discount, i)\n', (5227, 5240), True, 'import numpy as np\n')] |
# FORCsensei module
# compile using: python3 setup.py sdist bdist_wheel
import os
import numpy as np
import codecs as cd
import scipy as sp
from scipy import linalg
from IPython.display import YouTubeVideo
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.tri as tri
import matplotlib.colors as colors
from matplotlib.colors import LinearSegmentedColormap
from dask.distributed import Client, LocalCluster, progress #needed for multiprocessing
##### BEGIN SECTION: TUTORIALS #################################################
def play_tutorial(index):
#define list of tutorial videos
tutorial = ['ilyS6K4ry3U'] #tutorial 1
tutorial.append('b1hkT0sj1h8') #tutorial 2
tutorial.append('g0NT6aUwN8c') #tutorial 3
if index>-1:
vid = YouTubeVideo(id = tutorial[index],autoplay=True)
display(vid)
def video_tutorials(*arg):
style={'description_width': 'initial'}
tut_widge = widgets.Dropdown(
options=[
['Select topic',-1],
['1: Introduction - working with FORCsensei',0],
['2: Plotting options',1],
['3: download options',2],
],
value = -1,
description='Video tutorials:',
style=style,
)
X = interactive(play_tutorial,index=tut_widge)
display(X)
##### END SECTION: TUTORIALS #################################################
##### BEGIN SECTION: PREPROCESSING #################################################
def preprocessing_options(X):
style = {'description_width': 'initial'} #general style settings
### Define sample properties ###
fn = X['fn']
prop_title = widgets.HTML(value='<h3>Sample preprocessing options</h3>')
mass_title = widgets.HTML(value='To disable mass normalization use a value of -1')
sample, unit, mass = sample_details(fn)
sample_widge = widgets.Text(value=sample,description='Sample name:',style=style)
if mass == "N/A":
mass_widge = widgets.FloatText(value=-1, description = 'Sample mass (g):',style=style)
else:
mass_widge = widgets.FloatText(value=mass, description = 'Sample mass (g):',style=style)
mass_widge1 = HBox([mass_widge,mass_title])
### Define measurement corrections ###
correct_title = widgets.HTML(value='<h3>Select preprocessing options:</h3>')
slope_widge = widgets.FloatSlider(
value=70,
min=1,
max=100.0,
step=1,
description='Slope correction [%]:',
style=style,
readout_format='.0f',
)
slope_title = widgets.HTML(value='To disable high-field slope correction use a value of 100%')
slope_widge1 = HBox([slope_widge,slope_title])
drift_widge = widgets.Checkbox(value=False, description='Measurement drift correction')
fpa_widge = widgets.Checkbox(value=False, description='Remove first point artifact')
lpa_widge = widgets.Checkbox(value=False, description='Remove last point artifact')
outlier_widge = widgets.Checkbox(value=False, description='Remove measurement outliers')
correct_widge = VBox([correct_title,sample_widge,mass_widge1,slope_widge1,drift_widge,fpa_widge,lpa_widge,outlier_widge])
preprocess_nest = widgets.Tab()
preprocess_nest.children = [correct_widge]
preprocess_nest.set_title(0, 'PREPROCESSING')
display(preprocess_nest)
X["sample"] = sample_widge
X["mass"] = mass_widge
X["unit"] = unit
X["drift"] = drift_widge
X["slope"] = slope_widge
X["fpa"] = fpa_widge
X["lpa"] = lpa_widge
X["outlier"] = outlier_widge
return X
def plot_delta_hysteresis(X,ax):
#unpack
M = X["DM"]
H = X["H"]
Fk = X["Fk"]
hfont = {'fontname':'STIXGeneral'}
for i in range(5,int(np.max(Fk)),5):
if X["mass"].value > 0.0: #SI and mass normalized (T and Am2/kg)
ax.plot(H[Fk==i],M[Fk==i]/(X["mass"].value/1000.0),'-k')
else: #SI not mass normalized (T and Am2)
ax.plot(H[Fk==i],M[Fk==i],'-k')
ax.grid(False)
ax.minorticks_on()
ax.tick_params(axis='both',which='major',direction='out',length=5,width=1,labelsize=12,color='k')
ax.tick_params(axis='both',which='minor',direction='out',length=5,width=1,color='k')
ax.spines['left'].set_position('zero')
ax.spines['left'].set_color('k')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
ylim=np.max(np.abs(ax.get_ylim()))
ax.set_ylim([-ylim*0.1,ylim])
yticks0 = ax.get_yticks()
yticks = yticks0[yticks0 != 0]
ax.set_yticks(yticks)
# set the y-spine
ax.spines['bottom'].set_position('zero')
ax.spines['bottom'].set_color('k')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
Xticks = ax.get_xticks()
Xidx = np.argwhere(np.abs(Xticks)>0.01)
ax.set_xticks(Xticks[Xidx])
xmax = X["xmax"]
ax.set_xlim([-xmax,xmax])
#label x-axis according to unit system
ax.set_xlabel('$\mu_0 H [T]$',horizontalalignment='right', position=(1,25), fontsize=12)
#label y-axis according to unit system
if X["mass"].value > 0.0:
ax.set_ylabel('$M - M_{hys} [Am^2/kg]$',verticalalignment='top',position=(25,0.9), fontsize=12,**hfont)
else:
ax.set_ylabel('$M - M_{hys} [Am^2]$',verticalalignment='top',position=(25,0.9), fontsize=12,**hfont)
return X
def data_preprocessing(X):
#parse measurements
H, Hr, M, Fk, Fj, Ft, dH = parse_measurements(X["fn"])
Hcal, Mcal, tcal = parse_calibration(X["fn"])
# make a data dictionary for passing large numbers of arguments
# should unpack in functions for consistency
X["H"] = H
X["Hr"] = Hr
X["M"] = M
X["dH"] = dH
X["Fk"] = Fk
X["Fj"] = Fj
X["Ft"] = Ft
X["Hcal"] = Hcal
X["Mcal"] = Mcal
X["tcal"] = tcal
if X['unit']=='Cgs':
X = CGS2SI(X)
if X["drift"].value == True:
X = drift_correction(X)
#if X["mass"].value > 0.0:
# X = mass_normalize(X)
if X["slope"].value < 100:
X = slope_correction(X)
if X["fpa"].value == True:
X = remove_fpa(X)
if X["lpa"].value == True:
X = remove_lpa(X)
if X["outlier"].value == True:
data = remove_outliers(data)
X["lbs"] = lowerbranch_subtract(X)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(121)
X = plot_hysteresis(X,ax1)
ax2 = fig.add_subplot(122)
X = plot_delta_hysteresis(X,ax2)
outputfile = X["sample"].value+'_hys.eps'
plt.savefig(outputfile, bbox_inches="tight")
plt.show()
return X
def plot_hysteresis(X,ax):
#unpack
M = X["M"]
H = X["H"]
Fk = X["Fk"]
#mpl.style.use('seaborn-whitegrid')
hfont = {'fontname':'STIXGeneral'}
for i in range(5,int(np.max(Fk)),5):
if X["mass"].value > 0.0: #SI and mass normalized (T and Am2/kg)
ax.plot(H[Fk==i],M[Fk==i]/(X["mass"].value/1000.0),'-k')
else: #SI not mass normalized (T and Am2)
ax.plot(H[Fk==i],M[Fk==i],'-k')
ax.grid(False)
ax.minorticks_on()
ax.tick_params(axis='both',which='major',direction='out',length=5,width=1,labelsize=12,color='k')
ax.tick_params(axis='both',which='minor',direction='out',length=5,width=1,color='k')
ax.spines['left'].set_position('zero')
ax.spines['left'].set_color('k')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
ylim=np.max(np.abs(ax.get_ylim()))
ax.set_ylim([-ylim,ylim])
#ax.set_ylim([-1,1])
yticks0 = ax.get_yticks()
yticks = yticks0[yticks0 != 0]
ax.set_yticks(yticks)
# set the y-spine
ax.spines['bottom'].set_position('zero')
ax.spines['bottom'].set_color('k')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
xmax = np.max(np.abs(ax.get_xlim()))
ax.set_xlim([-xmax,xmax])
#label x-axis
ax.set_xlabel('$\mu_0 H [T]$',horizontalalignment='right', position=(1,25), fontsize=12)
#label y-axis according to unit system
if X["mass"].value > 0.0:
ax.set_ylabel('$M [Am^2/kg]$',verticalalignment='top',position=(25,0.9), fontsize=12,**hfont)
else:
ax.set_ylabel('$M [Am^2]$',verticalalignment='top',position=(25,0.9), fontsize=12,**hfont)
X["xmax"]=xmax
return X
def sample_details(fn):
sample = fn.split('/')[-1]
sample = sample.split('.')
if type(sample) is list:
sample=sample[0]
units=parse_units(fn)
mass=parse_mass(fn)
return sample, units, mass
def slope_correction(X):
#unpack
H = X["H"]
M = X["M"]
# high field slope correction
Hidx = H > (X["slope"].value/100) * np.max(H)
p = np.polyfit(H[Hidx],M[Hidx],1)
M = M - H*p[0]
#repack
X["M"]=M
return X
def mass_normalize(X):
X["M"] = X["M"] / (X["mass"].value/1000.) #convert to AM^2/kg
return X
def remove_outliers(X):
"""Function to replace "bad" measurements to zero.
Inputs:
H: Measurement applied field [float, SI units]
Hr: Reversal field [float, SI units]
M: Measured magnetization [float, SI units]
Fk: Index of measured FORC (int)
Fj: Index of given measurement within a given FORC (int)
Outputs:
Fmask: mask, accepted points = 1, rejected points = 0
R: residuals from fitting process
Rcrit: critical residual threshold
"""
#unpack variables
H = X["H"]
Hr = X["Hr"]
M = X["M"]
Fk = X["Fk"]
Fj = X["Fj"]
SF=2 #half width of the smooth (full width = 2SF+1)
Mst=np.zeros(M.size)*np.nan #initialize output of smoothed magnetizations
for i in range(M.size): #loop through each measurement
idx=((Fk==Fk[i]) & (Fj<=Fj[i]+SF) & (Fj>=Fj[i]-SF)) #finding smoothing window in terms of H
Npts=np.sum(idx) #check enough points are available (may not be the case as edges)
if Npts>3:
#create centered quadratic design matrix WRT H
A = np.concatenate((np.ones(Npts)[:,np.newaxis],\
(H[idx]-H[i])[:,np.newaxis],\
((H[idx]-H[i])**2)[:,np.newaxis]),axis=1)
Mst[i] = np.linalg.lstsq(A,M[idx],rcond=None)[0][0] #regression estimate of M
else:
Mst[i] = M[i] #not enough points, so used M
Mstst=np.zeros(M.size)*np.nan
for i in range(M.size):
idx=((Fk<=Fk[i]+SF) & (Fk>=Fk[i]-SF) & (Fk[i]-Fk+(Fj-Fj[i])==0))
Npts=np.sum(idx)
if Npts>3:
#create centered quadratic design matrix WRT Hr
A = np.concatenate((np.ones(Npts)[:,np.newaxis],\
(Hr[idx]-Hr[i])[:,np.newaxis],\
((Hr[idx]-Hr[i])**2)[:,np.newaxis]),axis=1)
Mstst[i] = np.linalg.lstsq(A,Mst[idx],rcond=None)[0][0] #regression estimate of Mst
else:
Mstst[i] = Mst[i] #not enough points, so used Mst
R = Mstst-Mst #estimated residuals
Rcrit = np.std(R)*2.5 #set cut-off at 2.5 sigma
Fmask=np.ones(M.size) #initialize mask
Fmask[np.abs(R)>Rcrit]=0.0
idx = (np.abs(R)<Rcrit) #points flagged as outliers
#remove points deemed to be outliers
H = H[idx]
Hr = Hr[idx]
M = M[idx]
Fk = Fk[idx]
Fj = Fj[idx]
#reset indicies as required
Fk = Fk - np.min(Fk)+1
Nforc = int(np.max(Fk))
for i in range(Nforc):
idx = (Fk == i)
idx0 = np.argsort(Fj[idx])
for i in range(idx.size):
Fj[idx[idx0[i]]] = i+1
#repack variables
X["H"] = H
X["Hr"] = Hr
X["M"] = M
X["Fk"] = Fk
X["Fj"] = Fj
return X
def remove_lpa(X):
#unpack
Fj = X["Fj"]
H = X["H"]
Hr = X["Hr"]
M = X["M"]
Fk = X["Fk"]
Fj = X["Fj"]
Ft = X["Ft"]
#remove last point artifact
Nforc = int(np.max(Fk))
W = np.ones(Fk.size)
for i in range(Nforc):
Fj_max=np.sum((Fk==i))
idx = ((Fk==i) & (Fj==Fj_max))
W[idx]=0.0
idx = (W > 0.5)
H=H[idx]
Hr=Hr[idx]
M=M[idx]
Fk=Fk[idx]
Fj=Fj[idx]
Ft=Ft[idx]
Fk=Fk-np.min(Fk)+1. #reset FORC number if required
#repack
X["Fj"] = Fj
X["H"] = H
X["Hr"] = Hr
X["M"] = M
X["Fk"] = Fk
X["Fj"] = Fj
X["Ft"] = Ft
return X
def remove_fpa(X):
#unpack
Fj = X["Fj"]
H = X["H"]
Hr = X["Hr"]
M = X["M"]
Fk = X["Fk"]
Fj = X["Fj"]
Ft = X["Ft"]
#remove first point artifact
idx=((Fj==1.0))
H=H[~idx]
Hr=Hr[~idx]
M=M[~idx]
Fk=Fk[~idx]
Fj=Fj[~idx]
Ft=Ft[~idx]
Fk=Fk-np.min(Fk)+1. #reset FORC number if required
Fj=Fj-1.
#repack
X["Fj"] = Fj
X["H"] = H
X["Hr"] = Hr
X["M"] = M
X["Fk"] = Fk
X["Fj"] = Fj
X["Ft"] = Ft
return X
def drift_correction(X):
#unpack
M = X["M"]
Mcal = X["Mcal"]
Ft = X["Ft"]
tcal = X["tcal"]
#perform drift correction
M=M*Mcal[0]/np.interp(Ft,tcal,Mcal,left=np.nan) #drift correction
#repack
X["M"] = M
return X
def CGS2SI(X):
X["H"] = X["H"]/1E4 #convert Oe into T
X["M"] = X["M"]/1E3 #convert emu to Am2
return X
def lowerbranch_subtract(X):
"""Function to subtract lower hysteresis branch from FORC magnetizations
Inputs:
H: Measurement applied field [float, SI units]
Hr: Reversal field [float, SI units]
M: Measured magnetization [float, SI units]
Fk: Index of measured FORC (int)
Fj: Index of given measurement within a given FORC (int)
Outputs:
M: lower branch subtracted magnetization [float, SI units]
"""
#unpack
H = X["H"]
Hr = X["Hr"]
M = X["M"]
Fk = X["Fk"]
Fj = X["Fj"]
dH = X["dH"]
Hmin = np.min(H)
Hmax = np.max(H)
Hbar = np.zeros(1)
Mbar = np.zeros(1)
Nbar = 10
nH = int((Hmax - Hmin)/dH)
Hi = np.linspace(Hmin,Hmax,nH*50+1)
for i in range(Hi.size):
idx = (H>=Hi[i]-dH/2) & (H<=Hi[i]+dH/2)
H0 = H[idx][-Nbar:]
M0 = M[idx][-Nbar:]
Hbar = np.concatenate((Hbar,H0))
Mbar = np.concatenate((Mbar,M0))
Hbar = Hbar[1:]
Mbar = Mbar[1:]
Mhat = np.zeros(Hi.size)
#perform basic loess
for i in range(Hi.size):
idx = (Hbar>=Hi[i]-2.5*dH) & (Hbar<=Hi[i]+2.5*dH)
p = np.polyfit(Hbar[idx],Mbar[idx],2)
Mhat[i] = np.polyval(p,Hi[i])
Hlower = Hi
Mlower = Mhat
Mcorr=M-np.interp(H,Hlower,Mlower,left=np.nan,right=np.nan) #subtracted lower branch from FORCs via interpolation
Fk=Fk[~np.isnan(Mcorr)] #remove any nan
Fj=Fj[~np.isnan(Mcorr)] #remove any nan
H=H[~np.isnan(Mcorr)] #remove any nan
Hr=Hr[~np.isnan(Mcorr)] #remove any nan
M=M[~np.isnan(Mcorr)] #remove any nan
Mcorr = Mcorr[~np.isnan(Mcorr)] #remove any nan
#repack
X["H"] = H
X["Hr"] = Hr
X["M"] = M
X["Fk"] = Fk
X["Fj"] = Fj
X["DM"] = Mcorr
return X
##### END SECTION: PREPROCESSING #################################################
##### BEGIN SECTION: MODEL FUNCTIONS #################################################
def model_options(X):
style = {'description_width': 'initial'} #general style settings
#horizontal line widget
HL = widgets.HTML(value='<hr style="height:3px;border:none;color:#333;background-color:#333;" />')
M_title = widgets.HTML(value='<h3>Select data type:</h3>')
M_widge = widgets.RadioButtons(options=['Magnetisations', 'Lower branch subtracted'],
value='Magnetisations',
style=style)
### Horizontal smoothing ###
S_title = widgets.HTML(value='<h3>Set smoothing parameters:</h3>')
#SC widgets
Sc_widge = widgets.FloatRangeSlider(
value=[3,7],
min=2,
max=10,
step=0.25,
description='Select $s_c$ range:',
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
style = style
)
Sb_widge = widgets.FloatRangeSlider(
value=[3,7],
min=2,
max=10,
step=0.25,
description='Select $s_u$ range:',
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
style = style
)
lambdaSc_widge = widgets.FloatSlider(
value=0.05,
min=0,
max=0.2,
step=0.025,
description='Select $\lambda_{c}$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
style = style
)
lambdaSb_widge = widgets.FloatSlider(
value=0.05,
min=0,
max=0.2,
step=0.025,
description='Select $\lambda_{u}$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
style = style
)
down_title = widgets.HTML(value='<h3>Specify downsampling:</h3>')
down_widge = widgets.IntSlider(
value=1000,
min=100,
max=X['M'].size,
step=1,
description='Number of points:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style = style
)
#combined widget
DS = VBox([down_title,down_widge])
SC = VBox([M_title,M_widge,HL,S_title,Sc_widge,Sb_widge,lambdaSc_widge,lambdaSb_widge])
### Setup Multiprocessing tab ####################
#start cluster to test for number of cores
#if 'cluster' in X:
# X['cluster'].close()
#X['cluster'] = LocalCluster()
#X['ncore'] = len(X['cluster'].workers)
#X['cluster'].close()
X['ncore']=os.cpu_count()
#header
dask_title = widgets.HTML(value='<h3>DASK multiprocessing:</h3>')
#selection widget
dask_widge=widgets.IntSlider(
value=X['ncore'],
min=1,
max=X['ncore'],
step=1,
description='Number of cores:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style=style
)
#final multiprocessing widget
mpl_widge = VBox([dask_title,dask_widge])
### CONSTRUCT TAB MENU #############
method_nest = widgets.Tab()
method_nest.children = [SC,DS,mpl_widge]
method_nest.set_title(0, 'REGRESSION')
method_nest.set_title(1, 'DOWNSAMPLING')
method_nest.set_title(2, 'PROCESSING')
display(method_nest)
### SETUP OUTPUT ####
X['Mtype']=M_widge
X['SC']=Sc_widge
X['SB']=Sb_widge
X['lambdaSC']=lambdaSc_widge
X['lambdaSB']=lambdaSb_widge
X['Ndown']=down_widge
X['workers']=dask_widge
return X
def MK92_weighted_regress(X,y,w,alpha,beta):
#PERFORM WEIGHTED REGRESSION FOLLOWING MACKAY 1992
w = np.sqrt(w / np.linalg.norm(w))
w = w/np.sum(w)*y.size
W = np.diag(w)
XTW = X.T @ W
M = XTW @ X
N = np.size(y)
I = np.eye(np.size(M,axis=1))
XT_y = np.dot(XTW, y)
lamb,VhT = linalg.eigh(M)
lamb = np.real(lamb)
Vh = VhT.T
for i in range(15):
ab0=[alpha, beta]
Wprec = alpha*I + beta*M # Bishop eq 3.54
Wbar = np.dot(VhT,Vh / (lamb + alpha / beta)[:, np.newaxis]) # Bishop eq 3.53
Wbar = np.dot(Wbar, XT_y) # Bishop eq 3.53 (cont.)
gamma = np.sum(lamb / (alpha + lamb)) # Bishop eq 3.91
alpha = gamma / np.maximum(np.sum(np.square(Wbar)),1.0e-10) # Bishop eq 3.91 (avoid division by zero)
beta = (N - gamma) / np.sum(np.square(y - X @ Wbar)) # Bishop eq 3.95
if np.allclose(ab0,[alpha,beta]):
break
#Once model is optimized estimate the log-evidence (Bishop equ 3.86)
M = X.shape[1]
ln_p = 0.5*M*np.log(alpha)
ln_p +=0.5*N*np.log(beta)
ln_p -=0.5*beta*np.sum(np.square(y - X @ Wbar))
ln_p -=0.5*alpha*np.sum(Wbar**2)
ln_p -= 0.5*np.linalg.slogdet(Wprec)[1]
ln_p -= 0.5*N*np.log(2.0*np.pi)
return Wbar, Wprec, ln_p, alpha, beta
def variforc_regression_evidence(sc0,sc1,lamb_sc,sb0,sb1,lamb_sb,Hc,Hb,dH,M,Hc0,Hb0,X):
# function to test all models
H0 = Hc0+Hb0
Hr0 = Hb0-Hc0
rho = np.zeros(Hc0.size)
Midx = np.zeros(Hc0.size)
for i in range(Hc0.size):
ln_p = np.zeros(5)
w, idx = vari_weights(sc0,sc1,lamb_sc,sb0,sb1,lamb_sb,Hc,Hb,dH,Hc0[i],Hb0[i])
#perform 2nd-order least squares to estimate magnitude of beta
Aw = X[idx,0:6] * np.sqrt(w[:,np.newaxis])
Bw = M[idx] * np.sqrt(w)
p=np.linalg.lstsq(Aw, Bw, rcond=0)[0]
hat = np.dot(X[idx,0:6],p)
beta = 1/np.mean((M[idx]-hat)**2)
alpha = beta*0.0002
#perform 1st regression for model selection
_, _, ln_p[0], _, _ = MK92_weighted_regress(X[idx,0:3],M[idx],w,alpha=alpha,beta=beta)
_, _, ln_p[1], _, _ = MK92_weighted_regress(X[idx,0:5],M[idx],w,alpha=alpha,beta=beta)
Wbar, _, ln_p[2], _, _ = MK92_weighted_regress(X[idx,0:6],M[idx],w,alpha=alpha,beta=beta)
rho2 = -0.5*Wbar[5]
Wbar, _, ln_p[3], _, _ = MK92_weighted_regress(X[idx,0:10],M[idx],w,alpha=alpha,beta=beta)
rho3 = -0.5*Wbar[5]-Wbar[8]*H0[i]-Wbar[9]*Hr0[i]
_, _, ln_p[4], _, _ = MK92_weighted_regress(X[idx,:],M[idx],w,alpha=alpha,beta=beta)
#rho4[i] = -0.5*Wbar[5]-Wbar[8]*H0[i]-Wbar[9]*Hr0[i]-(Wbar[11]*3*H[i]**2)/2-(Wbar[13]*3*Hr[i]**2)/2-Wbar[12]*2*H[i]*Hr[i]
Midx[i]=np.argmax(ln_p)
if Midx[i]==2:
rho[i]=rho2
elif Midx[i]>2:
rho[i]=rho3
return np.column_stack((Midx,rho))
def triangulate_rho(X):
rho = X['rho']
Hc = X['Hc']
Hb = X['Hb']
dH = X['dH']
#PERFORM GRIDDING AND INTERPOLATION FOR FORC PLOT
X['Hc1'], X['Hc2'], X['Hb1'], X['Hb2'] = measurement_limts(X)
Hc1 = 0-3*dH
Hc2 = X['Hc2']
Hb1 = X['Hb1']-X['Hc2']
Hb2 = X['Hb2']
#create grid for interpolation
Nx = np.ceil((Hc2-Hc1)/dH)+1 #number of points along x
Ny = np.ceil((Hb2-Hb1)/dH)+1 #number of points along y
xi = np.linspace(Hc1,Hc2,int(Nx))
yi = np.linspace(Hb1,Hb2,int(Ny))
#perform triangluation and interpolation
triang = tri.Triangulation(Hc, Hb)
interpolator = tri.LinearTriInterpolator(triang, rho)
Xi, Yi = np.meshgrid(xi, yi)
Zi = interpolator(Xi, Yi)
Xi[Xi==np.min(Xi[Xi>0])]=0
X['Hc1'] = Hc1
X['Xi']=Xi
X['Yi']=Yi
X['Zi']=Zi
return X
def plot_model_results_down(X):
    """Plot model-selection diagnostics for a down-sampled FORC model.

    Produces a three-panel figure: (1) the selected local model order at
    each down-sampled (Hc, Hb) point, (2) a bootstrap distribution of the
    psi statistic (proportion of points assigned models H2a/H2b/H3), and
    (3) a histogram of selected model orders. The figure is saved as
    '<sample>_model.eps' and shown; X is returned unchanged.

    NOTE(review): assumes X['sample'] is an ipywidgets widget (uses
    .value) and that plt/np are in module scope — confirm against imports.
    """
    #PLOT FULL MODEL
    ## UNPACK VARIABLE ##
    Midx = X['Midx']
    Hb1 = X['Hb1']-X['Hc2']
    Hb2 = X['Hb2']
    Hc1 = X['Hc1']
    Hc2 = X['Hc2']
    Hc = X['Hci']
    Hb = X['Hbi']
    fig = plt.figure(figsize=(12,4.75))
    ##################### BOOTSTRAP PSI ######################
    # fixed seed so the bootstrap (and hence the plot) is reproducible
    np.random.seed(999)
    MC_psi = np.zeros(int(2E3))
    for i in range(MC_psi.size):
        # resample point indices with replacement
        bs = np.random.randint(0,Hc.size,Hc.size)
        # psi = fraction of points whose selected model is H2a, H2b or H3
        MC_psi[i] = np.sum((Midx[bs]>0) & (Midx[bs]<4)) / Hc.size
    ax2 = fig.add_subplot(1,3,2)
    ax2.hist(MC_psi,bins=25,density=True)
    ax2.set_xlabel('$\psi$',fontsize=12)
    ax2.set_ylabel('Proportion of cases [0-1]',fontsize=12)
    ylim2 = ax2.get_ylim()
    xlim2 = ax2.get_xlim()
    # 95% bootstrap confidence bounds on psi
    clow = np.percentile(MC_psi,2.5)
    cupp = np.percentile(MC_psi,97.5)
    ax2.plot((clow,clow),ylim2,'-r')
    ax2.plot((cupp,cupp),ylim2,'-r')
    ax2.text(clow-(xlim2[1]-xlim2[0])/12,ylim2[1]/1.1,'$\psi$ (2.5) = {:.3f}'.format(clow),fontsize=12, rotation=90,color='r')
    ax2.text(cupp+(xlim2[1]-xlim2[0])/30,ylim2[1]/1.1,'$\psi$ (97.5) = {:.3f}'.format(cupp),fontsize=12, rotation=90,color='r')
    ax2.set_ylim(ylim2)
    ax2.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ##################### PLOT MODEL ORDER ######################
    #DEFINE COLORMAP (one RGBA colour per model order H1..H4)
    cseq=[]
    cseq.append((68/255,119/255,170/255,1))
    cseq.append((102/255,204/255,238/255,1))
    cseq.append((34/255,136/255,51/255,1))
    cseq.append((204/255,187/255,68/255,1))
    cseq.append((238/255,102/255,119/255,1))
    ax1 = fig.add_subplot(1,3,1)
    ax1.plot(Hc[Midx==0],Hb[Midx==0],'.',label='$H_1$',markeredgecolor=cseq[0],markerfacecolor=cseq[0],markersize=3)
    ax1.plot(Hc[Midx==1],Hb[Midx==1],'.',label='$H_{2a}$',markeredgecolor=cseq[1],markerfacecolor=cseq[1],markersize=3)
    ax1.plot(Hc[Midx==2],Hb[Midx==2],'.',label='$H_{2b}$',markeredgecolor=cseq[2],markerfacecolor=cseq[2],markersize=3)
    ax1.plot(Hc[Midx==3],Hb[Midx==3],'.',label='$H_3$',markeredgecolor=cseq[3],markerfacecolor=cseq[3],markersize=3)
    ax1.plot(Hc[Midx==4],Hb[Midx==4],'.',label='$H_4$',markeredgecolor=cseq[4],markerfacecolor=cseq[4],markersize=3)
    ax1.set_xlim((0,Hc2))
    ax1.set_ylim((Hb1,Hb2))
    ax1.set_xlabel('$\mu_0H_c$ [T]',fontsize=12)
    ax1.set_ylabel('$\mu_0H_u$ [T]',fontsize=12)
    ax1.set_aspect('equal')
    ax1.minorticks_on()
    ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
    ax1.legend(fontsize=12,labelspacing=0,handletextpad=-0.6,loc=4,bbox_to_anchor=(1.035,-0.02),frameon=False,markerscale=2.5)
    ########## PLOT HISTOGRAM #############
    ax3 = fig.add_subplot(1,3,3)
    N, bins, patches = ax3.hist(Midx,bins=(-0.5,0.5,1.5,2.5,3.5,4.5),rwidth=0.8,density=True)
    for i in range(5):
        # colour each histogram bar with the matching model colour
        patches[i].set_facecolor(cseq[i])
    ax3.set_xticks(range(5))
    ax3.set_xticklabels(('$H_1$', '$H_{2a}$', '$H_{2b}$', '$H_3$', '$H_4$'),size=12)
    ax3.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ax3.set_xlabel('Selected model',fontsize=12)
    ax3.set_ylabel('Proportion of cases [0-1]: $\psi$ = {:.3f}'.format(np.sum((Midx>0) & (Midx<4)) / Hc.size),fontsize=12)
    ax3.set_xlim((-0.5,4.5))
    ##################### OUTPUT PLOTS ######################
    outputfile = X["sample"].value+'_model.eps'
    plt.tight_layout()
    plt.savefig(outputfile)
    plt.show()
    return X
def plot_model_results_full(X):
    """Plot model-selection diagnostics for a full (non-down-sampled) model.

    Produces a three-panel figure: (1) the selected local model order at
    each (Hc, Hb) point, (2) the interpolated FORC distribution as a
    filled contour plot, and (3) a histogram of selected model orders
    annotated with the psi statistic. The figure is saved as
    '<sample>_model.eps' and shown; X is returned unchanged.

    NOTE(review): expects triangulate_rho() to have populated
    X['Xi'], X['Yi'], X['Zi'] beforehand.
    """
    #PLOT FULL MODEL
    ## UNPACK VARIABLE ##
    Xi = X['Xi']
    Yi = X['Yi']
    Zi = X['Zi']
    Midx = X['Midx']
    Hb1 = X['Hb1']-X['Hc2']
    Hb2 = X['Hb2']
    Hc1 = X['Hc1']
    Hc2 = X['Hc2']
    Hc = X['Hc']
    Hb = X['Hb']
    fig = plt.figure(figsize=(12,4.75))
    ##################### PLOT FORC ######################
    cmap,vmin,vmax = FORCinel_colormap(Zi)
    ax2 = fig.add_subplot(1,3,2)
    CS = ax2.contourf(Xi, Yi, Zi, 50, cmap = cmap, vmin=vmin, vmax=vmax)
    cbar2 = fig.colorbar(CS,fraction=0.055, pad=0.05,label='$Am^2$ $T^{-2}$')
    cbar2.ax.tick_params(labelsize=10)
    #cbar2.ax.set_title('$Am^2 T^{-2} (x10^{-6})$',fontsize=10)
    ax2.set_xlim((0,Hc2))
    ax2.set_ylim((Hb1,Hb2))
    ax2.set_ylabel('$\mu_0H_u$ [T]',fontsize=12)
    ax2.set_xlabel('$\mu_0H_c$ [T]',fontsize=12)
    ax2.set_aspect('equal')
    ax2.minorticks_on()
    ax2.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ax2.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
    # dashed line marking the lower boundary of the measured FORC box
    ax2.plot((0,Hc2),(Hb1,X['Hb1']),'--k')
    ##################### PLOT MODEL ORDER ######################
    #DEFINE COLORMAP (one RGBA colour per model order H1..H4)
    cseq=[]
    cseq.append((68/255,119/255,170/255,1))
    cseq.append((102/255,204/255,238/255,1))
    cseq.append((34/255,136/255,51/255,1))
    cseq.append((204/255,187/255,68/255,1))
    cseq.append((238/255,102/255,119/255,1))
    ax1 = fig.add_subplot(1,3,1)
    ax1.plot(Hc[Midx==0],Hb[Midx==0],'.',label='$H_1$',markeredgecolor=cseq[0],markerfacecolor=cseq[0],markersize=3)
    ax1.plot(Hc[Midx==1],Hb[Midx==1],'.',label='$H_{2a}$',markeredgecolor=cseq[1],markerfacecolor=cseq[1],markersize=3)
    ax1.plot(Hc[Midx==2],Hb[Midx==2],'.',label='$H_{2b}$',markeredgecolor=cseq[2],markerfacecolor=cseq[2],markersize=3)
    ax1.plot(Hc[Midx==3],Hb[Midx==3],'.',label='$H_3$',markeredgecolor=cseq[3],markerfacecolor=cseq[3],markersize=3)
    ax1.plot(Hc[Midx==4],Hb[Midx==4],'.',label='$H_4$',markeredgecolor=cseq[4],markerfacecolor=cseq[4],markersize=3)
    ax1.set_xlim((0,Hc2))
    ax1.set_ylim((Hb1,Hb2))
    ax1.set_xlabel('$\mu_0H_c$ [T]',fontsize=12)
    ax1.set_ylabel('$\mu_0H_u$ [T]',fontsize=12)
    ax1.set_aspect('equal')
    ax1.minorticks_on()
    ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
    ax1.legend(fontsize=12,labelspacing=0,handletextpad=-0.6,loc=4,bbox_to_anchor=(1.035,-0.02),frameon=False,markerscale=2.5)
    # invisible colorbar keeps panel 1 the same width as panel 2
    cbar1 = fig.colorbar(CS,fraction=0.055, pad=0.05)
    cbar1.ax.tick_params(labelsize=10)
    cbar1.remove()
    ########## PLOT HISTOGRAM #############
    ax3 = fig.add_subplot(1,3,3)
    N, bins, patches = ax3.hist(Midx,bins=(-0.5,0.5,1.5,2.5,3.5,4.5),rwidth=0.8,density=True)
    for i in range(5):
        # colour each histogram bar with the matching model colour
        patches[i].set_facecolor(cseq[i])
    ax3.set_xticks(range(5))
    ax3.set_xticklabels(('$H_1$', '$H_{2a}$', '$H_{2b}$', '$H_3$', '$H_4$'),size=12)
    ax3.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='12')
    ax3.set_xlabel('Selected model',fontsize=12)
    ax3.set_ylabel('Proportion of cases [0-1]: $\psi$ = {:.3f}'.format(np.sum((Midx>0) & (Midx<4)) / Hc.size),fontsize=12)
    ax3.set_xlim((-0.5,4.5))
    ##################### OUTPUT PLOTS ######################
    outputfile = X["sample"].value+'_model.eps'
    plt.tight_layout()
    plt.savefig(outputfile)
    plt.show()
    return X
def measurement_limts(X):
    """Read the FORC-box measurement limits from the data-file header.

    Inputs:
        X: analysis dictionary; X["fn"] is the data-file name and
           X['unit'] the measurement unit system ('Cgs' or SI)
    Outputs:
        Hc1, Hc2: lower/upper Hc limits of the FORC box
        Hb1, Hb2: lower/upper Hb limits of the FORC box
        (converted from Oe to T when the file is in CGS units)
    """
    Hb2 = parse_header(X["fn"], 'Hb2')  # upper Hb bound of the FORC box
    Hb1 = parse_header(X["fn"], 'Hb1')  # lower Hb bound of the FORC box
    Hc2 = parse_header(X["fn"], 'Hc2')  # upper Hc bound of the FORC box
    Hc1 = parse_header(X["fn"], 'Hc1')  # lower Hc bound of the FORC box
    if X['unit'] == 'Cgs':
        # CGS -> SI: 1 T = 1E4 Oe
        Hc1, Hc2 = Hc1 / 1E4, Hc2 / 1E4
        Hb1, Hb2 = Hb1 / 1E4, Hb2 / 1E4
    return Hc1, Hc2, Hb1, Hb2
def FORCinel_colormap(Z):
    """Build the FORCinel-style diverging colormap for a FORC distribution.

    The default anchor positions assume the negative part of Z extends to
    no more than 19% of its positive maximum. When the data are more
    negative than that, the anchor positions are rescaled to the actual
    (vmin, vmax) range so the colour transition stays at the same data
    values.

    Returns:
        cmap: LinearSegmentedColormap for contourf
        vmin, vmax: colour-scale limits to pass alongside the cmap
    """
    # colour stops (position, value, value) for the default anchor layout
    cdict = {'red':   ((0.0, 127/255, 127/255),
                       (0.1387, 255/255, 255/255),
                       (0.1807, 255/255, 255/255),
                       (0.3193, 102/255, 102/255),
                       (0.563, 204/255, 204/255),
                       (0.6975, 204/255, 204/255),
                       (0.8319, 153/255, 153/255),
                       (0.9748, 76/255, 76/255),
                       (1.0, 76/255, 76/255)),
             'green': ((0.0, 127/255, 127/255),
                       (0.1387, 255/255, 255/255),
                       (0.1807, 255/255, 255/255),
                       (0.3193, 178/255, 178/255),
                       (0.563, 204/255, 204/255),
                       (0.6975, 76/255, 76/255),
                       (0.8319, 102/255, 102/255),
                       (0.9748, 25/255, 25/255),
                       (1.0, 25/255, 25/255)),
             'blue':  ((0.0, 255/255, 255/255),
                       (0.1387, 255/255, 255/255),
                       (0.1807, 255/255, 255/255),
                       (0.3193, 102/255, 102/255),
                       (0.563, 76/255, 76/255),
                       (0.6975, 76/255, 76/255),
                       (0.8319, 153/255, 153/255),
                       (0.9748, 76/255, 76/255),
                       (1.0, 76/255, 76/255))}
    if np.abs(np.min(Z)) <= np.max(Z) * 0.19:
        # negative range fits the default anchors: pad vmin to -19% of vmax
        vmin = -np.max(Z) * 0.19
        vmax = np.max(Z)
    else:
        # negative extension required: rescale anchor positions to the
        # actual data range so zero stays at the white transition
        vmin = np.min(Z)
        vmax = np.max(Z)
        span = vmax - vmin
        fracs = (-0.015, 0.015, 0.19, 0.48, 0.64, 0.80, 0.97)
        anchors = [0.0] + [(f * vmax - vmin) / span for f in fracs] + [1.0]
        for channel in ('red', 'green', 'blue'):
            cdict[channel] = tuple(
                (anchors[i], stop[1], stop[2])
                for i, stop in enumerate(cdict[channel]))
    cmap = LinearSegmentedColormap('forc_cmap', cdict)
    return cmap, vmin, vmax
def calculate_model(X):
    """Fit the local VARIFORC regression models over the FORC plane via DASK.

    Builds the 4th-order polynomial design matrix, scatters the data to the
    DASK cluster (starting a LocalCluster if none exists in X), optionally
    down-samples the evaluation points, and runs variforc_regression_evidence
    over split chunks. Stores the selected model indices ('Midx') and the
    FORC distribution ('rho') in X, then dispatches to the appropriate
    plotting routine. Returns the updated X.

    NOTE(review): assumes X['workers'], X['Ndown'], X['Mtype'], X['SC'],
    X['SB'], X['lambdaSC'], X['lambdaSB'] are ipywidgets widgets (.value).
    """
    if ('client' in X) == False: #start DASK if required
        c = LocalCluster(n_workers=X['workers'].value)
        X['client'] = Client(c)
    H = X['H']
    Hr = X['Hr']
    dH = X['dH']
    #form top-level design matrix (full 4th-order polynomial in H and Hr)
    X0 = np.column_stack((np.ones(H.size),H,Hr,H**2,Hr**2,H*Hr,H**3,Hr**3,H**2*Hr,H*Hr**2,
                          H**4,H**3*Hr,H**2*Hr**2,H*Hr**3,Hr**4))
    H = X['H']
    Hr = X['Hr']
    # rotate (H, Hr) into FORC coordinates (Hc, Hb)
    Hc = (H-Hr)/2
    Hb = (H+Hr)/2
    X['Hc'] = Hc
    X['Hb'] = Hb
    if X['Mtype'].value=='Magnetisations':
        M = X['M']
    else:
        M = X['DM']
    # broadcast the shared arrays to all DASK workers once
    D_Hc = X['client'].scatter(Hc,broadcast=True)
    D_Hb = X['client'].scatter(Hb,broadcast=True)
    D_M = X['client'].scatter(M,broadcast=True)
    D_X = X['client'].scatter(X0,broadcast=True)
    #Down-sample and Split arrays for DASK
    Nsplit = 30
    if X['Ndown'].value<Hc.size:
        X['Hc1'], X['Hc2'], X['Hb1'], X['Hb2'] = measurement_limts(X)
        gradient = (X['Hb1']-(X['Hb1']-X['Hc2']))/(X['Hc2']-X['Hc1'])
        intercept = X['Hb1']-gradient*X['Hc2']
        #generate random points to down-sample (fixed seed for reproducibility)
        np.random.seed(999)
        #Hci = np.random.rand(X['Ndown'].value*3)*(X['Hc2']-X['Hc1'])+X['Hc1']
        #Hbi = np.random.rand(X['Ndown'].value*3)*(X['Hb2']-(X['Hb1']-X['Hc2']))+(X['Hb1']-X['Hc2'])
        #Hidx = np.argwhere(Hbi>=Hci*gradient+intercept)
        #X['Hci'] = Hci[Hidx[0:X['Ndown'].value]]
        #X['Hbi'] = Hbi[Hidx[0:X['Ndown'].value]]
        Ridx = np.random.choice(M.size, size=X['Ndown'].value, replace=False)
        X['Hci']=Hc[Ridx]
        X['Hbi']=Hb[Ridx]
        Hc0 = np.array_split(X['Hci'],Nsplit)
        Hb0 = np.array_split(X['Hbi'],Nsplit)
    else:
        Hc0 = np.array_split(Hc,Nsplit)
        Hb0 = np.array_split(Hb,Nsplit)
    # VARIFORC smoothing parameters from the UI widgets
    sc0 = X['SC'].value[0]
    sc1 = X['SC'].value[1]
    sb0 = X['SB'].value[0]
    sb1 = X['SB'].value[1]
    lamb_sc = X['lambdaSC'].value
    lamb_sb = X['lambdaSB'].value
    #Split jobs over DASK
    jobs = []
    for i in range(len(Hc0)):
        job = X['client'].submit(variforc_regression_evidence,sc0,sc1,lamb_sc,sb0,sb1,lamb_sb,D_Hc,D_Hb,dH*np.sqrt(2),D_M,Hc0[i],Hb0[i],D_X)
        jobs.append(job)
    results = X['client'].gather(jobs)
    # column 0 = selected model index, column 1 = rho estimate
    Midx = results[0][:,0]
    rho = results[0][:,1]
    for i in range(len(results)-1):
        Midx=np.concatenate((Midx,results[i+1][:,0]))
        rho=np.concatenate((rho,results[i+1][:,1]))
    X['rho'] = rho
    X['Midx'] = Midx
    if X['Ndown'].value<Hc.size:
        # down-sampled runs cannot be gridded; flag Xi and plot diagnostics only
        X['Xi']='NA'
        X = plot_model_results_down(X)
    else:
        X = triangulate_rho(X)
        X = plot_model_results_full(X)
    return X
##### END SECTION: MODEL FUNCTIONS #################################################
##### BEGIN SECTION: FORC plotting #################################################
def FORC_plot(X):
    """Launch the interactive FORC-diagram plotting UI.

    Builds ipywidgets controls (colorbar toggle, contour frequency, level
    count, axis limits, download toggle) and wires them to forcplot() via
    interactive(). Requires a full (gridded) model: if X['Xi'] is the 'NA'
    sentinel left by a down-sampled run, prints an error and returns X.
    Displays the widget tab; returns None on the normal path.
    """
    #unpack data
    #rho = data['rho']
    #H = data['H']
    #Hc = data['Hc']
    #Hb = data['Hb']
    Xi = X['Xi']
    if type(Xi) is str:
        # 'NA' sentinel set by calculate_model for down-sampled runs
        print('Error: The current model is based on downsampled data. Run a full model using "calculate_model"') #return error
        return X
    Yi = X['Yi']
    Zi = X['Zi']
    Hc1 = X['Hc1']
    Hc2 = X['Hc2']
    Hb1 = X['Hb1']
    Hb2 = X['Hb2']
    #Set up widgets for interactive plot
    style = {'description_width': 'initial'} #general style settings
    #DEFINE INTERACTIVE WIDGETS
    #should a colorbar be included
    colorbar_widge = widgets.Checkbox(value=False, description = 'Include color scalebar',style=style)
    #Frequency for contour lines to be included in plot
    contour_widge = widgets.Select(
        options=[['Select contour frequency',-1],
                 ['Every level',1],
                 ['Every 2nd level',2],
                 ['Every 3rd level',3],
                 ['Every 4th level',4],
                 ['Every 5th level',5],
                 ['Every 10th level',10],
                 ['Every 20th level',20],
                 ['Every 50th level',50],
                 ],
        value=-1,
        rows=1,
        description='Plot contours',style=style)
    contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style)
    #check box for plot download
    download_widge = widgets.Checkbox(value=False, description = 'Download plot',style=style)
    #How many contour levels should be included
    level_widge = widgets.Select(
        options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]],
        value=50,
        rows=1,
        description='Number of color levels',style=style)
    #X-axis minimum value (limits rounded to the nearest mT)
    xmin_widge = widgets.FloatText(value=0,description='Minimum $\mu_0H_c$ [T]',style=style,step=0.001)
    xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum $\mu_0H_c$ [T]',style=style,step=0.001)
    ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum $\mu_0H_u$ [T]',style=style,step=0.001)
    ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum $\mu_0H_u$ [T]',style=style,step=0.001)
    #launch the interactive FORC plot
    x = interactive(forcplot,
                    Xi=fixed(Xi), #X point grid
                    Yi=fixed(Yi), #Y point grid
                    Zi=fixed(Zi), #interpolated Z values
                    fn=fixed(X['sample']), #File information
                    mass=fixed(X['mass']), #Preprocessing information
                    colorbar=colorbar_widge, #Include colorbar
                    level=level_widge, #Number of levels to plot
                    contour=contour_widge, #Contour levels to plot
                    contourpts=contourpts_widge, #Contour line width
                    xmin=xmin_widge, #X-minimum
                    xmax=xmax_widge, #X-maximum
                    ymin=ymin_widge, #Y-minimum
                    ymax=ymax_widge, #Y-maximum
                    download = download_widge #download plot
                    )
    #create tabs
    tab_nest = widgets.Tab()
    # tab_nest.children = [tab_visualise]
    tab_nest.set_title(0, 'FORC PLOTTING')
    #interact function in isolation
    tab_nest.children = [VBox(children = x.children)]
    display(tab_nest)
    #display(x) #display the interactive plot
def forcplot(Xi,Yi,Zi,fn,mass,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,download):
    """Render a single FORC diagram from the gridded distribution.

    Inputs:
        Xi, Yi, Zi: regular (Hc, Hb) grid and interpolated rho values
        fn: sample-name widget (fn.value used for the download file name)
        mass: mass widget; mass.value < 0 disables mass normalization,
              otherwise Zi is divided by mass in kg (value given in g)
        colorbar, level, contour, contourpts: plot styling controls
        xmin, xmax, ymin, ymax: axis limits [T]
        download: if True, saves the figure as '<fn>_FORC.eps'
    Side effects: draws (and optionally saves) a matplotlib figure.
    """
    fig = plt.figure(figsize=(6,6))
    ax = fig.add_subplot(1,1,1)
    if mass.value<0.0:
        # negative mass sentinel: no normalization
        Xi_new = Xi
        Yi_new = Yi
        Zi_new = Zi
        xlabel_text = '$\mu_0 H_c [T]$' #label Hc axis [SI units]
        ylabel_text = '$\mu_0 H_u [T]$' #label Hu axis [SI units]
        cbar_text = '$Am^2 T^{-2}$'
    else:
        Xi_new = Xi
        Yi_new = Yi
        Zi_new = Zi / (mass.value/1000.0) # g -> kg, then normalize
        xlabel_text = '$\mu_0 H_c [T]$' #label Hc axis [SI units]
        ylabel_text = '$\mu_0 H_u [T]$' #label Hu axis [SI units]
        cbar_text = '$Am^2 T^{-2} kg^{-1}$'
    #define colormap from the data currently in view only
    idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view
    cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx])
    CS = ax.contourf(Xi_new, Yi_new, Zi_new, level, cmap = cmap, vmin=vmin, vmax=vmax)
    if (contour>0) & (contour<level):
        # overlay contour lines at every `contour`-th filled level
        CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts)
    ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units]
    ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units]
    # Set plot Xlimits (sorted so a reversed min/max pair still works)
    xlimits = np.sort((xmin,xmax))
    ax.set_xlim(xlimits)
    #Set plot Ylimits
    ylimits = np.sort((ymin,ymax))
    ax.set_ylim(ylimits)
    #Set ticks and plot aspect ratio
    ax.tick_params(labelsize=14)
    ax.set_aspect('equal') #set 1:1 aspect ratio
    ax.minorticks_on() #add minor ticks
    #Add colorbar
    if colorbar == True:
        cbar = fig.colorbar(CS,fraction=0.04, pad=0.08)
        cbar.ax.tick_params(labelsize=14)
        #cbar.ax.set_title(cbar_text,fontsize=14)
        cbar.set_label(cbar_text,fontsize=14)
    #Activate download to same folder as data file
    if download==True:
        outputfile = fn.value+'_FORC.eps'
        plt.savefig(outputfile, dpi=300, bbox_inches="tight")
    #show the final plot
    plt.show()
##### END SECTION: FORC plotting #################################################
##### BEGIN SECTION: HELPER FUNCTIONS #################################################
# define function which will look for lines in the header that start with certain strings
def find_data_lines(fp):
    """Return the measurement lines of a FORC data file.

    Given the various FORC file formats, a line is considered a
    measurement line when it:
        starts with '+' or '-', or
        is blank (separator between FORCs and calibration points), or
        contains a ','
    Inputs:
        fp: iterable of lines (e.g. an open file object)
    Outputs:
        list of the lines meeting the conditions above, in order
    """
    selected = []
    for line in fp:
        is_signed = line.startswith(('+', '-'))
        is_blank = line.strip() == ''
        has_comma = line.find(',') > -1
        if is_signed or is_blank or has_comma:
            selected.append(line)
    return selected
#function to parse calibration points and provide time stamps
def lines_that_start_with(string, fp):
    """Return the lines of a FORC data file that start with a given prefix.

    Inputs:
        string: prefix to match against the start of each line
        fp: iterable of lines (e.g. an open file object)
    Outputs:
        list of matching lines, in file order
    """
    matches = []
    for line in fp:
        if line.startswith(string):
            matches.append(line)
    return matches
def parse_header(file,string):
    """Extract an instrument setting from a FORC data-file header.

    Inputs:
        file: name of data file (string)
        string: name of the instrument setting to extract (string)
    Outputs:
        output: value of the setting as a float, or -1 when the setting
                does not appear in the header
    """
    output = -1  # sentinel: setting not found
    # latin9 encoding seems to work where UTF and ASCII don't
    with cd.open(file, "r", encoding='latin9') as fp:
        for line in lines_that_start_with(string, fp):
            eq = line.find('=')  # some file formats use 'name = value'
            if eq > -1:
                # value is everything to the right of '='
                output = float(line[eq + 1:])
            else:
                # no '=': value follows directly after the setting name
                output = float(line[len(string) + 1:])
    return output
def parse_units(file):
    """Extract the measurement unit system from a FORC data-file header.

    Inputs:
        file: name of data file (string)
    Outputs:
        'Cgs' (CGS setting) or 'SI' (Hybrid SI) from the first matching
        header line; None if the header has no 'Units of measure' line
    """
    key = 'Units of measure'  # header field that declares the units
    # latin9 encoding seems to work where UTF and ASCII don't
    with cd.open(file, "r", encoding='latin9') as fp:
        for line in lines_that_start_with(key, fp):
            # str.find returns -1 when absent, so the larger index is the
            # unit string actually present in the header line
            if line.find('Hybrid SI') > line.find('Cgs'):
                return 'SI'
            return 'Cgs'
def parse_mass(file):
    """Extract the sample mass from a FORC data-file header.

    Inputs:
        file: name of data file (string)
    Outputs:
        mass in g (float), or the string 'N/A' when no mass was recorded
    """
    output = 'N/A'
    key = 'Mass'  # header field that declares the mass
    # latin9 encoding seems to work where UTF and ASCII don't
    with cd.open(file, "r", encoding='latin9') as fp:
        for line in lines_that_start_with(key, fp):
            eq = line.find('=')  # some file formats use 'Mass = value'
            if eq > -1:
                raw = line[eq + 1:]          # everything right of '='
            else:
                raw = line[len(key) + 1:]    # everything after the field name
            if raw.find('N/A') > -1:
                output = 'N/A'
            else:
                output = float(raw)
    return output
def calibration_times(file, Npts):
    """Function to estimate the time at which calibration points were measured in a FORC sequence

    Follows the procedure given in:
    Egli (2013) VARIFORC: An optimized protocol for calculating non-regular first-order reversal curve (FORC) diagrams. Global and Planetary Change, 110, 302-320, doi:10.1016/j.gloplacha.2013.08.003.

    Inputs:
    file: name of data file (string)
    Npts: number of calibration points (int)

    Outputs:
    tcal_k: Estimated times at which the calibration points were measured (float)
    """
    # NOTE(review): the header-parsing preamble below duplicates
    # measurement_times(); a shared helper would remove the duplication.
    unit=parse_units(file) #determine measurement system (CGS or SI)
    string='PauseRvrsl' #Pause at reversal field (new file format, -1 if not available)
    tr0=parse_header(file,string)
    string='PauseNtl' #Pause at reversal field (old file format, -1 if not available)
    tr1=parse_header(file,string)
    tr=np.max((tr0,tr1)) #select Pause value depending on file format
    string='Averaging time' #Measurement averaging time
    tau=parse_header(file,string)
    string='PauseCal' #Pause at calibration point
    tcal=parse_header(file,string)
    string='PauseSat' #Pause at saturation field
    ts=parse_header(file,string)
    string='SlewRate' #Field slewrate
    alpha=parse_header(file,string)
    string='HSat' #Satuation field
    Hs=parse_header(file,string)
    string='Hb2' #upper Hb value for the FORC box
    Hb2=parse_header(file,string)
    string='Hb1' #lower Hb value for the FORC box
    Hb1=parse_header(file,string)
    string='Hc2' #upper Hc value for the FORC box (n.b. Hc1 is assumed to be 0)
    Hc2=parse_header(file,string)
    string='NForc' # Numer of measured FORCs (new file format, -1 if not available)
    N0=parse_header(file,string)
    string='NCrv' # Numer of measured FORCs (old file format, -1 if not available)
    N1=parse_header(file,string)
    N=np.max((N0,N1)) #select Number of FORCs depending on file format
    if unit=='Cgs':
        # convert header fields from CGS to SI (1 T = 1E4 Oe)
        alpha=alpha/1E4 #convert from Oe to T
        Hs=Hs/1E4 #convert from Oe to T
        Hb2=Hb2/1E4 #convert from Oe to T
        Hb1=Hb1/1E4 #convert from Oe to T
    dH = (Hc2-Hb1+Hb2)/N #estimated field spacing
    #now following Elgi's estimate of the measurement time
    nc2 = Hc2/dH
    # Dt1: full per-FORC cycle time; Dt2: offset term (Egli 2013, App. A)
    Dt1 = tr + tau + tcal + ts + 2.*(Hs-Hb2-dH)/alpha
    Dt2 = tr + tau + (Hc2-Hb2-dH)/alpha
    Npts=int(Npts)
    tcal_k=np.zeros(Npts)
    for k in range(1,Npts+1):
        # quadratic growth while the FORC length is still increasing,
        # linear growth once it saturates at 1+nc2 points
        if k<=1+nc2:
            tcal_k[k-1]=k*Dt1-Dt2+dH/alpha*k**2+(tau-dH/alpha)*(k-1)**2
        else:
            tcal_k[k-1]=k*Dt1-Dt2+dH/alpha*k**2+(tau-dH/alpha)*((k-1)*(1+nc2)-nc2)
    return tcal_k
def measurement_times(file,Fk,Fj):
    """Function to estimate the time at which magnetization points were measured in a FORC sequence

    Follows the procedure given in:
    Egli (2013) VARIFORC: An optimized protocol for calculating non-regular first-order reversal curve (FORC) diagrams. Global and Planetary Change, 110, 302-320, doi:10.1016/j.gloplacha.2013.08.003.

    Inputs:
    file: name of data file (string)
    Fk: FORC indicies (int)
    Fj: Measurement indicies within given FORC

    Outputs:
    Ft: Estimated times at which the magnetization points were measured (float)
    """
    # NOTE(review): the header-parsing preamble below duplicates
    # calibration_times(); a shared helper would remove the duplication.
    unit=parse_units(file) #determine measurement system (CGS or SI)
    string='PauseRvrsl' #Pause at reversal field (new file format, -1 if not available)
    tr0=parse_header(file,string)
    string='PauseNtl' #Pause at reversal field (old file format, -1 if not available)
    tr1=parse_header(file,string)
    tr=np.max((tr0,tr1)) #select Pause value depending on file format
    string='Averaging time' #Measurement averaging time
    tau=parse_header(file,string)
    string='PauseCal' #Pause at calibration point
    tcal=parse_header(file,string)
    string='PauseSat' #Pause at saturation field
    ts=parse_header(file,string)
    string='SlewRate' #Field slewrate
    alpha=parse_header(file,string)
    string='HSat' #Satuation field
    Hs=parse_header(file,string)
    string='Hb2' #upper Hb value for the FORC box
    Hb2=parse_header(file,string)
    string='Hb1' #lower Hb value for the FORC box
    Hb1=parse_header(file,string)
    string='Hc2' #upper Hc value for the FORC box (n.b. Hc1 is assumed to be 0)
    Hc2=parse_header(file,string)
    string='NForc' # Numer of measured FORCs (new file format, -1 if not available)
    N0=parse_header(file,string)
    string='NCrv' # Numer of measured FORCs (old file format, -1 if not available)
    N1=parse_header(file,string)
    N=np.max((N0,N1)) #select Number of FORCs depending on file format
    if unit=='Cgs':
        # convert header fields from CGS to SI (1 T = 1E4 Oe)
        alpha=alpha/1E4 #convert from Oe to T
        Hs=Hs/1E4 #convert from Oe to T
        Hb2=Hb2/1E4 #convert from Oe to T
        Hb1=Hb1/1E4 #convert from Oe to T
    dH = (Hc2-Hb1+Hb2)/N #estimated field spacing
    #now following Elgi's estimate of the measurement time
    nc2 = Hc2/dH
    # Dt1: full per-FORC cycle time; Dt3: offset to the first point (Egli 2013)
    Dt1 = tr + tau + tcal + ts + 2.*(Hs-Hb2-dH)/alpha
    Dt3 = Hb2/alpha
    Npts=int(Fk.size)
    Ft=np.zeros(Npts)
    for i in range(Npts):
        # quadratic growth while the FORC length is still increasing,
        # linear growth once it saturates at 1+nc2 points
        if Fk[i]<=1+nc2:
            Ft[i]=Fk[i]*Dt1+Dt3+Fj[i]*tau+dH/alpha*(Fk[i]*(Fk[i]-1))+(tau-dH/alpha)*(Fk[i]-1)**2
        else:
            Ft[i]=Fk[i]*Dt1+Dt3+Fj[i]*tau+dH/alpha*(Fk[i]*(Fk[i]-1))+(tau-dH/alpha)*((Fk[i]-1)*(1+nc2)-nc2)
    return Ft
def parse_calibration(file):
    """Function to extract measured calibration points from a FORC sequence

    Inputs:
    file: name of data file (string)

    Outputs:
    Hcal: sequence of calibration fields [float, SI units]
    Mcal: sequence of calibration magnetizations [float, SI units]
    tcal: Estimated times at which the calibration points were measured (float, seconds)
    """
    dum=-9999.99 #dum value to indicate break in measurement seqence between FORCs and calibration points
    N0=int(1E6) #assume that any file will have less than 1E6 measurements
    H0=np.zeros(N0)*np.nan #initialize NaN array to contain field values
    M0=np.zeros(N0)*np.nan #initialize NaN array to contain magnetization values
    H0[0]=dum #first field entry is dummy value
    M0[0]=dum #first magnetization entry is dummy value
    count=0 #counter to place values in arrays
    with cd.open(file,"r",encoding='latin9') as fp: #open the data file (latin9 encoding seems to work, UTF and ASCII don't)
        for line in find_data_lines(fp): #does the current line contain measurement data
            count=count+1 #increase counter
            idx = line.find(',') #no comma indicates a blank linw
            if idx>-1: #line contains a comma
                H0[count]=float(line[0:idx]) #assign field value (1st column)
                line=line[idx+1:] #remove the leading part of the line (only characters after the first comma remain)
                idx = line.find(',') #find next comman
                if idx>-1: #comma found in line
                    M0[count]=float(line[0:idx]) #read values up to next comma (assumes 2nd column is magnetizations)
                else: #comma wasn't found
                    M0[count]=float(line) # magnetization value is just the remainder of the line
            else:
                H0[count]=dum #line is blank, so fill with dummy value
                M0[count]=dum #line is blank, so fill with dummy value
    idx_start=np.argmax(H0!=dum) #find the first line that contains data
    M0=M0[idx_start-1:-1] #strip out leading dummy values from magnetizations, leaving 1 dummy at start of vector
    M0=M0[~np.isnan(M0)] #remove any NaNs at the end of the array
    H0=H0[idx_start-1:-1] #strip out leading dummy values from magnetizations, leaving 1 dummy at start of vector
    H0=H0[~np.isnan(H0)] #remove any NaNs at the end of the array
    ## now need to pull out the calibration points, will be after alternate -9999.99 entries
    idxSAT = np.array(np.where(np.isin(H0, dum))) #location of dummy values
    idxSAT = np.ndarray.squeeze(idxSAT) #squeeze into 1D
    idxSAT = idxSAT[0::2]+1 #every second index+1 should be calibration points
    Hcal=H0[idxSAT[0:-1]] #calibration fields
    Mcal=M0[idxSAT[0:-1]] #calibration magnetizations
    tcal=calibration_times(file,Hcal.size) #estimate the time of each calibratio measurement
    unit = parse_units(file)
    if unit=='Cgs': #ensure SI units
        Hcal=Hcal/1E4 #convert from Oe to T
        Mcal=Mcal/1E3 #convert from emu to Am^2
    return Hcal, Mcal, tcal
def parse_measurements(file):
    """Function to extract measurement points from a FORC sequence

    Inputs:
    file: name of data file (string)

    Outputs:
    H: Measurement applied field [float, SI units]
    Hr: Reversal field [float, SI units]
    M: Measured magnetization [float, SI units]
    Fk: Index of measured FORC (int)
    Fj: Index of given measurement within a given FORC (int)
    Ft: Estimated times at which the points were measured (float, seconds)
    dH: Measurement field spacing [float SI units]
    """
    dum=-9999.99 #dum value to indicate break in measurement seqence between FORCs and calibration points
    N0=int(1E6) #assume that any file will have less than 1E6 measurements
    H0=np.zeros(N0)*np.nan #initialize NaN array to contain field values
    M0=np.zeros(N0)*np.nan #initialize NaN array to contain magnetization values
    H0[0]=dum #first field entry is dummy value
    M0[0]=dum #first magnetization entry is dummy value
    count=0 #counter to place values in arrays
    with cd.open(file,"r",encoding='latin9') as fp: #open the data file (latin9 encoding seems to work, UTF and ASCII don't)
        for line in find_data_lines(fp): #does the current line contain measurement data
            count=count+1 #increase counter
            idx = line.find(',') #no comma indicates a blank linw
            if idx>-1: #line contains a comma
                H0[count]=float(line[0:idx]) #assign field value (1st column)
                line=line[idx+1:] #remove the leading part of the line (only characters after the first comma remain)
                idx = line.find(',') #find next comman
                if idx>-1: #comma found in line
                    M0[count]=float(line[0:idx]) #read values up to next comma (assumes 2nd column is magnetizations)
                else: #comma wasn't found
                    M0[count]=float(line) # magnetization value is just the remainder of the line
            else:
                H0[count]=dum #line is blank, so fill with dummy value
                M0[count]=dum #line is blank, so fill with dummy value
    idx_start=np.argmax(H0!=dum) #find the first line that contains data
    M0=M0[idx_start-1:-1] #strip out leading dummy values from magnetizations, leaving 1 dummy at start of vector
    M0=M0[~np.isnan(M0)] #remove any NaNs at the end of the array
    H0=H0[idx_start-1:-1] #strip out leading dummy values from magnetizations, leaving 1 dummy at start of vector
    H0=H0[~np.isnan(H0)] #remove any NaNs at the end of the array
    ## determine indicies of each FORC
    idxSAT = np.array(np.where(np.isin(H0, dum))) #find start address of each blank line
    idxSAT = np.ndarray.squeeze(idxSAT) #squeeze into 1D
    idxSTART = idxSAT[1::2]+1 #find start address of each FORC
    idxEND = idxSAT[2::2]-1 ##find end address of each FORC
    #Extract first FORC to initialize arrays
    M=M0[idxSTART[0]:idxEND[0]+1] #Magnetization values
    H=H0[idxSTART[0]:idxEND[0]+1] #Field values
    Hr=np.ones(idxEND[0]+1-idxSTART[0])*H0[idxSTART[0]] #Reversal field values
    Fk=np.ones(idxEND[0]+1-idxSTART[0]) #index number of FORC
    Fj=np.arange(1,1+idxEND[0]+1-idxSTART[0])# measurement index within given FORC
    #Extract remaining FORCs one by one into into a long-vector
    for i in range(1,idxSTART.size):
        M=np.concatenate((M,M0[idxSTART[i]:idxEND[i]+1]))
        H=np.concatenate((H,H0[idxSTART[i]:idxEND[i]+1]))
        Hr=np.concatenate((Hr,np.ones(idxEND[i]+1-idxSTART[i])*H0[idxSTART[i]]))
        Fk=np.concatenate((Fk,np.ones(idxEND[i]+1-idxSTART[i])+i))
        Fj=np.concatenate((Fj,np.arange(1,1+idxEND[i]+1-idxSTART[i])))
    unit = parse_units(file) #Ensure use of SI units
    if unit=='Cgs':
        H=H/1E4 #Convert Oe into T
        Hr=Hr/1E4 #Convert Oe into T
        M=M/1E3 #Convert emu to Am^2
    dH = np.mean(np.diff(H[Fk==np.max(Fk)])) #mean field spacing
    Ft=measurement_times(file,Fk,Fj) #estimated time of each measurement point
    return H, Hr, M, Fk, Fj, Ft, dH
####### Define VARIFORC window functions #######
def vari_T(u,s):
    """Piecewise VARIFORC taper function.

    For |u| <= s - 1 the weight is 1; it then decays smoothly to 0 at
    |u| = s and stays 0 beyond. Later assignments overwrite earlier ones,
    so the three masks are applied from the outermost region inward.

    Inputs:
        u: numpy array of normalized field offsets
        s: smoothing factor (scalar)
    Outputs:
        array of taper weights, same shape as u
    """
    absu = np.abs(u)
    shifted = absu - s
    out = np.zeros(u.shape)
    # outer shoulder: |u| <= s  ->  2*(|u|-s)^2
    outer = absu <= s
    out[outer] = 2. * shifted[outer] ** 2
    # inner shoulder: |u| <= s - 0.5  ->  1 - 2*(|u|-s+1)^2
    inner = absu <= s - 0.5
    out[inner] = 1. - 2. * (shifted[inner] + 1.) ** 2
    # plateau: |u| <= s - 1  ->  1
    out[absu <= s - 1.] = 1.0
    return out
def vari_W(Hc,Hc0,Hb,Hb0,dH,Sc,Sb):
    """Grid of VARIFORC weights centred on (Hc0, Hb0).

    Inputs:
        Hc, Hb: grids of Hc / Hb coordinates
        Hc0, Hb0: centre of the weighting function
        dH: field spacing
        Sc, Sb: smoothing factors along the Hc and Hb axes
    Outputs:
        array of weights, the product of the two axis tapers
    """
    du = (Hc - Hc0) / dH
    dv = (Hb - Hb0) / dH
    return vari_T(du, Sc) * vari_T(dv, Sb)
def vari_s(s0,s1,lamb,H,dH):
    """Local VARIFORC smoothing factor at field H.

    Inputs:
        s0, s1: minimum and maximum smoothing factors
        lamb: mixing parameter in [0, 1]
        H: field value at which the factor is evaluated
        dH: field spacing
    Outputs:
        scalar smoothing factor: min of the lambda-blended value and the
        field-ratio lower bound
    """
    ratio = np.abs(H) / dH
    bound = np.maximum(s0, ratio)               # field-dependent floor
    blended = (1 - lamb) * s1 + lamb * ratio    # lambda-weighted blend
    return np.min((blended, bound))
def vari_weights(sc0,sc1,lamb_sc,sb0,sb1,lamb_sb,Hc,Hb,dH,Hc0,Hb0):
    """Compute VARIFORC weights for the points near (Hc0, Hb0).

    Inputs:
        sc0, sc1, lamb_sc: Hc-axis smoothing parameters
        sb0, sb1, lamb_sb: Hb-axis smoothing parameters
        Hc, Hb: coordinate arrays of the measurement points
        dH: field spacing
        Hc0, Hb0: centre of the weighting window
    Outputs:
        weights: taper weights for the points inside the window
        idx: boolean mask selecting those points within Hc and Hb
    """
    Sc = vari_s(sc0, sc1, lamb_sc, Hc0, dH)
    Sb = vari_s(sb0, sb1, lamb_sb, Hb0, dH)
    # points strictly inside the smoothing window on both axes
    within = (np.abs(Hc - Hc0) / dH < Sc) & (np.abs(Hb - Hb0) / dH < Sb)
    weights = vari_W(Hc[within], Hc0, Hb[within], Hb0, dH, Sc, Sb)
    return weights, within
"numpy.isin",
"numpy.random.seed",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.abs",
"numpy.polyfit",
"numpy.argmax",
"IPython.display.YouTubeVideo",
"numpy.sum",
"ipywidgets.Text",
"numpy.allclose",
"numpy.ones",
"numpy.isnan",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy... | [((1054, 1277), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "[['Select topic', -1], ['1: Introduction - working with FORCsensei', 0], [\n '2: Plotting options', 1], ['3: download options', 2]]", 'value': '(-1)', 'description': '"""Video tutorials:"""', 'style': 'style'}), "(options=[['Select topic', -1], [\n '1: Introduction - working with FORCsensei', 0], ['2: Plotting options',\n 1], ['3: download options', 2]], value=-1, description=\n 'Video tutorials:', style=style)\n", (1070, 1277), True, 'import ipywidgets as widgets\n'), ((1397, 1440), 'ipywidgets.interactive', 'interactive', (['play_tutorial'], {'index': 'tut_widge'}), '(play_tutorial, index=tut_widge)\n', (1408, 1440), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((1796, 1855), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>Sample preprocessing options</h3>"""'}), "(value='<h3>Sample preprocessing options</h3>')\n", (1808, 1855), True, 'import ipywidgets as widgets\n'), ((1873, 1942), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""To disable mass normalization use a value of -1"""'}), "(value='To disable mass normalization use a value of -1')\n", (1885, 1942), True, 'import ipywidgets as widgets\n'), ((2008, 2075), 'ipywidgets.Text', 'widgets.Text', ([], {'value': 'sample', 'description': '"""Sample name:"""', 'style': 'style'}), "(value=sample, description='Sample name:', style=style)\n", (2020, 2075), True, 'import ipywidgets as widgets\n'), ((2322, 2352), 'ipywidgets.HBox', 'HBox', (['[mass_widge, mass_title]'], {}), '([mass_widge, mass_title])\n', (2326, 2352), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((2420, 2480), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>Select preprocessing options:</h3>"""'}), "(value='<h3>Select preprocessing options:</h3>')\n", (2432, 2480), True, 'import ipywidgets as widgets\n'), ((2504, 2636), 'ipywidgets.FloatSlider', 
'widgets.FloatSlider', ([], {'value': '(70)', 'min': '(1)', 'max': '(100.0)', 'step': '(1)', 'description': '"""Slope correction [%]:"""', 'style': 'style', 'readout_format': '""".0f"""'}), "(value=70, min=1, max=100.0, step=1, description=\n 'Slope correction [%]:', style=style, readout_format='.0f')\n", (2523, 2636), True, 'import ipywidgets as widgets\n'), ((2718, 2803), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""To disable high-field slope correction use a value of 100%"""'}), "(value='To disable high-field slope correction use a value of 100%'\n )\n", (2730, 2803), True, 'import ipywidgets as widgets\n'), ((2818, 2850), 'ipywidgets.HBox', 'HBox', (['[slope_widge, slope_title]'], {}), '([slope_widge, slope_title])\n', (2822, 2850), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((2874, 2947), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Measurement drift correction"""'}), "(value=False, description='Measurement drift correction')\n", (2890, 2947), True, 'import ipywidgets as widgets\n'), ((2964, 3036), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Remove first point artifact"""'}), "(value=False, description='Remove first point artifact')\n", (2980, 3036), True, 'import ipywidgets as widgets\n'), ((3053, 3124), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Remove last point artifact"""'}), "(value=False, description='Remove last point artifact')\n", (3069, 3124), True, 'import ipywidgets as widgets\n'), ((3145, 3217), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Remove measurement outliers"""'}), "(value=False, description='Remove measurement outliers')\n", (3161, 3217), True, 'import ipywidgets as widgets\n'), ((3238, 3354), 'ipywidgets.VBox', 'VBox', (['[correct_title, sample_widge, mass_widge1, slope_widge1, drift_widge,\n fpa_widge, 
lpa_widge, outlier_widge]'], {}), '([correct_title, sample_widge, mass_widge1, slope_widge1, drift_widge,\n fpa_widge, lpa_widge, outlier_widge])\n', (3242, 3354), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((3367, 3380), 'ipywidgets.Tab', 'widgets.Tab', ([], {}), '()\n', (3378, 3380), True, 'import ipywidgets as widgets\n'), ((6572, 6599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6582, 6599), True, 'import matplotlib.pyplot as plt\n'), ((6788, 6832), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {'bbox_inches': '"""tight"""'}), "(outputfile, bbox_inches='tight')\n", (6799, 6832), True, 'import matplotlib.pyplot as plt\n'), ((6837, 6847), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6845, 6847), True, 'import matplotlib.pyplot as plt\n'), ((9060, 9091), 'numpy.polyfit', 'np.polyfit', (['H[Hidx]', 'M[Hidx]', '(1)'], {}), '(H[Hidx], M[Hidx], 1)\n', (9070, 9091), True, 'import numpy as np\n'), ((11438, 11453), 'numpy.ones', 'np.ones', (['M.size'], {}), '(M.size)\n', (11445, 11453), True, 'import numpy as np\n'), ((12303, 12319), 'numpy.ones', 'np.ones', (['Fk.size'], {}), '(Fk.size)\n', (12310, 12319), True, 'import numpy as np\n'), ((14300, 14309), 'numpy.min', 'np.min', (['H'], {}), '(H)\n', (14306, 14309), True, 'import numpy as np\n'), ((14321, 14330), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (14327, 14330), True, 'import numpy as np\n'), ((14342, 14353), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (14350, 14353), True, 'import numpy as np\n'), ((14365, 14376), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (14373, 14376), True, 'import numpy as np\n'), ((14432, 14468), 'numpy.linspace', 'np.linspace', (['Hmin', 'Hmax', '(nH * 50 + 1)'], {}), '(Hmin, Hmax, nH * 50 + 1)\n', (14443, 14468), True, 'import numpy as np\n'), ((14745, 14762), 'numpy.zeros', 'np.zeros', (['Hi.size'], {}), '(Hi.size)\n', (14753, 14762), True, 'import 
numpy as np\n'), ((15837, 15935), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<hr style="height:3px;border:none;color:#333;background-color:#333;" />"""'}), '(value=\n \'<hr style="height:3px;border:none;color:#333;background-color:#333;" />\')\n', (15849, 15935), True, 'import ipywidgets as widgets\n'), ((15951, 15999), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>Select data type:</h3>"""'}), "(value='<h3>Select data type:</h3>')\n", (15963, 15999), True, 'import ipywidgets as widgets\n'), ((16014, 16130), 'ipywidgets.RadioButtons', 'widgets.RadioButtons', ([], {'options': "['Magnetisations', 'Lower branch subtracted']", 'value': '"""Magnetisations"""', 'style': 'style'}), "(options=['Magnetisations', 'Lower branch subtracted'],\n value='Magnetisations', style=style)\n", (16034, 16130), True, 'import ipywidgets as widgets\n'), ((16246, 16302), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>Set smoothing parameters:</h3>"""'}), "(value='<h3>Set smoothing parameters:</h3>')\n", (16258, 16302), True, 'import ipywidgets as widgets\n'), ((16339, 16547), 'ipywidgets.FloatRangeSlider', 'widgets.FloatRangeSlider', ([], {'value': '[3, 7]', 'min': '(2)', 'max': '(10)', 'step': '(0.25)', 'description': '"""Select $s_c$ range:"""', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '""".2f"""', 'style': 'style'}), "(value=[3, 7], min=2, max=10, step=0.25,\n description='Select $s_c$ range:', continuous_update=False, orientation\n ='horizontal', readout=True, readout_format='.2f', style=style)\n", (16363, 16547), True, 'import ipywidgets as widgets\n'), ((16642, 16850), 'ipywidgets.FloatRangeSlider', 'widgets.FloatRangeSlider', ([], {'value': '[3, 7]', 'min': '(2)', 'max': '(10)', 'step': '(0.25)', 'description': '"""Select $s_u$ range:"""', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '""".2f"""', 'style': 'style'}), 
"(value=[3, 7], min=2, max=10, step=0.25,\n description='Select $s_u$ range:', continuous_update=False, orientation\n ='horizontal', readout=True, readout_format='.2f', style=style)\n", (16666, 16850), True, 'import ipywidgets as widgets\n'), ((16960, 17182), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'value': '(0.05)', 'min': '(0)', 'max': '(0.2)', 'step': '(0.025)', 'description': '"""Select $\\\\lambda_{c}$:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '""".3f"""', 'style': 'style'}), "(value=0.05, min=0, max=0.2, step=0.025, description=\n 'Select $\\\\lambda_{c}$:', disabled=False, continuous_update=False,\n orientation='horizontal', readout=True, readout_format='.3f', style=style)\n", (16979, 17182), True, 'import ipywidgets as widgets\n'), ((17291, 17513), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'value': '(0.05)', 'min': '(0)', 'max': '(0.2)', 'step': '(0.025)', 'description': '"""Select $\\\\lambda_{u}$:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '""".3f"""', 'style': 'style'}), "(value=0.05, min=0, max=0.2, step=0.025, description=\n 'Select $\\\\lambda_{u}$:', disabled=False, continuous_update=False,\n orientation='horizontal', readout=True, readout_format='.3f', style=style)\n", (17310, 17513), True, 'import ipywidgets as widgets\n'), ((17618, 17670), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>Specify downsampling:</h3>"""'}), "(value='<h3>Specify downsampling:</h3>')\n", (17630, 17670), True, 'import ipywidgets as widgets\n'), ((17688, 17907), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'value': '(1000)', 'min': '(100)', 'max': "X['M'].size", 'step': '(1)', 'description': '"""Number of points:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': 
'"""d"""', 'style': 'style'}), "(value=1000, min=100, max=X['M'].size, step=1, description\n ='Number of points:', disabled=False, continuous_update=False,\n orientation='horizontal', readout=True, readout_format='d', style=style)\n", (17705, 17907), True, 'import ipywidgets as widgets\n'), ((18035, 18065), 'ipywidgets.VBox', 'VBox', (['[down_title, down_widge]'], {}), '([down_title, down_widge])\n', (18039, 18065), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((18074, 18167), 'ipywidgets.VBox', 'VBox', (['[M_title, M_widge, HL, S_title, Sc_widge, Sb_widge, lambdaSc_widge,\n lambdaSb_widge]'], {}), '([M_title, M_widge, HL, S_title, Sc_widge, Sb_widge, lambdaSc_widge,\n lambdaSb_widge])\n', (18078, 18167), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((18444, 18458), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (18456, 18458), False, 'import os\n'), ((18493, 18545), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': '"""<h3>DASK multiprocessing:</h3>"""'}), "(value='<h3>DASK multiprocessing:</h3>')\n", (18505, 18545), True, 'import ipywidgets as widgets\n'), ((18584, 18804), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'value': "X['ncore']", 'min': '(1)', 'max': "X['ncore']", 'step': '(1)', 'description': '"""Number of cores:"""', 'disabled': '(False)', 'continuous_update': '(False)', 'orientation': '"""horizontal"""', 'readout': '(True)', 'readout_format': '"""d"""', 'style': 'style'}), "(value=X['ncore'], min=1, max=X['ncore'], step=1,\n description='Number of cores:', disabled=False, continuous_update=False,\n orientation='horizontal', readout=True, readout_format='d', style=style)\n", (18601, 18804), True, 'import ipywidgets as widgets\n'), ((18946, 18976), 'ipywidgets.VBox', 'VBox', (['[dask_title, dask_widge]'], {}), '([dask_title, dask_widge])\n', (18950, 18976), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((19040, 19053), 
'ipywidgets.Tab', 'widgets.Tab', ([], {}), '()\n', (19051, 19053), True, 'import ipywidgets as widgets\n'), ((19674, 19684), 'numpy.diag', 'np.diag', (['w'], {}), '(w)\n', (19681, 19684), True, 'import numpy as np\n'), ((19732, 19742), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (19739, 19742), True, 'import numpy as np\n'), ((19789, 19803), 'numpy.dot', 'np.dot', (['XTW', 'y'], {}), '(XTW, y)\n', (19795, 19803), True, 'import numpy as np\n'), ((19819, 19833), 'scipy.linalg.eigh', 'linalg.eigh', (['M'], {}), '(M)\n', (19830, 19833), False, 'from scipy import linalg\n'), ((19845, 19858), 'numpy.real', 'np.real', (['lamb'], {}), '(lamb)\n', (19852, 19858), True, 'import numpy as np\n'), ((21004, 21022), 'numpy.zeros', 'np.zeros', (['Hc0.size'], {}), '(Hc0.size)\n', (21012, 21022), True, 'import numpy as np\n'), ((21034, 21052), 'numpy.zeros', 'np.zeros', (['Hc0.size'], {}), '(Hc0.size)\n', (21042, 21052), True, 'import numpy as np\n'), ((22489, 22517), 'numpy.column_stack', 'np.column_stack', (['(Midx, rho)'], {}), '((Midx, rho))\n', (22504, 22517), True, 'import numpy as np\n'), ((23110, 23135), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['Hc', 'Hb'], {}), '(Hc, Hb)\n', (23127, 23135), True, 'import matplotlib.tri as tri\n'), ((23155, 23193), 'matplotlib.tri.LinearTriInterpolator', 'tri.LinearTriInterpolator', (['triang', 'rho'], {}), '(triang, rho)\n', (23180, 23193), True, 'import matplotlib.tri as tri\n'), ((23207, 23226), 'numpy.meshgrid', 'np.meshgrid', (['xi', 'yi'], {}), '(xi, yi)\n', (23218, 23226), True, 'import numpy as np\n'), ((23610, 23640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4.75)'}), '(figsize=(12, 4.75))\n', (23620, 23640), True, 'import matplotlib.pyplot as plt\n'), ((23708, 23727), 'numpy.random.seed', 'np.random.seed', (['(999)'], {}), '(999)\n', (23722, 23727), True, 'import numpy as np\n'), ((24156, 24182), 'numpy.percentile', 'np.percentile', (['MC_psi', '(2.5)'], {}), '(MC_psi, 2.5)\n', (24169, 
24182), True, 'import numpy as np\n'), ((24193, 24220), 'numpy.percentile', 'np.percentile', (['MC_psi', '(97.5)'], {}), '(MC_psi, 97.5)\n', (24206, 24220), True, 'import numpy as np\n'), ((26934, 26952), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26950, 26952), True, 'import matplotlib.pyplot as plt\n'), ((26957, 26980), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {}), '(outputfile)\n', (26968, 26980), True, 'import matplotlib.pyplot as plt\n'), ((26985, 26995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26993, 26995), True, 'import matplotlib.pyplot as plt\n'), ((27301, 27331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4.75)'}), '(figsize=(12, 4.75))\n', (27311, 27331), True, 'import matplotlib.pyplot as plt\n'), ((30539, 30557), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30555, 30557), True, 'import matplotlib.pyplot as plt\n'), ((30562, 30585), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {}), '(outputfile)\n', (30573, 30585), True, 'import matplotlib.pyplot as plt\n'), ((30590, 30600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30598, 30600), True, 'import matplotlib.pyplot as plt\n'), ((33451, 33462), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (33459, 33462), True, 'import numpy as np\n'), ((34208, 34251), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""forc_cmap"""', 'cdict'], {}), "('forc_cmap', cdict)\n", (34231, 34251), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((37812, 37897), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Include color scalebar"""', 'style': 'style'}), "(value=False, description='Include color scalebar', style=style\n )\n", (37828, 37897), True, 'import ipywidgets as widgets\n'), ((37976, 38307), 'ipywidgets.Select', 'widgets.Select', ([], {'options': "[['Select contour frequency', -1], 
['Every level', 1], ['Every 2nd level', \n 2], ['Every 3rd level', 3], ['Every 4th level', 4], ['Every 5th level',\n 5], ['Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]]", 'value': '(-1)', 'rows': '(1)', 'description': '"""Plot contours"""', 'style': 'style'}), "(options=[['Select contour frequency', -1], ['Every level', 1\n ], ['Every 2nd level', 2], ['Every 3rd level', 3], ['Every 4th level', \n 4], ['Every 5th level', 5], ['Every 10th level', 10], [\n 'Every 20th level', 20], ['Every 50th level', 50]], value=-1, rows=1,\n description='Plot contours', style=style)\n", (37990, 38307), True, 'import ipywidgets as widgets\n'), ((38494, 38610), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'value': '(1.0)', 'min': '(0.5)', 'max': '(3.0)', 'step': '(0.5)', 'description': '"""Contour line width [pts]"""', 'style': 'style'}), "(value=1.0, min=0.5, max=3.0, step=0.5, description=\n 'Contour line width [pts]', style=style)\n", (38513, 38610), True, 'import ipywidgets as widgets\n'), ((38659, 38730), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Download plot"""', 'style': 'style'}), "(value=False, description='Download plot', style=style)\n", (38675, 38730), True, 'import ipywidgets as widgets\n'), ((38804, 38997), 'ipywidgets.Select', 'widgets.Select', ([], {'options': "[['20', 20], ['30', 30], ['50', 50], ['75', 75], ['100', 100], ['200', 200],\n ['500', 500]]", 'value': '(50)', 'rows': '(1)', 'description': '"""Number of color levels"""', 'style': 'style'}), "(options=[['20', 20], ['30', 30], ['50', 50], ['75', 75], [\n '100', 100], ['200', 200], ['500', 500]], value=50, rows=1, description\n ='Number of color levels', style=style)\n", (38818, 38997), True, 'import ipywidgets as widgets\n'), ((39051, 39146), 'ipywidgets.FloatText', 'widgets.FloatText', ([], {'value': '(0)', 'description': '"""Minimum $\\\\mu_0H_c$ [T]"""', 'style': 'style', 'step': '(0.001)'}), "(value=0, 
description='Minimum $\\\\mu_0H_c$ [T]', style=\n style, step=0.001)\n", (39068, 39146), True, 'import ipywidgets as widgets\n'), ((40363, 40376), 'ipywidgets.Tab', 'widgets.Tab', ([], {}), '()\n', (40374, 40376), True, 'import ipywidgets as widgets\n'), ((40738, 40764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (40748, 40764), True, 'import matplotlib.pyplot as plt\n'), ((41921, 41942), 'numpy.sort', 'np.sort', (['(xmin, xmax)'], {}), '((xmin, xmax))\n', (41928, 41942), True, 'import numpy as np\n'), ((42008, 42029), 'numpy.sort', 'np.sort', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (42015, 42029), True, 'import numpy as np\n'), ((42681, 42691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (42689, 42691), True, 'import matplotlib.pyplot as plt\n'), ((47836, 47854), 'numpy.max', 'np.max', (['(tr0, tr1)'], {}), '((tr0, tr1))\n', (47842, 47854), True, 'import numpy as np\n'), ((48836, 48852), 'numpy.max', 'np.max', (['(N0, N1)'], {}), '((N0, N1))\n', (48842, 48852), True, 'import numpy as np\n'), ((49353, 49367), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (49361, 49367), True, 'import numpy as np\n'), ((50551, 50569), 'numpy.max', 'np.max', (['(tr0, tr1)'], {}), '((tr0, tr1))\n', (50557, 50569), True, 'import numpy as np\n'), ((51551, 51567), 'numpy.max', 'np.max', (['(N0, N1)'], {}), '((N0, N1))\n', (51557, 51567), True, 'import numpy as np\n'), ((52044, 52058), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (52052, 52058), True, 'import numpy as np\n'), ((54346, 54366), 'numpy.argmax', 'np.argmax', (['(H0 != dum)'], {}), '(H0 != dum)\n', (54355, 54366), True, 'import numpy as np\n'), ((54971, 54997), 'numpy.ndarray.squeeze', 'np.ndarray.squeeze', (['idxSAT'], {}), '(idxSAT)\n', (54989, 54997), True, 'import numpy as np\n'), ((57619, 57639), 'numpy.argmax', 'np.argmax', (['(H0 != dum)'], {}), '(H0 != dum)\n', (57628, 57639), True, 'import numpy as np\n'), ((58203, 58229), 
'numpy.ndarray.squeeze', 'np.ndarray.squeeze', (['idxSAT'], {}), '(idxSAT)\n', (58221, 58229), True, 'import numpy as np\n'), ((58612, 58648), 'numpy.ones', 'np.ones', (['(idxEND[0] + 1 - idxSTART[0])'], {}), '(idxEND[0] + 1 - idxSTART[0])\n', (58619, 58648), True, 'import numpy as np\n'), ((58674, 58719), 'numpy.arange', 'np.arange', (['(1)', '(1 + idxEND[0] + 1 - idxSTART[0])'], {}), '(1, 1 + idxEND[0] + 1 - idxSTART[0])\n', (58683, 58719), True, 'import numpy as np\n'), ((59640, 59657), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (59648, 59657), True, 'import numpy as np\n'), ((59686, 59695), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (59692, 59695), True, 'import numpy as np\n'), ((60416, 60432), 'numpy.min', 'np.min', (['(LH, RH)'], {}), '((LH, RH))\n', (60422, 60432), True, 'import numpy as np\n'), ((891, 938), 'IPython.display.YouTubeVideo', 'YouTubeVideo', ([], {'id': 'tutorial[index]', 'autoplay': '(True)'}), '(id=tutorial[index], autoplay=True)\n', (903, 938), False, 'from IPython.display import YouTubeVideo\n'), ((2122, 2194), 'ipywidgets.FloatText', 'widgets.FloatText', ([], {'value': '(-1)', 'description': '"""Sample mass (g):"""', 'style': 'style'}), "(value=-1, description='Sample mass (g):', style=style)\n", (2139, 2194), True, 'import ipywidgets as widgets\n'), ((2227, 2301), 'ipywidgets.FloatText', 'widgets.FloatText', ([], {'value': 'mass', 'description': '"""Sample mass (g):"""', 'style': 'style'}), "(value=mass, description='Sample mass (g):', style=style)\n", (2244, 2301), True, 'import ipywidgets as widgets\n'), ((9947, 9963), 'numpy.zeros', 'np.zeros', (['M.size'], {}), '(M.size)\n', (9955, 9963), True, 'import numpy as np\n'), ((10189, 10200), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (10195, 10200), True, 'import numpy as np\n'), ((10714, 10730), 'numpy.zeros', 'np.zeros', (['M.size'], {}), '(M.size)\n', (10722, 10730), True, 'import numpy as np\n'), ((10853, 10864), 'numpy.sum', 'np.sum', (['idx'], {}), 
'(idx)\n', (10859, 10864), True, 'import numpy as np\n'), ((11388, 11397), 'numpy.std', 'np.std', (['R'], {}), '(R)\n', (11394, 11397), True, 'import numpy as np\n'), ((11518, 11527), 'numpy.abs', 'np.abs', (['R'], {}), '(R)\n', (11524, 11527), True, 'import numpy as np\n'), ((11775, 11785), 'numpy.max', 'np.max', (['Fk'], {}), '(Fk)\n', (11781, 11785), True, 'import numpy as np\n'), ((11853, 11872), 'numpy.argsort', 'np.argsort', (['Fj[idx]'], {}), '(Fj[idx])\n', (11863, 11872), True, 'import numpy as np\n'), ((12283, 12293), 'numpy.max', 'np.max', (['Fk'], {}), '(Fk)\n', (12289, 12293), True, 'import numpy as np\n'), ((12373, 12388), 'numpy.sum', 'np.sum', (['(Fk == i)'], {}), '(Fk == i)\n', (12379, 12388), True, 'import numpy as np\n'), ((13477, 13515), 'numpy.interp', 'np.interp', (['Ft', 'tcal', 'Mcal'], {'left': 'np.nan'}), '(Ft, tcal, Mcal, left=np.nan)\n', (13486, 13515), True, 'import numpy as np\n'), ((14622, 14648), 'numpy.concatenate', 'np.concatenate', (['(Hbar, H0)'], {}), '((Hbar, H0))\n', (14636, 14648), True, 'import numpy as np\n'), ((14663, 14689), 'numpy.concatenate', 'np.concatenate', (['(Mbar, M0)'], {}), '((Mbar, M0))\n', (14677, 14689), True, 'import numpy as np\n'), ((14888, 14923), 'numpy.polyfit', 'np.polyfit', (['Hbar[idx]', 'Mbar[idx]', '(2)'], {}), '(Hbar[idx], Mbar[idx], 2)\n', (14898, 14923), True, 'import numpy as np\n'), ((14940, 14960), 'numpy.polyval', 'np.polyval', (['p', 'Hi[i]'], {}), '(p, Hi[i])\n', (14950, 14960), True, 'import numpy as np\n'), ((15011, 15066), 'numpy.interp', 'np.interp', (['H', 'Hlower', 'Mlower'], {'left': 'np.nan', 'right': 'np.nan'}), '(H, Hlower, Mlower, left=np.nan, right=np.nan)\n', (15020, 15066), True, 'import numpy as np\n'), ((19758, 19776), 'numpy.size', 'np.size', (['M'], {'axis': '(1)'}), '(M, axis=1)\n', (19765, 19776), True, 'import numpy as np\n'), ((20000, 20054), 'numpy.dot', 'np.dot', (['VhT', '(Vh / (lamb + alpha / beta)[:, np.newaxis])'], {}), '(VhT, Vh / (lamb + alpha / beta)[:, 
np.newaxis])\n', (20006, 20054), True, 'import numpy as np\n'), ((20086, 20104), 'numpy.dot', 'np.dot', (['Wbar', 'XT_y'], {}), '(Wbar, XT_y)\n', (20092, 20104), True, 'import numpy as np\n'), ((20156, 20185), 'numpy.sum', 'np.sum', (['(lamb / (alpha + lamb))'], {}), '(lamb / (alpha + lamb))\n', (20162, 20185), True, 'import numpy as np\n'), ((20407, 20438), 'numpy.allclose', 'np.allclose', (['ab0', '[alpha, beta]'], {}), '(ab0, [alpha, beta])\n', (20418, 20438), True, 'import numpy as np\n'), ((20570, 20583), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (20576, 20583), True, 'import numpy as np\n'), ((20601, 20613), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (20607, 20613), True, 'import numpy as np\n'), ((20687, 20704), 'numpy.sum', 'np.sum', (['(Wbar ** 2)'], {}), '(Wbar ** 2)\n', (20693, 20704), True, 'import numpy as np\n'), ((20765, 20784), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (20771, 20784), True, 'import numpy as np\n'), ((21106, 21117), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (21114, 21117), True, 'import numpy as np\n'), ((21437, 21459), 'numpy.dot', 'np.dot', (['X[idx, 0:6]', 'p'], {}), '(X[idx, 0:6], p)\n', (21443, 21459), True, 'import numpy as np\n'), ((22341, 22356), 'numpy.argmax', 'np.argmax', (['ln_p'], {}), '(ln_p)\n', (22350, 22356), True, 'import numpy as np\n'), ((22866, 22891), 'numpy.ceil', 'np.ceil', (['((Hc2 - Hc1) / dH)'], {}), '((Hc2 - Hc1) / dH)\n', (22873, 22891), True, 'import numpy as np\n'), ((22925, 22950), 'numpy.ceil', 'np.ceil', (['((Hb2 - Hb1) / dH)'], {}), '((Hb2 - Hb1) / dH)\n', (22932, 22950), True, 'import numpy as np\n'), ((23806, 23844), 'numpy.random.randint', 'np.random.randint', (['(0)', 'Hc.size', 'Hc.size'], {}), '(0, Hc.size, Hc.size)\n', (23823, 23844), True, 'import numpy as np\n'), ((33326, 33335), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (33332, 33335), True, 'import numpy as np\n'), ((33391, 33400), 'numpy.min', 'np.min', (['Z'], {}), '(Z)\n', (33397, 
33400), True, 'import numpy as np\n'), ((33414, 33423), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (33420, 33423), True, 'import numpy as np\n'), ((34376, 34418), 'dask.distributed.LocalCluster', 'LocalCluster', ([], {'n_workers': "X['workers'].value"}), "(n_workers=X['workers'].value)\n", (34388, 34418), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((34441, 34450), 'dask.distributed.Client', 'Client', (['c'], {}), '(c)\n', (34447, 34450), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((35424, 35443), 'numpy.random.seed', 'np.random.seed', (['(999)'], {}), '(999)\n', (35438, 35443), True, 'import numpy as np\n'), ((35805, 35867), 'numpy.random.choice', 'np.random.choice', (['M.size'], {'size': "X['Ndown'].value", 'replace': '(False)'}), "(M.size, size=X['Ndown'].value, replace=False)\n", (35821, 35867), True, 'import numpy as np\n'), ((35943, 35975), 'numpy.array_split', 'np.array_split', (["X['Hci']", 'Nsplit'], {}), "(X['Hci'], Nsplit)\n", (35957, 35975), True, 'import numpy as np\n'), ((35989, 36021), 'numpy.array_split', 'np.array_split', (["X['Hbi']", 'Nsplit'], {}), "(X['Hbi'], Nsplit)\n", (36003, 36021), True, 'import numpy as np\n'), ((36045, 36071), 'numpy.array_split', 'np.array_split', (['Hc', 'Nsplit'], {}), '(Hc, Nsplit)\n', (36059, 36071), True, 'import numpy as np\n'), ((36085, 36111), 'numpy.array_split', 'np.array_split', (['Hb', 'Nsplit'], {}), '(Hb, Nsplit)\n', (36099, 36111), True, 'import numpy as np\n'), ((36669, 36713), 'numpy.concatenate', 'np.concatenate', (['(Midx, results[i + 1][:, 0])'], {}), '((Midx, results[i + 1][:, 0]))\n', (36683, 36713), True, 'import numpy as np\n'), ((36722, 36765), 'numpy.concatenate', 'np.concatenate', (['(rho, results[i + 1][:, 1])'], {}), '((rho, results[i + 1][:, 1]))\n', (36736, 36765), True, 'import numpy as np\n'), ((40524, 40549), 'ipywidgets.VBox', 'VBox', ([], {'children': 'x.children'}), '(children=x.children)\n', (40528, 40549), False, 'from 
ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((42593, 42646), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(outputfile, dpi=300, bbox_inches='tight')\n", (42604, 42646), True, 'import matplotlib.pyplot as plt\n'), ((44403, 44440), 'codecs.open', 'cd.open', (['file', '"""r"""'], {'encoding': '"""latin9"""'}), "(file, 'r', encoding='latin9')\n", (44410, 44440), True, 'import codecs as cd\n'), ((45340, 45377), 'codecs.open', 'cd.open', (['file', '"""r"""'], {'encoding': '"""latin9"""'}), "(file, 'r', encoding='latin9')\n", (45347, 45377), True, 'import codecs as cd\n'), ((46170, 46207), 'codecs.open', 'cd.open', (['file', '"""r"""'], {'encoding': '"""latin9"""'}), "(file, 'r', encoding='latin9')\n", (46177, 46207), True, 'import codecs as cd\n'), ((52940, 52952), 'numpy.zeros', 'np.zeros', (['N0'], {}), '(N0)\n', (52948, 52952), True, 'import numpy as np\n'), ((53013, 53025), 'numpy.zeros', 'np.zeros', (['N0'], {}), '(N0)\n', (53021, 53025), True, 'import numpy as np\n'), ((53249, 53286), 'codecs.open', 'cd.open', (['file', '"""r"""'], {'encoding': '"""latin9"""'}), "(file, 'r', encoding='latin9')\n", (53256, 53286), True, 'import codecs as cd\n'), ((56213, 56225), 'numpy.zeros', 'np.zeros', (['N0'], {}), '(N0)\n', (56221, 56225), True, 'import numpy as np\n'), ((56286, 56298), 'numpy.zeros', 'np.zeros', (['N0'], {}), '(N0)\n', (56294, 56298), True, 'import numpy as np\n'), ((56522, 56559), 'codecs.open', 'cd.open', (['file', '"""r"""'], {'encoding': '"""latin9"""'}), "(file, 'r', encoding='latin9')\n", (56529, 56559), True, 'import codecs as cd\n'), ((58533, 58569), 'numpy.ones', 'np.ones', (['(idxEND[0] + 1 - idxSTART[0])'], {}), '(idxEND[0] + 1 - idxSTART[0])\n', (58540, 58569), True, 'import numpy as np\n'), ((58862, 58912), 'numpy.concatenate', 'np.concatenate', (['(M, M0[idxSTART[i]:idxEND[i] + 1])'], {}), '((M, M0[idxSTART[i]:idxEND[i] + 1]))\n', (58876, 58912), True, 
'import numpy as np\n'), ((58920, 58970), 'numpy.concatenate', 'np.concatenate', (['(H, H0[idxSTART[i]:idxEND[i] + 1])'], {}), '((H, H0[idxSTART[i]:idxEND[i] + 1]))\n', (58934, 58970), True, 'import numpy as np\n'), ((3909, 3919), 'numpy.max', 'np.max', (['Fk'], {}), '(Fk)\n', (3915, 3919), True, 'import numpy as np\n'), ((5031, 5045), 'numpy.abs', 'np.abs', (['Xticks'], {}), '(Xticks)\n', (5037, 5045), True, 'import numpy as np\n'), ((7059, 7069), 'numpy.max', 'np.max', (['Fk'], {}), '(Fk)\n', (7065, 7069), True, 'import numpy as np\n'), ((9042, 9051), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (9048, 9051), True, 'import numpy as np\n'), ((11481, 11490), 'numpy.abs', 'np.abs', (['R'], {}), '(R)\n', (11487, 11490), True, 'import numpy as np\n'), ((11743, 11753), 'numpy.min', 'np.min', (['Fk'], {}), '(Fk)\n', (11749, 11753), True, 'import numpy as np\n'), ((12568, 12578), 'numpy.min', 'np.min', (['Fk'], {}), '(Fk)\n', (12574, 12578), True, 'import numpy as np\n'), ((13090, 13100), 'numpy.min', 'np.min', (['Fk'], {}), '(Fk)\n', (13096, 13100), True, 'import numpy as np\n'), ((15129, 15144), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15137, 15144), True, 'import numpy as np\n'), ((15173, 15188), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15181, 15188), True, 'import numpy as np\n'), ((15215, 15230), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15223, 15230), True, 'import numpy as np\n'), ((15259, 15274), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15267, 15274), True, 'import numpy as np\n'), ((15301, 15316), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15309, 15316), True, 'import numpy as np\n'), ((15353, 15368), 'numpy.isnan', 'np.isnan', (['Mcorr'], {}), '(Mcorr)\n', (15361, 15368), True, 'import numpy as np\n'), ((19620, 19637), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (19634, 19637), True, 'import numpy as np\n'), ((19649, 19658), 'numpy.sum', 'np.sum', (['w'], {}), 
'(w)\n', (19655, 19658), True, 'import numpy as np\n'), ((20641, 20664), 'numpy.square', 'np.square', (['(y - X @ Wbar)'], {}), '(y - X @ Wbar)\n', (20650, 20664), True, 'import numpy as np\n'), ((20719, 20743), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Wprec'], {}), '(Wprec)\n', (20736, 20743), True, 'import numpy as np\n'), ((21319, 21344), 'numpy.sqrt', 'np.sqrt', (['w[:, np.newaxis]'], {}), '(w[:, np.newaxis])\n', (21326, 21344), True, 'import numpy as np\n'), ((21366, 21376), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (21373, 21376), True, 'import numpy as np\n'), ((21387, 21419), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['Aw', 'Bw'], {'rcond': '(0)'}), '(Aw, Bw, rcond=0)\n', (21402, 21419), True, 'import numpy as np\n'), ((21475, 21503), 'numpy.mean', 'np.mean', (['((M[idx] - hat) ** 2)'], {}), '((M[idx] - hat) ** 2)\n', (21482, 21503), True, 'import numpy as np\n'), ((23268, 23286), 'numpy.min', 'np.min', (['Xi[Xi > 0]'], {}), '(Xi[Xi > 0])\n', (23274, 23286), True, 'import numpy as np\n'), ((23863, 23902), 'numpy.sum', 'np.sum', (['((Midx[bs] > 0) & (Midx[bs] < 4))'], {}), '((Midx[bs] > 0) & (Midx[bs] < 4))\n', (23869, 23902), True, 'import numpy as np\n'), ((33156, 33165), 'numpy.min', 'np.min', (['Z'], {}), '(Z)\n', (33162, 33165), True, 'import numpy as np\n'), ((33168, 33177), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (33174, 33177), True, 'import numpy as np\n'), ((34566, 34581), 'numpy.ones', 'np.ones', (['H.size'], {}), '(H.size)\n', (34573, 34581), True, 'import numpy as np\n'), ((39611, 39620), 'ipywidgets.fixed', 'fixed', (['Xi'], {}), '(Xi)\n', (39616, 39620), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((39652, 39661), 'ipywidgets.fixed', 'fixed', (['Yi'], {}), '(Yi)\n', (39657, 39661), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((39693, 39702), 'ipywidgets.fixed', 'fixed', (['Zi'], {}), '(Zi)\n', (39698, 39702), False, 'from ipywidgets import 
interact, interactive, fixed, Layout, VBox, HBox\n'), ((39743, 39761), 'ipywidgets.fixed', 'fixed', (["X['sample']"], {}), "(X['sample'])\n", (39748, 39761), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((39799, 39815), 'ipywidgets.fixed', 'fixed', (["X['mass']"], {}), "(X['mass'])\n", (39804, 39815), False, 'from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\n'), ((54553, 54565), 'numpy.isnan', 'np.isnan', (['M0'], {}), '(M0)\n', (54561, 54565), True, 'import numpy as np\n'), ((54733, 54745), 'numpy.isnan', 'np.isnan', (['H0'], {}), '(H0)\n', (54741, 54745), True, 'import numpy as np\n'), ((54913, 54929), 'numpy.isin', 'np.isin', (['H0', 'dum'], {}), '(H0, dum)\n', (54920, 54929), True, 'import numpy as np\n'), ((57826, 57838), 'numpy.isnan', 'np.isnan', (['M0'], {}), '(M0)\n', (57834, 57838), True, 'import numpy as np\n'), ((58006, 58018), 'numpy.isnan', 'np.isnan', (['H0'], {}), '(H0)\n', (58014, 58018), True, 'import numpy as np\n'), ((58132, 58148), 'numpy.isin', 'np.isin', (['H0', 'dum'], {}), '(H0, dum)\n', (58139, 58148), True, 'import numpy as np\n'), ((60347, 60356), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (60353, 60356), True, 'import numpy as np\n'), ((20349, 20372), 'numpy.square', 'np.square', (['(y - X @ Wbar)'], {}), '(y - X @ Wbar)\n', (20358, 20372), True, 'import numpy as np\n'), ((26738, 26769), 'numpy.sum', 'np.sum', (['((Midx > 0) & (Midx < 4))'], {}), '((Midx > 0) & (Midx < 4))\n', (26744, 26769), True, 'import numpy as np\n'), ((30343, 30374), 'numpy.sum', 'np.sum', (['((Midx > 0) & (Midx < 4))'], {}), '((Midx > 0) & (Midx < 4))\n', (30349, 30374), True, 'import numpy as np\n'), ((33296, 33305), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (33302, 33305), True, 'import numpy as np\n'), ((36466, 36476), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (36473, 36476), True, 'import numpy as np\n'), ((39183, 39203), 'numpy.round', 'np.round', (['(Hc2 * 1000)'], {}), '(Hc2 * 
1000)\n', (39191, 39203), True, 'import numpy as np\n'), ((39309, 39337), 'numpy.round', 'np.round', (['((Hb1 - Hc2) * 1000)'], {}), '((Hb1 - Hc2) * 1000)\n', (39317, 39337), True, 'import numpy as np\n'), ((39441, 39461), 'numpy.round', 'np.round', (['(Hb2 * 1000)'], {}), '(Hb2 * 1000)\n', (39449, 39461), True, 'import numpy as np\n'), ((59146, 59191), 'numpy.arange', 'np.arange', (['(1)', '(1 + idxEND[i] + 1 - idxSTART[i])'], {}), '(1, 1 + idxEND[i] + 1 - idxSTART[i])\n', (59155, 59191), True, 'import numpy as np\n'), ((60387, 60396), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (60393, 60396), True, 'import numpy as np\n'), ((60605, 60621), 'numpy.abs', 'np.abs', (['(Hc - Hc0)'], {}), '(Hc - Hc0)\n', (60611, 60621), True, 'import numpy as np\n'), ((60630, 60646), 'numpy.abs', 'np.abs', (['(Hb - Hb0)'], {}), '(Hb - Hb0)\n', (60636, 60646), True, 'import numpy as np\n'), ((10564, 10602), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'M[idx]'], {'rcond': 'None'}), '(A, M[idx], rcond=None)\n', (10579, 10602), True, 'import numpy as np\n'), ((11169, 11209), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'Mst[idx]'], {'rcond': 'None'}), '(A, Mst[idx], rcond=None)\n', (11184, 11209), True, 'import numpy as np\n'), ((20245, 20260), 'numpy.square', 'np.square', (['Wbar'], {}), '(Wbar)\n', (20254, 20260), True, 'import numpy as np\n'), ((58998, 59034), 'numpy.ones', 'np.ones', (['(idxEND[i] + 1 - idxSTART[i])'], {}), '(idxEND[i] + 1 - idxSTART[i])\n', (59005, 59034), True, 'import numpy as np\n'), ((59079, 59115), 'numpy.ones', 'np.ones', (['(idxEND[i] + 1 - idxSTART[i])'], {}), '(idxEND[i] + 1 - idxSTART[i])\n', (59086, 59115), True, 'import numpy as np\n'), ((59411, 59421), 'numpy.max', 'np.max', (['Fk'], {}), '(Fk)\n', (59417, 59421), True, 'import numpy as np\n'), ((10377, 10390), 'numpy.ones', 'np.ones', (['Npts'], {}), '(Npts)\n', (10384, 10390), True, 'import numpy as np\n'), ((10976, 10989), 'numpy.ones', 'np.ones', (['Npts'], {}), '(Npts)\n', (10983, 10989), 
True, 'import numpy as np\n')] |
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from evalml.exceptions import EnsembleMissingPipelinesError
from evalml.model_family import ModelFamily
from evalml.pipelines import (
BinaryClassificationPipeline,
MulticlassClassificationPipeline,
)
from evalml.pipelines.components import (
BaselineClassifier,
RandomForestClassifier,
)
from evalml.pipelines.components.ensemble import StackedEnsembleClassifier
from evalml.problem_types import ProblemTypes
def test_stacked_model_family():
    """The stacked ensemble classifier reports the ENSEMBLE model family."""
    family = StackedEnsembleClassifier.model_family
    assert family == ModelFamily.ENSEMBLE
def test_stacked_default_parameters():
    """Default parameters expose final_estimator, cv, and n_jobs only."""
    expected = {
        "final_estimator": None,
        "cv": None,
        "n_jobs": -1,
    }
    assert StackedEnsembleClassifier.default_parameters == expected
def test_stacked_ensemble_init_with_invalid_estimators_parameter():
    """Constructing with missing or empty input pipelines raises an error."""
    # First construct with no arguments at all, then with an explicit
    # empty list; both must be rejected with the same message.
    for bad_kwargs in ({}, {"input_pipelines": []}):
        with pytest.raises(
            EnsembleMissingPipelinesError, match="must not be None or an empty list."
        ):
            StackedEnsembleClassifier(**bad_kwargs)
def test_stacked_ensemble_nonstackable_model_families():
    """Base pipelines from non-stackable model families are rejected."""
    err = "Pipelines with any of the following model families cannot be used as base pipelines"
    with pytest.raises(ValueError, match=err):
        baseline = BinaryClassificationPipeline([BaselineClassifier])
        StackedEnsembleClassifier(input_pipelines=[baseline])
def test_stacked_different_input_pipelines_classification():
    """Mixing binary and multiclass base pipelines raises a ValueError."""
    mixed_pipelines = [
        BinaryClassificationPipeline([RandomForestClassifier]),
        MulticlassClassificationPipeline([RandomForestClassifier]),
    ]
    with pytest.raises(
        ValueError, match="All pipelines must have the same problem type."
    ):
        StackedEnsembleClassifier(input_pipelines=mixed_pipelines)
def test_stacked_ensemble_init_with_multiple_same_estimators(
    X_y_binary, logistic_regression_binary_pipeline_class
):
    """Passing multiple pipelines of the same type is accepted and usable."""
    X, y = X_y_binary
    # Two independent instances of the same pipeline class.
    input_pipelines = [
        logistic_regression_binary_pipeline_class(parameters={})
        for _ in range(2)
    ]
    clf = StackedEnsembleClassifier(input_pipelines=input_pipelines, n_jobs=1)
    assert clf.parameters == {
        "input_pipelines": input_pipelines,
        "final_estimator": None,
        "cv": None,
        "n_jobs": 1,
    }
    fitted = clf.fit(X, y)
    assert isinstance(fitted, StackedEnsembleClassifier)
    y_pred = clf.predict(X)
    assert len(y_pred) == len(y)
    assert not np.isnan(y_pred).all()
def test_stacked_ensemble_n_jobs_negative_one(
    X_y_binary, logistic_regression_binary_pipeline_class
):
    """n_jobs=-1 (all cores) is accepted and survives a fit/predict cycle."""
    X, y = X_y_binary
    input_pipelines = [logistic_regression_binary_pipeline_class(parameters={})]
    clf = StackedEnsembleClassifier(input_pipelines=input_pipelines, n_jobs=-1)
    assert clf.parameters == {
        "input_pipelines": input_pipelines,
        "final_estimator": None,
        "cv": None,
        "n_jobs": -1,
    }
    clf.fit(X, y)
    predictions = clf.predict(X)
    assert len(predictions) == len(y)
    assert not np.isnan(predictions).all()
@patch(
    "evalml.pipelines.components.ensemble.StackedEnsembleClassifier._stacking_estimator_class"
)
def test_stacked_ensemble_does_not_overwrite_pipeline_random_seed(
    mock_stack, logistic_regression_binary_pipeline_class
):
    """The ensemble-level random_seed must not clobber each input pipeline's seed.

    The underlying stacking estimator class is patched so the estimators the
    ensemble constructs can be inspected without actually fitting anything.
    """
    # Two pipelines with distinct, deliberately different seeds.
    input_pipelines = [
        logistic_regression_binary_pipeline_class(parameters={}, random_seed=3),
        logistic_regression_binary_pipeline_class(parameters={}, random_seed=4),
    ]
    clf = StackedEnsembleClassifier(
        input_pipelines=input_pipelines, random_seed=5, n_jobs=1
    )
    # call_args[1] is the kwargs of the patched constructor call; each entry
    # of "estimators" is a (name, estimator) pair, hence the [i][1] indexing.
    estimators_used_in_ensemble = mock_stack.call_args[1]["estimators"]
    assert clf.random_seed == 5
    assert estimators_used_in_ensemble[0][1].pipeline.random_seed == 3
    assert estimators_used_in_ensemble[1][1].pipeline.random_seed == 4
def test_stacked_ensemble_multilevel(logistic_regression_binary_pipeline_class):
    """A stacked ensemble can itself be used as another ensemble's final estimator."""
    X = pd.DataFrame(np.random.rand(50, 5))
    y = pd.Series([1, 0] * 25)

    def make_inputs():
        # Fresh single-pipeline input list for each ensemble level.
        return [logistic_regression_binary_pipeline_class(parameters={})]

    base = StackedEnsembleClassifier(input_pipelines=make_inputs(), n_jobs=1)
    clf = StackedEnsembleClassifier(
        input_pipelines=make_inputs(),
        final_estimator=base,
        n_jobs=1,
    )
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert len(y_pred) == len(y)
    assert not np.isnan(y_pred).all()
def test_stacked_problem_types():
    """Supported problem types are exactly binary/multiclass plus their time-series variants."""
    supported = StackedEnsembleClassifier.supported_problem_types
    assert ProblemTypes.BINARY in supported
    assert ProblemTypes.MULTICLASS in supported
    assert supported == [
        ProblemTypes.BINARY,
        ProblemTypes.MULTICLASS,
        ProblemTypes.TIME_SERIES_BINARY,
        ProblemTypes.TIME_SERIES_MULTICLASS,
    ]
@pytest.mark.parametrize("problem_type", [ProblemTypes.BINARY, ProblemTypes.MULTICLASS])
def test_stacked_fit_predict_classification(
    X_y_binary, X_y_multi, stackable_classifiers, problem_type
):
    """Fit/predict/predict_proba work for both binary and multiclass ensembles.

    Runs the same checks twice: once with the default final estimator and once
    with an explicit RandomForestClassifier as the final estimator.
    """
    # Pick the dataset, class count, and pipeline type for this parametrization.
    if problem_type == ProblemTypes.BINARY:
        X, y = X_y_binary
        num_classes = 2
        pipeline_class = BinaryClassificationPipeline
    elif problem_type == ProblemTypes.MULTICLASS:
        X, y = X_y_multi
        num_classes = 3
        pipeline_class = MulticlassClassificationPipeline
    # One single-component pipeline per stackable classifier fixture entry.
    input_pipelines = [
        pipeline_class([classifier]) for classifier in stackable_classifiers
    ]
    clf = StackedEnsembleClassifier(input_pipelines=input_pipelines, n_jobs=1)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert len(y_pred) == len(y)
    assert isinstance(y_pred, pd.Series)
    assert not np.isnan(y_pred).all()
    y_pred_proba = clf.predict_proba(X)
    assert isinstance(y_pred_proba, pd.DataFrame)
    assert y_pred_proba.shape == (len(y), num_classes)
    assert not np.isnan(y_pred_proba).all().all()
    # Repeat with an explicit (non-default) final estimator.
    clf = StackedEnsembleClassifier(
        input_pipelines=input_pipelines,
        final_estimator=RandomForestClassifier(),
        n_jobs=1,
    )
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert len(y_pred) == len(y)
    assert isinstance(y_pred, pd.Series)
    assert not np.isnan(y_pred).all()
    y_pred_proba = clf.predict_proba(X)
    assert y_pred_proba.shape == (len(y), num_classes)
    assert isinstance(y_pred_proba, pd.DataFrame)
    assert not np.isnan(y_pred_proba).all().all()
@pytest.mark.parametrize("problem_type", [ProblemTypes.BINARY, ProblemTypes.MULTICLASS])
@patch("evalml.pipelines.components.ensemble.StackedEnsembleClassifier.fit")
def test_stacked_feature_importance(
    mock_fit, X_y_binary, X_y_multi, stackable_classifiers, problem_type
):
    """Accessing feature_importance on a stacked ensemble raises NotImplementedError."""
    if problem_type == ProblemTypes.BINARY:
        X, y = X_y_binary
        pipeline_class = BinaryClassificationPipeline
    elif problem_type == ProblemTypes.MULTICLASS:
        X, y = X_y_multi
        pipeline_class = MulticlassClassificationPipeline
    input_pipelines = [
        pipeline_class([classifier]) for classifier in stackable_classifiers
    ]
    clf = StackedEnsembleClassifier(input_pipelines=input_pipelines, n_jobs=1)
    clf.fit(X, y)
    mock_fit.assert_called()
    # fit is mocked away, so mark the component fitted by hand before access.
    clf._is_fitted = True
    with pytest.raises(
        NotImplementedError, match="feature_importance is not implemented"
    ):
        clf.feature_importance
| [
"numpy.random.rand",
"evalml.pipelines.components.RandomForestClassifier",
"numpy.isnan",
"unittest.mock.patch",
"pytest.raises",
"pandas.Series",
"evalml.pipelines.components.ensemble.StackedEnsembleClassifier",
"pytest.mark.parametrize",
"evalml.pipelines.MulticlassClassificationPipeline",
"eval... | [((3391, 3498), 'unittest.mock.patch', 'patch', (['"""evalml.pipelines.components.ensemble.StackedEnsembleClassifier._stacking_estimator_class"""'], {}), "(\n 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier._stacking_estimator_class'\n )\n", (3396, 3498), False, 'from unittest.mock import patch\n'), ((5268, 5360), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""problem_type"""', '[ProblemTypes.BINARY, ProblemTypes.MULTICLASS]'], {}), "('problem_type', [ProblemTypes.BINARY, ProblemTypes.\n MULTICLASS])\n", (5291, 5360), False, 'import pytest\n'), ((6822, 6914), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""problem_type"""', '[ProblemTypes.BINARY, ProblemTypes.MULTICLASS]'], {}), "('problem_type', [ProblemTypes.BINARY, ProblemTypes.\n MULTICLASS])\n", (6845, 6914), False, 'import pytest\n'), ((6911, 6986), 'unittest.mock.patch', 'patch', (['"""evalml.pipelines.components.ensemble.StackedEnsembleClassifier.fit"""'], {}), "('evalml.pipelines.components.ensemble.StackedEnsembleClassifier.fit')\n", (6916, 6986), False, 'from unittest.mock import patch\n'), ((2321, 2389), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines', 'n_jobs': '(1)'}), '(input_pipelines=input_pipelines, n_jobs=1)\n', (2346, 2389), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((2999, 3068), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines', 'n_jobs': '(-1)'}), '(input_pipelines=input_pipelines, n_jobs=-1)\n', (3024, 3068), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((3825, 3912), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines', 'random_seed': '(5)', 'n_jobs': '(1)'}), '(input_pipelines=input_pipelines, 
random_seed=5,\n n_jobs=1)\n', (3850, 3912), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((4376, 4398), 'pandas.Series', 'pd.Series', (['([1, 0] * 25)'], {}), '([1, 0] * 25)\n', (4385, 4398), True, 'import pandas as pd\n'), ((5889, 5957), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines', 'n_jobs': '(1)'}), '(input_pipelines=input_pipelines, n_jobs=1)\n', (5914, 5957), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((7474, 7542), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines', 'n_jobs': '(1)'}), '(input_pipelines=input_pipelines, n_jobs=1)\n', (7499, 7542), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((884, 977), 'pytest.raises', 'pytest.raises', (['EnsembleMissingPipelinesError'], {'match': '"""must not be None or an empty list."""'}), "(EnsembleMissingPipelinesError, match=\n 'must not be None or an empty list.')\n", (897, 977), False, 'import pytest\n'), ((996, 1023), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {}), '()\n', (1021, 1023), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((1033, 1126), 'pytest.raises', 'pytest.raises', (['EnsembleMissingPipelinesError'], {'match': '"""must not be None or an empty list."""'}), "(EnsembleMissingPipelinesError, match=\n 'must not be None or an empty list.')\n", (1046, 1126), False, 'import pytest\n'), ((1145, 1190), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': '[]'}), '(input_pipelines=[])\n', (1170, 1190), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((1259, 1387), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {'match': '"""Pipelines with any of the following model families cannot be used as base pipelines"""'}), "(ValueError, match=\n 'Pipelines with any of the following model families cannot be used as base pipelines'\n )\n", (1272, 1387), False, 'import pytest\n'), ((1623, 1677), 'evalml.pipelines.BinaryClassificationPipeline', 'BinaryClassificationPipeline', (['[RandomForestClassifier]'], {}), '([RandomForestClassifier])\n', (1651, 1677), False, 'from evalml.pipelines import BinaryClassificationPipeline, MulticlassClassificationPipeline\n'), ((1687, 1745), 'evalml.pipelines.MulticlassClassificationPipeline', 'MulticlassClassificationPipeline', (['[RandomForestClassifier]'], {}), '([RandomForestClassifier])\n', (1719, 1745), False, 'from evalml.pipelines import BinaryClassificationPipeline, MulticlassClassificationPipeline\n'), ((1762, 1848), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""All pipelines must have the same problem type."""'}), "(ValueError, match=\n 'All pipelines must have the same problem type.')\n", (1775, 1848), False, 'import pytest\n'), ((1867, 1925), 'evalml.pipelines.components.ensemble.StackedEnsembleClassifier', 'StackedEnsembleClassifier', ([], {'input_pipelines': 'input_pipelines'}), '(input_pipelines=input_pipelines)\n', (1892, 1925), False, 'from evalml.pipelines.components.ensemble import StackedEnsembleClassifier\n'), ((4345, 4366), 'numpy.random.rand', 'np.random.rand', (['(50)', '(5)'], {}), '(50, 5)\n', (4359, 4366), True, 'import numpy as np\n'), ((7625, 7711), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""feature_importance is not implemented"""'}), "(NotImplementedError, match=\n 'feature_importance is not implemented')\n", (7638, 7711), False, 'import pytest\n'), ((6415, 6439), 'evalml.pipelines.components.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (6437, 6439), False, 'from evalml.pipelines.components import BaselineClassifier, 
RandomForestClassifier\n'), ((2753, 2769), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (2761, 2769), True, 'import numpy as np\n'), ((3365, 3381), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (3373, 3381), True, 'import numpy as np\n'), ((4814, 4830), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (4822, 4830), True, 'import numpy as np\n'), ((6093, 6109), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (6101, 6109), True, 'import numpy as np\n'), ((6600, 6616), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (6608, 6616), True, 'import numpy as np\n'), ((1466, 1516), 'evalml.pipelines.BinaryClassificationPipeline', 'BinaryClassificationPipeline', (['[BaselineClassifier]'], {}), '([BaselineClassifier])\n', (1494, 1516), False, 'from evalml.pipelines import BinaryClassificationPipeline, MulticlassClassificationPipeline\n'), ((6277, 6299), 'numpy.isnan', 'np.isnan', (['y_pred_proba'], {}), '(y_pred_proba)\n', (6285, 6299), True, 'import numpy as np\n'), ((6784, 6806), 'numpy.isnan', 'np.isnan', (['y_pred_proba'], {}), '(y_pred_proba)\n', (6792, 6806), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import argparse
# Load a saved reward history for the requested phase and visualize it.
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', type=str, required=True,
                    help='either "train" or "test"')
args = parser.parse_args()
# Rewards are stored one .npy file per mode under this fixed directory.
a = np.load(f'APP4_RL_StockTrader_rewards/{args.mode}.npy')
print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}")
if args.mode == 'train':
    # show the training progress as a reward-per-episode curve
    plt.plot(a)
else:
    # test - show a histogram of rewards
    plt.hist(a, bins=20)
plt.title(args.mode)
plt.show() | [
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist"
] | [((77, 102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (100, 102), False, 'import argparse\n'), ((249, 304), 'numpy.load', 'np.load', (['f"""APP4_RL_StockTrader_rewards/{args.mode}.npy"""'], {}), "(f'APP4_RL_StockTrader_rewards/{args.mode}.npy')\n", (256, 304), True, 'import numpy as np\n'), ((535, 555), 'matplotlib.pyplot.title', 'plt.title', (['args.mode'], {}), '(args.mode)\n', (544, 555), True, 'import matplotlib.pyplot as plt\n'), ((556, 566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (564, 566), True, 'import matplotlib.pyplot as plt\n'), ((450, 461), 'matplotlib.pyplot.plot', 'plt.plot', (['a'], {}), '(a)\n', (458, 461), True, 'import matplotlib.pyplot as plt\n'), ((513, 533), 'matplotlib.pyplot.hist', 'plt.hist', (['a'], {'bins': '(20)'}), '(a, bins=20)\n', (521, 533), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
#from intervalt import *
import sys
def __MAX__(x, y):
if x>y: return x
else: return y
class NNett:
    """A fully-connected ReLU network evaluated layer by layer.

    ``_weights[i][k][j]`` is the weight from neuron k of layer i to neuron j
    of layer i+1; ``_biases[i][j]`` is the bias of neuron j of layer i+1.
    """

    def __init__(self, _weights, _biases):
        self.weights = _weights
        self.biases = _biases
        # Pad the two lists so weights[i] / biases[i] line up per layer:
        # the output layer has no outgoing weights, the input layer no biases.
        self.weights.append([])
        self.biases = [[]] + self.biases

    def eval(self, X):
        """Forward-propagate input vector ``X``.

        Returns ``(label, act)`` where ``label`` is the argmax over the final
        layer's pre-activation values and ``act[i][j]`` is the (pre-ReLU)
        value of neuron j at layer i. ReLU is applied when a value is *read*
        from the previous layer, so the stored activations are unrectified.
        """
        act = [X]
        n_layers = len(self.weights)
        for layer in range(1, n_layers):
            prev = act[layer - 1]
            w = self.weights[layer - 1]
            bias = self.biases[layer]
            n_out = len(w[0])
            values = []
            for out_idx in range(n_out):
                total = 0
                for in_idx in range(len(w)):
                    # ReLU the incoming value before weighting it.
                    total += __MAX__(prev[in_idx], 0) * w[in_idx][out_idx]
                total += bias[out_idx]
                values.append(total)
            act.append(values)
        label = np.argmax(act[n_layers - 1])
        return label, act
## X are interval inputs
#def intervalt_eval(self, X):
# # act[i][j] is the activation value (after ReLU) of the j-th neuron at the i-th layer
# act=[]
# act.append(X) # X is the input vector to be evaluated
# N=len(self.weights) # N is the #layers
# for i in range(1, N):
# act.append([])
# M=len(self.weights[i-1][0]) # M is the #neurons at layer (i+1)
# # to compute the activation value for each neuron at layer i
# for j in range(0, M):
# val=intervalt(0) # the activation value is the weighted sum of input from previous layer, plus the bias
# for k in range(0, len(self.weights[i-1])):
# #val+=act[i-1][k] * self.weights[i-1][k][j]
# val=intervalt_add(val, intervalt_times_const(act[i-1][k], self.weights[i-1][k][j]))
# #val+=self.biases[i][j]
# val=intervalt_add_const(val, self.biases[i][j])
# if i<N-1: # ReLU
# val=intervalt_relu(val)
# act[i].append(val)
# #label=np.argmax(act[N-1])
# #return label, act
# return act
#### convolutional neural networks (cnn)
#
#class Layert:
# def __init__(self, _w, _b, _is_conv=False, _mp_size=0):
# self.w=_w
# self.b=_b
# self.is_conv=_is_conv
# self.mp_size_x=_mp_size
# self.mp_size_y=_mp_size
#
#class CNNett:
# def __init__(self, _hidden_layers):
# self.hidden_layers=_hidden_layers ## hidden layers
#
# def eval(self, X):
# ### X shall be an array ==> 28x28
# #X=np.reshape(X, (28, 28))
# X=X.reshape(28, 28)
#
# N=len(self.hidden_layers)+1
#
# ## the final activation vector
# ## act shall be a vector of 'arrays'
# act=[]
#
# act.append(np.array([X])) ## input layer
# index=0
#
# ## to propagate through each hidden layer
# for layer in self.hidden_layers:
# print 'We are at layer {0}'.format(index+1)
# if layer.is_conv: ## is convolutional layer
# nf=len(layer.w) ## number of filters
# print '** number of filters: {0}'.format(nf)
# conv_act=[] ## conv_act contains these filtered activations
# ## to apply each filter
# for i in range(0, nf):
# _nf=len(act[index]) ## number of filter from the preceding layer
# #print '**** number of preceding filters: {0}'.format(_nf)
# #acts_for_mp=[]
# ## there may be multiple filtered pieces from last layer
# nr=act[index][0].shape[0] # number of rows
# nc=act[index][0].shape[1] # number of columns
# nfr=layer.w[i][0].shape[0] # number of filter rows
# nfc=layer.w[i][0].shape[1] # number of filter columns
# f_act=np.zeros((nr-nfr+1,nc-nfc+1))
# for J in range(0, f_act.shape[0]):
# for K in range(0, f_act.shape[1]):
#
# for j in range(0, _nf):
# ## act[index][j] is the input
# a=act[index][j]
#
# for l in range(0, nfr):
# for m in range(0, nfc):
# f_act[J][K]+=layer.w[i][j][l][m]*a[J+l][K+m]
# f_act[J][K]+=layer.b[i]
# if f_act[J][K]<0: f_act[J][K]=0
#
# #########
# #acts_for_mp.append(np.array(f_act))
#
# ### max-pool
# nr=f_act.shape[0]
# nc=f_act.shape[1]
# #### shape after max-pooling
# p_act=np.zeros((nr/layer.mp_size_x, nc/layer.mp_size_y))
# for I in range(0, p_act.shape[0]):
# for J in range(0, p_act.shape[1]):
# ##########
# for ii in range(layer.mp_size_x*I, layer.mp_size_x*(I+1)):
# for jj in range(layer.mp_size_y*J, layer.mp_size_y*(J+1)):
# if f_act[ii][jj]> p_act[I][J]: p_act[I][J]=f_act[ii][jj]
# conv_act.append(np.array(p_act))
# #conv_act=np.array(conv_act) ## ==> array
# act.append(np.array(conv_act))
# else: ## fully connected layer
# a=act[index] # the preceeding layer
# print '*** shape: {0}'.format(a.shape)
# print '*** w shape: {0}'.format(layer.w.shape)
# nr=layer.w.shape[0]
# nc=layer.w.shape[1]
# ### reshape
# aa=a.reshape(1, nr)
#
# this_act=np.zeros((1,nc))
# for I in range(0, nc):
# for II in range(0, nr):
# this_act[0][I]+=aa[0][II]*layer.w[II][I]
# this_act[0][I]+=layer.b[I]
# if index < N-2 and this_act[0][I]<0: this_act[0][I]=0
# act.append(np.array(this_act))
# ### next layer
# index+=1
#
# label=np.argmax(act[index][0])
# print act[index][0]
# print 'label is {0}'.format(label)
# return label, act
| [
"numpy.argmax"
] | [((1129, 1150), 'numpy.argmax', 'np.argmax', (['act[N - 1]'], {}), '(act[N - 1])\n', (1138, 1150), True, 'import numpy as np\n')] |
# encoding=utf8
# 基于Doc2vec训练句子向量 https://zhuanlan.zhihu.com/p/36886191
import jieba
import numpy as np
from bert4keras.backend import keras
from bert4keras.models import build_transformer_model
def ___():
    # NOTE(review): opaque placeholder that only invokes
    # build_transformer_model and discards the result — presumably it exists
    # to keep the import referenced. Confirm intent before removing.
    build_transformer_model()
from bert4keras.tokenizers import Tokenizer
from numpy.linalg import linalg
__all__ = [
]
g_model = None
g_tokenizer = None
def load_model(model_file, bert_model_path, do_lower_case):
    """Load the Keras model and BERT tokenizer into the module globals.

    :param model_file: path to the saved Keras model file
    :param bert_model_path: directory containing the BERT assets (vocab.txt)
    :param do_lower_case: whether the tokenizer lower-cases input text
    """
    global g_model, g_tokenizer
    g_model = keras.models.load_model(model_file)
    # print(model.predict([np.array([token_ids]), np.array([segment_ids])]))
    dict_path = '%s/vocab.txt' % bert_model_path
    g_tokenizer = Tokenizer(dict_path, do_lower_case=do_lower_case)  # build the tokenizer
# Requirements
def gen_doc_embedding(text):
    """Encode ``text`` into a unit-norm sentence embedding.

    The original documentation string sat *after* the ``global`` statement,
    making it a no-op bare string rather than a real docstring; it is moved
    here (and translated).

    :param text: input text (decoded via jieba.strdecode)
    :return: L2-normalized vector; note it cannot be guaranteed to be float32
        because float32 does not exist as a plain Python type — the actual
        value is a numpy array with dtype float32.
    """
    global g_tokenizer, g_model
    text = jieba.strdecode(text)
    token_ids, segment_ids = g_tokenizer.encode(text)
    vec = g_model.predict([np.array([token_ids]), np.array([segment_ids])], batch_size=1)
    vec = vec[0][-2]  # use the second-to-last hidden layer as the sentence vector
    # Normalize so cosine similarity reduces to a dot product.
    vec = vec / linalg.norm(vec)
    return vec
return vec
def main():
    """Smoke-test: load the model, embed one sample sentence, print the vector."""
    model_file = '../data/bert_model.bin'
    bert_model_path = '../data/multi_cased_L-12_H-768_A-12'
    # The multi-cased BERT checkpoint is case-sensitive, hence do_lower_case=False.
    load_model(model_file, bert_model_path, do_lower_case=False)
    print("load model finish")
    text = "谁的高清图"
    vec = gen_doc_embedding(text)
    print(vec)
if __name__ == '__main__':
    main()
| [
"bert4keras.models.build_transformer_model",
"bert4keras.tokenizers.Tokenizer",
"numpy.linalg.linalg.norm",
"numpy.array",
"jieba.strdecode",
"bert4keras.backend.keras.models.load_model"
] | [((212, 237), 'bert4keras.models.build_transformer_model', 'build_transformer_model', ([], {}), '()\n', (235, 237), False, 'from bert4keras.models import build_transformer_model\n'), ((474, 509), 'bert4keras.backend.keras.models.load_model', 'keras.models.load_model', (['model_file'], {}), '(model_file)\n', (497, 509), False, 'from bert4keras.backend import keras\n'), ((654, 703), 'bert4keras.tokenizers.Tokenizer', 'Tokenizer', (['dict_path'], {'do_lower_case': 'do_lower_case'}), '(dict_path, do_lower_case=do_lower_case)\n', (663, 703), False, 'from bert4keras.tokenizers import Tokenizer\n'), ((930, 951), 'jieba.strdecode', 'jieba.strdecode', (['text'], {}), '(text)\n', (945, 951), False, 'import jieba\n'), ((1148, 1164), 'numpy.linalg.linalg.norm', 'linalg.norm', (['vec'], {}), '(vec)\n', (1159, 1164), False, 'from numpy.linalg import linalg\n'), ((1033, 1054), 'numpy.array', 'np.array', (['[token_ids]'], {}), '([token_ids])\n', (1041, 1054), True, 'import numpy as np\n'), ((1056, 1079), 'numpy.array', 'np.array', (['[segment_ids]'], {}), '([segment_ids])\n', (1064, 1079), True, 'import numpy as np\n')] |
import sys
import numpy as np
'''
(1)
对于字符串z 和 y,index i, j分别从0开始指示z和y的字符,lcs(i, j)表示 z(i, ... len(z)和y(j, ..., len(y))的最长公共子串,
存在如下递归计算式:
如果 z(i) == y(j),那么lcs(i, j) = z(i) + lcs(i+1, j+1);
否则,即z(i) != y(j), 那么lcs(i, j) = max(lcs(i, j+1), lcs(i+1, j))
(2)
注意到,lcs(i, j+1) 和lcs(i+1, j)的子调用中存在大量重复的计算,因此,符合动态规划的子问题分解和共享的特点
同时,如果存在lcs(i, j+1) == lcs(i+1, j)的情况,说明z和y的lcs不止一种
(3)
下面的实现中,其实是用了查表的实现方式,即先查表,如果计算过了,则直接返回结果
(4)
因此,lcs的时间复杂度其实是O(mn), m,n 分别是两个字符串的长度,因为每个字符串中的一个字符,都和另一个字符串中的每个字符必须且仅比较一次
(5)
打印出lcs,打印出lcs是逆向算法过程。依照lcs(i, j)最大的子问题 max(lcs(i+1, j), lcs(i, j+1)),逆向找;
当lcs(i, j) > max(lcs(i+1, j), lcs(i, j+1))时,说明z(i) = y(j)是lcs的一个字符
'''
class Alg_Lcs(object):
    """Longest common subsequence via memoized recursion.

    ``record[i, j]`` caches the LCS length of ``str1[i:]`` vs ``str2[j:]``
    (-1 marks an unvisited cell), giving O(m*n) time overall.
    """

    def __init__(self):
        self.MAX_STR_SIZE = 1000
        # Memo table sized for the maximum supported string length.
        self.record = np.zeros((1000, 1000)) - 1
        self.s_lcs = ""

    def lcs(self, str1, str2, i, j):
        """Return the LCS length of str1[i:] and str2[j:] (memoized).

        Returns -1 if either string exceeds MAX_STR_SIZE, 0 for out-of-range
        or exhausted indices.
        """
        if len(str1) > self.MAX_STR_SIZE or len(str2) > self.MAX_STR_SIZE:
            sys.stderr.write("Input String Size is Over MAX_STR_SIZE, %s" % (self.MAX_STR_SIZE))
            return -1
        if i < 0 or j < 0:
            sys.stderr.write("Index Out of Range {:d}, {:d}".format(i, j))
            return 0
        if i >= len(str1) or j >= len(str2):
            return 0
        if self.record[i, j] != -1:
            return self.record[i, j]
        if str1[i] == str2[j]:
            # Matching characters extend the LCS of the remaining suffixes.
            self.record[i, j] = 1 + self.lcs(str1, str2, i+1, j+1)
        else:
            self.record[i, j] = max(self.lcs(str1, str2, i, j+1), self.lcs(str1, str2, i+1, j))
        return self.record[i, j]

    def get_lcs(self, str1, str2):
        """Compute, store in ``self.s_lcs``, and print one LCS of the inputs."""
        # BUG FIX: reset per-call state. Previously a second get_lcs() call on
        # the same instance reused a stale memo table and kept appending to
        # s_lcs, producing wrong results.
        self.record.fill(-1)
        self.s_lcs = ""
        self.lcs(str1, str2, 0, 0)
        # Walk the memo table forward: a cell strictly greater than both
        # "skip one character" neighbours marks a matched LCS character.
        i = 0
        j = 0
        while i < len(str1) and j < len(str2) and self.record[i, j] > 0:
            if self.record[i, j] > max(self.record[i+1, j], self.record[i, j+1]):
                self.s_lcs += str1[i]
                i += 1
                j += 1
            elif self.record[i, j] == self.record[i+1, j]:
                i += 1
            elif self.record[i, j] == self.record[i, j+1]:
                j += 1
        print("The Longest Common String {:s} and {:s} is ###{:s}### with length {:f}".format(str1, str2, self.s_lcs, self.record[0, 0]))
if __name__ == '__main__':
    # Demo: print one LCS of two short sample strings.
    ins = Alg_Lcs()
    #string1 = "abcdsgshhdfghgdfjjl;jdlgj;dfjhj;jfdj"
    #string2 = "becfdsgrgjsjhlasjgglkfsk'hfdglkfjgfdgfsj"
    string1 = "abcdds"
    string2 = "acf"
    ins.get_lcs(string1, string2)
| [
"sys.stderr.write",
"numpy.zeros"
] | [((757, 779), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {}), '((1000, 1000))\n', (765, 779), True, 'import numpy as np\n'), ((933, 1020), 'sys.stderr.write', 'sys.stderr.write', (["('Input String Size is Over MAX_STR_SIZE, %s' % self.MAX_STR_SIZE)"], {}), "('Input String Size is Over MAX_STR_SIZE, %s' % self.\n MAX_STR_SIZE)\n", (949, 1020), False, 'import sys\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.